MNAIO: Copy SSH keys into containers if they are present
When starting an imaged environment, the SSH keys on the host do not match the SSH keys in the containers. In this patch we ensure that if the 'all_containers' group is present, then the deploy-vms playbook will wait for them to come up and copy the new keys into them. It also adds the new public key into the authorized_keys. This ensures that the whole environment is ready to be used just as it was before imaging. We also remove the 'port' argument given to the wait_for_connection module because the argument is invalid. Change-Id: Iff0a3327c2031e0dd977e8e403b417e495fee14f
This commit is contained in:
parent
bff190f00f
commit
33d22c552e
@ -224,12 +224,12 @@
|
||||
import_playbook: vm-status.yml
|
||||
|
||||
|
||||
- name: VM Host Setup
|
||||
hosts: vm_servers
|
||||
- name: Add SSH keys to VMs and containers
|
||||
hosts: vm_servers:all_containers
|
||||
gather_facts: false
|
||||
any_errors_fatal: true
|
||||
tasks:
|
||||
- name: Copy Host Keys
|
||||
- name: Copy Host SSH Keys
|
||||
copy:
|
||||
src: "{{ item.src }}"
|
||||
dest: "{{ item.dest }}"
|
||||
@ -240,6 +240,12 @@
|
||||
- src: "{{ lookup('env', 'HOME') }}/.ssh/id_rsa.pub"
|
||||
dest: /root/.ssh/id_rsa.pub
|
||||
|
||||
- name: Add authorized key
|
||||
authorized_key:
|
||||
user: root
|
||||
state: present
|
||||
key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
|
||||
|
||||
|
||||
# In vm-post-install-script.sh.j2 we chattr +i the interfaces file to prevent
|
||||
# the preseed system from overwriting the file after we've modified it. The
|
||||
|
@ -18,7 +18,7 @@
|
||||
connection: local
|
||||
gather_facts: false
|
||||
tasks:
|
||||
- name: VM Servers group
|
||||
- name: Create vm_servers group
|
||||
add_host:
|
||||
name: "{{ item }}"
|
||||
groups: vm_servers
|
||||
@ -35,7 +35,6 @@
|
||||
- name: Wait for VM
|
||||
wait_for_connection:
|
||||
connect_timeout: 10
|
||||
port: 22
|
||||
sleep: 20
|
||||
timeout: "{{ vm_ssh_timeout }}"
|
||||
rescue:
|
||||
@ -45,34 +44,65 @@
|
||||
name: "{{ inventory_hostname }}"
|
||||
connection: local
|
||||
register: vm_info
|
||||
|
||||
- name: Stop VM (rescue)
|
||||
virt:
|
||||
command: destroy
|
||||
name: "{{ inventory_hostname }}"
|
||||
connection: local
|
||||
when: vm_info.status == 'running'
|
||||
|
||||
- name: Start VM (rescue)
|
||||
virt:
|
||||
command: start
|
||||
name: "{{ inventory_hostname }}"
|
||||
connection: local
|
||||
|
||||
- name: Wait for VM (rescue)
|
||||
wait_for_connection:
|
||||
connect_timeout: 10
|
||||
port: 22
|
||||
sleep: 20
|
||||
timeout: "{{ vm_ssh_timeout }}"
|
||||
register: vm_rescue
|
||||
ignore_errors: true
|
||||
|
||||
- name: Gather VM info 2nd pass (rescue)
|
||||
virt:
|
||||
command: status
|
||||
name: "{{ inventory_hostname }}"
|
||||
connection: local
|
||||
register: vm_info_2
|
||||
|
||||
- name: Fail if VM still offline (rescue)
|
||||
fail:
|
||||
msg: "{{ inventory_hostname }} is not responding and cannot be rescued"
|
||||
when:
|
||||
- vm_info_2.status != 'running'
|
||||
- vm_rescue.failed == 'true'
|
||||
|
||||
- name: Refresh the inventory
|
||||
hosts: localhost
|
||||
connection: local
|
||||
gather_facts: false
|
||||
tasks:
|
||||
- name: Refresh the inventory
|
||||
meta: refresh_inventory
|
||||
|
||||
- name: Create vm_servers group
|
||||
add_host:
|
||||
name: "{{ item }}"
|
||||
groups: vm_servers
|
||||
when:
|
||||
- (hostvars[item]['server_vm'] | default(false)) | bool
|
||||
with_items: "{{ groups['pxe_servers'] }}"
|
||||
|
||||
- name: Container Status
|
||||
hosts: all_containers
|
||||
gather_facts: false
|
||||
tasks:
|
||||
- name: Wait for container connectivity
|
||||
wait_for_connection:
|
||||
connect_timeout: 10
|
||||
delay: 3
|
||||
sleep: 20
|
||||
timeout: "{{ vm_ssh_timeout }}"
|
||||
|
Loading…
Reference in New Issue
Block a user