---
- name: populate inventory
  hosts: localhost
  remote_user: ansible
  # become: yes
  gather_facts: false

  tasks:

    ######## wipe inventory to ensure this playbook only uses its own dynamically generated variables

    - name: refresh inventory
      ansible.builtin.meta: refresh_inventory
    ######## load core group_vars
    #
    # load the following core environment files under vars['testbench']
    # - inventory/group_vars/cluster.yml
    # - inventory/group_vars/hypervisor.yml
    # - inventory/group_vars/networks.yml

    - name: load core environment configuration
      block:

        - name: set runtime facts
          ansible.builtin.set_fact:
            _env_files:
              - 'cluster.yml'
              - 'hypervisor.yml'
              - 'networks.yml'
            _env_dir: "{{ ansible_inventory_sources[0] | dirname }}/group_vars"
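            # e.g. with an inventory file at inventory/hosts.yml (illustrative path), _env_dir resolves to inventory/group_vars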
            config_namespace: "testbench"

        - name: include vars from core config files
          ansible.builtin.include_vars:
            file: "{{ env_path }}"
            name: "env_import_{{ env_namespace }}"
          loop: "{{ _env_files }}"
          loop_control:
            loop_var: entry
          vars:
            env_path: "{{ _env_dir }}/{{ entry }}"
            env_namespace: "{{ entry.split('.yml')[0] }}"
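          # this produces env_import_cluster, env_import_hypervisor and env_import_networks for the varnames lookup below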

        - name: append env vars to temp dict
          ansible.builtin.set_fact:
            _env_dict: "{{ _env_dict | default({}) | combine(env_import, recursive=True) }}"
          loop: "{{ lookup('ansible.builtin.varnames', 'env_import_').split(',') }}"
          loop_control:
            loop_var: entry
          vars:
            env_import: "{{ vars[entry] }}"
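          # lookup() joins the matching variable names into a single comma-separated string, hence the split(',')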

        - name: copy dict of env vars under top-level namespace, access at vars[config_namespace]
          ansible.builtin.set_fact:
            { "{{ config_namespace }}": "{{ _env_dict }}" }

    # think i only need to include hypervisor.yml here - it looks nicer to include a small set of vars
    # and then reference them directly at the top level rather than under config_namespace
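    # a minimal sketch of that alternative (assumes hypervisor.yml defines a top-level 'hypervisor' key):
    # - name: include hypervisor vars at the top level
    #   ansible.builtin.include_vars:
    #     file: "{{ _env_dir }}/hypervisor.yml"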

    ######## populate arp cache, find dhcp ip of hypervisor and add to inventory

    # uncomment if the arp cache is stale; this is slow, so keep it commented out during dev
    # - name: populate arp cache
    #   ansible.builtin.command: nmap -sn {{ range }}
    #   vars:
    #     dhcp_network: "{{ vars[config_namespace]['hypervisor']['nmcli_con_names']['primary'] }}"
    #     network: "{{ vars[config_namespace]['hypervisor']['cluster_networks'][dhcp_network]['network'] }}"
    #     netmask: "{{ vars[config_namespace]['hypervisor']['cluster_networks'][dhcp_network]['netmask'] }}"
    #     range: "{{ network }}/{{ (network + '/' + netmask) | ansible.utils.ipaddr('prefix') }}"

    # WSL2-specific method: read the Windows host's arp cache via arp.exe
    - name: get arp table
      ansible.builtin.command: '/mnt/c/Windows/system32/arp.exe -a'
      register: _arp_cache
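    # for reference, data lines in the arp.exe output look roughly like (values illustrative):
    #   192.168.1.23          00-15-5d-aa-bb-cc     dynamic
    # i.e. ip, dash-separated mac and type split by whitespace, which is what the parsing below relies on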

    # parse the windows arp.exe output, build an updated mac_map that includes each host's dhcp ip
    - name: find dhcp ip
      ansible.builtin.set_fact:
        _update_mac_map: "{{ _update_mac_map | default([]) + [new_record] }}"
      loop: "{{ _arp_cache['stdout_lines'] }}"
      loop_control:
        loop_var: entry
      vars:
        check_record: "{{ entry | trim | regex_search('^[0-9]+') is not none }}"
        format_record: "{{ entry | trim | regex_replace('\\s+', ',') | split(',') }}"
        dhcp_ip: "{{ format_record[0] }}"
        arp_mac: "{{ format_record[1] | regex_replace('-', ':') }}"
        mac_map: "{{ vars[config_namespace]['hypervisor']['mac_map'] }}"
        match_host: "{{ mac_map | selectattr('mac', '==', arp_mac) | map(attribute='host') | list }}"
        match_ip: "{{ mac_map | selectattr('mac', '==', arp_mac) | map(attribute='ip') | list }}"
        ipv6_link_local: "{{ 'fe80::0000:0000:0000:0000' | ansible.utils.slaac(arp_mac) }}"
        nmcli_con: "{{ mac_map | selectattr('mac', '==', arp_mac) | map(attribute='nmcli_con') | list }}"
        new_record: "{{ { 'host': match_host[0], 'mac': arp_mac, 'dhcp_ip': dhcp_ip, 'ip': match_ip[0], 'ipv6': ipv6_link_local, 'nmcli_con': nmcli_con[0] } }}"
      when:
        - check_record
        - match_host | length > 0
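      # the source mac_map entries in group_vars need at least 'host', 'mac', 'ip' and 'nmcli_con' keys,
      # since those are the attributes the selectattr/map lookups above rely on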

    - name: fail when insufficient hosts matched, check mac_map
      ansible.builtin.fail:
        msg: insufficient hosts matched in the arp cache - check mac_map in group_vars
      when: _update_mac_map is not defined or _update_mac_map | length < 2

    # sort so that the first host in mac_map gets the first vxlan ip; initially the arp cache dictates
    # the order in which hosts are discovered
    - name: sort mac_map
      ansible.builtin.set_fact:
        _sort_mac_map: "{{ _sort_mac_map | default([]) + mac_map_entry }}"
      loop: "{{ vars[config_namespace]['hypervisor']['mac_map'] }}"
      loop_control:
        loop_var: entry
      vars:
        host: "{{ entry['host'] }}"
        mac_map_entry: "{{ _update_mac_map | selectattr('host', '==', host) | list }}"

    - name: write global mac map
      ansible.builtin.set_fact:
        # mac_map: "{{ _update_mac_map }}"
        mac_map: "{{ _sort_mac_map }}"
      delegate_to: localhost
      delegate_facts: true

    ######## update the in-memory inventory with the hypervisors

    - name: add hosts to in-memory inventory
      ansible.builtin.add_host:
        name: "{{ host }}"
        groups: "{{ host_groups }}"
        ansible_ssh_host: "{{ ansible_ssh_host }}"
        ansible_ssh_common_args: '-o "UserKnownHostsFile=/dev/null" -o "StrictHostKeyChecking=no"'
        ansible_user: "{{ ansible_user }}"
        ansible_password: "{{ ansible_password }}"
      loop: "{{ hostvars['localhost']['mac_map'] }}"
      loop_control:
        loop_var: entry
      vars:
        host: "{{ entry['host'] }}"
        # set host group membership, groups are auto-created
        host_groups:
          - all
          - hypervisor
          - ceph
        ansible_ssh_host: "{{ entry['dhcp_ip'] }}"
        ansible_user: "{{ vars[config_namespace]['hypervisor']['ssh_user'] }}"
        ansible_password: "{{ vars[config_namespace]['hypervisor']['ssh_password'] }}"
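      # optional sanity check (sketch) - the dynamically added hosts should now show up in groups['hypervisor']:
      # - ansible.builtin.debug:
      #     var: groups['hypervisor']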

######## bootstrap hypervisors

- name: run roles on hypervisors
  hosts: hypervisor
  gather_facts: true
  tasks:

    ######## load core group_vars
    #
    # load the following core environment files under vars['testbench']
    # - inventory/group_vars/cluster.yml
    # - inventory/group_vars/hypervisor.yml
    # - inventory/group_vars/networks.yml

    - name: load core environment configuration
      block:

        # roles:
        #   hypervisor_network  - set up interfaces
        #   hypervisor_vxlan    - set up overlay networks - we also want to add ceph_public and ceph_cluster, so an overlay should be created here
        #   hypervisor_ceph     - great reference: https://github.com/jcmdln/cephadm-playbook
        #   hypervisor_qemu     - not written yet
        #   hypervisor_qemu_gui - not written yet; a qt5 web container for virt-manager that accepts qemu api endpoints over ssh as ENV vars would fit here
        #
        # still needed: a role to replace nested dict items - it maybe needs to accept a dict as the path

        - name: set runtime facts
          ansible.builtin.set_fact:
            _run_roles:
              # - hypervisor_network
              # - ntp
              # - os_packages
              # - hypervisor_prep
              # - hypervisor_vxlan
              # - cephadm_prep
              # - cephadm_bootstrap
              - cephadm_services
            _env_dir: "{{ ansible_inventory_sources[0] | dirname }}/group_vars"
            _env_files:
              - 'cluster.yml'
              - 'hypervisor.yml'
              - 'networks.yml'
            config_namespace: "testbench"

        - name: include vars from core config files
          ansible.builtin.include_vars:
            file: "{{ env_path }}"
            name: "env_import_{{ env_namespace }}"
          loop: "{{ _env_files }}"
          loop_control:
            loop_var: entry
          vars:
            env_path: "{{ _env_dir }}/{{ entry }}"
            env_namespace: "{{ entry.split('.yml')[0] }}"

        - name: append env vars to temp dict
          ansible.builtin.set_fact:
            _env_dict: "{{ _env_dict | default({}) | combine(env_import, recursive=True) }}"
          loop: "{{ lookup('ansible.builtin.varnames', 'env_import_').split(',') }}"
          loop_control:
            loop_var: entry
          vars:
            env_import: "{{ vars[entry] }}"

        - name: copy dict of env vars under top-level namespace, access at vars[config_namespace]
          ansible.builtin.set_fact:
            { "{{ config_namespace }}": "{{ _env_dict }}" }

    ######## set some global variables used by roles for (vm) cluster node provisioning; if these roles
    ######## are to be reused when bootstrapping the hypervisors, some static values will be required

    # this needs to loop over hypervisor.cluster_networks but exclude primary/external during vxlan creation

    # - ansible.builtin.debug:
    #     msg:
    #       - "{{ groups }}"
    #       - "{{ ['all'] + hostvars[inventory_hostname]['group_names'] }}"

    # - ansible.builtin.fail:
    #     msg:

    - name: populate the active_role_groups variable, add ceph_cluster network for vxlan creation
      ansible.builtin.set_fact:
        # active_role_groups: ['all', 'hypervisor', 'ceph'] # this should be a copy of hostvars['groups'] with additional all group
        active_role_groups: "{{ ['all'] + hostvars[inventory_hostname]['group_names'] }}"
        _cluster_networks: "{{ vars[config_namespace] | combine({'cluster_networks': {'cephclus': {'comment': comment, 'gateway': 'null', 'mtu': 'null', 'nameserver': 'null', 'netmask': netmask, 'network': network}}}, recursive=True) }}"
      vars:
        network: "{{ vars['hypervisor']['cluster_networks']['cephclus']['network'] }}"
        netmask: "{{ vars['hypervisor']['cluster_networks']['cephclus']['netmask'] }}"
        comment: "{{ vars['hypervisor']['cluster_networks']['cephclus']['comment'] }}"

    - name: copy the merged dict back under the top-level namespace
      ansible.builtin.set_fact:
        { "{{ config_namespace }}": "{{ _cluster_networks }}" }

    ######## run roles against hypervisor hosts

    # - ansible.builtin.debug:
    #     msg:
    #       - "{{ hostvars[inventory_hostname] }}"

    # - ansible.builtin.fail:
    #     msg:

    - name: run configured roles
      ansible.builtin.include_role:
        name: "{{ entry }}"
      loop: "{{ _run_roles }}"
      loop_control:
        loop_var: entry
        label: "run {{ entry }} role on {{ inventory_hostname }}"
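
# example invocation (playbook and inventory file names are illustrative):
#   ansible-playbook -i inventory/hosts.yml provision.yml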