Create Terraform vars template
Create project directory.
mkdir /home/openstack/stack
Create vars template.
nano -cw /home/openstack/stack/vars.tf.envsubst
## vars
variable "provider_config" {
  type = map(string)
  default = {
    auth_url  = "${AUTH_URL}"
    auth_user = "${ACCOUNT}"
    auth_pass = "${ACCOUNT_PASSWORD}"
    project   = "${PROJECT}"
  }
}

variable "extnetid" {
  type    = string
  default = "${PROVIDER_NET_ID}"
}

variable "image" {
  type    = string
  default = "${IMAGE}"
}

variable "flavor" {
  type    = string
  default = "${FLAVOR}"
}

locals {
  project = var.provider_config["project"]
  pubkey  = "${PUB_KEY}"
}
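envsubst replaces anything that looks like a shell variable reference with the corresponding exported value, which is why the placeholders above are written in ${NAME} form; unset variables are replaced with empty strings, so export everything before rendering. A quick illustration (the IMAGE value here is just a dummy):
export IMAGE=dummy-image-id
echo 'default = "${IMAGE}"' | envsubst
# prints: default = "dummy-image-id"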
Initial cluster configuration
This script:
- Creates the provider network
- Creates a project
- Sets per-project quotas
- Creates a user with RBAC
- Uploads instance disk images
- Creates flavours
- Renders the Terraform vars file
touch /home/openstack/stack/configure_cluster.sh
chmod +x /home/openstack/stack/configure_cluster.sh
nano -cw /home/openstack/stack/configure_cluster.sh
#!/usr/bin/env bash
# load venv and credentials
source /home/openstack/kolla_zed/bin/activate
source /etc/kolla/admin-openrc.sh
# vars
OPENSTACK_CLI=openstack
EXT_NET_CIDR='192.168.140.0/24'
EXT_NET_RANGE='start=192.168.140.200,end=192.168.140.254'
EXT_NET_GATEWAY='192.168.140.1'
PROJECT='test'
ACCOUNT='tseed'
ACCOUNT_PASSWORD='Password0'
ACCOUNT_EMAIL='toby.n.seed@gmail.com'
# check cluster
$OPENSTACK_CLI host list
$OPENSTACK_CLI hypervisor list
$OPENSTACK_CLI user list
# provider shared network
$OPENSTACK_CLI network create --external --share --provider-physical-network physnet1 --provider-network-type flat provider_network
$OPENSTACK_CLI subnet create --dhcp --network provider_network --subnet-range ${EXT_NET_CIDR} --gateway ${EXT_NET_GATEWAY} --allocation-pool ${EXT_NET_RANGE} provider_subnet
# create project
$OPENSTACK_CLI project create --domain default --description "guest project" $PROJECT
# set quota on project
$OPENSTACK_CLI quota set --instances 10 $PROJECT
$OPENSTACK_CLI quota set --cores 4 $PROJECT
$OPENSTACK_CLI quota set --ram 6144 $PROJECT
$OPENSTACK_CLI quota set --gigabytes 30 $PROJECT
$OPENSTACK_CLI quota set --volumes 10 $PROJECT
$OPENSTACK_CLI quota set --backups 0 $PROJECT
$OPENSTACK_CLI quota set --snapshots 0 $PROJECT
$OPENSTACK_CLI quota set --key-pairs 20 $PROJECT
$OPENSTACK_CLI quota set --floating-ips 20 $PROJECT
$OPENSTACK_CLI quota set --networks 10 $PROJECT
$OPENSTACK_CLI quota set --routers 10 $PROJECT
$OPENSTACK_CLI quota set --subnets 10 $PROJECT
$OPENSTACK_CLI quota set --secgroups 20 $PROJECT
$OPENSTACK_CLI quota set --secgroup-rules 100 $PROJECT
# create user
$OPENSTACK_CLI user create --password ${ACCOUNT_PASSWORD} --email ${ACCOUNT_EMAIL} $ACCOUNT
# set the default project in the web console for user
$OPENSTACK_CLI user set --project $PROJECT $ACCOUNT
$OPENSTACK_CLI project show $(openstack user show $ACCOUNT --domain default -f json | jq -r .default_project_id) -f json | jq -r .description
# set RBAC for guest project
$OPENSTACK_CLI role add --project $PROJECT --user $ACCOUNT admin
# download the cirros test image for admin project
wget http://download.cirros-cloud.net/0.5.1/cirros-0.5.1-x86_64-disk.img
$OPENSTACK_CLI image create --disk-format qcow2 --container-format bare --private --project admin --property os_type=linux --file ./cirros-0.5.1-x86_64-disk.img cirros-0.5.1
# download the ubuntu image for all projects
wget https://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64.img
$OPENSTACK_CLI image create --disk-format qcow2 --container-format bare --public --property os_type=linux --file ./bionic-server-cloudimg-amd64.img ubuntu_18.04
# create a flavour for the admin project
$OPENSTACK_CLI flavor create admin.tiny --ram 1024 --disk 1 --vcpus 2 --private --project admin
# create flavours for the guest project
$OPENSTACK_CLI flavor create m1.tiny --ram 512 --disk 5 --vcpus 1 --private --project $PROJECT
$OPENSTACK_CLI flavor create m1.smaller --ram 1024 --disk 10 --vcpus 1 --private --project $PROJECT
# collect vars
export PROJECT=$PROJECT
export ACCOUNT=$ACCOUNT
export ACCOUNT_PASSWORD=$ACCOUNT_PASSWORD
export AUTH_URL=$(openstack endpoint list -f json | jq -r '.[] | select(."Service Name" == "keystone" and ."Interface" == "public") | .URL')
export PROVIDER_NET_ID=$(openstack network list -f json | jq -r '.[] | select(."Name" == "provider_network") | .ID')
export IMAGE=$(openstack image list -f json | jq -r '.[] | select(."Name" == "ubuntu_18.04") | .ID')
export FLAVOR=$(openstack flavor list --all -f json | jq -r '.[] | select(."Name" == "m1.tiny") | .ID')
export PUB_KEY=$(cat /home/openstack/.ssh/id_rsa.pub)
# render terraform vars.tf
envsubst < /home/openstack/stack/vars.tf.envsubst > /home/openstack/stack/vars.tf
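With the template and script in place, run the script and check that the placeholders were rendered:
/home/openstack/stack/configure_cluster.sh
# confirm the ${...} placeholders were substituted
cat /home/openstack/stack/vars.tf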
Install Terraform
wget -O- https://apt.releases.hashicorp.com/gpg | gpg --dearmor | sudo tee /usr/share/keyrings/hashicorp-archive-keyring.gpg > /dev/null
echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
sudo apt update && sudo apt install terraform
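Verify the install:
terraform version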
Create remaining project config and templates
Create user data template
The salted password hash below was generated with openssl passwd -6 -salt xyz Password0.
This could be automated in Terraform on a per-instance basis, producing a different hash for the same password on each instance, to deter anyone who may be able to intercept or inspect the cloud-init data at instantiation (it may also be visible at the OpenStack metadata endpoint).
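For illustration, omitting -salt makes openssl pick a random salt, so the same password hashes differently on each run; wiring this into Terraform (for example via an external data source) is left as an exercise:
# same password, different salt and hash on every invocation
openssl passwd -6 Password0
openssl passwd -6 Password0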
nano -cw /home/openstack/stack/user_data.sh
#cloud-config
ssh_pwauth: true
groups:
  - admingroup: [root, sys]
  - openstack
users:
  - name: openstack
    primary_group: openstack
    lock_passwd: false
    passwd: $6$xyz$4tTWyuHIT6gXRuzotBZn/9xZBikUp0O2X6rOZ7MDJo26aax.Ok5P4rWYyzdgFkjArIIyB8z8LKVW1wARbcBzn/
    sudo: ALL=(ALL) NOPASSWD:ALL
    shell: /bin/bash
    ssh_authorized_keys:
      - ${pubkey}
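To eyeball the rendered user data before any instance boots, the same templatefile call Terraform uses can be exercised from terraform console (run from /home/openstack/stack after terraform init; the key path here assumes the same key used for PUB_KEY):
echo 'templatefile("user_data.sh", { pubkey = trimspace(file("/home/openstack/.ssh/id_rsa.pub")) })' | terraform console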
Create Ansible inventory template
This template gets rendered by Terraform. Ansible will also work with the cloud-init seeded SSH public key.
nano -cw /home/openstack/stack/inventory.tmpl
[nodes]
%{ for index, name in subnet1_instance_name ~}
${name} ansible_host=${subnet1_instance_address[index]} ansible_ssh_common_args='-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' ansible_user=${user} ansible_password=${password} ansible_become=true
%{ endfor ~}
%{ for index, name in subnet2_instance_name ~}
${name} ansible_host=${subnet2_instance_address[index]} ansible_ssh_common_args='-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' ansible_user=${user} ansible_password=${password} ansible_become=true
%{ endfor ~}
[subnet1_instances]
%{ for index, name in subnet1_instance_name ~}
${name}
%{ endfor ~}
[subnet2_instances]
%{ for index, name in subnet2_instance_name ~}
${name}
%{ endfor ~}
When rendered, the inventory should look something like the following; note the provider network addresses assigned via the floating IPs:
[nodes]
subnet1_test0 ansible_host=192.168.140.230 ansible_ssh_common_args='-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' ansible_user=openstack ansible_password=Password0 ansible_become=true
subnet1_test1 ansible_host=192.168.140.223 ansible_ssh_common_args='-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' ansible_user=openstack ansible_password=Password0 ansible_become=true
subnet2_test0 ansible_host=192.168.140.217 ansible_ssh_common_args='-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' ansible_user=openstack ansible_password=Password0 ansible_become=true
[subnet1_instances]
subnet1_test0
subnet1_test1
[subnet2_instances]
subnet2_test0
Create Ansible ping test playbook
nano -cw /home/openstack/stack/ping_test.yml
---
- name: build ping_map
  hosts: localhost
  become: false
  gather_facts: false
  tasks:
    - name: build ping_map
      ansible.builtin.set_fact:
        _ping_map: "{{ _ping_map | default({}) | combine({entry: []}, recursive=True) }}"
      loop: "{{ inventory_hosts }}"
      loop_control:
        loop_var: entry
      vars:
        inventory_hosts: "{{ hostvars[inventory_hostname]['groups']['all'] }}"
    # - ansible.builtin.debug:
    #     msg:
    #       - "{{ _ping_map }}"
    - name: populate ping_map
      ansible.builtin.set_fact:
        _ping_map: "{{ _ping_map | default({}) | combine({source: destination_list_append}, recursive=True) }}"
      loop: "{{ target_hosts | product(target_hosts) }}"
      loop_control:
        loop_var: entry
      vars:
        target_hosts: "{{ hostvars[inventory_hostname]['groups']['all'] }}"
        source: "{{ entry[0] }}"
        destination: "{{ entry[1] }}"
        destination_list: "{{ _ping_map[source] }}"
        destination_list_append: "{{ destination_list + [destination] }}"
      when: not entry[0] == entry[1]
    # - ansible.builtin.debug:
    #     msg:
    #       - "{{ _ping_map }}"
    - name: write global ping_map
      ansible.builtin.set_fact:
        _global_ping_map: "{{ _ping_map }}"
      delegate_to: localhost
      delegate_facts: true

- name: ping test
  hosts: all
  become: true
  gather_facts: true
  tasks:
    - name: load global ping_map
      ansible.builtin.set_fact:
        _ping_map: "{{ hostvars['localhost']['_global_ping_map'] }}"
      when:
        - hostvars['localhost']['_global_ping_map'] is defined
    # - ansible.builtin.debug:
    #     msg:
    #       - "{{ _ping_map }}"
    - name: ping neighbours
      ansible.builtin.shell: |
        echo SOURCE {{ inventory_hostname }}
        echo DESTINATION {{ destination_target }}
        echo
        ping -Rn -c 1 {{ destination_ip }}
      loop: "{{ destination_targets }}"
      loop_control:
        loop_var: entry
      vars:
        destination_targets: "{{ _ping_map[inventory_hostname] }}"
        destination_target: "{{ entry }}"
        destination_ip: "{{ hostvars[destination_target]['ansible_default_ipv4']['address'] }}"
        source: "{{ inventory_hostname }}"
      register: _ping_results
    - name: print results
      ansible.builtin.debug:
        msg:
          - "{{ output }}"
      loop: "{{ _ping_results['results'] }}"
      loop_control:
        loop_var: idx
        label: "{{ destination }}"
      vars:
        destination: "{{ idx['entry'] }}"
        output: "{{ idx['stdout_lines'] }}"
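The first play builds a source-to-destinations map on localhost; the second play has every node ping all of its neighbours over the routed private networks. Terraform invokes it below, but it can equally be run by hand once the inventory exists:
cd /home/openstack/stack
ansible-playbook -i ansible_inventory ping_test.yml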
Create Terraform configuration
nano -cw /home/openstack/stack/stack.tf
## load provider
terraform {
  required_version = ">= 0.14.0"
  required_providers {
    openstack = {
      source  = "terraform-provider-openstack/openstack"
      version = "~> 1.48.0"
    }
  }
}

## configure provider
provider "openstack" {
  auth_url    = var.provider_config["auth_url"]
  user_name   = var.provider_config["auth_user"]
  password    = var.provider_config["auth_pass"]
  tenant_name = var.provider_config["project"]
  region      = "RegionOne"
}
## vars
variable "dns" {
  type    = list(string)
  default = ["1.1.1.1", "8.8.8.8"]
}

variable "subnet1" {
  type = map(string)
  default = {
    subnet_name    = "subnet1"
    cidr           = "172.16.10.0/24"
    instance_count = "2"
  }
}

variable "subnet2" {
  type = map(string)
  default = {
    subnet_name    = "subnet2"
    cidr           = "172.16.11.0/24"
    instance_count = "1"
  }
}

## data sources
data "openstack_networking_network_v2" "exnetname" {
  network_id = var.extnetid
}

#output "exnet_name" {
#  value = data.openstack_networking_network_v2.exnetname.name
#}
## resources
# router
resource "openstack_networking_router_v2" "router" {
  name                = "router_${local.project}"
  admin_state_up      = true
  external_network_id = var.extnetid
}

# network1
resource "openstack_networking_network_v2" "network1" {
  name = "network1_${local.project}"
}

# network2
resource "openstack_networking_network_v2" "network2" {
  name = "network2_${local.project}"
}

# subnet1
resource "openstack_networking_subnet_v2" "subnet1" {
  name            = "${var.subnet1["subnet_name"]}_${local.project}"
  network_id      = openstack_networking_network_v2.network1.id
  cidr            = var.subnet1["cidr"]
  dns_nameservers = var.dns
}

# subnet2
resource "openstack_networking_subnet_v2" "subnet2" {
  name            = "${var.subnet2["subnet_name"]}_${local.project}"
  network_id      = openstack_networking_network_v2.network2.id
  cidr            = var.subnet2["cidr"]
  dns_nameservers = var.dns
}

# router interface subnet1
resource "openstack_networking_router_interface_v2" "interface1" {
  router_id = openstack_networking_router_v2.router.id
  subnet_id = openstack_networking_subnet_v2.subnet1.id
}

# router interface subnet2
resource "openstack_networking_router_interface_v2" "interface2" {
  router_id = openstack_networking_router_v2.router.id
  subnet_id = openstack_networking_subnet_v2.subnet2.id
}

# security group
resource "openstack_compute_secgroup_v2" "ingress" {
  name        = local.project
  description = "ingress rules"
  rule {
    from_port   = 22
    to_port     = 22
    ip_protocol = "tcp"
    cidr        = "192.168.140.0/24"
  }
  rule {
    from_port   = -1
    to_port     = -1
    ip_protocol = "icmp"
    cidr        = "192.168.140.0/24"
  }
  rule {
    from_port   = 22
    to_port     = 22
    ip_protocol = "tcp"
    self        = true
  }
  rule {
    from_port   = -1
    to_port     = -1
    ip_protocol = "icmp"
    self        = true
  }
}
# floating ip instance_subnet1
resource "openstack_compute_floatingip_v2" "instance_subnet1_fip" {
  count = var.subnet1["instance_count"]
  pool  = data.openstack_networking_network_v2.exnetname.name
  #depends_on = [openstack_networking_router_interface_v2.interface1]
}

# floating ip instance_subnet2
resource "openstack_compute_floatingip_v2" "instance_subnet2_fip" {
  count = var.subnet2["instance_count"]
  pool  = data.openstack_networking_network_v2.exnetname.name
  #depends_on = [openstack_networking_router_interface_v2.interface2]
}
# subnet1 instances
resource "openstack_compute_instance_v2" "instance_subnet1" {
  count     = var.subnet1["instance_count"]
  name      = "${var.subnet1["subnet_name"]}_${local.project}${count.index}"
  image_id  = var.image
  flavor_id = var.flavor
  user_data = templatefile("user_data.sh", {
    pubkey = local.pubkey
  })
  #network {
  #  uuid = var.extnetid
  #}
  network {
    uuid = openstack_networking_network_v2.network1.id
  }
  security_groups = [openstack_compute_secgroup_v2.ingress.name]
  depends_on = [
    openstack_networking_subnet_v2.subnet1
  ]
}

# subnet2 instances
resource "openstack_compute_instance_v2" "instance_subnet2" {
  count     = var.subnet2["instance_count"]
  name      = "${var.subnet2["subnet_name"]}_${local.project}${count.index}"
  image_id  = var.image
  flavor_id = var.flavor
  user_data = templatefile("user_data.sh", {
    pubkey = local.pubkey
  })
  network {
    uuid = openstack_networking_network_v2.network2.id
  }
  security_groups = [openstack_compute_secgroup_v2.ingress.name]
  depends_on = [
    openstack_networking_subnet_v2.subnet2
  ]
}

# subnet1 floating ips
resource "openstack_compute_floatingip_associate_v2" "fip_subnet1" {
  count       = var.subnet1["instance_count"]
  floating_ip = openstack_compute_floatingip_v2.instance_subnet1_fip[count.index].address
  instance_id = openstack_compute_instance_v2.instance_subnet1[count.index].id
}

# subnet2 floating ips
resource "openstack_compute_floatingip_associate_v2" "fip_subnet2" {
  count       = var.subnet2["instance_count"]
  floating_ip = openstack_compute_floatingip_v2.instance_subnet2_fip[count.index].address
  instance_id = openstack_compute_instance_v2.instance_subnet2[count.index].id
}
# ansible inventory
resource "local_file" "ansible_inventory" {
  content = templatefile("inventory.tmpl", {
    user                     = "openstack"
    password                 = "Password0"
    subnet1_instance_name    = openstack_compute_instance_v2.instance_subnet1[*].name
    subnet1_instance_address = openstack_compute_floatingip_v2.instance_subnet1_fip[*].address
    subnet2_instance_name    = openstack_compute_instance_v2.instance_subnet2[*].name
    subnet2_instance_address = openstack_compute_floatingip_v2.instance_subnet2_fip[*].address
  })
  filename = "ansible_inventory"
}

# cheat: instead of an "until connected" check, just wait for the nodes to boot and start sshd;
# only start waiting once the floating IPs are associated and the inventory is written
resource "time_sleep" "loitering" {
  create_duration = "120s"
  depends_on = [
    openstack_compute_floatingip_associate_v2.fip_subnet1,
    openstack_compute_floatingip_associate_v2.fip_subnet2,
    local_file.ansible_inventory,
  ]
}

# check ansible instance connectivity
resource "null_resource" "ansible_floating_ip_ping" {
  provisioner "local-exec" {
    command = "ansible -i ansible_inventory all -m ping"
  }
  depends_on = [
    time_sleep.loitering
  ]
}

# check ansible inter-instance connectivity
resource "null_resource" "ansible_private_net_ping" {
  provisioner "local-exec" {
    command = "ansible-playbook -i ansible_inventory ping_test.yml"
  }
  depends_on = [
    null_resource.ansible_floating_ip_ping
  ]
}
Run
cd /home/openstack/stack
terraform init
terraform plan
terraform apply -auto-approve
# tear down when finished
terraform destroy -auto-approve
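If the apply succeeds, the three instances and their floating IPs can be confirmed from the admin CLI:
openstack server list --all-projects --long
openstack floating ip list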