hypervisor:
  ssh_user: 'root'
  ssh_password: 'Password0'
#  connection: 'external'

# map of mac addresses to match to the primary/control-plane interface for
# bootstrap; this should be ordered with the master host first
mac_map:
  - host: 'qemu01'
    mac: 'b8:97:5a:cf:d7:d3'
    ip: '192.168.140.41'
    nmcli_con: 'primary'
  - host: 'qemu02'
    mac: 'b8:97:5a:cf:da:c6'
    ip: '192.168.140.42'
    nmcli_con: 'primary'
  - host: 'qemu03'
    mac: 'b8:97:5a:cf:d8:bf'
    ip: '192.168.140.43'
    nmcli_con: 'primary'

# ceph disk
ceph_disk: /dev/nvme0n1

# ceph dashboard admin user password
ceph_dash_admin_password: "Password0"

# nmcli connection names; values are device names (e.g. eth0) or nmcli
# interface names
nmcli_con_names:
  primary: 'external'
  ceph_public: 'storage'
  ceph_cluster: 'cephclus'
  ceph_rgw: 'storage'

# hypervisor-specific networks to add to the cluster_networks dict imported
# from group_vars/networks.yml
cluster_networks:
  external:
    network: 192.168.140.0
    netmask: 255.255.255.0
    gateway: 192.168.140.1
    mtu:
    nameserver: 1.1.1.1
    comment: ext
#  cephpub:
#    network: 172.26.0.0
#    netmask: 255.255.255.0
#    gateway: 172.26.0.1
#    mtu:
#    nameserver: 1.1.1.1
#    comment: ext
  cephclus:
    network: 172.25.0.0
    netmask: 255.255.255.0
    gateway:
    mtu:
    nameserver:
    comment: int

ceph_service_placement:
  - host: 'qemu01'
    labels:
      - _admin
      - mon
      - osd
      - mgr
      - mds
      - nfs
      - rgw
  - host: 'qemu02'
    labels:
      - _admin
      - mon
      - osd
      - mgr
      - mds
  - host: 'qemu03'
    labels:
      - _admin
      - mon
      - osd
      - mgr
      - mds

# an nfs service uses a cephfs namespace or an rgw bucket; do not include an
# nfs service spec in this list
ceph_service_spec:
  - service_type: alertmanager
    service_name: alertmanager
    placement:
      count: 1
  - service_type: crash
    service_name: crash
    placement:
      host_pattern: '*'
  - service_type: grafana
    service_name: grafana
    placement:
      count: 1
  - service_type: node-exporter
    service_name: node-exporter
    placement:
      host_pattern: '*'
  - service_type: prometheus
    service_name: prometheus
    placement:
      count: 1
  - service_type: mon
    service_name: mon
    placement:
      label: "mon"
  - service_type: mgr
    service_name: mgr
    placement:
      label: "mgr"
  # multiple osd specs on a per-host basis can be included with adjusted
  # placement configuration (see the commented sketch at the end of this file)
  - service_type: osd
    service_id: osd_using_device_file
    placement:
      label: "osd"
    spec:
      data_devices:
        paths:
          - /dev/ceph/ceph_data
#      db_devices:
#        paths:
#          - /dev/sdc
#      wal_devices:
#        paths:
#          - /dev/sdd
  - service_type: mds
    service_id: cephfs
    placement:
      label: "mds"
  # this rgw configuration provisions an rgw instance with no realm, a
  # zonegroup and zone named default, and the root pool .rgw.root
  # there are 4 auto-provisioned pools: .rgw.root (pg 32) / default.rgw.log
  # (pg 32) / default.rgw.control (pg 32) / default.rgw.meta (pg 8)
  # a multisite configuration (specify realm/zonegroup/zone and a specific
  # data pool) requires additional commands and multiple spec files
  - service_type: rgw
    service_id: object
    placement:
      label: "rgw"
      count: 1
    spec:
      ssl: false
      rgw_frontend_port: 8080
      rgw_frontend_type: beast
  - service_type: nfs
    service_id: ganesha
    placement:
      label: "nfs"
    spec:
      port: 2049

# add a 'pg: ' entry if you don't want the default allocation; pg autoscaling
# is enabled
ceph_pools:
  - type: rbd
    name: vms
#    pg: 64
  - type: cephfs
    name: cephfs.cluster_volume.data
    cephfs_type: data
    volume: cephfs_cluster_volume
  - type: cephfs
    name: cephfs.cluster_volume.meta
    cephfs_type: meta
    volume: cephfs_cluster_volume
  - type: cephfs
    name: cephfs.cluster_volume1.data
    pg: 16
    cephfs_type: data
    volume: cephfs_cluster_volume1
  - type: cephfs
    name: cephfs.cluster_volume1.meta
    pg: 16
    cephfs_type: meta
    volume: cephfs_cluster_volume1
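
# illustrative sketch, commented out: as referenced in the osd comment above,
# an additional per-host osd spec entry could be added to ceph_service_spec
# with placement pinned to named hosts instead of a label; the service_id,
# host, and device path below are hypothetical placeholders, not part of this
# cluster
# - service_type: osd
#   service_id: osd_qemu01_second_device
#   placement:
#     hosts:
#       - 'qemu01'
#   spec:
#     data_devices:
#       paths:
#         - /dev/ceph/ceph_data2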
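
# usage sketch (an assumption about the consuming playbook, not defined in
# this file): each ceph_service_spec entry is typically templated to its own
# yaml file and applied with 'ceph orch apply -i <spec_file>'; ceph_pools
# entries map onto 'ceph osd pool create <name> [pg]' followed by
# 'ceph osd pool application enable', and each cephfs data/meta pool pair
# onto 'ceph fs new <volume> <meta_pool> <data_pool>'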