# 782 lines, 26 KiB, YAML, executable file (scrape metadata preserved as comments —
# any non-comment text before the document-start marker would break YAML parsing)
---
# PLAY
# Gather cloudforms information and set service name
- name: Query automate workspace
  hosts: localhost
  gather_facts: false
  vars_files:
    # FIX: the vars file entry was missing its "- " list marker, so it was not a
    # valid vars_files list item and vars/main.yml was never loaded for this play.
    - vars/main.yml

  tasks:

    # Derive the API endpoint, auth token and object ids from the manageiq
    # structure that CloudForms injects when it launches this playbook.
    # The href values look like ".../api/<collection>/<id>"; split on '/' and
    # take the last element to get the bare numeric id.
    - set_fact:
        endpoint: "{{ manageiq.api_url }}"
        auth_token: "{{ manageiq.api_token }}"
        request_id: "{{ (manageiq.request).split('/')[-1] }}"
        service_id: "{{ (manageiq.service).split('/')[-1] }}"
        user_id: "{{ (manageiq.user).split('/')[-1] }}"
      when: manageiq is defined

    # when users run ansible their manageiq auth token does not have sufficient
    # rights to interact with the API, no combination of rights in a role for a
    # non admin user are sufficient; use the local admin credentials to get an
    # auth token instead
    - name: Get auth token
      uri:
        # ansible runner timeout, something changed from 5.11.1.2 to 5.11.6.0?
        #url: "{{ endpoint }}/api/auth"
        url: "https://127.0.0.1/api/auth"
        validate_certs: false
        method: GET
        user: "{{ api_user }}"
        password: "{{ api_pass }}"
        status_code: 200
      register: login
      when: manageiq is defined

    # Replace the (insufficiently privileged) injected token with the admin one.
    - set_fact:
        auth_token: "{{ login.json.auth_token }}"
      when: manageiq is defined

    - name: Get requester user attributes
      uri:
        #url: "{{ endpoint }}/api/users/{{ user_id }}"
        url: "https://127.0.0.1/api/users/{{ user_id }}"
        validate_certs: false
        method: GET
        headers:
          X-Auth-Token: "{{ auth_token }}"
        status_code: 200
      register: user
      when: manageiq is defined

    # cloudforms admin user has no email address (unless set), this is a null
    # field in json
    - set_fact:
        requester_email: "{{ user.json.email }}"
      when: manageiq is defined and user.json.email is not none

    # from_email should be able to receive mail via relay
    - set_fact:
        requester_email: "{{ from_email }}"
      when: requester_email is not defined

    - set_fact:
        requester_user: "{{ user.json.name }}"
      when: manageiq is defined

    # when run from the command line set a default requester user
    - set_fact:
        requester_user: "command line invocation"
      when: manageiq is not defined

    - name: define qtree name suffix, use the CloudForms request id as the qtree name suffix
      set_fact:
        qtree_suffix: "{{ request_id }}"
      when: manageiq is defined

    - name: generate qtree name suffix, generate random string for qtree name suffix
      shell: head /dev/urandom | tr -dc A-Z0-9 | head -c 10 ; echo ''
      register: qtree_suffix
      when: manageiq is not defined

    - name: define qtree name suffix
      set_fact:
        qtree_suffix: "{{ qtree_suffix.stdout }}"
      when: manageiq is not defined

    # - name: DEBUG print all user attributes
    #   debug:
    #     msg:
    #       - "{{ user }}"
    #
    # useful fields:
    #   "email": "ucats@exmail.nottingham.ac.uk"
    #   "name": "Toby Seed"
    #   "userid": "toby.seed@nottingham.ac.uk"
    #
    # we see that the UON active directory schema has a different correlation
    # between the AD short login id and the lookup of the userid and name.
    # the above user generally would use the login id "ucats" across the UON
    # estate to authenticate against AD.
    # interestingly cloudforms queries several fields in the schema and will
    # allow login as "ucats" and "toby.seed@nottingham.ac.uk".
    # to detect if this script is being run in self service mode, a match is
    # performed against the requesting user and a single entry in groupmember
    # list.
    # we can only retrieve the expected AD short login id from the email field
    # with what is passed by cloudforms when using UON AD.

    - name: get AD account name
      set_fact:
        requester_user_ad: "{{ (requester_email).split('@')[0] }}"
      #when: manageiq is defined

    - name: get service
      uri:
        url: "https://127.0.0.1/api/services/{{ service_id }}"
        validate_certs: false
        method: GET
        headers:
          X-Auth-Token: "{{ auth_token }}"
        status_code: 200
      register: service
      when: manageiq is defined

    - set_fact:
        service_name: "{{ service.json.name }}"
        new_service_name: "{{ service.json.name }} {{ request_id }}"
      when: manageiq is defined

    # not using yet - will be used in emails
    # - set_fact:
    #     service_name: "command line invocation"
    #     new_service_name: "command line invocation"
    #   when: manageiq is not defined

    - name: set service name
      uri:
        url: "https://127.0.0.1/api/services/{{ service_id }}"
        validate_certs: false
        method: POST
        headers:
          X-Auth-Token: "{{ auth_token }}"
        body_format: json
        body: { "action": "edit", "resource": { "name": "{{ new_service_name }}" } }
        # explicit list instead of the comma-separated scalar "200, 204"
        status_code: [200, 204]
      register: service
      when: manageiq is defined
|
# PLAY
# Validate parameters passed to script from cloudforms
- name: Script input Validation
  hosts: localhost
  gather_facts: false
  vars_files:
    - vars/main.yml
  vars:
    groupmemberslist: []
    groupmemberslistvalidate: []

  tasks:

    # REQUIREMENT
    # need a task to check for placeholder values here when parameterized for
    # cloudforms

    # FIX: the original built lists by string concatenation,
    #   "{{ groupmemberslist }} + [ '{{ item }}' ]"
    # which depends on deprecated implicit Jinja re-evaluation of the rendered
    # string and breaks if a member name contains a quote character. Native
    # list concatenation inside a single expression is equivalent and safe.
    - name: Split groupmembers parameter on , delimiter
      set_fact:
        groupmemberslist: "{{ groupmemberslist + [item] }}"
      with_items: "{{ members.split(',') }}"

    - name: Remove empty fields from groupmembers parameter
      set_fact:
        groupmemberslistvalidate: "{{ groupmemberslistvalidate + [item] }}"
      when: item | length != 0
      with_items: "{{ groupmemberslist }}"

    - name: Remove duplicate entries
      set_fact:
        groupmemberslistvalidate: "{{ groupmemberslistvalidate | unique }}"
|
|
# PLAY
# Add winrm host used for AD querys to inventory
- name: Build inventory for AD server
  hosts: localhost
  gather_facts: false
  vars_files:
    - vars/main.yml

  tasks:

    # FIX: add_host was called with a folded-scalar key=value string
    # ("add_host: >" followed by name=... groups=... ansible_host=...), which is
    # the deprecated free-form syntax; native block YAML module args are the
    # supported form and avoid quote-stripping surprises in the k=v parser.
    - name: Add host entry for adserver
      add_host:
        name: adserver
        groups: windows
        ansible_host: "{{ ad_host }}"
|
|
# PLAY
# Query winrm to validate AD user/group exist, build dict for share ACL and a
# dict of all user/email pairs from a recursive search of each user/group
- name: Check AD user exists
  hosts: adserver
  gather_facts: false
  vars_files:
    - vars/main.yml
  vars:
    # lists built by the validation play on localhost
    groupmemberslistvalidate: "{{ hostvars['localhost']['groupmemberslistvalidate'] }}"
    requester_user_ad: "{{ hostvars['localhost']['requester_user_ad'] }}"
    # accumulators: names that failed AD lookup, and each object's AD category
    no_aduser: []
    object_type: []

  tasks:

    # winrm connectivity vars are set per-host here because add_host does not
    # accept many of the windows connection variables
    - name: Add connectivity variables for adserver
      set_fact:
        ansible_user: "{{ ad_user }}"
        ansible_password: "{{ ad_pass }}"
        ansible_connection: "{{ ad_connection }}"
        ansible_winrm_transport: "{{ ad_winrm_transport }}"
        ansible_winrm_kinit_mode: "{{ ad_winrm_kinit_mode }}"
        ansible_winrm_message_encryption: "{{ ad_winrm_message_encryption }}"
        ansible_port: "{{ ad_port }}"
        ansible_winrm_scheme: "{{ ad_winrm_scheme }}"
        ansible_winrm_server_cert_validation: "{{ ad_winrm_server_cert_validation }}"
        # ansible_winrm_operation_timeout_sec: 60
        # ansible_winrm_read_timeout_sec: 60

    # empty stdout from FindOne() means the sAMAccountName does not exist in AD
    - name: Check AD user/group exists
      win_shell: ([ADSISearcher] "(sAMAccountName={{ item }})").FindOne()
      register: command_result
      with_items:
        - "{{ groupmemberslistvalidate }}"

    - name: Flag fail where AD user/group not exist
      set_fact:
        no_aduser: "{{ no_aduser + [item.item] }}"
      when: item.stdout | length == 0
      with_items: "{{ command_result.results }}"
      changed_when: true
      #notify: topic_noad # would be used for handler for failure conditions by console and email

    # every remaining task is skipped once any name failed the existence check
    - name: Check AD object is user or group
      win_shell: ([ADSISearcher] "(sAMAccountName={{ item.item }})").FindOne().Properties.objectcategory
      register: object_result
      with_items: "{{ command_result.results }}"
      when: no_aduser | length == 0

    - name: Build list of AD object type
      set_fact:
        object_type: "{{ object_type + [(item.stdout.split(',')[0].split('CN=')[1])] }}" # faster than regex
      with_items: "{{ object_result.results }}"
      when: no_aduser | length == 0

    # if the cloudforms requester's AD account requester_user_ad is in the list
    # of users, set the role 'requester'; this is used for self service emails
    - name: Build dict of object names, types and roles positionally from list with object name and list with object type
      set_fact:
        object_attributes: "{{ object_attributes | default([]) + [dict(name=item[0], type=item[1], role='requester' if (item[0] == requester_user_ad) else 'member') ] }}"
      loop: "{{ groupmemberslistvalidate|zip(object_type)|list }}"
      when: no_aduser | length == 0

    # dummy inventory host used as a cross-play variable carrier
    - name: Register dummy host with variable object_attributes
      add_host:
        name: "DUMMY_HOST"
        object_attributes: "{{ object_attributes }}"
      when: no_aduser | length == 0

    - name: Find all group members
      include_tasks: group_lookup.yml
      when: no_aduser | length == 0

    # - name: Inspect all users who will require email notification
    #   debug:
    #     msg: "{{ hostvars['DUMMY_HOST']['find_users'] }}"
    #   when: no_aduser | length == 0

    - name: Import dummy host variable from group_lookup.yml
      set_fact:
        unique_users: "{{ hostvars['DUMMY_HOST']['find_users'] }}"
      when: no_aduser | length == 0

    - name: Get unique name/role/type entries, remove duplicate entries that may arise from group nesting
      set_fact:
        unique_users: "{{ unique_users | unique }}"
      when: no_aduser | length == 0

    # - debug:
    #     msg: "{{ unique_users }}"
    #   when: no_aduser | length == 0

    - name: Get requester name into a list
      set_fact:
        requester_users: "{{ requester_users | default([]) + [item.name] }}"
      with_items: "{{ unique_users }}"
      when: no_aduser | length == 0 and item.role == 'requester'

    - name: Get member names into a list
      set_fact:
        member_users: "{{ member_users | default([]) + [item.name] }}"
      with_items: "{{ unique_users }}"
      when: no_aduser | length == 0 and item.role == 'member'

    - name: Get names common in both lists
      set_fact:
        common_users: "{{ requester_users | intersect(member_users) }}"
      when: no_aduser | length == 0 and requester_users is defined

    # where a name appears as both requester and member, keep only the
    # requester entry so the user receives a single (self-service) email
    - name: Remove member entries where competing requester entry exists
      set_fact:
        email_users: "{{ email_users | default([]) + [dict(name=item.name, role=item.role, type=item.type)] }}"
      with_items: "{{ unique_users }}"
      #when: no_aduser | length == 0 and (item.name not in common_users or (item.name in common_users and item.role == 'requester'))
      when: no_aduser | length == 0 and (requester_users is defined and (item.name not in common_users or (item.name in common_users and item.role == 'requester')))

    - name: Set email_users where requester not present in the user list
      set_fact:
        email_users: "{{ unique_users }}"
      when: no_aduser | length == 0 and requester_users is not defined

    # deduplicated dict to send appropriate class of email to users
    # - debug:
    #     msg: "{{ email_users }}"
    #   when: no_aduser | length == 0

    - name: Get member email address from AD
      win_shell: Get-ADUser {{ item.name }} -Properties mail | Select-Object -ExpandProperty mail
      register: email_result
      with_items: "{{ email_users }}"
      when: no_aduser | length == 0

    # - debug:
    #     msg: "{{ email_result }}"
    #   when: no_aduser | length == 0

    # this would crash out where a customer account has no associated email;
    # UoN are very consistent with account creation and adding email.
    # check for an empty email and substitute 'none' — this dict key is
    # evaluated when sending emails
    - name: Get member emails into a list
      set_fact:
        email_address: "{{ email_address | default([]) + [item.stdout_lines[0] if (item.stdout_lines | length > 0) else 'none' ] }}"
      with_items: "{{ email_result.results }}"
      when: no_aduser | length == 0

    # - debug:
    #     msg: "{{ email_address }}"

    - name: Build dict of object name, role, type and email
      set_fact:
        user_dict: "{{ user_dict | default([]) + [dict(name=item[0].name, role=item[0].role, type=item[0].type, email=item[1])] }}" # adding a new positional field from the list to the dict
      loop: "{{ email_users|zip(email_address)|list }}"
      when: no_aduser | length == 0

    - debug:
        msg:
          - "{{ object_attributes }}" # use for the ACL's
          - "{{ user_dict }}" # use to identify users that will get various classes of email
      when: no_aduser | length == 0

    # REQUIREMENT
    # drop in handlers for noad failure here
|
|
# PLAY
# Create qtree / quota / dacl and define windows host
- hosts: localhost
  gather_facts: false
  name: Create/Delete qtree and quota
  vars:
    #state: "{{ 'present' if perform == 'create' else ( 'absent' if perform == 'delete' else 'placeholder') }}" # no remove logic yet
    qtree_suffix: "{{ hostvars['localhost']['qtree_suffix'] }}"
    ADPSuser: "{{ domain }}\\{{ ad_user }}"
  vars_files:
    - vars/main.yml
    - vars/requests.yml

  tasks:

    # build a "<number> <unit>" string so human_to_bytes can convert it
    - set_fact:
        human_to_byte_string: "{{ qtree_quota }} {{ qtree_quota_unit }}"

    # soft limit is a percentage of the hard limit, rounded to whole bytes
    - set_fact:
        quota_hard_limit: "{{ human_to_byte_string|human_to_bytes}}"
        quota_soft_limit: "{{( human_to_byte_string|human_to_bytes | float / 100 * qtree_quota_soft_limit) | round | int | abs }}"

    # when run from cloudforms we would pass the prefix/suffix from the dialog
    # or set from the retrieved request ID
    # NOTE(review): these two tasks run unconditionally and overwrite the
    # qtree_suffix pulled from hostvars above — confirm this is intended when
    # launched from CloudForms (the earlier play sets it from the request id)
    - name: generate qtree name suffix, generate random string for qtree name suffix
      shell: head /dev/urandom | tr -dc A-Z0-9 | head -c 10 ; echo ''
      register: qtree_suffix

    - name: define qtree name suffix
      set_fact:
        qtree_suffix: "{{ qtree_suffix.stdout }}"

    - set_fact:
        qtree_name: "{{ qtree_prefix }}_{{ qtree_suffix }}"

    # REQUIREMENT
    # volume space reporting, actual space vs over provisioned space +
    # associated failure condition and console/email reporting via handler

    # REQUIREMENT
    # check qtree name doesnt already exist and hard exit (for cloudforms
    # failed request) with a requester email

    # REQUIREMENT
    # all API requests need their own include tasks and status_code evaluations
    # with handler topics — far too much repetition of queued job checking

    - name: Get svm UUID
      uri:
        url: "{{ getSvmEndpoint }}"
        user: "{{ netapp_svm_user }}"
        password: "{{ netapp_svm_pass }}"
        method: GET
        validate_certs: false
        return_content: true
        status_code: 200
      register: response

    - set_fact:
        svm_uuid: "{{ (response.json.records | json_query(jmesquery))[0] }}"
      vars:
        jmesquery: "[?name == '{{ netapp_svm_name }}'].uuid"

    - name: Get volume UUID
      uri:
        url: "{{ getVolumesEndpoint }}"
        user: "{{ netapp_svm_user }}"
        password: "{{ netapp_svm_pass }}"
        method: GET
        validate_certs: false
        return_content: true
        status_code: 200
      register: response

    - set_fact:
        volume_uuid: "{{ (response.json.records | json_query(jmesquery))[0] }}"
      vars:
        jmesquery: "[?name == '{{ volume_name }}'].uuid"

    # 202: the API queues an async job; poll it below
    - name: Create qtree
      uri:
        url: "{{ postQtreeEndpoint }}"
        user: "{{ netapp_svm_user }}"
        password: "{{ netapp_svm_pass }}"
        method: POST
        body_format: json
        body: "{{ postQtree }}"
        validate_certs: false
        return_content: true
        status_code: 202
      register: response

    - set_fact:
        job_uuid: "{{ response.json.job.uuid }}"

    - name: Get job status
      uri:
        url: "{{ getJobEndpoint }}"
        user: "{{ netapp_svm_user }}"
        password: "{{ netapp_svm_pass }}"
        method: GET
        validate_certs: false
        return_content: true
      register: check_response
      until: check_response.json.state != 'running'
      retries: "{{ api_retry }}"
      delay: 10

    - name: Report job failure
      fail:
        msg:
          - "{{ check_response.json.message }}"
      when: check_response.json.state == 'failure'

    # not required
    # - name: Get qtree ID
    #   uri:
    #     url: "{{ getQtreeEndpoint }}"
    #     user: "{{ netapp_svm_user }}"
    #     password: "{{ netapp_svm_pass }}"
    #     method: GET
    #     validate_certs: no
    #     return_content: yes
    #     status_code: 200
    #   register: response
    #
    # - set_fact:
    #     qtree_id: "{{ (response.json.records | json_query(jmesquery))[0] }}"
    #   vars:
    #     jmesquery: "[?name == '{{ qtree_name }}'].id"

    - name: Create quota
      uri:
        url: "{{ postQuotaEndpoint }}"
        user: "{{ netapp_svm_user }}"
        password: "{{ netapp_svm_pass }}"
        method: POST
        body_format: json
        body: "{{ postQuota }}"
        validate_certs: false
        return_content: true
        status_code: 202
      register: response

    - set_fact:
        job_uuid: "{{ response.json.job.uuid }}"

    - name: Get job status
      uri:
        url: "{{ getJobEndpoint }}"
        user: "{{ netapp_svm_user }}"
        password: "{{ netapp_svm_pass }}"
        method: GET
        validate_certs: false
        return_content: true
      register: check_response
      until: check_response.json.state != 'running'
      retries: "{{ api_retry }}"
      delay: 10

    - name: Report job failure
      fail:
        msg:
          - "{{ check_response.json.message }}"
      when: check_response.json.state == 'failure'

    # quota must be toggled off then on for the new rule to take effect
    - set_fact:
        toggle_quota: "false"

    - name: Toggle volume quota off
      uri:
        url: "{{ toggleVolQuotaEndpoint }}"
        user: "{{ netapp_svm_user }}"
        password: "{{ netapp_svm_pass }}"
        method: PATCH
        body_format: json
        body: "{{ toggleVolQuota }}"
        validate_certs: false
        return_content: true
        status_code: 202
      register: response

    - set_fact:
        job_uuid: "{{ response.json.job.uuid }}"

    - name: Get job status
      uri:
        url: "{{ getJobEndpoint }}"
        user: "{{ netapp_svm_user }}"
        password: "{{ netapp_svm_pass }}"
        method: GET
        validate_certs: false
        return_content: true
      register: response
      until: response.json.state != 'running'
      retries: "{{ api_retry }}"
      delay: 10

    - name: Report job failure
      fail:
        msg:
          - "{{ response.json.message }}"
      when: response.json.state == 'failure'

    # the API will report a quota job finished but blocks another quota
    # command, requires a brief pause on a busy system
    - pause:
        seconds: "{{ netapp_cli_sleep }}"

    - set_fact:
        toggle_quota: "true"

    - name: Toggle volume quota on
      uri:
        url: "{{ toggleVolQuotaEndpoint }}"
        user: "{{ netapp_svm_user }}"
        password: "{{ netapp_svm_pass }}"
        method: PATCH
        body_format: json
        body: "{{ toggleVolQuota }}"
        validate_certs: false
        return_content: true
        status_code: 202
      register: response

    - set_fact:
        job_uuid: "{{ response.json.job.uuid }}"

    # enabling quota will take some time, ensure the retries cover this with
    # larger filesystems
    - name: Get job status
      uri:
        url: "{{ getJobEndpoint }}"
        user: "{{ netapp_svm_user }}"
        password: "{{ netapp_svm_pass }}"
        method: GET
        validate_certs: false
        return_content: true
      register: response
      until: response.json.state != 'running'
      retries: "{{ api_retry }}"
      delay: 10

    - name: Report job failure
      fail:
        msg:
          - "{{ response.json.message }}"
      when: response.json.state == 'failure'

    # these commented tasks are included to show the ansible native ontap cli
    # module; the play moved to using the SVM instead of the ClusterManager on
    # request of the storage team, necessitating API calls rather than the
    # ontap modules.
    # if the ClusterManager target is reinstated for this play, either modify
    # the CLI API endpoints (preferable due to job control) or use this module
    # and be mindful of the sleep command.
    #
    # there is no simple one liner for the cli to wait for the job to finish;
    # there is a sleep command to mitigate but it may need to be tuned on a
    # busy system
    # - name: Apply DACL for winrm powershell user account
    #   na_ontap_command:
    #     command:
    #       - 'vserver security file-directory policy create -vserver {{ netapp_vserver_name }} -policy-name {{ qtree_name }};'
    #       - 'vserver security file-directory ntfs dacl add -vserver {{ netapp_vserver_name }} -ntfs-sd {{ qtree_name }} -access-type allow -account {{ ADPSuser }} -rights full-control -apply-to this-folder,sub-folders,files;'
    #       - 'vserver security file-directory policy task add -vserver {{ netapp_vserver_name }} -policy-name {{ qtree_name }} -path /{{ volume_name }}/{{ qtree_name }} -ntfs-sd {{ qtree_name }} -ntfs-mode propagate -security-type ntfs;'
    #       - 'vserver security file-directory apply -vserver {{ netapp_vserver_name }} -policy-name {{ qtree_name }};'
    #       - 'echo about to remove policy {{ qtree_name }} and security descriptor {{ qtree_name }};'
    #       - 'sleep {{ netapp_cli_sleep }};'
    #       - 'vserver security file-directory policy delete {{ qtree_name }};'
    #       - 'vserver security file-directory ntfs delete -ntfs-sd {{ qtree_name }}'
    #     privilege: 'admin'
    #     return_dict: false # fails with compound commands when true
    #     https: true
    #     validate_certs: false
    #     use_rest: Always
    #     hostname: "{{ netapp_hostname }}"
    #     username: "{{ netapp_username }}"
    #     password: "{{ netapp_password }}"
    #   ignore_errors: True
    #   register: ontapCmd
    #
    # - debug:
    #     msg:
    #       - "DACL application failed, try increasing the timeout in the command list"
    #       - "{{ ontapCmd.msg }}"
    #   when: ontapCmd.failed

    - name: Create DACL policy
      uri:
        url: "{{ postDACLPolicyEndpoint }}"
        user: "{{ netapp_svm_user }}"
        password: "{{ netapp_svm_pass }}"
        method: POST
        body_format: json
        body: "{{ postDACLPolicy }}"
        validate_certs: false
        return_content: true
        status_code: 201
      register: response

    - name: Create DACL policy attributes
      uri:
        url: "{{ postDACLPolicyAttributesEndpoint }}"
        user: "{{ netapp_svm_user }}"
        password: "{{ netapp_svm_pass }}"
        method: POST
        body_format: json
        body: "{{ postDACLPolicyAttributes }}"
        validate_certs: false
        return_content: true
        status_code: 200
      register: response

    - name: Create DACL policy target
      uri:
        url: "{{ postDACLPolicyTargetEndpoint }}"
        user: "{{ netapp_svm_user }}"
        password: "{{ netapp_svm_pass }}"
        method: POST
        body_format: json
        body: "{{ postDACLPolicyTarget }}"
        validate_certs: false
        return_content: true
        status_code: 200
      register: response

    - name: Apply DACL policy
      uri:
        url: "{{ postDACLPolicyApplyEndpoint }}"
        user: "{{ netapp_svm_user }}"
        password: "{{ netapp_svm_pass }}"
        method: POST
        body_format: json
        body: "{{ postDACLPolicyApply }}"
        validate_certs: false
        return_content: true
        status_code: 200
      register: response

    - set_fact:
        job_uuid: "{{ response.json.job.uuid }}"

    - name: Get job status
      uri:
        url: "{{ getJobEndpoint }}"
        user: "{{ netapp_svm_user }}"
        password: "{{ netapp_svm_pass }}"
        method: GET
        validate_certs: false
        return_content: true
      register: check_response
      until: check_response.json.state != 'running'
      retries: "{{ api_retry }}"
      delay: 10

    - name: Report job failure
      fail:
        msg:
          - "{{ check_response.json.message }}"
      when: check_response.json.state == 'failure'

    # the policy and security descriptor are one-shot: delete after applying
    - name: Delete DACL policy
      uri:
        url: "{{ deleteDACLPolicyEndpoint }}"
        user: "{{ netapp_svm_user }}"
        password: "{{ netapp_svm_pass }}"
        method: DELETE
        validate_certs: false
        return_content: true
        status_code: 200
      register: response

    - name: Delete DACL security descriptor
      uri:
        url: "{{ deleteDACLPolicyAttributesEndpoint }}"
        user: "{{ netapp_svm_user }}"
        password: "{{ netapp_svm_pass }}"
        method: DELETE
        validate_certs: false
        return_content: true
        status_code: 200
      register: response

    # REQUIREMENT
    # handlers needed for this play, console and email failure conditions
|
|
# PLAY
# Set windows host connection parameters and run powershell over winrm
- hosts: adserver
  gather_facts: false
  become_method: runas
  name: Change windows ACL for qtree
  vars:
    qtree_name: "{{ hostvars['localhost']['qtree_name'] }}"
    object_attributes: "{{ hostvars['localhost']['object_attributes'] }}"
  vars_files:
    # FIX: the vars file entry was missing its "- " list marker, so it was not
    # a valid vars_files list item and vars/main.yml was never loaded here.
    - vars/main.yml

  tasks:

    # to avoid using group_vars we set_fact vars that were not accepted by
    # add_host — add_host does not work with many windows winrm connectivity
    # vars
    - name: Add connectivity variables for adserver
      set_fact:
        ansible_user: "{{ ad_user }}"
        ansible_password: "{{ ad_pass }}"
        ansible_connection: "{{ ad_connection }}"
        ansible_winrm_transport: "{{ ad_winrm_transport }}"
        ansible_winrm_kinit_mode: "{{ ad_winrm_kinit_mode }}"
        ansible_winrm_message_encryption: "{{ ad_winrm_message_encryption }}"
        ansible_port: "{{ ad_port }}"
        ansible_winrm_scheme: "{{ ad_winrm_scheme }}"
        ansible_winrm_server_cert_validation: "{{ ad_winrm_server_cert_validation }}"
        #ansible_winrm_operation_timeout_sec: 60
        #ansible_winrm_read_timeout_sec: 60

    - name: Copy powershell script to winrm host
      win_template:
        src: templates/ps_acl.ps1.j2
        dest: "{{ temp_dir }}{{ qtree_name }}.ps1"

    # remove everyone permission, set users/group permission — cannot be
    # achieved with DACL
    - name: Apply ACL to share
      #win_command: powershell.exe -ExecutionPolicy Unrestricted {{ qtree_name }}.ps1 # powershell permission model is awkward over win_shell and win_command
      win_command: powershell.exe -ExecutionPolicy ByPass -File {{ temp_dir }}{{ qtree_name }}.ps1
      become: true
      become_user: Administrator # service_cloudforms may need local admin permissions or some winrm permission elevation in a prod environment
      register: command_result

    - debug:
        msg: "{{ command_result }}"

    - name: Remove powershell script from winrm host
      win_file:
        path: "{{ temp_dir }}{{ qtree_name }}.ps1"
        state: absent

    # report the UNC-style path of the provisioned share
    - debug:
        msg:
          - "//{{netapp_svm_host}}/{{volume_name}}/{{ qtree_name }}"

    # REQUIREMENT
    # new play for reporting:
    # handlers that evaluate state vars needed for this play, console and email
    # failure conditions and (templated) provisioned emails to all users;
    # will need logic for self service mode from the requester key in the
    # appropriate dict

    # REQUIREMENT?
    # there is no logic for delete/un-provision, playbook needs breaking out to
    # include tasks that are conditionally run based on perform parameter