author     Martin Schwenke <martin@meltin.net>   2019-02-06 14:53:10 +1100
committer  Martin Schwenke <martin@meltin.net>   2019-03-25 16:52:25 +1100
commit     51ff83de30db6934e243226ce05c6394b8986a12 (patch)
tree       f8dbbe3ceabc398a4596c968285a7245b3c70e01 /ansible/node/roles/storage
parent     7003df8ad2ec9eaa119439f21976e7117b1771e5 (diff)
Add Ansible playbook for node configuration
This will replace all of the existing node provisioning/configuration.
CentOS 7 nodes are currently supported.

Signed-off-by: Martin Schwenke <martin@meltin.net>
Diffstat (limited to 'ansible/node/roles/storage')
-rw-r--r--  ansible/node/roles/storage/tasks/generic/clusterfs-gpfs-once.yml  135
-rw-r--r--  ansible/node/roles/storage/tasks/generic/clusterfs-gpfs.yml        20
-rw-r--r--  ansible/node/roles/storage/tasks/main.yml                           6
-rw-r--r--  ansible/node/roles/storage/templates/gpfs_nodes.j2                 33
-rw-r--r--  ansible/node/roles/storage/templates/gpfs_primary_secondary.j2     19
5 files changed, 213 insertions, 0 deletions
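
For orientation, the role reads a small set of variables that recur throughout the tasks and templates below: clusterfs.type and clusterfs.mountpoint, a nodes dictionary with per-host has_shared_storage and is_ctdb_node flags, and cluster plus resolv_conf.domain for the GPFS cluster name; the gpfs_primary_secondary.j2 template additionally relies on the Ansible groups 'storage-nodes' and 'nas-nodes'. A minimal sketch of such variables, using an illustrative mountpoint, cluster name and hostnames that are not taken from this commit:

    clusterfs:
      type: gpfs
      mountpoint: /clusterfs        # assumed example path
    cluster: cluster1               # assumed example cluster name
    nodes:
      node1:
        is_ctdb_node: true
        has_shared_storage: true
      node2:
        is_ctdb_node: true
        has_shared_storage: true
      storage1:
        is_ctdb_node: false
        has_shared_storage: true
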
diff --git a/ansible/node/roles/storage/tasks/generic/clusterfs-gpfs-once.yml b/ansible/node/roles/storage/tasks/generic/clusterfs-gpfs-once.yml
new file mode 100644
index 0000000..5ed26eb
--- /dev/null
+++ b/ansible/node/roles/storage/tasks/generic/clusterfs-gpfs-once.yml
@@ -0,0 +1,135 @@
+---
+- name: generate GPFS nodes file
+  template:
+    src: gpfs_nodes.j2
+    dest: /root/.autocluster/gpfs_nodes
+
+- name: generate file containing GPFS primary and secondary nodes
+  template:
+    src: gpfs_primary_secondary.j2
+    dest: /root/.autocluster/gpfs_primary_secondary
+
+- name: check if GPFS active flag file exists
+  stat:
+    path: /root/.autocluster/gpfs_active
+  register: gpfs_active
+
+- name: create GPFS cluster
+  shell: |
+    mmlscluster || {
+      read primary secondary </root/.autocluster/gpfs_primary_secondary && \
+        mmcrcluster -N /root/.autocluster/gpfs_nodes \
+          -p "$primary" -s "$secondary" \
+          -r /usr/bin/ssh -R /usr/bin/scp \
+          -C "{{ cluster }}.{{ resolv_conf.domain | lower }}"
+    }
+  when: not gpfs_active.stat.exists
+
+- name: set GPFS server license mode
+  command: mmchlicense server --accept -N all
+  when: not gpfs_active.stat.exists
+
+- name: set GPFS admin mode to allToAll
+  command: mmchconfig adminMode=allToAll
+  when: not gpfs_active.stat.exists
+
+- name: generate GPFS auth key
+  # Without the commit, this can't be run more than once
+  shell: mmauth genkey new && mmauth genkey commit
+  when: not gpfs_active.stat.exists
+
+- name: set GPFS config options
+  # Can not be run if GPFS is active
+  command: mmchconfig autoload=yes,leaseRecoveryWait=3,maxFilesToCache=20000,failureDetectionTime=10,maxMBpS=500,unmountOnDiskFail=yes,pagepool=64M,allowSambaCaseInsensitiveLookup=no
+  when: not gpfs_active.stat.exists
+
+- name: set GPFS cipher list option
+  # Can not be set with the above, can not be run if GPFS is active
+  command: mmchconfig cipherList=AUTHONLY
+  when: not gpfs_active.stat.exists
+
+- name: start GPFS
+  command: mmstartup -a
+  when: not gpfs_active.stat.exists
+
+- name: wait until GPFS is active on all nodes
+  # The field-separator passed to awk must be protected from YAML
+  shell: |
+    mmgetstate -a -Y | awk -F':' 'NR > 1 && $9 != "active" { exit(1) }'
+  register: result
+  until: result.rc == 0
+  retries: 12
+  delay: 5
+  when: not gpfs_active.stat.exists
+
+- name: flag GPFS as active
+  file:
+    path: /root/.autocluster/gpfs_active
+    state: touch
+
+- name: generate NSD file
+  shell: >
+    ls /dev/disk/by-id/virtio-AUTO-* |
+    xargs -n 1 realpath |
+    awk '{printf "%%nsd:\n device=%s\n usage=dataAndMetadata\n failureGroup=1\n\n", $1}' |
+    tee gpfs_nsds
+  args:
+    chdir: /root/.autocluster/
+    creates: gpfs_nsds
+
+- name: check if GPFS NSDs created file exists
+  stat:
+    path: /root/.autocluster/gpfs_nsds_created
+  register: gpfs_nsds_created
+
+- name: create GPFS NSDs
+  command: mmcrnsd -F gpfs_nsds
+  args:
+    chdir: /root/.autocluster/
+  when: not gpfs_nsds_created.stat.exists
+
+- name: flag GPFS NSDs as created
+  file:
+    path: /root/.autocluster/gpfs_nsds_created
+    state: touch
+
+- name: check if GPFS filesystem created file exists
+  stat:
+    path: /root/.autocluster/gpfs_fs_created
+  register: gpfs_fs_created
+
+- name: create GPFS filesystem
+  command: >
+    mmcrfs gpfs0 -F gpfs_nsds
+    -A yes -Q yes -D nfs4 -B 64k -k nfs4 -n 32 -E yes -S no
+    -T {{ clusterfs.mountpoint }} -i 512
+  args:
+    chdir: /root/.autocluster/
+  when: not gpfs_fs_created.stat.exists
+
+- name: flag GPFS filesystem as created
+  file:
+    path: /root/.autocluster/gpfs_fs_created
+    state: touch
+
+- name: check if GPFS filesystem mounted file exists
+  stat:
+    path: /root/.autocluster/gpfs_fs_mounted
+  register: gpfs_fs_mounted
+
+- name: mount GPFS filesystem
+  command: mmmount gpfs0 -a
+  when: not gpfs_fs_mounted.stat.exists
+
+- name: wait until GPFS filesystem is mounted
+  command: findmnt {{ clusterfs.mountpoint }}
+  register: result
+  until: result.rc == 0
+  retries: 12
+  delay: 5
+  when: not gpfs_fs_mounted.stat.exists
+
+- name: flag GPFS filesystem as mounted
+  file:
+    path: /root/.autocluster/gpfs_fs_mounted
+    state: touch
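
As a rough illustration, the awk pipeline in the "generate NSD file" task above emits one stanza per virtio-AUTO disk, along these lines (the device path is an assumed example, not taken from this commit):

    %nsd:
     device=/dev/vdb
     usage=dataAndMetadata
     failureGroup=1
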
diff --git a/ansible/node/roles/storage/tasks/generic/clusterfs-gpfs.yml b/ansible/node/roles/storage/tasks/generic/clusterfs-gpfs.yml
new file mode 100644
index 0000000..c498443
--- /dev/null
+++ b/ansible/node/roles/storage/tasks/generic/clusterfs-gpfs.yml
@@ -0,0 +1,20 @@
+---
+- name: create cluster filesystem mountpoint
+  file:
+    path: "{{ clusterfs.mountpoint }}"
+    state: directory
+
+- name: create cluster filesystem automount directory
+  file:
+    path: "{{ clusterfs.mountpoint }}/automount"
+    state: directory
+
+- name: make cluster filesystem mountpoint immutable
+  shell: >
+    if ! findmnt "{{ clusterfs.mountpoint }}"; then
+      chattr +i "{{ clusterfs.mountpoint }}"
+    fi
+
+- import_tasks: clusterfs-gpfs-once.yml
+  run_once: true
+  when: nodes[ansible_hostname].has_shared_storage
diff --git a/ansible/node/roles/storage/tasks/main.yml b/ansible/node/roles/storage/tasks/main.yml
new file mode 100644
index 0000000..07a5ad1
--- /dev/null
+++ b/ansible/node/roles/storage/tasks/main.yml
@@ -0,0 +1,6 @@
+---
+- include_tasks: generic/{{ task }}.yml
+  with_list:
+    - clusterfs-{{ clusterfs.type }}
+  loop_control:
+    loop_var: task
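
With clusterfs.type set to gpfs, the with_list loop above resolves to a single include, roughly equivalent to:

    - include_tasks: generic/clusterfs-gpfs.yml
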
diff --git a/ansible/node/roles/storage/templates/gpfs_nodes.j2 b/ansible/node/roles/storage/templates/gpfs_nodes.j2
new file mode 100644
index 0000000..5a9ecd7
--- /dev/null
+++ b/ansible/node/roles/storage/templates/gpfs_nodes.j2
@@ -0,0 +1,33 @@
+# GPFS nodes file generated by autocluster
+{# #}
+{# Count dedicated storage nodes, find first CTDB node #}
+{# #}
+{# Use a namespace so values set inside the loop persist outside it #}
+{% set ns = namespace(num_storage_nodes=0, first_ctdb_node="") %}
+{% for hostname, n in nodes | dictsort %}
+{% if n.has_shared_storage %}
+{% if n.is_ctdb_node %}
+{% if not ns.first_ctdb_node %}
+{% set ns.first_ctdb_node = hostname %}
+{% endif %}
+{% else %}
+{% set ns.num_storage_nodes = ns.num_storage_nodes + 1 %}
+{% endif %}
+{% endif %}
+{% endfor %}
+{# #}
+{# Generate GPFS nodes file lines #}
+{# #}
+{% for hostname, n in nodes | dictsort %}
+{% if n.is_ctdb_node %}
+{% if hostname == ns.first_ctdb_node %}
+{{ hostname }}:manager-quorum:
+{% elif ns.num_storage_nodes > 0 %}
+{{ hostname }}:manager:
+{% else %}
+{{ hostname }}:manager-quorum:
+{% endif %}
+{% elif n.has_shared_storage %}
+{{ hostname }}:manager-quorum:
+{% endif %}
+{% endfor %}
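
For a hypothetical layout with two CTDB nodes (node1, node2) and one dedicated storage node (storage1), the template above would render roughly as:

    # GPFS nodes file generated by autocluster
    node1:manager-quorum:
    node2:manager:
    storage1:manager-quorum:
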
diff --git a/ansible/node/roles/storage/templates/gpfs_primary_secondary.j2 b/ansible/node/roles/storage/templates/gpfs_primary_secondary.j2
new file mode 100644
index 0000000..aec7a37
--- /dev/null
+++ b/ansible/node/roles/storage/templates/gpfs_primary_secondary.j2
@@ -0,0 +1,19 @@
+{# #}
+{# Count dedicated storage nodes #}
+{# #}
+{% set ns = namespace(num_storage_nodes=0) %}
+{% for hostname, n in nodes | dictsort %}
+{% if n.has_shared_storage and not n.is_ctdb_node %}
+{% set ns.num_storage_nodes = ns.num_storage_nodes + 1 %}
+{% endif %}
+{% endfor %}
+{# #}
+{# Write a single line containing "primary secondary" #}
+{# #}
+{% if ns.num_storage_nodes >= 2 %}
+{{ groups['storage-nodes'][0] }} {{ groups['storage-nodes'][1] }}
+{% elif ns.num_storage_nodes == 1 %}
+{{ groups['storage-nodes'][0] }} {{ groups['nas-nodes'][0] }}
+{% else %}
+{{ groups['nas-nodes'][0] }} {{ groups['nas-nodes'][1] }}
+{% endif %}
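
Continuing the same hypothetical layout (one dedicated storage node, with assumed group memberships storage-nodes: [storage1] and nas-nodes: [node1, node2]), the template emits a single "primary secondary" line such as:

    storage1 node1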