1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
|
#!/bin/sh

#config: CLUSTER CLUSTERFS_DEFAULT_MOUNTPOINT GPFS_DEFAULT_NSDS
#config: NODES_STORAGE_GPFS SHARED_DISK_IDS

# Automatically setup GPFS. This is a quick way to get setup with an
# autocluster system. It finds NSDs, does various pieces of GPFS
# configuration, creates a filesystem and mounts it.

set -e

# Copy the relevant autocluster configuration variables into
# lower-case locals used by the functions below.
gpfs_num_nsds="$GPFS_DEFAULT_NSDS"
cluster_name="$CLUSTER"
mountpoint="$CLUSTERFS_DEFAULT_MOUNTPOINT"
nodes_storage_gpfs="$NODES_STORAGE_GPFS"
shared_disk_ids="$SHARED_DISK_IDS"

# Directory containing this script - generated files live here.
dir=$(dirname "$0")

##################################################

# If there are "storage_gpfs" nodes in the cluster (meaning that "nas"
# nodes will not have direct-attached storage) then scripts that
# include this snippet must be run on one of the storage nodes.
# Therefore, in that case, this snippet tries to determine if it is
# running on the 1st GPFS storage node and, if not, attempts to run
# the script there.
# Note: two [ ] tests joined with && instead of the obsolescent,
# ambiguous "[ ... -a ... ]" form.
if [ -n "$nodes_storage_gpfs" ] && \
    [ "${HOSTNAME%%.*}" != "${nodes_storage_gpfs%%[.,]*}" ] ; then
    # Build an absolute path to this script so it can be re-executed
    # via ssh regardless of the remote working directory.
    if [ "${0#/}" != "$0" ] ; then
        script="$0"
    else
        script="${PWD}/${0}"
    fi
    re_exec_node="${nodes_storage_gpfs%%[.,]*}"
    echo
    echo "Creating NSDs on node \"${re_exec_node}\""
    exec ssh "$re_exec_node" "$script" "$@"
fi
# Create and start the GPFS cluster.
#
# Uses: cluster_name, nodes_storage_gpfs, dir
#
# Builds an mmcrcluster node description file, creates the cluster,
# sets licenses and configuration options, starts GPFS and waits for
# all nodes to report "active" (timing out after ~60 seconds).
gpfs_setup ()
{
    _domain=$(dnsdomainname)
    # All cluster node names: lower-cased, newline -> space separated.
    _nodes=$(onnode -q all hostname | grep -i "$_domain" | tr 'A-Z\012' 'a-z\040')
    _first="${_nodes%% *}"

    # Determine primary and secondary nodes.  Give preference to GPFS
    # storage nodes, falling back to regular nodes if there aren't any
    # or aren't enough.  Clear both first so values inherited from the
    # environment can't leak in.
    _primary=""
    _secondary=""
    if [ -n "$nodes_storage_gpfs" ] ; then
        _primary="${nodes_storage_gpfs%%,*}"
        # Only take a secondary from the storage nodes when there is
        # more than one of them.  Without this guard, "${x#*,}" on a
        # comma-less list leaves the string unchanged and the
        # secondary would duplicate the primary.
        case "$nodes_storage_gpfs" in
        *,*)
            _rest="${nodes_storage_gpfs#*,}"
            _secondary="${_rest%%,*}"
            ;;
        esac
    fi
    if [ -z "$_primary" ] ; then
        _primary="$_first"
        _rest="${_nodes#* }"
        _secondary="${_rest%% *}"
    elif [ -z "$_secondary" ] ; then
        _secondary="$_first"
    fi

    # Create the node description file for mmcrcluster.  If there are
    # dedicated storage nodes then they are quorum nodes, along with
    # the first node.  If there are no dedicated storage nodes then
    # all nodes are quorum nodes.
    _nodefile="${dir}/gpfs_nodes.${cluster_name}"
    {
        for _n in $_nodes ; do
            if [ "$_n" = "$_first" ] ; then
                echo "${_n}:manager-quorum:"
            elif [ -n "$nodes_storage_gpfs" ] ; then
                echo "${_n}:manager:"
            else
                echo "${_n}:manager-quorum:"
            fi
        done
        for _n in $(echo "$nodes_storage_gpfs" | sed -e 's@,@ @g') ; do
            echo "${_n}:manager-quorum:"
        done
    } >"$_nodefile"

    echo "Creating cluster"
    # Don't quote secondary, since it might not exist
    mmcrcluster -N "$_nodefile" \
        -p "$_primary" ${_secondary:+-s} $_secondary \
        -r /usr/bin/ssh -R /usr/bin/scp -C "${cluster_name}.${_domain}"

    # GPFS >= 3.3 needs this.  Earlier versions don't have
    # mmchlicense, so be careful.
    if type mmchlicense >/dev/null 2>&1 ; then
        echo
        echo "Attempting to set server license mode for all nodes"
        mmchlicense server --accept -N all
    fi

    echo
    echo "Attempting to set adminMode=allToAll"
    mmchconfig adminMode=allToAll </dev/null || true

    echo
    echo "Generating auth key"
    mmauth genkey new

    echo
    echo "Setting GPFS config options"
    mmchconfig autoload=yes,leaseRecoveryWait=3,maxFilesToCache=20000,failureDetectionTime=10,maxMBpS=500,unmountOnDiskFail=yes,pagepool=64M,allowSambaCaseInsensitiveLookup=no
    mmchconfig cipherList=AUTHONLY

    echo "Starting gpfs"
    mmstartup -a

    echo "Waiting for gpfs to become active"
    _count=0
    # Skip the mmgetstate header lines; any remaining line without
    # " active" means some node is still starting.
    while mmgetstate -a | tail -n +4 | grep -v " active" > /dev/null; do
        # printf instead of "echo -n": -n is not portable under /bin/sh
        printf "."
        _count=$((_count + 1))
        if [ "$_count" -gt 60 ] ; then
            echo "TIMEOUT: gpfs didn't become active"
            exit 1
        fi
        sleep 1
    done
    echo
}
# File listing all NSD descriptors, shared by gpfs_mknsd and gpfs_mkfs.
nsdfile="${dir}/gpfs_nsds_all.${cluster_name}"

# Create GPFS NSDs on the multipath devices whose WWIDs are listed in
# SHARED_DISK_IDS.
#
# Uses: shared_disk_ids, nodes_storage_gpfs, dir, cluster_name
# Sets: nsdfile
gpfs_mknsd ()
{
    echo
    echo "Setting up NSDs"
    # Create an extended regexp that matches any of the IDs.  The
    # separator pattern must be "1 or more spaces": with "0 or more"
    # ('s@ *@|@g') sed also matches the empty string between every
    # pair of characters and inserts "|" everywhere, corrupting the
    # alternation.
    pat=$(echo "$shared_disk_ids" | sed -e 's@  *@|@g')
    # Now get devices and names from multipath.  Each matching line of
    # "multipath -dl" output yields "<wwid> <dm-device>".
    multipath -dl |
    sed -r -n -e "s@^[^[:space:]]+[[:space:]]+\(($pat)\)[[:space:]](dm-[^[:space:]]+).*@\1 \2@p" |
    while read -r _name _disk ; do
        # NSD names: strip everything but alphanumerics from the WWID
        _name=$(echo "$_name" | tr -d -c '[:alnum:]')
        # Descriptor: device:servers::usage:failure-group:name:
        echo "${_disk}:${nodes_storage_gpfs}::dataAndMetadata:1:${_name}:"
    done >"$nsdfile"
    mmcrnsd -F "$nsdfile"
    mmlsnsd -m
}
# Create the default filesystem (gpfs0) from the NSD descriptor file.
# Uses: mountpoint, gpfs_num_nsds, nsdfile, dir, cluster_name
gpfs_mkfs ()
{
    echo
    echo "Creating filesystem"
    # The NSD file is produced by gpfs_mknsd and must already exist.
    [ -r "$nsdfile" ] || {
        echo "ERROR: missing NSD file \"${nsdfile}\""
        exit 1
    }
    mkdir -p "${mountpoint}/automountdir"
    # Select NSDs for this filesystem.  When a count is configured,
    # take (count * 2) lines -- presumably each NSD occupies 2 lines
    # of the file after mmcrnsd rewrites it (TODO confirm); otherwise
    # use every NSD.
    nsdfile2="${dir}/gpfs_nsds_defaultfs.${cluster_name}"
    if [ -n "$gpfs_num_nsds" ] ; then
        head -n "$((gpfs_num_nsds * 2))" <"$nsdfile" >"$nsdfile2"
    else
        cat <"$nsdfile" >"$nsdfile2"
    fi
    # Make the mountpoint immutable so nothing can write under it
    # while the filesystem is not mounted.
    chattr +i "$mountpoint"
    mmcrfs gpfs0 -F "$nsdfile2" \
        -A yes -Q yes -D nfs4 -B 64k -k nfs4 -n 32 -E yes -S no \
        -T "$mountpoint" -i 512
    rm -f "$nsdfile2"
}
# Mount gpfs0 on all nodes and wait for the local mountpoint to
# appear, timing out after ~60 seconds (matching gpfs_setup's wait).
# Uses: mountpoint
gpfs_mount ()
{
    echo
    echo "Mounting filesystem"
    mmmount gpfs0 -a
    echo "Waiting for gpfs to mount"
    _count=0
    while ! findmnt "$mountpoint" ; do
        # printf instead of "echo -n": -n is not portable under /bin/sh
        printf "."
        _count=$((_count + 1))
        if [ "$_count" -gt 60 ] ; then
            echo "TIMEOUT: gpfs didn't mount"
            exit 1
        fi
        sleep 1
    done
    echo
}
# Announce that the whole setup sequence finished successfully.
gpfs_complete ()
{
    printf '%s\n' "GPFS setup complete"
}
# With no arguments, run the full sequence; otherwise run only the
# requested steps.  Each argument maps to a gpfs_<action> function.
if [ -z "$1" ] ; then
    set -- setup mknsd mkfs mount complete
fi
for action in "$@" ; do
    gpfs_$action
done
|