path: root/__root__/run-check
#!/bin/sh
# Copyright 2016 Red Hat, Inc.
# Part of clufter project
# Licensed under GPLv2+ (a copy included | http://gnu.org/licenses/gpl-2.0.txt)

# Depending on usage: a quick sanity check, or just the prologue for run-tests
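#
# Usage sketch (assumption: run-tests presumably sources this file, so that
# the PATH/ccs_flatten setup below persists in its environment):
#   ./run-check              # full self-check
#   NORUN=1 . ./run-check    # prologue only, self-checks skipped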

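# Resolve the script path to inspect: default to $0, but let a non-option
# first argument override it (it is then consumed from "$@")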
s="${0}"
[ $# -gt 0 ] && {
    case "${1}" in
    -*) ;;
    *) s="${1}"; shift ;;
    esac
}

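# Append the directory the (possibly symlinked) script resolves to onto PATH;
# the symlink target is scraped from ls -l output, a crude but portable
# stand-in for readlink -f where that is not available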
PATH="${PATH:+${PATH}:}$(
  dirname "$(pwd)/$(
    ls -l "$(echo "${s}" | sed 's|\./||')" | cut -d'>' -f2 | awk '{print $NF}'
  )"
)"
unset s

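# -E ignores PYTHON* environment variables, -s skips the user site-packages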
PYTHONEXEC="python2 -Es"

echo "Current path: ${PATH}"
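# Ensure ccs_flatten is available, building it in-tree when not found on PATH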
which ccs_flatten 2>/dev/null || (
    cd __root__ 2>/dev/null || :
    ${PYTHONEXEC} setup.py pkg_prepare --build-develop
    # the fallback below can be dropped once reasonably recent coreutils (8.22?) is widespread
    ln -frs -- ccs-flatten/*.metadata build 2>/dev/null \
    || ( cd build; ln -fs -- ../ccs-flatten/*.metadata . )
    ln -fs -- build/ccs_flatten .
    make -C ccs-flatten symlink
) || { ret=$?; echo "ccs_flatten is missing and could not be built" >&2; exit ${ret}; }

run_check() {
    # self-check of the ccs2pcs conversion
    ret=0  # aggregate exit status of the checks below
    testcib="$(mktemp)" testcoro="$(mktemp)"
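    # Feed the sample cluster.conf (heredoc below) to ccs2pcs on stdin; the CIB
    # is written to testcib, while the corosync.conf arriving on stdout is
    # saved to testcoro with the '_NOT_SECRET' placeholder key line filtered out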
    { { ${PYTHONEXEC} run-dev --dist redhat,7.1,Maipo \
                              ccs2pcs "${@}" -- - - "${testcib}" \
        | grep -Ev 'key: _NOT_SECRET--' > "${testcoro}"; } \
    && echo "TEST: execution OK" \
    || { echo "TEST: execution FAIL"; ret=20; }; }<<EOF
<?xml version="1.0"?>
<cluster name="one" config_version="6">
  <logging debug="on"/>
  <clusternodes>
    <clusternode name="rhel6-node1" votes="1" nodeid="1">
      <fence>
        <method name="single">
          <device name="xvm" domain="rhel6-node1"/>
        </method>
      </fence>
    </clusternode>
    <clusternode name="rhel6-node2" votes="1" nodeid="2">
      <fence>
        <method name="single">
          <device name="xvm" domain="rhel6-node2"/>
        </method>
      </fence>
    </clusternode>
  </clusternodes>
  <fencedevices>
    <fencedevice name="xvm" agent="fence_xvm"/>
  </fencedevices>
  <quorumd label="qdisk"/>
  <rm central_processing="1">
    <failoverdomains>
      <failoverdomain name="failover_domain1" ordered="1" restricted="1" nofailback="1">
        <failoverdomainnode name="rhel6-node1" priority="1"/>
        <failoverdomainnode name="rhel6-node2" priority="1"/>
      </failoverdomain>
    </failoverdomains>
    <resources>
      <ip address="192.168.0.128" monitor_link="1"/>
    </resources>
    <service autostart="1" name="mm" domain="failover_domain1">
    <!-- service exclusive="0" autostart="1" name="mm" -->
      <ip ref="192.168.0.128"/>
    </service>
  </rm>
</cluster>
EOF
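    # Compare the generated CIB with the expected one (heredoc below)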
    { diff -u -- - "${testcib}" \
      && echo "TEST: cib diff OK" \
      || { echo "TEST: cib diff FAIL"; ret=21; }; }<<EOF
<cib validate-with="pacemaker-1.2" admin_epoch="0" epoch="0" num_updates="0" update-client="$(${PYTHONEXEC} run-dev --version | head -n1)">
  <configuration>
    <crm_config/>
    <nodes>
      <node id="1" uname="rhel6-node1" type="member"/>
      <node id="2" uname="rhel6-node2" type="member"/>
    </nodes>
    <resources>
      <!-- FENCING/STONITH (+ POSSIBLY TOPOLOGY BELOW) -->
      <primitive id="FENCEDEV-xvm" class="stonith" type="fence_xvm">
        <instance_attributes id="FENCEDEV-xvm-ATTRS">
          <nvpair id="FENCEDEV-xvm-ATTRS-pcmk_host_map" name="pcmk_host_map" value="rhel6-node1:rhel6-node1,rhel6-node2:rhel6-node2"/>
        </instance_attributes>
      </primitive>
      <!-- RESOURCES+ARRANGEMENT -->
      <group id="SERVICE-mm-GROUP">
        <primitive id="RESOURCE-ip-192.168.0.128" description="natively converted from ip RA" class="ocf" provider="heartbeat" type="IPaddr2">
          <instance_attributes id="RESOURCE-ip-192.168.0.128-ATTRS">
            <nvpair id="RESOURCE-ip-192.168.0.128-ATTRS-ip" name="ip" value="192.168.0.128"/>
          </instance_attributes>
          <operations>
            <op id="RESOURCE-ip-192.168.0.128-OP-monitor" name="monitor" interval="20s"/>
          </operations>
        </primitive>
        <!-- mimic NOFAILBACK failoverdomain (FAILOVERDOMAIN-failover_domain1)-->
        <meta_attributes id="SERVICE-mm-META-ATTRS-nofailback">
          <rule id="SERVICE-mm-META-RULE-stickiness" score="0" boolean-op="or">
            <expression id="STICKINESS-SERVICE-mm-rhel6-node1" attribute="#uname" operation="eq" value="rhel6-node1"/>
            <expression id="STICKINESS-SERVICE-mm-rhel6-node2" attribute="#uname" operation="eq" value="rhel6-node2"/>
          </rule>
          <nvpair id="SERVICE-mm-META-ATTRS-nofailback-pair" name="resource-stickiness" value="INFINITY"/>
        </meta_attributes>
      </group>
    </resources>
    <constraints>
      <rsc_location id="CONSTRAINT-LOCATION-SERVICE-mm-GROUP" rsc="SERVICE-mm-GROUP">
        <!-- mimic failoverdomain (failover_domain1) for SERVICE-mm -->
        <rule id="CONSTRAINT-LOCATION-SERVICE-mm-GROUP-rhel6-node1" score="INFINITY">
          <expression id="CONSTRAINT-LOCATION-SERVICE-mm-GROUP-rhel6-node1-expr" attribute="#uname" operation="eq" value="rhel6-node1"/>
        </rule>
        <rule id="CONSTRAINT-LOCATION-SERVICE-mm-GROUP-rhel6-node2" score="INFINITY">
          <expression id="CONSTRAINT-LOCATION-SERVICE-mm-GROUP-rhel6-node2-expr" attribute="#uname" operation="eq" value="rhel6-node2"/>
        </rule>
        <!-- mimic RESTRICTED failoverdomain (failover_domain1) for SERVICE-mm -->
        <rule id="CONSTRAINT-LOCATION-SERVICE-mm-GROUP-RESTRICTED" boolean-op="and" score="-INFINITY">
          <expression id="CONSTRAINT-LOCATION-SERVICE-mm-GROUP-RESTRICTED-rhel6-node1-expr" attribute="#uname" operation="ne" value="rhel6-node1"/>
          <expression id="CONSTRAINT-LOCATION-SERVICE-mm-GROUP-RESTRICTED-rhel6-node2-expr" attribute="#uname" operation="ne" value="rhel6-node2"/>
        </rule>
      </rsc_location>
    </constraints>
  </configuration>
  <status/>
</cib>
EOF
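    # Compare the filtered corosync.conf with the expected one (heredoc below)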
    { diff -u -- - "${testcoro}" \
      && echo "TEST: coro diff OK" \
      || { echo "TEST: coro diff FAIL"; ret=22; }; }<<EOF
nodelist {
	node {
		nodeid: 1
		ring0_addr: rhel6-node1
	}
	node {
		nodeid: 2
		ring0_addr: rhel6-node2
	}
}
quorum {
	provider: corosync_votequorum
}
logging {
	debug: on
}
totem {
	cluster_name: one
	consensus: 12000
	join: 60
	token: 10000
	version: 2
}
EOF
    rm -f -- "${testcib}" "${testcoro}"
    unset testcib testcoro
    return ${ret}
}

run_check_cmd() {
    # self-check of the ccs2pcscmd conversion
    ret=0  # aggregate exit status of the checks below
    testcmd="$(mktemp)"
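    # Feed the sample cluster.conf (heredoc below) to ccs2pcscmd on stdin; the
    # resulting pcs command sequence is written to testcmd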
    { ${PYTHONEXEC} run-dev --dist redhat,7.1,Maipo \
                            ccs2pcscmd "${@}" -sg -- - "${testcmd}" \
      && echo "TEST: execution OK" \
      || { echo "TEST: execution FAIL"; ret=20; }; }<<EOF
<?xml version="1.0"?>
<cluster name="one" config_version="6">
  <logging debug="on"/>
  <clusternodes>
    <clusternode name="rhel6-node1" votes="1" nodeid="1">
      <fence>
        <method name="single">
          <device name="xvm" domain="rhel6-node1"/>
        </method>
      </fence>
    </clusternode>
    <clusternode name="rhel6-node2" votes="1" nodeid="2">
      <fence>
        <method name="single">
          <device name="xvm" domain="rhel6-node2"/>
        </method>
      </fence>
    </clusternode>
  </clusternodes>
  <fencedevices>
    <fencedevice name="xvm" agent="fence_xvm"/>
  </fencedevices>
  <quorumd label="qdisk"/>
  <rm central_processing="1">
    <failoverdomains>
      <failoverdomain name="failover_domain1" ordered="1" restricted="1" nofailback="1">
        <failoverdomainnode name="rhel6-node1" priority="1"/>
        <failoverdomainnode name="rhel6-node2" priority="1"/>
      </failoverdomain>
    </failoverdomains>
    <resources>
      <ip address="192.168.0.128" monitor_link="1"/>
    </resources>
    <service autostart="1" name="mm" domain="failover_domain1">
    <!-- service exclusive="0" autostart="1" name="mm" -->
      <ip ref="192.168.0.128"/>
    </service>
  </rm>
</cluster>
EOF
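    # Compare the generated pcs command sequence with the expected one (heredoc below)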
    { diff -u -- - "${testcmd}" \
      && echo "TEST: cmd diff OK" \
      || { echo "TEST: cmd diff FAIL"; ret=21; }; }<<EOF
pcs cluster auth rhel6-node1 rhel6-node2
pcs cluster setup --start --name one rhel6-node1 rhel6-node2 \\
  --consensus 12000 --token 10000 --join 60
sleep 60
pcs cluster cib tmp-cib.xml --config
pcs -f tmp-cib.xml stonith create FENCEDEV-xvm fence_xvm \\
  pcmk_host_map=rhel6-node1:rhel6-node1,rhel6-node2:rhel6-node2
pcs -f tmp-cib.xml \\
  resource create RESOURCE-ip-192.168.0.128 ocf:heartbeat:IPaddr2 \\
  ip=192.168.0.128 op monitor interval=20s
pcs -f tmp-cib.xml \\
  resource group add SERVICE-mm-GROUP RESOURCE-ip-192.168.0.128
pcs -f tmp-cib.xml \\
  constraint location SERVICE-mm-GROUP rule \\
  id=CONSTRAINT-LOCATION-SERVICE-mm-GROUP-rhel6-node1 \\
  constraint-id=CONSTRAINT-LOCATION-SERVICE-mm-GROUP score=INFINITY \\
  '#uname' eq rhel6-node1
pcs -f tmp-cib.xml \\
  constraint rule add CONSTRAINT-LOCATION-SERVICE-mm-GROUP \\
  id=CONSTRAINT-LOCATION-SERVICE-mm-GROUP-rhel6-node2 score=INFINITY \\
  '#uname' eq rhel6-node2
pcs -f tmp-cib.xml \\
  constraint rule add CONSTRAINT-LOCATION-SERVICE-mm-GROUP \\
  id=CONSTRAINT-LOCATION-SERVICE-mm-GROUP-RESTRICTED score=-INFINITY \\
  '#uname' ne rhel6-node1 '#uname' ne rhel6-node2
pcs cluster cib-push tmp-cib.xml --config
EOF
    rm -f -- "${testcmd}"
    unset testcmd
    return ${ret}
}

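# Unless NORUN is set to a non-zero value (e.g. when used as a prologue),
# run both self-checks, propagating any failure via the exit status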
[ "${NORUN:-0}" -ne 0 ] || {
    run_check "$@"; rc=$?
    run_check_cmd "$@" || rc=$?
    exit ${rc}
}