author    | Sanju Rakonde <srakonde@redhat.com> | 2020-05-29 10:53:11 +0530
committer | Amar Tumballi <amar@kadalu.io>      | 2020-06-17 13:15:26 +0000
commit    | c325082370b2588e267d04f2a9c1a8dd0a9ba878 (patch)
tree      | 2fe95bf391b934ae670a464fc79685e3fda8a4d2 /tests/bugs/glusterd
parent    | efaab5ec0258073b6e380754e8fb336db09d05da (diff)
tests/glusterd: spurious failure of tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t
Test Summary Report
-------------------
tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t
(Wstat: 0 Tests: 23 Failed: 3)
Failed tests: 21-23
After a glusterd restart, the volume start fails. It looks like the restarted
daemon needs some time to sync the volume data, so a sleep is added before the
volume start (see the sketch after the sign-off below).
Note: All other changes are made to avoid spurious failures in the future.
fixes: #1272
Change-Id: Ib184757fb936e03b5b6208465e44a8e790b71c1c
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
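In essence, the patch brings glusterd back up on the killed node, waits until every node again reports both peers as connected, and only then starts the volume from that node after a short pause. A minimal sketch of that wait-then-start pattern, taken from the change itself and assuming the helpers (TEST, EXPECT_WITHIN, check_peers, volinfo_field_1) and the per-node CLI_*/glusterd_* variables set up by the test framework's cluster.rc:

    # Bring the killed management daemon back up on node 3.
    TEST $glusterd_3

    # Wait until each of the three nodes again sees two connected peers.
    EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 1
    EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 2
    EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 3

    # The restarted glusterd may still be importing volume data from its
    # peers, so pause briefly before acting on the volume from node 3.
    sleep 5
    TEST $CLI_3 volume start $V0
    EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field_1 $V0 'Status'

The EXPECT_WITHIN checks retry until their timeouts expire, while the fixed sleep simply gives the restarted daemon extra time to finish syncing volume data, as described above.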
Diffstat (limited to 'tests/bugs/glusterd')
-rw-r--r-- | tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t | 18 |
1 files changed, 15 insertions, 3 deletions
diff --git a/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t b/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t
index fdc0a73f60..8001359e6b 100644
--- a/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t
+++ b/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t
@@ -4,7 +4,7 @@
 . $(dirname $0)/../../cluster.rc
 function check_peers {
-$CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+eval \$CLI_$1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
 }
 cleanup
@@ -36,23 +36,35 @@ TEST [[ $OP_VERS_ORIG == $OP_VERS_NEW ]]
 #bug-948686 - volume sync after bringing up the killed node
 TEST $CLI_1 peer probe $H3
-EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 2
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 3
 TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/$V0 $H1:$B1/${V0}_1 $H2:$B2/$V0 $H3:$B3/$V0
 TEST $CLI_1 volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field_1 $V0 'Status'
 TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
 #kill a node
 TEST kill_node 3
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers 1
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers 2
 #modify volume config to see change in volume-sync
 TEST $CLI_1 volume set $V0 write-behind off
 #add some files to the volume to see effect of volume-heal cmd
 TEST touch $M0/{1..100};
 TEST $CLI_1 volume stop $V0;
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 'Stopped' volinfo_field_1 $V0 'Status'
+
 TEST $glusterd_3;
-EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 2
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 3
+
+sleep 5
 TEST $CLI_3 volume start $V0;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field_1 $V0 'Status'
 TEST $CLI_2 volume stop $V0;
 TEST $CLI_2 volume delete $V0;
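The reworked check_peers helper takes the node index as an argument, so the peer count can be asserted from any node's CLI rather than only from node 1. A hedged illustration of how the eval-based expansion behaves, assuming CLI_1..CLI_3 are the per-node gluster CLI variables set up by cluster.rc:

    # check_peers N counts the peers that node N reports as connected:
    # "check_peers 2" evaluates "$CLI_2 peer status" via eval and counts
    # the output lines matching 'Peer in Cluster (Connected)'.
    function check_peers {
        eval \$CLI_$1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
    }

    # Example: require node 3 to see two connected peers within $PROBE_TIMEOUT.
    EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 3

This is what lets the extra EXPECT_WITHIN lines in the diff verify cluster state from nodes 2 and 3 as well, instead of trusting node 1's view alone.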