author    | Xavier Hernandez <xhernandez@datalab.es>      | 2015-08-05 23:42:41 +0200
committer | Pranith Kumar Karampuri <pkarampu@redhat.com> | 2015-08-06 10:12:22 -0700
commit    | 7298b622ab39c2e78d6d745ae8b6e8413e1d9f1a (patch)
tree      | 5b8e7a1688532f2a3d80733a16304e5b6306cea8 /tests/bugs/disperse
parent    | a3faffb259d5288907fac33a2822a8f61c3e86fe (diff)
download  | glusterfs-7298b622ab39c2e78d6d745ae8b6e8413e1d9f1a.tar.gz
          | glusterfs-7298b622ab39c2e78d6d745ae8b6e8413e1d9f1a.tar.xz
          | glusterfs-7298b622ab39c2e78d6d745ae8b6e8413e1d9f1a.zip
cluster/ec: Fix tracking of good bricks
The bitmask of good and bad bricks was kept in the context of the
corresponding inode or fd. This was problematic when an external
process (another client or the self-heal process) healed the bricks,
because the bitmask held by the other clients was never updated.
This patch removes the bitmask stored in the context and instead
computes which bricks are healthy after locking them and doing the
initial xattrop. After that, the mask is updated with the result of
each fop.
Change-Id: I225e31cd219a12af4ca58871d8a4bb6f742b223c
BUG: 1236065
Signed-off-by: Xavier Hernandez <xhernandez@datalab.es>
Reviewed-on: http://review.gluster.org/11844
Tested-by: NetBSD Build System <jenkins@build.gluster.org>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
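
To make the described mechanism concrete, here is a minimal, self-contained sketch. It is plain C and not the actual cluster/ec translator code; the names (brick_version, compute_good_mask, update_good_mask, BRICKS) are invented for illustration. It only shows the idea the patch applies: derive the good-bricks bitmask from the versions returned by the initial xattrop taken under lock, then narrow it with the result of each fop, instead of trusting a mask cached in the inode/fd context.

/*
 * Illustrative sketch only (assumed names, not GlusterFS APIs):
 * recompute the set of "good" bricks from per-brick state instead of
 * caching it in a client-side context.
 */
#include <stdint.h>
#include <stdio.h>

#define BRICKS 7

/* Versions reported by each brick's initial xattrop; bricks reporting the
 * highest version are considered up to date for this inode. */
static uint64_t brick_version[BRICKS] = { 5, 5, 4, 5, 5, 5, 3 };

/* Build the good-bricks bitmask from the xattrop answers. */
static uint32_t compute_good_mask(const uint64_t *versions, int count)
{
    uint64_t max = 0;
    uint32_t mask = 0;
    int i;

    for (i = 0; i < count; i++)
        if (versions[i] > max)
            max = versions[i];
    for (i = 0; i < count; i++)
        if (versions[i] == max)
            mask |= 1u << i;
    return mask;
}

/* After each fop, drop the bricks that failed so later fops ignore them. */
static uint32_t update_good_mask(uint32_t mask, uint32_t failed)
{
    return mask & ~failed;
}

int main(void)
{
    uint32_t good = compute_good_mask(brick_version, BRICKS);
    printf("good after xattrop: 0x%02x\n", (unsigned)good);

    good = update_good_mask(good, 1u << 3); /* pretend brick 3 failed a write */
    printf("good after fop:     0x%02x\n", (unsigned)good);
    return 0;
}

The point of the design is that the mask is rebuilt from on-brick state every time the inode/fd is locked, so a heal performed by another client or by the self-heal daemon is picked up automatically on the next operation rather than being masked by a stale cached value.
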
Diffstat (limited to 'tests/bugs/disperse')
-rw-r--r-- | tests/bugs/disperse/bug-1236065.t | 95
1 file changed, 95 insertions, 0 deletions
diff --git a/tests/bugs/disperse/bug-1236065.t b/tests/bugs/disperse/bug-1236065.t
new file mode 100644
index 0000000000..53dbcc5731
--- /dev/null
+++ b/tests/bugs/disperse/bug-1236065.t
@@ -0,0 +1,95 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+ec_test_dir=$M0/test
+
+function ec_test_generate_src()
+{
+    mkdir -p $ec_test_dir
+    for i in `seq 0 19`; do
+        dd if=/dev/zero of=$ec_test_dir/$i.c bs=1024 count=2
+    done
+}
+
+function ec_test_make()
+{
+    for i in `ls *.c`; do
+        file=`basename $i`
+        filename=${file%.*}
+        cp $i $filename.o
+    done
+}
+
+## step 1
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 7 redundancy 3 $H0:$B0/${V0}{0..6}
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "7" ec_child_up_count $V0 0
+
+## step 2
+TEST ec_test_generate_src
+
+cd $ec_test_dir
+TEST ec_test_make
+
+## step 3
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+EXPECT '5' online_brick_count
+
+TEST rm -f *.o
+TEST ec_test_make
+
+## step 4
+TEST $CLI volume start $V0 force
+EXPECT '7' online_brick_count
+
+# active heal
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
+TEST $CLI volume heal $V0 full
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+TEST rm -f *.o
+TEST ec_test_make
+
+## step 5
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST kill_brick $V0 $H0 $B0/${V0}3
+EXPECT '5' online_brick_count
+
+TEST rm -f *.o
+TEST ec_test_make
+
+EXPECT '5' online_brick_count
+
+## step 6
+TEST $CLI volume start $V0 force
+EXPECT '7' online_brick_count
+
+# self-healing
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
+TEST $CLI volume heal $V0 full
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+TEST rm -f *.o
+TEST ec_test_make
+
+TEST pidof glusterd
+EXPECT "$V0" volinfo_field $V0 'Volume Name'
+EXPECT 'Started' volinfo_field $V0 'Status'
+EXPECT '7' online_brick_count
+
+## cleanup
+cd
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+TEST rm -rf $B0/*
+
+cleanup;