summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAshish Pandey <aspandey@redhat.com>2016-03-04 13:05:09 +0530
committerPranith Kumar Karampuri <pkarampu@redhat.com>2016-03-15 21:54:28 -0700
commit23ccabbeb7879fd05f415690124bd7b4a74d4d33 (patch)
tree76282645ef4bb1b360470aa59d6b8cd7c184a148
parentb145cd15ded130078c9f1105e76f35d0886e62d5 (diff)
downloadglusterfs-23ccabbeb7879fd05f415690124bd7b4a74d4d33.tar.gz
glusterfs-23ccabbeb7879fd05f415690124bd7b4a74d4d33.tar.xz
glusterfs-23ccabbeb7879fd05f415690124bd7b4a74d4d33.zip
cluster/ec: Provide an option to enable/disable eager lock
Problem: If a fop takes lock, and completes its operation, it waits for 1 second before releasing the lock. However, If ec find any lock contention within this time period, it release the lock immediately before time expires. As we take lock on first brick, for few operations, like read, it might happen that discovery of lock contention might take long time and can degrades the performance. Solution: Provide an option to enable/disable eager lock. If eager lock is disabled, lock will be released as soon as fop completes. gluster v set <VOLUME NAME> disperse.eager-lock on gluster v set <VOLUME NAME> disperse.eager-lock off Change-Id: I000985a787eba3c190fdcd5981dfbf04e64af166 BUG: 1314649 Signed-off-by: Ashish Pandey <aspandey@redhat.com> Reviewed-on: http://review.gluster.org/13605 Smoke: Gluster Build System <jenkins@build.gluster.com> Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com> Tested-by: Pranith Kumar Karampuri <pkarampu@redhat.com> CentOS-regression: Gluster Build System <jenkins@build.gluster.com> NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
-rw-r--r--libglusterfs/src/globals.h2
-rw-r--r--tests/bugs/replicate/bug-1297695.t2
-rw-r--r--tests/bugs/replicate/bug-821056.t2
-rw-r--r--tests/bugs/replicate/bug-921231.t4
-rw-r--r--tests/bugs/replicate/bug-966018.t2
-rw-r--r--tests/bugs/replicate/bug-976800.t2
-rwxr-xr-xtests/bugs/replicate/bug-979365.t2
-rw-r--r--xlators/cluster/ec/src/ec-common.c9
-rw-r--r--xlators/cluster/ec/src/ec.c9
-rw-r--r--xlators/cluster/ec/src/ec.h1
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-volume-set.c5
11 files changed, 30 insertions, 10 deletions
diff --git a/libglusterfs/src/globals.h b/libglusterfs/src/globals.h
index cd4582a12f..9bc12f74ea 100644
--- a/libglusterfs/src/globals.h
+++ b/libglusterfs/src/globals.h
@@ -66,6 +66,8 @@
#define GD_OP_VERSION_3_7_9 30709 /* Op-version for GlusterFS 3.7.9 */
+#define GD_OP_VERSION_3_7_10 30710 /* Op-version for GlusterFS 3.7.10 */
+
#define GD_OP_VERSION_4_0_0 40000 /* Op-version for GlusterFS 4.0.0 */
#define GD_OP_VER_PERSISTENT_AFR_XATTRS GD_OP_VERSION_3_6_0
diff --git a/tests/bugs/replicate/bug-1297695.t b/tests/bugs/replicate/bug-1297695.t
index e0f431684e..d5c1a214fe 100644
--- a/tests/bugs/replicate/bug-1297695.t
+++ b/tests/bugs/replicate/bug-1297695.t
@@ -13,7 +13,7 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
-TEST $CLI volume set $V0 eager-lock on
+TEST $CLI volume set $V0 cluster.eager-lock on
TEST $CLI volume set $V0 post-op-delay-secs 3
TEST $CLI volume set $V0 cluster.entry-self-heal off
TEST $CLI volume set $V0 cluster.data-self-heal off
diff --git a/tests/bugs/replicate/bug-821056.t b/tests/bugs/replicate/bug-821056.t
index 02a9c78b6f..a163300440 100644
--- a/tests/bugs/replicate/bug-821056.t
+++ b/tests/bugs/replicate/bug-821056.t
@@ -8,7 +8,7 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
-TEST $CLI volume set $V0 eager-lock off
+TEST $CLI volume set $V0 cluster.eager-lock off
TEST $CLI volume set $V0 cluster.self-heal-daemon off
TEST $CLI volume set $V0 performance.quick-read off
TEST $CLI volume set $V0 performance.open-behind off
diff --git a/tests/bugs/replicate/bug-921231.t b/tests/bugs/replicate/bug-921231.t
index 93c642beb1..81504612f6 100644
--- a/tests/bugs/replicate/bug-921231.t
+++ b/tests/bugs/replicate/bug-921231.t
@@ -3,7 +3,7 @@
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
-# This test writes to same file with 2 fds and tests that eager-lock is not
+# This test writes to same file with 2 fds and tests that cluster.eager-lock is not
# causing extra delay because of post-op-delay-secs
cleanup;
@@ -14,7 +14,7 @@ function write_to_file {
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
-TEST $CLI volume set $V0 eager-lock on
+TEST $CLI volume set $V0 cluster.eager-lock on
TEST $CLI volume set $V0 post-op-delay-secs 3
TEST $CLI volume set $V0 client-log-level DEBUG
TEST $CLI volume start $V0
diff --git a/tests/bugs/replicate/bug-966018.t b/tests/bugs/replicate/bug-966018.t
index be4d0b97b8..ec3beb15d5 100644
--- a/tests/bugs/replicate/bug-966018.t
+++ b/tests/bugs/replicate/bug-966018.t
@@ -4,7 +4,7 @@
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../nfs.rc
-#This tests if eager-lock blocks metadata operations on nfs/fuse mounts.
+#This tests if cluster.eager-lock blocks metadata operations on nfs/fuse mounts.
#If it is not woken up, INODELK from the next command waits
#for post-op-delay secs.
diff --git a/tests/bugs/replicate/bug-976800.t b/tests/bugs/replicate/bug-976800.t
index 8311734ab2..27f8b27619 100644
--- a/tests/bugs/replicate/bug-976800.t
+++ b/tests/bugs/replicate/bug-976800.t
@@ -20,7 +20,7 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
TEST $CLI volume set $V0 ensure-durability off
-TEST $CLI volume set $V0 eager-lock off
+TEST $CLI volume set $V0 cluster.eager-lock off
TEST $CLI volume set $V0 flush-behind off
TEST $CLI volume start $V0
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
diff --git a/tests/bugs/replicate/bug-979365.t b/tests/bugs/replicate/bug-979365.t
index b1396c2334..c09c7d5177 100755
--- a/tests/bugs/replicate/bug-979365.t
+++ b/tests/bugs/replicate/bug-979365.t
@@ -15,7 +15,7 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
TEST $CLI volume set $V0 ensure-durability on
-TEST $CLI volume set $V0 eager-lock off
+TEST $CLI volume set $V0 cluster.eager-lock off
TEST $CLI volume start $V0
TEST $CLI volume profile $V0 start
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
diff --git a/xlators/cluster/ec/src/ec-common.c b/xlators/cluster/ec/src/ec-common.c
index c371eb6687..68422f8a72 100644
--- a/xlators/cluster/ec/src/ec-common.c
+++ b/xlators/cluster/ec/src/ec-common.c
@@ -2034,11 +2034,13 @@ void ec_flush_size_version(ec_fop_data_t * fop)
void ec_lock_reuse(ec_fop_data_t *fop)
{
ec_cbk_data_t *cbk;
+ ec_t *ec = NULL;
int32_t i, count;
gf_boolean_t release = _gf_false;
-
+ ec = fop->xl->private;
cbk = fop->answer;
- if (cbk != NULL) {
+
+ if (ec->eager_lock && cbk != NULL) {
if (cbk->xdata != NULL) {
if ((dict_get_int32(cbk->xdata, GLUSTERFS_INODELK_COUNT,
&count) == 0) && (count > 1)) {
@@ -2050,7 +2052,8 @@ void ec_lock_reuse(ec_fop_data_t *fop)
}
}
} else {
- /* If we haven't get an answer with enough quorum, we always release
+ /* If eager lock is disabled or if we haven't received
+ * an answer with enough quorum, we always release
* the lock. */
release = _gf_true;
}
diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c
index 1f15af10da..1d53981303 100644
--- a/xlators/cluster/ec/src/ec.c
+++ b/xlators/cluster/ec/src/ec.c
@@ -261,6 +261,8 @@ reconfigure (xlator_t *this, dict_t *options)
failed);
GF_OPTION_RECONF ("iam-self-heal-daemon", ec->shd.iamshd, options,
bool, failed);
+ GF_OPTION_RECONF ("eager-lock", ec->eager_lock, options,
+ bool, failed);
GF_OPTION_RECONF ("background-heals", background_heals, options,
uint32, failed);
GF_OPTION_RECONF ("heal-wait-qlength", heal_wait_qlen, options,
@@ -601,6 +603,7 @@ init (xlator_t *this)
ec_method_initialize();
GF_OPTION_INIT ("self-heal-daemon", ec->shd.enabled, bool, failed);
GF_OPTION_INIT ("iam-self-heal-daemon", ec->shd.iamshd, bool, failed);
+ GF_OPTION_INIT ("eager-lock", ec->eager_lock, bool, failed);
GF_OPTION_INIT ("background-heals", ec->background_heals, uint32, failed);
GF_OPTION_INIT ("heal-wait-qlength", ec->heal_wait_qlen, uint32, failed);
ec_configure_background_heal_opts (ec, ec->background_heals,
@@ -1321,6 +1324,12 @@ struct volume_options options[] =
"translator is running as part of self-heal-daemon "
"or not."
},
+ { .key = {"eager-lock"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "on",
+ .description = "This option will enable/disable eager lock for "
+ "disperse volume "
+ },
{ .key = {"background-heals"},
.type = GF_OPTION_TYPE_INT,
.min = 0,/*Disabling background heals*/
diff --git a/xlators/cluster/ec/src/ec.h b/xlators/cluster/ec/src/ec.h
index 480125e35d..49af5c2daf 100644
--- a/xlators/cluster/ec/src/ec.h
+++ b/xlators/cluster/ec/src/ec.h
@@ -54,6 +54,7 @@ struct _ec
gf_lock_t lock;
gf_timer_t * timer;
gf_boolean_t shutdown;
+ gf_boolean_t eager_lock;
uint32_t background_heals;
uint32_t heal_wait_qlen;
struct list_head pending_fops;
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 32d95f01e1..523fa3978e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -1075,6 +1075,11 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.op_version = 1,
.flags = OPT_FLAG_CLIENT_OPT
},
+ { .key = "disperse.eager-lock",
+ .voltype = "cluster/disperse",
+ .op_version = GD_OP_VERSION_3_7_10,
+ .flags = OPT_FLAG_CLIENT_OPT
+ },
{ .key = "cluster.quorum-type",
.voltype = "cluster/replicate",
.option = "quorum-type",