author     Avra Sengupta <asengupt@redhat.com>    2014-05-12 00:06:32 +0000
committer  Venky Shankar <vshankar@redhat.com>    2014-05-14 10:24:25 -0700
commit     09e9775127c7def49202e68c923e36a6032a3628 (patch)
tree       4f481e3a6d2375314c78246fe278d7f6d9d81c2d
parent     48201f4faeef3602cb095bf47d14deebf91899ba (diff)
download   glusterfs-09e9775127c7def49202e68c923e36a6032a3628.tar.gz
           glusterfs-09e9775127c7def49202e68c923e36a6032a3628.tar.xz
           glusterfs-09e9775127c7def49202e68c923e36a6032a3628.zip
glusterd/geo-rep: Allow gverify.sh and S56glusterd-geo-rep-create-post.sh
to operate for non-root privileged slave volume

Mount the slave volume on the local node to perform the disk checks, so that
gverify.sh can operate for a non-root privileged slave volume.

Allow the hook script S56glusterd-geo-rep-create-post.sh to operate for a
non-root privileged slave volume.

Modified peer_add_secret_pub.in to accept a username as argument and add the
pem keys to that user's home_dir/.ssh/authorized_keys.

Wrote set_geo_rep_pem_keys.sh, which accepts a username as argument, copies the
pem keys from that user's home directory to $GLUSTERD_WORKING_DIR/geo-replication/,
then copies the keys to the other nodes in the cluster and adds them to the
respective authorized_keys files. The script assumes that the user is present on
all nodes in the cluster. It is not needed for root.

To summarize:

For a privileged slave user, execute the following on the master node as super user:
gluster system:: execute gsec_create
gluster volume geo-replication <master_vol> [root@]<slave_ip>::<slave_vol> create push_pem

For a non-privileged slave user, execute the following on the master node as super user:
gluster system:: execute gsec_create
gluster volume geo-replication <master_vol> <slave_user>@<slave_ip>::<slave_vol> create push_pem

Then, on the slave node, execute the following as super user:
/usr/local/libexec/glusterfs/set_geo_rep_pem_keys.sh <slave_user>

BUG: 1077452
Change-Id: I88020968aa5b13a2c2ab86b1d6661b60071f6f5e
Signed-off-by: Avra Sengupta <asengupt@redhat.com>
Reviewed-on: http://review.gluster.org/7744
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Venky Shankar <vshankar@redhat.com>
Tested-by: Venky Shankar <vshankar@redhat.com>
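As a concrete illustration of the commands above (the volume, host, and user
names here are hypothetical examples, not taken from the patch), a non-root
setup would look roughly like this:

    # on the master node, as root
    gluster system:: execute gsec_create
    gluster volume geo-replication mastervol geoaccount@slavehost::slavevol create push_pem

    # afterwards, on the slave node, as root
    /usr/local/libexec/glusterfs/set_geo_rep_pem_keys.sh geoaccount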
-rwxr-xr-x  extras/hook-scripts/S56glusterd-geo-rep-create-post.sh  |  34
-rw-r--r--  geo-replication/src/Makefile.am                         |   4
-rwxr-xr-x  geo-replication/src/gverify.sh                          |  69
-rw-r--r--  geo-replication/src/peer_add_secret_pub.in              |  21
-rwxr-xr-x  geo-replication/src/set_geo_rep_pem_keys.sh             |  41
-rw-r--r--  glusterfs.spec.in                                       |   1
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-geo-rep.c            |  84
7 files changed, 192 insertions(+), 62 deletions(-)
diff --git a/extras/hook-scripts/S56glusterd-geo-rep-create-post.sh b/extras/hook-scripts/S56glusterd-geo-rep-create-post.sh
index 1369c22fc1..c1400148bf 100755
--- a/extras/hook-scripts/S56glusterd-geo-rep-create-post.sh
+++ b/extras/hook-scripts/S56glusterd-geo-rep-create-post.sh
@@ -1,8 +1,9 @@
#!/bin/bash
-key_val_pair1=`echo $2 | cut -d ' ' -f 1`
-key_val_pair2=`echo $2 | cut -d ' ' -f 2`
-key_val_pair3=`echo $2 | cut -d ' ' -f 3`
+key_val_pair1=`echo $2 | cut -d ',' -f 1`
+key_val_pair2=`echo $2 | cut -d ',' -f 2`
+key_val_pair3=`echo $2 | cut -d ',' -f 3`
+key_val_pair4=`echo $2 | cut -d ',' -f 4`
key=`echo $key_val_pair1 | cut -d '=' -f 1`
val=`echo $key_val_pair1 | cut -d '=' -f 2`
@@ -26,6 +27,16 @@ pub_file_tmp=`echo $val`_tmp
key=`echo $key_val_pair3 | cut -d '=' -f 1`
val=`echo $key_val_pair3 | cut -d '=' -f 2`
+if [ "$key" != "slave_user" ]; then
+ exit;
+fi
+if [ "$val" == "" ]; then
+ exit;
+fi
+slave_user=`echo $val`
+
+key=`echo $key_val_pair4 | cut -d '=' -f 1`
+val=`echo $key_val_pair4 | cut -d '=' -f 2`
if [ "$key" != "slave_ip" ]; then
exit;
fi
@@ -35,8 +46,17 @@ fi
slave_ip=`echo $val`
if [ -f $pub_file ]; then
- scp $pub_file $slave_ip:$pub_file_tmp
- ssh $slave_ip "mv $pub_file_tmp $pub_file"
- ssh $slave_ip "gluster system:: copy file /geo-replication/common_secret.pem.pub > /dev/null"
- ssh $slave_ip "gluster system:: execute add_secret_pub > /dev/null"
+ # For a non-root user copy the pub file to the user's home directory
+ # For a root user copy the pub files to priv_dir->geo-rep.
+ if [ "$slave_user" != "root" ]; then
+ slave_user_home_dir=`ssh $slave_user@$slave_ip 'source /etc/profile; echo $HOME'`
+ echo $slave_user_home_dir
+ scp $pub_file $slave_user@$slave_ip:$slave_user_home_dir/common_secret.pem.pub_tmp
+ ssh $slave_user@$slave_ip "mv $slave_user_home_dir/common_secret.pem.pub_tmp $slave_user_home_dir/common_secret.pem.pub"
+ else
+ scp $pub_file $slave_ip:$pub_file_tmp
+ ssh $slave_ip "mv $pub_file_tmp $pub_file"
+ ssh $slave_ip "gluster system:: copy file /geo-replication/common_secret.pem.pub > /dev/null"
+ ssh $slave_ip "gluster system:: execute add_secret_pub > /dev/null"
+ fi
fi
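For reference, the second argument this hook receives is now assembled by
glusterd as a comma-separated key=value list (see the glusterd-geo-rep.c hunk
further below). A minimal sketch of how such a string decomposes, with purely
illustrative values:

    # hypothetical $2 as built by glusterd (values are examples only)
    args="is_push_pem=1,pub_file=/var/lib/glusterd/geo-replication/common_secret.pem.pub,slave_user=geoaccount,slave_ip=slavehost"
    # split the same way the hook script above does
    slave_user=$(echo "$args" | cut -d ',' -f 3 | cut -d '=' -f 2)   # -> geoaccount
    slave_ip=$(echo "$args" | cut -d ',' -f 4 | cut -d '=' -f 2)     # -> slavehost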
diff --git a/geo-replication/src/Makefile.am b/geo-replication/src/Makefile.am
index 324d8869f8..20b5b6bde6 100644
--- a/geo-replication/src/Makefile.am
+++ b/geo-replication/src/Makefile.am
@@ -1,11 +1,11 @@
gsyncddir = $(libexecdir)/glusterfs
-gsyncd_SCRIPTS = gverify.sh peer_add_secret_pub peer_gsec_create
+gsyncd_SCRIPTS = gverify.sh peer_add_secret_pub peer_gsec_create set_geo_rep_pem_keys.sh
# peer_gsec_create and peer_add_secret_pub are not added to
# EXTRA_DIST as it's derived from a .in file
-EXTRA_DIST = gverify.sh
+EXTRA_DIST = gverify.sh set_geo_rep_pem_keys.sh
gsyncd_PROGRAMS = gsyncd
diff --git a/geo-replication/src/gverify.sh b/geo-replication/src/gverify.sh
index e9c9b28837..c986f78a7e 100755
--- a/geo-replication/src/gverify.sh
+++ b/geo-replication/src/gverify.sh
@@ -49,29 +49,13 @@ echo $cmd_line;
function cmd_slave()
{
- VOL=$1;
local cmd_line;
cmd_line=$(cat <<EOF
function do_verify() {
-v=\$1;
-d=\$(mktemp -d 2>/dev/null);
-glusterfs -s localhost --xlator-option="*dht.lookup-unhashed=off" --volfile-id \$v -l $slave_log_file \$d;
-i=\$(stat -c "%i" \$d);
-if [[ "\$i" -ne "1" ]]; then
-echo 0:0;
-exit 1;
-fi;
-cd \$d;
-disk_size=\$(df -B1 \$d | tail -1 | awk "{print \\\$2}");
-used_size=\$(df -B1 \$d | tail -1 | awk "{print \\\$3}");
-no_of_files=\$(find \$d -maxdepth 0 -empty);
-umount -l \$d;
-rmdir \$d;
ver=\$(gluster --version | head -1 | cut -f2 -d " ");
-echo \$disk_size:\$used_size:\$ver:\$no_of_files;
+echo \$ver;
};
-cd /tmp;
-[ x$VOL != x ] && do_verify $VOL;
+source /etc/profile && do_verify;
EOF
);
@@ -89,37 +73,58 @@ function master_stats()
function slave_stats()
{
- SLAVEHOST=$1;
- SLAVEVOL=$2;
+ SLAVEUSER=$1;
+ SLAVEHOST=$2;
+ SLAVEVOL=$3;
local cmd_line;
- cmd_line=$(cmd_slave $SLAVEVOL);
- SSHM $SLAVEHOST bash -c "'$cmd_line'";
+ local ver;
+ local status;
+
+ d=$(mktemp -d 2>/dev/null);
+ glusterfs --xlator-option="*dht.lookup-unhashed=off" --volfile-server $SLAVEHOST --volfile-id $SLAVEVOL -l $slave_log_file \$d;
+ mount -t glusterfs $SLAVEHOST:$SLAVEVOL $d
+ i=$(stat -c "%i" $d);
+ if [[ "$i" -ne "1" ]]; then
+ echo 0:0;
+ exit 1;
+ fi;
+ cd $d;
+ disk_size=$(df -B1 $d | tail -1 | awk "{print \$2}");
+ used_size=$(df -B1 $d | tail -1 | awk "{print \$3}");
+ no_of_files=$(find $d -maxdepth 0 -empty);
+ umount -l $d;
+ rmdir $d;
+
+ cmd_line=$(cmd_slave);
+ ver=`SSHM $SLAVEUSER@$SLAVEHOST bash -c "'$cmd_line'"`;
+ status=$disk_size:$used_size:$ver:$no_of_files;
+ echo $status
}
function main()
{
- log_file=$4
+ log_file=$5
> $log_file
# Use FORCE_BLOCKER flag in the error message to differentiate
# between the errors which the force command should bypass
- ping -w 5 $2;
+ ping -w 5 $3;
if [ $? -ne 0 ]; then
- echo "FORCE_BLOCKER|$2 not reachable." > $log_file
+ echo "FORCE_BLOCKER|$3 not reachable." > $log_file
exit 1;
fi;
- ssh -oNumberOfPasswordPrompts=0 $2 "echo Testing_Passwordless_SSH";
+ ssh -oNumberOfPasswordPrompts=0 $2@$3 "echo Testing_Passwordless_SSH";
if [ $? -ne 0 ]; then
- echo "FORCE_BLOCKER|Passwordless ssh login has not been setup with $2." > $log_file
+ echo "FORCE_BLOCKER|Passwordless ssh login has not been setup with $3 for user $2." > $log_file
exit 1;
fi;
ERRORS=0;
master_data=$(master_stats $1);
- slave_data=$(slave_stats $2 $3);
+ slave_data=$(slave_stats $2 $3 $4);
master_disk_size=$(echo $master_data | cut -f1 -d':');
slave_disk_size=$(echo $slave_data | cut -f1 -d':');
master_used_size=$(echo $master_data | cut -f2 -d':');
@@ -129,7 +134,7 @@ function main()
slave_no_of_files=$(echo $slave_data | cut -f4 -d':');
if [[ "x$master_disk_size" = "x" || "x$master_version" = "x" || "$master_disk_size" -eq "0" ]]; then
- echo "FORCE_BLOCKER|Unable to fetch master volume details. Please check the master cluster and master volume." > $log_file;
+ echo "FORCE_BLOCKER|Unable to fetch master volume details. Please check the master cluster and master volume." > $log_file;
exit 1;
fi;
@@ -157,13 +162,13 @@ function main()
fi
if [ -z $slave_no_of_files ]; then
- echo "$2::$3 is not empty. Please delete existing files in $2::$3 and retry, or use force to continue without deleting the existing files." >> $log_file;
+ echo "$3::$4 is not empty. Please delete existing files in $3::$4 and retry, or use force to continue without deleting the existing files." >> $log_file;
ERRORS=$(($ERRORS + 1));
fi;
if [[ $master_version > $slave_version ]]; then
- echo "Gluster version mismatch between master and slave." >> $log_file;
- ERRORS=$(($ERRORS + 1));
+ echo "Gluster version mismatch between master and slave." >> $log_file;
+ ERRORS=$(($ERRORS + 1));
fi;
exit $ERRORS;
diff --git a/geo-replication/src/peer_add_secret_pub.in b/geo-replication/src/peer_add_secret_pub.in
index c036cf3341..04dee1b1ea 100644
--- a/geo-replication/src/peer_add_secret_pub.in
+++ b/geo-replication/src/peer_add_secret_pub.in
@@ -1,9 +1,20 @@
#!/bin/bash
-if [ ! -d ~/.ssh ]; then
- mkdir ~/.ssh;
- chmod 700 ~/.ssh
- chown root:root ~/.ssh
+if [ "$1" == "" ]; then
+ home_dir=`echo $HOME`
+else
+ home_dir=`getent passwd | grep $1 | cut -d ':' -f 6`;
fi
-cat "$GLUSTERD_WORKING_DIR"/geo-replication/common_secret.pem.pub >> ~/.ssh/authorized_keys
+if [ "$home_dir" == "" ]; then
+ echo "Invalid User";
+ exit 1;
+fi
+
+if [ ! -d $home_dir/.ssh ]; then
+ mkdir $home_dir/.ssh;
+ chmod 700 $home_dir/.ssh;
+ chown root:root $home_dir/.ssh;
+fi
+
+cat "$GLUSTERD_WORKING_DIR"/geo-replication/common_secret.pem.pub >> $home_dir/.ssh/authorized_keys;
diff --git a/geo-replication/src/set_geo_rep_pem_keys.sh b/geo-replication/src/set_geo_rep_pem_keys.sh
new file mode 100755
index 0000000000..16c55ed0a4
--- /dev/null
+++ b/geo-replication/src/set_geo_rep_pem_keys.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+# Script to copy the pem keys from the user's home directory
+# to $GLUSTERD_WORKING_DIR/geo-replication/ and then copy
+# the keys to other nodes in the cluster and add them to the
+# respective authorized keys. The script takes as argument the
+# user name and assumes that the user will be present in all
+# the nodes in the cluster. Not to be used for root user
+
+function main()
+{
+ user=$1
+ if [ "$user" == "" ]; then
+ echo "Please enter the user's name"
+ exit 1;
+ fi
+
+ if [ "$user" == "root" ]; then
+ echo "This script is not needed for root"
+ exit 1;
+ fi
+
+ home_dir=`getent passwd | grep $user | cut -d ':' -f 6`;
+
+ if [ "$home_dir" == "" ]; then
+ echo "No user $user found"
+ exit 1;
+ fi
+
+ if [ -f $home_dir/common_secret.pem.pub ]; then
+ cp $home_dir/common_secret.pem.pub /var/lib/glusterd/geo-replication/
+ gluster system:: copy file /geo-replication/common_secret.pem.pub
+ gluster system:: execute add_secret_pub $user
+ else
+ echo "$home_dir/common_secret.pem.pub not present. Please run geo-replication command on master with push-pem option to generate the file"
+ exit 1;
+ fi
+ exit 0;
+}
+
+main "$@";
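Taken together with peer_add_secret_pub above, running this script for a
hypothetical non-root user geoaccount amounts to roughly the following sequence
(paths assume the default glusterd working directory):

    # on the slave node, as root
    cp ~geoaccount/common_secret.pem.pub /var/lib/glusterd/geo-replication/
    gluster system:: copy file /geo-replication/common_secret.pem.pub
    gluster system:: execute add_secret_pub geoaccount   # appends the key to geoaccount's authorized_keys on every node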
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index dd539a1e15..6d67a93a33 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -909,6 +909,7 @@ fi
%{_libexecdir}/glusterfs/gsyncd
%{_libexecdir}/glusterfs/python/syncdaemon/*
%{_libexecdir}/glusterfs/gverify.sh
+%{_libexecdir}/glusterfs/set_geo_rep_pem_keys.sh
%{_libexecdir}/glusterfs/peer_add_secret_pub
%{_libexecdir}/glusterfs/peer_gsec_create
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/geo-replication
diff --git a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
index a2315f97ef..3e2e308ec1 100644
--- a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
+++ b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
@@ -1948,7 +1948,7 @@ out:
}
static int
-glusterd_verify_slave (char *volname, char *slave_ip, char *slave,
+glusterd_verify_slave (char *volname, char *slave_url, char *slave_vol,
char **op_errstr, gf_boolean_t *is_force_blocker)
{
int32_t ret = -1;
@@ -1956,17 +1956,38 @@ glusterd_verify_slave (char *volname, char *slave_ip, char *slave,
char log_file_path[PATH_MAX] = "";
char buf[PATH_MAX] = "";
char *tmp = NULL;
+ char *slave_url_buf = NULL;
char *save_ptr = NULL;
+ char *slave_user = NULL;
+ char *slave_ip = NULL;
glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
GF_ASSERT (volname);
- GF_ASSERT (slave_ip);
- GF_ASSERT (slave);
+ GF_ASSERT (slave_url);
+ GF_ASSERT (slave_vol);
- if (THIS)
- priv = THIS->private;
- if (priv == NULL) {
- gf_log ("", GF_LOG_ERROR, "priv of glusterd not present");
+ /* Fetch the slave_user and slave_ip from the slave_url.
+ * If the slave_user is not present. Use "root"
+ */
+ if (strstr(slave_url, "@")) {
+ slave_url_buf = gf_strdup (slave_url);
+ if (!slave_url_buf)
+ goto out;
+
+ slave_user = strtok_r (slave_url_buf, "@", &save_ptr);
+ slave_ip = strtok_r (NULL, "@", &save_ptr);
+ } else {
+ slave_user = "root";
+ slave_ip = slave_url;
+ }
+
+ if (!slave_user || !slave_ip) {
+ gf_log (this->name, GF_LOG_ERROR, "Invalid slave url.");
goto out;
}
@@ -1976,8 +1997,9 @@ glusterd_verify_slave (char *volname, char *slave_ip, char *slave,
runinit (&runner);
runner_add_args (&runner, GSYNCD_PREFIX"/gverify.sh", NULL);
runner_argprintf (&runner, "%s", volname);
+ runner_argprintf (&runner, "%s", slave_user);
runner_argprintf (&runner, "%s", slave_ip);
- runner_argprintf (&runner, "%s", slave);
+ runner_argprintf (&runner, "%s", slave_vol);
runner_argprintf (&runner, "%s", log_file_path);
runner_redir (&runner, STDOUT_FILENO, RUN_PIPE);
synclock_unlock (&priv->big_lock);
@@ -2017,6 +2039,7 @@ glusterd_verify_slave (char *volname, char *slave_ip, char *slave,
}
ret = 0;
out:
+ GF_FREE (slave_url_buf);
unlink (log_file_path);
gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
return ret;
@@ -2102,7 +2125,7 @@ glusterd_op_stage_gsync_create (dict_t *dict, char **op_errstr)
char *volname = NULL;
char *host_uuid = NULL;
char *statefile = NULL;
- char *slave_ip = NULL;
+ char *slave_url = NULL;
char *slave_host = NULL;
char *slave_vol = NULL;
char *conf_path = NULL;
@@ -2154,7 +2177,7 @@ glusterd_op_stage_gsync_create (dict_t *dict, char **op_errstr)
return -1;
}
- ret = glusterd_get_slave_details_confpath (volinfo, dict, &slave_ip,
+ ret = glusterd_get_slave_details_confpath (volinfo, dict, &slave_url,
&slave_host, &slave_vol,
&conf_path, op_errstr);
if (ret) {
@@ -2199,7 +2222,7 @@ glusterd_op_stage_gsync_create (dict_t *dict, char **op_errstr)
* ssh login setup, slave volume is created, slave vol is empty,
* and if it has enough memory and bypass in case of force if
* the error is not a force blocker */
- ret = glusterd_verify_slave (volname, slave_host, slave_vol,
+ ret = glusterd_verify_slave (volname, slave_url, slave_vol,
op_errstr, &is_force_blocker);
if (ret) {
if (is_force && !is_force_blocker) {
@@ -5049,7 +5072,11 @@ glusterd_op_gsync_create (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
char hooks_args[PATH_MAX] = "";
char uuid_str [64] = "";
char *host_uuid = NULL;
+ char *slave_url = NULL;
+ char *slave_url_buf = NULL;
+ char *slave_user = NULL;
char *slave_ip = NULL;
+ char *save_ptr = NULL;
char *slave_host = NULL;
char *slave_vol = NULL;
char *arg_buf = NULL;
@@ -5060,9 +5087,11 @@ glusterd_op_gsync_create (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
gf_boolean_t is_force = -1;
glusterd_conf_t *conf = NULL;
glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
- GF_ASSERT (THIS);
- conf = THIS->private;
+ this = THIS;
+ GF_ASSERT (this);
+ conf = this->private;
GF_ASSERT (conf);
GF_ASSERT (dict);
GF_ASSERT (op_errstr);
@@ -5090,7 +5119,7 @@ glusterd_op_gsync_create (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
goto out;
}
- ret = dict_get_str (dict, "slave_ip", &slave_ip);
+ ret = dict_get_str (dict, "slave_ip", &slave_url);
if (ret) {
snprintf (errmsg, sizeof (errmsg),
"Unable to fetch slave IP.");
@@ -5099,6 +5128,28 @@ glusterd_op_gsync_create (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
goto out;
}
+ /* Fetch the slave_user and slave_ip from the slave_url.
+ * If the slave_user is not present. Use "root"
+ */
+ if (strstr(slave_url, "@")) {
+ slave_url_buf = gf_strdup (slave_url);
+ if (!slave_url_buf) {
+ ret = -1;
+ goto out;
+ }
+ slave_user = strtok_r (slave_url, "@", &save_ptr);
+ slave_ip = strtok_r (NULL, "@", &save_ptr);
+ } else {
+ slave_user = "root";
+ slave_ip = slave_url;
+ }
+
+ if (!slave_user || !slave_ip) {
+ gf_log (this->name, GF_LOG_ERROR, "Invalid slave url.");
+ ret = -1;
+ goto out;
+ }
+
ret = dict_get_str (dict, "slave_host", &slave_host);
if (ret) {
snprintf (errmsg, sizeof (errmsg),
@@ -5121,8 +5172,8 @@ glusterd_op_gsync_create (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
is_pem_push = 0;
snprintf(hooks_args, sizeof(hooks_args),
- "is_push_pem=%d pub_file=%s slave_ip=%s",
- is_pem_push, common_pem_file, slave_host);
+ "is_push_pem=%d,pub_file=%s,slave_user=%s,slave_ip=%s",
+ is_pem_push, common_pem_file, slave_user, slave_ip);
} else
snprintf(hooks_args, sizeof(hooks_args),
@@ -5170,6 +5221,7 @@ create_essentials:
}
out:
+ GF_FREE (slave_url_buf);
gf_log ("", GF_LOG_DEBUG,"Returning %d", ret);
return ret;
}
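In effect, glusterd now accepts both slave URL forms from the commit message,
[root@]<slave_ip>::<slave_vol> and <slave_user>@<slave_ip>::<slave_vol>. A
shell illustration of the split performed by the strtok_r calls above, with
example values only:

    slave_url="geoaccount@slavehost"   # value stored under the "slave_ip" dict key
    slave_user=${slave_url%%@*}        # -> geoaccount
    slave_ip=${slave_url#*@}           # -> slavehost
    # a URL without '@' leaves slave_user as "root" and slave_ip as the URL itself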