/*
Copyright (c) 2006-2011 Gluster, Inc. <http://www.gluster.com>
This file is part of GlusterFS.
GlusterFS is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
GlusterFS is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see
<http://www.gnu.org/licenses/>.
*/
#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
#endif
#include <inttypes.h>
#include <stdlib.h>
#include "globals.h"
#include "glusterfs.h"
#include "compat.h"
#include "dict.h"
#include "xlator.h"
#include "logging.h"
#include "timer.h"
#include "defaults.h"
#include "compat.h"
#include "md5.h"
#include "run.h"
#include "compat-errno.h"
#include "statedump.h"
#include "syscall.h"
#include "glusterd-mem-types.h"
#include "glusterd.h"
#include "glusterd-op-sm.h"
#include "glusterd-sm.h"
#include "glusterd-utils.h"
#include "glusterd-store.h"
#include "glusterd-volgen.h"
#include "glusterd-pmap.h"
#include "xdr-generic.h"
#include <sys/resource.h>
#include <inttypes.h>
#include <signal.h>
#include <sys/types.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <rpc/pmap_clnt.h>
#include <unistd.h>
#ifdef GF_SOLARIS_HOST_OS
#include <sys/sockio.h>
#endif
#define MOUNT_PROGRAM 100005
#define NFS_PROGRAM 100003
#define NFSV3_VERSION 3
#define MOUNTV3_VERSION 3
#define MOUNTV1_VERSION 1
char *glusterd_sock_dir = "/tmp";
static glusterd_lock_t lock;
/* Copy the UUID of the current cluster-lock holder into *uuid.
 * Reads the file-scope 'lock' without synchronization; callers must
 * serialize access themselves. Always returns 0. */
int32_t
glusterd_get_lock_owner (uuid_t *uuid)
{
uuid_copy (*uuid, lock.owner) ;
return 0;
}
/* Record 'owner' as the holder of the cluster lock.
 * Does not check whether the lock is already held; glusterd_lock ()
 * performs that check before calling here. Always returns 0. */
static int32_t
glusterd_set_lock_owner (uuid_t owner)
{
uuid_copy (lock.owner, owner);
//TODO: set timestamp
return 0;
}
/* Release the cluster lock by zeroing the stored owner UUID.
 * NOTE: the 'owner' parameter is unused; ownership verification is done
 * by the caller (glusterd_unlock) before invoking this. Always returns 0. */
static int32_t
glusterd_unset_lock_owner (uuid_t owner)
{
uuid_clear (lock.owner);
//TODO: set timestamp
return 0;
}
/* Probe for a usable FUSE device by attempting to open /dev/fuse
 * read-write. Returns _gf_true only when both the open and the
 * subsequent close succeed. */
gf_boolean_t
glusterd_is_fuse_available ()
{
        int fd = open ("/dev/fuse", O_RDWR);

        if (fd < 0)
                return _gf_false;

        return close (fd) ? _gf_false : _gf_true;
}
/* Decide whether the socket address 'sa' is a loopback address.
 * IPv4: any address in 127.0.0.0/8; IPv6: exactly ::1.
 * 'hostname' is used only for error logging on an unknown family and
 * may be NULL. Returns _gf_true for loopback, _gf_false otherwise. */
gf_boolean_t
glusterd_is_loopback_localhost (const struct sockaddr *sa, char *hostname)
{
        GF_ASSERT (sa);

        gf_boolean_t           local = _gf_false;
        struct in6_addr        loop6 = IN6ADDR_LOOPBACK_INIT;
        const uint8_t         *octets = NULL;
        const struct in6_addr *a6 = NULL;

        if (sa->sa_family == AF_INET) {
                /* First octet 127 => loopback network. */
                octets = (const uint8_t *)
                         &(((struct sockaddr_in *)sa)->sin_addr.s_addr);
                if (octets[0] == 127)
                        local = _gf_true;
        } else if (sa->sa_family == AF_INET6) {
                /* Byte-wise comparison against IN6ADDR_LOOPBACK_INIT (::1). */
                a6 = &(((struct sockaddr_in6 *)sa)->sin6_addr);
                if (memcmp (a6, &loop6, sizeof (loop6)) == 0)
                        local = _gf_true;
        } else {
                if (hostname)
                        gf_log ("glusterd", GF_LOG_ERROR,
                                "unknown address family %d for %s",
                                sa->sa_family, hostname);
        }

        return local;
}
/* Convert the address inside 'addr' to its printable string form.
 * On success, *ip is set to a strdup()'d copy of the string and that
 * same pointer is returned; the CALLER OWNS the memory and must free()
 * it (plain free — it comes from strdup, not GF_CALLOC).
 * Returns NULL (leaving *ip untouched) on an unsupported address family
 * or if inet_ntop fails; returns NULL with *ip == NULL if strdup fails. */
char *
get_ip_from_addrinfo (struct addrinfo *addr, char **ip)
{
/* 64 bytes is comfortably larger than INET6_ADDRSTRLEN (46). */
char buf[64];
void *in_addr = NULL;
struct sockaddr_in *s4 = NULL;
struct sockaddr_in6 *s6 = NULL;
switch (addr->ai_family)
{
case AF_INET:
s4 = (struct sockaddr_in *)addr->ai_addr;
in_addr = &s4->sin_addr;
break;
case AF_INET6:
s6 = (struct sockaddr_in6 *)addr->ai_addr;
in_addr = &s6->sin6_addr;
break;
default:
gf_log ("glusterd", GF_LOG_ERROR, "Invalid family");
return NULL;
}
if (!inet_ntop(addr->ai_family, in_addr, buf, sizeof(buf))) {
gf_log ("glusterd", GF_LOG_ERROR, "String conversion failed");
return NULL;
}
*ip = strdup (buf);
return *ip;
}
/* Determine whether 'hostname' resolves to an address belonging to this
 * machine: first by checking for loopback addresses, then by attempting
 * to bind a UDP socket to each resolved address (bind succeeds only for
 * local addresses).
 * Returns 0 when the host is local, non-zero otherwise (note: inverted
 * sense — the return is !found).
 * FIXES vs. previous version: the strdup()'d string from
 * get_ip_from_addrinfo () was never freed (leak per resolved address),
 * and its possibly-NULL return was passed straight to a "%s" format. */
int32_t
glusterd_is_local_addr (char *hostname)
{
        int32_t          ret    = -1;
        struct addrinfo *result = NULL;
        struct addrinfo *res    = NULL;
        int32_t          found  = 0;
        int              sd     = -1;
        char            *ip     = NULL;

        ret = getaddrinfo (hostname, NULL, NULL, &result);
        if (ret != 0) {
                gf_log ("", GF_LOG_ERROR, "error in getaddrinfo: %s\n",
                        gai_strerror(ret));
                goto out;
        }

        /* Fast path: loopback addresses are local by definition. */
        for (res = result; res != NULL; res = res->ai_next) {
                found = glusterd_is_loopback_localhost (res->ai_addr,
                                                        hostname);
                if (found)
                        goto out;
        }

        for (res = result; res != NULL; res = res->ai_next) {
                /* get_ip_from_addrinfo may return NULL; only log on
                 * success, and release the strdup()'d string. */
                if (get_ip_from_addrinfo (res, &ip))
                        gf_log ("glusterd", GF_LOG_DEBUG, "%s ", ip);
                if (ip) {
                        free (ip);      /* strdup'd, so plain free() */
                        ip = NULL;
                }

                sd = socket (res->ai_family, SOCK_DGRAM, 0);
                if (sd == -1)
                        goto out;
                /*If bind succeeds then its a local address*/
                ret = bind (sd, res->ai_addr, res->ai_addrlen);
                close (sd);
                if (ret == 0) {
                        found = _gf_true;
                        if (get_ip_from_addrinfo (res, &ip)) {
                                gf_log ("glusterd", GF_LOG_INFO,
                                        "%s is local", ip);
                        }
                        if (ip) {
                                free (ip);
                                ip = NULL;
                        }
                        break;
                }
        }
out:
        if (result)
                freeaddrinfo (result);

        if (found)
                gf_log ("glusterd", GF_LOG_DEBUG, "%s is local", hostname);
        else
                gf_log ("glusterd", GF_LOG_DEBUG, "%s is not local", hostname);

        return !found;
}
/* Try to acquire the (single, process-wide) cluster lock for 'uuid'.
 * Fails with -1 when the lock is already held by any peer (including
 * the same uuid — re-acquisition is not allowed); returns 0 and records
 * 'uuid' as owner on success. */
int32_t
glusterd_lock (uuid_t uuid)
{
uuid_t owner;
/* 50 bytes comfortably holds a 36-char UUID string plus NUL. */
char new_owner_str[50];
char owner_str[50];
int ret = -1;
GF_ASSERT (uuid);
glusterd_get_lock_owner (&owner);
if (!uuid_is_null (owner)) {
gf_log ("glusterd", GF_LOG_ERROR, "Unable to get lock"
" for uuid: %s, lock held by: %s",
uuid_utoa_r (uuid, new_owner_str),
uuid_utoa_r (owner, owner_str));
goto out;
}
ret = glusterd_set_lock_owner (uuid);
if (!ret) {
gf_log ("glusterd", GF_LOG_INFO, "Cluster lock held by"
" %s", uuid_utoa (uuid));
}
out:
return ret;
}
/* Release the cluster lock on behalf of 'uuid'.
 * Fails when the lock is not currently held, or when it is held by a
 * different peer than 'uuid'. Returns 0 on success, non-zero otherwise.
 * FIX: the previous "if (NULL == owner)" compared a uuid_t (an array,
 * which decays to a never-NULL pointer) against NULL and so could never
 * detect the unheld-lock case; uuid_is_null () is the correct test
 * (mirroring glusterd_lock's !uuid_is_null check). */
int32_t
glusterd_unlock (uuid_t uuid)
{
        uuid_t  owner;
        char    new_owner_str[50];
        char    owner_str[50];
        int32_t ret = -1;

        GF_ASSERT (uuid);

        glusterd_get_lock_owner (&owner);

        if (uuid_is_null (owner)) {
                gf_log ("glusterd", GF_LOG_ERROR, "Cluster lock not held!");
                goto out;
        }

        ret = uuid_compare (uuid, owner);
        if (ret) {
                gf_log ("glusterd", GF_LOG_ERROR, "Cluster lock held by %s"
                        " ,unlock req from %s!", uuid_utoa_r (owner ,owner_str)
                        , uuid_utoa_r (uuid, new_owner_str));
                goto out;
        }

        ret = glusterd_unset_lock_owner (uuid);
        if (ret) {
                gf_log ("glusterd", GF_LOG_ERROR, "Unable to clear cluster "
                        "lock");
                goto out;
        }

        ret = 0;
out:
        return ret;
}
/* Copy this glusterd instance's own UUID (from the private config of
 * the current xlator) into *uuid. Always returns 0. */
int
glusterd_get_uuid (uuid_t *uuid)
{
        glusterd_conf_t *conf = THIS->private;

        GF_ASSERT (conf);

        uuid_copy (*uuid, conf->uuid);

        return 0;
}
/* Serialize 'req' with 'xdrproc' into an iobuf and submit it over 'rpc'
 * for (prog, procnum), with 'cbkfn' invoked on reply. When 'iobref' is
 * NULL a temporary one is created and unref'd before returning. A NULL
 * 'req' submits an empty (zero-iovec) request.
 * Returns 0 on successful submission, -1 on allocation/serialization
 * failure or submit error.
 * FIXES vs. previous version: the 'if (start_ping)' body was a
 * commented-out call, which silently captured the following 'ret = 0'
 * as the if-body — now braced explicitly; iobuf_unref () is no longer
 * invoked on a NULL iobuf (the req == NULL path). */
int
glusterd_submit_request (struct rpc_clnt *rpc, void *req,
                         call_frame_t *frame, rpc_clnt_prog_t *prog,
                         int procnum, struct iobref *iobref,
                         xlator_t *this, fop_cbk_fn_t cbkfn, xdrproc_t xdrproc)
{
        int            ret        = -1;
        struct iobuf  *iobuf      = NULL;
        int            count      = 0;
        char           new_iobref = 0, start_ping = 0;
        struct iovec   iov        = {0, };
        ssize_t        req_size   = 0;

        GF_ASSERT (rpc);
        GF_ASSERT (this);

        if (req) {
                req_size = xdr_sizeof (xdrproc, req);
                iobuf = iobuf_get2 (this->ctx->iobuf_pool, req_size);
                if (!iobuf)
                        goto out;

                if (!iobref) {
                        iobref = iobref_new ();
                        if (!iobref)
                                goto out;
                        new_iobref = 1;
                }

                iobref_add (iobref, iobuf);

                iov.iov_base = iobuf->ptr;
                iov.iov_len = iobuf_pagesize (iobuf);

                /* Create the xdr payload */
                ret = xdr_serialize_generic (iov, req, xdrproc);
                if (ret == -1)
                        goto out;

                iov.iov_len = ret;
                count = 1;
        }

        /* Send the msg */
        ret = rpc_clnt_submit (rpc, prog, procnum, cbkfn,
                               &iov, count,
                               NULL, 0, iobref, frame, NULL, 0, NULL, 0, NULL);

        if (ret == 0) {
                pthread_mutex_lock (&rpc->conn.lock);
                {
                        if (!rpc->conn.ping_started) {
                                start_ping = 1;
                        }
                }
                pthread_mutex_unlock (&rpc->conn.lock);
        }

        if (start_ping) {
                /* Ping is currently disabled; braces keep 'ret = 0' as
                 * the intended body now that the call is commented out. */
                //client_start_ping ((void *) this);
                ret = 0;
        }
out:
        if (new_iobref) {
                iobref_unref (iobref);
        }
        if (iobuf)      /* req == NULL leaves iobuf NULL; don't unref it */
                iobuf_unref (iobuf);

        return ret;
}
/* Serialize the reply structure 'arg' into a freshly-allocated iobuf
 * using 'xdrproc', and point *outmsg at the encoded bytes.
 * Returns the iobuf (caller must eventually unref it) on success, or
 * NULL on allocation/encoding failure (any partially-acquired iobuf is
 * unref'd internally). */
struct iobuf *
glusterd_serialize_reply (rpcsvc_request_t *req, void *arg,
struct iovec *outmsg, xdrproc_t xdrproc)
{
struct iobuf *iob = NULL;
ssize_t retlen = -1;
ssize_t rsp_size = 0;
/* First, get the io buffer into which the reply in arg will
* be serialized.
*/
rsp_size = xdr_sizeof (xdrproc, arg);
iob = iobuf_get2 (req->svc->ctx->iobuf_pool, rsp_size);
if (!iob) {
gf_log ("", GF_LOG_ERROR, "Failed to get iobuf");
goto ret;
}
iobuf_to_iovec (iob, outmsg);
/* Use the given serializer to translate the give C structure in arg
* to XDR format which will be written into the buffer in outmsg.
*/
/* retlen is used to received the error since size_t is unsigned and we
* need -1 for error notification during encoding.
*/
retlen = xdr_serialize_generic (*outmsg, arg, xdrproc);
if (retlen == -1) {
gf_log ("", GF_LOG_ERROR, "Failed to encode message");
goto ret;
}
outmsg->iov_len = retlen;
ret:
/* retlen stays -1 on both failure paths (no iob, or encode error);
* release the iobuf so the caller only ever sees NULL or a valid buf. */
if (retlen == -1) {
iobuf_unref (iob);
iob = NULL;
}
return iob;
}
/* Encode 'arg' with 'xdrproc' and submit it as the reply to 'req',
 * along with any extra 'payload' iovecs. Creates a temporary iobref
 * when the caller passes none. Note: even if serialization fails the
 * (empty) reply is still submitted so the transport is not left
 * hanging. Returns 0 on successful submission, -1 otherwise. */
int
glusterd_submit_reply (rpcsvc_request_t *req, void *arg,
struct iovec *payload, int payloadcount,
struct iobref *iobref, xdrproc_t xdrproc)
{
struct iobuf *iob = NULL;
int ret = -1;
struct iovec rsp = {0,};
char new_iobref = 0;
if (!req) {
GF_ASSERT (req);
goto out;
}
if (!iobref) {
iobref = iobref_new ();
if (!iobref) {
gf_log ("", GF_LOG_ERROR, "out of memory");
goto out;
}
new_iobref = 1;
}
iob = glusterd_serialize_reply (req, arg, &rsp, xdrproc);
if (!iob) {
gf_log ("", GF_LOG_ERROR, "Failed to serialize reply");
} else {
iobref_add (iobref, iob);
}
ret = rpcsvc_submit_generic (req, &rsp, 1, payload, payloadcount,
iobref);
/* Now that we've done our job of handing the message to the RPC layer
* we can safely unref the iob in the hope that RPC layer must have
* ref'ed the iob on receiving into the txlist.
*/
if (ret == -1) {
gf_log ("", GF_LOG_ERROR, "Reply submission failed");
goto out;
}
ret = 0;
out:
if (new_iobref) {
iobref_unref (iobref);
}
if (iob)
iobuf_unref (iob);
return ret;
}
/* Check whether a volume named 'volname' exists on disk by stat'ing
 * <workdir>/vols/<volname>. Returns _gf_true when the path exists,
 * _gf_false otherwise.
 * Cleanup: size the snprintf by sizeof (pathname) rather than a
 * repeated magic 1024, and assert the private config is present before
 * dereferencing it. */
gf_boolean_t
glusterd_check_volume_exists (char *volname)
{
        char             pathname[1024] = {0,};
        struct stat      stbuf          = {0,};
        int32_t          ret            = -1;
        glusterd_conf_t *priv           = NULL;

        priv = THIS->private;
        GF_ASSERT (priv);

        snprintf (pathname, sizeof (pathname), "%s/vols/%s", priv->workdir,
                  volname);

        ret = stat (pathname, &stbuf);
        if (ret) {
                gf_log ("", GF_LOG_DEBUG, "Volume %s does not exist."
                        "stat failed with errno : %d on path: %s",
                        volname, errno, pathname);
                return _gf_false;
        }

        return _gf_true;
}
/* Allocate and initialize a new glusterd_volinfo_t: empty vol/brick
 * lists, fresh 'dict' and 'gsync_slaves' dictionaries, and xl = THIS.
 * On success stores the object in *volinfo and returns 0; on any
 * allocation failure returns -1 with everything released.
 * FIX: when the gsync_slaves dict_new () failed, the already-created
 * 'dict' was leaked — it is now unreffed on that path. */
int32_t
glusterd_volinfo_new (glusterd_volinfo_t **volinfo)
{
        glusterd_volinfo_t *new_volinfo = NULL;
        int32_t             ret         = -1;

        GF_ASSERT (volinfo);

        new_volinfo = GF_CALLOC (1, sizeof(*new_volinfo),
                                 gf_gld_mt_glusterd_volinfo_t);
        if (!new_volinfo)
                goto out;

        INIT_LIST_HEAD (&new_volinfo->vol_list);
        INIT_LIST_HEAD (&new_volinfo->bricks);

        new_volinfo->dict = dict_new ();
        if (!new_volinfo->dict) {
                GF_FREE (new_volinfo);
                goto out;
        }

        new_volinfo->gsync_slaves = dict_new ();
        if (!new_volinfo->gsync_slaves) {
                /* release the first dict as well, else it leaks */
                dict_unref (new_volinfo->dict);
                GF_FREE (new_volinfo);
                goto out;
        }

        new_volinfo->xl = THIS;

        *volinfo = new_volinfo;

        ret = 0;
out:
        gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
        return ret;
}
/* Unlink 'brickinfo' from its brick list and free it, including the
 * optional logfile string. Always returns 0. */
int32_t
glusterd_brickinfo_delete (glusterd_brickinfo_t *brickinfo)
{
        GF_ASSERT (brickinfo);

        /* Detach from whichever list this entry currently sits on. */
        list_del_init (&brickinfo->brick_list);

        if (brickinfo->logfile)
                GF_FREE (brickinfo->logfile);
        GF_FREE (brickinfo);

        return 0;
}
/* Delete every brickinfo attached to 'volinfo', stopping at the first
 * failure. Returns 0 on success or the failing delete's error code.
 * Uses the _safe iterator because each entry is removed while walking. */
int32_t
glusterd_volume_brickinfos_delete (glusterd_volinfo_t *volinfo)
{
glusterd_brickinfo_t *brickinfo = NULL;
glusterd_brickinfo_t *tmp = NULL;
int32_t ret = 0;
GF_ASSERT (volinfo);
list_for_each_entry_safe (brickinfo, tmp, &volinfo->bricks,
brick_list) {
ret = glusterd_brickinfo_delete (brickinfo);
if (ret)
goto out;
}
out:
gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
/* Tear down a volinfo: unlink it from the volume list, delete all its
 * bricks, release both dictionaries and the logdir string, then free
 * the struct itself. Returns 0 on success; on brick-deletion failure
 * returns that error with the volinfo left partially torn down. */
int32_t
glusterd_volinfo_delete (glusterd_volinfo_t *volinfo)
{
int32_t ret = -1;
GF_ASSERT (volinfo);
list_del_init (&volinfo->vol_list);
ret = glusterd_volume_brickinfos_delete (volinfo);
if (ret)
goto out;
if (volinfo->dict)
dict_unref (volinfo->dict);
if (volinfo->gsync_slaves)
dict_unref (volinfo->gsync_slaves);
if (volinfo->logdir)
GF_FREE (volinfo->logdir);
GF_FREE (volinfo);
ret = 0;
out:
gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
/* Allocate a zeroed glusterd_brickinfo_t with an initialized (empty)
 * brick_list and hand it back via *brickinfo.
 * Returns 0 on success, -1 when allocation fails. */
int32_t
glusterd_brickinfo_new (glusterd_brickinfo_t **brickinfo)
{
        int32_t               ret   = -1;
        glusterd_brickinfo_t *brick = NULL;

        GF_ASSERT (brickinfo);

        brick = GF_CALLOC (1, sizeof(*brick),
                           gf_gld_mt_glusterd_brickinfo_t);
        if (brick) {
                INIT_LIST_HEAD (&brick->brick_list);
                *brickinfo = brick;
                ret = 0;
        }

        gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
        return ret;
}
/* Resolve the brick's hostname to a peer UUID and store it in
 * brickinfo->uuid. Returns whatever glusterd_hostname_to_uuid returns
 * (0 on success). */
int32_t
glusterd_resolve_brick (glusterd_brickinfo_t *brickinfo)
{
int32_t ret = -1;
GF_ASSERT (brickinfo);
ret = glusterd_hostname_to_uuid (brickinfo->hostname, brickinfo->uuid);
gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
/* Parse a "<hostname>:<path>" brick string into a freshly allocated
 * glusterd_brickinfo_t, returned via *brickinfo (caller owns it).
 * Returns 0 on success, -1 on allocation failure.
 * FIX: the cleanup path tested 'tmp_host' before freeing 'tmp_path'
 * (copy-paste error), leaking tmp_path whenever tmp_host's allocation
 * had failed — it now tests the matching pointer. */
int32_t
glusterd_brickinfo_from_brick (char *brick,
                               glusterd_brickinfo_t **brickinfo)
{
        int32_t               ret           = -1;
        glusterd_brickinfo_t *new_brickinfo = NULL;
        char                 *hostname      = NULL;
        char                 *path          = NULL;
        char                 *tmp_host      = NULL;
        char                 *tmp_path      = NULL;

        GF_ASSERT (brick);
        GF_ASSERT (brickinfo);

        tmp_host = gf_strdup (brick);
        if (tmp_host)
                get_host_name (tmp_host, &hostname);
        tmp_path = gf_strdup (brick);
        if (tmp_path)
                get_path_name (tmp_path, &path);

        GF_ASSERT (hostname);
        GF_ASSERT (path);

        ret = glusterd_brickinfo_new (&new_brickinfo);
        if (ret)
                goto out;

        /* NOTE: assumes the hostname/path fields are at least 1024
         * bytes and that strncpy leaves room for the calloc'd NUL. */
        strncpy (new_brickinfo->hostname, hostname, 1024);
        strncpy (new_brickinfo->path, path, 1024);

        *brickinfo = new_brickinfo;

        ret = 0;
out:
        if (tmp_host)
                GF_FREE (tmp_host);
        if (tmp_path)
                GF_FREE (tmp_path);
        gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
        return ret;
}
/* Look up a brick of 'volinfo' by peer identity plus path.
 * Identity: 'uuid' if non-NULL, else 'hostname' resolved to a uuid.
 * Path matching: exact match always succeeds; with GF_PATH_PARTIAL a
 * prefix relationship at a '/' boundary (one path lying inside the
 * other) also counts as a hit — used by create/add-brick to reject
 * nested brick directories.
 * On a hit, *brickinfo (if provided for the exact-match case; required
 * for partial matches) is set to the entry and 0 is returned; -1 when
 * nothing matches or resolution fails. */
int32_t
glusterd_volume_brickinfo_get (uuid_t uuid, char *hostname, char *path,
glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t **brickinfo,
gf_path_match_t path_match)
{
glusterd_brickinfo_t *brickiter = NULL;
uuid_t peer_uuid = {0};
int32_t ret = -1;
int32_t brick_path_len = 0;
int32_t path_len = 0;
int32_t smaller_path = 0;
gf_boolean_t is_path_smaller = _gf_true;
if (uuid) {
uuid_copy (peer_uuid, uuid);
} else {
ret = glusterd_hostname_to_uuid (hostname, peer_uuid);
if (ret)
goto out;
}
ret = -1;
path_len = strlen (path);
list_for_each_entry (brickiter, &volinfo->bricks, brick_list) {
/* Lazily resolve bricks whose owning peer is not yet known. */
if (uuid_is_null (brickiter->uuid) &&
glusterd_resolve_brick (brickiter))
goto out;
/* Only bricks belonging to the requested peer are candidates. */
if (uuid_compare (peer_uuid, brickiter->uuid))
continue;
brick_path_len = strlen (brickiter->path);
smaller_path = min (brick_path_len, path_len);
/* is_path_smaller: the searched-for path is the shorter one. */
if (smaller_path != path_len)
is_path_smaller = _gf_false;
if (!strcmp (brickiter->path, path)) {
gf_log (THIS->name, GF_LOG_INFO, "Found brick");
ret = 0;
if (brickinfo)
*brickinfo = brickiter;
break;
} else if (path_match == GF_PATH_PARTIAL &&
!strncmp (brickiter->path, path, smaller_path)) {
/* GF_PATH_PARTIAL:check during create, add-brick ops */
if (is_path_smaller == _gf_true &&
brickiter->path[smaller_path] == '/') {
gf_log (THIS->name, GF_LOG_ERROR,
"given path %s lies within brick %s",
path, brickiter->path);
*brickinfo = brickiter;
ret = 0;
break;
} else if (path[smaller_path] == '/') {
gf_log (THIS->name, GF_LOG_ERROR,
"brick %s is a part of %s",
brickiter->path, path);
*brickinfo = brickiter;
ret = 0;
break;
} else {
/* shared prefix but not at a path-component
* boundary — not a real overlap; keep looking */
ret = -1;
}
}
}
out:
gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
/* Convenience wrapper: split a "<hostname>:<path>" brick string and
 * delegate to glusterd_volume_brickinfo_get (matching by hostname).
 * Returns -1 for a malformed brick string or when no brick matches,
 * 0 on a hit with *brickinfo set. */
int32_t
glusterd_volume_brickinfo_get_by_brick (char *brick,
glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t **brickinfo,
gf_path_match_t path_match)
{
int32_t ret = -1;
char *hostname = NULL;
char *path = NULL;
char *tmp_host = NULL;
char *tmp_path = NULL;
GF_ASSERT (brick);
GF_ASSERT (volinfo);
gf_log ("", GF_LOG_INFO, "brick: %s", brick);
/* hostname/path point into the duplicated buffers below, so the
* buffers must stay alive until after the lookup. */
tmp_host = gf_strdup (brick);
if (tmp_host)
get_host_name (tmp_host, &hostname);
tmp_path = gf_strdup (brick);
if (tmp_path)
get_path_name (tmp_path, &path);
if (!hostname || !path) {
gf_log ("", GF_LOG_ERROR,
"brick %s is not of form <HOSTNAME>:<export-dir>",
brick);
ret = -1;
goto out;
}
ret = glusterd_volume_brickinfo_get (NULL, hostname, path, volinfo,
brickinfo, path_match);
out:
if (tmp_host)
GF_FREE (tmp_host);
if (tmp_path)
GF_FREE (tmp_path);
gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
/* Report whether the brick identified by (hostname, path) in 'volinfo'
 * is marked decommissioned. An unknown brick yields _gf_false. */
gf_boolean_t
glusterd_is_brick_decommissioned (glusterd_volinfo_t *volinfo, char *hostname,
                                  char *path)
{
        glusterd_brickinfo_t *brickinfo = NULL;

        if (glusterd_volume_brickinfo_get (NULL, hostname, path, volinfo,
                                           &brickinfo, GF_PATH_COMPLETE))
                return _gf_false;

        return brickinfo->decommissioned;
}
/* Release all state attached to a peer: drop its rpc connection (first
 * detaching and freeing the peerctx stored in rpc->mydata so the rpc
 * layer cannot call back into freed memory) and then destroy the
 * peerinfo itself. Always returns 0. */
int32_t
glusterd_friend_cleanup (glusterd_peerinfo_t *peerinfo)
{
GF_ASSERT (peerinfo);
glusterd_peerctx_t *peerctx = NULL;
if (peerinfo->rpc) {
peerctx = peerinfo->rpc->mydata;
peerinfo->rpc->mydata = NULL;
peerinfo->rpc = rpc_clnt_unref (peerinfo->rpc);
peerinfo->rpc = NULL;
if (peerctx)
GF_FREE (peerctx);
}
glusterd_peer_destroy (peerinfo);
return 0;
}
/* Find a volume by name in the daemon's volume list.
 * On success stores the (borrowed, not ref-counted) volinfo pointer in
 * *volinfo and returns 0; returns -1 when no volume matches. */
int32_t
glusterd_volinfo_find (char *volname, glusterd_volinfo_t **volinfo)
{
glusterd_volinfo_t *tmp_volinfo = NULL;
int32_t ret = -1;
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
GF_ASSERT (volname);
this = THIS;
GF_ASSERT (this);
priv = this->private;
list_for_each_entry (tmp_volinfo, &priv->volumes, vol_list) {
if (!strcmp (tmp_volinfo->volname, volname)) {
gf_log ("", GF_LOG_DEBUG, "Volume %s found", volname);
ret = 0;
*volinfo = tmp_volinfo;
break;
}
}
gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
/* Stop the service whose pid is recorded in 'pidfile' by sending it
 * 'sig'; with 'force_kill' escalate to SIGKILL if the process still
 * holds the pidfile lock a second later.
 * Liveness is inferred from an advisory lockf() on the pidfile: a live
 * daemon holds F_TLOCK, so our trylock failing (EAGAIN/EACCES) means
 * "still running". A successful trylock means the pidfile is stale and
 * is simply unlinked.
 * Returns 0 when the service is stopped or was not running, -1 on
 * unreadable pidfile or failed kill. */
int32_t
glusterd_service_stop (const char *service, char *pidfile, int sig,
gf_boolean_t force_kill)
{
int32_t ret = -1;
pid_t pid = -1;
FILE *file = NULL;
gf_boolean_t is_locked = _gf_false;
file = fopen (pidfile, "r+");
if (!file) {
gf_log ("", GF_LOG_ERROR, "Unable to open pidfile: %s",
pidfile);
/* Missing pidfile => service presumably never started. */
if (errno == ENOENT) {
gf_log ("",GF_LOG_TRACE, "%s may not be running",
service);
ret = 0;
goto out;
}
ret = -1;
goto out;
}
/* Trylock succeeding means no live process holds the pidfile:
* treat it as stale and remove it. */
ret = lockf (fileno (file), F_TLOCK, 0);
if (!ret) {
is_locked = _gf_true;
ret = unlink (pidfile);
if (ret && (ENOENT != errno)) {
gf_log ("", GF_LOG_ERROR, "Unable to "
"unlink stale pidfile: %s", pidfile);
} else if (ret && (ENOENT == errno)){
ret = 0;
gf_log ("", GF_LOG_INFO, "Brick already stopped");
}
goto out;
}
ret = fscanf (file, "%d", &pid);
if (ret <= 0) {
gf_log ("", GF_LOG_ERROR, "Unable to read pidfile: %s",
pidfile);
ret = -1;
goto out;
}
fclose (file);
file = NULL;
gf_log ("", GF_LOG_INFO, "Stopping gluster %s running in pid: %d",
service, pid);
ret = kill (pid, sig);
if (force_kill) {
/* Give the process a moment to exit, then re-check via the
* pidfile lock and escalate to SIGKILL if still alive. */
sleep (1);
file = fopen (pidfile, "r+");
if (!file) {
ret = 0;
goto out;
}
ret = lockf (fileno (file), F_TLOCK, 0);
if (ret && ((EAGAIN == errno) || (EACCES == errno))) {
/* Lock still held => process survived 'sig'. */
ret = kill (pid, SIGKILL);
if (ret) {
gf_log ("", GF_LOG_ERROR, "Unable to "
"kill pid %d reason: %s", pid,
strerror(errno));
goto out;
}
} else if (0 == ret){
is_locked = _gf_true;
}
ret = unlink (pidfile);
if (ret && (ENOENT != errno)) {
gf_log ("", GF_LOG_ERROR, "Unable to "
"unlink pidfile: %s", pidfile);
goto out;
}
}
ret = 0;
out:
/* Drop our own trylock (if taken) before closing the stream. */
if (is_locked && file)
if (lockf (fileno (file), F_ULOCK, 0) < 0)
gf_log ("", GF_LOG_WARNING, "Cannot unlock pidfile: %s"
" reason: %s", pidfile, strerror(errno));
if (file)
fclose (file);
return ret;
}
/* Compute the unix-socket path used to talk to a brick process and
 * write it into 'sockpath' (capacity 'len').
 * The name is "<glusterd_sock_dir>/<md5 of volume-dir/run/host-path>.socket":
 * hashing keeps the result short enough for sockaddr_un's sun_path
 * limit regardless of how long the brick path is. */
void
glusterd_set_brick_socket_filepath (glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t *brickinfo,
char *sockpath, size_t len)
{
char export_path[PATH_MAX] = {0,};
char sock_filepath[PATH_MAX] = {0,};
char md5_sum[MD5_DIGEST_LEN*2+1] = {0,};
char volume_dir[PATH_MAX] = {0,};
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
int expected_file_len = 0;
/* dir + '/' + 32 hex digits + ".socket" + NUL must fit in 'len'. */
expected_file_len = strlen (glusterd_sock_dir) + strlen ("/") +
MD5_DIGEST_LEN*2 + strlen (".socket") + 1;
GF_ASSERT (len >= expected_file_len);
this = THIS;
GF_ASSERT (this);
priv = this->private;
GLUSTERD_GET_VOLUME_DIR (volume_dir, volinfo, priv);
GLUSTERD_REMOVE_SLASH_FROM_PATH (brickinfo->path, export_path);
snprintf (sock_filepath, PATH_MAX, "%s/run/%s-%s",
volume_dir, brickinfo->hostname, export_path);
_get_md5_str (md5_sum, sizeof (md5_sum),
(uint8_t*)sock_filepath, strlen (sock_filepath));
snprintf (sockpath, len, "%s/%s.socket", glusterd_sock_dir, md5_sum);
}
/* connection happens only if it is not aleady connected,
* reconnections are taken care by rpc-layer
*/
int32_t
glusterd_brick_connect (glusterd_volinfo_t *volinfo,
|