/*
Copyright (c) 2006-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
#endif
#include <inttypes.h>
#if defined(GF_LINUX_HOST_OS)
#include <mntent.h>
#else
#include "mntent_compat.h"
#endif
#include <dlfcn.h>
#if (HAVE_LIB_XML)
#include <libxml/encoding.h>
#include <libxml/xmlwriter.h>
#endif
#include "globals.h"
#include "glusterfs.h"
#include "compat.h"
#include "dict.h"
#include "xlator.h"
#include "logging.h"
#include "glusterd-messages.h"
#include "timer.h"
#include "defaults.h"
#include "compat.h"
#include "syncop.h"
#include "run.h"
#include "compat-errno.h"
#include "statedump.h"
#include "syscall.h"
#include "glusterd-mem-types.h"
#include "glusterd.h"
#include "glusterd-op-sm.h"
#include "glusterd-geo-rep.h"
#include "glusterd-sm.h"
#include "glusterd-utils.h"
#include "glusterd-store.h"
#include "glusterd-volgen.h"
#include "glusterd-pmap.h"
#include "glusterfs-acl.h"
#include "glusterd-syncop.h"
#include "glusterd-locks.h"
#include "glusterd-messages.h"
#include "glusterd-volgen.h"
#include "glusterd-snapshot-utils.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-svc-helper.h"
#include "glusterd-shd-svc.h"
#include "glusterd-nfs-svc.h"
#include "glusterd-quotad-svc.h"
#include "glusterd-snapd-svc.h"
#include "glusterd-bitd-svc.h"
#include "xdr-generic.h"
#include <sys/resource.h>
#include <inttypes.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <rpc/pmap_clnt.h>
#include <unistd.h>
#include <fnmatch.h>
#include <sys/statvfs.h>
#include <ifaddrs.h>
#ifdef HAVE_BD_XLATOR
#include <lvm2app.h>
#endif
#ifdef GF_SOLARIS_HOST_OS
#include <sys/sockio.h>
#endif
#define NFS_PROGRAM 100003
#define NFSV3_VERSION 3
#define MOUNT_PROGRAM 100005
#define MOUNTV3_VERSION 3
#define MOUNTV1_VERSION 1
#define NLM_PROGRAM 100021
#define NLMV4_VERSION 4
#define NLMV1_VERSION 1
extern struct volopt_map_entry glusterd_volopt_map[];
static glusterd_lock_t lock;
/* Copy the uuid of the current cluster-lock holder into @uuid.
 * A null uuid means the lock is free. Always returns 0. */
int32_t
glusterd_get_lock_owner (uuid_t *uuid)
{
        gf_uuid_copy (*uuid, lock.owner);
        return 0;
}
/* Record @owner as the current holder of the cluster lock.
 * Always returns 0. */
static int32_t
glusterd_set_lock_owner (uuid_t owner)
{
        gf_uuid_copy (lock.owner, owner);
        /* TODO: record a timestamp alongside the owner */
        return 0;
}
/* Clear the recorded cluster-lock holder. Note the @owner argument is
 * not consulted: the lock is cleared unconditionally (callers verify
 * ownership first). Always returns 0. */
static int32_t
glusterd_unset_lock_owner (uuid_t owner)
{
        gf_uuid_clear (lock.owner);
        /* TODO: record a timestamp alongside the owner */
        return 0;
}
/* Probe the kernel userspace-filesystem device node (/dev/puffs on
 * NetBSD, /dev/fuse elsewhere). Returns _gf_true when it can be
 * opened read-write and closed cleanly, _gf_false otherwise. */
gf_boolean_t
glusterd_is_fuse_available ()
{
#ifdef __NetBSD__
        int fd = open ("/dev/puffs", O_RDWR);
#else
        int fd = open ("/dev/fuse", O_RDWR);
#endif

        return (fd > -1 && !close (fd)) ? _gf_true : _gf_false;
}
/* Acquire the process-wide cluster lock on behalf of @uuid.
 *
 * Returns 0 when the lock was free and is now held by @uuid; returns
 * -1 (with an ERROR log naming the current holder) when some peer
 * already holds it. */
int32_t
glusterd_lock (uuid_t uuid)
{
        uuid_t    holder;
        char      requestor_str[50];
        char      holder_str[50];
        int       ret  = -1;
        xlator_t *this = THIS;

        GF_ASSERT (this);
        GF_ASSERT (uuid);

        glusterd_get_lock_owner (&holder);
        if (!gf_uuid_is_null (holder)) {
                gf_log (this->name, GF_LOG_ERROR, "Unable to get lock"
                        " for uuid: %s, lock held by: %s",
                        uuid_utoa_r (uuid, requestor_str),
                        uuid_utoa_r (holder, holder_str));
                goto out;
        }

        ret = glusterd_set_lock_owner (uuid);
        if (!ret)
                gf_log (this->name, GF_LOG_DEBUG, "Cluster lock held by"
                        " %s", uuid_utoa (uuid));
out:
        return ret;
}
/* Release the cluster lock held by @uuid.
 *
 * Fails (non-zero) when the lock is not held at all, or is held by a
 * different peer; returns 0 on a successful release. */
int32_t
glusterd_unlock (uuid_t uuid)
{
        uuid_t    holder;
        char      requestor_str[50];
        char      holder_str[50];
        int32_t   ret  = -1;
        xlator_t *this = THIS;

        GF_ASSERT (this);
        GF_ASSERT (uuid);

        glusterd_get_lock_owner (&holder);
        if (gf_uuid_is_null (holder)) {
                gf_log (this->name, GF_LOG_ERROR, "Cluster lock not held!");
                goto out;
        }

        /* Only the holder itself may unlock. */
        ret = gf_uuid_compare (uuid, holder);
        if (ret) {
                gf_log (this->name, GF_LOG_ERROR, "Cluster lock held by %s ,"
                        "unlock req from %s!",
                        uuid_utoa_r (holder, holder_str),
                        uuid_utoa_r (uuid, requestor_str));
                goto out;
        }

        ret = glusterd_unset_lock_owner (uuid);
        if (ret) {
                gf_log (this->name, GF_LOG_ERROR, "Unable to clear cluster "
                        "lock");
                goto out;
        }

        ret = 0;
out:
        return ret;
}
/* Copy this glusterd's own uuid (MY_UUID) into @uuid.
 * Always returns 0. */
int
glusterd_get_uuid (uuid_t *uuid)
{
        glusterd_conf_t *priv = THIS->private;

        GF_ASSERT (priv);

        gf_uuid_copy (*uuid, MY_UUID);
        return 0;
}
/* XDR-encode @req (when non-NULL) into a freshly allocated iobuf and
 * hand the request to rpc_clnt_submit() on @rpc.
 *
 * @rpc      client handle to submit on (must be non-NULL)
 * @req      request struct to encode with @xdrproc; NULL for body-less calls
 * @frame    call frame passed through to the RPC layer
 * @prog     RPC program, @procnum procedure within it
 * @iobref   optional iobref to attach the payload to; a temporary one is
 *           created here (and unref'ed before returning) when NULL
 * @cbkfn    completion callback invoked by the RPC layer
 *
 * Returns 0 once the request has been handed to the RPC layer, -1 on
 * allocation/encoding failure before submission. */
int
glusterd_submit_request_unlocked (struct rpc_clnt *rpc, void *req,
                                  call_frame_t *frame, rpc_clnt_prog_t *prog,
                                  int procnum, struct iobref *iobref,
                                  xlator_t *this, fop_cbk_fn_t cbkfn,
                                  xdrproc_t xdrproc)
{
        char          new_iobref = 0; /* did we create the iobref ourselves? */
        int           ret        = -1;
        int           count      = 0;
        ssize_t       req_size   = 0;
        struct iobuf *iobuf      = NULL;
        struct iovec  iov        = {0, };

        GF_ASSERT (rpc);
        GF_ASSERT (this);

        if (req) {
                /* Size the iobuf exactly for the XDR-encoded request. */
                req_size = xdr_sizeof (xdrproc, req);
                iobuf = iobuf_get2 (this->ctx->iobuf_pool, req_size);
                if (!iobuf) {
                        goto out;
                };

                if (!iobref) {
                        iobref = iobref_new ();
                        if (!iobref) {
                                goto out;
                        }
                        new_iobref = 1;
                }

                iobref_add (iobref, iobuf);

                iov.iov_base = iobuf->ptr;
                iov.iov_len = iobuf_pagesize (iobuf);

                /* Create the xdr payload */
                ret = xdr_serialize_generic (iov, req, xdrproc);
                if (ret == -1) {
                        goto out;
                }
                /* Trim the iovec down to the actual encoded length. */
                iov.iov_len = ret;
                count = 1;
        }

        /* Send the msg */
        rpc_clnt_submit (rpc, prog, procnum, cbkfn, &iov, count, NULL, 0,
                         iobref, frame, NULL, 0, NULL, 0, NULL);

        /* Unconditionally set ret to 0 here. This is to guard against a double
         * STACK_DESTROY in case of a failure in rpc_clnt_submit AFTER the
         * request is sent over the wire: once in the callback function of the
         * request and once in the error codepath of some of the callers of
         * glusterd_submit_request().
         */
        ret = 0;
out:
        if (new_iobref) {
                iobref_unref (iobref);
        }

        /* iobref_add() above takes its own ref on iobuf; drop ours.
         * NOTE(review): iobuf may still be NULL on the early-failure
         * path — assumes iobuf_unref() tolerates NULL; confirm against
         * libglusterfs. */
        iobuf_unref (iobuf);

        return ret;
}
/* Thin wrapper over glusterd_submit_request_unlocked() that drops the
 * glusterd big-lock for the duration of the submission and re-takes it
 * afterwards, so the RPC layer is never entered with the lock held. */
int
glusterd_submit_request (struct rpc_clnt *rpc, void *req,
                         call_frame_t *frame, rpc_clnt_prog_t *prog,
                         int procnum, struct iobref *iobref,
                         xlator_t *this, fop_cbk_fn_t cbkfn, xdrproc_t xdrproc)
{
        int              ret  = -1;
        glusterd_conf_t *priv = THIS->private;

        synclock_unlock (&priv->big_lock);
        ret = glusterd_submit_request_unlocked (rpc, req, frame, prog,
                                                procnum, iobref, this,
                                                cbkfn, xdrproc);
        synclock_lock (&priv->big_lock);

        return ret;
}
/* XDR-encode @arg (using @xdrproc) into a freshly allocated iobuf and
 * point *@outmsg at the encoded bytes.
 *
 * Returns the iobuf holding the reply (the caller owns a ref on it),
 * or NULL on allocation/encoding failure. */
struct iobuf *
glusterd_serialize_reply (rpcsvc_request_t *req, void *arg,
                          struct iovec *outmsg, xdrproc_t xdrproc)
{
        struct iobuf *iob = NULL;
        ssize_t retlen = -1;
        ssize_t rsp_size = 0;

        /* First, get the io buffer into which the reply in arg will
         * be serialized.
         */
        rsp_size = xdr_sizeof (xdrproc, arg);
        iob = iobuf_get2 (req->svc->ctx->iobuf_pool, rsp_size);
        if (!iob) {
                gf_log ("", GF_LOG_ERROR, "Failed to get iobuf");
                goto ret;
        }

        iobuf_to_iovec (iob, outmsg);
        /* Use the given serializer to translate the given C structure in arg
         * to XDR format which will be written into the buffer in outmsg.
         */
        /* retlen is used to receive the error since size_t is unsigned and we
         * need -1 for error notification during encoding.
         */
        retlen = xdr_serialize_generic (*outmsg, arg, xdrproc);
        if (retlen == -1) {
                gf_log ("", GF_LOG_ERROR, "Failed to encode message");
                goto ret;
        }

        outmsg->iov_len = retlen;
ret:
        /* Any failure leaves retlen at -1: release the iobuf and return
         * NULL. NOTE(review): iob can be NULL here when iobuf_get2
         * failed — assumes iobuf_unref() tolerates NULL; confirm. */
        if (retlen == -1) {
                iobuf_unref (iob);
                iob = NULL;
        }

        return iob;
}
/* Encode @arg and submit it as the reply to @req, together with any
 * extra @payload iovecs.
 *
 * @iobref is optional; a temporary one is created (and dropped before
 * returning) when NULL.
 *
 * Returns 0 when the reply was handed to the RPC layer, -1 otherwise. */
int
glusterd_submit_reply (rpcsvc_request_t *req, void *arg,
                       struct iovec *payload, int payloadcount,
                       struct iobref *iobref, xdrproc_t xdrproc)
{
        struct iobuf *iob = NULL;
        int ret = -1;
        struct iovec rsp = {0,};
        char new_iobref = 0; /* set when we allocated the iobref ourselves */

        if (!req) {
                GF_ASSERT (req);
                goto out;
        }

        if (!iobref) {
                iobref = iobref_new ();
                if (!iobref) {
                        gf_log ("", GF_LOG_ERROR, "out of memory");
                        goto out;
                }
                new_iobref = 1;
        }

        iob = glusterd_serialize_reply (req, arg, &rsp, xdrproc);
        if (!iob) {
                gf_log ("", GF_LOG_ERROR, "Failed to serialize reply");
        } else {
                iobref_add (iobref, iob);
        }

        /* NOTE(review): submission proceeds even when serialization
         * failed (rsp stays zeroed) — presumably deliberate so the
         * transport still gets an answer; confirm against callers. */
        ret = rpcsvc_submit_generic (req, &rsp, 1, payload, payloadcount,
                                     iobref);

        /* Now that we've done our job of handing the message to the RPC layer
         * we can safely unref the iob in the hope that RPC layer must have
         * ref'ed the iob on receiving into the txlist.
         */
        if (ret == -1) {
                gf_log ("", GF_LOG_ERROR, "Reply submission failed");
                goto out;
        }

        ret = 0;
out:
        if (new_iobref) {
                iobref_unref (iobref);
        }
        if (iob)
                iobuf_unref (iob);

        return ret;
}
/* Return _gf_true when a volume named @volname exists in the in-memory
 * volume list, _gf_false otherwise. */
gf_boolean_t
glusterd_check_volume_exists (char *volname)
{
        glusterd_volinfo_t *found = NULL;

        return (glusterd_volinfo_find (volname, &found) == 0);
}
/* Drop one reference on @volinfo. When the count reaches zero the
 * volinfo is destroyed and NULL is returned; otherwise @volinfo itself
 * is returned. */
glusterd_volinfo_t *
glusterd_volinfo_unref (glusterd_volinfo_t *volinfo)
{
        int remaining = -1;

        pthread_mutex_lock (&volinfo->reflock);
        remaining = --volinfo->refcnt;
        pthread_mutex_unlock (&volinfo->reflock);

        if (remaining == 0) {
                glusterd_volinfo_delete (volinfo);
                return NULL;
        }

        return volinfo;
}
/* Take one reference on @volinfo and return it. */
glusterd_volinfo_t *
glusterd_volinfo_ref (glusterd_volinfo_t *volinfo)
{
        pthread_mutex_lock (&volinfo->reflock);
        volinfo->refcnt++;
        pthread_mutex_unlock (&volinfo->reflock);

        return volinfo;
}
/* Allocate and initialize a fresh glusterd_volinfo_t with one reference
 * taken, storing it in *@volinfo. Returns 0 on success, -1 on
 * allocation failure (in which case *@volinfo is left untouched). */
int32_t
glusterd_volinfo_new (glusterd_volinfo_t **volinfo)
{
        int32_t             ret     = -1;
        glusterd_volinfo_t *new_vol = NULL;

        GF_ASSERT (volinfo);

        new_vol = GF_CALLOC (1, sizeof (*new_vol),
                             gf_gld_mt_glusterd_volinfo_t);
        if (!new_vol)
                goto out;

        LOCK_INIT (&new_vol->lock);
        CDS_INIT_LIST_HEAD (&new_vol->vol_list);
        CDS_INIT_LIST_HEAD (&new_vol->snapvol_list);
        CDS_INIT_LIST_HEAD (&new_vol->bricks);
        CDS_INIT_LIST_HEAD (&new_vol->snap_volumes);

        /* Three dicts are needed; unwind the ones already created when a
         * later allocation fails. */
        new_vol->dict = dict_new ();
        if (!new_vol->dict)
                goto free_vol;
        new_vol->gsync_slaves = dict_new ();
        if (!new_vol->gsync_slaves)
                goto free_dict;
        new_vol->gsync_active_slaves = dict_new ();
        if (!new_vol->gsync_active_slaves)
                goto free_slaves;

        snprintf (new_vol->parent_volname, GD_VOLUME_NAME_MAX, "N/A");
        new_vol->snap_max_hard_limit = GLUSTERD_SNAPS_MAX_HARD_LIMIT;
        new_vol->xl = THIS;
        pthread_mutex_init (&new_vol->reflock, NULL);

        *volinfo = glusterd_volinfo_ref (new_vol);
        ret = 0;
        goto out;

free_slaves:
        dict_unref (new_vol->gsync_slaves);
free_dict:
        dict_unref (new_vol->dict);
free_vol:
        GF_FREE (new_vol);
out:
        gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
        return ret;
}
/* This function will create a new volinfo and then
* dup the entries from volinfo to the new_volinfo.
*
* @param volinfo volinfo which will be duplicated
* @param dup_volinfo new volinfo which will be created
 * @param set_userauth if this is true then the auth info is also copied
*
* @return 0 on success else -1
*/
/* Create a new volinfo and copy @volinfo's configuration into it.
 * On failure any partially-built duplicate is destroyed; on success the
 * caller owns *@dup_volinfo. Returns 0 on success, -1 otherwise. */
int32_t
glusterd_volinfo_dup (glusterd_volinfo_t *volinfo,
                      glusterd_volinfo_t **dup_volinfo,
                      gf_boolean_t set_userauth)
{
        int32_t ret = -1;
        xlator_t *this = NULL;
        glusterd_volinfo_t *new_volinfo = NULL;

        this = THIS;
        GF_ASSERT (this);
        GF_VALIDATE_OR_GOTO (this->name, volinfo, out);
        GF_VALIDATE_OR_GOTO (this->name, dup_volinfo, out);

        ret = glusterd_volinfo_new (&new_volinfo);
        if (ret) {
                gf_log (this->name, GF_LOG_ERROR, "not able to create the "
                        "duplicate volinfo for the volume %s",
                        volinfo->volname);
                goto out;
        }

        /* Scalar topology/configuration fields, copied one-to-one. */
        new_volinfo->type = volinfo->type;
        new_volinfo->replica_count = volinfo->replica_count;
        new_volinfo->stripe_count = volinfo->stripe_count;
        new_volinfo->disperse_count = volinfo->disperse_count;
        new_volinfo->redundancy_count = volinfo->redundancy_count;
        new_volinfo->dist_leaf_count = volinfo->dist_leaf_count;
        new_volinfo->sub_count = volinfo->sub_count;
        new_volinfo->transport_type = volinfo->transport_type;
        new_volinfo->brick_count = volinfo->brick_count;
        new_volinfo->tier_info = volinfo->tier_info;
        new_volinfo->quota_conf_version = volinfo->quota_conf_version;

        /* Deep-copy the option/gsync dicts into the duplicate's own dicts
         * (created by glusterd_volinfo_new above). */
        dict_copy (volinfo->dict, new_volinfo->dict);
        dict_copy (volinfo->gsync_slaves, new_volinfo->gsync_slaves);
        dict_copy (volinfo->gsync_active_slaves,
                   new_volinfo->gsync_active_slaves);
        gd_update_volume_op_versions (new_volinfo);

        if (set_userauth) {
                glusterd_auth_set_username (new_volinfo,
                                            volinfo->auth.username);
                glusterd_auth_set_password (new_volinfo,
                                            volinfo->auth.password);
        }

        *dup_volinfo = new_volinfo;
        ret = 0;
out:
        /* Undo a half-built duplicate on any failure path. */
        if (ret && (NULL != new_volinfo)) {
                (void) glusterd_volinfo_delete (new_volinfo);
        }
        return ret;
}
/* This function will duplicate brickinfo
*
* @param brickinfo Source brickinfo
* @param dup_brickinfo Destination brickinfo
*
* @return 0 on success else -1
*/
/* Copy the contents of @brickinfo into the caller-provided
 * @dup_brickinfo: fixed-size string fields, uuid, ports, status and a
 * duplicated logfile string (the only heap member, owned by the dup).
 *
 * Returns 0 on success, -1 on validation/canonicalization/allocation
 * failure.
 *
 * Fix: the original used unbounded strcpy() for every string member;
 * use snprintf() so each copy is bounded by the destination member's
 * size and always NUL-terminated (CERT STR31-C). Source and destination
 * members have identical sizes, so well-formed input is never
 * truncated. */
int32_t
glusterd_brickinfo_dup (glusterd_brickinfo_t *brickinfo,
                        glusterd_brickinfo_t *dup_brickinfo)
{
        int32_t   ret  = -1;
        xlator_t *this = NULL;

        this = THIS;
        GF_ASSERT (this);
        GF_VALIDATE_OR_GOTO (this->name, brickinfo, out);
        GF_VALIDATE_OR_GOTO (this->name, dup_brickinfo, out);

        snprintf (dup_brickinfo->hostname, sizeof (dup_brickinfo->hostname),
                  "%s", brickinfo->hostname);
        snprintf (dup_brickinfo->path, sizeof (dup_brickinfo->path),
                  "%s", brickinfo->path);
        snprintf (dup_brickinfo->device_path,
                  sizeof (dup_brickinfo->device_path), "%s",
                  brickinfo->device_path);
        snprintf (dup_brickinfo->fstype, sizeof (dup_brickinfo->fstype),
                  "%s", brickinfo->fstype);
        snprintf (dup_brickinfo->mnt_opts, sizeof (dup_brickinfo->mnt_opts),
                  "%s", brickinfo->mnt_opts);

        ret = gf_canonicalize_path (dup_brickinfo->path);
        if (ret) {
                gf_log (THIS->name, GF_LOG_ERROR, "Failed to canonicalize "
                        "brick path");
                goto out;
        }
        gf_uuid_copy (dup_brickinfo->uuid, brickinfo->uuid);

        dup_brickinfo->port = brickinfo->port;
        dup_brickinfo->rdma_port = brickinfo->rdma_port;

        /* logfile is heap-allocated; the duplicate gets its own copy. */
        if (NULL != brickinfo->logfile) {
                dup_brickinfo->logfile = gf_strdup (brickinfo->logfile);
                if (NULL == dup_brickinfo->logfile) {
                        ret = -1;
                        goto out;
                }
        }

        snprintf (dup_brickinfo->brick_id, sizeof (dup_brickinfo->brick_id),
                  "%s", brickinfo->brick_id);
        snprintf (dup_brickinfo->mount_dir, sizeof (dup_brickinfo->mount_dir),
                  "%s", brickinfo->mount_dir);
        dup_brickinfo->status = brickinfo->status;
        dup_brickinfo->snap_status = brickinfo->snap_status;
out:
        return ret;
}
/*
* gd_vol_is_geo_rep_active:
* This function checks for any running geo-rep session for
* the volume given.
*
* Return Value:
* _gf_true : If any running geo-rep session.
* _gf_false: If no running geo-rep session.
*/
/*
 * gd_vol_is_geo_rep_active:
 * Check whether @volinfo has any running geo-replication session,
 * i.e. whether its gsync_active_slaves dict holds any entries.
 *
 * Returns _gf_true when at least one session is active, _gf_false
 * otherwise.
 */
gf_boolean_t
gd_vol_is_geo_rep_active (glusterd_volinfo_t *volinfo)
{
        GF_ASSERT (volinfo);

        if (volinfo->gsync_active_slaves &&
            volinfo->gsync_active_slaves->count > 0)
                return _gf_true;

        return _gf_false;
}
/* Free the stored allow-auth credentials of @volinfo.
 *
 * Fix: also reset the pointers to NULL so a repeated cleanup cannot
 * double-free, and glusterd_auth_get_username()/get_password() cannot
 * hand out dangling pointers afterwards. */
void
glusterd_auth_cleanup (glusterd_volinfo_t *volinfo) {
        GF_ASSERT (volinfo);

        GF_FREE (volinfo->auth.username);
        volinfo->auth.username = NULL;

        GF_FREE (volinfo->auth.password);
        volinfo->auth.password = NULL;
}
/* Return the stored allow-auth username of @volinfo (may be NULL;
 * still owned by the volinfo — do not free). */
char *
glusterd_auth_get_username (glusterd_volinfo_t *volinfo) {
        GF_ASSERT (volinfo);

        return volinfo->auth.username;
}
/* Return the stored allow-auth password of @volinfo (may be NULL;
 * still owned by the volinfo — do not free). */
char *
glusterd_auth_get_password (glusterd_volinfo_t *volinfo) {
        GF_ASSERT (volinfo);

        return volinfo->auth.password;
}
/* Store a heap copy of @username in @volinfo's auth info.
 *
 * Fix: the original unconditionally returned 0, silently ignoring a
 * failed gf_strdup() and leaving a NULL username behind. Report the
 * allocation failure with -1 instead (callers that ignored the return
 * are unaffected on the success path). */
int32_t
glusterd_auth_set_username (glusterd_volinfo_t *volinfo, char *username) {
        GF_ASSERT (volinfo);
        GF_ASSERT (username);

        volinfo->auth.username = gf_strdup (username);
        if (!volinfo->auth.username)
                return -1;

        return 0;
}
/* Store a heap copy of @password in @volinfo's auth info.
 *
 * Fix: the original unconditionally returned 0, silently ignoring a
 * failed gf_strdup() and leaving a NULL password behind. Report the
 * allocation failure with -1 instead (callers that ignored the return
 * are unaffected on the success path). */
int32_t
glusterd_auth_set_password (glusterd_volinfo_t *volinfo, char *password) {
        GF_ASSERT (volinfo);
        GF_ASSERT (password);

        volinfo->auth.password = gf_strdup (password);
        if (!volinfo->auth.password)
                return -1;

        return 0;
}
/* Unlink @brickinfo from whatever brick list it is on and free it
 * (including its heap-allocated logfile). Always returns 0. */
int32_t
glusterd_brickinfo_delete (glusterd_brickinfo_t *brickinfo)
{
        GF_ASSERT (brickinfo);

        cds_list_del_init (&brickinfo->brick_list);
        GF_FREE (brickinfo->logfile);
        GF_FREE (brickinfo);

        return 0;
}
/* Delete every brickinfo attached to @volinfo, stopping at the first
 * failure. Returns 0 when all bricks were deleted, otherwise the error
 * from glusterd_brickinfo_delete(). */
int32_t
glusterd_volume_brickinfos_delete (glusterd_volinfo_t *volinfo)
{
        int32_t               ret   = 0;
        glusterd_brickinfo_t *entry = NULL;
        glusterd_brickinfo_t *next  = NULL;

        GF_ASSERT (volinfo);

        /* _safe variant: entries are unlinked/freed while iterating. */
        cds_list_for_each_entry_safe (entry, next, &volinfo->bricks,
                                      brick_list) {
                ret = glusterd_brickinfo_delete (entry);
                if (ret)
                        break;
        }

        gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
        return ret;
}
/* Unlink @volinfo from the global volume list and drop the list's
 * reference on it (which may destroy it). Always returns 0. */
int
glusterd_volinfo_remove (glusterd_volinfo_t *volinfo)
{
        cds_list_del_init (&volinfo->vol_list);
        glusterd_volinfo_unref (volinfo);

        return 0;
}
/* Tear down @volinfo completely: unlink it from the volume and snapshot
 * lists, delete all its bricks, release its dicts/handles/credentials,
 * and free the structure itself.
 *
 * Returns 0 on success; non-zero when deleting the brickinfos failed,
 * in which case @volinfo itself is NOT freed. */
int32_t
glusterd_volinfo_delete (glusterd_volinfo_t *volinfo)
{
        int32_t ret = -1;

        GF_ASSERT (volinfo);

        cds_list_del_init (&volinfo->vol_list);
        cds_list_del_init (&volinfo->snapvol_list);

        ret = glusterd_volume_brickinfos_delete (volinfo);
        if (ret)
                goto out;
        /* The dicts may be NULL on partially-constructed volinfos. */
        if (volinfo->dict)
                dict_unref (volinfo->dict);
        if (volinfo->gsync_slaves)
                dict_unref (volinfo->gsync_slaves);
        if (volinfo->gsync_active_slaves)
                dict_unref (volinfo->gsync_active_slaves);
        GF_FREE (volinfo->logdir);
        if (volinfo->rebal.dict)
                dict_unref (volinfo->rebal.dict);

        gf_store_handle_destroy (volinfo->quota_conf_shandle);

        /* Frees auth.username/password. */
        glusterd_auth_cleanup (volinfo);

        pthread_mutex_destroy (&volinfo->reflock);
        GF_FREE (volinfo);
        ret = 0;
out:
        gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
        return ret;
}
/* Allocate a zeroed glusterd_brickinfo_t with an initialized list head
 * and store it in *@brickinfo. Returns 0 on success, -1 on allocation
 * failure (*@brickinfo untouched). */
int32_t
glusterd_brickinfo_new (glusterd_brickinfo_t **brickinfo)
{
        int32_t               ret = -1;
        glusterd_brickinfo_t *tmp = NULL;

        GF_ASSERT (brickinfo);

        tmp = GF_CALLOC (1, sizeof (*tmp), gf_gld_mt_glusterd_brickinfo_t);
        if (tmp) {
                CDS_INIT_LIST_HEAD (&tmp->brick_list);
                *brickinfo = tmp;
                ret = 0;
        }

        gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
        return ret;
}
/* Scan all bricks of @volinfo for the highest numeric suffix in their
 * brick ids (format "<...>-<n>") and return the next free id
 * (max + 1, i.e. 0 for an empty volume). Returns a negative value when
 * a brick id cannot be parsed.
 *
 * Fix: strrchr() returns NULL when a brick_id contains no '-'; the
 * original then dereferenced one past NULL via ++token. Treat a
 * missing delimiter as an unparsable id instead. */
int
glusterd_get_next_available_brickid (glusterd_volinfo_t *volinfo)
{
        glusterd_brickinfo_t *brickinfo   = NULL;
        char                 *token       = NULL;
        int                   brickid     = 0;
        int                   max_brickid = -1;
        int                   ret         = -1;

        cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
                token = strrchr (brickinfo->brick_id, '-');
                if (!token) {
                        gf_log (THIS->name, GF_LOG_ERROR,
                                "Unable to generate brick ID");
                        return -1;
                }
                ret = gf_string2int32 (token + 1, &brickid);
                if (ret < 0) {
                        gf_log (THIS->name, GF_LOG_ERROR,
                                "Unable to generate brick ID");
                        return ret;
                }
                if (brickid > max_brickid)
                        max_brickid = brickid;
        }

        return max_brickid + 1;
}
/* Resolve @brickinfo->hostname to a peer uuid and store it in
 * brickinfo->uuid. Returns the lookup's result (0 on success). */
int32_t
glusterd_resolve_brick (glusterd_brickinfo_t *brickinfo)
{
        int32_t   ret  = -1;
        xlator_t *this = THIS;

        GF_ASSERT (this);
        GF_ASSERT (brickinfo);

        ret = glusterd_hostname_to_uuid (brickinfo->hostname, brickinfo->uuid);

        gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
        return ret;
}
/* Compute the sub-directory of @brickpath relative to its mount point
 * and write it ("/<subdir>") into @mount_dir (must hold PATH_MAX
 * bytes).
 *
 * The computation only runs when @hostname resolves to this node's own
 * uuid; for remote bricks the function returns 0 without touching
 * @mount_dir.
 *
 * Returns 0 on success, -1 on failure. */
int32_t
glusterd_get_brick_mount_dir (char *brickpath, char *hostname, char *mount_dir)
{
        char *mnt_pt = NULL;
        char *brick_dir = NULL;
        int32_t ret = -1;
        uuid_t brick_uuid = {0, };
        xlator_t *this = NULL;

        this = THIS;
        GF_ASSERT (this);
        GF_ASSERT (brickpath);
        GF_ASSERT (hostname);
        GF_ASSERT (mount_dir);

        ret = glusterd_hostname_to_uuid (hostname, brick_uuid);
        if (ret) {
                gf_log (this->name, GF_LOG_ERROR,
                        "Failed to convert hostname %s to uuid",
                        hostname);
                goto out;
        }

        /* Only meaningful for bricks hosted on this node. */
        if (!gf_uuid_compare (brick_uuid, MY_UUID)) {
                ret = glusterd_get_brick_root (brickpath, &mnt_pt);
                if (ret) {
                        gf_log (this->name, GF_LOG_WARNING,
                                "Could not get the root of the brick path %s",
                                brickpath);
                        goto out;
                }

                /* The brick path must lie under its mount point. */
                if (strncmp (brickpath, mnt_pt, strlen(mnt_pt))) {
                        gf_log (this->name, GF_LOG_WARNING,
                                "brick: %s brick mount: %s",
                                brickpath, mnt_pt);
                        ret = -1;
                        goto out;
                }

                /* Skip past the mount point and its trailing '/'. */
                brick_dir = &brickpath[strlen (mnt_pt)];
                brick_dir++;

                snprintf (mount_dir, PATH_MAX, "/%s", brick_dir);
                /* NOTE(review): mnt_pt appears to be heap-allocated by
                 * glusterd_get_brick_root and is never freed here —
                 * confirm ownership; this looks like a leak. */
        }
out:
        gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
        return ret;
}
/* Parse a "<hostname>:<path>" brick specification into a newly
 * allocated brickinfo, stored in *@brickinfo on success. The path is
 * canonicalized; with BD xlator support a "?<vg>" suffix is split off
 * into the vg field.
 *
 * Returns 0 on success, -1 on parse/allocation failure.
 *
 * Fixes:
 *  - the old cleanup freed tmp_host and then *read the freed pointer*
 *    to decide whether to free tmp_path (use of an indeterminate
 *    pointer), leaking tmp_path whenever tmp_host's allocation failed;
 *    both are now freed unconditionally (GF_FREE on NULL is a no-op).
 *  - new_brickinfo leaked when gf_canonicalize_path() failed; it is now
 *    released on every failure path.
 *  - the hard-coded strncpy(…, 1024) copies did not guarantee NUL
 *    termination; replaced with bounded snprintf. */
int32_t
glusterd_brickinfo_new_from_brick (char *brick,
                                   glusterd_brickinfo_t **brickinfo)
{
        char                 *hostname      = NULL;
        char                 *path          = NULL;
        char                 *tmp_host      = NULL;
        char                 *tmp_path      = NULL;
        char                 *vg            = NULL;
        int32_t               ret           = -1;
        glusterd_brickinfo_t *new_brickinfo = NULL;
        xlator_t             *this          = NULL;

        this = THIS;
        GF_ASSERT (this);
        GF_ASSERT (brick);
        GF_ASSERT (brickinfo);

        /* Split "<host>:<path>"; hostname/path point into the duplicated
         * buffers, which stay alive until 'out'. */
        tmp_host = gf_strdup (brick);
        if (tmp_host && !get_host_name (tmp_host, &hostname))
                goto out;
        tmp_path = gf_strdup (brick);
        if (tmp_path && !get_path_name (tmp_path, &path))
                goto out;

        GF_ASSERT (hostname);
        GF_ASSERT (path);

        ret = glusterd_brickinfo_new (&new_brickinfo);
        if (ret)
                goto out;

#ifdef HAVE_BD_XLATOR
        vg = strchr (path, '?');
        /* ? is used as a delimiter for vg */
        if (vg) {
                strncpy (new_brickinfo->vg, vg + 1, PATH_MAX - 1);
                *vg = '\0';
        }
        new_brickinfo->caps = CAPS_BD;
#else
        vg = NULL; /* Avoid compiler warnings when BD not enabled */
#endif
        ret = gf_canonicalize_path (path);
        if (ret)
                goto out;

        /* Bounded, always-terminated copies into the fixed-size members. */
        snprintf (new_brickinfo->hostname, sizeof (new_brickinfo->hostname),
                  "%s", hostname);
        snprintf (new_brickinfo->path, sizeof (new_brickinfo->path),
                  "%s", path);

        *brickinfo = new_brickinfo;
        ret = 0;
out:
        GF_FREE (tmp_host);
        GF_FREE (tmp_path);
        if (ret && new_brickinfo)
                GF_FREE (new_brickinfo);

        gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
        return ret;
}
/* Return _gf_true when one of @str1/@str2 is a path-prefix of the
 * other, or they are identical. "/a/b" is a prefix of "/a/b/c" but not
 * of "/a/bc": the longer string must continue with a '/' right after
 * the common part. */
static gf_boolean_t
_is_prefix (char *str1, char *str2)
{
        int   len1    = 0;
        int   len2    = 0;
        int   shorter = 0;
        char *longer  = NULL;

        GF_ASSERT (str1);
        GF_ASSERT (str2);

        len1 = strlen (str1);
        len2 = strlen (str2);
        shorter = min (len1, len2);

        /* The first 'shorter' bytes must match exactly. */
        if (strncmp (str1, str2, shorter) != 0)
                return _gf_false;

        /* Identical strings count as prefixes of each other. */
        if (len1 == len2)
                return _gf_true;

        /* Common part matches; the longer one must continue with '/'. */
        longer = (len1 > len2) ? str1 : str2;
        return (longer[shorter] == '/') ? _gf_true : _gf_false;
}
/* Checks if @path is available in the peer identified by @uuid
* 'availability' is determined by querying current state of volumes
* in the cluster. */
/* Decide whether @path can be used as a new brick path on the peer
 * identified by @uuid: it must not equal, contain, or be contained in
 * any existing brick path of that peer (checked via _is_prefix on
 * canonicalized paths).
 *
 * Paths are canonicalized with realpath(3) where possible; a path that
 * does not exist yet (ENOENT) is compared as given. Any other realpath
 * failure conservatively reports the path as unavailable.
 *
 * Returns _gf_true when the path is available. */
gf_boolean_t
glusterd_is_brickpath_available (uuid_t uuid, char *path)
{
        glusterd_brickinfo_t *brickinfo = NULL;
        glusterd_volinfo_t *volinfo = NULL;
        glusterd_conf_t *priv = NULL;
        gf_boolean_t available = _gf_false;
        char tmp_path[PATH_MAX+1] = {0};
        char tmp_brickpath[PATH_MAX+1] = {0};

        priv = THIS->private;

        strncpy (tmp_path, path, PATH_MAX);
        /* path may not yet exist */
        if (!realpath (path, tmp_path)) {
                if (errno != ENOENT) {
                        goto out;
                }
                /* When realpath(3) fails, tmp_path is undefined. */
                strncpy(tmp_path,path,PATH_MAX);
        }

        /* Compare against every brick of every volume owned by @uuid. */
        cds_list_for_each_entry (volinfo, &priv->volumes, vol_list) {
                cds_list_for_each_entry (brickinfo, &volinfo->bricks,
                                         brick_list) {
                        if (gf_uuid_compare (uuid, brickinfo->uuid))
                                continue;

                        if (!realpath (brickinfo->path, tmp_brickpath)) {
                                if (errno == ENOENT)
                                        strncpy (tmp_brickpath, brickinfo->path,
                                                 PATH_MAX);
                                else
                                        goto out;
                        }

                        /* Overlap in either direction makes @path
                         * unavailable. */
                        if (_is_prefix (tmp_brickpath, tmp_path))
                                goto out;
                }
        }
        available = _gf_true;
out:
        return available;
}
#ifdef HAVE_BD_XLATOR
/*
* Sets the tag of the format "trusted.glusterfs.volume-id:<uuid>" in
* the brick VG. It is used to avoid using same VG for another brick.
* @volume-id - gfid, @brick - brick info, @msg - Error message returned
* to the caller
*/
int
glusterd_bd_set_vg_tag (unsigned char *volume_id, glusterd_brickinfo_t *brick,
char *msg, int msg_size)
{
lvm_t handle = NULL;
vg_t vg = NULL;
char *uuid = NULL;
int ret = -1;
gf_asprintf (&uuid, "%s:%s", GF_XATTR_VOL_ID_KEY,
uuid_utoa (volume_id));
if (!uuid) {
snprintf (msg, sizeof(*msg), "Could not allocate memory "
"for tag");
return -1;
}
handle = lvm_init (NULL);
if (!handle) {
snprintf (msg, sizeof(*msg), "lvm_init failed");
goto out;
}
vg = lvm_vg_open (handle, brick->vg, "w", 0);
if (!vg) {
snprintf (msg, sizeof(*msg), "Could not open VG %s",
brick->vg);
goto out;
}
if (lvm_vg_add_tag (vg, uuid) < 0) {
snprintf (msg, sizeof(*msg), "Could not set tag %s for "
"VG %s", uuid, brick->vg);
goto out;
}
lvm_vg_write (vg);
ret = 0;
out:
GF_FREE (uuid);
if (vg)
lvm_vg_close (vg);
if (handle)
lvm_quit (handle);
|