author    Vijay Bellur <vbellur@redhat.com>    2013-09-16 11:41:32 +0530
committer Anand Avati <avati@redhat.com>    2013-09-19 14:05:30 -0700
commit    56e8bc6464d14ceaa42649dea1f5a4abef064668 (patch)
tree      dfff9bf54ac2fb1092b00fbf7359fe65d68f020b /cli/src/cli-cmd-misc.c
parent    ac2f281ad3105236b024550bac48395d513260ec (diff)
mount/fuse: Implement forget in cbks for fuse.

With the introduction of inode_ctx_set in fuse as part of 2991503d014,
forget cbk gets called for the fuse xlator. Though nothing needs to be
done in forget_cbk, excessive log messages of the following kind are
observed:

[2013-09-16 06:09:50.758063] W [defaults.c:1331:default_forget]
(-->/usr/local/lib/glusterfs/3git/xlator/mount/fuse.so(+0xa1f2) [0x7f51432781f2]
(-->/usr/local/lib/libglusterfs.so.0(inode_unref+0x3c) [0x7f5144e5816c]
(-->/usr/local/lib/libglusterfs.so.0(+0x2d061) [0x7f5144e58061])))
0-fuse: xlator does not implement forget_cbk

This patch prevents such log messages from being seen.

Signed-off-by: Vijay Bellur <vbellur@redhat.com>
BUG: 979910
Change-Id: Ie5874138f46822b10ff4213bd1134d78330ec460
Reviewed-on: http://review.gluster.org/5932
Reviewed-by: Anand Avati <avati@redhat.com>
Tested-by: Anand Avati <avati@redhat.com>
Reviewed-on: http://review.gluster.org/5975
Tested-by: Gluster Build System <jenkins@build.gluster.com>
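For orientation, the fix described above amounts to registering a no-op forget callback on the fuse xlator so that default_forget is never reached for it. Below is a minimal sketch of what such a callback could look like; the function name and the cbks wiring are illustrative assumptions, not a verbatim excerpt from the patch:

    /* Hypothetical sketch: a no-op forget for the fuse xlator.
     * Registering it keeps default_forget from logging "xlator does
     * not implement forget_cbk" on every inode forget. */
    static int32_t
    fuse_forget_cbk (xlator_t *this, inode_t *inode)
    {
            /* fuse keeps no per-inode state that needs release here */
            return 0;
    }

    struct xlator_cbks cbks = {
            .forget = fuse_forget_cbk,
    };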
Diffstat (limited to 'cli/src/cli-cmd-misc.c')
0 files changed, 0 insertions, 0 deletions
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-log-ops.c 9
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-mem-types.h 5
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c 936
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-mgmt.c 1899
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-mgmt.h 45
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-mountbroker.c 1
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-op-sm.c 1524
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-op-sm.h 43
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-pmap.c 12
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-quota.c 1426
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-rebalance.c 106
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-replace-brick.c 40
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-rpc-ops.c 430
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-sm.c 52
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-sm.h 1
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-snapshot.c 5787
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-store.c 1834
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-store.h 106
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-syncop.c 611
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-syncop.h 17
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-utils.c 4854
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-utils.h 232
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-volgen.c 1024
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-volgen.h 23
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-volume-ops.c 488
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd-volume-set.c 351
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd.c 378
-rw-r--r-- xlators/mgmt/glusterd/src/glusterd.h 489
38 files changed, 23929 insertions, 2707 deletions
diff --git a/xlators/mgmt/glusterd/src/Makefile.am b/xlators/mgmt/glusterd/src/Makefile.am
index a6f49ae01..9b33edf4d 100644
--- a/xlators/mgmt/glusterd/src/Makefile.am
+++ b/xlators/mgmt/glusterd/src/Makefile.am
@@ -11,7 +11,9 @@ glusterd_la_SOURCES = glusterd.c glusterd-handler.c glusterd-sm.c \
glusterd-volgen.c glusterd-rebalance.c glusterd-quota.c \
glusterd-geo-rep.c glusterd-replace-brick.c glusterd-log-ops.c \
glusterd-volume-ops.c glusterd-brick-ops.c glusterd-mountbroker.c \
- glusterd-syncop.c glusterd-hooks.c glusterd-volume-set.c
+ glusterd-syncop.c glusterd-hooks.c glusterd-volume-set.c \
+ glusterd-locks.c glusterd-snapshot.c glusterd-mgmt-handler.c \
+ glusterd-mgmt.c glusterd-etcd.c
glusterd_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
$(top_builddir)/rpc/xdr/src/libgfxdr.la \
@@ -21,12 +23,13 @@ glusterd_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
noinst_HEADERS = glusterd.h glusterd-utils.h glusterd-op-sm.h \
glusterd-sm.h glusterd-store.h glusterd-mem-types.h \
glusterd-pmap.h glusterd-volgen.h glusterd-mountbroker.h \
- glusterd-syncop.h glusterd-hooks.h
+ glusterd-syncop.h glusterd-hooks.h glusterd-locks.h \
+ glusterd-mgmt.h glusterd-etcd.h
AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
-I$(rpclibdir) -I$(CONTRIBDIR)/rbtree \
-I$(top_srcdir)/rpc/xdr/src -I$(top_srcdir)/rpc/rpc-lib/src \
- -I$(CONTRIBDIR)/uuid \
+ -I$(CONTRIBDIR)/uuid -I$(CONTRIBDIR)/mount \
-DSBIN_DIR=\"$(sbindir)\" -DDATADIR=\"$(localstatedir)\" \
-DGSYNCD_PREFIX=\"$(libexecdir)/glusterfs\"\
-DSYNCDAEMON_COMPILE=$(SYNCDAEMON_COMPILE) $(XML_CPPFLAGS)
diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
index 854d8c728..1804dd02e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
@@ -546,6 +546,84 @@ glusterd_handle_add_brick (rpcsvc_request_t *req)
return glusterd_big_locked_handler (req, __glusterd_handle_add_brick);
}
+static int
+subvol_matcher_init (int **subvols, int count)
+{
+ int ret = -1;
+
+ *subvols = GF_CALLOC (count, sizeof(int), gf_gld_mt_int);
+ if (*subvols)
+ ret = 0;
+
+ return ret;
+}
+
+static void
+subvol_matcher_update (int *subvols, glusterd_volinfo_t *volinfo,
+ glusterd_brickinfo_t *brickinfo)
+{
+ glusterd_brickinfo_t *tmp = NULL;
+ int32_t sub_volume = 0;
+ int pos = 0;
+
+ list_for_each_entry (tmp, &volinfo->bricks, brick_list) {
+
+ if (strcmp (tmp->hostname, brickinfo->hostname) ||
+ strcmp (tmp->path, brickinfo->path)) {
+ pos++;
+ continue;
+ }
+ gf_log (THIS->name, GF_LOG_DEBUG, LOGSTR_FOUND_BRICK,
+ brickinfo->hostname, brickinfo->path,
+ volinfo->volname);
+ sub_volume = (pos / volinfo->dist_leaf_count);
+ subvols[sub_volume]++;
+ break;
+ }
+
+}
+
+static int
+subvol_matcher_verify (int *subvols, glusterd_volinfo_t *volinfo, char *err_str,
+ size_t err_len, char *vol_type, int replica_count)
+{
+ int i = 0;
+ int ret = 0;
+ int count = volinfo->replica_count-replica_count;
+
+ if (replica_count) {
+ for (i = 0; i < volinfo->subvol_count; i++) {
+ if (subvols[i] != count) {
+ ret = -1;
+ snprintf (err_str, err_len, "Remove exactly %d"
+ " brick(s) from each subvolume.", count);
+ break;
+ }
+ }
+ return ret;
+ }
+
+ do {
+
+ if (subvols[i] % volinfo->dist_leaf_count == 0) {
+ continue;
+ } else {
+ ret = -1;
+ snprintf (err_str, err_len,
+ "Bricks not from same subvol for %s", vol_type);
+ break;
+ }
+ } while (++i < volinfo->subvol_count);
+
+ return ret;
+}
+
+static void
+subvol_matcher_destroy (int *subvols)
+{
+ GF_FREE (subvols);
+}
+
int
__glusterd_handle_remove_brick (rpcsvc_request_t *req)
{
@@ -559,20 +637,12 @@ __glusterd_handle_remove_brick (rpcsvc_request_t *req)
int i = 1;
glusterd_volinfo_t *volinfo = NULL;
glusterd_brickinfo_t *brickinfo = NULL;
- int32_t pos = 0;
- int32_t sub_volume = 0;
- int32_t sub_volume_start = 0;
- int32_t sub_volume_end = 0;
- glusterd_brickinfo_t *tmp = NULL;
+ int *subvols = NULL;
char err_str[2048] = {0};
gf_cli_rsp rsp = {0,};
void *cli_rsp = NULL;
char vol_type[256] = {0,};
int32_t replica_count = 0;
- int32_t brick_index = 0;
- int32_t tmp_brick_idx = 0;
- int found = 0;
- int diff_count = 0;
char *volname = 0;
xlator_t *this = NULL;
@@ -681,17 +751,6 @@ __glusterd_handle_remove_brick (rpcsvc_request_t *req)
goto out;
}
- /*Do not allow remove-brick if the volume is a replicate volume*/
- if ((volinfo->type == GF_CLUSTER_TYPE_REPLICATE) &&
- (volinfo->brick_count == volinfo->replica_count)) {
- snprintf (err_str, sizeof(err_str),
- "Removing brick from a replicate volume "
- "is not allowed");
- gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
- ret = -1;
- goto out;
- }
-
if (!replica_count &&
(volinfo->type == GF_CLUSTER_TYPE_STRIPE_REPLICATE) &&
(volinfo->brick_count == volinfo->dist_leaf_count)) {
@@ -738,6 +797,14 @@ __glusterd_handle_remove_brick (rpcsvc_request_t *req)
}
strcpy (brick_list, " ");
+
+ if ((volinfo->type != GF_CLUSTER_TYPE_NONE) &&
+ (volinfo->subvol_count > 1)) {
+ ret = subvol_matcher_init (&subvols, volinfo->subvol_count);
+ if (ret)
+ goto out;
+ }
+
while ( i <= count) {
snprintf (key, sizeof (key), "brick%d", i);
ret = dict_get_str (dict, key, &brick);
@@ -766,77 +833,18 @@ __glusterd_handle_remove_brick (rpcsvc_request_t *req)
(volinfo->brick_count <= volinfo->dist_leaf_count))
continue;
- if (replica_count) {
- /* do the validation of bricks here */
- /* -2 because i++ is already done, and i starts with 1,
- instead of 0 */
- diff_count = (volinfo->replica_count - replica_count);
- brick_index = (((i -2) / diff_count) * volinfo->replica_count);
- tmp_brick_idx = 0;
- found = 0;
- list_for_each_entry (tmp, &volinfo->bricks, brick_list) {
- tmp_brick_idx++;
- gf_log (this->name, GF_LOG_TRACE,
- "validate brick %s:%s (%d %d %d)",
- tmp->hostname, tmp->path, tmp_brick_idx,
- brick_index, volinfo->replica_count);
- if (tmp_brick_idx <= brick_index)
- continue;
- if (tmp_brick_idx >
- (brick_index + volinfo->replica_count))
- break;
- if ((!strcmp (tmp->hostname,brickinfo->hostname)) &&
- !strcmp (tmp->path, brickinfo->path)) {
- found = 1;
- break;
- }
- }
- if (found)
- continue;
+ /* Find which subvolume the brick belongs to */
+ subvol_matcher_update (subvols, volinfo, brickinfo);
+ }
- snprintf (err_str, sizeof (err_str), "Bricks are from "
- "same subvol");
- gf_log (this->name, GF_LOG_INFO,
- "failed to validate brick %s:%s (%d %d %d)",
- tmp->hostname, tmp->path, tmp_brick_idx,
- brick_index, volinfo->replica_count);
- ret = -1;
- /* brick order is not valid */
+ /* Check if the bricks belong to the same subvolumes. */
+ if ((volinfo->type != GF_CLUSTER_TYPE_NONE) &&
+ (volinfo->subvol_count > 1)) {
+ ret = subvol_matcher_verify (subvols, volinfo,
+ err_str, sizeof(err_str),
+ vol_type, replica_count);
+ if (ret)
goto out;
- }
-
- pos = 0;
- list_for_each_entry (tmp, &volinfo->bricks, brick_list) {
-
- if (strcmp (tmp->hostname,brickinfo->hostname) ||
- strcmp (tmp->path, brickinfo->path)) {
- pos++;
- continue;
- }
-
- gf_log (this->name, GF_LOG_DEBUG, LOGSTR_FOUND_BRICK,
- brickinfo->hostname, brickinfo->path,
- volinfo->volname);
- if (!sub_volume && (volinfo->dist_leaf_count > 1)) {
- sub_volume = (pos / volinfo->dist_leaf_count) + 1;
- sub_volume_start = (volinfo->dist_leaf_count *
- (sub_volume - 1));
- sub_volume_end = (volinfo->dist_leaf_count *
- sub_volume) - 1;
- } else {
- if (pos < sub_volume_start ||
- pos >sub_volume_end) {
- ret = -1;
- snprintf (err_str, sizeof (err_str),
- "Bricks not from same subvol "
- "for %s", vol_type);
- gf_log (this->name, GF_LOG_ERROR,
- "%s", err_str);
- goto out;
- }
- }
- break;
- }
}
ret = glusterd_op_begin_synctask (req, GD_OP_REMOVE_BRICK, dict);
@@ -859,6 +867,7 @@ out:
}
GF_FREE (brick_list);
+ subvol_matcher_destroy (subvols);
free (cli_req.dict.dict_val); //its malloced by xdr
return ret;
@@ -871,23 +880,122 @@ glusterd_handle_remove_brick (rpcsvc_request_t *req)
__glusterd_handle_remove_brick);
}
+static int
+_glusterd_restart_gsync_session (dict_t *this, char *key,
+ data_t *value, void *data)
+{
+ char *slave = NULL;
+ char *slave_buf = NULL;
+ char *path_list = NULL;
+ char *slave_vol = NULL;
+ char *slave_ip = NULL;
+ char *conf_path = NULL;
+ char **errmsg = NULL;
+ int ret = -1;
+ glusterd_gsync_status_temp_t *param = NULL;
+ gf_boolean_t is_running = _gf_false;
+
+ param = (glusterd_gsync_status_temp_t *)data;
+
+ GF_ASSERT (param);
+ GF_ASSERT (param->volinfo);
+
+ slave = strchr(value->data, ':');
+ if (slave) {
+ slave++;
+ slave_buf = gf_strdup (slave);
+ if (!slave_buf) {
+ gf_log ("", GF_LOG_ERROR,
+ "Failed to gf_strdup");
+ ret = -1;
+ goto out;
+ }
+ }
+ else
+ return 0;
+
+ ret = dict_set_dynstr (param->rsp_dict, "slave", slave_buf);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Unable to store slave");
+ if (slave_buf)
+ GF_FREE(slave_buf);
+ goto out;
+ }
+
+ ret = glusterd_get_slave_details_confpath (param->volinfo,
+ param->rsp_dict,
+ &slave_ip, &slave_vol,
+ &conf_path, errmsg);
+ if (ret) {
+ if (*errmsg)
+ gf_log ("", GF_LOG_ERROR, "%s", *errmsg);
+ else
+ gf_log ("", GF_LOG_ERROR,
+ "Unable to fetch slave or confpath details.");
+ goto out;
+ }
+
+ /* In cases where gsyncd is not running, we will not invoke it
+ * because of add-brick. */
+ ret = glusterd_check_gsync_running_local (param->volinfo->volname,
+ slave, conf_path,
+ &is_running);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "gsync running validation failed.");
+ goto out;
+ }
+ if (_gf_false == is_running) {
+ gf_log ("", GF_LOG_DEBUG, "gsync session for %s and %s is"
+ " not running on this node. Hence not restarting.",
+ param->volinfo->volname, slave);
+ ret = 0;
+ goto out;
+ }
+
+ ret = glusterd_get_local_brickpaths (param->volinfo, &path_list);
+ if (!path_list) {
+ gf_log ("", GF_LOG_DEBUG, "This node not being part of"
+ " volume should not be running gsyncd. Hence"
+ " no gsyncd process to restart.");
+ ret = 0;
+ goto out;
+ }
+
+ ret = glusterd_check_restart_gsync_session (param->volinfo, slave,
+ param->rsp_dict, path_list,
+ conf_path, 0);
+ if (ret)
+ gf_log ("", GF_LOG_ERROR,
+ "Unable to restart gsync session.");
+
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d.", ret);
+ return ret;
+}
+
/* op-sm */
int
glusterd_op_perform_add_bricks (glusterd_volinfo_t *volinfo, int32_t count,
char *bricks, dict_t *dict)
{
- glusterd_brickinfo_t *brickinfo = NULL;
- char *brick = NULL;
- int32_t i = 1;
- char *brick_list = NULL;
- char *free_ptr1 = NULL;
- char *free_ptr2 = NULL;
- char *saveptr = NULL;
- int32_t ret = -1;
- int32_t stripe_count = 0;
- int32_t replica_count = 0;
- int32_t type = 0;
+ char *brick = NULL;
+ int32_t i = 1;
+ char *brick_list = NULL;
+ char *free_ptr1 = NULL;
+ char *free_ptr2 = NULL;
+ char *saveptr = NULL;
+ int32_t ret = -1;
+ int32_t stripe_count = 0;
+ int32_t replica_count = 0;
+ int32_t type = 0;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_gsync_status_temp_t param = {0, };
+ gf_boolean_t restart_needed = 0;
+ char msg[1024] __attribute__((unused)) = {0, };
+ int caps = 0;
+ int brickid = 0;
GF_ASSERT (volinfo);
@@ -915,11 +1023,17 @@ glusterd_op_perform_add_bricks (glusterd_volinfo_t *volinfo, int32_t count,
"type is set %d, need to change it", type);
}
+ brickid = glusterd_get_next_available_brickid (volinfo);
+ if (brickid < 0)
+ goto out;
while ( i <= count) {
ret = glusterd_brickinfo_new_from_brick (brick, &brickinfo);
if (ret)
goto out;
+ GLUSTERD_ASSIGN_BRICKID_TO_BRICKINFO (brickinfo, volinfo,
+ brickid++);
+
ret = glusterd_resolve_brick (brickinfo);
if (ret)
goto out;
@@ -968,13 +1082,40 @@ glusterd_op_perform_add_bricks (glusterd_volinfo_t *volinfo, int32_t count,
if (count)
brick = strtok_r (brick_list+1, " \n", &saveptr);
+#ifdef HAVE_BD_XLATOR
+ if (brickinfo->vg[0])
+ caps = CAPS_BD | CAPS_THIN |
+ CAPS_OFFLOAD_COPY | CAPS_OFFLOAD_SNAPSHOT;
+#endif
while (i <= count) {
-
ret = glusterd_volume_brickinfo_get_by_brick (brick, volinfo,
&brickinfo);
if (ret)
goto out;
+#ifdef HAVE_BD_XLATOR
+ /* Check for VG/thin pool if its BD volume */
+ if (brickinfo->vg[0]) {
+ ret = glusterd_is_valid_vg (brickinfo, 0, msg);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_CRITICAL, "%s", msg);
+ goto out;
+ }
+ /* if any one of the bricks does not have thin support,
+ disable it for the entire volume */
+ caps &= brickinfo->caps;
+ } else
+ caps = 0;
+#endif
+
+ if (uuid_is_null (brickinfo->uuid)) {
+ ret = glusterd_resolve_brick (brickinfo);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, FMTSTR_RESOLVE_BRICK,
+ brickinfo->hostname, brickinfo->path);
+ goto out;
+ }
+ }
ret = glusterd_brick_start (volinfo, brickinfo,
_gf_true);
@@ -982,8 +1123,27 @@ glusterd_op_perform_add_bricks (glusterd_volinfo_t *volinfo, int32_t count,
goto out;
i++;
brick = strtok_r (NULL, " \n", &saveptr);
+
+ /* Check if the brick is added in this node, and set
+ * the restart_needed flag. */
+ if ((!uuid_compare (brickinfo->uuid, MY_UUID)) &&
+ !restart_needed) {
+ restart_needed = 1;
+ gf_log ("", GF_LOG_DEBUG,
+ "Restart gsyncd session, if it's already "
+ "running.");
+ }
}
+ /* If the restart_needed flag is set, restart gsyncd sessions for that
+ * particular master with all the slaves. */
+ if (restart_needed) {
+ param.rsp_dict = dict;
+ param.volinfo = volinfo;
+ dict_foreach (volinfo->gsync_slaves,
+ _glusterd_restart_gsync_session, &param);
+ }
+ volinfo->caps = caps;
out:
GF_FREE (free_ptr1);
GF_FREE (free_ptr2);
@@ -1047,6 +1207,7 @@ glusterd_op_stage_add_brick (dict_t *dict, char **op_errstr)
int ret = 0;
char *volname = NULL;
int count = 0;
+ int replica_count = 0;
int i = 0;
char *bricks = NULL;
char *brick_list = NULL;
@@ -1055,17 +1216,31 @@ glusterd_op_stage_add_brick (dict_t *dict, char **op_errstr)
char *brick = NULL;
glusterd_brickinfo_t *brickinfo = NULL;
glusterd_volinfo_t *volinfo = NULL;
- glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
char msg[2048] = {0,};
gf_boolean_t brick_alloc = _gf_false;
char *all_bricks = NULL;
char *str_ret = NULL;
gf_boolean_t is_force = _gf_false;
- priv = THIS->private;
- if (!priv)
- goto out;
+ this = THIS;
+ GF_ASSERT (this);
+ ret = dict_get_int32 (dict, "replica-count", &replica_count);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Unable to get replica count");
+ }
+
+ if (replica_count > 0) {
+ ret = op_version_check (this, GD_OP_VER_PERSISTENT_AFR_XATTRS,
+ msg, sizeof(msg));
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "%s", msg);
+ *op_errstr = gf_strdup (msg);
+ goto out;
+ }
+ }
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
gf_log (THIS->name, GF_LOG_ERROR,
@@ -1080,15 +1255,6 @@ glusterd_op_stage_add_brick (dict_t *dict, char **op_errstr)
goto out;
}
- if (volinfo->backend == GD_VOL_BK_BD) {
- snprintf (msg, sizeof (msg), "Add brick is not supported for "
- "Block backend volume %s.", volname);
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
- *op_errstr = gf_strdup (msg);
- ret = -1;
- goto out;
- }
-
ret = glusterd_validate_volume_id (dict, volinfo);
if (ret)
goto out;
@@ -1166,6 +1332,18 @@ glusterd_op_stage_add_brick (dict_t *dict, char **op_errstr)
}
if (!uuid_compare (brickinfo->uuid, MY_UUID)) {
+#ifdef HAVE_BD_XLATOR
+ if (brickinfo->vg[0]) {
+ ret = glusterd_is_valid_vg (brickinfo, 1, msg);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "%s",
+ msg);
+ *op_errstr = gf_strdup (msg);
+ goto out;
+ }
+ }
+#endif
+
ret = glusterd_validate_and_create_brickpath (brickinfo,
volinfo->volume_id,
op_errstr, is_force);
@@ -1195,20 +1373,32 @@ out:
int
glusterd_op_stage_remove_brick (dict_t *dict, char **op_errstr)
{
- int ret = -1;
- char *volname = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- char *errstr = NULL;
- int32_t brick_count = 0;
- char msg[2048] = {0,};
- int32_t flag = 0;
- gf1_op_commands cmd = GF_OP_CMD_NONE;
- char *task_id_str = NULL;
- xlator_t *this = NULL;
+ int ret = -1;
+ char *volname = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ char *errstr = NULL;
+ int32_t brick_count = 0;
+ char msg[2048] = {0,};
+ int32_t flag = 0;
+ gf1_op_commands cmd = GF_OP_CMD_NONE;
+ char *task_id_str = NULL;
+ xlator_t *this = NULL;
+ int i = 1;
+ char key[256] = {0,};
+ char *brick = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
this = THIS;
GF_ASSERT (this);
+ ret = op_version_check (this, GD_OP_VER_PERSISTENT_AFR_XATTRS,
+ msg, sizeof(msg));
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "%s", msg);
+ *op_errstr = gf_strdup (msg);
+ goto out;
+ }
+
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Unable to get volume name");
@@ -1238,11 +1428,26 @@ glusterd_op_stage_remove_brick (dict_t *dict, char **op_errstr)
ret = dict_get_int32 (dict, "command", &flag);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Unable to get brick count");
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to get brick command");
goto out;
}
cmd = flag;
+ ret = dict_get_int32 (dict, "count", &brick_count);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to get brick count");
+ goto out;
+ }
+
+ ret = 0;
+ if (volinfo->brick_count == brick_count) {
+ errstr = gf_strdup ("Deleting all the bricks of the "
+ "volume is not allowed");
+ ret = -1;
+ goto out;
+ }
+
ret = -1;
switch (cmd) {
case GF_OP_CMD_NONE:
@@ -1255,6 +1460,16 @@ glusterd_op_stage_remove_brick (dict_t *dict, char **op_errstr)
case GF_OP_CMD_START:
{
+ if ((volinfo->type == GF_CLUSTER_TYPE_REPLICATE) &&
+ dict_get (dict, "replica-count")) {
+ snprintf (msg, sizeof(msg), "Migration of data is not "
+ "needed when reducing replica count. Use the"
+ " 'force' option");
+ errstr = gf_strdup (msg);
+ gf_log (this->name, GF_LOG_ERROR, "%s", errstr);
+ goto out;
+ }
+
if (GLUSTERD_STATUS_STARTED != volinfo->status) {
snprintf (msg, sizeof (msg), "Volume %s needs to be "
"started before remove-brick (you can use "
@@ -1282,7 +1497,7 @@ glusterd_op_stage_remove_brick (dict_t *dict, char **op_errstr)
goto out;
}
- if (is_origin_glusterd ()) {
+ if (is_origin_glusterd (dict)) {
ret = glusterd_generate_and_set_task_id
(dict, GF_REMOVE_BRICK_TID_KEY);
if (ret) {
@@ -1312,25 +1527,44 @@ glusterd_op_stage_remove_brick (dict_t *dict, char **op_errstr)
"is in progress");
goto out;
}
+
+ /* Do not allow commit if the bricks are not decommissioned */
+ for ( i = 1; i <= brick_count; i++ ) {
+ snprintf (key, sizeof (key), "brick%d", i);
+ ret = dict_get_str (dict, key, &brick);
+ if (ret) {
+ snprintf (msg, sizeof (msg),
+ "Unable to get %s", key);
+ errstr = gf_strdup (msg);
+ goto out;
+ }
+
+ ret =
+ glusterd_volume_brickinfo_get_by_brick(brick, volinfo,
+ &brickinfo);
+ if (ret) {
+ snprintf (msg, sizeof (msg), "Incorrect brick "
+ "%s for volume %s", brick, volname);
+ errstr = gf_strdup (msg);
+ goto out;
+ }
+ if ( !brickinfo->decommissioned ) {
+ snprintf (msg, sizeof (msg), "Brick %s "
+ "is not decommissioned. "
+ "Use start or force option",
+ brick);
+ errstr = gf_strdup (msg);
+ ret = -1;
+ goto out;
+ }
+ }
+
break;
case GF_OP_CMD_COMMIT_FORCE:
break;
}
-
- ret = dict_get_int32 (dict, "count", &brick_count);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Unable to get brick count");
- goto out;
- }
-
ret = 0;
- if (volinfo->brick_count == brick_count) {
- errstr = gf_strdup ("Deleting all the bricks of the "
- "volume is not allowed");
- ret = -1;
- goto out;
- }
out:
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
@@ -1469,15 +1703,6 @@ glusterd_op_add_brick (dict_t *dict, char **op_errstr)
goto out;
}
- /* Need to reset the defrag/rebalance status accordingly */
- switch (volinfo->rebal.defrag_status) {
- case GF_DEFRAG_STATUS_FAILED:
- case GF_DEFRAG_STATUS_COMPLETE:
- volinfo->rebal.defrag_status = 0;
- default:
- break;
- }
-
ret = glusterd_store_volinfo (volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
if (ret)
goto out;
@@ -1509,6 +1734,8 @@ glusterd_op_remove_brick (dict_t *dict, char **op_errstr)
glusterd_brickinfo_t *tmp = NULL;
char *task_id_str = NULL;
xlator_t *this = NULL;
+ dict_t *bricks_dict = NULL;
+ char *brick_tmpstr = NULL;
this = THIS;
GF_ASSERT (this);
@@ -1536,7 +1763,7 @@ glusterd_op_remove_brick (dict_t *dict, char **op_errstr)
/* Set task-id, if available, in ctx dict for operations other than
* start
*/
- if (is_origin_glusterd () && (cmd != GF_OP_CMD_START)) {
+ if (is_origin_glusterd (dict) && (cmd != GF_OP_CMD_START)) {
if (!uuid_is_null (volinfo->rebal.rebalance_id)) {
ret = glusterd_copy_uuid_to_dict
(volinfo->rebal.rebalance_id, dict,
@@ -1549,10 +1776,13 @@ glusterd_op_remove_brick (dict_t *dict, char **op_errstr)
}
}
- /* Clear task-id & rebal.op on commmitting/stopping remove-brick */
+ /* Clear task-id, rebal.op and stored bricks on committing/stopping
+ * remove-brick */
if ((cmd != GF_OP_CMD_START) || (cmd != GF_OP_CMD_STATUS)) {
uuid_clear (volinfo->rebal.rebalance_id);
volinfo->rebal.op = GD_OP_NONE;
+ dict_unref (volinfo->rebal.dict);
+ volinfo->rebal.dict = NULL;
}
ret = -1;
@@ -1593,6 +1823,11 @@ glusterd_op_remove_brick (dict_t *dict, char **op_errstr)
}
case GF_OP_CMD_START:
+ /* Reset defrag status to 'NOT STARTED' whenever a
+ * remove-brick/rebalance command is issued to remove
+ * stale information from previous run.
+ */
+ volinfo->rebal.defrag_status = GF_DEFRAG_STATUS_NOT_STARTED;
ret = dict_get_str (dict, GF_REMOVE_BRICK_TID_KEY, &task_id_str);
if (ret) {
gf_log (this->name, GF_LOG_DEBUG,
@@ -1637,7 +1872,23 @@ glusterd_op_remove_brick (dict_t *dict, char **op_errstr)
goto out;
}
-
+ /* Save the list of bricks for later usage only on starting a
+ * remove-brick. Right now this is required for displaying the task
+ * parameters with task status in volume status.
+ */
+ if (GF_OP_CMD_START == cmd) {
+ bricks_dict = dict_new ();
+ if (!bricks_dict) {
+ ret = -1;
+ goto out;
+ }
+ ret = dict_set_int32 (bricks_dict, "count", count);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to save remove-brick count");
+ goto out;
+ }
+ }
while ( i <= count) {
snprintf (key, 256, "brick%d", i);
ret = dict_get_str (dict, key, &brick);
@@ -1647,12 +1898,32 @@ glusterd_op_remove_brick (dict_t *dict, char **op_errstr)
goto out;
}
+ if (GF_OP_CMD_START == cmd) {
+ brick_tmpstr = gf_strdup (brick);
+ if (!brick_tmpstr) {
+ ret = -1;
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to duplicate brick name");
+ goto out;
+ }
+ ret = dict_set_dynstr (bricks_dict, key, brick_tmpstr);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to add brick to dict");
+ goto out;
+ }
+ brick_tmpstr = NULL;
+ }
+
ret = glusterd_op_perform_remove_brick (volinfo, brick, force,
&need_rebalance);
if (ret)
goto out;
i++;
}
+ if (GF_OP_CMD_START == cmd)
+ volinfo->rebal.dict = dict_ref (bricks_dict);
+
ret = dict_get_int32 (dict, "replica-count", &replica_count);
if (!ret) {
gf_log (this->name, GF_LOG_INFO,
@@ -1660,6 +1931,7 @@ glusterd_op_remove_brick (dict_t *dict, char **op_errstr)
volinfo->replica_count, replica_count,
volinfo->volname);
volinfo->replica_count = replica_count;
+ volinfo->sub_count = replica_count;
volinfo->dist_leaf_count = glusterd_get_dist_leaf_count (volinfo);
volinfo->subvol_count = (volinfo->brick_count /
volinfo->dist_leaf_count);
@@ -1689,6 +1961,16 @@ glusterd_op_remove_brick (dict_t *dict, char **op_errstr)
goto out;
}
+ if (GF_OP_CMD_START == cmd &&
+ volinfo->status == GLUSTERD_STATUS_STARTED) {
+ ret = glusterd_nodesvcs_handle_reconfigure (volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Unable to reconfigure NFS-Server");
+ goto out;
+ }
+ }
+
/* Need to reset the defrag/rebalance status accordingly */
switch (volinfo->rebal.defrag_status) {
case GF_DEFRAG_STATUS_FAILED:
@@ -1720,5 +2002,9 @@ out:
if (ret && err_str[0] && op_errstr)
*op_errstr = gf_strdup (err_str);
+ GF_FREE (brick_tmpstr);
+ if (bricks_dict)
+ dict_unref (bricks_dict);
+
return ret;
}
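As an aside on the subvolume arithmetic used by subvol_matcher_update above: a brick's position in volinfo->bricks maps to its subvolume by integer division with dist_leaf_count. A small worked example, with the volume shape chosen purely for illustration:

    /* A 2 x 3 distributed-replicate volume: dist_leaf_count == 3 and
     * six bricks occupy positions 0..5 in volinfo->bricks. */
    int dist_leaf_count = 3;
    int pos = 4;                             /* fifth brick in the list */
    int sub_volume = pos / dist_leaf_count;  /* 4 / 3 == 1, i.e. the
                                                second subvolume */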
diff --git a/xlators/mgmt/glusterd/src/glusterd-etcd.c b/xlators/mgmt/glusterd/src/glusterd-etcd.c
new file mode 100644
index 000000000..656ea3b9b
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-etcd.c
@@ -0,0 +1,87 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include <errno.h>
+#include <signal.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include "glusterfs.h"
+#include "run.h"
+#include "glusterd-etcd.h"
+
+#define GLUSTERD_ETCD_DIR "/var/lib/glusterd/etcd"
+#define GLUSTERD_ETCD_CMD "/root/etcd/bin/etcd"
+
+pid_t
+start_etcd (char *this_host, char *other_host)
+{
+ runner_t runner;
+ char me[256];
+
+ if (gethostname(me,sizeof(me)-1) != 0) {
+ gf_log (__func__, GF_LOG_ERROR, "gethostname failed?!?");
+ return -1;
+ }
+ me[sizeof(me)-1] = '\0';
+
+ if ((mkdir(GLUSTERD_ETCD_DIR,0700) < 0) && (errno != EEXIST)) {
+ gf_log (__func__, GF_LOG_ERROR,
+ "failed to create %s", GLUSTERD_ETCD_DIR);
+ return -1;
+ }
+
+ runinit (&runner);
+ runner_add_args (&runner, GLUSTERD_ETCD_CMD,
+ "-name", this_host,
+ "-data-dir", GLUSTERD_ETCD_DIR,
+ "-bind-addr", NULL);
+ runner_argprintf (&runner, "%s:4001", me);
+ runner_add_arg (&runner, "-peer-addr");
+ runner_argprintf (&runner, "%s:7001", me);
+ if (other_host) {
+ runner_add_arg (&runner, "-peers");
+ runner_argprintf (&runner, "%s:7001", other_host);
+ gf_log (__func__, GF_LOG_INFO, "starting etcd via %s", other_host);
+ } else {
+ gf_log (__func__, GF_LOG_INFO, "starting etcd standalone");
+ }
+
+ /*
+ * Runner_run would wait for it. Runner_run_nowait would not wait,
+ * but would detach it so thoroughly that it won't die when we do.
+ * Also, runner->chpid would be the PID of the transient middle
+ * process, not the one we might actually need to kill later. This
+ * seems to do exactly what we need.
+ */
+ if (runner_start(&runner) != 0) {
+ gf_log (__func__, GF_LOG_ERROR,
+ "failed to start %s", GLUSTERD_ETCD_CMD);
+ return -1;
+ }
+
+ return runner.chpid;
+}
+
+void
+stop_etcd (pid_t pid)
+{
+ if (pid > 0) {
+ gf_log (__func__, GF_LOG_INFO, "killing etcd %d", pid);
+ (void)kill(pid,SIGKILL);
+ (void)waitpid(pid,NULL,0);
+ }
+}
+
+void
+nuke_etcd_dir (void)
+{
+ (void)runcmd("rm","-rf",GLUSTERD_ETCD_DIR,NULL);
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-etcd.h b/xlators/mgmt/glusterd/src/glusterd-etcd.h
new file mode 100644
index 000000000..9459f6bbd
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-etcd.h
@@ -0,0 +1,23 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _GLUSTERD_ETCD_H_
+#define _GLUSTERD_ETCD_H_
+
+#include <sys/types.h>
+#include "glusterfs.h"
+
+pid_t start_etcd (char *this_host, char *other_host);
+
+void stop_etcd (pid_t pid);
+
+void nuke_etcd_dir (void);
+
+#endif
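Taken together, the new header gives callers a three-step lifecycle for the embedded etcd process. A hypothetical caller could drive it as follows; the host name and the surrounding error handling are assumptions for illustration, not code from this patch:

    #include "glusterd-etcd.h"

    static int
    bring_up_etcd_standalone (void)
    {
            /* NULL other_host omits "-peers", so etcd starts standalone,
             * bound to <hostname>:4001 (client) and <hostname>:7001 (peer). */
            pid_t etcd_pid = start_etcd ("node1", NULL);
            if (etcd_pid < 0)
                    return -1;

            /* ... talk to etcd over its client port ... */

            stop_etcd (etcd_pid);   /* SIGKILL + waitpid on the child */
            nuke_etcd_dir ();       /* rm -rf the etcd data directory */
            return 0;
    }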
diff --git a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
index 67748daa3..9433a128e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
+++ b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
@@ -28,20 +28,30 @@
static int
dict_get_param (dict_t *dict, char *key, char **param);
-static int
-glusterd_get_statefile_name (glusterd_volinfo_t *volinfo, char *slave,
- char *conf_path, char **statefile);
-
-static int
-glusterd_get_slave_details_confpath (glusterd_volinfo_t *volinfo, dict_t *dict,
- char **slave_ip, char **slave_vol,
- char **conf_path);
-
-static int
-glusterd_get_slave_info (dict_t *dict, char **slave_ip, char **slave_vol);
-
-static int
-glusterd_gsync_read_frm_status (char *path, char *buf, size_t blen);
+struct gsync_config_opt_vals_ gsync_confopt_vals[] = {
+ {.op_name = "change_detector",
+ .no_of_pos_vals = 2,
+ .case_sensitive = _gf_true,
+ .values = {"xsync", "changelog"},
+ },
+ {.op_name = "special_sync_mode",
+ .no_of_pos_vals = 2,
+ .case_sensitive = _gf_true,
+ .values = {"partial", "recover"}
+ },
+ {.op_name = "log-level",
+ .no_of_pos_vals = 5,
+ .case_sensitive = _gf_false,
+ .values = {"critical", "error", "warning", "info", "debug"}
+ },
+ {.op_name = "use-tarssh",
+ .no_of_pos_vals = 6,
+ .case_sensitive = _gf_false,
+ .values = {"true", "false", "0", "1", "yes", "no"}
+ },
+ {.op_name = NULL,
+ },
+};
static char *gsync_reserved_opts[] = {
"gluster-command-dir",
@@ -58,6 +68,11 @@ static char *gsync_reserved_opts[] = {
NULL
};
+static char *gsync_no_restart_opts[] = {
+ "checkpoint",
+ NULL
+};
+
int
__glusterd_handle_sys_exec (rpcsvc_request_t *req)
{
@@ -270,7 +285,7 @@ __glusterd_handle_gsync_set (rpcsvc_request_t *req)
ret = dict_get_str (dict, "slave", &slave);
if (ret < 0) {
- gf_log (this->name, GF_LOG_INFO, "slave not found, while"
+ gf_log (this->name, GF_LOG_INFO, "slave not found, while "
"handling "GEOREP" options");
slave = "(No Slave)";
}
@@ -786,15 +801,19 @@ out:
static int
gsync_verify_config_options (dict_t *dict, char **op_errstr, char *volname)
{
- char **resopt = NULL;
- int i = 0;
- int ret = -1;
- char *subop = NULL;
- char *slave = NULL;
- char *op_name = NULL;
- char *op_value = NULL;
- char *t = NULL;
- gf_boolean_t banned = _gf_true;
+ char **resopt = NULL;
+ int i = 0;
+ int ret = -1;
+ char *subop = NULL;
+ char *slave = NULL;
+ char *op_name = NULL;
+ char *op_value = NULL;
+ char *t = NULL;
+ char errmsg[PATH_MAX] = "";
+ gf_boolean_t banned = _gf_true;
+ gf_boolean_t op_match = _gf_true;
+ gf_boolean_t val_match = _gf_true;
+ struct gsync_config_opt_vals_ *conf_vals = NULL;
if (dict_get_str (dict, "subop", &subop) != 0) {
gf_log ("", GF_LOG_WARNING, "missing subop");
@@ -868,6 +887,44 @@ gsync_verify_config_options (dict_t *dict, char **op_errstr, char *volname)
}
}
+ /* Check options in gsync_confopt_vals for invalid values */
+ for (conf_vals = gsync_confopt_vals; conf_vals->op_name; conf_vals++) {
+ op_match = _gf_true;
+ for (i = 0; conf_vals->op_name[i] && op_name[i]; i++) {
+ if (conf_vals->op_name[i] == op_name[i] ||
+ (conf_vals->op_name[i] == '_' && op_name[i] == '-'))
+ continue;
+ op_match = _gf_false;
+ }
+
+ if (op_match) {
+ if (!op_value)
+ goto out;
+ val_match = _gf_false;
+ for (i = 0; i < conf_vals->no_of_pos_vals; i++) {
+ if(conf_vals->case_sensitive){
+ if (!strcmp (conf_vals->values[i], op_value))
+ val_match = _gf_true;
+ } else {
+ if (!strcasecmp (conf_vals->values[i], op_value))
+ val_match = _gf_true;
+ }
+ }
+
+ if (!val_match) {
+ ret = snprintf (errmsg, sizeof(errmsg) - 1,
+ "Invalid value(%s) for"
+ " option %s", op_value,
+ op_name);
+ errmsg[ret] = '\0';
+
+ gf_log ("", GF_LOG_ERROR, "%s", errmsg);
+ *op_errstr = gf_strdup (errmsg);
+ return -1;
+ }
+ }
+ }
+out:
return 0;
}
@@ -882,9 +939,9 @@ _get_status_mst_slv (dict_t *this, char *key, data_t *value, void *data)
glusterd_gsync_status_temp_t *param = NULL;
char *slave = NULL;
char *slave_buf = NULL;
+ char *slave_ip = NULL;
char *slave_vol = NULL;
- char *tmp = NULL;
- char *save_ptr = NULL;
+ char *errmsg = NULL;
char conf_path[PATH_MAX] = "";
int ret = -1;
glusterd_conf_t *priv = NULL;
@@ -902,34 +959,26 @@ _get_status_mst_slv (dict_t *this, char *key, data_t *value, void *data)
}
slave = strchr(value->data, ':');
- if (slave) {
- slave ++;
- slave_buf = gf_strdup (slave);
- if (!slave_buf) {
- gf_log ("", GF_LOG_ERROR,
- "Failed to gf_strdup");
- ret = -1;
- goto out;
- }
- } else
+ if (!slave)
return 0;
+ slave++;
- tmp = strtok_r (slave_buf, ":", &save_ptr);
- while (tmp) {
- slave_vol = tmp;
- tmp = strtok_r (NULL, ":", &save_ptr);
- }
-
- if (!slave_vol) {
- gf_log ("", GF_LOG_ERROR, "Invalid slave name");
+ ret = glusterd_get_slave_info (slave, &slave_ip, &slave_vol, &errmsg);
+ if (ret) {
+ if (errmsg)
+ gf_log ("", GF_LOG_ERROR, "Unable to fetch "
+ "slave details. Error: %s", errmsg);
+ else
+ gf_log ("", GF_LOG_ERROR,
+ "Unable to fetch slave details.");
ret = -1;
goto out;
}
ret = snprintf (conf_path, sizeof(conf_path) - 1,
- "%s/"GEOREP"/%s-%s/gsyncd.conf",
+ "%s/"GEOREP"/%s_%s_%s/gsyncd.conf",
priv->workdir, param->volinfo->volname,
- slave_vol);
+ slave_ip, slave_vol);
conf_path[ret] = '\0';
ret = glusterd_get_gsync_status_mst_slv(param->volinfo,
@@ -938,6 +987,8 @@ _get_status_mst_slv (dict_t *this, char *key, data_t *value, void *data)
param->node);
out:
+ GF_FREE (errmsg);
+
if (slave_buf)
GF_FREE(slave_buf);
@@ -1031,7 +1082,7 @@ glusterd_gsync_get_uuid (char *slave, glusterd_volinfo_t *vol,
return ret;
}
-static int
+int
glusterd_check_gsync_running_local (char *master, char *slave,
char *conf_path,
gf_boolean_t *is_run)
@@ -1246,6 +1297,159 @@ glusterd_check_gsync_running (glusterd_volinfo_t *volinfo, gf_boolean_t *flag)
return 0;
}
+/*
+ * is_geo_rep_active:
+ * This function reads the state_file and sets is_active to 1 if the
+ * monitor status is neither "Stopped" nor "Not Started".
+ *
+ * RETURN VALUE:
+ * 0: On successful read of state_file.
+ * -1: error.
+ */
+
+static int
+is_geo_rep_active (glusterd_volinfo_t *volinfo, char *slave,
+ char *conf_path, int *is_active)
+{
+ dict_t *confd = NULL;
+ char *statefile = NULL;
+ char *master = NULL;
+ char monitor_status[PATH_MAX] = "";
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ master = volinfo->volname;
+
+ confd = dict_new ();
+ if (!confd) {
+ gf_log ("", GF_LOG_ERROR, "Not able to create dict.");
+ goto out;
+ }
+
+ ret = glusterd_gsync_get_config (master, slave, conf_path,
+ confd);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to get configuration data "
+ "for %s(master), %s(slave)", master, slave);
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_param (confd, "state_file", &statefile);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to get state_file's name "
+ "for %s(master), %s(slave). Please check gsync "
+ "config file.", master, slave);
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_gsync_read_frm_status (statefile, monitor_status,
+ sizeof (monitor_status));
+ if (ret <= 0) {
+ gf_log ("", GF_LOG_ERROR, "Unable to read the status "
+ "file for %s(master), %s(slave)", master, slave);
+ strncpy (monitor_status, "defunct", sizeof (monitor_status));
+ }
+
+ if ((!strcmp(monitor_status, "Stopped")) ||
+ (!strcmp(monitor_status, "Not Started"))) {
+ *is_active = 0;
+ } else {
+ *is_active = 1;
+ }
+ ret = 0;
+out:
+ if (confd)
+ dict_destroy (confd);
+ return ret;
+}
+
+/*
+ * _get_slave_status:
+ * Called for each slave in the volume from dict_foreach.
+ * It calls is_geo_rep_active to get the monitor status.
+ *
+ * RETURN VALUE:
+ * 0: On successful read of state_file from is_geo_rep_active.
+ * When it is found geo-rep is already active from previous calls.
+ * When there is no slave.
+ * -1: On error.
+ */
+
+int
+_get_slave_status (dict_t *dict, char *key, data_t *value, void *data)
+{
+ gsync_status_param_t *param = NULL;
+ char *slave = NULL;
+ char *slave_ip = NULL;
+ char *slave_vol = NULL;
+ char *errmsg = NULL;
+ char conf_path[PATH_MAX] = "";
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ param = (gsync_status_param_t *)data;
+
+ GF_ASSERT (param);
+ GF_ASSERT (param->volinfo);
+
+ if (param->is_active) {
+ ret = 0;
+ goto out;
+ }
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ if (this)
+ priv = this->private;
+ if (priv == NULL) {
+ gf_log ("", GF_LOG_ERROR, "priv of glusterd not present");
+ goto out;
+ }
+
+ slave = strchr(value->data, ':');
+ if (!slave) {
+ ret = 0;
+ goto out;
+ }
+ slave++;
+
+ ret = glusterd_get_slave_info (slave, &slave_ip, &slave_vol, &errmsg);
+ if (ret) {
+ if (errmsg)
+ gf_log ("", GF_LOG_ERROR, "Unable to fetch "
+ "slave details. Error: %s", errmsg);
+ else
+ gf_log ("", GF_LOG_ERROR,
+ "Unable to fetch slave details.");
+ ret = -1;
+ goto out;
+ }
+
+ ret = snprintf (conf_path, sizeof(conf_path) - 1,
+ "%s/"GEOREP"/%s_%s_%s/gsyncd.conf",
+ priv->workdir, param->volinfo->volname,
+ slave_ip, slave_vol);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "Unable to assign conf_path.");
+ ret = -1;
+ goto out;
+ }
+ conf_path[ret] = '\0';
+
+ ret = is_geo_rep_active (param->volinfo,slave, conf_path,
+ &param->is_active);
+out:
+ GF_FREE(errmsg);
+ return ret;
+}
+
static int
glusterd_op_verify_gsync_running (glusterd_volinfo_t *volinfo,
char *slave, char *conf_path,
@@ -1342,11 +1546,11 @@ glusterd_verify_gsync_status_opts (dict_t *dict, char **op_errstr)
}
ret = glusterd_get_slave_details_confpath (volinfo, dict, &slave_ip,
- &slave_vol, &conf_path);
+ &slave_vol, &conf_path,
+ op_errstr);
if (ret) {
- snprintf (errmsg, sizeof (errmsg),
- "Unable to fetch slave or confpath details.");
- gf_log ("", GF_LOG_ERROR, "%s", errmsg);
+ gf_log ("", GF_LOG_ERROR,
+ "Unable to fetch slave or confpath details.");
ret = -1;
goto out;
}
@@ -1354,7 +1558,6 @@ glusterd_verify_gsync_status_opts (dict_t *dict, char **op_errstr)
out:
gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
return ret;
-
}
@@ -1408,6 +1611,22 @@ glusterd_op_stage_sys_exec (dict_t *dict, char **op_errstr)
char command_path[PATH_MAX] = "";
struct stat st = {0,};
int ret = -1;
+ glusterd_conf_t *conf = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
+
+ if (conf->op_version < 2) {
+ gf_log ("", GF_LOG_ERROR, "Op Version not supported.");
+ snprintf (errmsg, sizeof(errmsg), "One or more nodes do not"
+ " support the required op version.");
+ *op_errstr = gf_strdup (errmsg);
+ ret = -1;
+ goto out;
+ }
ret = dict_get_str (dict, "command", &command);
if (ret) {
@@ -1466,6 +1685,15 @@ glusterd_op_stage_copy_file (dict_t *dict, char **op_errstr)
goto out;
}
+ if (priv->op_version < 2) {
+ gf_log ("", GF_LOG_ERROR, "Op Version not supported.");
+ snprintf (errmsg, sizeof(errmsg), "One or more nodes do not"
+ " support the required op version.");
+ *op_errstr = gf_strdup (errmsg);
+ ret = -1;
+ goto out;
+ }
+
ret = dict_get_str (dict, "host-uuid", &host_uuid);
if (ret < 0) {
gf_log ("", GF_LOG_ERROR, "Unable to fetch"
@@ -1509,7 +1737,7 @@ out:
return ret;
}
-static int
+int
glusterd_get_statefile_name (glusterd_volinfo_t *volinfo, char *slave,
char *conf_path, char **statefile)
{
@@ -1582,7 +1810,7 @@ glusterd_get_statefile_name (glusterd_volinfo_t *volinfo, char *slave,
}
static int
-glusterd_create_status_file (char *master, char *slave,
+glusterd_create_status_file (char *master, char *slave, char *slave_ip,
char *slave_vol, char *status)
{
int ret = -1;
@@ -1605,8 +1833,8 @@ glusterd_create_status_file (char *master, char *slave,
runinit (&runner);
runner_add_args (&runner, GSYNCD_PREFIX"/gsyncd", "--create",
status, "-c", NULL);
- runner_argprintf (&runner, "%s/"GEOREP"/%s-%s/gsyncd.conf",
- priv->workdir, master, slave_vol);
+ runner_argprintf (&runner, "%s/"GEOREP"/%s_%s_%s/gsyncd.conf",
+ priv->workdir, master, slave_ip, slave_vol);
runner_argprintf (&runner, ":%s", master);
runner_add_args (&runner, slave, NULL);
synclock_unlock (&priv->big_lock);
@@ -1626,13 +1854,15 @@ out:
static int
glusterd_verify_slave (char *volname, char *slave_ip, char *slave,
- char **op_errstr)
+ char **op_errstr, gf_boolean_t *is_force_blocker)
{
- int32_t ret = -1;
- runner_t runner = {0,};
- glusterd_conf_t *priv = NULL;
- char log_file_path[PATH_MAX] = "";
- char buf[PATH_MAX] = "";
+ int32_t ret = -1;
+ runner_t runner = {0,};
+ char log_file_path[PATH_MAX] = "";
+ char buf[PATH_MAX] = "";
+ char *tmp = NULL;
+ char *save_ptr = NULL;
+ glusterd_conf_t *priv = NULL;
GF_ASSERT (volname);
GF_ASSERT (slave_ip);
@@ -1662,12 +1892,31 @@ glusterd_verify_slave (char *volname, char *slave_ip, char *slave,
gf_log ("", GF_LOG_ERROR, "Not a valid slave");
ret = glusterd_gsync_read_frm_status (log_file_path,
buf, sizeof(buf));
- if (ret) {
+ if (ret <= 0) {
gf_log ("", GF_LOG_ERROR, "Unable to read from %s",
log_file_path);
goto out;
}
- *op_errstr = gf_strdup (buf);
+
+ /* Tokenize the error message from gverify.sh to figure out
+ * if the error is a force blocker or not. */
+ tmp = strtok_r (buf, "|", &save_ptr);
+ if (!strcmp (tmp, "FORCE_BLOCKER"))
+ *is_force_blocker = 1;
+ else {
+ /* No FORCE_BLOCKER flag present so all that is
+ * present is the error message. */
+ *is_force_blocker = 0;
+ if (tmp)
+ *op_errstr = gf_strdup (tmp);
+ ret = -1;
+ goto out;
+ }
+
+ /* Copy rest of the error message to op_errstr */
+ tmp = strtok_r (NULL, "|", &save_ptr);
+ if (tmp)
+ *op_errstr = gf_strdup (tmp);
ret = -1;
goto out;
}
@@ -1679,6 +1928,72 @@ out:
}
int
+glusterd_mountbroker_check (char **slave_ip, char **op_errstr)
+{
+ int ret = -1;
+ char *tmp = NULL;
+ char *save_ptr = NULL;
+ char *username = NULL;
+ char *host = NULL;
+ char errmsg[PATH_MAX] = "";
+
+ GF_ASSERT (slave_ip);
+ GF_ASSERT (*slave_ip);
+
+ /* Checking if hostname has user specified */
+ host = strstr (*slave_ip, "@");
+ if (!host) {
+ gf_log ("", GF_LOG_DEBUG, "No username provided.");
+ ret = 0;
+ goto out;
+ } else {
+ /* Moving the host past the '@' and checking if the
+ * actual hostname also has '@' */
+ host++;
+ if (strstr (host, "@")) {
+ gf_log ("", GF_LOG_DEBUG, "host = %s", host);
+ ret = snprintf (errmsg, sizeof(errmsg) - 1,
+ "Invalid Hostname (%s).", host);
+ errmsg[ret] = '\0';
+ gf_log ("", GF_LOG_ERROR, "%s", errmsg);
+ ret = -1;
+ if (op_errstr)
+ *op_errstr = gf_strdup (errmsg);
+ goto out;
+ }
+
+ /* Fetching the username and hostname
+ * and checking if the username is non-root */
+ username = strtok_r (*slave_ip, "@", &save_ptr);
+ tmp = strtok_r (NULL, "@", &save_ptr);
+ if (strcmp (username, "root")) {
+ ret = snprintf (errmsg, sizeof(errmsg) - 1,
+ "Non-root username (%s@%s) not allowed.",
+ username, tmp);
+ errmsg[ret] = '\0';
+ if (op_errstr)
+ *op_errstr = gf_strdup (errmsg);
+ gf_log ("", GF_LOG_ERROR,
+ "Non-Root username not allowed.");
+ ret = -1;
+ goto out;
+ }
+
+ *slave_ip = gf_strdup (tmp);
+ if (!*slave_ip) {
+ gf_log ("", GF_LOG_ERROR, "Out of memory");
+ ret = -1;
+ goto out;
+ }
+ }
+
+ ret = 0;
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int
glusterd_op_stage_gsync_create (dict_t *dict, char **op_errstr)
{
char *down_peerstr = NULL;
@@ -1691,10 +2006,12 @@ glusterd_op_stage_gsync_create (dict_t *dict, char **op_errstr)
char *conf_path = NULL;
char errmsg[PATH_MAX] = "";
char common_pem_file[PATH_MAX] = "";
+ char hook_script[PATH_MAX] = "";
char uuid_str [64] = "";
int ret = -1;
int is_pem_push = -1;
gf_boolean_t is_force = -1;
+ gf_boolean_t is_force_blocker = -1;
gf_boolean_t exists = _gf_false;
glusterd_conf_t *conf = NULL;
glusterd_volinfo_t *volinfo = NULL;
@@ -1714,6 +2031,15 @@ glusterd_op_stage_gsync_create (dict_t *dict, char **op_errstr)
return -1;
}
+ if (conf->op_version < 2) {
+ gf_log ("", GF_LOG_ERROR, "Op Version not supported.");
+ snprintf (errmsg, sizeof(errmsg), "One or more nodes do not"
+ " support the required op version.");
+ *op_errstr = gf_strdup (errmsg);
+ ret = -1;
+ goto out;
+ }
+
exists = glusterd_check_volume_exists (volname);
ret = glusterd_volinfo_find (volname, &volinfo);
if ((ret) || (!exists)) {
@@ -1726,11 +2052,11 @@ glusterd_op_stage_gsync_create (dict_t *dict, char **op_errstr)
}
ret = glusterd_get_slave_details_confpath (volinfo, dict, &slave_ip,
- &slave_vol, &conf_path);
+ &slave_vol, &conf_path,
+ op_errstr);
if (ret) {
- snprintf (errmsg, sizeof (errmsg),
- "Unable to fetch slave or confpath details.");
- gf_log ("", GF_LOG_ERROR, "%s", errmsg);
+ gf_log ("", GF_LOG_ERROR,
+ "Unable to fetch slave or confpath details.");
ret = -1;
goto out;
}
@@ -1766,19 +2092,25 @@ glusterd_op_stage_gsync_create (dict_t *dict, char **op_errstr)
volinfo->volname, slave);
}
+ /* Checking if slave host is pingable, has proper passwordless
+ * ssh login setup, slave volume is created, slave vol is empty,
+ * and if it has enough memory and bypass in case of force if
+ * the error is not a force blocker */
ret = glusterd_verify_slave (volname, slave_ip, slave_vol,
- op_errstr);
- if ((ret) && !is_force) {
- gf_log ("", GF_LOG_ERROR,
- "%s is not a valid slave volume. Error: %s",
- slave, *op_errstr);
- ret = -1;
- goto out;
- } else if (ret)
- gf_log ("", GF_LOG_INFO, "%s is not a valid slave"
- " volume. Error: %s. Force creating geo-rep"
- " session.", slave, *op_errstr);
-
+ op_errstr, &is_force_blocker);
+ if (ret) {
+ if (is_force && !is_force_blocker) {
+ gf_log ("", GF_LOG_INFO, "%s is not a valid slave"
+ " volume. Error: %s. Force creating geo-rep"
+ " session.", slave, *op_errstr);
+ } else {
+ gf_log ("", GF_LOG_ERROR,
+ "%s is not a valid slave volume. Error: %s",
+ slave, *op_errstr);
+ ret = -1;
+ goto out;
+ }
+ }
ret = dict_get_int32 (dict, "push_pem", &is_pem_push);
if (!ret && is_pem_push) {
@@ -1788,6 +2120,11 @@ glusterd_op_stage_gsync_create (dict_t *dict, char **op_errstr)
conf->workdir);
common_pem_file[ret] = '\0';
+ ret = snprintf (hook_script, sizeof(hook_script) - 1,
+ "%s"GLUSTERD_CREATE_HOOK_SCRIPT,
+ conf->workdir);
+ hook_script[ret] = '\0';
+
ret = lstat (common_pem_file, &stbuf);
if (ret) {
snprintf (errmsg, sizeof (errmsg), "%s"
@@ -1801,6 +2138,19 @@ glusterd_op_stage_gsync_create (dict_t *dict, char **op_errstr)
goto out;
}
+ ret = lstat (hook_script, &stbuf);
+ if (ret) {
+ snprintf (errmsg, sizeof (errmsg),
+ "The hook-script (%s) required "
+ "for push-pem is not present. "
+ "Please install the hook-script "
+ "and retry", hook_script);
+ gf_log ("", GF_LOG_ERROR, "%s", errmsg);
+ *op_errstr = gf_strdup (errmsg);
+ ret = -1;
+ goto out;
+ }
+
if (!S_ISREG(stbuf.st_mode)) {
snprintf (errmsg, sizeof (errmsg), "%s"
" required for push-pem is"
@@ -1823,6 +2173,7 @@ glusterd_op_stage_gsync_create (dict_t *dict, char **op_errstr)
snprintf (errmsg, sizeof (errmsg), "Please check gsync "
"config file. Unable to get statefile's name");
gf_log ("", GF_LOG_ERROR, "%s", errmsg);
+ ret = -1;
goto out;
}
@@ -1881,6 +2232,7 @@ glusterd_op_stage_gsync_set (dict_t *dict, char **op_errstr)
char errmsg[PATH_MAX] = {0,};
dict_t *ctx = NULL;
gf_boolean_t is_force = 0;
+ gf_boolean_t is_force_blocker = -1;
gf_boolean_t is_running = _gf_false;
uuid_t uuid = {0};
char uuid_str [64] = {0};
@@ -1907,10 +2259,21 @@ glusterd_op_stage_gsync_set (dict_t *dict, char **op_errstr)
}
ret = glusterd_op_gsync_args_get (dict, op_errstr,
- &volname, &slave, NULL);
+ &volname, &slave, &host_uuid);
if (ret)
goto out;
+ uuid_utoa_r (MY_UUID, uuid_str);
+
+ if (conf->op_version < 2) {
+ gf_log ("", GF_LOG_ERROR, "Op Version not supported.");
+ snprintf (errmsg, sizeof(errmsg), "One or more nodes do not"
+ " support the required op version.");
+ *op_errstr = gf_strdup (errmsg);
+ ret = -1;
+ goto out;
+ }
+
exists = glusterd_check_volume_exists (volname);
ret = glusterd_volinfo_find (volname, &volinfo);
if ((ret) || (!exists)) {
@@ -1923,17 +2286,28 @@ glusterd_op_stage_gsync_set (dict_t *dict, char **op_errstr)
}
ret = glusterd_get_slave_details_confpath (volinfo, dict, &slave_ip,
- &slave_vol, &conf_path);
+ &slave_vol, &conf_path,
+ op_errstr);
if (ret) {
- snprintf (errmsg, sizeof (errmsg),
- "Unable to fetch slave or confpath details.");
- gf_log ("", GF_LOG_ERROR, "%s", errmsg);
+ gf_log ("", GF_LOG_ERROR,
+ "Unable to fetch slave or confpath details.");
ret = -1;
goto out;
}
ret = glusterd_get_statefile_name (volinfo, slave, conf_path, &statefile);
if (ret) {
+ /* Checking if slave host is pingable, has proper passwordless
+ * ssh login setup */
+ ret = glusterd_verify_slave (volname, slave_ip, slave_vol,
+ op_errstr, &is_force_blocker);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "%s is not a valid slave volume. Error: %s",
+ slave, *op_errstr);
+ goto out;
+ }
+
if (!strstr(slave, "::"))
snprintf (errmsg, sizeof (errmsg),
"%s is not a valid slave url.", slave);
@@ -1952,7 +2326,13 @@ glusterd_op_stage_gsync_set (dict_t *dict, char **op_errstr)
goto out;
}
+ is_force = dict_get_str_boolean (dict, "force", _gf_false);
+
+ /* Allowing stop force to bypass the statefile check
+ * as this command acts as a fail safe method to stop geo-rep
+ * session. */
if ((type == GF_GSYNC_OPTION_TYPE_CONFIG) ||
+ ((type == GF_GSYNC_OPTION_TYPE_STOP) && !is_force) ||
(type == GF_GSYNC_OPTION_TYPE_DELETE)) {
ret = lstat (statefile, &stbuf);
if (ret) {
@@ -1967,16 +2347,9 @@ glusterd_op_stage_gsync_set (dict_t *dict, char **op_errstr)
}
}
- is_force = dict_get_str_boolean (dict, "force", _gf_false);
-
/* Check if all peers that are a part of the volume are up or not */
if ((type == GF_GSYNC_OPTION_TYPE_DELETE) ||
((type == GF_GSYNC_OPTION_TYPE_STOP) && !is_force)) {
- ret = dict_get_str (dict, "host-uuid", &host_uuid);
- if (ret < 0)
- goto out;
-
- uuid_utoa_r (MY_UUID, uuid_str);
if (!strcmp (uuid_str, host_uuid)) {
ret = glusterd_are_vol_all_peers_up (volinfo,
&conf->peers,
@@ -2158,10 +2531,89 @@ out:
return ret;
}
-static int
-glusterd_check_restart_gsync_session (glusterd_volinfo_t *volinfo, char *slave,
- dict_t *resp_dict, char *path_list,
- char *conf_path, gf_boolean_t is_force);
+/*
+ * glusterd_gsync_op_already_set:
+ * This function checks whether the op_value is the same as in the
+ * gsyncd.conf file.
+ *
+ * RETURN VALUE:
+ * 0 : op_value matches the conf file.
+ * 1 : op_value does not match the conf file or op_param not
+ * found in conf file.
+ * -1 : error
+ */
+
+int
+glusterd_gsync_op_already_set (char* master, char* slave, char* conf_path,
+ char* op_name, char* op_value)
+{
+ dict_t *confd = NULL;
+ char *op_val_buf = NULL;
+ int32_t op_val_conf = 0;
+ int32_t op_val_cli = 0;
+ int32_t ret = -1;
+ gf_boolean_t is_bool = _gf_true;
+
+ confd = dict_new ();
+ if (!confd) {
+ gf_log ("", GF_LOG_ERROR, "Not able to create dict.");
+ return -1;
+ }
+
+ ret = glusterd_gsync_get_config (master, slave, conf_path,
+ confd);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to get configuration data"
+ "for %s(master), %s(slave)", master, slave);
+ goto out;
+ }
+
+ ret = dict_get_param (confd, op_name, &op_val_buf);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to get op_value "
+ "for %s(master), %s(slave). Please check gsync "
+ "config file.", master, slave);
+ ret = 1;
+ goto out;
+ }
+
+ gf_log("",GF_LOG_DEBUG, "val_cli:%s val_conf:%s",op_value,op_val_buf);
+
+ if (!strcmp(op_val_buf,"true") || !strcmp(op_val_buf,"1")
+ || !strcmp(op_val_buf,"yes")) {
+ op_val_conf = 1;
+ } else if(!strcmp(op_val_buf,"false") || !strcmp(op_val_buf,"0")
+ || !strcmp(op_val_buf,"no")) {
+ op_val_conf = 0;
+ } else {
+ is_bool = _gf_false;
+ }
+
+ if (is_bool) {
+ if (!strcmp(op_value,"true") || !strcmp(op_value,"1")
+ || !strcmp(op_value,"yes")) {
+ op_val_cli = 1;
+ } else {
+ op_val_cli = 0;
+ }
+
+ if ( op_val_cli == op_val_conf ) {
+ ret = 0;
+ goto out;
+ }
+ } else {
+ if (!strcmp(op_val_buf,op_value)) {
+ ret = 0;
+ goto out;
+ }
+ }
+
+ ret = 1;
+
+out:
+ dict_unref(confd);
+ return ret;
+}
static int
glusterd_gsync_configure (glusterd_volinfo_t *volinfo, char *slave,
@@ -2176,8 +2628,12 @@ glusterd_gsync_configure (glusterd_volinfo_t *volinfo, char *slave,
char *subop = NULL;
char *master = NULL;
char *conf_path = NULL;
+ char *slave_ip = NULL;
char *slave_vol = NULL;
struct stat stbuf = {0, };
+ gf_boolean_t restart_required = _gf_true;
+ char **resopt = NULL;
+ gf_boolean_t op_already_set = _gf_false;
GF_ASSERT (slave);
GF_ASSERT (op_errstr);
@@ -2232,6 +2688,24 @@ glusterd_gsync_configure (glusterd_volinfo_t *volinfo, char *slave,
runner_add_arg (&runner, op_name);
if (op_value)
runner_add_arg (&runner, op_value);
+
+ if ( strcmp(op_name,"checkpoint") != 0 ) {
+ ret = glusterd_gsync_op_already_set(master,slave,conf_path,
+ op_name,op_value);
+ if (ret == -1) {
+ gf_log ("", GF_LOG_WARNING,
+ "glusterd_gsync_op_already_set failed.");
+ gf_asprintf (op_errstr, GEOREP" config-%s failed for "
+ "%s %s", subop, master, slave);
+ goto out;
+ }
+ if (ret == 0) {
+ gf_log("", GF_LOG_DEBUG, "op_value is already set");
+ op_already_set = _gf_true;
+ goto out;
+ }
+ }
+
synclock_unlock (&priv->big_lock);
ret = runner_run (&runner);
synclock_lock (&priv->big_lock);
@@ -2246,10 +2720,17 @@ glusterd_gsync_configure (glusterd_volinfo_t *volinfo, char *slave,
goto out;
}
- if (!strcmp (op_name, "state_file")) {
+ if ((!strcmp (op_name, "state_file")) && (op_value)) {
ret = lstat (op_value, &stbuf);
if (ret) {
+ ret = dict_get_str (dict, "slave_ip", &slave_ip);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Unable to fetch slave IP.");
+ goto out;
+ }
+
ret = dict_get_str (dict, "slave_vol", &slave_vol);
if (ret) {
gf_log ("", GF_LOG_ERROR,
@@ -2258,7 +2739,7 @@ glusterd_gsync_configure (glusterd_volinfo_t *volinfo, char *slave,
}
ret = glusterd_create_status_file (volinfo->volname, slave,
- slave_vol,
+ slave_ip, slave_vol,
"Switching Status File");
if (ret || lstat (op_value, &stbuf)) {
gf_log ("", GF_LOG_ERROR, "Unable to create %s"
@@ -2274,19 +2755,29 @@ glusterd_gsync_configure (glusterd_volinfo_t *volinfo, char *slave,
gf_asprintf (op_errstr, "config-%s successful", subop);
out:
- if (!ret && volinfo) {
+ if (!ret && volinfo && !op_already_set) {
+ for (resopt = gsync_no_restart_opts; *resopt; resopt++) {
+ restart_required = _gf_true;
+ if (!strcmp ((*resopt), op_name)){
+ restart_required = _gf_false;
+ break;
+ }
+ }
+
+ if (restart_required) {
ret = glusterd_check_restart_gsync_session (volinfo, slave,
resp_dict, path_list,
conf_path, 0);
if (ret)
- *op_errstr = gf_strdup ("internal error");
+ *op_errstr = gf_strdup ("internal error");
+ }
}
gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
-static int
+int
glusterd_gsync_read_frm_status (char *path, char *buf, size_t blen)
{
int ret = 0;
@@ -2310,7 +2801,6 @@ glusterd_gsync_read_frm_status (char *path, char *buf, size_t blen)
char *p = buf + len - 1;
while (isspace (*p))
*p-- = '\0';
- ret = 0;
}
} else if (ret < 0)
gf_log ("", GF_LOG_ERROR, "Status file of gsyncd is corrupt");
@@ -2320,20 +2810,146 @@ glusterd_gsync_read_frm_status (char *path, char *buf, size_t blen)
}
static int
-glusterd_gsync_fetch_status_extra (char *path, char *buf, size_t blen)
+dict_get_param (dict_t *dict, char *key, char **param)
+{
+ char *dk = NULL;
+ char *s = NULL;
+ char x = '\0';
+ int ret = 0;
+
+ if (dict_get_str (dict, key, param) == 0)
+ return 0;
+
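+        /* Key not found verbatim: retry with '-' and '_' swapped, so a
+         * lookup for e.g. "state_file" also matches "state-file". */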
+        dk = gf_strdup (key);
+        if (!dk)
+                return -1;
+
+        s = strpbrk (dk, "-_");
+        if (!s) {
+                GF_FREE (dk);
+                return -1;
+        }
+ x = (*s == '-') ? '_' : '-';
+ *s++ = x;
+ while ((s = strpbrk (s, "-_")))
+ *s++ = x;
+
+ ret = dict_get_str (dict, dk, param);
+
+ GF_FREE (dk);
+ return ret;
+}
+
+static int
+glusterd_parse_gsync_status (char *buf, gf_gsync_status_t *sts_val)
+{
+ int ret = -1;
+ int i = -1;
+ int num_of_fields = 8;
+ char *token = NULL;
+ char **tokens = NULL;
+ char **ptr = NULL;
+ char *save_ptr = NULL;
+ char na_buf[] = "N/A";
+
+ if (!buf) {
+ gf_log ("", GF_LOG_ERROR, "Empty buf");
+ goto out;
+ }
+
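+        /* The raw status line is expected to carry eight comma-separated
+         * "key: value" fields; the order assumed below is: slave node,
+         * files synced, purges remaining, files skipped, files
+         * remaining, worker status, bytes remaining, crawl status. */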
+ tokens = calloc (num_of_fields, sizeof (char *));
+ if (!tokens) {
+ gf_log ("", GF_LOG_ERROR, "Out of memory");
+ goto out;
+ }
+
+ ptr = tokens;
+
+ for (token = strtok_r (buf, ",", &save_ptr); token;
+ token = strtok_r (NULL, ",", &save_ptr)) {
+ *ptr = gf_strdup(token);
+ if (!*ptr) {
+ gf_log ("", GF_LOG_ERROR, "Out of memory");
+ goto out;
+ }
+ ptr++;
+ }
+
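+        /* Split each "key: value" field on ':'; fields 0, 5 and 7 carry
+         * extra decoration in the raw line and are trimmed below. */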
+ for (i = 0; i < num_of_fields; i++) {
+                token = strtok_r (tokens[i], ":", &save_ptr);
+                token = strtok_r (NULL, "\0", &save_ptr);
+                if (token)
+                        token++; /* skip the space following the ':' */
+
+                /* substitute "N/A" for a missing field; fields 0, 5 and
+                 * 7 are special-cased below as they need trimming too */
+                if (!token && (i != 0) &&
+                    (i != 5) && (i != 7))
+                        token = na_buf;
+
+ if (i == 0) {
+ if (!token)
+ token = na_buf;
+ else {
+ token++;
+ if (!token)
+ token = na_buf;
+ else
+ token[strlen(token) - 1] = '\0';
+ }
+ memcpy (sts_val->slave_node, token, strlen(token));
+ }
+ if (i == 1)
+ memcpy (sts_val->files_syncd, token, strlen(token));
+ if (i == 2)
+ memcpy (sts_val->purges_remaining, token, strlen(token));
+ if (i == 3)
+ memcpy (sts_val->total_files_skipped, token, strlen(token));
+ if (i == 4)
+ memcpy (sts_val->files_remaining, token, strlen(token));
+ if (i == 5) {
+ if (!token)
+ token = na_buf;
+ else {
+ token++;
+ if (!token)
+ token = na_buf;
+ else
+ token[strlen(token) - 1] = '\0';
+ }
+ memcpy (sts_val->worker_status, token, strlen(token));
+ }
+ if (i == 6)
+ memcpy (sts_val->bytes_remaining, token, strlen(token));
+ if (i == 7) {
+ if (!token)
+ token = na_buf;
+ else {
+ token++;
+ if (!token)
+ token = na_buf;
+ else
+ token[strlen(token) - 2] = '\0';
+ }
+ memcpy (sts_val->crawl_status, token, strlen(token));
+ }
+ }
+
+ ret = 0;
+out:
+ for (i = 0; i< num_of_fields; i++)
+ if (tokens[i])
+ GF_FREE(tokens[i]);
+
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+static int
+glusterd_gsync_fetch_status_extra (char *path, gf_gsync_status_t *sts_val)
{
char sockpath[PATH_MAX] = {0,};
struct sockaddr_un sa = {0,};
- size_t l = 0;
int s = -1;
struct pollfd pfd = {0,};
int ret = 0;
- l = strlen (buf);
- /* seek to end of data in buf */
- buf += l;
- blen -= l;
-
glusterd_set_socket_filepath (path, sockpath, sizeof (sockpath));
strncpy(sa.sun_path, sockpath, sizeof(sa.sun_path));
@@ -2361,66 +2977,40 @@ glusterd_gsync_fetch_status_extra (char *path, char *buf, size_t blen)
ret = -1;
goto out;
}
- ret = read(s, buf, blen);
+ ret = read(s, sts_val->checkpoint_status,
+ sizeof(sts_val->checkpoint_status));
/* we expect a terminating 0 byte */
- if (ret == 0 || (ret > 0 && buf[ret - 1]))
+ if (ret == 0 || (ret > 0 && sts_val->checkpoint_status[ret - 1]))
ret = -1;
- if (ret > 0)
+ if (ret > 0) {
ret = 0;
+ }
- out:
+out:
close (s);
return ret;
}
-static int
-dict_get_param (dict_t *dict, char *key, char **param)
-{
- char *dk = NULL;
- char *s = NULL;
- char x = '\0';
- int ret = 0;
-
- if (dict_get_str (dict, key, param) == 0)
- return 0;
-
- dk = gf_strdup (key);
- if (!key)
- return -1;
-
- s = strpbrk (dk, "-_");
- if (!s)
- return -1;
- x = (*s == '-') ? '_' : '-';
- *s++ = x;
- while ((s = strpbrk (s, "-_")))
- *s++ = x;
-
- ret = dict_get_str (dict, dk, param);
-
- GF_FREE (dk);
- return ret;
-}
-
-static int
+int
glusterd_read_status_file (glusterd_volinfo_t *volinfo, char *slave,
char *conf_path, dict_t *dict, char *node)
{
- glusterd_conf_t *priv = NULL;
- int ret = 0;
- char *statefile = NULL;
- char *master = NULL;
- char buf[1024] = "defunct";
- char nds[1024] = {0, };
- char mst[1024] = {0, };
- char slv[1024] = {0, };
- char sts[1024] = {0, };
- char *bufp = NULL;
- dict_t *confd = NULL;
- int gsync_count = 0;
- int status = 0;
- char *dyn_node = NULL;
- char *path_list = NULL;
+ char brick_state_file[PATH_MAX] = "";
+ char brick_path[PATH_MAX] = "";
+ char *georep_session_wrkng_dir = NULL;
+ char *master = NULL;
+ char tmp[1024] = "";
+ char sts_val_name[1024] = "";
+ char monitor_status[NAME_MAX] = "";
+ char *statefile = NULL;
+ char *socketfile = NULL;
+ dict_t *confd = NULL;
+ int gsync_count = 0;
+ int i = 0;
+ int ret = 0;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ gf_gsync_status_t *sts_val = NULL;
+ glusterd_conf_t *priv = NULL;
GF_ASSERT (THIS);
GF_ASSERT (THIS->private);
@@ -2441,7 +3031,7 @@ glusterd_read_status_file (glusterd_volinfo_t *volinfo, char *slave,
if (ret) {
gf_log ("", GF_LOG_ERROR, "Unable to get configuration data"
"for %s(master), %s(slave)", master, slave);
- goto done;
+ goto out;
}
@@ -2450,126 +3040,174 @@ glusterd_read_status_file (glusterd_volinfo_t *volinfo, char *slave,
gf_log ("", GF_LOG_ERROR, "Unable to get state_file's name "
"for %s(master), %s(slave). Please check gsync "
"config file.", master, slave);
- goto done;
+ goto out;
}
- ret = glusterd_gsync_read_frm_status (statefile, buf, sizeof (buf));
- if (ret) {
+
+ ret = glusterd_gsync_read_frm_status (statefile, monitor_status,
+ sizeof (monitor_status));
+ if (ret <= 0) {
gf_log ("", GF_LOG_ERROR, "Unable to read the status"
"file for %s(master), %s(slave)", master, slave);
- strncpy (buf, "defunct", sizeof (buf));
- goto done;
- }
-
- ret = gsync_status (master, slave, conf_path, &status);
- if (ret == 0 && status == -1) {
- if ((strcmp (buf, "Not Started")) &&
- (strcmp (buf, "Stopped")))
- strncpy (buf, "defunct", sizeof (buf));
- goto done;
- } else if (ret == -1) {
- gf_log ("", GF_LOG_ERROR, "Unable to get gsync status");
- goto done;
+ strncpy (monitor_status, "defunct", sizeof (monitor_status));
}
- if (strcmp (buf, "Stable") != 0)
- goto done;
-
- ret = dict_get_param (confd, "state_socket_unencoded", &statefile);
+ ret = dict_get_param (confd, "georep_session_working_dir",
+ &georep_session_wrkng_dir);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to get state_socket_unencoded"
- " filepath. Please check gsync config file.");
- goto done;
+ gf_log ("", GF_LOG_ERROR, "Unable to get geo-rep session's "
+ "working directory name for %s(master), %s(slave). "
+ "Please check gsync config file.", master, slave);
+ goto out;
}
- ret = glusterd_gsync_fetch_status_extra (statefile, buf, sizeof (buf));
+
+ ret = dict_get_param (confd, "state_socket_unencoded", &socketfile);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to fetch extra status"
- "for %s(master), %s(slave)", master, slave);
- /* there is a slight chance that this occurs due to race
- * -- in that case, the following options all seem bad:
- *
- * - suppress irregurlar behavior by just leaving status
- * on "OK"
- * - freak out users with a misleading "defunct"
- * - overload the meaning of the regular error signal
- * mechanism of gsyncd, that is, when status is "faulty"
- *
- * -- so we just come up with something new...
- */
- strncpy (buf, "N/A", sizeof (buf));
- goto done;
+ gf_log ("", GF_LOG_ERROR, "Unable to get socket file's name "
+ "for %s(master), %s(slave). Please check gsync "
+ "config file.", master, slave);
+ goto out;
}
- done:
- if ((!strcmp (buf, "defunct")) ||
- (!strcmp (buf, "Not Started")) ||
- (!strcmp (buf, "Stopped"))) {
- ret = glusterd_get_local_brickpaths (volinfo, &path_list);
- if (!path_list) {
- gf_log ("", GF_LOG_DEBUG, "This node not being part of"
- " volume should not be running gsyncd. Hence"
- " shouldn't display status for this node.");
- ret = 0;
+ ret = dict_get_int32 (dict, "gsync-count", &gsync_count);
+ if (ret)
+ gsync_count = 0;
+
+ list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
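+                /* report status only for bricks hosted on this node */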
+ if (uuid_compare (brickinfo->uuid, MY_UUID))
+ continue;
+
+ sts_val = GF_CALLOC (1, sizeof(gf_gsync_status_t),
+ gf_common_mt_gsync_status_t);
+ if (!sts_val) {
+ gf_log ("", GF_LOG_ERROR, "Out Of Memory");
goto out;
}
- }
- ret = dict_get_int32 (dict, "gsync-count", &gsync_count);
+ /* Creating the brick state file's path */
+ memset(brick_state_file, '\0', PATH_MAX);
+ memcpy (brick_path, brickinfo->path, PATH_MAX - 1);
+ for (i = 0; i < strlen(brick_path) - 1; i++)
+ if (brick_path[i] == '/')
+ brick_path[i] = '_';
+ ret = snprintf(brick_state_file, PATH_MAX - 1, "%s%s.status",
+ georep_session_wrkng_dir, brick_path);
+ brick_state_file[ret] = '\0';
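+                /* e.g. a brick path /rhs/brick1 maps (assumed layout) to
+                 * <georep_session_wrkng_dir>_rhs_brick1.status */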
+
+ gf_log ("", GF_LOG_DEBUG, "brick_state_file = %s", brick_state_file);
+
+ memset (tmp, '\0', sizeof(tmp));
+
+ ret = glusterd_gsync_read_frm_status (brick_state_file,
+ tmp, sizeof (tmp));
+ if (ret <= 0) {
+ gf_log ("", GF_LOG_ERROR, "Unable to read the status"
+ "file for %s brick for %s(master), %s(slave) "
+ "session", brickinfo->path, master, slave);
+ memcpy (sts_val->slave_node, slave, strlen(slave));
+ sts_val->slave_node[strlen(slave)] = '\0';
+ ret = snprintf (sts_val->worker_status, sizeof(sts_val->worker_status), "N/A");
+ sts_val->worker_status[ret] = '\0';
+ ret = snprintf (sts_val->checkpoint_status, sizeof(sts_val->checkpoint_status), "N/A");
+ sts_val->checkpoint_status[ret] = '\0';
+ ret = snprintf (sts_val->crawl_status, sizeof(sts_val->crawl_status), "N/A");
+ sts_val->crawl_status[ret] = '\0';
+ ret = snprintf (sts_val->files_syncd, sizeof(sts_val->files_syncd), "N/A");
+ sts_val->files_syncd[ret] = '\0';
+ ret = snprintf (sts_val->purges_remaining, sizeof(sts_val->purges_remaining), "N/A");
+ sts_val->purges_remaining[ret] = '\0';
+ ret = snprintf (sts_val->total_files_skipped, sizeof(sts_val->total_files_skipped), "N/A");
+ sts_val->total_files_skipped[ret] = '\0';
+ ret = snprintf (sts_val->files_remaining, sizeof(sts_val->files_remaining), "N/A");
+ sts_val->files_remaining[ret] = '\0';
+ ret = snprintf (sts_val->bytes_remaining, sizeof(sts_val->bytes_remaining), "N/A");
+ sts_val->bytes_remaining[ret] = '\0';
+ goto store_status;
+ }
- if (ret)
- gsync_count = 1;
- else
- gsync_count++;
+ ret = glusterd_gsync_fetch_status_extra (socketfile, sts_val);
+ if (ret || strlen(sts_val->checkpoint_status) == 0) {
+ gf_log ("", GF_LOG_DEBUG, "No checkpoint status"
+ "for %s(master), %s(slave)", master, slave);
+ ret = snprintf (sts_val->checkpoint_status, sizeof(sts_val->checkpoint_status), "N/A");
+ sts_val->checkpoint_status[ret] = '\0';
+ }
- (void) snprintf (nds, sizeof (nds), "node%d", gsync_count);
- dyn_node = gf_strdup (node);
- if (!dyn_node)
- goto out;
- ret = dict_set_dynstr (dict, nds, dyn_node);
- if (ret) {
- GF_FREE (dyn_node);
- goto out;
- }
+ ret = glusterd_parse_gsync_status (tmp, sts_val);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Unable to parse the gsync status for %s",
+ brickinfo->path);
+ memcpy (sts_val->slave_node, slave, strlen(slave));
+ sts_val->slave_node[strlen(slave)] = '\0';
+ ret = snprintf (sts_val->worker_status, sizeof(sts_val->worker_status), "N/A");
+ sts_val->worker_status[ret] = '\0';
+ ret = snprintf (sts_val->checkpoint_status, sizeof(sts_val->checkpoint_status), "N/A");
+ sts_val->checkpoint_status[ret] = '\0';
+ ret = snprintf (sts_val->crawl_status, sizeof(sts_val->crawl_status), "N/A");
+ sts_val->crawl_status[ret] = '\0';
+ ret = snprintf (sts_val->files_syncd, sizeof(sts_val->files_syncd), "N/A");
+ sts_val->files_syncd[ret] = '\0';
+ ret = snprintf (sts_val->purges_remaining, sizeof(sts_val->purges_remaining), "N/A");
+ sts_val->purges_remaining[ret] = '\0';
+ ret = snprintf (sts_val->total_files_skipped, sizeof(sts_val->total_files_skipped), "N/A");
+ sts_val->total_files_skipped[ret] = '\0';
+ ret = snprintf (sts_val->files_remaining, sizeof(sts_val->files_remaining), "N/A");
+ sts_val->files_remaining[ret] = '\0';
+ ret = snprintf (sts_val->bytes_remaining, sizeof(sts_val->bytes_remaining), "N/A");
+ sts_val->bytes_remaining[ret] = '\0';
+ }
- snprintf (mst, sizeof (mst), "master%d", gsync_count);
- master = gf_strdup (master);
- if (!master)
- goto out;
- ret = dict_set_dynstr (dict, mst, master);
- if (ret) {
- GF_FREE (master);
- goto out;
- }
+store_status:
+ if ((strcmp (monitor_status, "Stable"))) {
+ memcpy (sts_val->worker_status, monitor_status, strlen(monitor_status));
+ sts_val->worker_status[strlen(monitor_status)] = '\0';
+ ret = snprintf (sts_val->crawl_status, sizeof(sts_val->crawl_status), "N/A");
+ sts_val->crawl_status[ret] = '\0';
+ ret = snprintf (sts_val->checkpoint_status, sizeof(sts_val->checkpoint_status), "N/A");
+ sts_val->checkpoint_status[ret] = '\0';
+ }
- snprintf (slv, sizeof (slv), "slave%d", gsync_count);
- slave = gf_strdup (slave);
- if (!slave)
- goto out;
- ret = dict_set_dynstr (dict, slv, slave);
- if (ret) {
- GF_FREE (slave);
- goto out;
- }
+ if (strcmp (sts_val->worker_status, "Active")) {
+ ret = snprintf (sts_val->checkpoint_status, sizeof(sts_val->checkpoint_status), "N/A");
+ sts_val->checkpoint_status[ret] = '\0';
+ ret = snprintf (sts_val->crawl_status, sizeof(sts_val->crawl_status), "N/A");
+ sts_val->crawl_status[ret] = '\0';
+ }
- snprintf (sts, sizeof (slv), "status%d", gsync_count);
- bufp = gf_strdup (buf);
- if (!bufp)
- goto out;
- ret = dict_set_dynstr (dict, sts, bufp);
- if (ret) {
- GF_FREE (bufp);
- goto out;
+ if (!strcmp (sts_val->slave_node, "N/A")) {
+ memcpy (sts_val->slave_node, slave, strlen(slave));
+ sts_val->slave_node[strlen(slave)] = '\0';
+ }
+
+ memcpy (sts_val->node, node, strlen(node));
+ sts_val->node[strlen(node)] = '\0';
+ memcpy (sts_val->brick, brickinfo->path, strlen(brickinfo->path));
+ sts_val->brick[strlen(brickinfo->path)] = '\0';
+ memcpy (sts_val->master, master, strlen(master));
+ sts_val->master[strlen(master)] = '\0';
+
+ snprintf (sts_val_name, sizeof (sts_val_name), "status_value%d", gsync_count);
+ ret = dict_set_bin (dict, sts_val_name, sts_val, sizeof(gf_gsync_status_t));
+ if (ret) {
+ GF_FREE (sts_val);
+ goto out;
+ }
+
+ gsync_count++;
+ sts_val = NULL;
}
+
ret = dict_set_int32 (dict, "gsync-count", gsync_count);
if (ret)
goto out;
- out:
+out:
dict_destroy (confd);
return 0;
}
-static int
+int
glusterd_check_restart_gsync_session (glusterd_volinfo_t *volinfo, char *slave,
dict_t *resp_dict, char *path_list,
char *conf_path, gf_boolean_t is_force)
@@ -2679,6 +3317,15 @@ glusterd_set_gsync_confs (glusterd_volinfo_t *volinfo)
if (ret)
goto out;
+ /**
+ * enable ignore-pid-check blindly as it could be needed for
+ * cascading setups.
+ */
+ ret = glusterd_set_gsync_knob (volinfo, VKEY_MARKER_XTIME_FORCE,
+ &volfile_changed);
+ if (ret)
+ goto out;
+
ret = glusterd_set_gsync_knob (volinfo,
VKEY_CHANGELOG, &volfile_changed);
if (ret)
@@ -2846,7 +3493,7 @@ glusterd_get_gsync_status (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
}
static int
-glusterd_gsync_delete (glusterd_volinfo_t *volinfo, char *slave,
+glusterd_gsync_delete (glusterd_volinfo_t *volinfo, char *slave, char *slave_ip,
char *slave_vol, char *path_list, dict_t *dict,
dict_t *resp_dict, char **op_errstr)
{
@@ -2859,6 +3506,7 @@ glusterd_gsync_delete (glusterd_volinfo_t *volinfo, char *slave,
char *conf_path = NULL;
GF_ASSERT (slave);
+ GF_ASSERT (slave_ip);
GF_ASSERT (slave_vol);
GF_ASSERT (op_errstr);
GF_ASSERT (dict);
@@ -2908,8 +3556,8 @@ glusterd_gsync_delete (glusterd_volinfo_t *volinfo, char *slave,
}
ret = snprintf (geo_rep_dir, sizeof(geo_rep_dir) - 1,
- "%s/"GEOREP"/%s-%s", gl_workdir,
- volinfo->volname, slave_vol);
+ "%s/"GEOREP"/%s_%s_%s", gl_workdir,
+ volinfo->volname, slave_ip, slave_vol);
geo_rep_dir[ret] = '\0';
ret = rmdir (geo_rep_dir);
@@ -3016,30 +3664,32 @@ glusterd_op_sys_exec (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
goto out;
}
- ptr = fgets(buf, sizeof(buf), runner_chio (&runner, STDOUT_FILENO));
- if (ptr) {
- ret = dict_get_int32 (rsp_dict, "output_count", &output_count);
- if (ret)
- output_count = 1;
- else
- output_count++;
- memset (output_name, '\0', sizeof (output_name));
- snprintf (output_name, sizeof (output_name),
- "output_%d", output_count);
- if (buf[strlen(buf) - 1] == '\n')
- buf[strlen(buf) - 1] = '\0';
- bufp = gf_strdup (buf);
- if (!bufp)
- gf_log ("", GF_LOG_ERROR, "gf_strdup failed.");
- ret = dict_set_dynstr (rsp_dict, output_name, bufp);
- if (ret) {
- GF_FREE (bufp);
- gf_log ("", GF_LOG_ERROR, "output set failed.");
+ do {
+ ptr = fgets(buf, sizeof(buf), runner_chio (&runner, STDOUT_FILENO));
+ if (ptr) {
+ ret = dict_get_int32 (rsp_dict, "output_count", &output_count);
+ if (ret)
+ output_count = 1;
+ else
+ output_count++;
+ memset (output_name, '\0', sizeof (output_name));
+ snprintf (output_name, sizeof (output_name),
+ "output_%d", output_count);
+ if (buf[strlen(buf) - 1] == '\n')
+ buf[strlen(buf) - 1] = '\0';
+ bufp = gf_strdup (buf);
+ if (!bufp)
+ gf_log ("", GF_LOG_ERROR, "gf_strdup failed.");
+ ret = dict_set_dynstr (rsp_dict, output_name, bufp);
+ if (ret) {
+ GF_FREE (bufp);
+ gf_log ("", GF_LOG_ERROR, "output set failed.");
+ }
+ ret = dict_set_int32 (rsp_dict, "output_count", output_count);
+ if (ret)
+ gf_log ("", GF_LOG_ERROR, "output_count set failed.");
}
- ret = dict_set_int32 (rsp_dict, "output_count", output_count);
- if (ret)
- gf_log ("", GF_LOG_ERROR, "output_count set failed.");
- }
+ } while (ptr);
ret = runner_end (&runner);
if (ret) {
@@ -3250,10 +3900,9 @@ glusterd_op_gsync_set (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
{
int32_t ret = -1;
int32_t type = -1;
- dict_t *ctx = NULL;
- dict_t *resp_dict = NULL;
char *host_uuid = NULL;
char *slave = NULL;
+ char *slave_ip = NULL;
char *slave_vol = NULL;
char *volname = NULL;
char *path_list = NULL;
@@ -3268,6 +3917,7 @@ glusterd_op_gsync_set (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
GF_ASSERT (THIS->private);
GF_ASSERT (dict);
GF_ASSERT (op_errstr);
+ GF_ASSERT (rsp_dict);
priv = THIS->private;
@@ -3279,12 +3929,8 @@ glusterd_op_gsync_set (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
if (ret < 0)
goto out;
- ctx = glusterd_op_get_ctx ();
- resp_dict = ctx ? ctx : rsp_dict;
- GF_ASSERT (resp_dict);
-
if (type == GF_GSYNC_OPTION_TYPE_STATUS) {
- ret = glusterd_get_gsync_status (dict, op_errstr, resp_dict);
+ ret = glusterd_get_gsync_status (dict, op_errstr, rsp_dict);
goto out;
}
@@ -3292,6 +3938,12 @@ glusterd_op_gsync_set (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
if (ret < 0)
goto out;
+ ret = dict_get_str (dict, "slave_ip", &slave_ip);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to fetch slave volume name.");
+ goto out;
+ }
+
ret = dict_get_str (dict, "slave_vol", &slave_vol);
if (ret) {
gf_log ("", GF_LOG_ERROR, "Unable to fetch slave volume name.");
@@ -3308,8 +3960,8 @@ glusterd_op_gsync_set (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
if (dict_get_str (dict, "master", &volname) == 0) {
ret = glusterd_volinfo_find (volname, &volinfo);
if (ret) {
- gf_log ("", GF_LOG_WARNING, "Volinfo for %s (master) not found",
- volname);
+ gf_log ("", GF_LOG_WARNING, "Volinfo for %s (master)"
+ " not found", volname);
goto out;
}
@@ -3318,7 +3970,15 @@ glusterd_op_gsync_set (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
if (type == GF_GSYNC_OPTION_TYPE_CONFIG) {
ret = glusterd_gsync_configure (volinfo, slave, path_list,
- dict, resp_dict, op_errstr);
+ dict, rsp_dict, op_errstr);
+ if (!ret) {
+ ret = dict_set_str (rsp_dict, "conf_path", conf_path);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Unable to store conf_file_path.");
+ goto out;
+ }
+ }
goto out;
}
@@ -3327,9 +3987,9 @@ glusterd_op_gsync_set (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
if (ret && !is_force && path_list)
goto out;
- ret = glusterd_gsync_delete (volinfo, slave, slave_vol,
- path_list, dict, resp_dict,
- op_errstr);
+ ret = glusterd_gsync_delete (volinfo, slave, slave_ip,
+ slave_vol, path_list, dict,
+ rsp_dict, op_errstr);
goto out;
}
@@ -3344,8 +4004,9 @@ glusterd_op_gsync_set (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
ret = glusterd_set_gsync_confs (volinfo);
if (ret != 0) {
- gf_log ("", GF_LOG_WARNING, "marker/changelog start failed");
- *op_errstr = gf_strdup ("failed to initialize indexing");
+ gf_log ("", GF_LOG_WARNING, "marker/changelog"
+ " start failed");
+ *op_errstr = gf_strdup ("Index initialization failed");
ret = -1;
goto out;
}
@@ -3366,16 +4027,18 @@ glusterd_op_gsync_set (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
goto out;
}
- ret = stop_gsync (volname, slave, &status_msg, conf_path, is_force);
+ ret = stop_gsync (volname, slave, &status_msg,
+ conf_path, is_force);
if (ret == 0 && status_msg)
- ret = dict_set_str (resp_dict, "gsync-status",
+ ret = dict_set_str (rsp_dict, "gsync-status",
status_msg);
if (ret != 0 && !is_force && path_list)
*op_errstr = gf_strdup ("internal error");
if (!ret) {
ret = glusterd_create_status_file (volinfo->volname,
- slave, slave_vol, "Stopped");
+ slave, slave_ip,
+ slave_vol,"Stopped");
if (ret) {
gf_log ("", GF_LOG_ERROR, "Unable to update"
"state_file. Error : %s",
@@ -3394,20 +4057,28 @@ out:
return ret;
}
-static int
+int
glusterd_get_slave_details_confpath (glusterd_volinfo_t *volinfo, dict_t *dict,
char **slave_ip, char **slave_vol,
- char **conf_path)
+ char **conf_path, char **op_errstr)
{
int ret = -1;
char confpath[PATH_MAX] = "";
glusterd_conf_t *priv = NULL;
+ char *slave = NULL;
GF_ASSERT (THIS);
priv = THIS->private;
GF_ASSERT (priv);
- ret = glusterd_get_slave_info (dict, slave_ip, slave_vol);
+ ret = dict_get_str (dict, "slave", &slave);
+ if (ret || !slave) {
+ gf_log ("", GF_LOG_ERROR, "Unable to fetch slave from dict");
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_get_slave_info (slave, slave_ip, slave_vol, op_errstr);
if (ret) {
gf_log ("", GF_LOG_ERROR,
"Unable to fetch slave details.");
@@ -3415,9 +4086,24 @@ glusterd_get_slave_details_confpath (glusterd_volinfo_t *volinfo, dict_t *dict,
goto out;
}
+ ret = dict_set_str (dict, "slave_ip", *slave_ip);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Unable to store slave IP.");
+ goto out;
+ }
+
+ ret = dict_set_str (dict, "slave_vol", *slave_vol);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Unable to store slave volume name.");
+ goto out;
+ }
+
ret = snprintf (confpath, sizeof(confpath) - 1,
- "%s/"GEOREP"/%s-%s/gsyncd.conf",
- priv->workdir, volinfo->volname, *slave_vol);
+ "%s/"GEOREP"/%s_%s_%s/gsyncd.conf",
+ priv->workdir, volinfo->volname,
+ *slave_ip, *slave_vol);
confpath[ret] = '\0';
*conf_path = gf_strdup (confpath);
if (!(*conf_path)) {
@@ -3440,75 +4126,67 @@ out:
}
-static int
-glusterd_get_slave_info (dict_t *dict, char **slave_ip, char **slave_vol)
+int
+glusterd_get_slave_info (char *slave, char **slave_ip,
+ char **slave_vol, char **op_errstr)
{
- char *tmp = NULL;
- char *save_ptr = NULL;
- char *slave = NULL;
- char **linearr = NULL;
- int32_t ret = -1;
-
- GF_ASSERT (dict);
+ char *tmp = NULL;
+ char *save_ptr = NULL;
+ char **linearr = NULL;
+ int32_t ret = -1;
+ char errmsg[PATH_MAX] = "";
+
+ ret = glusterd_urltransform_single (slave, "normalize",
+ &linearr);
+ if (ret == -1) {
+ ret = snprintf (errmsg, sizeof(errmsg) - 1,
+ "Invalid Url: %s", slave);
+ errmsg[ret] = '\0';
+ *op_errstr = gf_strdup (errmsg);
+ gf_log ("", GF_LOG_ERROR, "Failed to normalize url");
+ goto out;
+ }
- ret = dict_get_str (dict, "slave", &slave);
+ tmp = strtok_r (linearr[0], "/", &save_ptr);
+ tmp = strtok_r (NULL, "/", &save_ptr);
+ slave = strtok_r (tmp, ":", &save_ptr);
if (slave) {
- ret = glusterd_urltransform_single (slave, "normalize",
- &linearr);
- if (ret == -1) {
- gf_log ("", GF_LOG_ERROR, "Failed to normalize url");
- goto out;
- }
-
- tmp = strtok_r (linearr[0], "/", &save_ptr);
- tmp = strtok_r (NULL, "/", &save_ptr);
- slave = strtok_r (tmp, ":", &save_ptr);
- if (slave) {
- *slave_ip = gf_strdup (slave);
- if (!*slave_ip) {
- gf_log ("", GF_LOG_ERROR,
- "Failed to gf_strdup");
- ret = -1;
- goto out;
- }
- gf_log ("", GF_LOG_DEBUG, "Slave IP : %s", *slave_ip);
- ret = 0;
- } else {
- gf_log ("", GF_LOG_ERROR, "Invalid slave name");
- goto out;
- }
-
- slave = strtok_r (NULL, ":", &save_ptr);
- if (slave) {
- *slave_vol = gf_strdup (slave);
- if (!*slave_vol) {
- gf_log ("", GF_LOG_ERROR,
- "Failed to gf_strdup");
- ret = -1;
- goto out;
- }
- gf_log ("", GF_LOG_DEBUG, "Slave Vol : %s", *slave_vol);
- ret = 0;
- } else {
- gf_log ("", GF_LOG_ERROR, "Invalid slave name");
+ ret = glusterd_mountbroker_check (&slave, op_errstr);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Invalid slave url: %s", *op_errstr);
goto out;
}
- ret = dict_set_str (dict, "slave_ip", *slave_ip);
- if (ret) {
+ *slave_ip = gf_strdup (slave);
+ if (!*slave_ip) {
gf_log ("", GF_LOG_ERROR,
- "Unable to store slave IP.");
+ "Failed to gf_strdup");
+ ret = -1;
goto out;
}
+ gf_log ("", GF_LOG_DEBUG, "Slave IP : %s", *slave_ip);
+ ret = 0;
+ } else {
+ gf_log ("", GF_LOG_ERROR, "Invalid slave name");
+ goto out;
+ }
- ret = dict_set_str (dict, "slave_vol", *slave_vol);
- if (ret) {
+ slave = strtok_r (NULL, ":", &save_ptr);
+ if (slave) {
+ *slave_vol = gf_strdup (slave);
+ if (!*slave_vol) {
gf_log ("", GF_LOG_ERROR,
- "Unable to store slave volume name.");
+ "Failed to gf_strdup");
+ ret = -1;
goto out;
}
- } else
- gf_log ("", GF_LOG_ERROR, "Unable to fetch slave from dict");
+ gf_log ("", GF_LOG_DEBUG, "Slave Vol : %s", *slave_vol);
+ ret = 0;
+ } else {
+ gf_log ("", GF_LOG_ERROR, "Invalid slave name");
+ goto out;
+ }
out:
gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
@@ -3628,7 +4306,7 @@ create_conf_file (glusterd_conf_t *conf, char *conf_path)
/* gluster-params */
runinit_gsyncd_setrx (&runner, conf_path);
runner_add_args (&runner, "gluster-params",
- "aux-gfid-mount xlator-option=*-dht.assert-no-child-down=true",
+ "aux-gfid-mount",
".", ".", NULL);
RUN_GSYNCD_CMD;
@@ -3642,31 +4320,48 @@ create_conf_file (glusterd_conf_t *conf, char *conf_path)
runner_add_args (&runner, ".", ".", NULL);
RUN_GSYNCD_CMD;
+ /* ssh-command tar */
+ runinit_gsyncd_setrx (&runner, conf_path);
+ runner_add_arg (&runner, "ssh-command-tar");
+ runner_argprintf (&runner,
+ "ssh -oPasswordAuthentication=no "
+ "-oStrictHostKeyChecking=no "
+ "-i %s/tar_ssh.pem", georepdir);
+ runner_add_args (&runner, ".", ".", NULL);
+ RUN_GSYNCD_CMD;
+
/* pid-file */
runinit_gsyncd_setrx (&runner, conf_path);
runner_add_arg (&runner, "pid-file");
- runner_argprintf (&runner, "%s/${mastervol}-${slavevol}/${eSlave}.pid", georepdir);
+ runner_argprintf (&runner, "%s/${mastervol}_${remotehost}_${slavevol}/${eSlave}.pid", georepdir);
+ runner_add_args (&runner, ".", ".", NULL);
+ RUN_GSYNCD_CMD;
+
+ /* geo-rep-working-dir */
+ runinit_gsyncd_setrx (&runner, conf_path);
+ runner_add_arg (&runner, "georep-session-working-dir");
+ runner_argprintf (&runner, "%s/${mastervol}_${remotehost}_${slavevol}/", georepdir);
runner_add_args (&runner, ".", ".", NULL);
RUN_GSYNCD_CMD;
/* state-file */
runinit_gsyncd_setrx (&runner, conf_path);
runner_add_arg (&runner, "state-file");
- runner_argprintf (&runner, "%s/${mastervol}-${slavevol}/${eSlave}.status", georepdir);
+ runner_argprintf (&runner, "%s/${mastervol}_${remotehost}_${slavevol}/${eSlave}.status", georepdir);
runner_add_args (&runner, ".", ".", NULL);
RUN_GSYNCD_CMD;
/* state-detail-file */
runinit_gsyncd_setrx (&runner, conf_path);
runner_add_arg (&runner, "state-detail-file");
- runner_argprintf (&runner, "%s/${mastervol}-${slavevol}/${eSlave}-detail.status", georepdir);
+ runner_argprintf (&runner, "%s/${mastervol}_${remotehost}_${slavevol}/${eSlave}-detail.status", georepdir);
runner_add_args (&runner, ".", ".", NULL);
RUN_GSYNCD_CMD;
/* state-socket */
runinit_gsyncd_setrx (&runner, conf_path);
runner_add_arg (&runner, "state-socket-unencoded");
- runner_argprintf (&runner, "%s/${mastervol}-${slavevol}/${eSlave}.socket", georepdir);
+ runner_argprintf (&runner, "%s/${mastervol}_${remotehost}_${slavevol}/${eSlave}.socket", georepdir);
runner_add_args (&runner, ".", ".", NULL);
RUN_GSYNCD_CMD;
@@ -3726,7 +4421,7 @@ create_conf_file (glusterd_conf_t *conf, char *conf_path)
/* gluster-params */
runinit_gsyncd_setrx (&runner, conf_path);
runner_add_args (&runner, "gluster-params",
- "aux-gfid-mount xlator-option=*-dht.assert-no-child-down=true",
+ "aux-gfid-mount",
".", NULL);
RUN_GSYNCD_CMD;
@@ -3759,7 +4454,9 @@ create_conf_file (glusterd_conf_t *conf, char *conf_path)
}
static int
-glusterd_create_essential_dir_files (glusterd_volinfo_t *volinfo, dict_t *dict, char *slave, char *slave_vol, char **op_errstr)
+glusterd_create_essential_dir_files (glusterd_volinfo_t *volinfo, dict_t *dict,
+ char *slave, char *slave_ip,
+ char *slave_vol, char **op_errstr)
{
int ret = -1;
char *conf_path = NULL;
@@ -3788,8 +4485,8 @@ glusterd_create_essential_dir_files (glusterd_volinfo_t *volinfo, dict_t *dict,
goto out;
}
- ret = snprintf (buf, sizeof(buf) - 1, "%s/"GEOREP"/%s-%s",
- conf->workdir, volinfo->volname, slave_vol);
+ ret = snprintf (buf, sizeof(buf) - 1, "%s/"GEOREP"/%s_%s_%s",
+ conf->workdir, volinfo->volname, slave_ip, slave_vol);
buf[ret] = '\0';
ret = mkdir_p (buf, 0777, _gf_true);
if (ret) {
@@ -3832,7 +4529,8 @@ glusterd_create_essential_dir_files (glusterd_volinfo_t *volinfo, dict_t *dict,
" Not creating status file again.");
goto out;
} else {
- ret = glusterd_create_status_file (volinfo->volname, slave, slave_vol,
+ ret = glusterd_create_status_file (volinfo->volname, slave,
+ slave_ip, slave_vol,
"Not Started");
if (ret || lstat (statefile, &stbuf)) {
snprintf (errmsg, sizeof (errmsg), "Unable to create %s"
@@ -3897,24 +4595,19 @@ glusterd_op_gsync_create (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
goto out;
}
+ ret = dict_get_str (dict, "slave_ip", &slave_ip);
+ if (ret) {
+ snprintf (errmsg, sizeof (errmsg),
+ "Unable to fetch slave IP.");
+ gf_log ("", GF_LOG_ERROR, "%s", errmsg);
+ ret = -1;
+ goto out;
+ }
+
is_force = dict_get_str_boolean (dict, "force", _gf_false);
uuid_utoa_r (MY_UUID, uuid_str);
if (!strcmp (uuid_str, host_uuid)) {
- ret = dict_get_str (dict, "slave_ip", &slave_ip);
- if (ret) {
- snprintf (errmsg, sizeof (errmsg),
- "Unable to fetch slave IP."
- " No Hooks Arguments.");
- gf_log ("", GF_LOG_ERROR, "%s", errmsg);
- if (is_force) {
- ret = 0;
- goto create_essentials;
- }
- ret = -1;
- goto out;
- }
-
ret = dict_get_int32 (dict, "push_pem", &is_pem_push);
if (!ret && is_pem_push) {
gf_log ("", GF_LOG_DEBUG, "Trying to setup"
@@ -3956,7 +4649,9 @@ glusterd_op_gsync_create (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
create_essentials:
- ret = glusterd_create_essential_dir_files (volinfo, dict, slave, slave_vol, op_errstr);
+ ret = glusterd_create_essential_dir_files (volinfo, dict, slave,
+ slave_ip, slave_vol,
+ op_errstr);
if (ret)
goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index a74c52303..e0373c774 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -33,6 +33,7 @@
#include "glusterd-op-sm.h"
#include "glusterd-utils.h"
#include "glusterd-store.h"
+#include "glusterd-locks.h"
#include "glusterd1-xdr.h"
#include "cli1-xdr.h"
@@ -49,11 +50,14 @@
#include "globals.h"
#include "glusterd-syncop.h"
+#include "glusterd-etcd.h"
#ifdef HAVE_BD_XLATOR
#include <lvm2app.h>
#endif
+extern glusterd_op_info_t opinfo;
+
int glusterd_big_locked_notify (struct rpc_clnt *rpc, void *mydata,
rpc_clnt_event_t event,
void *data, rpc_clnt_notify_t notify_fn)
@@ -307,10 +311,23 @@ _build_option_key (dict_t *d, char *k, data_t *v, void *tmp)
char reconfig_key[256] = {0, };
struct args_pack *pack = NULL;
int ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
pack = tmp;
if (strcmp (k, GLUSTERD_GLOBAL_OPT_VERSION) == 0)
return 0;
+
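+        /* On clusters above the minimum op-version the quota limit
+         * keys are maintained separately (via quota.conf), so they are
+         * filtered out of the volume-option listing here. */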
+ if (priv->op_version > GD_OP_VERSION_MIN) {
+ if ((strcmp (k, "features.limit-usage") == 0) ||
+ (strcmp (k, "features.soft-limit") == 0))
+ return 0;
+ }
snprintf (reconfig_key, 256, "volume%d.option.%s",
pack->vol_count, k);
ret = dict_set_str (pack->dict, reconfig_key, v->data);
@@ -335,7 +352,7 @@ glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
char *volume_id_str = NULL;
struct args_pack pack = {0,};
xlator_t *this = NULL;
-
+ GF_UNUSED int caps = 0;
GF_ASSERT (volinfo);
GF_ASSERT (volumes);
@@ -360,6 +377,21 @@ glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
if (ret)
goto out;
+        /* As of now, snap volumes are also displayed as part of the
+           volume info command, so this key records whether the volume
+           is a regular volume or a snap volume. If snap volumes need
+           not be displayed in the volume info output, this key should
+           be removed.
+         */
+ snprintf (key, 256, "volume%d.snap_volume", count);
+ ret = dict_set_int32 (volumes, key, volinfo->is_snap_volume);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "failed to set whether "
+ "the volume is a snap volume or actual volume (%s)",
+ volinfo->volname);
+ goto out;
+ }
+
snprintf (key, 256, "volume%d.brick_count", count);
ret = dict_set_int32 (volumes, key, volinfo->brick_count);
if (ret)
@@ -400,14 +432,92 @@ glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
goto out;
#ifdef HAVE_BD_XLATOR
- snprintf (key, 256, "volume%d.backend", count);
- ret = dict_set_int32 (volumes, key, volinfo->backend);
- if (ret)
- goto out;
+ if (volinfo->caps) {
+ caps = 0;
+ snprintf (key, 256, "volume%d.xlator0", count);
+ buf = GF_MALLOC (256, gf_common_mt_char);
+ if (!buf) {
+ ret = ENOMEM;
+ goto out;
+ }
+ if (volinfo->caps & CAPS_BD)
+ snprintf (buf, 256, "BD");
+ ret = dict_set_dynstr (volumes, key, buf);
+ if (ret) {
+ GF_FREE (buf);
+ goto out;
+ }
+
+ if (volinfo->caps & CAPS_THIN) {
+ snprintf (key, 256, "volume%d.xlator0.caps%d", count,
+ caps++);
+ buf = GF_MALLOC (256, gf_common_mt_char);
+ if (!buf) {
+ ret = ENOMEM;
+ goto out;
+ }
+ snprintf (buf, 256, "thin");
+ ret = dict_set_dynstr (volumes, key, buf);
+ if (ret) {
+ GF_FREE (buf);
+ goto out;
+ }
+ }
+
+ if (volinfo->caps & CAPS_OFFLOAD_COPY) {
+ snprintf (key, 256, "volume%d.xlator0.caps%d", count,
+ caps++);
+ buf = GF_MALLOC (256, gf_common_mt_char);
+ if (!buf) {
+ ret = ENOMEM;
+ goto out;
+ }
+ snprintf (buf, 256, "offload_copy");
+ ret = dict_set_dynstr (volumes, key, buf);
+ if (ret) {
+ GF_FREE (buf);
+ goto out;
+ }
+ }
+
+ if (volinfo->caps & CAPS_OFFLOAD_SNAPSHOT) {
+ snprintf (key, 256, "volume%d.xlator0.caps%d", count,
+ caps++);
+ buf = GF_MALLOC (256, gf_common_mt_char);
+ if (!buf) {
+ ret = ENOMEM;
+ goto out;
+ }
+ snprintf (buf, 256, "offload_snapshot");
+ ret = dict_set_dynstr (volumes, key, buf);
+ if (ret) {
+ GF_FREE (buf);
+ goto out;
+ }
+ }
+
+ if (volinfo->caps & CAPS_OFFLOAD_ZERO) {
+ snprintf (key, 256, "volume%d.xlator0.caps%d", count,
+ caps++);
+ buf = GF_MALLOC (256, gf_common_mt_char);
+ if (!buf) {
+ ret = ENOMEM;
+ goto out;
+ }
+ snprintf (buf, 256, "offload_zerofill");
+ ret = dict_set_dynstr (volumes, key, buf);
+ if (ret) {
+ GF_FREE (buf);
+ goto out;
+ }
+ }
+
+ }
#endif
list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
char brick[1024] = {0,};
+ char brick_uuid[64] = {0,};
snprintf (key, 256, "volume%d.brick%d", count, i);
snprintf (brick, 1024, "%s:%s", brickinfo->hostname,
brickinfo->path);
@@ -415,6 +525,25 @@ glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
ret = dict_set_dynstr (volumes, key, buf);
if (ret)
goto out;
+ snprintf (key, 256, "volume%d.brick%d.uuid", count, i);
+ snprintf (brick_uuid, 64, "%s", uuid_utoa (brickinfo->uuid));
+                buf = gf_strdup (brick_uuid);
+                if (!buf) {
+                        ret = -1;
+                        goto out;
+                }
+ ret = dict_set_dynstr (volumes, key, buf);
+ if (ret)
+ goto out;
+
+#ifdef HAVE_BD_XLATOR
+ if (volinfo->caps & CAPS_BD) {
+ snprintf (key, 256, "volume%d.vg%d", count, i);
+ snprintf (brick, 1024, "%s", brickinfo->vg);
+                        buf = gf_strdup (brick);
+                        if (!buf) {
+                                ret = -1;
+                                goto out;
+                        }
+                        ret = dict_set_dynstr (volumes, key, buf);
+ if (ret)
+ goto out;
+ }
+#endif
i++;
}
@@ -478,10 +607,16 @@ int32_t
glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
char *err_str, size_t err_len)
{
- int32_t ret = -1;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
- int32_t locked = 0;
+ int32_t ret = -1;
+ dict_t *dict = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ int32_t locked = 0;
+ char *tmp = NULL;
+ char *volname = NULL;
+ uuid_t *txn_id = NULL;
+ glusterd_op_info_t txn_op_info = {{0},};
+ glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
GF_ASSERT (req);
GF_ASSERT ((op > GD_OP_NONE) && (op < GD_OP_MAX));
@@ -492,33 +627,122 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
priv = this->private;
GF_ASSERT (priv);
- ret = glusterd_lock (MY_UUID);
+ dict = ctx;
+
+ /* Generate a transaction-id for this operation and
+ * save it in the dict. This transaction id distinguishes
+ * each transaction, and helps separate opinfos in the
+ * op state machine. */
+ ret = glusterd_generate_txn_id (dict, &txn_id);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to generate transaction id");
+ goto out;
+ }
+
+ /* Save the MY_UUID as the originator_uuid. This originator_uuid
+ * will be used by is_origin_glusterd() to determine if a node
+ * is the originator node for a command. */
+ ret = glusterd_set_originator_uuid (dict);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
- "Unable to acquire lock on localhost, ret: %d", ret);
- snprintf (err_str, err_len, "Another transaction is in progress. "
- "Please try again after sometime.");
+ "Failed to set originator_uuid.");
goto out;
}
+ /* Based on the op_version, acquire a cluster or mgmt_v3 lock */
+ if (priv->op_version < GD_OP_VERSION_4) {
+ ret = glusterd_lock (MY_UUID);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to acquire lock on localhost, ret: %d",
+ ret);
+ snprintf (err_str, err_len,
+ "Another transaction is in progress. "
+ "Please try again after sometime.");
+ goto out;
+ }
+ } else {
+ /* If no volname is given as a part of the command, locks will
+ * not be held */
+ ret = dict_get_str (dict, "volname", &tmp);
+ if (ret) {
+ gf_log ("", GF_LOG_INFO,
+ "No Volume name present. "
+ "Locks not being held.");
+ goto local_locking_done;
+ } else {
+ /* Use a copy of volname, as cli response will be
+ * sent before the unlock, and the volname in the
+ * dict, might be removed */
+ volname = gf_strdup (tmp);
+ if (!volname)
+ goto out;
+ }
+
+ ret = glusterd_mgmt_v3_lock (volname, MY_UUID, "vol");
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to acquire lock for %s", volname);
+ snprintf (err_str, err_len,
+ "Another transaction is in progress for %s. "
+ "Please try again after sometime.", volname);
+ goto out;
+ }
+ }
+
locked = 1;
gf_log (this->name, GF_LOG_DEBUG, "Acquired lock on localhost");
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_START_LOCK, NULL);
+local_locking_done:
+
+ /* If no volname is given as a part of the command, locks will
+ * not be held, hence sending stage event. */
+ if (volname || (priv->op_version < GD_OP_VERSION_4))
+ event_type = GD_OP_EVENT_START_LOCK;
+ else {
+ txn_op_info.state.state = GD_OP_STATE_LOCK_SENT;
+ event_type = GD_OP_EVENT_ALL_ACC;
+ }
+
+ /* Save opinfo for this transaction with the transaction id */
+ glusterd_txn_opinfo_init (&txn_op_info, NULL, &op, ctx, req);
+
+ ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set transaction's opinfo");
+ if (ctx)
+ dict_unref (ctx);
+ goto out;
+ }
+
+ ret = glusterd_op_sm_inject_event (event_type, txn_id, ctx);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Failed to acquire cluster"
" lock.");
goto out;
}
- glusterd_op_set_op (op);
- glusterd_op_set_ctx (ctx);
- glusterd_op_set_req (req);
-
-
out:
- if (locked && ret)
- glusterd_unlock (MY_UUID);
+ if (locked && ret) {
+ /* Based on the op-version, we release the
+ * cluster or mgmt_v3 lock */
+ if (priv->op_version < GD_OP_VERSION_4)
+ glusterd_unlock (MY_UUID);
+ else {
+ ret = glusterd_mgmt_v3_unlock (volname, MY_UUID,
+ "vol");
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to release lock for %s",
+ volname);
+ ret = -1;
+ }
+ }
+
+ if (volname)
+ GF_FREE (volname);
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
@@ -527,16 +751,25 @@ out:
int
__glusterd_handle_cluster_lock (rpcsvc_request_t *req)
{
- gd1_mgmt_cluster_lock_req lock_req = {{0},};
- int32_t ret = -1;
- glusterd_op_lock_ctx_t *ctx = NULL;
- glusterd_peerinfo_t *peerinfo = NULL;
- xlator_t *this = NULL;
+ dict_t *op_ctx = NULL;
+ int32_t ret = -1;
+ gd1_mgmt_cluster_lock_req lock_req = {{0},};
+ glusterd_op_lock_ctx_t *ctx = NULL;
+ glusterd_op_t op = GD_OP_EVENT_LOCK;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_op_info_t txn_op_info = {{0},};
+ glusterd_conf_t *priv = NULL;
+ uuid_t *txn_id = NULL;
+ xlator_t *this = NULL;
this = THIS;
GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
GF_ASSERT (req);
+ txn_id = &priv->global_txn_id;
+
ret = xdr_to_generic (req->msg[0], &lock_req,
(xdrproc_t)xdr_gd1_mgmt_cluster_lock_req);
if (ret < 0) {
@@ -566,8 +799,29 @@ __glusterd_handle_cluster_lock (rpcsvc_request_t *req)
uuid_copy (ctx->uuid, lock_req.uuid);
ctx->req = req;
+ ctx->dict = NULL;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_LOCK, ctx);
+ op_ctx = dict_new ();
+ if (!op_ctx) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set new dict");
+ goto out;
+ }
+
+ glusterd_txn_opinfo_init (&txn_op_info, NULL, &op, op_ctx, req);
+
+ ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set transaction's opinfo");
+ dict_unref (txn_op_info.op_ctx);
+ goto out;
+ }
+
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_LOCK, txn_id, ctx);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to inject event GD_OP_EVENT_LOCK");
out:
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
@@ -643,11 +897,19 @@ __glusterd_handle_stage_op (rpcsvc_request_t *req)
gd1_mgmt_stage_op_req op_req = {{0},};
glusterd_peerinfo_t *peerinfo = NULL;
xlator_t *this = NULL;
+ uuid_t *txn_id = NULL;
+ glusterd_op_info_t txn_op_info = {{0},};
+ glusterd_op_sm_state_info_t state = {0,};
+ glusterd_conf_t *priv = NULL;
this = THIS;
GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
GF_ASSERT (req);
+ txn_id = &priv->global_txn_id;
+
ret = xdr_to_generic (req->msg[0], &op_req,
(xdrproc_t)xdr_gd1_mgmt_stage_op_req);
if (ret < 0) {
@@ -671,7 +933,36 @@ __glusterd_handle_stage_op (rpcsvc_request_t *req)
if (ret)
goto out;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_STAGE_OP, req_ctx);
+ ret = dict_get_bin (req_ctx->dict, "transaction_id", (void **)&txn_id);
+
+ gf_log ("", GF_LOG_DEBUG, "transaction ID = %s", uuid_utoa (*txn_id));
+
+ /* In cases where there is no volname, the receivers won't have a
+ * transaction opinfo created, as for those operations, the locking
+ * phase where the transaction opinfos are created, won't be called. */
+ ret = glusterd_get_txn_opinfo (txn_id, &txn_op_info);
+ if (ret) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "No transaction's opinfo set");
+
+ state.state = GD_OP_STATE_LOCKED;
+ glusterd_txn_opinfo_init (&txn_op_info, &state,
+ &op_req.op, req_ctx->dict, req);
+
+ ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set transaction's opinfo");
+ dict_unref (req_ctx->dict);
+ goto out;
+ }
+ }
+
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_STAGE_OP,
+ txn_id, req_ctx);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to inject event GD_OP_EVENT_STAGE_OP");
out:
free (op_req.buf.buf_val);//malloced by xdr
@@ -695,11 +986,17 @@ __glusterd_handle_commit_op (rpcsvc_request_t *req)
gd1_mgmt_commit_op_req op_req = {{0},};
glusterd_peerinfo_t *peerinfo = NULL;
xlator_t *this = NULL;
+ uuid_t *txn_id = NULL;
+ glusterd_conf_t *priv = NULL;
this = THIS;
GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
GF_ASSERT (req);
+ txn_id = &priv->global_txn_id;
+
ret = xdr_to_generic (req->msg[0], &op_req,
(xdrproc_t)xdr_gd1_mgmt_commit_op_req);
if (ret < 0) {
@@ -725,11 +1022,12 @@ __glusterd_handle_commit_op (rpcsvc_request_t *req)
if (ret)
goto out;
- ret = glusterd_op_init_ctx (op_req.op);
- if (ret)
- goto out;
+ ret = dict_get_bin (req_ctx->dict, "transaction_id", (void **)&txn_id);
+
+ gf_log ("", GF_LOG_DEBUG, "transaction ID = %s", uuid_utoa (*txn_id));
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_OP, req_ctx);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_OP,
+ txn_id, req_ctx);
out:
free (op_req.buf.buf_val);//malloced by xdr
@@ -870,6 +1168,8 @@ __glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
char *hostname = NULL;
int port = 0;
int flags = 0;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_volinfo_t *tmp = NULL;
this = THIS;
GF_ASSERT (this);
@@ -910,7 +1210,6 @@ __glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
gf_log (this->name, GF_LOG_ERROR, "Failed to get port");
goto out;
}
-
ret = dict_get_int32 (dict, "flags", &flags);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Failed to get flags");
@@ -930,22 +1229,30 @@ __glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
}
if (!(flags & GF_CLI_FLAG_OP_FORCE)) {
- if (!uuid_is_null (uuid)) {
- /* Check if peers are connected, except peer being detached*/
- if (!glusterd_chk_peers_connected_befriended (uuid)) {
- ret = -1;
- op_errno = GF_DEPROBE_FRIEND_DOWN;
- goto out;
- }
- ret = glusterd_all_volume_cond_check (
- glusterd_friend_brick_belongs,
- -1, &uuid);
- if (ret) {
- op_errno = GF_DEPROBE_BRICK_EXIST;
- goto out;
- }
+ /* Check if peers are connected, except peer being
+ * detached*/
+ if (!glusterd_chk_peers_connected_befriended (uuid)) {
+ ret = -1;
+ op_errno = GF_DEPROBE_FRIEND_DOWN;
+ goto out;
}
+ }
+ /* Check for if volumes exist with some bricks on the peer being
+ * detached. It's not a problem if a volume contains none or all
+ * of its bricks on the peer being detached
+ */
+ list_for_each_entry_safe (volinfo, tmp, &priv->volumes,
+ vol_list) {
+ ret = glusterd_friend_contains_vol_bricks (volinfo,
+ uuid);
+ if (ret == 1) {
+ op_errno = GF_DEPROBE_BRICK_EXIST;
+ goto out;
+ }
+ }
+
+ if (!(flags & GF_CLI_FLAG_OP_FORCE)) {
if (glusterd_is_any_volume_in_server_quorum (this) &&
!does_gd_meet_server_quorum (this)) {
gf_log (this->name, GF_LOG_ERROR, "Quorum does not "
@@ -1098,79 +1405,6 @@ glusterd_handle_cli_get_volume (rpcsvc_request_t *req)
__glusterd_handle_cli_get_volume);
}
-#ifdef HAVE_BD_XLATOR
-int
-__glusterd_handle_cli_bd_op (rpcsvc_request_t *req)
-{
- int32_t ret = -1;
- gf_cli_req cli_req = { {0,} };
- dict_t *dict = NULL;
- char *volname = NULL;
- char op_errstr[2048] = {0,};
- glusterd_op_t cli_op = GD_OP_BD_OP;
-
- GF_ASSERT (req);
-
- ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
- if (ret < 0) {
- /* failed to decode msg */
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- gf_log ("glusterd", GF_LOG_DEBUG, "Received bd op req");
-
- if (cli_req.dict.dict_len) {
- /* Unserialize the dictionary */
- dict = dict_new ();
-
- ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_log ("glusterd", GF_LOG_ERROR,
- "failed to "
- "unserialize req-buffer to dictionary");
- goto out;
- } else {
- dict->extra_stdfree = cli_req.dict.dict_val;
- }
- }
-
- ret = dict_get_str (dict, "volname", &volname);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "failed to get volname");
- goto out;
- }
-
- ret = glusterd_op_begin (req, GD_OP_BD_OP, dict, op_errstr,
- sizeof (op_errstr));
-out:
- if (ret && dict)
- dict_unref (dict);
-
- glusterd_friend_sm ();
- glusterd_op_sm ();
-
- if (ret) {
- if (op_errstr[0] == '\0')
- snprintf (op_errstr, sizeof (op_errstr),
- "Operation failed");
- ret = glusterd_op_send_cli_response (cli_op, ret, 0,
- req, NULL, op_errstr);
- }
-
- return ret;
-}
-
-int
-glusterd_handle_cli_bd_op (rpcsvc_request_t *req)
-{
- return glusterd_big_locked_handler (req, __glusterd_handle_cli_bd_op);
-}
-#endif
-
int
__glusterd_handle_cli_uuid_reset (rpcsvc_request_t *req)
{
@@ -1853,6 +2087,56 @@ glusterd_op_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
}
int
+glusterd_op_mgmt_v3_lock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
+ int32_t status)
+{
+
+ gd1_mgmt_v3_lock_rsp rsp = {{0},};
+ int ret = -1;
+
+ GF_ASSERT (req);
+ GF_ASSERT (txn_id);
+ glusterd_get_uuid (&rsp.uuid);
+ rsp.op_ret = status;
+ if (rsp.op_ret)
+ rsp.op_errno = errno;
+ uuid_copy (rsp.txn_id, *txn_id);
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
+
+ gf_log (THIS->name, GF_LOG_DEBUG, "Responded to mgmt_v3 lock, ret: %d",
+ ret);
+
+ return ret;
+}
+
+int
+glusterd_op_mgmt_v3_unlock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
+ int32_t status)
+{
+
+ gd1_mgmt_v3_unlock_rsp rsp = {{0},};
+ int ret = -1;
+
+ GF_ASSERT (req);
+ GF_ASSERT (txn_id);
+ rsp.op_ret = status;
+ if (rsp.op_ret)
+ rsp.op_errno = errno;
+ glusterd_get_uuid (&rsp.uuid);
+ uuid_copy (rsp.txn_id, *txn_id);
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
+
+ gf_log (THIS->name, GF_LOG_DEBUG, "Responded to mgmt_v3 unlock, ret: %d",
+ ret);
+
+ return ret;
+}
+
+int
__glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
{
gd1_mgmt_cluster_unlock_req unlock_req = {{0}, };
@@ -1860,11 +2144,17 @@ __glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
glusterd_op_lock_ctx_t *ctx = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
xlator_t *this = NULL;
+ uuid_t *txn_id = NULL;
+ glusterd_conf_t *priv = NULL;
this = THIS;
GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
GF_ASSERT (req);
+ txn_id = &priv->global_txn_id;
+
ret = xdr_to_generic (req->msg[0], &unlock_req,
(xdrproc_t)xdr_gd1_mgmt_cluster_unlock_req);
if (ret < 0) {
@@ -1894,8 +2184,9 @@ __glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
}
uuid_copy (ctx->uuid, unlock_req.uuid);
ctx->req = req;
+ ctx->dict = NULL;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_UNLOCK, ctx);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_UNLOCK, txn_id, ctx);
out:
glusterd_friend_sm ();
@@ -2341,7 +2632,18 @@ __glusterd_handle_probe_query (rpcsvc_request_t *req)
gf_log ("", GF_LOG_ERROR, "Failed to add peer %s",
remote_hostname);
rsp.op_errno = GF_PROBE_ADD_FAILED;
+ goto respond;
}
+ gf_log (THIS->name, GF_LOG_INFO,
+ "joining, should point etcd at %s", remote_hostname);
+ /*
+ * We should have started a standalone etcd before. Now we
+ * need a new one, with a new config.
+ */
+ stop_etcd(conf->etcd_pid);
+ nuke_etcd_dir();
+ conf->etcd_pid = start_etcd (uuid_utoa(MY_UUID),
+ remote_hostname);
}
respond:
@@ -2486,8 +2788,10 @@ __glusterd_handle_mount (rpcsvc_request_t *req)
gf1_cli_mount_rsp rsp = {0,};
dict_t *dict = NULL;
int ret = 0;
+ glusterd_conf_t *priv = NULL;
GF_ASSERT (req);
+ priv = THIS->private;
ret = xdr_to_generic (req->msg[0], &mnt_req,
(xdrproc_t)xdr_gf1_cli_mount_req);
@@ -2520,8 +2824,10 @@ __glusterd_handle_mount (rpcsvc_request_t *req)
}
}
+ synclock_unlock (&priv->big_lock);
rsp.op_ret = glusterd_do_mount (mnt_req.label, dict,
&rsp.path, &rsp.op_errno);
+ synclock_lock (&priv->big_lock);
out:
if (!rsp.path)
@@ -3045,9 +3351,13 @@ glusterd_xfer_friend_add_resp (rpcsvc_request_t *req, char *myhostname,
}
static void
-get_probe_error_str (int op_ret, int op_errno, char *errstr, size_t len,
- char *hostname, int port)
+set_probe_error_str (int op_ret, int op_errno, char *op_errstr, char *errstr,
+ size_t len, char *hostname, int port)
{
+ if ((op_errstr) && (strcmp (op_errstr, ""))) {
+ snprintf (errstr, len, "%s", op_errstr);
+ return;
+ }
if (!op_ret) {
switch (op_errno) {
@@ -3105,6 +3415,18 @@ get_probe_error_str (int op_ret, int op_errno, char *errstr, size_t len,
"in this state");
break;
+ case GF_PROBE_MISSED_SNAP_CONFLICT:
+ snprintf (errstr, len, "Failed to update "
+ "list of missed snapshots from "
+ "peer %s", hostname);
+ break;
+
+ case GF_PROBE_SNAP_CONFLICT:
+ snprintf (errstr, len, "Conflict in comparing "
+ "list of snapshots from "
+ "peer %s", hostname);
+ break;
+
default:
snprintf (errstr, len, "Probe returned with "
"unknown errno %d", op_errno);
@@ -3127,11 +3449,8 @@ glusterd_xfer_cli_probe_resp (rpcsvc_request_t *req, int32_t op_ret,
GF_ASSERT (req);
GF_ASSERT (this);
- if (op_errstr == NULL)
- (void) get_probe_error_str (op_ret, op_errno, errstr,
- sizeof (errstr), hostname, port);
- else
- snprintf (errstr, sizeof (errstr), "%s", op_errstr);
+ (void) set_probe_error_str (op_ret, op_errno, op_errstr, errstr,
+ sizeof (errstr), hostname, port);
if (dict) {
ret = dict_get_str (dict, "cmd-str", &cmd_str);
@@ -3160,9 +3479,14 @@ glusterd_xfer_cli_probe_resp (rpcsvc_request_t *req, int32_t op_ret,
}
static void
-get_deprobe_error_str (int op_ret, int op_errno, char *errstr, size_t len,
- char *hostname)
+set_deprobe_error_str (int op_ret, int op_errno, char *op_errstr, char *errstr,
+ size_t len, char *hostname)
{
+ if ((op_errstr) && (strcmp (op_errstr, ""))) {
+ snprintf (errstr, len, "%s", op_errstr);
+ return;
+ }
+
if (op_ret) {
switch (op_errno) {
case GF_DEPROBE_LOCALHOST:
@@ -3214,8 +3538,8 @@ glusterd_xfer_cli_deprobe_resp (rpcsvc_request_t *req, int32_t op_ret,
GF_ASSERT (req);
- (void) get_deprobe_error_str (op_ret, op_errno, errstr, sizeof (errstr),
- hostname);
+ (void) set_deprobe_error_str (op_ret, op_errno, op_errstr, errstr,
+ sizeof (errstr), hostname);
if (dict) {
ret = dict_get_str (dict, "cmd-str", &cmd_str);
@@ -3434,10 +3758,13 @@ __glusterd_handle_status_volume (rpcsvc_request_t *req)
glusterd_op_t cli_op = GD_OP_STATUS_VOLUME;
char err_str[2048] = {0,};
xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
GF_ASSERT (req);
this = THIS;
GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
if (ret < 0) {
@@ -3478,6 +3805,14 @@ __glusterd_handle_status_volume (rpcsvc_request_t *req)
"Received status volume req for volume %s", volname);
}
+ if ((cmd & GF_CLI_STATUS_QUOTAD) &&
+ (conf->op_version == GD_OP_VERSION_MIN)) {
+ snprintf (err_str, sizeof (err_str), "The cluster is operating "
+ "at version 1. Getting the status of quotad is not "
+ "allowed in this state.");
+ ret = -1;
+ goto out;
+ }
ret = glusterd_op_begin_synctask (req, GD_OP_STATUS_VOLUME, dict);
@@ -3579,6 +3914,52 @@ glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req)
}
static int
+get_volinfo_from_brickid (char *brickid, glusterd_volinfo_t **volinfo)
+{
+ int ret = -1;
+ char *volid_str = NULL;
+ char *brick = NULL;
+ char *brickid_dup = NULL;
+ uuid_t volid = {0};
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (brickid);
+
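+        /* brickid is assumed to be "<volume-uuid>:<hostname>:<brick-path>";
+         * the text before the first ':' is the volume id, the rest
+         * identifies the brick. */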
+ brickid_dup = gf_strdup (brickid);
+ if (!brickid_dup)
+ goto out;
+
+ volid_str = brickid_dup;
+ brick = strchr (brickid_dup, ':');
+ if (!brick) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Invalid brickid");
+ goto out;
+ }
+
+ *brick = '\0';
+ brick++;
+ uuid_parse (volid_str, volid);
+ ret = glusterd_volinfo_find_by_volume_id (volid, volinfo);
+ if (ret) {
+ /* Check if it is a snapshot volume */
+ ret = glusterd_snap_volinfo_find_by_volume_id (volid, volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Failed to find volinfo");
+ goto out;
+ }
+ }
+
+ ret = 0;
+out:
+ GF_FREE (brickid_dup);
+ return ret;
+}
+
+static int
get_brickinfo_from_brickid (char *brickid, glusterd_brickinfo_t **brickinfo)
{
glusterd_volinfo_t *volinfo = NULL;
@@ -3594,15 +3975,19 @@ get_brickinfo_from_brickid (char *brickid, glusterd_brickinfo_t **brickinfo)
volid_str = brickid_dup;
brick = strchr (brickid_dup, ':');
- *brick = '\0';
- brick++;
if (!volid_str || !brick)
goto out;
+ *brick = '\0';
+ brick++;
uuid_parse (volid_str, volid);
ret = glusterd_volinfo_find_by_volume_id (volid, &volinfo);
- if (ret)
- goto out;
+ if (ret) {
+ /* Check if it a snapshot volume */
+ ret = glusterd_snap_volinfo_find_by_volume_id (volid, &volinfo);
+ if (ret)
+ goto out;
+ }
ret = glusterd_volume_brickinfo_get_by_brick (brick, volinfo,
brickinfo);
@@ -3617,13 +4002,14 @@ out:
int
__glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
- rpc_clnt_event_t event, void *data)
+ rpc_clnt_event_t event, void *data)
{
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- int ret = 0;
- char *brickid = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
+ char *brickid = NULL;
+ int ret = 0;
+ glusterd_conf_t *conf = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
brickid = mydata;
if (!brickid)
@@ -3640,6 +4026,37 @@ __glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
switch (event) {
case RPC_CLNT_CONNECT:
+                /* If a node, on coming back up, starts a brick before the
+                 * handshake, and the notification arrives only after the
+                 * handshake is done, then we need to check whether this is
+                 * a restored brick with a snapshot pending. If so, the
+                 * brick must be stopped.
+                 */
+ if (brickinfo->snap_status == -1) {
+ gf_log (this->name, GF_LOG_INFO,
+ "Snapshot is pending on %s:%s. "
+ "Hence not starting the brick",
+ brickinfo->hostname,
+ brickinfo->path);
+ ret = get_volinfo_from_brickid (brickid, &volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get volinfo from "
+ "brickid(%s)", brickid);
+ goto out;
+ }
+
+ ret = glusterd_brick_stop (volinfo, brickinfo,
+ _gf_false);
+ if (ret) {
+                                gf_log (this->name, GF_LOG_ERROR,
+ "Unable to stop %s:%s",
+ brickinfo->hostname, brickinfo->path);
+ goto out;
+ }
+
+ break;
+ }
gf_log (this->name, GF_LOG_DEBUG, "Connected to %s:%s",
brickinfo->hostname, brickinfo->path);
glusterd_set_brick_status (brickinfo, GF_BRICK_STARTED);
@@ -3653,16 +4070,19 @@ __glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
"%s:%s", brickinfo->hostname, brickinfo->path);
glusterd_set_brick_status (brickinfo, GF_BRICK_STOPPED);
- if (rpc_clnt_is_disabled (rpc))
- GF_FREE (brickid);
break;
+ case RPC_CLNT_DESTROY:
+ GF_FREE (mydata);
+ mydata = NULL;
+ break;
default:
gf_log (this->name, GF_LOG_TRACE,
"got some other RPC event %d", event);
break;
}
+out:
return ret;
}
@@ -3776,6 +4196,8 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
glusterd_peerinfo_t *peerinfo = NULL;
glusterd_peerctx_t *peerctx = NULL;
gf_boolean_t quorum_action = _gf_false;
+ glusterd_volinfo_t *volinfo = NULL;
+ uuid_t uuid;
peerctx = mydata;
if (!peerctx)
@@ -3803,6 +4225,31 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_DISCONNECT %d",
peerinfo->state.state);
+ if (peerinfo->connected) {
+ if (conf->op_version < GD_OP_VERSION_4) {
+ glusterd_get_lock_owner (&uuid);
+ if (!uuid_is_null (uuid) &&
+ !uuid_compare (peerinfo->uuid, uuid))
+ glusterd_unlock (peerinfo->uuid);
+ } else {
+ list_for_each_entry (volinfo, &conf->volumes,
+ vol_list) {
+ ret = glusterd_mgmt_v3_unlock
+ (volinfo->volname,
+ peerinfo->uuid,
+ "vol");
+ if (ret)
+ gf_log (this->name,
+ GF_LOG_TRACE,
+ "Lock not released "
+ "for %s",
+ volinfo->volname);
+ }
+ }
+
+ ret = 0;
+ }
+
if ((peerinfo->quorum_contrib != QUORUM_DOWN) &&
(peerinfo->state.state == GD_FRIEND_STATE_BEFRIENDED)) {
peerinfo->quorum_contrib = QUORUM_DOWN;
@@ -3918,11 +4365,9 @@ rpcsvc_actor_t gd_svc_cli_actors[] = {
[GLUSTER_CLI_STATEDUMP_VOLUME] = {"STATEDUMP_VOLUME", GLUSTER_CLI_STATEDUMP_VOLUME, glusterd_handle_cli_statedump_volume, NULL, 0, DRC_NA},
[GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", GLUSTER_CLI_LIST_VOLUME, glusterd_handle_cli_list_volume, NULL, 0, DRC_NA},
[GLUSTER_CLI_CLRLOCKS_VOLUME] = {"CLEARLOCKS_VOLUME", GLUSTER_CLI_CLRLOCKS_VOLUME, glusterd_handle_cli_clearlocks_volume, NULL, 0, DRC_NA},
-#ifdef HAVE_BD_XLATOR
- [GLUSTER_CLI_BD_OP] = {"BD_OP", GLUSTER_CLI_BD_OP, glusterd_handle_cli_bd_op, NULL, 0, DRC_NA},
-#endif
- [GLUSTER_CLI_COPY_FILE] = {"COPY_FILE", GLUSTER_CLI_COPY_FILE, glusterd_handle_copy_file, NULL, 0, DRC_NA},
- [GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", GLUSTER_CLI_SYS_EXEC, glusterd_handle_sys_exec, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_COPY_FILE] = {"COPY_FILE", GLUSTER_CLI_COPY_FILE, glusterd_handle_copy_file, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", GLUSTER_CLI_SYS_EXEC, glusterd_handle_sys_exec, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_SNAP] = {"SNAP", GLUSTER_CLI_SNAP, glusterd_handle_snapshot, NULL, 0, DRC_NA},
};
struct rpcsvc_program gd_svc_cli_prog = {
@@ -3933,3 +4378,24 @@ struct rpcsvc_program gd_svc_cli_prog = {
.actors = gd_svc_cli_actors,
.synctask = _gf_true,
};
+
+/* This is a minimal RPC prog, which contains only the readonly RPC procs from
+ * the cli rpcsvc
+ */
+rpcsvc_actor_t gd_svc_cli_actors_ro[] = {
+ [GLUSTER_CLI_LIST_FRIENDS] = { "LIST_FRIENDS", GLUSTER_CLI_LIST_FRIENDS, glusterd_handle_cli_list_friends, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_UUID_GET] = { "UUID_GET", GLUSTER_CLI_UUID_GET, glusterd_handle_cli_uuid_get, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_GET_VOLUME] = { "GET_VOLUME", GLUSTER_CLI_GET_VOLUME, glusterd_handle_cli_get_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_GETWD] = { "GETWD", GLUSTER_CLI_GETWD, glusterd_handle_getwd, NULL, 1, DRC_NA},
+ [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME", GLUSTER_CLI_STATUS_VOLUME, glusterd_handle_status_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", GLUSTER_CLI_LIST_VOLUME, glusterd_handle_cli_list_volume, NULL, 0, DRC_NA},
+};
+
+struct rpcsvc_program gd_svc_cli_prog_ro = {
+ .progname = "GlusterD svc cli read-only",
+ .prognum = GLUSTER_CLI_PROGRAM,
+ .progver = GLUSTER_CLI_VERSION,
+ .numactors = GLUSTER_CLI_MAXVALUE,
+ .actors = gd_svc_cli_actors_ro,
+ .synctask = _gf_true,
+};
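+
+/* A minimal sketch of how this read-only program might be registered
+ * against an rpcsvc instance (the actual wiring lives in glusterd
+ * init; "svc" is a placeholder name):
+ *
+ *     ret = rpcsvc_program_register (svc, &gd_svc_cli_prog_ro);
+ *     if (ret)
+ *             gf_log (THIS->name, GF_LOG_ERROR,
+ *                     "Failed to register the read-only cli program");
+ */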
diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c
index ab172f69a..5078526e9 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handshake.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c
@@ -30,26 +30,145 @@
extern struct rpc_clnt_program gd_peer_prog;
extern struct rpc_clnt_program gd_mgmt_prog;
+extern struct rpc_clnt_program gd_mgmt_v3_prog;
#define TRUSTED_PREFIX "trusted-"
typedef ssize_t (*gfs_serialize_t) (struct iovec outmsg, void *data);
+static int
+get_snap_volname_and_volinfo (const char *volpath, char **volname,
+ glusterd_volinfo_t **volinfo)
+{
+ int ret = -1;
+ char *save_ptr = NULL;
+ char *str_token = NULL;
+ char *snapname = NULL;
+ char *volname_token = NULL;
+ char *vol = NULL;
+ glusterd_snap_t *snap = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (volpath);
+ GF_ASSERT (volinfo);
+
+ str_token = gf_strdup (volpath);
+ if (NULL == str_token) {
+ goto out;
+ }
+
+        /* The input volname will be in one of the following formats:
+         * /snaps/<snapname>/<volname>.<hostname>
+         * or
+         * /snaps/<snapname>/<parent-volname>
+         * We need to extract the snapname and the parent volname. */
+
+        /* Split the string by "/" */
+ strtok_r (str_token, "/", &save_ptr);
+ snapname = strtok_r(NULL, "/", &save_ptr);
+ if (!snapname) {
+ gf_log(this->name, GF_LOG_ERROR, "Invalid path: %s", volpath);
+ goto out;
+ }
+
+ volname_token = strtok_r(NULL, "/", &save_ptr);
+ if (!volname_token) {
+ gf_log(this->name, GF_LOG_ERROR, "Invalid path: %s", volpath);
+ goto out;
+ }
+
+ snap = glusterd_find_snap_by_name (snapname);
+ if (!snap) {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to "
+ "fetch snap %s", snapname);
+ goto out;
+ }
+
+        /* Find out whether it is a parent volume name or a snap volume
+         * name. This lookup succeeds only if volname_token is a parent
+         * volname.
+         */
+ ret = glusterd_volinfo_find (volname_token, volinfo);
+ if (ret) {
+ *volname = gf_strdup (volname_token);
+ if (NULL == *volname) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_snap_volinfo_find (volname_token, snap,
+ volinfo);
+ if (ret) {
+ /* Split the volume name */
+ vol = strtok_r (volname_token, ".", &save_ptr);
+ if (!vol) {
+ gf_log(this->name, GF_LOG_ERROR, "Invalid "
+ "volname (%s)", volname_token);
+ goto out;
+ }
+
+ ret = glusterd_snap_volinfo_find (vol, snap, volinfo);
+ if (ret) {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to "
+ "fetch snap volume from volname (%s)",
+ vol);
+ goto out;
+ }
+ }
+ } else {
+                /* volname_token is the parent volname */
+ ret = glusterd_snap_volinfo_find_from_parent_volname (
+ volname_token, snap, volinfo);
+ if (ret) {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to "
+ "fetch snap volume from parent "
+ "volname (%s)", volname_token);
+ goto out;
+ }
+
+                /* Since volname_token is a parent volname, we should
+                 * get the snap volname here. */
+ *volname = gf_strdup ((*volinfo)->volname);
+ if (NULL == *volname) {
+ ret = -1;
+ goto out;
+ }
+ }
+
+out:
+ if (ret && NULL != *volname) {
+ GF_FREE (*volname);
+ *volname = NULL;
+ }
+ return ret;
+}
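+
+/* For illustration (names hypothetical): given the volpath
+ * "/snaps/snap1/vol1.host1", the strtok_r calls above yield
+ * snapname = "snap1" and volname_token = "vol1.host1". If neither a
+ * parent volume nor a snap volume matches that token directly, it is
+ * split at '.' and the snap volume "vol1" is looked up instead.
+ */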
+
static size_t
build_volfile_path (const char *volname, char *path,
size_t path_len, char *trusted_str)
{
- struct stat stbuf = {0,};
- int32_t ret = -1;
- glusterd_conf_t *priv = NULL;
- char *vol = NULL;
- char *dup_volname = NULL;
- char *free_ptr = NULL;
- char *tmp = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- char *server = NULL;
-
- priv = THIS->private;
+ struct stat stbuf = {0,};
+ int32_t ret = -1;
+ glusterd_conf_t *priv = NULL;
+ char *vol = NULL;
+ char *dup_volname = NULL;
+ char *free_ptr = NULL;
+ char *save_ptr = NULL;
+ char *str_token = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ char *server = NULL;
+ const char *volname_ptr = NULL;
+ char path_prefix [PATH_MAX] = {0,};
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+ GF_ASSERT (volname);
+ GF_ASSERT (path);
if (strstr (volname, "gluster/")) {
server = strchr (volname, '/') + 1;
@@ -57,6 +176,22 @@ build_volfile_path (const char *volname, char *path,
path, path_len);
ret = 1;
goto out;
+ } else if ((str_token = strstr (volname, "/snaps/"))) {
+ ret = get_snap_volname_and_volinfo (str_token, &dup_volname,
+ &volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get snap"
+ " volinfo from path (%s)", volname);
+ ret = -1;
+ goto out;
+ }
+
+ snprintf (path_prefix, sizeof (path_prefix), "%s/snaps/%s",
+ priv->workdir, volinfo->snapshot->snapname);
+
+ free_ptr = dup_volname;
+ volname_ptr = dup_volname;
+ goto gotvolinfo;
} else if (volname[0] != '/') {
/* Normal behavior */
dup_volname = gf_strdup (volname);
@@ -67,39 +202,53 @@ build_volfile_path (const char *volname, char *path,
dup_volname = gf_strdup (&volname[1]);
}
+ if (!dup_volname) {
+                gf_log (this->name, GF_LOG_ERROR, "strdup failed");
+ ret = -1;
+ goto out;
+ }
free_ptr = dup_volname;
+ volname_ptr = volname;
+
+ snprintf (path_prefix, sizeof (path_prefix), "%s/vols",
+ priv->workdir);
ret = glusterd_volinfo_find (dup_volname, &volinfo);
+
if (ret) {
/* Split the volume name */
- vol = strtok_r (dup_volname, ".", &tmp);
+ vol = strtok_r (dup_volname, ".", &save_ptr);
if (!vol)
goto out;
+
ret = glusterd_volinfo_find (vol, &volinfo);
if (ret)
goto out;
}
+gotvolinfo:
if (!glusterd_auth_get_username (volinfo))
trusted_str = NULL;
- ret = snprintf (path, path_len, "%s/vols/%s/%s.vol",
- priv->workdir, volinfo->volname, volname);
+ ret = snprintf (path, path_len, "%s/%s/%s.vol", path_prefix,
+ volinfo->volname, volname_ptr);
if (ret == -1)
goto out;
ret = stat (path, &stbuf);
if ((ret == -1) && (errno == ENOENT)) {
- snprintf (path, path_len, "%s/vols/%s/%s%s-fuse.vol",
- priv->workdir, volinfo->volname,
- (trusted_str ? trusted_str : ""), dup_volname);
+ snprintf (path, path_len, "%s/%s/%s%s-fuse.vol",
+ path_prefix, volinfo->volname,
+ (trusted_str ? trusted_str : ""),
+ dup_volname);
+
ret = stat (path, &stbuf);
}
if ((ret == -1) && (errno == ENOENT)) {
- snprintf (path, path_len, "%s/vols/%s/%s-tcp.vol",
- priv->workdir, volinfo->volname, volname);
+ snprintf (path, path_len, "%s/%s/%s-tcp.vol",
+ path_prefix, volinfo->volname, volname_ptr);
}
ret = 1;
@@ -244,16 +393,22 @@ __server_getspec (rpcsvc_request_t *req)
}
trans = req->trans;
+ /* addrstr will be empty for cli socket connections */
ret = rpcsvc_transport_peername (trans, (char *)&addrstr,
sizeof (addrstr));
if (ret)
goto fail;
- tmp = strrchr (addrstr, ':');
- *tmp = '\0';
+ tmp = strrchr (addrstr, ':');
+ if (tmp)
+ *tmp = '\0';
- /* we trust the local admin */
- if (gf_is_local_addr (addrstr)) {
+        /* The trusted volfiles are given to glusterd-owned processes like the
+         * NFS server, self-heal daemon etc., so that they are not inadvertently
+         * blocked by an auth.{allow,reject} setting. The trusted volfile is not
+         * meant for external users.
+         */
+ if (strlen (addrstr) && gf_is_local_addr (addrstr)) {
ret = build_volfile_path (volume, filename,
sizeof (filename),
@@ -408,12 +563,16 @@ gd_validate_cluster_op_version (xlator_t *this, int cluster_op_version,
goto out;
}
- if (cluster_op_version < conf->op_version) {
+ /* The peer can only reduce its op-version when it doesn't have any
+ * volumes. Reducing op-version when it already contains volumes can
+ * lead to inconsistencies in the cluster
+ */
+ if ((cluster_op_version < conf->op_version) &&
+ !list_empty (&conf->volumes)) {
gf_log (this->name, GF_LOG_ERROR,
- "operating version %d is less than the currently "
- "running version (%d) on the machine (as per peer "
- "request from %s)", cluster_op_version,
- conf->op_version, peerid);
+ "cannot reduce operating version to %d from current "
+ "version %d as volumes exist (as per peer request from "
+ "%s)", cluster_op_version, conf->op_version, peerid);
goto out;
}
@@ -596,6 +755,19 @@ struct rpcsvc_program gluster_handshake_prog = {
.numactors = GF_HNDSK_MAXVALUE,
};
+/* A minimal RPC program just for the cli getspec command */
+rpcsvc_actor_t gluster_cli_getspec_actors[] = {
+ [GF_HNDSK_GETSPEC] = {"GETSPEC", GF_HNDSK_GETSPEC, server_getspec, NULL, 0, DRC_NA},
+};
+
+struct rpcsvc_program gluster_cli_getspec_prog = {
+ .progname = "Gluster Handshake (CLI Getspec)",
+ .prognum = GLUSTER_HNDSK_PROGRAM,
+ .progver = GLUSTER_HNDSK_VERSION,
+ .actors = gluster_cli_getspec_actors,
+ .numactors = GF_HNDSK_MAXVALUE,
+};
+
char *glusterd_dump_proc[GF_DUMP_MAXVALUE] = {
[GF_DUMP_NULL] = "NULL",
@@ -733,16 +905,6 @@ gd_validate_peer_op_version (xlator_t *this, glusterd_peerinfo_t *peerinfo,
goto out;
}
- /* If peer is already operating at a higher op_version reject it.
- * Cluster cannot be moved to higher op_version to accomodate a peer.
- */
- if (peer_op_version > conf->op_version) {
- ret = gf_asprintf (errstr, "Peer %s is already at a higher "
- "op-version", peerinfo->hostname);
- ret = -1;
- goto out;
- }
-
ret = 0;
out:
gf_log (this->name , GF_LOG_DEBUG, "Peer %s %s", peerinfo->hostname,
@@ -799,6 +961,7 @@ __glusterd_mgmt_hndsk_version_ack_cbk (struct rpc_req *req, struct iovec *iov,
*/
peerinfo->mgmt = &gd_mgmt_prog;
peerinfo->peer = &gd_peer_prog;
+ peerinfo->mgmt_v3 = &gd_mgmt_v3_prog;
ret = default_notify (this, GF_EVENT_CHILD_UP, NULL);
@@ -1025,6 +1188,15 @@ glusterd_set_clnt_mgmt_program (glusterd_peerinfo_t *peerinfo,
peerinfo->peer->progname, peerinfo->peer->prognum,
peerinfo->peer->progver);
}
+
+ if (peerinfo->mgmt_v3) {
+ gf_log ("", GF_LOG_INFO,
+ "Using Program %s, Num (%d), Version (%d)",
+ peerinfo->mgmt_v3->progname,
+ peerinfo->mgmt_v3->prognum,
+ peerinfo->mgmt_v3->progver);
+ }
+
ret = 0;
out:
return ret;
diff --git a/xlators/mgmt/glusterd/src/glusterd-hooks.c b/xlators/mgmt/glusterd/src/glusterd-hooks.c
index 2b43a452e..78730a564 100644
--- a/xlators/mgmt/glusterd/src/glusterd-hooks.c
+++ b/xlators/mgmt/glusterd/src/glusterd-hooks.c
@@ -143,6 +143,24 @@ glusterd_hooks_get_hooks_cmd_subdir (glusterd_op_t op)
return glusterd_hook_dirnames[op];
}
+void
+glusterd_hooks_add_working_dir (runner_t *runner, glusterd_conf_t *priv)
+{
+ runner_argprintf (runner, "--gd-workdir=%s", priv->workdir);
+}
+
+void
+glusterd_hooks_add_op (runner_t *runner, char *op)
+{
+ runner_argprintf (runner, "--volume-op=%s", op);
+}
+
+void
+glusterd_hooks_add_hooks_version (runner_t *runner)
+{
+ runner_argprintf (runner, "--version=%d", GLUSTERD_HOOK_VER);
+}
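+
+/* With the three helpers above, a hook script invocation gains
+ * arguments of the form (values illustrative):
+ *
+ *     --version=1 --volume-op=start --gd-workdir=/var/lib/glusterd
+ */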
+
int
glusterd_hooks_set_volume_args (dict_t *dict, runner_t *runner)
{
@@ -163,7 +181,7 @@ glusterd_hooks_set_volume_args (dict_t *dict, runner_t *runner)
goto out;
runner_add_arg (runner, "-o");
- for (i = 1; (ret == 0); i++) {
+ for (i = 1; ret == 0; i++) {
snprintf (query, sizeof (query), "key%d", i);
ret = dict_get_str (dict, query, &key);
if (ret)
@@ -216,6 +234,11 @@ glusterd_hooks_add_op_args (runner_t *runner, glusterd_op_t op,
runner_argprintf (runner, "--first=%s",
truth? "yes":"no");
+
+ glusterd_hooks_add_hooks_version (runner);
+ glusterd_hooks_add_op (runner, "start");
+ glusterd_hooks_add_working_dir (runner, priv);
+
break;
case GD_OP_STOP_VOLUME:
@@ -250,6 +273,11 @@ glusterd_hooks_add_op_args (runner_t *runner, glusterd_op_t op,
runner_argprintf (runner, "%s", hooks_args);
break;
+        case GD_OP_ADD_BRICK:
+                glusterd_hooks_add_hooks_version (runner);
+                glusterd_hooks_add_op (runner, "add-brick");
+                glusterd_hooks_add_working_dir (runner, priv);
+                break;
+
default:
break;
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.c b/xlators/mgmt/glusterd/src/glusterd-locks.c
new file mode 100644
index 000000000..28358aa55
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.c
@@ -0,0 +1,656 @@
+/*
+ Copyright (c) 2013-2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "common-utils.h"
+#include "cli1-xdr.h"
+#include "xdr-generic.h"
+#include "glusterd.h"
+#include "glusterd-op-sm.h"
+#include "glusterd-store.h"
+#include "glusterd-utils.h"
+#include "glusterd-volgen.h"
+#include "glusterd-locks.h"
+#include "run.h"
+#include "syscall.h"
+
+#include <signal.h>
+
+#define GF_MAX_LOCKING_ENTITIES 2
+
+/* Valid entities on which mgmt_v3 locks can be held.          *
+ * To support locking of new entities, simply add more entries *
+ * to this table along with their type and default value.      */
+glusterd_valid_entities valid_types[] = {
+ { "vol", _gf_true },
+ { "snap", _gf_false },
+ { NULL },
+};
+
+/* Checks if the lock request is for a valid entity */
+gf_boolean_t
+glusterd_mgmt_v3_is_type_valid (char *type)
+{
+ int32_t i = 0;
+ gf_boolean_t ret = _gf_false;
+
+ GF_ASSERT (type);
+
+ for (i = 0; valid_types[i].type; i++) {
+ if (!strcmp (type, valid_types[i].type)) {
+ ret = _gf_true;
+ break;
+ }
+ }
+
+ return ret;
+}
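+
+/* Example (illustrative): per the table above,
+ * glusterd_mgmt_v3_is_type_valid ("vol") and
+ * glusterd_mgmt_v3_is_type_valid ("snap") return _gf_true, while an
+ * unlisted entity such as "peer" returns _gf_false.
+ */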
+
+/* Initialize the global mgmt_v3 lock list (dict) when
+ * glusterd is spawned */
+int32_t
+glusterd_mgmt_v3_lock_init ()
+{
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ priv->mgmt_v3_lock = dict_new ();
+ if (!priv->mgmt_v3_lock)
+ goto out;
+
+ ret = 0;
+out:
+ return ret;
+}
+
+/* Destroy the global mgmt_v3 lock list (dict) when
+ * glusterd cleanup is performed */
+void
+glusterd_mgmt_v3_lock_fini ()
+{
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ if (priv->mgmt_v3_lock)
+ dict_unref (priv->mgmt_v3_lock);
+}
+
+int32_t
+glusterd_get_mgmt_v3_lock_owner (char *key, uuid_t *uuid)
+{
+ int32_t ret = -1;
+ glusterd_mgmt_v3_lock_obj *lock_obj = NULL;
+ glusterd_conf_t *priv = NULL;
+ uuid_t no_owner = {0,};
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ if (!key || !uuid) {
+ gf_log (this->name, GF_LOG_ERROR, "key or uuid is null.");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_bin (priv->mgmt_v3_lock, key, (void **) &lock_obj);
+ if (!ret)
+ uuid_copy (*uuid, lock_obj->lock_owner);
+ else
+ uuid_copy (*uuid, no_owner);
+
+ ret = 0;
+out:
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
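+
+/* The key passed above is expected in the "<name>_<type>" form built
+ * by glusterd_mgmt_v3_lock () below, e.g. a hypothetical "vol1_vol".
+ */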
+
+/* This function is called with the locked_count and type, to *
+ * release all the acquired locks. */
+static int32_t
+glusterd_release_multiple_locks_per_entity (dict_t *dict, uuid_t uuid,
+ int32_t locked_count,
+ char *type)
+{
+ char name_buf[PATH_MAX] = "";
+ char *name = NULL;
+ int32_t i = -1;
+ int32_t op_ret = 0;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT (dict);
+ GF_ASSERT (type);
+
+ if (locked_count == 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "No %s locked as part of this transaction",
+ type);
+ goto out;
+ }
+
+ /* Release all the locks held */
+ for (i = 0; i < locked_count; i++) {
+ snprintf (name_buf, sizeof(name_buf),
+ "%sname%d", type, i+1);
+
+                /* Looking for volname1, volname2 or snapname1, *
+                 * snapname2 as keys in the dict */
+ ret = dict_get_str (dict, name_buf, &name);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to get %s locked_count = %d",
+ name_buf, locked_count);
+ op_ret = ret;
+ continue;
+ }
+
+ ret = glusterd_mgmt_v3_unlock (name, uuid, type);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to release lock for %s.",
+ name);
+ op_ret = ret;
+ }
+ }
+
+out:
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", op_ret);
+ return op_ret;
+}
+
+/* Given the count and type of the entity this function acquires *
+ * locks on multiple elements of the same entity. For example: *
+ * If type is "vol" this function tries to acquire locks on multiple *
+ * volumes */
+static int32_t
+glusterd_acquire_multiple_locks_per_entity (dict_t *dict, uuid_t uuid,
+ int32_t count, char *type)
+{
+ char name_buf[PATH_MAX] = "";
+ char *name = NULL;
+ int32_t i = -1;
+ int32_t ret = -1;
+ int32_t locked_count = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT (dict);
+ GF_ASSERT (type);
+
+ /* Locking one element after other */
+ for (i = 0; i < count; i++) {
+ snprintf (name_buf, sizeof(name_buf),
+ "%sname%d", type, i+1);
+
+                /* Looking for volname1, volname2 or snapname1, *
+                 * snapname2 as keys in the dict */
+ ret = dict_get_str (dict, name_buf, &name);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to get %s count = %d",
+ name_buf, count);
+ break;
+ }
+
+ ret = glusterd_mgmt_v3_lock (name, uuid, type);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to acquire lock for %s %s "
+ "on behalf of %s. Reversing "
+ "this transaction", type, name,
+ uuid_utoa(uuid));
+ break;
+ }
+ locked_count++;
+ }
+
+ if (count == locked_count) {
+                /* If all locking ops went successfully, return success */
+ ret = 0;
+ goto out;
+ }
+
+ /* If we failed to lock one element, unlock others and return failure */
+ ret = glusterd_release_multiple_locks_per_entity (dict, uuid,
+ locked_count,
+ type);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to release multiple %s locks",
+ type);
+ }
+ ret = -1;
+out:
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
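+
+/* Illustrative layout of the dict consumed by the two helpers above,
+ * for type "vol" (names hypothetical):
+ *
+ *     volcount = 2
+ *     volname1 = "vol1"
+ *     volname2 = "vol2"
+ */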
+
+/* Given the type of entity, this function figures out if it should unlock a *
+ * single element of multiple elements of the said entity. For example: *
+ * if the type is "vol", this function will accordingly unlock a single volume *
+ * or multiple volumes */
+static int32_t
+glusterd_mgmt_v3_unlock_entity (dict_t *dict, uuid_t uuid, char *type,
+ gf_boolean_t default_value)
+{
+ char name_buf[PATH_MAX] = "";
+ char *name = NULL;
+ int32_t count = -1;
+ int32_t ret = -1;
+ gf_boolean_t hold_locks = _gf_false;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT (dict);
+ GF_ASSERT (type);
+
+ snprintf (name_buf, sizeof(name_buf), "hold_%s_locks", type);
+ hold_locks = dict_get_str_boolean (dict, name_buf, default_value);
+
+ if (hold_locks == _gf_false) {
+ /* Locks were not held for this particular entity *
+ * Hence nothing to release */
+ ret = 0;
+ goto out;
+ }
+
+ /* Looking for volcount or snapcount in the dict */
+ snprintf (name_buf, sizeof(name_buf), "%scount", type);
+ ret = dict_get_int32 (dict, name_buf, &count);
+ if (ret) {
+ /* count is not present. Only one *
+ * element name needs to be unlocked */
+ snprintf (name_buf, sizeof(name_buf), "%sname",
+ type);
+ ret = dict_get_str (dict, name_buf, &name);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to fetch %sname", type);
+ goto out;
+ }
+
+ ret = glusterd_mgmt_v3_unlock (name, uuid, type);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to release lock for %s %s "
+ "on behalf of %s.", type, name,
+ uuid_utoa(uuid));
+ goto out;
+ }
+ } else {
+ /* Unlocking one element name after another */
+ ret = glusterd_release_multiple_locks_per_entity (dict,
+ uuid,
+ count,
+ type);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to release all %s locks", type);
+ goto out;
+ }
+ }
+
+ ret = 0;
+out:
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+/* Given the type of entity, this function figures out if it should lock a *
+ * single element or multiple elements of the said entity. For example: *
+ * if the type is "vol", this function will accordingly lock a single volume *
+ * or multiple volumes */
+static int32_t
+glusterd_mgmt_v3_lock_entity (dict_t *dict, uuid_t uuid, char *type,
+ gf_boolean_t default_value)
+{
+ char name_buf[PATH_MAX] = "";
+ char *name = NULL;
+ int32_t count = -1;
+ int32_t ret = -1;
+ gf_boolean_t hold_locks = _gf_false;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT (dict);
+ GF_ASSERT (type);
+
+ snprintf (name_buf, sizeof(name_buf), "hold_%s_locks", type);
+ hold_locks = dict_get_str_boolean (dict, name_buf, default_value);
+
+ if (hold_locks == _gf_false) {
+ /* Not holding locks for this particular entity */
+ ret = 0;
+ goto out;
+ }
+
+ /* Looking for volcount or snapcount in the dict */
+ snprintf (name_buf, sizeof(name_buf), "%scount", type);
+ ret = dict_get_int32 (dict, name_buf, &count);
+ if (ret) {
+ /* count is not present. Only one *
+ * element name needs to be locked */
+ snprintf (name_buf, sizeof(name_buf), "%sname",
+ type);
+ ret = dict_get_str (dict, name_buf, &name);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to fetch %sname", type);
+ goto out;
+ }
+
+ ret = glusterd_mgmt_v3_lock (name, uuid, type);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to acquire lock for %s %s "
+ "on behalf of %s.", type, name,
+ uuid_utoa(uuid));
+ goto out;
+ }
+ } else {
+ /* Locking one element name after another */
+ ret = glusterd_acquire_multiple_locks_per_entity (dict,
+ uuid,
+ count,
+ type);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to acquire all %s locks", type);
+ goto out;
+ }
+ }
+
+ ret = 0;
+out:
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+/* Try to release locks of multiple entities like *
+ * volume, snaps etc. */
+int32_t
+glusterd_multiple_mgmt_v3_unlock (dict_t *dict, uuid_t uuid)
+{
+ int32_t i = -1;
+ int32_t ret = -1;
+ int32_t op_ret = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ if (!dict) {
+ gf_log (this->name, GF_LOG_ERROR, "dict is null.");
+ ret = -1;
+ goto out;
+ }
+
+ for (i = 0; valid_types[i].type; i++) {
+ ret = glusterd_mgmt_v3_unlock_entity
+ (dict, uuid,
+ valid_types[i].type,
+ valid_types[i].default_value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to unlock all %s",
+ valid_types[i].type);
+ op_ret = ret;
+ }
+ }
+
+ ret = op_ret;
+out:
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+/* Try to acquire locks on multiple entities like *
+ * volume, snaps etc. */
+int32_t
+glusterd_multiple_mgmt_v3_lock (dict_t *dict, uuid_t uuid)
+{
+ int32_t i = -1;
+ int32_t ret = -1;
+ int32_t locked_count = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ if (!dict) {
+ gf_log (this->name, GF_LOG_ERROR, "dict is null.");
+ ret = -1;
+ goto out;
+ }
+
+ /* Locking one entity after other */
+ for (i = 0; valid_types[i].type; i++) {
+ ret = glusterd_mgmt_v3_lock_entity
+ (dict, uuid,
+ valid_types[i].type,
+ valid_types[i].default_value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to lock all %s",
+ valid_types[i].type);
+ break;
+ }
+ locked_count++;
+ }
+
+ if (locked_count == GF_MAX_LOCKING_ENTITIES) {
+                /* If all locking ops went successfully, return success */
+ ret = 0;
+ goto out;
+ }
+
+ /* If we failed to lock one entity, unlock others and return failure */
+ for (i = 0; i < locked_count; i++) {
+ ret = glusterd_mgmt_v3_unlock_entity
+ (dict, uuid,
+ valid_types[i].type,
+ valid_types[i].default_value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to unlock all %s",
+ valid_types[i].type);
+ }
+ }
+ ret = -1;
+out:
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
+glusterd_mgmt_v3_lock (const char *name, uuid_t uuid, char *type)
+{
+ char key[PATH_MAX] = "";
+ int32_t ret = -1;
+ glusterd_mgmt_v3_lock_obj *lock_obj = NULL;
+ glusterd_conf_t *priv = NULL;
+ gf_boolean_t is_valid = _gf_true;
+ uuid_t owner = {0};
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ if (!name || !type) {
+ gf_log (this->name, GF_LOG_ERROR, "name or type is null.");
+ ret = -1;
+ goto out;
+ }
+
+ is_valid = glusterd_mgmt_v3_is_type_valid (type);
+ if (is_valid != _gf_true) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Invalid entity. Cannot perform locking "
+ "operation on %s types", type);
+ ret = -1;
+ goto out;
+ }
+
+ ret = snprintf (key, sizeof(key), "%s_%s", name, type);
+ if (ret != strlen(name) + 1 + strlen(type)) {
+ ret = -1;
+ gf_log (this->name, GF_LOG_ERROR, "Unable to create key");
+ goto out;
+ }
+
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Trying to acquire lock of %s %s for %s as %s",
+ type, name, uuid_utoa (uuid), key);
+
+ ret = glusterd_get_mgmt_v3_lock_owner (key, &owner);
+ if (ret) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Unable to get mgmt_v3 lock owner");
+ goto out;
+ }
+
+        /* If the lock is already held for the given entity,
+         * we fail */
+ if (!uuid_is_null (owner)) {
+ gf_log (this->name, GF_LOG_WARNING, "Lock for %s held by %s",
+ name, uuid_utoa (owner));
+ ret = -1;
+ goto out;
+ }
+
+ lock_obj = GF_CALLOC (1, sizeof(glusterd_mgmt_v3_lock_obj),
+ gf_common_mt_mgmt_v3_lock_obj_t);
+ if (!lock_obj) {
+ ret = -1;
+ goto out;
+ }
+
+ uuid_copy (lock_obj->lock_owner, uuid);
+
+ ret = dict_set_bin (priv->mgmt_v3_lock, key, lock_obj,
+ sizeof(glusterd_mgmt_v3_lock_obj));
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set lock owner in mgmt_v3 lock");
+ if (lock_obj)
+ GF_FREE (lock_obj);
+ goto out;
+ }
+
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Lock for %s %s successfully held by %s",
+ type, name, uuid_utoa (uuid));
+
+ ret = 0;
+out:
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
+glusterd_mgmt_v3_unlock (const char *name, uuid_t uuid, char *type)
+{
+ char key[PATH_MAX] = "";
+ int32_t ret = -1;
+ gf_boolean_t is_valid = _gf_true;
+ glusterd_conf_t *priv = NULL;
+ uuid_t owner = {0};
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ if (!name || !type) {
+                gf_log (this->name, GF_LOG_ERROR, "name or type is null.");
+ ret = -1;
+ goto out;
+ }
+
+ is_valid = glusterd_mgmt_v3_is_type_valid (type);
+ if (is_valid != _gf_true) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Invalid entity. Cannot perform unlocking "
+ "operation on %s types", type);
+ ret = -1;
+ goto out;
+ }
+
+ ret = snprintf (key, sizeof(key), "%s_%s",
+ name, type);
+ if (ret != strlen(name) + 1 + strlen(type)) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to create key");
+ ret = -1;
+ goto out;
+ }
+
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Trying to release lock of %s %s for %s as %s",
+ type, name, uuid_utoa (uuid), key);
+
+ ret = glusterd_get_mgmt_v3_lock_owner (key, &owner);
+ if (ret) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Unable to get mgmt_v3 lock owner");
+ goto out;
+ }
+
+ if (uuid_is_null (owner)) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Lock for %s %s not held", type, name);
+ ret = -1;
+ goto out;
+ }
+
+ ret = uuid_compare (uuid, owner);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "Lock owner mismatch. "
+ "Lock for %s %s held by %s",
+ type, name, uuid_utoa (owner));
+ goto out;
+ }
+
+ /* Removing the mgmt_v3 lock from the global list */
+ dict_del (priv->mgmt_v3_lock, key);
+
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Lock for %s %s successfully released",
+ type, name);
+
+ ret = 0;
+out:
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
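+
+/* A minimal usage sketch of the pair above, assuming MY_UUID is this
+ * node's uuid (all names hypothetical):
+ *
+ *     if (!glusterd_mgmt_v3_lock ("vol1", MY_UUID, "vol")) {
+ *             ... perform the volume operation ...
+ *             glusterd_mgmt_v3_unlock ("vol1", MY_UUID, "vol");
+ *     }
+ */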
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.h b/xlators/mgmt/glusterd/src/glusterd-locks.h
new file mode 100644
index 000000000..b9cc8c0d1
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.h
@@ -0,0 +1,51 @@
+/*
+ Copyright (c) 2013-2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#ifndef _GLUSTERD_LOCKS_H_
+#define _GLUSTERD_LOCKS_H_
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+typedef struct glusterd_mgmt_v3_lock_object_ {
+ uuid_t lock_owner;
+} glusterd_mgmt_v3_lock_obj;
+
+typedef struct glusterd_mgmt_v3_lock_valid_entities {
+ char *type; /* Entity type like vol, snap */
+ gf_boolean_t default_value; /* The default value that *
+ * determines if the locks *
+ * should be held for that *
+ * entity */
+} glusterd_valid_entities;
+
+int32_t
+glusterd_mgmt_v3_lock_init ();
+
+void
+glusterd_mgmt_v3_lock_fini ();
+
+int32_t
+glusterd_get_mgmt_v3_lock_owner (char *volname, uuid_t *uuid);
+
+int32_t
+glusterd_mgmt_v3_lock (const char *key, uuid_t uuid, char *type);
+
+int32_t
+glusterd_mgmt_v3_unlock (const char *key, uuid_t uuid, char *type);
+
+int32_t
+glusterd_multiple_mgmt_v3_lock (dict_t *dict, uuid_t uuid);
+
+int32_t
+glusterd_multiple_mgmt_v3_unlock (dict_t *dict, uuid_t uuid);
+
+#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-log-ops.c b/xlators/mgmt/glusterd/src/glusterd-log-ops.c
index 0136cddc9..33bd95c03 100644
--- a/xlators/mgmt/glusterd/src/glusterd-log-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-log-ops.c
@@ -24,7 +24,7 @@
#include <signal.h>
int
-glusterd_handle_log_rotate (rpcsvc_request_t *req)
+__glusterd_handle_log_rotate (rpcsvc_request_t *req)
{
int32_t ret = -1;
gf_cli_req cli_req = {{0,}};
@@ -90,6 +90,13 @@ out:
return ret;
}
+int
+glusterd_handle_log_rotate (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_log_rotate);
+}
+
/* op-sm */
int
glusterd_op_stage_log_rotate (dict_t *dict, char **op_errstr)
diff --git a/xlators/mgmt/glusterd/src/glusterd-mem-types.h b/xlators/mgmt/glusterd/src/glusterd-mem-types.h
index 98216e28a..e6f6a0333 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mem-types.h
+++ b/xlators/mgmt/glusterd/src/glusterd-mem-types.h
@@ -66,7 +66,10 @@ typedef enum gf_gld_mem_types_ {
gf_gld_mt_hooks_stub_t = gf_common_mt_end + 50,
gf_gld_mt_hooks_priv_t = gf_common_mt_end + 51,
gf_gld_mt_mop_commit_req_t = gf_common_mt_end + 52,
- gf_gld_mt_end = gf_common_mt_end + 53,
+ gf_gld_mt_int = gf_common_mt_end + 53,
+ gf_gld_mt_snap_t = gf_common_mt_end + 54,
+ gf_gld_mt_missed_snapinfo_t = gf_common_mt_end + 55,
+ gf_gld_mt_end = gf_common_mt_end + 56,
} gf_gld_mem_types_t;
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
new file mode 100644
index 000000000..81c5aa579
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
@@ -0,0 +1,936 @@
+/*
+ Copyright (c) 2013-2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+/* rpc related syncops */
+#include "rpc-clnt.h"
+#include "protocol-common.h"
+#include "xdr-generic.h"
+#include "glusterd1-xdr.h"
+#include "glusterd-syncop.h"
+
+#include "glusterd.h"
+#include "glusterd-utils.h"
+#include "glusterd-locks.h"
+#include "glusterd-mgmt.h"
+#include "glusterd-op-sm.h"
+
+static int
+glusterd_mgmt_v3_null (rpcsvc_request_t *req)
+{
+ return 0;
+}
+
+static int
+glusterd_mgmt_v3_lock_send_resp (rpcsvc_request_t *req, int32_t status)
+{
+
+ gd1_mgmt_v3_lock_rsp rsp = {{0},};
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ rsp.op_ret = status;
+ if (rsp.op_ret)
+ rsp.op_errno = errno;
+
+ glusterd_get_uuid (&rsp.uuid);
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
+
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Responded to mgmt_v3 lock, ret: %d", ret);
+
+ return ret;
+}
+
+static int
+glusterd_synctasked_mgmt_v3_lock (rpcsvc_request_t *req,
+ gd1_mgmt_v3_lock_req *lock_req,
+ glusterd_op_lock_ctx_t *ctx)
+{
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (ctx);
+ GF_ASSERT (ctx->dict);
+
+ /* Trying to acquire multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_lock (ctx->dict, ctx->uuid);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to acquire mgmt_v3 locks for %s",
+ uuid_utoa (ctx->uuid));
+
+ ret = glusterd_mgmt_v3_lock_send_resp (req, ret);
+
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+static int
+glusterd_op_state_machine_mgmt_v3_lock (rpcsvc_request_t *req,
+ gd1_mgmt_v3_lock_req *lock_req,
+ glusterd_op_lock_ctx_t *ctx)
+{
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ glusterd_op_info_t txn_op_info = {{0},};
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ glusterd_txn_opinfo_init (&txn_op_info, NULL, &lock_req->op,
+ ctx->dict, req);
+
+ ret = glusterd_set_txn_opinfo (&lock_req->txn_id, &txn_op_info);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set transaction's opinfo");
+ goto out;
+ }
+
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_LOCK,
+ &lock_req->txn_id, ctx);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to inject event GD_OP_EVENT_LOCK");
+
+out:
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+static int
+glusterd_handle_mgmt_v3_lock_fn (rpcsvc_request_t *req)
+{
+ gd1_mgmt_v3_lock_req lock_req = {{0},};
+ int32_t ret = -1;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_op_lock_ctx_t *ctx = NULL;
+ xlator_t *this = NULL;
+ gf_boolean_t is_synctasked = _gf_false;
+ gf_boolean_t free_ctx = _gf_false;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ ret = xdr_to_generic (req->msg[0], &lock_req,
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_req);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to decode lock "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ gf_log (this->name, GF_LOG_DEBUG, "Received mgmt_v3 lock req "
+ "from uuid: %s", uuid_utoa (lock_req.uuid));
+
+ if (glusterd_friend_find_by_uuid (lock_req.uuid, &peerinfo)) {
+ gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa (lock_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);
+ if (!ctx) {
+ ret = -1;
+ goto out;
+ }
+
+ uuid_copy (ctx->uuid, lock_req.uuid);
+ ctx->req = req;
+
+ ctx->dict = dict_new ();
+ if (!ctx->dict) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_unserialize (lock_req.dict.dict_val,
+ lock_req.dict.dict_len, &ctx->dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "failed to unserialize the dictionary");
+ goto out;
+ }
+
+ is_synctasked = dict_get_str_boolean (ctx->dict,
+ "is_synctasked", _gf_false);
+ if (is_synctasked) {
+ ret = glusterd_synctasked_mgmt_v3_lock (req, &lock_req, ctx);
+ /* The above function does not take ownership of ctx.
+ * Therefore we need to free the ctx explicitly. */
+ free_ctx = _gf_true;
+ }
+ else {
+ ret = glusterd_op_state_machine_mgmt_v3_lock (req, &lock_req,
+ ctx);
+ }
+
+out:
+
+        /* ctx may be NULL if its allocation failed; check it before
+         * touching ctx->dict */
+        if ((ret || free_ctx) && ctx) {
+                if (ctx->dict)
+                        dict_unref (ctx->dict);
+                GF_FREE (ctx);
+        }
+
+ free (lock_req.dict.dict_val);
+
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+static int
+glusterd_mgmt_v3_pre_validate_send_resp (rpcsvc_request_t *req,
+ int32_t op, int32_t status,
+ char *op_errstr, dict_t *rsp_dict)
+{
+ gd1_mgmt_v3_pre_val_rsp rsp = {{0},};
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ rsp.op_ret = status;
+ glusterd_get_uuid (&rsp.uuid);
+ rsp.op = op;
+ if (op_errstr)
+ rsp.op_errstr = op_errstr;
+ else
+ rsp.op_errstr = "";
+
+ ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to get serialized length of dict");
+ goto out;
+ }
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_v3_pre_val_rsp);
+
+ GF_FREE (rsp.dict.dict_val);
+out:
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Responded to pre validation, ret: %d", ret);
+ return ret;
+}
+
+static int
+glusterd_handle_pre_validate_fn (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gd1_mgmt_v3_pre_val_req op_req = {{0},};
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ char *op_errstr = NULL;
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ ret = xdr_to_generic (req->msg[0], &op_req,
+ (xdrproc_t)xdr_gd1_mgmt_v3_pre_val_req);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to decode pre validation "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (glusterd_friend_find_by_uuid (op_req.uuid, &peerinfo)) {
+ gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa (op_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ dict = dict_new ();
+ if (!dict)
+ goto out;
+
+ ret = dict_unserialize (op_req.dict.dict_val,
+ op_req.dict.dict_len, &dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "failed to unserialize the dictionary");
+ goto out;
+ }
+
+ rsp_dict = dict_new ();
+ if (!rsp_dict) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get new dictionary");
+                ret = -1;
+                goto out;
+ }
+
+ ret = gd_mgmt_v3_pre_validate_fn (op_req.op, dict, &op_errstr,
+ rsp_dict);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Pre Validation failed on operation %s",
+ gd_op_list[op_req.op]);
+ }
+
+ ret = glusterd_mgmt_v3_pre_validate_send_resp (req, op_req.op,
+ ret, op_errstr,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to send Pre Validation "
+ "response for operation %s",
+ gd_op_list[op_req.op]);
+ goto out;
+ }
+
+out:
+ if (op_errstr && (strcmp (op_errstr, "")))
+ GF_FREE (op_errstr);
+
+ free (op_req.dict.dict_val);
+
+ if (dict)
+ dict_unref (dict);
+
+ if (rsp_dict)
+ dict_unref (rsp_dict);
+
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+static int
+glusterd_mgmt_v3_brick_op_send_resp (rpcsvc_request_t *req,
+ int32_t op, int32_t status,
+ char *op_errstr, dict_t *rsp_dict)
+{
+ gd1_mgmt_v3_brick_op_rsp rsp = {{0},};
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ rsp.op_ret = status;
+ glusterd_get_uuid (&rsp.uuid);
+ rsp.op = op;
+ if (op_errstr)
+ rsp.op_errstr = op_errstr;
+ else
+ rsp.op_errstr = "";
+
+ ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to get serialized length of dict");
+ goto out;
+ }
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_v3_brick_op_rsp);
+
+ GF_FREE (rsp.dict.dict_val);
+out:
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Responded to brick op, ret: %d", ret);
+ return ret;
+}
+
+static int
+glusterd_handle_brick_op_fn (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gd1_mgmt_v3_brick_op_req op_req = {{0},};
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ char *op_errstr = NULL;
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ ret = xdr_to_generic (req->msg[0], &op_req,
+ (xdrproc_t)xdr_gd1_mgmt_v3_brick_op_req);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to decode brick op "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (glusterd_friend_find_by_uuid (op_req.uuid, &peerinfo)) {
+ gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa (op_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ dict = dict_new ();
+ if (!dict)
+ goto out;
+
+ ret = dict_unserialize (op_req.dict.dict_val,
+ op_req.dict.dict_len, &dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "failed to unserialize the dictionary");
+ goto out;
+ }
+
+ rsp_dict = dict_new ();
+ if (!rsp_dict) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get new dictionary");
+                ret = -1;
+                goto out;
+ }
+
+ ret = gd_mgmt_v3_brick_op_fn (op_req.op, dict, &op_errstr,
+ rsp_dict);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Brick Op failed on operation %s",
+ gd_op_list[op_req.op]);
+ }
+
+ ret = glusterd_mgmt_v3_brick_op_send_resp (req, op_req.op,
+ ret, op_errstr,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to send brick op "
+ "response for operation %s",
+ gd_op_list[op_req.op]);
+ goto out;
+ }
+
+out:
+ if (op_errstr && (strcmp (op_errstr, "")))
+ GF_FREE (op_errstr);
+
+ free (op_req.dict.dict_val);
+
+ if (dict)
+ dict_unref (dict);
+
+ if (rsp_dict)
+ dict_unref (rsp_dict);
+
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+static int
+glusterd_mgmt_v3_commit_send_resp (rpcsvc_request_t *req,
+ int32_t op, int32_t status,
+ char *op_errstr, dict_t *rsp_dict)
+{
+ gd1_mgmt_v3_commit_rsp rsp = {{0},};
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ rsp.op_ret = status;
+ glusterd_get_uuid (&rsp.uuid);
+ rsp.op = op;
+ if (op_errstr)
+ rsp.op_errstr = op_errstr;
+ else
+ rsp.op_errstr = "";
+
+ ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to get serialized length of dict");
+ goto out;
+ }
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_v3_commit_rsp);
+
+ GF_FREE (rsp.dict.dict_val);
+out:
+ gf_log (this->name, GF_LOG_DEBUG, "Responded to commit, ret: %d", ret);
+ return ret;
+}
+
+static int
+glusterd_handle_commit_fn (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gd1_mgmt_v3_commit_req op_req = {{0},};
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ char *op_errstr = NULL;
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ ret = xdr_to_generic (req->msg[0], &op_req,
+ (xdrproc_t)xdr_gd1_mgmt_v3_commit_req);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to decode commit "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (glusterd_friend_find_by_uuid (op_req.uuid, &peerinfo)) {
+ gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa (op_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ dict = dict_new ();
+ if (!dict)
+ goto out;
+
+ ret = dict_unserialize (op_req.dict.dict_val,
+ op_req.dict.dict_len, &dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "failed to unserialize the dictionary");
+ goto out;
+ }
+
+ rsp_dict = dict_new ();
+ if (!rsp_dict) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get new dictionary");
+                ret = -1;
+                goto out;
+ }
+
+ ret = gd_mgmt_v3_commit_fn (op_req.op, dict, &op_errstr,
+ rsp_dict);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "commit failed on operation %s",
+ gd_op_list[op_req.op]);
+ }
+
+ ret = glusterd_mgmt_v3_commit_send_resp (req, op_req.op,
+ ret, op_errstr,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to send commit "
+ "response for operation %s",
+ gd_op_list[op_req.op]);
+ goto out;
+ }
+
+out:
+ if (op_errstr && (strcmp (op_errstr, "")))
+ GF_FREE (op_errstr);
+
+ free (op_req.dict.dict_val);
+
+ if (dict)
+ dict_unref (dict);
+
+ if (rsp_dict)
+ dict_unref (rsp_dict);
+
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+static int
+glusterd_mgmt_v3_post_validate_send_resp (rpcsvc_request_t *req,
+ int32_t op, int32_t status,
+ char *op_errstr, dict_t *rsp_dict)
+{
+ gd1_mgmt_v3_post_val_rsp rsp = {{0},};
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ rsp.op_ret = status;
+ glusterd_get_uuid (&rsp.uuid);
+ rsp.op = op;
+ if (op_errstr)
+ rsp.op_errstr = op_errstr;
+ else
+ rsp.op_errstr = "";
+
+ ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to get serialized length of dict");
+ goto out;
+ }
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_v3_post_val_rsp);
+
+ GF_FREE (rsp.dict.dict_val);
+out:
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Responded to post validation, ret: %d", ret);
+ return ret;
+}
+
+static int
+glusterd_handle_post_validate_fn (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gd1_mgmt_v3_post_val_req op_req = {{0},};
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ char *op_errstr = NULL;
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ ret = xdr_to_generic (req->msg[0], &op_req,
+ (xdrproc_t)xdr_gd1_mgmt_v3_post_val_req);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to decode post validation "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (glusterd_friend_find_by_uuid (op_req.uuid, &peerinfo)) {
+ gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa (op_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ dict = dict_new ();
+ if (!dict)
+ goto out;
+
+ ret = dict_unserialize (op_req.dict.dict_val,
+ op_req.dict.dict_len, &dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "failed to unserialize the dictionary");
+ goto out;
+ }
+
+ rsp_dict = dict_new ();
+ if (!rsp_dict) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get new dictionary");
+                ret = -1;
+                goto out;
+ }
+
+ ret = gd_mgmt_v3_post_validate_fn (op_req.op, op_req.op_ret, dict,
+ &op_errstr, rsp_dict);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Post Validation failed on operation %s",
+ gd_op_list[op_req.op]);
+ }
+
+ ret = glusterd_mgmt_v3_post_validate_send_resp (req, op_req.op,
+ ret, op_errstr,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to send Post Validation "
+ "response for operation %s",
+ gd_op_list[op_req.op]);
+ goto out;
+ }
+
+out:
+ if (op_errstr && (strcmp (op_errstr, "")))
+ GF_FREE (op_errstr);
+
+ free (op_req.dict.dict_val);
+
+ if (dict)
+ dict_unref (dict);
+
+ if (rsp_dict)
+ dict_unref (rsp_dict);
+
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+static int
+glusterd_mgmt_v3_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
+{
+
+ gd1_mgmt_v3_unlock_rsp rsp = {{0},};
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ rsp.op_ret = status;
+ if (rsp.op_ret)
+ rsp.op_errno = errno;
+
+ glusterd_get_uuid (&rsp.uuid);
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
+
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Responded to mgmt_v3 unlock, ret: %d", ret);
+
+ return ret;
+}
+
+static int
+glusterd_synctasked_mgmt_v3_unlock (rpcsvc_request_t *req,
+                                    gd1_mgmt_v3_unlock_req *unlock_req,
+                                    glusterd_op_lock_ctx_t *ctx)
+{
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (ctx);
+
+ /* Trying to release multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_unlock (ctx->dict, ctx->uuid);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to release mgmt_v3 locks for %s",
+ uuid_utoa(ctx->uuid));
+ }
+
+ ret = glusterd_mgmt_v3_unlock_send_resp (req, ret);
+
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+
+static int
+glusterd_op_state_machine_mgmt_v3_unlock (rpcsvc_request_t *req,
+ gd1_mgmt_v3_unlock_req *lock_req,
+ glusterd_op_lock_ctx_t *ctx)
+{
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_UNLOCK,
+ &lock_req->txn_id, ctx);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to inject event GD_OP_EVENT_UNLOCK");
+
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+static int
+glusterd_handle_mgmt_v3_unlock_fn (rpcsvc_request_t *req)
+{
+ gd1_mgmt_v3_unlock_req lock_req = {{0},};
+ int32_t ret = -1;
+ glusterd_op_lock_ctx_t *ctx = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ gf_boolean_t is_synctasked = _gf_false;
+ gf_boolean_t free_ctx = _gf_false;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ ret = xdr_to_generic (req->msg[0], &lock_req,
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_req);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to decode unlock "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+        gf_log (this->name, GF_LOG_DEBUG, "Received mgmt_v3 unlock req "
+ "from uuid: %s", uuid_utoa (lock_req.uuid));
+
+ if (glusterd_friend_find_by_uuid (lock_req.uuid, &peerinfo)) {
+ gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa (lock_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);
+ if (!ctx) {
+ ret = -1;
+ goto out;
+ }
+
+ uuid_copy (ctx->uuid, lock_req.uuid);
+ ctx->req = req;
+
+ ctx->dict = dict_new ();
+ if (!ctx->dict) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_unserialize (lock_req.dict.dict_val,
+ lock_req.dict.dict_len, &ctx->dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "failed to unserialize the dictionary");
+ goto out;
+ }
+
+ is_synctasked = dict_get_str_boolean (ctx->dict,
+ "is_synctasked", _gf_false);
+ if (is_synctasked) {
+                ret = glusterd_synctasked_mgmt_v3_unlock (req, &lock_req, ctx);
+ /* The above function does not take ownership of ctx.
+ * Therefore we need to free the ctx explicitly. */
+ free_ctx = _gf_true;
+ }
+ else {
+ ret = glusterd_op_state_machine_mgmt_v3_unlock (req, &lock_req,
+ ctx);
+ }
+
+out:
+
+        /* ctx may be NULL if its allocation failed; check it before
+         * touching ctx->dict */
+        if ((ret || free_ctx) && ctx) {
+                if (ctx->dict)
+                        dict_unref (ctx->dict);
+                GF_FREE (ctx);
+        }
+
+ free (lock_req.dict.dict_val);
+
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+int
+glusterd_handle_mgmt_v3_lock (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ glusterd_handle_mgmt_v3_lock_fn);
+}
+
+static int
+glusterd_handle_pre_validate (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ glusterd_handle_pre_validate_fn);
+}
+
+static int
+glusterd_handle_brick_op (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ glusterd_handle_brick_op_fn);
+}
+
+static int
+glusterd_handle_commit (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ glusterd_handle_commit_fn);
+}
+
+static int
+glusterd_handle_post_validate (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ glusterd_handle_post_validate_fn);
+}
+
+int
+glusterd_handle_mgmt_v3_unlock (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ glusterd_handle_mgmt_v3_unlock_fn);
+}
+
+rpcsvc_actor_t gd_svc_mgmt_v3_actors[] = {
+ [GLUSTERD_MGMT_V3_NULL] = { "NULL", GLUSTERD_MGMT_V3_NULL, glusterd_mgmt_v3_null, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_LOCK] = { "MGMT_V3_LOCK", GLUSTERD_MGMT_V3_LOCK, glusterd_handle_mgmt_v3_lock, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_PRE_VALIDATE] = { "PRE_VAL", GLUSTERD_MGMT_V3_PRE_VALIDATE, glusterd_handle_pre_validate, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_BRICK_OP] = { "BRCK_OP", GLUSTERD_MGMT_V3_BRICK_OP, glusterd_handle_brick_op, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_COMMIT] = { "COMMIT", GLUSTERD_MGMT_V3_COMMIT, glusterd_handle_commit, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_POST_VALIDATE] = { "POST_VAL", GLUSTERD_MGMT_V3_POST_VALIDATE, glusterd_handle_post_validate, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_UNLOCK] = { "MGMT_V3_UNLOCK", GLUSTERD_MGMT_V3_UNLOCK, glusterd_handle_mgmt_v3_unlock, NULL, 0, DRC_NA},
+};
+
+struct rpcsvc_program gd_svc_mgmt_v3_prog = {
+ .progname = "GlusterD svc mgmt v3",
+ .prognum = GD_MGMT_PROGRAM,
+ .progver = GD_MGMT_V3_VERSION,
+ .numactors = GLUSTERD_MGMT_V3_MAXVALUE,
+ .actors = gd_svc_mgmt_v3_actors,
+ .synctask = _gf_true,
+};
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
new file mode 100644
index 000000000..5295f889e
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
@@ -0,0 +1,1899 @@
+/*
+ Copyright (c) 2013-2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+/* rpc related syncops */
+#include "rpc-clnt.h"
+#include "protocol-common.h"
+#include "xdr-generic.h"
+#include "glusterd1-xdr.h"
+#include "glusterd-syncop.h"
+
+#include "glusterd.h"
+#include "glusterd-utils.h"
+#include "glusterd-locks.h"
+#include "glusterd-mgmt.h"
+#include "glusterd-op-sm.h"
+
+extern struct rpc_clnt_program gd_mgmt_v3_prog;
+
+
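+/* Collates per-peer phase failures into args->errstr; the aggregate reads
+ * like (illustrative): "Locking failed on host1. Error: ...\nLocking
+ * failed on host2. Please check log file for details." */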
+static void
+gd_mgmt_v3_collate_errors (struct syncargs *args, int op_ret, int op_errno,
+ char *op_errstr, int op_code,
+ glusterd_peerinfo_t *peerinfo, u_char *uuid)
+{
+ char *peer_str = NULL;
+ char err_str[PATH_MAX] = "Please check log file for details.";
+ char op_err[PATH_MAX] = "";
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (args);
+ GF_ASSERT (uuid);
+
+ if (op_ret) {
+ args->op_ret = op_ret;
+ args->op_errno = op_errno;
+
+ if (peerinfo)
+ peer_str = peerinfo->hostname;
+ else
+ peer_str = uuid_utoa (uuid);
+
+                if (op_errstr && strcmp (op_errstr, "")) {
+                        /* snprintf NUL-terminates within the given size, so
+                         * the manual "[len] = '\0'" termination previously
+                         * used in this function was redundant, and unsafe on
+                         * truncation (snprintf returns the untruncated
+                         * length). It has been dropped throughout. */
+                        snprintf (err_str, sizeof (err_str),
+                                  "Error: %s", op_errstr);
+                }
+
+                switch (op_code) {
+                        case GLUSTERD_MGMT_V3_LOCK:
+                        {
+                                snprintf (op_err, sizeof (op_err),
+                                          "Locking failed "
+                                          "on %s. %s", peer_str, err_str);
+                                break;
+                        }
+                        case GLUSTERD_MGMT_V3_PRE_VALIDATE:
+                        {
+                                snprintf (op_err, sizeof (op_err),
+                                          "Pre Validation failed "
+                                          "on %s. %s", peer_str, err_str);
+                                break;
+                        }
+                        case GLUSTERD_MGMT_V3_BRICK_OP:
+                        {
+                                snprintf (op_err, sizeof (op_err),
+                                          "Brick ops failed "
+                                          "on %s. %s", peer_str, err_str);
+                                break;
+                        }
+                        case GLUSTERD_MGMT_V3_COMMIT:
+                        {
+                                snprintf (op_err, sizeof (op_err),
+                                          "Commit failed on %s. %s",
+                                          peer_str, err_str);
+                                break;
+                        }
+                        case GLUSTERD_MGMT_V3_POST_VALIDATE:
+                        {
+                                snprintf (op_err, sizeof (op_err),
+                                          "Post Validation failed "
+                                          "on %s. %s", peer_str, err_str);
+                                break;
+                        }
+                        case GLUSTERD_MGMT_V3_UNLOCK:
+                        {
+                                snprintf (op_err, sizeof (op_err),
+                                          "Unlocking failed "
+                                          "on %s. %s", peer_str, err_str);
+                                break;
+                        }
+                }
+
+                if (args->errstr) {
+                        snprintf (err_str, sizeof (err_str),
+                                  "%s\n%s", args->errstr, op_err);
+                        GF_FREE (args->errstr);
+                        args->errstr = NULL;
+                } else
+                        snprintf (err_str, sizeof (err_str),
+                                  "%s", op_err);
+
+ gf_log (this->name, GF_LOG_ERROR, "%s", op_err);
+ args->errstr = gf_strdup (err_str);
+ }
+
+ return;
+}
+
+int32_t
+gd_mgmt_v3_pre_validate_fn (glusterd_op_t op, dict_t *dict,
+ char **op_errstr, dict_t *rsp_dict)
+{
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (rsp_dict);
+
+ switch (op) {
+ case GD_OP_SNAP:
+ ret = glusterd_snapshot_prevalidate (dict, op_errstr,
+ rsp_dict);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Snapshot Prevalidate Failed");
+ goto out;
+ }
+
+ break;
+
+ default:
+ break;
+ }
+
+ ret = 0;
+out:
+ gf_log (this->name, GF_LOG_DEBUG, "OP = %d. Returning %d", op, ret);
+ return ret;
+}
+
+int32_t
+gd_mgmt_v3_brick_op_fn (glusterd_op_t op, dict_t *dict,
+ char **op_errstr, dict_t *rsp_dict)
+{
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (rsp_dict);
+
+ switch (op) {
+ case GD_OP_SNAP:
+ {
+ ret = glusterd_snapshot_brickop (dict, op_errstr, rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "snapshot brickop "
+ "failed");
+ goto out;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ ret = 0;
+out:
+ gf_log (this->name, GF_LOG_TRACE, "OP = %d. Returning %d", op, ret);
+ return ret;
+}
+
+int32_t
+gd_mgmt_v3_commit_fn (glusterd_op_t op, dict_t *dict,
+ char **op_errstr, dict_t *rsp_dict)
+{
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (rsp_dict);
+
+ switch (op) {
+ case GD_OP_SNAP:
+ {
+ ret = glusterd_snapshot (dict, op_errstr, rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Snapshot Commit Failed");
+ goto out;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ ret = 0;
+out:
+ gf_log (this->name, GF_LOG_DEBUG, "OP = %d. Returning %d", op, ret);
+ return ret;
+}
+
+int32_t
+gd_mgmt_v3_post_validate_fn (glusterd_op_t op, int32_t op_ret, dict_t *dict,
+ char **op_errstr, dict_t *rsp_dict)
+{
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (rsp_dict);
+
+ switch (op) {
+ case GD_OP_SNAP:
+ {
+ ret = glusterd_snapshot_postvalidate (dict, op_ret,
+ op_errstr,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "postvalidate operation failed");
+ goto out;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ ret = 0;
+
+out:
+ gf_log (this->name, GF_LOG_TRACE, "OP = %d. Returning %d", op, ret);
+ return ret;
+}
+
+int32_t
+gd_mgmt_v3_lock_cbk_fn (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ int32_t ret = -1;
+ struct syncargs *args = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ gd1_mgmt_v3_lock_rsp rsp = {{0},};
+ call_frame_t *frame = NULL;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (myframe);
+
+        /* Even when the lock command has failed, args->op_ret and
+           args->op_errno are used while collating the errors
+           (gd_mgmt_v3_collate_errors). @args comes from frame->local, so it
+           must be assigned before checking the request status and bailing
+           out on failure; otherwise args would be NULL during collation.
+           The same applies to the other phases: prevalidate, brickop,
+           commit and postvalidate.
+        */
+ frame = myframe;
+ args = frame->local;
+ peerinfo = frame->cookie;
+ frame->local = NULL;
+ frame->cookie = NULL;
+
+ if (-1 == req->rpc_status) {
+ op_errno = ENOTCONN;
+ goto out;
+ }
+
+ if (!iov) {
+ gf_log (this->name, GF_LOG_ERROR, "iov is NULL");
+ op_errno = EINVAL;
+ goto out;
+ }
+
+ ret = xdr_to_generic (*iov, &rsp,
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
+ if (ret < 0)
+ goto out;
+
+ uuid_copy (args->uuid, rsp.uuid);
+
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+
+out:
+ gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
+ GLUSTERD_MGMT_V3_LOCK,
+ peerinfo, rsp.uuid);
+ if (rsp.dict.dict_val)
+ free (rsp.dict.dict_val);
+ STACK_DESTROY (frame->root);
+ synctask_barrier_wake(args);
+ return 0;
+}
+
+int32_t
+gd_mgmt_v3_lock_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ return glusterd_big_locked_cbk (req, iov, count, myframe,
+ gd_mgmt_v3_lock_cbk_fn);
+}
+
+int
+gd_mgmt_v3_lock (glusterd_op_t op, dict_t *op_ctx,
+ glusterd_peerinfo_t *peerinfo,
+ struct syncargs *args, uuid_t my_uuid,
+ uuid_t recv_uuid)
+{
+ gd1_mgmt_v3_lock_req req = {{0},};
+ glusterd_conf_t *conf = THIS->private;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (op_ctx);
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (args);
+
+ ret = dict_allocate_and_serialize (op_ctx,
+ &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret)
+ goto out;
+
+ uuid_copy (req.uuid, my_uuid);
+ req.op = op;
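+        /* Drop conf->big_lock around the async submit: the reply callback
+         * runs via glusterd_big_locked_cbk, which re-acquires big_lock
+         * before doing any work. The same pattern repeats in every phase's
+         * request function in this file. */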
+ synclock_unlock (&conf->big_lock);
+ ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo,
+ &gd_mgmt_v3_prog,
+ GLUSTERD_MGMT_V3_LOCK,
+ gd_mgmt_v3_lock_cbk,
+ (xdrproc_t) xdr_gd1_mgmt_v3_lock_req);
+ synclock_lock (&conf->big_lock);
+out:
+ GF_FREE (req.dict.dict_val);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+int
+glusterd_mgmt_v3_initiate_lockdown (glusterd_conf_t *conf, glusterd_op_t op,
+ dict_t *dict, char **op_errstr, int npeers,
+ gf_boolean_t *is_acquired)
+{
+ char *volname = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
+ struct syncargs args = {0};
+ struct list_head *peers = NULL;
+ uuid_t peer_uuid = {0};
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (conf);
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (is_acquired);
+
+ peers = &conf->xaction_peers;
+
+ /* Trying to acquire multiple mgmt_v3 locks on local node */
+ ret = glusterd_multiple_mgmt_v3_lock (dict, MY_UUID);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to acquire mgmt_v3 locks on localhost");
+ goto out;
+ }
+
+ *is_acquired = _gf_true;
+
+ if (!npeers) {
+ ret = 0;
+ goto out;
+ }
+
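+        /* Fan-out/barrier pattern used by every phase in this file: init a
+         * synctask barrier, fire one non-blocking request per peer in
+         * xaction_peers, then wait for peer_cnt wakeups (each callback
+         * calls synctask_barrier_wake). */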
+ /* Sending mgmt_v3 lock req to other nodes in the cluster */
+ gd_syncargs_init (&args, NULL);
+ synctask_barrier_init((&args));
+ peer_cnt = 0;
+ list_for_each_entry (peerinfo, peers, op_peers_list) {
+ gd_mgmt_v3_lock (op, dict, peerinfo, &args,
+ MY_UUID, peer_uuid);
+ peer_cnt++;
+ }
+ gd_synctask_barrier_wait((&args), peer_cnt);
+
+ if (args.errstr)
+ *op_errstr = gf_strdup (args.errstr);
+
+ ret = args.op_ret;
+
+ gf_log (this->name, GF_LOG_DEBUG, "Sent lock op req for %s "
+ "to %d peers. Returning %d", gd_op_list[op], peer_cnt, ret);
+out:
+ if (ret) {
+ if (*op_errstr)
+ gf_log (this->name, GF_LOG_ERROR, "%s",
+ *op_errstr);
+
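+                /* Note: volname is never assigned in this function, so the
+                 * volume-specific message below is effectively dead code;
+                 * the generic message is what reaches the CLI. */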
+ if (volname)
+                        ret = gf_asprintf (op_errstr,
+                                           "Another transaction is in progress "
+                                           "for %s. Please try again after "
+                                           "some time.", volname);
+                else
+                        ret = gf_asprintf (op_errstr,
+                                           "Another transaction is in progress. "
+                                           "Please try again after some time.");
+
+ if (ret == -1)
+ *op_errstr = NULL;
+
+ ret = -1;
+ }
+
+ return ret;
+}
+
+int
+glusterd_pre_validate_aggr_rsp_dict (glusterd_op_t op,
+ dict_t *aggr, dict_t *rsp)
+{
+ int32_t ret = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (aggr);
+ GF_ASSERT (rsp);
+
+ switch (op) {
+ case GD_OP_SNAP:
+ ret = glusterd_snap_pre_validate_use_rsp_dict (aggr, rsp);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to aggregate prevalidate "
+ "response dictionaries.");
+ goto out;
+ }
+ break;
+ default:
+ ret = -1;
+ gf_log (this->name, GF_LOG_ERROR, "Invalid op (%s)",
+ gd_op_list[op]);
+
+ break;
+ }
+out:
+ return ret;
+}
+
+int32_t
+gd_mgmt_v3_pre_validate_cbk_fn (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ int32_t ret = -1;
+ struct syncargs *args = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ gd1_mgmt_v3_pre_val_rsp rsp = {{0},};
+ call_frame_t *frame = NULL;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ dict_t *rsp_dict = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (myframe);
+
+ frame = myframe;
+ args = frame->local;
+ peerinfo = frame->cookie;
+ frame->local = NULL;
+ frame->cookie = NULL;
+
+ if (-1 == req->rpc_status) {
+ op_errno = ENOTCONN;
+ goto out;
+ }
+
+        if (!iov) {
+                gf_log (this->name, GF_LOG_ERROR, "iov is NULL");
+                op_errno = EINVAL;
+                goto out;
+        }
+
+ ret = xdr_to_generic (*iov, &rsp,
+ (xdrproc_t)xdr_gd1_mgmt_v3_pre_val_rsp);
+ if (ret < 0)
+ goto out;
+
+ if (rsp.dict.dict_len) {
+ /* Unserialize the dictionary */
+ rsp_dict = dict_new ();
+
+ ret = dict_unserialize (rsp.dict.dict_val,
+ rsp.dict.dict_len,
+ &rsp_dict);
+ if (ret < 0) {
+ free (rsp.dict.dict_val);
+ goto out;
+ } else {
+ rsp_dict->extra_stdfree = rsp.dict.dict_val;
+ }
+ }
+
+ uuid_copy (args->uuid, rsp.uuid);
+ pthread_mutex_lock (&args->lock_dict);
+ {
+ ret = glusterd_pre_validate_aggr_rsp_dict (rsp.op, args->dict,
+ rsp_dict);
+ }
+ pthread_mutex_unlock (&args->lock_dict);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "%s",
+ "Failed to aggregate response from "
+ " node/brick");
+ if (!rsp.op_ret)
+ op_ret = ret;
+ else {
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+ }
+ } else {
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+ }
+
+out:
+ if (rsp_dict)
+ dict_unref (rsp_dict);
+
+ gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
+ GLUSTERD_MGMT_V3_PRE_VALIDATE,
+ peerinfo, rsp.uuid);
+
+ if (rsp.op_errstr)
+ free (rsp.op_errstr);
+
+ STACK_DESTROY (frame->root);
+ synctask_barrier_wake(args);
+ return 0;
+}
+
+int32_t
+gd_mgmt_v3_pre_validate_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ return glusterd_big_locked_cbk (req, iov, count, myframe,
+ gd_mgmt_v3_pre_validate_cbk_fn);
+}
+
+int
+gd_mgmt_v3_pre_validate_req (glusterd_op_t op, dict_t *op_ctx,
+ glusterd_peerinfo_t *peerinfo,
+ struct syncargs *args, uuid_t my_uuid,
+ uuid_t recv_uuid)
+{
+ int32_t ret = -1;
+ gd1_mgmt_v3_pre_val_req req = {{0},};
+ glusterd_conf_t *conf = THIS->private;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (op_ctx);
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (args);
+
+ ret = dict_allocate_and_serialize (op_ctx,
+ &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret)
+ goto out;
+
+ uuid_copy (req.uuid, my_uuid);
+ req.op = op;
+ synclock_unlock (&conf->big_lock);
+ ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo,
+ &gd_mgmt_v3_prog,
+ GLUSTERD_MGMT_V3_PRE_VALIDATE,
+ gd_mgmt_v3_pre_validate_cbk,
+ (xdrproc_t) xdr_gd1_mgmt_v3_pre_val_req);
+ synclock_lock (&conf->big_lock);
+out:
+ GF_FREE (req.dict.dict_val);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+int
+glusterd_mgmt_v3_pre_validate (glusterd_conf_t *conf, glusterd_op_t op,
+ dict_t *req_dict, char **op_errstr, int npeers)
+{
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
+ dict_t *rsp_dict = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ struct syncargs args = {0};
+ struct list_head *peers = NULL;
+ uuid_t peer_uuid = {0};
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (conf);
+ GF_ASSERT (req_dict);
+ GF_ASSERT (op_errstr);
+
+ peers = &conf->xaction_peers;
+
+ rsp_dict = dict_new ();
+ if (!rsp_dict) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to create response dictionary");
+ goto out;
+ }
+
+ /* Pre Validation on local node */
+ ret = gd_mgmt_v3_pre_validate_fn (op, req_dict, op_errstr,
+ rsp_dict);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Pre Validation failed for "
+ "operation %s on local node",
+ gd_op_list[op]);
+
+ if (*op_errstr == NULL) {
+ ret = gf_asprintf (op_errstr,
+ "Pre-validation failed "
+ "on localhost. Please "
+ "check log file for details");
+ if (ret == -1)
+ *op_errstr = NULL;
+
+ ret = -1;
+ }
+ goto out;
+ }
+
+ ret = glusterd_pre_validate_aggr_rsp_dict (op, req_dict,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "%s",
+ "Failed to aggregate response from "
+ " node/brick");
+ goto out;
+ }
+
+ dict_unref (rsp_dict);
+ rsp_dict = NULL;
+
+ if (!npeers) {
+ ret = 0;
+ goto out;
+ }
+
+ /* Sending Pre Validation req to other nodes in the cluster */
+ gd_syncargs_init (&args, req_dict);
+ synctask_barrier_init((&args));
+ peer_cnt = 0;
+ list_for_each_entry (peerinfo, peers, op_peers_list) {
+ gd_mgmt_v3_pre_validate_req (op, req_dict, peerinfo, &args,
+ MY_UUID, peer_uuid);
+ peer_cnt++;
+ }
+ gd_synctask_barrier_wait((&args), peer_cnt);
+
+ if (args.op_ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Pre Validation failed on peers");
+
+ if (args.errstr)
+ *op_errstr = gf_strdup (args.errstr);
+ }
+
+ ret = args.op_ret;
+
+ gf_log (this->name, GF_LOG_DEBUG, "Sent pre valaidation req for %s "
+ "to %d peers. Returning %d", gd_op_list[op], peer_cnt, ret);
+out:
+ return ret;
+}
+
+int
+glusterd_mgmt_v3_build_payload (dict_t **req, char **op_errstr, dict_t *dict,
+ glusterd_op_t op)
+{
+ int32_t ret = -1;
+ dict_t *req_dict = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (dict);
+
+ req_dict = dict_new ();
+ if (!req_dict)
+ goto out;
+
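+        /* For GD_OP_SNAP the whole incoming dict becomes the payload; ops
+         * added later would populate req_dict selectively here. */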
+ switch (op) {
+ case GD_OP_SNAP:
+ dict_copy (dict, req_dict);
+ break;
+ default:
+ break;
+ }
+
+ *req = req_dict;
+ ret = 0;
+out:
+ return ret;
+}
+
+int32_t
+gd_mgmt_v3_brick_op_cbk_fn (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ int32_t ret = -1;
+ struct syncargs *args = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ gd1_mgmt_v3_brick_op_rsp rsp = {{0},};
+ call_frame_t *frame = NULL;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (myframe);
+
+ frame = myframe;
+ args = frame->local;
+ peerinfo = frame->cookie;
+ frame->local = NULL;
+ frame->cookie = NULL;
+
+ /* If the operation failed, then iov can be NULL. So better check the
+ status of the operation and then worry about iov (if the status of
+ the command is success)
+ */
+ if (-1 == req->rpc_status) {
+ op_errno = ENOTCONN;
+ goto out;
+ }
+
+ if (!iov) {
+ gf_log (this->name, GF_LOG_ERROR, "iov is NULL");
+ op_errno = EINVAL;
+ goto out;
+ }
+
+ ret = xdr_to_generic (*iov, &rsp,
+ (xdrproc_t)xdr_gd1_mgmt_v3_brick_op_rsp);
+ if (ret < 0)
+ goto out;
+
+ uuid_copy (args->uuid, rsp.uuid);
+
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+
+out:
+ gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
+ GLUSTERD_MGMT_V3_BRICK_OP,
+ peerinfo, rsp.uuid);
+
+ if (rsp.op_errstr)
+ free (rsp.op_errstr);
+
+ if (rsp.dict.dict_val)
+ free (rsp.dict.dict_val);
+
+ STACK_DESTROY (frame->root);
+ synctask_barrier_wake(args);
+ return 0;
+}
+
+int32_t
+gd_mgmt_v3_brick_op_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ return glusterd_big_locked_cbk (req, iov, count, myframe,
+ gd_mgmt_v3_brick_op_cbk_fn);
+}
+
+int
+gd_mgmt_v3_brick_op_req (glusterd_op_t op, dict_t *op_ctx,
+ glusterd_peerinfo_t *peerinfo,
+ struct syncargs *args, uuid_t my_uuid,
+ uuid_t recv_uuid)
+{
+ int32_t ret = -1;
+ gd1_mgmt_v3_brick_op_req req = {{0},};
+ glusterd_conf_t *conf = THIS->private;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (op_ctx);
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (args);
+
+ ret = dict_allocate_and_serialize (op_ctx,
+ &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret)
+ goto out;
+
+ uuid_copy (req.uuid, my_uuid);
+ req.op = op;
+ synclock_unlock (&conf->big_lock);
+ ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo,
+ &gd_mgmt_v3_prog,
+ GLUSTERD_MGMT_V3_BRICK_OP,
+ gd_mgmt_v3_brick_op_cbk,
+ (xdrproc_t) xdr_gd1_mgmt_v3_brick_op_req);
+ synclock_lock (&conf->big_lock);
+out:
+ GF_FREE (req.dict.dict_val);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+int
+glusterd_mgmt_v3_brick_op (glusterd_conf_t *conf, glusterd_op_t op,
+ dict_t *req_dict, char **op_errstr, int npeers)
+{
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
+ dict_t *rsp_dict = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ struct syncargs args = {0};
+ struct list_head *peers = NULL;
+ uuid_t peer_uuid = {0};
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (conf);
+ GF_ASSERT (req_dict);
+ GF_ASSERT (op_errstr);
+
+ peers = &conf->xaction_peers;
+
+ rsp_dict = dict_new ();
+ if (!rsp_dict) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to create response dictionary");
+ goto out;
+ }
+
+ /* Perform brick op on local node */
+ ret = gd_mgmt_v3_brick_op_fn (op, req_dict, op_errstr,
+ rsp_dict);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Brick ops failed for "
+ "operation %s on local node",
+ gd_op_list[op]);
+
+ if (*op_errstr == NULL) {
+ ret = gf_asprintf (op_errstr,
+ "Brick ops failed "
+ "on localhost. Please "
+ "check log file for details");
+ if (ret == -1)
+ *op_errstr = NULL;
+
+ ret = -1;
+ }
+ goto out;
+ }
+
+ dict_unref (rsp_dict);
+ rsp_dict = NULL;
+
+ if (!npeers) {
+ ret = 0;
+ goto out;
+ }
+
+ /* Sending brick op req to other nodes in the cluster */
+ gd_syncargs_init (&args, NULL);
+ synctask_barrier_init((&args));
+ peer_cnt = 0;
+ list_for_each_entry (peerinfo, peers, op_peers_list) {
+ gd_mgmt_v3_brick_op_req (op, req_dict, peerinfo, &args,
+ MY_UUID, peer_uuid);
+ peer_cnt++;
+ }
+ gd_synctask_barrier_wait((&args), peer_cnt);
+
+ if (args.op_ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Brick ops failed on peers");
+
+ if (args.errstr)
+ *op_errstr = gf_strdup (args.errstr);
+ }
+
+ ret = args.op_ret;
+
+ gf_log (this->name, GF_LOG_DEBUG, "Sent brick op req for %s "
+ "to %d peers. Returning %d", gd_op_list[op], peer_cnt, ret);
+out:
+ return ret;
+}
+
+int32_t
+gd_mgmt_v3_commit_cbk_fn (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ int32_t ret = -1;
+ struct syncargs *args = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ gd1_mgmt_v3_commit_rsp rsp = {{0},};
+ call_frame_t *frame = NULL;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ dict_t *rsp_dict = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (myframe);
+
+ frame = myframe;
+ args = frame->local;
+ peerinfo = frame->cookie;
+ frame->local = NULL;
+ frame->cookie = NULL;
+
+ if (-1 == req->rpc_status) {
+ op_errno = ENOTCONN;
+ goto out;
+ }
+
+ if (!iov) {
+ gf_log (this->name, GF_LOG_ERROR, "iov is NULL");
+ op_errno = EINVAL;
+ goto out;
+ }
+
+ ret = xdr_to_generic (*iov, &rsp,
+ (xdrproc_t)xdr_gd1_mgmt_v3_commit_rsp);
+ if (ret < 0)
+ goto out;
+
+ if (rsp.dict.dict_len) {
+ /* Unserialize the dictionary */
+ rsp_dict = dict_new ();
+
+ ret = dict_unserialize (rsp.dict.dict_val,
+ rsp.dict.dict_len,
+ &rsp_dict);
+ if (ret < 0) {
+ free (rsp.dict.dict_val);
+ goto out;
+ } else {
+ rsp_dict->extra_stdfree = rsp.dict.dict_val;
+ }
+ }
+
+ uuid_copy (args->uuid, rsp.uuid);
+ pthread_mutex_lock (&args->lock_dict);
+ {
+ ret = glusterd_syncop_aggr_rsp_dict (rsp.op, args->dict,
+ rsp_dict);
+ }
+ pthread_mutex_unlock (&args->lock_dict);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "%s",
+ "Failed to aggregate response from "
+ " node/brick");
+ if (!rsp.op_ret)
+ op_ret = ret;
+ else {
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+ }
+ } else {
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+ }
+
+out:
+ if (rsp_dict)
+ dict_unref (rsp_dict);
+
+ gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
+ GLUSTERD_MGMT_V3_COMMIT,
+ peerinfo, rsp.uuid);
+
+ STACK_DESTROY (frame->root);
+ synctask_barrier_wake(args);
+ return 0;
+}
+
+int32_t
+gd_mgmt_v3_commit_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ return glusterd_big_locked_cbk (req, iov, count, myframe,
+ gd_mgmt_v3_commit_cbk_fn);
+}
+
+int
+gd_mgmt_v3_commit_req (glusterd_op_t op, dict_t *op_ctx,
+ glusterd_peerinfo_t *peerinfo,
+ struct syncargs *args, uuid_t my_uuid,
+ uuid_t recv_uuid)
+{
+ int32_t ret = -1;
+ gd1_mgmt_v3_commit_req req = {{0},};
+ glusterd_conf_t *conf = THIS->private;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (op_ctx);
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (args);
+
+ ret = dict_allocate_and_serialize (op_ctx,
+ &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret)
+ goto out;
+
+ uuid_copy (req.uuid, my_uuid);
+ req.op = op;
+ synclock_unlock (&conf->big_lock);
+ ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo,
+ &gd_mgmt_v3_prog,
+ GLUSTERD_MGMT_V3_COMMIT,
+ gd_mgmt_v3_commit_cbk,
+ (xdrproc_t) xdr_gd1_mgmt_v3_commit_req);
+ synclock_lock (&conf->big_lock);
+out:
+ GF_FREE (req.dict.dict_val);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+int
+glusterd_mgmt_v3_commit (glusterd_conf_t *conf, glusterd_op_t op,
+ dict_t *op_ctx, dict_t *req_dict,
+ char **op_errstr, int npeers)
+{
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
+ dict_t *rsp_dict = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ struct syncargs args = {0};
+ struct list_head *peers = NULL;
+ uuid_t peer_uuid = {0};
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (conf);
+ GF_ASSERT (op_ctx);
+ GF_ASSERT (req_dict);
+ GF_ASSERT (op_errstr);
+
+ peers = &conf->xaction_peers;
+
+ rsp_dict = dict_new ();
+ if (!rsp_dict) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to create response dictionary");
+ goto out;
+ }
+
+ /* Commit on local node */
+ ret = gd_mgmt_v3_commit_fn (op, req_dict, op_errstr,
+ rsp_dict);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Commit failed for "
+ "operation %s on local node",
+ gd_op_list[op]);
+
+ if (*op_errstr == NULL) {
+ ret = gf_asprintf (op_errstr,
+ "Commit failed "
+ "on localhost. Please "
+ "check log file for details.");
+ if (ret == -1)
+ *op_errstr = NULL;
+
+ ret = -1;
+ }
+ goto out;
+ }
+
+ ret = glusterd_syncop_aggr_rsp_dict (op, op_ctx,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "%s",
+ "Failed to aggregate response from "
+ " node/brick");
+ goto out;
+ }
+
+ dict_unref (rsp_dict);
+ rsp_dict = NULL;
+
+ if (!npeers) {
+ ret = 0;
+ goto out;
+ }
+
+ /* Sending commit req to other nodes in the cluster */
+ gd_syncargs_init (&args, op_ctx);
+ synctask_barrier_init((&args));
+ peer_cnt = 0;
+ list_for_each_entry (peerinfo, peers, op_peers_list) {
+ gd_mgmt_v3_commit_req (op, req_dict, peerinfo, &args,
+ MY_UUID, peer_uuid);
+ peer_cnt++;
+ }
+ gd_synctask_barrier_wait((&args), peer_cnt);
+
+ if (args.op_ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Commit failed on peers");
+
+ if (args.errstr)
+ *op_errstr = gf_strdup (args.errstr);
+ }
+
+ ret = args.op_ret;
+
+ gf_log (this->name, GF_LOG_DEBUG, "Sent commit req for %s to %d "
+ "peers. Returning %d", gd_op_list[op], peer_cnt, ret);
+out:
+ return ret;
+}
+
+int32_t
+gd_mgmt_v3_post_validate_cbk_fn (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ int32_t ret = -1;
+ struct syncargs *args = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ gd1_mgmt_v3_post_val_rsp rsp = {{0},};
+ call_frame_t *frame = NULL;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (myframe);
+
+ frame = myframe;
+ args = frame->local;
+ peerinfo = frame->cookie;
+ frame->local = NULL;
+ frame->cookie = NULL;
+
+ if (-1 == req->rpc_status) {
+ op_errno = ENOTCONN;
+ goto out;
+ }
+
+ if (!iov) {
+ gf_log (this->name, GF_LOG_ERROR, "iov is NULL");
+ op_errno = EINVAL;
+ goto out;
+ }
+
+ ret = xdr_to_generic (*iov, &rsp,
+ (xdrproc_t)xdr_gd1_mgmt_v3_post_val_rsp);
+ if (ret < 0)
+ goto out;
+
+ uuid_copy (args->uuid, rsp.uuid);
+
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+
+out:
+ gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
+ GLUSTERD_MGMT_V3_POST_VALIDATE,
+ peerinfo, rsp.uuid);
+ if (rsp.op_errstr)
+ free (rsp.op_errstr);
+
+ if (rsp.dict.dict_val)
+ free (rsp.dict.dict_val);
+ STACK_DESTROY (frame->root);
+ synctask_barrier_wake(args);
+ return 0;
+}
+
+int32_t
+gd_mgmt_v3_post_validate_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ return glusterd_big_locked_cbk (req, iov, count, myframe,
+ gd_mgmt_v3_post_validate_cbk_fn);
+}
+
+int
+gd_mgmt_v3_post_validate_req (glusterd_op_t op, int32_t op_ret, dict_t *op_ctx,
+ glusterd_peerinfo_t *peerinfo,
+ struct syncargs *args, uuid_t my_uuid,
+ uuid_t recv_uuid)
+{
+ int32_t ret = -1;
+ gd1_mgmt_v3_post_val_req req = {{0},};
+ glusterd_conf_t *conf = THIS->private;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (op_ctx);
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (args);
+
+ ret = dict_allocate_and_serialize (op_ctx,
+ &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret)
+ goto out;
+
+ uuid_copy (req.uuid, my_uuid);
+ req.op = op;
+ req.op_ret = op_ret;
+ synclock_unlock (&conf->big_lock);
+ ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo,
+ &gd_mgmt_v3_prog,
+ GLUSTERD_MGMT_V3_POST_VALIDATE,
+ gd_mgmt_v3_post_validate_cbk,
+ (xdrproc_t) xdr_gd1_mgmt_v3_post_val_req);
+ synclock_lock (&conf->big_lock);
+out:
+ GF_FREE (req.dict.dict_val);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+int
+glusterd_mgmt_v3_post_validate (glusterd_conf_t *conf, glusterd_op_t op,
+ int32_t op_ret, dict_t *dict, dict_t *req_dict,
+ char **op_errstr, int npeers)
+{
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
+ dict_t *rsp_dict = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ struct syncargs args = {0};
+ struct list_head *peers = NULL;
+ uuid_t peer_uuid = {0};
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (conf);
+ GF_ASSERT (dict);
+ GF_ASSERT (req_dict);
+ GF_ASSERT (op_errstr);
+
+ peers = &conf->xaction_peers;
+ GF_ASSERT (peers);
+
+ rsp_dict = dict_new ();
+ if (!rsp_dict) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to create response dictionary");
+ goto out;
+ }
+
+        /* Copy the contents of dict (e.g. missed-snaps info) into req_dict */
+ dict_copy (dict, req_dict);
+
+ /* Post Validation on local node */
+ ret = gd_mgmt_v3_post_validate_fn (op, op_ret, req_dict, op_errstr,
+ rsp_dict);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Post Validation failed for "
+ "operation %s on local node",
+ gd_op_list[op]);
+
+ if (*op_errstr == NULL) {
+ ret = gf_asprintf (op_errstr,
+ "Post-validation failed "
+ "on localhost. Please check "
+ "log file for details");
+ if (ret == -1)
+ *op_errstr = NULL;
+
+ ret = -1;
+ }
+ goto out;
+ }
+
+ dict_unref (rsp_dict);
+ rsp_dict = NULL;
+
+ if (!npeers) {
+ ret = 0;
+ goto out;
+ }
+
+ /* Sending Post Validation req to other nodes in the cluster */
+ gd_syncargs_init (&args, req_dict);
+ synctask_barrier_init((&args));
+ peer_cnt = 0;
+ list_for_each_entry (peerinfo, peers, op_peers_list) {
+ gd_mgmt_v3_post_validate_req (op, op_ret, req_dict, peerinfo,
+ &args, MY_UUID, peer_uuid);
+ peer_cnt++;
+ }
+ gd_synctask_barrier_wait((&args), peer_cnt);
+
+ if (args.op_ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Post Validation failed on peers");
+
+ if (args.errstr)
+ *op_errstr = gf_strdup (args.errstr);
+ }
+
+ ret = args.op_ret;
+
+ gf_log (this->name, GF_LOG_DEBUG, "Sent post valaidation req for %s "
+ "to %d peers. Returning %d", gd_op_list[op], peer_cnt, ret);
+out:
+ return ret;
+}
+
+int32_t
+gd_mgmt_v3_unlock_cbk_fn (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ int32_t ret = -1;
+ struct syncargs *args = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ gd1_mgmt_v3_unlock_rsp rsp = {{0},};
+ call_frame_t *frame = NULL;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (myframe);
+
+ frame = myframe;
+ args = frame->local;
+ peerinfo = frame->cookie;
+ frame->local = NULL;
+ frame->cookie = NULL;
+
+ if (-1 == req->rpc_status) {
+ op_errno = ENOTCONN;
+ goto out;
+ }
+
+ if (!iov) {
+ gf_log (this->name, GF_LOG_ERROR, "iov is NULL");
+ op_errno = EINVAL;
+ goto out;
+ }
+
+ ret = xdr_to_generic (*iov, &rsp,
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
+ if (ret < 0)
+ goto out;
+
+ uuid_copy (args->uuid, rsp.uuid);
+
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+
+out:
+ gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
+ GLUSTERD_MGMT_V3_UNLOCK,
+ peerinfo, rsp.uuid);
+ if (rsp.dict.dict_val)
+ free (rsp.dict.dict_val);
+ STACK_DESTROY (frame->root);
+ synctask_barrier_wake(args);
+ return 0;
+}
+
+int32_t
+gd_mgmt_v3_unlock_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ return glusterd_big_locked_cbk (req, iov, count, myframe,
+ gd_mgmt_v3_unlock_cbk_fn);
+}
+
+int
+gd_mgmt_v3_unlock (glusterd_op_t op, dict_t *op_ctx,
+ glusterd_peerinfo_t *peerinfo,
+ struct syncargs *args, uuid_t my_uuid,
+ uuid_t recv_uuid)
+{
+ int32_t ret = -1;
+ gd1_mgmt_v3_unlock_req req = {{0},};
+ glusterd_conf_t *conf = THIS->private;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (op_ctx);
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (args);
+
+ ret = dict_allocate_and_serialize (op_ctx,
+ &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret)
+ goto out;
+
+ uuid_copy (req.uuid, my_uuid);
+ req.op = op;
+ synclock_unlock (&conf->big_lock);
+ ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo,
+ &gd_mgmt_v3_prog,
+ GLUSTERD_MGMT_V3_UNLOCK,
+ gd_mgmt_v3_unlock_cbk,
+ (xdrproc_t) xdr_gd1_mgmt_v3_unlock_req);
+ synclock_lock (&conf->big_lock);
+out:
+ GF_FREE (req.dict.dict_val);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+int
+glusterd_mgmt_v3_release_peer_locks (glusterd_conf_t *conf, glusterd_op_t op,
+ dict_t *dict, int32_t op_ret,
+ char **op_errstr, int npeers,
+ gf_boolean_t is_acquired)
+{
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
+ uuid_t peer_uuid = {0};
+ xlator_t *this = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ struct syncargs args = {0};
+ struct list_head *peers = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (conf);
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+
+ peers = &conf->xaction_peers;
+
+ /* If the lock has not been held during this
+ * transaction, do not send unlock requests */
+ if (!is_acquired)
+ goto out;
+
+ if (!npeers) {
+ ret = 0;
+ goto out;
+ }
+
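+        /* Peer unlocks only: locks taken on the local node are released
+         * separately by the caller via glusterd_multiple_mgmt_v3_unlock. */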
+ /* Sending mgmt_v3 unlock req to other nodes in the cluster */
+ gd_syncargs_init (&args, NULL);
+ synctask_barrier_init((&args));
+ peer_cnt = 0;
+ list_for_each_entry (peerinfo, peers, op_peers_list) {
+ gd_mgmt_v3_unlock (op, dict, peerinfo, &args,
+ MY_UUID, peer_uuid);
+ peer_cnt++;
+ }
+ gd_synctask_barrier_wait((&args), peer_cnt);
+
+ if (args.op_ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unlock failed on peers");
+
+ if (!op_ret && args.errstr)
+ *op_errstr = gf_strdup (args.errstr);
+ }
+
+ ret = args.op_ret;
+
+ gf_log (this->name, GF_LOG_DEBUG, "Sent unlock op req for %s "
+ "to %d peers. Returning %d", gd_op_list[op], peer_cnt, ret);
+
+out:
+ return ret;
+}
+
+int32_t
+glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
+ dict_t *dict)
+{
+ int32_t ret = -1;
+ int32_t op_ret = -1;
+ int32_t npeers = 0;
+ dict_t *req_dict = NULL;
+ dict_t *tmp_dict = NULL;
+ glusterd_conf_t *conf = NULL;
+ char *op_errstr = NULL;
+ xlator_t *this = NULL;
+ gf_boolean_t is_acquired = _gf_false;
+ uuid_t *originator_uuid = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (dict);
+ conf = this->private;
+ GF_ASSERT (conf);
+
+ /* Save the MY_UUID as the originator_uuid. This originator_uuid
+ * will be used by is_origin_glusterd() to determine if a node
+ * is the originator node for a command. */
+ originator_uuid = GF_CALLOC (1, sizeof(uuid_t),
+ gf_common_mt_uuid_t);
+ if (!originator_uuid) {
+ ret = -1;
+ goto out;
+ }
+
+ uuid_copy (*originator_uuid, MY_UUID);
+ ret = dict_set_bin (dict, "originator_uuid",
+ originator_uuid, sizeof (uuid_t));
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set originator_uuid.");
+ goto out;
+ }
+
+ /* Marking the operation as complete synctasked */
+ ret = dict_set_int32 (dict, "is_synctasked", _gf_true);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set synctasked flag.");
+ goto out;
+ }
+
+        /* Use a copy (tmp_dict) for the local unlock: the CLI response may
+         * be sent before the unlock, by which time the volname entry may
+         * have been removed from the original dict */
+ tmp_dict = dict_new();
+ if (!tmp_dict) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to create dict");
+ goto out;
+ }
+ dict_copy (dict, tmp_dict);
+
+ /* BUILD PEERS LIST */
+ INIT_LIST_HEAD (&conf->xaction_peers);
+ npeers = gd_build_peers_list (&conf->peers, &conf->xaction_peers, op);
+
+ /* LOCKDOWN PHASE - Acquire mgmt_v3 locks */
+ ret = glusterd_mgmt_v3_initiate_lockdown (conf, op, dict, &op_errstr,
+ npeers, &is_acquired);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "mgmt_v3 lockdown failed.");
+ goto out;
+ }
+
+ /* BUILD PAYLOAD */
+ ret = glusterd_mgmt_v3_build_payload (&req_dict, &op_errstr, dict, op);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, LOGSTR_BUILD_PAYLOAD,
+ gd_op_list[op]);
+ if (op_errstr == NULL)
+ gf_asprintf (&op_errstr, OPERRSTR_BUILD_PAYLOAD);
+ goto out;
+ }
+
+ /* PRE-COMMIT VALIDATE PHASE */
+ ret = glusterd_mgmt_v3_pre_validate (conf, op, req_dict,
+ &op_errstr, npeers);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Pre Validation Failed");
+ goto out;
+ }
+
+ /* COMMIT OP PHASE */
+ ret = glusterd_mgmt_v3_commit (conf, op, dict, req_dict,
+ &op_errstr, npeers);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Commit Op Failed");
+ goto out;
+ }
+
+ /* POST-COMMIT VALIDATE PHASE */
+        /* As of now post_validate handles no command other than snapshot,
+           so op_ret is passed as 0.
+        */
+ ret = glusterd_mgmt_v3_post_validate (conf, op, 0, dict, req_dict,
+ &op_errstr, npeers);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Post Validation Failed");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ op_ret = ret;
+ /* UNLOCK PHASE FOR PEERS*/
+ (void) glusterd_mgmt_v3_release_peer_locks (conf, op, dict,
+ op_ret, &op_errstr,
+ npeers, is_acquired);
+
+ /* LOCAL VOLUME(S) UNLOCK */
+ if (is_acquired) {
+ /* Trying to release multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_unlock (tmp_dict, MY_UUID);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to release mgmt_v3 locks on localhost");
+ op_ret = ret;
+ }
+ }
+
+ /* SEND CLI RESPONSE */
+ glusterd_op_send_cli_response (op, op_ret, 0, req, dict, op_errstr);
+
+ if (req_dict)
+ dict_unref (req_dict);
+
+ if (tmp_dict)
+ dict_unref (tmp_dict);
+
+ if (op_errstr) {
+ GF_FREE (op_errstr);
+ op_errstr = NULL;
+ }
+
+ return 0;
+}
+
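+/* Snapshot variant of the phase driver above: same skeleton as
+ * glusterd_mgmt_v3_initiate_all_phases, but wraps the commit phase in two
+ * brick-op phases ("barrier" set to 1 before commit, 0 after) so the
+ * snapshot is taken while the bricks are quiesced. */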
+int32_t
+glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
+ dict_t *dict)
+{
+ int32_t ret = -1;
+ int32_t op_ret = -1;
+ int32_t npeers = 0;
+ dict_t *req_dict = NULL;
+ dict_t *tmp_dict = NULL;
+ glusterd_conf_t *conf = NULL;
+ char *op_errstr = NULL;
+ xlator_t *this = NULL;
+ gf_boolean_t is_acquired = _gf_false;
+ uuid_t *originator_uuid = NULL;
+ gf_boolean_t success = _gf_false;
+ char *tmp_errstr = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (dict);
+ conf = this->private;
+ GF_ASSERT (conf);
+
+ /* Save the MY_UUID as the originator_uuid. This originator_uuid
+ * will be used by is_origin_glusterd() to determine if a node
+ * is the originator node for a command. */
+ originator_uuid = GF_CALLOC (1, sizeof(uuid_t),
+ gf_common_mt_uuid_t);
+ if (!originator_uuid) {
+ ret = -1;
+ goto out;
+ }
+
+ uuid_copy (*originator_uuid, MY_UUID);
+ ret = dict_set_bin (dict, "originator_uuid",
+ originator_uuid, sizeof (uuid_t));
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set originator_uuid.");
+ goto out;
+ }
+
+ /* Marking the operation as complete synctasked */
+ ret = dict_set_int32 (dict, "is_synctasked", _gf_true);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set synctasked flag.");
+ goto out;
+ }
+
+        /* Use a copy (tmp_dict) for the local unlock: the CLI response may
+         * be sent before the unlock, by which time the volname entry may
+         * have been removed from the original dict */
+ tmp_dict = dict_new();
+ if (!tmp_dict) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to create dict");
+ goto out;
+ }
+ dict_copy (dict, tmp_dict);
+
+ /* BUILD PEERS LIST */
+ INIT_LIST_HEAD (&conf->xaction_peers);
+ npeers = gd_build_peers_list (&conf->peers, &conf->xaction_peers, op);
+
+ /* LOCKDOWN PHASE - Acquire mgmt_v3 locks */
+ ret = glusterd_mgmt_v3_initiate_lockdown (conf, op, dict, &op_errstr,
+ npeers, &is_acquired);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "mgmt_v3 lockdown failed.");
+ goto out;
+ }
+
+ /* BUILD PAYLOAD */
+ ret = glusterd_mgmt_v3_build_payload (&req_dict, &op_errstr, dict, op);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, LOGSTR_BUILD_PAYLOAD,
+ gd_op_list[op]);
+ if (op_errstr == NULL)
+ gf_asprintf (&op_errstr, OPERRSTR_BUILD_PAYLOAD);
+ goto out;
+ }
+
+ /* PRE-COMMIT VALIDATE PHASE */
+ ret = glusterd_mgmt_v3_pre_validate (conf, op, req_dict,
+ &op_errstr, npeers);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Pre Validation Failed");
+ goto out;
+ }
+
+ /* BRICK OP PHASE for initiating barrier*/
+ ret = dict_set_int32 (req_dict, "barrier", 1);
+ if (ret)
+ goto out;
+ ret = glusterd_mgmt_v3_brick_op (conf, op, req_dict,
+ &op_errstr, npeers);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Brick Ops Failed");
+ goto unbarrier;
+ }
+
+ /* COMMIT OP PHASE */
+ /* TODO: As of now, the plan is to do quorum check before sending the
+ commit fop and if the quorum succeeds, then commit is sent to all
+ the other glusterds.
+ snap create functionality now creates the in memory and on disk
+ objects for the snapshot (marking them as incomplete), takes the lvm
+ snapshot and then updates the status of the in memory and on disk
+ snap objects as complete. Suppose one of the glusterds goes down
+ after taking the lvm snapshot, but before updating the snap object,
+ then treat it as a snapshot create failure and trigger cleanup.
+           i.e. the number of commit responses received by the originator
+           glusterd should be the same as the number of peers it has sent the
+ request to (i.e npeers variable). If not, then originator glusterd
+ will initiate cleanup in post-validate fop.
+ Question: What if one of the other glusterds goes down as explained
+ above and along with it the originator glusterd also goes down?
+ Who will initiate the cleanup?
+ */
+ ret = glusterd_mgmt_v3_commit (conf, op, dict, req_dict,
+ &op_errstr, npeers);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Commit Op Failed");
+                /* If the main op fails, save its error string: op_errstr is
+                   reused by the unbarrier and unlock phases and could
+                   otherwise lose the actual error that caused the failure.
+                */
+ tmp_errstr = op_errstr;
+ op_errstr = NULL;
+ goto unbarrier;
+ }
+
+ success = _gf_true;
+unbarrier:
+ /* BRICK OP PHASE for removing the barrier*/
+ ret = dict_set_int32 (req_dict, "barrier", 0);
+ if (ret)
+ goto out;
+ ret = glusterd_mgmt_v3_brick_op (conf, op, req_dict,
+ &op_errstr, npeers);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Brick Ops Failed");
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ op_ret = ret;
+
+ if (success == _gf_false)
+ op_ret = -1;
+
+ /* POST-COMMIT VALIDATE PHASE */
+ ret = glusterd_mgmt_v3_post_validate (conf, op, op_ret, dict, req_dict,
+ &op_errstr, npeers);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Post Validation Failed");
+ op_ret = -1;
+ }
+
+ /* UNLOCK PHASE FOR PEERS*/
+ (void) glusterd_mgmt_v3_release_peer_locks (conf, op, dict,
+ op_ret, &op_errstr,
+ npeers, is_acquired);
+
+        /* If the commit op (taking the snapshot) failed, its error is
+           stored in tmp_errstr before unbarrier is called. If unbarrier
+           also fails, the unbarrier error is logged and freed, and the
+           commit error stored in tmp_errstr is the one sent to the CLI.
+        */
+ if (tmp_errstr) {
+ if (op_errstr) {
+ gf_log (this->name, GF_LOG_ERROR, "unbarrier brick op"
+ "failed with the error %s", op_errstr);
+ GF_FREE (op_errstr);
+ op_errstr = NULL;
+ }
+ op_errstr = tmp_errstr;
+ }
+
+ /* LOCAL VOLUME(S) UNLOCK */
+ if (is_acquired) {
+ /* Trying to release multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_unlock (tmp_dict, MY_UUID);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to release mgmt_v3 locks on localhost");
+ op_ret = ret;
+ }
+ }
+
+ /* SEND CLI RESPONSE */
+ glusterd_op_send_cli_response (op, op_ret, 0, req, dict, op_errstr);
+
+ if (req_dict)
+ dict_unref (req_dict);
+
+ if (tmp_dict)
+ dict_unref (tmp_dict);
+
+ if (op_errstr) {
+ GF_FREE (op_errstr);
+ op_errstr = NULL;
+ }
+
+ return 0;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-mgmt.h
new file mode 100644
index 000000000..b185a9bec
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.h
@@ -0,0 +1,45 @@
+/*
+ Copyright (c) 2013-2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#ifndef _GLUSTERD_MGMT_H_
+#define _GLUSTERD_MGMT_H_
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+int32_t
+gd_mgmt_v3_pre_validate_fn (glusterd_op_t op, dict_t *dict,
+ char **op_errstr, dict_t *rsp_dict);
+
+int32_t
+gd_mgmt_v3_brick_op_fn (glusterd_op_t op, dict_t *dict,
+ char **op_errstr, dict_t *rsp_dict);
+
+int32_t
+gd_mgmt_v3_commit_fn (glusterd_op_t op, dict_t *dict,
+ char **op_errstr, dict_t *rsp_dict);
+
+int32_t
+gd_mgmt_v3_post_validate_fn (glusterd_op_t op, int32_t op_ret, dict_t *dict,
+ char **op_errstr, dict_t *rsp_dict);
+
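+/* Entry points that drive a full mgmt_v3 transaction:
+ * lock -> pre-validate -> (brick ops) -> commit -> post-validate -> unlock.
+ * The snap variant adds barrier brick-ops around commit. */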
+int32_t
+glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
+ dict_t *dict);
+
+int32_t
+glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
+ dict_t *dict);
+
+int
+glusterd_snap_pre_validate_use_rsp_dict (dict_t *dst, dict_t *src);
+
+#endif /* _GLUSTERD_MGMT_H_ */
diff --git a/xlators/mgmt/glusterd/src/glusterd-mountbroker.c b/xlators/mgmt/glusterd/src/glusterd-mountbroker.c
index 0d67d1303..4ce441da8 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mountbroker.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mountbroker.c
@@ -231,7 +231,6 @@ parse_mount_pattern_desc (gf_mount_spec_t *mspec, char *pdesc)
const char *georep_mnt_desc_template =
"SUP("
- "xlator-option=\\*-dht.assert-no-child-down=true "
"volfile-server=localhost "
"client-pid=%d "
"user-map-root=%s "
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 1774b133b..9b130b4c6 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -37,6 +37,7 @@
#include "glusterd-store.h"
#include "glusterd-hooks.h"
#include "glusterd-volgen.h"
+#include "glusterd-locks.h"
#include "syscall.h"
#include "cli1-xdr.h"
#include "common-utils.h"
@@ -67,6 +68,253 @@
static struct list_head gd_op_sm_queue;
pthread_mutex_t gd_op_sm_lock;
glusterd_op_info_t opinfo = {{0},};
+
+int32_t
+glusterd_txn_opinfo_dict_init ()
+{
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ priv->glusterd_txn_opinfo = dict_new ();
+ if (!priv->glusterd_txn_opinfo) {
+ ret = -1;
+ goto out;
+ }
+
+ memset (priv->global_txn_id, '\0', sizeof(uuid_t));
+
+ ret = 0;
+out:
+ return ret;
+}
+
+void
+glusterd_txn_opinfo_dict_fini ()
+{
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ if (priv->glusterd_txn_opinfo)
+ dict_unref (priv->glusterd_txn_opinfo);
+}
+
+void
+glusterd_txn_opinfo_init (glusterd_op_info_t *opinfo,
+ glusterd_op_sm_state_info_t *state,
+ glusterd_op_t *op,
+ dict_t *op_ctx,
+ rpcsvc_request_t *req)
+{
+ GF_ASSERT (opinfo);
+
+ if (state)
+ opinfo->state = *state;
+
+ if (op)
+ opinfo->op = *op;
+
+ if (op_ctx)
+ opinfo->op_ctx = dict_ref(op_ctx);
+ else
+ opinfo->op_ctx = NULL;
+
+ if (req)
+ opinfo->req = req;
+
+ return;
+}
+
+int32_t
+glusterd_generate_txn_id (dict_t *dict, uuid_t **txn_id)
+{
+ int32_t ret = -1;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+ GF_ASSERT (dict);
+
+ *txn_id = GF_CALLOC (1, sizeof(uuid_t), gf_common_mt_uuid_t);
+ if (!*txn_id)
+ goto out;
+
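+        /* Clusters below op-version 4 reuse the fixed global id (zeroed at
+         * init) for every transaction, presumably for compatibility with
+         * peers that track only a single transaction; newer clusters get a
+         * fresh uuid per transaction. */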
+ if (priv->op_version < GD_OP_VERSION_4)
+ uuid_copy (**txn_id, priv->global_txn_id);
+ else
+ uuid_generate (**txn_id);
+
+ ret = dict_set_bin (dict, "transaction_id",
+ *txn_id, sizeof (**txn_id));
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Failed to set transaction id.");
+ goto out;
+ }
+
+ gf_log ("", GF_LOG_DEBUG,
+ "Transaction_id = %s", uuid_utoa (**txn_id));
+out:
+ if (ret && *txn_id) {
+ GF_FREE (*txn_id);
+ *txn_id = NULL;
+ }
+
+ return ret;
+}
+
+int32_t
+glusterd_get_txn_opinfo (uuid_t *txn_id, glusterd_op_info_t *opinfo)
+{
+ int32_t ret = -1;
+ glusterd_txn_opinfo_obj *opinfo_obj = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ if (!txn_id || !opinfo) {
+ gf_log ("", GF_LOG_ERROR,
+ "Empty transaction id or opinfo received.");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_bin(priv->glusterd_txn_opinfo,
+ uuid_utoa (*txn_id),
+ (void **) &opinfo_obj);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Unable to get transaction opinfo "
+ "for transaction ID : %s",
+ uuid_utoa (*txn_id));
+ goto out;
+ }
+
+ (*opinfo) = opinfo_obj->opinfo;
+
+ gf_log ("", GF_LOG_DEBUG,
+ "Successfully got opinfo for transaction ID : %s",
+ uuid_utoa (*txn_id));
+
+ ret = 0;
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
+glusterd_set_txn_opinfo (uuid_t *txn_id, glusterd_op_info_t *opinfo)
+{
+ int32_t ret = -1;
+ glusterd_txn_opinfo_obj *opinfo_obj = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ if (!txn_id) {
+ gf_log ("", GF_LOG_ERROR, "Empty transaction id received.");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_bin(priv->glusterd_txn_opinfo,
+ uuid_utoa (*txn_id),
+ (void **) &opinfo_obj);
+ if (ret) {
+ opinfo_obj = GF_CALLOC (1, sizeof(glusterd_txn_opinfo_obj),
+ gf_common_mt_txn_opinfo_obj_t);
+ if (!opinfo_obj) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_bin(priv->glusterd_txn_opinfo,
+ uuid_utoa (*txn_id), opinfo_obj,
+ sizeof(glusterd_txn_opinfo_obj));
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Unable to set opinfo for transaction ID : %s",
+ uuid_utoa (*txn_id));
+ goto out;
+ }
+ }
+
+ opinfo_obj->opinfo = (*opinfo);
+
+ gf_log ("", GF_LOG_DEBUG,
+ "Successfully set opinfo for transaction ID : %s",
+ uuid_utoa (*txn_id));
+ ret = 0;
+out:
+ if (ret)
+ if (opinfo_obj)
+ GF_FREE (opinfo_obj);
+
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
+glusterd_clear_txn_opinfo (uuid_t *txn_id)
+{
+ int32_t ret = -1;
+ glusterd_op_info_t txn_op_info = {{0},};
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ if (!txn_id) {
+ gf_log ("", GF_LOG_ERROR, "Empty transaction id received.");
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_get_txn_opinfo (txn_id, &txn_op_info);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Transaction opinfo not found");
+ goto out;
+ }
+
+ if (txn_op_info.op_ctx)
+ dict_unref (txn_op_info.op_ctx);
+
+ dict_del(priv->glusterd_txn_opinfo, uuid_utoa (*txn_id));
+
+ gf_log ("", GF_LOG_DEBUG,
+ "Successfully cleared opinfo for transaction ID : %s",
+ uuid_utoa (*txn_id));
+
+ ret = 0;
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
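+
+/* Lifecycle of a transaction's opinfo (as implemented above):
+ * glusterd_generate_txn_id() creates the id and stores it in the op ctx;
+ * glusterd_set_txn_opinfo() records state against it; handlers fetch it
+ * with glusterd_get_txn_opinfo() on each state-machine event; and
+ * glusterd_clear_txn_opinfo() drops the entry when the transaction ends. */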
+
static int glusterfs_port = GLUSTERD_DEFAULT_PORT;
static char *glusterd_op_sm_state_names[] = {
"Default",
@@ -147,14 +395,48 @@ glusterd_is_volume_started (glusterd_volinfo_t *volinfo)
}
static int
-glusterd_op_sm_inject_all_acc ()
+glusterd_op_sm_inject_all_acc (uuid_t *txn_id)
{
int32_t ret = -1;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACC, NULL);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACC, txn_id, NULL);
gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
+static int
+glusterd_check_quota_cmd (char *key, char *value, char *errstr, size_t size)
+{
+ int ret = -1;
+ gf_boolean_t b = _gf_false;
+
+ if ((strcmp (key, "quota") == 0) ||
+ (strcmp (key, "features.quota") == 0)) {
+ ret = gf_string2boolean (value, &b);
+ if (ret)
+ goto out;
+ if (b) {
+ snprintf (errstr, size," 'gluster "
+ "volume set <VOLNAME> %s %s' is "
+ "deprecated. Use 'gluster volume "
+ "quota <VOLNAME> enable' instead.",
+ key, value);
+ ret = -1;
+ goto out;
+ } else {
+ snprintf (errstr, size, " 'gluster "
+ "volume set <VOLNAME> %s %s' is "
+ "deprecated. Use 'gluster volume "
+ "quota <VOLNAME> disable' instead.",
+ key, value);
+ ret = -1;
+ goto out;
+ }
+ }
+ ret = 0;
+out:
+ return ret;
+}
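+
+/* Example (illustrative, hypothetical volume name): "gluster volume set
+ * vol0 features.quota on" is rejected here with a message directing the
+ * user to "gluster volume quota vol0 enable" instead. */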
+
int
glusterd_brick_op_build_payload (glusterd_op_t op, glusterd_brickinfo_t *brickinfo,
gd1_mgmt_brick_op_req **req, dict_t *dict)
@@ -235,20 +517,20 @@ glusterd_brick_op_build_payload (glusterd_op_t op, glusterd_brickinfo_t *brickin
brick_req->name = gf_strdup (name);
break;
-
-#ifdef HAVE_BD_XLATOR
- case GD_OP_BD_OP:
- {
+ case GD_OP_SNAP:
brick_req = GF_CALLOC (1, sizeof (*brick_req),
gf_gld_mt_mop_brick_req_t);
if (!brick_req)
goto out;
- brick_req->op = GLUSTERD_BRICK_BD_OP;
- brick_req->name = "";
- }
+ brick_req->op = GLUSTERD_VOLUME_BARRIER_OP;
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret)
+ goto out;
+                snprintf (name, 1024, "%s-server", volname);
+ brick_req->name = gf_strdup (name);
+
break;
-#endif
default:
goto out;
break;
@@ -333,6 +615,10 @@ glusterd_validate_quorum_options (xlator_t *this, char *fullkey, char *value,
if (!glusterd_is_quorum_option (fullkey))
goto out;
key = strchr (fullkey, '.');
+ if (key == NULL) {
+ ret = -1;
+ goto out;
+ }
key++;
opt = xlator_volume_option_get (this, key);
ret = xlator_option_validate (this, key, value, opt, op_errstr);
@@ -420,7 +706,7 @@ glusterd_op_stage_set_volume (dict_t *dict, char **op_errstr)
* This check is not done on the originator glusterd. The originator
* glusterd sets this value.
*/
- origin_glusterd = is_origin_glusterd ();
+ origin_glusterd = is_origin_glusterd (dict);
if (!origin_glusterd) {
/* Check for v3.3.x origin glusterd */
@@ -557,6 +843,10 @@ glusterd_op_stage_set_volume (dict_t *dict, char **op_errstr)
}
}
+ ret = glusterd_check_quota_cmd (key, value, errstr, sizeof (errstr));
+ if (ret)
+ goto out;
+
if (is_key_glusterd_hooks_friendly (key))
continue;
@@ -730,7 +1020,7 @@ glusterd_op_stage_reset_volume (dict_t *dict, char **op_errstr)
{
int ret = 0;
char *volname = NULL;
- gf_boolean_t exists = _gf_false;
+ int exists = 0;
char msg[2048] = {0};
char *key = NULL;
char *key_fixed = NULL;
@@ -778,6 +1068,7 @@ glusterd_op_stage_reset_volume (dict_t *dict, char **op_errstr)
ret = -1;
goto out;
}
+
if (!exists) {
ret = snprintf (msg, sizeof (msg),
"Option %s does not exist", key);
@@ -901,18 +1192,24 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
if (cmd & GF_CLI_STATUS_ALL)
goto out;
+ if ((cmd & GF_CLI_STATUS_QUOTAD) &&
+ (priv->op_version == GD_OP_VERSION_MIN)) {
+ snprintf (msg, sizeof (msg), "The cluster is operating at "
+ "version 1. Getting the status of quotad is not "
+ "allowed in this state.");
+ ret = -1;
+ goto out;
+ }
+
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "Unable to get volume name");
+ gf_log (this->name, GF_LOG_ERROR, "Unable to get volume name");
goto out;
}
ret = glusterd_volinfo_find (volname, &volinfo);
if (ret) {
- snprintf (msg, sizeof(msg), "Volume %s does not exist",
- volname);
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
+ snprintf (msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
ret = -1;
goto out;
}
@@ -925,7 +1222,6 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
if (!ret) {
snprintf (msg, sizeof (msg), "Volume %s is not started",
volname);
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
ret = -1;
goto out;
}
@@ -940,7 +1236,6 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
snprintf (msg, sizeof (msg),
"NFS server is disabled for volume %s",
volname);
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
goto out;
}
} else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
@@ -949,7 +1244,6 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
snprintf (msg, sizeof (msg),
"Volume %s is not of type replicate",
volname);
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
goto out;
}
@@ -961,10 +1255,15 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
snprintf (msg, sizeof (msg),
"Self-heal Daemon is disabled for volume %s",
volname);
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
goto out;
}
-
+ } else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
+ if (!glusterd_is_volume_quota_enabled (volinfo)) {
+ ret = -1;
+ snprintf (msg, sizeof (msg), "Volume %s does not have "
+ "quota enabled", volname);
+ goto out;
+ }
} else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
ret = dict_get_str (dict, "brick", &brick);
if (ret)
@@ -975,8 +1274,6 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
if (ret) {
snprintf (msg, sizeof(msg), "No brick %s in"
" volume %s", brick, volname);
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
-
ret = -1;
goto out;
}
@@ -992,7 +1289,7 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
*op_errstr = gf_strdup ("Validation Failed for Status");
}
- gf_log (THIS->name, GF_LOG_DEBUG, "Returning: %d", ret);
+ gf_log (this->name, GF_LOG_DEBUG, "Returning: %d", ret);
return ret;
}
@@ -1101,14 +1398,17 @@ _delete_reconfig_opt (dict_t *this, char *key, data_t *value, void *data)
GF_ASSERT (data);
is_force = (int32_t*)data;
- if (*is_force != 1 &&
- (_gf_true == glusterd_check_voloption_flags (key,
- OPT_FLAG_FORCE))) {
+ if (*is_force != 1) {
+ if (_gf_true == glusterd_check_voloption_flags (key,
+ OPT_FLAG_FORCE)) {
/* indicate to caller that we don't set the option
* due to being protected
*/
- *is_force = -1;
- goto out;
+ *is_force = *is_force | GD_OP_PROTECTED;
+ goto out;
+ } else {
+ *is_force = *is_force | GD_OP_UNPROTECTED;
+ }
}
gf_log ("", GF_LOG_DEBUG, "deleting dict with key=%s,value=%s",
@@ -1160,8 +1460,9 @@ glusterd_options_reset (glusterd_volinfo_t *volinfo, char *key,
_delete_reconfig_opt (volinfo->dict, key, value, is_force);
}
- ret = glusterd_create_volfiles_and_notify_services (volinfo);
+ gd_update_volume_op_versions (volinfo);
+ ret = glusterd_create_volfiles_and_notify_services (volinfo);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Unable to create volfile for"
" 'volume reset'");
@@ -1276,7 +1577,7 @@ out:
}
static int
-glusterd_op_reset_volume (dict_t *dict, char **op_errstr)
+glusterd_op_reset_volume (dict_t *dict, char **op_rspstr)
{
glusterd_volinfo_t *volinfo = NULL;
int ret = -1;
@@ -1331,14 +1632,20 @@ glusterd_op_reset_volume (dict_t *dict, char **op_errstr)
quorum_action = _gf_true;
ret = glusterd_options_reset (volinfo, key, &is_force);
- if (is_force == -1) {
- ret = -1;
- gf_asprintf(op_errstr, "'%s' is protected. To reset use 'force'.",
- key);
+ if (ret == -1) {
+                gf_asprintf (op_rspstr, "Volume reset: failed");
+ } else if (is_force & GD_OP_PROTECTED) {
+ if (is_force & GD_OP_UNPROTECTED) {
+ gf_asprintf (op_rspstr, "All unprotected fields were"
+ " reset. To reset the protected fields,"
+ " use 'force'.");
+ } else {
+ ret = -1;
+ gf_asprintf (op_rspstr, "'%s' is protected. To reset"
+ " use 'force'.", key);
+ }
}
- gd_update_volume_op_versions (volinfo);
-
out:
GF_FREE (key_fixed);
if (quorum_action)
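
With this change is_force is no longer a tri-state int with a -1 sentinel but a bit field: _delete_reconfig_opt ORs in GD_OP_PROTECTED for options it refuses to touch and GD_OP_UNPROTECTED for options it deletes, and glusterd_op_reset_volume decodes the combination above to tell "everything was protected" apart from the mixed case. A standalone sketch of the flag protocol, with a toy option list standing in for the volinfo dict:

    #include <stdio.h>

    #define GD_OP_PROTECTED   (0x02)
    #define GD_OP_UNPROTECTED (0x04)

    int
    main (void)
    {
            /* toy stand-ins for the options walked by _delete_reconfig_opt;
             * 1 means the option carries OPT_FLAG_FORCE */
            int protected_opts[] = { 1, 0, 0 };
            int is_force = 0;                     /* user did not pass 'force' */
            int i;

            for (i = 0; i < 3; i++) {
                    if (protected_opts[i])
                            is_force |= GD_OP_PROTECTED;    /* skipped, needs 'force' */
                    else
                            is_force |= GD_OP_UNPROTECTED;  /* deleted */
            }

            if ((is_force & GD_OP_PROTECTED) && (is_force & GD_OP_UNPROTECTED))
                    printf ("All unprotected fields were reset. To reset the "
                            "protected fields, use 'force'.\n");
            else if (is_force & GD_OP_PROTECTED)
                    printf ("option is protected. To reset use 'force'.\n");
            else
                    printf ("all options reset\n");
            return 0;
    }
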
@@ -1366,14 +1673,25 @@ glusterd_stop_bricks (glusterd_volinfo_t *volinfo)
int
glusterd_start_bricks (glusterd_volinfo_t *volinfo)
{
- glusterd_brickinfo_t *brickinfo = NULL;
+ int ret = -1;
+ glusterd_brickinfo_t *brickinfo = NULL;
+
+ GF_ASSERT (volinfo);
list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- if (glusterd_brick_start (volinfo, brickinfo, _gf_false))
- return -1;
+ ret = glusterd_brick_start (volinfo, brickinfo, _gf_false);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Failed to start %s:%s for %s",
+ brickinfo->hostname, brickinfo->path,
+ volinfo->volname);
+ goto out;
+ }
}
- return 0;
+ ret = 0;
+out:
+ return ret;
}
static int
@@ -1428,10 +1746,6 @@ glusterd_op_set_all_volume_options (xlator_t *this, dict_t *dict)
if (ret)
goto out;
- dup_value = gf_strdup (value);
- if (!dup_value)
- goto out;
-
ret = glusterd_store_options (this, dup_opt);
if (ret)
goto out;
@@ -1446,10 +1760,18 @@ glusterd_op_set_all_volume_options (xlator_t *this, dict_t *dict)
else
next_version = NULL;
+ dup_value = gf_strdup (value);
+ if (!dup_value)
+ goto out;
+
ret = dict_set_dynstr (conf->opts, key, dup_value);
if (ret)
goto out;
+ else
+ dup_value = NULL; /* Protect the allocation from GF_FREE */
+
out:
+ GF_FREE (dup_value);
GF_FREE (key_fixed);
if (dup_opt)
dict_unref (dup_opt);
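
The dup_value shuffle above fixes a leak: the string is duplicated only once it is actually needed, and after a successful dict_set_dynstr the dictionary owns it, so the local pointer is NULLed to keep the unconditional GF_FREE at out: from double-freeing. A standalone sketch of the same hand-off idiom, with a stub store() standing in for dict_set_dynstr (the patch's "Protect the allocation" comment implies it takes ownership on success, which is the assumption here):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *stored;            /* stub for the dict slot */

    static int
    store (char *val)               /* stand-in for dict_set_dynstr: owns val on success */
    {
            if (!val)
                    return -1;
            free (stored);
            stored = val;
            return 0;
    }

    int
    main (void)
    {
            int   ret = -1;
            char *dup_value = strdup ("on");  /* duplicated just before use */

            if (!dup_value)
                    goto out;

            ret = store (dup_value);
            if (ret)
                    goto out;
            dup_value = NULL;       /* store() owns it now; protect it from free below */
    out:
            free (dup_value);       /* frees only on the error paths */
            free (stored);
            return ret;
    }
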
@@ -1476,6 +1798,7 @@ glusterd_op_set_volume (dict_t *dict)
char str[50] = {0, };
char *op_errstr = NULL;
gf_boolean_t global_opt = _gf_false;
+ gf_boolean_t global_opts_set = _gf_false;
glusterd_volinfo_t *voliter = NULL;
int32_t dict_count = 0;
gf_boolean_t check_op_version = _gf_false;
@@ -1497,10 +1820,12 @@ glusterd_op_set_volume (dict_t *dict)
if (dict_count == 0) {
ret = glusterd_volset_help (NULL, &op_errstr);
if (ret) {
- op_errstr = (op_errstr)? op_errstr:
- "Volume set help internal error";
- gf_log (this->name, GF_LOG_ERROR, "%s", op_errstr);
+ gf_log (this->name, GF_LOG_ERROR, "%s",
+ (op_errstr)? op_errstr:
+ "Volume set help internal error");
}
+
+ GF_FREE(op_errstr);
goto out;
}
@@ -1537,7 +1862,6 @@ glusterd_op_set_volume (dict_t *dict)
for (count = 1; ret != -1 ; count++) {
- global_opt = _gf_false;
sprintf (str, "key%d", count);
ret = dict_get_str (dict, str, &key);
if (ret)
@@ -1585,8 +1909,11 @@ glusterd_op_set_volume (dict_t *dict)
}
}
- if (glusterd_check_globaloption (key))
+ global_opt = _gf_false;
+ if (glusterd_check_globaloption (key)) {
global_opt = _gf_true;
+ global_opts_set = _gf_true;
+ }
if (!global_opt)
value = gf_strdup (value);
@@ -1642,7 +1969,8 @@ glusterd_op_set_volume (dict_t *dict)
}
}
- if (!global_opt) {
+ if (!global_opts_set) {
+ gd_update_volume_op_versions (volinfo);
ret = glusterd_create_volfiles_and_notify_services (volinfo);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
@@ -1664,11 +1992,11 @@ glusterd_op_set_volume (dict_t *dict)
goto out;
}
}
- gd_update_volume_op_versions (volinfo);
} else {
list_for_each_entry (voliter, &priv->volumes, vol_list) {
volinfo = voliter;
+ gd_update_volume_op_versions (volinfo);
ret = glusterd_create_volfiles_and_notify_services (volinfo);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
@@ -1691,7 +2019,6 @@ glusterd_op_set_volume (dict_t *dict)
goto out;
}
}
- gd_update_volume_op_versions (volinfo);
}
}
@@ -1755,12 +2082,12 @@ glusterd_op_sync_volume (dict_t *dict, char **op_errstr,
if (volname) {
ret = glusterd_add_volume_to_dict (volinfo, rsp_dict,
- 1);
+ 1, "volume");
vol_count = 1;
} else {
list_for_each_entry (volinfo, &priv->volumes, vol_list) {
- ret = glusterd_add_volume_to_dict (volinfo,
- rsp_dict, count);
+ ret = glusterd_add_volume_to_dict (volinfo, rsp_dict,
+ count, "volume");
if (ret)
goto out;
@@ -1902,6 +2229,105 @@ out:
}
static int
+_add_brick_name_to_dict (dict_t *dict, char *key, glusterd_brickinfo_t *brick)
+{
+ int ret = -1;
+ char tmp[1024] = {0,};
+ char *brickname = NULL;
+ xlator_t *this = NULL;
+
+ GF_ASSERT (dict);
+ GF_ASSERT (key);
+ GF_ASSERT (brick);
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ snprintf (tmp, sizeof (tmp), "%s:%s", brick->hostname, brick->path);
+ brickname = gf_strdup (tmp);
+ if (!brickname) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to dup brick name");
+ goto out;
+ }
+
+ ret = dict_set_dynstr (dict, key, brickname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to add brick name to dict");
+ goto out;
+ }
+ brickname = NULL;
+out:
+ if (brickname)
+ GF_FREE (brickname);
+ return ret;
+}
+
+static int
+_add_remove_bricks_to_dict (dict_t *dict, glusterd_volinfo_t *volinfo,
+ char *prefix)
+{
+ int ret = -1;
+ int count = 0;
+ int i = 0;
+ char brick_key[1024] = {0,};
+ char dict_key[1024] ={0,};
+ char *brick = NULL;
+ xlator_t *this = NULL;
+
+ GF_ASSERT (dict);
+ GF_ASSERT (volinfo);
+ GF_ASSERT (prefix);
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ ret = dict_get_int32 (volinfo->rebal.dict, "count", &count);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get brick count");
+ goto out;
+ }
+
+ snprintf (dict_key, sizeof (dict_key), "%s.count", prefix);
+ ret = dict_set_int32 (dict, dict_key, count);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set brick count in dict");
+ goto out;
+ }
+
+ for (i = 1; i <= count; i++) {
+ memset (brick_key, 0, sizeof (brick_key));
+ snprintf (brick_key, sizeof (brick_key), "brick%d", i);
+
+ ret = dict_get_str (volinfo->rebal.dict, brick_key, &brick);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to get %s", brick_key);
+ goto out;
+ }
+
+ memset (dict_key, 0, sizeof (dict_key));
+ snprintf (dict_key, sizeof (dict_key), "%s.%s", prefix,
+ brick_key);
+ ret = dict_set_str (dict, dict_key, brick);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to add brick to dict");
+ goto out;
+ }
+ brick = NULL;
+ }
+
+out:
+ return ret;
+}
+
+/* This adds the respective task-id and all available parameters of a task into
+ * a dictionary
+ */
+static int
_add_task_to_dict (dict_t *dict, glusterd_volinfo_t *volinfo, int op, int index)
{
@@ -1918,13 +2344,34 @@ _add_task_to_dict (dict_t *dict, glusterd_volinfo_t *volinfo, int op, int index)
GF_ASSERT (this);
switch (op) {
- case GD_OP_REBALANCE:
case GD_OP_REMOVE_BRICK:
+ snprintf (key, sizeof (key), "task%d", index);
+ ret = _add_remove_bricks_to_dict (dict, volinfo, key);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to add remove bricks to dict");
+ goto out;
+ }
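+                /* fall through: a remove-brick task reuses the rebalance
+                 * task-id and defrag status picked up below */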
+ case GD_OP_REBALANCE:
uuid_str = gf_strdup (uuid_utoa (volinfo->rebal.rebalance_id));
status = volinfo->rebal.defrag_status;
break;
case GD_OP_REPLACE_BRICK:
+ snprintf (key, sizeof (key), "task%d.src-brick", index);
+ ret = _add_brick_name_to_dict (dict, key,
+ volinfo->rep_brick.src_brick);
+ if (ret)
+ goto out;
+ memset (key, 0, sizeof (key));
+
+ snprintf (key, sizeof (key), "task%d.dst-brick", index);
+ ret = _add_brick_name_to_dict (dict, key,
+ volinfo->rep_brick.dst_brick);
+ if (ret)
+ goto out;
+ memset (key, 0, sizeof (key));
+
uuid_str = gf_strdup (uuid_utoa (volinfo->rep_brick.rb_id));
status = volinfo->rep_brick.rb_status;
break;
@@ -1937,8 +2384,7 @@ _add_task_to_dict (dict_t *dict, glusterd_volinfo_t *volinfo, int op, int index)
}
snprintf (key, sizeof (key), "task%d.type", index);
- ret = dict_set_str (dict, key,
- (char *)gd_op_list[op]);
+ ret = dict_set_str (dict, key, (char *)gd_op_list[op]);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Error setting task type in dict");
@@ -1948,7 +2394,6 @@ _add_task_to_dict (dict_t *dict, glusterd_volinfo_t *volinfo, int op, int index)
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "task%d.id", index);
-
if (!uuid_str)
goto out;
ret = dict_set_dynstr (dict, key, uuid_str);
@@ -1975,6 +2420,50 @@ out:
}
static int
+glusterd_aggregate_task_status (dict_t *rsp_dict, glusterd_volinfo_t *volinfo)
+{
+ int ret = -1;
+ int tasks = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ if (!uuid_is_null (volinfo->rebal.rebalance_id)) {
+ ret = _add_task_to_dict (rsp_dict, volinfo, volinfo->rebal.op,
+ tasks);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to add task details to dict");
+ goto out;
+ }
+ tasks++;
+ }
+
+ if (!uuid_is_null (volinfo->rep_brick.rb_id)) {
+ ret = _add_task_to_dict (rsp_dict, volinfo, GD_OP_REPLACE_BRICK,
+ tasks);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to add task details to dict");
+ goto out;
+ }
+ tasks++;
+ }
+
+ ret = dict_set_int32 (rsp_dict, "tasks", tasks);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Error setting tasks count in dict");
+ goto out;
+ }
+ ret = 0;
+
+out:
+ return ret;
+}
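
glusterd_aggregate_task_status lifts the task-collection loop out of glusterd_op_status_volume (the inline copy is deleted further down) so the same code serves the dedicated tasks status request. The keys it populates via _add_task_to_dict follow a flat task%d.* naming scheme plus a top-level count; an illustrative dump of that shape (the values here are invented):

    #include <stdio.h>

    int
    main (void)
    {
            /* illustrative only: two in-flight tasks on one volume */
            int tasks = 2, i;
            const char *types[] = { "Rebalance", "Replace brick" }; /* assumed gd_op_list strings */

            for (i = 0; i < tasks; i++) {
                    printf ("task%d.type = %s\n", i, types[i]);
                    printf ("task%d.id   = <uuid string>\n", i);
            }
            printf ("tasks = %d\n", tasks);
            return 0;
    }
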
+
+static int
glusterd_op_status_volume (dict_t *dict, char **op_errstr,
dict_t *rsp_dict)
{
@@ -1994,7 +2483,6 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
gf_boolean_t nfs_disabled = _gf_false;
gf_boolean_t shd_enabled = _gf_true;
gf_boolean_t origin_glusterd = _gf_false;
- int tasks = 0;
this = THIS;
GF_ASSERT (this);
@@ -2004,13 +2492,13 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
GF_ASSERT (dict);
- origin_glusterd = is_origin_glusterd ();
+ origin_glusterd = is_origin_glusterd (dict);
ret = dict_get_uint32 (dict, "cmd", &cmd);
if (ret)
goto out;
- if (is_origin_glusterd ()) {
+ if (origin_glusterd) {
ret = 0;
if ((cmd & GF_CLI_STATUS_ALL)) {
ret = glusterd_get_all_volnames (rsp_dict);
@@ -2055,6 +2543,14 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
other_count++;
node_count++;
+ } else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
+ ret = glusterd_add_node_to_dict ("quotad", rsp_dict, 0,
+ vol_opts);
+ if (ret)
+ goto out;
+ other_count++;
+ node_count++;
+
} else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
ret = dict_get_str (dict, "brick", &brick);
if (ret)
@@ -2077,6 +2573,10 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
brick_index);
node_count++;
+ } else if ((cmd & GF_CLI_STATUS_TASKS) != 0) {
+ ret = glusterd_aggregate_task_status (rsp_dict, volinfo);
+ goto out;
+
} else {
list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
brick_index++;
@@ -2126,6 +2626,17 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
goto out;
other_count++;
node_count++;
+ other_index++;
+ }
+ if (glusterd_is_volume_quota_enabled (volinfo)) {
+ ret = glusterd_add_node_to_dict ("quotad",
+ rsp_dict,
+ other_index,
+ vol_opts);
+ if (ret)
+ goto out;
+ other_count++;
+ node_count++;
}
}
}
@@ -2150,35 +2661,16 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
}
/* Active tasks */
- if (((cmd & GF_CLI_STATUS_MASK) != GF_CLI_STATUS_NONE) ||
- !origin_glusterd)
+ /* Tasks are added only for normal volume status request for either a
+ * single volume or all volumes
+ */
+ if (!glusterd_status_has_tasks (cmd))
goto out;
- if (!uuid_is_null (volinfo->rebal.rebalance_id)) {
- ret = _add_task_to_dict (rsp_dict, volinfo, volinfo->rebal.op,
- tasks);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Failed to add task details to dict");
- goto out;
- }
- tasks++;
- }
- if (!uuid_is_null (volinfo->rep_brick.rb_id)) {
- ret = _add_task_to_dict (rsp_dict, volinfo, GD_OP_REPLACE_BRICK,
- tasks);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Failed to add task details to dict");
- goto out;
- }
- tasks++;
- }
-
- ret = dict_set_int32 (rsp_dict, "tasks", tasks);
+ ret = glusterd_aggregate_task_status (rsp_dict, volinfo);
if (ret)
- gf_log (this->name, GF_LOG_ERROR,
- "Error setting tasks count in dict");
+ goto out;
+ ret = 0;
out:
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
@@ -2205,6 +2697,7 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
xlator_t *this = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
uint32_t pending_count = 0;
+ dict_t *dict = NULL;
this = THIS;
priv = this->private;
@@ -2219,27 +2712,61 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
(glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
continue;
- proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_CLUSTER_LOCK];
- if (proc->fn) {
- ret = proc->fn (NULL, this, peerinfo);
- if (ret) {
- gf_log (this->name, GF_LOG_WARNING, "Failed to "
- "send lock request for operation "
- "'Volume %s' to peer %s",
- gd_op_list[opinfo.op],
- peerinfo->hostname);
- continue;
+ /* Based on the op_version, acquire a cluster or mgmt_v3 lock */
+ if (priv->op_version < GD_OP_VERSION_4) {
+ proc = &peerinfo->mgmt->proctable
+ [GLUSTERD_MGMT_CLUSTER_LOCK];
+ if (proc->fn) {
+ ret = proc->fn (NULL, this, peerinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Failed to send lock request "
+ "for operation 'Volume %s' to "
+ "peer %s",
+ gd_op_list[opinfo.op],
+ peerinfo->hostname);
+ continue;
+ }
+ pending_count++;
+ }
+ } else {
+ dict = glusterd_op_get_ctx ();
+ dict_ref (dict);
+
+ proc = &peerinfo->mgmt_v3->proctable
+ [GLUSTERD_MGMT_V3_LOCK];
+ if (proc->fn) {
+ ret = dict_set_static_ptr (dict, "peerinfo",
+ peerinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to set peerinfo");
+ dict_unref (dict);
+ goto out;
+ }
+
+ ret = proc->fn (NULL, this, dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Failed to send mgmt_v3 lock "
+ "request for operation "
+ "'Volume %s' to peer %s",
+ gd_op_list[opinfo.op],
+ peerinfo->hostname);
+ dict_unref (dict);
+ continue;
+ }
+ pending_count++;
}
- pending_count++;
}
}
opinfo.pending_count = pending_count;
if (!opinfo.pending_count)
- ret = glusterd_op_sm_inject_all_acc ();
+ ret = glusterd_op_sm_inject_all_acc (&event->txn_id);
+out:
gf_log (this->name, GF_LOG_DEBUG, "Returning with %d", ret);
-
return ret;
}
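
Both this lock action and the matching unlock below now branch on the cluster op-version: while any peer is still below GD_OP_VERSION_4 the old cluster-wide lock RPC is used unchanged, otherwise the per-volume mgmt_v3 lock is sent, which needs the op dict (it carries the volume name) with the target peerinfo tucked into it. A condensed standalone sketch of the dispatch shape, with stand-in functions for the two RPC paths:

    #include <stdio.h>

    #define GD_OP_VERSION_4 4

    struct peer { const char *hostname; };

    static int
    send_cluster_lock (struct peer *p)      /* stand-in for GLUSTERD_MGMT_CLUSTER_LOCK */
    {
            printf ("cluster lock -> %s\n", p->hostname);
            return 0;
    }

    static int
    send_mgmt_v3_lock (struct peer *p)      /* stand-in for GLUSTERD_MGMT_V3_LOCK */
    {
            printf ("mgmt_v3 lock -> %s\n", p->hostname);
            return 0;
    }

    int
    main (void)
    {
            int op_version = 4;             /* priv->op_version in glusterd */
            struct peer p = { "peer1" };

            if (op_version < GD_OP_VERSION_4)
                    return send_cluster_lock (&p);  /* legacy path, no dict needed */
            return send_mgmt_v3_lock (&p);          /* new path, dict + peerinfo */
    }
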
@@ -2252,17 +2779,12 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
xlator_t *this = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
uint32_t pending_count = 0;
+ dict_t *dict = NULL;
this = THIS;
priv = this->private;
GF_ASSERT (priv);
- /*ret = glusterd_unlock (MY_UUID);
-
- if (ret)
- goto out;
- */
-
list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
GF_ASSERT (peerinfo);
@@ -2272,29 +2794,63 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
(glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
continue;
- proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_CLUSTER_UNLOCK];
- if (proc->fn) {
- ret = proc->fn (NULL, this, peerinfo);
- if (ret) {
- gf_log (this->name, GF_LOG_WARNING, "Failed to "
- "send unlock request for operation "
- "'Volume %s' to peer %s",
- gd_op_list[opinfo.op],
- peerinfo->hostname);
- continue;
+ /* Based on the op_version,
+ * release the cluster or mgmt_v3 lock */
+ if (priv->op_version < GD_OP_VERSION_4) {
+ proc = &peerinfo->mgmt->proctable
+ [GLUSTERD_MGMT_CLUSTER_UNLOCK];
+ if (proc->fn) {
+ ret = proc->fn (NULL, this, peerinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Failed to send unlock request "
+ "for operation 'Volume %s' to "
+ "peer %s",
+ gd_op_list[opinfo.op],
+ peerinfo->hostname);
+ continue;
+ }
+ pending_count++;
+ }
+ } else {
+ dict = glusterd_op_get_ctx ();
+ dict_ref (dict);
+
+ proc = &peerinfo->mgmt_v3->proctable
+ [GLUSTERD_MGMT_V3_UNLOCK];
+ if (proc->fn) {
+ ret = dict_set_static_ptr (dict, "peerinfo",
+ peerinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to set peerinfo");
+ dict_unref (dict);
+ goto out;
+ }
+
+ ret = proc->fn (NULL, this, dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Failed to send volume unlock "
+ "request for operation "
+ "'Volume %s' to peer %s",
+ gd_op_list[opinfo.op],
+ peerinfo->hostname);
+ dict_unref (dict);
+ continue;
+ }
+ pending_count++;
}
- pending_count++;
}
}
opinfo.pending_count = pending_count;
if (!opinfo.pending_count)
- ret = glusterd_op_sm_inject_all_acc ();
+ ret = glusterd_op_sm_inject_all_acc (&event->txn_id);
+out:
gf_log (this->name, GF_LOG_DEBUG, "Returning with %d", ret);
-
return ret;
-
}
static int
@@ -2306,7 +2862,8 @@ glusterd_op_ac_ack_drain (glusterd_op_sm_event_t *event, void *ctx)
opinfo.pending_count--;
if (!opinfo.pending_count)
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK, NULL);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK,
+ &event->txn_id, NULL);
gf_log (THIS->name, GF_LOG_DEBUG, "Returning with %d", ret);
@@ -2322,43 +2879,95 @@ glusterd_op_ac_send_unlock_drain (glusterd_op_sm_event_t *event, void *ctx)
static int
glusterd_op_ac_lock (glusterd_op_sm_event_t *event, void *ctx)
{
- glusterd_op_lock_ctx_t *lock_ctx = NULL;
- int32_t ret = 0;
+ int32_t ret = 0;
+ char *volname = NULL;
+ glusterd_op_lock_ctx_t *lock_ctx = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
GF_ASSERT (event);
GF_ASSERT (ctx);
+ this = THIS;
+ priv = this->private;
+
lock_ctx = (glusterd_op_lock_ctx_t *)ctx;
- ret = glusterd_lock (lock_ctx->uuid);
+        /* If the req came from a node running an older op_version,
+         * the dict won't be present. Based on that, acquire either the
+         * cluster lock or the mgmt_v3 lock */
+ if (lock_ctx->dict == NULL) {
+ ret = glusterd_lock (lock_ctx->uuid);
+ glusterd_op_lock_send_resp (lock_ctx->req, ret);
+ } else {
+ ret = dict_get_str (lock_ctx->dict, "volname", &volname);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to acquire volname");
+ else {
+ ret = glusterd_mgmt_v3_lock (volname, lock_ctx->uuid,
+ "vol");
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to acquire lock for %s",
+ volname);
+ }
- gf_log (THIS->name, GF_LOG_DEBUG, "Lock Returned %d", ret);
+ glusterd_op_mgmt_v3_lock_send_resp (lock_ctx->req,
+ &event->txn_id, ret);
- glusterd_op_lock_send_resp (lock_ctx->req, ret);
+ dict_unref (lock_ctx->dict);
+ }
+ gf_log (THIS->name, GF_LOG_DEBUG, "Lock Returned %d", ret);
return ret;
}
static int
glusterd_op_ac_unlock (glusterd_op_sm_event_t *event, void *ctx)
{
- int ret = 0;
- glusterd_op_lock_ctx_t *lock_ctx = NULL;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
+ int32_t ret = 0;
+ char *volname = NULL;
+ glusterd_op_lock_ctx_t *lock_ctx = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
GF_ASSERT (event);
GF_ASSERT (ctx);
this = THIS;
priv = this->private;
+
lock_ctx = (glusterd_op_lock_ctx_t *)ctx;
- ret = glusterd_unlock (lock_ctx->uuid);
+        /* If the req came from a node running an older op_version,
+         * the dict won't be present. Based on that, release either the
+         * cluster lock or the mgmt_v3 lock */
+ if (lock_ctx->dict == NULL) {
+ ret = glusterd_unlock (lock_ctx->uuid);
+ glusterd_op_unlock_send_resp (lock_ctx->req, ret);
+ } else {
+ ret = dict_get_str (lock_ctx->dict, "volname", &volname);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to acquire volname");
+ else {
+ ret = glusterd_mgmt_v3_unlock (volname, lock_ctx->uuid,
+ "vol");
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to release lock for %s",
+ volname);
+ }
- gf_log (this->name, GF_LOG_DEBUG, "Unlock Returned %d", ret);
+ glusterd_op_mgmt_v3_unlock_send_resp (lock_ctx->req,
+ &event->txn_id, ret);
- glusterd_op_unlock_send_resp (lock_ctx->req, ret);
+ dict_unref (lock_ctx->dict);
+ }
+
+ gf_log (this->name, GF_LOG_DEBUG, "Unlock Returned %d", ret);
if (priv->pending_quorum_action)
glusterd_do_quorum_action ();
@@ -2396,7 +3005,8 @@ glusterd_op_ac_rcvd_lock_acc (glusterd_op_sm_event_t *event, void *ctx)
if (opinfo.pending_count > 0)
goto out;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACC, NULL);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACC,
+ &event->txn_id, NULL);
gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
@@ -2600,9 +3210,6 @@ glusterd_op_build_payload (dict_t **req, char **op_errstr, dict_t *op_ctx)
case GD_OP_STATEDUMP_VOLUME:
case GD_OP_CLEARLOCKS_VOLUME:
case GD_OP_DEFRAG_BRICK_VOLUME:
-#ifdef HAVE_BD_XLATOR
- case GD_OP_BD_OP:
-#endif
{
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
@@ -2656,7 +3263,7 @@ glusterd_is_get_op (xlator_t *this, glusterd_op_t op, dict_t *dict)
if (op == GD_OP_STATUS_VOLUME)
return _gf_true;
- if ((op == GD_OP_SET_VOLUME)) {
+ if (op == GD_OP_SET_VOLUME) {
//check for set volume help
ret = dict_get_str (dict, "volname", &volname);
if (volname &&
@@ -2831,7 +3438,8 @@ out:
if (dict)
dict_unref (dict);
if (ret) {
- glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT, NULL);
+ glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT,
+ &event->txn_id, NULL);
opinfo.op_ret = ret;
}
@@ -2840,7 +3448,7 @@ out:
opinfo.pending_count);
if (!opinfo.pending_count)
- ret = glusterd_op_sm_inject_all_acc ();
+ ret = glusterd_op_sm_inject_all_acc (&event->txn_id);
gf_log (this->name, GF_LOG_DEBUG, "Returning with %d", ret);
@@ -2849,10 +3457,10 @@ out:
}
static int32_t
-glusterd_op_start_rb_timer (dict_t *dict)
+glusterd_op_start_rb_timer (dict_t *dict, uuid_t *txn_id)
{
int32_t op = 0;
- struct timeval timeout = {0, };
+ struct timespec timeout = {0, };
glusterd_conf_t *priv = NULL;
int32_t ret = -1;
dict_t *rb_ctx = NULL;
@@ -2868,12 +3476,12 @@ glusterd_op_start_rb_timer (dict_t *dict)
}
if (op != GF_REPLACE_OP_START) {
- ret = glusterd_op_sm_inject_all_acc ();
+ ret = glusterd_op_sm_inject_all_acc (txn_id);
goto out;
}
timeout.tv_sec = 5;
- timeout.tv_usec = 0;
+ timeout.tv_nsec = 0;
rb_ctx = dict_copy (dict, rb_ctx);
@@ -2883,6 +3491,17 @@ glusterd_op_start_rb_timer (dict_t *dict)
ret = -1;
goto out;
}
+
+ ret = dict_set_bin (rb_ctx, "transaction_id",
+ txn_id, sizeof (*txn_id));
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Failed to set transaction id.");
+ goto out;
+ } else
+ gf_log ("", GF_LOG_DEBUG,
+ "transaction_id = %s", uuid_utoa (*txn_id));
+
priv->timer = gf_timer_call_after (THIS->ctx, timeout,
glusterd_do_replace_brick,
(void *) rb_ctx);
@@ -2951,6 +3570,97 @@ out:
return ret;
}
+static int
+reassign_defrag_status (dict_t *dict, char *key, gf_defrag_status_t *status)
+{
+ int ret = 0;
+
+ if (!*status)
+ return ret;
+
+ switch (*status) {
+ case GF_DEFRAG_STATUS_STARTED:
+ *status = GF_DEFRAG_STATUS_LAYOUT_FIX_STARTED;
+ break;
+
+ case GF_DEFRAG_STATUS_STOPPED:
+ *status = GF_DEFRAG_STATUS_LAYOUT_FIX_STOPPED;
+ break;
+
+ case GF_DEFRAG_STATUS_COMPLETE:
+ *status = GF_DEFRAG_STATUS_LAYOUT_FIX_COMPLETE;
+ break;
+
+ case GF_DEFRAG_STATUS_FAILED:
+ *status = GF_DEFRAG_STATUS_LAYOUT_FIX_FAILED;
+ break;
+ default:
+ break;
+ }
+
+ ret = dict_set_int32(dict, key, *status);
+ if (ret)
+ gf_log (THIS->name, GF_LOG_WARNING,
+ "failed to reset defrag %s in dict", key);
+
+ return ret;
+}
+
+/* Check and reassign the defrag_status enum got from the rebalance process
+ * of all peers so that the rebalance-status CLI command can display if a
+ * full-rebalance or just a fix-layout was carried out.
+ */
+static int
+glusterd_op_check_peer_defrag_status (dict_t *dict, int count)
+{
+ glusterd_volinfo_t *volinfo = NULL;
+ gf_defrag_status_t status = GF_DEFRAG_STATUS_NOT_STARTED;
+ char key[256] = {0,};
+ char *volname = NULL;
+ int ret = -1;
+ int i = 1;
+
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_WARNING, "Unable to get volume name");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_WARNING, FMTSTR_CHECK_VOL_EXISTS,
+ volname);
+ goto out;
+ }
+
+ if (volinfo->rebal.defrag_cmd != GF_DEFRAG_CMD_START_LAYOUT_FIX) {
+ /* Fix layout was not issued; we don't need to reassign
+ the status */
+ ret = 0;
+ goto out;
+ }
+
+ do {
+ memset (key, 0, 256);
+ snprintf (key, 256, "status-%d", i);
+ ret = dict_get_int32 (dict, key, (int32_t *)&status);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_WARNING,
+ "failed to get defrag %s", key);
+ goto out;
+ }
+ ret = reassign_defrag_status (dict, key, &status);
+ if (ret)
+ goto out;
+ i++;
+ } while (i <= count);
+
+ ret = 0;
+out:
+ return ret;
+
+}
+
/* This function is used to modify the op_ctx dict before sending it back
* to cli. This is useful in situations like changing the peer uuids to
* hostnames etc.
@@ -3013,6 +3723,38 @@ glusterd_op_modify_op_ctx (glusterd_op_t op, void *ctx)
count = brick_index_max + other_count + 1;
+        /* add 'brick%d.peerid' into op_ctx with the value of 'brick%d.path'.
+           node services like nfs/shd carry a peer uuid in that field */
+ {
+ char key[1024];
+ char *uuid_str = NULL;
+ char *uuid = NULL;
+ int i;
+
+ for (i = brick_index_max + 1; i < count; i++) {
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "brick%d.path", i);
+ ret = dict_get_str (op_ctx, key, &uuid_str);
+ if (!ret) {
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key),
+ "brick%d.peerid", i);
+ uuid = gf_strdup (uuid_str);
+ if (!uuid) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "unable to create dup of"
+ " uuid_str");
+ continue;
+ }
+ ret = dict_set_dynstr (op_ctx, key,
+ uuid);
+ if (ret != 0) {
+ GF_FREE (uuid);
+ }
+ }
+ }
+ }
+
ret = glusterd_op_volume_dict_uuid_to_hostname (op_ctx,
"brick%d.path",
0, count);
@@ -3054,12 +3796,49 @@ glusterd_op_modify_op_ctx (glusterd_op_t op, void *ctx)
goto out;
}
+        /* add 'node-name-%d' into op_ctx with the value of uuid_str;
+           it will be converted to a hostname later */
+ {
+ char key[1024];
+ char *uuid_str = NULL;
+ char *uuid = NULL;
+ int i;
+
+ for (i = 1; i <= count; i++) {
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "node-uuid-%d", i);
+ ret = dict_get_str (op_ctx, key, &uuid_str);
+ if (!ret) {
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key),
+ "node-name-%d", i);
+ uuid = gf_strdup (uuid_str);
+ if (!uuid) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "unable to create dup of"
+ " uuid_str");
+ continue;
+ }
+ ret = dict_set_dynstr (op_ctx, key,
+ uuid);
+ if (ret != 0) {
+ GF_FREE (uuid);
+ }
+ }
+ }
+ }
+
ret = glusterd_op_volume_dict_uuid_to_hostname (op_ctx,
- "node-uuid-%d",
+ "node-name-%d",
1, (count + 1));
if (ret)
gf_log (this->name, GF_LOG_WARNING,
"Failed uuid to hostname conversion");
+
+ ret = glusterd_op_check_peer_defrag_status (op_ctx, count);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to reset defrag status for fix-layout");
break;
default:
@@ -3212,17 +3991,19 @@ out:
if (dict)
dict_unref (dict);
if (ret) {
- glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT, NULL);
+ glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT,
+ &event->txn_id, NULL);
opinfo.op_ret = ret;
}
if (!opinfo.pending_count) {
if (op == GD_OP_REPLACE_BRICK) {
- ret = glusterd_op_start_rb_timer (op_dict);
+ ret = glusterd_op_start_rb_timer (op_dict,
+ &event->txn_id);
} else {
glusterd_op_modify_op_ctx (op, NULL);
- ret = glusterd_op_sm_inject_all_acc ();
+ ret = glusterd_op_sm_inject_all_acc (&event->txn_id);
}
goto err;
}
@@ -3247,7 +4028,8 @@ glusterd_op_ac_rcvd_stage_op_acc (glusterd_op_sm_event_t *event, void *ctx)
if (opinfo.pending_count > 0)
goto out;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_STAGE_ACC, NULL);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_STAGE_ACC,
+ &event->txn_id, NULL);
out:
gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
@@ -3268,7 +4050,8 @@ glusterd_op_ac_stage_op_failed (glusterd_op_sm_event_t *event, void *ctx)
if (opinfo.pending_count > 0)
goto out;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK, NULL);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK,
+ &event->txn_id, NULL);
out:
gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
@@ -3289,7 +4072,8 @@ glusterd_op_ac_commit_op_failed (glusterd_op_sm_event_t *event, void *ctx)
if (opinfo.pending_count > 0)
goto out;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK, NULL);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK,
+ &event->txn_id, NULL);
out:
gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
@@ -3332,7 +4116,8 @@ glusterd_op_ac_brick_op_failed (glusterd_op_sm_event_t *event, void *ctx)
if (opinfo.brick_pending_count > 0)
goto out;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK, ev_ctx->commit_ctx);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK,
+ &event->txn_id, ev_ctx->commit_ctx);
out:
if (ev_ctx->rsp_dict)
@@ -3374,7 +4159,7 @@ glusterd_op_ac_rcvd_commit_op_acc (glusterd_op_sm_event_t *event, void *ctx)
goto out;
}
- ret = glusterd_op_start_rb_timer (op_ctx);
+ ret = glusterd_op_start_rb_timer (op_ctx, &event->txn_id);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Couldn't start "
"replace-brick operation.");
@@ -3389,10 +4174,14 @@ glusterd_op_ac_rcvd_commit_op_acc (glusterd_op_sm_event_t *event, void *ctx)
out:
if (commit_ack_inject) {
if (ret)
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT, NULL);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT,
+ &event->txn_id,
+ NULL);
else if (!opinfo.pending_count) {
glusterd_op_modify_op_ctx (op, NULL);
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_ACC, NULL);
+ ret = glusterd_op_sm_inject_event
+ (GD_OP_EVENT_COMMIT_ACC,
+ &event->txn_id, NULL);
}
/*else do nothing*/
}
@@ -3413,7 +4202,8 @@ glusterd_op_ac_rcvd_unlock_acc (glusterd_op_sm_event_t *event, void *ctx)
if (opinfo.pending_count > 0)
goto out;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACC, NULL);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACC,
+ &event->txn_id, NULL);
gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
@@ -3447,7 +4237,7 @@ glusterd_op_reset_ctx ()
}
int32_t
-glusterd_op_txn_complete ()
+glusterd_op_txn_complete (uuid_t *txn_id)
{
int32_t ret = -1;
glusterd_conf_t *priv = NULL;
@@ -3457,6 +4247,7 @@ glusterd_op_txn_complete ()
rpcsvc_request_t *req = NULL;
void *ctx = NULL;
char *op_errstr = NULL;
+ char *volname = NULL;
xlator_t *this = NULL;
this = THIS;
@@ -3479,14 +4270,30 @@ glusterd_op_txn_complete ()
glusterd_op_reset_ctx ();
glusterd_op_clear_errstr ();
- ret = glusterd_unlock (MY_UUID);
-
- /* unlock cant/shouldnt fail here!! */
- if (ret) {
- gf_log (this->name, GF_LOG_CRITICAL,
- "Unable to clear local lock, ret: %d", ret);
+ /* Based on the op-version, we release the cluster or mgmt_v3 lock */
+ if (priv->op_version < GD_OP_VERSION_4) {
+ ret = glusterd_unlock (MY_UUID);
+ /* unlock cant/shouldnt fail here!! */
+ if (ret)
+ gf_log (this->name, GF_LOG_CRITICAL,
+ "Unable to clear local lock, ret: %d", ret);
+ else
+ gf_log (this->name, GF_LOG_DEBUG, "Cleared local lock");
} else {
- gf_log (this->name, GF_LOG_DEBUG, "Cleared local lock");
+ ret = dict_get_str (ctx, "volname", &volname);
+ if (ret)
+ gf_log ("", GF_LOG_INFO,
+ "No Volume name present. "
+ "Locks have not been held.");
+
+ if (volname) {
+ ret = glusterd_mgmt_v3_unlock (volname, MY_UUID,
+ "vol");
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to release lock for %s",
+ volname);
+ }
}
ret = glusterd_op_send_cli_response (op, op_ret,
@@ -3505,6 +4312,13 @@ glusterd_op_txn_complete ()
if (priv->pending_quorum_action)
glusterd_do_quorum_action ();
+
+ /* Clearing the transaction opinfo */
+ ret = glusterd_clear_txn_opinfo (txn_id);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to clear transaction's opinfo");
+
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
@@ -3516,7 +4330,7 @@ glusterd_op_ac_unlocked_all (glusterd_op_sm_event_t *event, void *ctx)
GF_ASSERT (event);
- ret = glusterd_op_txn_complete ();
+ ret = glusterd_op_txn_complete (&event->txn_id);
gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
@@ -3533,6 +4347,7 @@ glusterd_op_ac_stage_op (glusterd_op_sm_event_t *event, void *ctx)
char *op_errstr = NULL;
dict_t *dict = NULL;
xlator_t *this = NULL;
+ uuid_t *txn_id = NULL;
this = THIS;
GF_ASSERT (this);
@@ -3558,9 +4373,27 @@ glusterd_op_ac_stage_op (glusterd_op_sm_event_t *event, void *ctx)
status);
}
+ txn_id = GF_CALLOC (1, sizeof(uuid_t), gf_common_mt_uuid_t);
+
+ if (txn_id)
+ uuid_copy (*txn_id, event->txn_id);
+ else {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_bin (rsp_dict, "transaction_id",
+ txn_id, sizeof(*txn_id));
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set transaction id.");
+ goto out;
+ }
+
ret = glusterd_op_stage_send_resp (req_ctx->req, req_ctx->op,
status, op_errstr, rsp_dict);
+out:
if (op_errstr && (strcmp (op_errstr, "")))
GF_FREE (op_errstr);
@@ -3622,6 +4455,7 @@ glusterd_op_ac_commit_op (glusterd_op_sm_event_t *event, void *ctx)
dict_t *dict = NULL;
dict_t *rsp_dict = NULL;
xlator_t *this = NULL;
+ uuid_t *txn_id = NULL;
this = THIS;
GF_ASSERT (this);
@@ -3651,10 +4485,27 @@ glusterd_op_ac_commit_op (glusterd_op_sm_event_t *event, void *ctx)
"'Volume %s' failed: %d", gd_op_list[req_ctx->op],
status);
+ txn_id = GF_CALLOC (1, sizeof(uuid_t), gf_common_mt_uuid_t);
+
+ if (txn_id)
+ uuid_copy (*txn_id, event->txn_id);
+ else {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_bin (rsp_dict, "transaction_id",
+ txn_id, sizeof(*txn_id));
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set transaction id.");
+ goto out;
+ }
+
ret = glusterd_op_commit_send_resp (req_ctx->req, req_ctx->op,
status, op_errstr, rsp_dict);
- glusterd_op_fini_ctx ();
+out:
if (op_errstr && (strcmp (op_errstr, "")))
GF_FREE (op_errstr);
@@ -3683,7 +4534,6 @@ glusterd_op_ac_send_commit_failed (glusterd_op_sm_event_t *event, void *ctx)
opinfo.op_ret, opinfo.op_errstr,
op_ctx);
- glusterd_op_fini_ctx ();
if (opinfo.op_errstr && (strcmp (opinfo.op_errstr, ""))) {
GF_FREE (opinfo.op_errstr);
opinfo.op_errstr = NULL;
@@ -3781,7 +4631,8 @@ glusterd_op_stage_validate (glusterd_op_t op, dict_t *dict, char **op_errstr,
break;
case GD_OP_QUOTA:
- ret = glusterd_op_stage_quota (dict, op_errstr);
+ ret = glusterd_op_stage_quota (dict, op_errstr,
+ rsp_dict);
break;
case GD_OP_STATUS_VOLUME:
@@ -3805,11 +4656,6 @@ glusterd_op_stage_validate (glusterd_op_t op, dict_t *dict, char **op_errstr,
ret = glusterd_op_stage_clearlocks_volume (dict,
op_errstr);
break;
-#ifdef HAVE_BD_XLATOR
- case GD_OP_BD_OP:
- ret = glusterd_op_stage_bd (dict, op_errstr);
- break;
-#endif
case GD_OP_COPY_FILE:
ret = glusterd_op_stage_copy_file (dict, op_errstr);
@@ -3921,11 +4767,6 @@ glusterd_op_commit_perform (glusterd_op_t op, dict_t *dict, char **op_errstr,
ret = glusterd_op_clearlocks_volume (dict, op_errstr,
rsp_dict);
break;
-#ifdef HAVE_BD_XLATOR
- case GD_OP_BD_OP:
- ret = 0;
- break;
-#endif
case GD_OP_COPY_FILE:
ret = glusterd_op_copy_file (dict, op_errstr);
@@ -3948,6 +4789,7 @@ glusterd_op_commit_perform (glusterd_op_t op, dict_t *dict, char **op_errstr,
return ret;
}
+
static int
glusterd_bricks_select_stop_volume (dict_t *dict, char **op_errstr,
struct list_head *selected)
@@ -4264,24 +5106,95 @@ out:
}
int
+get_replica_index_for_per_replica_cmd (glusterd_volinfo_t *volinfo,
+                                       dict_t *dict)
+{
+ int ret = 0;
+ char *hostname = NULL;
+ char *path = NULL;
+ int index = 0;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ int cmd_replica_index = -1;
+ int replica_count = -1;
+
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "per-replica-cmd-hostname", &hostname);
+ if (ret)
+ goto out;
+ ret = dict_get_str (dict, "per-replica-cmd-path", &path);
+ if (ret)
+ goto out;
+
+ replica_count = volinfo->replica_count;
+
+ list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+ if (uuid_is_null (brickinfo->uuid))
+ (void)glusterd_resolve_brick (brickinfo);
+ if (!strcmp (brickinfo->path, path) &&
+ !strcmp (brickinfo->hostname, hostname)) {
+ cmd_replica_index = index/(replica_count);
+ goto out;
+ }
+ index++;
+ }
+
+out:
+ if (ret)
+ cmd_replica_index = -1;
+
+ return cmd_replica_index;
+}
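
The index/replica_count division works because bricks are held in replica-set order in volinfo->bricks: with a 0-based brick position and replica_count N, positions 0..N-1 form replica set 0, N..2N-1 form set 1, and so on. A quick standalone check of the arithmetic:

    #include <stdio.h>

    int
    main (void)
    {
            int replica_count = 3;
            int index;

            for (index = 0; index < 6; index++)
                    printf ("brick %d -> replica set %d\n",
                            index, index / replica_count);
            return 0;
    }
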
+
+int
_select_rxlators_with_local_bricks (xlator_t *this, glusterd_volinfo_t *volinfo,
- dict_t *dict)
+ dict_t *dict, cli_cmd_type type)
{
glusterd_brickinfo_t *brickinfo = NULL;
glusterd_conf_t *priv = NULL;
- int index = 1;
+ int index = 0;
int rxlator_count = 0;
int replica_count = 0;
gf_boolean_t add = _gf_false;
+ int ret = 0;
+ int cmd_replica_index = -1;
priv = this->private;
replica_count = volinfo->replica_count;
+
+ if (type == PER_REPLICA) {
+ cmd_replica_index = get_replica_index_for_per_replica_cmd
+ (volinfo, dict);
+ if (cmd_replica_index == -1) {
+ ret = -1;
+ goto err;
+ }
+ }
+
+ index = 1;
+
list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
if (uuid_is_null (brickinfo->uuid))
(void)glusterd_resolve_brick (brickinfo);
- if (!uuid_compare (MY_UUID, brickinfo->uuid))
- add = _gf_true;
+ switch (type) {
+ case ALL_REPLICA:
+ if (!uuid_compare (MY_UUID, brickinfo->uuid))
+ add = _gf_true;
+ break;
+ case PER_REPLICA:
+ if (!uuid_compare (MY_UUID, brickinfo->uuid) &&
+ ((index-1)/replica_count == cmd_replica_index))
+ add = _gf_true;
+ break;
+ }
+
if (index % replica_count == 0) {
if (add) {
_add_rxlator_to_dict (dict, volinfo->volname,
@@ -4294,6 +5207,10 @@ _select_rxlators_with_local_bricks (xlator_t *this, glusterd_volinfo_t *volinfo,
index++;
}
+err:
+ if (ret)
+ rxlator_count = -1;
+
return rxlator_count;
}
@@ -4334,9 +5251,10 @@ _select_rxlators_for_full_self_heal (xlator_t *this,
return rxlator_count;
}
-#ifdef HAVE_BD_XLATOR
+
static int
-glusterd_bricks_select_bd (dict_t *dict, char **op_errstr)
+glusterd_bricks_select_snap (dict_t *dict, char **op_errstr,
+ struct list_head *selected)
{
int ret = -1;
glusterd_conf_t *priv = NULL;
@@ -4354,31 +5272,31 @@ glusterd_bricks_select_bd (dict_t *dict, char **op_errstr)
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Unable to get volname");
+ gf_log (this->name, GF_LOG_ERROR, "Unable to get"
+ " volname");
goto out;
}
ret = glusterd_volinfo_find (volname, &volinfo);
if (ret)
goto out;
- pending_node = GF_CALLOC (1, sizeof (*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- ret = -1;
- goto out;
- }
-
list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
brick_index++;
if (uuid_compare (brickinfo->uuid, MY_UUID) ||
!glusterd_is_brick_started (brickinfo)) {
continue;
}
+ pending_node = GF_CALLOC (1, sizeof (*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ }
pending_node->node = brickinfo;
pending_node->type = GD_NODE_BRICK;
pending_node->index = brick_index;
list_add_tail (&pending_node->list,
- &opinfo.pending_bricks);
+ selected);
pending_node = NULL;
}
@@ -4388,10 +5306,10 @@ out:
gf_log (THIS->name, GF_LOG_DEBUG, "Returning ret %d", ret);
return ret;
}
-#endif
static int
-fill_shd_status_for_local_bricks (dict_t *dict, glusterd_volinfo_t *volinfo)
+fill_shd_status_for_local_bricks (dict_t *dict, glusterd_volinfo_t *volinfo,
+ cli_cmd_type type, dict_t *req_dict)
{
glusterd_brickinfo_t *brickinfo = NULL;
char msg[1024] = {0,};
@@ -4400,10 +5318,22 @@ fill_shd_status_for_local_bricks (dict_t *dict, glusterd_volinfo_t *volinfo)
int index = 0;
int ret = 0;
xlator_t *this = NULL;
+ int cmd_replica_index = -1;
this = THIS;
snprintf (msg, sizeof (msg), "self-heal-daemon is not running on");
+ if (type == PER_REPLICA) {
+ cmd_replica_index = get_replica_index_for_per_replica_cmd
+ (volinfo, req_dict);
+ if (cmd_replica_index == -1) {
+                        gf_log (THIS->name, GF_LOG_ERROR, "Could not find the "
+                                "replica index for the per-replica command");
+ ret = -1;
+ goto out;
+ }
+ }
+
list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
if (uuid_is_null (brickinfo->uuid))
(void)glusterd_resolve_brick (brickinfo);
@@ -4412,6 +5342,14 @@ fill_shd_status_for_local_bricks (dict_t *dict, glusterd_volinfo_t *volinfo)
index++;
continue;
}
+
+ if (type == PER_REPLICA) {
+ if (cmd_replica_index != (index/volinfo->replica_count)) {
+ index++;
+ continue;
+ }
+
+ }
snprintf (key, sizeof (key), "%d-status",index);
snprintf (value, sizeof (value), "%s %s",msg,
uuid_utoa(MY_UUID));
@@ -4480,21 +5418,49 @@ glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
goto out;
}
+ switch (heal_op) {
+ case GF_AFR_OP_INDEX_SUMMARY:
+ case GF_AFR_OP_STATISTICS_HEAL_COUNT:
+ if (!glusterd_is_nodesvc_online ("glustershd")) {
+ if (!rsp_dict) {
+ gf_log (this->name, GF_LOG_ERROR, "Received "
+ "empty ctx.");
+ goto out;
+ }
- if (!glusterd_is_nodesvc_online ("glustershd") &&
- (heal_op == GF_AFR_OP_INDEX_SUMMARY)) {
-
- if (!rsp_dict) {
- gf_log (this->name, GF_LOG_ERROR, "Received empty "
- "ctx.");
+ ret = fill_shd_status_for_local_bricks (rsp_dict,
+ volinfo,
+ ALL_REPLICA,
+ dict);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR, "Unable to "
+ "fill the shd status for the local "
+ "bricks");
goto out;
+
}
+ break;
+ case GF_AFR_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
+ if (!glusterd_is_nodesvc_online ("glustershd")) {
+ if (!rsp_dict) {
+ gf_log (this->name, GF_LOG_ERROR, "Received "
+ "empty ctx.");
+ goto out;
+ }
+ ret = fill_shd_status_for_local_bricks (rsp_dict,
+ volinfo,
+ PER_REPLICA,
+ dict);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR, "Unable to "
+ "fill the shd status for the local"
+ " bricks.");
+ goto out;
- ret = fill_shd_status_for_local_bricks (rsp_dict, volinfo);
- if (ret)
- gf_log (this->name, GF_LOG_ERROR, "Unable to fill the shd"
- " status for the local bricks");
- goto out;
+ }
+ break;
+ default:
+ break;
}
@@ -4504,14 +5470,28 @@ glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
volinfo,
dict);
break;
+ case GF_AFR_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
+ rxlator_count = _select_rxlators_with_local_bricks (this,
+ volinfo,
+ dict,
+ PER_REPLICA);
+ break;
default:
rxlator_count = _select_rxlators_with_local_bricks (this,
volinfo,
- dict);
+ dict,
+ ALL_REPLICA);
break;
}
if (!rxlator_count)
goto out;
+        if (rxlator_count == -1) {
+                gf_log (this->name, GF_LOG_ERROR, "Could not determine the "
+                        "translator count");
+ ret = -1;
+ goto out;
+ }
+
ret = dict_set_int32 (dict, "count", rxlator_count);
if (ret)
goto out;
@@ -4580,9 +5560,6 @@ out:
return ret;
}
-
-
-
static int
glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
struct list_head *selected)
@@ -4622,6 +5599,7 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
case GF_CLI_STATUS_CALLPOOL:
case GF_CLI_STATUS_NFS:
case GF_CLI_STATUS_SHD:
+ case GF_CLI_STATUS_QUOTAD:
break;
default:
goto out;
@@ -4703,6 +5681,25 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
list_add_tail (&pending_node->list, selected);
ret = 0;
+ } else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
+ if (!glusterd_is_nodesvc_online ("quotad")) {
+ gf_log (this->name, GF_LOG_ERROR, "Quotad is not "
+ "running");
+ ret = -1;
+ goto out;
+ }
+ pending_node = GF_CALLOC (1, sizeof (*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ }
+ pending_node->node = priv->quotad;
+ pending_node->type = GD_NODE_QUOTAD;
+ pending_node->index = 0;
+ list_add_tail (&pending_node->list, selected);
+
+ ret = 0;
} else {
list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
brick_index++;
@@ -4773,7 +5770,8 @@ glusterd_op_ac_send_brick_op (glusterd_op_sm_event_t *event, void *ctx)
if (!opinfo.pending_count && !opinfo.brick_pending_count) {
glusterd_clear_pending_nodes (&opinfo.pending_bricks);
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK, req_ctx);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK,
+ &event->txn_id, req_ctx);
}
out:
@@ -4827,7 +5825,8 @@ glusterd_op_ac_rcvd_brick_op_acc (glusterd_op_sm_event_t *event, void *ctx)
if (opinfo.brick_pending_count > 0)
goto out;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK, ev_ctx->commit_ctx);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK, &event->txn_id,
+ ev_ctx->commit_ctx);
out:
if (ev_ctx->rsp_dict)
@@ -4879,11 +5878,9 @@ glusterd_op_bricks_select (glusterd_op_t op, dict_t *dict, char **op_errstr,
ret = glusterd_bricks_select_rebalance_volume (dict, op_errstr,
selected);
break;
-#ifdef HAVE_BD_XLATOR
- case GD_OP_BD_OP:
- ret = glusterd_bricks_select_bd (dict, op_errstr);
+ case GD_OP_SNAP:
+ ret = glusterd_bricks_select_snap (dict, op_errstr, selected);
break;
-#endif
default:
break;
}
@@ -5205,7 +6202,7 @@ glusterd_op_sm_new_event (glusterd_op_sm_event_type_t event_type,
int
glusterd_op_sm_inject_event (glusterd_op_sm_event_type_t event_type,
- void *ctx)
+ uuid_t *txn_id, void *ctx)
{
int32_t ret = -1;
glusterd_op_sm_event_t *event = NULL;
@@ -5220,6 +6217,9 @@ glusterd_op_sm_inject_event (glusterd_op_sm_event_type_t event_type,
event->ctx = ctx;
+ if (txn_id)
+ uuid_copy (event->txn_id, *txn_id);
+
gf_log (THIS->name, GF_LOG_DEBUG, "Enqueue event: '%s'",
glusterd_op_sm_event_name_get (event->event));
list_add_tail (&event->list, &gd_op_sm_queue);
@@ -5280,6 +6280,7 @@ glusterd_op_sm ()
glusterd_op_sm_t *state = NULL;
glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
xlator_t *this = NULL;
+ glusterd_op_info_t txn_op_info;
this = THIS;
GF_ASSERT (this);
@@ -5300,6 +6301,20 @@ glusterd_op_sm ()
"type: '%s'",
glusterd_op_sm_event_name_get(event_type));
+ gf_log ("", GF_LOG_DEBUG, "transaction ID = %s",
+ uuid_utoa (event->txn_id));
+
+ ret = glusterd_get_txn_opinfo (&event->txn_id,
+ &txn_op_info);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to get transaction's opinfo");
+ glusterd_destroy_op_event_ctx (event);
+ GF_FREE (event);
+ continue;
+ } else
+ opinfo = txn_op_info;
+
state = glusterd_op_state_table[opinfo.state.state];
GF_ASSERT (state);
@@ -5330,8 +6345,27 @@ glusterd_op_sm ()
return ret;
}
+ if ((state[event_type].next_state ==
+ GD_OP_STATE_DEFAULT) &&
+ (event_type == GD_OP_EVENT_UNLOCK)) {
+ /* Clearing the transaction opinfo */
+ ret = glusterd_clear_txn_opinfo(&event->txn_id);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to clear "
+ "transaction's opinfo");
+ } else {
+ ret = glusterd_set_txn_opinfo (&event->txn_id,
+ &opinfo);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set "
+ "transaction's opinfo");
+ }
+
glusterd_destroy_op_event_ctx (event);
GF_FREE (event);
+
}
}
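
This is the heart of making the op state machine transaction-aware: every dequeued event carries a txn_id, the matching opinfo is fetched into the (still global) opinfo before the handler runs, and afterwards it is either written back or, on the UNLOCK event that lands back in GD_OP_STATE_DEFAULT, discarded. A toy sketch of that lifecycle, with a single-slot store standing in for the real txn-opinfo map added elsewhere in this patch series:

    #include <stdio.h>

    struct opinfo { int state; };

    static struct opinfo slot;      /* toy single-transaction store */
    static int have_slot;

    static int get_txn_opinfo (struct opinfo *o) { if (!have_slot) return -1; *o = slot; return 0; }
    static int set_txn_opinfo (struct opinfo *o) { slot = *o; have_slot = 1; return 0; }
    static int clear_txn_opinfo (void) { have_slot = 0; return 0; }

    int
    main (void)
    {
            struct opinfo txn = { 0 };

            set_txn_opinfo (&txn);                  /* transaction begins */
            if (get_txn_opinfo (&txn))              /* fetched per dequeued event */
                    return 1;
            txn.state++;                            /* handler advances the state */
            set_txn_opinfo (&txn);                  /* persisted for the next event */
            clear_txn_opinfo ();                    /* final UNLOCK -> DEFAULT */
            printf ("transaction retired, state %d\n", txn.state);
            return 0;
    }
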
@@ -5385,52 +6419,6 @@ glusterd_op_clear_op (glusterd_op_t op)
}
int32_t
-glusterd_op_init_ctx (glusterd_op_t op)
-{
- int ret = 0;
- dict_t *dict = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (GD_OP_NONE < op && op < GD_OP_MAX);
-
- if (_gf_false == glusterd_need_brick_op (op)) {
- gf_log (this->name, GF_LOG_DEBUG, "Received op: %s, returning",
- gd_op_list[op]);
- goto out;
- }
- dict = dict_new ();
- if (dict == NULL) {
- ret = -1;
- goto out;
- }
- ret = glusterd_op_set_ctx (dict);
- if (ret)
- goto out;
-out:
- gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
- return ret;
-}
-
-
-
-int32_t
-glusterd_op_fini_ctx ()
-{
- dict_t *dict = NULL;
-
- dict = glusterd_op_get_ctx ();
- if (dict)
- dict_unref (dict);
-
- glusterd_op_reset_ctx ();
- return 0;
-}
-
-
-
-int32_t
glusterd_op_free_ctx (glusterd_op_t op, void *ctx)
{
@@ -5456,9 +6444,6 @@ glusterd_op_free_ctx (glusterd_op_t op, void *ctx)
case GD_OP_STATEDUMP_VOLUME:
case GD_OP_CLEARLOCKS_VOLUME:
case GD_OP_DEFRAG_BRICK_VOLUME:
-#ifdef HAVE_BD_XLATOR
- case GD_OP_BD_OP:
-#endif
dict_unref (ctx);
break;
default:
@@ -5487,4 +6472,3 @@ glusterd_op_sm_init ()
pthread_mutex_init (&gd_op_sm_lock, NULL);
return 0;
}
-
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.h b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
index 321c5c484..4a73b08f4 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.h
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
@@ -32,6 +32,8 @@
#include "protocol-common.h"
#define GD_VOLUME_NAME_MAX 256
+#define GD_OP_PROTECTED (0x02)
+#define GD_OP_UNPROTECTED (0x04)
typedef enum glusterd_op_sm_state_ {
GD_OP_STATE_DEFAULT = 0,
@@ -75,6 +77,7 @@ struct glusterd_op_sm_event_ {
struct list_head list;
void *ctx;
glusterd_op_sm_event_type_t event;
+ uuid_t txn_id;
};
typedef struct glusterd_op_sm_event_ glusterd_op_sm_event_t;
@@ -117,6 +120,7 @@ typedef struct glusterd_op_log_filename_ctx_ glusterd_op_log_filename_ctx_t;
struct glusterd_op_lock_ctx_ {
uuid_t uuid;
+ dict_t *dict;
rpcsvc_request_t *req;
};
@@ -162,12 +166,27 @@ typedef struct glusterd_gsync_status_temp {
glusterd_volinfo_t *volinfo;
char *node;
}glusterd_gsync_status_temp_t;
+
+typedef struct gsync_status_param {
+ int is_active;
+ glusterd_volinfo_t *volinfo;
+}gsync_status_param_t;
+
+typedef struct glusterd_txn_opinfo_object_ {
+ glusterd_op_info_t opinfo;
+} glusterd_txn_opinfo_obj;
+
+typedef enum cli_cmd_type_ {
+ PER_REPLICA,
+ ALL_REPLICA,
+} cli_cmd_type;
+
int
glusterd_op_sm_new_event (glusterd_op_sm_event_type_t event_type,
glusterd_op_sm_event_t **new_event);
int
glusterd_op_sm_inject_event (glusterd_op_sm_event_type_t event_type,
- void *ctx);
+ uuid_t *txn_id, void *ctx);
int
glusterd_op_sm_init ();
@@ -251,10 +270,7 @@ glusterd_op_init_commit_rsp_dict (glusterd_op_t op);
void
glusterd_op_modify_op_ctx (glusterd_op_t op, void *op_ctx);
-int32_t
-glusterd_op_init_ctx (glusterd_op_t op);
-int32_t
-glusterd_op_fini_ctx ();
+
int32_t
glusterd_volume_stats_read_perf (char *brick_path, int32_t blk_size,
int32_t blk_count, double *throughput, double *time);
@@ -278,4 +294,21 @@ glusterd_check_gsync_running (glusterd_volinfo_t *volinfo, gf_boolean_t *flag);
int
glusterd_defrag_volume_node_rsp (dict_t *req_dict, dict_t *rsp_dict,
dict_t *op_ctx);
+#ifdef HAVE_BD_XLATOR
+int
+glusterd_is_valid_vg (glusterd_brickinfo_t *brick, int check_tag, char *msg);
+#endif
+
+int32_t
+glusterd_get_txn_opinfo (uuid_t *txn_id, glusterd_op_info_t *opinfo);
+
+int32_t
+glusterd_set_txn_opinfo (uuid_t *txn_id, glusterd_op_info_t *opinfo);
+
+int32_t
+glusterd_clear_txn_opinfo (uuid_t *txn_id);
+
+int32_t
+glusterd_generate_txn_id (dict_t *dict, uuid_t **txn_id);
+
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-pmap.c b/xlators/mgmt/glusterd/src/glusterd-pmap.c
index 9fdd782fe..a153ca1a9 100644
--- a/xlators/mgmt/glusterd/src/glusterd-pmap.c
+++ b/xlators/mgmt/glusterd/src/glusterd-pmap.c
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2010-2012 Red Hat, Inc. <http://www.redhat.com>
+ Copyright (c) 2010-2013 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
@@ -52,8 +52,8 @@ pmap_port_isfree (int port)
}
-struct pmap_registry *
-pmap_registry_new (void)
+static struct pmap_registry *
+pmap_registry_new (xlator_t *this)
{
struct pmap_registry *pmap = NULL;
int i = 0;
@@ -69,8 +69,8 @@ pmap_registry_new (void)
pmap->ports[i].type = GF_PMAP_PORT_FOREIGN;
}
- pmap->base_port = GF_IANA_PRIV_PORTS_START;
- pmap->last_alloc = GF_IANA_PRIV_PORTS_START;
+ pmap->base_port = pmap->last_alloc =
+ ((glusterd_conf_t *)(this->private))->base_port;
return pmap;
}
@@ -86,7 +86,7 @@ pmap_registry_get (xlator_t *this)
pmap = priv->pmap;
if (!pmap) {
- pmap = pmap_registry_new ();
+ pmap = pmap_registry_new (this);
if (!pmap)
return NULL;
priv->pmap = pmap;
diff --git a/xlators/mgmt/glusterd/src/glusterd-quota.c b/xlators/mgmt/glusterd/src/glusterd-quota.c
index 318267199..7f798ad26 100644
--- a/xlators/mgmt/glusterd/src/glusterd-quota.c
+++ b/xlators/mgmt/glusterd/src/glusterd-quota.c
@@ -21,25 +21,51 @@
#include "glusterd-utils.h"
#include "glusterd-volgen.h"
#include "run.h"
+#include "syscall.h"
+#include "byte-order.h"
+#include "compat-errno.h"
#include <sys/wait.h>
+#include <dlfcn.h>
+
+/* Any negative pid to make it special client */
+#define QUOTA_CRAWL_PID "-100"
+
+const char *gd_quota_op_list[GF_QUOTA_OPTION_TYPE_DEFAULT_SOFT_LIMIT+1] = {
+ [GF_QUOTA_OPTION_TYPE_NONE] = "none",
+ [GF_QUOTA_OPTION_TYPE_ENABLE] = "enable",
+ [GF_QUOTA_OPTION_TYPE_DISABLE] = "disable",
+ [GF_QUOTA_OPTION_TYPE_LIMIT_USAGE] = "limit-usage",
+ [GF_QUOTA_OPTION_TYPE_REMOVE] = "remove",
+ [GF_QUOTA_OPTION_TYPE_LIST] = "list",
+ [GF_QUOTA_OPTION_TYPE_VERSION] = "version",
+ [GF_QUOTA_OPTION_TYPE_ALERT_TIME] = "alert-time",
+ [GF_QUOTA_OPTION_TYPE_SOFT_TIMEOUT] = "soft-timeout",
+ [GF_QUOTA_OPTION_TYPE_HARD_TIMEOUT] = "hard-timeout",
+ [GF_QUOTA_OPTION_TYPE_DEFAULT_SOFT_LIMIT] = "default-soft-limit",
+};
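
The new gd_quota_op_list table replaces the per-type strncpy switch deleted below: indexing with the CLI type enum yields the operation name for error messages, and C99 designated initializers keep each slot tied to its enum constant even if the enum is reordered or grows gaps. A toy standalone version of the idiom:

    #include <stdio.h>

    enum quota_op { Q_NONE, Q_ENABLE, Q_DISABLE };  /* toy subset of the real enum */

    /* designated initializers bind each name to its constant, not its position */
    static const char *quota_op_names[] = {
            [Q_NONE]    = "none",
            [Q_ENABLE]  = "enable",
            [Q_DISABLE] = "disable",
    };

    int
    main (void)
    {
            printf ("op %d is '%s'\n", Q_ENABLE, quota_op_names[Q_ENABLE]);
            return 0;
    }
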
int
+glusterd_store_quota_config (glusterd_volinfo_t *volinfo, char *path,
+ char *gfid_str, int opcode, char **op_errstr);
+int
__glusterd_handle_quota (rpcsvc_request_t *req)
{
int32_t ret = -1;
gf_cli_req cli_req = {{0,}};
dict_t *dict = NULL;
glusterd_op_t cli_op = GD_OP_QUOTA;
- char operation[256] = {0, };
char *volname = NULL;
int32_t type = 0;
char msg[2048] = {0,};
xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
GF_ASSERT (req);
this = THIS;
GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
if (ret < 0) {
@@ -82,23 +108,16 @@ __glusterd_handle_quota (rpcsvc_request_t *req)
goto out;
}
- switch (type) {
- case GF_QUOTA_OPTION_TYPE_ENABLE:
- strncpy (operation, "enable", sizeof (operation));
- break;
-
- case GF_QUOTA_OPTION_TYPE_DISABLE:
- strncpy (operation, "disable", sizeof (operation));
- break;
-
- case GF_QUOTA_OPTION_TYPE_LIMIT_USAGE:
- strncpy (operation, "limit-usage", sizeof (operation));
- break;
+ if ((conf->op_version == GD_OP_VERSION_MIN) &&
+ (type > GF_QUOTA_OPTION_TYPE_VERSION)) {
+ snprintf (msg, sizeof (msg), "Cannot execute command. The "
+ "cluster is operating at version %d. Quota command %s "
+ "is unavailable in this version", conf->op_version,
+ gd_quota_op_list[type]);
+ ret = -1;
+ goto out;
+ }
- case GF_QUOTA_OPTION_TYPE_REMOVE:
- strncpy (operation, "remove", sizeof (operation));
- break;
- }
ret = glusterd_op_begin_synctask (req, GD_OP_QUOTA, dict);
out:
@@ -132,7 +151,6 @@ glusterd_check_if_quota_trans_enabled (glusterd_volinfo_t *volinfo)
}
if (flag == _gf_false) {
- gf_log ("", GF_LOG_ERROR, "first enable the quota translator");
ret = -1;
goto out;
}
@@ -141,107 +159,9 @@ out:
return ret;
}
-/* At the end of the function, the variable found will be set
- * to true if the path to be removed was present in the limit-list,
- * else will be false.
- */
-int32_t
-_glusterd_quota_remove_limits (char **quota_limits, char *path,
- gf_boolean_t *found)
-{
- int ret = 0;
- int i = 0;
- int size = 0;
- int len = 0;
- int pathlen = 0;
- int skiplen = 0;
- int flag = 0;
- char *limits = NULL;
- char *qlimits = NULL;
-
- if (found != NULL)
- *found = _gf_false;
-
- if (*quota_limits == NULL)
- return -1;
-
- qlimits = *quota_limits;
-
- pathlen = strlen (path);
-
- len = strlen (qlimits);
-
- limits = GF_CALLOC (len + 1, sizeof (char), gf_gld_mt_char);
- if (!limits)
- return -1;
-
- while (i < len) {
- if (!memcmp ((void *) &qlimits [i], (void *)path, pathlen))
- if (qlimits [i + pathlen] == ':') {
- flag = 1;
- if (found != NULL)
- *found = _gf_true;
- }
-
- while (qlimits [i + size] != ',' &&
- qlimits [i + size] != '\0')
- size++;
-
- if (!flag) {
- memcpy ((void *) &limits [i], (void *) &qlimits [i], size + 1);
- } else {
- skiplen = size + 1;
- size = len - i - size;
- memcpy ((void *) &limits [i], (void *) &qlimits [i + skiplen], size);
- break;
- }
-
- i += size + 1;
- size = 0;
- }
-
- if (!flag) {
- ret = 1;
- } else {
- len = strlen (limits);
-
- if (len == 0) {
- GF_FREE (qlimits);
-
- *quota_limits = NULL;
-
- goto out;
- }
-
- if (limits[len - 1] == ',') {
- limits[len - 1] = '\0';
- len --;
- }
-
- GF_FREE (qlimits);
-
- qlimits = GF_CALLOC (len + 1, sizeof (char), gf_gld_mt_char);
-
- if (!qlimits) {
- ret = -1;
- goto out;
- }
-
- memcpy ((void *) qlimits, (void *) limits, len + 1);
-
- *quota_limits = qlimits;
-
- ret = 0;
- }
-
-out:
- GF_FREE (limits);
-
- return ret;
-}
-
int32_t
-glusterd_quota_initiate_fs_crawl (glusterd_conf_t *priv, char *volname)
+glusterd_quota_initiate_fs_crawl (glusterd_conf_t *priv, char *volname,
+ int type)
{
pid_t pid;
int32_t ret = 0;
@@ -260,6 +180,8 @@ glusterd_quota_initiate_fs_crawl (glusterd_conf_t *priv, char *volname)
runner_add_args (&runner, SBIN_DIR"/glusterfs",
"-s", "localhost",
"--volfile-id", volname,
+ "--use-readdirp=no",
+ "--client-pid", QUOTA_CRAWL_PID,
"-l", DEFAULT_LOG_FILE_DIRECTORY"/quota-crawl.log",
mountdir, NULL);
@@ -292,7 +214,19 @@ glusterd_quota_initiate_fs_crawl (glusterd_conf_t *priv, char *volname)
exit (EXIT_FAILURE);
}
runinit (&runner);
-        runner_add_args (&runner, "/usr/bin/find", "find", ".", NULL);
+
+        if (type == GF_QUOTA_OPTION_TYPE_ENABLE)
+                runner_add_args (&runner, "/usr/bin/find", "find", ".",
+                                 NULL);
+        else if (type == GF_QUOTA_OPTION_TYPE_DISABLE)
+                runner_add_args (&runner, "/usr/bin/find", ".",
+                                 "-exec", "/usr/bin/setfattr", "-n",
+                                 VIRTUAL_QUOTA_XATTR_CLEANUP_KEY, "-v",
+                                 "1", "{}", ";", NULL);
+
if (runner_start (&runner) == -1)
_exit (EXIT_FAILURE);
@@ -312,114 +246,39 @@ out:
return ret;
}
-char *
-glusterd_quota_get_limit_value (char *quota_limits, char *path)
-{
- int32_t i, j, k, l, len;
- int32_t pat_len, diff;
- char *ret_str = NULL;
-
- len = strlen (quota_limits);
- pat_len = strlen (path);
- i = 0;
- j = 0;
-
- while (i < len) {
- j = i;
- k = 0;
- while (path [k] == quota_limits [j]) {
- j++;
- k++;
- }
-
- l = j;
-
- while (quota_limits [j] != ',' &&
- quota_limits [j] != '\0')
- j++;
-
- if (quota_limits [l] == ':' && pat_len == (l - i)) {
- diff = j - i;
- ret_str = GF_CALLOC (diff + 1, sizeof (char),
- gf_gld_mt_char);
-
- strncpy (ret_str, &quota_limits [i], diff);
-
- break;
- }
- i = ++j; //skip ','
- }
-
- return ret_str;
-}
-
-char*
-_glusterd_quota_get_limit_usages (glusterd_volinfo_t *volinfo,
- char *path, char **op_errstr)
-{
- int32_t ret = 0;
- char *quota_limits = NULL;
- char *ret_str = NULL;
-
- if (volinfo == NULL)
- return NULL;
-
- ret = glusterd_volinfo_get (volinfo, VKEY_FEATURES_LIMIT_USAGE,
- &quota_limits);
- if (ret)
- return NULL;
- if (quota_limits == NULL) {
- ret_str = NULL;
- *op_errstr = gf_strdup ("Limit not set on any directory");
- } else if (path == NULL)
- ret_str = gf_strdup (quota_limits);
- else
- ret_str = glusterd_quota_get_limit_value (quota_limits, path);
-
- return ret_str;
-}
-
int32_t
-glusterd_quota_get_limit_usages (glusterd_conf_t *priv,
- glusterd_volinfo_t *volinfo,
- char *volname,
- dict_t *dict,
- char **op_errstr,
- dict_t *rsp_dict)
+glusterd_quota_get_default_soft_limit (glusterd_volinfo_t *volinfo,
+ dict_t *rsp_dict)
{
- int32_t i = 0;
- int32_t ret = 0;
- int32_t count = 0;
- char *path = NULL;
- char cmd_str [1024] = {0, };
- char *ret_str = NULL;
+ int32_t ret = 0;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ char *default_limit = NULL;
+ char *val = NULL;
if (rsp_dict == NULL)
- return 0;
-
- ret = dict_get_int32 (dict, "count", &count);
- if (ret < 0)
- goto out;
+ return -1;
- if (count == 0) {
- ret_str = _glusterd_quota_get_limit_usages (volinfo, NULL,
- op_errstr);
- } else {
- i = 0;
- while (count--) {
- snprintf (cmd_str, 1024, "path%d", i++);
+ this = THIS;
+ GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
- ret = dict_get_str (dict, cmd_str, &path);
- if (ret < 0)
- goto out;
+ ret = glusterd_volinfo_get (volinfo, "features.default-soft-limit",
+ &default_limit);
+ if (default_limit)
+ val = gf_strdup (default_limit);
+ else
+ val = gf_strdup ("80%");
- ret_str = _glusterd_quota_get_limit_usages (volinfo, path, op_errstr);
- }
+ ret = dict_set_dynstr (rsp_dict, "default-soft-limit", val);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set default "
+ "soft-limit into dict");
+ goto out;
}
+ ret = 0;
- if (ret_str) {
- ret = dict_set_dynstr (rsp_dict, "limit_list", ret_str);
- }
out:
return ret;
}
@@ -430,54 +289,79 @@ glusterd_quota_enable (glusterd_volinfo_t *volinfo, char **op_errstr,
{
int32_t ret = -1;
char *quota_status = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
- GF_VALIDATE_OR_GOTO ("glusterd", volinfo, out);
- GF_VALIDATE_OR_GOTO ("glusterd", crawl, out);
- GF_VALIDATE_OR_GOTO ("glusterd", op_errstr, out);
+ GF_VALIDATE_OR_GOTO (this->name, volinfo, out);
+ GF_VALIDATE_OR_GOTO (this->name, crawl, out);
+ GF_VALIDATE_OR_GOTO (this->name, op_errstr, out);
if (glusterd_is_volume_started (volinfo) == 0) {
*op_errstr = gf_strdup ("Volume is stopped, start volume "
"to enable quota.");
+ ret = -1;
goto out;
}
ret = glusterd_check_if_quota_trans_enabled (volinfo);
if (ret == 0) {
*op_errstr = gf_strdup ("Quota is already enabled");
+ ret = -1;
goto out;
}
quota_status = gf_strdup ("on");
if (!quota_status) {
- gf_log ("", GF_LOG_ERROR, "memory allocation failed");
- *op_errstr = gf_strdup ("Enabling quota has been unsuccessful");
+ gf_log (this->name, GF_LOG_ERROR, "memory allocation failed");
+ ret = -1;
goto out;
}
- ret = dict_set_dynstr (volinfo->dict, VKEY_FEATURES_QUOTA, quota_status);
+ ret = dict_set_dynstr (volinfo->dict, VKEY_FEATURES_QUOTA,
+ quota_status);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "dict set failed");
- *op_errstr = gf_strdup ("Enabling quota has been unsuccessful");
+ gf_log (this->name, GF_LOG_ERROR, "dict set failed");
goto out;
}
- *op_errstr = gf_strdup ("Enabling quota has been successful");
-
*crawl = _gf_true;
+ ret = glusterd_store_quota_config (volinfo, NULL, NULL,
+ GF_QUOTA_OPTION_TYPE_ENABLE,
+ op_errstr);
+
ret = 0;
out:
+ if (ret && op_errstr && !*op_errstr)
+ gf_asprintf (op_errstr, "Enabling quota on volume %s has been "
+ "unsuccessful", volinfo->volname);
return ret;
}
int32_t
-glusterd_quota_disable (glusterd_volinfo_t *volinfo, char **op_errstr)
+glusterd_quota_disable (glusterd_volinfo_t *volinfo, char **op_errstr,
+ gf_boolean_t *crawl)
{
- int32_t ret = -1;
- char *quota_status = NULL, *quota_limits = NULL;
+ int32_t ret = -1;
+ int i = 0;
+ char *quota_status = NULL;
+ char *value = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ char *quota_options[] = {"features.soft-timeout",
+ "features.hard-timeout",
+ "features.alert-time",
+ "features.default-soft-limit", NULL};
+
+ this = THIS;
+ GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
- GF_VALIDATE_OR_GOTO ("glusterd", volinfo, out);
- GF_VALIDATE_OR_GOTO ("glusterd", op_errstr, out);
+ GF_VALIDATE_OR_GOTO (this->name, volinfo, out);
+ GF_VALIDATE_OR_GOTO (this->name, op_errstr, out);
ret = glusterd_check_if_quota_trans_enabled (volinfo);
if (ret == -1) {
@@ -487,190 +371,637 @@ glusterd_quota_disable (glusterd_volinfo_t *volinfo, char **op_errstr)
quota_status = gf_strdup ("off");
if (!quota_status) {
- gf_log ("", GF_LOG_ERROR, "memory allocation failed");
- *op_errstr = gf_strdup ("Disabling quota has been unsuccessful");
+ gf_log (this->name, GF_LOG_ERROR, "memory allocation failed");
+ ret = -1;
goto out;
}
ret = dict_set_dynstr (volinfo->dict, VKEY_FEATURES_QUOTA, quota_status);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "dict set failed");
- *op_errstr = gf_strdup ("Disabling quota has been unsuccessful");
+ gf_log (this->name, GF_LOG_ERROR, "dict set failed");
goto out;
}
- *op_errstr = gf_strdup ("Disabling quota has been successful");
+ for (i = 0; quota_options [i]; i++) {
+ ret = glusterd_volinfo_get (volinfo, quota_options[i], &value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_INFO, "failed to get option"
+ " %s",
+ quota_options[i]);
+ } else {
+ dict_del (volinfo->dict, quota_options[i]);
+ }
+ }
+
+ //Remove aux mount of the volume on every node in the cluster
+ ret = glusterd_remove_auxiliary_mount (volinfo->volname);
+ if (ret)
+ goto out;
+
+ *crawl = _gf_true;
+
+ (void) glusterd_clean_up_quota_store (volinfo);
+
+ ret = 0;
+out:
+ if (ret && op_errstr && !*op_errstr)
+ gf_asprintf (op_errstr, "Disabling quota on volume %s has been "
+ "unsuccessful", volinfo->volname);
+ return ret;
+}
+
+
+static int
+glusterd_set_quota_limit (char *volname, char *path, char *hard_limit,
+ char *soft_limit, char **op_errstr)
+{
+ int ret = -1;
+ xlator_t *this = NULL;
+ char abspath[PATH_MAX] = {0,};
+ glusterd_conf_t *priv = NULL;
+ double soft_lim = 0;
+
+ typedef struct quota_limits {
+ int64_t hl;
+ int64_t sl;
+ } __attribute__ ((__packed__)) quota_limits_t;
+
+ quota_limits_t existing_limit = {0,};
+ quota_limits_t new_limit = {0,};
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
- ret = glusterd_volinfo_get (volinfo, VKEY_FEATURES_LIMIT_USAGE,
- &quota_limits);
+ GLUSTERD_GET_QUOTA_AUX_MOUNT_PATH (abspath, volname, path);
+ ret = gf_lstat_dir (abspath, NULL);
if (ret) {
- gf_log ("", GF_LOG_WARNING, "failed to get the quota limits");
+ gf_asprintf (op_errstr, "Failed to find the directory %s. "
+ "Reason : %s", abspath, strerror (errno));
+ goto out;
+ }
+
+ if (!soft_limit) {
+ ret = sys_lgetxattr (abspath,
+ "trusted.glusterfs.quota.limit-set",
+ (void *)&existing_limit,
+ sizeof (existing_limit));
+ if (ret < 0) {
+ switch (errno) {
+ case ENOATTR:
+ existing_limit.sl = -1;
+ break;
+ default:
+ gf_asprintf (op_errstr, "Failed to get the xattr "
+ "'trusted.glusterfs.quota.limit-set' from "
+ "%s. Reason : %s", abspath,
+ strerror (errno));
+ goto out;
+ }
+ } else {
+ existing_limit.hl = ntoh64 (existing_limit.hl);
+ existing_limit.sl = ntoh64 (existing_limit.sl);
+ }
+ new_limit.sl = existing_limit.sl;
+
} else {
- GF_FREE (quota_limits);
+ ret = gf_string2percent (soft_limit, &soft_lim);
+ if (ret)
+ goto out;
+ new_limit.sl = soft_lim;
}
- dict_del (volinfo->dict, VKEY_FEATURES_LIMIT_USAGE);
+ new_limit.sl = hton64 (new_limit.sl);
+
+ ret = gf_string2bytesize_uint64 (hard_limit, (uint64_t*)&new_limit.hl);
+ if (ret)
+ goto out;
+
+ new_limit.hl = hton64 (new_limit.hl);
+
+ ret = sys_lsetxattr (abspath, "trusted.glusterfs.quota.limit-set",
+ (char *)(void *)&new_limit, sizeof (new_limit), 0);
+ if (ret) {
+ gf_asprintf (op_errstr, "setxattr of "
+ "'trusted.glusterfs.quota.limit-set' failed on %s."
+ " Reason : %s", abspath, strerror (errno));
+ goto out;
+ }
+ ret = 0;
out:
return ret;
}
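
For illustration, a minimal standalone sketch (not part of the patch) of the
16-byte value the code above stores in 'trusted.glusterfs.quota.limit-set':
two packed big-endian 64-bit fields, the hard limit in bytes followed by the
soft limit. be64toh/htobe64 from <endian.h> are assumed here as stand-ins for
gluster's ntoh64/hton64.

#include <stdint.h>
#include <stdio.h>
#include <endian.h>

typedef struct quota_limits {
        int64_t hl;     /* hard limit, bytes; big-endian on disk */
        int64_t sl;     /* soft limit, percent; -1 when unset    */
} __attribute__ ((__packed__)) quota_limits_t;

int
main (void)
{
        quota_limits_t on_disk = {0,};
        quota_limits_t host = {0,};

        /* encode, as done before sys_lsetxattr(): 10GiB hard, 80% soft */
        on_disk.hl = htobe64 (10737418240LL);
        on_disk.sl = htobe64 (80);

        /* decode, as done after sys_lgetxattr() */
        host.hl = be64toh (on_disk.hl);
        host.sl = be64toh (on_disk.sl);

        printf ("hard=%lld bytes soft=%lld%%\n",
                (long long)host.hl, (long long)host.sl);
        return 0;
}
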
-int32_t
-glusterd_quota_limit_usage (glusterd_volinfo_t *volinfo, dict_t *dict, char **op_errstr)
+static int
+glusterd_update_quota_conf_version (glusterd_volinfo_t *volinfo)
+{
+ volinfo->quota_conf_version++;
+ return 0;
+}
+
+/* The function glusterd_find_gfid_match () does the following:
+ * Given a buffer of gfids, the number of bytes read and the key gfid that needs
+ * to be found, the function compares 16 bytes at a time from @buf against
+ * @gfid.
+ *
+ * What happens when the match is found:
+ * i. If the function was called as part of the 'limit-usage' operation, the
+ * call returns with write_byte_count = bytes_read
+ *ii. If the function was called as part of the 'quota remove' operation, @buf
+ * is modified in memory such that the match is deleted from the buffer, and
+ * @write_byte_count is set to the original buf size minus the sixteen bytes
+ * that were deleted as part of 'remove'.
+ *
+ * What happens when the match is not found in the current buffer:
+ * The function returns with write_byte_count = bytes_read, which means to say
+ * that the caller of this function must write the entire buffer to the tmp file
+ * and continue the search.
+ */
+static gf_boolean_t
+glusterd_find_gfid_match (uuid_t gfid, unsigned char *buf, size_t bytes_read,
+ int opcode, size_t *write_byte_count)
+{
+ int gfid_index = 0;
+ int shift_count = 0;
+ unsigned char tmp_buf[17] = {0,};
+
+ while (gfid_index != bytes_read) {
+ memcpy ((void *)tmp_buf, (void *)&buf[gfid_index], 16);
+ if (!uuid_compare (gfid, tmp_buf)) {
+ if (opcode == GF_QUOTA_OPTION_TYPE_REMOVE) {
+ shift_count = bytes_read - (gfid_index + 16);
+ memmove ((void *)&buf[gfid_index],
+ (void *)&buf[gfid_index+16],
+ shift_count);
+ *write_byte_count = bytes_read - 16;
+ } else {
+ *write_byte_count = bytes_read;
+ }
+ return _gf_true;
+ } else {
+ gfid_index+=16;
+ }
+ }
+ if (gfid_index == bytes_read)
+ *write_byte_count = bytes_read;
+
+ return _gf_false;
+}
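
The scan above can be read as the following minimal sketch (not part of the
patch): entries are fixed 16-byte gfids, so a lookup compares 16 bytes at a
stride of 16, and a REMOVE compacts the buffer in place with one memmove.

#include <string.h>
#include <stddef.h>

/* Sketch: returns 1 and sets *out_len if the 16-byte key is in buf;
 * when do_remove is nonzero the matching record is deleted by shifting
 * the tail of the buffer left, mirroring the REMOVE opcode path. */
static int
find_record (unsigned char *buf, size_t len, const unsigned char key[16],
             int do_remove, size_t *out_len)
{
        size_t i;

        for (i = 0; i + 16 <= len; i += 16) {
                if (memcmp (&buf[i], key, 16) != 0)
                        continue;
                if (do_remove) {
                        memmove (&buf[i], &buf[i + 16], len - (i + 16));
                        *out_len = len - 16;
                } else {
                        *out_len = len;
                }
                return 1;
        }
        *out_len = len;         /* no match: caller writes buf unchanged */
        return 0;
}
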
+
+/* The function glusterd_copy_to_tmp_file() reads the remaining bytes from
+ * the source fd and writes them to the destination fd, in chunks of 128KB
+ * per read+write.
+ */
+
+static int
+glusterd_copy_to_tmp_file (int src_fd, int dst_fd)
{
- int32_t ret = -1;
- char *path = NULL;
- char *limit = NULL;
- char *value = NULL;
- char msg [1024] = {0,};
- char *quota_limits = NULL;
+ int ret = 0;
+ size_t entry_sz = 131072;
+ ssize_t bytes_read = 0;
+ unsigned char buf[131072] = {0,};
+ xlator_t *this = NULL;
- GF_VALIDATE_OR_GOTO ("glusterd", dict, out);
- GF_VALIDATE_OR_GOTO ("glusterd", volinfo, out);
- GF_VALIDATE_OR_GOTO ("glusterd", op_errstr, out);
+ this = THIS;
+ GF_ASSERT (this);
- ret = glusterd_check_if_quota_trans_enabled (volinfo);
- if (ret == -1) {
- *op_errstr = gf_strdup ("Quota is disabled, please enable "
- "quota");
+ while ((bytes_read = read (src_fd, (void *)&buf, entry_sz)) > 0) {
+ if (bytes_read % 16 != 0) {
+ gf_log (this->name, GF_LOG_ERROR, "quota.conf "
+ "corrupted");
+ ret = -1;
+ goto out;
+ }
+ ret = write (dst_fd, (void *) buf, bytes_read);
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "write into quota.conf failed. Reason : %s",
+ strerror (errno));
+ goto out;
+ }
+ }
+ ret = 0;
+
+out:
+ return ret;
+}
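
As a side note on the loop above: write() is not guaranteed by POSIX to
transfer the whole buffer, although it does in practice for regular files.
A short-write-safe variant would look like this minimal sketch (not part of
the patch):

#include <unistd.h>
#include <errno.h>

static int
copy_chunked (int src_fd, int dst_fd)
{
        unsigned char buf[131072];
        ssize_t nread, nwritten;
        size_t off;

        while ((nread = read (src_fd, buf, sizeof (buf))) > 0) {
                if (nread % 16 != 0)
                        return -1;      /* records are 16 bytes each */
                for (off = 0; off < (size_t)nread; off += nwritten) {
                        nwritten = write (dst_fd, buf + off, nread - off);
                        if (nwritten < 0) {
                                if (errno == EINTR) {
                                        nwritten = 0;
                                        continue;
                                }
                                return -1;
                        }
                }
        }
        return (nread < 0) ? -1 : 0;
}
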
+
+int
+glusterd_store_quota_config (glusterd_volinfo_t *volinfo, char *path,
+ char *gfid_str, int opcode, char **op_errstr)
+{
+ int ret = -1;
+ int fd = -1;
+ int conf_fd = -1;
+ size_t entry_sz = 131072;
+ ssize_t bytes_read = 0;
+ size_t bytes_to_write = 0;
+ unsigned char buf[131072] = {0,};
+ uuid_t gfid = {0,};
+ xlator_t *this = NULL;
+ gf_boolean_t found = _gf_false;
+ gf_boolean_t modified = _gf_false;
+ gf_boolean_t is_file_empty = _gf_false;
+ gf_boolean_t is_first_read = _gf_true;
+ glusterd_conf_t *conf = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
+
+ glusterd_store_create_quota_conf_sh_on_absence (volinfo);
+
+ fd = gf_store_mkstemp (volinfo->quota_conf_shandle);
+ if (fd < 0) {
+ ret = -1;
goto out;
}
- ret = glusterd_volinfo_get (volinfo, VKEY_FEATURES_LIMIT_USAGE,
- &quota_limits);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "failed to get the quota limits");
- *op_errstr = gf_strdup ("failed to set limit");
+ conf_fd = open (volinfo->quota_conf_shandle->path, O_RDONLY);
+ if (conf_fd == -1) {
+ ret = -1;
goto out;
}
- ret = dict_get_str (dict, "path", &path);
+ ret = glusterd_store_quota_conf_skip_header (this, conf_fd);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to fetch quota limits" );
- *op_errstr = gf_strdup ("failed to set limit");
goto out;
}
- ret = dict_get_str (dict, "limit", &limit);
+ ret = glusterd_store_quota_conf_stamp_header (this, fd);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to fetch quota limits" );
- *op_errstr = gf_strdup ("failed to set limit");
+ gf_log (this->name, GF_LOG_ERROR, "Failed to add header to tmp "
+ "file.");
goto out;
}
- if (quota_limits) {
- ret = _glusterd_quota_remove_limits (&quota_limits, path, NULL);
- if (ret == -1) {
- gf_log ("", GF_LOG_ERROR, "Unable to allocate memory");
- *op_errstr = gf_strdup ("failed to set limit");
+        /* For ENABLE, just create an empty quota.conf file */
+ if (GF_QUOTA_OPTION_TYPE_ENABLE == opcode) {
+ modified = _gf_true;
+ goto out;
+ }
+
+ /* Check if gfid_str is given for opts other than ENABLE */
+ if (!gfid_str) {
+ ret = -1;
+ goto out;
+ }
+ uuid_parse (gfid_str, gfid);
+
+ for (;;) {
+ bytes_read = read (conf_fd, (void*)&buf, entry_sz);
+ if (bytes_read <= 0) {
+ /*The flag @is_first_read is TRUE when the loop is
+ * entered, and is set to false if the first read
+ * reads non-zero bytes of data. The flag is used to
+ * detect if quota.conf is an empty file, but for the
+ * header. This is done to log appropriate error message
+ * when 'quota remove' is attempted when there are no
+ * limits set on the given volume.
+ */
+ if (is_first_read)
+ is_file_empty = _gf_true;
+ break;
+ }
+ if ((bytes_read % 16) != 0) {
+ gf_log (this->name, GF_LOG_ERROR, "quota.conf "
+ "corrupted");
+ ret = -1;
goto out;
}
- }
+ found = glusterd_find_gfid_match (gfid, buf, bytes_read, opcode,
+ &bytes_to_write);
- if (quota_limits == NULL) {
- ret = gf_asprintf (&value, "%s:%s", path, limit);
+ ret = write (fd, (void *) buf, bytes_to_write);
if (ret == -1) {
- gf_log ("", GF_LOG_ERROR, "Unable to allocate memory");
- *op_errstr = gf_strdup ("failed to set limit");
+ gf_log (this->name, GF_LOG_ERROR,
+ "write into quota.conf failed. Reason : %s",
+ strerror (errno));
goto out;
}
- } else {
- ret = gf_asprintf (&value, "%s,%s:%s",
- quota_limits, path, limit);
- if (ret == -1) {
- gf_log ("", GF_LOG_ERROR, "Unable to allocate memory");
- *op_errstr = gf_strdup ("failed to set limit");
+
+                /* If the match is found in this iteration, copy the rest
+                 * of quota.conf into quota.conf.tmp and break.
+                 * Else continue with the search.
+                 */
+ if (found) {
+ ret = glusterd_copy_to_tmp_file (conf_fd, fd);
+ if (ret)
+ goto out;
+ break;
+ }
+ is_first_read = _gf_false;
+ }
+
+ switch (opcode) {
+ case GF_QUOTA_OPTION_TYPE_LIMIT_USAGE:
+ if (!found) {
+ ret = write (fd, gfid, 16);
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "write into quota.conf failed. "
+ "Reason : %s",
+ strerror (errno));
+ goto out;
+ }
+ modified = _gf_true;
+ }
+ break;
+
+ case GF_QUOTA_OPTION_TYPE_REMOVE:
+ if (is_file_empty) {
+ gf_asprintf (op_errstr, "Cannot remove limit on"
+ " %s. The quota configuration file"
+ " for volume %s is empty.", path,
+ volinfo->volname);
+ ret = -1;
+ goto out;
+ } else {
+ if (!found) {
+ gf_asprintf (op_errstr, "Error. gfid %s"
+ " for path %s not found in"
+ " store", gfid_str, path);
+ ret = -1;
+ goto out;
+ } else {
+ modified = _gf_true;
+ }
+ }
+ break;
+
+ default:
+ ret = 0;
+ break;
+ }
+
+ if (modified)
+ glusterd_update_quota_conf_version (volinfo);
+
+ ret = 0;
+out:
+ if (conf_fd != -1) {
+ close (conf_fd);
+ }
+
+ if (fd != -1) {
+ close (fd);
+ }
+
+ if (ret && (fd > 0)) {
+ gf_store_unlink_tmppath (volinfo->quota_conf_shandle);
+ } else if (!ret) {
+ ret = gf_store_rename_tmppath (volinfo->quota_conf_shandle);
+ if (modified) {
+ ret = glusterd_compute_cksum (volinfo, _gf_true);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to "
+ "compute cksum for quota conf file");
+ goto out;
+ }
+
+ ret = glusterd_store_save_quota_version_and_cksum
+ (volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to "
+ "store quota version and cksum");
+ goto out;
+ }
+ }
+ }
+
+ return ret;
+}
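
The resulting quota.conf layout, as written above, is a text header followed
by raw 16-byte gfid records. A minimal reader sketch (not part of the patch;
the header length is assumed known to the caller, since its exact contents
are glusterd-internal):

#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <uuid/uuid.h>

/* Sketch: print every gfid recorded in quota.conf (link with -luuid). */
static int
dump_quota_conf (int fd, off_t hdr_len)
{
        unsigned char gfid[16];
        char str[37];
        ssize_t n;

        if (lseek (fd, hdr_len, SEEK_SET) < 0)
                return -1;

        while ((n = read (fd, gfid, sizeof (gfid))) == sizeof (gfid)) {
                uuid_unparse (gfid, str);       /* libuuid, like uuid_utoa */
                printf ("%s\n", str);
        }
        return (n == 0) ? 0 : -1;       /* partial trailing record = corrupt */
}
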
+
+int32_t
+glusterd_quota_limit_usage (glusterd_volinfo_t *volinfo, dict_t *dict,
+ int opcode, char **op_errstr)
+{
+ int32_t ret = -1;
+ char *path = NULL;
+ char *hard_limit = NULL;
+ char *soft_limit = NULL;
+ char *gfid_str = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ GF_VALIDATE_OR_GOTO (this->name, dict, out);
+ GF_VALIDATE_OR_GOTO (this->name, volinfo, out);
+ GF_VALIDATE_OR_GOTO (this->name, op_errstr, out);
+
+ ret = glusterd_check_if_quota_trans_enabled (volinfo);
+ if (ret == -1) {
+ *op_errstr = gf_strdup ("Quota is disabled, please enable "
+ "quota");
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "path", &path);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to fetch path");
+ goto out;
+ }
+ ret = gf_canonicalize_path (path);
+ if (ret)
+ goto out;
+
+ ret = dict_get_str (dict, "hard-limit", &hard_limit);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to fetch hard limit");
+ goto out;
+ }
+
+ if (dict_get (dict, "soft-limit")) {
+ ret = dict_get_str (dict, "soft-limit", &soft_limit);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to fetch "
+ "soft limit");
goto out;
}
+ }
+
+ if (is_origin_glusterd (dict)) {
+ ret = glusterd_set_quota_limit (volinfo->volname, path,
+ hard_limit, soft_limit,
+ op_errstr);
+ if (ret)
+ goto out;
+ }
- GF_FREE (quota_limits);
+ ret = dict_get_str (dict, "gfid", &gfid_str);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get gfid of path "
+ "%s", path);
+ goto out;
}
- quota_limits = value;
+ ret = glusterd_store_quota_config (volinfo, path, gfid_str, opcode,
+ op_errstr);
+ if (ret)
+ goto out;
+
+ ret = 0;
+out:
+
+ if (ret && op_errstr && !*op_errstr)
+ gf_asprintf (op_errstr, "Failed to set hard limit on path %s "
+ "for volume %s", path, volinfo->volname);
+ return ret;
+}
+
+static int
+glusterd_remove_quota_limit (char *volname, char *path, char **op_errstr)
+{
+ int ret = -1;
+ xlator_t *this = NULL;
+ char abspath[PATH_MAX] = {0,};
+ glusterd_conf_t *priv = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
- ret = dict_set_str (volinfo->dict, VKEY_FEATURES_LIMIT_USAGE,
- quota_limits);
+ GLUSTERD_GET_QUOTA_AUX_MOUNT_PATH (abspath, volname, path);
+ ret = gf_lstat_dir (abspath, NULL);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to set quota limits" );
- *op_errstr = gf_strdup ("failed to set limit");
+ gf_asprintf (op_errstr, "Failed to find the directory %s. "
+ "Reason : %s", abspath, strerror (errno));
goto out;
}
- snprintf (msg, 1024, "limit set on %s", path);
- *op_errstr = gf_strdup (msg);
+ ret = sys_lremovexattr (abspath, "trusted.glusterfs.quota.limit-set");
+ if (ret) {
+ gf_asprintf (op_errstr, "removexattr failed on %s. Reason : %s",
+ abspath, strerror (errno));
+ goto out;
+ }
ret = 0;
+
out:
return ret;
}
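
Stripped of glusterd plumbing, the remove path above reduces to the sketch
below (not part of the patch). The auxiliary mount location is an assumption
here; the real path comes from GLUSTERD_GET_QUOTA_AUX_MOUNT_PATH.

#include <stdio.h>
#include <limits.h>
#include <sys/xattr.h>

/* Sketch: drop the limit xattr for <path> via the volume's aux mount. */
static int
remove_limit (const char *volname, const char *path)
{
        char abspath[PATH_MAX];

        snprintf (abspath, sizeof (abspath), "/var/run/gluster/%s%s",
                  volname, path);
        return lremovexattr (abspath, "trusted.glusterfs.quota.limit-set");
}
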
int32_t
-glusterd_quota_remove_limits (glusterd_volinfo_t *volinfo, dict_t *dict, char **op_errstr)
+glusterd_quota_remove_limits (glusterd_volinfo_t *volinfo, dict_t *dict,
+ int opcode, char **op_errstr)
{
int32_t ret = -1;
- char str [PATH_MAX + 1024] = {0,};
- char *quota_limits = NULL;
char *path = NULL;
- gf_boolean_t flag = _gf_false;
+ char *gfid_str = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
- GF_VALIDATE_OR_GOTO ("glusterd", dict, out);
- GF_VALIDATE_OR_GOTO ("glusterd", volinfo, out);
- GF_VALIDATE_OR_GOTO ("glusterd", op_errstr, out);
+ GF_VALIDATE_OR_GOTO (this->name, dict, out);
+ GF_VALIDATE_OR_GOTO (this->name, volinfo, out);
+ GF_VALIDATE_OR_GOTO (this->name, op_errstr, out);
ret = glusterd_check_if_quota_trans_enabled (volinfo);
if (ret == -1) {
- *op_errstr = gf_strdup ("Quota is disabled, please enable quota");
+ *op_errstr = gf_strdup ("Quota is disabled, please enable "
+ "quota");
goto out;
}
- ret = glusterd_volinfo_get (volinfo, VKEY_FEATURES_LIMIT_USAGE,
- &quota_limits);
+ ret = dict_get_str (dict, "path", &path);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "failed to get the quota limits");
+ gf_log (this->name, GF_LOG_ERROR, "Unable to fetch path");
goto out;
}
- ret = dict_get_str (dict, "path", &path);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to fetch quota limits" );
+ ret = gf_canonicalize_path (path);
+ if (ret)
goto out;
+
+ if (is_origin_glusterd (dict)) {
+ ret = glusterd_remove_quota_limit (volinfo->volname, path,
+ op_errstr);
+ if (ret)
+ goto out;
}
- ret = _glusterd_quota_remove_limits (&quota_limits, path, &flag);
- if (ret == -1) {
- if (flag == _gf_true)
- snprintf (str, sizeof (str), "Removing limit on %s has "
- "been unsuccessful", path);
- else
- snprintf (str, sizeof (str), "%s has no limit set", path);
- *op_errstr = gf_strdup (str);
+ ret = dict_get_str (dict, "gfid", &gfid_str);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get gfid of path "
+ "%s", path);
goto out;
- } else {
- if (flag == _gf_true)
- snprintf (str, sizeof (str), "Removed quota limit on "
- "%s", path);
- else
- snprintf (str, sizeof (str), "no limit set on %s",
- path);
- *op_errstr = gf_strdup (str);
- }
-
- if (quota_limits) {
- ret = dict_set_str (volinfo->dict, VKEY_FEATURES_LIMIT_USAGE,
- quota_limits);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to set quota limits" );
- goto out;
- }
- } else {
- dict_del (volinfo->dict, VKEY_FEATURES_LIMIT_USAGE);
}
+ ret = glusterd_store_quota_config (volinfo, path, gfid_str, opcode,
+ op_errstr);
+ if (ret)
+ goto out;
+
ret = 0;
out:
return ret;
}
+int
+glusterd_set_quota_option (glusterd_volinfo_t *volinfo, dict_t *dict,
+ char *key, char **op_errstr)
+{
+ int ret = 0;
+ char *value = NULL;
+ xlator_t *this = NULL;
+ char *option = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ ret = glusterd_check_if_quota_trans_enabled (volinfo);
+ if (ret == -1) {
+ gf_asprintf (op_errstr, "Cannot set %s. Quota on volume %s is "
+ "disabled", key, volinfo->volname);
+ return -1;
+ }
+
+ ret = dict_get_str (dict, "value", &value);
+        if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Option value absent.");
+ return -1;
+ }
+
+ option = gf_strdup (value);
+ ret = dict_set_dynstr (volinfo->dict, key, option);
+        if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set option %s",
+ key);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+glusterd_quotad_op (int opcode)
+{
+ int ret = -1;
+
+ switch (opcode) {
+ case GF_QUOTA_OPTION_TYPE_ENABLE:
+ case GF_QUOTA_OPTION_TYPE_DISABLE:
+
+ if (glusterd_all_volumes_with_quota_stopped ())
+ ret = glusterd_quotad_stop ();
+ else
+ ret = glusterd_check_generate_start_quotad ();
+ break;
+
+ default:
+ ret = 0;
+ break;
+ }
+ return ret;
+}
int
glusterd_op_quota (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
@@ -681,76 +1012,127 @@ glusterd_op_quota (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
int type = -1;
gf_boolean_t start_crawl = _gf_false;
glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
GF_ASSERT (dict);
GF_ASSERT (op_errstr);
- priv = THIS->private;
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to get volume name " );
+ gf_log (this->name, GF_LOG_ERROR, "Unable to get volume name");
goto out;
}
ret = glusterd_volinfo_find (volname, &volinfo);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to allocate memory");
+ gf_asprintf (op_errstr, FMTSTR_CHECK_VOL_EXISTS, volname);
goto out;
}
ret = dict_get_int32 (dict, "type", &type);
- if (type == GF_QUOTA_OPTION_TYPE_ENABLE) {
- ret = glusterd_quota_enable (volinfo, op_errstr, &start_crawl);
- if (ret < 0)
- goto out;
-
- goto create_vol;
+ if ((priv->op_version == GD_OP_VERSION_MIN) &&
+ (type > GF_QUOTA_OPTION_TYPE_VERSION)) {
+ gf_asprintf (op_errstr, "Volume quota failed. The cluster is "
+ "operating at version %d. Quota command"
+ " %s is unavailable in this version.",
+ priv->op_version,
+ gd_quota_op_list[type]);
+ ret = -1;
+ goto out;
}
- if (type == GF_QUOTA_OPTION_TYPE_DISABLE) {
- ret = glusterd_quota_disable (volinfo, op_errstr);
- if (ret < 0)
- goto out;
+ switch (type) {
+ case GF_QUOTA_OPTION_TYPE_ENABLE:
+ ret = glusterd_quota_enable (volinfo, op_errstr,
+ &start_crawl);
+ if (ret < 0)
+ goto out;
+ break;
- goto create_vol;
- }
+ case GF_QUOTA_OPTION_TYPE_DISABLE:
+ ret = glusterd_quota_disable (volinfo, op_errstr,
+ &start_crawl);
+ if (ret < 0)
+ goto out;
+
+ break;
- if (type == GF_QUOTA_OPTION_TYPE_LIMIT_USAGE) {
- ret = glusterd_quota_limit_usage (volinfo, dict, op_errstr);
- if (ret < 0)
+ case GF_QUOTA_OPTION_TYPE_LIMIT_USAGE:
+ ret = glusterd_quota_limit_usage (volinfo, dict, type,
+ op_errstr);
goto out;
- goto create_vol;
- }
+ case GF_QUOTA_OPTION_TYPE_REMOVE:
+ ret = glusterd_quota_remove_limits (volinfo, dict, type,
+ op_errstr);
+ goto out;
- if (type == GF_QUOTA_OPTION_TYPE_REMOVE) {
- ret = glusterd_quota_remove_limits (volinfo, dict, op_errstr);
- if (ret < 0)
+ case GF_QUOTA_OPTION_TYPE_LIST:
+ ret = glusterd_check_if_quota_trans_enabled (volinfo);
+ if (ret == -1) {
+ *op_errstr = gf_strdup ("Cannot list limits, "
+ "quota is disabled");
+ goto out;
+ }
+ ret = glusterd_quota_get_default_soft_limit (volinfo,
+ rsp_dict);
goto out;
- goto create_vol;
- }
+ case GF_QUOTA_OPTION_TYPE_SOFT_TIMEOUT:
+ ret = glusterd_set_quota_option (volinfo, dict,
+ "features.soft-timeout",
+ op_errstr);
+ if (ret)
+ goto out;
+ break;
- if (type == GF_QUOTA_OPTION_TYPE_LIST) {
- ret = glusterd_check_if_quota_trans_enabled (volinfo);
- if (ret == -1) {
- *op_errstr = gf_strdup ("cannot list the limits, "
- "quota is disabled");
- goto out;
- }
+ case GF_QUOTA_OPTION_TYPE_HARD_TIMEOUT:
+ ret = glusterd_set_quota_option (volinfo, dict,
+ "features.hard-timeout",
+ op_errstr);
+ if (ret)
+ goto out;
+ break;
- ret = glusterd_quota_get_limit_usages (priv, volinfo, volname,
- dict, op_errstr, rsp_dict);
+ case GF_QUOTA_OPTION_TYPE_ALERT_TIME:
+ ret = glusterd_set_quota_option (volinfo, dict,
+ "features.alert-time",
+ op_errstr);
+ if (ret)
+ goto out;
+ break;
- goto out;
+ case GF_QUOTA_OPTION_TYPE_DEFAULT_SOFT_LIMIT:
+ ret = glusterd_set_quota_option (volinfo, dict,
+ "features.default-soft-limit",
+ op_errstr);
+ if (ret)
+ goto out;
+ break;
+
+ default:
+ gf_asprintf (op_errstr, "Quota command failed. Invalid "
+ "opcode");
+ ret = -1;
+ goto out;
}
-create_vol:
+
+ if (priv->op_version > GD_OP_VERSION_MIN) {
+ ret = glusterd_quotad_op (type);
+ if (ret)
+ goto out;
+ }
+
ret = glusterd_create_volfiles_and_notify_services (volinfo);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to re-create volfile for"
- " 'quota'");
+ gf_log (this->name, GF_LOG_ERROR, "Unable to re-create "
+ "volfiles");
ret = -1;
goto out;
}
@@ -759,81 +1141,311 @@ create_vol:
if (ret)
goto out;
- if (GLUSTERD_STATUS_STARTED == volinfo->status)
- ret = glusterd_check_generate_start_nfs ();
+ if (GLUSTERD_STATUS_STARTED == volinfo->status) {
+ if (priv->op_version == GD_OP_VERSION_MIN)
+ ret = glusterd_check_generate_start_nfs ();
+ }
- ret = 0;
+ if (rsp_dict && start_crawl == _gf_true)
+ glusterd_quota_initiate_fs_crawl (priv, volname, type);
+ ret = 0;
out:
- if (rsp_dict && start_crawl == _gf_true)
- glusterd_quota_initiate_fs_crawl (priv, volname);
+ return ret;
+}
+
+/*
+ * glusterd_get_gfid_from_brick() fetches the 'trusted.gfid' attribute of @path
+ * from each brick's backend and places the values in rsp_dict under the
+ * keys gfid0, gfid1, gfid2 and so on. The absence of @path on a brick is
+ * not treated as an error.
+ */
+static int
+glusterd_get_gfid_from_brick (dict_t *dict, glusterd_volinfo_t *volinfo,
+ dict_t *rsp_dict, char **op_errstr)
+{
+ int ret = -1;
+ int count = 0;
+ char *path = NULL;
+ char backend_path[PATH_MAX] = {0,};
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ char key[256] = {0,};
+ char *gfid_str = NULL;
+ uuid_t gfid;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ ret = dict_get_str (dict, "path", &path);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get path");
+ goto out;
+ }
+
+ list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+ ret = glusterd_resolve_brick (brickinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, FMTSTR_RESOLVE_BRICK,
+ brickinfo->hostname, brickinfo->path);
+ goto out;
+ }
+
+ if (uuid_compare (brickinfo->uuid, MY_UUID))
+ continue;
+
+ if (brickinfo->vg[0])
+ continue;
+
+ snprintf (backend_path, sizeof (backend_path), "%s%s",
+ brickinfo->path, path);
+
+ ret = gf_lstat_dir (backend_path, NULL);
+ if (ret) {
+ gf_log (this->name, GF_LOG_INFO, "Failed to find "
+ "directory %s. Reason : %s", backend_path,
+ strerror (errno));
+ ret = 0;
+ continue;
+ }
+ ret = sys_lgetxattr (backend_path, GFID_XATTR_KEY, gfid, 16);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_INFO, "Failed to get "
+ "extended attribute %s for directory %s. "
+ "Reason : %s", GFID_XATTR_KEY, backend_path,
+ strerror (errno));
+ ret = 0;
+ continue;
+ }
+ snprintf (key, sizeof (key), "gfid%d", count);
+
+ gfid_str = gf_strdup (uuid_utoa (gfid));
+ if (!gfid_str) {
+ ret = -1;
+ goto out;
+ }
- if (rsp_dict && *op_errstr) {
- ret = dict_set_dynstr (rsp_dict, "errstr", *op_errstr);
+ ret = dict_set_dynstr (rsp_dict, key, gfid_str);
if (ret) {
- GF_FREE (*op_errstr);
- gf_log ("", GF_LOG_DEBUG,
- "failed to set error message in ctx");
+ gf_log (this->name, GF_LOG_ERROR, "Failed to place "
+ "gfid of %s in dict", backend_path);
+ GF_FREE (gfid_str);
+ goto out;
}
- *op_errstr = NULL;
+ count++;
+ }
+
+ ret = dict_set_int32 (rsp_dict, "count", count);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set count");
+ goto out;
}
+ ret = 0;
+out:
return ret;
}
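
Per brick, the gfid fetch above amounts to this minimal sketch (not part of
the patch); "trusted.gfid" is the on-disk GFID_XATTR_KEY used by glusterfs.

#include <stdio.h>
#include <sys/xattr.h>
#include <uuid/uuid.h>

/* Sketch: read and print the gfid of a directory on a brick's backend
 * (link with -luuid). */
static int
print_backend_gfid (const char *backend_path)
{
        uuid_t gfid;
        char str[37];

        if (lgetxattr (backend_path, "trusted.gfid", gfid, 16) != 16)
                return -1;      /* absent path/xattr: skipped, not fatal */

        uuid_unparse (gfid, str);
        printf ("%s -> %s\n", backend_path, str);
        return 0;
}
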
+static int
+_glusterd_validate_quota_opts (dict_t *dict, int type, char **errstr)
+{
+ int ret = -1;
+ xlator_t *this = THIS;
+ void *quota_xl = NULL;
+ volume_opt_list_t opt_list = {{0},};
+ volume_option_t *opt = NULL;
+ char *key = NULL;
+ char *value = NULL;
+
+ GF_ASSERT (dict);
+ GF_ASSERT (this);
+
+ ret = xlator_volopt_dynload ("features/quota", &quota_xl, &opt_list);
+ if (ret)
+ goto out;
+
+ switch (type) {
+ case GF_QUOTA_OPTION_TYPE_SOFT_TIMEOUT:
+ case GF_QUOTA_OPTION_TYPE_HARD_TIMEOUT:
+ case GF_QUOTA_OPTION_TYPE_ALERT_TIME:
+ case GF_QUOTA_OPTION_TYPE_DEFAULT_SOFT_LIMIT:
+ key = (char *)gd_quota_op_list[type];
+ break;
+ default:
+ ret = -1;
+ goto out;
+ }
+
+ opt = xlator_volume_option_get_list (&opt_list, key);
+ if (!opt) {
+ ret = -1;
+ gf_log (this->name, GF_LOG_ERROR, "Unknown option: %s", key);
+ goto out;
+ }
+ ret = dict_get_str (dict, "value", &value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Value not found for key %s",
+ key);
+ goto out;
+ }
+
+ ret = xlator_option_validate (this, key, value, opt, errstr);
+
+out:
+ if (quota_xl) {
+ dlclose (quota_xl);
+ quota_xl = NULL;
+ }
+ return ret;
+}
int
-glusterd_op_stage_quota (dict_t *dict, char **op_errstr)
+glusterd_op_stage_quota (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
{
- int ret = 0;
- char *volname = NULL;
- gf_boolean_t exists = _gf_false;
- int type = 0;
- dict_t *ctx = NULL;
+ int ret = 0;
+ char *volname = NULL;
+ gf_boolean_t exists = _gf_false;
+ int type = 0;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ char *hard_limit_str = NULL;
+ uint64_t hard_limit = 0;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
GF_ASSERT (dict);
GF_ASSERT (op_errstr);
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to get volume name");
+ gf_log (this->name, GF_LOG_ERROR, "Unable to get volume name");
goto out;
}
exists = glusterd_check_volume_exists (volname);
if (!exists) {
- gf_log ("", GF_LOG_ERROR, "Volume with name: %s "
- "does not exist",
- volname);
- *op_errstr = gf_strdup ("Invalid volume name");
+ gf_asprintf (op_errstr, FMTSTR_CHECK_VOL_EXISTS, volname);
+ ret = -1;
+ goto out;
+ }
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ gf_asprintf (op_errstr, FMTSTR_CHECK_VOL_EXISTS, volname);
+ goto out;
+ }
+
+ if (!glusterd_is_volume_started (volinfo)) {
+ *op_errstr = gf_strdup ("Volume is stopped, start volume "
+ "before executing quota command.");
ret = -1;
goto out;
}
ret = dict_get_int32 (dict, "type", &type);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to get 'type' for quota op");
- *op_errstr = gf_strdup ("Volume quota failed, internal error "
- ", unable to get type of operation");
+ *op_errstr = gf_strdup ("Volume quota failed, internal error, "
+ "unable to get type of operation");
+ goto out;
+ }
+
+ if ((!glusterd_is_volume_quota_enabled (volinfo)) &&
+ (type != GF_QUOTA_OPTION_TYPE_ENABLE)) {
+ *op_errstr = gf_strdup ("Quota is disabled, please enable "
+ "quota");
+ ret = -1;
goto out;
}
+ if ((priv->op_version == GD_OP_VERSION_MIN) &&
+ (type > GF_QUOTA_OPTION_TYPE_VERSION)) {
+ gf_asprintf (op_errstr, "Volume quota failed. The cluster is "
+ "operating at version %d. Quota command"
+ " %s is unavailable in this version.",
+ priv->op_version,
+ gd_quota_op_list[type]);
+ ret = -1;
+ goto out;
+ }
- ctx = glusterd_op_get_ctx();
- if (ctx && (type == GF_QUOTA_OPTION_TYPE_ENABLE
- || type == GF_QUOTA_OPTION_TYPE_LIST)) {
+ if ((GF_QUOTA_OPTION_TYPE_ENABLE != type) &&
+ (glusterd_check_if_quota_trans_enabled (volinfo) != 0)) {
+ ret = -1;
+ gf_asprintf (op_errstr, "Quota is not enabled on volume %s",
+ volname);
+ goto out;
+ }
+
+ switch (type) {
+ case GF_QUOTA_OPTION_TYPE_ENABLE:
+ case GF_QUOTA_OPTION_TYPE_LIST:
/* Fuse mount req. only for enable & list-usage options*/
- if (!glusterd_is_fuse_available ()) {
- gf_log ("glusterd", GF_LOG_ERROR, "Unable to open /dev/"
- "fuse (%s), quota command failed",
- strerror (errno));
+ if (is_origin_glusterd (dict) &&
+ !glusterd_is_fuse_available ()) {
*op_errstr = gf_strdup ("Fuse unavailable");
ret = -1;
goto out;
}
+ break;
+
+ case GF_QUOTA_OPTION_TYPE_LIMIT_USAGE:
+ ret = dict_get_str (dict, "hard-limit", &hard_limit_str);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+                                "Failed to get hard-limit from dict");
+ goto out;
+ }
+ ret = gf_string2bytesize_uint64 (hard_limit_str, &hard_limit);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to convert hard-limit string to value");
+ goto out;
+ }
+                if (hard_limit > INT64_MAX) {
+                        gf_asprintf (op_errstr, "Hard-limit %s is greater"
+                                     " than %"PRId64" bytes. Please set a "
+                                     "smaller limit.", hard_limit_str,
+                                     INT64_MAX);
+                        gf_log (this->name, GF_LOG_ERROR, "hard-limit %s "
+                                "greater than INT64_MAX", hard_limit_str);
+                        ret = -1;
+                        goto out;
+ }
+                /* The break statement is intentionally omitted here to
+                 * allow fall-through to the next switch case.
+                 */
+
+ case GF_QUOTA_OPTION_TYPE_REMOVE:
+ ret = glusterd_get_gfid_from_brick (dict, volinfo, rsp_dict,
+ op_errstr);
+ if (ret)
+ goto out;
+ break;
+
+ case GF_QUOTA_OPTION_TYPE_SOFT_TIMEOUT:
+ case GF_QUOTA_OPTION_TYPE_HARD_TIMEOUT:
+ case GF_QUOTA_OPTION_TYPE_ALERT_TIME:
+ case GF_QUOTA_OPTION_TYPE_DEFAULT_SOFT_LIMIT:
+ ret = _glusterd_validate_quota_opts (dict, type, op_errstr);
+ if (ret)
+ goto out;
+ break;
+
+ default:
+ break;
}
-out:
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ ret = 0;
- return ret;
+ out:
+ if (ret && op_errstr && *op_errstr)
+ gf_log (this->name, GF_LOG_ERROR, "%s", *op_errstr);
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
index 60dd9c19b..bdedf4c04 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
@@ -126,7 +126,7 @@ __glusterd_defrag_notify (struct rpc_clnt *rpc, void *mydata,
UNLOCK (&defrag->lock);
gf_log ("", GF_LOG_DEBUG, "%s got RPC_CLNT_CONNECT",
- rpc->conn.trans->name);
+ rpc->conn.name);
break;
}
@@ -141,20 +141,18 @@ __glusterd_defrag_notify (struct rpc_clnt *rpc, void *mydata,
}
UNLOCK (&defrag->lock);
- if (!glusterd_is_service_running (pidfile, NULL)) {
+ if (!gf_is_service_running (pidfile, NULL)) {
if (volinfo->rebal.defrag_status ==
GF_DEFRAG_STATUS_STARTED) {
volinfo->rebal.defrag_status =
GF_DEFRAG_STATUS_FAILED;
- } else {
- volinfo->rebal.defrag_cmd = 0;
}
}
glusterd_store_perform_node_state_store (volinfo);
if (defrag->rpc) {
- rpc_clnt_unref (defrag->rpc);
+ glusterd_rpc_clnt_unref (priv, defrag->rpc);
defrag->rpc = NULL;
}
if (defrag->cbk_fn)
@@ -163,9 +161,12 @@ __glusterd_defrag_notify (struct rpc_clnt *rpc, void *mydata,
GF_FREE (defrag);
gf_log ("", GF_LOG_DEBUG, "%s got RPC_CLNT_DISCONNECT",
- rpc->conn.trans->name);
+ rpc->conn.name);
break;
}
+ case RPC_CLNT_DESTROY:
+ glusterd_volinfo_unref (volinfo);
+ break;
default:
gf_log ("", GF_LOG_TRACE,
"got some other RPC event %d", event);
@@ -197,7 +198,6 @@ glusterd_handle_defrag_start (glusterd_volinfo_t *volinfo, char *op_errstr,
char sockfile[PATH_MAX] = {0,};
char pidfile[PATH_MAX] = {0,};
char logfile[PATH_MAX] = {0,};
- dict_t *options = NULL;
char valgrind_logfile[PATH_MAX] = {0,};
priv = THIS->private;
@@ -219,6 +219,7 @@ glusterd_handle_defrag_start (glusterd_volinfo_t *volinfo, char *op_errstr,
defrag->cmd = cmd;
+ volinfo->rebal.defrag_cmd = cmd;
volinfo->rebal.op = op;
LOCK_INIT (&defrag->lock);
@@ -236,7 +237,7 @@ glusterd_handle_defrag_start (glusterd_volinfo_t *volinfo, char *op_errstr,
goto out;
}
- GLUSTERD_GET_DEFRAG_SOCK_FILE (sockfile, volinfo, priv);
+ GLUSTERD_GET_DEFRAG_SOCK_FILE (sockfile, volinfo);
GLUSTERD_GET_DEFRAG_PID_FILE (pidfile, volinfo, priv);
snprintf (logfile, PATH_MAX, "%s/%s-rebalance.log",
DEFAULT_LOG_FILE_DIRECTORY, volinfo->volname);
@@ -287,26 +288,10 @@ glusterd_handle_defrag_start (glusterd_volinfo_t *volinfo, char *op_errstr,
sleep (5);
- /* Setting frame-timeout to 10mins (600seconds).
- * Unix domain sockets ensures that the connection is reliable. The
- * default timeout of 30mins used for unreliable network connections is
- * too long for unix domain socket connections.
- */
- ret = rpc_transport_unix_options_build (&options, sockfile, 600);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "Unix options build failed");
- goto out;
- }
-
- synclock_unlock (&priv->big_lock);
- ret = glusterd_rpc_create (&defrag->rpc, options,
- glusterd_defrag_notify, volinfo);
- synclock_lock (&priv->big_lock);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "RPC create failed");
- goto out;
- }
+ ret = glusterd_rebalance_rpc_create (volinfo, _gf_false);
+        //FIXME: this cbk is passed as NULL in all occurrences. Maybe
+        //we never needed it.
if (cbk)
defrag->cbk_fn = cbk;
@@ -318,28 +303,54 @@ out:
int
glusterd_rebalance_rpc_create (glusterd_volinfo_t *volinfo,
- glusterd_conf_t *priv, int cmd)
+ gf_boolean_t reconnect)
{
dict_t *options = NULL;
char sockfile[PATH_MAX] = {0,};
int ret = -1;
- glusterd_defrag_info_t *defrag = NULL;
+ glusterd_defrag_info_t *defrag = volinfo->rebal.defrag;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+ struct stat buf = {0,};
- if (!volinfo->rebal.defrag)
- volinfo->rebal.defrag =
- GF_CALLOC (1, sizeof (*volinfo->rebal.defrag),
- gf_gld_mt_defrag_info);
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
- if (!volinfo->rebal.defrag)
+ //rebalance process is not started
+ if (!defrag)
goto out;
- defrag = volinfo->rebal.defrag;
-
- defrag->cmd = cmd;
-
- LOCK_INIT (&defrag->lock);
-
- GLUSTERD_GET_DEFRAG_SOCK_FILE (sockfile, volinfo, priv);
+ //rpc obj for rebalance process already in place.
+ if (defrag->rpc) {
+ ret = 0;
+ goto out;
+ }
+ GLUSTERD_GET_DEFRAG_SOCK_FILE (sockfile, volinfo);
+ /* If reconnecting check if defrag sockfile exists in the new location
+ * in /var/run/ , if it does not try the old location
+ */
+ if (reconnect) {
+ ret = sys_stat (sockfile, &buf);
+                /* TODO: Remove this once we don't need backward compatibility
+ * with the older path
+ */
+ if (ret && (errno == ENOENT)) {
+ gf_log (this->name, GF_LOG_WARNING, "Rebalance sockfile "
+ "%s does not exist. Trying old path.",
+ sockfile);
+ GLUSTERD_GET_DEFRAG_SOCK_FILE_OLD (sockfile, volinfo,
+ priv);
+                        ret = sys_stat (sockfile, &buf);
+ if (ret && (ENOENT == errno)) {
+ gf_log (this->name, GF_LOG_ERROR, "Rebalance "
+ "sockfile %s does not exist.",
+ sockfile);
+ goto out;
+ }
+ }
+ }
/* Setting frame-timeout to 10mins (600seconds).
* Unix domain sockets ensures that the connection is reliable. The
@@ -352,6 +363,7 @@ glusterd_rebalance_rpc_create (glusterd_volinfo_t *volinfo,
goto out;
}
+ glusterd_volinfo_ref (volinfo);
synclock_unlock (&priv->big_lock);
ret = glusterd_rpc_create (&defrag->rpc, options,
glusterd_defrag_notify, volinfo);
@@ -533,7 +545,7 @@ glusterd_op_stage_rebalance (dict_t *dict, char **op_errstr)
case GF_DEFRAG_CMD_START:
case GF_DEFRAG_CMD_START_LAYOUT_FIX:
case GF_DEFRAG_CMD_START_FORCE:
- if (is_origin_glusterd ()) {
+ if (is_origin_glusterd (dict)) {
op_ctx = glusterd_op_get_ctx ();
if (!op_ctx) {
ret = -1;
@@ -657,6 +669,12 @@ glusterd_op_rebalance (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
case GF_DEFRAG_CMD_START:
case GF_DEFRAG_CMD_START_LAYOUT_FIX:
case GF_DEFRAG_CMD_START_FORCE:
+                /* Reset the defrag status to 'NOT STARTED' whenever a
+                 * remove-brick/rebalance command is issued, so that stale
+                 * information from a previous run is cleared.
+ */
+ volinfo->rebal.defrag_status = GF_DEFRAG_STATUS_NOT_STARTED;
+
ret = dict_get_str (dict, GF_REBALANCE_TID_KEY, &task_id_str);
if (ret) {
gf_log (this->name, GF_LOG_DEBUG, "Missing rebalance "
@@ -666,9 +684,11 @@ glusterd_op_rebalance (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
                         uuid_parse (task_id_str, volinfo->rebal.rebalance_id);
volinfo->rebal.op = GD_OP_REBALANCE;
}
+ if (!gd_should_i_start_rebalance (volinfo))
+ break;
ret = glusterd_handle_defrag_start (volinfo, msg, sizeof (msg),
cmd, NULL, GD_OP_REBALANCE);
- break;
+ break;
case GF_DEFRAG_CMD_STOP:
/* Clear task-id only on explicitly stopping rebalance.
* Also clear the stored operation, so it doesn't cause trouble
diff --git a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
index 5c879b64c..ed6d7fd57 100644
--- a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
+++ b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
@@ -31,6 +31,7 @@
DEFAULT_VAR_RUN_DIRECTORY"/%s-"RB_CLIENT_MOUNTPOINT, \
volinfo->volname);
+extern uuid_t global_txn_id;
int
glusterd_get_replace_op_str (gf1_cli_replace_op op, char *op_str)
@@ -268,13 +269,6 @@ glusterd_op_stage_replace_brick (dict_t *dict, char **op_errstr,
goto out;
}
- if (volinfo->backend == GD_VOL_BK_BD) {
- snprintf (msg, sizeof (msg), "replace brick not supported "
- "for Block backend volume");
- *op_errstr = gf_strdup (msg);
- goto out;
- }
-
if (GLUSTERD_STATUS_STARTED != volinfo->status) {
ret = -1;
snprintf (msg, sizeof (msg), "volume: %s is not started",
@@ -332,7 +326,7 @@ glusterd_op_stage_replace_brick (dict_t *dict, char **op_errstr,
ret = -1;
goto out;
}
- if (is_origin_glusterd ()) {
+ if (is_origin_glusterd (dict)) {
if (!ctx) {
ret = -1;
gf_log (this->name, GF_LOG_ERROR,
@@ -457,7 +451,7 @@ glusterd_op_stage_replace_brick (dict_t *dict, char **op_errstr,
GLUSTERD_GET_BRICK_PIDFILE (pidfile, volinfo, src_brickinfo,
priv);
if ((replace_op != GF_REPLACE_OP_COMMIT_FORCE) &&
- !glusterd_is_service_running (pidfile, NULL)) {
+ !gf_is_service_running (pidfile, NULL)) {
snprintf(msg, sizeof(msg), "Source brick %s:%s "
"is not online.", src_brickinfo->hostname,
src_brickinfo->path);
@@ -694,7 +688,7 @@ rb_src_brick_restart (glusterd_volinfo_t *volinfo,
sleep (2);
ret = glusterd_volume_start_glusterfs (volinfo, src_brickinfo,
- _gf_false);
+ _gf_false);
if (ret) {
gf_log ("", GF_LOG_ERROR, "Unable to start "
"glusterfs, ret: %d", ret);
@@ -1522,6 +1516,9 @@ glusterd_op_perform_replace_brick (glusterd_volinfo_t *volinfo,
if (ret)
goto out;
+ strncpy (new_brickinfo->brick_id, old_brickinfo->brick_id,
+ sizeof (new_brickinfo->brick_id));
+
list_add_tail (&new_brickinfo->brick_list,
&old_brickinfo->brick_list);
@@ -1638,7 +1635,7 @@ glusterd_op_replace_brick (dict_t *dict, dict_t *rsp_dict)
/* Set task-id, if available, in op_ctx dict for operations
* other than start
*/
- if (is_origin_glusterd ()) {
+ if (is_origin_glusterd (dict)) {
ctx = glusterd_op_get_ctx();
if (!ctx) {
gf_log (this->name, GF_LOG_ERROR, "Failed to "
@@ -1901,14 +1898,17 @@ glusterd_do_replace_brick (void *data)
glusterd_brickinfo_t *src_brickinfo = NULL;
glusterd_brickinfo_t *dst_brickinfo = NULL;
glusterd_conf_t *priv = NULL;
+ uuid_t *txn_id = NULL;
int ret = 0;
dict = data;
GF_ASSERT (THIS);
-
priv = THIS->private;
+ GF_ASSERT (priv);
+
+ txn_id = &priv->global_txn_id;
if (priv->timer) {
gf_timer_call_cancel (THIS->ctx, priv->timer);
@@ -1920,6 +1920,10 @@ glusterd_do_replace_brick (void *data)
gf_log ("", GF_LOG_DEBUG,
"Replace brick operation detected");
+ ret = dict_get_bin (dict, "transaction_id", (void **)&txn_id);
+
+ gf_log ("", GF_LOG_DEBUG, "transaction ID = %s", uuid_utoa (*txn_id));
+
ret = dict_get_int32 (dict, "operation", &op);
if (ret) {
gf_log ("", GF_LOG_DEBUG,
@@ -2015,9 +2019,15 @@ glusterd_do_replace_brick (void *data)
out:
if (ret)
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT, NULL);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT,
+ txn_id, NULL);
else
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_ACC, NULL);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_ACC,
+ txn_id, NULL);
- glusterd_op_sm ();
+ synclock_lock (&priv->big_lock);
+ {
+ glusterd_op_sm ();
+ }
+ synclock_unlock (&priv->big_lock);
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 2d8d381bf..babd5a3be 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -33,6 +33,7 @@
extern glusterd_op_info_t opinfo;
+extern uuid_t global_txn_id;
int32_t
glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret,
@@ -96,13 +97,6 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret,
break;
}
- case GD_OP_QUOTA:
- {
- if (ctx && !op_errstr) {
- ret = dict_get_str (ctx, "errstr", &errstr);
- }
- break;
- }
case GD_OP_PROFILE_VOLUME:
{
if (ctx && dict_get_int32 (ctx, "count", &count)) {
@@ -142,7 +136,8 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret,
case GD_OP_LIST_VOLUME:
case GD_OP_CLEARLOCKS_VOLUME:
case GD_OP_HEAL_VOLUME:
- case GD_OP_BD_OP:
+ case GD_OP_QUOTA:
+ case GD_OP_SNAP:
{
/*nothing specific to be done*/
break;
@@ -581,11 +576,17 @@ __glusterd_cluster_lock_cbk (struct rpc_req *req, struct iovec *iov,
glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
glusterd_peerinfo_t *peerinfo = NULL;
xlator_t *this = NULL;
+ uuid_t *txn_id = NULL;
+ glusterd_conf_t *priv = NULL;
this = THIS;
GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
GF_ASSERT (req);
+ txn_id = &priv->global_txn_id;
+
if (-1 == req->rpc_status) {
rsp.op_ret = -1;
rsp.op_errno = EINVAL;
@@ -625,7 +626,7 @@ out:
event_type = GD_OP_EVENT_RCVD_ACC;
}
- ret = glusterd_op_sm_inject_event (event_type, NULL);
+ ret = glusterd_op_sm_inject_event (event_type, txn_id, NULL);
if (!ret) {
glusterd_friend_sm ();
@@ -644,6 +645,168 @@ glusterd_cluster_lock_cbk (struct rpc_req *req, struct iovec *iov,
__glusterd_cluster_lock_cbk);
}
+static int32_t
+glusterd_mgmt_v3_lock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gd1_mgmt_v3_lock_rsp rsp = {{0},};
+ int ret = -1;
+ int32_t op_ret = -1;
+ glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ uuid_t *txn_id = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ ret = xdr_to_generic (*iov, &rsp,
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to decode mgmt_v3 lock "
+ "response received from peer");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ op_ret = rsp.op_ret;
+
+ txn_id = &rsp.txn_id;
+
+ gf_log (this->name, (op_ret) ? GF_LOG_ERROR : GF_LOG_DEBUG,
+ "Received mgmt_v3 lock %s from uuid: %s",
+ (op_ret) ? "RJT" : "ACC", uuid_utoa (rsp.uuid));
+
+ ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_CRITICAL,
+ "mgmt_v3 lock response received "
+ "from unknown peer: %s. Ignoring response",
+ uuid_utoa (rsp.uuid));
+ goto out;
+ }
+
+ if (op_ret) {
+ event_type = GD_OP_EVENT_RCVD_RJT;
+ opinfo.op_ret = op_ret;
+ opinfo.op_errstr = gf_strdup ("Another transaction could be in "
+                                              "progress. Please try again "
+                                              "after some time.");
+ } else {
+ event_type = GD_OP_EVENT_RCVD_ACC;
+ }
+
+ ret = glusterd_op_sm_inject_event (event_type, txn_id, NULL);
+
+ if (!ret) {
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+ }
+
+out:
+ GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe));
+ return ret;
+}
+
+int32_t
+glusterd_mgmt_v3_lock_peers_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ return glusterd_big_locked_cbk (req, iov, count, myframe,
+ glusterd_mgmt_v3_lock_peers_cbk_fn);
+}
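
The callback pair above follows the file-wide convention: the *_fn body runs
under glusterd's big lock, and the registered callback is a thin forwarding
shim. Reduced to plain pthreads (glusterd actually uses its synclock), the
shape is, as a sketch:

#include <pthread.h>

typedef int (*cbk_fn_t) (void *req);

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* Sketch: run the real callback body with the big lock held. */
static int
big_locked_cbk (void *req, cbk_fn_t fn)
{
        int ret;

        pthread_mutex_lock (&big_lock);
        ret = fn (req);
        pthread_mutex_unlock (&big_lock);
        return ret;
}
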
+
+static int32_t
+glusterd_mgmt_v3_unlock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gd1_mgmt_v3_unlock_rsp rsp = {{0},};
+ int ret = -1;
+ int32_t op_ret = -1;
+ glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ uuid_t *txn_id = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ ret = xdr_to_generic (*iov, &rsp,
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to decode mgmt_v3 unlock "
+ "response received from peer");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ op_ret = rsp.op_ret;
+
+ txn_id = &rsp.txn_id;
+
+ gf_log (this->name, (op_ret) ? GF_LOG_ERROR : GF_LOG_DEBUG,
+ "Received mgmt_v3 unlock %s from uuid: %s",
+ (op_ret) ? "RJT" : "ACC",
+ uuid_utoa (rsp.uuid));
+
+ ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_CRITICAL,
+ "mgmt_v3 unlock response received "
+ "from unknown peer: %s. Ignoring response",
+ uuid_utoa (rsp.uuid));
+ goto out;
+ }
+
+ if (op_ret) {
+ event_type = GD_OP_EVENT_RCVD_RJT;
+ opinfo.op_ret = op_ret;
+ opinfo.op_errstr = gf_strdup ("Another transaction could be in "
+                                              "progress. Please try again "
+                                              "after some time.");
+ } else {
+ event_type = GD_OP_EVENT_RCVD_ACC;
+ }
+
+ ret = glusterd_op_sm_inject_event (event_type, txn_id, NULL);
+
+ if (!ret) {
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+ }
+
+out:
+ GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe));
+ return ret;
+}
+
+int32_t
+glusterd_mgmt_v3_unlock_peers_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ return glusterd_big_locked_cbk (req, iov, count, myframe,
+ glusterd_mgmt_v3_unlock_peers_cbk_fn);
+}
+
int32_t
__glusterd_cluster_unlock_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
@@ -654,11 +817,17 @@ __glusterd_cluster_unlock_cbk (struct rpc_req *req, struct iovec *iov,
glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
glusterd_peerinfo_t *peerinfo = NULL;
xlator_t *this = NULL;
+ uuid_t *txn_id = NULL;
+ glusterd_conf_t *priv = NULL;
this = THIS;
GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
GF_ASSERT (req);
+ txn_id = &priv->global_txn_id;
+
if (-1 == req->rpc_status) {
rsp.op_ret = -1;
rsp.op_errno = EINVAL;
@@ -695,7 +864,7 @@ out:
event_type = GD_OP_EVENT_RCVD_ACC;
}
- ret = glusterd_op_sm_inject_event (event_type, NULL);
+ ret = glusterd_op_sm_inject_event (event_type, txn_id, NULL);
if (!ret) {
glusterd_friend_sm ();
@@ -727,6 +896,7 @@ __glusterd_stage_op_cbk (struct rpc_req *req, struct iovec *iov,
char err_str[2048] = {0};
char *peer_str = NULL;
xlator_t *this = NULL;
+ uuid_t *txn_id = NULL;
this = THIS;
GF_ASSERT (this);
@@ -779,11 +949,17 @@ out:
"Received stage %s from uuid: %s",
(op_ret) ? "RJT" : "ACC", uuid_utoa (rsp.uuid));
+        ret = dict_get_bin (dict, "transaction_id", (void **)&txn_id);
+
+        if (!ret)
+                gf_log ("", GF_LOG_DEBUG, "transaction ID = %s",
+                        uuid_utoa (*txn_id));
+
ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo);
if (ret) {
gf_log (this->name, GF_LOG_CRITICAL, "Stage response received "
- "from unknown peer: %s", uuid_utoa (rsp.uuid));
+ "from unknown peer: %s. Ignoring response.",
+ uuid_utoa (rsp.uuid));
+ goto out;
}
if (op_ret) {
@@ -814,7 +990,7 @@ out:
break;
}
- ret = glusterd_op_sm_inject_event (event_type, NULL);
+ ret = glusterd_op_sm_inject_event (event_type, txn_id, NULL);
if (!ret) {
glusterd_friend_sm ();
@@ -854,6 +1030,8 @@ __glusterd_commit_op_cbk (struct rpc_req *req, struct iovec *iov,
char err_str[2048] = {0};
char *peer_str = NULL;
xlator_t *this = NULL;
+ uuid_t *txn_id = NULL;
+
this = THIS;
GF_ASSERT (this);
@@ -907,6 +1085,10 @@ __glusterd_commit_op_cbk (struct rpc_req *req, struct iovec *iov,
"Received commit %s from uuid: %s",
(op_ret)?"RJT":"ACC", uuid_utoa (rsp.uuid));
+        ret = dict_get_bin (dict, "transaction_id", (void **)&txn_id);
+
+        if (!ret)
+                gf_log ("", GF_LOG_DEBUG, "transaction ID = %s",
+                        uuid_utoa (*txn_id));
+
ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo);
if (ret) {
@@ -986,7 +1168,7 @@ __glusterd_commit_op_cbk (struct rpc_req *req, struct iovec *iov,
}
out:
- ret = glusterd_op_sm_inject_event (event_type, NULL);
+ ret = glusterd_op_sm_inject_event (event_type, txn_id, NULL);
if (!ret) {
glusterd_friend_sm ();
@@ -1060,12 +1242,12 @@ int32_t
glusterd_rpc_friend_add (call_frame_t *frame, xlator_t *this,
void *data)
{
- gd1_mgmt_friend_req req = {{0},};
- int ret = 0;
- glusterd_peerinfo_t *peerinfo = NULL;
- glusterd_conf_t *priv = NULL;
- glusterd_friend_sm_event_t *event = NULL;
- dict_t *vols = NULL;
+ gd1_mgmt_friend_req req = {{0},};
+ int ret = 0;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_friend_sm_event_t *event = NULL;
+ dict_t *peer_data = NULL;
if (!frame || !this || !data) {
@@ -1080,15 +1262,37 @@ glusterd_rpc_friend_add (call_frame_t *frame, xlator_t *this,
peerinfo = event->peerinfo;
- ret = glusterd_build_volume_dict (&vols);
- if (ret)
+ ret = glusterd_add_volumes_to_export_dict (&peer_data);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to add list of volumes "
+ "in the peer_data dict for handshake");
goto out;
+ }
+
+ if (priv->op_version >= GD_OP_VERSION_4) {
+ ret = glusterd_add_missed_snaps_to_export_dict (peer_data);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to add list of missed snapshots "
+ "in the peer_data dict for handshake");
+ goto out;
+ }
+
+ ret = glusterd_add_snapshots_to_export_dict (peer_data);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to add list of snapshots "
+ "in the peer_data dict for handshake");
+ goto out;
+ }
+ }
uuid_copy (req.uuid, MY_UUID);
req.hostname = peerinfo->hostname;
req.port = peerinfo->port;
- ret = dict_allocate_and_serialize (vols, &req.vols.vols_val,
+ ret = dict_allocate_and_serialize (peer_data, &req.vols.vols_val,
&req.vols.vols_len);
if (ret)
goto out;
@@ -1102,8 +1306,8 @@ glusterd_rpc_friend_add (call_frame_t *frame, xlator_t *this,
out:
GF_FREE (req.vols.vols_val);
- if (vols)
- dict_unref (vols);
+ if (peer_data)
+ dict_unref (peer_data);
gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
return ret;
@@ -1223,6 +1427,139 @@ out:
}
int32_t
+glusterd_mgmt_v3_lock_peers (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gd1_mgmt_v3_lock_req req = {{0},};
+ int ret = -1;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ call_frame_t *dummy_frame = NULL;
+ dict_t *dict = NULL;
+ uuid_t *txn_id = NULL;
+
+ if (!this)
+ goto out;
+
+ dict = data;
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ ret = dict_get_ptr (dict, "peerinfo", VOID (&peerinfo));
+ if (ret)
+ goto out;
+
+ //peerinfo should not be in payload
+ dict_del (dict, "peerinfo");
+
+ glusterd_get_uuid (&req.uuid);
+
+ ret = dict_allocate_and_serialize (dict, &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to serialize dict "
+ "to request buffer");
+ goto out;
+ }
+
+ /* Sending valid transaction ID to peers */
+ ret = dict_get_bin (dict, "transaction_id",
+ (void **)&txn_id);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get transaction id.");
+ goto out;
+ } else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Transaction_id = %s", uuid_utoa (*txn_id));
+ uuid_copy (req.txn_id, *txn_id);
+ }
+
+ dummy_frame = create_frame (this, this->ctx->pool);
+ if (!dummy_frame) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_submit_request (peerinfo->rpc, &req, dummy_frame,
+ peerinfo->mgmt_v3,
+ GLUSTERD_MGMT_V3_LOCK, NULL,
+ this, glusterd_mgmt_v3_lock_peers_cbk,
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_req);
+out:
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
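+
+/* Illustrative sketch (not part of this patch): before calling
+ * glusterd_mgmt_v3_lock_peers(), the transaction initiator is expected to
+ * have placed a binary "transaction_id" in the dict, roughly as below.
+ * The mem-type used for the allocation is an assumption.
+ *
+ * uuid_t *txn_id = GF_CALLOC (1, sizeof (uuid_t), gf_common_mt_uuid_t);
+ * if (txn_id) {
+ * uuid_generate (*txn_id);
+ * ret = dict_set_bin (dict, "transaction_id",
+ * txn_id, sizeof (*txn_id));
+ * }
+ */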
+
+int32_t
+glusterd_mgmt_v3_unlock_peers (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gd1_mgmt_v3_unlock_req req = {{0},};
+ int ret = -1;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ call_frame_t *dummy_frame = NULL;
+ dict_t *dict = NULL;
+ uuid_t *txn_id = NULL;
+
+ if (!this)
+ goto out;
+
+ dict = data;
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ ret = dict_get_ptr (dict, "peerinfo", VOID (&peerinfo));
+ if (ret)
+ goto out;
+
+ //peerinfo should not be in payload
+ dict_del (dict, "peerinfo");
+
+ glusterd_get_uuid (&req.uuid);
+
+ ret = dict_allocate_and_serialize (dict, &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to serialize dict "
+ "to request buffer");
+ goto out;
+ }
+
+ /* Sending valid transaction ID to peers */
+ ret = dict_get_bin (dict, "transaction_id",
+ (void **)&txn_id);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get transaction id.");
+ goto out;
+ } else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Transaction_id = %s", uuid_utoa (*txn_id));
+ uuid_copy (req.txn_id, *txn_id);
+ }
+
+ dummy_frame = create_frame (this, this->ctx->pool);
+ if (!dummy_frame) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_submit_request (peerinfo->rpc, &req, dummy_frame,
+ peerinfo->mgmt_v3,
+ GLUSTERD_MGMT_V3_UNLOCK, NULL,
+ this, glusterd_mgmt_v3_unlock_peers_cbk,
+ (xdrproc_t)
+ xdr_gd1_mgmt_v3_unlock_req);
+out:
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
glusterd_cluster_unlock (call_frame_t *frame, xlator_t *this,
void *data)
{
@@ -1385,11 +1722,16 @@ __glusterd_brick_op_cbk (struct rpc_req *req, struct iovec *iov,
glusterd_req_ctx_t *req_ctx = NULL;
glusterd_pending_node_t *node = NULL;
xlator_t *this = NULL;
+ uuid_t *txn_id = NULL;
+ glusterd_conf_t *priv = NULL;
this = THIS;
GF_ASSERT (this);
-
+ priv = this->private;
+ GF_ASSERT (priv);
GF_ASSERT (req);
+
+ txn_id = &priv->global_txn_id;
frame = myframe;
req_ctx = frame->local;
@@ -1447,6 +1789,11 @@ __glusterd_brick_op_cbk (struct rpc_req *req, struct iovec *iov,
}
}
out:
+
+ ret = dict_get_bin (req_ctx->dict, "transaction_id", (void **)&txn_id);
+
+ gf_log (this->name, GF_LOG_DEBUG, "transaction ID = %s",
+ uuid_utoa (*txn_id));
+
ev_ctx = GF_CALLOC (1, sizeof (*ev_ctx), gf_gld_mt_brick_rsp_ctx_t);
GF_ASSERT (ev_ctx);
if (op_ret) {
@@ -1459,7 +1806,7 @@ out:
ev_ctx->pending_node = frame->cookie;
ev_ctx->rsp_dict = dict;
ev_ctx->commit_ctx = frame->local;
- ret = glusterd_op_sm_inject_event (event_type, ev_ctx);
+ ret = glusterd_op_sm_inject_event (event_type, txn_id, ev_ctx);
if (!ret) {
glusterd_friend_sm ();
glusterd_op_sm ();
@@ -1482,8 +1829,9 @@ glusterd_brick_op_cbk (struct rpc_req *req, struct iovec *iov,
int32_t
glusterd_brick_op (call_frame_t *frame, xlator_t *this,
- void *data)
+ void *data)
{
+
gd1_mgmt_brick_op_req *req = NULL;
int ret = 0;
glusterd_conf_t *priv = NULL;
@@ -1494,6 +1842,7 @@ glusterd_brick_op (call_frame_t *frame, xlator_t *this,
glusterd_req_ctx_t *req_ctx = NULL;
struct rpc_clnt *rpc = NULL;
dict_t *op_ctx = NULL;
+ uuid_t *txn_id = NULL;
if (!this) {
ret = -1;
@@ -1502,6 +1851,8 @@ glusterd_brick_op (call_frame_t *frame, xlator_t *this,
priv = this->private;
GF_ASSERT (priv);
+ txn_id = &priv->global_txn_id;
+
req_ctx = data;
GF_ASSERT (req_ctx);
INIT_LIST_HEAD (&opinfo.pending_bricks);
@@ -1516,14 +1867,20 @@ glusterd_brick_op (call_frame_t *frame, xlator_t *this,
goto out;
}
+
+ ret = dict_get_bin (req_ctx->dict, "transaction_id", (void **)&txn_id);
+
+ gf_log (this->name, GF_LOG_DEBUG, "transaction ID = %s",
+ uuid_utoa (*txn_id));
+
list_for_each_entry (pending_node, &opinfo.pending_bricks, list) {
dummy_frame = create_frame (this, this->ctx->pool);
if (!dummy_frame)
continue;
if ((pending_node->type == GD_NODE_NFS) ||
+ (pending_node->type == GD_NODE_QUOTAD) ||
((pending_node->type == GD_NODE_SHD) &&
- (req_ctx->op == GD_OP_STATUS_VOLUME)))
+ (req_ctx->op == GD_OP_STATUS_VOLUME)))
ret = glusterd_node_op_build_payload
(req_ctx->op,
(gd1_mgmt_brick_op_req **)&req,
@@ -1593,7 +1950,8 @@ glusterd_brick_op (call_frame_t *frame, xlator_t *this,
out:
if (ret) {
- glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT, data);
+ glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT,
+ txn_id, data);
opinfo.op_ret = ret;
}
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
@@ -1621,6 +1979,12 @@ struct rpc_clnt_procedure gd_mgmt_actors[GLUSTERD_MGMT_MAXVALUE] = {
[GLUSTERD_MGMT_COMMIT_OP] = {"COMMIT_OP", glusterd_commit_op},
};
+struct rpc_clnt_procedure gd_mgmt_v3_actors[GLUSTERD_MGMT_V3_MAXVALUE] = {
+ [GLUSTERD_MGMT_V3_NULL] = {"NULL", NULL },
+ [GLUSTERD_MGMT_V3_LOCK] = {"MGMT_V3_LOCK", glusterd_mgmt_v3_lock_peers},
+ [GLUSTERD_MGMT_V3_UNLOCK] = {"MGMT_V3_UNLOCK", glusterd_mgmt_v3_unlock_peers},
+};
+
struct rpc_clnt_program gd_mgmt_prog = {
.progname = "glusterd mgmt",
.prognum = GD_MGMT_PROGRAM,
@@ -1645,4 +2009,10 @@ struct rpc_clnt_program gd_peer_prog = {
.numproc = GLUSTERD_FRIEND_MAXVALUE,
};
-
+struct rpc_clnt_program gd_mgmt_v3_prog = {
+ .progname = "glusterd mgmt v3",
+ .prognum = GD_MGMT_PROGRAM,
+ .progver = GD_MGMT_V3_VERSION,
+ .proctable = gd_mgmt_v3_actors,
+ .numproc = GLUSTERD_MGMT_V3_MAXVALUE,
+};
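+
+/* Illustrative sketch (not part of this patch): during the peer handshake
+ * the new program is expected to be hooked up to each peer alongside the
+ * existing mgmt and peer programs, so that glusterd_submit_request() can
+ * reach it through peerinfo->mgmt_v3; the exact call site is an assumption.
+ *
+ * peerinfo->mgmt_v3 = &gd_mgmt_v3_prog;
+ */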
diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.c b/xlators/mgmt/glusterd/src/glusterd-sm.c
index c671edf68..7a8b2c94f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-sm.c
@@ -34,6 +34,7 @@
#include "glusterd-op-sm.h"
#include "glusterd-utils.h"
#include "glusterd-store.h"
+#include "glusterd-etcd.h"
static struct list_head gd_friend_sm_queue;
@@ -527,6 +528,9 @@ out:
return ret;
}
+/* Clean up stale volumes on the peer being detached. Volumes whose bricks
+ * live entirely on other peers are stale with respect to the detached peer.
+ */
static int
glusterd_peer_detach_cleanup (glusterd_conf_t *priv)
{
@@ -538,6 +542,12 @@ glusterd_peer_detach_cleanup (glusterd_conf_t *priv)
list_for_each_entry_safe (volinfo,tmp_volinfo,
&priv->volumes, vol_list) {
+ /* The peer detach checks make sure that, at this point in the
+ * detach process, there are only volumes contained completely
+ * within or completely outside the detached peer.
+ * The only stale volumes at this point are the ones
+ * completely outside the peer and can be safely deleted.
+ */
if (!glusterd_friend_contains_vol_bricks (volinfo,
MY_UUID)) {
gf_log (THIS->name, GF_LOG_INFO,
@@ -596,6 +606,9 @@ glusterd_ac_handle_friend_remove_req (glusterd_friend_sm_event_t *event,
"Peer detach cleanup was not successful");
ret = 0;
}
+ gf_log (THIS->name, GF_LOG_INFO, "detached, stopping etcd");
+ stop_etcd(priv->etcd_pid);
+ nuke_etcd_dir();
out:
gf_log (THIS->name, GF_LOG_DEBUG, "Returning with %d", ret);
@@ -639,10 +652,14 @@ glusterd_ac_handle_friend_add_req (glusterd_friend_sm_event_t *event, void *ctx)
glusterd_friend_update_ctx_t *new_ev_ctx = NULL;
glusterd_friend_sm_event_t *new_event = NULL;
glusterd_friend_sm_event_type_t event_type = GD_FRIEND_EVENT_NONE;
+ glusterd_conf_t *conf = NULL;
int status = 0;
int32_t op_ret = -1;
int32_t op_errno = 0;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT (this);
GF_ASSERT (ctx);
ev_ctx = ctx;
uuid_copy (uuid, ev_ctx->uuid);
@@ -650,6 +667,9 @@ glusterd_ac_handle_friend_add_req (glusterd_friend_sm_event_t *event, void *ctx)
GF_ASSERT (peerinfo);
uuid_copy (peerinfo->uuid, ev_ctx->uuid);
+ conf = this->private;
+ GF_ASSERT (conf);
+
//Build comparison logic here.
ret = glusterd_compare_friend_data (ev_ctx->vols, &status,
peerinfo->hostname);
@@ -665,6 +685,31 @@ glusterd_ac_handle_friend_add_req (glusterd_friend_sm_event_t *event, void *ctx)
op_ret = -1;
}
+ /* Compare missed_snapshot list with the peer *
+ * if volume comparison is successful */
+ if ((op_ret == 0) &&
+ (conf->op_version >= GD_OP_VERSION_4)) {
+ ret = glusterd_import_friend_missed_snap_list (ev_ctx->vols);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to import peer's "
+ "missed_snaps_list.");
+ event_type = GD_FRIEND_EVENT_LOCAL_RJT;
+ op_errno = GF_PROBE_MISSED_SNAP_CONFLICT;
+ op_ret = -1;
+ }
+
+ ret = glusterd_compare_friend_snapshots (ev_ctx->vols,
+ peerinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Conflict in comparing peer's snapshots");
+ event_type = GD_FRIEND_EVENT_LOCAL_RJT;
+ op_errno = GF_PROBE_SNAP_CONFLICT;
+ op_ret = -1;
+ }
+ }
+
ret = glusterd_friend_sm_new_event (event_type, &new_event);
if (ret) {
@@ -692,6 +737,13 @@ glusterd_ac_handle_friend_add_req (glusterd_friend_sm_event_t *event, void *ctx)
peerinfo->hostname, ev_ctx->port,
op_ret, op_errno);
+ // apply a deterministic function to decide via whom we should join the cluster
+ if (strcmp(peerinfo->hostname, ev_ctx->hostname) > 0) {
+ stop_etcd(conf->etcd_pid);
+ nuke_etcd_dir();
+ conf->etcd_pid = start_etcd (uuid_utoa(MY_UUID), peerinfo->hostname);
+ }
+
out:
gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.h b/xlators/mgmt/glusterd/src/glusterd-sm.h
index e29bb7277..b9bedbe69 100644
--- a/xlators/mgmt/glusterd/src/glusterd-sm.h
+++ b/xlators/mgmt/glusterd/src/glusterd-sm.h
@@ -97,6 +97,7 @@ struct glusterd_peerinfo_ {
struct rpc_clnt *rpc;
rpc_clnt_prog_t *mgmt;
rpc_clnt_prog_t *peer;
+ rpc_clnt_prog_t *mgmt_v3;
int connected;
gf_store_handle_t *shandle;
glusterd_sm_tr_log_t sm_log;
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
new file mode 100644
index 000000000..0e824a022
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
@@ -0,0 +1,5787 @@
+/*
+ Copyright (c) 2013-2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include <inttypes.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <sys/resource.h>
+#include <sys/statvfs.h>
+#include <sys/mount.h>
+#include <signal.h>
+
+
+#if !defined(__NetBSD__) && !defined(GF_DARWIN_HOST_OS)
+#include <mntent.h>
+#else
+#include "mntent_compat.h"
+#endif
+
+#include "globals.h"
+#include "compat.h"
+#include "protocol-common.h"
+#include "xlator.h"
+#include "logging.h"
+#include "timer.h"
+#include "glusterd-mem-types.h"
+#include "glusterd.h"
+#include "glusterd-sm.h"
+#include "glusterd-op-sm.h"
+#include "glusterd-utils.h"
+#include "glusterd-store.h"
+#include "run.h"
+#include "glusterd-volgen.h"
+#include "glusterd-mgmt.h"
+#include "glusterd-syncop.h"
+
+#include "syscall.h"
+#include "cli1-xdr.h"
+#include "xdr-generic.h"
+
+#include "lvm-defaults.h"
+
+char snap_mount_folder[PATH_MAX];
+
+/* Look for disconnected peers, for missed snap creates or deletes */
+static int32_t
+glusterd_find_missed_snap (dict_t *rsp_dict, glusterd_volinfo_t *vol,
+ struct list_head *peers, int32_t op)
+{
+ int32_t brick_count = -1;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (peers);
+ GF_ASSERT (vol);
+
+ brick_count = 0;
+ list_for_each_entry (brickinfo, &vol->bricks, brick_list) {
+ if (!uuid_compare (brickinfo->uuid, MY_UUID)) {
+ /* If the brick belongs to the same node */
+ brick_count++;
+ continue;
+ }
+
+ list_for_each_entry (peerinfo, peers, uuid_list) {
+ if (uuid_compare (peerinfo->uuid, brickinfo->uuid)) {
+ /* If the brick doesn't belong to this peer */
+ continue;
+ }
+
+ /* Found the peer that owns the brick. *
+ * If the peer is not connected or not *
+ * befriended, add it to the missed *
+ * snap list */
+ if (!(peerinfo->connected) ||
+ (peerinfo->state.state !=
+ GD_FRIEND_STATE_BEFRIENDED)) {
+ ret = glusterd_add_missed_snaps_to_dict
+ (rsp_dict,
+ vol, brickinfo,
+ brick_count + 1,
+ op);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to add missed snapshot "
+ "info for %s:%s in the "
+ "rsp_dict", brickinfo->hostname,
+ brickinfo->path);
+ goto out;
+ }
+ }
+ }
+ brick_count++;
+ }
+
+ ret = 0;
+out:
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
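+
+/* Illustrative sketch (not part of this patch): the delete path can record
+ * snapshot deletes missed by disconnected peers with the same helper,
+ * mirroring the restore-path call in glusterd_snapshot_restore():
+ *
+ * ret = glusterd_find_missed_snap (rsp_dict, snap_vol, &priv->peers,
+ * GF_SNAP_OPTION_TYPE_DELETE);
+ */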
+
+/* This function will restore a snapshot volume.
+ *
+ * @param dict dictionary containing snapshot restore request
+ * @param op_errstr In case of any failure error message will be returned
+ * in this variable
+ * @return Negative value on failure and 0 on success
+ */
+int
+glusterd_snapshot_restore (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
+{
+ int ret = -1;
+ char *snapname = NULL;
+ xlator_t *this = NULL;
+ glusterd_volinfo_t *snap_volinfo = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_snap_t *snap = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ this = THIS;
+
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (rsp_dict);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get "
+ "snap name");
+ goto out;
+ }
+
+ snap = glusterd_find_snap_by_name (snapname);
+ if (NULL == snap) {
+ ret = gf_asprintf (op_errstr, "Snap (%s) not found",
+ snapname);
+ if (ret < 0) {
+ goto out;
+ }
+ gf_log (this->name, GF_LOG_ERROR, "%s", *op_errstr);
+ ret = -1;
+ goto out;
+ }
+
+
+ /* TODO : As of now there is only one volume in a snapshot.
+ * Change this when multiple volume snapshots are introduced
+ */
+ snap_volinfo = list_entry (snap->volumes.next, glusterd_volinfo_t,
+ vol_list);
+
+ ret = glusterd_volinfo_find (snap_volinfo->parent_volname, &volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Could not get volinfo of "
+ "%s", snap_volinfo->parent_volname);
+ goto out;
+ }
+
+ if (is_origin_glusterd (dict) == _gf_true) {
+ /* From origin glusterd check if *
+ * any peers with snap bricks is down */
+ ret = glusterd_find_missed_snap (rsp_dict, snap_volinfo,
+ &priv->peers,
+ GF_SNAP_OPTION_TYPE_RESTORE);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to find missed snap restores");
+ goto out;
+ }
+ }
+
+ ret = gd_restore_snap_volume (rsp_dict, volinfo, snap_volinfo);
+ if (ret) {
+ /* No need to update op_errstr because it is assumed
+ * that the called function will do that in case of
+ * failure.
+ */
+ gf_log (this->name, GF_LOG_ERROR, "Failed to restore "
+ "snap for %s", snapname);
+ goto out;
+ }
+
+ ret = 0;
+
+ /* TODO: Need to check if we need to delete the snap after the
+ * operation is successful or not. Also need to persist the state
+ * of restore operation in the store.
+ */
+out:
+ return ret;
+}
+
+/* This function is called before the actual restore takes place. It
+ * validates whether the snapshot volumes are ready to be restored or not.
+ *
+ * @param dict dictionary containing snapshot restore request
+ * @param op_errstr In case of any failure error message will be returned
+ * in this variable
+ * @param rsp_dict response dictionary
+ * @return Negative value on failure and 0 on success
+ */
+int32_t
+glusterd_snapshot_restore_prevalidate (dict_t *dict, char **op_errstr,
+ dict_t *rsp_dict)
+{
+ int ret = -1;
+ int32_t i = 0;
+ int32_t volcount = 0;
+ gf_boolean_t snap_restored = _gf_false;
+ char key[PATH_MAX] = {0, };
+ char *volname = NULL;
+ char *snapname = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_snap_t *snap = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (rsp_dict);
+
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get "
+ "snap name");
+ goto out;
+ }
+
+ snap = glusterd_find_snap_by_name (snapname);
+ if (NULL == snap) {
+ ret = gf_asprintf (op_errstr, "Snap (%s) not found",
+ snapname);
+ if (ret < 0) {
+ goto out;
+ }
+ gf_log (this->name, GF_LOG_ERROR, "%s", *op_errstr);
+ ret = -1;
+ goto out;
+ }
+
+ snap_restored = snap->snap_restored;
+
+ if (snap_restored) {
+ ret = gf_asprintf (op_errstr, "Snap (%s) is already "
+ "restored", snapname);
+ if (ret < 0) {
+ goto out;
+ }
+ gf_log (this->name, GF_LOG_ERROR, "%s", *op_errstr);
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_str (rsp_dict, "snapname", snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set "
+ "snap name(%s)", snapname);
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, "volcount", &volcount);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get volume count");
+ goto out;
+ }
+
+ /* Snapshot restore will only work if all the volumes
+ that are part of the snapshot are stopped. */
+ for (i = 1; i <= volcount; ++i) {
+ snprintf (key, sizeof (key), "volname%d", i);
+ ret = dict_get_str (dict, key, &volname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to "
+ "get volume name");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ ret = gf_asprintf (op_errstr, "Volume (%s) not found",
+ volname);
+ if (ret < 0) {
+ goto out;
+ }
+ gf_log (this->name, GF_LOG_ERROR, "%s", *op_errstr);
+ ret = -1;
+ goto out;
+ }
+
+ if (glusterd_is_volume_started (volinfo)) {
+ ret = gf_asprintf (op_errstr, "Volume (%s) has been "
+ "started. Volume needs to be stopped before restoring "
+ "a snapshot.", volname);
+ if (ret < 0) {
+ goto out;
+ }
+ gf_log (this->name, GF_LOG_ERROR, "%s", *op_errstr);
+ ret = -1;
+ goto out;
+ }
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+int
+snap_max_hard_limits_validate (dict_t *dict, char *volname,
+ uint64_t value, char **op_errstr)
+{
+ char err_str[PATH_MAX] = "";
+ glusterd_conf_t *conf = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ int ret = -1;
+ uint64_t max_limit = GLUSTERD_SNAPS_MAX_HARD_LIMIT;
+ xlator_t *this = NULL;
+
+ this = THIS;
+
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+
+ conf = this->private;
+
+ GF_ASSERT (conf);
+
+ if (volname) {
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (!ret) {
+ if (volinfo->is_snap_volume) {
+ ret = -1;
+ snprintf (err_str, PATH_MAX,
+ "%s is a snap volume. Configuring "
+ "snap-max-hard-limit for a snap "
+ "volume is prohibited.", volname);
+ goto out;
+ }
+ }
+ }
+
+ if (value) {
+ /* Max limit for the system is GLUSTERD_SNAPS_MAX_HARD_LIMIT
+ * but max limit for a volume is conf->snap_max_hard_limit.
+ */
+ if (volname) {
+ max_limit = conf->snap_max_hard_limit;
+ } else {
+ max_limit = GLUSTERD_SNAPS_MAX_HARD_LIMIT;
+ }
+ }
+
+ /* value is unsigned, so only the upper bound needs checking */
+ if (value > max_limit) {
+ ret = -1;
+ snprintf (err_str, PATH_MAX, "Invalid snap-max-hard-limit "
+ "%"PRIu64 ". Expected range 0 - %"PRIu64,
+ value, max_limit);
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (ret) {
+ *op_errstr = gf_strdup (err_str);
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ }
+ return ret;
+}
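+
+/* Illustrative sketch (not part of this patch): with a hypothetical volume
+ * "vol0" whose effective ceiling is conf->snap_max_hard_limit (assuming the
+ * system-wide GLUSTERD_SNAPS_MAX_HARD_LIMIT of 256), a request such as
+ *
+ * ret = snap_max_hard_limits_validate (dict, "vol0", 300, &op_errstr);
+ *
+ * fails with ret == -1 and op_errstr set to "Invalid snap-max-hard-limit
+ * 300. Expected range 0 - 256", while a value of 100 passes.
+ */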
+
+int
+glusterd_snapshot_config_prevalidate (dict_t *dict, char **op_errstr)
+{
+ char *volname = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
+ int ret = -1;
+ int config_command = 0;
+ char err_str[PATH_MAX] = {0,};
+ glusterd_conf_t *conf = NULL;
+ uint64_t value = 0;
+ uint64_t hard_limit = 0;
+ uint64_t soft_limit = 0;
+ gf_loglevel_t loglevel = GF_LOG_ERROR;
+ uint64_t max_limit = GLUSTERD_SNAPS_MAX_HARD_LIMIT;
+
+ this = THIS;
+
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+
+ conf = this->private;
+
+ GF_ASSERT (conf);
+
+ ret = dict_get_int32 (dict, "config-command", &config_command);
+ if (ret) {
+ snprintf (err_str, sizeof (err_str),
+ "failed to get config-command type");
+ goto out;
+ }
+
+ ret = dict_get_uint64 (dict, "snap-max-hard-limit", &hard_limit);
+
+ ret = dict_get_uint64 (dict, "snap-max-soft-limit", &soft_limit);
+
+ ret = dict_get_str (dict, "volname", &volname);
+
+ if (volname) {
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ snprintf (err_str, sizeof (err_str),
+ "Volume %s does not exist.", volname);
+ goto out;
+ }
+ }
+
+ switch (config_command) {
+ case GF_SNAP_CONFIG_TYPE_SET:
+ if (hard_limit) {
+ /* Validations for snap-max-hard-limits */
+ ret = snap_max_hard_limits_validate (dict, volname,
+ hard_limit, op_errstr);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "snap-max-hard-limit validation "
+ "failed.");
+ goto out;
+ }
+ }
+
+ if (soft_limit) {
+ max_limit = GLUSTERD_SNAPS_MAX_SOFT_LIMIT_PERCENT;
+ /* soft_limit is unsigned; report soft_limit, not value */
+ if (soft_limit > max_limit) {
+ ret = -1;
+ snprintf (err_str, PATH_MAX, "Invalid "
+ "snap-max-soft-limit %"
+ PRIu64 ". Expected range 0 - %"PRIu64,
+ soft_limit, max_limit);
+ goto out;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ ret = 0;
+out:
+
+ if (ret && err_str[0] != '\0') {
+ gf_log (this->name, loglevel, "%s", err_str);
+ *op_errstr = gf_strdup (err_str);
+ }
+
+ return ret;
+}
+
+int
+glusterd_snap_create_pre_val_use_rsp_dict (dict_t *dst, dict_t *src)
+{
+ char *snap_brick_dir = NULL;
+ char *snap_device = NULL;
+ char *tmpstr = NULL;
+ char key[PATH_MAX] = "";
+ char snapbrckcnt[PATH_MAX] = "";
+ char snapbrckord[PATH_MAX] = "";
+ int ret = -1;
+ int64_t i = -1;
+ int64_t j = -1;
+ int64_t volume_count = 0;
+ int64_t brick_count = 0;
+ int64_t brick_order = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (dst);
+ GF_ASSERT (src);
+
+ ret = dict_get_int64 (src, "volcount", &volume_count);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to "
+ "get the volume count");
+ goto out;
+ }
+
+ for (i = 0; i < volume_count; i++) {
+ memset (snapbrckcnt, '\0', sizeof(snapbrckcnt));
+ ret = snprintf (snapbrckcnt, sizeof(snapbrckcnt) - 1,
+ "vol%"PRId64"_brickcount", i+1);
+ ret = dict_get_int64 (src, snapbrckcnt, &brick_count);
+ if (ret) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "No bricks for this volume in this dict");
+ continue;
+ }
+
+ for (j = 0; j < brick_count; j++) {
+ /* Fetching data from source dict */
+ snprintf (key, sizeof(key) - 1,
+ "vol%"PRId64".brickdir%"PRId64, i+1, j);
+
+ ret = dict_get_ptr (src, key,
+ (void **)&snap_brick_dir);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Unable to fetch %s", key);
+ continue;
+ }
+
+ snprintf (key, sizeof(key) - 1,
+ "vol%"PRId64".brick_snapdevice%"PRId64, i+1, j);
+
+ ret = dict_get_ptr (src, key,
+ (void **)&snap_device);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to fetch snap_device");
+ goto out;
+ }
+
+ snprintf (snapbrckord, sizeof(snapbrckord) - 1,
+ "vol%"PRId64".brick%"PRId64".order", i+1, j);
+
+ ret = dict_get_int64 (src, snapbrckord, &brick_order);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get brick order");
+ goto out;
+ }
+
+ /* Adding the data in the dst dict */
+ snprintf (key, sizeof(key) - 1,
+ "vol%"PRId64".brickdir%"PRId64, i+1, brick_order);
+
+ tmpstr = gf_strdup (snap_brick_dir);
+ if (!tmpstr) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Out Of Memory");
+ ret = -1;
+ goto out;
+ }
+ ret = dict_set_dynstr (dst, key, tmpstr);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set %s", key);
+ GF_FREE (tmpstr);
+ goto out;
+ }
+
+ snprintf (key, sizeof(key) - 1,
+ "vol%"PRId64".brick_snapdevice%"PRId64,
+ i+1, brick_order);
+
+ tmpstr = gf_strdup (snap_device);
+ if (!tmpstr) {
+ ret = -1;
+ goto out;
+ }
+ ret = dict_set_dynstr (dst, key, tmpstr);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set %s", key);
+ GF_FREE (tmpstr);
+ goto out;
+ }
+ }
+ }
+
+ ret = 0;
+out:
+
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
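+
+/* For reference, the per-volume keys aggregated above use a 1-based volume
+ * index i and a 0-based brick index j:
+ *
+ * vol<i>_brickcount - number of bricks contributed for volume i
+ * vol<i>.brickdir<j> - snap brick directory
+ * vol<i>.brick_snapdevice<j> - lvm snapshot device path
+ * vol<i>.brick<j>.order - position of the brick in the volume
+ */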
+
+int
+glusterd_snap_pre_validate_use_rsp_dict (dict_t *dst, dict_t *src)
+{
+ int ret = -1;
+ int32_t snap_command = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ if (!dst || !src) {
+ gf_log (this->name, GF_LOG_ERROR, "Source or Destination "
+ "dict is empty.");
+ goto out;
+ }
+
+ ret = dict_get_int32 (dst, "type", &snap_command);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "unable to get the type of "
+ "the snapshot command");
+ goto out;
+ }
+
+ switch (snap_command) {
+ case GF_SNAP_OPTION_TYPE_CREATE:
+ ret = glusterd_snap_create_pre_val_use_rsp_dict (dst, src);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to use "
+ "rsp dict");
+ goto out;
+ }
+ break;
+ default:
+ break;
+ }
+
+ ret = 0;
+out:
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int
+glusterd_snapshot_create_prevalidate (dict_t *dict, char **op_errstr,
+ dict_t *rsp_dict)
+{
+ char *volname = NULL;
+ char *snapname = NULL;
+ char *device = NULL;
+ char *tmpstr = NULL;
+ char *brick_dir = NULL;
+ char snap_brick_dir[PATH_MAX] = "";
+ char *mnt_pt = NULL;
+ char key[PATH_MAX] = "";
+ char snap_mount[PATH_MAX] = "";
+ char snap_volname[64] = "";
+ char err_str[PATH_MAX] = "";
+ int ret = -1;
+ int64_t i = 0;
+ int64_t volcount = 0;
+ int64_t brick_count = 0;
+ int64_t brick_order = 0;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
+ uuid_t *snap_volid = NULL;
+ gf_loglevel_t loglevel = GF_LOG_ERROR;
+ glusterd_conf_t *conf = NULL;
+ int64_t effective_max_limit = 0;
+
+ this = THIS;
+ GF_ASSERT (op_errstr);
+ conf = this->private;
+ GF_ASSERT (conf);
+
+ ret = dict_get_int64 (dict, "volcount", &volcount);
+ if (ret) {
+ snprintf (err_str, sizeof (err_str), "Failed to "
+ "get the volume count");
+ goto out;
+ }
+ if (volcount <= 0) {
+ snprintf (err_str, sizeof (err_str), "Invalid volume count %"PRId64
+ " supplied", volcount);
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ snprintf (err_str, sizeof (err_str), "Failed to get snapname");
+ goto out;
+ }
+
+ if (glusterd_find_snap_by_name (snapname)) {
+ ret = -1;
+ snprintf (err_str, sizeof (err_str), "Snap %s already exists",
+ snapname);
+ goto out;
+ }
+
+ for (i = 1; i <= volcount; i++) {
+ snprintf (key, sizeof (key), "volname%"PRId64, i);
+ ret = dict_get_str (dict, key, &volname);
+ if (ret) {
+ snprintf (err_str, sizeof (err_str),
+ "failed to get volume name");
+ goto out;
+ }
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ snprintf (err_str, sizeof (err_str),
+ "Volume (%s) does not exist ", volname);
+ goto out;
+ }
+
+ ret = -1;
+ if (!glusterd_is_volume_started (volinfo)) {
+ snprintf (err_str, sizeof (err_str), "volume %s is "
+ "not started", volinfo->volname);
+ loglevel = GF_LOG_WARNING;
+ goto out;
+ }
+ if (glusterd_is_defrag_on (volinfo)) {
+ snprintf (err_str, sizeof (err_str),
+ "rebalance process is running for the "
+ "volume %s", volname);
+ loglevel = GF_LOG_WARNING;
+ goto out;
+ }
+ /* TODO: Also check whether geo replication is running */
+
+ if (volinfo->is_snap_volume == _gf_true) {
+ snprintf (err_str, sizeof (err_str),
+ "Volume %s is a snap volume", volname);
+ loglevel = GF_LOG_WARNING;
+ goto out;
+ }
+
+ if (volinfo->snap_max_hard_limit < conf->snap_max_hard_limit)
+ effective_max_limit = volinfo->snap_max_hard_limit;
+ else
+ effective_max_limit = conf->snap_max_hard_limit;
+
+ if (volinfo->snap_count >= effective_max_limit) {
+ snprintf (err_str, sizeof (err_str),
+ "The number of existing snaps has reached "
+ "the effective maximum limit of %"PRIu64" ,"
+ "for the volume %s", effective_max_limit,
+ volname);
+ loglevel = GF_LOG_WARNING;
+ goto out;
+ }
+
+ snprintf (key, sizeof(key) - 1, "vol%"PRId64"_volid", i);
+ ret = dict_get_bin (dict, key, (void **)&snap_volid);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to fetch snap_volid");
+ goto out;
+ }
+
+ /* The snap volume uuid is used as the lvm snapshot name.
+ This avoids restrictions on snapshot names
+ provided by the user */
+ GLUSTERD_GET_UUID_NOHYPHEN (snap_volname, *snap_volid);
+
+ brick_count = 0;
+ brick_order = 0;
+ /* Adding snap bricks mount paths to the dict */
+ list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+ if (uuid_compare (brickinfo->uuid, MY_UUID)) {
+ brick_order++;
+ continue;
+ }
+
+ if (!glusterd_is_brick_started (brickinfo)) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "brick %s:%s is not started",
+ brickinfo->hostname,
+ brickinfo->path);
+ brick_order++;
+ brick_count++;
+ continue;
+ }
+
+ device = glusterd_get_brick_mount_details (brickinfo);
+ if (!device) {
+ snprintf (err_str, sizeof (err_str),
+ "getting device name for the brick "
+ "%s:%s failed", brickinfo->hostname,
+ brickinfo->path);
+ ret = -1;
+ goto out;
+ }
+
+ device = glusterd_build_snap_device_path (device,
+ snap_volname);
+ if (!device) {
+ snprintf (err_str, sizeof (err_str),
+ "cannot copy the snapshot device "
+ "name (volname: %s, snapname: %s)",
+ volinfo->volname, snapname);
+ loglevel = GF_LOG_WARNING;
+ ret = -1;
+ goto out;
+ }
+
+ snprintf (key, sizeof(key),
+ "vol%"PRId64".brick_snapdevice%"PRId64, i,
+ brick_count);
+ ret = dict_set_dynstr (rsp_dict, key, device);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set %s", key);
+ GF_FREE (device);
+ goto out;
+ }
+
+ ret = glusterd_get_brick_root (brickinfo->path,
+ &mnt_pt);
+ if (ret) {
+ snprintf (err_str, sizeof (err_str),
+ "could not get the root of the brick path %s",
+ brickinfo->path);
+ loglevel = GF_LOG_WARNING;
+ goto out;
+ }
+ if (strncmp (brickinfo->path, mnt_pt, strlen(mnt_pt))) {
+ snprintf (err_str, sizeof (err_str),
+ "brick: %s brick mount: %s",
+ brickinfo->path, mnt_pt);
+ loglevel = GF_LOG_WARNING;
+ goto out;
+ }
+
+ brick_dir = &brickinfo->path[strlen (mnt_pt)];
+ brick_dir++;
+
+ snprintf (snap_brick_dir, sizeof (snap_brick_dir),
+ "/%s", brick_dir);
+
+ tmpstr = gf_strdup (snap_brick_dir);
+ if (!tmpstr) {
+ ret = -1;
+ goto out;
+ }
+ snprintf (key, sizeof(key), "vol%"PRId64".brickdir%"PRId64, i,
+ brick_count);
+ ret = dict_set_dynstr (rsp_dict, key, tmpstr);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set %s", snap_mount);
+ goto out;
+ }
+ tmpstr = NULL;
+
+ snprintf (key, sizeof(key) - 1, "vol%"PRId64".brick%"PRId64".order",
+ i, brick_count);
+ ret = dict_set_int64 (rsp_dict, key, brick_order);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set %s", key);
+ goto out;
+ }
+
+ brick_count++;
+ brick_order++;
+ }
+ snprintf (key, sizeof(key) - 1, "vol%"PRId64"_brickcount", i);
+ ret = dict_set_int64 (rsp_dict, key, brick_count);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set %s",
+ key);
+ goto out;
+ }
+ }
+
+ ret = dict_set_int64 (rsp_dict, "volcount", volcount);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set volcount");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (ret)
+ GF_FREE (tmpstr);
+
+ if (ret && err_str[0] != '\0') {
+ gf_log (this->name, loglevel, "%s", err_str);
+ *op_errstr = gf_strdup (err_str);
+ }
+
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+glusterd_snap_t*
+glusterd_new_snap_object()
+{
+ glusterd_snap_t *snap = NULL;
+
+ snap = GF_CALLOC (1, sizeof (*snap), gf_gld_mt_snap_t);
+
+ if (snap) {
+ if (LOCK_INIT (&snap->lock)) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Failed initiating"
+ " snap lock");
+ GF_FREE (snap);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD (&snap->snap_list);
+ INIT_LIST_HEAD (&snap->volumes);
+ snap->snapname[0] = 0;
+ snap->snap_status = GD_SNAP_STATUS_INIT;
+ }
+
+ return snap;
+
+}
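+
+/* Illustrative sketch (not part of this patch): a caller creating a new
+ * snapshot object would typically fill in the name and id before linking
+ * it into the snapshot list; the strncpy bound below is an assumption.
+ *
+ * glusterd_snap_t *snap = glusterd_new_snap_object ();
+ * if (snap) {
+ * strncpy (snap->snapname, snapname, sizeof (snap->snapname) - 1);
+ * uuid_copy (snap->snap_id, snap_id);
+ * }
+ */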
+
+/* Function glusterd_list_add_snapvol adds the volinfo object (snapshot volume)
+ to the snapshot object list and to the parent volume list */
+int32_t
+glusterd_list_add_snapvol (glusterd_volinfo_t *origin_vol,
+ glusterd_volinfo_t *snap_vol)
+{
+ int ret = -1;
+ glusterd_snap_t *snap = NULL;
+
+ GF_VALIDATE_OR_GOTO ("glusterd", origin_vol, out);
+ GF_VALIDATE_OR_GOTO ("glusterd", snap_vol, out);
+
+ snap = snap_vol->snapshot;
+ GF_ASSERT (snap);
+
+ list_add_tail (&snap_vol->vol_list, &snap->volumes);
+ LOCK (&origin_vol->lock);
+ {
+ list_add_order (&snap_vol->snapvol_list,
+ &origin_vol->snap_volumes,
+ glusterd_compare_snap_vol_time);
+ origin_vol->snap_count++;
+ }
+ UNLOCK (&origin_vol->lock);
+
+ gf_log (THIS->name, GF_LOG_DEBUG, "Snap %s added to the list",
+ snap->snapname);
+ ret = 0;
+ out:
+ return ret;
+}
+
+glusterd_snap_t*
+glusterd_find_snap_by_name (char *snapname)
+{
+ glusterd_snap_t *snap = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ priv = THIS->private;
+ GF_ASSERT (priv);
+ GF_ASSERT (snapname);
+
+ list_for_each_entry (snap, &priv->snapshots, snap_list) {
+ if (!strcmp (snap->snapname, snapname)) {
+ gf_log (THIS->name, GF_LOG_DEBUG, "Found "
+ "snap %s (%s)", snap->snapname,
+ uuid_utoa (snap->snap_id));
+ goto out;
+ }
+ }
+ snap = NULL;
+out:
+ return snap;
+}
+
+glusterd_snap_t*
+glusterd_find_snap_by_id (uuid_t snap_id)
+{
+ glusterd_snap_t *snap = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ priv = THIS->private;
+ GF_ASSERT (priv);
+
+ if (uuid_is_null(snap_id))
+ goto out;
+
+ list_for_each_entry (snap, &priv->snapshots, snap_list) {
+ if (!uuid_compare (snap->snap_id, snap_id)) {
+ gf_log (THIS->name, GF_LOG_DEBUG, "Found "
+ "snap %s (%s)", snap->snapname,
+ uuid_utoa (snap->snap_id));
+ goto out;
+ }
+ }
+ snap = NULL;
+out:
+ return snap;
+}
+
+int
+glusterd_do_lvm_snapshot_remove (glusterd_volinfo_t *snap_vol,
+ glusterd_brickinfo_t *brickinfo,
+ const char *mount_pt, const char *snap_device)
+{
+ int ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ runner_t runner = {0,};
+ char msg[1024] = {0, };
+ char pidfile[PATH_MAX] = {0, };
+ pid_t pid = -1;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ if (!brickinfo) {
+ gf_log (this->name, GF_LOG_ERROR, "brickinfo NULL");
+ goto out;
+ }
+ GF_ASSERT (snap_vol);
+ GF_ASSERT (mount_pt);
+ GF_ASSERT (snap_device);
+
+ GLUSTERD_GET_BRICK_PIDFILE (pidfile, snap_vol, brickinfo, priv);
+ if (gf_is_service_running (pidfile, &pid)) {
+ ret = kill (pid, SIGKILL);
+ if (ret && errno != ESRCH) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to kill pid "
+ "%d reason : %s", pid, strerror(errno));
+ goto out;
+ }
+ }
+
+ runinit (&runner);
+ snprintf (msg, sizeof (msg), "umount the snapshot mounted path %s",
+ mount_pt);
+ runner_add_args (&runner, "umount", mount_pt, NULL);
+ runner_log (&runner, "", GF_LOG_DEBUG, msg);
+
+ /* We need not do synclock_unlock => runner_run => synclock_lock here.
+ That dance is needed only when runner_run starts a glusterfs process:
+ the started process may want to communicate with glusterd, and
+ glusterd cannot respond while it holds the big lock. In that case we
+ unlock, run the glusterfs process (letting it talk to glusterd), and
+ lock again. Since umount is not a glusterfs command, unlocking and
+ relocking is not needed (see the illustrative sketch after this
+ function).
+ */
+ ret = runner_run (&runner);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "unmounting the "
+ "path %s (brick: %s) failed (%s)", mount_pt,
+ brickinfo->path, strerror (errno));
+ goto out;
+ }
+
+ runinit (&runner);
+ snprintf (msg, sizeof(msg), "remove snapshot of the brick %s:%s, "
+ "device: %s", brickinfo->hostname, brickinfo->path,
+ snap_device);
+ runner_add_args (&runner, LVM_REMOVE, "-f", snap_device, NULL);
+ runner_log (&runner, "", GF_LOG_DEBUG, msg);
+
+ ret = runner_run (&runner);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "removing snapshot of the "
+ "brick (%s:%s) of device %s failed",
+ brickinfo->hostname, brickinfo->path, snap_device);
+ goto out;
+ }
+
+out:
+ return ret;
+}
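+
+/* Illustrative sketch (not part of this patch): the unlock => run => lock
+ * pattern referred to above, used when runner_run() starts a glusterfs
+ * process that must be able to call back into glusterd; the big_lock name
+ * follows glusterd's conf structure and is an assumption here.
+ *
+ * synclock_unlock (&priv->big_lock);
+ * ret = runner_run (&runner);
+ * synclock_lock (&priv->big_lock);
+ */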
+
+int32_t
+glusterd_lvm_snapshot_remove (dict_t *rsp_dict, glusterd_volinfo_t *snap_vol)
+{
+ char *mnt_pt = NULL;
+ struct mntent *entry = NULL;
+ int32_t brick_count = -1;
+ int32_t ret = -1;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ xlator_t *this = NULL;
+ FILE *mtab = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (snap_vol);
+
+ if ((snap_vol->is_snap_volume == _gf_false) &&
+ (uuid_is_null (snap_vol->restored_from_snap))) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Not a snap volume, or a restored snap volume.");
+ ret = 0;
+ goto out;
+ }
+
+ brick_count = -1;
+ list_for_each_entry (brickinfo, &snap_vol->bricks, brick_list) {
+ brick_count++;
+ if (uuid_compare (brickinfo->uuid, MY_UUID)) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "%s:%s belongs to a different node",
+ brickinfo->hostname, brickinfo->path);
+ continue;
+ }
+
+ if (brickinfo->snap_status == -1) {
+ gf_log (this->name, GF_LOG_INFO,
+ "snapshot was pending. lvm not present "
+ "for brick %s:%s of the snap %s.",
+ brickinfo->hostname, brickinfo->path,
+ snap_vol->snapshot->snapname);
+
+ if (rsp_dict &&
+ (snap_vol->is_snap_volume == _gf_true)) {
+ /* Adding missed delete to the dict */
+ ret = glusterd_add_missed_snaps_to_dict
+ (rsp_dict,
+ snap_vol,
+ brickinfo,
+ brick_count + 1,
+ GF_SNAP_OPTION_TYPE_DELETE);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to add missed snapshot "
+ "info for %s:%s in the "
+ "rsp_dict", brickinfo->hostname,
+ brickinfo->path);
+ goto out;
+ }
+ }
+
+ continue;
+ }
+
+ ret = glusterd_get_brick_root (brickinfo->path, &mnt_pt);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "getting the root "
+ "of the brick for volume %s (snap %s) failed ",
+ snap_vol->volname, snap_vol->snapshot->snapname);
+ goto out;
+ }
+
+ entry = glusterd_get_mnt_entry_info (mnt_pt, mtab);
+ if (!entry) {
+ gf_log (this->name, GF_LOG_WARNING, "getting the mount"
+ " entry for the brick %s:%s of the snap %s "
+ "(volume: %s) failed", brickinfo->hostname,
+ brickinfo->path, snap_vol->snapshot->snapname,
+ snap_vol->volname);
+ ret = -1;
+ goto out;
+ }
+ ret = glusterd_do_lvm_snapshot_remove (snap_vol, brickinfo,
+ mnt_pt,
+ entry->mnt_fsname);
+ if (mtab)
+ endmntent (mtab);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to "
+ "remove the snapshot %s (%s)",
+ brickinfo->path, entry->mnt_fsname);
+ goto out;
+ }
+
+ }
+
+ ret = 0;
+out:
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+
+int32_t
+glusterd_snap_volume_remove (dict_t *rsp_dict,
+ glusterd_volinfo_t *snap_vol,
+ gf_boolean_t remove_lvm,
+ gf_boolean_t force)
+{
+ int ret = -1;
+ int save_ret = 0;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_volinfo_t *origin_vol = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (snap_vol);
+
+ if (!snap_vol) {
+ gf_log(this->name, GF_LOG_WARNING, "snap_vol in NULL");
+ ret = -1;
+ goto out;
+ }
+
+ list_for_each_entry (brickinfo, &snap_vol->bricks, brick_list) {
+ if (uuid_compare (brickinfo->uuid, MY_UUID))
+ continue;
+
+ ret = glusterd_brick_stop (snap_vol, brickinfo, _gf_false);
+ if (ret) {
+ gf_log(this->name, GF_LOG_WARNING, "Failed to stop "
+ "brick for volume %s", snap_vol->volname);
+ save_ret = ret;
+
+ /* Continue to cleaning up the snap in case of error
+ if force flag is enabled */
+ if (!force)
+ goto out;
+ }
+ }
+
+ /* Only remove the backend lvm when required */
+ if (remove_lvm) {
+ ret = glusterd_lvm_snapshot_remove (rsp_dict, snap_vol);
+ if (ret) {
+ gf_log(this->name, GF_LOG_WARNING, "Failed to remove "
+ "lvm snapshot volume %s", snap_vol->volname);
+ save_ret = ret;
+ if (!force)
+ goto out;
+ }
+ }
+
+ ret = glusterd_store_delete_volume (snap_vol);
+ if (ret) {
+ gf_log(this->name, GF_LOG_WARNING, "Failed to remove volume %s "
+ "from store", snap_vol->volname);
+ save_ret = ret;
+ if (!force)
+ goto out;
+ }
+
+ if (!list_empty(&snap_vol->snapvol_list)) {
+ ret = glusterd_volinfo_find (snap_vol->parent_volname,
+ &origin_vol);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get "
+ "parent volinfo %s for volume %s",
+ snap_vol->parent_volname, snap_vol->volname);
+ save_ret = ret;
+ if (!force)
+ goto out;
+ }
+ origin_vol->snap_count--;
+ }
+
+ ret = glusterd_volinfo_delete (snap_vol);
+ if (ret) {
+ gf_log(this->name, GF_LOG_WARNING, "Failed to remove volinfo "
+ "%s ", snap_vol->volname);
+ save_ret = ret;
+ if (!force)
+ goto out;
+ }
+
+ if (save_ret)
+ ret = save_ret;
+out:
+ gf_log (this->name, GF_LOG_TRACE, "returning %d", ret);
+ return ret;
+}
+
+int32_t
+glusterd_snapobject_delete (glusterd_snap_t *snap)
+{
+ if (snap == NULL) {
+ gf_log(THIS->name, GF_LOG_WARNING, "snap is NULL");
+ return -1;
+ }
+
+ list_del_init (&snap->snap_list);
+ list_del_init (&snap->volumes);
+ if (LOCK_DESTROY(&snap->lock))
+ gf_log (THIS->name, GF_LOG_WARNING, "Failed destroying lock"
+ "of snap %s", snap->snapname);
+
+ GF_FREE (snap->description);
+ GF_FREE (snap);
+
+ return 0;
+}
+
+int32_t
+glusterd_snap_remove (dict_t *rsp_dict,
+ glusterd_snap_t *snap,
+ gf_boolean_t remove_lvm,
+ gf_boolean_t force)
+{
+ int ret = -1;
+ int save_ret = 0;
+ glusterd_volinfo_t *snap_vol = NULL;
+ glusterd_volinfo_t *tmp = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (snap);
+
+ if (!snap) {
+ gf_log(this->name, GF_LOG_WARNING, "snap is NULL");
+ ret = -1;
+ goto out;
+ }
+
+ list_for_each_entry_safe (snap_vol, tmp, &snap->volumes, vol_list) {
+ ret = glusterd_snap_volume_remove (rsp_dict, snap_vol,
+ remove_lvm, force);
+ if (ret) {
+ gf_log(this->name, GF_LOG_WARNING, "Failed to remove "
+ "volinfo %s for snap %s", snap_vol->volname,
+ snap->snapname);
+ save_ret = ret;
+
+ /* Continue to cleaning up the snap in case of error
+ if force flag is enabled */
+ if (!force)
+ goto out;
+ }
+ }
+
+ ret = glusterd_store_delete_snap (snap);
+ if (ret) {
+ gf_log(this->name, GF_LOG_WARNING, "Failed to remove snap %s "
+ "from store", snap->snapname);
+ save_ret = ret;
+ if (!force)
+ goto out;
+ }
+
+ ret = glusterd_snapobject_delete (snap);
+ if (ret)
+ gf_log (this->name, GF_LOG_WARNING, "Failed to delete "
+ "snap object %s", snap->snapname);
+
+ if (save_ret)
+ ret = save_ret;
+out:
+ gf_log (THIS->name, GF_LOG_TRACE, "returning %d", ret);
+ return ret;
+}
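+
+/* Illustrative sketch (not part of this patch): deleting a snapshot along
+ * with its backend lvm, with force set so that cleanup continues past
+ * individual failures (arguments: rsp_dict, snap, remove_lvm, force):
+ *
+ * ret = glusterd_snap_remove (rsp_dict, snap, _gf_true, _gf_true);
+ */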
+
+static int
+glusterd_snapshot_get_snapvol_detail (dict_t *dict,
+ glusterd_volinfo_t *snap_vol,
+ char *keyprefix, int detail)
+{
+ int ret = -1;
+ int snap_limit = 0;
+ char key[PATH_MAX] = {0,};
+ char *value = NULL;
+ glusterd_volinfo_t *origin_vol = NULL;
+ glusterd_conf_t *conf = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ conf = this->private;
+ GF_ASSERT (conf);
+
+ GF_ASSERT (dict);
+ GF_ASSERT (snap_vol);
+ GF_ASSERT (keyprefix);
+
+ /* Volume Name */
+ value = gf_strdup (snap_vol->volname);
+ if (!value)
+ goto out;
+
+ snprintf (key, sizeof (key), "%s.volname", keyprefix);
+ ret = dict_set_dynstr (dict, key, value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set "
+ "volume name in dictionary: %s", key);
+ goto out;
+ }
+
+ /* Volume ID */
+ value = gf_strdup (uuid_utoa (snap_vol->volume_id));
+ if (NULL == value) {
+ ret = -1;
+ goto out;
+ }
+
+ snprintf (key, sizeof (key), "%s.vol-id", keyprefix);
+ ret = dict_set_dynstr (dict, key, value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set "
+ "volume id in dictionary: %s", key);
+ goto out;
+ }
+ value = NULL;
+
+ /* volume status */
+ snprintf (key, sizeof (key), "%s.vol-status", keyprefix);
+ switch (snap_vol->status) {
+ case GLUSTERD_STATUS_STARTED:
+ ret = dict_set_str (dict, key, "Started");
+ break;
+ case GLUSTERD_STATUS_STOPPED:
+ ret = dict_set_str (dict, key, "Stopped");
+ break;
+ case GD_SNAP_STATUS_NONE:
+ ret = dict_set_str (dict, key, "None");
+ break;
+ default:
+ gf_log (this->name, GF_LOG_ERROR, "Invalid volume status");
+ ret = -1;
+ goto out;
+ }
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set volume status"
+ " in dictionary: %s", key);
+ goto out;
+ }
+
+
+ ret = glusterd_volinfo_find (snap_vol->parent_volname, &origin_vol);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to get the parent "
+ "volinfo for the volume %s", snap_vol->volname);
+ goto out;
+ }
+
+ /* Snaps available */
+ if (conf->snap_max_hard_limit < origin_vol->snap_max_hard_limit) {
+ snap_limit = conf->snap_max_hard_limit;
+ gf_log(this->name, GF_LOG_DEBUG, "system snap-max-hard-limit is"
+ " lesser than volume snap-max-hard-limit, "
+ "snap-max-hard-limit value is set to %d", snap_limit);
+ } else {
+ snap_limit = origin_vol->snap_max_hard_limit;
+ gf_log(this->name, GF_LOG_DEBUG, "volume snap-max-hard-limit is"
+ " lesser than system snap-max-hard-limit, "
+ "snap-max-hard-limit value is set to %d", snap_limit);
+ }
+
+ snprintf (key, sizeof (key), "%s.snaps-available", keyprefix);
+ if (snap_limit > origin_vol->snap_count)
+ ret = dict_set_int32 (dict, key,
+ snap_limit - origin_vol->snap_count);
+ else
+ ret = dict_set_int32 (dict, key, 0);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set available snaps");
+ goto out;
+ }
+
+ snprintf (key, sizeof (key), "%s.snapcount", keyprefix);
+ ret = dict_set_int32 (dict, key, origin_vol->snap_count);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Could not save snapcount");
+ goto out;
+ }
+
+ if (!detail)
+ goto out;
+
+ /* Parent volume name */
+ value = gf_strdup (snap_vol->parent_volname);
+ if (!value)
+ goto out;
+
+ snprintf (key, sizeof (key), "%s.origin-volname", keyprefix);
+ ret = dict_set_dynstr (dict, key, value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set parent "
+ "volume name in dictionary: %s", key);
+ goto out;
+ }
+ value = NULL;
+
+ ret = 0;
+out:
+ if (value)
+ GF_FREE (value);
+
+ return ret;
+}
+
+static int
+glusterd_snapshot_get_snap_detail (dict_t *dict, glusterd_snap_t *snap,
+ char *keyprefix, glusterd_volinfo_t *volinfo)
+{
+ int ret = -1;
+ int volcount = 0;
+ char key[PATH_MAX] = {0,};
+ char *value = NULL;
+ char *timestr = NULL;
+ struct tm *tmptr = NULL;
+ glusterd_volinfo_t *snap_vol = NULL;
+ glusterd_volinfo_t *tmp_vol = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+
+ GF_ASSERT (dict);
+ GF_ASSERT (snap);
+ GF_ASSERT (keyprefix);
+
+ /* Snap Name */
+ value = gf_strdup (snap->snapname);
+ if (!value)
+ goto out;
+
+ snprintf (key, sizeof (key), "%s.snapname", keyprefix);
+ ret = dict_set_dynstr (dict, key, value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set "
+ "snap name in dictionary");
+ goto out;
+ }
+
+ /* Snap ID */
+ value = gf_strdup (uuid_utoa (snap->snap_id));
+ if (NULL == value) {
+ ret = -1;
+ goto out;
+ }
+
+ snprintf (key, sizeof (key), "%s.snap-id", keyprefix);
+ ret = dict_set_dynstr (dict, key, value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set "
+ "snap id in dictionary");
+ goto out;
+ }
+ value = NULL;
+
+ tmptr = localtime (&(snap->time_stamp));
+ if (NULL == tmptr) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to convert "
+ "time_t to *tm");
+ ret = -1;
+ goto out;
+ }
+
+ timestr = GF_CALLOC (1, PATH_MAX, gf_gld_mt_char);
+ if (NULL == timestr) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = strftime (timestr, PATH_MAX, "%Y-%m-%d %H:%M:%S", tmptr);
+ if (0 == ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to convert time_t "
+ "to string");
+ ret = -1;
+ goto out;
+ }
+
+ snprintf (key, sizeof (key), "%s.snap-time", keyprefix);
+ ret = dict_set_dynstr (dict, key, timestr);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set "
+ "snap time stamp in dictionary");
+ goto out;
+ }
+ timestr = NULL;
+
+ /* If snap description is provided then add that into dictionary */
+ if (NULL != snap->description) {
+ value = gf_strdup (snap->description);
+ if (NULL == value) {
+ ret = -1;
+ goto out;
+ }
+
+ snprintf (key, sizeof (key), "%s.snap-desc", keyprefix);
+ ret = dict_set_dynstr (dict, key, value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set "
+ "snap description in dictionary");
+ goto out;
+ }
+ value = NULL;
+ }
+
+ snprintf (key, sizeof (key), "%s.snap-status", keyprefix);
+ switch (snap->snap_status) {
+ case GD_SNAP_STATUS_INIT:
+ ret = dict_set_str (dict, key, "Init");
+ break;
+ case GD_SNAP_STATUS_IN_USE:
+ ret = dict_set_str (dict, key, "In-use");
+ break;
+ case GD_SNAP_STATUS_DECOMMISSION:
+ ret = dict_set_str (dict, key, "Decommisioned");
+ break;
+ case GD_SNAP_STATUS_RESTORED:
+ ret = dict_set_str (dict, key, "Restored");
+ break;
+ case GD_SNAP_STATUS_NONE:
+ ret = dict_set_str (dict, key, "None");
+ break;
+ default:
+ gf_log (this->name, GF_LOG_ERROR, "Invalid snap status");
+ ret = -1;
+ goto out;
+ }
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set snap status "
+ "in dictionary");
+ goto out;
+ }
+
+ if (volinfo) {
+ volcount = 1;
+ snprintf (key, sizeof (key), "%s.vol%d", keyprefix, volcount);
+ ret = glusterd_snapshot_get_snapvol_detail (dict,
+ volinfo, key, 0);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to "
+ "get volume detail %s for snap %s",
+ snap_vol->volname, snap->snapname);
+ goto out;
+ }
+ goto done;
+ }
+
+ list_for_each_entry_safe (snap_vol, tmp_vol, &snap->volumes, vol_list) {
+ volcount++;
+ snprintf (key, sizeof (key), "%s.vol%d", keyprefix, volcount);
+ ret = glusterd_snapshot_get_snapvol_detail (dict,
+ snap_vol, key, 1);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to "
+ "get volume detail %s for snap %s",
+ snap_vol->volname, snap->snapname);
+ goto out;
+ }
+ }
+
+done:
+ snprintf (key, sizeof (key), "%s.vol-count", keyprefix);
+ ret = dict_set_int32 (dict, key, volcount);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set %s",
+ key);
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (value)
+ GF_FREE (value);
+
+ if (timestr)
+ GF_FREE(timestr);
+
+ return ret;
+}
+
+static int
+glusterd_snapshot_get_all_snap_info (dict_t *dict)
+{
+ int ret = -1;
+ int snapcount = 0;
+ char key[PATH_MAX] = {0,};
+ glusterd_snap_t *snap = NULL;
+ glusterd_snap_t *tmp_snap = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ /* General parameter validation */
+ GF_ASSERT (dict);
+
+ list_for_each_entry_safe (snap, tmp_snap, &priv->snapshots, snap_list) {
+ snapcount++;
+ snprintf (key, sizeof (key), "snap%d", snapcount);
+ ret = glusterd_snapshot_get_snap_detail (dict, snap, key, NULL);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get "
+ "snapdetail for snap %s", snap->snapname);
+ goto out;
+ }
+ }
+
+ ret = dict_set_int32 (dict, "snap-count", snapcount);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set snapcount");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+int
+glusterd_snapshot_get_info_by_volume (dict_t *dict, char *volname,
+ char *err_str, size_t len)
+{
+ int ret = -1;
+ int snapcount = 0;
+ int snap_limit = 0;
+ char *value = NULL;
+ char key[PATH_MAX] = "";
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_volinfo_t *snap_vol = NULL;
+ glusterd_volinfo_t *tmp_vol = NULL;
+ glusterd_conf_t *conf = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ conf = this->private;
+ GF_ASSERT (conf);
+
+ GF_ASSERT (dict);
+ GF_ASSERT (volname);
+
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ snprintf (err_str, len, "Volume (%s) does not exist", volname);
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ goto out;
+ }
+
+ /* Snaps available */
+ if (conf->snap_max_hard_limit < volinfo->snap_max_hard_limit) {
+ snap_limit = conf->snap_max_hard_limit;
+ gf_log(this->name, GF_LOG_DEBUG, "system snap-max-hard-limit is"
+ " lesser than volume snap-max-hard-limit, "
+ "snap-max-hard-limit value is set to %d", snap_limit);
+ } else {
+ snap_limit = volinfo->snap_max_hard_limit;
+ gf_log(this->name, GF_LOG_DEBUG, "volume snap-max-hard-limit is"
+ " lesser than system snap-max-hard-limit, "
+ "snap-max-hard-limit value is set to %d", snap_limit);
+ }
+
+ if (snap_limit > volinfo->snap_count)
+ ret = dict_set_int32 (dict, "snaps-available",
+ snap_limit - volinfo->snap_count);
+ else
+ ret = dict_set_int32 (dict, "snaps-available", 0);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set available snaps");
+ goto out;
+ }
+
+ /* Origin volume name */
+ value = gf_strdup (volinfo->volname);
+ if (!value)
+ goto out;
+
+ ret = dict_set_dynstr (dict, "origin-volname", value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set parent "
+ "volume name in dictionary: %s", key);
+ goto out;
+ }
+ value = NULL;
+
+ list_for_each_entry_safe (snap_vol, tmp_vol, &volinfo->snap_volumes,
+ snapvol_list) {
+ snapcount++;
+ snprintf (key, sizeof (key), "snap%d", snapcount);
+ ret = glusterd_snapshot_get_snap_detail (dict,
+ snap_vol->snapshot,
+ key, snap_vol);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get "
+ "snapdetail for snap %s",
+ snap_vol->snapshot->snapname);
+ goto out;
+ }
+ }
+ ret = dict_set_int32 (dict, "snap-count", snapcount);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set snapcount");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (value)
+ GF_FREE (value);
+
+ return ret;
+}
+
+/* This function will be called from RPC handler routine.
+ * This function is responsible for getting the requested
+ * snapshot info into the dictionary.
+ *
+ * @param req RPC request object. Required for sending a response back.
+ * @param op glusterd operation. Required for sending a response back.
+ * @param dict pointer to dictionary which will contain both
+ * request and response key-pair values.
+ * @return -1 on error and 0 on success
+ */
+int
+glusterd_handle_snapshot_info (rpcsvc_request_t *req, glusterd_op_t op,
+ dict_t *dict, char *err_str, size_t len)
+{
+ int ret = -1;
+ int8_t snap_driven = 1;
+ char *volname = NULL;
+ char *snapname = NULL;
+ glusterd_snap_t *snap = NULL;
+ xlator_t *this = NULL;
+ int32_t cmd = GF_SNAP_INFO_TYPE_ALL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ GF_VALIDATE_OR_GOTO (this->name, req, out);
+ GF_VALIDATE_OR_GOTO (this->name, dict, out);
+
+
+ ret = dict_get_int32 (dict, "cmd", &cmd);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get type "
+ "of snapshot info");
+ goto out;
+ }
+
+ switch (cmd) {
+ case GF_SNAP_INFO_TYPE_ALL:
+ {
+ ret = glusterd_snapshot_get_all_snap_info (dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get info of all snaps");
+ goto out;
+ }
+ break;
+ }
+
+ case GF_SNAP_INFO_TYPE_SNAP:
+ {
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get snap name");
+ goto out;
+ }
+
+ ret = dict_set_int32 (dict, "snap-count", 1);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set snapcount");
+ goto out;
+ }
+
+ snap = glusterd_find_snap_by_name (snapname);
+ if (!snap) {
+ snprintf (err_str, len,
+ "Snap (%s) does not exist", snapname);
+ gf_log (this->name, GF_LOG_ERROR,
+ "%s", err_str);
+ ret = -1;
+ goto out;
+ }
+ ret = glusterd_snapshot_get_snap_detail (dict, snap,
+ "snap1", NULL);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get snap detail of snap "
+ "%s", snap->snapname);
+ goto out;
+ }
+ break;
+ }
+
+ case GF_SNAP_INFO_TYPE_VOL:
+ {
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get volname");
+ goto out;
+ }
+ ret = glusterd_snapshot_get_info_by_volume (dict,
+ volname, err_str, len);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get volume info of volume "
+ "%s", volname);
+ goto out;
+ }
+ snap_driven = 0;
+ break;
+ }
+ }
+
+ ret = dict_set_int8 (dict, "snap-driven", snap_driven);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set snap-driven");
+ goto out;
+ }
+
+        /* If everything is successful then send the response back to cli.
+         * In case of failure the caller of this function will take care
+         * of the response */
+ ret = glusterd_op_send_cli_response (op, 0, 0, req, dict, err_str);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to send cli "
+ "response");
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+/* This function sets all the snapshot names in the dictionary */
+int
+glusterd_snapshot_get_all_snapnames (dict_t *dict)
+{
+ int ret = -1;
+ int snapcount = 0;
+ char *snapname = NULL;
+ char key[PATH_MAX] = {0,};
+ glusterd_snap_t *snap = NULL;
+ glusterd_snap_t *tmp_snap = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ priv = this->private;
+ GF_ASSERT (priv);
+ GF_ASSERT (dict);
+
+ list_for_each_entry_safe (snap, tmp_snap, &priv->snapshots, snap_list) {
+ snapcount++;
+ snapname = gf_strdup (snap->snapname);
+ if (!snapname) {
+ gf_log (this->name, GF_LOG_ERROR, "strdup failed");
+ ret = -1;
+ goto out;
+ }
+ snprintf (key, sizeof (key), "snapname%d", snapcount);
+ ret = dict_set_dynstr (dict, key, snapname);
+ if (ret) {
+ GF_FREE (snapname);
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set %s",
+ key);
+ goto out;
+ }
+ }
+
+ ret = dict_set_int32 (dict, "snap-count", snapcount);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set snapcount");
+ goto out;
+ }
+
+ ret = 0;
+out:
+
+ return ret;
+}
+
+/* This function sets all the snapshot names
+ under a given volume in the dictionary */
+int
+glusterd_snapshot_get_vol_snapnames (dict_t *dict, glusterd_volinfo_t *volinfo)
+{
+ int ret = -1;
+ int snapcount = 0;
+ char *snapname = NULL;
+ char key[PATH_MAX] = {0,};
+ glusterd_volinfo_t *snap_vol = NULL;
+ glusterd_volinfo_t *tmp_vol = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (dict);
+ GF_ASSERT (volinfo);
+
+ list_for_each_entry_safe (snap_vol, tmp_vol,
+ &volinfo->snap_volumes, snapvol_list) {
+ snapcount++;
+ snapname = gf_strdup (snap_vol->snapshot->snapname);
+ if (!snapname) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "strdup failed");
+ ret = -1;
+ goto out;
+ }
+ snprintf (key, sizeof (key), "snapname%d", snapcount);
+ ret = dict_set_dynstr (dict, key, snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to "
+ "set %s", key);
+ GF_FREE (snapname);
+ goto out;
+ }
+ }
+
+ ret = dict_set_int32 (dict, "snap-count", snapcount);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set snapcount");
+ goto out;
+ }
+
+ ret = 0;
+out:
+
+ return ret;
+}
+
+int
+glusterd_handle_snapshot_list (rpcsvc_request_t *req, glusterd_op_t op,
+ dict_t *dict, char *err_str, size_t len)
+{
+ int ret = -1;
+ char *volname = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+
+ GF_VALIDATE_OR_GOTO (this->name, req, out);
+ GF_VALIDATE_OR_GOTO (this->name, dict, out);
+
+ /* Ignore error for getting volname as it is optional */
+ ret = dict_get_str (dict, "volname", &volname);
+
+ if (NULL == volname) {
+ ret = glusterd_snapshot_get_all_snapnames (dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get snapshot list");
+ goto out;
+ }
+ } else {
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ snprintf (err_str, len,
+ "Volume (%s) does not exist", volname);
+ gf_log (this->name, GF_LOG_ERROR,
+ "%s", err_str);
+ goto out;
+ }
+
+ ret = glusterd_snapshot_get_vol_snapnames (dict, volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get snapshot list for volume %s",
+ volname);
+ goto out;
+ }
+ }
+
+        /* If everything is successful then send the response back to cli.
+           In case of failure the caller of this function will take care
+           of the response. */
+ ret = glusterd_op_send_cli_response (op, 0, 0, req, dict, err_str);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to send cli "
+ "response");
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+/* This is a snapshot create handler function. This function will be
+ * executed in the originator node. This function is responsible for
+ * calling mgmt_v3 framework to do the actual snap creation on all the bricks
+ *
+ * @param req RPC request object
+ * @param op gluster operation
+ * @param dict dictionary containing snapshot create request
+ * @param err_str In case of an err this string should be populated
+ * @param len length of err_str buffer
+ *
+ * @return Negative value on Failure and 0 in success
+ */
+int
+glusterd_handle_snapshot_create (rpcsvc_request_t *req, glusterd_op_t op,
+ dict_t *dict, char *err_str, size_t len)
+{
+ int ret = -1;
+ char *volname = NULL;
+ char *snapname = NULL;
+ int64_t volcount = 0;
+ xlator_t *this = NULL;
+ char key[PATH_MAX] = "";
+ char *username = NULL;
+ char *password = NULL;
+ uuid_t *uuid_ptr = NULL;
+ uuid_t tmp_uuid = {0};
+ int i = 0;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (dict);
+ GF_ASSERT (err_str);
+
+ ret = dict_get_int64 (dict, "volcount", &volcount);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to "
+ "get the volume count");
+ goto out;
+ }
+ if (volcount <= 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Invalid volume count %"PRId64
+ " supplied", volcount);
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to get the snapname");
+ goto out;
+ }
+
+ if (strlen(snapname) >= GLUSTERD_MAX_SNAP_NAME) {
+ snprintf (err_str, len, "snapname cannot exceed 255 "
+ "characters");
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ ret = -1;
+ goto out;
+ }
+
+ uuid_ptr = GF_CALLOC (1, sizeof(uuid_t), gf_common_mt_uuid_t);
+ if (!uuid_ptr) {
+ gf_log (this->name, GF_LOG_ERROR, "Out Of Memory");
+ ret = -1;
+ goto out;
+ }
+
+ uuid_generate (*uuid_ptr);
+ ret = dict_set_bin (dict, "snap-id", uuid_ptr, sizeof(uuid_t));
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to set snap-id");
+ GF_FREE (uuid_ptr);
+ goto out;
+ }
+ uuid_ptr = NULL;
+
+ ret = dict_set_int64 (dict, "snap-time", (int64_t)time(NULL));
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to set snap-time");
+ goto out;
+ }
+
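+        /* For each volume i (1..volcount) the loop below reads "volname<i>"
+         * and stores internally generated credentials and a snap volume id
+         * under "volume<i>_username", "volume<i>_password" and
+         * "vol<i>_volid"; glusterd_do_snap_vol later fetches these keys.
+         */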
+ for (i = 1; i <= volcount; i++) {
+ snprintf (key, sizeof (key), "volname%d", i);
+ ret = dict_get_str (dict, key, &volname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get volume name");
+ goto out;
+ }
+
+                /* generate internal username and password for the snap */
+ uuid_generate (tmp_uuid);
+ username = gf_strdup (uuid_utoa (tmp_uuid));
+ snprintf (key, sizeof(key), "volume%d_username", i);
+ ret = dict_set_dynstr (dict, key, username);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set snap "
+ "username for volume %s", volname);
+ GF_FREE (username);
+ goto out;
+ }
+
+ uuid_generate (tmp_uuid);
+ password = gf_strdup (uuid_utoa (tmp_uuid));
+ snprintf (key, sizeof(key), "volume%d_password", i);
+ ret = dict_set_dynstr (dict, key, password);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set snap "
+ "password for volume %s", volname);
+ GF_FREE (password);
+ goto out;
+ }
+
+ uuid_ptr = GF_CALLOC (1, sizeof(uuid_t), gf_common_mt_uuid_t);
+ if (!uuid_ptr) {
+ gf_log (this->name, GF_LOG_ERROR, "Out Of Memory");
+ ret = -1;
+ goto out;
+ }
+
+ snprintf (key, sizeof(key) - 1, "vol%d_volid", i);
+ uuid_generate (*uuid_ptr);
+ ret = dict_set_bin (dict, key, uuid_ptr, sizeof(uuid_t));
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set snap_volid");
+ GF_FREE (uuid_ptr);
+ goto out;
+ }
+ }
+
+ ret = glusterd_mgmt_v3_initiate_snap_phases (req, op, dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to initiate snap "
+ "phases");
+ }
+
+out:
+ return ret;
+}
+
+/* This is a snapshot status handler function. This function will be
+ * executed in the originator node. This function is responsible for
+ * calling mgmt v3 framework to get the actual snapshot status from
+ * all the bricks
+ *
+ * @param req RPC request object
+ * @param op gluster operation
+ * @param dict dictionary containing snapshot status request
+ * @param err_str In case of an err this string should be populated
+ * @param len length of err_str buffer
+ *
+ * @return 0 in case of success and -1 in case of failure
+ */
+int
+glusterd_handle_snapshot_status (rpcsvc_request_t *req, glusterd_op_t op,
+ dict_t *dict, char *err_str, size_t len)
+{
+ int ret = -1;
+ char *volname = NULL;
+ char *snapname = NULL;
+ char *buf = NULL;
+ glusterd_conf_t *conf = NULL;
+ xlator_t *this = NULL;
+ int32_t cmd = -1;
+ int i = 0;
+ char key[PATH_MAX] = "";
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_snap_t *snap = NULL;
+ glusterd_volinfo_t *snap_volinfo = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ conf = this->private;
+
+ GF_ASSERT (conf);
+ GF_ASSERT (req);
+ GF_ASSERT (dict);
+ GF_ASSERT (err_str);
+
+ ret = dict_get_int32 (dict, "cmd", &cmd);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Could not get status type");
+ goto out;
+ }
+ switch (cmd) {
+ case GF_SNAP_STATUS_TYPE_ALL:
+ {
+ /* IF we give "gluster snapshot status"
+ * then lock is held on all snaps.
+ * This is the place where necessary information
+ * (snapname and snapcount)is populated in dictionary
+ * for locking.
+ */
+                i = 1;
+ list_for_each_entry (snap, &conf->snapshots, snap_list)
+ {
+ snprintf (key, sizeof (key), "snapname%d", i);
+ buf = gf_strdup (snap->snapname);
+ if (!buf) {
+ ret = -1;
+ goto out;
+ }
+ ret = dict_set_dynstr (dict, key, buf);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not save snapname (%s) "
+ "in the dictionary",
+ snap->snapname);
+ GF_FREE (buf);
+ goto out;
+ }
+
+ buf = NULL;
+ i++;
+ }
+
+ ret = dict_set_int32 (dict, "snapcount", i - 1);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Could not "
+ "save snapcount in the dictionary");
+ goto out;
+ }
+ break;
+ }
+
+ case GF_SNAP_STATUS_TYPE_SNAP:
+ {
+ /* IF we give "gluster snapshot status <snapname>"
+ * then lock is held on single snap.
+ * This is the place where necessary information
+ * (snapname)is populated in dictionary
+ * for locking.
+ */
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to fetch snap name");
+ goto out;
+ }
+
+ snap = glusterd_find_snap_by_name (snapname);
+ if (!snap) {
+ snprintf (err_str, len, "Snap (%s)"
+ "does not exist", snapname);
+ gf_log(this->name, GF_LOG_ERROR,
+ "%s", err_str);
+ ret = -1;
+ goto out;
+ }
+ break;
+ }
+ case GF_SNAP_STATUS_TYPE_VOL:
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to fetch volname");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ snprintf (err_str, len, "Volume (%s) "
+ "does not exist", volname);
+ gf_log (this->name, GF_LOG_ERROR,
+ "%s", err_str);
+ goto out;
+ }
+
+ i = 1;
+ list_for_each_entry (snap_volinfo,
+ &volinfo->snap_volumes, snapvol_list) {
+ snprintf (key, sizeof (key), "snapname%d", i);
+
+ buf = gf_strdup
+ (snap_volinfo->snapshot->snapname);
+ if (!buf) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_dynstr (dict, key, buf);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not save snapname");
+ GF_FREE (buf);
+ goto out;
+ }
+
+ buf = NULL;
+ i++;
+ }
+
+ ret = dict_set_int32 (dict, "snapcount", i-1);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not save snapcount");
+ goto out;
+ }
+ break;
+ default:
+ {
+ gf_log (this->name, GF_LOG_ERROR, "Unknown type");
+ ret = -1;
+ goto out;
+ }
+ }
+
+ /* Volume lock is not necessary for snapshot status, hence
+ * turning it off
+ */
+ ret = dict_set_int8 (dict, "hold_vol_locks", 0);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Setting volume lock "
+ "flag failed");
+ goto out;
+ }
+
+ ret = glusterd_mgmt_v3_initiate_snap_phases (req, op, dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to initiate "
+ "snap phases");
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+
+/* This is a snapshot restore handler function. This function will be
+ * executed in the originator node. This function is responsible for
+ * calling mgmt_v3 framework to do the actual restore on all the bricks
+ *
+ * @param req RPC request object
+ * @param op gluster operation
+ * @param dict dictionary containing snapshot restore request
+ * @param err_str In case of an err this string should be populated
+ * @param len length of err_str buffer
+ *
+ * @return Negative value on Failure and 0 in success
+ */
+int
+glusterd_handle_snapshot_restore (rpcsvc_request_t *req, glusterd_op_t op,
+ dict_t *dict, char *err_str, size_t len)
+{
+ int ret = -1;
+ char *snapname = NULL;
+ char *buf = NULL;
+ glusterd_conf_t *conf = NULL;
+ xlator_t *this = NULL;
+ glusterd_snap_t *snap = NULL;
+ glusterd_volinfo_t *snap_volinfo = NULL;
+ int32_t i = 0;
+ char key[PATH_MAX] = "";
+
+ this = THIS;
+ GF_ASSERT (this);
+ conf = this->private;
+
+ GF_ASSERT (conf);
+ GF_ASSERT (req);
+ GF_ASSERT (dict);
+ GF_ASSERT (err_str);
+
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to "
+ "get snapname");
+ goto out;
+ }
+
+ snap = glusterd_find_snap_by_name (snapname);
+ if (!snap) {
+ snprintf (err_str, len, "Snap (%s) does not exist", snapname);
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ ret = -1;
+ goto out;
+ }
+
+ list_for_each_entry (snap_volinfo, &snap->volumes, vol_list) {
+ i++;
+ snprintf (key, sizeof (key), "volname%d", i);
+ buf = gf_strdup (snap_volinfo->parent_volname);
+ if (!buf) {
+ ret = -1;
+ goto out;
+ }
+ ret = dict_set_dynstr (dict, key, buf);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Could not set "
+ "parent volume name %s in the dict",
+ snap_volinfo->parent_volname);
+ GF_FREE (buf);
+ goto out;
+ }
+ buf = NULL;
+ }
+
+ ret = dict_set_int32 (dict, "volcount", i);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not save volume count");
+ goto out;
+ }
+
+ ret = glusterd_mgmt_v3_initiate_snap_phases (req, op, dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to initiate snap "
+ "phases");
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+glusterd_snap_t*
+glusterd_create_snap_object (dict_t *dict, dict_t *rsp_dict)
+{
+ char *snapname = NULL;
+ uuid_t *snap_id = NULL;
+ char *description = NULL;
+ glusterd_snap_t *snap = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ int ret = -1;
+ int64_t time_stamp = 0;
+
+ this = THIS;
+ priv = this->private;
+
+ GF_ASSERT (dict);
+ GF_ASSERT (rsp_dict);
+
+ /* Fetch snapname, description, id and time from dict */
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to fetch snapname");
+ goto out;
+ }
+
+        /* Ignore ret value for description */
+ ret = dict_get_str (dict, "description", &description);
+
+ ret = dict_get_bin (dict, "snap-id", (void **)&snap_id);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to fetch snap_id");
+ goto out;
+ }
+
+ ret = dict_get_int64 (dict, "snap-time", &time_stamp);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to fetch snap-time");
+ goto out;
+ }
+ if (time_stamp <= 0) {
+ ret = -1;
+ gf_log (this->name, GF_LOG_ERROR, "Invalid time-stamp: %"PRId64,
+ time_stamp);
+ goto out;
+ }
+
+ list_for_each_entry (snap, &priv->snapshots, snap_list) {
+ if (!strcmp (snap->snapname, snapname) ||
+ !uuid_compare (snap->snap_id, *snap_id)) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Found duplicate snap %s (%s)",
+ snap->snapname, uuid_utoa (snap->snap_id));
+ ret = -1;
+ break;
+ }
+ }
+ if (ret) {
+ snap = NULL;
+ goto out;
+ }
+
+ snap = glusterd_new_snap_object ();
+ if (!snap) {
+ gf_log (this->name, GF_LOG_ERROR, "Could not create "
+ "the snap object for snap %s", snapname);
+ goto out;
+ }
+
+ strcpy (snap->snapname, snapname);
+ uuid_copy (snap->snap_id, *snap_id);
+ snap->time_stamp = (time_t)time_stamp;
+ /* Set the status as GD_SNAP_STATUS_INIT and once the backend snapshot
+ is taken and snap is really ready to use, set the status to
+ GD_SNAP_STATUS_IN_USE. This helps in identifying the incomplete
+ snapshots and cleaning them up.
+ */
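+        /* (The INIT to IN_USE transition happens in
+           glusterd_snapshot_create_commit once all the brick snapshots
+           have been taken.) */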
+ snap->snap_status = GD_SNAP_STATUS_INIT;
+ if (description) {
+ snap->description = gf_strdup (description);
+ if (snap->description == NULL) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Saving the Snap Description Failed");
+ ret = -1;
+ goto out;
+ }
+ }
+
+ ret = glusterd_store_snap (snap);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "Could not store snap"
+ "object %s", snap->snapname);
+ goto out;
+ }
+
+ list_add_order (&snap->snap_list, &priv->snapshots,
+ glusterd_compare_snap_time);
+
+ gf_log (this->name, GF_LOG_TRACE, "Snap %s added to the list",
+ snap->snapname);
+
+ ret = 0;
+
+out:
+ if (ret) {
+ if (snap)
+ glusterd_snap_remove (rsp_dict, snap,
+ _gf_true, _gf_true);
+ snap = NULL;
+ }
+
+ return snap;
+}
+
+/* Add the missed_snap_entry to rsp_dict */
+int32_t
+glusterd_add_missed_snaps_to_dict (dict_t *rsp_dict,
+ glusterd_volinfo_t *snap_vol,
+ glusterd_brickinfo_t *brickinfo,
+ int32_t brick_number, int32_t op)
+{
+ char *snap_uuid = NULL;
+ char missed_snap_entry[PATH_MAX] = "";
+ char name_buf[PATH_MAX] = "";
+ int32_t missed_snap_count = -1;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (snap_vol);
+ GF_ASSERT (brickinfo);
+
+ snap_uuid = gf_strdup (uuid_utoa (snap_vol->snapshot->snap_id));
+ if (!snap_uuid) {
+ ret = -1;
+ goto out;
+ }
+
+ snprintf (missed_snap_entry, sizeof(missed_snap_entry),
+ "%s:%s=%s:%d:%s:%d:%d", uuid_utoa(brickinfo->uuid),
+ snap_uuid, snap_vol->volname, brick_number, brickinfo->path,
+ op, GD_MISSED_SNAP_PENDING);
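+        /* Illustrative entry (hypothetical values):
+         *   <brick-node-uuid>:<snap-uuid>=<snap-volname>:2:/bricks/b2:1:1
+         * i.e. node-uuid:snap-uuid=snap-volname:brick#:brick-path:op:status,
+         * where the trailing status is GD_MISSED_SNAP_PENDING.
+         */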
+
+ /* Fetch the missed_snap_count from the dict */
+ ret = dict_get_int32 (rsp_dict, "missed_snap_count",
+ &missed_snap_count);
+ if (ret) {
+ /* Initialize the missed_snap_count for the first time */
+ missed_snap_count = 0;
+ }
+
+ /* Setting the missed_snap_entry in the rsp_dict */
+ snprintf (name_buf, sizeof(name_buf), "missed_snaps_%d",
+ missed_snap_count);
+ ret = dict_set_dynstr_with_alloc (rsp_dict, name_buf,
+ missed_snap_entry);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set missed_snap_entry (%s) "
+ "in the rsp_dict.", missed_snap_entry);
+ goto out;
+ }
+ missed_snap_count++;
+
+ /* Setting the new missed_snap_count in the dict */
+ ret = dict_set_int32 (rsp_dict, "missed_snap_count",
+ missed_snap_count);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set missed_snap_count for %s "
+ "in the rsp_dict.", missed_snap_entry);
+ goto out;
+ }
+
+out:
+ if (snap_uuid)
+ GF_FREE (snap_uuid);
+
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+/* This function is called to get the device path of the snap lvm. Usually
+ if /dev/mapper/<group-name>-<lvm-name> is the device for the lvm,
+ then the snap device will be /dev/<group-name>/<snapname>.
+ This function takes care of building the path for the snap device.
+*/
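+/* For example (hypothetical names): if the device is
+   /dev/mapper/vg0-lv_brick1 and the snapname is "snap1", the volume group
+   resolved below is "vg0" and the returned snap device is /dev/vg0/snap1. */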
+char *
+glusterd_build_snap_device_path (char *device, char *snapname)
+{
+ char snap[PATH_MAX] = "";
+ char msg[1024] = "";
+ char volgroup[PATH_MAX] = "";
+ char *snap_device = NULL;
+ xlator_t *this = NULL;
+ runner_t runner = {0,};
+ char *ptr = NULL;
+ int ret = -1;
+
+ this = THIS;
+ GF_ASSERT (this);
+ if (!device) {
+ gf_log (this->name, GF_LOG_ERROR, "device is NULL");
+ goto out;
+ }
+ if (!snapname) {
+ gf_log (this->name, GF_LOG_ERROR, "snapname is NULL");
+ goto out;
+ }
+
+ runinit (&runner);
+ runner_add_args (&runner, "/sbin/lvs", "--noheadings", "-o", "vg_name",
+ device, NULL);
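+        /* --noheadings makes lvs print only the volume group name, padded
+         * with whitespace (hence the gf_trim below). */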
+ runner_redir (&runner, STDOUT_FILENO, RUN_PIPE);
+ snprintf (msg, sizeof (msg), "Get volume group for device %s", device);
+ runner_log (&runner, this->name, GF_LOG_DEBUG, msg);
+ ret = runner_start (&runner);
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get volume group "
+ "for device %s", device);
+ runner_end (&runner);
+ goto out;
+ }
+ ptr = fgets(volgroup, sizeof(volgroup),
+ runner_chio (&runner, STDOUT_FILENO));
+ if (!ptr || !strlen(volgroup)) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get volume group "
+ "for snap %s", snapname);
+ runner_end (&runner);
+ ret = -1;
+ goto out;
+ }
+ runner_end (&runner);
+
+ snprintf (snap, sizeof(snap), "/dev/%s/%s", gf_trim(volgroup),
+ snapname);
+ snap_device = gf_strdup (snap);
+ if (!snap_device) {
+ gf_log (this->name, GF_LOG_WARNING, "Cannot copy the "
+ "snapshot device name for snapname: %s)", snapname);
+ }
+
+out:
+ return snap_device;
+}
+
+/* This function actually calls the command (or the API) for taking the
+ snapshot of the backend brick filesystem. If this is successful,
+ then call the glusterd_snap_create function to create the snap object
+ for glusterd
+*/
+char *
+glusterd_take_lvm_snapshot (glusterd_volinfo_t *snap_vol,
+ glusterd_brickinfo_t *brickinfo)
+{
+ char msg[NAME_MAX] = "";
+ char buf[PATH_MAX] = "";
+ char *snap_device = NULL;
+ char *ptr = NULL;
+ char *device = NULL;
+ int ret = -1;
+ gf_boolean_t match = _gf_false;
+ runner_t runner = {0,};
+ xlator_t *this = NULL;
+
+ this = THIS;
+
+ if (!brickinfo) {
+ gf_log (this->name, GF_LOG_ERROR, "brickinfo NULL");
+ goto out;
+ }
+
+ device = glusterd_get_brick_mount_details (brickinfo);
+ if (!device) {
+ gf_log (this->name, GF_LOG_ERROR, "getting device name for "
+ "the brick %s:%s failed", brickinfo->hostname,
+ brickinfo->path);
+ goto out;
+ }
+
+ /* Figuring out if setactivationskip flag is supported or not */
+ runinit (&runner);
+ snprintf (msg, sizeof (msg), "running lvcreate help");
+ runner_add_args (&runner, LVM_CREATE, "--help", NULL);
+ runner_log (&runner, "", GF_LOG_DEBUG, msg);
+ runner_redir (&runner, STDOUT_FILENO, RUN_PIPE);
+ ret = runner_start (&runner);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to run lvcreate help");
+ runner_end (&runner);
+ goto out;
+ }
+
+ /* Looking for setactivationskip in lvcreate --help */
+ do {
+ ptr = fgets(buf, sizeof(buf),
+ runner_chio (&runner, STDOUT_FILENO));
+ if (ptr) {
+ if (strstr(buf, "setactivationskip")) {
+ match = _gf_true;
+ break;
+ }
+ }
+ } while (ptr != NULL);
+ runner_end (&runner);
+
+        /* Taking the actual snapshot */
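+        /* The runner invocation below is roughly equivalent to the shell
+         * command (illustrative device name):
+         *   lvcreate -s /dev/vg0/lv_brick1 --setactivationskip n \
+         *            --name <snap-vol-name>
+         * with --setactivationskip passed only when lvcreate supports it. */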
+ runinit (&runner);
+ snprintf (msg, sizeof (msg), "taking snapshot of the brick %s:%s",
+ brickinfo->hostname, brickinfo->path);
+ if (match == _gf_true)
+ runner_add_args (&runner, LVM_CREATE, "-s", device,
+ "--setactivationskip", "n", "--name",
+ snap_vol->volname, NULL);
+ else
+ runner_add_args (&runner, LVM_CREATE, "-s", device,
+ "--name", snap_vol->volname, NULL);
+ runner_log (&runner, "", GF_LOG_DEBUG, msg);
+ ret = runner_start (&runner);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "taking snapshot of the "
+ "brick (%s:%s) of device %s failed",
+ brickinfo->hostname, brickinfo->path, device);
+ runner_end (&runner);
+ goto out;
+ }
+ runner_end (&runner);
+
+ snap_device = glusterd_build_snap_device_path (device,
+ snap_vol->volname);
+ if (!snap_device) {
+ gf_log (this->name, GF_LOG_WARNING, "Cannot copy the snapshot "
+ "device name for snap %s (volume id: %s)",
+ snap_vol->snapshot->snapname, snap_vol->volname);
+ ret = -1;
+ goto out;
+ }
+
+out:
+ return snap_device;
+}
+
+int32_t
+glusterd_snap_brick_create (char *device, glusterd_volinfo_t *snap_volinfo,
+ glusterd_brickinfo_t *original_brickinfo,
+ int32_t brick_count, char *snap_brick_dir)
+{
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ char snap_brick_mount_path[PATH_MAX] = "";
+ char snap_brick_path[PATH_MAX] = "";
+ struct stat statbuf = {0, };
+
+ this = THIS;
+
+ GF_ASSERT (device);
+ GF_ASSERT (snap_volinfo);
+ GF_ASSERT (original_brickinfo);
+ GF_ASSERT (snap_brick_dir);
+
+ snprintf (snap_brick_mount_path, sizeof (snap_brick_mount_path),
+ "%s/%s/brick%d", snap_mount_folder, snap_volinfo->volname,
+ brick_count+1);
+
+ snprintf (snap_brick_path, sizeof (snap_brick_path), "%s%s",
+ snap_brick_mount_path, snap_brick_dir);
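+        /* Illustrative layout (hypothetical values): for brick_count 0 the
+         * mount path is <snap_mount_folder>/<snap-volname>/brick1, and the
+         * brick path appends snap_brick_dir to that mount path. */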
+
+ ret = mkdir_p (snap_brick_mount_path, 0777, _gf_true);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "creating the brick directory"
+ " %s for the snapshot %s(device: %s) failed",
+ snap_brick_mount_path, snap_volinfo->volname, device);
+ goto out;
+ }
+        /* Mount the snap logical device on the directory inside
+           /run/gluster/snaps/<snapname>/@snap_brick_mount_path.
+           One way to mount the snap brick via the mount api would be:
+               ret = mount (device, snap_brick_mount_path, entry->mnt_type,
+                            MS_MGC_VAL, "nouuid");
+           But for now, it is mounted using the runner apis.
+        */
+ ret = glusterd_mount_lvm_snapshot (device, snap_brick_mount_path);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to mount lvm snapshot.");
+ goto out;
+ }
+
+ ret = stat (snap_brick_path, &statbuf);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "stat of the brick %s"
+ "(brick mount: %s) failed (%s)", snap_brick_path,
+ snap_brick_mount_path, strerror (errno));
+ goto out;
+ }
+ ret = sys_lsetxattr (snap_brick_path,
+ GF_XATTR_VOL_ID_KEY,
+ snap_volinfo->volume_id, 16,
+ XATTR_REPLACE);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set "
+ "extended attribute %s on %s. Reason: "
+ "%s, snap: %s", GF_XATTR_VOL_ID_KEY,
+ snap_brick_path, strerror (errno),
+ snap_volinfo->volname);
+ goto out;
+ }
+
+out:
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "unmounting the snap brick"
+ " mount %s", snap_brick_mount_path);
+#if !defined(GF_DARWIN_HOST_OS)
+ umount (snap_brick_mount_path);
+#else
+ unmount (snap_brick_mount_path, 0);
+#endif
+ }
+
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+static int32_t
+glusterd_add_bricks_to_snap_volume (dict_t *dict, dict_t *rsp_dict,
+ glusterd_volinfo_t *snap_vol,
+ glusterd_brickinfo_t *original_brickinfo,
+ glusterd_brickinfo_t *snap_brickinfo,
+ char **snap_brick_dir, int64_t volcount,
+ int32_t brick_count)
+{
+ char key[PATH_MAX] = "";
+ char snap_brick_path[PATH_MAX] = "";
+ char *snap_device = NULL;
+ gf_boolean_t add_missed_snap = _gf_false;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (snap_vol);
+ GF_ASSERT (original_brickinfo);
+ GF_ASSERT (snap_brickinfo);
+ GF_ASSERT (snap_brick_dir);
+
+ snprintf (key, sizeof(key) - 1, "vol%"PRId64".brickdir%d", volcount,
+ brick_count);
+ ret = dict_get_ptr (dict, key, (void **)snap_brick_dir);
+ if (ret) {
+ /* Using original brickinfo here because it will be a
+ * pending snapshot and storing the original brickinfo
+ * will help in mapping while recreating the missed snapshot
+ */
+ gf_log (this->name, GF_LOG_WARNING, "Unable to fetch "
+ "snap mount path (%s). Using original brickinfo", key);
+ snap_brickinfo->snap_status = -1;
+ strcpy (snap_brick_path, original_brickinfo->path);
+
+                /* In the originator node add snaps missed
+                 * from different nodes to the dict
+                 */
+ if (is_origin_glusterd (dict) == _gf_true)
+ add_missed_snap = _gf_true;
+ } else {
+                /* Create brick-path in the format /var/run/gluster/snaps/
+                 * <snap-uuid>/<original-brick#>/snap-brick-dir
+                 */
+ snprintf (snap_brick_path, sizeof(snap_brick_path),
+ "%s/%s/brick%d%s", snap_mount_folder,
+ snap_vol->volname, brick_count+1,
+ *snap_brick_dir);
+ }
+
+ if ((snap_brickinfo->snap_status != -1) &&
+ (!uuid_compare (original_brickinfo->uuid, MY_UUID)) &&
+ (!glusterd_is_brick_started (original_brickinfo))) {
+ /* In case if the brick goes down after prevalidate. */
+ gf_log (this->name, GF_LOG_WARNING, "brick %s:%s is not"
+ " started (snap: %s)",
+ original_brickinfo->hostname,
+ original_brickinfo->path,
+ snap_vol->snapshot->snapname);
+
+ snap_brickinfo->snap_status = -1;
+ strcpy (snap_brick_path, original_brickinfo->path);
+ add_missed_snap = _gf_true;
+ }
+
+ if (add_missed_snap) {
+ ret = glusterd_add_missed_snaps_to_dict (rsp_dict,
+ snap_vol,
+ original_brickinfo,
+ brick_count + 1,
+ GF_SNAP_OPTION_TYPE_CREATE);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to add missed"
+ " snapshot info for %s:%s in the rsp_dict",
+ original_brickinfo->hostname,
+ original_brickinfo->path);
+ goto out;
+ }
+ }
+
+ snprintf (key, sizeof(key), "vol%"PRId64".brick_snapdevice%d",
+ volcount, brick_count);
+ ret = dict_get_ptr (dict, key, (void **)&snap_device);
+ if (ret) {
+                /* If the device name is empty, so will be the brick path.
+                 * Hence the missed snap has already been added above.
+                 */
+ gf_log (this->name, GF_LOG_ERROR, "Unable to fetch "
+ "snap device (%s). Leaving empty", key);
+ } else
+ strcpy (snap_brickinfo->device_path, snap_device);
+
+ ret = gf_canonicalize_path (snap_brick_path);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to canonicalize path");
+ goto out;
+ }
+
+ strcpy (snap_brickinfo->hostname, original_brickinfo->hostname);
+ strcpy (snap_brickinfo->path, snap_brick_path);
+ uuid_copy (snap_brickinfo->uuid, original_brickinfo->uuid);
+ /* AFR changelog names are based on brick_id and hence the snap
+ * volume's bricks must retain the same ID */
+ strcpy (snap_brickinfo->brick_id, original_brickinfo->brick_id);
+ list_add_tail (&snap_brickinfo->brick_list, &snap_vol->bricks);
+
+out:
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+static int32_t
+glusterd_take_brick_snapshot (glusterd_volinfo_t *origin_vol,
+ glusterd_volinfo_t *snap_vol, dict_t *rsp_dict,
+ glusterd_brickinfo_t *original_brickinfo,
+ glusterd_brickinfo_t *snap_brickinfo,
+ char *snap_brick_dir, int32_t brick_count)
+{
+ char *device = NULL;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (origin_vol);
+ GF_ASSERT (snap_vol);
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (original_brickinfo);
+ GF_ASSERT (snap_brickinfo);
+ GF_ASSERT (snap_brick_dir);
+
+ device = glusterd_take_lvm_snapshot (snap_vol, original_brickinfo);
+        /* Fail the snapshot even if taking the snapshot on just one of
+           the bricks fails. At the end, when we check whether the
+           snapshot volume meets quorum or not, the snapshot can either
+           be treated as a success, or, in case of failure, we can undo
+           the changes and return failure to cli. */
+ if (!device) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to take snapshot of %s:%s",
+ original_brickinfo->hostname,
+ original_brickinfo->path);
+ goto out;
+ }
+
+ /* create the complete brick here */
+ ret = glusterd_snap_brick_create (device, snap_vol,
+ original_brickinfo,
+ brick_count, snap_brick_dir);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "not able to"
+ " create the brickinfo for the snap %s"
+ ", volume %s", snap_vol->snapshot->snapname,
+ origin_vol->volname);
+ goto out;
+ }
+
+out:
+ if (device)
+ GF_FREE (device);
+
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+glusterd_volinfo_t *
+glusterd_do_snap_vol (glusterd_volinfo_t *origin_vol, glusterd_snap_t *snap,
+ dict_t *dict, dict_t *rsp_dict, int64_t volcount)
+{
+ char key[PATH_MAX] = "";
+ char *snap_brick_dir = NULL;
+ char *username = NULL;
+ char *password = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *snap_vol = NULL;
+ uuid_t *snap_volid = NULL;
+ int32_t ret = -1;
+ int32_t brick_count = 0;
+ glusterd_brickinfo_t *snap_brickinfo = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+ GF_ASSERT (dict);
+ GF_ASSERT (origin_vol);
+ GF_ASSERT (rsp_dict);
+
+ /* fetch username, password and vol_id from dict*/
+ snprintf (key, sizeof(key), "volume%"PRId64"_username", volcount);
+ ret = dict_get_str (dict, key, &username);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get %s for "
+ "snap %s", key, snap->snapname);
+ goto out;
+ }
+
+ snprintf (key, sizeof(key), "volume%"PRId64"_password", volcount);
+ ret = dict_get_str (dict, key, &password);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get %s for "
+ "snap %s", key, snap->snapname);
+ goto out;
+ }
+
+ snprintf (key, sizeof(key) - 1, "vol%"PRId64"_volid", volcount);
+ ret = dict_get_bin (dict, key, (void **)&snap_volid);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to fetch snap_volid");
+ goto out;
+ }
+
+ /* We are not setting the username and password here as
+ * we need to set the user name and password passed in
+ * the dictionary
+ */
+ ret = glusterd_volinfo_dup (origin_vol, &snap_vol, _gf_false);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to duplicate volinfo "
+ "for the snapshot %s", snap->snapname);
+ goto out;
+ }
+
+        /* The uuid is used as the lvm snapshot name; this avoids
+           restrictions on snapshot names provided by the user. */
+ GLUSTERD_GET_UUID_NOHYPHEN (snap_vol->volname, *snap_volid);
+ uuid_copy (snap_vol->volume_id, *snap_volid);
+ snap_vol->is_snap_volume = _gf_true;
+ strcpy (snap_vol->parent_volname, origin_vol->volname);
+ snap_vol->snapshot = snap;
+
+ glusterd_auth_set_username (snap_vol, username);
+ glusterd_auth_set_password (snap_vol, password);
+
+ /* Adding snap brickinfos to the snap volinfo */
+ brick_count = 0;
+ list_for_each_entry (brickinfo, &origin_vol->bricks, brick_list) {
+ snap_brickinfo = NULL;
+
+ ret = glusterd_brickinfo_new (&snap_brickinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "initializing the brick for the snap "
+ "volume failed (snapname: %s)", snap->snapname);
+ goto out;
+ }
+
+ ret = glusterd_add_bricks_to_snap_volume (dict, rsp_dict,
+ snap_vol,
+ brickinfo,
+ snap_brickinfo,
+ &snap_brick_dir,
+ volcount,
+ brick_count);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to add the snap brick for "
+ "%s:%s to the snap volume",
+ brickinfo->hostname, brickinfo->path);
+ GF_FREE (snap_brickinfo);
+ goto out;
+ }
+
+ /* Take snapshot of the brick */
+ if ((uuid_compare (brickinfo->uuid, MY_UUID)) ||
+ (snap_brickinfo->snap_status == -1)) {
+ brick_count++;
+ continue;
+ }
+
+ ret = glusterd_take_brick_snapshot (origin_vol, snap_vol,
+ rsp_dict, brickinfo,
+ snap_brickinfo,
+ snap_brick_dir,
+ brick_count);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to take snapshot for %s:%s",
+ brickinfo->hostname, brickinfo->path);
+ goto out;
+ }
+ brick_count++;
+ }
+
+        /* TODO: add the quorum check for the snap volume here */
+
+ ret = glusterd_store_volinfo (snap_vol,
+ GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to store snapshot "
+ "volinfo (%s) for snap %s", snap_vol->volname,
+ snap->snapname);
+ goto out;
+ }
+
+ ret = generate_brick_volfiles (snap_vol);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "generating the brick "
+ "volfiles for the snap %s (volume: %s) failed",
+ snap->snapname, origin_vol->volname);
+ goto out;
+ }
+
+ ret = generate_client_volfiles (snap_vol, GF_CLIENT_TRUSTED);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "generating the trusted "
+ "client volfiles for the snap %s (volume: %s) failed",
+ snap->snapname, origin_vol->volname);
+ goto out;
+ }
+
+ ret = generate_client_volfiles (snap_vol, GF_CLIENT_OTHER);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "generating the client "
+ "volfiles for the snap %s (volume: %s) failed",
+ snap->snapname, origin_vol->volname);
+ goto out;
+ }
+
+ ret = glusterd_list_add_snapvol (origin_vol, snap_vol);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "could not add the snap "
+ "volume %s to the list", snap_vol->volname);
+ goto out;
+ }
+
+ list_for_each_entry (brickinfo, &snap_vol->bricks, brick_list) {
+ if (uuid_compare (brickinfo->uuid, MY_UUID))
+ continue;
+
+ if (brickinfo->snap_status == -1) {
+ gf_log (this->name, GF_LOG_INFO,
+ "not starting snap brick %s:%s for "
+ "for the snap %s (volume: %s)",
+ brickinfo->hostname, brickinfo->path,
+ snap->snapname, origin_vol->volname);
+ continue;
+ }
+
+ ret = glusterd_brick_start (snap_vol, brickinfo, _gf_true);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "starting the "
+ "brick %s:%s for the snap %s (volume: %s) "
+ "failed", brickinfo->hostname, brickinfo->path,
+ snap->snapname, origin_vol->volname);
+ goto out;
+ }
+ }
+
+ snap_vol->status = GLUSTERD_STATUS_STARTED;
+ ret = glusterd_store_volinfo (snap_vol,
+ GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to store snap volinfo");
+ goto out;
+ }
+
+out:
+ if (ret) {
+ if (snap_vol)
+ glusterd_snap_volume_remove (rsp_dict, snap_vol,
+ _gf_true, _gf_true);
+ snap_vol = NULL;
+ }
+
+ return snap_vol;
+}
+
+/* This is a snapshot remove handler function. This function will be
+ * executed in the originator node. This function is responsible for
+ * calling mgmt v3 framework to do the actual remove on all the bricks
+ *
+ * @param req RPC request object
+ * @param op gluster operation
+ * @param dict dictionary containing snapshot remove request
+ * @param err_str In case of an err this string should be populated
+ * @param len length of err_str buffer
+ *
+ * @return Negative value on Failure and 0 in success
+ */
+int
+glusterd_handle_snapshot_remove (rpcsvc_request_t *req, glusterd_op_t op,
+ dict_t *dict, char *err_str, size_t len)
+{
+ int ret = -1;
+ int64_t volcount = 0;
+ char *snapname = NULL;
+ char *volname = NULL;
+ char key[PATH_MAX] = "";
+ glusterd_snap_t *snap = NULL;
+ glusterd_volinfo_t *snap_vol = NULL;
+ glusterd_volinfo_t *tmp = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+
+ GF_ASSERT (req);
+ GF_ASSERT (dict);
+ GF_ASSERT (err_str);
+
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get snapname");
+ goto out;
+ }
+
+ snap = glusterd_find_snap_by_name (snapname);
+ if (!snap) {
+ snprintf (err_str, len, "Snap (%s) does not exist", snapname);
+ gf_log (this->name, GF_LOG_ERROR,
+ "%s", err_str);
+ ret = -1;
+ goto out;
+ }
+
+ /* Set volnames in the dict to get mgmt_v3 lock */
+ list_for_each_entry_safe (snap_vol, tmp, &snap->volumes, vol_list) {
+ volcount++;
+ volname = gf_strdup (snap_vol->parent_volname);
+ if (!volname) {
+ ret = -1;
+ gf_log (this->name, GF_LOG_ERROR, "strdup failed");
+ goto out;
+ }
+
+ snprintf (key, sizeof (key), "volname%"PRId64, volcount);
+ ret = dict_set_dynstr (dict, key, volname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set "
+ "volume name in dictionary");
+ GF_FREE (volname);
+ goto out;
+ }
+ volname = NULL;
+ }
+ ret = dict_set_int64 (dict, "volcount", volcount);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set volcount");
+ goto out;
+ }
+
+ ret = glusterd_mgmt_v3_initiate_snap_phases (req, op, dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to initiate snap "
+ "phases");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+int
+glusterd_snapshot_remove_prevalidate (dict_t *dict, char **op_errstr,
+ dict_t *rsp_dict)
+{
+ int32_t ret = -1;
+ char *snapname = NULL;
+ xlator_t *this = NULL;
+ glusterd_snap_t *snap = NULL;
+
+ this = THIS;
+
+ if (!dict || !op_errstr) {
+ gf_log (this->name, GF_LOG_ERROR, "input parameters NULL");
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Getting the snap name "
+ "failed");
+ goto out;
+ }
+
+ snap = glusterd_find_snap_by_name (snapname);
+ if (!snap) {
+ gf_log (this->name, GF_LOG_ERROR, "Snap %s does not exist",
+ snapname);
+ ret = -1;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+int
+glusterd_snapshot_status_prevalidate (dict_t *dict, char **op_errstr,
+ dict_t *rsp_dict)
+{
+ int ret = -1;
+ char *snapname = NULL;
+ glusterd_conf_t *conf = NULL;
+ xlator_t *this = NULL;
+ int32_t cmd = -1;
+ glusterd_volinfo_t *volinfo = NULL;
+ char *volname = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ conf = this->private;
+
+ GF_ASSERT (conf);
+ GF_ASSERT (op_errstr);
+ if (!dict) {
+ gf_log (this->name, GF_LOG_ERROR, "Input dict is NULL");
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, "cmd", &cmd);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not fetch status cmd");
+ goto out;
+ }
+
+ switch (cmd) {
+ case GF_SNAP_STATUS_TYPE_ALL:
+ {
+ break;
+ }
+ case GF_SNAP_STATUS_TYPE_SNAP:
+ {
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not fetch snapname");
+ goto out;
+ }
+
+ if (!glusterd_find_snap_by_name (snapname)) {
+ ret = gf_asprintf (op_errstr, "Snap (%s) "
+ "not found", snapname);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = -1;
+ gf_log (this->name, GF_LOG_ERROR, "Snap (%s) "
+ "not found", snapname);
+ goto out;
+ }
+ break;
+ }
+ case GF_SNAP_STATUS_TYPE_VOL:
+ {
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not fetch volname");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ ret = gf_asprintf (op_errstr, "Volume (%s)"
+ "not found", volname);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = -1;
+ gf_log (this->name, GF_LOG_ERROR, "Volume "
+ "%s not present", volname);
+ goto out;
+ }
+ break;
+
+ }
+ default:
+ {
+ gf_log (this->name, GF_LOG_ERROR, "Invalid command");
+ break;
+ }
+ }
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int32_t
+glusterd_snapshot_remove_commit (dict_t *dict, char **op_errstr,
+ dict_t *rsp_dict)
+{
+ int32_t ret = -1;
+ char *snapname = NULL;
+ char *dup_snapname = NULL;
+ glusterd_snap_t *snap = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *snap_volinfo = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (op_errstr);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ if (!dict || !op_errstr) {
+ gf_log (this->name, GF_LOG_ERROR, "input parameters NULL");
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Getting the snap name "
+ "failed");
+ goto out;
+ }
+
+ snap = glusterd_find_snap_by_name (snapname);
+ if (!snap) {
+ gf_log (this->name, GF_LOG_ERROR, "Snap %s does not exist",
+ snapname);
+ ret = -1;
+ goto out;
+ }
+
+ if (is_origin_glusterd (dict) == _gf_true) {
+                /* TODO: As of now there is only one volume in a snapshot.
+                 * Change this when multiple-volume snapshots are introduced.
+                 */
+ snap_volinfo = list_entry (snap->volumes.next,
+ glusterd_volinfo_t,
+ vol_list);
+ if (!snap_volinfo) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to fetch snap_volinfo");
+ ret = -1;
+ goto out;
+ }
+
+                /* From the origin glusterd, check if
+                 * any peer with snap bricks is down */
+ ret = glusterd_find_missed_snap (rsp_dict, snap_volinfo,
+ &priv->peers,
+ GF_SNAP_OPTION_TYPE_DELETE);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to find missed snap deletes");
+ goto out;
+ }
+ }
+
+ ret = glusterd_snap_remove (rsp_dict, snap, _gf_true, _gf_false);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to remove snap %s",
+ snapname);
+ goto out;
+ }
+
+ dup_snapname = gf_strdup (snapname);
+ if (!dup_snapname) {
+ gf_log (this->name, GF_LOG_ERROR, "Strdup failed");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_dynstr (rsp_dict, "snapname", dup_snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set the snapname");
+ GF_FREE (dup_snapname);
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+int32_t
+glusterd_do_snap_cleanup (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
+{
+ int32_t ret = -1;
+ char *name = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ glusterd_snap_t *snap = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
+
+ if (!dict || !op_errstr) {
+ gf_log (this->name, GF_LOG_ERROR, "input parameters NULL");
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "snapname", &name);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "getting the snap "
+ "name failed (volume: %s)", volinfo->volname);
+ goto out;
+ }
+
+        /*
+          If the snapname is not found, that means the failure happened at
+          staging, or in commit, before the snap object is created, in which
+          case there is nothing to clean up. So set ret to 0.
+        */
+ snap = glusterd_find_snap_by_name (name);
+ if (!snap) {
+ gf_log (this->name, GF_LOG_INFO, "snap %s is not found", name);
+ ret = 0;
+ goto out;
+ }
+
+ ret = glusterd_snap_remove (rsp_dict, snap, _gf_true, _gf_true);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "removing the snap %s failed",
+ name);
+ goto out;
+ }
+
+ name = NULL;
+
+ ret = 0;
+
+out:
+
+ return ret;
+}
+
+/* In case of a successful delete or create operation, during post_validate
+ * look for missed snap operations and update the missed snap lists */
+int32_t
+glusterd_snapshot_update_snaps_post_validate (dict_t *dict, char **op_errstr,
+ dict_t *rsp_dict)
+{
+ int32_t ret = -1;
+ int32_t missed_snap_count = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (op_errstr);
+
+ ret = dict_get_int32 (dict, "missed_snap_count",
+ &missed_snap_count);
+ if (ret) {
+ gf_log (this->name, GF_LOG_DEBUG, "No missed snaps");
+ ret = 0;
+ goto out;
+ }
+
+ ret = glusterd_add_missed_snaps_to_list (dict, missed_snap_count);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to add missed snaps to list");
+ goto out;
+ }
+
+ ret = glusterd_store_update_missed_snaps ();
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to update missed_snaps_list");
+ goto out;
+ }
+
+out:
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
+glusterd_snapshot_create_commit (dict_t *dict, char **op_errstr,
+ dict_t *rsp_dict)
+{
+ int ret = -1;
+ int64_t i = 0;
+ int64_t volcount = 0;
+ char *snapname = NULL;
+ char *volname = NULL;
+ char *tmp_name = NULL;
+ char key[PATH_MAX] = "";
+ xlator_t *this = NULL;
+ glusterd_snap_t *snap = NULL;
+ glusterd_volinfo_t *origin_vol = NULL;
+ glusterd_volinfo_t *snap_vol = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(dict);
+ GF_ASSERT(op_errstr);
+ GF_ASSERT(rsp_dict);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ ret = dict_get_int64 (dict, "volcount", &volcount);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to "
+ "get the volume count");
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to fetch snapname");
+ goto out;
+ }
+ tmp_name = gf_strdup (snapname);
+ if (!tmp_name) {
+ gf_log (this->name, GF_LOG_ERROR, "Out of memory");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_dynstr (rsp_dict, "snapname", tmp_name);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set snapname in rsp_dict");
+ GF_FREE (tmp_name);
+ goto out;
+ }
+ tmp_name = NULL;
+
+ snap = glusterd_create_snap_object (dict, rsp_dict);
+ if (!snap) {
+ gf_log (this->name, GF_LOG_ERROR, "creating the"
+ "snap object %s failed", snapname);
+ ret = -1;
+ goto out;
+ }
+
+ for (i = 1; i <= volcount; i++) {
+ snprintf (key, sizeof (key), "volname%"PRId64, i);
+ ret = dict_get_str (dict, key, &volname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to get volume name");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find (volname, &origin_vol);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to get the volinfo for "
+ "the volume %s", volname);
+ goto out;
+ }
+
+                /* TODO: Create a stub where the bricks are
+                   added in parallel by worker threads so that
+                   the snap creation happens in parallel. */
+ snap_vol = glusterd_do_snap_vol (origin_vol, snap, dict,
+ rsp_dict, i);
+ if (!snap_vol) {
+ ret = -1;
+ gf_log (this->name, GF_LOG_WARNING, "taking the "
+ "snapshot of the volume %s failed", volname);
+ goto out;
+ }
+ }
+
+ snap->snap_status = GD_SNAP_STATUS_IN_USE;
+ ret = glusterd_store_snap (snap);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "Could not store snap"
+ "object %s", snap->snapname);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ if (ret) {
+ if (snap)
+ glusterd_snap_remove (rsp_dict, snap,
+ _gf_true, _gf_true);
+ snap = NULL;
+ }
+
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+int
+snap_max_hard_limit_set_commit (dict_t *dict, uint64_t value,
+ char *volname, char **op_errstr)
+{
+ char err_str[PATH_MAX] = "";
+ glusterd_conf_t *conf = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (volname);
+ GF_ASSERT (op_errstr);
+
+ conf = this->private;
+
+ GF_ASSERT (conf);
+
+ /* TODO: Initiate auto deletion when there is a limit change */
+ if (!volname) {
+ /* For system limit */
+ conf->snap_max_hard_limit = value;
+
+ ret = glusterd_store_global_info (this);
+ if (ret) {
+ snprintf (err_str, PATH_MAX, "Failed to store "
+ "snap-max-hard-limit for system");
+ goto out;
+ }
+ } else {
+ /* For one volume */
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ snprintf (err_str, PATH_MAX, "Failed to get the"
+ " volinfo for volume %s", volname);
+ goto out;
+ }
+
+ volinfo->snap_max_hard_limit = value;
+
+ ret = glusterd_store_volinfo (volinfo,
+ GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret) {
+ snprintf (err_str, PATH_MAX, "Failed to store "
+ "snap-max-hard-limit for volume %s", volname);
+ goto out;
+ }
+ }
+
+ ret = 0;
+out:
+ if (ret) {
+ *op_errstr = gf_strdup (err_str);
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ }
+ return ret;
+}
+
+int
+snap_max_limits_display_commit (dict_t *rsp_dict, char *volname,
+ char **op_errstr)
+{
+ char err_str[PATH_MAX] = "";
+ char buf[PATH_MAX] = "";
+ glusterd_conf_t *conf = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ int ret = -1;
+ uint64_t active_hard_limit = 0;
+ uint64_t snap_max_limit = 0;
+ uint64_t soft_limit_value = -1;
+ uint64_t count = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+
+ GF_ASSERT (this);
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (volname);
+ GF_ASSERT (op_errstr);
+
+ conf = this->private;
+
+ GF_ASSERT (conf);
+
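+        /* Per displayed volume, the rsp_dict below gets "volume<n>-volname",
+         * "volume<n>-snap-max-hard-limit", "volume<n>-active-hard-limit"
+         * and "volume<n>-snap-max-soft-limit", plus an overall
+         * "voldisplaycount". */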
+ if (!volname) {
+ /* For system limit */
+ list_for_each_entry (volinfo, &conf->volumes, vol_list) {
+ if (volinfo->is_snap_volume == _gf_true)
+ continue;
+ snap_max_limit = volinfo->snap_max_hard_limit;
+ if (snap_max_limit > conf->snap_max_hard_limit)
+ active_hard_limit = conf->snap_max_hard_limit;
+ else
+ active_hard_limit = snap_max_limit;
+ soft_limit_value = (active_hard_limit *
+ conf->snap_max_soft_limit) / 100;
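+                        /* e.g. an active hard limit of 200 with a 90%
+                         * soft limit yields a soft limit of 180
+                         * (illustrative numbers). */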
+
+ snprintf (buf, sizeof(buf), "volume%"PRIu64"-volname", count);
+ ret = dict_set_str (rsp_dict, buf, volinfo->volname);
+ if (ret) {
+ snprintf (err_str, PATH_MAX,
+ "Failed to set %s", buf);
+ goto out;
+ }
+
+ snprintf (buf, sizeof(buf),
+ "volume%"PRIu64"-snap-max-hard-limit", count);
+ ret = dict_set_uint64 (rsp_dict, buf, snap_max_limit);
+ if (ret) {
+ snprintf (err_str, PATH_MAX,
+ "Failed to set %s", buf);
+ goto out;
+ }
+
+ snprintf (buf, sizeof(buf),
+ "volume%"PRIu64"-active-hard-limit", count);
+ ret = dict_set_uint64 (rsp_dict, buf,
+ active_hard_limit);
+ if (ret) {
+ snprintf (err_str, PATH_MAX,
+ "Failed to set %s", buf);
+ goto out;
+ }
+
+ snprintf (buf, sizeof(buf),
+ "volume%"PRIu64"-snap-max-soft-limit", count);
+ ret = dict_set_uint64 (rsp_dict, buf, soft_limit_value);
+ if (ret) {
+ snprintf (err_str, PATH_MAX,
+ "Failed to set %s", buf);
+ goto out;
+ }
+ count++;
+ }
+
+ ret = dict_set_uint64 (rsp_dict, "voldisplaycount", count);
+ if (ret) {
+ snprintf (err_str, PATH_MAX,
+ "Failed to set voldisplaycount");
+ goto out;
+ }
+ } else {
+ /* For one volume */
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ snprintf (err_str, PATH_MAX, "Failed to get the"
+ " volinfo for volume %s", volname);
+ goto out;
+ }
+
+ snap_max_limit = volinfo->snap_max_hard_limit;
+ if (snap_max_limit > conf->snap_max_hard_limit)
+ active_hard_limit = conf->snap_max_hard_limit;
+ else
+ active_hard_limit = snap_max_limit;
+
+ soft_limit_value = (active_hard_limit *
+ conf->snap_max_soft_limit) / 100;
+
+ snprintf (buf, sizeof(buf), "volume%"PRIu64"-volname", count);
+ ret = dict_set_str (rsp_dict, buf, volinfo->volname);
+ if (ret) {
+ snprintf (err_str, PATH_MAX,
+ "Failed to set %s", buf);
+ goto out;
+ }
+
+ snprintf (buf, sizeof(buf),
+ "volume%"PRIu64"-snap-max-hard-limit", count);
+ ret = dict_set_uint64 (rsp_dict, buf, snap_max_limit);
+ if (ret) {
+ snprintf (err_str, PATH_MAX,
+ "Failed to set %s", buf);
+ goto out;
+ }
+
+ snprintf (buf, sizeof(buf),
+ "volume%"PRIu64"-active-hard-limit", count);
+ ret = dict_set_uint64 (rsp_dict, buf, active_hard_limit);
+ if (ret) {
+ snprintf (err_str, PATH_MAX,
+ "Failed to set %s", buf);
+ goto out;
+ }
+
+ snprintf (buf, sizeof(buf),
+ "volume%"PRIu64"-snap-max-soft-limit", count);
+ ret = dict_set_uint64 (rsp_dict, buf, soft_limit_value);
+ if (ret) {
+ snprintf (err_str, PATH_MAX,
+ "Failed to set %s", buf);
+ goto out;
+ }
+
+ count++;
+
+ ret = dict_set_uint64 (rsp_dict, "voldisplaycount", count);
+ if (ret) {
+ snprintf (err_str, PATH_MAX,
+ "Failed to set voldisplaycount");
+ goto out;
+ }
+
+ }
+
+ ret = dict_set_uint64 (rsp_dict, "snap-max-hard-limit",
+ conf->snap_max_hard_limit);
+ if (ret) {
+ snprintf (err_str, PATH_MAX,
+ "Failed to set sys-snap-max-hard-limit ");
+ goto out;
+ }
+
+ ret = dict_set_uint64 (rsp_dict, "snap-max-soft-limit",
+ conf->snap_max_soft_limit);
+ if (ret) {
+ snprintf (err_str, PATH_MAX,
+ "Failed to set sys-snap-max-hard-limit ");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (ret) {
+ *op_errstr = gf_strdup (err_str);
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ }
+ return ret;
+}
+
+int
+glusterd_snapshot_config_commit (dict_t *dict, char **op_errstr,
+ dict_t *rsp_dict)
+{
+ char *volname = NULL;
+ xlator_t *this = NULL;
+ int ret = -1;
+ char err_str[PATH_MAX] = {0,};
+ glusterd_conf_t *conf = NULL;
+ int config_command = 0;
+ uint64_t hard_limit = 0;
+ uint64_t soft_limit = 0;
+
+ this = THIS;
+
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+
+ conf = this->private;
+
+ GF_ASSERT (conf);
+
+ ret = dict_get_int32 (dict, "config-command", &config_command);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to get config-command type");
+ goto out;
+ }
+
+        /* Ignore the return values of the following dict_get calls,
+         * as these keys are optional
+         */
+ ret = dict_get_str (dict, "volname", &volname);
+
+ ret = dict_get_uint64 (dict, "snap-max-hard-limit", &hard_limit);
+
+ ret = dict_get_uint64 (dict, "snap-max-soft-limit", &soft_limit);
+
+ switch (config_command) {
+ case GF_SNAP_CONFIG_TYPE_SET:
+ if (hard_limit) {
+ /* Commit ops for snap-max-hard-limit */
+ ret = snap_max_hard_limit_set_commit (dict, hard_limit,
+ volname,
+ op_errstr);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "snap-max-hard-limit set "
+ "commit failed.");
+ goto out;
+ }
+ }
+
+ if (soft_limit) {
+ /* For system limit */
+ conf->snap_max_soft_limit = soft_limit;
+
+ ret = glusterd_store_global_info (this);
+ if (ret) {
+ snprintf (err_str, PATH_MAX, "Failed to store "
+ "snap-max-soft-limit for system");
+ *op_errstr = gf_strdup (err_str);
+ gf_log (this->name, GF_LOG_ERROR, "%s",
+ err_str);
+ goto out;
+ }
+ }
+ break;
+
+ case GF_SNAP_CONFIG_DISPLAY:
+ /* Reading data from local node only */
+ if (!is_origin_glusterd (dict)) {
+ ret = 0;
+ break;
+ }
+
+ ret = snap_max_limits_display_commit (rsp_dict, volname,
+ op_errstr);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "snap-max-limit "
+ "display commit failed.");
+ goto out;
+ }
+ break;
+ default:
+ break;
+ }
+
+out:
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+int
+glusterd_get_brick_lvm_details (dict_t *rsp_dict,