summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--cli/src/cli-cmd-parser.c13
-rw-r--r--cli/src/cli-rpc-ops.c5
-rw-r--r--rpc/xdr/src/cli1-xdr.x3
-rw-r--r--tests/bugs/bug-1111041.t36
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-handler.c10
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-op-sm.c59
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-rpc-ops.c1
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-store.c213
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-store.h1
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-utils.c67
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-utils.h4
-rw-r--r--xlators/mgmt/glusterd/src/glusterd.h9
12 files changed, 412 insertions, 9 deletions
diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index d855dbf8a0a..1a39be8d121 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -2412,6 +2412,8 @@ cli_cmd_volume_status_parse (const char **words, int wordcount,
cmd |= GF_CLI_STATUS_SHD;
} else if (!strcmp (words[3], "quotad")) {
cmd |= GF_CLI_STATUS_QUOTAD;
+ } else if (!strcmp (words[3], "snapd")) {
+ cmd |= GF_CLI_STATUS_SNAPD;
} else {
cmd = GF_CLI_STATUS_BRICK;
ret = dict_set_str (dict, "brick",
@@ -2478,6 +2480,17 @@ cli_cmd_volume_status_parse (const char **words, int wordcount,
goto out;
}
cmd |= GF_CLI_STATUS_QUOTAD;
+ } else if (!strcmp (words[3], "snapd")) {
+ if (cmd == GF_CLI_STATUS_FD ||
+ cmd == GF_CLI_STATUS_CLIENTS ||
+ cmd == GF_CLI_STATUS_DETAIL ||
+ cmd == GF_CLI_STATUS_INODE) {
+ cli_err ("Detail/FD/Clients/Inode status not "
+ "available for snap daemon");
+ ret = -1;
+ goto out;
+ }
+ cmd |= GF_CLI_STATUS_SNAPD;
} else {
if (cmd == GF_CLI_STATUS_TASKS) {
cli_err ("Tasks status not available for "
diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index c4846be2379..9d64917e3b7 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -6531,7 +6531,7 @@ gf_cli_status_cbk (struct rpc_req *req, struct iovec *iov,
}
if ((cmd & GF_CLI_STATUS_NFS) || (cmd & GF_CLI_STATUS_SHD) ||
- (cmd & GF_CLI_STATUS_QUOTAD))
+ (cmd & GF_CLI_STATUS_QUOTAD) || (cmd & GF_CLI_STATUS_SNAPD))
notbrick = _gf_true;
if (global_state->mode & GLUSTER_MODE_XML) {
@@ -6647,7 +6647,8 @@ gf_cli_status_cbk (struct rpc_req *req, struct iovec *iov,
memset (status.brick, 0, PATH_MAX + 255);
if (!strcmp (hostname, "NFS Server") ||
!strcmp (hostname, "Self-heal Daemon") ||
- !strcmp (hostname, "Quota Daemon"))
+ !strcmp (hostname, "Quota Daemon") ||
+ !strcmp (hostname, "Snap Daemon"))
snprintf (status.brick, PATH_MAX + 255, "%s on %s",
hostname, path);
else
diff --git a/rpc/xdr/src/cli1-xdr.x b/rpc/xdr/src/cli1-xdr.x
index f852ab65070..3c43e374d95 100644
--- a/rpc/xdr/src/cli1-xdr.x
+++ b/rpc/xdr/src/cli1-xdr.x
@@ -135,7 +135,8 @@ enum gf_cli_status_type {
GF_CLI_STATUS_BRICK = 0x0400, /*00010000000000*/
GF_CLI_STATUS_NFS = 0x0800, /*00100000000000*/
GF_CLI_STATUS_SHD = 0x1000, /*01000000000000*/
- GF_CLI_STATUS_QUOTAD = 0x2000 /*10000000000000*/
+ GF_CLI_STATUS_QUOTAD = 0x2000, /*10000000000000*/
+ GF_CLI_STATUS_SNAPD = 0x4000 /*100000000000000*/
};
/* Identifiers for snapshot clis */
diff --git a/tests/bugs/bug-1111041.t b/tests/bugs/bug-1111041.t
new file mode 100644
index 00000000000..d6cf2e91df0
--- /dev/null
+++ b/tests/bugs/bug-1111041.t
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../fileio.rc
+. $(dirname $0)/../nfs.rc
+
+cleanup;
+
+function is_snapd_running {
+ $CLI volume status $1 | grep "Snap Daemon" | wc -l;
+}
+
+TEST glusterd;
+
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+
+TEST $CLI volume start $V0;
+
+EXPECT "0" is_snapd_running $V0
+
+TEST $CLI volume set $V0 features.uss enable;
+
+EXPECT "1" is_snapd_running $V0
+
+SNAPD_PID=$(ps aux | grep snapd | grep -v grep | awk '{print $2}');
+
+TEST [ $SNAPD_PID -gt 0 ];
+
+SNAPD_PID2=$($CLI volume status $V0 | grep "Snap Daemon" | awk {'print $7'});
+
+TEST [ $SNAPD_PID -eq $SNAPD_PID2 ]
+
+cleanup ;
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 9d952cec4d7..ef9888b3537 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -3830,6 +3830,16 @@ __glusterd_handle_status_volume (rpcsvc_request_t *req)
goto out;
}
+ if ((cmd & GF_CLI_STATUS_SNAPD) &&
+ (conf->op_version < GD_OP_VERSION_3_6_0)) {
+ snprintf (err_str, sizeof (err_str), "The cluster is operating "
+ "at a lesser version than %d. Getting the status of "
+ "snapd is not allowed in this state",
+ GD_OP_VERSION_3_6_0);
+ ret = -1;
+ goto out;
+ }
+
ret = glusterd_op_begin_synctask (req, GD_OP_STATUS_VOLUME, dict);
out:
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index c9776366715..7174a9376de 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -1262,6 +1262,16 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
goto out;
}
+ if ((cmd & GF_CLI_STATUS_SNAPD) &&
+ (priv->op_version < GD_OP_VERSION_3_6_0)) {
+ snprintf (msg, sizeof (msg), "The cluster is operating at "
+ "version less than %d. Getting the "
+ "status of snapd is not allowed in this state.",
+ GD_OP_VERSION_3_6_0);
+ ret = -1;
+ goto out;
+ }
+
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Unable to get volume name");
@@ -1325,6 +1335,13 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
"quota enabled", volname);
goto out;
}
+ } else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
+ if (!glusterd_is_snapd_enabled (volinfo)) {
+ ret = -1;
+ snprintf (msg, sizeof (msg), "Volume %s does not have "
+ "uss enabled", volname);
+ goto out;
+ }
} else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
ret = dict_get_str (dict, "brick", &brick);
if (ret)
@@ -2661,7 +2678,13 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
goto out;
other_count++;
node_count++;
-
+ } else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
+ ret = glusterd_add_node_to_dict ("snapd", rsp_dict, 0,
+ vol_opts);
+ if (ret)
+ goto out;
+ other_count++;
+ node_count++;
} else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
ret = dict_get_str (dict, "brick", &brick);
if (ret)
@@ -2708,6 +2731,16 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
if ((cmd & GF_CLI_STATUS_MASK) == GF_CLI_STATUS_NONE) {
other_index = brick_index + 1;
+ if (glusterd_is_snapd_enabled (volinfo)) {
+ ret = glusterd_add_snapd_to_dict (volinfo,
+ rsp_dict,
+ other_index);
+ if (ret)
+ goto out;
+ other_count++;
+ other_index++;
+ node_count++;
+ }
nfs_disabled = dict_get_str_boolean (vol_opts,
"nfs.disable",
@@ -5717,6 +5750,7 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
glusterd_pending_node_t *pending_node = NULL;
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
+ glusterd_snapd_t *snapd = NULL;
GF_ASSERT (dict);
@@ -5743,6 +5777,7 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
case GF_CLI_STATUS_NFS:
case GF_CLI_STATUS_SHD:
case GF_CLI_STATUS_QUOTAD:
+ case GF_CLI_STATUS_SNAPD:
break;
default:
goto out;
@@ -5843,6 +5878,28 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
list_add_tail (&pending_node->list, selected);
ret = 0;
+ } else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
+ if (!glusterd_is_snapd_online (volinfo)) {
+ gf_log (this->name, GF_LOG_ERROR, "snapd is not "
+ "running");
+ ret = -1;
+ goto out;
+ }
+ pending_node = GF_CALLOC (1, sizeof (*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to allocate "
+ "memory for pending node");
+ ret = -1;
+ goto out;
+ }
+
+ pending_node->node = (void *)(&volinfo->snapd);
+ pending_node->type = GD_NODE_SNAPD;
+ pending_node->index = 0;
+ list_add_tail (&pending_node->list, selected);
+
+ ret = 0;
} else {
list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
brick_index++;
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 7c7be9355b8..10d5d7f2752 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -1886,6 +1886,7 @@ glusterd_brick_op (call_frame_t *frame, xlator_t *this,
if ((pending_node->type == GD_NODE_NFS) ||
(pending_node->type == GD_NODE_QUOTAD) ||
+ (pending_node->type == GD_NODE_SNAPD) ||
((pending_node->type == GD_NODE_SHD) &&
(req_ctx->op == GD_OP_STATUS_VOLUME)))
ret = glusterd_node_op_build_payload
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
index 839d77b7f8d..0177ed169fc 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.c
+++ b/xlators/mgmt/glusterd/src/glusterd-store.c
@@ -131,6 +131,24 @@ glusterd_store_brickinfopath_set (glusterd_volinfo_t *volinfo,
snprintf (brickpath, len, "%s/%s", brickdirpath, brickfname);
}
+static void
+glusterd_store_snapd_path_set (glusterd_volinfo_t *volinfo,
+ char *snapd_path, size_t len)
+{
+ char volpath[PATH_MAX] = {0, };
+ glusterd_conf_t *priv = NULL;
+
+ GF_ASSERT (volinfo);
+ GF_ASSERT (len >= PATH_MAX);
+
+ priv = THIS->private;
+ GF_ASSERT (priv);
+
+ GLUSTERD_GET_VOLUME_DIR (volpath, volinfo, priv);
+
+ snprintf (snapd_path, len, "%s/%s", volpath, GLUSTERD_VOLUME_SNAPD_INFO_FILE);
+}
+
gf_boolean_t
glusterd_store_is_valid_brickpath (char *volname, char *brick)
{
@@ -249,6 +267,21 @@ glusterd_store_create_brick_shandle_on_absence (glusterd_volinfo_t *volinfo,
return ret;
}
+int32_t
+glusterd_store_create_snapd_shandle_on_absence (glusterd_volinfo_t *volinfo)
+{
+ char snapd_path[PATH_MAX] = {0,};
+ int32_t ret = 0;
+
+ GF_ASSERT (volinfo);
+
+ glusterd_store_snapd_path_set (volinfo, snapd_path,
+ sizeof (snapd_path));
+ ret = gf_store_handle_create_on_absence (&volinfo->snapd.handle,
+ snapd_path);
+ return ret;
+}
+
/* Store the bricks snapshot details only if required
*
* The snapshot details will be stored only if the cluster op-version is
@@ -353,6 +386,30 @@ out:
}
int32_t
+glusterd_store_snapd_write (int fd, glusterd_volinfo_t *volinfo)
+{
+ char value[256] = {0,};
+ int32_t ret = 0;
+ xlator_t *this = NULL;
+
+ GF_ASSERT (volinfo);
+ GF_ASSERT (fd > 0);
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ snprintf (value, sizeof(value), "%d", volinfo->snapd.port);
+ ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_SNAPD_PORT, value);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR, "failed to store the snapd "
+ "port of volume %s", volinfo->volname);
+
+
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
glusterd_store_perform_brick_store (glusterd_brickinfo_t *brickinfo)
{
int fd = -1;
@@ -377,6 +434,42 @@ out:
}
int32_t
+glusterd_store_perform_snapd_store (glusterd_volinfo_t *volinfo)
+{
+ int fd = -1;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ GF_ASSERT (volinfo);
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ fd = gf_store_mkstemp (volinfo->snapd.handle);
+ if (fd <= 0) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to create the "
+ "temporary file for the snapd store handle of volume "
+ "%s", volinfo->volname);
+ goto out;
+ }
+
+ ret = glusterd_store_snapd_write (fd, volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to write snapd port "
+ "info to store handle (volume: %s", volinfo->volname);
+ goto out;
+ }
+
+ ret = gf_store_rename_tmppath (volinfo->snapd.handle);
+
+out:
+ if (ret && (fd > 0))
+ gf_store_unlink_tmppath (volinfo->snapd.handle);
+ gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
glusterd_store_brickinfo (glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t *brickinfo, int32_t brick_count,
int vol_fd)
@@ -407,6 +500,37 @@ out:
}
int32_t
+glusterd_store_snapd_info (glusterd_volinfo_t *volinfo)
+{
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ GF_ASSERT (volinfo);
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ ret = glusterd_store_create_snapd_shandle_on_absence (volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to create store "
+ "handle for snapd (volume: %s)", volinfo->volname);
+ goto out;
+ }
+
+ ret = glusterd_store_perform_snapd_store (volinfo);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR, "failed to store snapd info "
+ "of the volume %s", volinfo->volname);
+
+out:
+ if (ret)
+ gf_store_unlink_tmppath (volinfo->snapd.handle);
+
+ gf_log (this->name, GF_LOG_DEBUG, "Returning with %d", ret);
+ return ret;
+}
+
+int32_t
glusterd_store_delete_brick (glusterd_brickinfo_t *brickinfo, char *delete_path)
{
int32_t ret = -1;
@@ -654,6 +778,11 @@ glusterd_volume_write_snap_details (int fd, glusterd_volinfo_t *volinfo)
goto out;
}
+ ret = glusterd_store_snapd_info (volinfo);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR, "snapd info store failed "
+ "volume: %s", volinfo->volname);
+
out:
if (ret)
gf_log (this->name, GF_LOG_ERROR, "Failed to write snap details"
@@ -1329,6 +1458,8 @@ glusterd_store_volume_cleanup_tmp (glusterd_volinfo_t *volinfo)
gf_store_unlink_tmppath (volinfo->rb_shandle);
gf_store_unlink_tmppath (volinfo->node_state_shandle);
+
+ gf_store_unlink_tmppath (volinfo->snapd.handle);
}
int32_t
@@ -1948,6 +2079,81 @@ out:
return ret;
}
+int
+glusterd_store_retrieve_snapd (glusterd_volinfo_t *volinfo)
+{
+ int ret = -1;
+ int exists = 0;
+ char *key = NULL;
+ char *value = NULL;
+ char volpath[PATH_MAX] = {0,};
+ char path[PATH_MAX] = {0,};
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ gf_store_iter_t *iter = NULL;
+ gf_store_op_errno_t op_errno = GD_STORE_SUCCESS;
+
+ this = THIS;
+ GF_ASSERT (this);
+ conf = THIS->private;
+ GF_ASSERT (volinfo);
+
+ if (conf->op_version < GD_OP_VERSION_3_6_0) {
+ ret = 0;
+ goto out;
+ }
+
+ GLUSTERD_GET_VOLUME_DIR(volpath, volinfo, conf);
+
+ snprintf (path, sizeof (path), "%s/%s", volpath,
+ GLUSTERD_VOLUME_SNAPD_INFO_FILE);
+
+ ret = gf_store_handle_retrieve (path, &volinfo->snapd.handle);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "volinfo handle is NULL");
+ goto out;
+ }
+
+ ret = gf_store_iter_new (volinfo->snapd.handle, &iter);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get new store "
+ "iter");
+ goto out;
+ }
+
+ ret = gf_store_iter_get_next (iter, &key, &value, &op_errno);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get next store "
+ "iter");
+ goto out;
+ }
+
+ while (!ret) {
+ if (!strncmp (key, GLUSTERD_STORE_KEY_SNAPD_PORT,
+ strlen (GLUSTERD_STORE_KEY_SNAPD_PORT))) {
+ volinfo->snapd.port = atoi (value);
+ }
+ GF_FREE (key); GF_FREE (value); key = NULL; value = NULL;
+ ret = gf_store_iter_get_next (iter, &key, &value,
+ &op_errno);
+ }
+
+ if (op_errno != GD_STORE_EOF)
+ goto out;
+
+ ret = gf_store_iter_destroy (iter);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to destroy store "
+ "iter");
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
int32_t
glusterd_store_retrieve_bricks (glusterd_volinfo_t *volinfo)
{
@@ -2452,7 +2658,8 @@ glusterd_store_update_volinfo (glusterd_volinfo_t *volinfo)
"failed to parse restored snap's uuid");
} else if (!strncmp (key, GLUSTERD_STORE_KEY_PARENT_VOLNAME,
strlen (GLUSTERD_STORE_KEY_PARENT_VOLNAME))) {
- strncpy (volinfo->parent_volname, value, sizeof(volinfo->parent_volname) - 1);
+ strncpy (volinfo->parent_volname, value,
+ sizeof(volinfo->parent_volname) - 1);
} else {
if (is_key_glusterd_hooks_friendly (key)) {
@@ -2598,6 +2805,10 @@ glusterd_store_retrieve_volume (char *volname, glusterd_snap_t *snap)
if (ret)
goto out;
+ ret = glusterd_store_retrieve_snapd (volinfo);
+ if (ret)
+ goto out;
+
ret = glusterd_compute_cksum (volinfo, _gf_false);
if (ret)
goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.h b/xlators/mgmt/glusterd/src/glusterd-store.h
index ba3662bc7b2..4c0f0d42321 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.h
+++ b/xlators/mgmt/glusterd/src/glusterd-store.h
@@ -71,6 +71,7 @@ typedef enum glusterd_store_ver_ac_{
#define GLUSTERD_STORE_KEY_SNAP_MAX_HARD_LIMIT "snap-max-hard-limit"
#define GLUSTERD_STORE_KEY_SNAP_AUTO_DELETE "auto-delete"
#define GLUSTERD_STORE_KEY_SNAP_MAX_SOFT_LIMIT "snap-max-soft-limit"
+#define GLUSTERD_STORE_KEY_SNAPD_PORT "snapd-port"
#define GLUSTERD_STORE_KEY_BRICK_HOSTNAME "hostname"
#define GLUSTERD_STORE_KEY_BRICK_PATH "path"
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 6ff444862a4..04a2e62ee65 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -5781,6 +5781,7 @@ glusterd_pending_node_get_rpc (glusterd_pending_node_t *pending_node)
glusterd_volinfo_t *volinfo = NULL;
nodesrv_t *nfs = NULL;
nodesrv_t *quotad = NULL;
+ glusterd_snapd_t *snapd = NULL;
GF_VALIDATE_OR_GOTO (THIS->name, pending_node, out);
GF_VALIDATE_OR_GOTO (THIS->name, pending_node->node, out);
@@ -5805,7 +5806,9 @@ glusterd_pending_node_get_rpc (glusterd_pending_node_t *pending_node)
} else if (pending_node->type == GD_NODE_QUOTAD) {
quotad = pending_node->node;
rpc = quotad->rpc;
-
+ } else if (pending_node->type == GD_NODE_SNAPD) {
+ snapd = pending_node->node;
+ rpc = snapd->rpc;
} else {
GF_ASSERT (0);
}
@@ -7495,6 +7498,67 @@ out:
}
int32_t
+glusterd_add_snapd_to_dict (glusterd_volinfo_t *volinfo,
+ dict_t *dict, int32_t count)
+{
+
+ int ret = -1;
+ int32_t pid = -1;
+ int32_t brick_online = -1;
+ char key[1024] = {0};
+ char base_key[1024] = {0};
+ char pidfile[PATH_MAX] = {0};
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+
+
+ GF_ASSERT (volinfo);
+ GF_ASSERT (dict);
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ priv = this->private;
+
+ snprintf (base_key, sizeof (base_key), "brick%d", count);
+ snprintf (key, sizeof (key), "%s.hostname", base_key);
+ ret = dict_set_str (dict, key, "Snap Daemon");
+ if (ret)
+ goto out;
+
+ snprintf (key, sizeof (key), "%s.path", base_key);
+ ret = dict_set_dynstr (dict, key, gf_strdup (uuid_utoa (MY_UUID)));
+ if (ret)
+ goto out;
+
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "%s.port", base_key);
+ ret = dict_set_int32 (dict, key, volinfo->snapd.port);
+ if (ret)
+ goto out;
+
+ glusterd_get_snapd_pidfile (volinfo, pidfile, sizeof (pidfile));
+
+ brick_online = gf_is_service_running (pidfile, &pid);
+
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "%s.pid", base_key);
+ ret = dict_set_int32 (dict, key, pid);
+ if (ret)
+ goto out;
+
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "%s.status", base_key);
+ ret = dict_set_int32 (dict, key, brick_online);
+
+out:
+ if (ret)
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
+}
+
+int32_t
glusterd_get_all_volnames (dict_t *dict)
{
int ret = -1;
@@ -13497,6 +13561,7 @@ glusterd_handle_snapd_option (glusterd_volinfo_t *volinfo)
volinfo->volname);
goto out;
}
+ volinfo->snapd.port = 0;
}
out:
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index 320bc20cdd2..3edb0c55db4 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -442,6 +442,10 @@ glusterd_add_brick_to_dict (glusterd_volinfo_t *volinfo,
dict_t *dict, int32_t count);
int32_t
+glusterd_add_snapd_to_dict (glusterd_volinfo_t *volinfo,
+ dict_t *dict, int32_t count);
+
+int32_t
glusterd_get_all_volnames (dict_t *dict);
gf_boolean_t
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index b8e8933258a..a8ecb505a5b 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -123,9 +123,10 @@ typedef struct {
} nodesrv_t;
typedef struct {
- struct rpc_clnt *rpc;
- int port;
- gf_boolean_t online;
+ struct rpc_clnt *rpc;
+ int port;
+ gf_boolean_t online;
+ gf_store_handle_t *handle;
} glusterd_snapd_t;
typedef struct {
@@ -426,6 +427,7 @@ typedef enum gd_node_type_ {
GD_NODE_REBALANCE,
GD_NODE_NFS,
GD_NODE_QUOTAD,
+ GD_NODE_SNAPD,
} gd_node_type;
typedef enum missed_snap_stat {
@@ -466,6 +468,7 @@ enum glusterd_vol_comp_status_ {
#define GLUSTERD_VOLUME_DIR_PREFIX "vols"
#define GLUSTERD_PEER_DIR_PREFIX "peers"
#define GLUSTERD_VOLUME_INFO_FILE "info"
+#define GLUSTERD_VOLUME_SNAPD_INFO_FILE "snapd.info"
#define GLUSTERD_SNAP_INFO_FILE "info"
#define GLUSTERD_VOLUME_RBSTATE_FILE "rbstate"
#define GLUSTERD_BRICK_INFO_DIR "bricks"