author     Kaushal M <kaushal@redhat.com>       2012-03-27 15:52:47 +0530
committer  Vijay Bellur <vijay@gluster.com>     2012-03-29 07:47:53 -0700
commit     2dea3b34755d9af1d1c1ffe517c6a087cf44512a (patch)
tree       997887495dc88ae47210fd68f32ae3bba7a4231d
parent     735714edca08e2de16c0e447f8c3256913186ce6 (diff)
cli,glusterd: more volume status improvements
The major changes are:
 * "volume status" now supports getting details of the self-heal daemon
   processes for replica volumes. A new cli option "shd", similar to "nfs",
   has been introduced for this. "detail", "fd" and "clients" status ops are
   not supported for self-heal daemons.
 * The default/normal output of "volume status" has been enhanced to contain
   information about the nfs-server and self-heal daemon processes as well.
   Some tweaks have been done to the cli output to show appropriate output.
 * Changes have also been made to rebalance/remove-brick status, so that
   hostnames are displayed instead of uuids.

Change-Id: I3972396dcf72d45e14837fa5f9c7d62410901df8
BUG: 803676
Signed-off-by: Kaushal M <kaushal@redhat.com>
Reviewed-on: http://review.gluster.com/3016
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Krishnan Parthasarathi <kp@gluster.com>
Reviewed-by: Vijay Bellur <vijay@gluster.com>
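For illustration only (not part of the patch), here is a minimal, self-contained C sketch of the bit scheme the new "shd" option relies on: shd_status_cmd() is a hypothetical condensation of the cli-cmd-parser.c change further down, and the GF_CLI_STATUS_* values are copied from the cli1-xdr.h hunk in this commit.

    #include <stdio.h>

    /* Status bits copied from the cli1-xdr.h hunk in this patch. */
    enum gf_cli_status_type {
            GF_CLI_STATUS_NONE    = 0x0000,
            GF_CLI_STATUS_MEM     = 0x0001,
            GF_CLI_STATUS_CLIENTS = 0x0002,
            GF_CLI_STATUS_FD      = 0x0008,
            GF_CLI_STATUS_DETAIL  = 0x0020,
            GF_CLI_STATUS_SHD     = 0x1000,
    };

    /* Hypothetical helper mirroring the parser check: returns the combined
     * cmd for "volume status <VOLNAME> shd <subop>", or 0 when the sub-op
     * (detail/fd/clients) is not supported for self-heal daemons. */
    static unsigned int
    shd_status_cmd (unsigned int subop)
    {
            if (subop == GF_CLI_STATUS_FD || subop == GF_CLI_STATUS_CLIENTS ||
                subop == GF_CLI_STATUS_DETAIL)
                    return 0;
            return subop | GF_CLI_STATUS_SHD;
    }

    int
    main (void)
    {
            /* "volume status <VOLNAME> shd mem" is accepted ...           */
            printf ("shd mem -> 0x%04x\n", shd_status_cmd (GF_CLI_STATUS_MEM));
            /* ... while "volume status <VOLNAME> shd fd" is rejected (0). */
            printf ("shd fd  -> 0x%04x\n", shd_status_cmd (GF_CLI_STATUS_FD));
            return 0;
    }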
-rw-r--r--   cli/src/cli-cmd-parser.c                        18
-rw-r--r--   cli/src/cli-cmd-volume.c                        13
-rw-r--r--   cli/src/cli-rpc-ops.c                          190
-rw-r--r--   cli/src/cli-xml-output.c                        42
-rw-r--r--   glusterfsd/src/glusterfsd-mgmt.c                78
-rw-r--r--   rpc/rpc-lib/src/protocol-common.h                4
-rw-r--r--   rpc/xdr/src/cli1-xdr.h                          25
-rw-r--r--   rpc/xdr/src/cli1-xdr.x                          25
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-op-sm.c     459
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-op-sm.h       5
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-rpc-ops.c    60
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-utils.c      12
12 files changed, 648 insertions, 283 deletions
diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index 4c86ab488c5..ba9ca63d849 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -1905,6 +1905,8 @@ cli_cmd_volume_status_parse (const char **words, int wordcount,
if (cmd == GF_CLI_STATUS_NONE) {
if (!strcmp (words[3], "nfs")) {
cmd |= GF_CLI_STATUS_NFS;
+ } else if (!strcmp (words[3], "shd")) {
+ cmd |= GF_CLI_STATUS_SHD;
} else {
cmd = GF_CLI_STATUS_BRICK;
ret = dict_set_str (dict, "brick",
@@ -1940,12 +1942,24 @@ cli_cmd_volume_status_parse (const char **words, int wordcount,
goto out;
if (!strcmp (words[3], "nfs")) {
- if (cmd == GF_CLI_STATUS_FD) {
- cli_out ("FD status not available for NFS");
+ if (cmd == GF_CLI_STATUS_FD ||
+ cmd == GF_CLI_STATUS_DETAIL) {
+ cli_out ("Detail/FD status not available"
+ " for NFS Servers");
ret = -1;
goto out;
}
cmd |= GF_CLI_STATUS_NFS;
+ } else if (!strcmp (words[3], "shd")){
+ if (cmd == GF_CLI_STATUS_FD ||
+ cmd == GF_CLI_STATUS_CLIENTS ||
+ cmd == GF_CLI_STATUS_DETAIL) {
+ cli_out ("Detail/FD/Clients status not "
+ "available for Self-heal Daemons");
+ ret = -1;
+ goto out;
+ }
+ cmd |= GF_CLI_STATUS_SHD;
} else {
cmd |= GF_CLI_STATUS_BRICK;
ret = dict_set_str (dict, "brick", (char *)words[3]);
diff --git a/cli/src/cli-cmd-volume.c b/cli/src/cli-cmd-volume.c
index aca5b1b3210..e045964a7b9 100644
--- a/cli/src/cli-cmd-volume.c
+++ b/cli/src/cli-cmd-volume.c
@@ -1559,9 +1559,14 @@ cli_print_brick_status (cli_volume_status_t *status)
printf ("%s", p);
while (num_tabs-- != 0)
printf ("\t");
- cli_out ("%d\t%c\t%s",
- status->port, status->online?'Y':'N',
- status->pid_str);
+ if (status->port)
+ cli_out ("%d\t%c\t%s",
+ status->port, status->online?'Y':'N',
+ status->pid_str);
+ else
+ cli_out ("%s\t%c\t%s",
+ "N/A", status->online?'Y':'N',
+ status->pid_str);
bricklen = 0;
}
}
@@ -1838,7 +1843,7 @@ struct cli_cmd volume_cmds[] = {
cli_cmd_volume_top_cbk,
"volume top operations"},
- { "volume status [all | <VOLNAME> [nfs|<BRICK>]]"
+ { "volume status [all | <VOLNAME> [nfs|shd|<BRICK>]]"
" [detail|clients|mem|inode|fd|callpool]",
cli_cmd_volume_status_cbk,
"display status of all or specified volume(s)/brick"},
diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index d75d095b555..6deda9c0007 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -4308,7 +4308,7 @@ out:
}
void
-cli_print_volume_status_mem (dict_t *dict, gf_boolean_t nfs)
+cli_print_volume_status_mem (dict_t *dict, gf_boolean_t notbrick)
{
int ret = -1;
char *volname = NULL;
@@ -4316,7 +4316,9 @@ cli_print_volume_status_mem (dict_t *dict, gf_boolean_t nfs)
char *path = NULL;
int online = -1;
char key[1024] = {0,};
- int brick_count = 0;
+ int brick_index_max = -1;
+ int other_count = 0;
+ int index_max = 0;
int val = 0;
int i = 0;
@@ -4327,24 +4329,29 @@ cli_print_volume_status_mem (dict_t *dict, gf_boolean_t nfs)
goto out;
cli_out ("Memory status for volume : %s", volname);
- ret = dict_get_int32 (dict, "count", &brick_count);
+ ret = dict_get_int32 (dict, "brick-index-max", &brick_index_max);
+ if (ret)
+ goto out;
+ ret = dict_get_int32 (dict, "other-count", &other_count);
if (ret)
goto out;
- for (i = 0; i < brick_count; i++) {
+ index_max = brick_index_max + other_count;
+
+ for (i = 0; i <= index_max; i++) {
cli_out ("----------------------------------------------");
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "brick%d.hostname", i);
ret = dict_get_str (dict, key, &hostname);
if (ret)
- goto out;
+ continue;
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "brick%d.path", i);
ret = dict_get_str (dict, key, &path);
if (ret)
- goto out;
- if (nfs)
+ continue;
+ if (notbrick)
cli_out ("%s : %s", hostname, path);
else
cli_out ("Brick : %s:%s", hostname, path);
@@ -4355,7 +4362,10 @@ cli_print_volume_status_mem (dict_t *dict, gf_boolean_t nfs)
if (ret)
goto out;
if (!online) {
- cli_out ("Brick is offline");
+ if (notbrick)
+ cli_out ("%s is offline", hostname);
+ else
+ cli_out ("Brick is offline");
continue;
}
@@ -4442,11 +4452,13 @@ out:
}
void
-cli_print_volume_status_clients (dict_t *dict, gf_boolean_t nfs)
+cli_print_volume_status_clients (dict_t *dict, gf_boolean_t notbrick)
{
int ret = -1;
char *volname = NULL;
- int brick_count = 0;
+ int brick_index_max = -1;
+ int other_count = 0;
+ int index_max = 0;
char *hostname = NULL;
char *path = NULL;
int online = -1;
@@ -4465,11 +4477,16 @@ cli_print_volume_status_clients (dict_t *dict, gf_boolean_t nfs)
goto out;
cli_out ("Client connections for volume %s", volname);
- ret = dict_get_int32 (dict, "count", &brick_count);
+ ret = dict_get_int32 (dict, "brick-index-max", &brick_index_max);
+ if (ret)
+ goto out;
+ ret = dict_get_int32 (dict, "other-count", &other_count);
if (ret)
goto out;
- for ( i = 0; i < brick_count; i++) {
+ index_max = brick_index_max + other_count;
+
+ for (i = 0; i <= index_max; i++) {
cli_out ("----------------------------------------------");
memset (key, 0, sizeof (key));
@@ -4483,7 +4500,7 @@ cli_print_volume_status_clients (dict_t *dict, gf_boolean_t nfs)
if (ret)
goto out;
- if (nfs)
+ if (notbrick)
cli_out ("%s : %s", hostname, path);
else
cli_out ("Brick : %s:%s", hostname, path);
@@ -4494,7 +4511,10 @@ cli_print_volume_status_clients (dict_t *dict, gf_boolean_t nfs)
if (ret)
goto out;
if (!online) {
- cli_out ("Brick is offline");
+ if (notbrick)
+ cli_out ("%s is offline", hostname);
+ else
+ cli_out ("Brick is offline");
continue;
}
@@ -4690,11 +4710,13 @@ out:
}
void
-cli_print_volume_status_inode (dict_t *dict, gf_boolean_t nfs)
+cli_print_volume_status_inode (dict_t *dict, gf_boolean_t notbrick)
{
int ret = -1;
char *volname = NULL;
- int brick_count = 0;
+ int brick_index_max = -1;
+ int other_count = 0;
+ int index_max = 0;
char *hostname = NULL;
char *path = NULL;
int online = -1;
@@ -4710,11 +4732,16 @@ cli_print_volume_status_inode (dict_t *dict, gf_boolean_t nfs)
goto out;
cli_out ("Inode tables for volume %s", volname);
- ret = dict_get_int32 (dict, "count", &brick_count);
+ ret = dict_get_int32 (dict, "brick-index-max", &brick_index_max);
if (ret)
goto out;
+ ret = dict_get_int32 (dict, "other-count", &other_count);
+ if (ret)
+ goto out;
+
+ index_max = brick_index_max + other_count;
- for (i = 0; i < brick_count; i++) {
+ for ( i = 0; i <= index_max; i++) {
cli_out ("----------------------------------------------");
memset (key, 0, sizeof (key));
@@ -4727,7 +4754,7 @@ cli_print_volume_status_inode (dict_t *dict, gf_boolean_t nfs)
ret = dict_get_str (dict, key, &path);
if (ret)
goto out;
- if (nfs)
+ if (notbrick)
cli_out ("%s : %s", hostname, path);
else
cli_out ("Brick : %s:%s", hostname, path);
@@ -4738,7 +4765,10 @@ cli_print_volume_status_inode (dict_t *dict, gf_boolean_t nfs)
if (ret)
goto out;
if (!online) {
- cli_out ("Brick is offline");
+ if (notbrick)
+ cli_out ("%s is offline", hostname);
+ else
+ cli_out ("Brick is offline");
continue;
}
@@ -4846,11 +4876,13 @@ out:
}
void
-cli_print_volume_status_fd (dict_t *dict, gf_boolean_t nfs)
+cli_print_volume_status_fd (dict_t *dict, gf_boolean_t notbrick)
{
int ret = -1;
char *volname = NULL;
- int brick_count = 0;
+ int brick_index_max = -1;
+ int other_count = 0;
+ int index_max = 0;
char *hostname = NULL;
char *path = NULL;
int online = -1;
@@ -4866,11 +4898,16 @@ cli_print_volume_status_fd (dict_t *dict, gf_boolean_t nfs)
goto out;
cli_out ("FD tables for volume %s", volname);
- ret = dict_get_int32 (dict, "count", &brick_count);
+ ret = dict_get_int32 (dict, "brick-index-max", &brick_index_max);
+ if (ret)
+ goto out;
+ ret = dict_get_int32 (dict, "other-count", &other_count);
if (ret)
goto out;
- for (i = 0; i < brick_count; i++) {
+ index_max = brick_index_max + other_count;
+
+ for (i = 0; i <= index_max; i++) {
cli_out ("----------------------------------------------");
memset (key, 0, sizeof (key));
@@ -4884,7 +4921,7 @@ cli_print_volume_status_fd (dict_t *dict, gf_boolean_t nfs)
if (ret)
goto out;
- if (nfs)
+ if (notbrick)
cli_out ("%s : %s", hostname, path);
else
cli_out ("Brick : %s:%s", hostname, path);
@@ -4895,7 +4932,10 @@ cli_print_volume_status_fd (dict_t *dict, gf_boolean_t nfs)
if (ret)
goto out;
if (!online) {
- cli_out ("Brick is offline");
+ if (notbrick)
+ cli_out ("%s is offline", hostname);
+ else
+ cli_out ("Brick is offline");
continue;
}
@@ -5064,11 +5104,13 @@ cli_print_volume_status_call_stack (dict_t *dict, char *prefix)
}
void
-cli_print_volume_status_callpool (dict_t *dict, gf_boolean_t nfs)
+cli_print_volume_status_callpool (dict_t *dict, gf_boolean_t notbrick)
{
int ret = -1;
char *volname = NULL;
- int brick_count = 0;
+ int brick_index_max = -1;
+ int other_count = 0;
+ int index_max = 0;
char *hostname = NULL;
char *path = NULL;
int online = -1;
@@ -5084,11 +5126,16 @@ cli_print_volume_status_callpool (dict_t *dict, gf_boolean_t nfs)
goto out;
cli_out ("Pending calls for volume %s", volname);
- ret = dict_get_int32 (dict, "count", &brick_count);
+ ret = dict_get_int32 (dict, "brick-index-max", &brick_index_max);
+ if (ret)
+ goto out;
+ ret = dict_get_int32 (dict, "other-count", &other_count);
if (ret)
goto out;
- for (i = 0; i < brick_count; i++) {
+ index_max = brick_index_max + other_count;
+
+ for (i = 0; i <= index_max; i++) {
cli_out ("----------------------------------------------");
memset (key, 0, sizeof (key));
@@ -5102,7 +5149,7 @@ cli_print_volume_status_callpool (dict_t *dict, gf_boolean_t nfs)
if (ret)
goto out;
- if (nfs)
+ if (notbrick)
cli_out ("%s : %s", hostname, path);
else
cli_out ("Brick : %s:%s", hostname, path);
@@ -5113,7 +5160,10 @@ cli_print_volume_status_callpool (dict_t *dict, gf_boolean_t nfs)
if (ret)
goto out;
if (!online) {
- cli_out ("Brick is offline");
+ if (notbrick)
+ cli_out ("%s is offline", hostname);
+ else
+ cli_out ("Brick is offline");
continue;
}
@@ -5146,18 +5196,21 @@ static int
gf_cli3_1_status_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
- int ret = -1;
- int i = 0;
- int pid = -1;
- uint32_t cmd = 0;
- gf_boolean_t nfs = _gf_false;
- char key[1024] = {0,};
- char *hostname = NULL;
- char *path = NULL;
- char *volname = NULL;
- dict_t *dict = NULL;
- gf_cli_rsp rsp = {0,};
- cli_volume_status_t status = {0};
+ int ret = -1;
+ int brick_index_max = -1;
+ int other_count = 0;
+ int index_max = 0;
+ int i = 0;
+ int pid = -1;
+ uint32_t cmd = 0;
+ gf_boolean_t notbrick = _gf_false;
+ char key[1024] = {0,};
+ char *hostname = NULL;
+ char *path = NULL;
+ char *volname = NULL;
+ dict_t *dict = NULL;
+ gf_cli_rsp rsp = {0,};
+ cli_volume_status_t status = {0};
if (req->rpc_status == -1)
goto out;
@@ -5195,8 +5248,8 @@ gf_cli3_1_status_cbk (struct rpc_req *req, struct iovec *iov,
goto out;
}
- if (cmd & GF_CLI_STATUS_NFS)
- nfs = _gf_true;
+ if ((cmd & GF_CLI_STATUS_NFS) || (cmd & GF_CLI_STATUS_SHD))
+ notbrick = _gf_true;
ret = dict_get_int32 (dict, "count", &count);
if (ret)
@@ -5206,6 +5259,15 @@ gf_cli3_1_status_cbk (struct rpc_req *req, struct iovec *iov,
goto out;
}
+ ret = dict_get_int32 (dict, "brick-index-max", &brick_index_max);
+ if (ret)
+ goto out;
+ ret = dict_get_int32 (dict, "other-count", &other_count);
+ if (ret)
+ goto out;
+
+ index_max = brick_index_max + other_count;
+
#if (HAVE_LIB_XML)
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_vol_status (dict, rsp.op_ret,
@@ -5222,23 +5284,23 @@ gf_cli3_1_status_cbk (struct rpc_req *req, struct iovec *iov,
switch (cmd & GF_CLI_STATUS_MASK) {
case GF_CLI_STATUS_MEM:
- cli_print_volume_status_mem (dict, nfs);
+ cli_print_volume_status_mem (dict, notbrick);
goto cont;
break;
case GF_CLI_STATUS_CLIENTS:
- cli_print_volume_status_clients (dict, nfs);
+ cli_print_volume_status_clients (dict, notbrick);
goto cont;
break;
case GF_CLI_STATUS_INODE:
- cli_print_volume_status_inode (dict, nfs);
+ cli_print_volume_status_inode (dict, notbrick);
goto cont;
break;
case GF_CLI_STATUS_FD:
- cli_print_volume_status_fd (dict, nfs);
+ cli_print_volume_status_fd (dict, notbrick);
goto cont;
break;
case GF_CLI_STATUS_CALLPOOL:
- cli_print_volume_status_callpool (dict, nfs);
+ cli_print_volume_status_callpool (dict, notbrick);
goto cont;
break;
default:
@@ -5252,46 +5314,54 @@ gf_cli3_1_status_cbk (struct rpc_req *req, struct iovec *iov,
cli_out ("\nStatus of volume: %s", volname);
if ((cmd & GF_CLI_STATUS_DETAIL) == 0) {
- cli_out ("Brick\t\t\t\t\t\t\tPort\tOnline\tPid");
+ cli_out ("Gluster process\t\t\t\t\t\tPort\tOnline\tPid");
cli_print_line (CLI_BRICK_STATUS_LINE_LEN);
}
- for (i = 0; i < count; i++) {
+ for (i = 0; i <= index_max; i++) {
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "brick%d.hostname", i);
ret = dict_get_str (dict, key, &hostname);
if (ret)
- goto out;
+ continue;
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "brick%d.path", i);
ret = dict_get_str (dict, key, &path);
if (ret)
- goto out;
+ continue;
+ /* Brick/not-brick is handled separately here as all
+ * types of nodes are contained in the default output
+ */
memset (status.brick, 0, PATH_MAX + 255);
- snprintf (status.brick, PATH_MAX + 255, "%s:%s",
- hostname, path);
+ if (!strcmp (hostname, "NFS Server") ||
+ !strcmp (hostname, "Self-heal Daemon"))
+ snprintf (status.brick, PATH_MAX + 255, "%s on %s",
+ hostname, path);
+ else
+ snprintf (status.brick, PATH_MAX + 255, "Brick %s:%s",
+ hostname, path);
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "brick%d.port", i);
ret = dict_get_int32 (dict, key, &(status.port));
if (ret)
- goto out;
+ continue;
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "brick%d.status", i);
ret = dict_get_int32 (dict, key, &(status.online));
if (ret)
- goto out;
+ continue;
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "brick%d.pid", i);
ret = dict_get_int32 (dict, key, &pid);
if (ret)
- goto out;
+ continue;
if (pid == -1)
ret = gf_asprintf (&(status.pid_str), "%s", "N/A");
else
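As a side note on the loops above, a small stand-alone sketch may make the new iteration bounds concrete. The node array below is only a stand-in for the response dict (the real code uses dict_get_str() on the "brick%d.hostname"/"brick%d.path" keys); the 0..brick-index-max+other-count range and the continue-on-missing-entry behaviour follow the hunks above, while the sample values are made up.

    #include <stdio.h>

    /* Stand-in for the status response dict: bricks occupy indices
     * 0..brick_index_max, the NFS server and self-heal daemon entries come
     * after that, and slots for bricks hosted on other peers may simply be
     * missing (the real code's dict_get_str() failure -> continue). */
    struct node_entry {
            const char *hostname;   /* NULL models a missing dict key */
            const char *path;
    };

    int
    main (void)
    {
            struct node_entry nodes[] = {
                    { "server1",          "/bricks/b1" },
                    { NULL,               NULL         }, /* brick on another peer */
                    { "NFS Server",       "server1"    },
                    { "Self-heal Daemon", "server1"    },
            };
            int brick_index_max = 1;  /* highest brick index           */
            int other_count     = 2;  /* nfs-server + self-heal daemon */
            int index_max       = brick_index_max + other_count;
            int i               = 0;

            for (i = 0; i <= index_max; i++) {
                    if (!nodes[i].hostname)
                            continue;
                    printf ("%s : %s\n", nodes[i].hostname, nodes[i].path);
            }
            return 0;
    }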
diff --git a/cli/src/cli-xml-output.c b/cli/src/cli-xml-output.c
index 7f8f9cfc45e..702a7f7feb1 100644
--- a/cli/src/cli-xml-output.c
+++ b/cli/src/cli-xml-output.c
@@ -189,7 +189,8 @@ out:
int
cli_xml_output_vol_status_common (xmlTextWriterPtr writer, dict_t *dict,
- int brick_index, int *online)
+ int brick_index, int *online,
+ gf_boolean_t *node_present)
{
int ret = -1;
char *hostname = NULL;
@@ -201,8 +202,12 @@ cli_xml_output_vol_status_common (xmlTextWriterPtr writer, dict_t *dict,
snprintf (key, sizeof (key), "brick%d.hostname", brick_index);
ret = dict_get_str (dict, key, &hostname);
- if (ret)
+ if (ret) {
+ *node_present = _gf_false;
goto out;
+ }
+ *node_present = _gf_true;
+
ret = xmlTextWriterWriteFormatElement (writer, (xmlChar *)"hostname",
"%s", hostname);
XML_RET_CHECK_AND_GOTO (ret, out);
@@ -1282,8 +1287,12 @@ cli_xml_output_vol_status (dict_t *dict, int op_ret, int op_errno,
xmlBufferPtr buf = NULL;
char *volname = NULL;
int brick_count = 0;
+ int brick_index_max = -1;
+ int other_count = 0;
+ int index_max = 0;
uint32_t cmd = GF_CLI_STATUS_NONE;
int online = 0;
+ gf_boolean_t node_present = _gf_true;
int i;
ret = cli_begin_xml_output (&writer, &buf);
@@ -1308,7 +1317,7 @@ cli_xml_output_vol_status (dict_t *dict, int op_ret, int op_errno,
ret = dict_get_int32 (dict, "count", &brick_count);
if (ret)
goto out;
- ret = xmlTextWriterWriteFormatElement (writer, (xmlChar *)"brickCount",
+ ret = xmlTextWriterWriteFormatElement (writer, (xmlChar *)"nodeCount",
"%d", brick_count);
if (ret)
goto out;
@@ -1317,15 +1326,28 @@ cli_xml_output_vol_status (dict_t *dict, int op_ret, int op_errno,
if (ret)
goto out;
- for (i = 0; i < brick_count; i++) {
- /* <brick> */
- ret = xmlTextWriterStartElement (writer, (xmlChar *)"brick");
+ ret = dict_get_int32 (dict, "brick-index-max", &brick_index_max);
+ if (ret)
+ goto out;
+ ret = dict_get_int32 (dict, "other-count", &other_count);
+ if (ret)
+ goto out;
+
+ index_max = brick_index_max + other_count;
+
+ for (i = 0; i <= index_max; i++) {
+ /* <node> */
+ ret = xmlTextWriterStartElement (writer, (xmlChar *)"node");
XML_RET_CHECK_AND_GOTO (ret, out);
ret = cli_xml_output_vol_status_common (writer, dict, i,
- &online);
- if (ret)
- goto out;
+ &online, &node_present);
+ if (ret) {
+ if (node_present)
+ goto out;
+ else
+ continue;
+ }
switch (cmd & GF_CLI_STATUS_MASK) {
case GF_CLI_STATUS_DETAIL:
@@ -1384,7 +1406,7 @@ cli_xml_output_vol_status (dict_t *dict, int op_ret, int op_errno,
break;
}
- /* </brick> */
+ /* </node> */
ret = xmlTextWriterEndElement (writer);
XML_RET_CHECK_AND_GOTO (ret, out);
}
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index 3b419262ac1..058236427b2 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -962,33 +962,35 @@ glusterfs_command_done (int ret, call_frame_t *sync_frame, void *data)
}
int
-glusterfs_handle_nfs_status (rpcsvc_request_t *req)
+glusterfs_handle_node_status (rpcsvc_request_t *req)
{
int ret = -1;
- gd1_mgmt_brick_op_req nfs_req = {0,};
+ gd1_mgmt_brick_op_req node_req = {0,};
gd1_mgmt_brick_op_rsp rsp = {0,};
glusterfs_ctx_t *ctx = NULL;
glusterfs_graph_t *active = NULL;
xlator_t *any = NULL;
- xlator_t *nfs = NULL;
+ xlator_t *node = NULL;
xlator_t *subvol = NULL;
dict_t *dict = NULL;
dict_t *output = NULL;
char *volname = NULL;
+ char *node_name = NULL;
+ char *subvol_name = NULL;
uint32_t cmd = 0;
char *msg = NULL;
GF_ASSERT (req);
- if (!xdr_to_generic (req->msg[0], &nfs_req,
+ if (!xdr_to_generic (req->msg[0], &node_req,
(xdrproc_t)xdr_gd1_mgmt_brick_op_req)) {
req->rpc_err = GARBAGE_ARGS;
goto out;
}
dict = dict_new ();
- ret = dict_unserialize (nfs_req.input.input_val,
- nfs_req.input.input_len, &dict);
+ ret = dict_unserialize (node_req.input.input_val,
+ node_req.input.input_len, &dict);
if (ret < 0) {
gf_log (THIS->name, GF_LOG_ERROR, "Failed to unserialize "
"req buffer to dictionary");
@@ -1012,19 +1014,47 @@ glusterfs_handle_nfs_status (rpcsvc_request_t *req)
active = ctx->active;
any = active->first;
- nfs = xlator_search_by_name (any, "nfs-server");
- if (!nfs) {
+ if ((cmd & GF_CLI_STATUS_NFS) != 0)
+ ret = gf_asprintf (&node_name, "%s", "nfs-server");
+ else if ((cmd & GF_CLI_STATUS_SHD) != 0)
+ ret = gf_asprintf (&node_name, "%s", "glustershd");
+ else {
ret = -1;
- gf_log (THIS->name, GF_LOG_ERROR, "nfs-server xlator is not"
- " loaded");
+ goto out;
+ }
+ if (ret == -1) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Failed to set node xlator name");
goto out;
}
- subvol = xlator_search_by_name (nfs, volname);
+ node = xlator_search_by_name (any, node_name);
+ if (!node) {
+ ret = -1;
+ gf_log (THIS->name, GF_LOG_ERROR, "%s xlator is not loaded",
+ node_name);
+ goto out;
+ }
+
+ if ((cmd & GF_CLI_STATUS_NFS) != 0)
+ ret = gf_asprintf (&subvol_name, "%s", volname);
+ else if ((cmd & GF_CLI_STATUS_SHD) != 0)
+ ret = gf_asprintf (&subvol_name, "%s-replicate-0", volname);
+ else {
+ ret = -1;
+ goto out;
+ }
+ if (ret == -1) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Failed to set node xlator name");
+ goto out;
+ }
+
+ subvol = xlator_search_by_name (node, subvol_name);
if (!subvol) {
ret = -1;
gf_log (THIS->name, GF_LOG_ERROR, "%s xlator is not loaded",
- volname);
+ subvol_name);
goto out;
}
@@ -1037,13 +1067,17 @@ glusterfs_handle_nfs_status (rpcsvc_request_t *req)
break;
case GF_CLI_STATUS_CLIENTS:
+ // clients not available for SHD
+ if ((cmd & GF_CLI_STATUS_SHD) != 0)
+ break;
+
ret = dict_set_str (output, "volname", volname);
if (ret) {
gf_log (THIS->name, GF_LOG_ERROR,
"Error setting volname to dict");
goto out;
}
- ret = nfs->dumpops->priv_to_dict (nfs, output);
+ ret = node->dumpops->priv_to_dict (node, output);
break;
case GF_CLI_STATUS_INODE:
@@ -1090,12 +1124,16 @@ glusterfs_handle_nfs_status (rpcsvc_request_t *req)
out:
if (dict)
dict_unref (dict);
- if (nfs_req.input.input_val)
- free (nfs_req.input.input_val);
+ if (node_req.input.input_val)
+ free (node_req.input.input_val);
if (msg)
GF_FREE (msg);
if (rsp.output.output_val)
GF_FREE (rsp.output.output_val);
+ if (node_name)
+ GF_FREE (node_name);
+ if (subvol_name)
+ GF_FREE (subvol_name);
gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
@@ -1224,11 +1262,11 @@ glusterfs_handle_rpc_msg (rpcsvc_request_t *req)
case GLUSTERD_BRICK_XLATOR_DEFRAG:
ret = glusterfs_handle_defrag (req);
break;
- case GLUSTERD_NFS_PROFILE:
+ case GLUSTERD_NODE_PROFILE:
ret = glusterfs_handle_nfs_profile (req);
break;
- case GLUSTERD_NFS_STATUS:
- ret = glusterfs_handle_nfs_status (req);
+ case GLUSTERD_NODE_STATUS:
+ ret = glusterfs_handle_node_status (req);
default:
break;
}
@@ -1287,8 +1325,8 @@ rpcsvc_actor_t glusterfs_actors[] = {
[GLUSTERD_BRICK_XLATOR_OP] = { "TRANSLATOR OP", GLUSTERD_BRICK_XLATOR_OP, glusterfs_handle_rpc_msg, NULL, NULL, 0},
[GLUSTERD_BRICK_STATUS] = {"STATUS", GLUSTERD_BRICK_STATUS, glusterfs_handle_rpc_msg, NULL, NULL, 0},
[GLUSTERD_BRICK_XLATOR_DEFRAG] = { "TRANSLATOR DEFRAG", GLUSTERD_BRICK_XLATOR_DEFRAG, glusterfs_handle_rpc_msg, NULL, NULL, 0},
- [GLUSTERD_NFS_PROFILE] = {"NFS PROFILE", GLUSTERD_NFS_PROFILE, glusterfs_handle_rpc_msg, NULL, NULL, 0},
- [GLUSTERD_NFS_STATUS] = {"NFS STATUS", GLUSTERD_NFS_STATUS, glusterfs_handle_rpc_msg, NULL, NULL, 0}
+ [GLUSTERD_NODE_PROFILE] = {"NFS PROFILE", GLUSTERD_NODE_PROFILE, glusterfs_handle_rpc_msg, NULL, NULL, 0},
+ [GLUSTERD_NODE_STATUS] = {"NFS STATUS", GLUSTERD_NODE_STATUS, glusterfs_handle_rpc_msg, NULL, NULL, 0}
};
struct rpcsvc_program glusterfs_mop_prog = {
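The xlator and subvolume names the new handler looks up can be summed up in a short sketch. pick_names() below is a hypothetical condensation of the selection logic in glusterfs_handle_node_status() above (the nfs-server xlator exports a subvolume named after the volume, while glustershd names its subvolumes "<volname>-replicate-0"); the two status bits are the values from cli1-xdr.h, and the volume name is illustrative.

    #include <stdio.h>

    #define GF_CLI_STATUS_NFS 0x0800
    #define GF_CLI_STATUS_SHD 0x1000

    /* Hypothetical condensation of the node/subvolume name selection done
     * by glusterfs_handle_node_status(). Returns -1 for any other cmd. */
    static int
    pick_names (unsigned int cmd, const char *volname,
                char *node_name, size_t nlen, char *subvol_name, size_t slen)
    {
            if (cmd & GF_CLI_STATUS_NFS) {
                    snprintf (node_name, nlen, "%s", "nfs-server");
                    snprintf (subvol_name, slen, "%s", volname);
            } else if (cmd & GF_CLI_STATUS_SHD) {
                    snprintf (node_name, nlen, "%s", "glustershd");
                    snprintf (subvol_name, slen, "%s-replicate-0", volname);
            } else {
                    return -1;
            }
            return 0;
    }

    int
    main (void)
    {
            char node[32]   = {0};
            char subvol[64] = {0};

            if (pick_names (GF_CLI_STATUS_SHD, "testvol",
                            node, sizeof (node), subvol, sizeof (subvol)) == 0)
                    printf ("xlator=%s subvolume=%s\n", node, subvol);
            return 0;
    }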
diff --git a/rpc/rpc-lib/src/protocol-common.h b/rpc/rpc-lib/src/protocol-common.h
index 0191c50aa62..cccd1e7f898 100644
--- a/rpc/rpc-lib/src/protocol-common.h
+++ b/rpc/rpc-lib/src/protocol-common.h
@@ -186,8 +186,8 @@ enum glusterd_brick_procnum {
GLUSTERD_BRICK_STATUS,
GLUSTERD_BRICK_OP,
GLUSTERD_BRICK_XLATOR_DEFRAG,
- GLUSTERD_NFS_PROFILE,
- GLUSTERD_NFS_STATUS,
+ GLUSTERD_NODE_PROFILE,
+ GLUSTERD_NODE_STATUS,
GLUSTERD_BRICK_MAXVALUE,
};
diff --git a/rpc/xdr/src/cli1-xdr.h b/rpc/xdr/src/cli1-xdr.h
index 979c2250f35..6b6458b70d0 100644
--- a/rpc/xdr/src/cli1-xdr.h
+++ b/rpc/xdr/src/cli1-xdr.h
@@ -155,18 +155,19 @@ enum gf1_cli_top_op {
typedef enum gf1_cli_top_op gf1_cli_top_op;
enum gf_cli_status_type {
- GF_CLI_STATUS_NONE = 0x000,
- GF_CLI_STATUS_MEM = 0x001,
- GF_CLI_STATUS_CLIENTS = 0x002,
- GF_CLI_STATUS_INODE = 0x004,
- GF_CLI_STATUS_FD = 0x008,
- GF_CLI_STATUS_CALLPOOL = 0x010,
- GF_CLI_STATUS_DETAIL = 0x020,
- GF_CLI_STATUS_MASK = 0x0FF,
- GF_CLI_STATUS_VOL = 0x100,
- GF_CLI_STATUS_ALL = 0x200,
- GF_CLI_STATUS_BRICK = 0x400,
- GF_CLI_STATUS_NFS = 0x800,
+ GF_CLI_STATUS_NONE = 0x0000,
+ GF_CLI_STATUS_MEM = 0x0001,
+ GF_CLI_STATUS_CLIENTS = 0x0002,
+ GF_CLI_STATUS_INODE = 0x0004,
+ GF_CLI_STATUS_FD = 0x0008,
+ GF_CLI_STATUS_CALLPOOL = 0x0010,
+ GF_CLI_STATUS_DETAIL = 0x0020,
+ GF_CLI_STATUS_MASK = 0x00FF,
+ GF_CLI_STATUS_VOL = 0x0100,
+ GF_CLI_STATUS_ALL = 0x0200,
+ GF_CLI_STATUS_BRICK = 0x0400,
+ GF_CLI_STATUS_NFS = 0x0800,
+ GF_CLI_STATUS_SHD = 0x1000,
};
typedef enum gf_cli_status_type gf_cli_status_type;
diff --git a/rpc/xdr/src/cli1-xdr.x b/rpc/xdr/src/cli1-xdr.x
index 72d2dbef448..d90aa2039f5 100644
--- a/rpc/xdr/src/cli1-xdr.x
+++ b/rpc/xdr/src/cli1-xdr.x
@@ -100,18 +100,19 @@ enum gf1_cli_top_op {
/* The unconventional hex numbers help us perform
bit-wise operations which reduces complexity */
enum gf_cli_status_type {
- GF_CLI_STATUS_NONE = 0x000,
- GF_CLI_STATUS_MEM = 0x001, /*000000000001*/
- GF_CLI_STATUS_CLIENTS = 0x002, /*000000000010*/
- GF_CLI_STATUS_INODE = 0x004, /*000000000100*/
- GF_CLI_STATUS_FD = 0x008, /*000000001000*/
- GF_CLI_STATUS_CALLPOOL = 0x010, /*000000010000*/
- GF_CLI_STATUS_DETAIL = 0x020, /*000000100000*/
- GF_CLI_STATUS_MASK = 0x0FF, /*000011111111 Used to get the op*/
- GF_CLI_STATUS_VOL = 0x100, /*000100000000*/
- GF_CLI_STATUS_ALL = 0x200, /*001000000000*/
- GF_CLI_STATUS_BRICK = 0x400, /*010000000000*/
- GF_CLI_STATUS_NFS = 0x800 /*100000000000*/
+ GF_CLI_STATUS_NONE = 0x0000,
+ GF_CLI_STATUS_MEM = 0x0001, /*0000000000001*/
+ GF_CLI_STATUS_CLIENTS = 0x0002, /*0000000000010*/
+ GF_CLI_STATUS_INODE = 0x0004, /*0000000000100*/
+ GF_CLI_STATUS_FD = 0x0008, /*0000000001000*/
+ GF_CLI_STATUS_CALLPOOL = 0x0010, /*0000000010000*/
+ GF_CLI_STATUS_DETAIL = 0x0020, /*0000000100000*/
+ GF_CLI_STATUS_MASK = 0x00FF, /*0000011111111 Used to get the op*/
+ GF_CLI_STATUS_VOL = 0x0100, /*0000100000000*/
+ GF_CLI_STATUS_ALL = 0x0200, /*0001000000000*/
+ GF_CLI_STATUS_BRICK = 0x0400, /*0010000000000*/
+ GF_CLI_STATUS_NFS = 0x0800, /*0100000000000*/
+ GF_CLI_STATUS_SHD = 0x1000 /*1000000000000*/
};
struct gf_cli_req {
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 9718918f3df..5788813235b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -246,8 +246,8 @@ out:
}
int
-glusterd_nfs_op_build_payload (glusterd_op_t op, gd1_mgmt_brick_op_req **req,
- dict_t *dict)
+glusterd_node_op_build_payload (glusterd_op_t op, gd1_mgmt_brick_op_req **req,
+ dict_t *dict)
{
int ret = -1;
gd1_mgmt_brick_op_req *brick_req = NULL;
@@ -263,7 +263,7 @@ glusterd_nfs_op_build_payload (glusterd_op_t op, gd1_mgmt_brick_op_req **req,
if (!brick_req)
goto out;
- brick_req->op = GLUSTERD_NFS_PROFILE;
+ brick_req->op = GLUSTERD_NODE_PROFILE;
brick_req->name = "";
break;
@@ -274,7 +274,7 @@ glusterd_nfs_op_build_payload (glusterd_op_t op, gd1_mgmt_brick_op_req **req,
if (!brick_req)
goto out;
- brick_req->op = GLUSTERD_NFS_STATUS;
+ brick_req->op = GLUSTERD_NODE_STATUS;
brick_req->name = "";
break;
@@ -631,6 +631,9 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
glusterd_brickinfo_t *brickinfo = NULL;
glusterd_brickinfo_t *tmpbrickinfo = NULL;
glusterd_volinfo_t *volinfo = NULL;
+ dict_t *vol_opts = NULL;
+ gf_boolean_t nfs_disabled = _gf_false;
+ gf_boolean_t shd_enabled = _gf_true;
GF_ASSERT (dict);
this = THIS;
@@ -654,13 +657,58 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
ret = glusterd_volinfo_find (volname, &volinfo);
if (ret) {
- snprintf (msg, sizeof(msg), "Volume %s does not exist", volname);
+ snprintf (msg, sizeof(msg), "Volume %s does not exist",
+ volname);
+ gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_is_volume_started (volinfo);
+ if (!ret) {
+ snprintf (msg, sizeof (msg), "Volume %s is not started",
+ volname);
gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
ret = -1;
goto out;
}
- if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
+ vol_opts = volinfo->dict;
+
+ if ((cmd & GF_CLI_STATUS_NFS) != 0) {
+ nfs_disabled = dict_get_str_boolean (vol_opts, "nfs.disable",
+ _gf_false);
+ if (nfs_disabled) {
+ ret = -1;
+ snprintf (msg, sizeof (msg),
+ "NFS server is disabled for volume %s",
+ volname);
+ gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
+ goto out;
+ }
+ } else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
+ if (!glusterd_is_volume_replicate (volinfo)) {
+ ret = -1;
+ snprintf (msg, sizeof (msg),
+ "Volume %s is not a replicate volume",
+ volname);
+ gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
+ goto out;
+ }
+
+ shd_enabled = dict_get_str_boolean (vol_opts,
+ "cluster.self-heal-daemon",
+ _gf_true);
+ if (!shd_enabled) {
+ ret = -1;
+ snprintf (msg, sizeof (msg),
+ "Self-heal Daemon is disabled for volume %s",
+ volname);
+ gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
+ goto out;
+ }
+
+ } else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
ret = dict_get_str (dict, "brick", &brick);
if (ret)
goto out;
@@ -1340,8 +1388,10 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
dict_t *rsp_dict)
{
int ret = -1;
- int brick_count = 0;
+ int node_count = 0;
int brick_index = -1;
+ int other_count = 0;
+ int other_index = 0;
uint32_t cmd = 0;
char *volname = NULL;
char *brick = NULL;
@@ -1349,6 +1399,9 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
glusterd_volinfo_t *volinfo = NULL;
glusterd_brickinfo_t *brickinfo = NULL;
glusterd_conf_t *priv = NULL;
+ dict_t *vol_opts = NULL;
+ gf_boolean_t nfs_disabled = _gf_false;
+ gf_boolean_t shd_enabled = _gf_true;
this = THIS;
GF_ASSERT (this);
@@ -1394,12 +1447,21 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
"does not exist", volname);
goto out;
}
+ vol_opts = volinfo->dict;
if ((cmd & GF_CLI_STATUS_NFS) != 0) {
ret = glusterd_add_node_to_dict ("nfs", rsp_dict, 0);
if (ret)
goto out;
- brick_count = 1;
+ other_count++;
+ node_count++;
+
+ } else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
+ ret = glusterd_add_node_to_dict ("glustershd", rsp_dict, 0);
+ if (ret)
+ goto out;
+ other_count++;
+ node_count++;
} else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
ret = dict_get_str (dict, "brick", &brick);
@@ -1416,11 +1478,13 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
if (uuid_compare (brickinfo->uuid, priv->uuid))
goto out;
- glusterd_add_brick_to_dict (volinfo, brickinfo, rsp_dict, 0);
+ glusterd_add_brick_to_dict (volinfo, brickinfo, rsp_dict,
+ ++brick_index);
if (cmd & GF_CLI_STATUS_DETAIL)
glusterd_add_brick_detail_to_dict (volinfo, brickinfo,
- rsp_dict, 0);
- brick_count = 1;
+ rsp_dict,
+ brick_index);
+ node_count++;
} else {
list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
@@ -1437,60 +1501,62 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
rsp_dict,
brick_index);
}
- brick_count++;
+ node_count++;
}
- }
-
- ret = dict_set_int32 (rsp_dict, "count", brick_count);
-
-out:
- gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
-
- return ret;
-}
-
-static int
-glusterd_op_volume_dict_uuid_to_hostname (dict_t *dict, const char *key_fmt,
- int idx_min, int idx_max)
-{
- int ret = -1;
- int i = 0;
- char key[1024];
- char *uuid_str = NULL;
- uuid_t uuid = {0,};
- char *hostname = NULL;
- GF_ASSERT (dict);
- GF_ASSERT (key_fmt);
-
- for (i = idx_min; i < idx_max; i++) {
- memset (key, 0, sizeof (key));
- snprintf (key, sizeof (key), key_fmt, i);
- ret = dict_get_str (dict, key, &uuid_str);
- if (ret)
- goto out;
+ if ((cmd & GF_CLI_STATUS_MASK) == GF_CLI_STATUS_NONE) {
+ other_index = brick_index + 1;
- ret = uuid_parse (uuid_str, uuid);
- /* if parsing fails don't error out
- * let the original value be retained
- */
- if (ret)
- continue;
+ nfs_disabled = dict_get_str_boolean (vol_opts,
+ "nfs.disable",
+ _gf_false);
+ if (!nfs_disabled) {
+ ret = glusterd_add_node_to_dict ("nfs",
+ rsp_dict,
+ other_index);
+ if (ret)
+ goto out;
+ other_index++;
+ other_count++;
+ node_count++;
+ }
- hostname = glusterd_uuid_to_hostname (uuid);
- if (hostname) {
- ret = dict_set_dynstr (dict, key, hostname);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "Error setting hostname to dict");
- GF_FREE (hostname);
- goto out;
+ shd_enabled = dict_get_str_boolean
+ (vol_opts, "cluster.self-heal-daemon",
+ _gf_true);
+ if (glusterd_is_volume_replicate (volinfo)
+ && shd_enabled) {
+ ret = glusterd_add_node_to_dict ("glustershd",
+ rsp_dict,
+ other_index);
+ if (ret)
+ goto out;
+ other_count++;
+ node_count++;
}
}
}
+ ret = dict_set_int32 (rsp_dict, "brick-index-max", brick_index);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Error setting brick-index-max to dict");
+ goto out;
+ }
+ ret = dict_set_int32 (rsp_dict, "other-count", other_count);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Error setting other-count to dict");
+ goto out;
+ }
+ ret = dict_set_int32 (rsp_dict, "count", node_count);
+ if (ret)
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Error setting node count to dict");
+
out:
gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+
return ret;
}
@@ -1889,6 +1955,177 @@ out:
return ret;
}
+/* This function takes a dict and converts the uuid values of key specified
+ * into hostnames
+ */
+static int
+glusterd_op_volume_dict_uuid_to_hostname (dict_t *dict, const char *key_fmt,
+ int idx_min, int idx_max)
+{
+ int ret = -1;
+ int i = 0;
+ char key[1024];
+ char *uuid_str = NULL;
+ uuid_t uuid = {0,};
+ char *hostname = NULL;
+
+ GF_ASSERT (dict);
+ GF_ASSERT (key_fmt);
+
+ for (i = idx_min; i < idx_max; i++) {
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), key_fmt, i);
+ ret = dict_get_str (dict, key, &uuid_str);
+ if (ret)
+ continue;
+
+ gf_log (THIS->name, GF_LOG_DEBUG, "Got uuid %s",
+ uuid_str);
+
+ ret = uuid_parse (uuid_str, uuid);
+ /* if parsing fails don't error out
+ * let the original value be retained
+ */
+ if (ret)
+ continue;
+
+ hostname = glusterd_uuid_to_hostname (uuid);
+ if (hostname) {
+ gf_log (THIS->name, GF_LOG_DEBUG, "%s -> %s",
+ uuid_str, hostname);
+ ret = dict_set_dynstr (dict, key, hostname);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Error setting hostname to dict");
+ GF_FREE (hostname);
+ goto out;
+ }
+ }
+ }
+
+out:
+ gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+/* This function is used to modify the op_ctx dict before sending it back
+ * to cli. This is useful in situations like changing the peer uuids to
+ * hostnames etc.
+ */
+void
+glusterd_op_modify_op_ctx (glusterd_op_t op)
+{
+ int ret = -1;
+ dict_t *op_ctx = NULL;
+ int brick_index_max = -1;
+ int other_count = 0;
+ int count = 0;
+ uint32_t cmd = GF_CLI_STATUS_NONE;
+
+ op_ctx = glusterd_op_get_ctx();
+ if (!op_ctx) {
+ gf_log (THIS->name, GF_LOG_CRITICAL,
+ "Operation context is not present.");
+ goto out;
+ }
+
+ switch (op) {
+ case GD_OP_STATUS_VOLUME:
+ ret = dict_get_uint32 (op_ctx, "cmd", &cmd);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_DEBUG,
+ "Failed to get status cmd");
+ goto out;
+ }
+ if (!(cmd & GF_CLI_STATUS_NFS || cmd & GF_CLI_STATUS_SHD ||
+ (cmd & GF_CLI_STATUS_MASK) == GF_CLI_STATUS_NONE)) {
+ gf_log (THIS->name, GF_LOG_INFO,
+ "op_ctx modification not required for status "
+ "operation being performed");
+ goto out;
+ }
+
+ ret = dict_get_int32 (op_ctx, "brick-index-max",
+ &brick_index_max);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_DEBUG,
+ "Failed to get brick-index-max");
+ goto out;
+ }
+
+ ret = dict_get_int32 (op_ctx, "other-count", &other_count);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_DEBUG,
+ "Failed to get other-count");
+ goto out;
+ }
+
+ count = brick_index_max + other_count + 1;
+
+ ret = glusterd_op_volume_dict_uuid_to_hostname (op_ctx,
+ "brick%d.path",
+ 0, count);
+ if (ret)
+ gf_log (THIS->name, GF_LOG_WARNING,
+ "Failed uuid to hostname conversion");
+
+ break;
+
+ case GD_OP_PROFILE_VOLUME:
+ ret = dict_get_str_boolean (op_ctx, "nfs", _gf_false);
+ if (!ret)
+ goto out;
+
+ ret = dict_get_int32 (op_ctx, "count", &count);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_DEBUG,
+ "Failed to get brick count");
+ goto out;
+ }
+
+ ret = glusterd_op_volume_dict_uuid_to_hostname (op_ctx,
+ "%d-brick",
+ 1, (count + 1));
+ if (ret)
+ gf_log (THIS->name, GF_LOG_WARNING,
+ "Failed uuid to hostname conversion");
+
+ break;
+
+ /* For both rebalance and remove-brick status, the glusterd op is the
+ * same
+ */
+ case GD_OP_DEFRAG_BRICK_VOLUME:
+ ret = dict_get_int32 (op_ctx, "count", &count);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_DEBUG,
+ "Failed to get count");
+ goto out;
+ }
+
+ ret = glusterd_op_volume_dict_uuid_to_hostname (op_ctx,
+ "node-uuid-%d",
+ 1, (count + 1));
+ if (ret)
+ gf_log (THIS->name, GF_LOG_WARNING,
+ "Failed uuid to hostname conversion");
+ break;
+
+ default:
+ ret = 0;
+ gf_log (THIS->name, GF_LOG_INFO,
+ "op_ctx modification not required");
+ break;
+
+ }
+
+out:
+ if (ret)
+ gf_log (THIS->name, GF_LOG_WARNING,
+ "op_ctx modification failed");
+ return;
+}
+
static int
glusterd_op_ac_send_commit_op (glusterd_op_sm_event_t *event, void *ctx)
{
@@ -1916,7 +2153,7 @@ glusterd_op_ac_send_commit_op (glusterd_op_sm_event_t *event, void *ctx)
ret = glusterd_op_commit_perform (op, dict, &op_errstr, NULL); //rsp_dict invalid for source
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Commit failed");
+ gf_log (THIS->name, GF_LOG_ERROR, "Commit failed");
opinfo.op_errstr = op_errstr;
goto out;
}
@@ -1935,7 +2172,8 @@ glusterd_op_ac_send_commit_op (glusterd_op_sm_event_t *event, void *ctx)
if (proc->fn) {
ret = dict_set_static_ptr (dict, "peerinfo", peerinfo);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "failed to set peerinfo");
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "failed to set peerinfo");
goto out;
}
ret = proc->fn (NULL, this, dict);
@@ -1946,7 +2184,7 @@ glusterd_op_ac_send_commit_op (glusterd_op_sm_event_t *event, void *ctx)
}
opinfo.pending_count = pending_count;
- gf_log ("glusterd", GF_LOG_INFO, "Sent op req to %d peers",
+ gf_log (THIS->name, GF_LOG_INFO, "Sent op req to %d peers",
opinfo.pending_count);
out:
if (dict)
@@ -1962,14 +2200,14 @@ out:
ret = glusterd_op_start_rb_timer (op_dict);
} else {
-
+ glusterd_op_modify_op_ctx (op);
ret = glusterd_op_sm_inject_all_acc ();
}
goto err;
}
err:
- gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+ gf_log (THIS->name, GF_LOG_DEBUG, "Returning with %d", ret);
return ret;
@@ -2089,8 +2327,6 @@ glusterd_op_ac_rcvd_commit_op_acc (glusterd_op_sm_event_t *event, void *ctx)
int ret = 0;
gf_boolean_t commit_ack_inject = _gf_true;
glusterd_op_t op = GD_OP_NONE;
- int count = 0;
- uint32_t cmd = GF_CLI_STATUS_NONE;
op = glusterd_op_get_op ();
GF_ASSERT (event);
@@ -2121,84 +2357,15 @@ glusterd_op_ac_rcvd_commit_op_acc (glusterd_op_sm_event_t *event, void *ctx)
goto out;
}
- if (op == GD_OP_STATUS_VOLUME) {
- op_ctx = glusterd_op_get_ctx();
- if (!op_ctx) {
- gf_log (THIS->name, GF_LOG_CRITICAL, "Operation "
- "context is not present.");
- ret = -1;
- goto out;
- }
-
- ret = dict_get_uint32 (op_ctx, "cmd", &cmd);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "Failed to get status cmd");
- goto out;
- }
- if (!(cmd & GF_CLI_STATUS_NFS)) {
- ret = 0;
- goto out;
- }
-
- ret = dict_get_int32 (op_ctx, "count", &count);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "Failed to get brick count");
- goto out;
- }
-
- ret = glusterd_op_volume_dict_uuid_to_hostname (op_ctx,
- "brick%d.path",
- 0, count);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "Failed uuid to hostname conversion");
- ret = 0;
- }
-
- }
-
- if (op == GD_OP_PROFILE_VOLUME) {
- op_ctx = glusterd_op_get_ctx();
- if (!op_ctx) {
- gf_log (THIS->name, GF_LOG_CRITICAL, "Operation "
- "context is not present.");
- ret = -1;
- goto out;
- }
-
- ret = dict_get_str_boolean (op_ctx, "nfs", _gf_false);
- if (!ret) {
- ret = 0;
- goto out;
- }
-
- ret = dict_get_int32 (op_ctx, "count", &count);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "Failed to get brick count");
- goto out;
- }
-
- ret = glusterd_op_volume_dict_uuid_to_hostname (op_ctx,
- "%d-brick",
- 1, (count + 1));
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "Failed uuid to hostname conversion");
- ret = 0;
- }
-
- }
out:
if (commit_ack_inject) {
if (ret)
ret = glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT, NULL);
- else if (!opinfo.pending_count)
+ else if (!opinfo.pending_count) {
+ glusterd_op_modify_op_ctx (op);
ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_ACC, NULL);
-
+ }
/*else do nothing*/
}
@@ -2896,8 +3063,10 @@ glusterd_status_volume_brick_rsp (dict_t *rsp_dict, dict_t *op_ctx,
count++;
}
ret = dict_get_int32 (rsp_dict, "index", &index);
- if (ret)
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Couldn't get node index");
goto out;
+ }
dict_del (rsp_dict, "index");
rsp_ctx.count = index;
@@ -3608,6 +3777,7 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr)
case GF_CLI_STATUS_FD:
case GF_CLI_STATUS_CALLPOOL:
case GF_CLI_STATUS_NFS:
+ case GF_CLI_STATUS_SHD:
break;
default:
goto out;
@@ -3671,6 +3841,25 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr)
list_add_tail (&pending_node->list, &opinfo.pending_bricks);
ret = 0;
+ } else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
+ if (!glusterd_nodesvc_is_running ("glustershd")) {
+ ret = -1;
+ gf_log (this->name, GF_LOG_ERROR,
+ "Self-heal daemon is not running");
+ goto out;
+ }
+ pending_node = GF_CALLOC (1, sizeof (*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ }
+ pending_node->node = priv->shd;
+ pending_node->type = GD_NODE_SHD;
+ pending_node->index = 0;
+ list_add_tail (&pending_node->list, &opinfo.pending_bricks);
+
+ ret = 0;
} else {
list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
brick_index++;
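To see how glusterd now builds the extra entries for the default output, here is a small stand-alone sketch (not the actual function): the decisions mirror glusterd_op_status_volume() above, i.e. the NFS server entry is added unless "nfs.disable" is set and the self-heal daemon entry is added only for replicate volumes with "cluster.self-heal-daemon" enabled; all the sample values are illustrative.

    #include <stdio.h>
    #include <stdbool.h>

    int
    main (void)
    {
            bool nfs_disabled    = false;  /* volume option "nfs.disable"              */
            bool shd_enabled     = true;   /* volume option "cluster.self-heal-daemon" */
            bool is_replicate    = true;   /* glusterd_is_volume_replicate()           */
            int  brick_index_max = 1;      /* two local bricks already added: 0 and 1  */
            int  other_index     = brick_index_max + 1;
            int  other_count     = 0;

            if (!nfs_disabled) {
                    printf ("brick%d -> NFS Server\n", other_index++);
                    other_count++;
            }
            if (is_replicate && shd_enabled) {
                    printf ("brick%d -> Self-heal Daemon\n", other_index++);
                    other_count++;
            }

            /* These two values end up in the rsp dict and drive the cli loops. */
            printf ("brick-index-max=%d other-count=%d\n",
                    brick_index_max, other_count);
            return 0;
    }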
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.h b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
index 1f32681a5a1..047ff2f3d3d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.h
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
@@ -162,8 +162,9 @@ typedef struct glusterd_heal_rsp_conv_ {
typedef struct glusterd_status_rsp_conv_ {
int count;
+ int brick_index_max;
+ int other_count;
dict_t *dict;
- gf_boolean_t nfs;
} glusterd_status_rsp_conv_t;
typedef struct glusterd_gsync_status_temp {
@@ -241,7 +242,7 @@ int
glusterd_brick_op_build_payload (glusterd_op_t op, glusterd_brickinfo_t *brickinfo,
gd1_mgmt_brick_op_req **req, dict_t *dict);
int
-glusterd_nfs_op_build_payload (glusterd_op_t op, gd1_mgmt_brick_op_req **req,
+glusterd_node_op_build_payload (glusterd_op_t op, gd1_mgmt_brick_op_req **req,
dict_t *dict);
int32_t
glusterd_handle_brick_rsp (void *pending_entry, glusterd_op_t op,
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 649156f4b33..14bcf8b9904 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -1010,19 +1010,22 @@ glusterd_volume_status_add_peer_rsp (dict_t *this, char *key, data_t *value,
data_t *new_value = NULL;
char brick_key[1024] = {0,};
char new_key[1024] = {0,};
+ int32_t index = 0;
int32_t ret = 0;
- if (!strcmp (key, "count") || !strcmp (key, "cmd"))
+ if (!strcmp (key, "count") || !strcmp (key, "cmd") ||
+ !strcmp (key, "brick-index-max") || !strcmp (key, "other-count"))
return;
rsp_ctx = data;
new_value = data_copy (value);
GF_ASSERT (new_value);
- if (rsp_ctx->nfs) {
- sscanf (key, "brick%*d.%s", brick_key);
+ sscanf (key, "brick%d.%s", &index, brick_key);
+
+ if (index > rsp_ctx->brick_index_max) {
snprintf (new_key, sizeof (new_key), "brick%d.%s",
- rsp_ctx->count, brick_key);
+ index + rsp_ctx->other_count, brick_key);
} else
strncpy (new_key, key, sizeof (new_key));
@@ -1039,39 +1042,56 @@ glusterd_volume_status_use_rsp_dict (dict_t *rsp_dict)
{
int ret = 0;
glusterd_status_rsp_conv_t rsp_ctx = {0};
- int32_t brick_count = 0;
- int32_t count = 0;
- int32_t cmd = 0;
+ int32_t node_count = 0;
+ int32_t rsp_node_count = 0;
+ int32_t brick_index_max = -1;
+ int32_t other_count = 0;
+ int32_t rsp_other_count = 0;
dict_t *ctx_dict = NULL;
glusterd_op_t op = GD_OP_NONE;
GF_ASSERT (rsp_dict);
- ret = dict_get_int32 (rsp_dict, "count", &brick_count);
+ ret = dict_get_int32 (rsp_dict, "count", &rsp_node_count);
if (ret) {
ret = 0; //no bricks in the rsp
goto out;
}
- ret = dict_get_int32 (rsp_dict, "cmd", &cmd);
- if (ret)
+ ret = dict_get_int32 (rsp_dict, "other-count", &rsp_other_count);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Failed to get other count from rsp_dict");
goto out;
+ }
op = glusterd_op_get_op ();
GF_ASSERT (GD_OP_STATUS_VOLUME == op);
ctx_dict = glusterd_op_get_ctx (op);
- ret = dict_get_int32 (ctx_dict, "count", &count);
- rsp_ctx.count = count;
+ ret = dict_get_int32 (ctx_dict, "count", &node_count);
+ ret = dict_get_int32 (ctx_dict, "brick-index-max", &brick_index_max);
+ ret = dict_get_int32 (ctx_dict, "other-count", &other_count);
+
+ rsp_ctx.count = node_count;
+ rsp_ctx.brick_index_max = brick_index_max;
+ rsp_ctx.other_count = other_count;
rsp_ctx.dict = ctx_dict;
- if (cmd & GF_CLI_STATUS_NFS)
- rsp_ctx.nfs = _gf_true;
- else
- rsp_ctx.nfs = _gf_false;
dict_foreach (rsp_dict, glusterd_volume_status_add_peer_rsp, &rsp_ctx);
- ret = dict_set_int32 (ctx_dict, "count", count + brick_count);
+ ret = dict_set_int32 (ctx_dict, "count", node_count + rsp_node_count);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Failed to update node count");
+ goto out;
+ }
+
+ ret = dict_set_int32 (ctx_dict, "other-count",
+ (other_count + rsp_other_count));
+ if (ret)
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Failed to update other-count");
out:
return ret;
}
@@ -1867,8 +1887,10 @@ glusterd3_1_brick_op (call_frame_t *frame, xlator_t *this,
if (!dummy_frame)
continue;
- if (pending_node->type == GD_NODE_NFS)
- ret = glusterd_nfs_op_build_payload
+ if ((pending_node->type == GD_NODE_NFS) ||
+ ((pending_node->type == GD_NODE_SHD) &&
+ (req_ctx->op == GD_OP_STATUS_VOLUME)))
+ ret = glusterd_node_op_build_payload
(req_ctx->op,
(gd1_mgmt_brick_op_req **)&req,
req_ctx->dict);
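The key re-indexing that glusterd_volume_status_add_peer_rsp() now performs when merging a peer's response can be illustrated with a short sketch; the brick-index-max/other-count values and the key are made up, but the shift of indices beyond brick-index-max by other-count follows the hunk above.

    #include <stdio.h>

    int
    main (void)
    {
            /* Key as received in a peer's rsp dict (illustrative). */
            const char *key             = "brick3.hostname";
            int         brick_index_max = 2;  /* from the aggregated ctx dict  */
            int         other_count     = 2;  /* "other" entries already added */
            int         index           = 0;
            char        brick_key[64]   = {0};
            char        new_key[128]    = {0};

            sscanf (key, "brick%d.%63s", &index, brick_key);

            /* Peer entries beyond the local brick range ("other" nodes) are
             * shifted past the other-count entries already in the dict. */
            if (index > brick_index_max)
                    snprintf (new_key, sizeof (new_key), "brick%d.%s",
                              index + other_count, brick_key);
            else
                    snprintf (new_key, sizeof (new_key), "%s", key);

            printf ("%s -> %s\n", key, new_key); /* brick3.hostname -> brick5.hostname */
            return 0;
    }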
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index ba0727415a2..834916f6679 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -2951,7 +2951,6 @@ glusterd_shd_stop ()
return glusterd_nodesvc_stop ("glustershd", SIGTERM);
}
-/* Only NFS server for now */
int
glusterd_add_node_to_dict (char *server, dict_t *dict, int count)
{
@@ -2966,17 +2965,20 @@ glusterd_add_node_to_dict (char *server, dict_t *dict, int count)
sizeof (pidfile));
running = glusterd_is_service_running (pidfile, &pid);
- /* For nfs servers setting
- * brick<n>.hostname = "NFS server"
+ /* For nfs-servers/self-heal-daemon setting
+ * brick<n>.hostname = "NFS Server" / "Self-heal Daemon"
* brick<n>.path = uuid
* brick<n>.port = 0
*
- * This might be confusing, but cli display's the name of
+ * This might be confusing, but cli displays the name of
* the brick as hostname+path, so this will make more sense
* when output.
*/
snprintf (key, sizeof (key), "brick%d.hostname", count);
- ret = dict_set_str (dict, key, "NFS Server");
+ if (!strcmp (server, "nfs"))
+ ret = dict_set_str (dict, key, "NFS Server");
+ else if (!strcmp (server, "glustershd"))
+ ret = dict_set_str (dict, key, "Self-heal Daemon");
if (ret)
goto out;