From 903f27305cbff51f174f2704ea13ffa65083fd24 Mon Sep 17 00:00:00 2001
From: hari gowtham
Date: Mon, 16 May 2016 10:55:17 +0530
Subject: tier/cli : printing a warning instead of skipping the node

back-port of : http://review.gluster.org/#/c/14347/8

Problem: skipping the status of nodes that are down confuses the user,
as the status may appear completed for all nodes, yet a subsequent
detach commit will fail because a node is down.

Fix: Display a warning message.

Note: When the last node (as per the peer list) is down, the warning
message cannot be displayed, because the total number of peers
participating in the transaction is taken as the total count.

>Change-Id: Ib7afbd1b26df3378e4d537db06f41f5c105ad86e
>BUG: 1324439
>Signed-off-by: hari gowtham

Change-Id: Ie4296e932abaf163edc55b540b26dc6f5824ea85
BUG: 1328410
Signed-off-by: hari gowtham
Reviewed-on: http://review.gluster.org/14458
Tested-by: hari gowtham
Smoke: Gluster Build System
NetBSD-regression: NetBSD Build System
Reviewed-by: Atin Mukherjee
CentOS-regression: Gluster Build System
---
 cli/src/cli-rpc-ops.c | 30 +++++++++++++++++++++++++-----
 1 file changed, 25 insertions(+), 5 deletions(-)

(limited to 'cli')

diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index d836f8e6aee..8f6073fb1e8 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -1509,7 +1509,8 @@ out:
 }
 
 int
-gf_cli_print_rebalance_status (dict_t *dict, enum gf_task_types task_type)
+gf_cli_print_rebalance_status (dict_t *dict, enum gf_task_types task_type,
+                               gf_boolean_t is_tier)
 {
         int                ret          = -1;
         int                count        = 0;
@@ -1528,6 +1529,7 @@ gf_cli_print_rebalance_status (dict_t *dict, enum gf_task_types task_type)
         int                hrs          = 0;
         int                min          = 0;
         int                sec          = 0;
+        gf_boolean_t       down         = _gf_false;
 
         ret = dict_get_int32 (dict, "count", &count);
         if (ret) {
@@ -1562,6 +1564,7 @@ gf_cli_print_rebalance_status (dict_t *dict, enum gf_task_types task_type)
                         gf_log ("cli", GF_LOG_TRACE, "failed to get status");
                         gf_log ("cli", GF_LOG_ERROR, "node down and has failed"
                                 " to set dict");
+                        down = _gf_true;
                         continue;
                         /* skip this node if value not available*/
                 } else if (ret) {
@@ -1650,6 +1653,11 @@ gf_cli_print_rebalance_status (dict_t *dict, enum gf_task_types task_type)
                 }
                 GF_FREE(size_str);
         }
+        if (is_tier && down)
+                cli_out ("WARNING: glusterd might be down on one or more nodes."
+                         " Please check the nodes that are down using \'gluster"
+                         " peer status\' and start the glusterd on those nodes,"
+                         " else tier detach commit might fail!");
 out:
         return ret;
 }
@@ -1667,6 +1675,7 @@ gf_cli_print_tier_status (dict_t *dict, enum gf_task_types task_type)
         gf_defrag_status_t status_rcd = GF_DEFRAG_STATUS_NOT_STARTED;
         char               *status_str = NULL;
         char               *size_str   = NULL;
+        gf_boolean_t       down        = _gf_false;
 
         ret = dict_get_int32 (dict, "count", &count);
         if (ret) {
@@ -1695,6 +1704,7 @@ gf_cli_print_tier_status (dict_t *dict, enum gf_task_types task_type)
                                 "failed to get status", count, i);
                         gf_log ("cli", GF_LOG_ERROR, "node down and has failed"
                                 " to set dict");
+                        down = _gf_true;
                         continue;
                         /*skipping this node as value unavailable*/
                 } else if (ret) {
@@ -1733,8 +1743,11 @@ gf_cli_print_tier_status (dict_t *dict, enum gf_task_types task_type)
                 status_str = cli_vol_task_status_str[status_rcd];
                 cli_out ("%-20s %-20"PRIu64" %-20"PRIu64" %-20s",
                          node_name, promoted, demoted, status_str);
-
         }
+        if (down)
+                cli_out ("WARNING: glusterd might be down on one or more nodes."
+ " Please check the nodes that are down using \'gluster" + " peer status\' and start the glusterd on those nodes."); out: return ret; } @@ -1893,9 +1906,14 @@ gf_cli_defrag_volume_cbk (struct rpc_req *req, struct iovec *iov, if (cmd == GF_DEFRAG_CMD_STATUS_TIER) ret = gf_cli_print_tier_status (dict, GF_TASK_TYPE_REBALANCE); + else if (cmd == GF_DEFRAG_CMD_DETACH_STATUS) + ret = gf_cli_print_rebalance_status (dict, + GF_TASK_TYPE_REBALANCE, + _gf_true); else ret = gf_cli_print_rebalance_status (dict, - GF_TASK_TYPE_REBALANCE); + GF_TASK_TYPE_REBALANCE, + _gf_false); if (ret) gf_log ("cli", GF_LOG_ERROR, @@ -2491,7 +2509,8 @@ xml_output: goto out; } - ret = gf_cli_print_rebalance_status (dict, GF_TASK_TYPE_REMOVE_BRICK); + ret = gf_cli_print_rebalance_status (dict, GF_TASK_TYPE_REMOVE_BRICK, + _gf_true); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to print remove-brick " "rebalance status"); @@ -2669,7 +2688,8 @@ xml_output: goto out; } - ret = gf_cli_print_rebalance_status (dict, GF_TASK_TYPE_REMOVE_BRICK); + ret = gf_cli_print_rebalance_status (dict, GF_TASK_TYPE_REMOVE_BRICK, + _gf_false); if (ret) { gf_log ("cli", GF_LOG_ERROR, "Failed to print remove-brick " "rebalance status"); -- cgit