Diffstat (limited to 'cli/src/cli-rpc-ops.c')
| -rw-r--r-- | cli/src/cli-rpc-ops.c | 14855 |
1 file changed, 9717 insertions, 5138 deletions
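A note on one idiom that recurs in nearly every rewritten callback in the patch below: a dict key is built with snprintf() and the returned length is passed straight to the length-aware accessors (dict_get_strn(), dict_get_int32n()), so the key is never re-measured with strlen(). The following is a minimal standalone sketch of that idiom, assuming the libglusterfs dict API is available via <glusterfs/dict.h>; the helper name demo_get_friend_uuid is illustrative and not part of the patch:

    #include <stdio.h>
    #include <glusterfs/dict.h>

    /* Fetch "friend<i>.uuid" the way the patched callbacks do:
     * snprintf() returns the number of characters written (excluding
     * the terminating NUL), which dict_get_strn() accepts as the key
     * length, avoiding a second pass over the key. */
    static int
    demo_get_friend_uuid(dict_t *dict, int i, char **uuid_buf)
    {
        char key[64];
        int keylen;

        keylen = snprintf(key, sizeof(key), "friend%d.uuid", i);
        return dict_get_strn(dict, key, keylen, uuid_buf);
    }

This mirrors the pattern used for the friend%d.uuid, friend%d.hostname, and volume%d.* keys throughout the diff.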
diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c index c98d986b3fb..9b6b0c7fa50 100644 --- a/cli/src/cli-rpc-ops.c +++ b/cli/src/cli-rpc-ops.c @@ -1,31 +1,12 @@ /* - Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com> - This file is part of GlusterFS. - - GlusterFS is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published - by the Free Software Foundation; either version 3 of the License, - or (at your option) any later version. - - GlusterFS is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see - <http://www.gnu.org/licenses/>. -*/ - + Copyright (c) 2010-2012 Red Hat, Inc. <http://www.redhat.com> + This file is part of GlusterFS. -#ifndef _CONFIG_H -#define _CONFIG_H -#include "config.h" -#endif - -#ifndef GSYNC_CONF -#define GSYNC_CONF GEOREP"/gsyncd.conf" -#endif + This file is licensed to you under your choice of the GNU Lesser + General Public License, version 3 or any later version (LGPLv3 or + later), or the GNU General Public License, version 2 (GPLv2), in all + cases as published by the Free Software Foundation. +*/ /* Widths of various columns in top read/write-perf output * Total width of top read/write-perf should be 80 chars @@ -33,6338 +14,10936 @@ */ #define VOL_TOP_PERF_FILENAME_DEF_WIDTH 47 #define VOL_TOP_PERF_FILENAME_ALT_WIDTH 44 -#define VOL_TOP_PERF_SPEED_WIDTH 4 -#define VOL_TOP_PERF_TIME_WIDTH 26 +#define VOL_TOP_PERF_SPEED_WIDTH 4 +#define VOL_TOP_PERF_TIME_WIDTH 26 + +#define INDENT_MAIN_HEAD "%-25s %s " + +#define RETURNING "Returning %d" +#define XML_ERROR "Error outputting to xml" +#define XDR_DECODE_FAIL "Failed to decode xdr response" +#define DICT_SERIALIZE_FAIL "Failed to serialize to data to dictionary" +#define DICT_UNSERIALIZE_FAIL "Failed to unserialize the dictionary" + +/* Do not show estimates if greater than this number */ +#define REBAL_ESTIMATE_SEC_UPPER_LIMIT (60 * 24 * 3600) +#define REBAL_ESTIMATE_START_TIME 600 #include "cli.h" -#include "compat-errno.h" +#include <glusterfs/compat-errno.h> #include "cli-cmd.h" #include <sys/uio.h> #include <stdlib.h> #include <sys/mount.h> -#include "cli1-xdr.h" -#include "xdr-generic.h" -#include "protocol-common.h" +#include <glusterfs/compat.h> #include "cli-mem-types.h" -#include "compat.h" - -#include "syscall.h" +#include <glusterfs/syscall.h> #include "glusterfs3.h" #include "portmap-xdr.h" +#include <glusterfs/byte-order.h> -#include "run.h" +#include <glusterfs/run.h> +#include <glusterfs/events.h> -extern rpc_clnt_prog_t *cli_rpc_prog; -extern int cli_op_ret; -extern int connected; +enum gf_task_types { GF_TASK_TYPE_REBALANCE, GF_TASK_TYPE_REMOVE_BRICK }; -char *cli_volume_type[] = {"Distribute", - "Stripe", - "Replicate", - "Striped-Replicate", - "Distributed-Stripe", - "Distributed-Replicate", - "Distributed-Striped-Replicate", -}; +rpc_clnt_prog_t cli_quotad_clnt; +static int32_t +gf_cli_remove_brick(call_frame_t *frame, xlator_t *this, void *data); -char *cli_volume_status[] = {"Created", - "Started", - "Stopped" +char *cli_vol_status_str[] = { + "Created", + "Started", + "Stopped", }; -int32_t -gf_cli3_1_get_volume (call_frame_t *frame, xlator_t *this, - void *data); +char *cli_vol_task_status_str[] = {"not started", + "in progress", 
+ "stopped", + "completed", + "failed", + "fix-layout in progress", + "fix-layout stopped", + "fix-layout completed", + "fix-layout failed", + "unknown"}; +static int32_t +gf_cli_snapshot(call_frame_t *frame, xlator_t *this, void *data); -rpc_clnt_prog_t cli_handshake_prog = { - .progname = "cli handshake", - .prognum = GLUSTER_HNDSK_PROGRAM, - .progver = GLUSTER_HNDSK_VERSION, +static int32_t +gf_cli_get_volume(call_frame_t *frame, xlator_t *this, void *data); + +static int +cli_to_glusterd(gf_cli_req *req, call_frame_t *frame, fop_cbk_fn_t cbkfn, + xdrproc_t xdrproc, dict_t *dict, int procnum, xlator_t *this, + rpc_clnt_prog_t *prog, struct iobref *iobref); + +static int +add_cli_cmd_timeout_to_dict(dict_t *dict); + +static rpc_clnt_prog_t cli_handshake_prog = { + .progname = "cli handshake", + .prognum = GLUSTER_HNDSK_PROGRAM, + .progver = GLUSTER_HNDSK_VERSION, }; -rpc_clnt_prog_t cli_pmap_prog = { - .progname = "cli portmap", - .prognum = GLUSTER_PMAP_PROGRAM, - .progver = GLUSTER_PMAP_VERSION, +static rpc_clnt_prog_t cli_pmap_prog = { + .progname = "cli portmap", + .prognum = GLUSTER_PMAP_PROGRAM, + .progver = GLUSTER_PMAP_VERSION, }; -void -gf_cli_probe_strerror (gf1_cli_probe_rsp *rsp, char *msg, size_t len) +static void +gf_free_xdr_cli_rsp(gf_cli_rsp rsp) { - switch (rsp->op_errno) { - case GF_PROBE_ANOTHER_CLUSTER: - snprintf (msg, len, "%s is already part of another cluster", - rsp->hostname); - break; - case GF_PROBE_VOLUME_CONFLICT: - snprintf (msg, len, "Atleast one volume on %s conflicts with " - "existing volumes in the cluster", rsp->hostname); - break; - case GF_PROBE_UNKNOWN_PEER: - snprintf (msg, len, "%s responded with 'unknown peer' error, " - "this could happen if %s doesn't have localhost in " - "its peer database", rsp->hostname, rsp->hostname); - break; - case GF_PROBE_ADD_FAILED: - snprintf (msg, len, "Failed to add peer information on %s" , - rsp->hostname); - break; - case GF_PROBE_QUORUM_NOT_MET: - snprintf (msg, len, "Cluster quorum is not met. 
Changing " - "peers is not allowed in this state"); - break; - default: - snprintf (msg, len, "Probe returned with unknown " - "errno %d", rsp->op_errno); - break; - } + if (rsp.dict.dict_val) { + free(rsp.dict.dict_val); + } + if (rsp.op_errstr) { + free(rsp.op_errstr); + } } -int -gf_cli3_1_probe_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) +static void +gf_free_xdr_getspec_rsp(gf_getspec_rsp rsp) { - gf1_cli_probe_rsp rsp = {0,}; - int ret = -1; - char msg[1024] = {0,}; + if (rsp.spec) { + free(rsp.spec); + } + if (rsp.xdata.xdata_val) { + free(rsp.xdata.xdata_val); + } +} - if (-1 == req->rpc_status) { - goto out; - } +static void +gf_free_xdr_fsm_log_rsp(gf1_cli_fsm_log_rsp rsp) +{ + if (rsp.op_errstr) { + free(rsp.op_errstr); + } + if (rsp.fsm_log.fsm_log_val) { + free(rsp.fsm_log.fsm_log_val); + } +} - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf1_cli_probe_rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - //rsp.op_ret = -1; - //rsp.op_errno = EINVAL; - goto out; +static int +gf_cli_probe_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + char msg[1024] = "success"; + + GF_ASSERT(myframe); + + if (-1 == req->rpc_status) { + goto out; + } + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + // rsp.op_ret = -1; + // rsp.op_errno = EINVAL; + goto out; + } + + gf_log("cli", GF_LOG_INFO, "Received resp to probe"); + + if (rsp.op_errstr && rsp.op_errstr[0] != '\0') { + snprintf(msg, sizeof(msg), "%s", rsp.op_errstr); + if (rsp.op_ret) { + gf_log("cli", GF_LOG_ERROR, "%s", msg); } + } - gf_log ("cli", GF_LOG_INFO, "Received resp to probe"); - if (!rsp.op_ret) { - switch (rsp.op_errno) { - case GF_PROBE_SUCCESS: - snprintf (msg, sizeof (msg), - "Probe successful"); - break; - case GF_PROBE_LOCALHOST: - snprintf (msg, sizeof (msg), - "Probe on localhost not needed"); - break; - case GF_PROBE_FRIEND: - snprintf (msg, sizeof (msg), - "Probe on host %s port %d already" - " in peer list", rsp.hostname, - rsp.port); - break; - default: - snprintf (msg, sizeof (msg), - "Probe returned with unknown errno %d", - rsp.op_errno); - break; - } - } + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_str(NULL, (rsp.op_ret) ? NULL : msg, rsp.op_ret, + rsp.op_errno, (rsp.op_ret) ? 
msg : NULL); + if (ret) + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; + } - if (rsp.op_ret) { - gf_cli_probe_strerror (&rsp, msg, sizeof (msg)); - gf_log ("glusterd",GF_LOG_ERROR,"Probe failed with op_ret %d" - " and op_errno %d", rsp.op_ret, rsp.op_errno); - } + if (!rsp.op_ret) + cli_out("peer probe: %s", msg); + else + cli_err("peer probe: failed: %s", msg); -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_str ("peerProbe", msg, rsp.op_ret, - rsp.op_errno, NULL); - if (ret) - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); - goto out; - } -#endif - if (!rsp.op_ret) - cli_out ("%s", msg); - else - cli_err ("%s", msg); + ret = rsp.op_ret; - ret = rsp.op_ret; +out: + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); + return ret; +} + +static int +gf_cli_deprobe_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + char msg[1024] = "success"; + + GF_ASSERT(myframe); + + if (-1 == req->rpc_status) { + goto out; + } + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + // rsp.op_ret = -1; + // rsp.op_errno = EINVAL; + goto out; + } + + gf_log("cli", GF_LOG_INFO, "Received resp to deprobe"); + + if (rsp.op_ret) { + if (rsp.op_errstr[0] != '\0') { + snprintf(msg, sizeof(msg), "%s", rsp.op_errstr); + gf_log("cli", GF_LOG_ERROR, "%s", rsp.op_errstr); + } + } + + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_str(NULL, (rsp.op_ret) ? NULL : msg, rsp.op_ret, + rsp.op_errno, (rsp.op_ret) ? msg : NULL); + if (ret) + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; + } + + if (!rsp.op_ret) + cli_out("peer detach: %s", msg); + else + cli_err("peer detach: failed: %s", msg); + + ret = rsp.op_ret; out: - cli_cmd_broadcast_response (ret); - return ret; + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); + return ret; } -int -gf_cli3_1_deprobe_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) +static int +gf_cli_output_peer_hostnames(dict_t *dict, int count, const char *prefix) { - gf1_cli_deprobe_rsp rsp = {0,}; - int ret = -1; - char msg[1024] = {0,}; + int ret = -1; + char key[512] = { + 0, + }; + int i = 0; + char *hostname = NULL; + + cli_out("Other names:"); + /* Starting from friend.hostname1, as friend.hostname0 will be the same + * as friend.hostname + */ + for (i = 1; i < count; i++) { + ret = snprintf(key, sizeof(key), "%s.hostname%d", prefix, i); + ret = dict_get_strn(dict, key, ret, &hostname); + if (ret) + break; + cli_out("%s", hostname); + hostname = NULL; + } - if (-1 == req->rpc_status) { - goto out; - } + return ret; +} - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf1_cli_deprobe_rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - //rsp.op_ret = -1; - //rsp.op_errno = EINVAL; - goto out; - } +static int +gf_cli_output_peer_status(dict_t *dict, int count) +{ + int ret = -1; + char *uuid_buf = NULL; + char *hostname_buf = NULL; + int32_t i = 1; + char key[256] = { + 0, + }; + int keylen; + char *state = NULL; + int32_t connected = 0; + const char *connected_str = NULL; + int hostname_count = 0; + + cli_out("Number of Peers: %d", count); + i = 1; + while (i <= count) { + keylen = snprintf(key, sizeof(key), "friend%d.uuid", i); + ret = dict_get_strn(dict, key, keylen, &uuid_buf); + if (ret) + goto out; - gf_log ("cli", GF_LOG_INFO, "Received resp to deprobe"); - if 
(rsp.op_ret) { - switch (rsp.op_errno) { - case GF_DEPROBE_LOCALHOST: - snprintf (msg, sizeof (msg), - "%s is localhost", rsp.hostname); - break; - case GF_DEPROBE_NOT_FRIEND: - snprintf (msg, sizeof (msg), - "%s is not part of cluster", - rsp.hostname); - break; - case GF_DEPROBE_BRICK_EXIST: - snprintf (msg, sizeof (msg), - "Brick(s) with the peer %s exist in " - "cluster", rsp.hostname); - break; - case GF_DEPROBE_FRIEND_DOWN: - snprintf (msg, sizeof (msg), - "One of the peers is probably down." - " Check with 'peer status'."); - break; - case GF_DEPROBE_QUORUM_NOT_MET: - snprintf (msg, sizeof (msg), "Cluster " - "quorum is not met. Changing " - "peers is not allowed in this" - " state"); - break; - default: - snprintf (msg, sizeof (msg), - "Detach unsuccessful\nDetach returned" - " with unknown errno %d", - rsp.op_errno); - break; - } - gf_log ("glusterd",GF_LOG_ERROR,"Detach failed with op_ret %d" - " and op_errno %d", rsp.op_ret, rsp.op_errno); - } else { - snprintf (msg, sizeof (msg), "Detach successful"); - } + keylen = snprintf(key, sizeof(key), "friend%d.hostname", i); + ret = dict_get_strn(dict, key, keylen, &hostname_buf); + if (ret) + goto out; -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_str ("peerDetach", msg, rsp.op_ret, - rsp.op_errno, NULL); - if (ret) - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); + keylen = snprintf(key, sizeof(key), "friend%d.connected", i); + ret = dict_get_int32n(dict, key, keylen, &connected); + if (ret) + goto out; + if (connected) + connected_str = "Connected"; + else + connected_str = "Disconnected"; + + keylen = snprintf(key, sizeof(key), "friend%d.state", i); + ret = dict_get_strn(dict, key, keylen, &state); + if (ret) + goto out; + + cli_out("\nHostname: %s\nUuid: %s\nState: %s (%s)", hostname_buf, + uuid_buf, state, connected_str); + + keylen = snprintf(key, sizeof(key), "friend%d.hostname_count", i); + ret = dict_get_int32n(dict, key, keylen, &hostname_count); + /* Print other addresses only if there are more than 1. 
+ */ + if ((ret == 0) && (hostname_count > 1)) { + snprintf(key, sizeof(key), "friend%d", i); + ret = gf_cli_output_peer_hostnames(dict, hostname_count, key); + if (ret) { + gf_log("cli", GF_LOG_WARNING, + "error outputting peer other names"); goto out; + } } -#endif - if (!rsp.op_ret) - cli_out ("%s", msg); + i++; + } + + ret = 0; +out: + return ret; +} + +static int +gf_cli_output_pool_list(dict_t *dict, int count) +{ + int ret = -1; + char *uuid_buf = NULL; + char *hostname_buf = NULL; + int32_t hostname_len = 8; /*min len 8 chars*/ + int32_t i = 1; + char key[64] = { + 0, + }; + int keylen; + int32_t connected = 0; + const char *connected_str = NULL; + + if (count <= 0) + goto out; + + while (i <= count) { + keylen = snprintf(key, sizeof(key), "friend%d.hostname", i); + ret = dict_get_strn(dict, key, keylen, &hostname_buf); + if (ret) + goto out; + + ret = strlen(hostname_buf); + if (ret > hostname_len) + hostname_len = ret; + + i++; + } + + cli_out("UUID\t\t\t\t\t%-*s\tState", hostname_len, "Hostname"); + + i = 1; + while (i <= count) { + keylen = snprintf(key, sizeof(key), "friend%d.uuid", i); + ret = dict_get_strn(dict, key, keylen, &uuid_buf); + if (ret) + goto out; + + keylen = snprintf(key, sizeof(key), "friend%d.hostname", i); + ret = dict_get_strn(dict, key, keylen, &hostname_buf); + if (ret) + goto out; + + keylen = snprintf(key, sizeof(key), "friend%d.connected", i); + ret = dict_get_int32n(dict, key, keylen, &connected); + if (ret) + goto out; + if (connected) + connected_str = "Connected"; else - cli_err ("%s", msg); + connected_str = "Disconnected"; - ret = rsp.op_ret; + cli_out("%s\t%-*s\t%s ", uuid_buf, hostname_len, hostname_buf, + connected_str); + i++; + } + ret = 0; out: - cli_cmd_broadcast_response (ret); - return ret; + return ret; } -int -gf_cli3_1_list_friends_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf1_cli_peer_list_rsp rsp = {0,}; - int ret = -1; - dict_t *dict = NULL; - char *uuid_buf = NULL; - char *hostname_buf = NULL; - int32_t i = 1; - char key[256] = {0,}; - char *state = NULL; - int32_t port = 0; - int32_t connected = 0; - char *connected_str = NULL; - char msg[1024] = {0,}; - - if (-1 == req->rpc_status) { +/* function pointer for gf_cli_output_{pool_list,peer_status} */ +typedef int (*cli_friend_output_fn)(dict_t *, int); + +static int +gf_cli_list_friends_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf1_cli_peer_list_rsp rsp = { + 0, + }; + int ret = -1; + dict_t *dict = NULL; + char msg[1024] = { + 0, + }; + const char *cmd = NULL; + cli_friend_output_fn friend_output_fn; + call_frame_t *frame = NULL; + unsigned long flags = 0; + + if (-1 == req->rpc_status) { + goto out; + } + + GF_ASSERT(myframe); + + frame = myframe; + + flags = (long)frame->local; + + if (flags == GF_CLI_LIST_POOL_NODES) { + cmd = "pool list"; + friend_output_fn = &gf_cli_output_pool_list; + } else { + cmd = "peer status"; + friend_output_fn = &gf_cli_output_peer_status; + } + + /* 'free' the flags set by gf_cli_list_friends */ + frame->local = NULL; + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf1_cli_peer_list_rsp); + if (ret < 0) { + gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL); + // rsp.op_ret = -1; + // rsp.op_errno = EINVAL; + goto out; + } + + gf_log("cli", GF_LOG_DEBUG, "Received resp to list: %d", rsp.op_ret); + + if (!rsp.op_ret) { + if (!rsp.friends.friends_len) { + snprintf(msg, sizeof(msg), "%s: No peers present", cmd); + if (global_state->mode & GLUSTER_MODE_XML) { + ret = 
cli_xml_output_peer_status(dict, rsp.op_ret, rsp.op_errno, + msg); + if (ret) + gf_log("cli", GF_LOG_ERROR, XML_ERROR); goto out; + } + cli_err("%s", msg); + ret = 0; + goto out; } - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf1_cli_peer_list_rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - //rsp.op_ret = -1; - //rsp.op_errno = EINVAL; - goto out; + dict = dict_new(); + + if (!dict) { + ret = -1; + goto out; } - gf_log ("cli", GF_LOG_INFO, "Received resp to list: %d", - rsp.op_ret); + ret = dict_unserialize(rsp.friends.friends_val, rsp.friends.friends_len, + &dict); - ret = rsp.op_ret; + if (ret) { + gf_log("", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL); + goto out; + } - if (!rsp.op_ret) { - - if (!rsp.friends.friends_len) { - snprintf (msg, sizeof (msg), - "No peers present"); -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_peer_status (dict, - rsp.op_ret, - rsp.op_errno, - msg); - if (ret) - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); - goto out; - } -#endif - cli_err ("%s", msg); - ret = 0; - goto out; - } + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_peer_status(dict, rsp.op_ret, rsp.op_errno, + msg); + if (ret) + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; + } - dict = dict_new (); + ret = dict_get_int32_sizen(dict, "count", &count); + if (ret) { + goto out; + } - if (!dict) { - ret = -1; - goto out; - } + ret = friend_output_fn(dict, count); + if (ret) { + goto out; + } + } else { + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_peer_status(dict, rsp.op_ret, rsp.op_errno, + NULL); + if (ret) + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + } else { + ret = -1; + } + goto out; + } - ret = dict_unserialize (rsp.friends.friends_val, - rsp.friends.friends_len, - &dict); + ret = 0; - if (ret) { - gf_log ("", GF_LOG_ERROR, - "Unable to allocate memory"); - goto out; - } +out: + if (ret) + cli_err("%s: failed", cmd); -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_peer_status (dict, rsp.op_ret, - rsp.op_errno, msg); - if (ret) - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); - goto out; - } -#endif - ret = dict_get_int32 (dict, "count", &count); + cli_cmd_broadcast_response(ret); - if (ret) { - goto out; - } + if (dict) + dict_unref(dict); - cli_out ("Number of Peers: %d", count); - - while ( i <= count) { - snprintf (key, 256, "friend%d.uuid", i); - ret = dict_get_str (dict, key, &uuid_buf); - if (ret) - goto out; - - snprintf (key, 256, "friend%d.hostname", i); - ret = dict_get_str (dict, key, &hostname_buf); - if (ret) - goto out; - - snprintf (key, 256, "friend%d.connected", i); - ret = dict_get_int32 (dict, key, &connected); - if (ret) - goto out; - if (connected) - connected_str = "Connected"; - else - connected_str = "Disconnected"; - - snprintf (key, 256, "friend%d.port", i); - ret = dict_get_int32 (dict, key, &port); - if (ret) - goto out; - - snprintf (key, 256, "friend%d.state", i); - ret = dict_get_str (dict, key, &state); - if (ret) - goto out; - - if (!port) { - cli_out ("\nHostname: %s\nUuid: %s\nState: %s " - "(%s)", - hostname_buf, uuid_buf, state, - connected_str); - } else { - cli_out ("\nHostname: %s\nPort: %d\nUuid: %s\n" - "State: %s (%s)", hostname_buf, port, - uuid_buf, state, connected_str); - } - i++; - } - } else { -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_peer_status (dict, rsp.op_ret, - rsp.op_errno, NULL); - if (ret) - gf_log ("cli", 
GF_LOG_ERROR, - "Error outputting to xml"); - } else -#endif - { - ret = -1; - } - goto out; - } + if (rsp.friends.friends_val) { + free(rsp.friends.friends_val); + } + return ret; +} +static int +gf_cli_get_state_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + dict_t *dict = NULL; + char *daemon_name = NULL; + char *ofilepath = NULL; + + GF_VALIDATE_OR_GOTO("cli", myframe, out); + + if (-1 == req->rpc_status) { + goto out; + } + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + goto out; + } + + dict = dict_new(); + + if (!dict) { + ret = -1; + goto out; + } + + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict); + if (ret) + goto out; + + if (rsp.op_ret) { + if (strcmp(rsp.op_errstr, "")) + cli_err("Failed to get daemon state: %s", rsp.op_errstr); + else + cli_err( + "Failed to get daemon state. Check glusterd" + " log file for more details"); + } else { + ret = dict_get_str_sizen(dict, "daemon", &daemon_name); + if (ret) + gf_log("cli", GF_LOG_ERROR, "Couldn't get daemon name"); - ret = 0; + ret = dict_get_str_sizen(dict, "ofilepath", &ofilepath); + if (ret) + gf_log("cli", GF_LOG_ERROR, "Couldn't get filepath"); + + if (daemon_name && ofilepath) + cli_out("%s state dumped to %s", daemon_name, ofilepath); + } + + ret = rsp.op_ret; out: - cli_cmd_broadcast_response (ret); - if (ret) - cli_err ("Peer status unsuccessful"); + if (dict) + dict_unref(dict); - if (dict) - dict_destroy (dict); + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); - return ret; + return ret; } -void -cli_out_options ( char *substr, char *optstr, char *valstr) +static void +cli_out_options(char *substr, char *optstr, char *valstr) { - char *ptr1 = NULL; - char *ptr2 = NULL; + char *ptr1 = NULL; + char *ptr2 = NULL; + + ptr1 = substr; + ptr2 = optstr; + + while (ptr1) { + /* Avoiding segmentation fault. 
*/ + if (!ptr2) + return; + if (*ptr1 != *ptr2) + break; + ptr1++; + ptr2++; + } + + if (*ptr2 == '\0') + return; + cli_out("%s: %s", ptr2, valstr); +} - ptr1 = substr; - ptr2 = optstr; +static int +_gf_cli_output_volinfo_opts(dict_t *d, char *k, data_t *v, void *tmp) +{ + int ret = 0; + char *key = NULL; + char *ptr = NULL; + data_t *value = NULL; + + key = tmp; + + ptr = strstr(k, "option."); + if (ptr) { + value = v; + if (!value) { + ret = -1; + goto out; + } + cli_out_options(key, k, v->data); + } +out: + return ret; +} - while (ptr1) - { - if (*ptr1 != *ptr2) - break; - ptr1++; - ptr2++; - if (!ptr1) - return; - if (!ptr2) - return; - } +static int +print_brick_details(dict_t *dict, int volcount, int start_index, int end_index, + int replica_count) +{ + char key[64] = { + 0, + }; + int keylen; + int index = start_index; + int isArbiter = 0; + int ret = -1; + char *brick = NULL; + + while (index <= end_index) { + keylen = snprintf(key, sizeof(key), "volume%d.brick%d", volcount, + index); + ret = dict_get_strn(dict, key, keylen, &brick); + if (ret) + goto out; + keylen = snprintf(key, sizeof(key), "volume%d.brick%d.isArbiter", + volcount, index); + if (dict_getn(dict, key, keylen)) + isArbiter = 1; + else + isArbiter = 0; - if (*ptr2 == '\0') - return; - cli_out ("%s: %s",ptr2 , valstr); + if (isArbiter) + cli_out("Brick%d: %s (arbiter)", index, brick); + else + cli_out("Brick%d: %s", index, brick); + index++; + } + ret = 0; +out: + return ret; } +static void +gf_cli_print_number_of_bricks(int type, int brick_count, int dist_count, + int stripe_count, int replica_count, + int disperse_count, int redundancy_count, + int arbiter_count) +{ + if (type == GF_CLUSTER_TYPE_NONE) { + cli_out("Number of Bricks: %d", brick_count); + } else if (type == GF_CLUSTER_TYPE_DISPERSE) { + cli_out("Number of Bricks: %d x (%d + %d) = %d", + (brick_count / dist_count), disperse_count - redundancy_count, + redundancy_count, brick_count); + } else { + /* For both replicate and stripe, dist_count is + good enough */ + if (arbiter_count == 0) { + cli_out("Number of Bricks: %d x %d = %d", + (brick_count / dist_count), dist_count, brick_count); + } else { + cli_out("Number of Bricks: %d x (%d + %d) = %d", + (brick_count / dist_count), dist_count - arbiter_count, + arbiter_count, brick_count); + } + } +} -int -gf_cli3_1_get_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - int ret = -1; - int opt_count = 0; - int k = 0; - int32_t i = 0; - int32_t j = 1; - int32_t status = 0; - int32_t type = 0; - int32_t brick_count = 0; - int32_t dist_count = 0; - int32_t stripe_count = 0; - int32_t replica_count = 0; - int32_t vol_type = 0; - int32_t transport = 0; - char *ptr = NULL; - char *volume_id_str = NULL; - char *brick = NULL; - char *volname = NULL; - dict_t *dict = NULL; - data_pair_t *pairs = NULL; - data_t *value = NULL; - cli_local_t *local = NULL; - char key[1024] = {0}; - char err_str[2048] = {0}; - gf_cli_rsp rsp = {0}; - - if (-1 == req->rpc_status) - goto out; +static int +gf_cli_get_volume_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + int ret = -1; + int opt_count = 0; + int32_t i = 0; + int32_t j = 1; + int32_t status = 0; + int32_t type = 0; + int32_t brick_count = 0; + int32_t dist_count = 0; + int32_t stripe_count = 0; + int32_t replica_count = 0; + int32_t disperse_count = 0; + int32_t redundancy_count = 0; + int32_t arbiter_count = 0; + int32_t snap_count = 0; + int32_t thin_arbiter_count = 0; + int32_t vol_type = 0; + int32_t transport 
= 0; + char *volume_id_str = NULL; + char *volname = NULL; + char *ta_brick = NULL; + dict_t *dict = NULL; + cli_local_t *local = NULL; + char key[64] = {0}; + int keylen; + char err_str[2048] = {0}; + gf_cli_rsp rsp = {0}; + char *caps __attribute__((unused)) = NULL; + int k __attribute__((unused)) = 0; + call_frame_t *frame = NULL; + + GF_ASSERT(myframe); + + if (-1 == req->rpc_status) + goto out; + + frame = myframe; + + GF_ASSERT(frame->local); + + local = frame->local; + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL); + goto out; + } + + gf_log("cli", GF_LOG_INFO, "Received resp to get vol: %d", rsp.op_ret); + + if (!rsp.dict.dict_len) { + if (global_state->mode & GLUSTER_MODE_XML) + goto xml_output; + cli_err("No volumes present"); + ret = 0; + goto out; + } - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); - if (ret < 0) { - gf_log ("cli", GF_LOG_ERROR, "error"); - goto out; - } + dict = dict_new(); - gf_log ("cli", GF_LOG_INFO, "Received resp to get vol: %d", - rsp.op_ret); + if (!dict) { + ret = -1; + goto out; + } - if (rsp.op_ret) { - ret = -1; - goto out; - } + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict); - if (!rsp.dict.dict_len) { -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) - goto xml_output; -#endif - cli_out ("No volumes present"); + if (ret) { + gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL); + goto out; + } + + ret = dict_get_int32_sizen(dict, "count", &count); + if (ret) + goto out; + + if (!count) { + switch (local->get_vol.flags) { + case GF_CLI_GET_NEXT_VOLUME: + GF_FREE(local->get_vol.volname); + local->get_vol.volname = NULL; ret = 0; goto out; - } - dict = dict_new (); - - if (!dict) { + case GF_CLI_GET_VOLUME: + snprintf(err_str, sizeof(err_str), "Volume %s does not exist", + local->get_vol.volname); ret = -1; - goto out; + if (!(global_state->mode & GLUSTER_MODE_XML)) + goto out; } + } - ret = dict_unserialize (rsp.dict.dict_val, - rsp.dict.dict_len, - &dict); + if (rsp.op_ret) { + if (global_state->mode & GLUSTER_MODE_XML) + goto xml_output; + ret = -1; + goto out; + } - if (ret) { - gf_log ("cli", GF_LOG_ERROR, - "Unable to allocate memory"); +xml_output: + if (global_state->mode & GLUSTER_MODE_XML) { + /* For GET_NEXT_VOLUME output is already begun in + * and will also end in gf_cli_get_next_volume() + */ + if (local->get_vol.flags == GF_CLI_GET_VOLUME) { + ret = cli_xml_output_vol_info_begin(local, rsp.op_ret, rsp.op_errno, + rsp.op_errstr); + if (ret) { + gf_log("cli", GF_LOG_ERROR, XML_ERROR); goto out; + } } - ret = dict_get_int32 (dict, "count", &count); - if (ret) + if (dict) { + ret = cli_xml_output_vol_info(local, dict); + if (ret) { + gf_log("cli", GF_LOG_ERROR, XML_ERROR); goto out; + } + } - local = ((call_frame_t *)myframe)->local; - - if (!count) { - switch (local->get_vol.flags) { - - case GF_CLI_GET_NEXT_VOLUME: - GF_FREE (local->get_vol.volname); - local->get_vol.volname = NULL; - ret = 0; - goto out; - - case GF_CLI_GET_VOLUME: - memset (err_str, 0, sizeof (err_str)); - snprintf (err_str, sizeof (err_str), - "Volume %s does not exist", - local->get_vol.volname); - ret = -1; -#if (HAVE_LIB_XML) - if (!(global_state->mode & GLUSTER_MODE_XML)) -#endif - { - goto out; - } - } + if (local->get_vol.flags == GF_CLI_GET_VOLUME) { + ret = cli_xml_output_vol_info_end(local); + if (ret) + gf_log("cli", GF_LOG_ERROR, XML_ERROR); } + goto out; + } -#if (HAVE_LIB_XML) -xml_output: - if (global_state->mode 
& GLUSTER_MODE_XML) { - /* For GET_NEXT_VOLUME output is already begun in - * and will also end in gf_cli3_1_get_next_volume() - */ - if (local->get_vol.flags == GF_CLI_GET_VOLUME) { - ret = cli_xml_output_vol_info_begin - (local, rsp.op_ret, rsp.op_errno, - rsp.op_errstr); - if (ret) { - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); - goto out; - } - } + while (i < count) { + cli_out(" "); + keylen = snprintf(key, sizeof(key), "volume%d.name", i); + ret = dict_get_strn(dict, key, keylen, &volname); + if (ret) + goto out; - if (dict) { - ret = cli_xml_output_vol_info (local, dict); - if (ret) { - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); - goto out; - } - } + keylen = snprintf(key, sizeof(key), "volume%d.type", i); + ret = dict_get_int32n(dict, key, keylen, &type); + if (ret) + goto out; - if (local->get_vol.flags == GF_CLI_GET_VOLUME) { - ret = cli_xml_output_vol_info_end (local); - if (ret) - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); - } - goto out; - } -#endif + keylen = snprintf(key, sizeof(key), "volume%d.status", i); + ret = dict_get_int32n(dict, key, keylen, &status); + if (ret) + goto out; - while ( i < count) { - cli_out (" "); - snprintf (key, 256, "volume%d.name", i); - ret = dict_get_str (dict, key, &volname); - if (ret) - goto out; + keylen = snprintf(key, sizeof(key), "volume%d.brick_count", i); + ret = dict_get_int32n(dict, key, keylen, &brick_count); + if (ret) + goto out; - snprintf (key, 256, "volume%d.type", i); - ret = dict_get_int32 (dict, key, &type); - if (ret) - goto out; + keylen = snprintf(key, sizeof(key), "volume%d.dist_count", i); + ret = dict_get_int32n(dict, key, keylen, &dist_count); + if (ret) + goto out; - snprintf (key, 256, "volume%d.status", i); - ret = dict_get_int32 (dict, key, &status); - if (ret) - goto out; + keylen = snprintf(key, sizeof(key), "volume%d.stripe_count", i); + ret = dict_get_int32n(dict, key, keylen, &stripe_count); + if (ret) + goto out; - snprintf (key, 256, "volume%d.brick_count", i); - ret = dict_get_int32 (dict, key, &brick_count); - if (ret) - goto out; + keylen = snprintf(key, sizeof(key), "volume%d.replica_count", i); + ret = dict_get_int32n(dict, key, keylen, &replica_count); + if (ret) + goto out; - snprintf (key, 256, "volume%d.dist_count", i); - ret = dict_get_int32 (dict, key, &dist_count); - if (ret) - goto out; + keylen = snprintf(key, sizeof(key), "volume%d.disperse_count", i); + ret = dict_get_int32n(dict, key, keylen, &disperse_count); + if (ret) + goto out; - snprintf (key, 256, "volume%d.stripe_count", i); - ret = dict_get_int32 (dict, key, &stripe_count); - if (ret) - goto out; + keylen = snprintf(key, sizeof(key), "volume%d.redundancy_count", i); + ret = dict_get_int32n(dict, key, keylen, &redundancy_count); + if (ret) + goto out; - snprintf (key, 256, "volume%d.replica_count", i); - ret = dict_get_int32 (dict, key, &replica_count); - if (ret) - goto out; + keylen = snprintf(key, sizeof(key), "volume%d.arbiter_count", i); + ret = dict_get_int32n(dict, key, keylen, &arbiter_count); + if (ret) + goto out; - snprintf (key, 256, "volume%d.transport", i); - ret = dict_get_int32 (dict, key, &transport); - if (ret) - goto out; + keylen = snprintf(key, sizeof(key), "volume%d.transport", i); + ret = dict_get_int32n(dict, key, keylen, &transport); + if (ret) + goto out; - snprintf (key, 256, "volume%d.volume_id", i); - ret = dict_get_str (dict, key, &volume_id_str); - if (ret) - goto out; - - vol_type = type; - - // Distributed (stripe/replicate/stripe-replica) setups - if 
((type > 0) && ( dist_count < brick_count)) - vol_type = type + 3; - - cli_out ("Volume Name: %s", volname); - cli_out ("Type: %s", cli_volume_type[vol_type]); - cli_out ("Volume ID: %s", volume_id_str); - cli_out ("Status: %s", cli_volume_status[status]); - - if (type == GF_CLUSTER_TYPE_STRIPE_REPLICATE) { - cli_out ("Number of Bricks: %d x %d x %d = %d", - (brick_count / dist_count), - stripe_count, - replica_count, - brick_count); - - } else if (type == GF_CLUSTER_TYPE_NONE) { - cli_out ("Number of Bricks: %d", brick_count); - - } else { - /* For both replicate and stripe, dist_count is - good enough */ - cli_out ("Number of Bricks: %d x %d = %d", - (brick_count / dist_count), - dist_count, brick_count); - } + keylen = snprintf(key, sizeof(key), "volume%d.volume_id", i); + ret = dict_get_strn(dict, key, keylen, &volume_id_str); + if (ret) + goto out; - cli_out ("Transport-type: %s", - ((transport == 0)?"tcp": - (transport == 1)?"rdma": - "tcp,rdma")); - j = 1; + keylen = snprintf(key, sizeof(key), "volume%d.snap_count", i); + ret = dict_get_int32n(dict, key, keylen, &snap_count); + if (ret) + goto out; - GF_FREE (local->get_vol.volname); - local->get_vol.volname = gf_strdup (volname); + keylen = snprintf(key, sizeof(key), "volume%d.thin_arbiter_count", i); + ret = dict_get_int32n(dict, key, keylen, &thin_arbiter_count); + if (ret) + goto out; - if (brick_count) - cli_out ("Bricks:"); + // Distributed (stripe/replicate/stripe-replica) setups + vol_type = get_vol_type(type, dist_count, brick_count); - while (j <= brick_count) { - snprintf (key, 1024, "volume%d.brick%d", i, j); - ret = dict_get_str (dict, key, &brick); - if (ret) - goto out; + cli_out("Volume Name: %s", volname); + cli_out("Type: %s", vol_type_str[vol_type]); + cli_out("Volume ID: %s", volume_id_str); + cli_out("Status: %s", cli_vol_status_str[status]); + cli_out("Snapshot Count: %d", snap_count); - cli_out ("Brick%d: %s", j, brick); - j++; - } + gf_cli_print_number_of_bricks( + type, brick_count, dist_count, stripe_count, replica_count, + disperse_count, redundancy_count, arbiter_count); - pairs = dict->members_list; - if (!pairs) { - ret = -1; - goto out; - } + cli_out("Transport-type: %s", + ((transport == 0) ? "tcp" + : (transport == 1) ? 
"rdma" : "tcp,rdma")); + j = 1; - snprintf (key, 256, "volume%d.opt_count",i); - ret = dict_get_int32 (dict, key, &opt_count); - if (ret) - goto out; - - if (!opt_count) - goto out; - - cli_out ("Options Reconfigured:"); - k = 0; - - while (k < opt_count) { - - snprintf (key, 256, "volume%d.option.",i); - while (pairs) { - ptr = strstr (pairs->key, "option."); - if (ptr) { - value = pairs->value; - if (!value) { - ret = -1; - goto out; - } - cli_out_options (key, pairs->key, - value->data); - } - pairs = pairs->next; - } - k++; - } + GF_FREE(local->get_vol.volname); + local->get_vol.volname = gf_strdup(volname); + + cli_out("Bricks:"); + ret = print_brick_details(dict, i, j, brick_count, replica_count); + if (ret) + goto out; - i++; + if (thin_arbiter_count) { + snprintf(key, sizeof(key), "volume%d.thin_arbiter_brick", i); + ret = dict_get_str(dict, key, &ta_brick); + if (ret) + goto out; + cli_out("Thin-arbiter-path: %s", ta_brick); } + snprintf(key, sizeof(key), "volume%d.opt_count", i); + ret = dict_get_int32(dict, key, &opt_count); + if (ret) + goto out; - ret = 0; -out: - cli_cmd_broadcast_response (ret); + if (!opt_count) + goto out; + + cli_out("Options Reconfigured:"); + + snprintf(key, sizeof(key), "volume%d.option.", i); + + ret = dict_foreach(dict, _gf_cli_output_volinfo_opts, key); if (ret) - cli_err ("%s", err_str); + goto out; + + i++; + } + + ret = 0; +out: + if (ret) + cli_err("%s", err_str); - if (dict) - dict_destroy (dict); + cli_cmd_broadcast_response(ret); - if (rsp.dict.dict_val) - free (rsp.dict.dict_val); + if (dict) + dict_unref(dict); - if (rsp.op_errstr) - free (rsp.op_errstr); + gf_free_xdr_cli_rsp(rsp); - gf_log ("cli", GF_LOG_INFO, "Returning: %d", ret); - return ret; + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + return ret; } -int -gf_cli3_1_create_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) +static int +gf_cli_create_volume_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) { - gf_cli_rsp rsp = {0,}; - int ret = -1; - cli_local_t *local = NULL; - char *volname = NULL; - dict_t *dict = NULL; + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + cli_local_t *local = NULL; + char *volname = NULL; + dict_t *rsp_dict = NULL; + call_frame_t *frame = NULL; - if (-1 == req->rpc_status) { - goto out; - } + GF_ASSERT(myframe); - local = ((call_frame_t *) (myframe))->local; - ((call_frame_t *) (myframe))->local = NULL; + if (-1 == req->rpc_status) { + goto out; + } - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } + frame = myframe; - dict = local->dict; + GF_ASSERT(frame->local); - ret = dict_get_str (dict, "volname", &volname); + local = frame->local; - gf_log ("cli", GF_LOG_INFO, "Received resp to create volume"); + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL); + goto out; + } -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_dict ("volCreate", dict, rsp.op_ret, - rsp.op_errno, rsp.op_errstr); - if (ret) - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); + gf_log("cli", GF_LOG_INFO, "Received resp to create volume"); + + if (global_state->mode & GLUSTER_MODE_XML) { + if (rsp.op_ret == 0) { + rsp_dict = dict_new(); + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, + &rsp_dict); + if (ret) { + gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL); goto out; + } } -#endif - if 
(rsp.op_ret && strcmp (rsp.op_errstr, "")) - cli_err ("%s", rsp.op_errstr); - else - cli_out ("Creation of volume %s has been %s", volname, - (rsp.op_ret) ? "unsuccessful": - "successful. Please start the volume to " - "access data."); - ret = rsp.op_ret; + ret = cli_xml_output_vol_create(rsp_dict, rsp.op_ret, rsp.op_errno, + rsp.op_errstr); + if (ret) + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; + } + + ret = dict_get_str_sizen(local->dict, "volname", &volname); + if (ret) + goto out; + + if (rsp.op_ret && strcmp(rsp.op_errstr, "")) + cli_err("volume create: %s: failed: %s", volname, rsp.op_errstr); + else if (rsp.op_ret) + cli_err("volume create: %s: failed", volname); + else + cli_out( + "volume create: %s: success: " + "please start the volume to access data", + volname); + + ret = rsp.op_ret; out: - cli_cmd_broadcast_response (ret); - if (dict) - dict_unref (dict); - if (local) - cli_local_wipe (local); - if (rsp.dict.dict_val) - free (rsp.dict.dict_val); - if (rsp.op_errstr) - free (rsp.op_errstr); - return ret; + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); + + if (rsp_dict) + dict_unref(rsp_dict); + return ret; } -int -gf_cli3_1_delete_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) +static int +gf_cli_delete_volume_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) { - gf_cli_rsp rsp = {0,}; - int ret = -1; - cli_local_t *local = NULL; - char *volname = NULL; - call_frame_t *frame = NULL; - dict_t *dict = NULL; + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + cli_local_t *local = NULL; + char *volname = NULL; + call_frame_t *frame = NULL; + dict_t *rsp_dict = NULL; - if (-1 == req->rpc_status) { - goto out; - } + if (-1 == req->rpc_status) { + goto out; + } - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } + GF_ASSERT(myframe); + frame = myframe; - frame = myframe; - local = frame->local; - frame->local = NULL; + GF_ASSERT(frame->local); - if (local) - dict = local->dict; - ret = dict_get_str (dict, "volname", &volname); - if (ret) { - gf_log (THIS->name, GF_LOG_ERROR, - "dict get failed"); - goto out; - } + local = frame->local; - gf_log ("cli", GF_LOG_INFO, "Received resp to delete volume"); + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL); + goto out; + } -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_dict ("volDelete", dict, rsp.op_ret, - rsp.op_errno, rsp.op_errstr); - if (ret) - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); + gf_log("cli", GF_LOG_INFO, "Received resp to delete volume"); + + if (global_state->mode & GLUSTER_MODE_XML) { + if (rsp.op_ret == 0) { + rsp_dict = dict_new(); + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, + &rsp_dict); + if (ret) { + gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL); goto out; + } } -#endif - if (rsp.op_ret && strcmp (rsp.op_errstr, "")) - cli_err ("%s", rsp.op_errstr); - else - cli_out ("Deleting volume %s has been %s", volname, - (rsp.op_ret) ? 
"unsuccessful": "successful"); - ret = rsp.op_ret; + ret = cli_xml_output_generic_volume("volDelete", rsp_dict, rsp.op_ret, + rsp.op_errno, rsp.op_errstr); + if (ret) + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; + } + + ret = dict_get_str_sizen(local->dict, "volname", &volname); + if (ret) { + gf_log(frame->this->name, GF_LOG_ERROR, "dict get failed"); + goto out; + } + + if (rsp.op_ret && strcmp(rsp.op_errstr, "")) + cli_err("volume delete: %s: failed: %s", volname, rsp.op_errstr); + else if (rsp.op_ret) + cli_err("volume delete: %s: failed", volname); + else + cli_out("volume delete: %s: success", volname); + + ret = rsp.op_ret; out: - cli_cmd_broadcast_response (ret); - cli_local_wipe (local); - if (rsp.dict.dict_val) - free (rsp.dict.dict_val); - if (dict) - dict_unref (dict); + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); - gf_log ("", GF_LOG_INFO, "Returning with %d", ret); - return ret; + if (rsp_dict) + dict_unref(rsp_dict); + gf_log("", GF_LOG_DEBUG, RETURNING, ret); + return ret; } -int -gf_cli3_1_start_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) +static int +gf_cli3_1_uuid_get_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) { - gf_cli_rsp rsp = {0,}; - int ret = -1; - cli_local_t *local = NULL; - char *volname = NULL; - call_frame_t *frame = NULL; - dict_t *dict = NULL; + char *uuid_str = NULL; + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + cli_local_t *local = NULL; + call_frame_t *frame = NULL; + dict_t *dict = NULL; - if (-1 == req->rpc_status) { - goto out; - } + GF_ASSERT(myframe); - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } + if (-1 == req->rpc_status) + goto out; - frame = myframe; + frame = myframe; - if (frame) { - local = frame->local; - frame->local = NULL; - } + GF_ASSERT(frame->local); - if (local) - dict = local->dict; + local = frame->local; - ret = dict_get_str (dict, "volname", &volname); - if (ret) { - gf_log (THIS->name, GF_LOG_ERROR, "dict get failed"); - goto out; - } + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL); + goto out; + } - gf_log ("cli", GF_LOG_INFO, "Received resp to start volume"); + frame->local = NULL; -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_dict ("volStart", dict, rsp.op_ret, - rsp.op_errno, rsp.op_errstr); - if (ret) - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); - goto out; - } -#endif + gf_log("cli", GF_LOG_INFO, "Received resp to uuid get"); + + dict = dict_new(); + if (!dict) { + ret = -1; + goto out; + } + + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict); + if (ret) { + gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL); + goto out; + } + + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_dict("uuidGenerate", dict, rsp.op_ret, + rsp.op_errno, rsp.op_errstr); + if (ret) + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; + } - if (rsp.op_ret && strcmp (rsp.op_errstr, "")) - cli_err ("%s", rsp.op_errstr); + if (rsp.op_ret) { + if (strcmp(rsp.op_errstr, "") == 0) + cli_err("Get uuid was unsuccessful"); else - cli_out ("Starting volume %s has been %s", volname, - (rsp.op_ret) ? 
"unsuccessful": "successful"); + cli_err("%s", rsp.op_errstr); - ret = rsp.op_ret; + } else { + ret = dict_get_str_sizen(dict, "uuid", &uuid_str); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Failed to get uuid from dictionary"); + goto out; + } + cli_out("UUID: %s", uuid_str); + } + ret = rsp.op_ret; out: - cli_cmd_broadcast_response (ret); - if (local) - cli_local_wipe (local); - if (rsp.dict.dict_val) - free (rsp.dict.dict_val); - if (rsp.op_errstr) - free (rsp.op_errstr); - if (dict) - dict_unref (dict); - return ret; + cli_cmd_broadcast_response(ret); + cli_local_wipe(local); + gf_free_xdr_cli_rsp(rsp); + + if (dict) + dict_unref(dict); + + gf_log("", GF_LOG_DEBUG, RETURNING, ret); + return ret; } -int -gf_cli3_1_stop_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) +static int +gf_cli3_1_uuid_reset_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) { - gf_cli_rsp rsp = {0,}; - int ret = -1; - cli_local_t *local = NULL; - char *volname = NULL; - call_frame_t *frame = NULL; - dict_t *dict = NULL; + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + cli_local_t *local = NULL; + call_frame_t *frame = NULL; - if (-1 == req->rpc_status) { - goto out; - } + GF_ASSERT(myframe); - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } + if (-1 == req->rpc_status) { + goto out; + } - frame = myframe; + frame = myframe; - if (frame) { - local = frame->local; - frame->local = NULL; - } + GF_ASSERT(frame->local); - if (local) { - dict = local->dict; - ret = dict_get_str (dict, "volname", &volname); - if (ret) { - gf_log (THIS->name, GF_LOG_ERROR, - "Unable to get volname from dict"); - goto out; - } - } + local = frame->local; - gf_log ("cli", GF_LOG_INFO, "Received resp to stop volume"); + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL); + goto out; + } -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_dict ("volStop", dict, rsp.op_ret, - rsp.op_errno, rsp.op_errstr); - if (ret) - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); - goto out; - } -#endif + frame->local = NULL; - if (rsp.op_ret && strcmp (rsp.op_errstr, "")) - cli_err ("%s", rsp.op_errstr); - else - cli_out ("Stopping volume %s has been %s", volname, - (rsp.op_ret) ? "unsuccessful": "successful"); - ret = rsp.op_ret; + gf_log("cli", GF_LOG_INFO, "Received resp to uuid reset"); + + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_dict("uuidReset", NULL, rsp.op_ret, rsp.op_errno, + rsp.op_errstr); + if (ret) + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; + } + + if (rsp.op_ret && strcmp(rsp.op_errstr, "")) + cli_err("%s", rsp.op_errstr); + else + cli_out("resetting the peer uuid has been %s", + (rsp.op_ret) ? 
"unsuccessful" : "successful"); + ret = rsp.op_ret; out: - cli_cmd_broadcast_response (ret); - if (rsp.op_errstr) - free (rsp.op_errstr); - if (rsp.dict.dict_val) - free (rsp.dict.dict_val); - if (local) - cli_local_wipe (local); + cli_cmd_broadcast_response(ret); + cli_local_wipe(local); + gf_free_xdr_cli_rsp(rsp); - return ret; + gf_log("", GF_LOG_DEBUG, RETURNING, ret); + return ret; } -int -gf_cli3_1_defrag_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf_cli_rsp rsp = {0,}; - cli_local_t *local = NULL; - char *volname = NULL; - call_frame_t *frame = NULL; - char *status = "unknown"; - int cmd = 0; - int ret = -1; - dict_t *dict = NULL; - dict_t *local_dict = NULL; - uint64_t files = 0; - uint64_t size = 0; - uint64_t lookup = 0; - char msg[1024] = {0,}; - gf_defrag_status_t status_rcd = GF_DEFRAG_STATUS_NOT_STARTED; - int32_t counter = 0; - char *node_uuid = NULL; - char key[256] = {0,}; - int32_t i = 1; - uint64_t failures = 0; - - if (-1 == req->rpc_status) { - goto out; - } +static int +gf_cli_start_volume_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + cli_local_t *local = NULL; + char *volname = NULL; + call_frame_t *frame = NULL; + dict_t *rsp_dict = NULL; - ret = xdr_to_generic (*iov, &rsp, - (xdrproc_t)xdr_gf_cli_rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } + GF_ASSERT(myframe); - frame = myframe; + if (-1 == req->rpc_status) { + goto out; + } - if (frame) { - local = frame->local; - frame->local = NULL; - } + frame = myframe; - if (local) { - local_dict = local->dict; - } + GF_ASSERT(frame->local); - ret = dict_get_str (local_dict, "volname", &volname); - if (ret) { - gf_log (THIS->name, GF_LOG_ERROR, - "Failed to get volname"); - goto out; - } + local = frame->local; - ret = dict_get_int32 (local_dict, "rebalance-command", (int32_t*)&cmd); - if (ret) { - gf_log (THIS->name, GF_LOG_ERROR, - "Failed to get command"); + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL); + goto out; + } + + gf_log("cli", GF_LOG_INFO, "Received resp to start volume"); + + if (global_state->mode & GLUSTER_MODE_XML) { + if (rsp.op_ret == 0) { + rsp_dict = dict_new(); + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, + &rsp_dict); + if (ret) { + gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL); goto out; + } } - if (rsp.dict.dict_len) { - /* Unserialize the dictionary */ - dict = dict_new (); + ret = cli_xml_output_generic_volume("volStart", rsp_dict, rsp.op_ret, + rsp.op_errno, rsp.op_errstr); + if (ret) + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; + } - ret = dict_unserialize (rsp.dict.dict_val, - rsp.dict.dict_len, - &dict); - if (ret < 0) { - gf_log ("glusterd", GF_LOG_ERROR, - "failed to " - "unserialize req-buffer to dictionary"); - goto out; - } - } + ret = dict_get_str_sizen(local->dict, "volname", &volname); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "dict get failed"); + goto out; + } - if (!((cmd == GF_DEFRAG_CMD_STOP) || (cmd == GF_DEFRAG_CMD_STATUS))) { - /* All other possibility is about starting a volume */ - if (rsp.op_ret && strcmp (rsp.op_errstr, "")) - snprintf (msg, sizeof (msg), "%s", rsp.op_errstr); - else - snprintf (msg, sizeof (msg), - "Starting rebalance on volume %s has been %s", - volname, (rsp.op_ret) ? 
"unsuccessful": - "successful"); - goto done; - } + if (rsp.op_ret && strcmp(rsp.op_errstr, "")) + cli_err("volume start: %s: failed: %s", volname, rsp.op_errstr); + else if (rsp.op_ret) + cli_err("volume start: %s: failed", volname); + else + cli_out("volume start: %s: success", volname); - ret = dict_get_int32 (dict, "count", &counter); - if (ret) { - gf_log (THIS->name, GF_LOG_ERROR, "count not set"); - goto out; - } + ret = rsp.op_ret; - if (cmd == GF_DEFRAG_CMD_STOP) { - if (rsp.op_ret == -1) { - if (strcmp (rsp.op_errstr, "")) - snprintf (msg, sizeof (msg), - "%s", rsp.op_errstr); - else - snprintf (msg, sizeof (msg), - "rebalance volume %s stop failed", - volname); - goto done; - } else { - snprintf (msg, sizeof (msg), - "Stopped rebalance process on volume %s \n", - volname); - } - } - if (cmd == GF_DEFRAG_CMD_STATUS) { - if (rsp.op_ret == -1) { - if (strcmp (rsp.op_errstr, "")) - snprintf (msg, sizeof (msg), - "%s", rsp.op_errstr); - else - snprintf (msg, sizeof (msg), - "Failed to get the status of " - "rebalance process"); - goto done; - } - } - cli_out ("%40s %16s %13s %13s %13s %14s", "Node", "Rebalanced-files", - "size", "scanned", "failures","status"); - cli_out ("%40s %16s %13s %13s %13s %14s", "---------", "-----------", - "-----------", "-----------", "-----------", "------------"); +out: + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); - do { - snprintf (key, 256, "node-uuid-%d", i); - ret = dict_get_str (dict, key, &node_uuid); - if (ret) - gf_log (THIS->name, GF_LOG_TRACE, - "failed to get node-uuid"); + if (rsp_dict) + dict_unref(rsp_dict); + return ret; +} - memset (key, 0, 256); - snprintf (key, 256, "files-%d", i); - ret = dict_get_uint64 (dict, key, &files); - if (ret) - gf_log (THIS->name, GF_LOG_TRACE, - "failed to get file count"); +static int +gf_cli_stop_volume_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + cli_local_t *local = NULL; + char *volname = NULL; + call_frame_t *frame = NULL; + dict_t *rsp_dict = NULL; - memset (key, 0, 256); - snprintf (key, 256, "size-%d", i); - ret = dict_get_uint64 (dict, key, &size); - if (ret) - gf_log (THIS->name, GF_LOG_TRACE, - "failed to get size of xfer"); + GF_ASSERT(myframe); - memset (key, 0, 256); - snprintf (key, 256, "lookups-%d", i); - ret = dict_get_uint64 (dict, key, &lookup); - if (ret) - gf_log (THIS->name, GF_LOG_TRACE, - "failed to get lookedup file count"); + if (-1 == req->rpc_status) { + goto out; + } - memset (key, 0, 256); - snprintf (key, 256, "status-%d", i); - ret = dict_get_int32 (dict, key, (int32_t *)&status_rcd); - if (ret) - gf_log (THIS->name, GF_LOG_TRACE, - "failed to get status"); + frame = myframe; - memset (key, 0, 256); - snprintf (key, 256, "failures-%d", i); - ret = dict_get_uint64 (dict, key, &failures); - if (ret) - gf_log (THIS->name, GF_LOG_TRACE, - "failed to get failures count"); - - switch (status_rcd) { - case GF_DEFRAG_STATUS_NOT_STARTED: - status = "not started"; - break; - case GF_DEFRAG_STATUS_STARTED: - status = "in progress"; - break; - case GF_DEFRAG_STATUS_STOPPED: - status = "stopped"; - break; - case GF_DEFRAG_STATUS_COMPLETE: - status = "completed"; - break; - case GF_DEFRAG_STATUS_FAILED: - status = "failed"; - break; - } - cli_out ("%40s %16"PRId64 "%13"PRId64 "%13"PRId64 "%13"PRId64 - " %14s", node_uuid, files, size, lookup, failures, - status); - i++; - } while (i <= counter); + GF_ASSERT(frame->local); + local = frame->local; -done: -#if (HAVE_LIB_XML) - if 
(global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_str ("volRebalance", msg, rsp.op_ret, - status_rcd, rsp.op_errstr); - if (ret) - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL); + goto out; + } + + gf_log("cli", GF_LOG_INFO, "Received resp to stop volume"); + + if (global_state->mode & GLUSTER_MODE_XML) { + if (rsp.op_ret == 0) { + rsp_dict = dict_new(); + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, + &rsp_dict); + if (ret) { + gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL); goto out; + } } -#endif - if (rsp.op_ret) - cli_err ("%s", msg); - else - cli_out ("%s", msg); - ret = rsp.op_ret; + + ret = cli_xml_output_generic_volume("volStop", rsp_dict, rsp.op_ret, + rsp.op_errno, rsp.op_errstr); + if (ret) + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; + } + + ret = dict_get_str_sizen(local->dict, "volname", &volname); + if (ret) { + gf_log(frame->this->name, GF_LOG_ERROR, + "Unable to get volname from dict"); + goto out; + } + + if (rsp.op_ret && strcmp(rsp.op_errstr, "")) + cli_err("volume stop: %s: failed: %s", volname, rsp.op_errstr); + else if (rsp.op_ret) + cli_err("volume stop: %s: failed", volname); + else + cli_out("volume stop: %s: success", volname); + + ret = rsp.op_ret; out: - if (rsp.op_errstr) - free (rsp.op_errstr); //malloced by xdr - if (rsp.dict.dict_val) - free (rsp.dict.dict_val); //malloced by xdr - if (dict) - dict_unref (dict); - if (local_dict) - dict_unref (local_dict); - if (local) - cli_local_wipe (local); - cli_cmd_broadcast_response (ret); - return ret; + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); + + if (rsp_dict) + dict_unref(rsp_dict); + + return ret; } -int -gf_cli3_1_rename_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) +static int +gf_cli_print_rebalance_status(dict_t *dict, enum gf_task_types task_type) { - gf_cli_rsp rsp = {0,}; - int ret = -1; - char msg[1024] = {0,}; + int ret = -1; + int count = 0; + int i = 1; + char key[64] = { + 0, + }; + int keylen; + gf_defrag_status_t status_rcd = GF_DEFRAG_STATUS_NOT_STARTED; + uint64_t files = 0; + uint64_t size = 0; + uint64_t lookup = 0; + char *node_name = NULL; + uint64_t failures = 0; + uint64_t skipped = 0; + double elapsed = 0; + char *status_str = NULL; + char *size_str = NULL; + int32_t hrs = 0; + uint32_t min = 0; + uint32_t sec = 0; + gf_boolean_t fix_layout = _gf_false; + uint64_t max_time = 0; + uint64_t max_elapsed = 0; + uint64_t time_left = 0; + gf_boolean_t show_estimates = _gf_false; + + ret = dict_get_int32_sizen(dict, "count", &count); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "count not set"); + goto out; + } + + for (i = 1; i <= count; i++) { + keylen = snprintf(key, sizeof(key), "status-%d", i); + ret = dict_get_int32n(dict, key, keylen, (int32_t *)&status_rcd); + /* If information from a node is missing we should skip + * the node and try to fetch information of other nodes. + * If information is not found for all nodes, we should + * error out. 
+ */ + if (!ret) + break; + if (ret && i == count) { + gf_log("cli", GF_LOG_TRACE, "failed to get status"); + goto out; + } + } + + /* Fix layout will be sent to all nodes for the volume + so every status should be of type + GF_DEFRAG_STATUS_LAYOUT_FIX* + */ + + if ((task_type == GF_TASK_TYPE_REBALANCE) && + (status_rcd >= GF_DEFRAG_STATUS_LAYOUT_FIX_STARTED)) { + fix_layout = _gf_true; + } + + if (fix_layout) { + cli_out("%35s %41s %27s", "Node", "status", "run time in h:m:s"); + cli_out("%35s %41s %27s", "---------", "-----------", "------------"); + } else { + cli_out("%40s %16s %13s %13s %13s %13s %20s %18s", "Node", + "Rebalanced-files", "size", "scanned", "failures", "skipped", + "status", + "run time in" + " h:m:s"); + cli_out("%40s %16s %13s %13s %13s %13s %20s %18s", "---------", + "-----------", "-----------", "-----------", "-----------", + "-----------", "------------", "--------------"); + } + + for (i = 1; i <= count; i++) { + /* Reset the variables to prevent carryover of values */ + node_name = NULL; + files = 0; + size = 0; + lookup = 0; + skipped = 0; + status_str = NULL; + elapsed = 0; + time_left = 0; + + /* Check if status is NOT_STARTED, and continue early */ + keylen = snprintf(key, sizeof(key), "status-%d", i); + + ret = dict_get_int32n(dict, key, keylen, (int32_t *)&status_rcd); + if (ret == -ENOENT) { + gf_log("cli", GF_LOG_TRACE, "count %d %d", count, i); + gf_log("cli", GF_LOG_TRACE, "failed to get status"); + gf_log("cli", GF_LOG_ERROR, "node down and has failed to set dict"); + continue; + /* skip this node if value not available*/ + } else if (ret) { + gf_log("cli", GF_LOG_TRACE, "count %d %d", count, i); + gf_log("cli", GF_LOG_TRACE, "failed to get status"); + continue; + /* skip this node if value not available*/ + } + + if (GF_DEFRAG_STATUS_NOT_STARTED == status_rcd) + continue; + + if (GF_DEFRAG_STATUS_STARTED == status_rcd) + show_estimates = _gf_true; + + keylen = snprintf(key, sizeof(key), "node-name-%d", i); + ret = dict_get_strn(dict, key, keylen, &node_name); + if (ret) + gf_log("cli", GF_LOG_TRACE, "failed to get node-name"); - if (-1 == req->rpc_status) { - goto out; - } + snprintf(key, sizeof(key), "files-%d", i); + ret = dict_get_uint64(dict, key, &files); + if (ret) + gf_log("cli", GF_LOG_TRACE, "failed to get file count"); - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } + snprintf(key, sizeof(key), "size-%d", i); + ret = dict_get_uint64(dict, key, &size); + if (ret) + gf_log("cli", GF_LOG_TRACE, "failed to get size of xfer"); + snprintf(key, sizeof(key), "lookups-%d", i); + ret = dict_get_uint64(dict, key, &lookup); + if (ret) + gf_log("cli", GF_LOG_TRACE, "failed to get lookedup file count"); - gf_log ("cli", GF_LOG_INFO, "Received resp to probe"); - snprintf (msg, sizeof (msg), "Rename volume %s", - (rsp.op_ret) ? 
"unsuccessful": "successful"); + snprintf(key, sizeof(key), "failures-%d", i); + ret = dict_get_uint64(dict, key, &failures); + if (ret) + gf_log("cli", GF_LOG_TRACE, "failed to get failures count"); -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_str ("volRename", msg, rsp.op_ret, - rsp.op_errno, rsp.op_errstr); - if (ret) - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); - goto out; + snprintf(key, sizeof(key), "skipped-%d", i); + ret = dict_get_uint64(dict, key, &skipped); + if (ret) + gf_log("cli", GF_LOG_TRACE, "failed to get skipped count"); + + /* For remove-brick include skipped count into failure count*/ + if (task_type != GF_TASK_TYPE_REBALANCE) { + failures += skipped; + skipped = 0; } -#endif - if (rsp.op_ret) - cli_err ("%s", msg); - else - cli_out ("%s", msg); - ret = rsp.op_ret; + snprintf(key, sizeof(key), "run-time-%d", i); + ret = dict_get_double(dict, key, &elapsed); + if (ret) + gf_log("cli", GF_LOG_TRACE, "failed to get run-time"); -out: - cli_cmd_broadcast_response (ret); - return ret; -} + snprintf(key, sizeof(key), "time-left-%d", i); + ret = dict_get_uint64(dict, key, &time_left); + if (ret) + gf_log("cli", GF_LOG_TRACE, "failed to get time left"); -int -gf_cli3_1_reset_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf_cli_rsp rsp = {0,}; - int ret = -1; - char msg[1024] = {0,}; + if (elapsed > max_elapsed) + max_elapsed = elapsed; - if (-1 == req->rpc_status) { - goto out; - } + if (time_left > max_time) + max_time = time_left; - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } + /* Check for array bound */ + if (status_rcd >= GF_DEFRAG_STATUS_MAX) + status_rcd = GF_DEFRAG_STATUS_MAX; - gf_log ("cli", GF_LOG_INFO, "Received resp to reset"); + status_str = cli_vol_task_status_str[status_rcd]; + size_str = gf_uint64_2human_readable(size); + hrs = elapsed / 3600; + min = ((uint64_t)elapsed % 3600) / 60; + sec = ((uint64_t)elapsed % 3600) % 60; - if (rsp.op_ret && strcmp (rsp.op_errstr, "")) - snprintf (msg, sizeof (msg), "%s", rsp.op_errstr); - else - snprintf (msg, sizeof (msg), "reset volume %s", - (rsp.op_ret) ? "unsuccessful": "successful"); - -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_str ("volReset", msg, rsp.op_ret, - rsp.op_errno, rsp.op_errstr); - if (ret) - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); - goto out; + if (fix_layout) { + cli_out("%35s %50s %8d:%d:%d", node_name, status_str, hrs, min, + sec); + } else { + if (size_str) { + cli_out("%40s %16" PRIu64 + " %13s" + " %13" PRIu64 " %13" PRIu64 " %13" PRIu64 + " %20s " + "%8d:%02d:%02d", + node_name, files, size_str, lookup, failures, skipped, + status_str, hrs, min, sec); + } else { + cli_out("%40s %16" PRIu64 " %13" PRIu64 " %13" PRIu64 + " %13" PRIu64 " %13" PRIu64 + " %20s" + " %8d:%02d:%02d", + node_name, files, size, lookup, failures, skipped, + status_str, hrs, min, sec); + } + } + GF_FREE(size_str); + } + + /* Max time will be non-zero if rebalance is still running */ + if (max_time) { + hrs = max_time / 3600; + min = (max_time % 3600) / 60; + sec = (max_time % 3600) % 60; + + if (hrs < REBAL_ESTIMATE_SEC_UPPER_LIMIT) { + cli_out( + "Estimated time left for rebalance to " + "complete : %8d:%02d:%02d", + hrs, min, sec); + } else { + cli_out( + "Estimated time left for rebalance to " + "complete : > 2 months. 
+ "later.");
+ }
+ } else {
+ /* Rebalance will return 0 if it could not calculate the
+ * estimates or if it is complete.
+ */
+ if (!show_estimates) {
+ goto out;
+ }
+ if (max_elapsed <= REBAL_ESTIMATE_START_TIME) {
+ cli_out(
+ "The estimated time for rebalance to complete "
+ "will be unavailable for the first 10 "
+ "minutes.");
+ } else {
+ cli_out(
+ "Rebalance estimated time unavailable. Please "
+ "try again later.");
 }
-#endif
-
- if (rsp.op_ret)
- cli_err ("%s", msg);
- else
- cli_out ("%s", msg);
- ret = rsp.op_ret;
-
+ }
 out:
- cli_cmd_broadcast_response (ret);
- return ret;
+ return ret;
 }

-int
-gf_cli3_1_set_volume_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+static int
+gf_cli_defrag_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
 {
- gf_cli_rsp rsp = {0,};
- int ret = -1;
- dict_t *dict = NULL;
- char *help_str = NULL;
- char msg[1024] = {0,};
-
- if (-1 == req->rpc_status) {
- goto out;
+ gf_cli_rsp rsp = {
+ 0,
+ };
+ cli_local_t *local = NULL;
+ char *volname = NULL;
+ call_frame_t *frame = NULL;
+ int cmd = 0;
+ int ret = -1;
+ dict_t *dict = NULL;
+ char msg[1024] = {
+ 0,
+ };
+ char *task_id_str = NULL;
+
+ if (-1 == req->rpc_status) {
+ goto out;
+ }
+
+ GF_ASSERT(myframe);
+
+ frame = myframe;
+
+ GF_ASSERT(frame->local);
+
+ local = frame->local;
+
+ ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
+ if (ret < 0) {
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
+ goto out;
+ }
+
+ ret = dict_get_str_sizen(local->dict, "volname", &volname);
+ if (ret) {
+ gf_log(frame->this->name, GF_LOG_ERROR, "Failed to get volname");
+ goto out;
+ }
+
+ ret = dict_get_int32_sizen(local->dict, "rebalance-command",
+ (int32_t *)&cmd);
+ if (ret) {
+ gf_log("cli", GF_LOG_ERROR, "Failed to get command");
+ goto out;
+ }
+
+ if (rsp.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new();
+
+ ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
+ if (ret < 0) {
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
+ goto out;
 }
+ }

- ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
- if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "error");
- goto out;
+ if (!((cmd == GF_DEFRAG_CMD_STOP) || (cmd == GF_DEFRAG_CMD_STATUS)) &&
+ !(global_state->mode & GLUSTER_MODE_XML)) {
+ ret = dict_get_str_sizen(dict, GF_REBALANCE_TID_KEY, &task_id_str);
+ if (ret) {
+ gf_log("cli", GF_LOG_WARNING, "failed to get %s from dict",
+ GF_REBALANCE_TID_KEY);
+ }
+ if (rsp.op_ret && strcmp(rsp.op_errstr, "")) {
+ snprintf(msg, sizeof(msg), "%s", rsp.op_errstr);
+ } else {
+ if (!rsp.op_ret) {
+ /* append errstr in the cli msg for the successful
+ * case since unlock failures can be highlighted
+ * even though the rebalance command was successful
+ */
+ snprintf(msg, sizeof(msg),
+ "Rebalance on %s has been "
+ "started successfully. Use "
Use " + "rebalance status command to" + " check status of the " + "rebalance process.\nID: %s", + volname, task_id_str); + } else { + snprintf(msg, sizeof(msg), + "Starting rebalance on volume %s has " + "been unsuccessful.", + volname); + } + } + goto done; + } + + if (cmd == GF_DEFRAG_CMD_STOP) { + if (rsp.op_ret == -1) { + if (strcmp(rsp.op_errstr, "")) + snprintf(msg, sizeof(msg), "%s", rsp.op_errstr); + else + snprintf(msg, sizeof(msg), "rebalance volume %s stop failed", + volname); + goto done; + } else { + /* append errstr in the cli msg for successful case + * since unlock failures can be highlighted event though + * rebalance command was successful */ + snprintf(msg, sizeof(msg), + "rebalance process may be in the middle of a " + "file migration.\nThe process will be fully " + "stopped once the migration of the file is " + "complete.\nPlease check rebalance process " + "for completion before doing any further " + "brick related tasks on the volume.\n%s", + rsp.op_errstr); + } + } + if (cmd == GF_DEFRAG_CMD_STATUS) { + if (rsp.op_ret == -1) { + if (strcmp(rsp.op_errstr, "")) + snprintf(msg, sizeof(msg), "%s", rsp.op_errstr); + else + snprintf(msg, sizeof(msg), + "Failed to get the status of rebalance process"); + goto done; + } else { + snprintf(msg, sizeof(msg), "%s", rsp.op_errstr); } + } - gf_log ("cli", GF_LOG_INFO, "Received resp to set"); + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_vol_rebalance(cmd, dict, rsp.op_ret, rsp.op_errno, + rsp.op_errstr); + goto out; + } - dict = dict_new (); + ret = gf_cli_print_rebalance_status(dict, GF_TASK_TYPE_REBALANCE); + if (ret) + gf_log("cli", GF_LOG_ERROR, "Failed to print rebalance status"); - if (!dict) { - ret = -1; - goto out; - } +done: + if (global_state->mode & GLUSTER_MODE_XML) + cli_xml_output_str("volRebalance", msg, rsp.op_ret, rsp.op_errno, + rsp.op_errstr); + else { + if (rsp.op_ret) + cli_err("volume rebalance: %s: failed%s%s", volname, + strlen(msg) ? ": " : "", msg); + else + cli_out("volume rebalance: %s: success%s%s", volname, + strlen(msg) ? ": " : "", msg); + } + ret = rsp.op_ret; - ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &dict); +out: + gf_free_xdr_cli_rsp(rsp); + if (dict) + dict_unref(dict); + cli_cmd_broadcast_response(ret); + return ret; +} +static int +gf_cli_rename_volume_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + char msg[1024] = { + 0, + }; + + GF_ASSERT(myframe); + + if (-1 == req->rpc_status) { + goto out; + } + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + goto out; + } + + gf_log("cli", GF_LOG_INFO, "Received resp to probe"); + snprintf(msg, sizeof(msg), "Rename volume %s", + (rsp.op_ret) ? "unsuccessful" : "successful"); + + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_str("volRename", msg, rsp.op_ret, rsp.op_errno, + rsp.op_errstr); if (ret) - goto out; + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; + } - if (dict_get_str (dict, "help-str", &help_str) && !msg[0]) - snprintf (msg, sizeof (msg), "Set volume %s", - (rsp.op_ret) ? 
"unsuccessful": "successful"); + if (rsp.op_ret) + cli_err("volume rename: failed"); + else + cli_out("volume rename: success"); -#if (HAVE_LIB_XML) - if ((global_state->mode & GLUSTER_MODE_XML) && (help_str == NULL)) { - ret = cli_xml_output_str ("volSet", msg, rsp.op_ret, - rsp.op_errno, rsp.op_errstr); - if (ret) - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); - goto out; - } -#endif + ret = rsp.op_ret; - if (rsp.op_ret && strcmp (rsp.op_errstr, "")) - cli_err ("%s", rsp.op_errstr); +out: + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); + return ret; +} - if (rsp.op_ret) - cli_err ("%s", msg); - else - cli_out ("%s", ((help_str == NULL) ? msg : help_str)); +static int +gf_cli_reset_volume_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + char msg[1024] = { + 0, + }; + + GF_ASSERT(myframe); + + if (-1 == req->rpc_status) { + goto out; + } + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + goto out; + } + + gf_log("cli", GF_LOG_INFO, "Received resp to reset"); + + if (strcmp(rsp.op_errstr, "")) + snprintf(msg, sizeof(msg), "%s", rsp.op_errstr); + else + snprintf(msg, sizeof(msg), "reset volume %s", + (rsp.op_ret) ? "unsuccessful" : "successful"); + + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_str("volReset", msg, rsp.op_ret, rsp.op_errno, + rsp.op_errstr); + if (ret) + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; + } - ret = rsp.op_ret; + if (rsp.op_ret) + cli_err("volume reset: failed: %s", msg); + else + cli_out("volume reset: success: %s", msg); + + ret = rsp.op_ret; out: - cli_cmd_broadcast_response (ret); - return ret; + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); + return ret; } -int -gf_cli3_1_add_brick_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) +static int +gf_cli_ganesha_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) { - gf_cli_rsp rsp = {0,}; - int ret = -1; - char msg[1024] = {0,}; + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + dict_t *dict = NULL; - if (-1 == req->rpc_status) { - goto out; - } + GF_ASSERT(myframe); - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } + if (-1 == req->rpc_status) { + goto out; + } + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + goto out; + } - gf_log ("cli", GF_LOG_INFO, "Received resp to add brick"); + gf_log("cli", GF_LOG_DEBUG, "Received resp to ganesha"); - if (rsp.op_ret && strcmp (rsp.op_errstr, "")) - snprintf (msg, sizeof (msg), "%s", rsp.op_errstr); - else - snprintf (msg, sizeof (msg), "Add Brick %s", - (rsp.op_ret) ? 
"unsuccessful": "successful"); + dict = dict_new(); -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_str ("volAddBrick", msg, rsp.op_ret, - rsp.op_errno, rsp.op_errstr); - if (ret) - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); - goto out; - } -#endif + if (!dict) { + ret = -1; + goto out; + } - if (rsp.op_ret) - cli_err ("%s", msg); + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict); + if (ret) + goto out; + + if (rsp.op_ret) { + if (strcmp(rsp.op_errstr, "")) + cli_err("nfs-ganesha: failed: %s", rsp.op_errstr); else - cli_out ("%s", msg); - ret = rsp.op_ret; + cli_err("nfs-ganesha: failed"); + } -out: - cli_cmd_broadcast_response (ret); - if (rsp.dict.dict_val) - free (rsp.dict.dict_val); - if (rsp.op_errstr) - free (rsp.op_errstr); - return ret; -} + else { + cli_out("nfs-ganesha : success "); + } -int -gf_cli3_remove_brick_status_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf_cli_rsp rsp = {0,}; - char *status = "unknown"; - int ret = -1; - uint64_t files = 0; - uint64_t size = 0; - uint64_t lookup = 0; - dict_t *dict = NULL; - //char msg[1024] = {0,}; - char key[256] = {0,}; - int32_t i = 1; - int32_t counter = 0; - char *node_uuid = 0; - gf_defrag_status_t status_rcd = GF_DEFRAG_STATUS_NOT_STARTED; - uint64_t failures = 0; - - - if (-1 == req->rpc_status) { - goto out; - } + ret = rsp.op_ret; - ret = xdr_to_generic (*iov, &rsp, - (xdrproc_t)xdr_gf_cli_rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } +out: + if (dict) + dict_unref(dict); + cli_cmd_broadcast_response(ret); + return ret; +} - ret = rsp.op_ret; - if (rsp.op_ret == -1) { - if (strcmp (rsp.op_errstr, "")) - cli_err ("%s", rsp.op_errstr); - else - cli_err ("failed to get the status of " - "remove-brick process"); - goto out; +static char * +is_server_debug_xlator(void *myframe) +{ + call_frame_t *frame = NULL; + cli_local_t *local = NULL; + char **words = NULL; + char *key = NULL; + char *value = NULL; + char *debug_xlator = NULL; + + frame = myframe; + local = frame->local; + words = (char **)local->words; + + while (*words != NULL) { + if (strstr(*words, "trace") == NULL && + strstr(*words, "error-gen") == NULL) { + words++; + continue; + } + + key = *words; + words++; + value = *words; + if (value == NULL) + break; + if (strstr(value, "client")) { + words++; + continue; + } else { + if (!(strstr(value, "posix") || strstr(value, "acl") || + strstr(value, "locks") || strstr(value, "io-threads") || + strstr(value, "marker") || strstr(value, "index"))) { + words++; + continue; + } else { + debug_xlator = gf_strdup(key); + break; + } } + } - if (rsp.dict.dict_len) { - /* Unserialize the dictionary */ - dict = dict_new (); + return debug_xlator; +} - ret = dict_unserialize (rsp.dict.dict_val, - rsp.dict.dict_len, - &dict); - if (ret < 0) { - gf_log ("glusterd", GF_LOG_ERROR, - "failed to " - "unserialize req-buffer to dictionary"); - goto out; - } - } +static int +gf_cli_set_volume_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + dict_t *dict = NULL; + char *help_str = NULL; + char msg[1024] = { + 0, + }; + char *debug_xlator = NULL; + char tmp_str[512] = { + 0, + }; + + GF_ASSERT(myframe); + + if (-1 == req->rpc_status) { + goto out; + } + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + goto out; + } + + 
gf_log("cli", GF_LOG_INFO, "Received resp to set"); + + dict = dict_new(); + + if (!dict) { + ret = -1; + goto out; + } + + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict); + if (ret) { + gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL); + goto out; + } + + /* For brick processes graph change does not happen on the fly. + * The process has to be restarted. So this is a check from the + * volume set option such that if debug xlators such as trace/errorgen + * are provided in the set command, warn the user. + */ + debug_xlator = is_server_debug_xlator(myframe); + + if (dict_get_str_sizen(dict, "help-str", &help_str) && !msg[0]) + snprintf(msg, sizeof(msg), "Set volume %s", + (rsp.op_ret) ? "unsuccessful" : "successful"); + if (rsp.op_ret == 0 && debug_xlator) { + snprintf(tmp_str, sizeof(tmp_str), + "\n%s translator has been " + "added to the server volume file. Please restart the" + " volume for enabling the translator", + debug_xlator); + } + + if ((global_state->mode & GLUSTER_MODE_XML) && (help_str == NULL)) { + ret = cli_xml_output_str("volSet", msg, rsp.op_ret, rsp.op_errno, + rsp.op_errstr); + if (ret) + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; + } - ret = dict_get_int32 (dict, "count", &counter); - if (ret) { - gf_log (THIS->name, GF_LOG_ERROR, "count not set"); - goto out; + if (rsp.op_ret) { + if (strcmp(rsp.op_errstr, "")) + cli_err("volume set: failed: %s", rsp.op_errstr); + else + cli_err("volume set: failed"); + } else { + if (help_str == NULL) { + if (debug_xlator == NULL) + cli_out("volume set: success"); + else + cli_out("volume set: success%s", tmp_str); + } else { + cli_out("%s", help_str); } + } + ret = rsp.op_ret; - cli_out ("%40s %16s %13s %13s %13s %14s", "Node", "Rebalanced-files", - "size", "scanned", "failures", "status"); - cli_out ("%40s %16s %13s %13s %13s %14s", "---------", "-----------", - "-----------", "-----------", "-----------", "------------"); +out: + if (dict) + dict_unref(dict); + GF_FREE(debug_xlator); + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); + return ret; +} - do { - snprintf (key, 256, "node-uuid-%d", i); - ret = dict_get_str (dict, key, &node_uuid); - if (ret) - gf_log (THIS->name, GF_LOG_TRACE, - "failed to get node-uuid"); +int +gf_cli_add_brick_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + char msg[1024] = { + 0, + }; + + GF_ASSERT(myframe); + + if (-1 == req->rpc_status) { + goto out; + } + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + goto out; + } + + gf_log("cli", GF_LOG_INFO, "Received resp to add brick"); + + if (rsp.op_ret && strcmp(rsp.op_errstr, "")) + snprintf(msg, sizeof(msg), "%s", rsp.op_errstr); + else + snprintf(msg, sizeof(msg), "Add Brick %s", + (rsp.op_ret) ? 
"unsuccessful" : "successful"); + + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_str("volAddBrick", msg, rsp.op_ret, rsp.op_errno, + rsp.op_errstr); + if (ret) + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; + } - memset (key, 0, 256); - snprintf (key, 256, "files-%d", i); - ret = dict_get_uint64 (dict, key, &files); - if (ret) - gf_log (THIS->name, GF_LOG_TRACE, - "failed to get file count"); + if (rsp.op_ret) + cli_err("volume add-brick: failed: %s", rsp.op_errstr); + else + cli_out("volume add-brick: success"); + ret = rsp.op_ret; - memset (key, 0, 256); - snprintf (key, 256, "size-%d", i); - ret = dict_get_uint64 (dict, key, &size); - if (ret) - gf_log (THIS->name, GF_LOG_TRACE, - "failed to get size of xfer"); +out: + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); + return ret; +} - memset (key, 0, 256); - snprintf (key, 256, "lookups-%d", i); - ret = dict_get_uint64 (dict, key, &lookup); - if (ret) - gf_log (THIS->name, GF_LOG_TRACE, - "failed to get lookedup file count"); +static int +gf_cli3_remove_brick_status_cbk(struct rpc_req *req, struct iovec *iov, + int count, void *myframe) +{ + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + dict_t *dict = NULL; + char msg[1024] = { + 0, + }; + int32_t command = 0; + gf1_op_commands cmd = GF_OP_CMD_NONE; + cli_local_t *local = NULL; + call_frame_t *frame = NULL; + const char *cmd_str; + + GF_ASSERT(myframe); + + if (-1 == req->rpc_status) { + goto out; + } + + frame = myframe; + + GF_ASSERT(frame->local); + + local = frame->local; + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL); + goto out; + } + + ret = dict_get_int32_sizen(local->dict, "command", &command); + if (ret) + goto out; + + cmd = command; + + switch (cmd) { + case GF_OP_CMD_STOP: + cmd_str = "stop"; + break; + case GF_OP_CMD_STATUS: + cmd_str = "status"; + break; + default: + cmd_str = "unknown"; + break; + } + + ret = rsp.op_ret; + if (rsp.op_ret == -1) { + if (strcmp(rsp.op_errstr, "")) + snprintf(msg, sizeof(msg), "volume remove-brick %s: failed: %s", + cmd_str, rsp.op_errstr); + else + snprintf(msg, sizeof(msg), "volume remove-brick %s: failed", + cmd_str); - memset (key, 0, 256); - snprintf (key, 256, "status-%d", i); - ret = dict_get_int32 (dict, key, (int32_t *)&status_rcd); - if (ret) - gf_log (THIS->name, GF_LOG_TRACE, - "failed to get status"); + if (global_state->mode & GLUSTER_MODE_XML) + goto xml_output; - snprintf (key, 256, "failures-%d", i); - ret = dict_get_uint64 (dict, key, &failures); - if (ret) - gf_log (THIS->name, GF_LOG_TRACE, - "Failed to get failure on files"); - - switch (status_rcd) { - case GF_DEFRAG_STATUS_NOT_STARTED: - status = "not started"; - break; - case GF_DEFRAG_STATUS_STARTED: - status = "in progress"; - break; - case GF_DEFRAG_STATUS_STOPPED: - status = "stopped"; - break; - case GF_DEFRAG_STATUS_COMPLETE: - status = "completed"; - break; - case GF_DEFRAG_STATUS_FAILED: - status = "failed"; - break; - } - cli_out ("%40s %16"PRId64 "%13"PRId64 "%13"PRId64 "%13"PRId64 - " %14s", node_uuid, files, size, lookup, failures, - status); - i++; - } while (i <= counter); + cli_err("%s", msg); + goto out; + } - //TODO: Do proper xml output - /* -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_str ("volRemoveBrick", msg, rsp.op_ret, - rsp.op_errno, rsp.op_errstr); - if (ret) - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); - goto out; - } -#endif + if 
(rsp.dict.dict_len) { + /* Unserialize the dictionary */ + dict = dict_new(); - cli_out ("%s", msg); - */ -out: - if (rsp.dict.dict_val) - free (rsp.dict.dict_val); //malloced by xdr - if (dict) - dict_unref (dict); - cli_cmd_broadcast_response (ret); - return ret; -} + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict); + if (ret < 0) { + strncpy(msg, DICT_UNSERIALIZE_FAIL, sizeof(msg)); + if (global_state->mode & GLUSTER_MODE_XML) { + rsp.op_ret = -1; + goto xml_output; + } -int -gf_cli3_1_remove_brick_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf_cli_rsp rsp = {0,}; - int ret = -1; - char msg[1024] = {0,}; - gf1_op_commands cmd = GF_OP_CMD_NONE; - char *cmd_str = "unknown"; - cli_local_t *local = NULL; - call_frame_t *frame = NULL; - - if (-1 == req->rpc_status) { - goto out; + gf_log("cli", GF_LOG_ERROR, "%s", msg); + goto out; } + } - frame = myframe; - local = frame->local; +xml_output: + if (global_state->mode & GLUSTER_MODE_XML) { + if (strcmp(rsp.op_errstr, "")) { + ret = cli_xml_output_vol_remove_brick(_gf_true, dict, rsp.op_ret, + rsp.op_errno, rsp.op_errstr, + "volRemoveBrick"); + } else { + ret = cli_xml_output_vol_remove_brick(_gf_true, dict, rsp.op_ret, + rsp.op_errno, msg, + "volRemoveBrick"); + } + goto out; + } + + ret = gf_cli_print_rebalance_status(dict, GF_TASK_TYPE_REMOVE_BRICK); + if (ret) { + gf_log("cli", GF_LOG_ERROR, + "Failed to print remove-brick rebalance status"); + goto out; + } + + if ((cmd == GF_OP_CMD_STOP) && (rsp.op_ret == 0)) { + cli_out( + "'remove-brick' process may be in the middle of a " + "file migration.\nThe process will be fully stopped " + "once the migration of the file is complete.\nPlease " + "check remove-brick process for completion before " + "doing any further brick related tasks on the " + "volume."); + } - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } +out: + if (dict) + dict_unref(dict); + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); + return ret; +} - ret = dict_get_int32 (local->dict, "command", (int32_t *)&cmd); +static int +gf_cli_remove_brick_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + char msg[1024] = { + 0, + }; + gf1_op_commands cmd = GF_OP_CMD_NONE; + char *cmd_str = "unknown"; + cli_local_t *local = NULL; + call_frame_t *frame = NULL; + char *task_id_str = NULL; + dict_t *rsp_dict = NULL; + + GF_ASSERT(myframe); + + if (-1 == req->rpc_status) { + goto out; + } + + frame = myframe; + + GF_ASSERT(frame->local); + + local = frame->local; + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL); + goto out; + } + + ret = dict_get_int32_sizen(local->dict, "command", (int32_t *)&cmd); + if (ret) { + gf_log("", GF_LOG_ERROR, "failed to get command"); + goto out; + } + + if (rsp.dict.dict_len) { + rsp_dict = dict_new(); + if (!rsp_dict) { + ret = -1; + goto out; + } + + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict); if (ret) { - gf_log ("", GF_LOG_ERROR, "failed to get command"); - goto out; + gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL); + goto out; } + } - switch (cmd) { - + switch (cmd) { + case GF_OP_CMD_DETACH_START: case GF_OP_CMD_START: - cmd_str = "start"; - break; + cmd_str = "start"; + + ret = dict_get_str_sizen(rsp_dict, GF_REMOVE_BRICK_TID_KEY, + &task_id_str); + if (ret) { + 
gf_log("cli", GF_LOG_ERROR, + "remove-brick-id is not present in dict"); + } + break; case GF_OP_CMD_COMMIT: - cmd_str = "commit"; - break; + cmd_str = "commit"; + break; case GF_OP_CMD_COMMIT_FORCE: - cmd_str = "commit force"; - break; + cmd_str = "commit force"; + break; default: - cmd_str = "unknown"; - break; - } - - gf_log ("cli", GF_LOG_INFO, "Received resp to remove brick"); - - if (rsp.op_ret && strcmp (rsp.op_errstr, "")) - snprintf (msg, sizeof (msg), "%s", rsp.op_errstr); - else - snprintf (msg, sizeof (msg), "Remove Brick %s %s", cmd_str, - (rsp.op_ret) ? "unsuccessful": "successful"); - -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_str ("volRemoveBrick", msg, rsp.op_ret, - rsp.op_errno, rsp.op_errstr); - if (ret) - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); - goto out; - } -#endif - if (rsp.op_ret) - cli_err ("%s", msg); - else - cli_out ("%s", msg); - ret = rsp.op_ret; + cmd_str = "unknown"; + break; + } + + gf_log("cli", GF_LOG_INFO, "Received resp to remove brick"); + + if (rsp.op_ret && strcmp(rsp.op_errstr, "")) + snprintf(msg, sizeof(msg), "%s", rsp.op_errstr); + else + snprintf(msg, sizeof(msg), "Remove Brick %s %s", cmd_str, + (rsp.op_ret) ? "unsuccessful" : "successful"); + + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_vol_remove_brick(_gf_false, rsp_dict, rsp.op_ret, + rsp.op_errno, msg, + "volRemoveBrick"); + if (ret) + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; + } + + if (rsp.op_ret) { + cli_err("volume remove-brick %s: failed: %s", cmd_str, msg); + } else { + cli_out("volume remove-brick %s: success", cmd_str); + if (GF_OP_CMD_START == cmd && task_id_str != NULL) + cli_out("ID: %s", task_id_str); + if (GF_OP_CMD_COMMIT == cmd) + cli_out( + "Check the removed bricks to ensure all files " + "are migrated.\nIf files with data are " + "found on the brick path, copy them via a " + "gluster mount point before re-purposing the " + "removed brick. 
"); + } + + ret = rsp.op_ret; out: - if (frame) - frame->local = NULL; + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); - if (local) { - dict_unref (local->dict); - cli_local_wipe (local); - } + if (rsp_dict) + dict_unref(rsp_dict); - cli_cmd_broadcast_response (ret); - if (rsp.dict.dict_val) - free (rsp.dict.dict_val); - if (rsp.op_errstr) - free (rsp.op_errstr); - - return ret; + return ret; } +static int +gf_cli_reset_brick_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + cli_local_t *local = NULL; + call_frame_t *frame = NULL; + const char *rb_operation_str = NULL; + dict_t *rsp_dict = NULL; + char msg[1024] = { + 0, + }; + char *reset_op = NULL; + + GF_ASSERT(myframe); + + if (-1 == req->rpc_status) { + goto out; + } + + frame = myframe; + + GF_ASSERT(frame->local); + + local = frame->local; + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL); + goto out; + } + + ret = dict_get_str_sizen(local->dict, "operation", &reset_op); + if (ret) { + gf_log(frame->this->name, GF_LOG_ERROR, "dict_get on operation failed"); + goto out; + } + + if (strcmp(reset_op, "GF_RESET_OP_START") && + strcmp(reset_op, "GF_RESET_OP_COMMIT") && + strcmp(reset_op, "GF_RESET_OP_COMMIT_FORCE")) { + ret = -1; + goto out; + } + + if (rsp.dict.dict_len) { + /* Unserialize the dictionary */ + rsp_dict = dict_new(); + + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict); + if (ret < 0) { + gf_log(frame->this->name, GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL); + goto out; + } + } + + if (rsp.op_ret && (strcmp(rsp.op_errstr, ""))) { + rb_operation_str = rsp.op_errstr; + } else { + if (!strcmp(reset_op, "GF_RESET_OP_START")) { + if (rsp.op_ret) + rb_operation_str = "reset-brick start operation failed"; + else + rb_operation_str = "reset-brick start operation successful"; + } else if (!strcmp(reset_op, "GF_RESET_OP_COMMIT")) { + if (rsp.op_ret) + rb_operation_str = "reset-brick commit operation failed"; + else + rb_operation_str = "reset-brick commit operation successful"; + } else if (!strcmp(reset_op, "GF_RESET_OP_COMMIT_FORCE")) { + if (rsp.op_ret) + rb_operation_str = "reset-brick commit force operation failed"; + else + rb_operation_str = + "reset-brick commit force operation successful"; + } + } + + gf_log("cli", GF_LOG_INFO, "Received resp to reset brick"); + snprintf(msg, sizeof(msg), "%s", + rb_operation_str ? 
rb_operation_str : "Unknown operation"); + + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_vol_replace_brick(rsp_dict, rsp.op_ret, + rsp.op_errno, msg); + if (ret) + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; + } + if (rsp.op_ret) + cli_err("volume reset-brick: failed: %s", msg); + else + cli_out("volume reset-brick: success: %s", msg); + ret = rsp.op_ret; -int -gf_cli3_1_replace_brick_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf_cli_rsp rsp = {0,}; - int ret = -1; - cli_local_t *local = NULL; - call_frame_t *frame = NULL; - dict_t *dict = NULL; - char *src_brick = NULL; - char *dst_brick = NULL; - char *status_reply = NULL; - gf1_cli_replace_op replace_op = 0; - char *rb_operation_str = NULL; - dict_t *rsp_dict = NULL; - char msg[1024] = {0,}; - - if (-1 == req->rpc_status) { - goto out; - } - - frame = (call_frame_t *) myframe; +out: + if (frame) + frame->local = NULL; - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } + if (local) + cli_local_wipe(local); - local = frame->local; - GF_ASSERT (local); - dict = local->dict; + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); + if (rsp_dict) + dict_unref(rsp_dict); - ret = dict_get_int32 (dict, "operation", (int32_t *)&replace_op); - if (ret) { - gf_log ("", GF_LOG_DEBUG, - "dict_get on operation failed"); - goto out; + return ret; +} +static int +gf_cli_replace_brick_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + cli_local_t *local = NULL; + call_frame_t *frame = NULL; + const char *rb_operation_str = NULL; + dict_t *rsp_dict = NULL; + char msg[1024] = { + 0, + }; + char *replace_op = NULL; + + GF_ASSERT(myframe); + + if (-1 == req->rpc_status) { + goto out; + } + + frame = myframe; + + GF_ASSERT(frame->local); + + local = frame->local; + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL); + goto out; + } + + ret = dict_get_str_sizen(local->dict, "operation", &replace_op); + if (ret) { + gf_log(frame->this->name, GF_LOG_ERROR, "dict_get on operation failed"); + goto out; + } + + if (rsp.dict.dict_len) { + /* Unserialize the dictionary */ + rsp_dict = dict_new(); + + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict); + if (ret < 0) { + gf_log(frame->this->name, GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL); + goto out; } + } - switch (replace_op) { - case GF_REPLACE_OP_START: - if (rsp.op_ret) - rb_operation_str = "replace-brick failed to start"; - else - rb_operation_str = "replace-brick started successfully"; - break; + if (!strcmp(replace_op, "GF_REPLACE_OP_COMMIT_FORCE")) { + if (rsp.op_ret || ret) + rb_operation_str = "replace-brick commit force operation failed"; + else + rb_operation_str = + "replace-brick commit force operation successful"; + } else { + gf_log(frame->this->name, GF_LOG_DEBUG, "Unknown operation"); + } + + if (rsp.op_ret && (strcmp(rsp.op_errstr, ""))) { + rb_operation_str = rsp.op_errstr; + } + + gf_log("cli", GF_LOG_INFO, "Received resp to replace brick"); + snprintf(msg, sizeof(msg), "%s", + rb_operation_str ? 
rb_operation_str : "Unknown operation"); + + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_vol_replace_brick(rsp_dict, rsp.op_ret, + rsp.op_errno, msg); + if (ret) + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; + } - case GF_REPLACE_OP_STATUS: - - if (rsp.op_ret || ret) - rb_operation_str = "replace-brick status unknown"; - else { - if (rsp.dict.dict_len) { - /* Unserialize the dictionary */ - rsp_dict = dict_new (); - - ret = dict_unserialize (rsp.dict.dict_val, - rsp.dict.dict_len, - &rsp_dict); - if (ret < 0) { - gf_log ("glusterd", GF_LOG_ERROR, - "failed to " - "unserialize req-buffer to dictionary"); - goto out; - } - } - ret = dict_get_str (rsp_dict, "status-reply", - &status_reply); - if (ret) { - gf_log (THIS->name, GF_LOG_ERROR, "failed to" - "get status"); - goto out; - } - - rb_operation_str = status_reply; - } + if (rsp.op_ret) + cli_err("volume replace-brick: failed: %s", msg); + else + cli_out("volume replace-brick: success: %s", msg); + ret = rsp.op_ret; - break; +out: + if (frame) + frame->local = NULL; - case GF_REPLACE_OP_PAUSE: - if (rsp.op_ret) - rb_operation_str = "replace-brick pause failed"; - else - rb_operation_str = "replace-brick paused successfully"; - break; + if (local) + cli_local_wipe(local); - case GF_REPLACE_OP_ABORT: - if (rsp.op_ret) - rb_operation_str = "replace-brick abort failed"; - else - rb_operation_str = "replace-brick aborted successfully"; - break; + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); + if (rsp_dict) + dict_unref(rsp_dict); - case GF_REPLACE_OP_COMMIT: - case GF_REPLACE_OP_COMMIT_FORCE: - ret = dict_get_str (dict, "src-brick", &src_brick); - if (ret) { - gf_log ("", GF_LOG_DEBUG, - "dict_get on src-brick failed"); - goto out; - } + return ret; +} - ret = dict_get_str (dict, "dst-brick", &dst_brick); - if (ret) { - gf_log ("", GF_LOG_DEBUG, - "dict_get on dst-brick failed"); - goto out; - } +static int +gf_cli_log_rotate_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + char msg[1024] = { + 0, + }; + + GF_ASSERT(myframe); + + if (-1 == req->rpc_status) { + goto out; + } + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + goto out; + } + + gf_log("cli", GF_LOG_DEBUG, "Received resp to log rotate"); + + if (rsp.op_ret && strcmp(rsp.op_errstr, "")) + snprintf(msg, sizeof(msg), "%s", rsp.op_errstr); + else + snprintf(msg, sizeof(msg), "log rotate %s", + (rsp.op_ret) ? 
"unsuccessful" : "successful"); + + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_str("volLogRotate", msg, rsp.op_ret, rsp.op_errno, + rsp.op_errstr); + if (ret) + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; + } + if (rsp.op_ret) + cli_err("volume log-rotate: failed: %s", msg); + else + cli_out("volume log-rotate: success"); + ret = rsp.op_ret; - if (rsp.op_ret || ret) - rb_operation_str = "replace-brick commit failed"; - else - rb_operation_str = "replace-brick commit successful"; +out: + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); - break; + return ret; +} - default: - gf_log ("", GF_LOG_DEBUG, - "Unknown operation"); - break; - } +static int +gf_cli_sync_volume_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + char msg[1024] = { + 0, + }; + + GF_ASSERT(myframe); + + if (-1 == req->rpc_status) { + goto out; + } + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + goto out; + } + + gf_log("cli", GF_LOG_DEBUG, "Received resp to sync"); + + if (rsp.op_ret && strcmp(rsp.op_errstr, "")) + snprintf(msg, sizeof(msg), "volume sync: failed: %s", rsp.op_errstr); + else + snprintf(msg, sizeof(msg), "volume sync: %s", + (rsp.op_ret) ? "failed" : "success"); + + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_str("volSync", msg, rsp.op_ret, rsp.op_errno, + rsp.op_errstr); + if (ret) + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; + } - if (rsp.op_ret && (strcmp (rsp.op_errstr, ""))) { - rb_operation_str = rsp.op_errstr; - } + if (rsp.op_ret) + cli_err("%s", msg); + else + cli_out("%s", msg); + ret = rsp.op_ret; - gf_log ("cli", GF_LOG_INFO, "Received resp to replace brick"); - snprintf (msg,sizeof (msg), "%s", - rb_operation_str ? rb_operation_str : "Unknown operation"); +out: + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); + return ret; +} -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_str ("volReplaceBrick", msg, rsp.op_ret, - rsp.op_errno, rsp.op_errstr); - if (ret) - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); - goto out; +static int +print_quota_list_usage_output(cli_local_t *local, char *path, int64_t avail, + char *sl_str, quota_limits_t *limits, + quota_meta_t *used_space, gf_boolean_t sl, + gf_boolean_t hl, double sl_num, + gf_boolean_t limit_set) +{ + int32_t ret = -1; + char *used_str = NULL; + char *avail_str = NULL; + char *hl_str = NULL; + char *sl_val = NULL; + + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_quota_xml_output(local, path, limits->hl, sl_str, sl_num, + used_space->size, avail, sl ? "Yes" : "No", + hl ? "Yes" : "No", limit_set); + if (ret) { + gf_log("cli", GF_LOG_ERROR, + "Failed to output in xml format for quota list command"); } -#endif + goto out; + } - if (rsp.op_ret) - cli_err ("%s", msg); - else - cli_out ("%s", msg); - ret = rsp.op_ret; + used_str = gf_uint64_2human_readable(used_space->size); -out: - if (frame) - frame->local = NULL; + if (limit_set) { + hl_str = gf_uint64_2human_readable(limits->hl); + sl_val = gf_uint64_2human_readable(sl_num); - if (local) { - dict_unref (local->dict); - cli_local_wipe (local); + if (!used_str) { + cli_out("%-40s %7s %7s(%s) %8" PRIu64 "%9" PRIu64 + "" + "%15s %18s", + path, hl_str, sl_str, sl_val, used_space->size, avail, + sl ? "Yes" : "No", hl ? 
"Yes" : "No"); + } else { + avail_str = gf_uint64_2human_readable(avail); + cli_out("%-40s %7s %7s(%s) %8s %7s %15s %20s", path, hl_str, sl_str, + sl_val, used_str, avail_str, sl ? "Yes" : "No", + hl ? "Yes" : "No"); } + } else { + cli_out("%-36s %10s %10s %14s %9s %15s %18s", path, "N/A", "N/A", + used_str, "N/A", "N/A", "N/A"); + } - cli_cmd_broadcast_response (ret); - if (rsp.dict.dict_val) - free (rsp.dict.dict_val); - if (rsp_dict) - dict_unref (rsp_dict); + ret = 0; +out: + GF_FREE(hl_str); + GF_FREE(used_str); + GF_FREE(avail_str); + GF_FREE(sl_val); - return ret; + return ret; } - static int -gf_cli3_1_log_rotate_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) +print_quota_list_object_output(cli_local_t *local, char *path, int64_t avail, + char *sl_str, quota_limits_t *limits, + quota_meta_t *used_space, gf_boolean_t sl, + gf_boolean_t hl, double sl_num, + gf_boolean_t limit_set) { - gf_cli_rsp rsp = {0,}; - int ret = -1; - char msg[1024] = {0,}; + int32_t ret = -1; + int64_t sl_val = sl_num; - if (-1 == req->rpc_status) { - goto out; - } - - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_quota_object_xml_output(local, path, sl_str, sl_val, limits, + used_space, avail, sl ? "Yes" : "No", + hl ? "Yes" : "No", limit_set); + if (ret) { + gf_log("cli", GF_LOG_ERROR, + "Failed to output in xml format for quota list command"); + } + goto out; + } + + if (limit_set) { + cli_out("%-40s %9" PRIu64 " %9s(%" PRId64 ") %10" PRIu64 + "" + "%10" PRIu64 " %11" PRIu64 " %15s %20s", + path, limits->hl, sl_str, sl_val, used_space->file_count, + used_space->dir_count, avail, sl ? "Yes" : "No", + hl ? "Yes" : "No"); + } else { + cli_out("%-40s %9s %9s %10" PRIu64 " %10" PRIu64 " %11s %15s %20s", + path, "N/A", "N/A", used_space->file_count, + used_space->dir_count, "N/A", "N/A", "N/A"); + } + ret = 0; - gf_log ("cli", GF_LOG_DEBUG, "Received resp to log rotate"); +out: - if (rsp.op_ret && strcmp (rsp.op_errstr, "")) - snprintf (msg, sizeof (msg), "%s", rsp.op_errstr); - else - snprintf (msg, sizeof (msg), "log rotate %s", - (rsp.op_ret) ? 
"unsuccessful": "successful"); + return ret; +} -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_str ("volLogRotate", msg, rsp.op_ret, - rsp.op_errno, rsp.op_errstr); - if (ret) - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); +static int +print_quota_list_output(cli_local_t *local, char *path, char *default_sl, + quota_limits_t *limits, quota_meta_t *used_space, + int type, gf_boolean_t limit_set) +{ + int64_t avail = 0; + char percent_str[20] = {0}; + char *sl_final = NULL; + int ret = -1; + double sl_num = 0; + gf_boolean_t sl = _gf_false; + gf_boolean_t hl = _gf_false; + int64_t used_size = 0; + + GF_ASSERT(local); + GF_ASSERT(path); + + if (limit_set) { + if (limits->sl < 0) { + ret = gf_string2percent(default_sl, &sl_num); + if (ret) { + gf_log("cli", GF_LOG_ERROR, + "could not convert default soft limit to percent"); + goto out; + } + sl_num = (sl_num * limits->hl) / 100; + sl_final = default_sl; + } else { + sl_num = (limits->sl * limits->hl) / 100; + ret = snprintf(percent_str, sizeof(percent_str), "%" PRIu64 "%%", + limits->sl); + if (ret < 0) goto out; + sl_final = percent_str; } -#endif - - if (rsp.op_ret) - cli_err ("%s", msg); + if (type == GF_QUOTA_OPTION_TYPE_LIST) + used_size = used_space->size; else - cli_out ("%s", msg); - ret = rsp.op_ret; - + used_size = used_space->file_count + used_space->dir_count; + + if (limits->hl > used_size) { + avail = limits->hl - used_size; + hl = _gf_false; + if (used_size > sl_num) + sl = _gf_true; + else + sl = _gf_false; + } else { + avail = 0; + hl = sl = _gf_true; + } + } + + if (type == GF_QUOTA_OPTION_TYPE_LIST) + ret = print_quota_list_usage_output(local, path, avail, sl_final, + limits, used_space, sl, hl, sl_num, + limit_set); + else + ret = print_quota_list_object_output(local, path, avail, sl_final, + limits, used_space, sl, hl, sl_num, + limit_set); out: - cli_cmd_broadcast_response (ret); - if (rsp.dict.dict_val) - free (rsp.dict.dict_val); - - return ret; + return ret; } static int -gf_cli3_1_sync_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) +print_quota_list_from_mountdir(cli_local_t *local, char *mountdir, + char *default_sl, char *path, int type) { - gf_cli_rsp rsp = {0,}; - int ret = -1; - char msg[1024] = {0,}; + int ret = -1; + ssize_t xattr_size = 0; + quota_limits_t limits = { + 0, + }; + quota_meta_t used_space = { + 0, + }; + char *key = NULL; + gf_boolean_t limit_set = _gf_true; + + GF_ASSERT(local); + GF_ASSERT(mountdir); + GF_ASSERT(path); + + if (type == GF_QUOTA_OPTION_TYPE_LIST) + key = QUOTA_LIMIT_KEY; + else + key = QUOTA_LIMIT_OBJECTS_KEY; + + ret = sys_lgetxattr(mountdir, key, (void *)&limits, sizeof(limits)); + if (ret < 0) { + gf_log("cli", GF_LOG_ERROR, + "Failed to get the xattr %s on %s. Reason : %s", key, mountdir, + strerror(errno)); + + switch (errno) { +#if defined(ENODATA) + case ENODATA: +#endif +#if defined(ENOATTR) && (ENOATTR != ENODATA) + case ENOATTR: +#endif + /* If it's an ENOATTR, quota/inode-quota is + * configured(limit is set at least for one directory). + * The user is trying to issue 'list/list-objects' + * command for a directory on which quota limit is + * not set and we are showing the used-space in case + * of list-usage and showing (dir_count, file_count) + * in case of list-objects. Other labels are + * shown "N/A". 
+ */ - if (-1 == req->rpc_status) { - goto out; - } + limit_set = _gf_false; + goto enoattr; + break; - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; + default: + cli_err("%-40s %s", path, strerror(errno)); + break; } - gf_log ("cli", GF_LOG_DEBUG, "Received resp to sync"); + goto out; + } - if (rsp.op_ret && strcmp (rsp.op_errstr, "")) - snprintf (msg, sizeof (msg), "%s", rsp.op_errstr); - else - snprintf (msg, sizeof (msg), "volume sync: %s", - (rsp.op_ret) ? "unsuccessful": "successful"); - -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_str ("volSync", msg, rsp.op_ret, - rsp.op_errno, rsp.op_errstr); - if (ret) - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); - goto out; - } -#endif + limits.hl = ntoh64(limits.hl); + limits.sl = ntoh64(limits.sl); - if (rsp.op_ret) - cli_err ("%s", msg); - else - cli_out ("%s", msg); - ret = rsp.op_ret; +enoattr: + xattr_size = sys_lgetxattr(mountdir, QUOTA_SIZE_KEY, NULL, 0); + if (xattr_size < (sizeof(int64_t) * 2) && + type == GF_QUOTA_OPTION_TYPE_LIST_OBJECTS) { + ret = -1; + /* This can happen when glusterfs is upgraded from 3.6 to 3.7 + * and the xattr healing is not completed. + */ + } else if (xattr_size > (sizeof(int64_t) * 2)) { + ret = sys_lgetxattr(mountdir, QUOTA_SIZE_KEY, &used_space, + sizeof(used_space)); + } else if (xattr_size > 0) { + /* This is for compatibility. + * Older version had only file usage + */ + ret = sys_lgetxattr(mountdir, QUOTA_SIZE_KEY, &(used_space.size), + sizeof(used_space.size)); + used_space.file_count = 0; + used_space.dir_count = 0; + } else { + ret = -1; + } + + if (ret < 0) { + gf_log("cli", GF_LOG_ERROR, "Failed to get quota size on path %s: %s", + mountdir, strerror(errno)); + print_quota_list_empty(path, type); + goto out; + } + + used_space.size = ntoh64(used_space.size); + used_space.file_count = ntoh64(used_space.file_count); + used_space.dir_count = ntoh64(used_space.dir_count); + + ret = print_quota_list_output(local, path, default_sl, &limits, &used_space, + type, limit_set); out: - cli_cmd_broadcast_response (ret); - return ret; + return ret; } -int32_t -gf_cli3_1_print_limit_list (char *volname, char *limit_list, - char *op_errstr) -{ - int64_t size = 0; - int64_t limit_value = 0; - int32_t i, j, k; - int32_t len = 0, ret = -1; - char *size_str = NULL; - char path [PATH_MAX] = {0, }; - char ret_str [1024] = {0, }; - char value [1024] = {0, }; - char mountdir [] = "/tmp/mntXXXXXX"; - char abspath [PATH_MAX] = {0, }; - runner_t runner = {0,}; - - GF_VALIDATE_OR_GOTO ("cli", volname, out); - GF_VALIDATE_OR_GOTO ("cli", limit_list, out); - - if (!connected) - goto out; +static int +gluster_remove_auxiliary_mount(char *volname) +{ + int ret = -1; + char mountdir[PATH_MAX] = { + 0, + }; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + + GLUSTERD_GET_QUOTA_LIST_MOUNT_PATH(mountdir, volname, "/"); + ret = gf_umount_lazy(this->name, mountdir, 1); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "umount on %s failed, reason : %s", + mountdir, strerror(errno)); + } + + return ret; +} - len = strlen (limit_list); - if (len == 0) { - cli_err ("%s", op_errstr?op_errstr:"quota limit not set "); - goto out; +static int +gf_cli_print_limit_list_from_dict(cli_local_t *local, char *volname, + dict_t *dict, char *default_sl, int count, + int op_ret, int op_errno, char *op_errstr) +{ + int ret = -1; + int i = 0; + char key[32] = { + 0, + }; + int keylen; + char 
mountdir[PATH_MAX] = { + 0, + }; + char *path = NULL; + int type = -1; + + if (!dict || count <= 0) + goto out; + + ret = dict_get_int32_sizen(dict, "type", &type); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Failed to get quota type"); + goto out; + } + + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_vol_quota_limit_list_begin(local, op_ret, op_errno, + op_errstr); + if (ret) { + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; } + } else { + print_quota_list_header(type); + } - if (mkdtemp (mountdir) == NULL) { - gf_log ("cli", GF_LOG_WARNING, "failed to create a temporary " - "mount directory"); - ret = -1; - goto out; + while (count--) { + keylen = snprintf(key, sizeof(key), "path%d", i++); + + ret = dict_get_strn(dict, key, keylen, &path); + if (ret < 0) { + gf_log("cli", GF_LOG_DEBUG, "Path not present in limit list"); + continue; } - /* Mount a temporary client to fetch the disk usage - * of the directory on which the limit is set. - */ - ret = runcmd (SBIN_DIR"/glusterfs", "-s", - "localhost", "--volfile-id", volname, "-l", - DEFAULT_LOG_FILE_DIRECTORY"/quota-list.log", - mountdir, NULL); + ret = gf_canonicalize_path(path); + if (ret) + goto out; + GLUSTERD_GET_QUOTA_LIST_MOUNT_PATH(mountdir, volname, path); + ret = print_quota_list_from_mountdir(local, mountdir, default_sl, path, + type); + } + +out: + return ret; +} + +static int +print_quota_list_from_quotad(call_frame_t *frame, dict_t *rsp_dict) +{ + char *path = NULL; + char *default_sl = NULL; + int ret = -1; + cli_local_t *local = NULL; + dict_t *gd_rsp_dict = NULL; + quota_meta_t used_space = { + 0, + }; + quota_limits_t limits = { + 0, + }; + quota_limits_t *size_limits = NULL; + int32_t type = 0; + int32_t success_count = 0; + + GF_ASSERT(frame); + + local = frame->local; + gd_rsp_dict = local->dict; + + ret = dict_get_int32_sizen(rsp_dict, "type", &type); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Failed to get type"); + goto out; + } + + ret = dict_get_str_sizen(rsp_dict, GET_ANCESTRY_PATH_KEY, &path); + if (ret) { + gf_log("cli", GF_LOG_WARNING, "path key is not present in dict"); + goto out; + } + + ret = dict_get_str_sizen(gd_rsp_dict, "default-soft-limit", &default_sl); + if (ret) { + gf_log(frame->this->name, GF_LOG_ERROR, + "failed to get default soft limit"); + goto out; + } + + if (type == GF_QUOTA_OPTION_TYPE_LIST) { + ret = dict_get_bin(rsp_dict, QUOTA_LIMIT_KEY, (void **)&size_limits); if (ret) { - gf_log ("cli", GF_LOG_WARNING, "failed to mount glusterfs client"); - ret = -1; - goto rm_dir; + gf_log("cli", GF_LOG_WARNING, "limit key not present in dict on %s", + path); + goto out; } - - len = strlen (limit_list); - if (len == 0) { - cli_out ("quota limit not set "); - goto unmount; + } else { + ret = dict_get_bin(rsp_dict, QUOTA_LIMIT_OBJECTS_KEY, + (void **)&size_limits); + if (ret) { + gf_log("cli", GF_LOG_WARNING, + "object limit key not present in dict on %s", path); + goto out; + } + } + + limits.hl = ntoh64(size_limits->hl); + limits.sl = ntoh64(size_limits->sl); + + if (type == GF_QUOTA_OPTION_TYPE_LIST) + ret = quota_dict_get_meta(rsp_dict, QUOTA_SIZE_KEY, + SLEN(QUOTA_SIZE_KEY), &used_space); + else + ret = quota_dict_get_inode_meta(rsp_dict, QUOTA_SIZE_KEY, + SLEN(QUOTA_SIZE_KEY), &used_space); + + if (ret < 0) { + gf_log("cli", GF_LOG_WARNING, "size key not present in dict"); + print_quota_list_empty(path, type); + goto out; + } + + LOCK(&local->lock); + { + ret = dict_get_int32_sizen(gd_rsp_dict, "quota-list-success-count", + &success_count); + if (ret) + success_count = 
0; + + ret = dict_set_int32_sizen(gd_rsp_dict, "quota-list-success-count", + success_count + 1); + } + UNLOCK(&local->lock); + if (ret) { + gf_log("cli", GF_LOG_ERROR, + "Failed to set quota-list-success-count in dict"); + goto out; + } + + if (success_count == 0) { + if (!(global_state->mode & GLUSTER_MODE_XML)) { + print_quota_list_header(type); + } else { + ret = cli_xml_output_vol_quota_limit_list_begin(local, 0, 0, NULL); + if (ret) { + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; + } } + } - i = 0; + ret = print_quota_list_output(local, path, default_sl, &limits, &used_space, + type, _gf_true); +out: + return ret; +} - cli_out ("\tpath\t\t limit_set\t size"); - cli_out ("-----------------------------------------------------------" - "-----------------------"); - while (i < len) { - j = 0; - k = 0; +static void * +cli_cmd_broadcast_response_detached(void *opaque) +{ + int32_t ret = 0; - while (limit_list [i] != ':') { - path [k++] = limit_list [i++]; - } - path [k] = '\0'; + ret = (intptr_t)opaque; + cli_cmd_broadcast_response(ret); - i++; //skip ':' + return NULL; +} - while (limit_list [i] != ',' && limit_list [i] != '\0') { - value [j++] = limit_list[i++]; - } - value [j] = '\0'; - - snprintf (abspath, sizeof (abspath), "%s/%s", mountdir, path); - - ret = sys_lgetxattr (abspath, "trusted.limit.list", (void *) ret_str, 4096); - if (ret < 0) { - cli_out ("%-20s %10s", path, value); - } else { - sscanf (ret_str, "%"PRId64",%"PRId64, &size, - &limit_value); - size_str = gf_uint64_2human_readable ((uint64_t) size); - if (size_str == NULL) { - cli_out ("%-20s %10s %20"PRId64, path, - value, size); - } else { - cli_out ("%-20s %10s %20s", path, - value, size_str); - GF_FREE (size_str); - } - } - i++; - } +static int32_t +cli_quota_compare_path(struct list_head *list1, struct list_head *list2) +{ + struct list_node *node1 = NULL; + struct list_node *node2 = NULL; + dict_t *dict1 = NULL; + dict_t *dict2 = NULL; + char *path1 = NULL; + char *path2 = NULL; + int ret = 0; -unmount: + node1 = list_entry(list1, struct list_node, list); + node2 = list_entry(list2, struct list_node, list); - runinit (&runner); - runner_add_args (&runner, "umount", -#if GF_LINUX_HOST_OS - "-l", -#endif - mountdir, NULL); - ret = runner_run_reuse (&runner); - if (ret) - runner_log (&runner, "cli", GF_LOG_WARNING, "error executing"); - runner_end (&runner); + dict1 = node1->ptr; + dict2 = node2->ptr; -rm_dir: - rmdir (mountdir); -out: - return ret; + ret = dict_get_str_sizen(dict1, GET_ANCESTRY_PATH_KEY, &path1); + if (ret < 0) + return 0; + + ret = dict_get_str_sizen(dict2, GET_ANCESTRY_PATH_KEY, &path2); + if (ret < 0) + return 0; + + return strcmp(path1, path2); } -int -gf_cli3_1_quota_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf_cli_rsp rsp = {0,}; - int ret = -1; - dict_t *dict = NULL; - char *volname = NULL; - char *limit_list = NULL; - int32_t type = 0; - char msg[1024] = {0,}; - - if (-1 == req->rpc_status) { - goto out; - } +static int +cli_quotad_getlimit_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + /*TODO: we need to gather the path, hard-limit, soft-limit and used space*/ + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + dict_t *dict = NULL; + struct list_node *node = NULL; + struct list_node *tmpnode = NULL; + call_frame_t *frame = NULL; + cli_local_t *local = NULL; + int32_t list_count = 0; + pthread_t th_id = { + 0, + }; + int32_t max_count = 0; + + GF_ASSERT(myframe); + + frame = myframe; + + GF_ASSERT(frame->local); + + local = 
frame->local;
+
+    LOCK(&local->lock);
+    {
+        ret = dict_get_int32_sizen(local->dict, "quota-list-count",
+                                   &list_count);
+        if (ret)
+            list_count = 0;
+
+        list_count++;
+        ret = dict_set_int32_sizen(local->dict, "quota-list-count", list_count);
+    }
+    UNLOCK(&local->lock);
+
+    if (ret) {
+        gf_log("cli", GF_LOG_ERROR, "Failed to set quota-list-count in dict");
+        goto out;
+    }
+
+    if (-1 == req->rpc_status) {
+        if (list_count == 0)
+            cli_err(
+                "Connection failed. Please check if quota "
+                "daemon is operational.");
+        ret = -1;
+        goto out;
+    }
+
+    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
+    if (ret < 0) {
+        gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
+        goto out;
+    }
+
+    if (rsp.op_ret) {
+        ret = -1;
+        if (strcmp(rsp.op_errstr, ""))
+            cli_err("quota command failed : %s", rsp.op_errstr);
+        else
+            cli_err("quota command : failed");
+        goto out;
+    }
+
+    if (rsp.dict.dict_len) {
+        /* Unserialize the dictionary */
+        dict = dict_new();

-        ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
+        ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
         if (ret < 0) {
-                gf_log ("", GF_LOG_ERROR, "error");
-                goto out;
+            gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
+            goto out;
         }

-        if (rsp.op_ret &&
-            strcmp (rsp.op_errstr, "") == 0) {
-                snprintf (msg, sizeof (msg), "command unsuccessful %s",
-                          rsp.op_errstr);
-#if (HAVE_LIB_XML)
-                if (global_state->mode & GLUSTER_MODE_XML)
-                        goto xml_output;
-#endif
-                goto out;
+        node = list_node_add_order(dict, &local->dict_list,
+                                   cli_quota_compare_path);
+        if (node == NULL) {
+            gf_log("cli", GF_LOG_ERROR, "failed to add node to the list");
+            dict_unref(dict);
+            ret = -1;
+            goto out;
         }
+    }

-        if (rsp.dict.dict_len) {
-                /* Unserialize the dictionary */
-                dict  = dict_new ();
+    ret = dict_get_int32_sizen(local->dict, "max_count", &max_count);
+    if (ret < 0) {
+        gf_log("cli", GF_LOG_ERROR, "failed to get max_count");
+        goto out;
+    }

-                ret = dict_unserialize (rsp.dict.dict_val,
-                                        rsp.dict.dict_len,
-                                        &dict);
-                if (ret < 0) {
-                        gf_log ("glusterd", GF_LOG_ERROR,
-                                "failed to "
-                                "unserialize req-buffer to dictionary");
-                        goto out;
-                }
+    if (list_count == max_count) {
+        list_for_each_entry_safe(node, tmpnode, &local->dict_list, list)
+        {
+            dict = node->ptr;
+            print_quota_list_from_quotad(frame, dict);
+            list_node_del(node);
+            dict_unref(dict);
         }
+    }

-        ret = dict_get_str (dict, "volname", &volname);
+out:
+    /* Bad Fix: the CLI holds the lock while it processes a command.
+     * When processing the quota list command, the sequence below runs
+     * in a single thread and deadlocks:
+     *
+     * 1) CLI holds the lock
+     * 2) Send rpc_clnt_submit request to quotad for quota usage
+     * 3) If quotad is down, rpc_clnt_submit invokes cbk function with error
+     * 4) cbk function cli_quotad_getlimit_cbk invokes
+     *    cli_cmd_broadcast_response, which tries to take the lock to
+     *    broadcast the results and hangs, because the same thread is
+     *    already holding the lock
+     *
+     * Broadcasting the response from a separate thread is not a good fix;
+     * this needs to be revisited with a better solution.
+     */
+    if (ret == -1) {
+        ret = pthread_create(&th_id, NULL, cli_cmd_broadcast_response_detached,
+                             (void *)-1);
        if (ret)
-                gf_log (THIS->name, GF_LOG_TRACE,
-                        "failed to get volname");
+            gf_log("cli", GF_LOG_ERROR, "pthread_create failed: %s",
+                   strerror(ret));
+    } else {
+        cli_cmd_broadcast_response(ret);
+    }
+    gf_free_xdr_cli_rsp(rsp);
+
+    return ret;
+}

-        ret = dict_get_str (dict, "limit_list", &limit_list);
-        if (ret)
-                gf_log (THIS->name, GF_LOG_TRACE,
-                        "failed to get limit_list");
+static int
+cli_quotad_getlimit(call_frame_t *frame, xlator_t *this, void *data)
+{
+    gf_cli_req req = {{
+        0,
+    }};
+    int ret = 0;
+    dict_t *dict = NULL;

-        ret = dict_get_int32 (dict, "type", &type);
-        if (ret)
-                gf_log (THIS->name, GF_LOG_TRACE,
-                        "failed to get type");
+    if (!frame || !this || !data) {
+        ret = -1;
+        goto out;
+    }

-        if (type == GF_QUOTA_OPTION_TYPE_LIST) {
-#if (HAVE_LIB_XML)
-                if (global_state->mode & GLUSTER_MODE_XML) {
-                        ret = cli_xml_output_vol_quota_limit_list
-                                (volname, limit_list, rsp.op_ret,
-                                 rsp.op_errno, rsp.op_errstr);
-                        if (ret)
-                                gf_log ("cli", GF_LOG_ERROR,
-                                        "Error outputting to xml");
-                        goto out;
+    dict = data;
+    ret = add_cli_cmd_timeout_to_dict(dict);

-                }
-#endif
-                if (limit_list) {
-                        gf_cli3_1_print_limit_list (volname,
-                                                    limit_list,
-                                                    rsp.op_errstr);
-                } else {
-                        gf_log ("cli", GF_LOG_INFO, "Received resp to quota "
-                                "command ");
-                        if (rsp.op_errstr)
-                                snprintf (msg, sizeof (msg), "%s",
-                                          rsp.op_errstr);
-                }
-        } else {
-                gf_log ("cli", GF_LOG_INFO, "Received resp to quota command ");
-                if (rsp.op_errstr)
-                        snprintf (msg, sizeof (msg), "%s", rsp.op_errstr);
-                else
-                        snprintf (msg, sizeof (msg), "successful");
-        }
+    ret = dict_allocate_and_serialize(dict, &req.dict.dict_val,
+                                      &req.dict.dict_len);
+    if (ret < 0) {
+        gf_log(this->name, GF_LOG_ERROR, DICT_SERIALIZE_FAIL);

-#if (HAVE_LIB_XML)
-xml_output:
-        if (global_state->mode & GLUSTER_MODE_XML) {
-                ret = cli_xml_output_str ("volQuota", msg, rsp.op_ret,
-                                          rsp.op_errno, rsp.op_errstr);
-                if (ret)
-                        gf_log ("cli", GF_LOG_ERROR,
-                                "Error outputting to xml");
-                goto out;
-        }
-#endif
+        goto out;
+    }

-        if (strlen (msg) > 0) {
-                if (rsp.op_ret)
-                        cli_err ("%s", msg);
-                else
-                        cli_out ("%s", msg);
-        }
+    ret = cli_cmd_submit(global_quotad_rpc, &req, frame, &cli_quotad_clnt,
+                         GF_AGGREGATOR_GETLIMIT, NULL, this,
+                         cli_quotad_getlimit_cbk, (xdrproc_t)xdr_gf_cli_req);

-        ret = rsp.op_ret;
 out:
-        cli_cmd_broadcast_response (ret);
+    GF_FREE(req.dict.dict_val);
+    gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
+    return ret;
+}

-        if (rsp.dict.dict_val)
-                free (rsp.dict.dict_val);
+static void
+gf_cli_quota_list(cli_local_t *local, char *volname, dict_t *dict,
+                  char *default_sl, int count, int op_ret, int op_errno,
+                  char *op_errstr)
+{
+    if (!cli_cmd_connected())
+        goto out;

-        return ret;
+    if (count > 0) {
+        GF_VALIDATE_OR_GOTO("cli", volname, out);
+
+        gf_cli_print_limit_list_from_dict(local, volname, dict, default_sl,
+                                          count, op_ret, op_errno, op_errstr);
+    }
+out:
+    return;
 }
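/* Editorial note, not part of this patch: the detached hand-off used by
 * cli_quotad_getlimit_cbk() above is the classic escape from a self-deadlock
 * on a non-recursive lock. A minimal standalone sketch of the same idea;
 * every name here (cmd_lock, broadcast_worker, notify_from_locked_context)
 * is hypothetical, not a glusterfs API: */

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t cmd_lock = PTHREAD_MUTEX_INITIALIZER;

static void *
broadcast_worker(void *opaque)
{
    /* Runs outside the caller's lock context, so taking cmd_lock is safe. */
    pthread_mutex_lock(&cmd_lock);
    /* ... wake up waiters with (int)(intptr_t)opaque ... */
    pthread_mutex_unlock(&cmd_lock);
    return NULL;
}

/* Called while this thread already holds cmd_lock: locking it again here
 * would self-deadlock, so the notification is pushed to a short-lived
 * detached thread that takes the lock on its own. */
static int
notify_from_locked_context(int status)
{
    pthread_t tid;
    int err = pthread_create(&tid, NULL, broadcast_worker,
                             (void *)(intptr_t)status);

    if (err == 0)
        pthread_detach(tid); /* fire and forget */
    return err; /* pthread_create returns an error number, not errno */
}

-int
-gf_cli3_1_getspec_cbk (struct rpc_req *req, struct iovec *iov,
-                       int count, void *myframe)
+static int
+gf_cli_quota_cbk(struct rpc_req *req, struct iovec *iov, int count,
+                 void *myframe)
 {
-        gf_getspec_rsp rsp = {0,};
-        int ret = -1;
-        char *spec = NULL;
-
-        if (-1 == req->rpc_status) {
-                goto out;
+    gf_cli_rsp rsp = {
+        0,
+    };
+    int ret = -1;
+    dict_t *dict = NULL;
+    char *volname = NULL;
+    int32_t type = 0;
+    call_frame_t *frame = NULL;
+    char *default_sl = NULL;
+    cli_local_t *local = NULL;
+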
char *default_sl_dup = NULL; + int32_t entry_count = 0; + + GF_ASSERT(myframe); + + if (-1 == req->rpc_status) { + goto out; + } + + frame = myframe; + + GF_ASSERT(frame->local); + + local = frame->local; + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL); + goto out; + } + + if (rsp.op_ret) { + ret = -1; + if (global_state->mode & GLUSTER_MODE_XML) + goto xml_output; + + if (strcmp(rsp.op_errstr, "")) { + cli_err("quota command failed : %s", rsp.op_errstr); + if (rsp.op_ret == -ENOENT) + cli_err("please enter the path relative to the volume"); + } else { + cli_err("quota command : failed"); } - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_getspec_rsp); - if (ret < 0 || rsp.op_ret == -1) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; + goto out; + } + + if (rsp.dict.dict_len) { + /* Unserialize the dictionary */ + dict = dict_new(); + + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict); + if (ret < 0) { + gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL); + goto out; } + } - gf_log ("cli", GF_LOG_INFO, "Received resp to getspec"); + gf_log("cli", GF_LOG_DEBUG, "Received resp to quota command"); - spec = GF_MALLOC (rsp.op_ret + 1, cli_mt_char); - if (!spec) { - gf_log("", GF_LOG_ERROR, "out of memory"); - goto out; + ret = dict_get_str_sizen(dict, "default-soft-limit", &default_sl); + if (ret) + gf_log(frame->this->name, GF_LOG_TRACE, + "failed to get default soft limit"); + + // default-soft-limit is part of rsp_dict only iff we sent + // GLUSTER_CLI_QUOTA with type being GF_QUOTA_OPTION_TYPE_LIST + if (default_sl) { + default_sl_dup = gf_strdup(default_sl); + if (!default_sl_dup) { + ret = -1; + goto out; + } + ret = dict_set_dynstr_sizen(local->dict, "default-soft-limit", + default_sl_dup); + if (ret) { + gf_log(frame->this->name, GF_LOG_TRACE, + "failed to set default soft limit"); + GF_FREE(default_sl_dup); } - memcpy (spec, rsp.spec, rsp.op_ret); - spec[rsp.op_ret] = '\0'; - cli_out ("%s", spec); - GF_FREE (spec); + } - ret = 0; + ret = dict_get_str_sizen(dict, "volname", &volname); + if (ret) + gf_log(frame->this->name, GF_LOG_ERROR, "failed to get volname"); -out: - cli_cmd_broadcast_response (ret); - return ret; -} + ret = dict_get_int32_sizen(dict, "type", &type); + if (ret) + gf_log(frame->this->name, GF_LOG_TRACE, "failed to get type"); -int -gf_cli3_1_pmap_b2p_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - pmap_port_by_brick_rsp rsp = {0,}; - int ret = -1; - char *spec = NULL; + ret = dict_get_int32_sizen(dict, "count", &entry_count); + if (ret) + gf_log(frame->this->name, GF_LOG_TRACE, "failed to get count"); - if (-1 == req->rpc_status) { - goto out; - } + if ((type == GF_QUOTA_OPTION_TYPE_LIST) || + (type == GF_QUOTA_OPTION_TYPE_LIST_OBJECTS)) { + gf_cli_quota_list(local, volname, dict, default_sl, entry_count, + rsp.op_ret, rsp.op_errno, rsp.op_errstr); - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_pmap_port_by_brick_rsp); - if (ret < 0 || rsp.op_ret == -1) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_vol_quota_limit_list_end(local); + if (ret < 0) { + ret = -1; + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + } + goto out; } + } - gf_log ("cli", GF_LOG_INFO, "Received resp to pmap b2p"); +xml_output: - cli_out ("%d", rsp.port); - GF_FREE (spec); + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_str("volQuota", NULL, rsp.op_ret, 
rsp.op_errno,
+                                 rsp.op_errstr);
+        if (ret)
+            gf_log("cli", GF_LOG_ERROR, XML_ERROR);
+        goto out;
+    }
+
+    if (!rsp.op_ret && type != GF_QUOTA_OPTION_TYPE_LIST &&
+        type != GF_QUOTA_OPTION_TYPE_LIST_OBJECTS)
+        cli_out("volume quota : success");

+    ret = rsp.op_ret;
 out:
+
+    if ((type == GF_QUOTA_OPTION_TYPE_LIST) ||
+        (type == GF_QUOTA_OPTION_TYPE_LIST_OBJECTS)) {
+        gluster_remove_auxiliary_mount(volname);
+    }
+
+    cli_cmd_broadcast_response(ret);
+    if (dict)
+        dict_unref(dict);
+
+    gf_free_xdr_cli_rsp(rsp);
+
+    return ret;
 }

+static int
+gf_cli_getspec_cbk(struct rpc_req *req, struct iovec *iov, int count,
+                   void *myframe)
+{
+    gf_getspec_rsp rsp = {
+        0,
+    };
+    int ret = -1;
+    char *spec = NULL;
+
+    GF_ASSERT(myframe);
+
+    if (-1 == req->rpc_status) {
+        goto out;
+    }
+
+    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_getspec_rsp);
+    if (ret < 0) {
+        gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
+               XDR_DECODE_FAIL);
+        goto out;
+    }
+
+    if (rsp.op_ret == -1) {
+        gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
+               "getspec failed");
+        ret = -1;
+        goto out;
+    }
+
+    gf_log("cli", GF_LOG_INFO, "Received resp to getspec");
+
+    spec = GF_MALLOC(rsp.op_ret + 1, cli_mt_char);
+    if (!spec) {
+        gf_log("", GF_LOG_ERROR, "out of memory");
+        ret = -1;
+        goto out;
+    }
+    memcpy(spec, rsp.spec, rsp.op_ret);
+    spec[rsp.op_ret] = '\0';
+    cli_out("%s", spec);
+    GF_FREE(spec);
+
+    ret = 0;

 out:
+    cli_cmd_broadcast_response(ret);
+    gf_free_xdr_getspec_rsp(rsp);
+    return ret;
+}

+static int
+gf_cli_pmap_b2p_cbk(struct rpc_req *req, struct iovec *iov, int count,
+                    void *myframe)
 {
-        pmap_port_by_brick_rsp rsp = {0,};
-        int ret = -1;
-        char *spec = NULL;
+    pmap_port_by_brick_rsp rsp = {
+        0,
+    };
+    int ret = -1;

-        if (-1 == req->rpc_status) {
-                goto out;
-        }
+    GF_ASSERT(myframe);

-        ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_pmap_port_by_brick_rsp);
-        if (ret < 0 || rsp.op_ret == -1) {
-                gf_log ("", GF_LOG_ERROR, "error");
-                goto out;
+    if (-1 == req->rpc_status) {
+        goto out;
+    }
+
+    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_pmap_port_by_brick_rsp);
+    if (ret < 0) {
+        gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
+               XDR_DECODE_FAIL);
+        goto out;
+    }
+
+    if (rsp.op_ret == -1) {
+        gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
+               "pmap_b2p failed");
+        ret = -1;
+        goto out;
+    }

-        gf_log ("cli", GF_LOG_INFO, "Received resp to pmap b2p");
+    gf_log("cli", GF_LOG_INFO, "Received resp to pmap b2p");

-        cli_out ("%d", rsp.port);
-        GF_FREE (spec);
+    cli_out("%d", rsp.port);

+    ret = rsp.op_ret;
-        ret = rsp.op_ret;
 out:
-        cli_cmd_broadcast_response (ret);
-        return ret;
+    cli_cmd_broadcast_response(ret);
+    return ret;
 }
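/* Editorial aside, not part of this patch: gf_cli_getspec_cbk() and
 * gf_cli_pmap_b2p_cbk() above follow the decode-and-validate shape shared by
 * nearly every callback in this file. Condensed, it looks like the sketch
 * below; my_rsp_t and xdr_my_rsp stand in for a real XDR pair, and the
 * sketch would only compile inside this tree: */

static int
generic_cbk(struct rpc_req *req, struct iovec *iov, int count, void *myframe)
{
    my_rsp_t rsp = {
        0,
    };
    int ret = -1;

    if (-1 == req->rpc_status) /* transport-level failure */
        goto out;

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_my_rsp);
    if (ret < 0) /* could not decode the wire format */
        goto out;

    if (rsp.op_ret == -1) { /* remote end ran the op and failed */
        ret = -1;
        goto out;
    }

    ret = 0; /* success: consume the rsp fields here */
out:
    cli_cmd_broadcast_response(ret); /* wake the waiting CLI command */
    return ret;
}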
-int32_t
-gf_cli3_1_probe (call_frame_t *frame, xlator_t *this,
-                 void *data)
+static int32_t
+gf_cli_probe(call_frame_t *frame, xlator_t *this, void *data)
 {
-        gf1_cli_probe_req req = {0,};
-        int ret = 0;
-        dict_t *dict = NULL;
-        char *hostname = NULL;
-        int port = 0;
+    gf_cli_req req = {
+        {
+            0,
+        },
+    };
+    int ret = 0;
+    dict_t *dict = NULL;
+    int port = 0;
+
+    if (!frame || !this || !data) {
+        ret = -1;
+        goto out;
+    }
+
+    dict = data;
+
+    ret = dict_get_int32_sizen(dict, "port", &port);
+    if (ret) {
+        ret = dict_set_int32_sizen(dict, "port", CLI_GLUSTERD_PORT);
+        if (ret)
+            goto out;
+    }

-        if (!frame || !this || !data) {
-                ret = -1;
-                goto out;
-        }
+    ret = cli_to_glusterd(&req, frame, gf_cli_probe_cbk,
+                          (xdrproc_t)xdr_gf_cli_req, dict, GLUSTER_CLI_PROBE,
+                          this, cli_rpc_prog, NULL);

-        dict = data;
-        ret = dict_get_str (dict, "hostname", &hostname);
-        if (ret)
-                goto out;
+out:
+    GF_FREE(req.dict.dict_val);
+    gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
+
+    return ret;
+}

-        ret = dict_get_int32 (dict, "port", &port);
+static int32_t
+gf_cli_deprobe(call_frame_t *frame, xlator_t *this, void *data)
+{
+    gf_cli_req req = {
+        {
+            0,
+        },
+    };
+    int ret = 0;
+    dict_t *dict = NULL;
+    int port = 0;
+    int flags = 0;
+
+    if (!frame || !this || !data) {
+        ret = -1;
+        goto out;
+    }
+
+    dict = data;
+    ret = dict_get_int32_sizen(dict, "port", &port);
+    if (ret) {
+        ret = dict_set_int32_sizen(dict, "port", CLI_GLUSTERD_PORT);
        if (ret)
-                port = CLI_GLUSTERD_PORT;
+            goto out;
+    }

-        ret = dict_get_int32 (dict, "flags", &flags);
+    ret = dict_get_int32_sizen(dict, "flags", &flags);
+    if (ret) {
+        ret = dict_set_int32_sizen(dict, "flags", 0);
        if (ret)
-                flags = 0;
+            goto out;
+    }

-        req.hostname = hostname;
-        req.port = port;
-        req.flags = flags;
-        ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
-                              GLUSTER_CLI_DEPROBE, NULL,
-                              this, gf_cli3_1_deprobe_cbk,
-                              (xdrproc_t)xdr_gf1_cli_deprobe_req);
+    ret = cli_to_glusterd(&req, frame, gf_cli_deprobe_cbk,
+                          (xdrproc_t)xdr_gf_cli_req, dict, GLUSTER_CLI_DEPROBE,
+                          this, cli_rpc_prog, NULL);

 out:
-        gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
-        return ret;
+    GF_FREE(req.dict.dict_val);
+    gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
+
+    return ret;
 }

-int32_t
-gf_cli3_1_list_friends (call_frame_t *frame, xlator_t *this,
-                        void *data)
+static int32_t
+gf_cli_list_friends(call_frame_t *frame, xlator_t *this, void *data)
 {
-        gf1_cli_peer_list_req req = {0,};
-        int ret = 0;
+    gf1_cli_peer_list_req req = {
+        0,
+    };
+    int ret = 0;
+    unsigned long flags = 0;
+
+    if (!frame || !this) {
+        ret = -1;
+        goto out;
+    }
+
+    GF_ASSERT(frame->local == NULL);
+
+    flags = (long)data;
+    req.flags = flags;
+    frame->local = (void *)flags;
+    ret = cli_cmd_submit(
+        NULL, &req, frame, cli_rpc_prog, GLUSTER_CLI_LIST_FRIENDS, NULL, this,
+        gf_cli_list_friends_cbk, (xdrproc_t)xdr_gf1_cli_peer_list_req);

-        if (!frame || !this) {
-                ret = -1;
-                goto out;
-        }
+out:
+    if (ret && frame) {
+        /*
+         * If everything goes fine, gf_cli_list_friends_cbk()
+         * [invoked through cli_cmd_submit()] resets
+         * frame->local to NULL. In case cli_cmd_submit()
+         * fails in between, reset frame->local here.
+ */ + frame->local = NULL; + } + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + return ret; +} - req.flags = GF_CLI_LIST_ALL; +static int32_t +gf_cli_get_state(call_frame_t *frame, xlator_t *this, void *data) +{ + gf_cli_req req = { + { + 0, + }, + }; + int ret = 0; + dict_t *dict = NULL; - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_LIST_FRIENDS, NULL, - this, gf_cli3_1_list_friends_cbk, - (xdrproc_t) xdr_gf1_cli_peer_list_req); + dict = data; -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - return ret; + ret = cli_to_glusterd(&req, frame, gf_cli_get_state_cbk, + (xdrproc_t)xdr_gf_cli_req, dict, + GLUSTER_CLI_GET_STATE, this, cli_rpc_prog, NULL); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + GF_FREE(req.dict.dict_val); + + return ret; } -int32_t -gf_cli3_1_get_next_volume (call_frame_t *frame, xlator_t *this, - void *data) +static int32_t +gf_cli_get_next_volume(call_frame_t *frame, xlator_t *this, void *data) { + int ret = 0; + cli_cmd_volume_get_ctx_t *ctx = NULL; + cli_local_t *local = NULL; - int ret = 0; - cli_cmd_volume_get_ctx_t *ctx = NULL; - cli_local_t *local = NULL; + if (!frame || !this || !data) { + ret = -1; + goto out; + } - if (!frame || !this || !data) { - ret = -1; - goto out; - } - - ctx = data; - local = frame->local; + ctx = data; + local = frame->local; -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_vol_info_begin (local, 0, 0, ""); - if (ret) { - gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml"); - goto out; - } + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_vol_info_begin(local, 0, 0, ""); + if (ret) { + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; } -#endif + } - ret = gf_cli3_1_get_volume (frame, this, data); + ret = gf_cli_get_volume(frame, this, data); + if (!local || !local->get_vol.volname) { + if ((global_state->mode & GLUSTER_MODE_XML)) + goto end_xml; - if (!local || !local->get_vol.volname) { -#if (HAVE_LIB_XML) - if ((global_state->mode & GLUSTER_MODE_XML)) - goto end_xml; -#endif - cli_out ("No volumes present"); - goto out; - } + cli_err("No volumes present"); + goto out; + } + ctx->volname = local->get_vol.volname; + while (ctx->volname) { + ret = gf_cli_get_volume(frame, this, ctx); + if (ret) + goto out; ctx->volname = local->get_vol.volname; + } - while (ctx->volname) { - ret = gf_cli3_1_get_volume (frame, this, ctx); - if (ret) - goto out; - ctx->volname = local->get_vol.volname; - } - -#if (HAVE_LIB_XML) end_xml: - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_vol_info_end (local); - if (ret) - gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml"); - } -#endif + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_vol_info_end(local); + if (ret) + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + } out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - return ret; + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + return ret; } -int32_t -gf_cli3_1_get_volume (call_frame_t *frame, xlator_t *this, - void *data) +static int32_t +gf_cli_get_volume(call_frame_t *frame, xlator_t *this, void *data) { - gf_cli_req req = {{0,}}; - int ret = 0; - cli_cmd_volume_get_ctx_t *ctx = NULL; - dict_t *dict = NULL; - int32_t flags = 0; + gf_cli_req req = {{ + 0, + }}; + int ret = 0; + cli_cmd_volume_get_ctx_t *ctx = NULL; + dict_t *dict = NULL; + int32_t flags = 0; + + if (!this || !data) { + ret = -1; + goto out; + } + + ctx = data; + + dict = dict_new(); + if (!dict) { + gf_log(THIS->name, GF_LOG_ERROR, "Failed to 
create the dict"); + ret = -1; + goto out; + } + + if (ctx->volname) { + ret = dict_set_str_sizen(dict, "volname", ctx->volname); + if (ret) + goto out; + } + + flags = ctx->flags; + ret = dict_set_int32_sizen(dict, "flags", flags); + if (ret) { + gf_log(frame->this->name, GF_LOG_ERROR, "failed to set flags"); + goto out; + } + + ret = dict_allocate_and_serialize(dict, &req.dict.dict_val, + &req.dict.dict_len); + if (ret) { + gf_log(frame->this->name, GF_LOG_ERROR, DICT_SERIALIZE_FAIL); + goto out; + } + + ret = cli_cmd_submit(NULL, &req, frame, cli_rpc_prog, + GLUSTER_CLI_GET_VOLUME, NULL, this, + gf_cli_get_volume_cbk, (xdrproc_t)xdr_gf_cli_req); - if (!frame || !this || !data) { - ret = -1; - goto out; - } +out: + if (dict) + dict_unref(dict); - ctx = data; + GF_FREE(req.dict.dict_val); - dict = dict_new (); - if (!dict) - goto out; + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + return ret; +} - if (ctx->volname) { - ret = dict_set_str (dict, "volname", ctx->volname); - if (ret) - goto out; - } +static int32_t +gf_cli3_1_uuid_get(call_frame_t *frame, xlator_t *this, void *data) +{ + gf_cli_req req = {{ + 0, + }}; + int ret = 0; + dict_t *dict = NULL; + + dict = data; + ret = cli_to_glusterd(&req, frame, gf_cli3_1_uuid_get_cbk, + (xdrproc_t)xdr_gf_cli_req, dict, GLUSTER_CLI_UUID_GET, + this, cli_rpc_prog, NULL); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + GF_FREE(req.dict.dict_val); + return ret; +} - flags = ctx->flags; - ret = dict_set_int32 (dict, "flags", flags); - if (ret) { - gf_log (THIS->name, GF_LOG_ERROR, "failed to set flags"); - goto out; - } +static int32_t +gf_cli3_1_uuid_reset(call_frame_t *frame, xlator_t *this, void *data) +{ + gf_cli_req req = {{ + 0, + }}; + int ret = 0; + dict_t *dict = NULL; + + dict = data; + ret = cli_to_glusterd(&req, frame, gf_cli3_1_uuid_reset_cbk, + (xdrproc_t)xdr_gf_cli_req, dict, + GLUSTER_CLI_UUID_RESET, this, cli_rpc_prog, NULL); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + GF_FREE(req.dict.dict_val); + return ret; +} - ret = dict_allocate_and_serialize (dict, - &req.dict.dict_val, - (size_t *)&req.dict.dict_len); +static int32_t +gf_cli_create_volume(call_frame_t *frame, xlator_t *this, void *data) +{ + gf_cli_req req = {{ + 0, + }}; + int ret = 0; + dict_t *dict = NULL; - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_GET_VOLUME, NULL, - this, gf_cli3_1_get_volume_cbk, - (xdrproc_t) xdr_gf_cli_req); + dict = data; -out: - if (dict) - dict_unref (dict); + ret = cli_to_glusterd(&req, frame, gf_cli_create_volume_cbk, + (xdrproc_t)xdr_gf_cli_req, dict, + GLUSTER_CLI_CREATE_VOLUME, this, cli_rpc_prog, NULL); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); - if (req.dict.dict_val) - GF_FREE (req.dict.dict_val); + GF_FREE(req.dict.dict_val); - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - return ret; + return ret; } +static int32_t +gf_cli_delete_volume(call_frame_t *frame, xlator_t *this, void *data) +{ + gf_cli_req req = {{ + 0, + }}; + int ret = 0; + dict_t *dict = NULL; + + dict = data; -int32_t -gf_cli3_1_create_volume (call_frame_t *frame, xlator_t *this, - void *data) + ret = cli_to_glusterd(&req, frame, gf_cli_delete_volume_cbk, + (xdrproc_t)xdr_gf_cli_req, dict, + GLUSTER_CLI_DELETE_VOLUME, this, cli_rpc_prog, NULL); + GF_FREE(req.dict.dict_val); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + + return ret; +} + +static int32_t +gf_cli_start_volume(call_frame_t *frame, xlator_t *this, void *data) { - gf_cli_req req = {{0,}}; - int ret = 0; - dict_t *dict = NULL; - cli_local_t *local = NULL; + gf_cli_req 
req = {{ + 0, + }}; + int ret = 0; + dict_t *dict = NULL; - if (!frame || !this || !data) { - ret = -1; - goto out; - } + dict = data; - dict = dict_ref ((dict_t *)data); + ret = cli_to_glusterd(&req, frame, gf_cli_start_volume_cbk, + (xdrproc_t)xdr_gf_cli_req, dict, + GLUSTER_CLI_START_VOLUME, this, cli_rpc_prog, NULL); - ret = dict_allocate_and_serialize (dict, - &req.dict.dict_val, - (size_t *)&req.dict.dict_len); - if (ret < 0) { - gf_log (this->name, GF_LOG_DEBUG, - "failed to get serialized length of dict"); - goto out; - } + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + GF_FREE(req.dict.dict_val); - local = cli_local_get (); + return ret; +} - if (local) { - local->dict = dict_ref (dict); - frame->local = local; - } +static int32_t +gf_cli_stop_volume(call_frame_t *frame, xlator_t *this, void *data) +{ + gf_cli_req req = {{ + 0, + }}; + int ret = 0; + dict_t *dict = data; - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_CREATE_VOLUME, NULL, - this, gf_cli3_1_create_volume_cbk, - (xdrproc_t) xdr_gf_cli_req); + dict = data; + ret = cli_to_glusterd(&req, frame, gf_cli_stop_volume_cbk, + (xdrproc_t)xdr_gf_cli_req, dict, + GLUSTER_CLI_STOP_VOLUME, this, cli_rpc_prog, NULL); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + GF_FREE(req.dict.dict_val); + return ret; +} -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); +static int32_t +gf_cli_defrag_volume(call_frame_t *frame, xlator_t *this, void *data) +{ + gf_cli_req req = {{ + 0, + }}; + int ret = 0; + dict_t *dict = NULL; - if (dict) - dict_unref (dict); + dict = data; - if (req.dict.dict_val) { - GF_FREE (req.dict.dict_val); - } + ret = cli_to_glusterd(&req, frame, gf_cli_defrag_volume_cbk, + (xdrproc_t)xdr_gf_cli_req, dict, + GLUSTER_CLI_DEFRAG_VOLUME, this, cli_rpc_prog, NULL); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + GF_FREE(req.dict.dict_val); - return ret; + return ret; } -int32_t -gf_cli3_1_delete_volume (call_frame_t *frame, xlator_t *this, - void *data) +static int32_t +gf_cli_rename_volume(call_frame_t *frame, xlator_t *this, void *data) { - gf_cli_req req = {{0,}}; - int ret = 0; - cli_local_t *local = NULL; - dict_t *dict = NULL; + gf_cli_req req = {{ + 0, + }}; + int ret = 0; + dict_t *dict = NULL; - if (!frame || !this || !data) { - ret = -1; - goto out; - } + if (!frame || !this || !data) { + ret = -1; + goto out; + } - local = cli_local_get (); + dict = data; - dict = dict_new (); - ret = dict_set_str (dict, "volname", data); - if (ret) { - gf_log (THIS->name, GF_LOG_WARNING, "dict set failed"); - goto out; - } - if (local) { - local->dict = dict_ref (dict); - frame->local = local; - } + ret = dict_allocate_and_serialize(dict, &req.dict.dict_val, + &req.dict.dict_len); + if (ret < 0) { + gf_log(this->name, GF_LOG_ERROR, DICT_SERIALIZE_FAIL); - ret = dict_allocate_and_serialize (dict, - &req.dict.dict_val, - (size_t *)&req.dict.dict_len); - if (ret < 0) { - gf_log (this->name, GF_LOG_ERROR, - "failed to get serialize dict"); - goto out; - } + goto out; + } - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_DELETE_VOLUME, NULL, - this, gf_cli3_1_delete_volume_cbk, - (xdrproc_t)xdr_gf_cli_req); + ret = cli_cmd_submit(NULL, &req, frame, cli_rpc_prog, + GLUSTER_CLI_RENAME_VOLUME, NULL, this, + gf_cli_rename_volume_cbk, (xdrproc_t)xdr_gf_cli_req); out: - if (dict) - dict_unref (dict); - if (req.dict.dict_val) - GF_FREE (req.dict.dict_val); - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + GF_FREE(req.dict.dict_val); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); - return 
ret; + return ret; } -int32_t -gf_cli3_1_start_volume (call_frame_t *frame, xlator_t *this, - void *data) +static int32_t +gf_cli_reset_volume(call_frame_t *frame, xlator_t *this, void *data) { - gf_cli_req req = {{0,}}; - int ret = 0; - cli_local_t *local = NULL; - dict_t *dict = NULL; + gf_cli_req req = {{ + 0, + }}; + int ret = 0; + dict_t *dict = NULL; + + dict = data; + + ret = cli_to_glusterd(&req, frame, gf_cli_reset_volume_cbk, + (xdrproc_t)xdr_gf_cli_req, dict, + GLUSTER_CLI_RESET_VOLUME, this, cli_rpc_prog, NULL); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + GF_FREE(req.dict.dict_val); + return ret; +} - if (!frame || !this || !data) { - ret = -1; - goto out; - } +static int32_t +gf_cli_ganesha(call_frame_t *frame, xlator_t *this, void *data) +{ + gf_cli_req req = {{ + 0, + }}; + int ret = 0; + dict_t *dict = NULL; - dict = data; - local = cli_local_get (); + dict = data; - ret = dict_allocate_and_serialize (dict, - &req.dict.dict_val, - (size_t *)&req.dict.dict_len); - if (ret < 0) { - gf_log (this->name, GF_LOG_ERROR, - "failed to serialize dict"); - goto out; - } + ret = cli_to_glusterd(&req, frame, gf_cli_ganesha_cbk, + (xdrproc_t)xdr_gf_cli_req, dict, GLUSTER_CLI_GANESHA, + this, cli_rpc_prog, NULL); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + return ret; +} - if (local) { - local->dict = dict_ref (dict); - frame->local = local; - } +static int32_t +gf_cli_set_volume(call_frame_t *frame, xlator_t *this, void *data) +{ + gf_cli_req req = {{ + 0, + }}; + int ret = 0; + dict_t *dict = NULL; - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_START_VOLUME, NULL, - this, gf_cli3_1_start_volume_cbk, - (xdrproc_t) xdr_gf_cli_req); + dict = data; -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + ret = cli_to_glusterd(&req, frame, gf_cli_set_volume_cbk, + (xdrproc_t)xdr_gf_cli_req, dict, + GLUSTER_CLI_SET_VOLUME, this, cli_rpc_prog, NULL); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + GF_FREE(req.dict.dict_val); - return ret; + return ret; } int32_t -gf_cli3_1_stop_volume (call_frame_t *frame, xlator_t *this, - void *data) +gf_cli_add_brick(call_frame_t *frame, xlator_t *this, void *data) { - gf_cli_req req = {{0,}}; - int ret = 0; - cli_local_t *local = NULL; - dict_t *dict = data; + gf_cli_req req = {{ + 0, + }}; + int ret = 0; + dict_t *dict = NULL; - if (!frame || !this || !data) { - ret = -1; - goto out; - } + dict = data; - local = cli_local_get (); - dict = data; + ret = cli_to_glusterd(&req, frame, gf_cli_add_brick_cbk, + (xdrproc_t)xdr_gf_cli_req, dict, + GLUSTER_CLI_ADD_BRICK, this, cli_rpc_prog, NULL); - if (local) { - local->dict = dict_ref (dict); - frame->local = local; - } + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); - ret = dict_allocate_and_serialize (dict, - &req.dict.dict_val, - (size_t *) &req.dict.dict_len); - if (ret < 0) { - gf_log (this->name, GF_LOG_ERROR, - "failed to serialize the data"); + GF_FREE(req.dict.dict_val); - goto out; - } - - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_STOP_VOLUME, NULL, - this, gf_cli3_1_stop_volume_cbk, - (xdrproc_t) xdr_gf_cli_req); - -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - - return ret; + return ret; } -int32_t -gf_cli3_1_defrag_volume (call_frame_t *frame, xlator_t *this, - void *data) -{ - gf_cli_req req = {{0,}}; - int ret = 0; - cli_local_t *local = NULL; - char *volname = NULL; - char *cmd_str = NULL; - dict_t *dict = NULL; - gf_cli_defrag_type cmd = 0; - dict_t *req_dict = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } 
-
-        dict = data;
-
-        ret = dict_get_str (dict, "volname", &volname);
-        if (ret)
-                gf_log ("", GF_LOG_DEBUG, "error");
+static int32_t
+gf_cli_remove_brick(call_frame_t *frame, xlator_t *this, void *data)
+{
+    gf_cli_req req = {{
+        0,
+    }};
+    gf_cli_req status_req = {{
+        0,
+    }};
+    int ret = 0;
+    dict_t *dict = NULL;
+    int32_t command = 0;
+    int32_t cmd = 0;
+
+    if (!frame || !this) {
+        ret = -1;
+        goto out;
+    }
+
+    dict = data;
+
+    ret = dict_get_int32_sizen(dict, "command", &command);
+    if (ret)
+        goto out;
+
+    if ((command != GF_OP_CMD_STATUS) && (command != GF_OP_CMD_STOP)) {
+        ret = cli_to_glusterd(
+            &req, frame, gf_cli_remove_brick_cbk, (xdrproc_t)xdr_gf_cli_req,
+            dict, GLUSTER_CLI_REMOVE_BRICK, this, cli_rpc_prog, NULL);
+    } else {
+        /* Need rebalance status to be sent :-) */
+        if (command == GF_OP_CMD_STATUS)
+            cmd |= GF_DEFRAG_CMD_STATUS;
+        else
+            cmd |= GF_DEFRAG_CMD_STOP;

-        ret = dict_get_str (dict, "command", &cmd_str);
+        ret = dict_set_int32_sizen(dict, "rebalance-command", (int32_t)cmd);
         if (ret) {
-                gf_log ("", GF_LOG_DEBUG, "error");
-                goto out;
+            gf_log(this->name, GF_LOG_ERROR, "Failed to set dict");
+            goto out;
         }

-        if (strcmp (cmd_str, "start") == 0) {
-                cmd = GF_DEFRAG_CMD_START;
-                ret = dict_get_str (dict, "option", &cmd_str);
-                if (!ret) {
-                        if (strcmp (cmd_str, "force") == 0) {
-                                cmd = GF_DEFRAG_CMD_START_FORCE;
-                        }
-                }
-                goto done;
-        }
+        ret = cli_to_glusterd(
+            &status_req, frame, gf_cli3_remove_brick_status_cbk,
+            (xdrproc_t)xdr_gf_cli_req, dict, GLUSTER_CLI_DEFRAG_VOLUME, this,
+            cli_rpc_prog, NULL);
+    }

-        if (strcmp (cmd_str, "fix-layout") == 0) {
-                cmd = GF_DEFRAG_CMD_START_LAYOUT_FIX;
-                goto done;
-        }
-        if (strcmp (cmd_str, "stop") == 0) {
-                cmd = GF_DEFRAG_CMD_STOP;
-                goto done;
-        }
-        if (strcmp (cmd_str, "status") == 0) {
-                cmd = GF_DEFRAG_CMD_STATUS;
-        }
+out:
+    gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);

-done:
-        local = cli_local_get ();
+    GF_FREE(req.dict.dict_val);

-        req_dict = dict_new ();
-        if (!req_dict) {
-                ret = -1;
-                goto out;
-        }
+    GF_FREE(status_req.dict.dict_val);

-        ret = dict_set_str (req_dict, "volname", volname);
-        if (ret) {
-                gf_log (THIS->name, GF_LOG_ERROR,
-                        "Failed to set dict");
-                goto out;
-        }
+    return ret;
+}
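/* Editorial aside, not part of this patch: gf_cli_remove_brick() above is one
 * of many callers that have collapsed onto the same cli_to_glusterd() shape.
 * The skeleton, condensed; GLUSTER_CLI_FOO, gf_cli_generic_op and its
 * callback are placeholders for a real operation: */

static int32_t
gf_cli_generic_op(call_frame_t *frame, xlator_t *this, void *data)
{
    gf_cli_req req = {{
        0,
    }};
    dict_t *dict = data; /* arguments assembled by the command parser */
    int ret;

    /* cli_to_glusterd() serializes `dict` into req.dict and submits the
     * GLUSTER_CLI_FOO procedure; the callback consumes the response. */
    ret = cli_to_glusterd(&req, frame, gf_cli_generic_op_cbk,
                          (xdrproc_t)xdr_gf_cli_req, dict, GLUSTER_CLI_FOO,
                          this, cli_rpc_prog, NULL);
    gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
    GF_FREE(req.dict.dict_val); /* buffer allocated during serialization */
    return ret;
}

-        ret = dict_set_int32 (req_dict, "rebalance-command", (int32_t) cmd);
+static int32_t
+gf_cli_reset_brick(call_frame_t *frame, xlator_t *this, void *data)
+{
+    gf_cli_req req = {{
+        0,
+    }};
+    int ret = 0;
+    dict_t *dict = NULL;
+    char *dst_brick = NULL;
+    char *op = NULL;
+
+    if (!frame || !this || !data) {
+        ret = -1;
+        goto out;
+    }
+
+    dict = data;
+
+    ret = dict_get_str_sizen(dict, "operation", &op);
+    if (ret) {
+        gf_log(this->name, GF_LOG_DEBUG, "dict_get on operation failed");
+        goto out;
+    }
+
+    if (!strcmp(op, "GF_RESET_OP_COMMIT") ||
+        !strcmp(op, "GF_RESET_OP_COMMIT_FORCE")) {
+        ret = dict_get_str_sizen(dict, "dst-brick", &dst_brick);
        if (ret) {
-                gf_log (THIS->name, GF_LOG_ERROR,
-                        "Failed to set dict");
-                goto out;
+            gf_log(this->name, GF_LOG_DEBUG, "dict_get on dst-brick failed");
+            goto out;
        }
+    }

-        if (local) {
-                local->dict = dict_ref (req_dict);
-                frame->local = local;
-        }
+    ret = cli_to_glusterd(&req, frame, gf_cli_reset_brick_cbk,
+                          (xdrproc_t)xdr_gf_cli_req, dict,
+                          GLUSTER_CLI_RESET_BRICK, this, cli_rpc_prog, NULL);

-        ret = dict_allocate_and_serialize (req_dict,
-                                           &req.dict.dict_val,
-                                           (size_t *) &req.dict.dict_len);
-        if (ret < 0) {
-                gf_log (this->name, GF_LOG_ERROR,
-                        "failed to serialize the data");
+out:
+    gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);

-                goto out;
-        }
+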
GF_FREE(req.dict.dict_val); - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_DEFRAG_VOLUME, NULL, - this, gf_cli3_1_defrag_volume_cbk, - (xdrproc_t) xdr_gf_cli_req); + return ret; +} + +static int32_t +gf_cli_replace_brick(call_frame_t *frame, xlator_t *this, void *data) +{ + gf_cli_req req = {{ + 0, + }}; + int ret = 0; + dict_t *dict = NULL; + int32_t op = 0; + + if (!frame || !this || !data) { + ret = -1; + goto out; + } + + dict = data; + + ret = dict_get_int32_sizen(dict, "operation", &op); + if (ret) { + gf_log(this->name, GF_LOG_DEBUG, "dict_get on operation failed"); + goto out; + } + + ret = cli_to_glusterd(&req, frame, gf_cli_replace_brick_cbk, + (xdrproc_t)xdr_gf_cli_req, dict, + GLUSTER_CLI_REPLACE_BRICK, this, cli_rpc_prog, NULL); out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); - return ret; + GF_FREE(req.dict.dict_val); + + return ret; } -int32_t -gf_cli3_1_rename_volume (call_frame_t *frame, xlator_t *this, - void *data) +static int32_t +gf_cli_log_rotate(call_frame_t *frame, xlator_t *this, void *data) { - gf_cli_req req = {{0,}}; - int ret = 0; - dict_t *dict = NULL; + gf_cli_req req = {{ + 0, + }}; + int ret = 0; + dict_t *dict = NULL; - if (!frame || !this || !data) { - ret = -1; - goto out; - } + dict = data; - dict = data; + ret = cli_to_glusterd(&req, frame, gf_cli_log_rotate_cbk, + (xdrproc_t)xdr_gf_cli_req, dict, + GLUSTER_CLI_LOG_ROTATE, this, cli_rpc_prog, NULL); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); - ret = dict_allocate_and_serialize (dict, - &req.dict.dict_val, - (size_t *) &req.dict.dict_len); - if (ret < 0) { - gf_log (this->name, GF_LOG_ERROR, - "failed to serialize the data"); + GF_FREE(req.dict.dict_val); + return ret; +} - goto out; - } +static int32_t +gf_cli_sync_volume(call_frame_t *frame, xlator_t *this, void *data) +{ + int ret = 0; + gf_cli_req req = {{ + 0, + }}; + dict_t *dict = NULL; + + dict = data; + ret = cli_to_glusterd(&req, frame, gf_cli_sync_volume_cbk, + (xdrproc_t)xdr_gf_cli_req, dict, + GLUSTER_CLI_SYNC_VOLUME, this, cli_rpc_prog, NULL); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + GF_FREE(req.dict.dict_val); - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_RENAME_VOLUME, NULL, - this, gf_cli3_1_rename_volume_cbk, - (xdrproc_t) xdr_gf_cli_req); + return ret; +} + +static int32_t +gf_cli_getspec(call_frame_t *frame, xlator_t *this, void *data) +{ + gf_getspec_req req = { + 0, + }; + int ret = 0; + dict_t *dict = NULL; + dict_t *op_dict = NULL; + + if (!frame || !this) { + ret = -1; + goto out; + } + + dict = data; + + ret = dict_get_str_sizen(dict, "volid", &req.key); + if (ret) + goto out; + + op_dict = dict_new(); + if (!op_dict) { + ret = -1; + goto out; + } + + // Set the supported min and max op-versions, so glusterd can make a + // decision + ret = dict_set_int32_sizen(op_dict, "min-op-version", GD_OP_VERSION_MIN); + if (ret) { + gf_log(THIS->name, GF_LOG_ERROR, + "Failed to set min-op-version in request dict"); + goto out; + } + + ret = dict_set_int32_sizen(op_dict, "max-op-version", GD_OP_VERSION_MAX); + if (ret) { + gf_log(THIS->name, GF_LOG_ERROR, + "Failed to set max-op-version in request dict"); + goto out; + } + + ret = dict_allocate_and_serialize(op_dict, &req.xdata.xdata_val, + &req.xdata.xdata_len); + if (ret < 0) { + gf_log(THIS->name, GF_LOG_ERROR, DICT_SERIALIZE_FAIL); + goto out; + } + + ret = cli_cmd_submit(NULL, &req, frame, &cli_handshake_prog, + GF_HNDSK_GETSPEC, NULL, this, gf_cli_getspec_cbk, + 
(xdrproc_t)xdr_gf_getspec_req); out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + if (op_dict) { + dict_unref(op_dict); + } + GF_FREE(req.xdata.xdata_val); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); - return ret; + return ret; } -int32_t -gf_cli3_1_reset_volume (call_frame_t *frame, xlator_t *this, - void *data) +static int32_t +gf_cli_quota(call_frame_t *frame, xlator_t *this, void *data) { - gf_cli_req req = {{0,}}; - int ret = 0; - dict_t *dict = NULL; + gf_cli_req req = {{ + 0, + }}; + int ret = 0; + dict_t *dict = NULL; + + dict = data; + + ret = cli_to_glusterd(&req, frame, gf_cli_quota_cbk, + (xdrproc_t)xdr_gf_cli_req, dict, GLUSTER_CLI_QUOTA, + this, cli_rpc_prog, NULL); + GF_FREE(req.dict.dict_val); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + return ret; +} - if (!frame || !this || !data) { - ret = -1; - goto out; - } +static int32_t +gf_cli_pmap_b2p(call_frame_t *frame, xlator_t *this, void *data) +{ + pmap_port_by_brick_req req = { + 0, + }; + int ret = 0; + dict_t *dict = NULL; - dict = data; + if (!frame || !this) { + ret = -1; + goto out; + } - ret = dict_allocate_and_serialize (dict, - &req.dict.dict_val, - (size_t *)&req.dict.dict_len); - if (ret < 0) { - gf_log (this->name, GF_LOG_ERROR, - "failed to get serialized length of dict"); - goto out; - } + dict = data; + + ret = dict_get_str_sizen(dict, "brick", &req.brick); + if (ret) + goto out; - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_RESET_VOLUME, NULL, - this, gf_cli3_1_reset_volume_cbk, - (xdrproc_t) xdr_gf_cli_req); + ret = cli_cmd_submit(NULL, &req, frame, &cli_pmap_prog, GF_PMAP_PORTBYBRICK, + NULL, this, gf_cli_pmap_b2p_cbk, + (xdrproc_t)xdr_pmap_port_by_brick_req); out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); - return ret; + return ret; } -int32_t -gf_cli3_1_set_volume (call_frame_t *frame, xlator_t *this, - void *data) +static int +gf_cli_fsm_log_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) { - gf_cli_req req = {{0,}}; - int ret = 0; - dict_t *dict = NULL; + gf1_cli_fsm_log_rsp rsp = { + 0, + }; + int ret = -1; + dict_t *dict = NULL; + int tr_count = 0; + char key[64] = {0}; + int keylen; + int i = 0; + char *old_state = NULL; + char *new_state = NULL; + char *event = NULL; + char *time = NULL; + + GF_ASSERT(myframe); + + if (-1 == req->rpc_status) { + goto out; + } + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf1_cli_fsm_log_rsp); + if (ret < 0) { + gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + goto out; + } + + if (rsp.op_ret) { + if (strcmp(rsp.op_errstr, "")) + cli_err("%s", rsp.op_errstr); + cli_err("fsm log unsuccessful"); + ret = rsp.op_ret; + goto out; + } + + dict = dict_new(); + if (!dict) { + ret = -1; + goto out; + } + + ret = dict_unserialize(rsp.fsm_log.fsm_log_val, rsp.fsm_log.fsm_log_len, + &dict); + + if (ret) { + cli_err(DICT_UNSERIALIZE_FAIL); + goto out; + } + + ret = dict_get_int32_sizen(dict, "count", &tr_count); + if (!ret && tr_count) + cli_out("number of transitions: %d", tr_count); + else + cli_err("No transitions"); + for (i = 0; i < tr_count; i++) { + keylen = snprintf(key, sizeof(key), "log%d-old-state", i); + ret = dict_get_strn(dict, key, keylen, &old_state); + if (ret) + goto out; - if (!frame || !this || !data) { - ret = -1; - goto out; - } + keylen = snprintf(key, sizeof(key), "log%d-event", i); + ret = dict_get_strn(dict, key, keylen, &event); + if (ret) + goto out; - dict = data; + keylen = snprintf(key, 
sizeof(key), "log%d-new-state", i); + ret = dict_get_strn(dict, key, keylen, &new_state); + if (ret) + goto out; - ret = dict_allocate_and_serialize (dict, - &req.dict.dict_val, - (size_t *)&req.dict.dict_len); - if (ret < 0) { - gf_log (this->name, GF_LOG_DEBUG, - "failed to get serialized length of dict"); - goto out; - } + keylen = snprintf(key, sizeof(key), "log%d-time", i); + ret = dict_get_strn(dict, key, keylen, &time); + if (ret) + goto out; + cli_out( + "Old State: [%s]\n" + "New State: [%s]\n" + "Event : [%s]\n" + "timestamp: [%s]\n", + old_state, new_state, event, time); + } - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_SET_VOLUME, NULL, - this, gf_cli3_1_set_volume_cbk, - (xdrproc_t) xdr_gf_cli_req); + ret = rsp.op_ret; out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + cli_cmd_broadcast_response(ret); + if (dict) { + dict_unref(dict); + } + gf_free_xdr_fsm_log_rsp(rsp); - return ret; + return ret; } -int32_t -gf_cli3_1_add_brick (call_frame_t *frame, xlator_t *this, - void *data) +static int32_t +gf_cli_fsm_log(call_frame_t *frame, xlator_t *this, void *data) +{ + int ret = -1; + gf1_cli_fsm_log_req req = { + 0, + }; + + GF_ASSERT(frame); + GF_ASSERT(this); + GF_ASSERT(data); + + if (!frame || !this || !data) + goto out; + req.name = data; + ret = cli_cmd_submit(NULL, &req, frame, cli_rpc_prog, GLUSTER_CLI_FSM_LOG, + NULL, this, gf_cli_fsm_log_cbk, + (xdrproc_t)xdr_gf1_cli_fsm_log_req); + +out: + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + + return ret; +} + +static int +gf_cli_gsync_config_command(dict_t *dict) { - gf_cli_req req = {{0,}}; - int ret = 0; - dict_t *dict = NULL; - char *volname = NULL; - int32_t count = 0; + runner_t runner = { + 0, + }; + char *subop = NULL; + char *gwd = NULL; + char *slave = NULL; + char *confpath = NULL; + char *master = NULL; + char *op_name = NULL; + int ret = -1; + char conf_path[PATH_MAX] = ""; + + if (dict_get_str_sizen(dict, "subop", &subop) != 0) + return -1; + + if (strcmp(subop, "get") != 0 && strcmp(subop, "get-all") != 0) { + cli_out(GEOREP " config updated successfully"); + return 0; + } + + if (dict_get_str_sizen(dict, "glusterd_workdir", &gwd) != 0 || + dict_get_str_sizen(dict, "slave", &slave) != 0) + return -1; + + if (dict_get_str_sizen(dict, "master", &master) != 0) + master = NULL; + if (dict_get_str_sizen(dict, "op_name", &op_name) != 0) + op_name = NULL; + + ret = dict_get_str_sizen(dict, "conf_path", &confpath); + if (ret || !confpath) { + ret = snprintf(conf_path, sizeof(conf_path) - 1, + "%s/" GEOREP "/gsyncd_template.conf", gwd); + conf_path[ret] = '\0'; + confpath = conf_path; + } + + runinit(&runner); + runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "-c", NULL); + runner_argprintf(&runner, "%s", confpath); + runner_argprintf(&runner, "--iprefix=%s", DATADIR); + if (master) + runner_argprintf(&runner, ":%s", master); + runner_add_arg(&runner, slave); + runner_argprintf(&runner, "--config-%s", subop); + if (op_name) + runner_add_arg(&runner, op_name); + + return runner_run(&runner); +} - if (!frame || !this || !data) { +static int +gf_cli_print_status(char **title_values, gf_gsync_status_t **sts_vals, + int *spacing, int gsync_count, int number_of_fields, + int is_detail) +{ + int i = 0; + int j = 0; + int ret = 0; + int status_fields = 8; /* Indexed at 0 */ + int total_spacing = 0; + char **output_values = NULL; + char *tmp = NULL; + char *hyphens = NULL; + + /* calculating spacing for hyphens */ + for (i = 0; i < number_of_fields; i++) { + /* Suppressing detail output for status 
*/ + if ((!is_detail) && (i > status_fields)) { + /* Suppressing detailed output for + * status */ + continue; + } + spacing[i] += 3; /* Adding extra space to + distinguish between fields */ + total_spacing += spacing[i]; + } + total_spacing += 4; /* For the spacing between the fields */ + + /* char pointers for each field */ + output_values = GF_MALLOC(number_of_fields * sizeof(char *), + gf_common_mt_char); + if (!output_values) { + ret = -1; + goto out; + } + for (i = 0; i < number_of_fields; i++) { + output_values[i] = GF_CALLOC(spacing[i] + 1, sizeof(char), + gf_common_mt_char); + if (!output_values[i]) { + ret = -1; + goto out; + } + } + + cli_out(" "); + + /* setting the title "NODE", "MASTER", etc. from title_values[] + and printing the same */ + for (j = 0; j < number_of_fields; j++) { + if ((!is_detail) && (j > status_fields)) { + /* Suppressing detailed output for + * status */ + output_values[j][0] = '\0'; + continue; + } + memset(output_values[j], ' ', spacing[j]); + memcpy(output_values[j], title_values[j], strlen(title_values[j])); + output_values[j][spacing[j]] = '\0'; + } + cli_out("%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s", output_values[0], + output_values[1], output_values[2], output_values[3], + output_values[4], output_values[5], output_values[6], + output_values[7], output_values[8], output_values[9], + output_values[10], output_values[11], output_values[12], + output_values[13], output_values[14], output_values[15]); + + hyphens = GF_MALLOC((total_spacing + 1) * sizeof(char), gf_common_mt_char); + if (!hyphens) { + ret = -1; + goto out; + } + + /* setting and printing the hyphens */ + memset(hyphens, '-', total_spacing); + hyphens[total_spacing] = '\0'; + cli_out("%s", hyphens); + + for (i = 0; i < gsync_count; i++) { + for (j = 0; j < number_of_fields; j++) { + if ((!is_detail) && (j > status_fields)) { + /* Suppressing detailed output for + * status */ + output_values[j][0] = '\0'; + continue; + } + tmp = get_struct_variable(j, sts_vals[i]); + if (!tmp) { + gf_log("", GF_LOG_ERROR, "struct member empty."); ret = -1; goto out; + } + memset(output_values[j], ' ', spacing[j]); + memcpy(output_values[j], tmp, strlen(tmp)); + output_values[j][spacing[j]] = '\0'; } - dict = data; + cli_out("%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s", + output_values[0], output_values[1], output_values[2], + output_values[3], output_values[4], output_values[5], + output_values[6], output_values[7], output_values[8], + output_values[9], output_values[10], output_values[11], + output_values[12], output_values[13], output_values[14], + output_values[15]); + } - ret = dict_get_str (dict, "volname", &volname); +out: + if (output_values) { + for (i = 0; i < number_of_fields; i++) { + if (output_values[i]) + GF_FREE(output_values[i]); + } + GF_FREE(output_values); + } - if (ret) - goto out; + if (hyphens) + GF_FREE(hyphens); - ret = dict_get_int32 (dict, "count", &count); - if (ret) - goto out; + return ret; +} + +int +gf_gsync_status_t_comparator(const void *p, const void *q) +{ + char *slavekey1 = NULL; + char *slavekey2 = NULL; + slavekey1 = get_struct_variable(20, (*(gf_gsync_status_t **)p)); + slavekey2 = get_struct_variable(20, (*(gf_gsync_status_t **)q)); + if (!slavekey1 || !slavekey2) { + gf_log("cli", GF_LOG_ERROR, "struct member empty."); + return 0; + } - ret = dict_allocate_and_serialize (dict, - &req.dict.dict_val, - (size_t *)&req.dict.dict_len); - if (ret < 0) { - gf_log (this->name, GF_LOG_DEBUG, - "failed to get serialized length of dict"); + return 
strcmp(slavekey1, slavekey2); +} + +static int +gf_cli_read_status_data(dict_t *dict, gf_gsync_status_t **sts_vals, + int *spacing, int gsync_count, int number_of_fields) +{ + char *tmp = NULL; + char sts_val_name[PATH_MAX] = ""; + int ret = 0; + int i = 0; + int j = 0; + + /* Storing per node status info in each object */ + for (i = 0; i < gsync_count; i++) { + snprintf(sts_val_name, sizeof(sts_val_name), "status_value%d", i); + + /* Fetching the values from dict, and calculating + the max length for each field */ + ret = dict_get_bin(dict, sts_val_name, (void **)&(sts_vals[i])); + if (ret) + goto out; + + for (j = 0; j < number_of_fields; j++) { + tmp = get_struct_variable(j, sts_vals[i]); + if (!tmp) { + gf_log("", GF_LOG_ERROR, "struct member empty."); + ret = -1; goto out; + } + if (strlen(tmp) > spacing[j]) + spacing[j] = strlen(tmp); } + } - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_ADD_BRICK, NULL, - this, gf_cli3_1_add_brick_cbk, - (xdrproc_t) xdr_gf_cli_req); + /* Sort based on Session Slave */ + qsort(sts_vals, gsync_count, sizeof(gf_gsync_status_t *), + gf_gsync_status_t_comparator); out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} - if (req.dict.dict_val) { - GF_FREE (req.dict.dict_val); - } +static int +gf_cli_gsync_status_output(dict_t *dict, gf_boolean_t is_detail) +{ + int gsync_count = 0; + int i = 0; + int ret = 0; + int spacing[16] = {0}; + int num_of_fields = 16; + char errmsg[1024] = ""; + char *master = NULL; + char *slave = NULL; + static char *title_values[] = {"MASTER NODE", + "MASTER VOL", + "MASTER BRICK", + "SLAVE USER", + "SLAVE", + "SLAVE NODE", + "STATUS", + "CRAWL STATUS", + "LAST_SYNCED", + "ENTRY", + "DATA", + "META", + "FAILURES", + "CHECKPOINT TIME", + "CHECKPOINT COMPLETED", + "CHECKPOINT COMPLETION TIME"}; + gf_gsync_status_t **sts_vals = NULL; + + /* Checks if any session is active or not */ + ret = dict_get_int32_sizen(dict, "gsync-count", &gsync_count); + if (ret) { + ret = dict_get_str_sizen(dict, "master", &master); + + ret = dict_get_str_sizen(dict, "slave", &slave); + + if (master) { + if (slave) + snprintf(errmsg, sizeof(errmsg), + "No active geo-replication sessions between %s" + " and %s", + master, slave); + else + snprintf(errmsg, sizeof(errmsg), + "No active geo-replication sessions for %s", master); + } else + snprintf(errmsg, sizeof(errmsg), + "No active geo-replication sessions"); + + gf_log("cli", GF_LOG_INFO, "%s", errmsg); + cli_out("%s", errmsg); + ret = -1; + goto out; + } + + for (i = 0; i < num_of_fields; i++) + spacing[i] = strlen(title_values[i]); + + /* gsync_count = number of nodes reporting output. 
+ each sts_val object will store output of each + node */ + sts_vals = GF_MALLOC(gsync_count * sizeof(gf_gsync_status_t *), + gf_common_mt_char); + if (!sts_vals) { + ret = -1; + goto out; + } + + ret = gf_cli_read_status_data(dict, sts_vals, spacing, gsync_count, + num_of_fields); + if (ret) { + gf_log("", GF_LOG_ERROR, "Unable to read status data"); + goto out; + } + + ret = gf_cli_print_status(title_values, sts_vals, spacing, gsync_count, + num_of_fields, is_detail); + if (ret) { + gf_log("", GF_LOG_ERROR, "Unable to print status output"); + goto out; + } - return ret; +out: + if (sts_vals) + GF_FREE(sts_vals); + + return ret; } -int32_t -gf_cli3_1_remove_brick (call_frame_t *frame, xlator_t *this, - void *data) -{ - gf_cli_req req = {{0,}};; - gf_cli_req status_req = {{0,}};; - int ret = 0; - dict_t *dict = NULL; - int32_t command = 0; - char *volname = NULL; - dict_t *req_dict = NULL; - int32_t cmd = 0; - cli_local_t *local = NULL; - - if (!frame || !this || !data) { +static int32_t +write_contents_to_common_pem_file(dict_t *dict, int output_count) +{ + char *workdir = NULL; + char common_pem_file[PATH_MAX] = ""; + char *output = NULL; + char output_name[32] = ""; + int bytes_written = 0; + int fd = -1; + int ret = -1; + int i = -1; + + ret = dict_get_str_sizen(dict, "glusterd_workdir", &workdir); + if (ret || !workdir) { + gf_log("", GF_LOG_ERROR, "Unable to fetch workdir"); + ret = -1; + goto out; + } + + snprintf(common_pem_file, sizeof(common_pem_file), + "%s/geo-replication/common_secret.pem.pub", workdir); + + sys_unlink(common_pem_file); + + fd = open(common_pem_file, O_WRONLY | O_CREAT, 0600); + if (fd == -1) { + gf_log("", GF_LOG_ERROR, "Failed to open %s Error : %s", + common_pem_file, strerror(errno)); + ret = -1; + goto out; + } + + for (i = 1; i <= output_count; i++) { + ret = snprintf(output_name, sizeof(output_name), "output_%d", i); + ret = dict_get_strn(dict, output_name, ret, &output); + if (ret) { + gf_log("", GF_LOG_ERROR, "Failed to get %s.", output_name); + cli_out("Unable to fetch output."); + } + if (output) { + bytes_written = sys_write(fd, output, strlen(output)); + if (bytes_written != strlen(output)) { + gf_log("", GF_LOG_ERROR, "Failed to write to %s", + common_pem_file); ret = -1; goto out; - } - - local = cli_local_get (); - if (!local) { + } + /* Adding the new line character */ + bytes_written = sys_write(fd, "\n", 1); + if (bytes_written != 1) { + gf_log("", GF_LOG_ERROR, "Failed to add new line char"); ret = -1; - gf_log (this->name, GF_LOG_ERROR, - "Out of memory"); goto out; + } + output = NULL; } + } - frame->local = local; - - dict = data; + cli_out("Common secret pub file present at %s", common_pem_file); + ret = 0; +out: + if (fd >= 0) + sys_close(fd); - local->dict = dict_ref (dict); + gf_log("", GF_LOG_DEBUG, RETURNING, ret); + return ret; +} - ret = dict_get_str (dict, "volname", &volname); - if (ret) - goto out; +static int +gf_cli_sys_exec_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + int ret = -1; + int output_count = -1; + int i = -1; + char *output = NULL; + char *command = NULL; + char output_name[32] = ""; + gf_cli_rsp rsp = { + 0, + }; + dict_t *dict = NULL; + + GF_ASSERT(myframe); + + if (req->rpc_status == -1) { + goto out; + } + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + goto out; + } + + dict = dict_new(); + + if (!dict) { + ret = -1; + goto out; + } + + ret = 
dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict); + + if (ret) + goto out; + + if (rsp.op_ret) { + cli_err("%s", rsp.op_errstr ? rsp.op_errstr : "Command failed."); + ret = rsp.op_ret; + goto out; + } - ret = dict_get_int32 (dict, "command", &command); - if (ret) - goto out; + ret = dict_get_int32_sizen(dict, "output_count", &output_count); + if (ret) { + cli_out("Command executed successfully."); + ret = 0; + goto out; + } - if ((command != GF_OP_CMD_STATUS) && - (command != GF_OP_CMD_STOP)) { + ret = dict_get_str_sizen(dict, "command", &command); + if (ret) { + gf_log("", GF_LOG_ERROR, "Unable to get command from dict"); + goto out; + } - ret = dict_allocate_and_serialize (dict, - &req.dict.dict_val, - (size_t *)&req.dict.dict_len); - if (ret < 0) { - gf_log (this->name, GF_LOG_DEBUG, - "failed to get serialized length of dict"); - goto out; - } + if (!strcmp(command, "gsec_create")) { + ret = write_contents_to_common_pem_file(dict, output_count); + if (!ret) + goto out; + } - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_REMOVE_BRICK, NULL, - this, gf_cli3_1_remove_brick_cbk, - (xdrproc_t) xdr_gf_cli_req); - } else { - /* Need rebalance status to e sent :-) */ - req_dict = dict_new (); - if (!req_dict) { - ret = -1; - goto out; - } + for (i = 1; i <= output_count; i++) { + ret = snprintf(output_name, sizeof(output_name), "output_%d", i); + ret = dict_get_strn(dict, output_name, ret, &output); + if (ret) { + gf_log("", GF_LOG_ERROR, "Failed to get %s.", output_name); + cli_out("Unable to fetch output."); + } + if (output) { + cli_out("%s", output); + output = NULL; + } + } - ret = dict_set_str (req_dict, "volname", volname); - if (ret) { - gf_log (THIS->name, GF_LOG_ERROR, - "Failed to set dict"); - goto out; - } + ret = 0; +out: + if (dict) + dict_unref(dict); + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); + return ret; +} - if (command == GF_OP_CMD_STATUS) - cmd |= GF_DEFRAG_CMD_STATUS; - else - cmd |= GF_DEFRAG_CMD_STOP; +static int +gf_cli_copy_file_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + int ret = -1; + gf_cli_rsp rsp = { + 0, + }; + dict_t *dict = NULL; - ret = dict_set_int32 (req_dict, "rebalance-command", (int32_t) cmd); - if (ret) { - gf_log (THIS->name, GF_LOG_ERROR, - "Failed to set dict"); - goto out; - } + GF_ASSERT(myframe); - ret = dict_allocate_and_serialize (req_dict, - &status_req.dict.dict_val, - (size_t *) &status_req.dict.dict_len); - if (ret < 0) { - gf_log (this->name, GF_LOG_ERROR, - "failed to serialize the data"); + if (req->rpc_status == -1) { + goto out; + } - goto out; - } + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + goto out; + } - ret = cli_cmd_submit (&status_req, frame, cli_rpc_prog, - GLUSTER_CLI_DEFRAG_VOLUME, NULL, - this, gf_cli3_remove_brick_status_cbk, - (xdrproc_t) xdr_gf_cli_req); + dict = dict_new(); - } + if (!dict) { + ret = -1; + goto out; + } -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict); - if (req.dict.dict_val) { - GF_FREE (req.dict.dict_val); - } + if (ret) + goto out; - if (status_req.dict.dict_val) - GF_FREE (status_req.dict.dict_val); + if (rsp.op_ret) { + cli_err("%s", rsp.op_errstr ? 
rsp.op_errstr : "Copy unsuccessful");
+        ret = rsp.op_ret;
+        goto out;
+    }
 
-        if (req_dict)
-                dict_unref (req_dict);
+    cli_out("Successfully copied file.");
 
-        return ret;
+out:
+    if (dict)
+        dict_unref(dict);
+    cli_cmd_broadcast_response(ret);
+    gf_free_xdr_cli_rsp(rsp);
+    return ret;
 }
 
-int32_t
-gf_cli3_1_replace_brick (call_frame_t *frame, xlator_t *this,
-                         void *data)
-{
-        gf_cli_req req = {{0,}};
-        int ret = 0;
-        cli_local_t *local = NULL;
-        dict_t *dict = NULL;
-        char *src_brick = NULL;
-        char *dst_brick = NULL;
-        char *volname = NULL;
-        int32_t op = 0;
-
-        if (!frame || !this || !data) {
-                ret = -1;
-                goto out;
-        }
+static int
+gf_cli_gsync_set_cbk(struct rpc_req *req, struct iovec *iov, int count,
+                     void *myframe)
+{
+    int ret = -1;
+    gf_cli_rsp rsp = {
+        0,
+    };
+    dict_t *dict = NULL;
+    char *gsync_status = NULL;
+    char *master = NULL;
+    char *slave = NULL;
+    int32_t type = 0;
+    gf_boolean_t status_detail = _gf_false;
+
+    GF_ASSERT(myframe);
+
+    if (req->rpc_status == -1) {
+        goto out;
+    }
+
+    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
+    if (ret < 0) {
+        gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
+               XDR_DECODE_FAIL);
+        goto out;
+    }
+
+    dict = dict_new();
+
+    if (!dict) {
+        ret = -1;
+        goto out;
+    }
+
+    ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
+
+    if (ret)
+        goto out;
+
+    if (global_state->mode & GLUSTER_MODE_XML) {
+        ret = cli_xml_output_vol_gsync(dict, rsp.op_ret, rsp.op_errno,
+                                       rsp.op_errstr);
+        if (ret)
+            gf_log("cli", GF_LOG_ERROR, XML_ERROR);
+        goto out;
+    }
 
-        dict = data;
+    if (rsp.op_ret) {
+        cli_err("%s",
+                rsp.op_errstr ? rsp.op_errstr : GEOREP " command unsuccessful");
+        ret = rsp.op_ret;
+        goto out;
+    }
+
+    ret = dict_get_str_sizen(dict, "gsync-status", &gsync_status);
+    if (!ret)
+        cli_out("%s", gsync_status);
+
+    ret = dict_get_int32_sizen(dict, "type", &type);
+    if (ret) {
+        gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
+               "failed to get type");
+        goto out;
+    }
+
+    switch (type) {
+        case GF_GSYNC_OPTION_TYPE_START:
+        case GF_GSYNC_OPTION_TYPE_STOP:
+            if (dict_get_str_sizen(dict, "master", &master) != 0)
+                master = "???";
+            if (dict_get_str_sizen(dict, "slave", &slave) != 0)
+                slave = "???";
+
+            cli_out(
+                "%s " GEOREP " session between %s & %s has been successful",
+                type == GF_GSYNC_OPTION_TYPE_START ? "Starting" : "Stopping",
+                master, slave);
+            break;
+
+        case GF_GSYNC_OPTION_TYPE_PAUSE:
+        case GF_GSYNC_OPTION_TYPE_RESUME:
+            if (dict_get_str_sizen(dict, "master", &master) != 0)
+                master = "???";
+            if (dict_get_str_sizen(dict, "slave", &slave) != 0)
+                slave = "???";
+
+            cli_out("%s " GEOREP " session between %s & %s has been successful",
+                    type == GF_GSYNC_OPTION_TYPE_PAUSE ? "Pausing" : "Resuming",
+                    master, slave);
+            break;
+
+        case GF_GSYNC_OPTION_TYPE_CONFIG:
+            ret = gf_cli_gsync_config_command(dict);
+            break;
+
+        case GF_GSYNC_OPTION_TYPE_STATUS:
+            status_detail = dict_get_str_boolean(dict, "status-detail",
+                                                 _gf_false);
+            ret = gf_cli_gsync_status_output(dict, status_detail);
+            break;
+
+        case GF_GSYNC_OPTION_TYPE_DELETE:
+            if (dict_get_str_sizen(dict, "master", &master) != 0)
+                master = "???";
+            if (dict_get_str_sizen(dict, "slave", &slave) != 0)
+                slave = "???";
+            cli_out("Deleting " GEOREP
+                    " session between %s & %s has been successful",
+                    master, slave);
+            break;
+
+        case GF_GSYNC_OPTION_TYPE_CREATE:
+            if (dict_get_str_sizen(dict, "master", &master) != 0)
+                master = "???";
+            if (dict_get_str_sizen(dict, "slave", &slave) != 0)
+                slave = "???";
+            cli_out("Creating " GEOREP
+                    " session between %s & %s has been successful",
+                    master, slave);
+            break;
 
-        local = cli_local_get ();
-        if (!local) {
-                ret = -1;
-                gf_log (this->name, GF_LOG_ERROR,
-                        "Out of memory");
-                goto out;
-        }
+        default:
+            cli_out(GEOREP " command executed successfully");
+    }
 
-        local->dict = dict_ref (dict);
-        frame->local = local;
+out:
+    if (dict)
+        dict_unref(dict);
+    cli_cmd_broadcast_response(ret);
+    gf_free_xdr_cli_rsp(rsp);
+    return ret;
+}
 
-        ret = dict_get_int32 (dict, "operation", &op);
-        if (ret) {
-                gf_log (this->name, GF_LOG_DEBUG,
-                        "dict_get on operation failed");
-                goto out;
-        }
-        ret = dict_get_str (dict, "volname", &volname);
-        if (ret) {
-                gf_log (this->name, GF_LOG_DEBUG,
-                        "dict_get on volname failed");
-                goto out;
-        }
+static int32_t
+gf_cli_sys_exec(call_frame_t *frame, xlator_t *this, void *data)
+{
+    int ret = 0;
+    dict_t *dict = NULL;
+    gf_cli_req req = {{
+        0,
+    }};
+
+    if (!frame || !this || !data) {
+        ret = -1;
+        gf_log("cli", GF_LOG_ERROR, "Invalid data");
+        goto out;
+    }
+
+    dict = data;
+
+    ret = cli_to_glusterd(&req, frame, gf_cli_sys_exec_cbk,
+                          (xdrproc_t)xdr_gf_cli_req, dict, GLUSTER_CLI_SYS_EXEC,
+                          this, cli_rpc_prog, NULL);
 
-        ret = dict_get_str (dict, "src-brick", &src_brick);
-        if (ret) {
-                gf_log (this->name, GF_LOG_DEBUG,
-                        "dict_get on src-brick failed");
-                goto out;
-        }
+out:
+    GF_FREE(req.dict.dict_val);
+    return ret;
+}
 
-        ret = dict_get_str (dict, "dst-brick", &dst_brick);
-        if (ret) {
-                gf_log (this->name, GF_LOG_DEBUG,
-                        "dict_get on dst-brick failed");
-                goto out;
-        }
+static int32_t
+gf_cli_copy_file(call_frame_t *frame, xlator_t *this, void *data)
+{
+    int ret = 0;
+    dict_t *dict = NULL;
+    gf_cli_req req = {{
+        0,
+    }};
+
+    if (!frame || !this || !data) {
+        ret = -1;
+        gf_log("cli", GF_LOG_ERROR, "Invalid data");
+        goto out;
+    }
+
+    dict = data;
+
+    ret = cli_to_glusterd(&req, frame, gf_cli_copy_file_cbk,
+                          (xdrproc_t)xdr_gf_cli_req, dict,
+                          GLUSTER_CLI_COPY_FILE, this, cli_rpc_prog, NULL);
 
-        gf_log (this->name, GF_LOG_DEBUG,
-                "Received command replace-brick %s with "
-                "%s with operation=%d", src_brick,
-                dst_brick, op);
+out:
+    GF_FREE(req.dict.dict_val);
+    return ret;
+}
 
-        ret = dict_allocate_and_serialize (dict,
-                                           &req.dict.dict_val,
-                                           (size_t *)&req.dict.dict_len);
-        if (ret < 0) {
-                gf_log (this->name, GF_LOG_DEBUG,
-                        "failed to get serialized length of dict");
-                goto out;
-        }
+static int32_t
+gf_cli_gsync_set(call_frame_t *frame, xlator_t *this, void *data)
+{
+    int ret = 0;
+    dict_t *dict = NULL;
+    gf_cli_req req = {{
+        0,
+    }};
+
+    dict = data;
+
+    ret = cli_to_glusterd(&req, frame, gf_cli_gsync_set_cbk,
+                          
(xdrproc_t)xdr_gf_cli_req, dict, + GLUSTER_CLI_GSYNC_SET, this, cli_rpc_prog, NULL); + GF_FREE(req.dict.dict_val); -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} - if (req.dict.dict_val) { - GF_FREE (req.dict.dict_val); - } +int +cli_profile_info_percentage_cmp(void *a, void *b) +{ + cli_profile_info_t *ia = NULL; + cli_profile_info_t *ib = NULL; - return ret; -} + ia = a; + ib = b; + if (ia->percentage_avg_latency < ib->percentage_avg_latency) + return -1; + else if (ia->percentage_avg_latency > ib->percentage_avg_latency) + return 1; + return 0; +} -int32_t -gf_cli3_1_log_rotate (call_frame_t *frame, xlator_t *this, - void *data) +static void +cmd_profile_volume_brick_out(dict_t *dict, int count, int interval) { - gf_cli_req req = {{0,}}; - int ret = 0; - dict_t *dict = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; + char key[256] = {0}; + int i = 0; + uint64_t sec = 0; + uint64_t r_count = 0; + uint64_t w_count = 0; + uint64_t rb_counts[32] = {0}; + uint64_t wb_counts[32] = {0}; + cli_profile_info_t profile_info[GF_FOP_MAXVALUE] = {{0}}; + cli_profile_info_t upcall_info[GF_UPCALL_FLAGS_MAXVALUE] = { + {0}, + }; + char output[128] = {0}; + int per_line = 0; + char read_blocks[128] = {0}; + char write_blocks[128] = {0}; + int index = 0; + int is_header_printed = 0; + int ret = 0; + double total_percentage_latency = 0; + + for (i = 0; i < 32; i++) { + snprintf(key, sizeof(key), "%d-%d-read-%" PRIu32, count, interval, + (1U << i)); + ret = dict_get_uint64(dict, key, &rb_counts[i]); + if (ret) { + gf_log("cli", GF_LOG_DEBUG, "failed to get %s from dict", key); } + } - dict = data; + for (i = 0; i < 32; i++) { + snprintf(key, sizeof(key), "%d-%d-write-%" PRIu32, count, interval, + (1U << i)); + ret = dict_get_uint64(dict, key, &wb_counts[i]); + if (ret) { + gf_log("cli", GF_LOG_DEBUG, "failed to get %s from dict", key); + } + } - ret = dict_allocate_and_serialize (dict, - &req.dict.dict_val, - (size_t *)&req.dict.dict_len); + for (i = 0; i < GF_UPCALL_FLAGS_MAXVALUE; i++) { + snprintf(key, sizeof(key), "%d-%d-%d-upcall-hits", count, interval, i); + ret = dict_get_uint64(dict, key, &upcall_info[i].fop_hits); + if (ret) { + gf_log("cli", GF_LOG_DEBUG, "failed to get %s from dict", key); + } + upcall_info[i].fop_name = (char *)gf_upcall_list[i]; + } - if (ret < 0) { - gf_log (THIS->name, GF_LOG_ERROR, "failed to serialize dict"); - goto out; + for (i = 0; i < GF_FOP_MAXVALUE; i++) { + snprintf(key, sizeof(key), "%d-%d-%d-hits", count, interval, i); + ret = dict_get_uint64(dict, key, &profile_info[i].fop_hits); + if (ret) { + gf_log("cli", GF_LOG_DEBUG, "failed to get %s from dict", key); } - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_LOG_ROTATE, NULL, - this, gf_cli3_1_log_rotate_cbk, - (xdrproc_t) xdr_gf_cli_req); + snprintf(key, sizeof(key), "%d-%d-%d-avglatency", count, interval, i); + ret = dict_get_double(dict, key, &profile_info[i].avg_latency); + if (ret) { + gf_log("cli", GF_LOG_DEBUG, "failed to get %s from dict", key); + } + snprintf(key, sizeof(key), "%d-%d-%d-minlatency", count, interval, i); + ret = dict_get_double(dict, key, &profile_info[i].min_latency); + if (ret) { + gf_log("cli", GF_LOG_DEBUG, "failed to get %s from dict", key); + } -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + snprintf(key, sizeof(key), "%d-%d-%d-maxlatency", count, interval, i); + ret = dict_get_double(dict, key, &profile_info[i].max_latency); + if (ret) { + gf_log("cli", GF_LOG_DEBUG, "failed to get %s from dict", key); + } + 
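
/* Illustrative sketch, not part of this patch: the %-latency column this
   function prints is each FOP's share of total time spent, i.e.
   100 * (hits * avg_latency) / sum(hits * avg_latency). The same arithmetic
   standalone, with made-up numbers and qsort() standing in for
   gf_array_insertionsort(): */
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    const char *name;
    unsigned long hits;
    double avg_latency; /* microseconds */
    double pct;
} fop_stat_t;

static int
pct_cmp(const void *a, const void *b)
{
    double pa = ((const fop_stat_t *)a)->pct;
    double pb = ((const fop_stat_t *)b)->pct;
    return (pa > pb) - (pa < pb); /* ascending, like the CLI's comparator */
}

int
main(void)
{
    fop_stat_t fops[] = {
        {"LOOKUP", 1200, 85.0, 0},
        {"WRITE", 300, 640.0, 0},
        {"READ", 500, 210.0, 0},
    };
    size_t n = sizeof(fops) / sizeof(fops[0]), i;
    double total = 0;

    for (i = 0; i < n; i++)
        total += fops[i].hits * fops[i].avg_latency;

    for (i = 0; i < n; i++)
        fops[i].pct = 100 * (fops[i].hits * fops[i].avg_latency) / total;

    qsort(fops, n, sizeof(fops[0]), pct_cmp);

    for (i = 0; i < n; i++)
        printf("%10.2f %13.2f us %14lu %11s\n", fops[i].pct,
               fops[i].avg_latency, fops[i].hits, fops[i].name);
    return 0;
}

+        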
profile_info[i].fop_name = (char *)gf_fop_list[i]; - if (req.dict.dict_val) - GF_FREE (req.dict.dict_val); - return ret; + total_percentage_latency += (profile_info[i].fop_hits * + profile_info[i].avg_latency); + } + if (total_percentage_latency) { + for (i = 0; i < GF_FOP_MAXVALUE; i++) { + profile_info[i] + .percentage_avg_latency = 100 * ((profile_info[i].avg_latency * + profile_info[i].fop_hits) / + total_percentage_latency); + } + gf_array_insertionsort(profile_info, 1, GF_FOP_MAXVALUE - 1, + sizeof(cli_profile_info_t), + cli_profile_info_percentage_cmp); + } + snprintf(key, sizeof(key), "%d-%d-duration", count, interval); + ret = dict_get_uint64(dict, key, &sec); + if (ret) { + gf_log("cli", GF_LOG_DEBUG, "failed to get %s from dict", key); + } + + snprintf(key, sizeof(key), "%d-%d-total-read", count, interval); + ret = dict_get_uint64(dict, key, &r_count); + if (ret) { + gf_log("cli", GF_LOG_DEBUG, "failed to get %s from dict", key); + } + + snprintf(key, sizeof(key), "%d-%d-total-write", count, interval); + ret = dict_get_uint64(dict, key, &w_count); + if (ret) { + gf_log("cli", GF_LOG_DEBUG, "failed to get %s from dict", key); + } + + if (interval == -1) + cli_out("Cumulative Stats:"); + else + cli_out("Interval %d Stats:", interval); + snprintf(output, sizeof(output), "%14s", "Block Size:"); + snprintf(read_blocks, sizeof(read_blocks), "%14s", "No. of Reads:"); + snprintf(write_blocks, sizeof(write_blocks), "%14s", "No. of Writes:"); + index = 14; + for (i = 0; i < 32; i++) { + if ((rb_counts[i] == 0) && (wb_counts[i] == 0)) + continue; + per_line++; + snprintf(output + index, sizeof(output) - index, "%19" PRIu32 "b+ ", + (1U << i)); + if (rb_counts[i]) { + snprintf(read_blocks + index, sizeof(read_blocks) - index, + "%21" PRId64 " ", rb_counts[i]); + } else { + snprintf(read_blocks + index, sizeof(read_blocks) - index, "%21s ", + "0"); + } + if (wb_counts[i]) { + snprintf(write_blocks + index, sizeof(write_blocks) - index, + "%21" PRId64 " ", wb_counts[i]); + } else { + snprintf(write_blocks + index, sizeof(write_blocks) - index, + "%21s ", "0"); + } + index += 22; + if (per_line == 3) { + cli_out("%s", output); + cli_out("%s", read_blocks); + cli_out("%s", write_blocks); + cli_out(" "); + per_line = 0; + snprintf(output, sizeof(output), "%14s", "Block Size:"); + snprintf(read_blocks, sizeof(read_blocks), "%14s", "No. of Reads:"); + snprintf(write_blocks, sizeof(write_blocks), "%14s", + "No. of Writes:"); + index = 14; + } + } + + if (per_line != 0) { + cli_out("%s", output); + cli_out("%s", read_blocks); + cli_out("%s", write_blocks); + } + for (i = 0; i < GF_FOP_MAXVALUE; i++) { + if (profile_info[i].fop_hits == 0) + continue; + if (is_header_printed == 0) { + cli_out("%10s %13s %13s %13s %14s %11s", "%-latency", "Avg-latency", + "Min-Latency", "Max-Latency", "No. 
of calls", "Fop"); + cli_out("%10s %13s %13s %13s %14s %11s", "---------", "-----------", + "-----------", "-----------", "------------", "----"); + is_header_printed = 1; + } + if (profile_info[i].fop_hits) { + cli_out( + "%10.2lf %10.2lf us %10.2lf us %10.2lf us" + " %14" PRId64 " %11s", + profile_info[i].percentage_avg_latency, + profile_info[i].avg_latency, profile_info[i].min_latency, + profile_info[i].max_latency, profile_info[i].fop_hits, + profile_info[i].fop_name); + } + } + + for (i = 0; i < GF_UPCALL_FLAGS_MAXVALUE; i++) { + if (upcall_info[i].fop_hits == 0) + continue; + if (upcall_info[i].fop_hits) { + cli_out( + "%10.2lf %10.2lf us %10.2lf us %10.2lf us" + " %14" PRId64 " %11s", + upcall_info[i].percentage_avg_latency, + upcall_info[i].avg_latency, upcall_info[i].min_latency, + upcall_info[i].max_latency, upcall_info[i].fop_hits, + upcall_info[i].fop_name); + } + } + + cli_out(" "); + cli_out("%12s: %" PRId64 " seconds", "Duration", sec); + cli_out("%12s: %" PRId64 " bytes", "Data Read", r_count); + cli_out("%12s: %" PRId64 " bytes", "Data Written", w_count); + cli_out(" "); } -int32_t -gf_cli3_1_sync_volume (call_frame_t *frame, xlator_t *this, - void *data) +static int32_t +gf_cli_profile_volume_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) { - int ret = 0; - gf_cli_req req = {{0,}}; - dict_t *dict = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + dict_t *dict = NULL; + gf1_cli_stats_op op = GF_CLI_STATS_NONE; + char key[64] = {0}; + int len; + int interval = 0; + int i = 1; + int32_t brick_count = 0; + char *volname = NULL; + char *brick = NULL; + char str[1024] = { + 0, + }; + int stats_cleared = 0; + gf1_cli_info_op info_op = GF_CLI_INFO_NONE; + + GF_ASSERT(myframe); + + if (-1 == req->rpc_status) { + goto out; + } + + gf_log("cli", GF_LOG_DEBUG, "Received resp to profile"); + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + goto out; + } + + dict = dict_new(); + + if (!dict) { + ret = -1; + goto out; + } + + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict); + + if (ret) { + gf_log("", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL); + goto out; + } + + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_vol_profile(dict, rsp.op_ret, rsp.op_errno, + rsp.op_errstr); + if (ret) + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; + } + + ret = dict_get_int32_sizen(dict, "op", (int32_t *)&op); + if (ret) + goto out; + + ret = dict_get_str_sizen(dict, "volname", &volname); + if (ret) + goto out; + + if (rsp.op_ret && strcmp(rsp.op_errstr, "")) { + cli_err("%s", rsp.op_errstr); + } else { + switch (op) { + case GF_CLI_STATS_START: + cli_out("Starting volume profile on %s has been %s ", volname, + (rsp.op_ret) ? "unsuccessful" : "successful"); + break; + case GF_CLI_STATS_STOP: + cli_out("Stopping volume profile on %s has been %s ", volname, + (rsp.op_ret) ? "unsuccessful" : "successful"); + break; + case GF_CLI_STATS_INFO: + break; + default: + cli_out("volume profile on %s has been %s ", volname, + (rsp.op_ret) ? 
"unsuccessful" : "successful"); + break; } + } - dict = data; - ret = dict_allocate_and_serialize (dict, - &req.dict.dict_val, - (size_t *)&req.dict.dict_len); - - if (ret < 0) { - gf_log (THIS->name, GF_LOG_ERROR, "failed to serialize dict"); - goto out; - } + if (rsp.op_ret) { + ret = rsp.op_ret; + goto out; + } - ret = cli_cmd_submit (&req, frame, - cli_rpc_prog, GLUSTER_CLI_SYNC_VOLUME, - NULL, this, gf_cli3_1_sync_volume_cbk, - (xdrproc_t) xdr_gf_cli_req); + if (GF_CLI_STATS_INFO != op) { + ret = 0; + goto out; + } -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - if (req.dict.dict_val) - GF_FREE (req.dict.dict_val); + ret = dict_get_int32_sizen(dict, "count", &brick_count); + if (ret) + goto out; - return ret; -} + if (!brick_count) { + cli_err("All bricks of volume %s are down.", volname); + goto out; + } -int32_t -gf_cli3_1_getspec (call_frame_t *frame, xlator_t *this, - void *data) -{ - gf_getspec_req req = {0,}; - int ret = 0; - dict_t *dict = NULL; + ret = dict_get_int32_sizen(dict, "info-op", (int32_t *)&info_op); + if (ret) + goto out; - if (!frame || !this || !data) { - ret = -1; - goto out; + while (i <= brick_count) { + len = snprintf(key, sizeof(key), "%d-brick", i); + ret = dict_get_strn(dict, key, len, &brick); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Couldn't get brick name"); + goto out; } - dict = data; + ret = dict_get_str_boolean(dict, "nfs", _gf_false); - ret = dict_get_str (dict, "volid", &req.key); if (ret) - goto out; + len = snprintf(str, sizeof(str), "NFS Server : %s", brick); + else + len = snprintf(str, sizeof(str), "Brick: %s", brick); + cli_out("%s", str); + memset(str, '-', len); + cli_out("%s", str); + + if (GF_CLI_INFO_CLEAR == info_op) { + len = snprintf(key, sizeof(key), "%d-stats-cleared", i); + ret = dict_get_int32n(dict, key, len, &stats_cleared); + if (ret) + goto out; + cli_out(stats_cleared ? "Cleared stats." 
+ : "Failed to clear stats."); + } else { + len = snprintf(key, sizeof(key), "%d-cumulative", i); + ret = dict_get_int32n(dict, key, len, &interval); + if (ret == 0) + cmd_profile_volume_brick_out(dict, i, interval); - ret = cli_cmd_submit (&req, frame, &cli_handshake_prog, - GF_HNDSK_GETSPEC, NULL, - this, gf_cli3_1_getspec_cbk, - (xdrproc_t) xdr_gf_getspec_req); + len = snprintf(key, sizeof(key), "%d-interval", i); + ret = dict_get_int32n(dict, key, len, &interval); + if (ret == 0) + cmd_profile_volume_brick_out(dict, i, interval); + } + i++; + } + ret = rsp.op_ret; out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - - return ret; + if (dict) + dict_unref(dict); + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); + return ret; } -int32_t -gf_cli3_1_quota (call_frame_t *frame, xlator_t *this, - void *data) +static int32_t +gf_cli_profile_volume(call_frame_t *frame, xlator_t *this, void *data) { - gf_cli_req req = {{0,}}; - int ret = 0; - dict_t *dict = NULL; - - if (!frame || !this || !data) { - ret = -1; - goto out; - } + int ret = -1; + gf_cli_req req = {{ + 0, + }}; + dict_t *dict = NULL; - dict = data; + GF_ASSERT(frame); + GF_ASSERT(this); + GF_ASSERT(data); - ret = dict_allocate_and_serialize (dict, - &req.dict.dict_val, - (size_t *)&req.dict.dict_len); - if (ret < 0) { - gf_log (this->name, GF_LOG_ERROR, - "failed to get serialized length of dict"); - goto out; - } + dict = data; - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_QUOTA, NULL, - this, gf_cli3_1_quota_cbk, - (xdrproc_t) xdr_gf_cli_req); + ret = cli_to_glusterd(&req, frame, gf_cli_profile_volume_cbk, + (xdrproc_t)xdr_gf_cli_req, dict, + GLUSTER_CLI_PROFILE_VOLUME, this, cli_rpc_prog, NULL); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); - GF_FREE (req.dict.dict_val); -out: - return ret; + GF_FREE(req.dict.dict_val); + return ret; } -int32_t -gf_cli3_1_pmap_b2p (call_frame_t *frame, xlator_t *this, void *data) +static int32_t +gf_cli_top_volume_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) { - pmap_port_by_brick_req req = {0,}; - int ret = 0; - dict_t *dict = NULL; + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + dict_t *dict = NULL; + gf1_cli_stats_op op = GF_CLI_STATS_NONE; + char key[256] = {0}; + int keylen; + int i = 0; + int32_t brick_count = 0; + char brick[1024]; + int32_t members = 0; + char *filename; + char *bricks; + uint64_t value = 0; + int32_t j = 0; + gf1_cli_top_op top_op = GF_CLI_TOP_NONE; + uint64_t nr_open = 0; + uint64_t max_nr_open = 0; + double throughput = 0; + double time = 0; + int32_t time_sec = 0; + long int time_usec = 0; + char timestr[GF_TIMESTR_SIZE] = { + 0, + }; + char *openfd_str = NULL; + gf_boolean_t nfs = _gf_false; + gf_boolean_t clear_stats = _gf_false; + int stats_cleared = 0; + + GF_ASSERT(myframe); + + if (-1 == req->rpc_status) { + goto out; + } + + gf_log("cli", GF_LOG_DEBUG, "Received resp to top"); + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + goto out; + } + + if (rsp.op_ret) { + if (strcmp(rsp.op_errstr, "")) + cli_err("%s", rsp.op_errstr); + cli_err("volume top unsuccessful"); + ret = rsp.op_ret; + goto out; + } - if (!frame || !this || !data) { - ret = -1; - goto out; - } + dict = dict_new(); - dict = data; + if (!dict) { + ret = -1; + goto out; + } - ret = dict_get_str (dict, "brick", &req.brick); - if (ret) - goto out; + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict); - 
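
/* Illustrative sketch, not part of this patch: glusterd returns per-brick
   values under numbered keys ("1-brick", "2-brick", ...). The callbacks
   above build each key with snprintf() and feed its return value (the key
   length, NUL excluded) to the length-checked dict_get_strn()/_int32n()
   lookups. A toy key/value table stands in for dict_t here: */
#include <stdio.h>
#include <string.h>

static const struct {
    const char *key;
    const char *val;
} table[] = {
    {"1-brick", "host1:/data/brick1"},
    {"2-brick", "host2:/data/brick2"},
};

/* stand-in for dict_get_strn(): exact, length-aware key match */
static const char *
get_strn(const char *key, size_t keylen)
{
    size_t i;
    for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
        if (strlen(table[i].key) == keylen &&
            !memcmp(table[i].key, key, keylen))
            return table[i].val;
    return NULL;
}

int
main(void)
{
    char key[64];
    int i;

    for (i = 1; i <= 2; i++) {
        int len = snprintf(key, sizeof(key), "%d-brick", i);
        const char *brick = get_strn(key, (size_t)len);
        printf("Brick: %s\n", brick ? brick : "(missing)");
    }
    return 0;
}

-        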
ret = cli_cmd_submit (&req, frame, &cli_pmap_prog, - GF_PMAP_PORTBYBRICK, NULL, - this, gf_cli3_1_pmap_b2p_cbk, - (xdrproc_t) xdr_pmap_port_by_brick_req); + if (ret) { + gf_log("", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL); + goto out; + } -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + ret = dict_get_int32_sizen(dict, "op", (int32_t *)&op); - return ret; -} + if (op != GF_CLI_STATS_TOP) { + ret = 0; + goto out; + } -static int -gf_cli3_1_fsm_log_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf1_cli_fsm_log_rsp rsp = {0,}; - int ret = -1; - dict_t *dict = NULL; - int tr_count = 0; - char key[256] = {0}; - int i = 0; - char *old_state = NULL; - char *new_state = NULL; - char *event = NULL; - char *time = NULL; - - if (-1 == req->rpc_status) { - goto out; + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_vol_top(dict, rsp.op_ret, rsp.op_errno, + rsp.op_errstr); + if (ret) { + gf_log("cli", GF_LOG_ERROR, XML_ERROR); } + goto out; + } - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf1_cli_fsm_log_rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } + ret = dict_get_int32_sizen(dict, "count", &brick_count); + if (ret) + goto out; + keylen = snprintf(key, sizeof(key), "%d-top-op", 1); + ret = dict_get_int32n(dict, key, keylen, (int32_t *)&top_op); + if (ret) + goto out; - if (rsp.op_ret) { - if (strcmp (rsp.op_errstr, "")) - cli_err ("%s", rsp.op_errstr); - cli_err ("fsm log unsuccessful"); - ret = rsp.op_ret; - goto out; - } + clear_stats = dict_get_str_boolean(dict, "clear-stats", _gf_false); - dict = dict_new (); - if (!dict) { - ret = -1; - goto out; - } + while (i < brick_count) { + i++; + keylen = snprintf(brick, sizeof(brick), "%d-brick", i); + ret = dict_get_strn(dict, brick, keylen, &bricks); + if (ret) + goto out; - ret = dict_unserialize (rsp.fsm_log.fsm_log_val, - rsp.fsm_log.fsm_log_len, - &dict); + nfs = dict_get_str_boolean(dict, "nfs", _gf_false); - if (ret) { - cli_err ("bad response"); + if (clear_stats) { + keylen = snprintf(key, sizeof(key), "%d-stats-cleared", i); + ret = dict_get_int32n(dict, key, keylen, &stats_cleared); + if (ret) goto out; + cli_out(stats_cleared ? "Cleared stats for %s %s" + : "Failed to clear stats for %s %s", + nfs ? 
"NFS server on" : "brick", bricks); + continue; } - ret = dict_get_int32 (dict, "count", &tr_count); - if (tr_count) - cli_out("number of transitions: %d", tr_count); + if (nfs) + cli_out("NFS Server : %s", bricks); else - cli_out("No transitions"); - for (i = 0; i < tr_count; i++) { - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "log%d-old-state", i); - ret = dict_get_str (dict, key, &old_state); - if (ret) - goto out; + cli_out("Brick: %s", bricks); - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "log%d-event", i); - ret = dict_get_str (dict, key, &event); - if (ret) - goto out; + keylen = snprintf(key, sizeof(key), "%d-members", i); + ret = dict_get_int32n(dict, key, keylen, &members); - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "log%d-new-state", i); - ret = dict_get_str (dict, key, &new_state); + switch (top_op) { + case GF_CLI_TOP_OPEN: + snprintf(key, sizeof(key), "%d-current-open", i); + ret = dict_get_uint64(dict, key, &nr_open); if (ret) - goto out; - - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "log%d-time", i); - ret = dict_get_str (dict, key, &time); + break; + snprintf(key, sizeof(key), "%d-max-open", i); + ret = dict_get_uint64(dict, key, &max_nr_open); + if (ret) + goto out; + keylen = snprintf(key, sizeof(key), "%d-max-openfd-time", i); + ret = dict_get_strn(dict, key, keylen, &openfd_str); if (ret) - goto out; - cli_out ("Old State: [%s]\n" - "New State: [%s]\n" - "Event : [%s]\n" - "timestamp: [%s]\n", old_state, new_state, event, time); + goto out; + cli_out("Current open fds: %" PRIu64 ", Max open fds: %" PRIu64 + ", Max openfd time: %s", + nr_open, max_nr_open, openfd_str); + case GF_CLI_TOP_READ: + case GF_CLI_TOP_WRITE: + case GF_CLI_TOP_OPENDIR: + case GF_CLI_TOP_READDIR: + if (!members) { + continue; + } + cli_out("Count\t\tfilename\n======================="); + break; + case GF_CLI_TOP_READ_PERF: + case GF_CLI_TOP_WRITE_PERF: + snprintf(key, sizeof(key), "%d-throughput", i); + ret = dict_get_double(dict, key, &throughput); + if (!ret) { + snprintf(key, sizeof(key), "%d-time", i); + ret = dict_get_double(dict, key, &time); + } + if (!ret) + cli_out("Throughput %.2f MBps time %.4f secs", throughput, + time / 1e6); + + if (!members) { + continue; + } + cli_out("%*s %-*s %-*s", VOL_TOP_PERF_SPEED_WIDTH, "MBps", + VOL_TOP_PERF_FILENAME_DEF_WIDTH, "Filename", + VOL_TOP_PERF_TIME_WIDTH, "Time"); + cli_out("%*s %-*s %-*s", VOL_TOP_PERF_SPEED_WIDTH, + "====", VOL_TOP_PERF_FILENAME_DEF_WIDTH, + "========", VOL_TOP_PERF_TIME_WIDTH, "===="); + break; + default: + goto out; } - ret = rsp.op_ret; + for (j = 1; j <= members; j++) { + keylen = snprintf(key, sizeof(key), "%d-filename-%d", i, j); + ret = dict_get_strn(dict, key, keylen, &filename); + if (ret) + break; + snprintf(key, sizeof(key), "%d-value-%d", i, j); + ret = dict_get_uint64(dict, key, &value); + if (ret) + goto out; + if (top_op == GF_CLI_TOP_READ_PERF || + top_op == GF_CLI_TOP_WRITE_PERF) { + keylen = snprintf(key, sizeof(key), "%d-time-sec-%d", i, j); + ret = dict_get_int32n(dict, key, keylen, (int32_t *)&time_sec); + if (ret) + goto out; + keylen = snprintf(key, sizeof(key), "%d-time-usec-%d", i, j); + ret = dict_get_int32n(dict, key, keylen, (int32_t *)&time_usec); + if (ret) + goto out; + gf_time_fmt(timestr, sizeof timestr, time_sec, gf_timefmt_FT); + snprintf(timestr + strlen(timestr), + sizeof timestr - strlen(timestr), ".%ld", time_usec); + if (strlen(filename) < VOL_TOP_PERF_FILENAME_DEF_WIDTH) + cli_out("%*" PRIu64 " %-*s %-*s", 
VOL_TOP_PERF_SPEED_WIDTH, + value, VOL_TOP_PERF_FILENAME_DEF_WIDTH, filename, + VOL_TOP_PERF_TIME_WIDTH, timestr); + else + cli_out("%*" PRIu64 " ...%-*s %-*s", + VOL_TOP_PERF_SPEED_WIDTH, value, + VOL_TOP_PERF_FILENAME_ALT_WIDTH, + filename + strlen(filename) - + VOL_TOP_PERF_FILENAME_ALT_WIDTH, + VOL_TOP_PERF_TIME_WIDTH, timestr); + } else { + cli_out("%" PRIu64 "\t\t%s", value, filename); + } + } + } + ret = rsp.op_ret; out: - cli_cmd_broadcast_response (ret); - return ret; + cli_cmd_broadcast_response(ret); + + if (dict) + dict_unref(dict); + + gf_free_xdr_cli_rsp(rsp); + return ret; } -int32_t -gf_cli3_1_fsm_log (call_frame_t *frame, xlator_t *this, void *data) +static int32_t +gf_cli_top_volume(call_frame_t *frame, xlator_t *this, void *data) +{ + int ret = -1; + gf_cli_req req = {{ + 0, + }}; + dict_t *dict = NULL; + + GF_ASSERT(frame); + GF_ASSERT(this); + GF_ASSERT(data); + + dict = data; + + ret = cli_to_glusterd(&req, frame, gf_cli_top_volume_cbk, + (xdrproc_t)xdr_gf_cli_req, dict, + GLUSTER_CLI_PROFILE_VOLUME, this, cli_rpc_prog, NULL); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + GF_FREE(req.dict.dict_val); + return ret; +} + +static int +gf_cli_getwd_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) { - int ret = -1; - gf1_cli_fsm_log_req req = {0,}; + gf1_cli_getwd_rsp rsp = { + 0, + }; + int ret = -1; + + GF_ASSERT(myframe); + + if (-1 == req->rpc_status) { + goto out; + } + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf1_cli_getwd_rsp); + if (ret < 0) { + gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + goto out; + } + + if (rsp.op_ret == -1) { + cli_err("getwd failed"); + ret = rsp.op_ret; + goto out; + } - GF_ASSERT (frame); - GF_ASSERT (this); - GF_ASSERT (data); + gf_log("cli", GF_LOG_INFO, "Received resp to getwd"); - if (!frame || !this || !data) - goto out; - req.name = data; - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_FSM_LOG, NULL, - this, gf_cli3_1_fsm_log_cbk, - (xdrproc_t) xdr_gf1_cli_fsm_log_req); + cli_out("%s", rsp.wd); + + ret = 0; out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + cli_cmd_broadcast_response(ret); + if (rsp.wd) { + free(rsp.wd); + } - return ret; + return ret; } -int -gf_cli3_1_gsync_config_command (dict_t *dict) +static int32_t +gf_cli_getwd(call_frame_t *frame, xlator_t *this, void *data) { - runner_t runner = {0,}; - char *subop = NULL; - char *gwd = NULL; - char *slave = NULL; - char *master = NULL; - char *op_name = NULL; + int ret = -1; + gf1_cli_getwd_req req = { + 0, + }; - if (dict_get_str (dict, "subop", &subop) != 0) - return -1; + GF_ASSERT(frame); + GF_ASSERT(this); - if (strcmp (subop, "get") != 0 && strcmp (subop, "get-all") != 0) { - cli_out (GEOREP" config updated successfully"); - return 0; - } + if (!frame || !this) + goto out; - if (dict_get_str (dict, "glusterd_workdir", &gwd) != 0 || - dict_get_str (dict, "slave", &slave) != 0) - return -1; + ret = cli_cmd_submit(NULL, &req, frame, cli_rpc_prog, GLUSTER_CLI_GETWD, + NULL, this, gf_cli_getwd_cbk, + (xdrproc_t)xdr_gf1_cli_getwd_req); - if (dict_get_str (dict, "master", &master) != 0) - master = NULL; - if (dict_get_str (dict, "op_name", &op_name) != 0) - op_name = NULL; - - runinit (&runner); - runner_add_args (&runner, GSYNCD_PREFIX"/gsyncd", "-c", NULL); - runner_argprintf (&runner, "%s/"GSYNC_CONF, gwd); - if (master) - runner_argprintf (&runner, ":%s", master); - runner_add_arg (&runner, slave); - runner_argprintf (&runner, "--config-%s", subop); - if (op_name) - 
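
/* Illustrative sketch, not part of this patch: the top read/write-perf rows
   above append a ".usec" suffix to the formatted timestamp and left-truncate
   filenames wider than the 47-char default column behind "..." into the
   44-char alternate width. The same layout standalone, with strftime()
   standing in for gf_time_fmt() and widths mirroring the VOL_TOP_PERF_*
   defines: */
#include <stdio.h>
#include <string.h>
#include <time.h>

#define FILENAME_DEF_WIDTH 47
#define FILENAME_ALT_WIDTH 44
#define SPEED_WIDTH 4
#define TIME_WIDTH 26

static void
print_row(unsigned long mbps, const char *filename, time_t sec, long usec)
{
    char timestr[64];

    strftime(timestr, sizeof(timestr), "%Y-%m-%d %H:%M:%S", localtime(&sec));
    snprintf(timestr + strlen(timestr), sizeof(timestr) - strlen(timestr),
             ".%ld", usec);

    if (strlen(filename) < FILENAME_DEF_WIDTH)
        printf("%*lu %-*s %-*s\n", SPEED_WIDTH, mbps, FILENAME_DEF_WIDTH,
               filename, TIME_WIDTH, timestr);
    else /* keep the tail of the path, which is usually the telling part */
        printf("%*lu ...%-*s %-*s\n", SPEED_WIDTH, mbps, FILENAME_ALT_WIDTH,
               filename + strlen(filename) - FILENAME_ALT_WIDTH, TIME_WIDTH,
               timestr);
}

int
main(void)
{
    print_row(812, "/short/file", time(NULL), 42135);
    print_row(640,
              "/a/very/deeply/nested/path/that/overflows/the/default/"
              "forty-seven/character/column/file.bin",
              time(NULL), 903222);
    return 0;
}

-                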
runner_add_arg (&runner, op_name); +out: + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); - return runner_run (&runner); + return ret; } -int -gf_cli3_1_gsync_out_status (dict_t *dict) +static void +cli_print_volume_status_mempool(dict_t *dict, char *prefix) { - int gsync_count = 0; - int i = 0; - int ret = 0; - char mst[PATH_MAX] = {0, }; - char slv[PATH_MAX]= {0, }; - char sts[PATH_MAX] = {0, }; - char hyphens[81] = {0, }; - char *mst_val = NULL; - char *slv_val = NULL; - char *sts_val = NULL; + int ret = -1; + int32_t mempool_count = 0; + char *name = NULL; + int32_t hotcount = 0; + int32_t coldcount = 0; + uint64_t paddedsizeof = 0; + uint64_t alloccount = 0; + int32_t maxalloc = 0; + uint64_t pool_misses = 0; + int32_t maxstdalloc = 0; + char key[128] = { + /* prefix is really small 'brick%d' really */ + 0, + }; + int keylen; + int i = 0; + + GF_ASSERT(dict); + GF_ASSERT(prefix); + + keylen = snprintf(key, sizeof(key), "%s.mempool-count", prefix); + ret = dict_get_int32n(dict, key, keylen, &mempool_count); + if (ret) + goto out; + + cli_out("Mempool Stats\n-------------"); + cli_out("%-30s %9s %9s %12s %10s %8s %8s %12s", "Name", "HotCount", + "ColdCount", "PaddedSizeof", "AllocCount", "MaxAlloc", "Misses", + "Max-StdAlloc"); + cli_out("%-30s %9s %9s %12s %10s %8s %8s %12s", "----", "--------", + "---------", "------------", "----------", "--------", "--------", + "------------"); + + for (i = 0; i < mempool_count; i++) { + keylen = snprintf(key, sizeof(key), "%s.pool%d.name", prefix, i); + ret = dict_get_strn(dict, key, keylen, &name); + if (ret) + goto out; - cli_out ("%-20s %-50s %-10s", "MASTER", "SLAVE", "STATUS"); + keylen = snprintf(key, sizeof(key), "%s.pool%d.hotcount", prefix, i); + ret = dict_get_int32n(dict, key, keylen, &hotcount); + if (ret) + goto out; - for (i=0; i<sizeof(hyphens)-1; i++) - hyphens[i] = '-'; + keylen = snprintf(key, sizeof(key), "%s.pool%d.coldcount", prefix, i); + ret = dict_get_int32n(dict, key, keylen, &coldcount); + if (ret) + goto out; - cli_out ("%s", hyphens); + snprintf(key, sizeof(key), "%s.pool%d.paddedsizeof", prefix, i); + ret = dict_get_uint64(dict, key, &paddedsizeof); + if (ret) + goto out; + snprintf(key, sizeof(key), "%s.pool%d.alloccount", prefix, i); + ret = dict_get_uint64(dict, key, &alloccount); + if (ret) + goto out; - ret = dict_get_int32 (dict, "gsync-count", &gsync_count); - if (ret) { - gf_log ("cli", GF_LOG_INFO, "No active geo-replication sessions" - "present for the selected"); - ret = 0; - goto out; - } + keylen = snprintf(key, sizeof(key), "%s.pool%d.max_alloc", prefix, i); + ret = dict_get_int32n(dict, key, keylen, &maxalloc); + if (ret) + goto out; - for (i = 1; i <= gsync_count; i++) { - snprintf (mst, sizeof(mst), "master%d", i); - snprintf (slv, sizeof(slv), "slave%d", i); - snprintf (sts, sizeof(sts), "status%d", i); + keylen = snprintf(key, sizeof(key), "%s.pool%d.max-stdalloc", prefix, + i); + ret = dict_get_int32n(dict, key, keylen, &maxstdalloc); + if (ret) + goto out; - ret = dict_get_str (dict, mst, &mst_val); - if (ret) - goto out; + snprintf(key, sizeof(key), "%s.pool%d.pool-misses", prefix, i); + ret = dict_get_uint64(dict, key, &pool_misses); + if (ret) + goto out; - ret = dict_get_str (dict, slv, &slv_val); - if (ret) - goto out; + cli_out("%-30s %9d %9d %12" PRIu64 " %10" PRIu64 " %8d %8" PRIu64 + " %12d", + name, hotcount, coldcount, paddedsizeof, alloccount, maxalloc, + pool_misses, maxstdalloc); + } - ret = dict_get_str (dict, sts, &sts_val); - if (ret) - goto out; +out: + return; +} - cli_out ("%-20s 
%-50s %-10s", mst_val, - slv_val, sts_val); +static void +cli_print_volume_status_mem(dict_t *dict, gf_boolean_t notbrick) +{ + int ret = -1; + char *volname = NULL; + char *hostname = NULL; + char *path = NULL; + int online = -1; + char key[64] = { + 0, + }; + int brick_index_max = -1; + int other_count = 0; + int index_max = 0; + int val = 0; + int i = 0; + + GF_ASSERT(dict); + + ret = dict_get_str_sizen(dict, "volname", &volname); + if (ret) + goto out; + cli_out("Memory status for volume : %s", volname); + + ret = dict_get_int32_sizen(dict, "brick-index-max", &brick_index_max); + if (ret) + goto out; + ret = dict_get_int32_sizen(dict, "other-count", &other_count); + if (ret) + goto out; + + index_max = brick_index_max + other_count; + + for (i = 0; i <= index_max; i++) { + cli_out("----------------------------------------------"); + + snprintf(key, sizeof(key), "brick%d.hostname", i); + ret = dict_get_str(dict, key, &hostname); + if (ret) + continue; + snprintf(key, sizeof(key), "brick%d.path", i); + ret = dict_get_str(dict, key, &path); + if (ret) + continue; + if (notbrick) + cli_out("%s : %s", hostname, path); + else + cli_out("Brick : %s:%s", hostname, path); + snprintf(key, sizeof(key), "brick%d.status", i); + ret = dict_get_int32(dict, key, &online); + if (ret) + goto out; + if (!online) { + if (notbrick) + cli_out("%s is offline", hostname); + else + cli_out("Brick is offline"); + continue; } - out: - return ret; + cli_out("Mallinfo\n--------"); -} + snprintf(key, sizeof(key), "brick%d.mallinfo.arena", i); + ret = dict_get_int32(dict, key, &val); + if (ret) + goto out; + cli_out("%-8s : %d", "Arena", val); -int -gf_cli3_1_gsync_set_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - int ret = -1; - gf_cli_rsp rsp = {0, }; - dict_t *dict = NULL; - char *gsync_status = NULL; - char *master = NULL; - char *slave = NULL; - int32_t type = 0; - - if (req->rpc_status == -1) { - ret = -1; - goto out; - } + snprintf(key, sizeof(key), "brick%d.mallinfo.ordblks", i); + ret = dict_get_int32(dict, key, &val); + if (ret) + goto out; + cli_out("%-8s : %d", "Ordblks", val); - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, - "Unable to get response structure"); - goto out; - } + snprintf(key, sizeof(key), "brick%d.mallinfo.smblks", i); + ret = dict_get_int32(dict, key, &val); + if (ret) + goto out; + cli_out("%-8s : %d", "Smblks", val); - dict = dict_new (); + snprintf(key, sizeof(key), "brick%d.mallinfo.hblks", i); + ret = dict_get_int32(dict, key, &val); + if (ret) + goto out; + cli_out("%-8s : %d", "Hblks", val); - if (!dict) { - ret = -1; - goto out; - } + snprintf(key, sizeof(key), "brick%d.mallinfo.hblkhd", i); + ret = dict_get_int32(dict, key, &val); + if (ret) + goto out; + cli_out("%-8s : %d", "Hblkhd", val); - ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &dict); + snprintf(key, sizeof(key), "brick%d.mallinfo.usmblks", i); + ret = dict_get_int32(dict, key, &val); + if (ret) + goto out; + cli_out("%-8s : %d", "Usmblks", val); + snprintf(key, sizeof(key), "brick%d.mallinfo.fsmblks", i); + ret = dict_get_int32(dict, key, &val); if (ret) - goto out; + goto out; + cli_out("%-8s : %d", "Fsmblks", val); -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_dict ("volGeoRep", dict, rsp.op_ret, - rsp.op_errno, rsp.op_errstr); - if (ret) - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); - goto out; - } -#endif + snprintf(key, sizeof(key), 
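
/* Illustrative sketch, not part of this patch: the brick%d.mallinfo.* keys
   read above mirror glibc's struct mallinfo fields one-to-one. Dumping the
   same fields locally (assumes glibc; mallinfo() is glibc-specific and
   deprecated in favour of mallinfo2() on newer releases): */
#include <malloc.h>
#include <stdio.h>

int
main(void)
{
    struct mallinfo mi = mallinfo();

    printf("%-8s : %d\n", "Arena", mi.arena);       /* main arena size */
    printf("%-8s : %d\n", "Ordblks", mi.ordblks);   /* free chunks */
    printf("%-8s : %d\n", "Smblks", mi.smblks);     /* free fastbin blocks */
    printf("%-8s : %d\n", "Hblks", mi.hblks);       /* mmap'd regions */
    printf("%-8s : %d\n", "Hblkhd", mi.hblkhd);     /* bytes in mmap'd regions */
    printf("%-8s : %d\n", "Usmblks", mi.usmblks);   /* unused, always 0 */
    printf("%-8s : %d\n", "Fsmblks", mi.fsmblks);   /* bytes in fastbins */
    printf("%-8s : %d\n", "Uordblks", mi.uordblks); /* allocated bytes */
    printf("%-8s : %d\n", "Fordblks", mi.fordblks); /* free bytes */
    printf("%-8s : %d\n", "Keepcost", mi.keepcost); /* trimmable top */
    return 0;
}

+        snprintf(key, sizeof(key), 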
"brick%d.mallinfo.uordblks", i); + ret = dict_get_int32(dict, key, &val); + if (ret) + goto out; + cli_out("%-8s : %d", "Uordblks", val); - if (rsp.op_ret) { - cli_err ("%s", rsp.op_errstr ? rsp.op_errstr : - GEOREP" command unsuccessful"); - ret = rsp.op_ret; - goto out; - } + snprintf(key, sizeof(key), "brick%d.mallinfo.fordblks", i); + ret = dict_get_int32(dict, key, &val); + if (ret) + goto out; + cli_out("%-8s : %d", "Fordblks", val); - ret = dict_get_str (dict, "gsync-status", &gsync_status); - if (!ret) - cli_out ("%s", gsync_status); - else - ret = 0; + snprintf(key, sizeof(key), "brick%d.mallinfo.keepcost", i); + ret = dict_get_int32(dict, key, &val); + if (ret) + goto out; + cli_out("%-8s : %d", "Keepcost", val); - ret = dict_get_int32 (dict, "type", &type); - if (ret) { - gf_log (THIS->name, GF_LOG_ERROR, "failed to get type"); - goto out; + cli_out(" "); + snprintf(key, sizeof(key), "brick%d", i); + cli_print_volume_status_mempool(dict, key); + } +out: + cli_out("----------------------------------------------\n"); + return; +} + +static void +cli_print_volume_status_client_list(dict_t *dict, gf_boolean_t notbrick) +{ + int ret = -1; + char *volname = NULL; + int client_count = 0; + int current_count = 0; + char key[64] = { + 0, + }; + int i = 0; + int total = 0; + char *name = NULL; + gf_boolean_t is_fuse_done = _gf_false; + gf_boolean_t is_gfapi_done = _gf_false; + gf_boolean_t is_rebalance_done = _gf_false; + gf_boolean_t is_glustershd_done = _gf_false; + gf_boolean_t is_quotad_done = _gf_false; + gf_boolean_t is_snapd_done = _gf_false; + + GF_ASSERT(dict); + + ret = dict_get_str_sizen(dict, "volname", &volname); + if (ret) + goto out; + cli_out("Client connections for volume %s", volname); + + ret = dict_get_int32_sizen(dict, "client-count", &client_count); + if (ret) + goto out; + + cli_out("%-48s %15s", "Name", "count"); + cli_out("%-48s %15s", "-----", "------"); + for (i = 0; i < client_count; i++) { + name = NULL; + snprintf(key, sizeof(key), "client%d.name", i); + ret = dict_get_str(dict, key, &name); + + if (!strncmp(name, "fuse", 4)) { + if (!is_fuse_done) { + is_fuse_done = _gf_true; + ret = dict_get_int32_sizen(dict, "fuse-count", ¤t_count); + if (ret) + goto out; + total = total + current_count; + goto print; + } + continue; + } else if (!strncmp(name, "gfapi", 5)) { + if (!is_gfapi_done) { + is_gfapi_done = _gf_true; + ret = dict_get_int32_sizen(dict, "gfapi-count", ¤t_count); + if (ret) + goto out; + total = total + current_count; + goto print; + } + continue; + } else if (!strcmp(name, "rebalance")) { + if (!is_rebalance_done) { + is_rebalance_done = _gf_true; + ret = dict_get_int32_sizen(dict, "rebalance-count", + ¤t_count); + if (ret) + goto out; + total = total + current_count; + goto print; + } + continue; + } else if (!strcmp(name, "glustershd")) { + if (!is_glustershd_done) { + is_glustershd_done = _gf_true; + ret = dict_get_int32_sizen(dict, "glustershd-count", + ¤t_count); + if (ret) + goto out; + total = total + current_count; + goto print; + } + continue; + } else if (!strcmp(name, "quotad")) { + if (!is_quotad_done) { + is_quotad_done = _gf_true; + ret = dict_get_int32_sizen(dict, "quotad-count", + ¤t_count); + if (ret) + goto out; + total = total + current_count; + goto print; + } + continue; + } else if (!strcmp(name, "snapd")) { + if (!is_snapd_done) { + is_snapd_done = _gf_true; + ret = dict_get_int32_sizen(dict, "snapd-count", ¤t_count); + if (ret) + goto out; + total = total + current_count; + goto print; + } + continue; } - switch (type) { - 
case GF_GSYNC_OPTION_TYPE_START: - case GF_GSYNC_OPTION_TYPE_STOP: - if (dict_get_str (dict, "master", &master) != 0) - master = "???"; - if (dict_get_str (dict, "slave", &slave) != 0) - slave = "???"; + print: + cli_out("%-48s %15d", name, current_count); + } +out: + cli_out("\ntotal clients for volume %s : %d ", volname, total); + cli_out( + "-----------------------------------------------------------------\n"); + return; +} - cli_out ("%s " GEOREP " session between %s & %s" - " has been successful", - type == GF_GSYNC_OPTION_TYPE_START ? - "Starting" : "Stopping", - master, slave); - break; +static void +cli_print_volume_status_clients(dict_t *dict, gf_boolean_t notbrick) +{ + int ret = -1; + char *volname = NULL; + int brick_index_max = -1; + int other_count = 0; + int index_max = 0; + char *hostname = NULL; + char *path = NULL; + int online = -1; + int client_count = 0; + char *clientname = NULL; + uint64_t bytesread = 0; + uint64_t byteswrite = 0; + uint32_t opversion = 0; + char key[128] = { + 0, + }; + int i = 0; + int j = 0; + + GF_ASSERT(dict); + + ret = dict_get_str_sizen(dict, "volname", &volname); + if (ret) + goto out; + cli_out("Client connections for volume %s", volname); + + ret = dict_get_int32_sizen(dict, "brick-index-max", &brick_index_max); + if (ret) + goto out; + ret = dict_get_int32_sizen(dict, "other-count", &other_count); + if (ret) + goto out; + + index_max = brick_index_max + other_count; + + for (i = 0; i <= index_max; i++) { + hostname = NULL; + path = NULL; + online = -1; + client_count = 0; + clientname = NULL; + bytesread = 0; + byteswrite = 0; + + cli_out("----------------------------------------------"); + + snprintf(key, sizeof(key), "brick%d.hostname", i); + ret = dict_get_str(dict, key, &hostname); + + snprintf(key, sizeof(key), "brick%d.path", i); + ret = dict_get_str(dict, key, &path); + + if (hostname && path) { + if (notbrick) + cli_out("%s : %s", hostname, path); + else + cli_out("Brick : %s:%s", hostname, path); + } + snprintf(key, sizeof(key), "brick%d.status", i); + ret = dict_get_int32(dict, key, &online); + if (!online) { + if (notbrick) + cli_out("%s is offline", hostname); + else + cli_out("Brick is offline"); + continue; + } + + snprintf(key, sizeof(key), "brick%d.clientcount", i); + ret = dict_get_int32(dict, key, &client_count); + + if (hostname && path) + cli_out("Clients connected : %d", client_count); + if (client_count == 0) + continue; + + cli_out("%-48s %15s %15s %15s", "Hostname", "BytesRead", "BytesWritten", + "OpVersion"); + cli_out("%-48s %15s %15s %15s", "--------", "---------", "------------", + "---------"); + for (j = 0; j < client_count; j++) { + snprintf(key, sizeof(key), "brick%d.client%d.hostname", i, j); + ret = dict_get_str(dict, key, &clientname); + + snprintf(key, sizeof(key), "brick%d.client%d.bytesread", i, j); + ret = dict_get_uint64(dict, key, &bytesread); + + snprintf(key, sizeof(key), "brick%d.client%d.byteswrite", i, j); + ret = dict_get_uint64(dict, key, &byteswrite); + + snprintf(key, sizeof(key), "brick%d.client%d.opversion", i, j); + ret = dict_get_uint32(dict, key, &opversion); + + cli_out("%-48s %15" PRIu64 " %15" PRIu64 " %15" PRIu32, clientname, + bytesread, byteswrite, opversion); + } + } +out: + cli_out("----------------------------------------------\n"); + return; +} - case GF_GSYNC_OPTION_TYPE_CONFIG: - ret = gf_cli3_1_gsync_config_command (dict); - break; +#ifdef DEBUG /* this function is only used in debug */ +static void +cli_print_volume_status_inode_entry(dict_t *dict, char *prefix) +{ + int 
ret = -1; + char key[1024] = { + 0, + }; + char *gfid = NULL; + uint64_t nlookup = 0; + uint32_t ref = 0; + int ia_type = 0; + char inode_type; + + GF_ASSERT(dict); + GF_ASSERT(prefix); + + snprintf(key, sizeof(key), "%s.gfid", prefix); + ret = dict_get_str(dict, key, &gfid); + if (ret) + goto out; + + snprintf(key, sizeof(key), "%s.nlookup", prefix); + ret = dict_get_uint64(dict, key, &nlookup); + if (ret) + goto out; + + snprintf(key, sizeof(key), "%s.ref", prefix); + ret = dict_get_uint32(dict, key, &ref); + if (ret) + goto out; + + snprintf(key, sizeof(key), "%s.ia_type", prefix); + ret = dict_get_int32(dict, key, &ia_type); + if (ret) + goto out; + + switch (ia_type) { + case IA_IFREG: + inode_type = 'R'; + break; + case IA_IFDIR: + inode_type = 'D'; + break; + case IA_IFLNK: + inode_type = 'L'; + break; + case IA_IFBLK: + inode_type = 'B'; + break; + case IA_IFCHR: + inode_type = 'C'; + break; + case IA_IFIFO: + inode_type = 'F'; + break; + case IA_IFSOCK: + inode_type = 'S'; + break; + default: + inode_type = 'I'; + break; + } - case GF_GSYNC_OPTION_TYPE_STATUS: - ret = gf_cli3_1_gsync_out_status (dict); - goto out; - default: - cli_out (GEOREP" command executed successfully"); - } + cli_out("%-40s %14" PRIu64 " %14" PRIu32 " %9c", gfid, nlookup, ref, + inode_type); out: + return; +} +#endif + +static void +cli_print_volume_status_itables(dict_t *dict, char *prefix) +{ + int ret = -1; + char key[1024] = { + 0, + }; + uint32_t active_size = 0; + uint32_t lru_size = 0; + uint32_t purge_size = 0; + uint32_t lru_limit = 0; +#ifdef DEBUG + int i = 0; +#endif + GF_ASSERT(dict); + GF_ASSERT(prefix); + + snprintf(key, sizeof(key), "%s.lru_limit", prefix); + ret = dict_get_uint32(dict, key, &lru_limit); + if (ret) + goto out; + cli_out("LRU limit : %u", lru_limit); + + snprintf(key, sizeof(key), "%s.active_size", prefix); + ret = dict_get_uint32(dict, key, &active_size); + if (ret) + goto out; + +#ifdef DEBUG + if (active_size != 0) { + cli_out("Active inodes:"); + cli_out("%-40s %14s %14s %9s", "GFID", "Lookups", "Ref", "IA type"); + cli_out("%-40s %14s %14s %9s", "----", "-------", "---", "-------"); + } + for (i = 0; i < active_size; i++) { + snprintf(key, sizeof(key), "%s.active%d", prefix, i); + cli_print_volume_status_inode_entry(dict, key); + } + cli_out(" "); +#else + cli_out("Active Inodes : %u", active_size); + +#endif - cli_cmd_broadcast_response (ret); + snprintf(key, sizeof(key), "%s.lru_size", prefix); + ret = dict_get_uint32(dict, key, &lru_size); + if (ret) + goto out; + +#ifdef DEBUG + if (lru_size != 0) { + cli_out("LRU inodes:"); + cli_out("%-40s %14s %14s %9s", "GFID", "Lookups", "Ref", "IA type"); + cli_out("%-40s %14s %14s %9s", "----", "-------", "---", "-------"); + } + for (i = 0; i < lru_size; i++) { + snprintf(key, sizeof(key), "%s.lru%d", prefix, i); + cli_print_volume_status_inode_entry(dict, key); + } + cli_out(" "); +#else + cli_out("LRU Inodes : %u", lru_size); +#endif - if (rsp.dict.dict_val) - free (rsp.dict.dict_val); + snprintf(key, sizeof(key), "%s.purge_size", prefix); + ret = dict_get_uint32(dict, key, &purge_size); + if (ret) + goto out; +#ifdef DEBUG + if (purge_size != 0) { + cli_out("Purged inodes:"); + cli_out("%-40s %14s %14s %9s", "GFID", "Lookups", "Ref", "IA type"); + cli_out("%-40s %14s %14s %9s", "----", "-------", "---", "-------"); + } + for (i = 0; i < purge_size; i++) { + snprintf(key, sizeof(key), "%s.purge%d", prefix, i); + cli_print_volume_status_inode_entry(dict, key); + } +#else + cli_out("Purge Inodes : %u", purge_size); +#endif - 
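
/* Illustrative sketch, not part of this patch: the inode listing above
   compresses the IA_IF* type into one letter (R/D/L/B/C/F/S, 'I' for
   anything else). The same idea over plain stat(2) modes, so it runs
   outside the gluster tree: */
#include <stdio.h>
#include <sys/stat.h>

static char
mode_letter(mode_t m)
{
    switch (m & S_IFMT) {
        case S_IFREG:
            return 'R';
        case S_IFDIR:
            return 'D';
        case S_IFLNK:
            return 'L';
        case S_IFBLK:
            return 'B';
        case S_IFCHR:
            return 'C';
        case S_IFIFO:
            return 'F';
        case S_IFSOCK:
            return 'S';
        default:
            return 'I'; /* invalid/unknown, like the IA type fallback */
    }
}

int
main(void)
{
    struct stat st;

    if (stat("/tmp", &st) == 0)
        printf("/tmp -> %c\n", mode_letter(st.st_mode));
    return 0;
}

-        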
return ret; +out: + return; } -int32_t -gf_cli3_1_gsync_set (call_frame_t *frame, xlator_t *this, - void *data) +static void +cli_print_volume_status_inode(dict_t *dict, gf_boolean_t notbrick) { - int ret = 0; - dict_t *dict = NULL; - gf_cli_req req = {{0,}}; + int ret = -1; + char *volname = NULL; + int brick_index_max = -1; + int other_count = 0; + int index_max = 0; + char *hostname = NULL; + char *path = NULL; + int online = -1; + int conn_count = 0; + char key[64] = { + 0, + }; + int i = 0; + int j = 0; + + GF_ASSERT(dict); + + ret = dict_get_str_sizen(dict, "volname", &volname); + if (ret) + goto out; + cli_out("Inode tables for volume %s", volname); + + ret = dict_get_int32_sizen(dict, "brick-index-max", &brick_index_max); + if (ret) + goto out; + ret = dict_get_int32_sizen(dict, "other-count", &other_count); + if (ret) + goto out; + + index_max = brick_index_max + other_count; + + for (i = 0; i <= index_max; i++) { + cli_out("----------------------------------------------"); + + snprintf(key, sizeof(key), "brick%d.hostname", i); + ret = dict_get_str(dict, key, &hostname); + if (ret) + goto out; + snprintf(key, sizeof(key), "brick%d.path", i); + ret = dict_get_str(dict, key, &path); + if (ret) + goto out; + if (notbrick) + cli_out("%s : %s", hostname, path); + else + cli_out("Brick : %s:%s", hostname, path); - if (!frame || !this || !data) { - ret = -1; - goto out; + snprintf(key, sizeof(key), "brick%d.status", i); + ret = dict_get_int32(dict, key, &online); + if (ret) + goto out; + if (!online) { + if (notbrick) + cli_out("%s is offline", hostname); + else + cli_out("Brick is offline"); + continue; } - dict = data; + snprintf(key, sizeof(key), "brick%d.conncount", i); + ret = dict_get_int32(dict, key, &conn_count); + if (ret) + goto out; - ret = dict_allocate_and_serialize (dict, - &req.dict.dict_val, - (size_t *) &req.dict.dict_len); - if (ret < 0) { - gf_log (this->name, GF_LOG_ERROR, - "failed to serialize the data"); + for (j = 0; j < conn_count; j++) { + if (conn_count > 1) + cli_out("Connection %d:", j + 1); - goto out; + snprintf(key, sizeof(key), "brick%d.conn%d.itable", i, j); + cli_print_volume_status_itables(dict, key); + cli_out(" "); } + } +out: + cli_out("----------------------------------------------"); + return; +} - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_GSYNC_SET, NULL, - this, gf_cli3_1_gsync_set_cbk, - (xdrproc_t) xdr_gf_cli_req); +void +cli_print_volume_status_fdtable(dict_t *dict, char *prefix) +{ + int ret = -1; + char key[256] = { + 0, + }; + int refcount = 0; + uint32_t maxfds = 0; + int firstfree = 0; + int openfds = 0; + int fd_pid = 0; + int fd_refcount = 0; + int fd_flags = 0; + int i = 0; + + GF_ASSERT(dict); + GF_ASSERT(prefix); + + snprintf(key, sizeof(key), "%s.refcount", prefix); + ret = dict_get_int32(dict, key, &refcount); + if (ret) + goto out; + + snprintf(key, sizeof(key), "%s.maxfds", prefix); + ret = dict_get_uint32(dict, key, &maxfds); + if (ret) + goto out; + + snprintf(key, sizeof(key), "%s.firstfree", prefix); + ret = dict_get_int32(dict, key, &firstfree); + if (ret) + goto out; + + cli_out("RefCount = %d MaxFDs = %d FirstFree = %d", refcount, maxfds, + firstfree); + + snprintf(key, sizeof(key), "%s.openfds", prefix); + ret = dict_get_int32(dict, key, &openfds); + if (ret) + goto out; + if (0 == openfds) { + cli_err("No open fds"); + goto out; + } + + cli_out("%-19s %-19s %-19s %-19s", "FD Entry", "PID", "RefCount", "Flags"); + cli_out("%-19s %-19s %-19s %-19s", "--------", "---", "--------", "-----"); + + for (i 
= 0; i < maxfds; i++) { + snprintf(key, sizeof(key), "%s.fdentry%d.pid", prefix, i); + ret = dict_get_int32(dict, key, &fd_pid); + if (ret) + continue; -out: - if (req.dict.dict_val) - GF_FREE (req.dict.dict_val); + snprintf(key, sizeof(key), "%s.fdentry%d.refcount", prefix, i); + ret = dict_get_int32(dict, key, &fd_refcount); + if (ret) + continue; - return ret; -} + snprintf(key, sizeof(key), "%s.fdentry%d.flags", prefix, i); + ret = dict_get_int32(dict, key, &fd_flags); + if (ret) + continue; + cli_out("%-19d %-19d %-19d %-19d", i, fd_pid, fd_refcount, fd_flags); + } -int -cli_profile_info_percentage_cmp (void *a, void *b) +out: + return; +} + +static void +cli_print_volume_status_fd(dict_t *dict, gf_boolean_t notbrick) { - cli_profile_info_t *ia = NULL; - cli_profile_info_t *ib = NULL; - int ret = 0; + int ret = -1; + char *volname = NULL; + int brick_index_max = -1; + int other_count = 0; + int index_max = 0; + char *hostname = NULL; + char *path = NULL; + int online = -1; + int conn_count = 0; + char key[64] = { + 0, + }; + int i = 0; + int j = 0; + + GF_ASSERT(dict); + + ret = dict_get_str_sizen(dict, "volname", &volname); + if (ret) + goto out; + cli_out("FD tables for volume %s", volname); + + ret = dict_get_int32_sizen(dict, "brick-index-max", &brick_index_max); + if (ret) + goto out; + ret = dict_get_int32_sizen(dict, "other-count", &other_count); + if (ret) + goto out; + + index_max = brick_index_max + other_count; + + for (i = 0; i <= index_max; i++) { + cli_out("----------------------------------------------"); + + snprintf(key, sizeof(key), "brick%d.hostname", i); + ret = dict_get_str(dict, key, &hostname); + if (ret) + goto out; + snprintf(key, sizeof(key), "brick%d.path", i); + ret = dict_get_str(dict, key, &path); + if (ret) + goto out; - ia = a; - ib = b; - if (ia->percentage_avg_latency < ib->percentage_avg_latency) - ret = -1; - else if (ia->percentage_avg_latency > ib->percentage_avg_latency) - ret = 1; + if (notbrick) + cli_out("%s : %s", hostname, path); else - ret = 0; - return ret; -} - + cli_out("Brick : %s:%s", hostname, path); -void -cmd_profile_volume_brick_out (dict_t *dict, int count, int interval) -{ - char key[256] = {0}; - int i = 0; - uint64_t sec = 0; - uint64_t r_count = 0; - uint64_t w_count = 0; - uint64_t rb_counts[32] = {0}; - uint64_t wb_counts[32] = {0}; - cli_profile_info_t profile_info[GF_FOP_MAXVALUE] = {{0}}; - char output[128] = {0}; - int per_line = 0; - char read_blocks[128] = {0}; - char write_blocks[128] = {0}; - int index = 0; - int is_header_printed = 0; - int ret = 0; - double total_percentage_latency = 0; - - for (i = 0; i < 32; i++) { - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%d-%d-read-%d", count, - interval, (1 << i)); - ret = dict_get_uint64 (dict, key, &rb_counts[i]); - } - - for (i = 0; i < 32; i++) { - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%d-%d-write-%d", count, interval, - (1<<i)); - ret = dict_get_uint64 (dict, key, &wb_counts[i]); + snprintf(key, sizeof(key), "brick%d.status", i); + ret = dict_get_int32(dict, key, &online); + if (ret) + goto out; + if (!online) { + if (notbrick) + cli_out("%s is offline", hostname); + else + cli_out("Brick is offline"); + continue; } - for (i = 0; i < GF_FOP_MAXVALUE; i++) { - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%d-%d-%d-hits", count, - interval, i); - ret = dict_get_uint64 (dict, key, &profile_info[i].fop_hits); - - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%d-%d-%d-avglatency", count, - 
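
/* Illustrative sketch, not part of this patch: the fd-table dump above walks
   all maxfds slots and simply skips slots whose per-entry dict keys are
   absent, so only live fds print. The same skip-on-missing pattern with a
   plain array, where pid == -1 marks an empty slot: */
#include <stdio.h>

struct fd_entry {
    int pid; /* -1 => slot unused */
    int refcount;
    int flags;
};

int
main(void)
{
    struct fd_entry fds[] = {
        {1234, 1, 2}, {-1, 0, 0}, {1234, 3, 0}, {-1, 0, 0}, {5678, 1, 2},
    };
    size_t maxfds = sizeof(fds) / sizeof(fds[0]), i;

    printf("%-19s %-19s %-19s %-19s\n", "FD Entry", "PID", "RefCount",
           "Flags");
    for (i = 0; i < maxfds; i++) {
        if (fds[i].pid == -1)
            continue; /* matches the dict_get failure -> continue above */
        printf("%-19zu %-19d %-19d %-19d\n", i, fds[i].pid, fds[i].refcount,
               fds[i].flags);
    }
    return 0;
}

-                        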
interval, i); - ret = dict_get_double (dict, key, &profile_info[i].avg_latency); - - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%d-%d-%d-minlatency", count, - interval, i); - ret = dict_get_double (dict, key, &profile_info[i].min_latency); - - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%d-%d-%d-maxlatency", count, - interval, i); - ret = dict_get_double (dict, key, &profile_info[i].max_latency); - profile_info[i].fop_name = gf_fop_list[i]; - - total_percentage_latency += - (profile_info[i].fop_hits * profile_info[i].avg_latency); - } - if (total_percentage_latency) { - for (i = 0; i < GF_FOP_MAXVALUE; i++) { - profile_info[i].percentage_avg_latency = 100 * ( - (profile_info[i].avg_latency* profile_info[i].fop_hits) / - total_percentage_latency); - } - gf_array_insertionsort (profile_info, 1, GF_FOP_MAXVALUE - 1, - sizeof (cli_profile_info_t), - cli_profile_info_percentage_cmp); + snprintf(key, sizeof(key), "brick%d.conncount", i); + ret = dict_get_int32(dict, key, &conn_count); + if (ret) + goto out; + + for (j = 0; j < conn_count; j++) { + cli_out("Connection %d:", j + 1); + + snprintf(key, sizeof(key), "brick%d.conn%d.fdtable", i, j); + cli_print_volume_status_fdtable(dict, key); + cli_out(" "); } - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%d-%d-duration", count, interval); - ret = dict_get_uint64 (dict, key, &sec); + } +out: + cli_out("----------------------------------------------"); + return; +} - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%d-%d-total-read", count, interval); - ret = dict_get_uint64 (dict, key, &r_count); +static void +cli_print_volume_status_call_frame(dict_t *dict, char *prefix) +{ + int ret = -1; + char key[1024] = { + 0, + }; + int ref_count = 0; + char *translator = 0; + int complete = 0; + char *parent = NULL; + char *wind_from = NULL; + char *wind_to = NULL; + char *unwind_from = NULL; + char *unwind_to = NULL; + + if (!dict || !prefix) + return; - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%d-%d-total-write", count, interval); - ret = dict_get_uint64 (dict, key, &w_count); + snprintf(key, sizeof(key), "%s.refcount", prefix); + ret = dict_get_int32(dict, key, &ref_count); + if (ret) + return; - if (ret == 0) { - } + snprintf(key, sizeof(key), "%s.translator", prefix); + ret = dict_get_str(dict, key, &translator); + if (ret) + return; - if (interval == -1) - cli_out ("Cumulative Stats:"); - else - cli_out ("Interval %d Stats:", interval); - snprintf (output, sizeof (output), "%14s", "Block Size:"); - snprintf (read_blocks, sizeof (read_blocks), "%14s", "No. of Reads:"); - snprintf (write_blocks, sizeof (write_blocks), "%14s", "No. 
of Writes:"); - index = 14; - for (i = 0; i < 32; i++) { - if ((rb_counts[i] == 0) && (wb_counts[i] == 0)) - continue; - per_line++; - snprintf (output+index, sizeof (output)-index, "%19db+ ", (1<<i)); - if (rb_counts[i]) { - snprintf (read_blocks+index, sizeof (read_blocks)-index, - "%21"PRId64" ", rb_counts[i]); - } else { - snprintf (read_blocks+index, sizeof (read_blocks)-index, - "%21s ", "0"); - } - if (wb_counts[i]) { - snprintf (write_blocks+index, sizeof (write_blocks)-index, - "%21"PRId64" ", wb_counts[i]); - } else { - snprintf (write_blocks+index, sizeof (write_blocks)-index, - "%21s ", "0"); - } - index += 22; - if (per_line == 3) { - cli_out ("%s", output); - cli_out ("%s", read_blocks); - cli_out ("%s", write_blocks); - cli_out (" "); - per_line = 0; - memset (output, 0, sizeof (output)); - memset (read_blocks, 0, sizeof (read_blocks)); - memset (write_blocks, 0, sizeof (write_blocks)); - snprintf (output, sizeof (output), "%14s", "Block Size:"); - snprintf (read_blocks, sizeof (read_blocks), "%14s", - "No. of Reads:"); - snprintf (write_blocks, sizeof (write_blocks), "%14s", - "No. of Writes:"); - index = 14; - } - } + snprintf(key, sizeof(key), "%s.complete", prefix); + ret = dict_get_int32(dict, key, &complete); + if (ret) + return; - if (per_line != 0) { - cli_out ("%s", output); - cli_out ("%s", read_blocks); - cli_out ("%s", write_blocks); - } - for (i = 0; i < GF_FOP_MAXVALUE; i++) { - if (profile_info[i].fop_hits == 0) - continue; - if (is_header_printed == 0) { - cli_out ("%10s %13s %13s %13s %14s %11s", "%-latency", - "Avg-latency", "Min-Latency", "Max-Latency", - "No. of calls", "Fop"); - cli_out ("%10s %13s %13s %13s %14s %11s", "---------", - "-----------", "-----------", "-----------", - "------------", "----"); - is_header_printed = 1; - } - if (profile_info[i].fop_hits) { - cli_out ("%10.2lf %10.2lf us %10.2lf us %10.2lf us" - " %14"PRId64" %11s", - profile_info[i].percentage_avg_latency, - profile_info[i].avg_latency, - profile_info[i].min_latency, - profile_info[i].max_latency, - profile_info[i].fop_hits, - profile_info[i].fop_name); - } - } - cli_out (" "); - cli_out ("%12s: %"PRId64" seconds", "Duration", sec); - cli_out ("%12s: %"PRId64" bytes", "Data Read", r_count); - cli_out ("%12s: %"PRId64" bytes", "Data Written", w_count); - cli_out (" "); + cli_out(" Ref Count = %d", ref_count); + cli_out(" Translator = %s", translator); + cli_out(" Completed = %s", (complete ? 
"Yes" : "No")); + + snprintf(key, sizeof(key), "%s.parent", prefix); + ret = dict_get_str(dict, key, &parent); + if (!ret) + cli_out(" Parent = %s", parent); + + snprintf(key, sizeof(key), "%s.windfrom", prefix); + ret = dict_get_str(dict, key, &wind_from); + if (!ret) + cli_out(" Wind From = %s", wind_from); + + snprintf(key, sizeof(key), "%s.windto", prefix); + ret = dict_get_str(dict, key, &wind_to); + if (!ret) + cli_out(" Wind To = %s", wind_to); + + snprintf(key, sizeof(key), "%s.unwindfrom", prefix); + ret = dict_get_str(dict, key, &unwind_from); + if (!ret) + cli_out(" Unwind From = %s", unwind_from); + + snprintf(key, sizeof(key), "%s.unwindto", prefix); + ret = dict_get_str(dict, key, &unwind_to); + if (!ret) + cli_out(" Unwind To = %s", unwind_to); } -int32_t -gf_cli3_1_profile_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf_cli_rsp rsp = {0,}; - int ret = -1; - dict_t *dict = NULL; - gf1_cli_stats_op op = GF_CLI_STATS_NONE; - char key[256] = {0}; - int interval = 0; - int i = 1; - int32_t brick_count = 0; - char *volname = NULL; - char *brick = NULL; - char str[1024] = {0,}; - - if (-1 == req->rpc_status) { - goto out; - } +static void +cli_print_volume_status_call_stack(dict_t *dict, char *prefix) +{ + int ret = -1; + char key[256] = { + 0, + }; + int uid = 0; + int gid = 0; + int pid = 0; + uint64_t unique = 0; + // char *op = NULL; + int count = 0; + int i = 0; + + if (!prefix) + return; - gf_log ("cli", GF_LOG_DEBUG, "Received resp to profile"); - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } + snprintf(key, sizeof(key), "%s.uid", prefix); + ret = dict_get_int32(dict, key, &uid); + if (ret) + return; - dict = dict_new (); + snprintf(key, sizeof(key), "%s.gid", prefix); + ret = dict_get_int32(dict, key, &gid); + if (ret) + return; - if (!dict) { - ret = -1; - goto out; - } + snprintf(key, sizeof(key), "%s.pid", prefix); + ret = dict_get_int32(dict, key, &pid); + if (ret) + return; - ret = dict_unserialize (rsp.dict.dict_val, - rsp.dict.dict_len, - &dict); + snprintf(key, sizeof(key), "%s.unique", prefix); + ret = dict_get_uint64(dict, key, &unique); + if (ret) + return; - if (ret) { - gf_log ("", GF_LOG_ERROR, - "Unable to allocate memory"); - goto out; - } else { - dict->extra_stdfree = rsp.dict.dict_val; - } + /* + snprintf (key, sizeof (key), "%s.op", prefix); + ret = dict_get_str (dict, key, &op); + if (ret) + return; + */ -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_vol_profile (dict, rsp.op_ret, - rsp.op_errno, - rsp.op_errstr); - if (ret) - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); - goto out; - } -#endif + snprintf(key, sizeof(key), "%s.count", prefix); + ret = dict_get_int32(dict, key, &count); + if (ret) + return; - ret = dict_get_str (dict, "volname", &volname); - if (ret) - goto out; + cli_out(" UID : %d", uid); + cli_out(" GID : %d", gid); + cli_out(" PID : %d", pid); + cli_out(" Unique : %" PRIu64, unique); + // cli_out ("\tOp : %s", op); + cli_out(" Frames : %d", count); - ret = dict_get_int32 (dict, "op", (int32_t*)&op); - if (ret) - goto out; + for (i = 0; i < count; i++) { + cli_out(" Frame %d", i + 1); - if (rsp.op_ret && strcmp (rsp.op_errstr, "")) { - cli_err ("%s", rsp.op_errstr); - } else { - switch (op) { - case GF_CLI_STATS_START: - cli_out ("Starting volume profile on %s has been %s ", - volname, - (rsp.op_ret) ? 
"unsuccessful": "successful"); - break; - case GF_CLI_STATS_STOP: - cli_out ("Stopping volume profile on %s has been %s ", - volname, - (rsp.op_ret) ? "unsuccessful": "successful"); - break; - case GF_CLI_STATS_INFO: - break; - default: - cli_out ("volume profile on %s has been %s ", - volname, - (rsp.op_ret) ? "unsuccessful": "successful"); - break; - } - } + snprintf(key, sizeof(key), "%s.frame%d", prefix, i); + cli_print_volume_status_call_frame(dict, key); + } - if (rsp.op_ret) { - ret = rsp.op_ret; - goto out; - } + cli_out(" "); +} - if (op != GF_CLI_STATS_INFO) { - ret = 0; - goto out; +static void +cli_print_volume_status_callpool(dict_t *dict, gf_boolean_t notbrick) +{ + int ret = -1; + char *volname = NULL; + int brick_index_max = -1; + int other_count = 0; + int index_max = 0; + char *hostname = NULL; + char *path = NULL; + int online = -1; + int call_count = 0; + char key[64] = { + 0, + }; + int i = 0; + int j = 0; + + GF_ASSERT(dict); + + ret = dict_get_str_sizen(dict, "volname", &volname); + if (ret) + goto out; + cli_out("Pending calls for volume %s", volname); + + ret = dict_get_int32_sizen(dict, "brick-index-max", &brick_index_max); + if (ret) + goto out; + ret = dict_get_int32_sizen(dict, "other-count", &other_count); + if (ret) + goto out; + + index_max = brick_index_max + other_count; + + for (i = 0; i <= index_max; i++) { + cli_out("----------------------------------------------"); + + snprintf(key, sizeof(key), "brick%d.hostname", i); + ret = dict_get_str(dict, key, &hostname); + if (ret) + goto out; + snprintf(key, sizeof(key), "brick%d.path", i); + ret = dict_get_str(dict, key, &path); + if (ret) + goto out; + + if (notbrick) + cli_out("%s : %s", hostname, path); + else + cli_out("Brick : %s:%s", hostname, path); + + snprintf(key, sizeof(key), "brick%d.status", i); + ret = dict_get_int32(dict, key, &online); + if (ret) + goto out; + if (!online) { + if (notbrick) + cli_out("%s is offline", hostname); + else + cli_out("Brick is offline"); + continue; } - ret = dict_get_int32 (dict, "count", &brick_count); + snprintf(key, sizeof(key), "brick%d.callpool.count", i); + ret = dict_get_int32(dict, key, &call_count); if (ret) - goto out; + goto out; + cli_out("Pending calls: %d", call_count); - if (!brick_count) { - cli_out ("All bricks of volume %s are down.", volname); - goto out; - } + if (0 == call_count) + continue; - while (i <= brick_count) { - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%d-brick", i); - ret = dict_get_str (dict, key, &brick); - if (ret) { - gf_log ("cli", GF_LOG_ERROR, "Couldn't get brick name"); - goto out; - } + for (j = 0; j < call_count; j++) { + cli_out("Call Stack%d", j + 1); - ret = dict_get_str_boolean (dict, "nfs", _gf_false); - if (ret) - snprintf (str, sizeof (str), "NFS Server : %s", brick); - else - snprintf (str, sizeof (str), "Brick: %s", brick); - cli_out ("%s", str); - memset (str, '-', strlen (str)); - cli_out ("%s", str); - - snprintf (key, sizeof (key), "%d-cumulative", i); - ret = dict_get_int32 (dict, key, &interval); - if (ret == 0) { - cmd_profile_volume_brick_out (dict, i, interval); - } - snprintf (key, sizeof (key), "%d-interval", i); - ret = dict_get_int32 (dict, key, &interval); - if (ret == 0) { - cmd_profile_volume_brick_out (dict, i, interval); - } - i++; + snprintf(key, sizeof(key), "brick%d.callpool.stack%d", i, j); + cli_print_volume_status_call_stack(dict, key); } - ret = rsp.op_ret; + } out: - if (dict) - dict_unref (dict); - if (rsp.op_errstr) - free (rsp.op_errstr); - 
cli_cmd_broadcast_response (ret); - return ret; + cli_out("----------------------------------------------"); + return; } -int32_t -gf_cli3_1_profile_volume (call_frame_t *frame, xlator_t *this, void *data) +static void +cli_print_volume_status_tasks(dict_t *dict) { - int ret = -1; - gf_cli_req req = {{0,}}; - dict_t *dict = NULL; + int ret = -1; + int i = 0; + int j = 0; + int count = 0; + int task_count = 0; + int status = 0; + char *op = NULL; + char *task_id_str = NULL; + char *volname = NULL; + char key[64] = { + 0, + }; + char task[32] = { + 0, + }; + char *brick = NULL; + + ret = dict_get_int32_sizen(dict, "tasks", &task_count); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Failed to get tasks count"); + return; + } - GF_ASSERT (frame); - GF_ASSERT (this); - GF_ASSERT (data); + ret = dict_get_str_sizen(dict, "volname", &volname); + if (ret) + goto out; - if (!frame || !this || !data) - goto out; - dict = data; + cli_out("Task Status of Volume %s", volname); + cli_print_line(CLI_BRICK_STATUS_LINE_LEN); - ret = dict_allocate_and_serialize (dict, - &req.dict.dict_val, - (size_t *)&req.dict.dict_len); + if (task_count == 0) { + cli_out("There are no active volume tasks"); + cli_out(" "); + return; + } - if (ret < 0) { - gf_log (this->name, GF_LOG_ERROR, - "failed to serialize the data"); + for (i = 0; i < task_count; i++) { + snprintf(key, sizeof(key), "task%d.type", i); + ret = dict_get_str(dict, key, &op); + if (ret) + return; + cli_out("%-20s : %-20s", "Task", op); + + snprintf(key, sizeof(key), "task%d.id", i); + ret = dict_get_str(dict, key, &task_id_str); + if (ret) + return; + cli_out("%-20s : %-20s", "ID", task_id_str); + + snprintf(key, sizeof(key), "task%d.status", i); + ret = dict_get_int32(dict, key, &status); + if (ret) + return; + + snprintf(task, sizeof(task), "task%d", i); + if (!strcmp(op, "Remove brick")) { + snprintf(key, sizeof(key), "%s.count", task); + ret = dict_get_int32(dict, key, &count); + if (ret) goto out; - } + cli_out("%-20s", "Removed bricks:"); - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_PROFILE_VOLUME, NULL, - this, gf_cli3_1_profile_volume_cbk, - (xdrproc_t) xdr_gf_cli_req); + for (j = 1; j <= count; j++) { + snprintf(key, sizeof(key), "%s.brick%d", task, j); + ret = dict_get_str(dict, key, &brick); + if (ret) + goto out; -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + cli_out("%-20s", brick); + } + } + cli_out("%-20s : %-20s", "Status", cli_vol_task_status_str[status]); + cli_out(" "); + } - if (req.dict.dict_val) - GF_FREE (req.dict.dict_val); - return ret; +out: + return; } -int32_t -gf_cli3_1_top_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf_cli_rsp rsp = {0,}; - int ret = -1; - dict_t *dict = NULL; - gf1_cli_stats_op op = GF_CLI_STATS_NONE; - char key[256] = {0}; - int i = 0; - int32_t brick_count = 0; - char brick[1024]; - int32_t members = 0; - char *filename; - char *bricks; - uint64_t value = 0; - int32_t j = 0; - gf1_cli_top_op top_op = GF_CLI_TOP_NONE; - uint64_t nr_open = 0; - uint64_t max_nr_open = 0; - double throughput = 0; - double time = 0; - long int time_sec = 0; - long int time_usec = 0; - struct tm *tm = NULL; - char timestr[256] = {0, }; - char *openfd_str = NULL; - - if (-1 == req->rpc_status) { - goto out; +static int +gf_cli_status_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + int ret = -1; + int brick_index_max = -1; + int other_count = 0; + int index_max = 0; + int i = 0; + int type = -1; + int hot_brick_count = -1; + int 
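[note] cli_print_volume_status_tasks() above trusts the status integer received from glusterd and indexes cli_vol_task_status_str[] with it directly. A bounds-checked lookup, as a defensive sketch (the helper name is an assumption; the table is the one defined at the top of this file):

static const char *
task_status_to_str(int status)
{
    int n = sizeof(cli_vol_task_status_str) /
            sizeof(cli_vol_task_status_str[0]);

    return (status >= 0 && status < n) ? cli_vol_task_status_str[status]
                                       : "unknown";
}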
pid = -1; + uint32_t cmd = 0; + gf_boolean_t notbrick = _gf_false; + char key[64] = { + 0, + }; + char *hostname = NULL; + char *path = NULL; + char *volname = NULL; + dict_t *dict = NULL; + gf_cli_rsp rsp = { + 0, + }; + cli_volume_status_t status = {0}; + cli_local_t *local = NULL; + gf_boolean_t wipe_local = _gf_false; + char msg[1024] = { + 0, + }; + + GF_ASSERT(myframe); + + if (req->rpc_status == -1) + goto out; + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + goto out; + } + + gf_log("cli", GF_LOG_DEBUG, "Received response to status cmd"); + + local = ((call_frame_t *)myframe)->local; + if (!local) { + local = cli_local_get(); + if (!local) { + ret = -1; + gf_log("cli", GF_LOG_ERROR, "Failed to get local"); + goto out; } + wipe_local = _gf_true; + } - gf_log ("cli", GF_LOG_DEBUG, "Received resp to top"); - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "Unable to decode response"); - goto out; - } + if (rsp.op_ret) { + if (strcmp(rsp.op_errstr, "")) + snprintf(msg, sizeof(msg), "%s", rsp.op_errstr); + else + snprintf(msg, sizeof(msg), + "Unable to obtain volume status information."); - if (rsp.op_ret) { - if (strcmp (rsp.op_errstr, "")) - cli_err ("%s", rsp.op_errstr); - cli_err ("volume top unsuccessful"); - ret = rsp.op_ret; - goto out; - } + if (global_state->mode & GLUSTER_MODE_XML) { + if (!local->all) + cli_xml_output_str("volStatus", msg, rsp.op_ret, rsp.op_errno, + rsp.op_errstr); + ret = 0; + goto out; + } + + cli_err("%s", msg); + if (local && local->all) { + ret = 0; + cli_out(" "); + } else + ret = -1; + + goto out; + } + + dict = dict_new(); + if (!dict) { + gf_log(THIS->name, GF_LOG_ERROR, "Failed to create the dict"); + ret = -1; + goto out; + } + + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict); + if (ret) + goto out; + + ret = dict_get_uint32(dict, "cmd", &cmd); + if (ret) + goto out; + + if ((cmd & GF_CLI_STATUS_ALL)) { + if (local && local->dict) { + dict_ref(dict); + ret = dict_set_static_ptr(local->dict, "rsp-dict", dict); + ret = 0; + } else { + gf_log("cli", GF_LOG_ERROR, "local not found"); + ret = -1; + } + goto out; + } + + if (global_state->mode & GLUSTER_MODE_XML) { + if (!local->all) { + ret = cli_xml_output_vol_status_begin(local, rsp.op_ret, + rsp.op_errno, rsp.op_errstr); + if (ret) { + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto xml_end; + } + } + if (cmd & GF_CLI_STATUS_TASKS) { + ret = cli_xml_output_vol_status_tasks_detail(local, dict); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Error outputting to xml"); + goto xml_end; + } + } else { + ret = cli_xml_output_vol_status(local, dict); + if (ret) { + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto xml_end; + } + } + + xml_end: + if (!local->all) { + ret = cli_xml_output_vol_status_end(local); + if (ret) { + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + } + } + goto out; + } + + if ((cmd & GF_CLI_STATUS_NFS) || (cmd & GF_CLI_STATUS_SHD) || + (cmd & GF_CLI_STATUS_QUOTAD) || (cmd & GF_CLI_STATUS_SNAPD) || + (cmd & GF_CLI_STATUS_BITD) || (cmd & GF_CLI_STATUS_SCRUB)) + notbrick = _gf_true; + + switch (cmd & GF_CLI_STATUS_MASK) { + case GF_CLI_STATUS_MEM: + cli_print_volume_status_mem(dict, notbrick); + goto cont; + break; + case GF_CLI_STATUS_CLIENTS: + cli_print_volume_status_clients(dict, notbrick); + goto cont; + break; + case GF_CLI_STATUS_CLIENT_LIST: + cli_print_volume_status_client_list(dict, notbrick); + 
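[note] The notbrick flag computed above only switches row labels (plain hostname versus a "Brick :" prefix). The or-chain is equivalent to a single mask test against the daemon status bits; a sketch of that formulation, using the same GF_CLI_STATUS_* flags:

notbrick = (cmd & (GF_CLI_STATUS_NFS | GF_CLI_STATUS_SHD |
                   GF_CLI_STATUS_QUOTAD | GF_CLI_STATUS_SNAPD |
                   GF_CLI_STATUS_BITD | GF_CLI_STATUS_SCRUB))
               ? _gf_true
               : _gf_false;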
goto cont; + break; + case GF_CLI_STATUS_INODE: + cli_print_volume_status_inode(dict, notbrick); + goto cont; + break; + case GF_CLI_STATUS_FD: + cli_print_volume_status_fd(dict, notbrick); + goto cont; + break; + case GF_CLI_STATUS_CALLPOOL: + cli_print_volume_status_callpool(dict, notbrick); + goto cont; + break; + case GF_CLI_STATUS_TASKS: + cli_print_volume_status_tasks(dict); + goto cont; + break; + default: + break; + } - dict = dict_new (); + ret = dict_get_str_sizen(dict, "volname", &volname); + if (ret) + goto out; - if (!dict) { - ret = -1; - goto out; - } + ret = dict_get_int32_sizen(dict, "brick-index-max", &brick_index_max); + if (ret) + goto out; - ret = dict_unserialize (rsp.dict.dict_val, - rsp.dict.dict_len, - &dict); + ret = dict_get_int32_sizen(dict, "other-count", &other_count); + if (ret) + goto out; - if (ret) { - gf_log ("", GF_LOG_ERROR, - "Unable to allocate memory"); - goto out; - } + index_max = brick_index_max + other_count; - ret = dict_get_int32 (dict, "op", (int32_t*)&op); + ret = dict_get_int32_sizen(dict, "type", &type); + if (ret) + goto out; - if (op != GF_CLI_STATS_TOP) { - ret = 0; - goto out; - } + ret = dict_get_int32_sizen(dict, "hot_brick_count", &hot_brick_count); + if (ret) + goto out; -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_vol_top (dict, rsp.op_ret, - rsp.op_errno, - rsp.op_errstr); - if (ret) { - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); - } - goto out; + cli_out("Status of volume: %s", volname); + + if ((cmd & GF_CLI_STATUS_DETAIL) == 0) { + cli_out("%-*s %s %s %s %s", CLI_VOL_STATUS_BRICK_LEN, + "Gluster process", "TCP Port", "RDMA Port", "Online", "Pid"); + cli_print_line(CLI_BRICK_STATUS_LINE_LEN); + } + + status.brick = GF_MALLOC(PATH_MAX + 256, gf_common_mt_strdup); + if (!status.brick) { + errno = ENOMEM; + ret = -1; + goto out; + } + + for (i = 0; i <= index_max; i++) { + status.rdma_port = 0; + + snprintf(key, sizeof(key), "brick%d.hostname", i); + ret = dict_get_str(dict, key, &hostname); + if (ret) + continue; + + snprintf(key, sizeof(key), "brick%d.path", i); + ret = dict_get_str(dict, key, &path); + if (ret) + continue; + + /* Brick/not-brick is handled separately here as all + * types of nodes are contained in the default output + */ + status.brick[0] = '\0'; + if (!strcmp(hostname, "NFS Server") || + !strcmp(hostname, "Self-heal Daemon") || + !strcmp(hostname, "Quota Daemon") || + !strcmp(hostname, "Snapshot Daemon") || + !strcmp(hostname, "Scrubber Daemon") || + !strcmp(hostname, "Bitrot Daemon")) + snprintf(status.brick, PATH_MAX + 255, "%s on %s", hostname, path); + else { + snprintf(key, sizeof(key), "brick%d.rdma_port", i); + ret = dict_get_int32(dict, key, &(status.rdma_port)); + if (ret) + continue; + snprintf(status.brick, PATH_MAX + 255, "Brick %s:%s", hostname, + path); } -#endif - ret = dict_get_int32 (dict, "count", &brick_count); + snprintf(key, sizeof(key), "brick%d.port", i); + ret = dict_get_int32(dict, key, &(status.port)); if (ret) - goto out; - snprintf (key, sizeof (key), "%d-top-op", 1); - ret = dict_get_int32 (dict, key, (int32_t*)&top_op); + continue; + + snprintf(key, sizeof(key), "brick%d.status", i); + ret = dict_get_int32(dict, key, &(status.online)); if (ret) - goto out; - while (i < brick_count) { - i++; - snprintf (brick, sizeof (brick), "%d-brick", i); - ret = dict_get_str (dict, brick, &bricks); - if (ret) - goto out; - ret = dict_get_str_boolean (dict, "nfs", _gf_false); - if (ret) - cli_out ("NFS Server : %s", bricks); - else - 
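[note] Daemon rows in the default status output are recognised purely by sentinel hostnames, as the strcmp() chain above shows. The same check as a predicate (hypothetical name; <string.h> is already available through the CLI headers):

static gf_boolean_t
status_row_is_daemon(const char *hostname)
{
    return (!strcmp(hostname, "NFS Server") ||
            !strcmp(hostname, "Self-heal Daemon") ||
            !strcmp(hostname, "Quota Daemon") ||
            !strcmp(hostname, "Snapshot Daemon") ||
            !strcmp(hostname, "Scrubber Daemon") ||
            !strcmp(hostname, "Bitrot Daemon"))
               ? _gf_true
               : _gf_false;
}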
cli_out ("Brick: %s", bricks); - - snprintf(key, sizeof (key), "%d-members", i); - ret = dict_get_int32 (dict, key, &members); - - switch (top_op) { - case GF_CLI_TOP_OPEN: - snprintf (key, sizeof (key), "%d-current-open", i); - ret = dict_get_uint64 (dict, key, &nr_open); - if (ret) - break; - snprintf (key, sizeof (key), "%d-max-open", i); - ret = dict_get_uint64 (dict, key, &max_nr_open); - if (ret) - goto out; - snprintf (key, sizeof (key), "%d-max-openfd-time", i); - ret = dict_get_str (dict, key, &openfd_str); - if (ret) - goto out; - cli_out ("Current open fds: %"PRIu64", Max open" - " fds: %"PRIu64", Max openfd time: %s", nr_open, - max_nr_open, openfd_str); - case GF_CLI_TOP_READ: - case GF_CLI_TOP_WRITE: - case GF_CLI_TOP_OPENDIR: - case GF_CLI_TOP_READDIR: - if (!members) { - continue; - } - cli_out ("Count\t\tfilename\n======================="); - break; - case GF_CLI_TOP_READ_PERF: - case GF_CLI_TOP_WRITE_PERF: - snprintf (key, sizeof (key), "%d-throughput", i); - ret = dict_get_double (dict, key, &throughput); - if (!ret) { - snprintf (key, sizeof (key), "%d-time", i); - ret = dict_get_double (dict, key, &time); - } - if (!ret) - cli_out ("Throughput %.2f MBps time %.4f secs", throughput, - time / 1e6); - - if (!members) { - continue; - } - cli_out ("%*s %-*s %-*s", - VOL_TOP_PERF_SPEED_WIDTH, "MBps", - VOL_TOP_PERF_FILENAME_DEF_WIDTH, "Filename", - VOL_TOP_PERF_TIME_WIDTH, "Time"); - cli_out ("%*s %-*s %-*s", - VOL_TOP_PERF_SPEED_WIDTH, "====", - VOL_TOP_PERF_FILENAME_DEF_WIDTH, "========", - VOL_TOP_PERF_TIME_WIDTH, "===="); - break; - default: - goto out; - } + continue; - for (j = 1; j <= members; j++) { - snprintf (key, sizeof (key), "%d-filename-%d", i, j); - ret = dict_get_str (dict, key, &filename); - if (ret) - break; - snprintf (key, sizeof (key), "%d-value-%d", i, j); - ret = dict_get_uint64 (dict, key, &value); - if (ret) - goto out; - if ( top_op == GF_CLI_TOP_READ_PERF || - top_op == GF_CLI_TOP_WRITE_PERF) { - snprintf (key, sizeof (key), "%d-time-sec-%d", i, j); - ret = dict_get_int32 (dict, key, (int32_t *)&time_sec); - if (ret) - goto out; - snprintf (key, sizeof (key), "%d-time-usec-%d", i, j); - ret = dict_get_int32 (dict, key, (int32_t *)&time_usec); - if (ret) - goto out; - tm = localtime (&time_sec); - if (!tm) - goto out; - strftime (timestr, 256, "%Y-%m-%d %H:%M:%S", tm); - snprintf (timestr + strlen (timestr), 256 - strlen (timestr), - ".%"GF_PRI_SUSECONDS, time_usec); - if (strlen (filename) < VOL_TOP_PERF_FILENAME_DEF_WIDTH) - cli_out ("%*"PRIu64" %-*s %-*s", - VOL_TOP_PERF_SPEED_WIDTH, - value, - VOL_TOP_PERF_FILENAME_DEF_WIDTH, - filename, - VOL_TOP_PERF_TIME_WIDTH, - timestr); - else - cli_out ("%*"PRIu64" ...%-*s %-*s", - VOL_TOP_PERF_SPEED_WIDTH, - value, - VOL_TOP_PERF_FILENAME_ALT_WIDTH , - filename + strlen (filename) - - VOL_TOP_PERF_FILENAME_ALT_WIDTH, - VOL_TOP_PERF_TIME_WIDTH, - timestr); - } else { - cli_out ("%"PRIu64"\t\t%s", value, filename); - } - } + snprintf(key, sizeof(key), "brick%d.pid", i); + ret = dict_get_int32(dict, key, &pid); + if (ret) + continue; + if (pid == -1) + ret = gf_asprintf(&(status.pid_str), "%s", "N/A"); + else + ret = gf_asprintf(&(status.pid_str), "%d", pid); + + if (ret == -1) + goto out; + + if ((cmd & GF_CLI_STATUS_DETAIL)) { + ret = cli_get_detail_status(dict, i, &status); + if (ret) + goto out; + cli_print_line(CLI_BRICK_STATUS_LINE_LEN); + cli_print_detailed_status(&status); + } else { + cli_print_brick_status(&status); } - ret = rsp.op_ret; -out: - cli_cmd_broadcast_response (ret); + /* Allocatated 
memory using gf_asprintf*/ + GF_FREE(status.pid_str); + } + cli_out(" "); - if (dict) - dict_unref (dict); + if ((cmd & GF_CLI_STATUS_MASK) == GF_CLI_STATUS_NONE) + cli_print_volume_status_tasks(dict); +cont: + ret = rsp.op_ret; - if (rsp.dict.dict_val) - free (rsp.dict.dict_val); - return ret; +out: + if (dict) + dict_unref(dict); + GF_FREE(status.brick); + if (local && wipe_local) { + cli_local_wipe(local); + } + + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); + return ret; } -int32_t -gf_cli3_1_top_volume (call_frame_t *frame, xlator_t *this, void *data) +static int32_t +gf_cli_status_volume(call_frame_t *frame, xlator_t *this, void *data) { - int ret = -1; - gf_cli_req req = {{0,}}; - dict_t *dict = NULL; + gf_cli_req req = {{ + 0, + }}; + int ret = -1; + dict_t *dict = NULL; + + dict = data; + + ret = cli_to_glusterd(&req, frame, gf_cli_status_cbk, + (xdrproc_t)xdr_gf_cli_req, dict, + GLUSTER_CLI_STATUS_VOLUME, this, cli_rpc_prog, NULL); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + GF_FREE(req.dict.dict_val); + return ret; +} - GF_ASSERT (frame); - GF_ASSERT (this); - GF_ASSERT (data); +static int +gf_cli_status_volume_all(call_frame_t *frame, xlator_t *this, void *data) +{ + int i = 0; + int ret = -1; + int vol_count = -1; + uint32_t cmd = 0; + char key[1024] = {0}; + char *volname = NULL; + void *vol_dict = NULL; + dict_t *dict = NULL; + cli_local_t *local = NULL; - if (!frame || !this || !data) - goto out; - dict = data; + if (!frame) + goto out; - ret = dict_allocate_and_serialize (dict, - &req.dict.dict_val, - (size_t *)&req.dict.dict_len); - if (ret < 0) { - gf_log (this->name, GF_LOG_ERROR, - "failed to serialize the data"); + if (!frame->local) + goto out; - goto out; - } + local = frame->local; + ret = dict_get_uint32(local->dict, "cmd", &cmd); + if (ret) + goto out; - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_PROFILE_VOLUME, NULL, - this, gf_cli3_1_top_volume_cbk, - (xdrproc_t) xdr_gf_cli_req); + local->all = _gf_true; -out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - if (req.dict.dict_val) - GF_FREE (req.dict.dict_val); - return ret; -} + ret = gf_cli_status_volume(frame, this, data); + if (ret) + goto out; -int -gf_cli3_1_getwd_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf1_cli_getwd_rsp rsp = {0,}; - int ret = -1; + ret = dict_get_ptr(local->dict, "rsp-dict", &vol_dict); + if (ret) + goto out; - if (-1 == req->rpc_status) { - goto out; - } + ret = dict_get_int32_sizen((dict_t *)vol_dict, "vol_count", &vol_count); + if (ret) { + cli_err("Failed to get names of volumes"); + goto out; + } - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf1_cli_getwd_rsp); - if (ret < 0 || rsp.op_ret == -1) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; + /* remove the "all" flag in cmd */ + cmd &= ~GF_CLI_STATUS_ALL; + cmd |= GF_CLI_STATUS_VOL; + + if (global_state->mode & GLUSTER_MODE_XML) { + // TODO: Pass proper op_* values + ret = cli_xml_output_vol_status_begin(local, 0, 0, NULL); + if (ret) { + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto xml_end; } + } - gf_log ("cli", GF_LOG_INFO, "Received resp to getwd"); + if (vol_count == 0 && !(global_state->mode & GLUSTER_MODE_XML)) { + cli_err("No volumes present"); + ret = 0; + goto out; + } - cli_out ("%s", rsp.wd); + for (i = 0; i < vol_count; i++) { + dict = dict_new(); + if (!dict) + goto out; - ret = 0; + ret = snprintf(key, sizeof(key), "vol%d", i); + ret = dict_get_strn(vol_dict, key, ret, &volname); + if (ret) + goto out; -out: - 
cli_cmd_broadcast_response (ret); - return ret; -} + ret = dict_set_str_sizen(dict, "volname", volname); + if (ret) + goto out; -int32_t -gf_cli3_1_getwd (call_frame_t *frame, xlator_t *this, void *data) -{ - int ret = -1; - gf1_cli_getwd_req req = {0,}; + ret = dict_set_uint32(dict, "cmd", cmd); + if (ret) + goto out; - GF_ASSERT (frame); - GF_ASSERT (this); + ret = gf_cli_status_volume(frame, this, dict); + if (ret) + goto out; - if (!frame || !this) - goto out; + dict_unref(dict); + } - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_GETWD, NULL, - this, gf_cli3_1_getwd_cbk, - (xdrproc_t) xdr_gf1_cli_getwd_req); +xml_end: + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_vol_status_end(local); + } out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + if (ret) + gf_log("cli", GF_LOG_ERROR, "status all failed"); - return ret; -} + if (vol_dict) + dict_unref(vol_dict); -void -cli_print_volume_status_mempool (dict_t *dict, char *prefix) -{ - int ret = -1; - int32_t mempool_count = 0; - char *name = NULL; - int32_t hotcount = 0; - int32_t coldcount = 0; - uint64_t paddedsizeof = 0; - uint64_t alloccount = 0; - int32_t maxalloc = 0; - uint64_t pool_misses = 0; - int32_t maxstdalloc = 0; - char key[1024] = {0,}; - int i = 0; - - GF_ASSERT (dict); - GF_ASSERT (prefix); - - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.mempool-count",prefix); - ret = dict_get_int32 (dict, key, &mempool_count); - if (ret) - goto out; + if (ret && dict) + dict_unref(dict); - cli_out ("Mempool Stats\n-------------"); - cli_out ("%-30s %9s %9s %12s %10s %8s %8s %12s", "Name", "HotCount", - "ColdCount", "PaddedSizeof", "AllocCount", "MaxAlloc", - "Misses", "Max-StdAlloc"); - cli_out ("%-30s %9s %9s %12s %10s %8s %8s %12s", "----", "--------", - "---------", "------------", "----------", - "--------", "--------", "------------"); - - for (i = 0; i < mempool_count; i++) { - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.pool%d.name", prefix, i); - ret = dict_get_str (dict, key, &name); - if (ret) - goto out; + if (local) + cli_local_wipe(local); - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.pool%d.hotcount", prefix, i); - ret = dict_get_int32 (dict, key, &hotcount); - if (ret) - goto out; + if (frame) + frame->local = NULL; - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.pool%d.coldcount", prefix, i); - ret = dict_get_int32 (dict, key, &coldcount); - if (ret) - goto out; + return ret; +} - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.pool%d.paddedsizeof", - prefix, i); - ret = dict_get_uint64 (dict, key, &paddedsizeof); - if (ret) - goto out; +static int +gf_cli_mount_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf1_cli_mount_rsp rsp = { + 0, + }; + int ret = -1; - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.pool%d.alloccount", prefix, i); - ret = dict_get_uint64 (dict, key, &alloccount); - if (ret) - goto out; + GF_ASSERT(myframe); - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.pool%d.max_alloc", prefix, i); - ret = dict_get_int32 (dict, key, &maxalloc); - if (ret) - goto out; + if (-1 == req->rpc_status) { + goto out; + } - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.pool%d.max-stdalloc", prefix, i); - ret = dict_get_int32 (dict, key, &maxstdalloc); - if (ret) - goto out; + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf1_cli_mount_rsp); + if (ret < 0) { + gf_log(((call_frame_t 
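[note] The two lines of bit surgery above are what turn one "status all" request into a series of per-volume requests. Isolated as a sketch (hypothetical helper; any other bits, e.g. GF_CLI_STATUS_DETAIL, pass through unchanged):

static uint32_t
status_all_to_single(uint32_t cmd)
{
    cmd &= ~GF_CLI_STATUS_ALL; /* drop the all-volumes marker */
    cmd |= GF_CLI_STATUS_VOL;  /* target one named volume     */
    return cmd;
}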
*)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + goto out; + } - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.pool%d.pool-misses", prefix, i); - ret = dict_get_uint64 (dict, key, &pool_misses); - if (ret) - goto out; + gf_log("cli", GF_LOG_INFO, "Received resp to mount"); - cli_out ("%-30s %9d %9d %12"PRIu64" %10"PRIu64" %8d %8"PRIu64 - " %12d", name, hotcount, coldcount, paddedsizeof, - alloccount, maxalloc, pool_misses, maxstdalloc); - } + if (rsp.op_ret == 0) { + ret = 0; + cli_out("%s", rsp.path); + } else { + /* weird sounding but easy to parse... */ + cli_err("%d : failed with this errno (%s)", rsp.op_errno, + strerror(rsp.op_errno)); + ret = -1; + } out: - return; - + cli_cmd_broadcast_response(ret); + if (rsp.path) { + free(rsp.path); + } + return ret; } -void -cli_print_volume_status_mem (dict_t *dict, gf_boolean_t notbrick) +static int32_t +gf_cli_mount(call_frame_t *frame, xlator_t *this, void *data) { - int ret = -1; - char *volname = NULL; - char *hostname = NULL; - char *path = NULL; - int online = -1; - char key[1024] = {0,}; - int brick_index_max = -1; - int other_count = 0; - int index_max = 0; - int val = 0; - int i = 0; + gf1_cli_mount_req req = { + 0, + }; + int ret = -1; + void **dataa = data; + char *label = NULL; + dict_t *dict = NULL; + + if (!frame || !this || !data) + goto out; + + label = dataa[0]; + dict = dataa[1]; + + req.label = label; + ret = dict_allocate_and_serialize(dict, &req.dict.dict_val, + &req.dict.dict_len); + if (ret) { + ret = -1; + goto out; + } + + ret = cli_cmd_submit(NULL, &req, frame, cli_rpc_prog, GLUSTER_CLI_MOUNT, + NULL, this, gf_cli_mount_cbk, + (xdrproc_t)xdr_gf1_cli_mount_req); - GF_ASSERT (dict); +out: + GF_FREE(req.dict.dict_val); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + return ret; +} - ret = dict_get_str (dict, "volname", &volname); - if (ret) - goto out; - cli_out ("Memory status for volume : %s", volname); +static int +gf_cli_umount_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf1_cli_umount_rsp rsp = { + 0, + }; + int ret = -1; - ret = dict_get_int32 (dict, "brick-index-max", &brick_index_max); - if (ret) - goto out; - ret = dict_get_int32 (dict, "other-count", &other_count); - if (ret) - goto out; + GF_ASSERT(myframe); - index_max = brick_index_max + other_count; + if (-1 == req->rpc_status) { + goto out; + } - for (i = 0; i <= index_max; i++) { - cli_out ("----------------------------------------------"); + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf1_cli_umount_rsp); + if (ret < 0) { + gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + goto out; + } - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.hostname", i); - ret = dict_get_str (dict, key, &hostname); - if (ret) - continue; - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.path", i); - ret = dict_get_str (dict, key, &path); - if (ret) - continue; - if (notbrick) - cli_out ("%s : %s", hostname, path); - else - cli_out ("Brick : %s:%s", hostname, path); + gf_log("cli", GF_LOG_INFO, "Received resp to mount"); - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.status", i); - ret = dict_get_int32 (dict, key, &online); - if (ret) - goto out; - if (!online) { - if (notbrick) - cli_out ("%s is offline", hostname); - else - cli_out ("Brick is offline"); - continue; - } + if (rsp.op_ret == 0) + ret = 0; + else { + cli_err("umount failed"); + ret = -1; + } - cli_out ("Mallinfo\n--------"); +out: + 
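[note] gf_cli_mount() above unpacks its opaque data argument as a two-slot array: slot 0 is the mount label, slot 1 the options dict. A caller-side sketch of that convention (hypothetical wrapper; the array can live on the stack because gf_cli_mount() reads both slots before returning):

static int
call_mount(call_frame_t *frame, xlator_t *this, char *label, dict_t *opts)
{
    void *dataa[2] = {label, opts}; /* [0] label, [1] options dict */

    return gf_cli_mount(frame, this, dataa);
}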
cli_cmd_broadcast_response(ret); + return ret; +} - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.mallinfo.arena", i); - ret = dict_get_int32 (dict, key, &val); - if (ret) - goto out; - cli_out ("%-8s : %d","Arena", val); - - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.mallinfo.ordblks", i); - ret = dict_get_int32 (dict, key, &val); - if(ret) - goto out; - cli_out ("%-8s : %d","Ordblks", val); - - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.mallinfo.smblks", i); - ret = dict_get_int32 (dict, key, &val); - if(ret) - goto out; - cli_out ("%-8s : %d","Smblks", val); - - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.mallinfo.hblks", i); - ret = dict_get_int32 (dict, key, &val); - if(ret) - goto out; - cli_out ("%-8s : %d", "Hblks", val); - - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.mallinfo.hblkhd", i); - ret = dict_get_int32 (dict, key, &val); - if (ret) - goto out; - cli_out ("%-8s : %d", "Hblkhd", val); +static int32_t +gf_cli_umount(call_frame_t *frame, xlator_t *this, void *data) +{ + gf1_cli_umount_req req = { + 0, + }; + int ret = -1; + dict_t *dict = NULL; - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.mallinfo.usmblks", i); - ret = dict_get_int32 (dict, key, &val); - if (ret) - goto out; - cli_out ("%-8s : %d", "Usmblks", val); + if (!frame || !this || !data) + goto out; - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.mallinfo.fsmblks", i); - ret = dict_get_int32 (dict, key, &val); - if (ret) - goto out; - cli_out ("%-8s : %d", "Fsmblks", val); + dict = data; - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.mallinfo.uordblks", i); - ret = dict_get_int32 (dict, key, &val); - if (ret) - goto out; - cli_out ("%-8s : %d", "Uordblks", val); + ret = dict_get_str_sizen(dict, "path", &req.path); + if (ret == 0) + ret = dict_get_int32_sizen(dict, "lazy", &req.lazy); - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.mallinfo.fordblks", i); - ret = dict_get_int32 (dict, key, &val); - if (ret) - goto out; - cli_out ("%-8s : %d", "Fordblks", val); + if (ret) { + ret = -1; + goto out; + } - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.mallinfo.keepcost", i); - ret = dict_get_int32 (dict, key, &val); - if (ret) - goto out; - cli_out ("%-8s : %d", "Keepcost", val); + ret = cli_cmd_submit(NULL, &req, frame, cli_rpc_prog, GLUSTER_CLI_UMOUNT, + NULL, this, gf_cli_umount_cbk, + (xdrproc_t)xdr_gf1_cli_umount_req); - cli_out (" "); - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d", i); - cli_print_volume_status_mempool (dict, key); - } out: - cli_out ("----------------------------------------------\n"); - return; + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + return ret; } -void -cli_print_volume_status_clients (dict_t *dict, gf_boolean_t notbrick) -{ - int ret = -1; - char *volname = NULL; - int brick_index_max = -1; - int other_count = 0; - int index_max = 0; - char *hostname = NULL; - char *path = NULL; - int online = -1; - int client_count = 0; - char *clientname = NULL; - uint64_t bytesread = 0; - uint64_t byteswrite = 0; - char key[1024] = {0,}; - int i = 0; - int j = 0; - - GF_ASSERT (dict); - - ret = dict_get_str (dict, "volname", &volname); +static void +cmd_heal_volume_statistics_out(dict_t *dict, int brick) +{ + uint64_t num_entries = 0; + int ret = 0; + char key[256] = {0}; + char *hostname = NULL; + uint64_t i = 0; 
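[note] gf_cli_umount() above reads exactly two keys from its dict: "path" and "lazy". A sketch of building that request dict (hypothetical helper; dict_set_str_sizen() stores the pointer without copying, so the path string must outlive the dict):

static dict_t *
build_umount_req(char *path, int lazy)
{
    dict_t *dict = dict_new();

    if (!dict)
        return NULL;
    if (dict_set_str_sizen(dict, "path", path) ||
        dict_set_int32(dict, "lazy", lazy)) {
        dict_unref(dict);
        return NULL;
    }
    return dict;
}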
+ uint64_t healed_count = 0; + uint64_t split_brain_count = 0; + uint64_t heal_failed_count = 0; + char *start_time_str = NULL; + char *end_time_str = NULL; + char *crawl_type = NULL; + int progress = -1; + + snprintf(key, sizeof key, "%d-hostname", brick); + ret = dict_get_str(dict, key, &hostname); + if (ret) + goto out; + cli_out("------------------------------------------------"); + cli_out("\nCrawl statistics for brick no %d", brick); + cli_out("Hostname of brick %s", hostname); + + snprintf(key, sizeof key, "statistics-%d-count", brick); + ret = dict_get_uint64(dict, key, &num_entries); + if (ret) + goto out; + + for (i = 0; i < num_entries; i++) { + snprintf(key, sizeof key, "statistics_crawl_type-%d-%" PRIu64, brick, + i); + ret = dict_get_str(dict, key, &crawl_type); if (ret) - goto out; - cli_out ("Client connections for volume %s", volname); + goto out; - ret = dict_get_int32 (dict, "brick-index-max", &brick_index_max); + snprintf(key, sizeof key, "statistics_healed_cnt-%d-%" PRIu64, brick, + i); + ret = dict_get_uint64(dict, key, &healed_count); if (ret) - goto out; - ret = dict_get_int32 (dict, "other-count", &other_count); - if (ret) - goto out; + goto out; - index_max = brick_index_max + other_count; + snprintf(key, sizeof key, "statistics_sb_cnt-%d-%" PRIu64, brick, i); + ret = dict_get_uint64(dict, key, &split_brain_count); + if (ret) + goto out; + snprintf(key, sizeof key, "statistics_heal_failed_cnt-%d-%" PRIu64, + brick, i); + ret = dict_get_uint64(dict, key, &heal_failed_count); + if (ret) + goto out; + snprintf(key, sizeof key, "statistics_strt_time-%d-%" PRIu64, brick, i); + ret = dict_get_str(dict, key, &start_time_str); + if (ret) + goto out; + snprintf(key, sizeof key, "statistics_end_time-%d-%" PRIu64, brick, i); + ret = dict_get_str(dict, key, &end_time_str); + if (ret) + goto out; + snprintf(key, sizeof key, "statistics_inprogress-%d-%" PRIu64, brick, + i); + ret = dict_get_int32(dict, key, &progress); + if (ret) + goto out; - for (i = 0; i <= index_max; i++) { - cli_out ("----------------------------------------------"); + cli_out("\nStarting time of crawl: %s", start_time_str); + if (progress == 1) + cli_out("Crawl is in progress"); + else + cli_out("Ending time of crawl: %s", end_time_str); - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.hostname", i); - ret = dict_get_str (dict, key, &hostname); - if (ret) - goto out; - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.path", i); - ret = dict_get_str (dict, key, &path); - if (ret) - goto out; + cli_out("Type of crawl: %s", crawl_type); + cli_out("No. of entries healed: %" PRIu64, healed_count); + cli_out("No. of entries in split-brain: %" PRIu64, split_brain_count); + cli_out("No. 
of heal failed entries: %" PRIu64, heal_failed_count); + } - if (notbrick) - cli_out ("%s : %s", hostname, path); - else - cli_out ("Brick : %s:%s", hostname, path); +out: + return; +} - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.status", i); - ret = dict_get_int32 (dict, key, &online); - if (ret) - goto out; - if (!online) { - if (notbrick) - cli_out ("%s is offline", hostname); - else - cli_out ("Brick is offline"); - continue; - } +static void +cmd_heal_volume_brick_out(dict_t *dict, int brick) +{ + uint64_t num_entries = 0; + int ret = 0; + char key[64] = {0}; + char *hostname = NULL; + char *path = NULL; + char *status = NULL; + uint64_t i = 0; + uint32_t time = 0; + char timestr[GF_TIMESTR_SIZE] = {0}; + char *shd_status = NULL; + + snprintf(key, sizeof key, "%d-hostname", brick); + ret = dict_get_str(dict, key, &hostname); + if (ret) + goto out; + snprintf(key, sizeof key, "%d-path", brick); + ret = dict_get_str(dict, key, &path); + if (ret) + goto out; + cli_out("\nBrick %s:%s", hostname, path); + + snprintf(key, sizeof key, "%d-status", brick); + ret = dict_get_str(dict, key, &status); + if (status && status[0] != '\0') + cli_out("Status: %s", status); + + snprintf(key, sizeof key, "%d-shd-status", brick); + ret = dict_get_str(dict, key, &shd_status); + + if (!shd_status) { + snprintf(key, sizeof key, "%d-count", brick); + ret = dict_get_uint64(dict, key, &num_entries); + cli_out("Number of entries: %" PRIu64, num_entries); - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.clientcount", i); - ret = dict_get_int32 (dict, key, &client_count); - if (ret) - goto out; - - cli_out ("Clients connected : %d", client_count); - if (client_count == 0) - continue; - - cli_out ("%-48s %15s %15s", "Hostname", "BytesRead", - "BytesWritten"); - cli_out ("%-48s %15s %15s", "--------", "---------", - "------------"); - for (j =0; j < client_count; j++) { - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), - "brick%d.client%d.hostname", i, j); - ret = dict_get_str (dict, key, &clientname); - if (ret) - goto out; - - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), - "brick%d.client%d.bytesread", i, j); - ret = dict_get_uint64 (dict, key, &bytesread); - if (ret) - goto out; - - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), - "brick%d.client%d.byteswrite", i, j); - ret = dict_get_uint64 (dict, key, &byteswrite); - if (ret) - goto out; - - cli_out ("%-48s %15"PRIu64" %15"PRIu64, - clientname, bytesread, byteswrite); + for (i = 0; i < num_entries; i++) { + snprintf(key, sizeof key, "%d-%" PRIu64, brick, i); + ret = dict_get_str(dict, key, &path); + if (ret) + continue; + time = 0; + snprintf(key, sizeof key, "%d-%" PRIu64 "-time", brick, i); + ret = dict_get_uint32(dict, key, &time); + if (ret || !time) { + cli_out("%s", path); + } else { + gf_time_fmt(timestr, sizeof timestr, time, gf_timefmt_FT); + if (i == 0) { + cli_out("at path on brick"); + cli_out("-----------------------------------"); } + cli_out("%s %s", timestr, path); + } } + } + out: - cli_out ("----------------------------------------------\n"); - return; + return; } -void -cli_print_volume_status_inode_entry (dict_t *dict, char *prefix) +static void +cmd_heal_volume_statistics_heal_count_out(dict_t *dict, int brick) { - int ret = -1; - char key[1024] = {0,}; - char *gfid = NULL; - uint64_t nlookup = 0; - uint32_t ref = 0; - int ia_type = 0; - char inode_type; - - GF_ASSERT (dict); - GF_ASSERT (prefix); - - memset (key, 0, sizeof (key)); - snprintf 
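[note] The crawl statistics printed above live under composite keys of the form "statistics_<field>-<brick>-<run>". One field read back, as a sketch (the helper name is an assumption; PRIu64 comes from <inttypes.h>, already pulled in by the glusterfs headers):

static int
get_healed_count(dict_t *dict, int brick, uint64_t run, uint64_t *healed)
{
    char key[256];

    snprintf(key, sizeof(key), "statistics_healed_cnt-%d-%" PRIu64,
             brick, run);
    return dict_get_uint64(dict, key, healed);
}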
(key, sizeof (key), "%s.gfid", prefix); - ret = dict_get_str (dict, key, &gfid); - if (ret) - goto out; - - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.nlookup", prefix); - ret = dict_get_uint64 (dict, key, &nlookup); + uint64_t num_entries = 0; + int ret = 0; + char key[64] = {0}; + char *hostname = NULL; + char *path = NULL; + char *status = NULL; + char *shd_status = NULL; + + snprintf(key, sizeof key, "%d-hostname", brick); + ret = dict_get_str(dict, key, &hostname); + if (ret) + goto out; + snprintf(key, sizeof key, "%d-path", brick); + ret = dict_get_str(dict, key, &path); + if (ret) + goto out; + cli_out("\nBrick %s:%s", hostname, path); + + snprintf(key, sizeof key, "%d-status", brick); + ret = dict_get_str(dict, key, &status); + if (status && strlen(status)) + cli_out("Status: %s", status); + + snprintf(key, sizeof key, "%d-shd-status", brick); + ret = dict_get_str(dict, key, &shd_status); + + if (!shd_status) { + snprintf(key, sizeof key, "%d-hardlinks", brick); + ret = dict_get_uint64(dict, key, &num_entries); if (ret) - goto out; + cli_out("No gathered input for this brick"); + else + cli_out("Number of entries: %" PRIu64, num_entries); + } - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.ref", prefix); - ret = dict_get_uint32 (dict, key, &ref); - if (ret) - goto out; +out: + return; +} - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.ia_type", prefix); - ret = dict_get_int32 (dict, key, &ia_type); - if (ret) - goto out; +static int +gf_is_cli_heal_get_command(gf_xl_afr_op_t heal_op) +{ + /* If the command is get command value is 1 otherwise 0, for + invalid commands -1 */ + static int get_cmds[GF_SHD_OP_HEAL_DISABLE + 1] = { + [GF_SHD_OP_INVALID] = -1, + [GF_SHD_OP_HEAL_INDEX] = 0, + [GF_SHD_OP_HEAL_FULL] = 0, + [GF_SHD_OP_INDEX_SUMMARY] = 1, + [GF_SHD_OP_SPLIT_BRAIN_FILES] = 1, + [GF_SHD_OP_STATISTICS] = 1, + [GF_SHD_OP_STATISTICS_HEAL_COUNT] = 1, + [GF_SHD_OP_STATISTICS_HEAL_COUNT_PER_REPLICA] = 1, + [GF_SHD_OP_HEAL_ENABLE] = 0, + [GF_SHD_OP_HEAL_DISABLE] = 0, + }; + + if (heal_op > GF_SHD_OP_INVALID && heal_op <= GF_SHD_OP_HEAL_DISABLE) + return get_cmds[heal_op] == 1; + return _gf_false; +} - switch (ia_type) { - case IA_IFREG: - inode_type = 'R'; - break; - case IA_IFDIR: - inode_type = 'D'; - break; - case IA_IFLNK: - inode_type = 'L'; - break; - case IA_IFBLK: - inode_type = 'B'; - break; - case IA_IFCHR: - inode_type = 'C'; - break; - case IA_IFIFO: - inode_type = 'F'; - break; - case IA_IFSOCK: - inode_type = 'S'; - break; - default: - inode_type = 'I'; - break; +int +gf_cli_heal_volume_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + cli_local_t *local = NULL; + char *volname = NULL; + call_frame_t *frame = NULL; + dict_t *dict = NULL; + int brick_count = 0; + int i = 0; + gf_xl_afr_op_t heal_op = GF_SHD_OP_INVALID; + const char *operation = NULL; + const char *substr = NULL; + const char *heal_op_str = NULL; + + GF_ASSERT(myframe); + + if (-1 == req->rpc_status) { + goto out; + } + + frame = myframe; + + GF_ASSERT(frame->local); + + local = frame->local; + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL); + goto out; + } + + ret = dict_get_int32_sizen(local->dict, "heal-op", (int32_t *)&heal_op); + // TODO: Proper XML output + //#if (HAVE_LIB_XML) + // if (global_state->mode & GLUSTER_MODE_XML) { + // ret = cli_xml_output_dict ("volHeal", dict, 
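[note] gf_is_cli_heal_get_command() above is what later decides whether a heal response carries a payload dict to unserialize or is a bare trigger acknowledgement. A small usage example (ops chosen from those handled in this file):

gf_xl_afr_op_t ops[] = {GF_SHD_OP_HEAL_FULL, GF_SHD_OP_INDEX_SUMMARY,
                        GF_SHD_OP_STATISTICS};
int i;

for (i = 0; i < 3; i++)
    cli_out("%d -> %s", ops[i],
            gf_is_cli_heal_get_command(ops[i]) ? "get (has payload)"
                                               : "trigger (status only)");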
rsp.op_ret, + // rsp.op_errno, rsp.op_errstr); + // if (ret) + // gf_log ("cli", GF_LOG_ERROR, XML_ERROR); + // goto out; + // } + //#endif + + ret = dict_get_str_sizen(local->dict, "volname", &volname); + if (ret) { + gf_log(frame->this->name, GF_LOG_ERROR, "failed to get volname"); + goto out; + } + + gf_log("cli", GF_LOG_INFO, "Received resp to heal volume"); + + operation = "Gathering "; + substr = ""; + switch (heal_op) { + case GF_SHD_OP_HEAL_INDEX: + operation = "Launching heal operation "; + heal_op_str = "to perform index self heal"; + substr = "\nUse heal info commands to check status."; + break; + case GF_SHD_OP_HEAL_FULL: + operation = "Launching heal operation "; + heal_op_str = "to perform full self heal"; + substr = "\nUse heal info commands to check status."; + break; + case GF_SHD_OP_INDEX_SUMMARY: + heal_op_str = "list of entries to be healed"; + break; + case GF_SHD_OP_SPLIT_BRAIN_FILES: + heal_op_str = "list of split brain entries"; + break; + case GF_SHD_OP_STATISTICS: + heal_op_str = "crawl statistics"; + break; + case GF_SHD_OP_STATISTICS_HEAL_COUNT: + heal_op_str = "count of entries to be healed"; + break; + case GF_SHD_OP_STATISTICS_HEAL_COUNT_PER_REPLICA: + heal_op_str = "count of entries to be healed per replica"; + break; + case GF_SHD_OP_SBRAIN_HEAL_FROM_BIGGER_FILE: + case GF_SHD_OP_SBRAIN_HEAL_FROM_LATEST_MTIME: + case GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK: + case GF_SHD_OP_HEAL_SUMMARY: + case GF_SHD_OP_HEALED_FILES: + case GF_SHD_OP_HEAL_FAILED_FILES: + /* These cases are never hit; they're coded just to silence the + * compiler warnings.*/ + break; + + case GF_SHD_OP_INVALID: + heal_op_str = "invalid heal op"; + break; + case GF_SHD_OP_HEAL_ENABLE: + operation = ""; + heal_op_str = "Enable heal"; + break; + case GF_SHD_OP_HEAL_DISABLE: + operation = ""; + heal_op_str = "Disable heal"; + break; + case GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE: + operation = ""; + heal_op_str = "Enable granular entry heal"; + break; + case GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE: + operation = ""; + heal_op_str = "Disable granular entry heal"; + break; + } + + if (rsp.op_ret) { + if (strcmp(rsp.op_errstr, "")) { + cli_err("%s%s on volume %s has been unsuccessful:", operation, + heal_op_str, volname); + cli_err("%s", rsp.op_errstr); } + ret = rsp.op_ret; + goto out; + } else { + cli_out("%s%s on volume %s has been successful %s", operation, + heal_op_str, volname, substr); + } + + ret = rsp.op_ret; + if (!gf_is_cli_heal_get_command(heal_op)) + goto out; + + dict = dict_new(); + if (!dict) { + ret = -1; + goto out; + } + + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict); + + if (ret) { + gf_log("", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL); + goto out; + } + + ret = dict_get_int32_sizen(dict, "count", &brick_count); + if (ret) + goto out; + + if (!brick_count) { + cli_err("All bricks of volume %s are down.", volname); + ret = -1; + goto out; + } + + switch (heal_op) { + case GF_SHD_OP_STATISTICS: + for (i = 0; i < brick_count; i++) + cmd_heal_volume_statistics_out(dict, i); + break; + case GF_SHD_OP_STATISTICS_HEAL_COUNT: + case GF_SHD_OP_STATISTICS_HEAL_COUNT_PER_REPLICA: + for (i = 0; i < brick_count; i++) + cmd_heal_volume_statistics_heal_count_out(dict, i); + break; + case GF_SHD_OP_INDEX_SUMMARY: + case GF_SHD_OP_SPLIT_BRAIN_FILES: + for (i = 0; i < brick_count; i++) + cmd_heal_volume_brick_out(dict, i); + break; + default: + break; + } - cli_out ("%-40s %14"PRIu64" %14"PRIu32" %9c", - gfid, nlookup, ref, inode_type); + ret = rsp.op_ret; out: - return; - + 
cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); + if (dict) + dict_unref(dict); + return ret; } -void -cli_print_volume_status_itables (dict_t *dict, char *prefix) +static int32_t +gf_cli_heal_volume(call_frame_t *frame, xlator_t *this, void *data) { - int ret = -1; - char key[1024] = {0,}; - uint32_t active_size = 0; - uint32_t lru_size = 0; - uint32_t purge_size = 0; - int i =0; + gf_cli_req req = {{ + 0, + }}; + int ret = 0; + dict_t *dict = NULL; - GF_ASSERT (dict); - GF_ASSERT (prefix); + dict = data; - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.active_size", prefix); - ret = dict_get_uint32 (dict, key, &active_size); - if (ret) - goto out; - if (active_size != 0) { - cli_out ("Active inodes:"); - cli_out ("%-40s %14s %14s %9s", "GFID", "Lookups", "Ref", - "IA type"); - cli_out ("%-40s %14s %14s %9s", "----", "-------", "---", - "-------"); - } - for (i = 0; i < active_size; i++) { - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.active%d", prefix, i); - cli_print_volume_status_inode_entry (dict, key); - } - cli_out (" "); + ret = cli_to_glusterd(&req, frame, gf_cli_heal_volume_cbk, + (xdrproc_t)xdr_gf_cli_req, dict, + GLUSTER_CLI_HEAL_VOLUME, this, cli_rpc_prog, NULL); - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.lru_size", prefix); - ret = dict_get_uint32 (dict, key, &lru_size); - if (ret) - goto out; - if (lru_size != 0) { - cli_out ("LRU inodes:"); - cli_out ("%-40s %14s %14s %9s", "GFID", "Lookups", "Ref", - "IA type"); - cli_out ("%-40s %14s %14s %9s", "----", "-------", "---", - "-------"); - } - for (i = 0; i < lru_size; i++) { - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.lru%d", prefix, i); - cli_print_volume_status_inode_entry (dict, key); - } - cli_out (" "); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + + GF_FREE(req.dict.dict_val); + + return ret; +} - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.purge_size", prefix); - ret = dict_get_uint32 (dict, key, &purge_size); +static int32_t +gf_cli_statedump_volume_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + char msg[1024] = "Volume statedump successful"; + + GF_ASSERT(myframe); + + if (-1 == req->rpc_status) + goto out; + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + goto out; + } + gf_log("cli", GF_LOG_DEBUG, "Received response to statedump"); + if (rsp.op_ret) + snprintf(msg, sizeof(msg), "%s", rsp.op_errstr); + + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_str("volStatedump", msg, rsp.op_ret, rsp.op_errno, + rsp.op_errstr); if (ret) - goto out; - if (purge_size != 0) { - cli_out ("Purged inodes:"); - cli_out ("%-40s %14s %14s %9s", "GFID", "Lookups", "Ref", - "IA type"); - cli_out ("%-40s %14s %14s %9s", "----", "-------", "---", - "-------"); - } - for (i = 0; i < purge_size; i++) { - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.purge%d", prefix, i); - cli_print_volume_status_inode_entry (dict, key); - } + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; + } + + if (rsp.op_ret) + cli_err("volume statedump: failed: %s", msg); + else + cli_out("volume statedump: success"); + ret = rsp.op_ret; out: - return; + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); + return ret; } -void -cli_print_volume_status_inode (dict_t *dict, gf_boolean_t notbrick) 
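[note] gf_cli_heal_volume() and gf_cli_statedump_volume() above share one submission shape; the generic form, as a sketch (hypothetical wrapper; the callback pointer type matches the cbk functions in this file, and the argument order is exactly that of the calls above):

static int
submit_vol_op(call_frame_t *frame, xlator_t *this, dict_t *dict, int procnum,
              int (*cbk)(struct rpc_req *, struct iovec *, int, void *))
{
    gf_cli_req req = {{0}};
    int ret = cli_to_glusterd(&req, frame, cbk, (xdrproc_t)xdr_gf_cli_req,
                              dict, procnum, this, cli_rpc_prog, NULL);

    GF_FREE(req.dict.dict_val); /* serialized payload, safe to free now */
    return ret;
}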
+static int32_t +gf_cli_statedump_volume(call_frame_t *frame, xlator_t *this, void *data) { - int ret = -1; - char *volname = NULL; - int brick_index_max = -1; - int other_count = 0; - int index_max = 0; - char *hostname = NULL; - char *path = NULL; - int online = -1; - int conn_count = 0; - char key[1024] = {0,}; - int i = 0; - int j = 0; + gf_cli_req req = {{ + 0, + }}; + dict_t *options = NULL; + int ret = -1; - GF_ASSERT (dict); + options = data; - ret = dict_get_str (dict, "volname", &volname); - if (ret) - goto out; - cli_out ("Inode tables for volume %s", volname); + ret = cli_to_glusterd( + &req, frame, gf_cli_statedump_volume_cbk, (xdrproc_t)xdr_gf_cli_req, + options, GLUSTER_CLI_STATEDUMP_VOLUME, this, cli_rpc_prog, NULL); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + + GF_FREE(req.dict.dict_val); + return ret; +} - ret = dict_get_int32 (dict, "brick-index-max", &brick_index_max); +static int32_t +gf_cli_list_volume_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + int ret = -1; + gf_cli_rsp rsp = { + 0, + }; + dict_t *dict = NULL; + int vol_count = 0; + ; + char *volname = NULL; + char key[1024] = { + 0, + }; + int i = 0; + + GF_ASSERT(myframe); + + if (-1 == req->rpc_status) + goto out; + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + goto out; + } + + dict = dict_new(); + if (!dict) { + ret = -1; + goto out; + } + + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict); + if (ret) { + gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL); + goto out; + } + + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_vol_list(dict, rsp.op_ret, rsp.op_errno, + rsp.op_errstr); if (ret) - goto out; - ret = dict_get_int32 (dict, "other-count", &other_count); + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; + } + + if (rsp.op_ret) + cli_err("%s", rsp.op_errstr); + else { + ret = dict_get_int32_sizen(dict, "count", &vol_count); if (ret) + goto out; + + if (vol_count == 0) { + cli_err("No volumes present in cluster"); + goto out; + } + for (i = 0; i < vol_count; i++) { + ret = snprintf(key, sizeof(key), "volume%d", i); + ret = dict_get_strn(dict, key, ret, &volname); + if (ret) goto out; + cli_out("%s", volname); + } + } - index_max = brick_index_max + other_count; + ret = rsp.op_ret; - for ( i = 0; i <= index_max; i++) { - cli_out ("----------------------------------------------"); +out: + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.hostname", i); - ret = dict_get_str (dict, key, &hostname); - if (ret) - goto out; - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.path", i); - ret = dict_get_str (dict, key, &path); - if (ret) - goto out; - if (notbrick) - cli_out ("%s : %s", hostname, path); - else - cli_out ("Brick : %s:%s", hostname, path); + if (dict) + dict_unref(dict); + return ret; +} - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.status", i); - ret = dict_get_int32 (dict, key, &online); - if (ret) - goto out; - if (!online) { - if (notbrick) - cli_out ("%s is offline", hostname); - else - cli_out ("Brick is offline"); - continue; - } +static int32_t +gf_cli_list_volume(call_frame_t *frame, xlator_t *this, void *data) +{ + int ret = -1; + gf_cli_req req = {{ + 0, + }}; + + ret = cli_cmd_submit(NULL, &req, frame, cli_rpc_prog, + GLUSTER_CLI_LIST_VOLUME, NULL, this, + 
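[note] gf_cli_list_volume_cbk() above reuses snprintf()'s return value (the generated key's length) as the keylen argument of dict_get_strn(), saving a strlen(). The idiom isolated (hypothetical helper; dict is assumed to be the unserialized response dict, and the returned string remains owned by it):

static char *
get_volname_at(dict_t *dict, int i)
{
    char key[32];
    char *volname = NULL;
    int keylen = snprintf(key, sizeof(key), "volume%d", i);

    if (dict_get_strn(dict, key, keylen, &volname))
        return NULL;
    return volname; /* owned by the dict, do not free */
}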
gf_cli_list_volume_cbk, (xdrproc_t)xdr_gf_cli_req); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + return ret; +} - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.conncount", i); - ret = dict_get_int32 (dict, key, &conn_count); - if (ret) - goto out; +static int32_t +gf_cli_clearlocks_volume_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + char *lk_summary = NULL; + dict_t *dict = NULL; + + GF_ASSERT(myframe); + + if (-1 == req->rpc_status) + goto out; + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + goto out; + } + gf_log("cli", GF_LOG_DEBUG, "Received response to clear-locks"); + + if (rsp.op_ret) { + cli_err("Volume clear-locks unsuccessful"); + cli_err("%s", rsp.op_errstr); + + } else { + if (!rsp.dict.dict_len) { + cli_err("Possibly no locks cleared"); + ret = 0; + goto out; + } - for (j = 0; j < conn_count; j++) { - if (conn_count > 1) - cli_out ("Connection %d:", j+1); + dict = dict_new(); - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.conn%d.itable", - i, j); - cli_print_volume_status_itables (dict, key); - cli_out (" "); - } + if (!dict) { + ret = -1; + goto out; } -out: - cli_out ("----------------------------------------------"); - return; -} -void -cli_print_volume_status_fdtable (dict_t *dict, char *prefix) -{ - int ret = -1; - char key[1024] = {0,}; - int refcount = 0; - uint32_t maxfds = 0; - int firstfree = 0; - int openfds = 0; - int fd_pid = 0; - int fd_refcount = 0; - int fd_flags = 0; - int i = 0; - - GF_ASSERT (dict); - GF_ASSERT (prefix); - - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.refcount", prefix); - ret = dict_get_int32 (dict, key, &refcount); - if (ret) - goto out; + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict); - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.maxfds", prefix); - ret = dict_get_uint32 (dict, key, &maxfds); - if (ret) - goto out; + if (ret) { + gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL); + goto out; + } - memset (key, 0 ,sizeof (key)); - snprintf (key, sizeof (key), "%s.firstfree", prefix); - ret = dict_get_int32 (dict, key, &firstfree); - if (ret) - goto out; + ret = dict_get_str(dict, "lk-summary", &lk_summary); + if (ret) { + gf_log("cli", GF_LOG_ERROR, + "Unable to get lock summary from dictionary"); + goto out; + } + cli_out("Volume clear-locks successful"); + cli_out("%s", lk_summary); + } - cli_out ("RefCount = %d MaxFDs = %d FirstFree = %d", - refcount, maxfds, firstfree); + ret = rsp.op_ret; - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.openfds", prefix); - ret = dict_get_int32 (dict, key, &openfds); - if (ret) - goto out; - if (0 == openfds) { - cli_out ("No open fds"); - goto out; - } +out: + if (dict) + dict_unref(dict); + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); + return ret; +} - cli_out ("%-19s %-19s %-19s %-19s", "FD Entry", "PID", - "RefCount", "Flags"); - cli_out ("%-19s %-19s %-19s %-19s", "--------", "---", - "--------", "-----"); +static int32_t +gf_cli_clearlocks_volume(call_frame_t *frame, xlator_t *this, void *data) +{ + gf_cli_req req = {{ + 0, + }}; + dict_t *options = NULL; + int ret = -1; - for (i = 0; i < maxfds ; i++) { - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.fdentry%d.pid", prefix, i); - ret = dict_get_int32 (dict, key, &fd_pid); - if 
(ret)
- continue;

+ options = data;

- memset (key, 0, sizeof (key));
- snprintf (key, sizeof (key), "%s.fdentry%d.refcount",
- prefix, i);
- ret = dict_get_int32 (dict, key, &fd_refcount);
- if (ret)
- continue;
+ ret = cli_to_glusterd(
+ &req, frame, gf_cli_clearlocks_volume_cbk, (xdrproc_t)xdr_gf_cli_req,
+ options, GLUSTER_CLI_CLRLOCKS_VOLUME, this, cli_rpc_prog, NULL);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);

- memset (key, 0, sizeof (key));
- snprintf (key, sizeof (key), "%s.fdentry%d.flags", prefix, i);
- ret = dict_get_int32 (dict, key, &fd_flags);
- if (ret)
- continue;
+ GF_FREE(req.dict.dict_val);
+ return ret;
+}
+
+static int32_t
+cli_snapshot_remove_reply(gf_cli_rsp *rsp, dict_t *dict, call_frame_t *frame)
+{
+ int32_t ret = -1;
+ char *snap_name = NULL;
+ int32_t delete_cmd = -1;
+ cli_local_t *local = NULL;
+
+ GF_ASSERT(frame);
+ GF_ASSERT(rsp);
+ GF_ASSERT(dict);
+
+ local = frame->local;
+
+ ret = dict_get_int32_sizen(dict, "sub-cmd", &delete_cmd);
+ if (ret) {
+ gf_log("cli", GF_LOG_ERROR, "Could not get sub-cmd");
+ goto end;
+ }
+
+ if ((global_state->mode & GLUSTER_MODE_XML) &&
+ (delete_cmd == GF_SNAP_DELETE_TYPE_SNAP)) {
+ ret = cli_xml_output_snap_delete_begin(local, rsp->op_ret,
+ rsp->op_errno, rsp->op_errstr);
+ if (ret) {
+ gf_log("cli", GF_LOG_ERROR,
+ "Failed to create xml output for delete");
+ goto end;
+ }
+ }
+
+ if (rsp->op_ret && !(global_state->mode & GLUSTER_MODE_XML)) {
+ cli_err("snapshot delete: failed: %s",
+ rsp->op_errstr ? rsp->op_errstr
+ : "Please check log file for details");
+ ret = rsp->op_ret;
+ goto out;
+ }
+
+ if (delete_cmd == GF_SNAP_DELETE_TYPE_ALL ||
+ delete_cmd == GF_SNAP_DELETE_TYPE_VOL) {
+ local = ((call_frame_t *)frame)->local;
+ if (!local) {
+ ret = -1;
+ gf_log("cli", GF_LOG_ERROR, "frame->local is NULL");
+ goto out;
+ }
+
+ /* During the first callback of a snapshot delete of type
+ * ALL or VOL, we will get the snapcount and snapnames.
+ * Hence, to make the subsequent RPC calls for the individual
+ * snapshot deletes, we need to save them in the local dictionary. 
+ */ + dict_copy(dict, local->dict); + ret = 0; + goto out; + } - cli_out ("%-19d %-19d %-19d %-19d", i, fd_pid, fd_refcount, - fd_flags); + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_snapshot_delete(local, dict, rsp); + if (ret) { + gf_log("cli", GF_LOG_ERROR, + "Failed to create xml output for snapshot delete command"); + goto out; + } + /* Error out in case of the op already failed */ + if (rsp->op_ret) { + ret = rsp->op_ret; + goto out; + } + } else { + ret = dict_get_str_sizen(dict, "snapname", &snap_name); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Failed to get snapname"); + goto out; } + cli_out("snapshot delete: %s: snap removed successfully", snap_name); + } + ret = 0; + out: - return; + if ((global_state->mode & GLUSTER_MODE_XML) && + (delete_cmd == GF_SNAP_DELETE_TYPE_SNAP)) { + ret = cli_xml_output_snap_delete_end(local); + } +end: + return ret; } -void -cli_print_volume_status_fd (dict_t *dict, gf_boolean_t notbrick) +static int +cli_snapshot_config_display(dict_t *dict, gf_cli_rsp *rsp) { - int ret = -1; - char *volname = NULL; - int brick_index_max = -1; - int other_count = 0; - int index_max = 0; - char *hostname = NULL; - char *path = NULL; - int online = -1; - int conn_count = 0; - char key[1024] = {0,}; - int i = 0; - int j = 0; - - GF_ASSERT (dict); - - ret = dict_get_str (dict, "volname", &volname); - if (ret) + char buf[PATH_MAX] = ""; + char *volname = NULL; + int ret = -1; + int config_command = 0; + uint64_t value = 0; + uint64_t hard_limit = 0; + uint64_t soft_limit = 0; + uint64_t i = 0; + uint64_t voldisplaycount = 0; + char *auto_delete = NULL; + char *snap_activate = NULL; + + GF_ASSERT(dict); + GF_ASSERT(rsp); + + if (rsp->op_ret) { + cli_err("Snapshot Config : failed: %s", + rsp->op_errstr ? rsp->op_errstr + : "Please check log file for details"); + ret = rsp->op_ret; + goto out; + } + + ret = dict_get_int32_sizen(dict, "config-command", &config_command); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Could not fetch config type"); + goto out; + } + + ret = dict_get_uint64(dict, "snap-max-hard-limit", &hard_limit); + /* Ignore the error, as the key specified is optional */ + ret = dict_get_uint64(dict, "snap-max-soft-limit", &soft_limit); + + ret = dict_get_str_sizen(dict, "auto-delete", &auto_delete); + + ret = dict_get_str_sizen(dict, "snap-activate-on-create", &snap_activate); + + if (!hard_limit && !soft_limit && + config_command != GF_SNAP_CONFIG_DISPLAY && !auto_delete && + !snap_activate) { + ret = -1; + gf_log(THIS->name, GF_LOG_ERROR, "Could not fetch config-key"); + goto out; + } + + ret = dict_get_str_sizen(dict, "volname", &volname); + /* Ignore the error, as volname is optional */ + + if (!volname) { + volname = "System"; + } + + switch (config_command) { + case GF_SNAP_CONFIG_TYPE_SET: + if (hard_limit && soft_limit) { + cli_out( + "snapshot config: snap-max-hard-limit " + "& snap-max-soft-limit for system set successfully"); + } else if (hard_limit) { + cli_out( + "snapshot config: snap-max-hard-limit " + "for %s set successfully", + volname); + } else if (soft_limit) { + cli_out( + "snapshot config: snap-max-soft-limit " + "for %s set successfully", + volname); + } else if (auto_delete) { + cli_out("snapshot config: auto-delete successfully set"); + } else if (snap_activate) { + cli_out("snapshot config: activate-on-create successfully set"); + } + break; + + case GF_SNAP_CONFIG_DISPLAY: + cli_out("\nSnapshot System Configuration:"); + ret = dict_get_uint64(dict, "snap-max-hard-limit", &value); + if (ret) { + gf_log("cli", 
GF_LOG_ERROR, + "Could not fetch snap_max_hard_limit for %s", volname); + ret = -1; goto out; - cli_out ("FD tables for volume %s", volname); + } + cli_out("snap-max-hard-limit : %" PRIu64, value); - ret = dict_get_int32 (dict, "brick-index-max", &brick_index_max); - if (ret) - goto out; - ret = dict_get_int32 (dict, "other-count", &other_count); - if (ret) + ret = dict_get_uint64(dict, "snap-max-soft-limit", &soft_limit); + if (ret) { + gf_log("cli", GF_LOG_ERROR, + "Could not fetch snap-max-soft-limit for %s", volname); + ret = -1; goto out; + } + cli_out("snap-max-soft-limit : %" PRIu64 "%%", soft_limit); - index_max = brick_index_max + other_count; + cli_out("auto-delete : %s", auto_delete); - for (i = 0; i <= index_max; i++) { - cli_out ("----------------------------------------------"); + cli_out("activate-on-create : %s\n", snap_activate); - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.hostname", i); - ret = dict_get_str (dict, key, &hostname); - if (ret) - goto out; - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.path", i); - ret = dict_get_str (dict, key, &path); - if (ret) - goto out; + cli_out("Snapshot Volume Configuration:"); - if (notbrick) - cli_out ("%s : %s", hostname, path); - else - cli_out ("Brick : %s:%s", hostname, path); + ret = dict_get_uint64(dict, "voldisplaycount", &voldisplaycount); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Could not fetch voldisplaycount"); + ret = -1; + goto out; + } - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.status", i); - ret = dict_get_int32 (dict, key, &online); - if (ret) - goto out; - if (!online) { - if (notbrick) - cli_out ("%s is offline", hostname); - else - cli_out ("Brick is offline"); - continue; + for (i = 0; i < voldisplaycount; i++) { + ret = snprintf(buf, sizeof(buf), "volume%" PRIu64 "-volname", + i); + ret = dict_get_strn(dict, buf, ret, &volname); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Could not fetch %s", buf); + ret = -1; + goto out; } + cli_out("\nVolume : %s", volname); - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.conncount", i); - ret = dict_get_int32 (dict, key, &conn_count); - if (ret) - goto out; + snprintf(buf, sizeof(buf), + "volume%" PRIu64 "-snap-max-hard-limit", i); + ret = dict_get_uint64(dict, buf, &value); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Could not fetch %s", buf); + ret = -1; + goto out; + } + cli_out("snap-max-hard-limit : %" PRIu64, value); - for (j = 0; j < conn_count; j++) { - cli_out ("Connection %d:", j+1); + snprintf(buf, sizeof(buf), + "volume%" PRIu64 "-active-hard-limit", i); + ret = dict_get_uint64(dict, buf, &value); + if (ret) { + gf_log("cli", GF_LOG_ERROR, + "Could not fetch" + " effective snap_max_hard_limit for %s", + volname); + ret = -1; + goto out; + } + cli_out("Effective snap-max-hard-limit : %" PRIu64, value); - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.conn%d.fdtable", - i, j); - cli_print_volume_status_fdtable (dict, key); - cli_out (" "); + snprintf(buf, sizeof(buf), + "volume%" PRIu64 "-snap-max-soft-limit", i); + ret = dict_get_uint64(dict, buf, &value); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Could not fetch %s", buf); + ret = -1; + goto out; } - } + cli_out("Effective snap-max-soft-limit : %" PRIu64 + " " + "(%" PRIu64 "%%)", + value, soft_limit); + } + break; + default: + break; + } + + ret = 0; out: - cli_out ("----------------------------------------------"); - return; + return ret; } -void 
-cli_print_volume_status_call_frame (dict_t *dict, char *prefix) +/* This function is used to print the volume related information + * of a snap. + * + * arg - 0, dict : Response Dictionary. + * arg - 1, prefix str : snaplist.snap{0..}.vol{0..}.* + */ +static int +cli_get_each_volinfo_in_snap(dict_t *dict, char *keyprefix, + gf_boolean_t snap_driven) { - int ret = -1; - char key[1024] = {0,}; - int ref_count = 0; - char *translator = 0; - int complete = 0; - char *parent = NULL; - char *wind_from = NULL; - char *wind_to = NULL; - char *unwind_from = NULL; - char *unwind_to = NULL; - - if (!dict || !prefix) - return; - - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.refcount", prefix); - ret = dict_get_int32 (dict, key, &ref_count); - if (ret) - return; - - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.translator", prefix); - ret = dict_get_str (dict, key, &translator); - if (ret) - return; + char key[PATH_MAX] = ""; + char *get_buffer = NULL; + int value = 0; + int ret = -1; + char indent[5] = "\t"; + char *volname = NULL; + + GF_ASSERT(dict); + GF_ASSERT(keyprefix); + + if (snap_driven) { + ret = snprintf(key, sizeof(key), "%s.volname", keyprefix); + if (ret < 0) { + goto out; + } - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.complete", prefix); - ret = dict_get_int32 (dict, key, &complete); - if (ret) - return; + ret = dict_get_str(dict, key, &get_buffer); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Failed to get %s", key); + goto out; + } + cli_out("%s" INDENT_MAIN_HEAD "%s", indent, "Snap Volume Name", ":", + get_buffer); - cli_out (" Ref Count = %d", ref_count); - cli_out (" Translator = %s", translator); - cli_out (" Completed = %s", (complete ? "Yes" : "No")); + ret = snprintf(key, sizeof(key), "%s.origin-volname", keyprefix); + if (ret < 0) { + goto out; + } - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.parent", prefix); - ret = dict_get_str (dict, key, &parent); - if (!ret) - cli_out (" Parent = %s", parent); + ret = dict_get_str(dict, key, &volname); + if (ret) { + gf_log("cli", GF_LOG_WARNING, "Failed to get %s", key); + cli_out("%-12s", "Origin:"); + } + cli_out("%s" INDENT_MAIN_HEAD "%s", indent, "Origin Volume name", ":", + volname); - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.windfrom", prefix); - ret = dict_get_str (dict, key, &wind_from); - if (!ret) - cli_out (" Wind From = %s", wind_from); + ret = snprintf(key, sizeof(key), "%s.snapcount", keyprefix); + if (ret < 0) { + goto out; + } - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.windto", prefix); - ret = dict_get_str (dict, key, &wind_to); - if (!ret) - cli_out (" Wind To = %s", wind_to); + ret = dict_get_int32(dict, key, &value); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Failed to get %s", key); + goto out; + } + cli_out("%s%s %s %s %d", indent, "Snaps taken for", volname, ":", + value); - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.unwindfrom", prefix); - ret = dict_get_str (dict, key, &unwind_from); - if (!ret) - cli_out (" Unwind From = %s", unwind_from); + ret = snprintf(key, sizeof(key), "%s.snaps-available", keyprefix); + if (ret < 0) { + goto out; + } - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.unwindto", prefix); - ret = dict_get_str (dict, key, &unwind_to); - if (!ret) - cli_out (" Unwind To = %s", unwind_to); + ret = dict_get_int32(dict, key, &value); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Failed to get %s", key); + goto out; + } 
+ cli_out("%s%s %s %s %d", indent, "Snaps available for", volname, ":", + value); + } + + ret = snprintf(key, sizeof(key), "%s.vol-status", keyprefix); + if (ret < 0) { + goto out; + } + + ret = dict_get_str(dict, key, &get_buffer); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Failed to get %s", key); + goto out; + } + cli_out("%s" INDENT_MAIN_HEAD "%s", indent, "Status", ":", get_buffer); +out: + return ret; } -void -cli_print_volume_status_call_stack (dict_t *dict, char *prefix) +/* This function is used to print snap related information + * arg - 0, dict : Response dictionary. + * arg - 1, prefix_str : snaplist.snap{0..}.* + */ +static int +cli_get_volinfo_in_snap(dict_t *dict, char *keyprefix) { - int ret = -1; - char key[1024] = {0,}; - int uid = 0; - int gid = 0; - int pid = 0; - uint64_t unique = 0; - //char *op = NULL; - int count = 0; - int i = 0; + char key[PATH_MAX] = ""; + int i = 0; + int volcount = 0; + int ret = -1; + + GF_ASSERT(dict); + GF_ASSERT(keyprefix); + + ret = snprintf(key, sizeof(key), "%s.vol-count", keyprefix); + if (ret < 0) { + goto out; + } + + ret = dict_get_int32(dict, key, &volcount); + for (i = 1; i <= volcount; i++) { + ret = snprintf(key, sizeof(key), "%s.vol%d", keyprefix, i); + if (ret < 0) { + goto out; + } + ret = cli_get_each_volinfo_in_snap(dict, key, _gf_true); + if (ret) { + gf_log("cli", GF_LOG_ERROR, + "Could not list details of volume in a snap"); + goto out; + } + cli_out(" "); + } - if (!dict || !prefix) - return; +out: + return ret; +} - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.uid", prefix); - ret = dict_get_int32 (dict, key, &uid); - if (ret) - return; +static int +cli_get_each_snap_info(dict_t *dict, char *prefix_str, gf_boolean_t snap_driven) +{ + char key_buffer[PATH_MAX] = ""; + char *get_buffer = NULL; + int ret = -1; + char indent[5] = ""; + + GF_ASSERT(dict); + GF_ASSERT(prefix_str); + + if (!snap_driven) + strcat(indent, "\t"); + + ret = snprintf(key_buffer, sizeof(key_buffer), "%s.snapname", prefix_str); + if (ret < 0) { + goto out; + } + + ret = dict_get_str(dict, key_buffer, &get_buffer); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Unable to fetch snapname %s ", key_buffer); + goto out; + } + cli_out("%s" INDENT_MAIN_HEAD "%s", indent, "Snapshot", ":", get_buffer); + + ret = snprintf(key_buffer, sizeof(key_buffer), "%s.snap-id", prefix_str); + if (ret < 0) { + goto out; + } + + ret = dict_get_str(dict, key_buffer, &get_buffer); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Unable to fetch snap-id %s ", key_buffer); + goto out; + } + cli_out("%s" INDENT_MAIN_HEAD "%s", indent, "Snap UUID", ":", get_buffer); + + ret = snprintf(key_buffer, sizeof(key_buffer), "%s.snap-desc", prefix_str); + if (ret < 0) { + goto out; + } + + ret = dict_get_str(dict, key_buffer, &get_buffer); + if (!ret) { + /* Ignore error for description */ + cli_out("%s" INDENT_MAIN_HEAD "%s", indent, "Description", ":", + get_buffer); + } + + ret = snprintf(key_buffer, sizeof(key_buffer), "%s.snap-time", prefix_str); + if (ret < 0) { + goto out; + } + + ret = dict_get_str(dict, key_buffer, &get_buffer); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Unable to fetch snap-time %s ", + prefix_str); + goto out; + } + cli_out("%s" INDENT_MAIN_HEAD "%s", indent, "Created", ":", get_buffer); + + if (snap_driven) { + cli_out("%-12s", "Snap Volumes:\n"); + ret = cli_get_volinfo_in_snap(dict, prefix_str); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Unable to list details of the snaps"); + goto out; + } + } +out: + return ret; +} - memset (key, 0, 
sizeof (key)); - snprintf (key, sizeof (key), "%s.gid", prefix); - ret = dict_get_int32 (dict, key, &gid); - if (ret) - return; +/* This is a generic function to print snap related information. + * arg - 0, dict : Response Dictionary + */ +static int +cli_call_snapshot_info(dict_t *dict, gf_boolean_t bool_snap_driven) +{ + int snap_count = 0; + char key[32] = ""; + int ret = -1; + int i = 0; - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.pid", prefix); - ret = dict_get_int32 (dict, key, &pid); - if (ret) - return; + GF_ASSERT(dict); - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.unique", prefix); - ret = dict_get_uint64 (dict, key, &unique); - if (ret) - return; + ret = dict_get_int32_sizen(dict, "snapcount", &snap_count); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Unable to get snapcount"); + goto out; + } - /* - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.op", prefix); - ret = dict_get_str (dict, key, &op); - if (ret) - return; - */ + if (snap_count == 0) { + cli_out("No snapshots present"); + } + for (i = 1; i <= snap_count; i++) { + ret = snprintf(key, sizeof(key), "snap%d", i); + if (ret < 0) { + goto out; + } + ret = cli_get_each_snap_info(dict, key, bool_snap_driven); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Unable to print snap details"); + goto out; + } + } +out: + return ret; +} - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.count", prefix); - ret = dict_get_int32 (dict, key, &count); - if (ret) - return; +static int +cli_get_snaps_in_volume(dict_t *dict) +{ + int ret = -1; + int i = 0; + int count = 0; + int avail = 0; + char key[32] = ""; + char *get_buffer = NULL; + + GF_ASSERT(dict); + + ret = dict_get_str_sizen(dict, "origin-volname", &get_buffer); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Could not fetch origin-volname"); + goto out; + } + cli_out(INDENT_MAIN_HEAD "%s", "Volume Name", ":", get_buffer); + + ret = dict_get_int32_sizen(dict, "snapcount", &avail); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Could not fetch snapcount"); + goto out; + } + cli_out(INDENT_MAIN_HEAD "%d", "Snaps Taken", ":", avail); + + ret = dict_get_int32_sizen(dict, "snaps-available", &count); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Could not fetch snaps-available"); + goto out; + } + cli_out(INDENT_MAIN_HEAD "%d", "Snaps Available", ":", count); + + for (i = 1; i <= avail; i++) { + snprintf(key, sizeof(key), "snap%d", i); + ret = cli_get_each_snap_info(dict, key, _gf_false); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Unable to print snap details"); + goto out; + } - cli_out (" UID : %d", uid); - cli_out (" GID : %d", gid); - cli_out (" PID : %d", pid); - cli_out (" Unique : %"PRIu64, unique); - //cli_out ("\tOp : %s", op); - cli_out (" Frames : %d", count); + ret = snprintf(key, sizeof(key), "snap%d.vol1", i); + if (ret < 0) { + goto out; + } + ret = cli_get_each_volinfo_in_snap(dict, key, _gf_false); + if (ret) { + gf_log("cli", GF_LOG_ERROR, + "Could not get volume related information"); + goto out; + } - for (i = 0; i < count; i++) { - cli_out (" Frame %d", i+1); + cli_out(" "); + } +out: + return ret; +} - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "%s.frame%d", prefix, i); - cli_print_volume_status_call_frame (dict, key); +static int +cli_snapshot_list(dict_t *dict) +{ + int snapcount = 0; + char key[32] = ""; + int ret = -1; + int i = 0; + char *get_buffer = NULL; + + GF_ASSERT(dict); + + ret = dict_get_int32_sizen(dict, "snapcount", &snapcount); + if (ret) { + gf_log("cli", 
GF_LOG_ERROR, "Could not fetch snap count"); + goto out; + } + + if (snapcount == 0) { + cli_out("No snapshots present"); + } + + for (i = 1; i <= snapcount; i++) { + ret = snprintf(key, sizeof(key), "snapname%d", i); + if (ret < 0) { + goto out; } - cli_out (" "); + ret = dict_get_strn(dict, key, ret, &get_buffer); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Could not get %s ", key); + goto out; + } else { + cli_out("%s", get_buffer); + } + } +out: + return ret; } -void -cli_print_volume_status_callpool (dict_t *dict, gf_boolean_t notbrick) +static int +cli_get_snap_volume_status(dict_t *dict, char *key_prefix) { - int ret = -1; - char *volname = NULL; - int brick_index_max = -1; - int other_count = 0; - int index_max = 0; - char *hostname = NULL; - char *path = NULL; - int online = -1; - int call_count = 0; - char key[1024] = {0,}; - int i = 0; - int j = 0; + int ret = -1; + char key[PATH_MAX] = ""; + char *buffer = NULL; + int brickcount = 0; + int i = 0; + int pid = 0; + + GF_ASSERT(dict); + GF_ASSERT(key_prefix); + + ret = snprintf(key, sizeof(key), "%s.brickcount", key_prefix); + if (ret < 0) { + goto out; + } + ret = dict_get_int32(dict, key, &brickcount); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Failed to fetch brickcount"); + goto out; + } + + for (i = 0; i < brickcount; i++) { + ret = snprintf(key, sizeof(key), "%s.brick%d.path", key_prefix, i); + if (ret < 0) { + goto out; + } - GF_ASSERT (dict); + ret = dict_get_str(dict, key, &buffer); + if (ret) { + gf_log("cli", GF_LOG_INFO, "Unable to get Brick Path"); + continue; + } + cli_out("\n\t%-17s %s %s", "Brick Path", ":", buffer); - ret = dict_get_str (dict, "volname", &volname); - if (ret) - goto out; - cli_out ("Pending calls for volume %s", volname); + ret = snprintf(key, sizeof(key), "%s.brick%d.vgname", key_prefix, i); + if (ret < 0) { + goto out; + } - ret = dict_get_int32 (dict, "brick-index-max", &brick_index_max); - if (ret) - goto out; - ret = dict_get_int32 (dict, "other-count", &other_count); - if (ret) - goto out; + ret = dict_get_str(dict, key, &buffer); + if (ret) { + gf_log("cli", GF_LOG_INFO, "Unable to get Volume Group"); + cli_out("\t%-17s %s %s", "Volume Group", ":", "N/A"); + } else + cli_out("\t%-17s %s %s", "Volume Group", ":", buffer); - index_max = brick_index_max + other_count; + ret = snprintf(key, sizeof(key), "%s.brick%d.status", key_prefix, i); + if (ret < 0) { + goto out; + } - for (i = 0; i <= index_max; i++) { - cli_out ("----------------------------------------------"); + ret = dict_get_str(dict, key, &buffer); + if (ret) { + gf_log("cli", GF_LOG_INFO, "Unable to get Brick Running"); + cli_out("\t%-17s %s %s", "Brick Running", ":", "N/A"); + } else + cli_out("\t%-17s %s %s", "Brick Running", ":", buffer); - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.hostname", i); - ret = dict_get_str (dict, key, &hostname); - if (ret) - goto out; - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.path", i); - ret = dict_get_str (dict, key, &path); - if (ret) - goto out; + ret = snprintf(key, sizeof(key), "%s.brick%d.pid", key_prefix, i); + if (ret < 0) { + goto out; + } - if (notbrick) - cli_out ("%s : %s", hostname, path); - else - cli_out ("Brick : %s:%s", hostname, path); + ret = dict_get_int32(dict, key, &pid); + if (ret) { + gf_log("cli", GF_LOG_INFO, "Unable to get pid"); + cli_out("\t%-17s %s %s", "Brick PID", ":", "N/A"); + } else + cli_out("\t%-17s %s %d", "Brick PID", ":", pid); - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), 
"brick%d.status", i); - ret = dict_get_int32 (dict, key, &online); - if (ret) - goto out; - if (!online) { - if (notbrick) - cli_out ("%s is offline", hostname); - else - cli_out ("Brick is offline"); - continue; - } + ret = snprintf(key, sizeof(key), "%s.brick%d.data", key_prefix, i); + if (ret < 0) { + goto out; + } - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.callpool.count", i); - ret = dict_get_int32 (dict, key, &call_count); - if (ret) - goto out; - cli_out ("Pending calls: %d", call_count); + ret = dict_get_str(dict, key, &buffer); + if (ret) { + gf_log("cli", GF_LOG_INFO, "Unable to get Data Percent"); + cli_out("\t%-17s %s %s", "Data Percentage", ":", "N/A"); + } else + cli_out("\t%-17s %s %s", "Data Percentage", ":", buffer); + + ret = snprintf(key, sizeof(key), "%s.brick%d.lvsize", key_prefix, i); + if (ret < 0) { + goto out; + } + ret = dict_get_str(dict, key, &buffer); + if (ret) { + gf_log("cli", GF_LOG_INFO, "Unable to get LV Size"); + cli_out("\t%-17s %s %s", "LV Size", ":", "N/A"); + } else + cli_out("\t%-17s %s %s", "LV Size", ":", buffer); + } - if (0 == call_count) - continue; + ret = 0; +out: + return ret; +} - for (j = 0; j < call_count; j++) { - cli_out ("Call Stack%d", j+1); +static int +cli_get_single_snap_status(dict_t *dict, char *keyprefix) +{ + int ret = -1; + char key[64] = ""; /* keyprefix is ""status.snap0" */ + int i = 0; + int volcount = 0; + char *get_buffer = NULL; + + GF_ASSERT(dict); + GF_ASSERT(keyprefix); + + ret = snprintf(key, sizeof(key), "%s.snapname", keyprefix); + if (ret < 0) { + goto out; + } + + ret = dict_get_str(dict, key, &get_buffer); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Unable to get snapname"); + goto out; + } + cli_out("\nSnap Name : %s", get_buffer); + + ret = snprintf(key, sizeof(key), "%s.uuid", keyprefix); + if (ret < 0) { + goto out; + } + + ret = dict_get_str(dict, key, &get_buffer); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Unable to get snap UUID"); + goto out; + } + cli_out("Snap UUID : %s", get_buffer); + + ret = snprintf(key, sizeof(key), "%s.volcount", keyprefix); + if (ret < 0) { + goto out; + } + + ret = dict_get_int32(dict, key, &volcount); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Unable to get volume count"); + goto out; + } + + for (i = 0; i < volcount; i++) { + ret = snprintf(key, sizeof(key), "%s.vol%d", keyprefix, i); + if (ret < 0) { + goto out; + } - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), - "brick%d.callpool.stack%d", i, j); - cli_print_volume_status_call_stack (dict, key); - } + ret = cli_get_snap_volume_status(dict, key); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Could not get snap volume status"); + goto out; } + } +out: + return ret; +} +static int32_t +cli_populate_req_dict_for_delete(dict_t *snap_dict, dict_t *dict, size_t index) +{ + int32_t ret = -1; + char key[PATH_MAX] = ""; + char *buffer = NULL; + + GF_ASSERT(snap_dict); + GF_ASSERT(dict); + + ret = dict_set_int32_sizen(snap_dict, "sub-cmd", GF_SNAP_DELETE_TYPE_ITER); + if (ret) { + gf_log("cli", GF_LOG_ERROR, + "Could not save command type in snap dictionary"); + goto out; + } + + ret = snprintf(key, sizeof(key), "snapname%zu", index); + if (ret < 0) { + goto out; + } + + ret = dict_get_str(dict, key, &buffer); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Failed to get snapname"); + goto out; + } + + ret = dict_set_dynstr_with_alloc(snap_dict, "snapname", buffer); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Failed to save snapname"); + goto out; + } + + ret = 
dict_set_int32_sizen(snap_dict, "type", GF_SNAP_OPTION_TYPE_DELETE); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Failed to save command type"); + goto out; + } + + ret = dict_set_dynstr_with_alloc(snap_dict, "cmd-str", "snapshot delete"); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Could not save command string as delete"); + goto out; + } out: - cli_out ("----------------------------------------------"); - return; + return ret; } static int -gf_cli3_1_status_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - int ret = -1; - int brick_index_max = -1; - int other_count = 0; - int index_max = 0; - int i = 0; - int pid = -1; - uint32_t cmd = 0; - gf_boolean_t notbrick = _gf_false; - char key[1024] = {0,}; - char *hostname = NULL; - char *path = NULL; - char *volname = NULL; - dict_t *dict = NULL; - gf_cli_rsp rsp = {0,}; - cli_volume_status_t status = {0}; - cli_local_t *local = NULL; - char msg[1024] = {0,}; - - if (req->rpc_status == -1) - goto out; +cli_populate_req_dict_for_status(dict_t *snap_dict, dict_t *dict, int index) +{ + int ret = -1; + char key[PATH_MAX] = ""; + char *buffer = NULL; + + GF_ASSERT(snap_dict); + GF_ASSERT(dict); + + ret = dict_set_uint32(snap_dict, "sub-cmd", GF_SNAP_STATUS_TYPE_ITER); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Could not save command type in snap dict"); + goto out; + } + + ret = snprintf(key, sizeof(key), "status.snap%d.snapname", index); + if (ret < 0) { + goto out; + } + + ret = dict_get_strn(dict, key, ret, &buffer); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Could not get snapname"); + goto out; + } + + ret = dict_set_str_sizen(snap_dict, "snapname", buffer); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Could not save snapname in snap dict"); + goto out; + } + + ret = dict_set_int32_sizen(snap_dict, "type", GF_SNAP_OPTION_TYPE_STATUS); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Could not save command type"); + goto out; + } + + ret = dict_set_dynstr_with_alloc(snap_dict, "cmd-str", "snapshot status"); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Could not save command string as status"); + goto out; + } + + ret = dict_set_int32_sizen(snap_dict, "hold_vol_locks", _gf_false); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Setting volume lock flag failed"); + goto out; + } - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); - if (ret < 0) { - gf_log ("cli", GF_LOG_ERROR, "Volume status response error"); - goto out; +out: + return ret; +} + +static int +cli_snapshot_status(dict_t *dict, gf_cli_rsp *rsp, call_frame_t *frame) +{ + int ret = -1; + int status_cmd = -1; + cli_local_t *local = NULL; + + GF_ASSERT(dict); + GF_ASSERT(rsp); + GF_ASSERT(frame); + + local = ((call_frame_t *)frame)->local; + if (!local) { + gf_log("cli", GF_LOG_ERROR, "frame->local is NULL"); + goto out; + } + + if (rsp->op_ret) { + if (rsp->op_errstr) { + ret = dict_set_dynstr_with_alloc(local->dict, "op_err_str", + rsp->op_errstr); + if (ret) { + gf_log("cli", GF_LOG_ERROR, + "Failed to set op_errstr in local dictionary"); + goto out; + } + } + ret = rsp->op_ret; + goto out; + } + + ret = dict_get_int32_sizen(dict, "sub-cmd", &status_cmd); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Could not fetch status type"); + goto out; + } + + if ((status_cmd != GF_SNAP_STATUS_TYPE_SNAP) && + (status_cmd != GF_SNAP_STATUS_TYPE_ITER)) { + dict_copy(dict, local->dict); + goto out; + } + + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_snapshot_status_single_snap(local, dict, "status.snap0"); + if (ret) { + gf_log("cli", GF_LOG_ERROR, + 
"Failed to create xml output for snapshot status"); + } + } else { + ret = cli_get_single_snap_status(dict, "status.snap0"); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Could not fetch status of snap"); } + } + +out: + return ret; +} - gf_log ("cli", GF_LOG_DEBUG, "Received response to status cmd"); +static int +gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type, + char *snap_name, char *volname, char *snap_uuid, + char *clone_name) +{ + int ret = -1; + int config_command = 0; + int32_t delete_cmd = -1; + uint64_t hard_limit = 0; + uint64_t soft_limit = 0; + char *auto_delete = NULL; + char *snap_activate = NULL; + char msg[PATH_MAX] = { + 0, + }; + char option[512] = { + 0, + }; + + GF_VALIDATE_OR_GOTO("cli", dict, out); + GF_VALIDATE_OR_GOTO("cli", rsp, out); + + switch (type) { + case GF_SNAP_OPTION_TYPE_CREATE: + if (!snap_name) { + gf_log("cli", GF_LOG_ERROR, "Failed to get snap name"); + goto out; + } + + if (!volname) { + gf_log("cli", GF_LOG_ERROR, "Failed to get volume name"); + goto out; + } + + if (rsp->op_ret != 0) { + gf_event(EVENT_SNAPSHOT_CREATE_FAILED, + "snapshot_name=%s;volume_name=%s;error=%s", snap_name, + volname, + rsp->op_errstr ? rsp->op_errstr + : "Please check log file for details"); + ret = 0; + break; + } - local = ((call_frame_t *)myframe)->local; + if (!snap_uuid) { + gf_log("cli", GF_LOG_ERROR, "Failed to get snap uuid"); + goto out; + } - if (rsp.op_ret) { - if (strcmp (rsp.op_errstr, "")) - snprintf (msg, sizeof (msg), "%s", rsp.op_errstr); - else - snprintf (msg, sizeof (msg), "Unable to obtain volume " - "status information."); - -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) { - cli_xml_output_str ("volStatus", msg, rsp.op_ret, - rsp.op_errno, rsp.op_errstr); - ret = 0; - goto out; - } -#endif - cli_err ("%s", msg); - if (local && local->all) { - ret = 0; - cli_out (" "); - } else - ret = -1; + gf_event(EVENT_SNAPSHOT_CREATED, + "snapshot_name=%s;volume_name=%s;snapshot_uuid=%s", + snap_name, volname, snap_uuid); + + ret = 0; + break; + case GF_SNAP_OPTION_TYPE_ACTIVATE: + if (!snap_name) { + gf_log("cli", GF_LOG_ERROR, "Failed to get snap name"); goto out; - } + } - dict = dict_new (); - if (!dict) + if (rsp->op_ret != 0) { + gf_event(EVENT_SNAPSHOT_ACTIVATE_FAILED, + "snapshot_name=%s;error=%s", snap_name, + rsp->op_errstr ? rsp->op_errstr + : "Please check log file for details"); + ret = 0; + break; + } + + if (!snap_uuid) { + gf_log("cli", GF_LOG_ERROR, "Failed to get snap uuid"); goto out; + } - ret = dict_unserialize (rsp.dict.dict_val, - rsp.dict.dict_len, - &dict); - if (ret) + gf_event(EVENT_SNAPSHOT_ACTIVATED, + "snapshot_name=%s;snapshot_uuid=%s", snap_name, snap_uuid); + + ret = 0; + break; + + case GF_SNAP_OPTION_TYPE_DEACTIVATE: + if (!snap_name) { + gf_log("cli", GF_LOG_ERROR, "Failed to get snap name"); goto out; + } - ret = dict_get_uint32 (dict, "cmd", &cmd); - if (ret) + if (rsp->op_ret != 0) { + gf_event(EVENT_SNAPSHOT_DEACTIVATE_FAILED, + "snapshot_name=%s;error=%s", snap_name, + rsp->op_errstr ? 
rsp->op_errstr + : "Please check log file for details"); + ret = 0; + break; + } + + if (!snap_uuid) { + gf_log("cli", GF_LOG_ERROR, "Failed to get snap uuid"); goto out; + } - if ((cmd & GF_CLI_STATUS_ALL)) { - if (local) { - local->dict = dict; - ret = 0; - } else { - gf_log ("cli", GF_LOG_ERROR, "local not found"); - ret = -1; - } + gf_event(EVENT_SNAPSHOT_DEACTIVATED, + "snapshot_name=%s;snapshot_uuid=%s", snap_name, snap_uuid); + + ret = 0; + break; + + case GF_SNAP_OPTION_TYPE_RESTORE: + if (!snap_name) { + gf_log("cli", GF_LOG_ERROR, "Failed to get snap name"); goto out; - } + } - if ((cmd & GF_CLI_STATUS_NFS) || (cmd & GF_CLI_STATUS_SHD)) - notbrick = _gf_true; + if (rsp->op_ret != 0) { + gf_event(EVENT_SNAPSHOT_RESTORE_FAILED, + "snapshot_name=%s;error=%s", snap_name, + rsp->op_errstr ? rsp->op_errstr + : "Please check log file for details"); + ret = 0; + break; + } - ret = dict_get_int32 (dict, "count", &count); - if (ret) + if (!snap_uuid) { + ret = -1; + gf_log("cli", GF_LOG_ERROR, "Failed to get snap uuid"); goto out; - if (count == 0) { + } + + if (!volname) { ret = -1; + gf_log("cli", GF_LOG_ERROR, "Failed to get volname"); goto out; - } + } - ret = dict_get_int32 (dict, "brick-index-max", &brick_index_max); - if (ret) + gf_event(EVENT_SNAPSHOT_RESTORED, + "snapshot_name=%s;snapshot_uuid=%s;volume_name=%s", + snap_name, snap_uuid, volname); + + ret = 0; + break; + + case GF_SNAP_OPTION_TYPE_DELETE: + ret = dict_get_int32_sizen(dict, "sub-cmd", &delete_cmd); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Could not get sub-cmd"); goto out; - ret = dict_get_int32 (dict, "other-count", &other_count); - if (ret) + } + + /* + * Need not generate any event (success or failure) for delete * + * all, as it will trigger individual delete for all snapshots * + */ + if (delete_cmd == GF_SNAP_DELETE_TYPE_ALL) { + ret = 0; + break; + } + + if (!snap_name) { + gf_log("cli", GF_LOG_ERROR, "Failed to get snap name"); goto out; + } - index_max = brick_index_max + other_count; + if (rsp->op_ret != 0) { + gf_event(EVENT_SNAPSHOT_DELETE_FAILED, + "snapshot_name=%s;error=%s", snap_name, + rsp->op_errstr ? 
rsp->op_errstr
+ : "Please check log file for details");
+ ret = 0;
+ break;
+ }
-#if (HAVE_LIB_XML)
- if (global_state->mode & GLUSTER_MODE_XML) {
- ret = cli_xml_output_vol_status (dict, rsp.op_ret,
- rsp.op_errno, rsp.op_errstr);
- if (ret) {
- gf_log ("cli", GF_LOG_ERROR,
- "Error outputting to xml");
- }
+ if (!snap_uuid) {
+ ret = -1;
+ gf_log("cli", GF_LOG_ERROR, "Failed to get snap uuid");
 goto out;
- }
-#endif
+ }

- status.brick = GF_CALLOC (1, PATH_MAX + 256, gf_common_mt_strdup);
-
- switch (cmd & GF_CLI_STATUS_MASK) {
- case GF_CLI_STATUS_MEM:
- cli_print_volume_status_mem (dict, notbrick);
- goto cont;
- break;
- case GF_CLI_STATUS_CLIENTS:
- cli_print_volume_status_clients (dict, notbrick);
- goto cont;
- break;
- case GF_CLI_STATUS_INODE:
- cli_print_volume_status_inode (dict, notbrick);
- goto cont;
- break;
- case GF_CLI_STATUS_FD:
- cli_print_volume_status_fd (dict, notbrick);
- goto cont;
- break;
- case GF_CLI_STATUS_CALLPOOL:
- cli_print_volume_status_callpool (dict, notbrick);
- goto cont;
- break;
- default:
- break;
- }
-
- ret = dict_get_str (dict, "volname", &volname);
- if (ret)
+ gf_event(EVENT_SNAPSHOT_DELETED,
+ "snapshot_name=%s;snapshot_uuid=%s", snap_name, snap_uuid);
+
+ ret = 0;
+ break;
+
+ case GF_SNAP_OPTION_TYPE_CLONE:
+ if (!clone_name) {
+ gf_log("cli", GF_LOG_ERROR, "Failed to get clone name");
 goto out;
+ }

- cli_out ("Status of volume: %s", volname);
+ if (!snap_name) {
+ gf_log("cli", GF_LOG_ERROR, "Failed to get snap name");
+ goto out;
+ }

- if ((cmd & GF_CLI_STATUS_DETAIL) == 0) {
- cli_out ("Gluster process\t\t\t\t\t\tPort\tOnline\tPid");
- cli_print_line (CLI_BRICK_STATUS_LINE_LEN);
- }
+ if (rsp->op_ret != 0) {
+ gf_event(EVENT_SNAPSHOT_CLONE_FAILED,
+ "snapshot_name=%s;clone_name=%s;error=%s", snap_name,
+ clone_name,
+ rsp->op_errstr ? rsp->op_errstr
+ : "Please check log file for details");
+ ret = 0;
+ break;
+ }

- for (i = 0; i <= index_max; i++) {
+ if (!snap_uuid) {
+ ret = -1;
+ gf_log("cli", GF_LOG_ERROR, "Failed to get snap uuid");
+ goto out;
+ }
+ gf_event(EVENT_SNAPSHOT_CLONED,
+ "snapshot_name=%s;clone_name=%s;clone_uuid=%s", snap_name,
+ clone_name, snap_uuid);

- memset (key, 0, sizeof (key));
- snprintf (key, sizeof (key), "brick%d.hostname", i);
- ret = dict_get_str (dict, key, &hostname);
- if (ret)
- continue;
+ ret = 0;
+ break;

- memset (key, 0, sizeof (key));
- snprintf (key, sizeof (key), "brick%d.path", i);
- ret = dict_get_str (dict, key, &path);
- if (ret)
- continue;
+ case GF_SNAP_OPTION_TYPE_CONFIG:
+ if (rsp->op_ret != 0) {
+ gf_event(EVENT_SNAPSHOT_CONFIG_UPDATE_FAILED, "error=%s",
+ rsp->op_errstr ? 
rsp->op_errstr + : "Please check log file for details"); + ret = 0; + break; + } - /* Brick/not-brick is handled seperately here as all - * types of nodes are contained in the default output - */ - memset (status.brick, 0, PATH_MAX + 255); - if (!strcmp (hostname, "NFS Server") || - !strcmp (hostname, "Self-heal Daemon")) - snprintf (status.brick, PATH_MAX + 255, "%s on %s", - hostname, path); - else - snprintf (status.brick, PATH_MAX + 255, "Brick %s:%s", - hostname, path); + ret = dict_get_int32_sizen(dict, "config-command", &config_command); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Could not fetch config type"); + goto out; + } - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.port", i); - ret = dict_get_int32 (dict, key, &(status.port)); - if (ret) - continue; + if (config_command == GF_SNAP_CONFIG_DISPLAY) { + ret = 0; + break; + } - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.status", i); - ret = dict_get_int32 (dict, key, &(status.online)); - if (ret) - continue; + /* These are optional parameters therefore ignore the error */ + ret = dict_get_uint64(dict, "snap-max-hard-limit", &hard_limit); + ret = dict_get_uint64(dict, "snap-max-soft-limit", &soft_limit); + ret = dict_get_str_sizen(dict, "auto-delete", &auto_delete); + ret = dict_get_str_sizen(dict, "snap-activate-on-create", + &snap_activate); - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.pid", i); - ret = dict_get_int32 (dict, key, &pid); - if (ret) - continue; - if (pid == -1) - ret = gf_asprintf (&(status.pid_str), "%s", "N/A"); - else - ret = gf_asprintf (&(status.pid_str), "%d", pid); + if (!hard_limit && !soft_limit && !auto_delete && !snap_activate) { + ret = -1; + gf_log("cli", GF_LOG_ERROR, + "At least one option from " + "snap-max-hard-limit, snap-max-soft-limit, " + "auto-delete and snap-activate-on-create " + "should be set"); + goto out; + } - if (ret == -1) - goto out; + if (hard_limit || soft_limit) { + snprintf(option, sizeof(option), "%s=%" PRIu64, + hard_limit ? "hard_limit" : "soft_limit", + hard_limit ? hard_limit : soft_limit); + } else if (auto_delete || snap_activate) { + snprintf(option, sizeof(option), "%s=%s", + auto_delete ? "auto-delete" : "snap-activate", + auto_delete ? auto_delete : snap_activate); + } - if ((cmd & GF_CLI_STATUS_DETAIL)) { - ret = cli_get_detail_status (dict, i, &status); - if (ret) - goto out; - cli_print_line (CLI_BRICK_STATUS_LINE_LEN); - cli_print_detailed_status (&status); + volname = NULL; + ret = dict_get_str_sizen(dict, "volname", &volname); - } else { - cli_print_brick_status (&status); - } - } - cli_out (" "); -cont: - ret = rsp.op_ret; + snprintf(msg, sizeof(msg), "config_type=%s;%s", + volname ? "volume_config" : "system_config", option); -out: - if (status.brick) - GF_FREE (status.brick); + gf_event(EVENT_SNAPSHOT_CONFIG_UPDATED, "%s", msg); - cli_cmd_broadcast_response (ret); - return ret; + ret = 0; + break; + + default: + gf_log("cli", GF_LOG_WARNING, + "Cannot generate event for unknown type."); + ret = 0; + goto out; + } + +out: + return ret; } -int32_t -gf_cli3_1_status_volume (call_frame_t *frame, xlator_t *this, - void *data) +/* + * Fetch necessary data from dict at one place instead of * + * repeating the same code again and again. 
* + */ +static int +gf_cli_snapshot_get_data_from_dict(dict_t *dict, char **snap_name, + char **volname, char **snap_uuid, + int8_t *soft_limit_flag, char **clone_name) { - gf_cli_req req = {{0,}}; - int ret = -1; - dict_t *dict = NULL; + int ret = -1; - if (!frame || !this || !data) - goto out; + GF_VALIDATE_OR_GOTO("cli", dict, out); - dict = data; + if (snap_name) { + ret = dict_get_str_sizen(dict, "snapname", snap_name); + if (ret) { + gf_log("cli", GF_LOG_DEBUG, "failed to get snapname from dict"); + } + } - ret = dict_allocate_and_serialize (dict, - &req.dict.dict_val, - (size_t *)&req.dict.dict_len); - if (ret < 0) { - gf_log ("cli", GF_LOG_ERROR, - "failed to serialize the data"); + if (volname) { + ret = dict_get_str_sizen(dict, "volname1", volname); + if (ret) { + gf_log("cli", GF_LOG_DEBUG, "failed to get volname1 from dict"); + } + } - goto out; + if (snap_uuid) { + ret = dict_get_str_sizen(dict, "snapuuid", snap_uuid); + if (ret) { + gf_log("cli", GF_LOG_DEBUG, "failed to get snapuuid from dict"); + } + } + + if (soft_limit_flag) { + ret = dict_get_int8(dict, "soft-limit-reach", soft_limit_flag); + if (ret) { + gf_log("cli", GF_LOG_DEBUG, + "failed to get soft-limit-reach from dict"); } - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_STATUS_VOLUME, NULL, - this, gf_cli3_1_status_cbk, - (xdrproc_t)xdr_gf_cli_req); + } - out: - gf_log ("cli", GF_LOG_DEBUG, "Returning: %d", ret); - return ret; + if (clone_name) { + ret = dict_get_str_sizen(dict, "clonename", clone_name); + if (ret) { + gf_log("cli", GF_LOG_DEBUG, "failed to get clonename from dict"); + } + } + + ret = 0; +out: + return ret; } -int -gf_cli_status_volume_all (call_frame_t *frame, xlator_t *this, void *data) +static int +gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) { - int i = 0; - int ret = -1; - int vol_count = -1; - uint32_t cmd = 0; - char key[1024] = {0}; - char *volname = NULL; - dict_t *vol_dict = NULL; - dict_t *dict = NULL; - cli_local_t *local = NULL; + int ret = -1; + gf_cli_rsp rsp = { + 0, + }; + dict_t *dict = NULL; + char *snap_name = NULL; + char *clone_name = NULL; + int32_t type = 0; + call_frame_t *frame = NULL; + gf_boolean_t snap_driven = _gf_false; + int8_t soft_limit_flag = -1; + char *volname = NULL; + char *snap_uuid = NULL; + + GF_ASSERT(myframe); + + if (req->rpc_status == -1) { + goto out; + } + + frame = myframe; + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL); + goto out; + } + + dict = dict_new(); + + if (!dict) { + ret = -1; + goto out; + } + + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict); + + if (ret) + goto out; + + ret = dict_get_int32_sizen(dict, "type", &type); + if (ret) { + gf_log(frame->this->name, GF_LOG_ERROR, "failed to get type"); + goto out; + } + + ret = gf_cli_snapshot_get_data_from_dict( + dict, &snap_name, &volname, &snap_uuid, &soft_limit_flag, &clone_name); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Failed to fetch data from dict."); + goto out; + } + +#if (USE_EVENTS) + ret = gf_cli_generate_snapshot_event(&rsp, dict, type, snap_name, volname, + snap_uuid, clone_name); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Failed to generate snapshot event"); + goto out; + } +#endif - dict = (dict_t *)data; - ret = dict_get_uint32 (dict, "cmd", &cmd); - if (ret) + /* Snapshot status and delete command is handled separately */ + if (global_state->mode & GLUSTER_MODE_XML && + GF_SNAP_OPTION_TYPE_STATUS 
!= type &&
+ GF_SNAP_OPTION_TYPE_DELETE != type) {
+ ret = cli_xml_output_snapshot(type, dict, rsp.op_ret, rsp.op_errno,
+ rsp.op_errstr);
+ if (ret) {
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
+ }
+
+ goto out;
+ }
+
+ switch (type) {
+ case GF_SNAP_OPTION_TYPE_CREATE:
+ if (rsp.op_ret) {
+ cli_err("snapshot create: failed: %s",
+ rsp.op_errstr ? rsp.op_errstr
+ : "Please check log file for details");
+ ret = rsp.op_ret;
 goto out;
+ }

- local = cli_local_get ();
- if (!local) {
+ if (!snap_name) {
 ret = -1;
- gf_log ("cli", GF_LOG_ERROR, "Failed to allocate local");
+ gf_log("cli", GF_LOG_ERROR, "Failed to get snap name");
 goto out;
- }
- frame->local = local;
- local->all = _gf_true;
+ }

- ret = gf_cli3_1_status_volume (frame, this, data);
- if (ret)
+ if (!volname) {
+ ret = -1;
+ gf_log("cli", GF_LOG_ERROR, "Failed to get volume name");
+ goto out;
+ }
+
+ cli_out("snapshot create: success: Snap %s created successfully",
+ snap_name);
+
+ if (soft_limit_flag == 1) {
+ cli_out(
+ "Warning: Soft-limit of volume (%s) is "
+ "reached. Snapshot creation is not possible "
+ "once hard-limit is reached.",
+ volname);
+ }
+ ret = 0;
+ break;
+
+ case GF_SNAP_OPTION_TYPE_CLONE:
+ if (rsp.op_ret) {
+ cli_err("snapshot clone: failed: %s",
+ rsp.op_errstr ? rsp.op_errstr
+ : "Please check log file for details");
+ ret = rsp.op_ret;
 goto out;
+ }

- vol_dict = local->dict;
+ if (!clone_name) {
+ gf_log("cli", GF_LOG_ERROR, "Failed to get clone name");
+ goto out;
+ }

- ret = dict_get_int32 (vol_dict, "vol_count", &vol_count);
- if (ret) {
- cli_err ("Failed to get names of volumes");
+ if (!snap_name) {
+ gf_log("cli", GF_LOG_ERROR, "Failed to get snap name");
 goto out;
- }
+ }

- if (vol_count == 0) {
- cli_out ("No volumes present");
- ret = 0;
+ cli_out("snapshot clone: success: Clone %s created successfully",
+ clone_name);
+
+ ret = 0;
+ break;
+
+ case GF_SNAP_OPTION_TYPE_RESTORE:
+ if (rsp.op_ret) {
+ cli_err("snapshot restore: failed: %s",
+ rsp.op_errstr ? rsp.op_errstr
+ : "Please check log file for details");
+ ret = rsp.op_ret;
 goto out;
- }
+ }

- /* remove the "all" flag in cmd */
- cmd &= ~GF_CLI_STATUS_ALL;
- cmd |= GF_CLI_STATUS_VOL;
+ if (!snap_name) {
+ gf_log("cli", GF_LOG_ERROR, "Failed to get snap name");
+ goto out;
+ }

- for (i = 0; i < vol_count; i++) {
+ cli_out("Snapshot restore: %s: Snap restored successfully",
+ snap_name);

- dict = dict_new ();
- if (!dict)
- goto out;
+ ret = 0;
+ break;
+ case GF_SNAP_OPTION_TYPE_ACTIVATE:
+ if (rsp.op_ret) {
+ cli_err("snapshot activate: failed: %s",
+ rsp.op_errstr ? rsp.op_errstr
+ : "Please check log file for details");
+ ret = rsp.op_ret;
+ goto out;
+ }

- memset (key, 0, sizeof (key));
- snprintf (key, sizeof (key), "vol%d", i);
- ret = dict_get_str (vol_dict, key, &volname);
- if (ret)
- goto out;
+ if (!snap_name) {
+ gf_log("cli", GF_LOG_ERROR, "Failed to get snap name");
+ goto out;
+ }

- ret = dict_set_dynstr (dict, "volname", volname);
- if (ret)
- goto out;
+ cli_out("Snapshot activate: %s: Snap activated successfully",
+ snap_name);

- ret = dict_set_uint32 (dict, "cmd", cmd);
- if (ret)
- goto out;
+ ret = 0;
+ break;

- ret = gf_cli3_1_status_volume (frame, this, dict);
- if (ret)
- goto out;
+ case GF_SNAP_OPTION_TYPE_DEACTIVATE:
+ if (rsp.op_ret) {
+ cli_err("snapshot deactivate: failed: %s",
+ rsp.op_errstr ? 
rsp.op_errstr + : "Please check log file for details"); + ret = rsp.op_ret; + goto out; + } - dict_unref (dict); - } + if (!snap_name) { + gf_log("cli", GF_LOG_ERROR, "Failed to get snap name"); + goto out; + } - out: - if (ret) - gf_log ("cli", GF_LOG_ERROR, "status all failed"); - if (ret && dict) - dict_unref (dict); - if (frame) - frame->local = NULL; - return ret; -} + cli_out("Snapshot deactivate: %s: Snap deactivated successfully", + snap_name); -static int -gf_cli3_1_mount_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf1_cli_mount_rsp rsp = {0,}; - int ret = -1; + ret = 0; + break; + case GF_SNAP_OPTION_TYPE_INFO: + if (rsp.op_ret) { + cli_err("Snapshot info : failed: %s", + rsp.op_errstr ? rsp.op_errstr + : "Please check log file for details"); + ret = rsp.op_ret; + goto out; + } - if (-1 == req->rpc_status) { + snap_driven = dict_get_str_boolean(dict, "snap-driven", _gf_false); + if (snap_driven == _gf_true) { + ret = cli_call_snapshot_info(dict, snap_driven); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Snapshot info failed"); + goto out; + } + } else if (snap_driven == _gf_false) { + ret = cli_get_snaps_in_volume(dict); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Snapshot info failed"); + goto out; + } + } + break; + + case GF_SNAP_OPTION_TYPE_CONFIG: + ret = cli_snapshot_config_display(dict, &rsp); + if (ret) { + gf_log("cli", GF_LOG_ERROR, + "Failed to display snapshot config output."); + goto out; + } + break; + + case GF_SNAP_OPTION_TYPE_LIST: + if (rsp.op_ret) { + cli_err("Snapshot list : failed: %s", + rsp.op_errstr ? rsp.op_errstr + : "Please check log file for details"); + ret = rsp.op_ret; goto out; - } + } - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf1_cli_mount_rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); + ret = cli_snapshot_list(dict); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Failed to display snapshot list"); goto out; - } + } + break; - gf_log ("cli", GF_LOG_INFO, "Received resp to mount"); + case GF_SNAP_OPTION_TYPE_DELETE: + ret = cli_snapshot_remove_reply(&rsp, dict, frame); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Failed to delete snap"); + goto out; + } + break; - if (rsp.op_ret == 0) { - ret = 0; - cli_out ("%s", rsp.path); - } else { - /* weird sounding but easy to parse... 
*/
- cli_err ("%d : failed with this errno (%s)",
- rsp.op_errno, strerror (rsp.op_errno));
- ret = 1;
- }

 out:
- cli_cmd_broadcast_response (ret);
- return ret;
+ if (dict)
+ dict_unref(dict);
+ cli_cmd_broadcast_response(ret);
+
+ free(rsp.dict.dict_val);
+ free(rsp.op_errstr);
+
+ return ret;
 }

 int32_t
-gf_cli3_1_mount (call_frame_t *frame, xlator_t *this, void *data)
+gf_cli_snapshot_for_delete(call_frame_t *frame, xlator_t *this, void *data)
 {
- gf1_cli_mount_req req = {0,};
- int ret = -1;
- void **dataa = data;
- char *label = NULL;
- dict_t *dict = NULL;
+ gf_cli_req req = {{
+ 0,
+ }};
+ int32_t ret = -1;
+ int32_t cmd = -1;
+ cli_local_t *local = NULL;
+ dict_t *snap_dict = NULL;
+ int32_t snapcount = 0;
+ int i = 0;
+ char question[PATH_MAX] = "";
+ char *volname = NULL;
+ gf_answer_t answer = GF_ANSWER_NO;
+
+ GF_VALIDATE_OR_GOTO("cli", frame, out);
+ GF_VALIDATE_OR_GOTO("cli", frame->local, out);
+ GF_VALIDATE_OR_GOTO("cli", this, out);
+ GF_VALIDATE_OR_GOTO("cli", data, out);
+
+ local = frame->local;
+
+ ret = dict_get_int32_sizen(local->dict, "sub-cmd", &cmd);
+ if (ret) {
+ gf_log("cli", GF_LOG_ERROR, "Failed to get sub-cmd");
+ goto out;
+ }
+
+ /* No need for multiple RPCs for an individual snapshot delete */
+ if (cmd == GF_SNAP_DELETE_TYPE_SNAP) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = dict_get_int32_sizen(local->dict, "snapcount", &snapcount);
+ if (ret) {
+ gf_log("cli", GF_LOG_ERROR, "Could not get snapcount");
+ goto out;
+ }
+
+ if (global_state->mode & GLUSTER_MODE_XML) {
+#ifdef HAVE_LIB_XML
+ ret = xmlTextWriterWriteFormatElement(
+ local->writer, (xmlChar *)"snapCount", "%d", snapcount);
+ if (ret) {
+ gf_log("cli", GF_LOG_ERROR,
+ "Failed to write xml element \"snapCount\"");
+ goto out;
+ }
+#endif /* HAVE_LIB_XML */
+ } else if (snapcount == 0) {
+ cli_out("No snapshots present");
+ goto out;
+ }
+
+ if (cmd == GF_SNAP_DELETE_TYPE_ALL) {
+ snprintf(question, sizeof(question),
+ "System contains %d snapshot(s).\nDo you still "
+ "want to continue and delete them? ",
+ snapcount);
+ } else {
+ ret = dict_get_str_sizen(local->dict, "volname", &volname);
+ if (ret) {
+ gf_log("cli", GF_LOG_ERROR,
+ "Failed to fetch volname from local dictionary");
+ goto out;
+ }

- label = dataa[0];
- dict = dataa[1];
+ snprintf(question, sizeof(question),
+ "Volume (%s) contains %d snapshot(s).\nDo you still want to "
+ "continue and delete them? 
", + volname, snapcount); + } - label = dataa[0]; - dict = dataa[1]; + answer = cli_cmd_get_confirmation(global_state, question); + if (GF_ANSWER_NO == answer) { + ret = 0; + gf_log("cli", GF_LOG_DEBUG, + "User cancelled snapshot delete operation for snap delete"); + goto out; + } + + for (i = 1; i <= snapcount; i++) { + ret = -1; - req.label = label; - ret = dict_allocate_and_serialize (dict, &req.dict.dict_val, - (size_t *)&req.dict.dict_len); + snap_dict = dict_new(); + if (!snap_dict) + goto out; + + ret = cli_populate_req_dict_for_delete(snap_dict, local->dict, i); if (ret) { - ret = -1; - goto out; + gf_log("cli", GF_LOG_ERROR, + "Could not populate snap request dictionary"); + goto out; } - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_MOUNT, NULL, - this, gf_cli3_1_mount_cbk, - (xdrproc_t)xdr_gf1_cli_mount_req); + ret = cli_to_glusterd(&req, frame, gf_cli_snapshot_cbk, + (xdrproc_t)xdr_gf_cli_req, snap_dict, + GLUSTER_CLI_SNAP, this, cli_rpc_prog, NULL); + if (ret) { + /* Fail the operation if deleting one of the + * snapshots is failed + */ + gf_log("cli", GF_LOG_ERROR, + "cli_to_glusterd for snapshot delete failed"); + goto out; + } + dict_unref(snap_dict); + snap_dict = NULL; + } out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - return ret; + if (snap_dict) + dict_unref(snap_dict); + GF_FREE(req.dict.dict_val); + + return ret; } -static int -gf_cli3_1_umount_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) +static int32_t +gf_cli_snapshot_for_status(call_frame_t *frame, xlator_t *this, void *data) { - gf1_cli_umount_rsp rsp = {0,}; - int ret = -1; + gf_cli_req req = {{ + 0, + }}; + int ret = -1; + int32_t cmd = -1; + cli_local_t *local = NULL; + dict_t *snap_dict = NULL; + int snapcount = 0; + int i = 0; + + GF_VALIDATE_OR_GOTO("cli", frame, out); + GF_VALIDATE_OR_GOTO("cli", frame->local, out); + GF_VALIDATE_OR_GOTO("cli", this, out); + GF_VALIDATE_OR_GOTO("cli", data, out); + + local = frame->local; + + ret = dict_get_int32_sizen(local->dict, "sub-cmd", &cmd); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Failed to get sub-cmd"); + goto out; + } + + /* Snapshot status of single snap (i.e. GF_SNAP_STATUS_TYPE_SNAP) + * is already handled. Therefore we can return from here. 
+ * If want to get status of all snaps in the system or volume then + * we should get them one by one.*/ + if ((cmd == GF_SNAP_STATUS_TYPE_SNAP) || + (cmd == GF_SNAP_STATUS_TYPE_ITER)) { + ret = 0; + goto out; + } - if (-1 == req->rpc_status) { - goto out; - } + ret = dict_get_int32_sizen(local->dict, "status.snapcount", &snapcount); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Could not get snapcount"); + goto out; + } - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf1_cli_umount_rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; - } + if (snapcount == 0 && !(global_state->mode & GLUSTER_MODE_XML)) { + cli_out("No snapshots present"); + } - gf_log ("cli", GF_LOG_INFO, "Received resp to mount"); + for (i = 0; i < snapcount; i++) { + ret = -1; - if (rsp.op_ret == 0) - ret = 0; - else { - cli_err ("umount failed"); - ret = 1; + snap_dict = dict_new(); + if (!snap_dict) + goto out; + + ret = cli_populate_req_dict_for_status(snap_dict, local->dict, i); + if (ret) { + gf_log("cli", GF_LOG_ERROR, + "Could not populate snap request dictionary"); + goto out; + } + + ret = cli_to_glusterd(&req, frame, gf_cli_snapshot_cbk, + (xdrproc_t)xdr_gf_cli_req, snap_dict, + GLUSTER_CLI_SNAP, this, cli_rpc_prog, NULL); + + /* Ignore the return value and error for snapshot + * status of type "ALL" or "VOL" + * + * Scenario : There might be case where status command + * and delete command might be issued at the same time. + * In that case when status tried to fetch detail of + * snap which has been deleted by concurrent command, + * then it will show snapshot not present. Which will + * not be appropriate. + */ + if (ret && (cmd != GF_SNAP_STATUS_TYPE_ALL && + cmd != GF_SNAP_STATUS_TYPE_VOL)) { + gf_log("cli", GF_LOG_ERROR, + "cli_to_glusterd for snapshot status failed"); + goto out; } + dict_unref(snap_dict); + snap_dict = NULL; + } + ret = 0; out: - cli_cmd_broadcast_response (ret); - return ret; + if (snap_dict) + dict_unref(snap_dict); + GF_FREE(req.dict.dict_val); + + return ret; } -int32_t -gf_cli3_1_umount (call_frame_t *frame, xlator_t *this, void *data) +static int32_t +gf_cli_snapshot(call_frame_t *frame, xlator_t *this, void *data) { - gf1_cli_umount_req req = {0,}; - int ret = -1; - dict_t *dict = NULL; + gf_cli_req req = {{ + 0, + }}; + dict_t *options = NULL; + int ret = -1; + int tmp_ret = -1; + cli_local_t *local = NULL; + char *err_str = NULL; + int type = -1; - if (!frame || !this || !data) - goto out; + if (!frame || !frame->local || !this || !data) + goto out; - dict = data; + local = frame->local; - ret = dict_get_str (dict, "path", &req.path); - if (ret == 0) - ret = dict_get_int32 (dict, "lazy", &req.lazy); + options = data; + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_snapshot_begin_composite_op(local); if (ret) { - ret = -1; - goto out; + gf_log("cli", GF_LOG_ERROR, + "Failed to begin snapshot xml composite op"); + goto out; } + } - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_UMOUNT, NULL, - this, gf_cli3_1_umount_cbk, - (xdrproc_t)xdr_gf1_cli_umount_req); + ret = cli_to_glusterd(&req, frame, gf_cli_snapshot_cbk, + (xdrproc_t)xdr_gf_cli_req, options, GLUSTER_CLI_SNAP, + this, cli_rpc_prog, NULL); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "cli_to_glusterd for snapshot failed"); + goto xmlend; + } - out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - return ret; -} + ret = dict_get_int32_sizen(local->dict, "type", &type); -void -cmd_heal_volume_brick_out (dict_t *dict, int brick) -{ - uint64_t num_entries = 0; - int ret 
= 0; - char key[256] = {0}; - char *hostname = NULL; - char *path = NULL; - char *status = NULL; - uint64_t i = 0; - uint32_t time = 0; - char timestr[256] = {0}; - struct tm tm = {0}; - time_t ltime = 0; - - snprintf (key, sizeof (key), "%d-hostname", brick); - ret = dict_get_str (dict, key, &hostname); - if (ret) - goto out; - snprintf (key, sizeof (key), "%d-path", brick); - ret = dict_get_str (dict, key, &path); - if (ret) - goto out; - cli_out ("\nBrick %s:%s", hostname, path); - snprintf (key, sizeof (key), "%d-count", brick); - ret = dict_get_uint64 (dict, key, &num_entries); - cli_out ("Number of entries: %"PRIu64, num_entries); - snprintf (key, sizeof (key), "%d-status", brick); - ret = dict_get_str (dict, key, &status); - if (status && strlen (status)) - cli_out ("Status: %s", status); - for (i = 0; i < num_entries; i++) { - snprintf (key, sizeof (key), "%d-%"PRIu64, brick, i); - ret = dict_get_str (dict, key, &path); - if (ret) - continue; - time = 0; - snprintf (key, sizeof (key), "%d-%"PRIu64"-time", brick, i); - ret = dict_get_uint32 (dict, key, &time); - if (!time) { - cli_out ("%s", path); - } else { - ltime = time; - memset (&tm, 0, sizeof (tm)); - if (!localtime_r (<ime, &tm)) { - snprintf (timestr, sizeof (timestr), - "Invalid time"); - } else { - strftime (timestr, sizeof (timestr), - "%Y-%m-%d %H:%M:%S", &tm); - } - if (i ==0) { - cli_out ("at path on brick"); - cli_out ("-----------------------------------"); - } - cli_out ("%s %s", timestr, path); - } + if (GF_SNAP_OPTION_TYPE_STATUS == type) { + ret = gf_cli_snapshot_for_status(frame, this, data); + if (ret) { + gf_log("cli", GF_LOG_ERROR, + "cli to glusterd for snapshot status command failed"); } -out: - return; -} -int -gf_cli3_1_heal_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - gf_cli_rsp rsp = {0,}; - int ret = -1; - cli_local_t *local = NULL; - char *volname = NULL; - call_frame_t *frame = NULL; - dict_t *input_dict = NULL; - dict_t *dict = NULL; - int brick_count = 0; - int i = 0; - gf_xl_afr_op_t heal_op = GF_AFR_OP_INVALID; - - if (-1 == req->rpc_status) { - goto out; - } + goto xmlend; + } - ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); - if (ret < 0) { - gf_log ("", GF_LOG_ERROR, "error"); - goto out; + if (GF_SNAP_OPTION_TYPE_DELETE == type) { + ret = gf_cli_snapshot_for_delete(frame, this, data); + if (ret) { + gf_log("cli", GF_LOG_ERROR, + "cli to glusterd for snapshot delete command failed"); } - frame = myframe; + goto xmlend; + } - if (frame) { - local = frame->local; - frame->local = NULL; - } + ret = 0; - if (local) { - input_dict = local->dict; - ret = dict_get_int32 (input_dict, "heal-op", - (int32_t*)&heal_op); - } -//TODO: Proper XML output -//#if (HAVE_LIB_XML) -// if (global_state->mode & GLUSTER_MODE_XML) { -// ret = cli_xml_output_dict ("volHeal", dict, rsp.op_ret, -// rsp.op_errno, rsp.op_errstr); -// if (ret) -// gf_log ("cli", GF_LOG_ERROR, -// "Error outputting to xml"); -// goto out; -// } -//#endif - - ret = dict_get_str (input_dict, "volname", &volname); +xmlend: + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_snapshot_end_composite_op(local); if (ret) { - gf_log (THIS->name, GF_LOG_ERROR, "failed to get volname"); - goto out; + gf_log("cli", GF_LOG_ERROR, + "Failed to end snapshot xml composite op"); + goto out; } + } +out: + if (ret && local && GF_SNAP_OPTION_TYPE_STATUS == type) { + tmp_ret = dict_get_str_sizen(local->dict, "op_err_str", &err_str); + if (tmp_ret || !err_str) { + cli_err("Snapshot Status : 
failed: %s", + "Please check log file for details"); + } else { + cli_err("Snapshot Status : failed: %s", err_str); + dict_del_sizen(local->dict, "op_err_str"); + } + } - gf_log ("cli", GF_LOG_INFO, "Received resp to heal volume"); - - if (rsp.op_ret && strcmp (rsp.op_errstr, "")) - cli_err ("%s", rsp.op_errstr); - else - cli_out ("Heal operation on volume %s has been %s", volname, - (rsp.op_ret) ? "unsuccessful": "successful"); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); - ret = rsp.op_ret; - if ((heal_op == GF_AFR_OP_HEAL_FULL) || - (heal_op == GF_AFR_OP_HEAL_INDEX)) - goto out; + GF_FREE(req.dict.dict_val); - dict = dict_new (); - if (!dict) { - ret = -1; - goto out; - } - - ret = dict_unserialize (rsp.dict.dict_val, - rsp.dict.dict_len, - &dict); + if (global_state->mode & GLUSTER_MODE_XML) { + /* XML mode handles its own error */ + ret = 0; + } + return ret; +} - if (ret) { - gf_log ("", GF_LOG_ERROR, - "Unable to allocate memory"); - goto out; +static int32_t +gf_cli_barrier_volume_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + + GF_ASSERT(myframe); + + if (-1 == req->rpc_status) + goto out; + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + goto out; + } + gf_log("cli", GF_LOG_DEBUG, "Received response to barrier"); + + if (rsp.op_ret) { + if (rsp.op_errstr && (strlen(rsp.op_errstr) > 1)) { + cli_err("volume barrier: command unsuccessful : %s", rsp.op_errstr); } else { - dict->extra_stdfree = rsp.dict.dict_val; - } - ret = dict_get_int32 (dict, "count", &brick_count); - if (ret) - goto out; - - if (!brick_count) { - cli_out ("All bricks of volume %s are down.", volname); - goto out; + cli_err("volume barrier: command unsuccessful"); } - - for (i = 0; i < brick_count; i++) - cmd_heal_volume_brick_out (dict, i); - ret = rsp.op_ret; + } else { + cli_out("volume barrier: command successful"); + } + ret = rsp.op_ret; out: - cli_cmd_broadcast_response (ret); - if (local) - cli_local_wipe (local); - if (rsp.op_errstr) - free (rsp.op_errstr); - if (dict) - dict_unref (dict); - return ret; + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); + return ret; } -int32_t -gf_cli3_1_heal_volume (call_frame_t *frame, xlator_t *this, - void *data) +static int +gf_cli_barrier_volume(call_frame_t *frame, xlator_t *this, void *data) { - gf_cli_req req = {{0,}}; - int ret = 0; - cli_local_t *local = NULL; - dict_t *dict = NULL; + gf_cli_req req = {{ + 0, + }}; + dict_t *options = NULL; + int ret = -1; - if (!frame || !this || !data) { - ret = -1; - goto out; - } + options = data; - dict = data; - local = cli_local_get (); + ret = cli_to_glusterd(&req, frame, gf_cli_barrier_volume_cbk, + (xdrproc_t)xdr_gf_cli_req, options, + GLUSTER_CLI_BARRIER_VOLUME, this, cli_rpc_prog, NULL); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); - if (local) { - local->dict = dict_ref (dict); - frame->local = local; - } + GF_FREE(req.dict.dict_val); + return ret; +} - ret = dict_allocate_and_serialize (dict, - &req.dict.dict_val, - (size_t *)&req.dict.dict_len); - if (ret < 0) { - gf_log (this->name, GF_LOG_ERROR, - "failed to serialize the data"); +static int32_t +gf_cli_get_vol_opt_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + gf_cli_rsp rsp = { + 0, + }; + int ret = -1; + dict_t *dict = NULL; + char *key = NULL; + char *value = NULL; + char msg[1024] = { + 0, + }; + int i = 0; + char 
dict_key[50] = { + 0, + }; + + GF_ASSERT(myframe); + + if (-1 == req->rpc_status) + goto out; + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + goto out; + } + gf_log("cli", GF_LOG_DEBUG, "Received response to get volume option"); + + if (rsp.op_ret) { + if (strcmp(rsp.op_errstr, "")) + snprintf(msg, sizeof(msg), "volume get option: failed: %s", + rsp.op_errstr); + else + snprintf(msg, sizeof(msg), "volume get option: failed"); - goto out; + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_str("volGetopts", msg, rsp.op_ret, + rsp.op_errno, rsp.op_errstr); + if (ret) { + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + } + } else { + cli_err("%s", msg); } - - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_HEAL_VOLUME, NULL, - this, gf_cli3_1_heal_volume_cbk, - (xdrproc_t) xdr_gf_cli_req); + ret = rsp.op_ret; + goto out_nolog; + } + dict = dict_new(); + + if (!dict) { + ret = -1; + goto out; + } + + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict); + if (ret) { + gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL); + goto out; + } + + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_vol_getopts(dict, rsp.op_ret, rsp.op_errno, + rsp.op_errstr); + if (ret) { + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + ret = 0; + } + goto out; + } + + ret = dict_get_str_sizen(dict, "warning", &value); + if (!ret) { + cli_out("%s", value); + } + + ret = dict_get_int32_sizen(dict, "count", &count); + if (ret) { + gf_log("cli", GF_LOG_ERROR, + "Failed to retrieve count from the dictionary"); + goto out; + } + + if (count <= 0) { + gf_log("cli", GF_LOG_ERROR, "Value of count :%d is invalid", count); + ret = -1; + goto out; + } + + cli_out("%-40s%-40s", "Option", "Value"); + cli_out("%-40s%-40s", "------", "-----"); + for (i = 1; i <= count; i++) { + ret = snprintf(dict_key, sizeof dict_key, "key%d", i); + ret = dict_get_strn(dict, dict_key, ret, &key); + if (ret) { + gf_log("cli", GF_LOG_ERROR, + "Failed to retrieve %s from the dictionary", dict_key); + goto out; + } + ret = snprintf(dict_key, sizeof dict_key, "value%d", i); + ret = dict_get_strn(dict, dict_key, ret, &value); + if (ret) { + gf_log("cli", GF_LOG_ERROR, + "Failed to retrieve key value for %s from the dictionary", + dict_key); + goto out; + } + cli_out("%-40s%-40s", key, value); + } out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + if (ret) { + cli_out( + "volume get option failed. 
Check the cli/glusterd log " + "file for more details"); + } + +out_nolog: + if (dict) + dict_unref(dict); + cli_cmd_broadcast_response(ret); + gf_free_xdr_cli_rsp(rsp); + return ret; +} - if (req.dict.dict_val) - GF_FREE (req.dict.dict_val); +static int +gf_cli_get_vol_opt(call_frame_t *frame, xlator_t *this, void *data) +{ + gf_cli_req req = {{ + 0, + }}; + dict_t *options = NULL; + int ret = -1; + + options = data; - return ret; + ret = cli_to_glusterd(&req, frame, gf_cli_get_vol_opt_cbk, + (xdrproc_t)xdr_gf_cli_req, options, + GLUSTER_CLI_GET_VOL_OPT, this, cli_rpc_prog, NULL); + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); + + GF_FREE(req.dict.dict_val); + return ret; } -int32_t -gf_cli3_1_statedump_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) +static int +add_cli_cmd_timeout_to_dict(dict_t *dict) { - gf_cli_rsp rsp = {0,}; - int ret = -1; - char msg[1024] = {0,}; - - if (-1 == req->rpc_status) - goto out; - ret = xdr_to_generic (*iov, &rsp, - (xdrproc_t)xdr_gf_cli_rsp); - if (ret < 0) { - gf_log (THIS->name, GF_LOG_ERROR, "XDR decoding failed"); - goto out; - } - gf_log ("cli", GF_LOG_DEBUG, "Received response to statedump"); - if (rsp.op_ret) - snprintf (msg, sizeof(msg), "%s", rsp.op_errstr); - else - snprintf (msg, sizeof (msg), "Volume statedump successful"); + int ret = 0; -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_str ("volStatedump", msg, rsp.op_ret, - rsp.op_errno, rsp.op_errstr); - if (ret) - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); - goto out; + if (cli_default_conn_timeout > 120) { + ret = dict_set_uint32(dict, "timeout", cli_default_conn_timeout); + if (ret) { + gf_log("cli", GF_LOG_INFO, "Failed to save timeout to dict"); } -#endif - - if (rsp.op_ret) - cli_err ("%s", msg); - else - cli_out ("%s", msg); - ret = rsp.op_ret; + } + return ret; +} +static int +cli_to_glusterd(gf_cli_req *req, call_frame_t *frame, fop_cbk_fn_t cbkfn, + xdrproc_t xdrproc, dict_t *dict, int procnum, xlator_t *this, + rpc_clnt_prog_t *prog, struct iobref *iobref) +{ + int ret = 0; + size_t len = 0; + char *cmd = NULL; + int i = 0; + const char **words = NULL; + cli_local_t *local = NULL; + + if (!this || !frame || !frame->local || !dict) { + ret = -1; + goto out; + } + + local = frame->local; + + if (!local->words) { + ret = -1; + goto out; + } + + words = local->words; + + while (words[i]) + len += strlen(words[i++]) + 1; + + cmd = GF_MALLOC(len + 1, gf_common_mt_char); + if (!cmd) { + ret = -1; + goto out; + } + cmd[0] = '\0'; + + for (i = 0; words[i]; i++) { + strncat(cmd, words[i], len - 1); + if (words[i + 1] != NULL) + strncat(cmd, " ", len - 1); + } + + ret = dict_set_dynstr_sizen(dict, "cmd-str", cmd); + if (ret) + goto out; + + ret = add_cli_cmd_timeout_to_dict(dict); + + ret = dict_allocate_and_serialize(dict, &(req->dict).dict_val, + &(req->dict).dict_len); + + if (ret < 0) { + gf_log(this->name, GF_LOG_DEBUG, + "failed to get serialized length of dict"); + goto out; + } + + ret = cli_cmd_submit(NULL, req, frame, prog, procnum, iobref, this, cbkfn, + (xdrproc_t)xdrproc); out: - cli_cmd_broadcast_response (ret); - return ret; + return ret; } -int32_t -gf_cli3_1_statedump_volume (call_frame_t *frame, xlator_t *this, - void *data) +static int +gf_cli_print_bitrot_scrub_status(dict_t *dict) { - gf_cli_req req = {{0,}}; - dict_t *options = NULL; - int ret = -1; + int i = 1; + int j = 0; + int ret = -1; + int count = 0; + char key[64] = { + 0, + }; + char *volname = NULL; + char *node_name = 
NULL; + char *scrub_freq = NULL; + char *state_scrub = NULL; + char *scrub_impact = NULL; + char *bad_file_str = NULL; + char *scrub_log_file = NULL; + char *bitrot_log_file = NULL; + uint64_t scrub_files = 0; + uint64_t unsigned_files = 0; + uint64_t scrub_time = 0; + uint64_t days = 0; + uint64_t hours = 0; + uint64_t minutes = 0; + uint64_t seconds = 0; + char *last_scrub = NULL; + uint64_t error_count = 0; + int8_t scrub_running = 0; + char *scrub_state_op = NULL; + + ret = dict_get_int32_sizen(dict, "count", &count); + if (ret) { + gf_log("cli", GF_LOG_ERROR, + "failed to get count value from dictionary"); + goto out; + } + + ret = dict_get_str_sizen(dict, "volname", &volname); + if (ret) + gf_log("cli", GF_LOG_TRACE, "failed to get volume name"); + + ret = dict_get_str_sizen(dict, "features.scrub", &state_scrub); + if (ret) + gf_log("cli", GF_LOG_TRACE, "failed to get scrub state value"); + + ret = dict_get_str_sizen(dict, "features.scrub-throttle", &scrub_impact); + if (ret) + gf_log("cli", GF_LOG_TRACE, "failed to get scrub impact value"); + + ret = dict_get_str_sizen(dict, "features.scrub-freq", &scrub_freq); + if (ret) + gf_log("cli", GF_LOG_TRACE, "failed to get scrub -freq value"); + + ret = dict_get_str_sizen(dict, "bitrot_log_file", &bitrot_log_file); + if (ret) + gf_log("cli", GF_LOG_TRACE, "failed to get bitrot log file location"); + + ret = dict_get_str_sizen(dict, "scrub_log_file", &scrub_log_file); + if (ret) + gf_log("cli", GF_LOG_TRACE, "failed to get scrubber log file location"); + + for (i = 1; i <= count; i++) { + snprintf(key, sizeof(key), "scrub-running-%d", i); + ret = dict_get_int8(dict, key, &scrub_running); + if (ret) + gf_log("cli", GF_LOG_TRACE, "failed to get scrubbed files"); + if (scrub_running) + break; + } - if (!frame || !this || !data) - goto out; + if (scrub_running) + gf_asprintf(&scrub_state_op, "%s (In Progress)", state_scrub); + else + gf_asprintf(&scrub_state_op, "%s (Idle)", state_scrub); - options = data; + cli_out("\n%s: %s\n", "Volume name ", volname); - ret = dict_allocate_and_serialize (options, - &req.dict.dict_val, - (size_t *)&req.dict.dict_len); - if (ret < 0) { - gf_log (this->name, GF_LOG_ERROR, - "failed to serialize the data"); + cli_out("%s: %s\n", "State of scrub", scrub_state_op); - goto out; - } + cli_out("%s: %s\n", "Scrub impact", scrub_impact); - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_STATEDUMP_VOLUME, NULL, - this, gf_cli3_1_statedump_volume_cbk, - (xdrproc_t)xdr_gf_cli_req); + cli_out("%s: %s\n", "Scrub frequency", scrub_freq); -out: - if (options) - dict_destroy (options); - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); + cli_out("%s: %s\n", "Bitrot error log location", bitrot_log_file); - if (req.dict.dict_val) - GF_FREE (req.dict.dict_val); - return ret; -} + cli_out("%s: %s\n", "Scrubber error log location", scrub_log_file); -int32_t -gf_cli3_1_list_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) -{ - int ret = -1; - gf_cli_rsp rsp = {0,}; - dict_t *dict = NULL; - int vol_count = 0;; - char *volname = NULL; - char key[1024] = {0,}; - int i = 0; + for (i = 1; i <= count; i++) { + /* Reset the variables to prevent carryover of values */ + node_name = NULL; + last_scrub = NULL; + scrub_time = 0; + error_count = 0; + scrub_files = 0; + unsigned_files = 0; - if (-1 == req->rpc_status) - goto out; - ret = xdr_to_generic (*iov, &rsp, - (xdrproc_t)xdr_gf_cli_rsp); - if (ret < 0) { - gf_log (THIS->name, GF_LOG_ERROR, "XDR decoding failed"); - goto out; - } + 
snprintf(key, sizeof(key), "node-name-%d", i); + ret = dict_get_str(dict, key, &node_name); + if (ret) + gf_log("cli", GF_LOG_TRACE, "failed to get node-name"); - dict = dict_new (); - if (!dict) { - ret = -1; - goto out; - } + snprintf(key, sizeof(key), "scrubbed-files-%d", i); + ret = dict_get_uint64(dict, key, &scrub_files); + if (ret) + gf_log("cli", GF_LOG_TRACE, "failed to get scrubbed files"); - ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &dict); - if (ret) { - gf_log ("cli", GF_LOG_ERROR, "Unable to allocate memory"); - goto out; - } + snprintf(key, sizeof(key), "unsigned-files-%d", i); + ret = dict_get_uint64(dict, key, &unsigned_files); + if (ret) + gf_log("cli", GF_LOG_TRACE, "failed to get unsigned files"); -#if (HAVE_LIB_XML) - if (global_state->mode & GLUSTER_MODE_XML) { - ret = cli_xml_output_vol_list (dict, rsp.op_ret, rsp.op_errno, - rsp.op_errstr); - if (ret) - gf_log ("cli", GF_LOG_ERROR, - "Error outputting to xml"); - goto out; - } -#endif - if (rsp.op_ret) - cli_err ("%s", rsp.op_errstr); - else { - ret = dict_get_int32 (dict, "count", &vol_count); - if (ret) - goto out; + snprintf(key, sizeof(key), "scrub-duration-%d", i); + ret = dict_get_uint64(dict, key, &scrub_time); + if (ret) + gf_log("cli", GF_LOG_TRACE, "failed to get last scrub duration"); - if (vol_count == 0) { - cli_out ("No volumes present in cluster"); - goto out; - } - for (i = 0; i < vol_count; i++) { - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "volume%d", i); - ret = dict_get_str (dict, key, &volname); - if (ret) - goto out; - cli_out ("%s", volname); - } - } + snprintf(key, sizeof(key), "last-scrub-time-%d", i); + ret = dict_get_str(dict, key, &last_scrub); + if (ret) + gf_log("cli", GF_LOG_TRACE, "failed to get last scrub time"); + snprintf(key, sizeof(key), "error-count-%d", i); + ret = dict_get_uint64(dict, key, &error_count); + if (ret) + gf_log("cli", GF_LOG_TRACE, "failed to get error count"); - ret = rsp.op_ret; + cli_out("\n%s\n", + "========================================================="); -out: - cli_cmd_broadcast_response (ret); - return ret; -} + cli_out("%s: %s\n", "Node", node_name); -int32_t -gf_cli3_1_list_volume (call_frame_t *frame, xlator_t *this, void *data) -{ - int ret = -1; - gf_cli_req req = {{0,}}; + cli_out("%s: %" PRIu64 "\n", "Number of Scrubbed files", scrub_files); - if (!frame || !this) - goto out; + cli_out("%s: %" PRIu64 "\n", "Number of Skipped files", unsigned_files); - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_LIST_VOLUME, NULL, - this, gf_cli3_1_list_volume_cbk, - (xdrproc_t)xdr_gf_cli_req); + if ((!last_scrub) || !strcmp(last_scrub, "")) + cli_out("%s: %s\n", "Last completed scrub time", + "Scrubber pending to complete."); + else + cli_out("%s: %s\n", "Last completed scrub time", last_scrub); + + /* Printing last scrub duration time in human readable form*/ + seconds = scrub_time % 60; + minutes = (scrub_time / 60) % 60; + hours = (scrub_time / 3600) % 24; + days = scrub_time / 86400; + cli_out("%s: %" PRIu64 ":%" PRIu64 ":%" PRIu64 ":%" PRIu64 "\n", + "Duration of last scrub (D:M:H:M:S)", days, hours, minutes, + seconds); + + cli_out("%s: %" PRIu64 "\n", "Error count", error_count); + + if (error_count) { + cli_out("%s:\n", "Corrupted object's [GFID]"); + /* Printing list of bad file's (Corrupted object's)*/ + for (j = 0; j < error_count; j++) { + snprintf(key, sizeof(key), "quarantine-%d-%d", j, i); + ret = dict_get_str(dict, key, &bad_file_str); + if (!ret) { + cli_out("%s\n", bad_file_str); + } + 
} + } + } + cli_out("%s\n", + "========================================================="); out: - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - return ret; + GF_FREE(scrub_state_op); + return 0; } -int32_t -gf_cli3_1_clearlocks_volume_cbk (struct rpc_req *req, struct iovec *iov, - int count, void *myframe) +static int +gf_cli_bitrot_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) { - gf_cli_rsp rsp = {0,}; - int ret = -1; - char *lk_summary = NULL; - char *volname = NULL; - dict_t *dict = NULL; - - if (-1 == req->rpc_status) - goto out; - ret = xdr_to_generic (*iov, &rsp, - (xdrproc_t)xdr_gf_cli_rsp); - if (ret < 0) { + int ret = -1; + int type = 0; + gf_cli_rsp rsp = { + 0, + }; + dict_t *dict = NULL; + char *scrub_cmd = NULL; + char *volname = NULL; + char *cmd_str = NULL; + char *cmd_op = NULL; + + GF_ASSERT(myframe); + + if (req->rpc_status == -1) { + goto out; + } + + ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp); + if (ret < 0) { + gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR, + XDR_DECODE_FAIL); + goto out; + } + + if (rsp.op_ret) { + ret = -1; + if (global_state->mode & GLUSTER_MODE_XML) + goto xml_output; + + if (strcmp(rsp.op_errstr, "")) + cli_err("Bitrot command failed : %s", rsp.op_errstr); + else + cli_err("Bitrot command : failed"); - gf_log ("cli", GF_LOG_ERROR, "XDR decoding failed"); - goto out; - } - gf_log ("cli", GF_LOG_DEBUG, "Received response to clear-locks"); + goto out; + } - if (rsp.op_ret) { - cli_err ("Volume clear-locks unsuccessful"); - cli_err ("%s", rsp.op_errstr); + if (rsp.dict.dict_len) { + /* Unserialize the dictionary */ + dict = dict_new(); - } else { - if (!rsp.dict.dict_len) { - cli_out ("Possibly no locks cleared"); - ret = 0; - goto out; - } + if (!dict) { + ret = -1; + goto out; + } - dict = dict_new (); + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict); - if (!dict) { - ret = -1; - goto out; - } + if (ret) { + gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL); + goto out; + } + } - ret = dict_unserialize (rsp.dict.dict_val, - rsp.dict.dict_len, - &dict); + gf_log("cli", GF_LOG_DEBUG, "Received resp to bit rot command"); - if (ret) { - gf_log ("cli", GF_LOG_ERROR, - "Unable to serialize response dictionary"); - goto out; - } + ret = dict_get_int32_sizen(dict, "type", &type); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Failed to get command type"); + goto out; + } - ret = dict_get_str (dict, "volname", &volname); - if (ret) { - gf_log ("cli", GF_LOG_ERROR, "Unable to get volname " - "from dictionary"); - goto out; - } + if ((type == GF_BITROT_CMD_SCRUB_STATUS) && + !(global_state->mode & GLUSTER_MODE_XML)) { + ret = gf_cli_print_bitrot_scrub_status(dict); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Failed to print bitrot scrub status"); + } + goto out; + } + + /* Ignoring the error, as using dict val for cli output only */ + ret = dict_get_str_sizen(dict, "volname", &volname); + if (ret) + gf_log("cli", GF_LOG_TRACE, "failed to get volume name"); + + ret = dict_get_str_sizen(dict, "scrub-value", &scrub_cmd); + if (ret) + gf_log("cli", GF_LOG_TRACE, "Failed to get scrub command"); + + ret = dict_get_str_sizen(dict, "cmd-str", &cmd_str); + if (ret) + gf_log("cli", GF_LOG_TRACE, "failed to get command string"); + + if (cmd_str) + cmd_op = strrchr(cmd_str, ' ') + 1; + + switch (type) { + case GF_BITROT_OPTION_TYPE_ENABLE: + cli_out("volume bitrot: success bitrot enabled for volume %s", + volname); + ret = 0; + goto out; + case GF_BITROT_OPTION_TYPE_DISABLE: + 
cli_out("volume bitrot: success bitrot disabled for volume %s", + volname); + ret = 0; + goto out; + case GF_BITROT_CMD_SCRUB_ONDEMAND: + cli_out("volume bitrot: scrubber started ondemand for volume %s", + volname); + ret = 0; + goto out; + case GF_BITROT_OPTION_TYPE_SCRUB: + if (!strncmp("pause", scrub_cmd, sizeof("pause"))) + cli_out("volume bitrot: scrubber paused for volume %s", + volname); + if (!strncmp("resume", scrub_cmd, sizeof("resume"))) + cli_out("volume bitrot: scrubber resumed for volume %s", + volname); + ret = 0; + goto out; + case GF_BITROT_OPTION_TYPE_SCRUB_FREQ: + cli_out( + "volume bitrot: scrub-frequency is set to %s " + "successfully for volume %s", + cmd_op, volname); + ret = 0; + goto out; + case GF_BITROT_OPTION_TYPE_SCRUB_THROTTLE: + cli_out( + "volume bitrot: scrub-throttle is set to %s " + "successfully for volume %s", + cmd_op, volname); + ret = 0; + goto out; + } - ret = dict_get_str (dict, "lk-summary", &lk_summary); - if (ret) { - gf_log ("cli", GF_LOG_ERROR, "Unable to get lock " - "summary from dictionary"); - goto out; - } - cli_out ("Volume clear-locks successful"); - cli_out ("%s", lk_summary); +xml_output: + if (global_state->mode & GLUSTER_MODE_XML) { + ret = cli_xml_output_vol_profile(dict, rsp.op_ret, rsp.op_errno, + rsp.op_errstr); + if (ret) + gf_log("cli", GF_LOG_ERROR, XML_ERROR); + goto out; + } - } + if (!rsp.op_ret) + cli_out("volume bitrot: success"); - ret = rsp.op_ret; + ret = rsp.op_ret; out: - cli_cmd_broadcast_response (ret); - return ret; + if (dict) + dict_unref(dict); + + gf_free_xdr_cli_rsp(rsp); + cli_cmd_broadcast_response(ret); + return ret; } -int32_t -gf_cli3_1_clearlocks_volume (call_frame_t *frame, xlator_t *this, - void *data) +static int32_t +gf_cli_bitrot(call_frame_t *frame, xlator_t *this, void *data) { - gf_cli_req req = {{0,}}; - dict_t *options = NULL; - int ret = -1; + gf_cli_req req = {{ + 0, + }}; + dict_t *options = NULL; + int ret = -1; + + options = data; + + ret = cli_to_glusterd(&req, frame, gf_cli_bitrot_cbk, + (xdrproc_t)xdr_gf_cli_req, options, + GLUSTER_CLI_BITROT, this, cli_rpc_prog, NULL); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "cli_to_glusterd for bitrot failed"); + goto out; + } - if (!frame || !this || !data) - goto out; +out: + gf_log("cli", GF_LOG_DEBUG, RETURNING, ret); - options = data; + GF_FREE(req.dict.dict_val); - ret = dict_allocate_and_serialize (options, - &req.dict.dict_val, - (size_t *)&req.dict.dict_len); - if (ret < 0) { - gf_log ("cli", GF_LOG_ERROR, - "failed to serialize the data"); + return ret; +} - goto out; - } +static struct rpc_clnt_procedure gluster_cli_actors[GLUSTER_CLI_MAXVALUE] = { + [GLUSTER_CLI_NULL] = {"NULL", NULL}, + [GLUSTER_CLI_PROBE] = {"PROBE_QUERY", gf_cli_probe}, + [GLUSTER_CLI_DEPROBE] = {"DEPROBE_QUERY", gf_cli_deprobe}, + [GLUSTER_CLI_LIST_FRIENDS] = {"LIST_FRIENDS", gf_cli_list_friends}, + [GLUSTER_CLI_UUID_RESET] = {"UUID_RESET", gf_cli3_1_uuid_reset}, + [GLUSTER_CLI_UUID_GET] = {"UUID_GET", gf_cli3_1_uuid_get}, + [GLUSTER_CLI_CREATE_VOLUME] = {"CREATE_VOLUME", gf_cli_create_volume}, + [GLUSTER_CLI_DELETE_VOLUME] = {"DELETE_VOLUME", gf_cli_delete_volume}, + [GLUSTER_CLI_START_VOLUME] = {"START_VOLUME", gf_cli_start_volume}, + [GLUSTER_CLI_STOP_VOLUME] = {"STOP_VOLUME", gf_cli_stop_volume}, + [GLUSTER_CLI_RENAME_VOLUME] = {"RENAME_VOLUME", gf_cli_rename_volume}, + [GLUSTER_CLI_DEFRAG_VOLUME] = {"DEFRAG_VOLUME", gf_cli_defrag_volume}, + [GLUSTER_CLI_GET_VOLUME] = {"GET_VOLUME", gf_cli_get_volume}, + [GLUSTER_CLI_GET_NEXT_VOLUME] = {"GET_NEXT_VOLUME", 
gf_cli_get_next_volume}, + [GLUSTER_CLI_SET_VOLUME] = {"SET_VOLUME", gf_cli_set_volume}, + [GLUSTER_CLI_ADD_BRICK] = {"ADD_BRICK", gf_cli_add_brick}, + [GLUSTER_CLI_REMOVE_BRICK] = {"REMOVE_BRICK", gf_cli_remove_brick}, + [GLUSTER_CLI_REPLACE_BRICK] = {"REPLACE_BRICK", gf_cli_replace_brick}, + [GLUSTER_CLI_LOG_ROTATE] = {"LOG ROTATE", gf_cli_log_rotate}, + [GLUSTER_CLI_GETSPEC] = {"GETSPEC", gf_cli_getspec}, + [GLUSTER_CLI_PMAP_PORTBYBRICK] = {"PMAP PORTBYBRICK", gf_cli_pmap_b2p}, + [GLUSTER_CLI_SYNC_VOLUME] = {"SYNC_VOLUME", gf_cli_sync_volume}, + [GLUSTER_CLI_RESET_VOLUME] = {"RESET_VOLUME", gf_cli_reset_volume}, + [GLUSTER_CLI_FSM_LOG] = {"FSM_LOG", gf_cli_fsm_log}, + [GLUSTER_CLI_GSYNC_SET] = {"GSYNC_SET", gf_cli_gsync_set}, + [GLUSTER_CLI_PROFILE_VOLUME] = {"PROFILE_VOLUME", gf_cli_profile_volume}, + [GLUSTER_CLI_QUOTA] = {"QUOTA", gf_cli_quota}, + [GLUSTER_CLI_TOP_VOLUME] = {"TOP_VOLUME", gf_cli_top_volume}, + [GLUSTER_CLI_GETWD] = {"GETWD", gf_cli_getwd}, + [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME", gf_cli_status_volume}, + [GLUSTER_CLI_STATUS_ALL] = {"STATUS_ALL", gf_cli_status_volume_all}, + [GLUSTER_CLI_MOUNT] = {"MOUNT", gf_cli_mount}, + [GLUSTER_CLI_UMOUNT] = {"UMOUNT", gf_cli_umount}, + [GLUSTER_CLI_HEAL_VOLUME] = {"HEAL_VOLUME", gf_cli_heal_volume}, + [GLUSTER_CLI_STATEDUMP_VOLUME] = {"STATEDUMP_VOLUME", + gf_cli_statedump_volume}, + [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", gf_cli_list_volume}, + [GLUSTER_CLI_CLRLOCKS_VOLUME] = {"CLEARLOCKS_VOLUME", + gf_cli_clearlocks_volume}, + [GLUSTER_CLI_COPY_FILE] = {"COPY_FILE", gf_cli_copy_file}, + [GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", gf_cli_sys_exec}, + [GLUSTER_CLI_SNAP] = {"SNAP", gf_cli_snapshot}, + [GLUSTER_CLI_BARRIER_VOLUME] = {"BARRIER VOLUME", gf_cli_barrier_volume}, + [GLUSTER_CLI_GET_VOL_OPT] = {"GET_VOL_OPT", gf_cli_get_vol_opt}, + [GLUSTER_CLI_BITROT] = {"BITROT", gf_cli_bitrot}, + [GLUSTER_CLI_GET_STATE] = {"GET_STATE", gf_cli_get_state}, + [GLUSTER_CLI_RESET_BRICK] = {"RESET_BRICK", gf_cli_reset_brick}, + [GLUSTER_CLI_GANESHA] = {"GANESHA", gf_cli_ganesha}, +}; - ret = cli_cmd_submit (&req, frame, cli_rpc_prog, - GLUSTER_CLI_CLRLOCKS_VOLUME, NULL, - this, gf_cli3_1_clearlocks_volume_cbk, - (xdrproc_t)xdr_gf_cli_req); +struct rpc_clnt_program cli_prog = { + .progname = "Gluster CLI", + .prognum = GLUSTER_CLI_PROGRAM, + .progver = GLUSTER_CLI_VERSION, + .numproc = GLUSTER_CLI_MAXVALUE, + .proctable = gluster_cli_actors, +}; -out: - if (options) - dict_destroy (options); - gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret); - - if (req.dict.dict_val) - GF_FREE (req.dict.dict_val); - return ret; -} - -struct rpc_clnt_procedure gluster_cli_actors[GLUSTER_CLI_MAXVALUE] = { - [GLUSTER_CLI_NULL] = {"NULL", NULL }, - [GLUSTER_CLI_PROBE] = {"PROBE_QUERY", gf_cli3_1_probe}, - [GLUSTER_CLI_DEPROBE] = {"DEPROBE_QUERY", gf_cli3_1_deprobe}, - [GLUSTER_CLI_LIST_FRIENDS] = {"LIST_FRIENDS", gf_cli3_1_list_friends}, - [GLUSTER_CLI_CREATE_VOLUME] = {"CREATE_VOLUME", gf_cli3_1_create_volume}, - [GLUSTER_CLI_DELETE_VOLUME] = {"DELETE_VOLUME", gf_cli3_1_delete_volume}, - [GLUSTER_CLI_START_VOLUME] = {"START_VOLUME", gf_cli3_1_start_volume}, - [GLUSTER_CLI_STOP_VOLUME] = {"STOP_VOLUME", gf_cli3_1_stop_volume}, - [GLUSTER_CLI_RENAME_VOLUME] = {"RENAME_VOLUME", gf_cli3_1_rename_volume}, - [GLUSTER_CLI_DEFRAG_VOLUME] = {"DEFRAG_VOLUME", gf_cli3_1_defrag_volume}, - [GLUSTER_CLI_GET_VOLUME] = {"GET_VOLUME", gf_cli3_1_get_volume}, - [GLUSTER_CLI_GET_NEXT_VOLUME] = {"GET_NEXT_VOLUME", gf_cli3_1_get_next_volume}, - [GLUSTER_CLI_SET_VOLUME] = 
{"SET_VOLUME", gf_cli3_1_set_volume}, - [GLUSTER_CLI_ADD_BRICK] = {"ADD_BRICK", gf_cli3_1_add_brick}, - [GLUSTER_CLI_REMOVE_BRICK] = {"REMOVE_BRICK", gf_cli3_1_remove_brick}, - [GLUSTER_CLI_REPLACE_BRICK] = {"REPLACE_BRICK", gf_cli3_1_replace_brick}, - [GLUSTER_CLI_LOG_ROTATE] = {"LOG ROTATE", gf_cli3_1_log_rotate}, - [GLUSTER_CLI_GETSPEC] = {"GETSPEC", gf_cli3_1_getspec}, - [GLUSTER_CLI_PMAP_PORTBYBRICK] = {"PMAP PORTBYBRICK", gf_cli3_1_pmap_b2p}, - [GLUSTER_CLI_SYNC_VOLUME] = {"SYNC_VOLUME", gf_cli3_1_sync_volume}, - [GLUSTER_CLI_RESET_VOLUME] = {"RESET_VOLUME", gf_cli3_1_reset_volume}, - [GLUSTER_CLI_FSM_LOG] = {"FSM_LOG", gf_cli3_1_fsm_log}, - [GLUSTER_CLI_GSYNC_SET] = {"GSYNC_SET", gf_cli3_1_gsync_set}, - [GLUSTER_CLI_PROFILE_VOLUME] = {"PROFILE_VOLUME", gf_cli3_1_profile_volume}, - [GLUSTER_CLI_QUOTA] = {"QUOTA", gf_cli3_1_quota}, - [GLUSTER_CLI_TOP_VOLUME] = {"TOP_VOLUME", gf_cli3_1_top_volume}, - [GLUSTER_CLI_GETWD] = {"GETWD", gf_cli3_1_getwd}, - [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME", gf_cli3_1_status_volume}, - [GLUSTER_CLI_STATUS_ALL] = {"STATUS_ALL", gf_cli_status_volume_all}, - [GLUSTER_CLI_MOUNT] = {"MOUNT", gf_cli3_1_mount}, - [GLUSTER_CLI_UMOUNT] = {"UMOUNT", gf_cli3_1_umount}, - [GLUSTER_CLI_HEAL_VOLUME] = {"HEAL_VOLUME", gf_cli3_1_heal_volume}, - [GLUSTER_CLI_STATEDUMP_VOLUME] = {"STATEDUMP_VOLUME", gf_cli3_1_statedump_volume}, - [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", gf_cli3_1_list_volume}, - [GLUSTER_CLI_CLRLOCKS_VOLUME] = {"CLEARLOCKS_VOLUME", gf_cli3_1_clearlocks_volume}, +static struct rpc_clnt_procedure cli_quotad_procs[GF_AGGREGATOR_MAXVALUE] = { + [GF_AGGREGATOR_NULL] = {"NULL", NULL}, + [GF_AGGREGATOR_LOOKUP] = {"LOOKUP", NULL}, + [GF_AGGREGATOR_GETLIMIT] = {"GETLIMIT", cli_quotad_getlimit}, }; -struct rpc_clnt_program cli_prog = { - .progname = "Gluster CLI", - .prognum = GLUSTER_CLI_PROGRAM, - .progver = GLUSTER_CLI_VERSION, - .numproc = GLUSTER_CLI_MAXVALUE, - .proctable = gluster_cli_actors, +struct rpc_clnt_program cli_quotad_clnt = { + .progname = "CLI Quotad client", + .prognum = GLUSTER_AGGREGATOR_PROGRAM, + .progver = GLUSTER_AGGREGATOR_VERSION, + .numproc = GF_AGGREGATOR_MAXVALUE, + .proctable = cli_quotad_procs, }; |

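
/*
 * A sketch of how cli_to_glusterd() (earlier in this diff) flattens the
 * CLI's word list into the single "cmd-str" value stored in the request
 * dictionary: it sums strlen(word) + 1 over all words, allocates once,
 * then concatenates the words with single spaces between them. Note that
 * the original's strncat(cmd, words[i], len - 1) passes the total payload
 * length rather than the space remaining, which stays in bounds only
 * because the buffer was pre-sized for the whole command line. The
 * stand-alone version below uses an explicit write cursor instead;
 * demo_join_words is an invented name, not a GlusterFS function.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Join argv-style words with single spaces. Returns a heap string the
 * caller must free(), or NULL on allocation failure. */
static char *
demo_join_words(const char **words)
{
    size_t len = 0, off = 0;
    size_t wlen;
    int i;
    char *cmd;

    /* One extra byte per word: the separator, or the trailing NUL. */
    for (i = 0; words[i]; i++)
        len += strlen(words[i]) + 1;

    cmd = malloc(len ? len : 1);
    if (!cmd)
        return NULL;
    cmd[0] = '\0';

    for (i = 0; words[i]; i++) {
        wlen = strlen(words[i]);
        memcpy(cmd + off, words[i], wlen);
        off += wlen;
        /* Space between words, NUL after the last one. */
        cmd[off++] = words[i + 1] ? ' ' : '\0';
    }
    return cmd;
}

int
main(void)
{
    const char *words[] = {"volume", "status", "all", NULL};
    char *cmd = demo_join_words(words);

    if (cmd) {
        printf("cmd-str: %s\n", cmd); /* prints "cmd-str: volume status all" */
        free(cmd);
    }
    return 0;
}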