path: root/cli/src/cli-rpc-ops.c
Diffstat (limited to 'cli/src/cli-rpc-ops.c')
-rw-r--r--  cli/src/cli-rpc-ops.c | 6174
1 file changed, 4600 insertions(+), 1574 deletions(-)
diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index d1888415c..8481d468c 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -1,33 +1,18 @@
/*
- Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
-*/
+ Copyright (c) 2010-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
#endif
-#ifndef GSYNC_CONF
-#define GSYNC_CONF GEOREP"/gsyncd.conf"
-#endif
-#define DEFAULT_LOG_FILE_DIRECTORY DATADIR "/log/glusterfs"
-
/* Widths of various columns in top read/write-perf output
* Total width of top read/write-perf should be 80 chars
* including one space between column
@@ -37,6 +22,8 @@
#define VOL_TOP_PERF_SPEED_WIDTH 4
#define VOL_TOP_PERF_TIME_WIDTH 26
+#define INDENT_MAIN_HEAD "%-25s %s "
+
#include "cli.h"
#include "compat-errno.h"
#include "cli-cmd.h"
@@ -52,32 +39,56 @@
#include "syscall.h"
#include "glusterfs3.h"
#include "portmap-xdr.h"
+#include "byte-order.h"
+#include "cli-quotad-client.h"
#include "run.h"
+enum gf_task_types {
+ GF_TASK_TYPE_REBALANCE,
+ GF_TASK_TYPE_REMOVE_BRICK
+};
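
The new gf_task_types enum exists so that one status printer can serve both rebalance and remove-brick: as the gf_cli_print_rebalance_status hunk further down shows, remove-brick folds its skipped-file count into the failure count. A minimal standalone sketch of that fold, using plain counters instead of the real dict_t plumbing:

    #include <inttypes.h>
    #include <stdio.h>

    enum gf_task_types { GF_TASK_TYPE_REBALANCE, GF_TASK_TYPE_REMOVE_BRICK };

    /* Illustrative only: the per-node fold done by the shared printer. */
    static void print_counts (enum gf_task_types t,
                              uint64_t failures, uint64_t skipped)
    {
            if (t != GF_TASK_TYPE_REBALANCE) {
                    failures += skipped;  /* remove-brick counts skips as failures */
                    skipped = 0;
            }
            printf ("failures=%"PRIu64" skipped=%"PRIu64"\n", failures, skipped);
    }

    int main (void)
    {
            print_counts (GF_TASK_TYPE_REMOVE_BRICK, 2, 5);   /* failures=7 */
            return 0;
    }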
+
+extern struct rpc_clnt *global_quotad_rpc;
+extern rpc_clnt_prog_t cli_quotad_clnt;
extern rpc_clnt_prog_t *cli_rpc_prog;
extern int cli_op_ret;
extern int connected;
-char *cli_volume_type[] = {"Distribute",
- "Stripe",
- "Replicate",
- "Striped-Replicate",
- "Distributed-Stripe",
- "Distributed-Replicate",
- "Distributed-Striped-Replicate",
-};
-
-
-char *cli_volume_status[] = {"Created",
- "Started",
- "Stopped"
+char *cli_vol_type_str[] = {"Distribute",
+ "Stripe",
+ "Replicate",
+ "Striped-Replicate",
+ "Distributed-Stripe",
+ "Distributed-Replicate",
+ "Distributed-Striped-Replicate",
+ };
+
+char *cli_vol_status_str[] = {"Created",
+ "Started",
+ "Stopped",
+ };
+
+char *cli_vol_task_status_str[] = {"not started",
+ "in progress",
+ "stopped",
+ "completed",
+ "failed",
+ "fix-layout in progress",
+ "fix-layout stopped",
+ "fix-layout completed",
+ "fix-layout failed",
+ "unknown"
};
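
cli_vol_task_status_str is indexed directly with a gf_defrag_status_t value; the status printer below clamps anything out of range to the last slot, which is why the table ends in "unknown". The table-plus-clamp idiom as a standalone sketch (the enum here is a stand-in, not the real gf_defrag_status_t):

    #include <stdio.h>

    enum { ST_NOT_STARTED, ST_IN_PROGRESS, ST_STOPPED, ST_MAX };

    static const char *status_str[] = { "not started", "in progress",
                                        "stopped", "unknown" };

    static const char *
    status_to_str (int s)
    {
            if (s < 0 || s >= ST_MAX)
                    s = ST_MAX;            /* clamp to the "unknown" slot */
            return status_str[s];
    }

    int main (void) { printf ("%s\n", status_to_str (42)); return 0; }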
int32_t
-gf_cli3_1_get_volume (call_frame_t *frame, xlator_t *this,
+gf_cli_get_volume (call_frame_t *frame, xlator_t *this,
void *data);
+int
+cli_to_glusterd (gf_cli_req *req, call_frame_t *frame, fop_cbk_fn_t cbkfn,
+ xdrproc_t xdrproc, dict_t *dict, int procnum, xlator_t *this,
+ rpc_clnt_prog_t *prog, struct iobref *iobref);
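
The cli_to_glusterd() prototype centralizes the serialize-and-submit boilerplate that every gf_cli_*() call path used to repeat. A hedged sketch of the call shape, with the argument roles inferred from the prototype only (the helper's body is outside this hunk, and GLUSTER_CLI_FOO is a made-up procedure number):

    /* Inside a hypothetical gf_cli_foo (frame, this, data):
     *
     *   gf_cli_req  req  = {{0,},};
     *   dict_t     *dict = data;
     *
     *   ret = cli_to_glusterd (&req, frame, gf_cli_foo_cbk,
     *                          (xdrproc_t) xdr_gf_cli_req, dict,
     *                          GLUSTER_CLI_FOO, this, cli_rpc_prog, NULL);
     *
     * One call serializes 'dict' into 'req' and submits it with the given
     * callback, replacing the old per-procedure request-building code.
     */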
rpc_clnt_prog_t cli_handshake_prog = {
.progname = "cli handshake",
@@ -92,10 +103,10 @@ rpc_clnt_prog_t cli_pmap_prog = {
};
int
-gf_cli3_1_probe_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_probe_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
- gf1_cli_probe_rsp rsp = {0,};
+ gf_cli_rsp rsp = {0,};
int ret = -1;
char msg[1024] = {0,};
@@ -103,88 +114,39 @@ gf_cli3_1_probe_cbk (struct rpc_req *req, struct iovec *iov,
goto out;
}
- ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf1_cli_probe_rsp);
+ ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "error");
+ gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
//rsp.op_ret = -1;
//rsp.op_errno = EINVAL;
goto out;
}
gf_log ("cli", GF_LOG_INFO, "Received resp to probe");
- if (!rsp.op_ret) {
- switch (rsp.op_errno) {
- case GF_PROBE_SUCCESS:
- snprintf (msg, sizeof (msg),
- "Probe successful");
- break;
- case GF_PROBE_LOCALHOST:
- snprintf (msg, sizeof (msg),
- "Probe on localhost not needed");
- break;
- case GF_PROBE_FRIEND:
- snprintf (msg, sizeof (msg),
- "Probe on host %s port %d already"
- " in peer list", rsp.hostname,
- rsp.port);
- break;
- default:
- snprintf (msg, sizeof (msg),
- "Probe returned with unknown errno %d",
- rsp.op_errno);
- break;
- }
- }
-
- if (rsp.op_ret) {
- switch (rsp.op_errno) {
- case GF_PROBE_ANOTHER_CLUSTER:
- snprintf (msg, sizeof (msg),
- "%s is already part of another"
- " cluster", rsp.hostname);
- break;
- case GF_PROBE_VOLUME_CONFLICT:
- snprintf (msg, sizeof (msg),
- "Atleast one volume on %s conflicts "
- "with existing volumes in the "
- "cluster", rsp.hostname);
- break;
- case GF_PROBE_UNKNOWN_PEER:
- snprintf (msg, sizeof (msg),
- "%s responded with 'unknown peer'"
- " error, this could happen if %s "
- "doesn't have localhost in its peer"
- " database", rsp.hostname,
- rsp.hostname);
- break;
- case GF_PROBE_ADD_FAILED:
- snprintf (msg, sizeof (msg),
- "Failed to add peer information "
- "on %s" , rsp.hostname);
- break;
- default:
- snprintf (msg, sizeof (msg),
- "Probe unsuccessful\nProbe returned "
- "with unknown errno %d",
- rsp.op_errno);
- break;
- }
- gf_log ("glusterd",GF_LOG_ERROR,"Probe failed with op_ret %d"
- " and op_errno %d", rsp.op_ret, rsp.op_errno);
+ if (rsp.op_errstr && (strlen (rsp.op_errstr) > 0)) {
+ snprintf (msg, sizeof (msg), "%s", rsp.op_errstr);
+ if (rsp.op_ret)
+ gf_log ("cli", GF_LOG_ERROR, "%s", msg);
}
-#if (HAVE_LIB_XML)
if (global_state->mode & GLUSTER_MODE_XML) {
- ret = cli_xml_output_str ("peerProbe", msg, rsp.op_ret,
- rsp.op_errno, NULL);
+ ret = cli_xml_output_str (NULL,
+ (rsp.op_ret)? NULL : msg,
+ rsp.op_ret, rsp.op_errno,
+ (rsp.op_ret)? msg : NULL);
if (ret)
gf_log ("cli", GF_LOG_ERROR,
"Error outputting to xml");
goto out;
}
-#endif
- cli_out ("%s", msg);
+
+ if (!rsp.op_ret)
+ cli_out ("peer probe: success. %s", msg);
+ else
+ cli_err ("peer probe: failed: %s", msg);
+
ret = rsp.op_ret;
out:
@@ -193,71 +155,53 @@ out:
}
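
The probe callback (and the detach callback below) drops its client-side errno switch because glusterd now ships a ready-made message in op_errstr, so the CLI only relays it. The resulting shape, common to both callbacks, is simply:

    if (rsp.op_errstr && (strlen (rsp.op_errstr) > 0)) {
            snprintf (msg, sizeof (msg), "%s", rsp.op_errstr);
            if (rsp.op_ret)
                    gf_log ("cli", GF_LOG_ERROR, "%s", msg);
    }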
int
-gf_cli3_1_deprobe_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_deprobe_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
- gf1_cli_deprobe_rsp rsp = {0,};
+ gf_cli_rsp rsp = {0,};
int ret = -1;
- char msg[1024] = {0,};
+ char msg[1024] = {0,};
if (-1 == req->rpc_status) {
goto out;
}
- ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf1_cli_deprobe_rsp);
+ ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "error");
+ gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
//rsp.op_ret = -1;
//rsp.op_errno = EINVAL;
goto out;
}
gf_log ("cli", GF_LOG_INFO, "Received resp to deprobe");
+
if (rsp.op_ret) {
- switch (rsp.op_errno) {
- case GF_DEPROBE_LOCALHOST:
- snprintf (msg, sizeof (msg),
- "%s is localhost", rsp.hostname);
- break;
- case GF_DEPROBE_NOT_FRIEND:
- snprintf (msg, sizeof (msg),
- "%s is not part of cluster",
- rsp.hostname);
- break;
- case GF_DEPROBE_BRICK_EXIST:
- snprintf (msg, sizeof (msg),
- "Brick(s) with the peer %s exist in "
- "cluster", rsp.hostname);
- break;
- case GF_DEPROBE_FRIEND_DOWN:
- snprintf (msg, sizeof (msg),
- "One of the peers is probably down."
- " Check with 'peer status'.");
- break;
- default:
- snprintf (msg, sizeof (msg),
- "Detach unsuccessful\nDetach returned"
- " with unknown errno %d",
- rsp.op_errno);
- break;
+ if (strlen (rsp.op_errstr) > 0) {
+ snprintf (msg, sizeof (msg), "%s", rsp.op_errstr);
+ gf_log ("cli", GF_LOG_ERROR, "%s", rsp.op_errstr);
}
- gf_log ("glusterd",GF_LOG_ERROR,"Detach failed with op_ret %d"
- " and op_errno %d", rsp.op_ret, rsp.op_errno);
} else {
- snprintf (msg, sizeof (msg), "Detach successful");
+ snprintf (msg, sizeof (msg), "success");
}
-#if (HAVE_LIB_XML)
if (global_state->mode & GLUSTER_MODE_XML) {
- ret = cli_xml_output_str ("peerDetach", msg, rsp.op_ret,
- rsp.op_errno, NULL);
+ ret = cli_xml_output_str (NULL,
+ (rsp.op_ret)? NULL : msg,
+ rsp.op_ret, rsp.op_errno,
+ (rsp.op_ret)? msg : NULL);
if (ret)
gf_log ("cli", GF_LOG_ERROR,
"Error outputting to xml");
goto out;
}
-#endif
- cli_out ("%s", msg);
+
+ if (!rsp.op_ret)
+ cli_out ("peer detach: %s", msg);
+ else
+ cli_err ("peer detach: failed: %s", msg);
+
ret = rsp.op_ret;
out:
@@ -266,34 +210,143 @@ out:
}
int
-gf_cli3_1_list_friends_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gf_cli_output_peer_status (dict_t *dict, int count)
{
- gf1_cli_peer_list_rsp rsp = {0,};
int ret = -1;
- dict_t *dict = NULL;
char *uuid_buf = NULL;
char *hostname_buf = NULL;
int32_t i = 1;
char key[256] = {0,};
char *state = NULL;
- int32_t port = 0;
int32_t connected = 0;
char *connected_str = NULL;
+ cli_out ("Number of Peers: %d", count);
+ i = 1;
+ while ( i <= count) {
+ snprintf (key, 256, "friend%d.uuid", i);
+ ret = dict_get_str (dict, key, &uuid_buf);
+ if (ret)
+ goto out;
+
+ snprintf (key, 256, "friend%d.hostname", i);
+ ret = dict_get_str (dict, key, &hostname_buf);
+ if (ret)
+ goto out;
+
+ snprintf (key, 256, "friend%d.connected", i);
+ ret = dict_get_int32 (dict, key, &connected);
+ if (ret)
+ goto out;
+ if (connected)
+ connected_str = "Connected";
+ else
+ connected_str = "Disconnected";
+
+
+ snprintf (key, 256, "friend%d.state", i);
+ ret = dict_get_str (dict, key, &state);
+ if (ret)
+ goto out;
+
+ cli_out ("\nHostname: %s\nUuid: %s\nState: %s (%s)",
+ hostname_buf, uuid_buf, state, connected_str);
+ i++;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+int
+gf_cli_output_pool_list (dict_t *dict, int count)
+{
+ int ret = -1;
+ char *uuid_buf = NULL;
+ char *hostname_buf = NULL;
+ int32_t i = 1;
+ char key[256] = {0,};
+ int32_t connected = 0;
+ char *connected_str = NULL;
+
+ if (count >= 1)
+ cli_out ("UUID\t\t\t\t\tHostname\tState");
+
+ while ( i <= count) {
+ snprintf (key, 256, "friend%d.uuid", i);
+ ret = dict_get_str (dict, key, &uuid_buf);
+ if (ret)
+ goto out;
+
+ snprintf (key, 256, "friend%d.hostname", i);
+ ret = dict_get_str (dict, key, &hostname_buf);
+ if (ret)
+ goto out;
+
+ snprintf (key, 256, "friend%d.connected", i);
+ ret = dict_get_int32 (dict, key, &connected);
+ if (ret)
+ goto out;
+ if (connected)
+ connected_str = "Connected";
+ else
+ connected_str = "Disconnected";
+
+ cli_out ("%s\t%-9s\t%s ", uuid_buf, hostname_buf,
+ connected_str);
+ i++;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+/* function pointer for gf_cli_output_{pool_list,peer_status} */
+typedef int (*cli_friend_output_fn) (dict_t*, int);
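
gf_cli_list_friends_cbk below serves both 'peer status' and 'pool list' by selecting one of the two output functions at runtime through this typedef. The dispatch idiom, reduced to standalone C (the names and the flag are illustrative):

    #include <stdio.h>

    typedef int (*friend_output_fn) (int count);

    static int output_peer_status (int c) { printf ("Number of Peers: %d\n", c); return 0; }
    static int output_pool_list   (int c) { printf ("UUID\tHostname\tState\n"); return 0; }

    int main (void)
    {
            int pool_mode = 1;   /* the real cbk reads this from frame->local */
            friend_output_fn fn = pool_mode ? output_pool_list
                                            : output_peer_status;
            return fn (3);       /* one callback, two renderings */
    }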
+
+int
+gf_cli_list_friends_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gf1_cli_peer_list_rsp rsp = {0,};
+ int ret = -1;
+ dict_t *dict = NULL;
+ char msg[1024] = {0,};
+ char *cmd = NULL;
+ cli_friend_output_fn friend_output_fn;
+ call_frame_t *frame = NULL;
+ unsigned long flags = 0;
+
+ frame = myframe;
+ flags = (long)frame->local;
+
+ if (flags == GF_CLI_LIST_POOL_NODES) {
+ cmd = "pool list";
+ friend_output_fn = &gf_cli_output_pool_list;
+ } else {
+ cmd = "peer status";
+ friend_output_fn = &gf_cli_output_peer_status;
+ }
+
+ /* 'free' the flags set by gf_cli_list_friends */
+ frame->local = NULL;
+
if (-1 == req->rpc_status) {
goto out;
}
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf1_cli_peer_list_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "error");
+ gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
//rsp.op_ret = -1;
//rsp.op_errno = EINVAL;
goto out;
}
- gf_log ("cli", GF_LOG_INFO, "Received resp to list: %d",
+ gf_log ("cli", GF_LOG_DEBUG, "Received resp to list: %d",
rsp.op_ret);
ret = rsp.op_ret;
@@ -301,7 +354,19 @@ gf_cli3_1_list_friends_cbk (struct rpc_req *req, struct iovec *iov,
if (!rsp.op_ret) {
if (!rsp.friends.friends_len) {
- cli_out ("No peers present");
+ snprintf (msg, sizeof (msg),
+ "%s: No peers present", cmd);
+ if (global_state->mode & GLUSTER_MODE_XML) {
+ ret = cli_xml_output_peer_status (dict,
+ rsp.op_ret,
+ rsp.op_errno,
+ msg);
+ if (ret)
+ gf_log ("cli", GF_LOG_ERROR,
+ "Error outputting to xml");
+ goto out;
+ }
+ cli_err ("%s", msg);
ret = 0;
goto out;
}
@@ -323,69 +388,34 @@ gf_cli3_1_list_friends_cbk (struct rpc_req *req, struct iovec *iov,
goto out;
}
-#if (HAVE_LIB_XML)
if (global_state->mode & GLUSTER_MODE_XML) {
- ret = cli_xml_output_dict ("peerStatus", dict,
- rsp.op_ret, rsp.op_errno,
- NULL);
+ ret = cli_xml_output_peer_status (dict, rsp.op_ret,
+ rsp.op_errno, msg);
if (ret)
gf_log ("cli", GF_LOG_ERROR,
- "Error ouputting to xml");
+ "Error outputting to xml");
goto out;
}
-#endif
- ret = dict_get_int32 (dict, "count", &count);
+ ret = dict_get_int32 (dict, "count", &count);
if (ret) {
goto out;
}
- cli_out ("Number of Peers: %d", count);
-
- while ( i <= count) {
- snprintf (key, 256, "friend%d.uuid", i);
- ret = dict_get_str (dict, key, &uuid_buf);
- if (ret)
- goto out;
-
- snprintf (key, 256, "friend%d.hostname", i);
- ret = dict_get_str (dict, key, &hostname_buf);
- if (ret)
- goto out;
-
- snprintf (key, 256, "friend%d.connected", i);
- ret = dict_get_int32 (dict, key, &connected);
- if (ret)
- goto out;
- if (connected)
- connected_str = "Connected";
- else
- connected_str = "Disconnected";
-
- snprintf (key, 256, "friend%d.port", i);
- ret = dict_get_int32 (dict, key, &port);
- if (ret)
- goto out;
-
- snprintf (key, 256, "friend%d.state", i);
- ret = dict_get_str (dict, key, &state);
- if (ret)
- goto out;
-
- if (!port) {
- cli_out ("\nHostname: %s\nUuid: %s\nState: %s "
- "(%s)",
- hostname_buf, uuid_buf, state,
- connected_str);
- } else {
- cli_out ("\nHostname: %s\nPort: %d\nUuid: %s\n"
- "State: %s (%s)", hostname_buf, port,
- uuid_buf, state, connected_str);
- }
- i++;
+ ret = friend_output_fn (dict, count);
+ if (ret) {
+ goto out;
}
} else {
- ret = -1;
+ if (global_state->mode & GLUSTER_MODE_XML) {
+ ret = cli_xml_output_peer_status (dict, rsp.op_ret,
+ rsp.op_errno, NULL);
+ if (ret)
+ gf_log ("cli", GF_LOG_ERROR,
+ "Error outputting to xml");
+ } else {
+ ret = -1;
+ }
goto out;
}
@@ -395,7 +425,7 @@ gf_cli3_1_list_friends_cbk (struct rpc_req *req, struct iovec *iov,
out:
cli_cmd_broadcast_response (ret);
if (ret)
- cli_out ("Peer status unsuccessful");
+ cli_err ("%s: failed", cmd);
if (dict)
dict_destroy (dict);
@@ -429,14 +459,37 @@ cli_out_options ( char *substr, char *optstr, char *valstr)
cli_out ("%s: %s",ptr2 , valstr);
}
+static int
+_gf_cli_output_volinfo_opts (dict_t *d, char *k,
+ data_t *v, void *tmp)
+{
+ int ret = 0;
+ char *key = NULL;
+ char *ptr = NULL;
+ data_t *value = NULL;
+
+ key = tmp;
+
+ ptr = strstr (k, "option.");
+ if (ptr) {
+ value = v;
+ if (!value) {
+ ret = -1;
+ goto out;
+ }
+ cli_out_options (key, k, v->data);
+ }
+out:
+ return ret;
+}
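
This callback replaces the hand-rolled walk over dict->members_list that is deleted further down: instead of reaching into the dict's internal pair list, the volume-info callback now hands _gf_cli_output_volinfo_opts to dict_foreach and filters on the "option." marker. The same shape in standalone C over a plain table (the table and helper are stand-ins for dict_t and dict_foreach):

    #include <stdio.h>
    #include <string.h>

    struct kv { const char *k; const char *v; };

    typedef int (*kv_fn) (const char *k, const char *v, void *tmp);

    static int
    foreach_kv (const struct kv *t, int n, kv_fn fn, void *tmp)
    {
            for (int i = 0; i < n; i++)
                    if (fn (t[i].k, t[i].v, tmp))
                            return -1;
            return 0;
    }

    /* Print only entries carrying the "option." marker, as the cbk does. */
    static int
    print_opts (const char *k, const char *v, void *tmp)
    {
            const char *p = strstr (k, "option.");
            (void) tmp;
            if (p)
                    printf ("%s: %s\n", p + strlen ("option."), v);
            return 0;
    }

    int main (void)
    {
            const struct kv t[] = { { "volume0.option.nfs.disable", "on" },
                                    { "volume0.brick_count",        "2"  } };
            return foreach_kv (t, 2, print_opts, NULL);
    }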
+
int
-gf_cli3_1_get_volume_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_get_volume_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
int ret = -1;
int opt_count = 0;
- int k = 0;
int32_t i = 0;
int32_t j = 1;
int32_t status = 0;
@@ -447,24 +500,27 @@ gf_cli3_1_get_volume_cbk (struct rpc_req *req, struct iovec *iov,
int32_t replica_count = 0;
int32_t vol_type = 0;
int32_t transport = 0;
- char *ptr = NULL;
char *volume_id_str = NULL;
char *brick = NULL;
char *volname = NULL;
dict_t *dict = NULL;
- data_pair_t *pairs = NULL;
- data_t *value = NULL;
cli_local_t *local = NULL;
char key[1024] = {0};
char err_str[2048] = {0};
gf_cli_rsp rsp = {0};
+ char *caps = NULL;
+ int k __attribute__((unused)) = 0;
+ /* snap_volume variable helps in showing whether a volume is a normal
+ * volume or a volume for the snapshot */
+ int32_t snap_volume = 0;
if (-1 == req->rpc_status)
goto out;
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log ("cli", GF_LOG_ERROR, "error");
+ gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
@@ -477,7 +533,9 @@ gf_cli3_1_get_volume_cbk (struct rpc_req *req, struct iovec *iov,
}
if (!rsp.dict.dict_len) {
- cli_out ("No volumes present");
+ if (global_state->mode & GLUSTER_MODE_XML)
+ goto xml_output;
+ cli_err ("No volumes present");
ret = 0;
goto out;
}
@@ -520,21 +578,44 @@ gf_cli3_1_get_volume_cbk (struct rpc_req *req, struct iovec *iov,
"Volume %s does not exist",
local->get_vol.volname);
ret = -1;
- goto out;
+ if (!(global_state->mode & GLUSTER_MODE_XML))
+ goto out;
}
}
-#if (HAVE_LIB_XML)
+xml_output:
if (global_state->mode & GLUSTER_MODE_XML) {
- ret = cli_xml_output_vol_info (dict, rsp.op_ret,
- rsp.op_errno, rsp.op_errstr);
- if (ret) {
- gf_log ("cli", GF_LOG_ERROR,
- "Error outputting to xml");
+ /* For GET_NEXT_VOLUME output is already begun in
+ * and will also end in gf_cli_get_next_volume()
+ */
+ if (local->get_vol.flags == GF_CLI_GET_VOLUME) {
+ ret = cli_xml_output_vol_info_begin
+ (local, rsp.op_ret, rsp.op_errno,
+ rsp.op_errstr);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR,
+ "Error outputting to xml");
+ goto out;
+ }
+ }
+
+ if (dict) {
+ ret = cli_xml_output_vol_info (local, dict);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR,
+ "Error outputting to xml");
+ goto out;
+ }
+ }
+
+ if (local->get_vol.flags == GF_CLI_GET_VOLUME) {
+ ret = cli_xml_output_vol_info_end (local);
+ if (ret)
+ gf_log ("cli", GF_LOG_ERROR,
+ "Error outputting to xml");
}
goto out;
}
-#endif
while ( i < count) {
cli_out (" ");
@@ -553,6 +634,11 @@ gf_cli3_1_get_volume_cbk (struct rpc_req *req, struct iovec *iov,
if (ret)
goto out;
+ snprintf (key, sizeof (key), "volume%d.snap_volume", i);
+ ret = dict_get_int32 (dict, key, &snap_volume);
+ if (ret)
+ goto out;
+
snprintf (key, 256, "volume%d.brick_count", i);
ret = dict_get_int32 (dict, key, &brick_count);
if (ret)
@@ -590,9 +676,47 @@ gf_cli3_1_get_volume_cbk (struct rpc_req *req, struct iovec *iov,
vol_type = type + 3;
cli_out ("Volume Name: %s", volname);
- cli_out ("Type: %s", cli_volume_type[vol_type]);
+ cli_out ("Type: %s", cli_vol_type_str[vol_type]);
cli_out ("Volume ID: %s", volume_id_str);
- cli_out ("Status: %s", cli_volume_status[status]);
+ cli_out ("Status: %s", cli_vol_status_str[status]);
+ if (snap_volume)
+ cli_out ("Snap Volume: %s", "yes");
+ else
+ cli_out ("Snap Volume: %s", "no");
+
+#ifdef HAVE_BD_XLATOR
+ k = 0;
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "volume%d.xlator%d", i, k);
+ ret = dict_get_str (dict, key, &caps);
+ if (ret)
+ goto next;
+ do {
+ j = 0;
+ cli_out ("Xlator %d: %s", k + 1, caps);
+ do {
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key),
+ "volume%d.xlator%d.caps%d",
+ i, k, j++);
+ ret = dict_get_str (dict, key, &caps);
+ if (ret)
+ break;
+ cli_out ("Capability %d: %s", j, caps);
+ } while (1);
+
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key),
+ "volume%d.xlator%d", i, ++k);
+ ret = dict_get_str (dict, key, &caps);
+ if (ret)
+ break;
+ } while (1);
+
+next:
+#else
+ caps = 0; /* Avoid compiler warnings when BD not enabled */
+#endif
if (type == GF_CLUSTER_TYPE_STRIPE_REPLICATE) {
cli_out ("Number of Bricks: %d x %d x %d = %d",
@@ -600,10 +724,8 @@ gf_cli3_1_get_volume_cbk (struct rpc_req *req, struct iovec *iov,
stripe_count,
replica_count,
brick_count);
-
} else if (type == GF_CLUSTER_TYPE_NONE) {
cli_out ("Number of Bricks: %d", brick_count);
-
} else {
/* For both replicate and stripe, dist_count is
good enough */
@@ -631,15 +753,15 @@ gf_cli3_1_get_volume_cbk (struct rpc_req *req, struct iovec *iov,
goto out;
cli_out ("Brick%d: %s", j, brick);
+#ifdef HAVE_BD_XLATOR
+ snprintf (key, 256, "volume%d.vg%d", i, j);
+ ret = dict_get_str (dict, key, &caps);
+ if (!ret)
+ cli_out ("Brick%d VG: %s", j, caps);
+#endif
j++;
}
- pairs = dict->members_list;
- if (!pairs) {
- ret = -1;
- goto out;
- }
-
snprintf (key, 256, "volume%d.opt_count",i);
ret = dict_get_int32 (dict, key, &opt_count);
if (ret)
@@ -649,26 +771,12 @@ gf_cli3_1_get_volume_cbk (struct rpc_req *req, struct iovec *iov,
goto out;
cli_out ("Options Reconfigured:");
- k = 0;
- while (k < opt_count) {
-
- snprintf (key, 256, "volume%d.option.",i);
- while (pairs) {
- ptr = strstr (pairs->key, "option.");
- if (ptr) {
- value = pairs->value;
- if (!value) {
- ret = -1;
- goto out;
- }
- cli_out_options (key, pairs->key,
- value->data);
- }
- pairs = pairs->next;
- }
- k++;
- }
+ snprintf (key, 256, "volume%d.option.",i);
+
+ ret = dict_foreach (dict, _gf_cli_output_volinfo_opts, key);
+ if (ret)
+ goto out;
i++;
}
@@ -678,23 +786,21 @@ gf_cli3_1_get_volume_cbk (struct rpc_req *req, struct iovec *iov,
out:
cli_cmd_broadcast_response (ret);
if (ret)
- cli_out ("%s", err_str);
+ cli_err ("%s", err_str);
if (dict)
dict_destroy (dict);
- if (rsp.dict.dict_val)
- free (rsp.dict.dict_val);
+ free (rsp.dict.dict_val);
- if (rsp.op_errstr)
- free (rsp.op_errstr);
+ free (rsp.op_errstr);
gf_log ("cli", GF_LOG_INFO, "Returning: %d", ret);
return ret;
}
int
-gf_cli3_1_create_volume_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_create_volume_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf_cli_rsp rsp = {0,};
@@ -702,61 +808,70 @@ gf_cli3_1_create_volume_cbk (struct rpc_req *req, struct iovec *iov,
cli_local_t *local = NULL;
char *volname = NULL;
dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
if (-1 == req->rpc_status) {
goto out;
}
local = ((call_frame_t *) (myframe))->local;
- ((call_frame_t *) (myframe))->local = NULL;
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "error");
+ gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
+ gf_log ("cli", GF_LOG_INFO, "Received resp to create volume");
+
dict = local->dict;
ret = dict_get_str (dict, "volname", &volname);
+ if (ret)
+ goto out;
- gf_log ("cli", GF_LOG_INFO, "Received resp to create volume");
-
-#if (HAVE_LIB_XML)
if (global_state->mode & GLUSTER_MODE_XML) {
- ret = cli_xml_output_dict ("volCreate", dict, rsp.op_ret,
- rsp.op_errno, rsp.op_errstr);
+ if (rsp.op_ret == 0) {
+ rsp_dict = dict_new ();
+ ret = dict_unserialize (rsp.dict.dict_val,
+ rsp.dict.dict_len,
+ &rsp_dict);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR,
+ "Failed rsp_dict unserialization");
+ goto out;
+ }
+ }
+
+ ret = cli_xml_output_vol_create (rsp_dict, rsp.op_ret,
+ rsp.op_errno, rsp.op_errstr);
if (ret)
gf_log ("cli", GF_LOG_ERROR,
"Error outputting to xml");
goto out;
}
-#endif
if (rsp.op_ret && strcmp (rsp.op_errstr, ""))
- cli_out ("%s", rsp.op_errstr);
+ cli_err ("volume create: %s: failed: %s", volname,
+ rsp.op_errstr);
+ else if (rsp.op_ret)
+ cli_err ("volume create: %s: failed", volname);
else
- cli_out ("Creation of volume %s has been %s", volname,
- (rsp.op_ret) ? "unsuccessful":
- "successful. Please start the volume to "
- "access data.");
+ cli_out ("volume create: %s: success: "
+ "please start the volume to access data", volname);
+
ret = rsp.op_ret;
out:
cli_cmd_broadcast_response (ret);
- if (dict)
- dict_unref (dict);
- if (local)
- cli_local_wipe (local);
- if (rsp.dict.dict_val)
- free (rsp.dict.dict_val);
- if (rsp.op_errstr)
- free (rsp.op_errstr);
+ free (rsp.dict.dict_val);
+ free (rsp.op_errstr);
return ret;
}
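
Note the pattern introduced here and repeated in the delete, start and stop callbacks below: the response dict is unserialized only when XML mode is active and op_ret is 0, because the plain-text path never reads it. The guard, reduced to a standalone analogue (decode_payload stands in for dict_unserialize):

    #include <stdio.h>

    struct rsp { int op_ret; const char *payload; };

    static int decode_payload (const char *p) { printf ("decoded: %s\n", p); return 0; }

    static int
    emit (struct rsp *r, int xml_mode)
    {
            /* Decode lazily: only the XML path on success consumes the dict. */
            if (xml_mode && r->op_ret == 0 && decode_payload (r->payload))
                    return -1;
            printf (r->op_ret ? "volume create: failed\n"
                              : "volume create: success\n");
            return r->op_ret;
    }

    int main (void)
    {
            struct rsp r = { 0, "volname=test" };
            return emit (&r, 1);
    }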
int
-gf_cli3_1_delete_volume_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_delete_volume_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf_cli_rsp rsp = {0,};
@@ -765,47 +880,197 @@ gf_cli3_1_delete_volume_cbk (struct rpc_req *req, struct iovec *iov,
char *volname = NULL;
call_frame_t *frame = NULL;
dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
if (-1 == req->rpc_status) {
goto out;
}
+ frame = myframe;
+
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "error");
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
- frame = myframe;
local = frame->local;
- frame->local = NULL;
if (local)
dict = local->dict;
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
+ gf_log (frame->this->name, GF_LOG_ERROR,
"dict get failed");
goto out;
}
gf_log ("cli", GF_LOG_INFO, "Received resp to delete volume");
-#if (HAVE_LIB_XML)
if (global_state->mode & GLUSTER_MODE_XML) {
- ret = cli_xml_output_dict ("volDelete", dict, rsp.op_ret,
+ if (rsp.op_ret == 0) {
+ rsp_dict = dict_new ();
+ ret = dict_unserialize (rsp.dict.dict_val,
+ rsp.dict.dict_len,
+ &rsp_dict);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR,
+ "Failed rsp_dict unserialization");
+ goto out;
+ }
+ }
+
+ ret = cli_xml_output_generic_volume ("volDelete", rsp_dict,
+ rsp.op_ret, rsp.op_errno,
+ rsp.op_errstr);
+ if (ret)
+ gf_log ("cli", GF_LOG_ERROR,
+ "Error outputting to xml");
+ goto out;
+ }
+
+ if (rsp.op_ret && strcmp (rsp.op_errstr, ""))
+ cli_err ("volume delete: %s: failed: %s", volname,
+ rsp.op_errstr);
+ else if (rsp.op_ret)
+ cli_err ("volume delete: %s: failed", volname);
+ else
+ cli_out ("volume delete: %s: success", volname);
+
+ ret = rsp.op_ret;
+
+out:
+ cli_cmd_broadcast_response (ret);
+ free (rsp.dict.dict_val);
+
+ gf_log ("", GF_LOG_INFO, "Returning with %d", ret);
+ return ret;
+}
+
+int
+gf_cli3_1_uuid_get_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ char *uuid_str = NULL;
+ gf_cli_rsp rsp = {0,};
+ int ret = -1;
+ cli_local_t *local = NULL;
+ call_frame_t *frame = NULL;
+ dict_t *dict = NULL;
+
+ if (-1 == req->rpc_status)
+ goto out;
+
+ frame = myframe;
+
+ ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
+ goto out;
+ }
+
+ local = frame->local;
+ frame->local = NULL;
+
+ gf_log ("cli", GF_LOG_INFO, "Received resp to uuid get");
+
+ dict = dict_new ();
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len,
+ &dict);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to unserialize "
+ "response for uuid get");
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "uuid", &uuid_str);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to get uuid "
+ "from dictionary");
+ goto out;
+ }
+
+ if (global_state->mode & GLUSTER_MODE_XML) {
+ ret = cli_xml_output_dict ("uuidGenerate", dict, rsp.op_ret,
+ rsp.op_errno, rsp.op_errstr);
+ if (ret)
+ gf_log ("cli", GF_LOG_ERROR,
+ "Error outputting to xml");
+ goto out;
+ }
+
+ if (rsp.op_ret) {
+ if (strcmp (rsp.op_errstr, "") == 0)
+ cli_err ("Get uuid was unsuccessful");
+ else
+ cli_err ("%s", rsp.op_errstr);
+
+ } else {
+ cli_out ("UUID: %s", uuid_str);
+
+ }
+ ret = rsp.op_ret;
+
+out:
+ cli_cmd_broadcast_response (ret);
+ cli_local_wipe (local);
+ if (rsp.dict.dict_val)
+ free (rsp.dict.dict_val);
+ if (dict)
+ dict_unref (dict);
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+ return ret;
+}
+
+int
+gf_cli3_1_uuid_reset_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gf_cli_rsp rsp = {0,};
+ int ret = -1;
+ cli_local_t *local = NULL;
+ call_frame_t *frame = NULL;
+ dict_t *dict = NULL;
+
+ if (-1 == req->rpc_status) {
+ goto out;
+ }
+
+ frame = myframe;
+
+ ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
+ goto out;
+ }
+
+ local = frame->local;
+ frame->local = NULL;
+
+ gf_log ("cli", GF_LOG_INFO, "Received resp to uuid reset");
+
+ if (global_state->mode & GLUSTER_MODE_XML) {
+ ret = cli_xml_output_dict ("uuidReset", dict, rsp.op_ret,
rsp.op_errno, rsp.op_errstr);
if (ret)
gf_log ("cli", GF_LOG_ERROR,
"Error outputting to xml");
goto out;
}
-#endif
if (rsp.op_ret && strcmp (rsp.op_errstr, ""))
- cli_out ("%s", rsp.op_errstr);
+ cli_err ("%s", rsp.op_errstr);
else
- cli_out ("Deleting volume %s has been %s", volname,
+ cli_out ("resetting the peer uuid has been %s",
(rsp.op_ret) ? "unsuccessful": "successful");
ret = rsp.op_ret;
@@ -822,7 +1087,7 @@ out:
}
int
-gf_cli3_1_start_volume_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_start_volume_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf_cli_rsp rsp = {0,};
@@ -831,69 +1096,76 @@ gf_cli3_1_start_volume_cbk (struct rpc_req *req, struct iovec *iov,
char *volname = NULL;
call_frame_t *frame = NULL;
dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
if (-1 == req->rpc_status) {
goto out;
}
+ frame = myframe;
+
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "error");
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
- frame = myframe;
-
- if (frame) {
+ if (frame)
local = frame->local;
- frame->local = NULL;
- }
if (local)
dict = local->dict;
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "dict get failed");
+ gf_log (frame->this->name, GF_LOG_ERROR, "dict get failed");
goto out;
}
gf_log ("cli", GF_LOG_INFO, "Received resp to start volume");
-#if (HAVE_LIB_XML)
if (global_state->mode & GLUSTER_MODE_XML) {
- ret = cli_xml_output_dict ("volStart", dict, rsp.op_ret,
- rsp.op_errno, rsp.op_errstr);
+ if (rsp.op_ret == 0) {
+ rsp_dict = dict_new ();
+ ret = dict_unserialize (rsp.dict.dict_val,
+ rsp.dict.dict_len,
+ &rsp_dict);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR,
+ "Failed rsp_dict unserialization");
+ goto out;
+ }
+ }
+
+ ret = cli_xml_output_generic_volume ("volStart", rsp_dict,
+ rsp.op_ret, rsp.op_errno,
+ rsp.op_errstr);
if (ret)
gf_log ("cli", GF_LOG_ERROR,
"Error outputting to xml");
goto out;
}
-#endif
if (rsp.op_ret && strcmp (rsp.op_errstr, ""))
- cli_out ("%s", rsp.op_errstr);
+ cli_err ("volume start: %s: failed: %s", volname,
+ rsp.op_errstr);
+ else if (rsp.op_ret)
+ cli_err ("volume start: %s: failed", volname);
else
- cli_out ("Starting volume %s has been %s", volname,
- (rsp.op_ret) ? "unsuccessful": "successful");
+ cli_out ("volume start: %s: success", volname);
ret = rsp.op_ret;
out:
cli_cmd_broadcast_response (ret);
- if (local)
- cli_local_wipe (local);
- if (rsp.dict.dict_val)
- free (rsp.dict.dict_val);
- if (rsp.op_errstr)
- free (rsp.op_errstr);
- if (dict)
- dict_unref (dict);
+ free (rsp.dict.dict_val);
+ free (rsp.op_errstr);
return ret;
}
int
-gf_cli3_1_stop_volume_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_stop_volume_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf_cli_rsp rsp = {0,};
@@ -902,19 +1174,21 @@ gf_cli3_1_stop_volume_cbk (struct rpc_req *req, struct iovec *iov,
char *volname = NULL;
call_frame_t *frame = NULL;
dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
if (-1 == req->rpc_status) {
goto out;
}
+ frame = myframe;
+
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "error");
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
- frame = myframe;
-
if (frame)
local = frame->local;
@@ -922,7 +1196,7 @@ gf_cli3_1_stop_volume_cbk (struct rpc_req *req, struct iovec *iov,
dict = local->dict;
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
+ gf_log (frame->this->name, GF_LOG_ERROR,
"Unable to get volname from dict");
goto out;
}
@@ -930,83 +1204,218 @@ gf_cli3_1_stop_volume_cbk (struct rpc_req *req, struct iovec *iov,
gf_log ("cli", GF_LOG_INFO, "Received resp to stop volume");
-#if (HAVE_LIB_XML)
if (global_state->mode & GLUSTER_MODE_XML) {
- ret = cli_xml_output_dict ("volStop", dict, rsp.op_ret,
- rsp.op_errno, rsp.op_errstr);
+ if (rsp.op_ret == 0) {
+ rsp_dict = dict_new ();
+ ret = dict_unserialize (rsp.dict.dict_val,
+ rsp.dict.dict_len,
+ &rsp_dict);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR,
+ "Failed rsp_dict unserialization");
+ goto out;
+ }
+ }
+
+ ret = cli_xml_output_generic_volume ("volStop", rsp_dict,
+ rsp.op_ret, rsp.op_errno,
+ rsp.op_errstr);
if (ret)
gf_log ("cli", GF_LOG_ERROR,
"Error outputting to xml");
goto out;
}
-#endif
if (rsp.op_ret && strcmp (rsp.op_errstr, ""))
- cli_out ("%s", rsp.op_errstr);
+ cli_err ("volume stop: %s: failed: %s", volname, rsp.op_errstr);
+ else if (rsp.op_ret)
+ cli_err ("volume stop: %s: failed", volname);
else
- cli_out ("Stopping volume %s has been %s", volname,
- (rsp.op_ret) ? "unsuccessful": "successful");
+ cli_out ("volume stop: %s: success", volname);
+
ret = rsp.op_ret;
out:
cli_cmd_broadcast_response (ret);
- if (rsp.op_errstr)
- free (rsp.op_errstr);
- if (rsp.dict.dict_val)
- free (rsp.dict.dict_val);
- if (local)
- cli_local_wipe (local);
+ free (rsp.op_errstr);
+ free (rsp.dict.dict_val);
+
+ return ret;
+}
+
+int
+gf_cli_print_rebalance_status (dict_t *dict, enum gf_task_types task_type)
+{
+ int ret = -1;
+ int count = 0;
+ int i = 1;
+ char key[256] = {0,};
+ gf_defrag_status_t status_rcd = GF_DEFRAG_STATUS_NOT_STARTED;
+ uint64_t files = 0;
+ uint64_t size = 0;
+ uint64_t lookup = 0;
+ char *node_name = NULL;
+ uint64_t failures = 0;
+ uint64_t skipped = 0;
+ double elapsed = 0;
+ char *status_str = NULL;
+ char *size_str = NULL;
+
+ ret = dict_get_int32 (dict, "count", &count);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "count not set");
+ goto out;
+ }
+
+
+ cli_out ("%40s %16s %13s %13s %13s %13s %20s %18s", "Node",
+ "Rebalanced-files", "size", "scanned", "failures", "skipped",
+ "status", "run time in secs");
+ cli_out ("%40s %16s %13s %13s %13s %13s %20s %18s", "---------",
+ "-----------", "-----------", "-----------", "-----------",
+ "-----------", "------------", "--------------");
+ for (i = 1; i <= count; i++) {
+ /* Reset the variables to prevent carryover of values */
+ node_name = NULL;
+ files = 0;
+ size = 0;
+ lookup = 0;
+ skipped = 0;
+ status_str = NULL;
+ elapsed = 0;
+
+ /* Check if status is NOT_STARTED, and continue early */
+ memset (key, 0, 256);
+ snprintf (key, 256, "status-%d", i);
+ ret = dict_get_int32 (dict, key, (int32_t *)&status_rcd);
+ if (ret) {
+ gf_log ("cli", GF_LOG_TRACE, "failed to get status");
+ goto out;
+ }
+ if (GF_DEFRAG_STATUS_NOT_STARTED == status_rcd)
+ continue;
+
+
+ snprintf (key, 256, "node-name-%d", i);
+ ret = dict_get_str (dict, key, &node_name);
+ if (ret)
+ gf_log ("cli", GF_LOG_TRACE, "failed to get node-name");
+
+ memset (key, 0, 256);
+ snprintf (key, 256, "files-%d", i);
+ ret = dict_get_uint64 (dict, key, &files);
+ if (ret)
+ gf_log ("cli", GF_LOG_TRACE,
+ "failed to get file count");
+
+ memset (key, 0, 256);
+ snprintf (key, 256, "size-%d", i);
+ ret = dict_get_uint64 (dict, key, &size);
+ if (ret)
+ gf_log ("cli", GF_LOG_TRACE,
+ "failed to get size of xfer");
+
+ memset (key, 0, 256);
+ snprintf (key, 256, "lookups-%d", i);
+ ret = dict_get_uint64 (dict, key, &lookup);
+ if (ret)
+ gf_log ("cli", GF_LOG_TRACE,
+ "failed to get lookedup file count");
+
+ memset (key, 0, 256);
+ snprintf (key, 256, "failures-%d", i);
+ ret = dict_get_uint64 (dict, key, &failures);
+ if (ret)
+ gf_log ("cli", GF_LOG_TRACE,
+ "failed to get failures count");
+
+ memset (key, 0, 256);
+ snprintf (key, 256, "skipped-%d", i);
+ ret = dict_get_uint64 (dict, key, &skipped);
+ if (ret)
+ gf_log ("cli", GF_LOG_TRACE,
+ "failed to get skipped count");
+
+ /* For remove-brick include skipped count into failure count*/
+ if (task_type != GF_TASK_TYPE_REBALANCE) {
+ failures += skipped;
+ skipped = 0;
+ }
+
+ memset (key, 0, 256);
+ snprintf (key, 256, "run-time-%d", i);
+ ret = dict_get_double (dict, key, &elapsed);
+ if (ret)
+ gf_log ("cli", GF_LOG_TRACE, "failed to get run-time");
+
+ /* Check for array bound */
+ if (status_rcd >= GF_DEFRAG_STATUS_MAX)
+ status_rcd = GF_DEFRAG_STATUS_MAX;
+
+ status_str = cli_vol_task_status_str[status_rcd];
+ size_str = gf_uint64_2human_readable(size);
+ if (size_str) {
+ cli_out ("%40s %16"PRIu64 " %13s" " %13"PRIu64 " %13"
+ PRIu64" %13"PRIu64 " %20s %18.2f", node_name,
+ files, size_str, lookup, failures, skipped,
+ status_str, elapsed);
+ } else {
+ cli_out ("%40s %16"PRIu64 " %13"PRIu64 " %13"PRIu64
+ " %13"PRIu64" %13"PRIu64 " %20s %18.2f",
+ node_name, files, size, lookup, failures,
+ skipped, status_str, elapsed);
+ }
+ GF_FREE(size_str);
+ }
+out:
return ret;
}
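
gf_cli_print_rebalance_status pulls its per-node counters out of one flat dict using numbered keys ("status-%d", "files-%d", "run-time-%d", and so on). The key-composition loop as a standalone sketch (lookup stands in for the dict_get_* calls):

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for dict_get_uint64 and friends. */
    static int
    lookup (const char *key, unsigned long *out)
    {
            *out = strlen (key);   /* dummy value; real code reads the dict */
            return 0;
    }

    int main (void)
    {
            char          key[256];
            unsigned long files = 0;

            for (int i = 1; i <= 3; i++) {
                    /* One numbered key per node, as the status printer does. */
                    snprintf (key, sizeof (key), "files-%d", i);
                    if (lookup (key, &files))
                            continue;  /* missing key: skip the node, keep the table */
                    printf ("node %d: %lu files\n", i, files);
            }
            return 0;
    }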
int
-gf_cli3_1_defrag_volume_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_defrag_volume_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
- gf_cli_rsp rsp = {0,};
- cli_local_t *local = NULL;
- char *volname = NULL;
- call_frame_t *frame = NULL;
- char *status = "unknown";
- int cmd = 0;
- int ret = -1;
- dict_t *dict = NULL;
- dict_t *local_dict = NULL;
- uint64_t files = 0;
- uint64_t size = 0;
- uint64_t lookup = 0;
- char msg[1024] = {0,};
+ gf_cli_rsp rsp = {0,};
+ cli_local_t *local = NULL;
+ char *volname = NULL;
+ call_frame_t *frame = NULL;
+ int cmd = 0;
+ int ret = -1;
+ dict_t *dict = NULL;
+ dict_t *local_dict = NULL;
+ char msg[1024] = {0,};
+ char *task_id_str = NULL;
if (-1 == req->rpc_status) {
goto out;
}
+ frame = myframe;
+
ret = xdr_to_generic (*iov, &rsp,
(xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "error");
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
- frame = myframe;
-
if (frame)
local = frame->local;
- if (local) {
+ if (local)
local_dict = local->dict;
- }
ret = dict_get_str (local_dict, "volname", &volname);
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
+ gf_log (frame->this->name, GF_LOG_ERROR,
"Failed to get volname");
goto out;
}
ret = dict_get_int32 (local_dict, "rebalance-command", (int32_t*)&cmd);
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
+ gf_log (frame->this->name, GF_LOG_ERROR,
"Failed to get command");
goto out;
}
@@ -1026,20 +1435,26 @@ gf_cli3_1_defrag_volume_cbk (struct rpc_req *req, struct iovec *iov,
}
}
- ret = dict_get_uint64 (dict, "files", &files);
- if (ret)
- gf_log (THIS->name, GF_LOG_TRACE,
- "failed to get file count");
-
- ret = dict_get_uint64 (dict, "size", &size);
- if (ret)
- gf_log (THIS->name, GF_LOG_TRACE,
- "failed to get size of xfer");
-
- ret = dict_get_uint64 (dict, "lookups", &lookup);
- if (ret)
- gf_log (THIS->name, GF_LOG_TRACE,
- "failed to get lookedup file count");
+ if (!((cmd == GF_DEFRAG_CMD_STOP) || (cmd == GF_DEFRAG_CMD_STATUS)) &&
+ !(global_state->mode & GLUSTER_MODE_XML)) {
+ /* All other possibilities are about starting a rebalance */
+ ret = dict_get_str (dict, GF_REBALANCE_TID_KEY, &task_id_str);
+ if (rsp.op_ret && strcmp (rsp.op_errstr, "")) {
+ snprintf (msg, sizeof (msg), "%s", rsp.op_errstr);
+ } else {
+ if (!rsp.op_ret) {
+ snprintf (msg, sizeof (msg),
+ "Starting rebalance on volume %s has "
+ "been successful.\nID: %s", volname,
+ task_id_str);
+ } else {
+ snprintf (msg, sizeof (msg),
+ "Starting rebalance on volume %s has "
+ "been unsuccessful.", volname);
+ }
+ }
+ goto done;
+ }
if (cmd == GF_DEFRAG_CMD_STOP) {
if (rsp.op_ret == -1) {
@@ -1050,13 +1465,16 @@ gf_cli3_1_defrag_volume_cbk (struct rpc_req *req, struct iovec *iov,
snprintf (msg, sizeof (msg),
"rebalance volume %s stop failed",
volname);
+ goto done;
} else {
snprintf (msg, sizeof (msg),
- "Stopped rebalance process on volume %s \n"
- "(after rebalancing %"PRId64" bytes - "
- "%"PRId64" files)", volname, size, files);
+ "rebalance process may be in the middle of a "
+ "file migration.\nThe process will be fully "
+ "stopped once the migration of the file is "
+ "complete.\nPlease check rebalance process "
+ "for completion before doing any further "
+ "brick related tasks on the volume.");
}
- goto done;
}
if (cmd == GF_DEFRAG_CMD_STATUS) {
if (rsp.op_ret == -1) {
@@ -1069,77 +1487,46 @@ gf_cli3_1_defrag_volume_cbk (struct rpc_req *req, struct iovec *iov,
"rebalance process");
goto done;
}
+ }
- switch (rsp.op_errno) {
- case GF_DEFRAG_STATUS_NOT_STARTED:
- status = "not started";
- break;
- case GF_DEFRAG_STATUS_STARTED:
- status = "in progress";
- break;
- case GF_DEFRAG_STATUS_STOPPED:
- status = "stopped";
- break;
- case GF_DEFRAG_STATUS_COMPLETE:
- status = "completed";
- break;
- case GF_DEFRAG_STATUS_FAILED:
- status = "failed";
- break;
- }
- if (files || size || lookup) {
- snprintf (msg, sizeof(msg),
- "Rebalance %s: rebalanced %"PRId64
- " files of size %"PRId64" (total files"
- " scanned %"PRId64")", status,
- files, size, lookup);
- goto done;
- }
-
- snprintf (msg, sizeof (msg), "Rebalance %s", status);
- goto done;
+ if (global_state->mode & GLUSTER_MODE_XML) {
+ ret = cli_xml_output_vol_rebalance (cmd, dict, rsp.op_ret,
+ rsp.op_errno,
+ rsp.op_errstr);
+ goto out;
}
- /* All other possibility is about starting a volume */
- if (rsp.op_ret && strcmp (rsp.op_errstr, ""))
- snprintf (msg, sizeof (msg), "%s", rsp.op_errstr);
- else
- snprintf (msg, sizeof (msg),
- "Starting rebalance on volume %s has been %s",
- volname, (rsp.op_ret) ? "unsuccessful":
- "successful");
+ ret = gf_cli_print_rebalance_status (dict, GF_TASK_TYPE_REBALANCE);
+ if (ret)
+ gf_log ("cli", GF_LOG_ERROR,
+ "Failed to print rebalance status");
done:
-#if (HAVE_LIB_XML)
- if (global_state->mode & GLUSTER_MODE_XML) {
- ret = cli_xml_output_str ("volRebalance", msg, rsp.op_ret,
- rsp.op_errno, rsp.op_errstr);
- if (ret)
- gf_log ("cli", GF_LOG_ERROR,
- "Error outputting to xml");
- goto out;
+ if (global_state->mode & GLUSTER_MODE_XML)
+ cli_xml_output_str ("volRebalance", msg,
+ rsp.op_ret, rsp.op_errno,
+ rsp.op_errstr);
+ else {
+ if (rsp.op_ret)
+ cli_err ("volume rebalance: %s: failed: %s", volname,
+ msg);
+ else
+ cli_out ("volume rebalance: %s: success: %s", volname,
+ msg);
}
-#endif
- cli_out ("%s", msg);
ret = rsp.op_ret;
out:
- if (rsp.op_errstr)
- free (rsp.op_errstr); //malloced by xdr
- if (rsp.dict.dict_val)
- free (rsp.dict.dict_val); //malloced by xdr
+ free (rsp.op_errstr); //malloced by xdr
+ free (rsp.dict.dict_val); //malloced by xdr
if (dict)
dict_unref (dict);
- if (local_dict)
- dict_unref (local_dict);
- if (local)
- cli_local_wipe (local);
cli_cmd_broadcast_response (ret);
return ret;
}
int
-gf_cli3_1_rename_volume_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_rename_volume_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf_cli_rsp rsp = {0,};
@@ -1152,7 +1539,8 @@ gf_cli3_1_rename_volume_cbk (struct rpc_req *req, struct iovec *iov,
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "error");
+ gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
@@ -1161,7 +1549,6 @@ gf_cli3_1_rename_volume_cbk (struct rpc_req *req, struct iovec *iov,
snprintf (msg, sizeof (msg), "Rename volume %s",
(rsp.op_ret) ? "unsuccessful": "successful");
-#if (HAVE_LIB_XML)
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_str ("volRename", msg, rsp.op_ret,
rsp.op_errno, rsp.op_errstr);
@@ -1170,9 +1557,12 @@ gf_cli3_1_rename_volume_cbk (struct rpc_req *req, struct iovec *iov,
"Error outputting to xml");
goto out;
}
-#endif
- cli_out ("%s", msg);
+ if (rsp.op_ret)
+ cli_err ("volume rename: failed");
+ else
+ cli_out ("volume rename: success");
+
ret = rsp.op_ret;
out:
@@ -1181,7 +1571,7 @@ out:
}
int
-gf_cli3_1_reset_volume_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_reset_volume_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf_cli_rsp rsp = {0,};
@@ -1194,19 +1584,19 @@ gf_cli3_1_reset_volume_cbk (struct rpc_req *req, struct iovec *iov,
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "error");
+ gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
gf_log ("cli", GF_LOG_INFO, "Received resp to reset");
- if (rsp.op_ret && strcmp (rsp.op_errstr, ""))
+ if (strcmp (rsp.op_errstr, ""))
snprintf (msg, sizeof (msg), "%s", rsp.op_errstr);
else
snprintf (msg, sizeof (msg), "reset volume %s",
(rsp.op_ret) ? "unsuccessful": "successful");
-#if (HAVE_LIB_XML)
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_str ("volReset", msg, rsp.op_ret,
rsp.op_errno, rsp.op_errstr);
@@ -1215,18 +1605,68 @@ gf_cli3_1_reset_volume_cbk (struct rpc_req *req, struct iovec *iov,
"Error outputting to xml");
goto out;
}
-#endif
- cli_out ("%s", msg);
+ if (rsp.op_ret)
+ cli_err ("volume reset: failed: %s", msg);
+ else
+ cli_out ("volume reset: success: %s", msg);
+
ret = rsp.op_ret;
out:
- cli_cmd_broadcast_response (ret);
+ cli_cmd_broadcast_response (ret);
return ret;
}
+char *
+is_server_debug_xlator (void *myframe)
+{
+ call_frame_t *frame = NULL;
+ cli_local_t *local = NULL;
+ char **words = NULL;
+ char *key = NULL;
+ char *value = NULL;
+ char *debug_xlator = NULL;
+
+ frame = myframe;
+ local = frame->local;
+ words = (char **)local->words;
+
+ while (*words != NULL) {
+ if (strstr (*words, "trace") == NULL &&
+ strstr (*words, "error-gen") == NULL) {
+ words++;
+ continue;
+ }
+
+ key = *words;
+ words++;
+ value = *words;
+ if (value == NULL)
+ break;
+ if (strstr (value, "client")) {
+ words++;
+ continue;
+ } else {
+ if (!(strstr (value, "posix") || strstr (value, "acl")
+ || strstr (value, "locks") ||
+ strstr (value, "io-threads") ||
+ strstr (value, "marker") ||
+ strstr (value, "index"))) {
+ words++;
+ continue;
+ } else {
+ debug_xlator = gf_strdup (key);
+ break;
+ }
+ }
+ }
+
+ return debug_xlator;
+}
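
is_server_debug_xlator walks the saved command words as key/value pairs, looking for a trace or error-gen option whose value names a server-side xlator (posix, acl, locks, io-threads, marker, index). A compressed standalone version of that scan (word list and xlator set abbreviated):

    #include <stdio.h>
    #include <string.h>

    static const char *server_side[] = { "posix", "acl", "locks", NULL };

    static int
    is_server_value (const char *v)
    {
            for (int i = 0; server_side[i]; i++)
                    if (strstr (v, server_side[i]))
                            return 1;
            return 0;
    }

    int main (void)
    {
            /* key/value pairs as they appear in the saved CLI words */
            const char *words[] = { "diagnostics.count-fop-hits", "on",
                                    "debug.trace", "posix", NULL };

            for (int i = 0; words[i] && words[i + 1]; i += 2) {
                    if (!strstr (words[i], "trace") &&
                        !strstr (words[i], "error-gen"))
                            continue;
                    if (is_server_value (words[i + 1]))
                            printf ("server-side debug xlator: %s\n", words[i]);
            }
            return 0;
    }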
+
int
-gf_cli3_1_set_volume_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_set_volume_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf_cli_rsp rsp = {0,};
@@ -1234,6 +1674,8 @@ gf_cli3_1_set_volume_cbk (struct rpc_req *req, struct iovec *iov,
dict_t *dict = NULL;
char *help_str = NULL;
char msg[1024] = {0,};
+ char *debug_xlator = _gf_false;
+ char tmp_str[512] = {0,};
if (-1 == req->rpc_status) {
goto out;
@@ -1241,15 +1683,13 @@ gf_cli3_1_set_volume_cbk (struct rpc_req *req, struct iovec *iov,
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "error");
+ gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
gf_log ("cli", GF_LOG_INFO, "Received resp to set");
- if (rsp.op_ret && strcmp (rsp.op_errstr, ""))
- snprintf (msg, sizeof (msg), "%s", rsp.op_errstr);
-
dict = dict_new ();
if (!dict) {
@@ -1259,17 +1699,23 @@ gf_cli3_1_set_volume_cbk (struct rpc_req *req, struct iovec *iov,
ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &dict);
- if (ret)
- goto out;
+ /* For brick processes graph change does not happen on the fly.
+ * The process has to be restarted. So this is a check from the
+ * volume set option such that if debug xlators such as trace/errorgen
+ * are provided in the set command, warn the user.
+ */
+ debug_xlator = is_server_debug_xlator (myframe);
- if (dict_get_str (dict, "help-str", &help_str))
+ if (dict_get_str (dict, "help-str", &help_str) && !msg[0])
snprintf (msg, sizeof (msg), "Set volume %s",
(rsp.op_ret) ? "unsuccessful": "successful");
- else
- snprintf (msg, sizeof (msg), "%s", help_str);
+ if (rsp.op_ret == 0 && debug_xlator) {
+ snprintf (tmp_str, sizeof (tmp_str), "\n%s translator has been "
+ "added to the server volume file. Please restart the"
+ " volume for enabling the translator", debug_xlator);
+ }
-#if (HAVE_LIB_XML)
- if (global_state->mode & GLUSTER_MODE_XML) {
+ if ((global_state->mode & GLUSTER_MODE_XML) && (help_str == NULL)) {
ret = cli_xml_output_str ("volSet", msg, rsp.op_ret,
rsp.op_errno, rsp.op_errstr);
if (ret)
@@ -1277,18 +1723,35 @@ gf_cli3_1_set_volume_cbk (struct rpc_req *req, struct iovec *iov,
"Error outputting to xml");
goto out;
}
-#endif
- cli_out ("%s", msg);
+ if (rsp.op_ret) {
+ if (strcmp (rsp.op_errstr, ""))
+ cli_err ("volume set: failed: %s", rsp.op_errstr);
+ else
+ cli_err ("volume set: failed");
+ } else {
+ if (help_str == NULL) {
+ if (debug_xlator == NULL)
+ cli_out ("volume set: success");
+ else
+ cli_out ("volume set: success%s", tmp_str);
+ }else {
+ cli_out ("%s", help_str);
+ }
+ }
+
ret = rsp.op_ret;
out:
+ if (dict)
+ dict_unref (dict);
+ GF_FREE (debug_xlator);
cli_cmd_broadcast_response (ret);
return ret;
}
int
-gf_cli3_1_add_brick_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_add_brick_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf_cli_rsp rsp = {0,};
@@ -1301,7 +1764,8 @@ gf_cli3_1_add_brick_cbk (struct rpc_req *req, struct iovec *iov,
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "error");
+ gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
@@ -1314,7 +1778,6 @@ gf_cli3_1_add_brick_cbk (struct rpc_req *req, struct iovec *iov,
snprintf (msg, sizeof (msg), "Add Brick %s",
(rsp.op_ret) ? "unsuccessful": "successful");
-#if (HAVE_LIB_XML)
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_str ("volAddBrick", msg, rsp.op_ret,
rsp.op_errno, rsp.op_errstr);
@@ -1323,17 +1786,17 @@ gf_cli3_1_add_brick_cbk (struct rpc_req *req, struct iovec *iov,
"Error outputting to xml");
goto out;
}
-#endif
- cli_out ("%s", msg);
+ if (rsp.op_ret)
+ cli_err ("volume add-brick: failed: %s", rsp.op_errstr);
+ else
+ cli_out ("volume add-brick: success");
ret = rsp.op_ret;
out:
cli_cmd_broadcast_response (ret);
- if (rsp.dict.dict_val)
- free (rsp.dict.dict_val);
- if (rsp.op_errstr)
- free (rsp.op_errstr);
+ free (rsp.dict.dict_val);
+ free (rsp.op_errstr);
return ret;
}
@@ -1342,52 +1805,63 @@ gf_cli3_remove_brick_status_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf_cli_rsp rsp = {0,};
- char *status = "unknown";
int ret = -1;
- uint64_t files = 0;
- uint64_t size = 0;
dict_t *dict = NULL;
char msg[1024] = {0,};
+ int32_t command = 0;
+ gf1_op_commands cmd = GF_OP_CMD_NONE;
+ cli_local_t *local = NULL;
+ call_frame_t *frame = NULL;
+ char *cmd_str = "unknown";
if (-1 == req->rpc_status) {
goto out;
}
+ frame = myframe;
+
ret = xdr_to_generic (*iov, &rsp,
(xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "error");
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
- ret = rsp.op_ret;
- if (rsp.op_ret == -1) {
- if (strcmp (rsp.op_errstr, ""))
- cli_out ("%s", rsp.op_errstr);
- else
- cli_out ("failed to get the status of "
- "remove-brick process");
+ if (frame)
+ local = frame->local;
+ ret = dict_get_int32 (local->dict, "command", &command);
+ if (ret)
goto out;
- }
+ cmd = command;
- switch (rsp.op_errno) {
- case GF_DEFRAG_STATUS_NOT_STARTED:
- status = "not started";
+ switch (cmd) {
+ case GF_OP_CMD_STOP:
+ cmd_str = "stop";
break;
- case GF_DEFRAG_STATUS_STARTED:
- status = "in progress";
+ case GF_OP_CMD_STATUS:
+ cmd_str = "status";
break;
- case GF_DEFRAG_STATUS_STOPPED:
- status = "stopped";
- break;
- case GF_DEFRAG_STATUS_COMPLETE:
- status = "completed";
- break;
- case GF_DEFRAG_STATUS_FAILED:
- status = "failed";
+ default:
break;
}
+ ret = rsp.op_ret;
+ if (rsp.op_ret == -1) {
+ if (strcmp (rsp.op_errstr, ""))
+ snprintf (msg, sizeof (msg), "volume remove-brick %s: "
+ "failed: %s", cmd_str, rsp.op_errstr);
+ else
+ snprintf (msg, sizeof (msg), "volume remove-brick %s: "
+ "failed", cmd_str);
+
+ if (global_state->mode & GLUSTER_MODE_XML)
+ goto xml_output;
+
+ cli_err ("%s", msg);
+ goto out;
+ }
+
if (rsp.dict.dict_len) {
/* Unserialize the dictionary */
dict = dict_new ();
@@ -1396,60 +1870,53 @@ gf_cli3_remove_brick_status_cbk (struct rpc_req *req, struct iovec *iov,
rsp.dict.dict_len,
&dict);
if (ret < 0) {
- gf_log ("glusterd", GF_LOG_ERROR,
- "failed to "
- "unserialize req-buffer to dictionary");
+ strncpy (msg, "failed to unserialize req-buffer to "
+ "dictionary", sizeof (msg));
+
+ if (global_state->mode & GLUSTER_MODE_XML) {
+ rsp.op_ret = -1;
+ goto xml_output;
+ }
+
+ gf_log ("cli", GF_LOG_ERROR, "%s", msg);
goto out;
}
}
- ret = dict_get_uint64 (dict, "files", &files);
- if (ret)
- gf_log (THIS->name, GF_LOG_TRACE,
- "failed to get file count");
-
- ret = dict_get_uint64 (dict, "size", &size);
- if (ret)
- gf_log (THIS->name, GF_LOG_TRACE,
- "failed to get size of xfer");
-
- if (files && (rsp.op_errno == 1)) {
- snprintf (msg, sizeof (msg),
- "remove-brick %s: fixed layout %"PRId64,
- status,files);
- goto out;
- }
- if (files && (rsp.op_errno == 6)) {
- snprintf (msg, sizeof (msg),
- "remove-brick %s: fixed layout %"PRId64,
- status, files);
+xml_output:
+ if (global_state->mode & GLUSTER_MODE_XML) {
+ if (strcmp (rsp.op_errstr, "")) {
+ ret = cli_xml_output_vol_remove_brick (_gf_true, dict,
+ rsp.op_ret,
+ rsp.op_errno,
+ rsp.op_errstr);
+ } else {
+ ret = cli_xml_output_vol_remove_brick (_gf_true, dict,
+ rsp.op_ret,
+ rsp.op_errno,
+ msg);
+ }
goto out;
}
- if (files) {
- snprintf (msg, sizeof (msg),
- "remove-brick %s: decommissioned %"PRId64
- " files of size %"PRId64, status,
- files, size);
+
+ ret = gf_cli_print_rebalance_status (dict, GF_TASK_TYPE_REMOVE_BRICK);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to print remove-brick "
+ "rebalance status");
goto out;
}
- snprintf (msg, sizeof (msg), "remove-brick %s", status);
-
-#if (HAVE_LIB_XML)
- if (global_state->mode & GLUSTER_MODE_XML) {
- ret = cli_xml_output_str ("volRemoveBrick", msg, rsp.op_ret,
- rsp.op_errno, rsp.op_errstr);
- if (ret)
- gf_log ("cli", GF_LOG_ERROR,
- "Error outputting to xml");
- goto out;
+ if ((cmd == GF_OP_CMD_STOP) && (rsp.op_ret == 0)) {
+ cli_out ("'remove-brick' process may be in the middle of a "
+ "file migration.\nThe process will be fully stopped "
+ "once the migration of the file is complete.\nPlease "
+ "check remove-brick process for completion before "
+ "doing any further brick related tasks on the "
+ "volume.");
}
-#endif
- cli_out ("%s", msg);
out:
- if (rsp.dict.dict_val)
- free (rsp.dict.dict_val); //malloced by xdr
+ free (rsp.dict.dict_val); //malloced by xdr
if (dict)
dict_unref (dict);
cli_cmd_broadcast_response (ret);
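
With the switch-based status strings gone, both status paths now funnel into the shared printer, and the task type is what selects the skipped-count treatment:

    /* rebalance status, in gf_cli_defrag_volume_cbk: */
    ret = gf_cli_print_rebalance_status (dict, GF_TASK_TYPE_REBALANCE);

    /* remove-brick status, in gf_cli3_remove_brick_status_cbk: */
    ret = gf_cli_print_rebalance_status (dict, GF_TASK_TYPE_REMOVE_BRICK);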
@@ -1458,57 +1925,117 @@ out:
int
-gf_cli3_1_remove_brick_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_remove_brick_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf_cli_rsp rsp = {0,};
int ret = -1;
char msg[1024] = {0,};
+ gf1_op_commands cmd = GF_OP_CMD_NONE;
+ char *cmd_str = "unknown";
+ cli_local_t *local = NULL;
+ call_frame_t *frame = NULL;
+ char *task_id_str = NULL;
+ dict_t *rsp_dict = NULL;
if (-1 == req->rpc_status) {
goto out;
}
+ frame = myframe;
+ local = frame->local;
+
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "error");
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
+ ret = dict_get_int32 (local->dict, "command", (int32_t *)&cmd);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "failed to get command");
+ goto out;
+ }
+
+ if (rsp.dict.dict_len) {
+ rsp_dict = dict_new ();
+ if (!rsp_dict) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len,
+ &rsp_dict);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR,
+ "Failed to unserialize rsp_dict");
+ goto out;
+ }
+ }
+
+ switch (cmd) {
+ case GF_OP_CMD_START:
+ cmd_str = "start";
+
+ ret = dict_get_str (rsp_dict, GF_REMOVE_BRICK_TID_KEY, &task_id_str);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR,
+ "remove-brick-id is not present in dict");
+ }
+ break;
+ case GF_OP_CMD_COMMIT:
+ cmd_str = "commit";
+ break;
+ case GF_OP_CMD_COMMIT_FORCE:
+ cmd_str = "commit force";
+ break;
+ default:
+ cmd_str = "unknown";
+ break;
+ }
+
gf_log ("cli", GF_LOG_INFO, "Received resp to remove brick");
if (rsp.op_ret && strcmp (rsp.op_errstr, ""))
snprintf (msg, sizeof (msg), "%s", rsp.op_errstr);
else
- snprintf (msg, sizeof (msg), "Remove Brick %s",
+ snprintf (msg, sizeof (msg), "Remove Brick %s %s", cmd_str,
(rsp.op_ret) ? "unsuccessful": "successful");
-#if (HAVE_LIB_XML)
if (global_state->mode & GLUSTER_MODE_XML) {
- ret = cli_xml_output_str ("volRemoveBrick", msg, rsp.op_ret,
- rsp.op_errno, rsp.op_errstr);
+ ret = cli_xml_output_vol_remove_brick (_gf_false, rsp_dict,
+ rsp.op_ret, rsp.op_errno,
+ msg);
if (ret)
gf_log ("cli", GF_LOG_ERROR,
"Error outputting to xml");
goto out;
}
-#endif
- cli_out ("%s", msg);
+
+ if (rsp.op_ret) {
+ cli_err ("volume remove-brick %s: failed: %s", cmd_str,
+ msg);
+ } else {
+ cli_out ("volume remove-brick %s: success", cmd_str);
+ if (GF_OP_CMD_START == cmd && task_id_str != NULL)
+ cli_out ("ID: %s", task_id_str);
+ }
+
ret = rsp.op_ret;
out:
cli_cmd_broadcast_response (ret);
- if (rsp.dict.dict_val)
- free (rsp.dict.dict_val);
- if (rsp.op_errstr)
- free (rsp.op_errstr);
+ free (rsp.dict.dict_val);
+ free (rsp.op_errstr);
+
return ret;
}
int
-gf_cli3_1_replace_brick_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_replace_brick_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf_cli_rsp rsp = {0,};
@@ -1522,7 +2049,8 @@ gf_cli3_1_replace_brick_cbk (struct rpc_req *req, struct iovec *iov,
gf1_cli_replace_op replace_op = 0;
char *rb_operation_str = NULL;
dict_t *rsp_dict = NULL;
- char msg[1024] = {0,};
+ char msg[1024] = {0,};
+ char *task_id_str = NULL;
if (-1 == req->rpc_status) {
goto out;
@@ -1532,7 +2060,8 @@ gf_cli3_1_replace_brick_cbk (struct rpc_req *req, struct iovec *iov,
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "error");
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
@@ -1547,58 +2076,77 @@ gf_cli3_1_replace_brick_cbk (struct rpc_req *req, struct iovec *iov,
goto out;
}
+ if (rsp.dict.dict_len) {
+ /* Unserialize the dictionary */
+ rsp_dict = dict_new ();
+
+ ret = dict_unserialize (rsp.dict.dict_val,
+ rsp.dict.dict_len,
+ &rsp_dict);
+ if (ret < 0) {
+                        gf_log ("cli", GF_LOG_ERROR,
+ "failed to "
+ "unserialize rsp buffer to dictionary");
+ goto out;
+ }
+ }
+
switch (replace_op) {
case GF_REPLACE_OP_START:
- if (rsp.op_ret)
- rb_operation_str = "replace-brick failed to start";
- else
- rb_operation_str = "replace-brick started successfully";
+ if (rsp.op_ret) {
+ rb_operation_str = gf_strdup ("replace-brick failed to"
+ " start");
+ } else {
+ ret = dict_get_str (rsp_dict, GF_REPLACE_BRICK_TID_KEY,
+ &task_id_str);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to get "
+ "\"replace-brick-id\" from dict");
+ goto out;
+ }
+ ret = gf_asprintf (&rb_operation_str,
+ "replace-brick started successfully"
+ "\nID: %s", task_id_str);
+ if (ret < 0)
+ goto out;
+ }
break;
case GF_REPLACE_OP_STATUS:
- if (rsp.op_ret || ret)
- rb_operation_str = "replace-brick status unknown";
- else {
- if (rsp.dict.dict_len) {
- /* Unserialize the dictionary */
- rsp_dict = dict_new ();
-
- ret = dict_unserialize (rsp.dict.dict_val,
- rsp.dict.dict_len,
- &rsp_dict);
- if (ret < 0) {
- gf_log ("glusterd", GF_LOG_ERROR,
- "failed to "
- "unserialize req-buffer to dictionary");
- goto out;
- }
- }
+ if (rsp.op_ret || ret) {
+ rb_operation_str = gf_strdup ("replace-brick status "
+ "unknown");
+ } else {
ret = dict_get_str (rsp_dict, "status-reply",
&status_reply);
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "failed to"
+                        gf_log (frame->this->name, GF_LOG_ERROR, "failed to "
"get status");
goto out;
}
- rb_operation_str = status_reply;
+ rb_operation_str = gf_strdup (status_reply);
}
break;
case GF_REPLACE_OP_PAUSE:
if (rsp.op_ret)
- rb_operation_str = "replace-brick pause failed";
+ rb_operation_str = gf_strdup ("replace-brick pause "
+ "failed");
else
- rb_operation_str = "replace-brick paused successfully";
+ rb_operation_str = gf_strdup ("replace-brick paused "
+ "successfully");
break;
case GF_REPLACE_OP_ABORT:
if (rsp.op_ret)
- rb_operation_str = "replace-brick abort failed";
+ rb_operation_str = gf_strdup ("replace-brick abort "
+ "failed");
else
- rb_operation_str = "replace-brick aborted successfully";
+ rb_operation_str = gf_strdup ("replace-brick aborted "
+ "successfully");
break;
case GF_REPLACE_OP_COMMIT:
@@ -1619,9 +2167,11 @@ gf_cli3_1_replace_brick_cbk (struct rpc_req *req, struct iovec *iov,
if (rsp.op_ret || ret)
- rb_operation_str = "replace-brick commit failed";
+ rb_operation_str = gf_strdup ("replace-brick commit "
+ "failed");
else
- rb_operation_str = "replace-brick commit successful";
+ rb_operation_str = gf_strdup ("replace-brick commit "
+ "successful");
break;
@@ -1632,36 +2182,43 @@ gf_cli3_1_replace_brick_cbk (struct rpc_req *req, struct iovec *iov,
}
if (rsp.op_ret && (strcmp (rsp.op_errstr, ""))) {
- rb_operation_str = rsp.op_errstr;
+ rb_operation_str = gf_strdup (rsp.op_errstr);
}
gf_log ("cli", GF_LOG_INFO, "Received resp to replace brick");
- snprintf (msg,sizeof (msg), "%s",
+ snprintf (msg, sizeof (msg), "%s",
rb_operation_str ? rb_operation_str : "Unknown operation");
-#if (HAVE_LIB_XML)
if (global_state->mode & GLUSTER_MODE_XML) {
- ret = cli_xml_output_str ("volReplaceBrick", msg, rsp.op_ret,
- rsp.op_errno, rsp.op_errstr);
+ ret = cli_xml_output_vol_replace_brick (replace_op, rsp_dict,
+ rsp.op_ret,
+ rsp.op_errno, msg);
if (ret)
gf_log ("cli", GF_LOG_ERROR,
"Error outputting to xml");
goto out;
}
-#endif
- cli_out ("%s", msg);
+ if (rsp.op_ret)
+ cli_err ("volume replace-brick: failed: %s", msg);
+ else
+ cli_out ("volume replace-brick: success: %s", msg);
ret = rsp.op_ret;
out:
+ if (frame)
+ frame->local = NULL;
+
if (local) {
dict_unref (local->dict);
cli_local_wipe (local);
}
+ if (rb_operation_str)
+ GF_FREE (rb_operation_str);
+
cli_cmd_broadcast_response (ret);
- if (rsp.dict.dict_val)
- free (rsp.dict.dict_val);
+ free (rsp.dict.dict_val);
if (rsp_dict)
dict_unref (rsp_dict);
@@ -1670,7 +2227,7 @@ out:
static int
-gf_cli3_1_log_rotate_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_log_rotate_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf_cli_rsp rsp = {0,};
@@ -1683,7 +2240,8 @@ gf_cli3_1_log_rotate_cbk (struct rpc_req *req, struct iovec *iov,
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "error");
+ gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
@@ -1695,7 +2253,6 @@ gf_cli3_1_log_rotate_cbk (struct rpc_req *req, struct iovec *iov,
snprintf (msg, sizeof (msg), "log rotate %s",
(rsp.op_ret) ? "unsuccessful": "successful");
-#if (HAVE_LIB_XML)
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_str ("volLogRotate", msg, rsp.op_ret,
rsp.op_errno, rsp.op_errstr);
@@ -1704,21 +2261,22 @@ gf_cli3_1_log_rotate_cbk (struct rpc_req *req, struct iovec *iov,
"Error outputting to xml");
goto out;
}
-#endif
- cli_out ("%s", msg);
+ if (rsp.op_ret)
+ cli_err ("volume log-rotate: failed: %s", msg);
+ else
+ cli_out ("volume log-rotate: success");
ret = rsp.op_ret;
out:
cli_cmd_broadcast_response (ret);
- if (rsp.dict.dict_val)
- free (rsp.dict.dict_val);
+ free (rsp.dict.dict_val);
return ret;
}
static int
-gf_cli3_1_sync_volume_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_sync_volume_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf_cli_rsp rsp = {0,};
@@ -1731,19 +2289,20 @@ gf_cli3_1_sync_volume_cbk (struct rpc_req *req, struct iovec *iov,
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "error");
+ gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
gf_log ("cli", GF_LOG_DEBUG, "Received resp to sync");
if (rsp.op_ret && strcmp (rsp.op_errstr, ""))
- snprintf (msg, sizeof (msg), "%s", rsp.op_errstr);
+ snprintf (msg, sizeof (msg), "volume sync: failed: %s",
+ rsp.op_errstr);
else
snprintf (msg, sizeof (msg), "volume sync: %s",
- (rsp.op_ret) ? "unsuccessful": "successful");
+ (rsp.op_ret) ? "failed": "success");
-#if (HAVE_LIB_XML)
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_str ("volSync", msg, rsp.op_ret,
rsp.op_errno, rsp.op_errstr);
@@ -1752,9 +2311,11 @@ gf_cli3_1_sync_volume_cbk (struct rpc_req *req, struct iovec *iov,
"Error outputting to xml");
goto out;
}
-#endif
- cli_out ("%s", msg);
+ if (rsp.op_ret)
+ cli_err ("%s", msg);
+ else
+ cli_out ("%s", msg);
ret = rsp.op_ret;
out:
@@ -1762,145 +2323,421 @@ out:
return ret;
}
-int32_t
-gf_cli3_1_print_limit_list (char *volname, char *limit_list,
- char *op_errstr)
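+/* Print a single row of 'quota list' output for @path: fetch the
+ * limit-set and size xattrs from @mountdir (the volume's aux mount)
+ * and render the hard limit, soft limit, usage and availability in
+ * human-readable form, falling back to @default_sl when no per-path
+ * soft limit is set. */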
+static int
+print_quota_list_output (char *mountdir, char *default_sl, char *path)
{
- int64_t size = 0;
- int64_t limit_value = 0;
- int32_t i, j, k;
- int32_t len = 0, ret = -1;
- char *size_str = NULL;
- char path [PATH_MAX] = {0, };
- char ret_str [1024] = {0, };
- char value [1024] = {0, };
- char mountdir [] = "/tmp/mntXXXXXX";
- char abspath [PATH_MAX] = {0, };
- runner_t runner = {0,};
+ int64_t used_space = 0;
+ int64_t avail = 0;
+ char *used_str = NULL;
+ char *avail_str = NULL;
+ int ret = -1;
+ char *sl_final = NULL;
+ char *hl_str = NULL;
+ double sl_num = 0;
+ gf_boolean_t sl = _gf_false;
+ gf_boolean_t hl = _gf_false;
+ char percent_str[20] = {0};
+
+ struct quota_limit {
+ int64_t hl;
+ int64_t sl;
+ } __attribute__ ((__packed__)) existing_limits;
+
+ ret = sys_lgetxattr (mountdir, "trusted.glusterfs.quota.limit-set",
+ (void *)&existing_limits,
+ sizeof (existing_limits));
+ if (ret < 0) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to get the xattr "
+ "trusted.glusterfs.quota.limit-set on %s. Reason : %s",
+ mountdir, strerror (errno));
+ switch (errno) {
+#if defined(ENODATA)
+ case ENODATA:
+#endif
+#if defined(ENOATTR) && (ENOATTR != ENODATA)
+ case ENOATTR:
+#endif
+ cli_err ("%-40s %s", path, "Limit not set");
+ break;
+ default:
+ cli_err ("%-40s %s", path, strerror (errno));
+ break;
+ }
- GF_VALIDATE_OR_GOTO ("cli", volname, out);
- GF_VALIDATE_OR_GOTO ("cli", limit_list, out);
+ goto out;
+ }
- if (!connected)
+ existing_limits.hl = ntoh64 (existing_limits.hl);
+ existing_limits.sl = ntoh64 (existing_limits.sl);
+
+ hl_str = gf_uint64_2human_readable (existing_limits.hl);
+
+ if (existing_limits.sl < 0) {
+ ret = gf_string2percent (default_sl, &sl_num);
+ sl_num = (sl_num * existing_limits.hl) / 100;
+ sl_final = default_sl;
+ } else {
+ sl_num = (existing_limits.sl * existing_limits.hl) / 100;
+ snprintf (percent_str, sizeof (percent_str), "%"PRIu64"%%",
+ existing_limits.sl);
+ sl_final = percent_str;
+ }
+
+ ret = sys_lgetxattr (mountdir, "trusted.glusterfs.quota.size",
+ &used_space, sizeof (used_space));
+
+ if (ret < 0) {
+ cli_out ("%-40s %7s %9s %11s %7s %15s %20s",
+ path, hl_str, sl_final,
+ "N/A", "N/A", "N/A", "N/A");
+ } else {
+ used_space = ntoh64 (used_space);
+
+ used_str = gf_uint64_2human_readable (used_space);
+
+ if (existing_limits.hl > used_space) {
+ avail = existing_limits.hl - used_space;
+ hl = _gf_false;
+ if (used_space > sl_num)
+ sl = _gf_true;
+ else
+ sl = _gf_false;
+ } else {
+ avail = 0;
+ hl = sl = _gf_true;
+ }
+
+ avail_str = gf_uint64_2human_readable (avail);
+ if (used_str == NULL) {
+ cli_out ("%-40s %7s %9s %11"PRIu64
+                                 "%9"PRIu64" %15s %20s", path, hl_str,
+ sl_final, used_space, avail, sl? "Yes" : "No",
+ hl? "Yes" : "No");
+ } else {
+ cli_out ("%-40s %7s %9s %11s %7s %15s %20s", path, hl_str,
+ sl_final, used_str, avail_str, sl? "Yes" : "No",
+ hl? "Yes" : "No");
+ }
+ }
+
+out:
+ GF_FREE (used_str);
+ GF_FREE (avail_str);
+ GF_FREE (hl_str);
+ return ret;
+}
+
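+/* Walk the "path<N>" entries in @dict and print one quota row per
+ * path via print_quota_list_output(), after verifying that limits
+ * are configured on the volume and that the aux mount is online. */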
+int
+gf_cli_print_limit_list_from_dict (char *volname, dict_t *dict,
+ char *default_sl, int count, char *op_errstr)
+{
+ int ret = -1;
+ int i = 0;
+ char key[1024] = {0,};
+ char mountdir[PATH_MAX] = {0,};
+ char *path = NULL;
+
+        if (!dict || count <= 0)
goto out;
- len = strlen (limit_list);
- if (len == 0) {
- cli_out ("%s", op_errstr?op_errstr:"quota limit not set ");
+ /* Need to check if any quota limits are set on the volume before trying
+ * to list them
+ */
+ if (!_limits_set_on_volume (volname)) {
+ ret = 0;
+ cli_out ("quota: No quota configured on volume %s", volname);
goto out;
}
- if (mkdtemp (mountdir) == NULL) {
- gf_log ("cli", GF_LOG_WARNING, "failed to create a temporary "
- "mount directory");
+ /* Check if the mount is online before doing any listing */
+ if (!_quota_aux_mount_online (volname)) {
ret = -1;
goto out;
}
- /* Mount a temporary client to fetch the disk usage
- * of the directory on which the limit is set.
- */
- ret = runcmd (SBIN_DIR"/glusterfs", "-s",
- "localhost", "--volfile-id", volname, "-l",
- DEFAULT_LOG_FILE_DIRECTORY"/quota-list.log",
- mountdir, NULL);
+ cli_out (" Path Hard-limit "
+ "Soft-limit Used Available Soft-limit exceeded?"
+ " Hard-limit exceeded?");
+ cli_out ("--------------------------------------------------------"
+ "--------------------------------------------------------"
+ "-----------");
+
+ while (count--) {
+ snprintf (key, sizeof (key), "path%d", i++);
+
+ ret = dict_get_str (dict, key, &path);
+ if (ret < 0) {
+ gf_log ("cli", GF_LOG_DEBUG, "Path not present in limit"
+ " list");
+ continue;
+ }
+
+ ret = gf_canonicalize_path (path);
+ if (ret)
+ goto out;
+ GLUSTERD_GET_QUOTA_AUX_MOUNT_PATH (mountdir, volname, path);
+ ret = print_quota_list_output (mountdir, default_sl, path);
+
+ }
+out:
+ return ret;
+}
+
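+/* Render a single quota row from a quotad response: the path comes
+ * from GET_ANCESTRY_PATH_KEY, the limits from QUOTA_LIMIT_KEY and the
+ * usage from QUOTA_SIZE_KEY, while the default soft limit is read
+ * from the glusterd response dict cached in frame->local. */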
+int
+print_quota_list_from_quotad (call_frame_t *frame, dict_t *rsp_dict)
+{
+ int64_t used_space = 0;
+ int64_t avail = 0;
+ int64_t *limit = NULL;
+ char *used_str = NULL;
+ char *avail_str = NULL;
+ char *hl_str = NULL;
+ char *sl_final = NULL;
+ char *path = NULL;
+ char *default_sl = NULL;
+ int ret = -1;
+ cli_local_t *local = NULL;
+ dict_t *gd_rsp_dict = NULL;
+ double sl_num = 0;
+ gf_boolean_t sl = _gf_false;
+ gf_boolean_t hl = _gf_false;
+ char percent_str[20] = {0,};
+
+ local = frame->local;
+ gd_rsp_dict = local->dict;
+
+ struct quota_limit {
+ int64_t hl;
+ int64_t sl;
+ } __attribute__ ((__packed__)) *existing_limits = NULL;
+
+ ret = dict_get_str (rsp_dict, GET_ANCESTRY_PATH_KEY, &path);
if (ret) {
- gf_log ("cli", GF_LOG_WARNING, "failed to mount glusterfs client");
- ret = -1;
- goto rm_dir;
+ gf_log ("cli", GF_LOG_WARNING, "path key is not present "
+ "in dict");
+ goto out;
+ }
+
+ ret = dict_get_bin (rsp_dict, QUOTA_LIMIT_KEY, (void**)&limit);
+ if (ret) {
+ gf_log ("cli", GF_LOG_WARNING,
+ "limit key not present in dict");
+ goto out;
}
- len = strlen (limit_list);
- if (len == 0) {
- cli_out ("quota limit not set ");
- goto unmount;
+ ret = dict_get_str (gd_rsp_dict, "default-soft-limit", &default_sl);
+ if (ret) {
+ gf_log (frame->this->name, GF_LOG_ERROR, "failed to "
+ "get default soft limit");
+ goto out;
}
+ existing_limits = (struct quota_limit *)limit;
+ existing_limits->hl = ntoh64 (existing_limits->hl);
+ existing_limits->sl = ntoh64 (existing_limits->sl);
- i = 0;
+ hl_str = gf_uint64_2human_readable (existing_limits->hl);
- cli_out ("\tpath\t\t limit_set\t size");
- cli_out ("-----------------------------------------------------------"
- "-----------------------");
- while (i < len) {
- j = 0;
- k = 0;
+ if (existing_limits->sl < 0) {
+ ret = gf_string2percent (default_sl, &sl_num);
+ sl_num = (sl_num * existing_limits->hl) / 100;
+ sl_final = default_sl;
+ } else {
+ sl_num = (existing_limits->sl * existing_limits->hl) / 100;
+ snprintf (percent_str, sizeof (percent_str), "%"PRIu64"%%",
+ existing_limits->sl);
+ sl_final = percent_str;
+ }
- while (limit_list [i] != ':') {
- path [k++] = limit_list [i++];
+ ret = dict_get_bin (rsp_dict, QUOTA_SIZE_KEY, (void**)&limit);
+ if (ret < 0) {
+ gf_log ("cli", GF_LOG_WARNING,
+ "size key not present in dict");
+ cli_out ("%-40s %7s %9s %11s %7s %15s %20s", path, hl_str,
+ sl_final, "N/A", "N/A", "N/A", "N/A");
+ } else {
+ used_space = *limit;
+ used_space = ntoh64 (used_space);
+ used_str = gf_uint64_2human_readable (used_space);
+
+ if (existing_limits->hl > used_space) {
+ avail = existing_limits->hl - used_space;
+ hl = _gf_false;
+ if (used_space > sl_num)
+ sl = _gf_true;
+ else
+ sl = _gf_false;
+ } else {
+ avail = 0;
+ hl = sl = _gf_true;
}
- path [k] = '\0';
+ avail_str = gf_uint64_2human_readable (avail);
+ if (used_str == NULL)
+ cli_out ("%-40s %7s %9s %11"PRIu64
+ "%9"PRIu64" %15s %20s", path, hl_str,
+ sl_final, used_space, avail, sl? "Yes" : "No",
+ hl? "Yes" : "No");
+ else
+ cli_out ("%-40s %7s %9s %11s %7s %15s %20s", path,
+ hl_str, sl_final, used_str, avail_str,
+ sl? "Yes" : "No", hl? "Yes" : "No");
+ }
+
+ ret = 0;
+out:
+ GF_FREE (used_str);
+ GF_FREE (avail_str);
+ GF_FREE (hl_str);
+ return ret;
+}
- i++; //skip ':'
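+
+/* Callback for GF_AGGREGATOR_GETLIMIT: decode the quotad reply,
+ * unserialize its dict and hand it to print_quota_list_from_quotad(). */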
+int
+cli_quotad_getlimit_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ //TODO: we need to gather the path, hard-limit, soft-limit and used space
+ gf_cli_rsp rsp = {0,};
+ int ret = -1;
+ dict_t *dict = NULL;
+ call_frame_t *frame = NULL;
- while (limit_list [i] != ',' && limit_list [i] != '\0') {
- value [j++] = limit_list[i++];
- }
- value [j] = '\0';
+ if (-1 == req->rpc_status) {
+ goto out;
+ }
+
+ frame = myframe;
+
+ ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
+ goto out;
+ }
- snprintf (abspath, sizeof (abspath), "%s/%s", mountdir, path);
+ if (rsp.op_ret) {
+ ret = -1;
+ if (strcmp (rsp.op_errstr, ""))
+ cli_err ("quota command failed : %s", rsp.op_errstr);
+ else
+ cli_err ("quota command : failed");
+ goto out;
+ }
+
+ if (rsp.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new ();
- ret = sys_lgetxattr (abspath, "trusted.limit.list", (void *) ret_str, 4096);
+ ret = dict_unserialize (rsp.dict.dict_val,
+ rsp.dict.dict_len,
+ &dict);
if (ret < 0) {
- cli_out ("%-20s %10s", path, value);
- } else {
- sscanf (ret_str, "%"PRId64",%"PRId64, &size,
- &limit_value);
- size_str = gf_uint64_2human_readable ((uint64_t) size);
- if (size_str == NULL) {
- cli_out ("%-20s %10s %20"PRId64, path,
- value, size);
- } else {
- cli_out ("%-20s %10s %20s", path,
- value, size_str);
- GF_FREE (size_str);
- }
+ gf_log ("cli", GF_LOG_ERROR,
+ "failed to "
+                                "unserialize rsp-buffer to dictionary");
+ goto out;
}
- i++;
+ print_quota_list_from_quotad (frame, dict);
}
-unmount:
+out:
+ cli_cmd_broadcast_response (ret);
+ if (dict)
+ dict_unref (dict);
- runinit (&runner);
- runner_add_args (&runner, "umount",
-#if GF_LINUX_HOST_OS
- "-l",
-#endif
- mountdir, NULL);
- ret = runner_run_reuse (&runner);
- if (ret)
- runner_log (&runner, "cli", GF_LOG_WARNING, "error executing");
- runner_end (&runner);
+ free (rsp.dict.dict_val);
+ return ret;
+}
+
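+/* Serialize the request dict and submit GF_AGGREGATOR_GETLIMIT over
+ * the dedicated quotad RPC connection (global_quotad_rpc). */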
+int
+cli_quotad_getlimit (call_frame_t *frame, xlator_t *this, void *data)
+{
+ gf_cli_req req = {{0,}};
+ int ret = 0;
+ dict_t *dict = NULL;
+
+ if (!frame || !this || !data) {
+ ret = -1;
+ goto out;
+ }
+
+ dict = data;
+ ret = dict_allocate_and_serialize (dict, &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to serialize the data");
+
+ goto out;
+ }
+
+ ret = cli_cmd_submit (global_quotad_rpc, &req, frame, &cli_quotad_clnt,
+ GF_AGGREGATOR_GETLIMIT, NULL,
+ this, cli_quotad_getlimit_cbk,
+ (xdrproc_t) xdr_gf_cli_req);
-rm_dir:
- rmdir (mountdir);
out:
+ gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
return ret;
+
+}
+
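+/* Entry point for displaying 'quota list': prints the limit list
+ * from @dict when at least one entry is present and the CLI is
+ * connected to glusterd. */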
+void
+gf_cli_quota_list (char *volname, dict_t *dict, int count, char *op_errstr,
+ char *default_sl)
+{
+ GF_VALIDATE_OR_GOTO ("cli", volname, out);
+
+ if (!connected)
+ goto out;
+
+ if (count > 0)
+ gf_cli_print_limit_list_from_dict (volname, dict, default_sl,
+ count, op_errstr);
+out:
+ return;
}
int
-gf_cli3_1_quota_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_quota_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
- gf_cli_rsp rsp = {0,};
- int ret = -1;
- dict_t *dict = NULL;
- char *volname = NULL;
- char *limit_list = NULL;
- int32_t type = 0;
-
+ gf_cli_rsp rsp = {0,};
+ int ret = -1;
+ dict_t *dict = NULL;
+ char *volname = NULL;
+ int32_t type = 0;
+ call_frame_t *frame = NULL;
+ char *default_sl = NULL;
+ char *limit_list = NULL;
+ cli_local_t *local = NULL;
+ dict_t *aggr = NULL;
+ char *default_sl_dup = NULL;
+ int32_t entry_count = 0;
if (-1 == req->rpc_status) {
goto out;
}
+ frame = myframe;
+ local = frame->local;
+ aggr = local->dict;
+
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "error");
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
- if (rsp.op_ret &&
- strcmp (rsp.op_errstr, "") == 0) {
- cli_out ("command unsuccessful %s", rsp.op_errstr);
+ if (rsp.op_ret) {
+ ret = -1;
+ if (global_state->mode & GLUSTER_MODE_XML)
+ goto xml_output;
+
+ if (strcmp (rsp.op_errstr, ""))
+ cli_err ("quota command failed : %s", rsp.op_errstr);
+ else
+ cli_err ("quota command : failed");
+
goto out;
}
@@ -1912,73 +2749,115 @@ gf_cli3_1_quota_cbk (struct rpc_req *req, struct iovec *iov,
rsp.dict.dict_len,
&dict);
if (ret < 0) {
- gf_log ("glusterd", GF_LOG_ERROR,
+ gf_log ("cli", GF_LOG_ERROR,
"failed to "
"unserialize req-buffer to dictionary");
goto out;
}
}
+ gf_log ("cli", GF_LOG_DEBUG, "Received resp to quota command");
+
ret = dict_get_str (dict, "volname", &volname);
if (ret)
- gf_log (THIS->name, GF_LOG_TRACE,
+ gf_log (frame->this->name, GF_LOG_ERROR,
"failed to get volname");
- ret = dict_get_str (dict, "limit_list", &limit_list);
+ ret = dict_get_str (dict, "default-soft-limit", &default_sl);
if (ret)
- gf_log (THIS->name, GF_LOG_TRACE,
- "failed to get limit_list");
+ gf_log (frame->this->name, GF_LOG_TRACE, "failed to get "
+ "default soft limit");
+
+        // default-soft-limit is present in rsp_dict only if we sent
+        // GLUSTER_CLI_QUOTA with type GF_QUOTA_OPTION_TYPE_LIST
+ if (default_sl) {
+ default_sl_dup = gf_strdup (default_sl);
+ if (!default_sl_dup) {
+ ret = -1;
+ goto out;
+ }
+ ret = dict_set_dynstr (aggr, "default-soft-limit",
+ default_sl_dup);
+ if (ret) {
+ gf_log (frame->this->name, GF_LOG_TRACE,
+ "failed to set default soft limit");
+ GF_FREE (default_sl_dup);
+ }
+ }
ret = dict_get_int32 (dict, "type", &type);
if (ret)
- gf_log (THIS->name, GF_LOG_TRACE,
+ gf_log (frame->this->name, GF_LOG_TRACE,
"failed to get type");
+ ret = dict_get_int32 (dict, "count", &entry_count);
+ if (ret)
+ gf_log (frame->this->name, GF_LOG_TRACE, "failed to get count");
+
if (type == GF_QUOTA_OPTION_TYPE_LIST) {
- if (limit_list) {
- gf_cli3_1_print_limit_list (volname,
- limit_list,
- rsp.op_errstr);
- } else {
- gf_log ("cli", GF_LOG_INFO, "Received resp to quota "
- "command ");
- if (rsp.op_errstr)
- cli_out ("%s", rsp.op_errstr);
+ if (global_state->mode & GLUSTER_MODE_XML) {
+ ret = cli_xml_output_vol_quota_limit_list
+ (volname, limit_list, rsp.op_ret,
+ rsp.op_errno, rsp.op_errstr);
+ if (ret)
+ gf_log ("cli", GF_LOG_ERROR,
+ "Error outputting to xml");
+ goto out;
}
- } else {
- gf_log ("cli", GF_LOG_INFO, "Received resp to quota command ");
- if (rsp.op_errstr)
- cli_out ("%s", rsp.op_errstr);
- else
- cli_out ("%s", "successful");
+
+ gf_cli_quota_list (volname, dict, entry_count, rsp.op_errstr,
+ default_sl);
}
-out:
- ret = rsp.op_ret;
+xml_output:
+ if (global_state->mode & GLUSTER_MODE_XML) {
+ ret = cli_xml_output_str ("volQuota", NULL, rsp.op_ret,
+ rsp.op_errno, rsp.op_errstr);
+ if (ret)
+ gf_log ("cli", GF_LOG_ERROR,
+ "Error outputting to xml");
+ goto out;
+ }
+
+ if (!rsp.op_ret && type != GF_QUOTA_OPTION_TYPE_LIST)
+ cli_out ("volume quota : success");
+ ret = rsp.op_ret;
+out:
cli_cmd_broadcast_response (ret);
+ if (dict)
+ dict_unref (dict);
- if (rsp.dict.dict_val)
- free (rsp.dict.dict_val);
+ free (rsp.dict.dict_val);
return ret;
}
int
-gf_cli3_1_getspec_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_getspec_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf_getspec_rsp rsp = {0,};
int ret = -1;
char *spec = NULL;
+ call_frame_t *frame = NULL;
if (-1 == req->rpc_status) {
goto out;
}
+ frame = myframe;
+
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_getspec_rsp);
- if (ret < 0 || rsp.op_ret == -1) {
- gf_log ("", GF_LOG_ERROR, "error");
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
+ goto out;
+ }
+
+ if (rsp.op_ret == -1) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "getspec failed");
goto out;
}
@@ -2002,20 +2881,30 @@ out:
}
int
-gf_cli3_1_pmap_b2p_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_pmap_b2p_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
pmap_port_by_brick_rsp rsp = {0,};
int ret = -1;
char *spec = NULL;
+ call_frame_t *frame = NULL;
if (-1 == req->rpc_status) {
goto out;
}
+ frame = myframe;
+
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_pmap_port_by_brick_rsp);
- if (ret < 0 || rsp.op_ret == -1) {
- gf_log ("", GF_LOG_ERROR, "error");
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
+ goto out;
+ }
+
+ if (rsp.op_ret == -1) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+                        "pmap_b2p failed");
goto out;
}
@@ -2033,13 +2922,12 @@ out:
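+/* Send GLUSTER_CLI_PROBE to glusterd with the hostname and port
+ * packed in the request dict. */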
int32_t
-gf_cli3_1_probe (call_frame_t *frame, xlator_t *this,
+gf_cli_probe (call_frame_t *frame, xlator_t *this,
void *data)
{
- gf1_cli_probe_req req = {0,};
+ gf_cli_req req = {{0,},};
int ret = 0;
dict_t *dict = NULL;
- char *hostname = NULL;
int port = 0;
if (!frame || !this || !data) {
@@ -2048,35 +2936,32 @@ gf_cli3_1_probe (call_frame_t *frame, xlator_t *this,
}
dict = data;
- ret = dict_get_str (dict, "hostname", &hostname);
- if (ret)
- goto out;
ret = dict_get_int32 (dict, "port", &port);
- if (ret)
- port = CLI_GLUSTERD_PORT;
-
- req.hostname = hostname;
- req.port = port;
+ if (ret) {
+ ret = dict_set_int32 (dict, "port", CLI_GLUSTERD_PORT);
+ if (ret)
+ goto out;
+ }
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
- GLUSTER_CLI_PROBE, NULL,
- this, gf_cli3_1_probe_cbk,
- (xdrproc_t)xdr_gf1_cli_probe_req);
+ ret = cli_to_glusterd (&req, frame, gf_cli_probe_cbk,
+ (xdrproc_t) xdr_gf_cli_req, dict,
+ GLUSTER_CLI_PROBE, this, cli_rpc_prog, NULL);
out:
+ GF_FREE (req.dict.dict_val);
gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
+
return ret;
}
int32_t
-gf_cli3_1_deprobe (call_frame_t *frame, xlator_t *this,
+gf_cli_deprobe (call_frame_t *frame, xlator_t *this,
void *data)
{
- gf1_cli_deprobe_req req = {0,};
+ gf_cli_req req = {{0,},};
int ret = 0;
dict_t *dict = NULL;
- char *hostname = NULL;
int port = 0;
int flags = 0;
@@ -2086,57 +2971,70 @@ gf_cli3_1_deprobe (call_frame_t *frame, xlator_t *this,
}
dict = data;
- ret = dict_get_str (dict, "hostname", &hostname);
- if (ret)
- goto out;
-
ret = dict_get_int32 (dict, "port", &port);
- if (ret)
- port = CLI_GLUSTERD_PORT;
+ if (ret) {
+ ret = dict_set_int32 (dict, "port", CLI_GLUSTERD_PORT);
+ if (ret)
+ goto out;
+ }
ret = dict_get_int32 (dict, "flags", &flags);
- if (ret)
- flags = 0;
+ if (ret) {
+ ret = dict_set_int32 (dict, "flags", 0);
+ if (ret)
+ goto out;
+ }
- req.hostname = hostname;
- req.port = port;
- req.flags = flags;
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
- GLUSTER_CLI_DEPROBE, NULL,
- this, gf_cli3_1_deprobe_cbk,
- (xdrproc_t)xdr_gf1_cli_deprobe_req);
+ ret = cli_to_glusterd (&req, frame, gf_cli_deprobe_cbk,
+ (xdrproc_t)xdr_gf_cli_req, dict,
+ GLUSTER_CLI_DEPROBE, this, cli_rpc_prog, NULL);
out:
+ GF_FREE (req.dict.dict_val);
gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
+
return ret;
}
int32_t
-gf_cli3_1_list_friends (call_frame_t *frame, xlator_t *this,
- void *data)
+gf_cli_list_friends (call_frame_t *frame, xlator_t *this,
+ void *data)
{
gf1_cli_peer_list_req req = {0,};
int ret = 0;
+ unsigned long flags = 0;
if (!frame || !this) {
ret = -1;
goto out;
}
- req.flags = GF_CLI_LIST_ALL;
+ GF_ASSERT (frame->local == NULL);
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
+ flags = (long)data;
+ req.flags = flags;
+ frame->local = (void*)flags;
+ ret = cli_cmd_submit (NULL, &req, frame, cli_rpc_prog,
GLUSTER_CLI_LIST_FRIENDS, NULL,
- this, gf_cli3_1_list_friends_cbk,
+ this, gf_cli_list_friends_cbk,
(xdrproc_t) xdr_gf1_cli_peer_list_req);
out:
+        if (ret) {
+                /*
+                 * On success, gf_cli_list_friends_cbk() (invoked
+                 * through cli_cmd_submit()) resets frame->local
+                 * to NULL. If cli_cmd_submit() fails partway,
+                 * reset frame->local here instead.
+                 */
+                frame->local = NULL;
+        }
gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
int32_t
-gf_cli3_1_get_next_volume (call_frame_t *frame, xlator_t *this,
+gf_cli_get_next_volume (call_frame_t *frame, xlator_t *this,
void *data)
{
@@ -2150,31 +3048,51 @@ gf_cli3_1_get_next_volume (call_frame_t *frame, xlator_t *this,
}
ctx = data;
+ local = frame->local;
+
+ if (global_state->mode & GLUSTER_MODE_XML) {
+ ret = cli_xml_output_vol_info_begin (local, 0, 0, "");
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml");
+ goto out;
+ }
+ }
- ret = gf_cli3_1_get_volume (frame, this, data);
+ ret = gf_cli_get_volume (frame, this, data);
- local = frame->local;
if (!local || !local->get_vol.volname) {
- cli_out ("No volumes present");
+ if ((global_state->mode & GLUSTER_MODE_XML))
+ goto end_xml;
+
+ cli_err ("No volumes present");
goto out;
}
+
ctx->volname = local->get_vol.volname;
while (ctx->volname) {
- ret = gf_cli3_1_get_volume (frame, this, ctx);
+ ret = gf_cli_get_volume (frame, this, ctx);
if (ret)
goto out;
ctx->volname = local->get_vol.volname;
}
+end_xml:
+ if (global_state->mode & GLUSTER_MODE_XML) {
+ ret = cli_xml_output_vol_info_end (local);
+ if (ret)
+ gf_log ("cli", GF_LOG_ERROR, "Error outputting to xml");
+ }
+
out:
+ gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
int32_t
-gf_cli3_1_get_volume (call_frame_t *frame, xlator_t *this,
+gf_cli_get_volume (call_frame_t *frame, xlator_t *this,
void *data)
{
gf_cli_req req = {{0,}};
@@ -2203,90 +3121,108 @@ gf_cli3_1_get_volume (call_frame_t *frame, xlator_t *this,
flags = ctx->flags;
ret = dict_set_int32 (dict, "flags", flags);
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "failed to set flags");
+ gf_log (frame->this->name, GF_LOG_ERROR, "failed to set flags");
goto out;
}
- ret = dict_allocate_and_serialize (dict,
- &req.dict.dict_val,
- (size_t *)&req.dict.dict_len);
+ ret = dict_allocate_and_serialize (dict, &req.dict.dict_val,
+ &req.dict.dict_len);
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
+ ret = cli_cmd_submit (NULL, &req, frame, cli_rpc_prog,
GLUSTER_CLI_GET_VOLUME, NULL,
- this, gf_cli3_1_get_volume_cbk,
+ this, gf_cli_get_volume_cbk,
(xdrproc_t) xdr_gf_cli_req);
out:
if (dict)
dict_unref (dict);
- if (req.dict.dict_val)
- GF_FREE (req.dict.dict_val);
+ GF_FREE (req.dict.dict_val);
gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
-
int32_t
-gf_cli3_1_create_volume (call_frame_t *frame, xlator_t *this,
- void *data)
+gf_cli3_1_uuid_get (call_frame_t *frame, xlator_t *this,
+ void *data)
{
- gf_cli_req req = {{0,}};
- int ret = 0;
- dict_t *dict = NULL;
- cli_local_t *local = NULL;
+ gf_cli_req req = {{0,}};
+ int ret = 0;
+ dict_t *dict = NULL;
- if (!frame || !this || !data) {
+ if (!frame || !this || !data) {
ret = -1;
goto out;
}
- dict = dict_ref ((dict_t *)data);
+ dict = data;
+ ret = cli_to_glusterd (&req, frame, gf_cli3_1_uuid_get_cbk,
+ (xdrproc_t)xdr_gf_cli_req, dict,
+ GLUSTER_CLI_UUID_GET, this, cli_rpc_prog,
+ NULL);
+out:
+ gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
- ret = dict_allocate_and_serialize (dict,
- &req.dict.dict_val,
- (size_t *)&req.dict.dict_len);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_DEBUG,
- "failed to get serialized length of dict");
+int32_t
+gf_cli3_1_uuid_reset (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gf_cli_req req = {{0,}};
+ int ret = 0;
+ dict_t *dict = NULL;
+
+ if (!frame || !this || !data) {
+ ret = -1;
goto out;
}
- local = cli_local_get ();
+ dict = data;
+ ret = cli_to_glusterd (&req, frame, gf_cli3_1_uuid_reset_cbk,
+ (xdrproc_t)xdr_gf_cli_req, dict,
+ GLUSTER_CLI_UUID_RESET, this, cli_rpc_prog,
+ NULL);
+out:
+ gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
- if (local) {
- local->dict = dict_ref (dict);
- frame->local = local;
- }
+int32_t
+gf_cli_create_volume (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gf_cli_req req = {{0,}};
+ int ret = 0;
+ dict_t *dict = NULL;
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
- GLUSTER_CLI_CREATE_VOLUME, NULL,
- this, gf_cli3_1_create_volume_cbk,
- (xdrproc_t) xdr_gf_cli_req);
+ if (!frame || !this || !data) {
+ ret = -1;
+ goto out;
+ }
+ dict = data;
+ ret = cli_to_glusterd (&req, frame, gf_cli_create_volume_cbk,
+ (xdrproc_t) xdr_gf_cli_req, dict,
+ GLUSTER_CLI_CREATE_VOLUME, this, cli_rpc_prog,
+ NULL);
out:
gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
- if (dict)
- dict_unref (dict);
-
- if (req.dict.dict_val) {
- GF_FREE (req.dict.dict_val);
- }
+ GF_FREE (req.dict.dict_val);
return ret;
}
int32_t
-gf_cli3_1_delete_volume (call_frame_t *frame, xlator_t *this,
+gf_cli_delete_volume (call_frame_t *frame, xlator_t *this,
void *data)
{
gf_cli_req req = {{0,}};
int ret = 0;
- cli_local_t *local = NULL;
dict_t *dict = NULL;
if (!frame || !this || !data) {
@@ -2294,50 +3230,26 @@ gf_cli3_1_delete_volume (call_frame_t *frame, xlator_t *this,
goto out;
}
- local = cli_local_get ();
-
- dict = dict_new ();
- ret = dict_set_str (dict, "volname", data);
- if (ret) {
- gf_log (THIS->name, GF_LOG_WARNING, "dict set failed");
- goto out;
- }
- if (local) {
- local->dict = dict_ref (dict);
- frame->local = local;
- }
-
- ret = dict_allocate_and_serialize (dict,
- &req.dict.dict_val,
- (size_t *)&req.dict.dict_len);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "failed to get serialize dict");
- goto out;
- }
+ dict = data;
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
- GLUSTER_CLI_DELETE_VOLUME, NULL,
- this, gf_cli3_1_delete_volume_cbk,
- (xdrproc_t)xdr_gf_cli_req);
+ ret = cli_to_glusterd (&req, frame, gf_cli_delete_volume_cbk,
+ (xdrproc_t) xdr_gf_cli_req, dict,
+ GLUSTER_CLI_DELETE_VOLUME, this, cli_rpc_prog,
+ NULL);
out:
- if (dict)
- dict_unref (dict);
- if (req.dict.dict_val)
- GF_FREE (req.dict.dict_val);
+ GF_FREE (req.dict.dict_val);
gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
int32_t
-gf_cli3_1_start_volume (call_frame_t *frame, xlator_t *this,
+gf_cli_start_volume (call_frame_t *frame, xlator_t *this,
void *data)
{
gf_cli_req req = {{0,}};
int ret = 0;
- cli_local_t *local = NULL;
dict_t *dict = NULL;
if (!frame || !this || !data) {
@@ -2346,27 +3258,11 @@ gf_cli3_1_start_volume (call_frame_t *frame, xlator_t *this,
}
dict = data;
- local = cli_local_get ();
-
- ret = dict_allocate_and_serialize (dict,
- &req.dict.dict_val,
- (size_t *)&req.dict.dict_len);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "failed to serialize dict");
- goto out;
- }
-
-
- if (local) {
- local->dict = dict_ref (dict);
- frame->local = local;
- }
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
- GLUSTER_CLI_START_VOLUME, NULL,
- this, gf_cli3_1_start_volume_cbk,
- (xdrproc_t) xdr_gf_cli_req);
+ ret = cli_to_glusterd (&req, frame, gf_cli_start_volume_cbk,
+ (xdrproc_t) xdr_gf_cli_req, dict,
+ GLUSTER_CLI_START_VOLUME, this, cli_rpc_prog,
+ NULL);
out:
gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
@@ -2375,12 +3271,11 @@ out:
}
int32_t
-gf_cli3_1_stop_volume (call_frame_t *frame, xlator_t *this,
+gf_cli_stop_volume (call_frame_t *frame, xlator_t *this,
void *data)
{
gf_cli_req req = {{0,}};
int ret = 0;
- cli_local_t *local = NULL;
dict_t *dict = data;
if (!frame || !this || !data) {
@@ -2388,28 +3283,12 @@ gf_cli3_1_stop_volume (call_frame_t *frame, xlator_t *this,
goto out;
}
- local = cli_local_get ();
dict = data;
- if (local) {
- local->dict = dict_ref (dict);
- frame->local = local;
- }
-
- ret = dict_allocate_and_serialize (dict,
- &req.dict.dict_val,
- (size_t *) &req.dict.dict_len);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "failed to serialize the data");
-
- goto out;
- }
-
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
- GLUSTER_CLI_STOP_VOLUME, NULL,
- this, gf_cli3_1_stop_volume_cbk,
- (xdrproc_t) xdr_gf_cli_req);
+ ret = cli_to_glusterd (&req, frame, gf_cli_stop_volume_cbk,
+ (xdrproc_t) xdr_gf_cli_req, dict,
+ GLUSTER_CLI_STOP_VOLUME, this, cli_rpc_prog,
+ NULL);
out:
gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
@@ -2418,17 +3297,12 @@ out:
}
int32_t
-gf_cli3_1_defrag_volume (call_frame_t *frame, xlator_t *this,
+gf_cli_defrag_volume (call_frame_t *frame, xlator_t *this,
void *data)
{
gf_cli_req req = {{0,}};
int ret = 0;
- cli_local_t *local = NULL;
- char *volname = NULL;
- char *cmd_str = NULL;
dict_t *dict = NULL;
- gf_cli_defrag_type cmd = 0;
- dict_t *req_dict = NULL;
if (!frame || !this || !data) {
ret = -1;
@@ -2437,81 +3311,10 @@ gf_cli3_1_defrag_volume (call_frame_t *frame, xlator_t *this,
dict = data;
- ret = dict_get_str (dict, "volname", &volname);
- if (ret)
- gf_log ("", GF_LOG_DEBUG, "error");
-
- ret = dict_get_str (dict, "command", &cmd_str);
- if (ret) {
- gf_log ("", GF_LOG_DEBUG, "error");
- goto out;
- }
-
- if (strcmp (cmd_str, "start") == 0) {
- cmd = GF_DEFRAG_CMD_START;
- ret = dict_get_str (dict, "option", &cmd_str);
- if (!ret) {
- if (strcmp (cmd_str, "force") == 0) {
- cmd = GF_DEFRAG_CMD_START_FORCE;
- }
- }
- goto done;
- }
-
- if (strcmp (cmd_str, "fix-layout") == 0) {
- cmd = GF_DEFRAG_CMD_START_LAYOUT_FIX;
- goto done;
- }
- if (strcmp (cmd_str, "stop") == 0) {
- cmd = GF_DEFRAG_CMD_STOP;
- goto done;
- }
- if (strcmp (cmd_str, "status") == 0) {
- cmd = GF_DEFRAG_CMD_STATUS;
- }
-
-done:
- local = cli_local_get ();
-
- req_dict = dict_new ();
- if (!req_dict) {
- ret = -1;
- goto out;
- }
-
- ret = dict_set_str (req_dict, "volname", volname);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "Failed to set dict");
- goto out;
- }
-
- ret = dict_set_int32 (req_dict, "rebalance-command", (int32_t) cmd);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "Failed to set dict");
- goto out;
- }
-
- if (local) {
- local->dict = dict_ref (req_dict);
- frame->local = local;
- }
-
- ret = dict_allocate_and_serialize (req_dict,
- &req.dict.dict_val,
- (size_t *) &req.dict.dict_len);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "failed to serialize the data");
-
- goto out;
- }
-
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
- GLUSTER_CLI_DEFRAG_VOLUME, NULL,
- this, gf_cli3_1_defrag_volume_cbk,
- (xdrproc_t) xdr_gf_cli_req);
+ ret = cli_to_glusterd (&req, frame, gf_cli_defrag_volume_cbk,
+ (xdrproc_t) xdr_gf_cli_req, dict,
+ GLUSTER_CLI_DEFRAG_VOLUME, this, cli_rpc_prog,
+ NULL);
out:
gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
@@ -2520,7 +3323,7 @@ out:
}
int32_t
-gf_cli3_1_rename_volume (call_frame_t *frame, xlator_t *this,
+gf_cli_rename_volume (call_frame_t *frame, xlator_t *this,
void *data)
{
gf_cli_req req = {{0,}};
@@ -2534,9 +3337,8 @@ gf_cli3_1_rename_volume (call_frame_t *frame, xlator_t *this,
dict = data;
- ret = dict_allocate_and_serialize (dict,
- &req.dict.dict_val,
- (size_t *) &req.dict.dict_len);
+ ret = dict_allocate_and_serialize (dict, &req.dict.dict_val,
+ &req.dict.dict_len);
if (ret < 0) {
gf_log (this->name, GF_LOG_ERROR,
"failed to serialize the data");
@@ -2545,9 +3347,9 @@ gf_cli3_1_rename_volume (call_frame_t *frame, xlator_t *this,
}
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
+ ret = cli_cmd_submit (NULL, &req, frame, cli_rpc_prog,
GLUSTER_CLI_RENAME_VOLUME, NULL,
- this, gf_cli3_1_rename_volume_cbk,
+ this, gf_cli_rename_volume_cbk,
(xdrproc_t) xdr_gf_cli_req);
out:
@@ -2557,7 +3359,7 @@ out:
}
int32_t
-gf_cli3_1_reset_volume (call_frame_t *frame, xlator_t *this,
+gf_cli_reset_volume (call_frame_t *frame, xlator_t *this,
void *data)
{
gf_cli_req req = {{0,}};
@@ -2571,28 +3373,18 @@ gf_cli3_1_reset_volume (call_frame_t *frame, xlator_t *this,
dict = data;
- ret = dict_allocate_and_serialize (dict,
- &req.dict.dict_val,
- (size_t *)&req.dict.dict_len);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "failed to get serialized length of dict");
- goto out;
- }
-
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
- GLUSTER_CLI_RESET_VOLUME, NULL,
- this, gf_cli3_1_reset_volume_cbk,
- (xdrproc_t) xdr_gf_cli_req);
+ ret = cli_to_glusterd (&req, frame, gf_cli_reset_volume_cbk,
+ (xdrproc_t) xdr_gf_cli_req, dict,
+ GLUSTER_CLI_RESET_VOLUME, this, cli_rpc_prog,
+ NULL);
out:
- gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
-
+ gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
int32_t
-gf_cli3_1_set_volume (call_frame_t *frame, xlator_t *this,
+gf_cli_set_volume (call_frame_t *frame, xlator_t *this,
void *data)
{
gf_cli_req req = {{0,}};
@@ -2606,19 +3398,10 @@ gf_cli3_1_set_volume (call_frame_t *frame, xlator_t *this,
dict = data;
- ret = dict_allocate_and_serialize (dict,
- &req.dict.dict_val,
- (size_t *)&req.dict.dict_len);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_DEBUG,
- "failed to get serialized length of dict");
- goto out;
- }
-
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
- GLUSTER_CLI_SET_VOLUME, NULL,
- this, gf_cli3_1_set_volume_cbk,
- (xdrproc_t) xdr_gf_cli_req);
+ ret = cli_to_glusterd (&req, frame, gf_cli_set_volume_cbk,
+ (xdrproc_t) xdr_gf_cli_req, dict,
+ GLUSTER_CLI_SET_VOLUME, this, cli_rpc_prog,
+ NULL);
out:
gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
@@ -2627,7 +3410,7 @@ out:
}
int32_t
-gf_cli3_1_add_brick (call_frame_t *frame, xlator_t *this,
+gf_cli_add_brick (call_frame_t *frame, xlator_t *this,
void *data)
{
gf_cli_req req = {{0,}};
@@ -2652,33 +3435,20 @@ gf_cli3_1_add_brick (call_frame_t *frame, xlator_t *this,
if (ret)
goto out;
-
- ret = dict_allocate_and_serialize (dict,
- &req.dict.dict_val,
- (size_t *)&req.dict.dict_len);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_DEBUG,
- "failed to get serialized length of dict");
- goto out;
- }
-
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
- GLUSTER_CLI_ADD_BRICK, NULL,
- this, gf_cli3_1_add_brick_cbk,
- (xdrproc_t) xdr_gf_cli_req);
+ ret = cli_to_glusterd (&req, frame, gf_cli_add_brick_cbk,
+ (xdrproc_t) xdr_gf_cli_req, dict,
+ GLUSTER_CLI_ADD_BRICK, this, cli_rpc_prog, NULL);
out:
gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
- if (req.dict.dict_val) {
- GF_FREE (req.dict.dict_val);
- }
+ GF_FREE (req.dict.dict_val);
return ret;
}
int32_t
-gf_cli3_1_remove_brick (call_frame_t *frame, xlator_t *this,
+gf_cli_remove_brick (call_frame_t *frame, xlator_t *this,
void *data)
{
        gf_cli_req              req =  {{0,}};
@@ -2705,23 +3475,16 @@ gf_cli3_1_remove_brick (call_frame_t *frame, xlator_t *this,
if (ret)
goto out;
- if (command != GF_OP_CMD_STATUS) {
+ if ((command != GF_OP_CMD_STATUS) &&
+ (command != GF_OP_CMD_STOP)) {
- ret = dict_allocate_and_serialize (dict,
- &req.dict.dict_val,
- (size_t *)&req.dict.dict_len);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_DEBUG,
- "failed to get serialized length of dict");
- goto out;
- }
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
- GLUSTER_CLI_REMOVE_BRICK, NULL,
- this, gf_cli3_1_remove_brick_cbk,
- (xdrproc_t) xdr_gf_cli_req);
+ ret = cli_to_glusterd (&req, frame, gf_cli_remove_brick_cbk,
+ (xdrproc_t) xdr_gf_cli_req, dict,
+ GLUSTER_CLI_REMOVE_BRICK, this,
+ cli_rpc_prog, NULL);
} else {
- /* Need rebalance status to e sent :-) */
+ /* Need rebalance status to be sent :-) */
req_dict = dict_new ();
if (!req_dict) {
ret = -1;
@@ -2730,60 +3493,49 @@ gf_cli3_1_remove_brick (call_frame_t *frame, xlator_t *this,
ret = dict_set_str (req_dict, "volname", volname);
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
+ gf_log (this->name, GF_LOG_ERROR,
"Failed to set dict");
goto out;
}
- cmd |= GF_DEFRAG_CMD_STATUS;
+ if (command == GF_OP_CMD_STATUS)
+ cmd |= GF_DEFRAG_CMD_STATUS;
+ else
+ cmd |= GF_DEFRAG_CMD_STOP;
ret = dict_set_int32 (req_dict, "rebalance-command", (int32_t) cmd);
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
+ gf_log (this->name, GF_LOG_ERROR,
"Failed to set dict");
goto out;
}
- ret = dict_allocate_and_serialize (req_dict,
- &status_req.dict.dict_val,
- (size_t *) &status_req.dict.dict_len);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "failed to serialize the data");
+ ret = cli_to_glusterd (&status_req, frame,
+ gf_cli3_remove_brick_status_cbk,
+ (xdrproc_t) xdr_gf_cli_req, req_dict,
+ GLUSTER_CLI_DEFRAG_VOLUME, this,
+ cli_rpc_prog, NULL);
- goto out;
}
- ret = cli_cmd_submit (&status_req, frame, cli_rpc_prog,
- GLUSTER_CLI_DEFRAG_VOLUME, NULL,
- this, gf_cli3_remove_brick_status_cbk,
- (xdrproc_t) xdr_gf_cli_req);
-
- }
-
out:
+ if (req_dict)
+ dict_unref (req_dict);
gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
- if (req.dict.dict_val) {
- GF_FREE (req.dict.dict_val);
- }
+ GF_FREE (req.dict.dict_val);
- if (status_req.dict.dict_val)
- GF_FREE (status_req.dict.dict_val);
-
- if (req_dict)
- dict_unref (req_dict);
+ GF_FREE (status_req.dict.dict_val);
return ret;
}
int32_t
-gf_cli3_1_replace_brick (call_frame_t *frame, xlator_t *this,
+gf_cli_replace_brick (call_frame_t *frame, xlator_t *this,
void *data)
{
gf_cli_req req = {{0,}};
int ret = 0;
- cli_local_t *local = NULL;
dict_t *dict = NULL;
char *src_brick = NULL;
char *dst_brick = NULL;
@@ -2797,17 +3549,6 @@ gf_cli3_1_replace_brick (call_frame_t *frame, xlator_t *this,
dict = data;
- local = cli_local_get ();
- if (!local) {
- ret = -1;
- gf_log (this->name, GF_LOG_ERROR,
- "Out of memory");
- goto out;
- }
-
- local->dict = dict_ref (dict);
- frame->local = local;
-
ret = dict_get_int32 (dict, "operation", &op);
if (ret) {
gf_log (this->name, GF_LOG_DEBUG,
@@ -2840,34 +3581,22 @@ gf_cli3_1_replace_brick (call_frame_t *frame, xlator_t *this,
"%s with operation=%d", src_brick,
dst_brick, op);
-
- ret = dict_allocate_and_serialize (dict,
- &req.dict.dict_val,
- (size_t *)&req.dict.dict_len);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_DEBUG,
- "failed to get serialized length of dict");
- goto out;
- }
-
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
- GLUSTER_CLI_REPLACE_BRICK, NULL,
- this, gf_cli3_1_replace_brick_cbk,
- (xdrproc_t) xdr_gf_cli_req);
+ ret = cli_to_glusterd (&req, frame, gf_cli_replace_brick_cbk,
+ (xdrproc_t) xdr_gf_cli_req, dict,
+ GLUSTER_CLI_REPLACE_BRICK, this, cli_rpc_prog,
+ NULL);
out:
gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
- if (req.dict.dict_val) {
- GF_FREE (req.dict.dict_val);
- }
+ GF_FREE (req.dict.dict_val);
return ret;
}
int32_t
-gf_cli3_1_log_rotate (call_frame_t *frame, xlator_t *this,
+gf_cli_log_rotate (call_frame_t *frame, xlator_t *this,
void *data)
{
gf_cli_req req = {{0,}};
@@ -2881,31 +3610,20 @@ gf_cli3_1_log_rotate (call_frame_t *frame, xlator_t *this,
dict = data;
- ret = dict_allocate_and_serialize (dict,
- &req.dict.dict_val,
- (size_t *)&req.dict.dict_len);
-
- if (ret < 0) {
- gf_log (THIS->name, GF_LOG_ERROR, "failed to serialize dict");
- goto out;
- }
-
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
- GLUSTER_CLI_LOG_ROTATE, NULL,
- this, gf_cli3_1_log_rotate_cbk,
- (xdrproc_t) xdr_gf_cli_req);
-
+ ret = cli_to_glusterd (&req, frame, gf_cli_log_rotate_cbk,
+ (xdrproc_t) xdr_gf_cli_req, dict,
+ GLUSTER_CLI_LOG_ROTATE, this, cli_rpc_prog,
+ NULL);
out:
gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
- if (req.dict.dict_val)
- GF_FREE (req.dict.dict_val);
+ GF_FREE (req.dict.dict_val);
return ret;
}
int32_t
-gf_cli3_1_sync_volume (call_frame_t *frame, xlator_t *this,
+gf_cli_sync_volume (call_frame_t *frame, xlator_t *this,
void *data)
{
int ret = 0;
@@ -2918,35 +3636,27 @@ gf_cli3_1_sync_volume (call_frame_t *frame, xlator_t *this,
}
dict = data;
- ret = dict_allocate_and_serialize (dict,
- &req.dict.dict_val,
- (size_t *)&req.dict.dict_len);
- if (ret < 0) {
- gf_log (THIS->name, GF_LOG_ERROR, "failed to serialize dict");
- goto out;
- }
-
- ret = cli_cmd_submit (&req, frame,
- cli_rpc_prog, GLUSTER_CLI_SYNC_VOLUME,
- NULL, this, gf_cli3_1_sync_volume_cbk,
- (xdrproc_t) xdr_gf_cli_req);
+ ret = cli_to_glusterd (&req, frame, gf_cli_sync_volume_cbk,
+ (xdrproc_t) xdr_gf_cli_req, dict,
+ GLUSTER_CLI_SYNC_VOLUME, this, cli_rpc_prog,
+ NULL);
out:
gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
- if (req.dict.dict_val)
- GF_FREE (req.dict.dict_val);
+ GF_FREE (req.dict.dict_val);
return ret;
}
int32_t
-gf_cli3_1_getspec (call_frame_t *frame, xlator_t *this,
+gf_cli_getspec (call_frame_t *frame, xlator_t *this,
void *data)
{
gf_getspec_req req = {0,};
int ret = 0;
dict_t *dict = NULL;
+ dict_t *op_dict = NULL;
if (!frame || !this || !data) {
ret = -1;
@@ -2959,19 +3669,52 @@ gf_cli3_1_getspec (call_frame_t *frame, xlator_t *this,
if (ret)
goto out;
- ret = cli_cmd_submit (&req, frame, &cli_handshake_prog,
+ op_dict = dict_new ();
+        if (!op_dict) {
+ ret = -1;
+ goto out;
+ }
+
+ // Set the supported min and max op-versions, so glusterd can make a
+ // decision
+ ret = dict_set_int32 (op_dict, "min-op-version", GD_OP_VERSION_MIN);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Failed to set min-op-version"
+ " in request dict");
+ goto out;
+ }
+
+ ret = dict_set_int32 (op_dict, "max-op-version", GD_OP_VERSION_MAX);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Failed to set max-op-version"
+ " in request dict");
+ goto out;
+ }
+
+ ret = dict_allocate_and_serialize (op_dict, &req.xdata.xdata_val,
+ &req.xdata.xdata_len);
+ if (ret < 0) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Failed to serialize dictionary");
+ goto out;
+ }
+
+ ret = cli_cmd_submit (NULL, &req, frame, &cli_handshake_prog,
GF_HNDSK_GETSPEC, NULL,
- this, gf_cli3_1_getspec_cbk,
+ this, gf_cli_getspec_cbk,
(xdrproc_t) xdr_gf_getspec_req);
out:
+ if (op_dict) {
+ dict_unref(op_dict);
+ }
gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
int32_t
-gf_cli3_1_quota (call_frame_t *frame, xlator_t *this,
+gf_cli_quota (call_frame_t *frame, xlator_t *this,
void *data)
{
gf_cli_req req = {{0,}};
@@ -2985,27 +3728,18 @@ gf_cli3_1_quota (call_frame_t *frame, xlator_t *this,
dict = data;
- ret = dict_allocate_and_serialize (dict,
- &req.dict.dict_val,
- (size_t *)&req.dict.dict_len);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "failed to get serialized length of dict");
- goto out;
- }
-
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
- GLUSTER_CLI_QUOTA, NULL,
- this, gf_cli3_1_quota_cbk,
- (xdrproc_t) xdr_gf_cli_req);
+ ret = cli_to_glusterd (&req, frame, gf_cli_quota_cbk,
+ (xdrproc_t) xdr_gf_cli_req, dict,
+ GLUSTER_CLI_QUOTA, this, cli_rpc_prog, NULL);
- GF_FREE (req.dict.dict_val);
out:
+ GF_FREE (req.dict.dict_val);
+
return ret;
}
int32_t
-gf_cli3_1_pmap_b2p (call_frame_t *frame, xlator_t *this, void *data)
+gf_cli_pmap_b2p (call_frame_t *frame, xlator_t *this, void *data)
{
pmap_port_by_brick_req req = {0,};
int ret = 0;
@@ -3022,9 +3756,9 @@ gf_cli3_1_pmap_b2p (call_frame_t *frame, xlator_t *this, void *data)
if (ret)
goto out;
- ret = cli_cmd_submit (&req, frame, &cli_pmap_prog,
+ ret = cli_cmd_submit (NULL, &req, frame, &cli_pmap_prog,
GF_PMAP_PORTBYBRICK, NULL,
- this, gf_cli3_1_pmap_b2p_cbk,
+ this, gf_cli_pmap_b2p_cbk,
(xdrproc_t) xdr_pmap_port_by_brick_req);
out:
@@ -3034,7 +3768,7 @@ out:
}
static int
-gf_cli3_1_fsm_log_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_fsm_log_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf1_cli_fsm_log_rsp rsp = {0,};
@@ -3054,14 +3788,15 @@ gf_cli3_1_fsm_log_cbk (struct rpc_req *req, struct iovec *iov,
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf1_cli_fsm_log_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "error");
+ gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
if (rsp.op_ret) {
if (strcmp (rsp.op_errstr, ""))
- cli_out ("%s", rsp.op_errstr);
- cli_out ("fsm log unsuccessful");
+ cli_err ("%s", rsp.op_errstr);
+ cli_err ("fsm log unsuccessful");
ret = rsp.op_ret;
goto out;
}
@@ -3077,7 +3812,7 @@ gf_cli3_1_fsm_log_cbk (struct rpc_req *req, struct iovec *iov,
&dict);
if (ret) {
- cli_out ("bad response");
+ cli_err ("bad response");
goto out;
}
@@ -3085,7 +3820,7 @@ gf_cli3_1_fsm_log_cbk (struct rpc_req *req, struct iovec *iov,
if (tr_count)
cli_out("number of transitions: %d", tr_count);
else
- cli_out("No transitions");
+ cli_err("No transitions");
for (i = 0; i < tr_count; i++) {
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "log%d-old-state", i);
@@ -3124,7 +3859,7 @@ out:
}
int32_t
-gf_cli3_1_fsm_log (call_frame_t *frame, xlator_t *this, void *data)
+gf_cli_fsm_log (call_frame_t *frame, xlator_t *this, void *data)
{
int ret = -1;
gf1_cli_fsm_log_req req = {0,};
@@ -3136,9 +3871,9 @@ gf_cli3_1_fsm_log (call_frame_t *frame, xlator_t *this, void *data)
if (!frame || !this || !data)
goto out;
req.name = data;
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
+ ret = cli_cmd_submit (NULL, &req, frame, cli_rpc_prog,
GLUSTER_CLI_FSM_LOG, NULL,
- this, gf_cli3_1_fsm_log_cbk,
+ this, gf_cli_fsm_log_cbk,
(xdrproc_t) xdr_gf1_cli_fsm_log_req);
out:
@@ -3148,14 +3883,17 @@ out:
}
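+/* Run gsyncd's config subcommand for the given master/slave pair,
+ * using the session's conf_path when available and falling back to
+ * gsyncd_template.conf under the gsyncd working directory. */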
int
-gf_cli3_1_gsync_config_command (dict_t *dict)
+gf_cli_gsync_config_command (dict_t *dict)
{
runner_t runner = {0,};
char *subop = NULL;
char *gwd = NULL;
char *slave = NULL;
+ char *confpath = NULL;
char *master = NULL;
char *op_name = NULL;
+ int ret = -1;
+ char conf_path[PATH_MAX] = "";
if (dict_get_str (dict, "subop", &subop) != 0)
return -1;
@@ -3174,9 +3912,17 @@ gf_cli3_1_gsync_config_command (dict_t *dict)
if (dict_get_str (dict, "op_name", &op_name) != 0)
op_name = NULL;
+ ret = dict_get_str (dict, "conf_path", &confpath);
+ if (!confpath) {
+ ret = snprintf (conf_path, sizeof(conf_path) - 1,
+ "%s/"GEOREP"/gsyncd_template.conf", gwd);
+ conf_path[ret] = '\0';
+ confpath = conf_path;
+ }
+
runinit (&runner);
runner_add_args (&runner, GSYNCD_PREFIX"/gsyncd", "-c", NULL);
- runner_argprintf (&runner, "%s/"GSYNC_CONF, gwd);
+ runner_argprintf (&runner, "%s", confpath);
if (master)
runner_argprintf (&runner, ":%s", master);
runner_add_arg (&runner, slave);
@@ -3187,65 +3933,505 @@ gf_cli3_1_gsync_config_command (dict_t *dict)
return runner_run (&runner);
}
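+
+/* Map a status column index to the matching member of
+ * gf_gsync_status_t so the columns can be handled generically. */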
+char*
+get_struct_variable (int mem_num, gf_gsync_status_t *sts_val)
+{
+ switch (mem_num) {
+ case 0: return (sts_val->node);
+ case 1: return (sts_val->master);
+ case 2: return (sts_val->brick);
+ case 3: return (sts_val->slave_node);
+ case 4: return (sts_val->worker_status);
+ case 5: return (sts_val->checkpoint_status);
+ case 6: return (sts_val->crawl_status);
+ case 7: return (sts_val->files_syncd);
+ case 8: return (sts_val->files_remaining);
+ case 9: return (sts_val->bytes_remaining);
+ case 10: return (sts_val->purges_remaining);
+ case 11: return (sts_val->total_files_skipped);
+ default:
+ goto out;
+ }
+
+out:
+ return NULL;
+}
+
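+/* Tabular printer for geo-replication status: pads every column to
+ * the widths in @spacing, prints the title row and a hyphen rule,
+ * then one row per node; columns past the basic status fields are
+ * shown only when @is_detail is set. */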
int
-gf_cli3_1_gsync_out_status (dict_t *dict)
+gf_cli_print_status (char **title_values,
+ gf_gsync_status_t **sts_vals,
+ int *spacing, int gsync_count,
+ int number_of_fields, int is_detail)
{
- int gsync_count = 0;
- int i = 0;
- int ret = 0;
- char mst[PATH_MAX] = {0, };
- char slv[PATH_MAX]= {0, };
- char sts[PATH_MAX] = {0, };
- char hyphens[81] = {0, };
- char *mst_val = NULL;
- char *slv_val = NULL;
- char *sts_val = NULL;
-
- cli_out ("%-20s %-50s %-10s", "MASTER", "SLAVE", "STATUS");
-
- for (i=0; i<sizeof(hyphens)-1; i++)
- hyphens[i] = '-';
+ int i = 0;
+ int j = 0;
+ int ret = 0;
+ int status_fields = 6; /* Indexed at 0 */
+ int total_spacing = 0;
+ char **output_values = NULL;
+ char *tmp = NULL;
+ char *hyphens = NULL;
+
+ /* calculating spacing for hyphens */
+ for (i = 0; i < number_of_fields; i++) {
+                /* Suppress the detail columns unless detailed
+                 * output was requested */
+                if ((!is_detail) && (i > status_fields)) {
+                        continue;
+ }
+ spacing[i] += 3; /* Adding extra space to
+ distinguish between fields */
+ total_spacing += spacing[i];
+ }
+ total_spacing += 4; /* For the spacing between the fields */
+
+ /* char pointers for each field */
+ output_values = GF_CALLOC (number_of_fields, sizeof (char *),
+ gf_common_mt_char);
+ if (!output_values) {
+ ret = -1;
+ goto out;
+ }
+ for (i = 0; i < number_of_fields; i++) {
+ output_values[i] = GF_CALLOC (spacing[i] + 1, sizeof (char),
+ gf_common_mt_char);
+ if (!output_values[i]) {
+ ret = -1;
+ goto out;
+ }
+ }
+
+ hyphens = GF_CALLOC (total_spacing + 1, sizeof (char),
+ gf_common_mt_char);
+ if (!hyphens) {
+ ret = -1;
+ goto out;
+ }
+
+ cli_out (" ");
+ /* setting the title "NODE", "MASTER", etc. from title_values[]
+ and printing the same */
+ for (j = 0; j < number_of_fields; j++) {
+ if ((!is_detail) && (j > status_fields)) {
+ /* Suppressing detailed output for
+ * status */
+ output_values[j][0] = '\0';
+ continue;
+ }
+ memset (output_values[j], ' ', spacing[j]);
+ memcpy (output_values[j], title_values[j],
+ strlen(title_values[j]));
+ output_values[j][spacing[j]] = '\0';
+ }
+ cli_out ("%s %s %s %s %s %s %s %s %s %s %s %s",
+ output_values[0], output_values[1],
+ output_values[2], output_values[3],
+ output_values[4], output_values[5],
+ output_values[6], output_values[7],
+ output_values[8], output_values[9],
+ output_values[10], output_values[11]);
+
+ /* setting and printing the hyphens */
+ memset (hyphens, '-', total_spacing);
+ hyphens[total_spacing] = '\0';
cli_out ("%s", hyphens);
+ for (i = 0; i < gsync_count; i++) {
+ for (j = 0; j < number_of_fields; j++) {
+ if ((!is_detail) && (j > status_fields)) {
+ /* Suppressing detailed output for
+ * status */
+ output_values[j][0] = '\0';
+ continue;
+ }
+ tmp = get_struct_variable(j, sts_vals[i]);
+ if (!tmp) {
+                                gf_log ("cli", GF_LOG_ERROR,
+ "struct member empty.");
+ ret = -1;
+ goto out;
+ }
+ memset (output_values[j], ' ', spacing[j]);
+ memcpy (output_values[j], tmp, strlen (tmp));
+ output_values[j][spacing[j]] = '\0';
+ }
+
+ cli_out ("%s %s %s %s %s %s %s %s %s %s %s %s",
+ output_values[0], output_values[1],
+ output_values[2], output_values[3],
+ output_values[4], output_values[5],
+ output_values[6], output_values[7],
+ output_values[8], output_values[9],
+ output_values[10], output_values[11]);
+ }
+
+out:
+ if (output_values) {
+ for (i = 0; i < number_of_fields; i++) {
+ if (output_values[i])
+ GF_FREE (output_values[i]);
+ }
+ GF_FREE (output_values);
+ }
+
+ if (hyphens)
+ GF_FREE (hyphens);
+
+ return ret;
+}
+
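+/* Fetch one gf_gsync_status_t per node from the dict (keys
+ * "status_value<i>") and record the widest value seen for each
+ * column in spacing[]. */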
+int
+gf_cli_read_status_data (dict_t *dict,
+ gf_gsync_status_t **sts_vals,
+ int *spacing, int gsync_count,
+ int number_of_fields)
+{
+ char *tmp = NULL;
+ char sts_val_name[PATH_MAX] = "";
+ int ret = 0;
+ int i = 0;
+ int j = 0;
+
+ /* Storing per node status info in each object */
+ for (i = 0; i < gsync_count; i++) {
+ snprintf (sts_val_name, sizeof(sts_val_name), "status_value%d", i);
+
+ /* Fetching the values from dict, and calculating
+ the max length for each field */
+ ret = dict_get_bin (dict, sts_val_name, (void **)&(sts_vals[i]));
+ if (ret)
+ goto out;
+
+ for (j = 0; j < number_of_fields; j++) {
+ tmp = get_struct_variable(j, sts_vals[i]);
+ if (!tmp) {
+ gf_log ("", GF_LOG_ERROR,
+ "struct member empty.");
+ ret = -1;
+ goto out;
+ }
+ if (strlen (tmp) > spacing[j])
+ spacing[j] = strlen (tmp);
+ }
+ }
+
+out:
+ return ret;
+}
+
+int
+gf_cli_gsync_status_output (dict_t *dict, gf_boolean_t is_detail)
+{
+ int gsync_count = 0;
+ int i = 0;
+ int ret = 0;
+ int spacing[13] = {0};
+ int num_of_fields = 12;
+ char errmsg[1024] = "";
+ char *master = NULL;
+ char *slave = NULL;
+ char *title_values[] = {"MASTER NODE", "MASTER VOL",
+ "MASTER BRICK", "SLAVE",
+ "STATUS", "CHECKPOINT STATUS",
+ "CRAWL STATUS", "FILES SYNCD",
+ "FILES PENDING", "BYTES PENDING",
+ "DELETES PENDING", "FILES SKIPPED"};
+ gf_gsync_status_t **sts_vals = NULL;
+
+ /* Checks if any session is active or not */
ret = dict_get_int32 (dict, "gsync-count", &gsync_count);
if (ret) {
- gf_log ("cli", GF_LOG_INFO, "No active geo-replication sessions"
- "present for the selected");
+ ret = dict_get_str (dict, "master", &master);
+
+ ret = dict_get_str (dict, "slave", &slave);
+
+ if (master) {
+ if (slave)
+ snprintf (errmsg, sizeof(errmsg), "No active "
+ "geo-replication sessions between %s"
+ " and %s", master, slave);
+ else
+ snprintf (errmsg, sizeof(errmsg), "No active "
+ "geo-replication sessions for %s",
+ master);
+ } else
+ snprintf (errmsg, sizeof(errmsg), "No active "
+ "geo-replication sessions");
+
+ gf_log ("cli", GF_LOG_INFO, "%s", errmsg);
+ cli_out ("%s", errmsg);
ret = 0;
goto out;
}
- for (i = 1; i <= gsync_count; i++) {
- snprintf (mst, sizeof(mst), "master%d", i);
- snprintf (slv, sizeof(slv), "slave%d", i);
- snprintf (sts, sizeof(sts), "status%d", i);
+ for (i = 0; i < num_of_fields; i++)
+ spacing[i] = strlen(title_values[i]);
- ret = dict_get_str (dict, mst, &mst_val);
- if (ret)
+ /* gsync_count is the number of nodes reporting status; each
+ * sts_vals[] entry will hold one node's output */
+ sts_vals = GF_CALLOC (gsync_count, sizeof (gf_gsync_status_t *),
+ gf_common_mt_char);
+ if (!sts_vals) {
+ ret = -1;
+ goto out;
+ }
+ /* Each sts_vals[] slot is filled in by gf_cli_read_status_data(),
+ * which points it at a gf_gsync_status_t owned by the dict via
+ * dict_get_bin(); the slots therefore need no allocation here. */
- goto out;
- ret = dict_get_str (dict, slv, &slv_val);
- if (ret)
- goto out;
+ ret = gf_cli_read_status_data (dict, sts_vals, spacing,
+ gsync_count, num_of_fields);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to read status data");
+ goto out;
+ }
- ret = dict_get_str (dict, sts, &sts_val);
- if (ret)
- goto out;
+ ret = gf_cli_print_status (title_values, sts_vals, spacing, gsync_count,
+ num_of_fields, is_detail);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to print status output");
+ goto out;
+ }
- cli_out ("%-20s %-50s %-10s", mst_val,
- slv_val, sts_val);
+out:
+ if (sts_vals)
+ GF_FREE (sts_vals);
+ return ret;
+}
+
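+/* Concatenate the "output_<i>" strings from the dict, one per line,
+ * into <glusterd_workdir>/geo-replication/common_secret.pem.pub. */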
+static int32_t
+write_contents_to_common_pem_file (dict_t *dict, int output_count)
+{
+ char *workdir = NULL;
+ char common_pem_file[PATH_MAX] = "";
+ char *output = NULL;
+ char output_name[PATH_MAX] = "";
+ int bytes_written = 0;
+ int fd = -1;
+ int ret = -1;
+ int i = -1;
+
+ ret = dict_get_str (dict, "glusterd_workdir", &workdir);
+ if (ret || !workdir) {
+ gf_log ("", GF_LOG_ERROR, "Unable to fetch workdir");
+ ret = -1;
+ goto out;
}
- out:
+ snprintf (common_pem_file, sizeof(common_pem_file),
+ "%s/geo-replication/common_secret.pem.pub",
+ workdir);
+
+ unlink (common_pem_file);
+
+ fd = open (common_pem_file, O_WRONLY | O_CREAT, 0600);
+ if (fd == -1) {
+ gf_log ("", GF_LOG_ERROR, "Failed to open %s"
+ " Error : %s", common_pem_file,
+ strerror (errno));
+ ret = -1;
+ goto out;
+ }
+
+ for (i = 1; i <= output_count; i++) {
+ memset (output_name, '\0', sizeof (output_name));
+ snprintf (output_name, sizeof (output_name),
+ "output_%d", i);
+ ret = dict_get_str (dict, output_name, &output);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Failed to get %s.",
+ output_name);
+ cli_out ("Unable to fetch output.");
+ }
+ if (output) {
+ bytes_written = write (fd, output, strlen(output));
+ if (bytes_written != strlen(output)) {
+ gf_log ("", GF_LOG_ERROR, "Failed to write "
+ "to %s", common_pem_file);
+ ret = -1;
+ goto out;
+ }
+ /* Adding the new line character */
+ bytes_written = write (fd, "\n", strlen("\n"));
+ if (bytes_written != strlen("\n")) {
+ gf_log ("", GF_LOG_ERROR,
+ "Failed to add new line char");
+ ret = -1;
+ goto out;
+ }
+ output = NULL;
+ }
+ }
+
+ cli_out ("Common secret pub file present at %s", common_pem_file);
+ ret = 0;
+out:
+ if (fd != -1)
+ close (fd);
+
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
return ret;
+}
+
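+/* sys-exec callback: print each "output_<i>" string from the response
+ * dict; for the gsec_create command the lines are written to the
+ * common pem file instead. */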
+int
+gf_cli_sys_exec_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ int ret = -1;
+ int output_count = -1;
+ int i = -1;
+ char *output = NULL;
+ char *command = NULL;
+ char output_name[PATH_MAX] = "";
+ gf_cli_rsp rsp = {0, };
+ dict_t *dict = NULL;
+ call_frame_t *frame = NULL;
+
+ if (req->rpc_status == -1) {
+ ret = -1;
+ goto out;
+ }
+
+ frame = myframe;
+
+ ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
+ goto out;
+ }
+
+ dict = dict_new ();
+
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &dict);
+
+ if (ret)
+ goto out;
+
+ if (rsp.op_ret) {
+ cli_err ("%s", rsp.op_errstr ? rsp.op_errstr :
+ "Command failed.");
+ ret = rsp.op_ret;
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, "output_count", &output_count);
+ if (ret) {
+ cli_out ("Command executed successfully.");
+ ret = 0;
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "command", &command);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Unable to get command from dict");
+ goto out;
+ }
+
+ if (!strcmp (command, "gsec_create")) {
+ ret = write_contents_to_common_pem_file (dict, output_count);
+ if (!ret)
+ goto out;
+ }
+
+ for (i = 1; i <= output_count; i++) {
+ memset (output_name, '\0', sizeof (output_name));
+ snprintf (output_name, sizeof (output_name),
+ "output_%d", i);
+ ret = dict_get_str (dict, output_name, &output);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Failed to get %s.",
+ output_name);
+ cli_out ("Unable to fetch output.");
+ }
+ if (output) {
+ cli_out ("%s", output);
+ output = NULL;
+ }
+ }
+
+ ret = 0;
+out:
+ if (dict)
+ dict_unref (dict);
+ cli_cmd_broadcast_response (ret);
+
+ free (rsp.dict.dict_val);
+
+ return ret;
+}
+
+int
+gf_cli_copy_file_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ int ret = -1;
+ gf_cli_rsp rsp = {0, };
+ dict_t *dict = NULL;
+ call_frame_t *frame = NULL;
+
+ if (req->rpc_status == -1) {
+ ret = -1;
+ goto out;
+ }
+
+ frame = myframe;
+
+ ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
+ goto out;
+ }
+
+ dict = dict_new ();
+
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &dict);
+
+ if (ret)
+ goto out;
+
+ if (rsp.op_ret) {
+ cli_err ("%s", rsp.op_errstr ? rsp.op_errstr :
+ "Copy unsuccessful");
+ ret = rsp.op_ret;
+ goto out;
+ }
+
+ cli_out ("Successfully copied file.");
+
+out:
+ if (dict)
+ dict_unref (dict);
+ cli_cmd_broadcast_response (ret);
+
+ free (rsp.dict.dict_val);
+
+ return ret;
}
int
-gf_cli3_1_gsync_set_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_gsync_set_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
int ret = -1;
@@ -3254,17 +4440,22 @@ gf_cli3_1_gsync_set_cbk (struct rpc_req *req, struct iovec *iov,
char *gsync_status = NULL;
char *master = NULL;
char *slave = NULL;
- int32_t type = 0;
+ int32_t type = 0;
+ call_frame_t *frame = NULL;
+ gf_boolean_t status_detail = _gf_false;
+
if (req->rpc_status == -1) {
ret = -1;
goto out;
}
+ frame = myframe;
+
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR,
- "Unable to get response structure");
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
@@ -3280,19 +4471,17 @@ gf_cli3_1_gsync_set_cbk (struct rpc_req *req, struct iovec *iov,
if (ret)
goto out;
-#if (HAVE_LIB_XML)
if (global_state->mode & GLUSTER_MODE_XML) {
- ret = cli_xml_output_dict ("volGeoRep", dict, rsp.op_ret,
- rsp.op_errno, rsp.op_errstr);
+ ret = cli_xml_output_vol_gsync (dict, rsp.op_ret, rsp.op_errno,
+ rsp.op_errstr);
if (ret)
gf_log ("cli", GF_LOG_ERROR,
"Error outputting to xml");
goto out;
}
-#endif
if (rsp.op_ret) {
- cli_out ("%s", rsp.op_errstr ? rsp.op_errstr :
+ cli_err ("%s", rsp.op_errstr ? rsp.op_errstr :
GEOREP" command unsuccessful");
ret = rsp.op_ret;
goto out;
@@ -3306,7 +4495,7 @@ gf_cli3_1_gsync_set_cbk (struct rpc_req *req, struct iovec *iov,
ret = dict_get_int32 (dict, "type", &type);
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "failed to get type");
+ gf_log (frame->this->name, GF_LOG_ERROR, "failed to get type");
goto out;
}
@@ -3326,29 +4515,53 @@ gf_cli3_1_gsync_set_cbk (struct rpc_req *req, struct iovec *iov,
break;
case GF_GSYNC_OPTION_TYPE_CONFIG:
- ret = gf_cli3_1_gsync_config_command (dict);
+ ret = gf_cli_gsync_config_command (dict);
break;
case GF_GSYNC_OPTION_TYPE_STATUS:
- ret = gf_cli3_1_gsync_out_status (dict);
- goto out;
+ status_detail = dict_get_str_boolean (dict,
+ "status-detail",
+ _gf_false);
+ ret = gf_cli_gsync_status_output (dict, status_detail);
+ break;
+
+ case GF_GSYNC_OPTION_TYPE_DELETE:
+ if (dict_get_str (dict, "master", &master) != 0)
+ master = "???";
+ if (dict_get_str (dict, "slave", &slave) != 0)
+ slave = "???";
+ cli_out ("Deleting " GEOREP " session between %s & %s"
+ " has been successful", master, slave);
+ break;
+
+ case GF_GSYNC_OPTION_TYPE_CREATE:
+ if (dict_get_str (dict, "master", &master) != 0)
+ master = "???";
+ if (dict_get_str (dict, "slave", &slave) != 0)
+ slave = "???";
+ cli_out ("Creating " GEOREP " session between %s & %s"
+ " has been successful", master, slave);
+ break;
+
default:
cli_out (GEOREP" command executed successfully");
}
out:
-
+ if (dict)
+ dict_unref (dict);
cli_cmd_broadcast_response (ret);
- if (rsp.dict.dict_val)
- free (rsp.dict.dict_val);
+ free (rsp.dict.dict_val);
return ret;
}
int32_t
-gf_cli3_1_gsync_set (call_frame_t *frame, xlator_t *this,
- void *data)
+gf_cli_sys_exec (call_frame_t *frame, xlator_t *this, void *data)
{
int ret = 0;
dict_t *dict = NULL;
@@ -3356,29 +4569,67 @@ gf_cli3_1_gsync_set (call_frame_t *frame, xlator_t *this,
if (!frame || !this || !data) {
ret = -1;
+ gf_log ("cli", GF_LOG_ERROR, "Invalid data");
goto out;
}
dict = data;
- ret = dict_allocate_and_serialize (dict,
- &req.dict.dict_val,
- (size_t *) &req.dict.dict_len);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "failed to serialize the data");
+ ret = cli_to_glusterd (&req, frame, gf_cli_sys_exec_cbk,
+ (xdrproc_t) xdr_gf_cli_req, dict,
+ GLUSTER_CLI_SYS_EXEC, this, cli_rpc_prog,
+ NULL);
+out:
+ GF_FREE (req.dict.dict_val);
+ return ret;
+}
+
+int32_t
+gf_cli_copy_file (call_frame_t *frame, xlator_t *this, void *data)
+{
+ int ret = 0;
+ dict_t *dict = NULL;
+ gf_cli_req req = {{0,}};
+
+ if (!frame || !this || !data) {
+ ret = -1;
+ gf_log ("cli", GF_LOG_ERROR, "Invalid data");
goto out;
}
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
- GLUSTER_CLI_GSYNC_SET, NULL,
- this, gf_cli3_1_gsync_set_cbk,
- (xdrproc_t) xdr_gf_cli_req);
+ dict = data;
+ ret = cli_to_glusterd (&req, frame, gf_cli_copy_file_cbk,
+ (xdrproc_t) xdr_gf_cli_req, dict,
+ GLUSTER_CLI_COPY_FILE, this, cli_rpc_prog,
+ NULL);
out:
- if (req.dict.dict_val)
- GF_FREE (req.dict.dict_val);
+ GF_FREE (req.dict.dict_val);
+ return ret;
+}
+
+int32_t
+gf_cli_gsync_set (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ int ret = 0;
+ dict_t *dict = NULL;
+ gf_cli_req req = {{0,}};
+
+ if (!frame || !this || !data) {
+ ret = -1;
+ goto out;
+ }
+
+ dict = data;
+
+ ret = cli_to_glusterd (&req, frame, gf_cli_gsync_set_cbk,
+ (xdrproc_t) xdr_gf_cli_req, dict,
+ GLUSTER_CLI_GSYNC_SET, this, cli_rpc_prog,
+ NULL);
+
+out:
+ GF_FREE (req.dict.dict_val);
return ret;
}
@@ -3411,7 +4662,6 @@ cmd_profile_volume_brick_out (dict_t *dict, int count, int interval)
uint64_t sec = 0;
uint64_t r_count = 0;
uint64_t w_count = 0;
- char *brick = NULL;
uint64_t rb_counts[32] = {0};
uint64_t wb_counts[32] = {0};
cli_profile_info_t profile_info[GF_FOP_MAXVALUE] = {{0}};
@@ -3424,9 +4674,6 @@ cmd_profile_volume_brick_out (dict_t *dict, int count, int interval)
int ret = 0;
double total_percentage_latency = 0;
- memset (key, 0, sizeof (key));
- snprintf (key, sizeof (key), "%d-brick", count);
- ret = dict_get_str (dict, key, &brick);
for (i = 0; i < 32; i++) {
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "%d-%d-read-%d", count,
@@ -3461,7 +4708,7 @@ cmd_profile_volume_brick_out (dict_t *dict, int count, int interval)
snprintf (key, sizeof (key), "%d-%d-%d-maxlatency", count,
interval, i);
ret = dict_get_double (dict, key, &profile_info[i].max_latency);
- profile_info[i].fop_name = gf_fop_list[i];
+ profile_info[i].fop_name = (char *)gf_fop_list[i];
total_percentage_latency +=
(profile_info[i].fop_hits * profile_info[i].avg_latency);
@@ -3489,7 +4736,6 @@ cmd_profile_volume_brick_out (dict_t *dict, int count, int interval)
ret = dict_get_uint64 (dict, key, &w_count);
if (ret == 0) {
- cli_out ("Brick: %s", brick);
}
if (interval == -1)
@@ -3574,7 +4820,7 @@ cmd_profile_volume_brick_out (dict_t *dict, int count, int interval)
}
int32_t
-gf_cli3_1_profile_volume_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_profile_volume_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf_cli_rsp rsp = {0,};
@@ -3586,6 +4832,11 @@ gf_cli3_1_profile_volume_cbk (struct rpc_req *req, struct iovec *iov,
int i = 1;
int32_t brick_count = 0;
char *volname = NULL;
+ char *brick = NULL;
+ char str[1024] = {0,};
+ int stats_cleared = 0;
+ gf1_cli_info_op info_op = GF_CLI_INFO_NONE;
+
if (-1 == req->rpc_status) {
goto out;
}
@@ -3593,7 +4844,8 @@ gf_cli3_1_profile_volume_cbk (struct rpc_req *req, struct iovec *iov,
gf_log ("cli", GF_LOG_DEBUG, "Received resp to profile");
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "error");
+ gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
@@ -3616,7 +4868,6 @@ gf_cli3_1_profile_volume_cbk (struct rpc_req *req, struct iovec *iov,
dict->extra_stdfree = rsp.dict.dict_val;
}
-#if (HAVE_LIB_XML)
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_vol_profile (dict, rsp.op_ret,
rsp.op_errno,
@@ -3626,7 +4877,6 @@ gf_cli3_1_profile_volume_cbk (struct rpc_req *req, struct iovec *iov,
"Error outputting to xml");
goto out;
}
-#endif
ret = dict_get_str (dict, "volname", &volname);
if (ret)
@@ -3637,7 +4887,7 @@ gf_cli3_1_profile_volume_cbk (struct rpc_req *req, struct iovec *iov,
goto out;
if (rsp.op_ret && strcmp (rsp.op_errstr, "")) {
- cli_out ("%s", rsp.op_errstr);
+ cli_err ("%s", rsp.op_errstr);
} else {
switch (op) {
case GF_CLI_STATS_START:
@@ -3665,30 +4915,62 @@ gf_cli3_1_profile_volume_cbk (struct rpc_req *req, struct iovec *iov,
goto out;
}
- if (op != GF_CLI_STATS_INFO) {
+ if (GF_CLI_STATS_INFO != op) {
ret = 0;
goto out;
}
+ ret = dict_get_int32 (dict, "info-op", (int32_t*)&info_op);
+ if (ret)
+ goto out;
+
ret = dict_get_int32 (dict, "count", &brick_count);
if (ret)
goto out;
if (!brick_count) {
- cli_out ("All bricks of volume %s are down.", volname);
+ cli_err ("All bricks of volume %s are down.", volname);
goto out;
}
while (i <= brick_count) {
- snprintf (key, sizeof (key), "%d-cumulative", i);
- ret = dict_get_int32 (dict, key, &interval);
- if (ret == 0) {
- cmd_profile_volume_brick_out (dict, i, interval);
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "%d-brick", i);
+ ret = dict_get_str (dict, key, &brick);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Couldn't get brick name");
+ goto out;
}
- snprintf (key, sizeof (key), "%d-interval", i);
- ret = dict_get_int32 (dict, key, &interval);
- if (ret == 0) {
- cmd_profile_volume_brick_out (dict, i, interval);
+
+ ret = dict_get_str_boolean (dict, "nfs", _gf_false);
+
+ if (ret)
+ snprintf (str, sizeof (str), "NFS Server : %s", brick);
+ else
+ snprintf (str, sizeof (str), "Brick: %s", brick);
+ cli_out ("%s", str);
+ memset (str, '-', strlen (str));
+ cli_out ("%s", str);
+
+ if (GF_CLI_INFO_CLEAR == info_op) {
+ snprintf (key, sizeof (key), "%d-stats-cleared", i);
+ ret = dict_get_int32 (dict, key, &stats_cleared);
+ if (ret)
+ goto out;
+ cli_out (stats_cleared ? "Cleared stats." :
+ "Failed to clear stats.");
+ } else {
+ snprintf (key, sizeof (key), "%d-cumulative", i);
+ ret = dict_get_int32 (dict, key, &interval);
+ if (ret == 0)
+ cmd_profile_volume_brick_out (dict, i,
+ interval);
+
+ snprintf (key, sizeof (key), "%d-interval", i);
+ ret = dict_get_int32 (dict, key, &interval);
+ if (ret == 0)
+ cmd_profile_volume_brick_out (dict, i,
+ interval);
}
i++;
}
@@ -3697,14 +4979,13 @@ gf_cli3_1_profile_volume_cbk (struct rpc_req *req, struct iovec *iov,
out:
if (dict)
dict_unref (dict);
- if (rsp.op_errstr)
- free (rsp.op_errstr);
+ free (rsp.op_errstr);
cli_cmd_broadcast_response (ret);
return ret;
}
int32_t
-gf_cli3_1_profile_volume (call_frame_t *frame, xlator_t *this, void *data)
+gf_cli_profile_volume (call_frame_t *frame, xlator_t *this, void *data)
{
int ret = -1;
gf_cli_req req = {{0,}};
@@ -3718,58 +4999,47 @@ gf_cli3_1_profile_volume (call_frame_t *frame, xlator_t *this, void *data)
goto out;
dict = data;
- ret = dict_allocate_and_serialize (dict,
- &req.dict.dict_val,
- (size_t *)&req.dict.dict_len);
-
- if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "failed to serialize the data");
-
- goto out;
- }
-
-
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
- GLUSTER_CLI_PROFILE_VOLUME, NULL,
- this, gf_cli3_1_profile_volume_cbk,
- (xdrproc_t) xdr_gf_cli_req);
+ ret = cli_to_glusterd (&req, frame, gf_cli_profile_volume_cbk,
+ (xdrproc_t) xdr_gf_cli_req, dict,
+ GLUSTER_CLI_PROFILE_VOLUME, this, cli_rpc_prog,
+ NULL);
out:
gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
- if (req.dict.dict_val)
- GF_FREE (req.dict.dict_val);
+ GF_FREE (req.dict.dict_val);
return ret;
}
int32_t
-gf_cli3_1_top_volume_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_top_volume_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf_cli_rsp rsp = {0,};
int ret = -1;
- dict_t *dict = NULL;
- gf1_cli_stats_op op = GF_CLI_STATS_NONE;
+ dict_t *dict = NULL;
+ gf1_cli_stats_op op = GF_CLI_STATS_NONE;
char key[256] = {0};
int i = 0;
int32_t brick_count = 0;
char brick[1024];
int32_t members = 0;
- char *filename;
- char *bricks;
- uint64_t value = 0;
+ char *filename;
+ char *bricks;
+ uint64_t value = 0;
int32_t j = 0;
gf1_cli_top_op top_op = GF_CLI_TOP_NONE;
uint64_t nr_open = 0;
uint64_t max_nr_open = 0;
double throughput = 0;
double time = 0;
- long int time_sec = 0;
- long int time_usec = 0;
- struct tm *tm = NULL;
+ int32_t time_sec = 0;
+ long int time_usec = 0;
char timestr[256] = {0, };
- char *openfd_str = NULL;
+ char *openfd_str = NULL;
+ gf_boolean_t nfs = _gf_false;
+ gf_boolean_t clear_stats = _gf_false;
+ int stats_cleared = 0;
if (-1 == req->rpc_status) {
goto out;
@@ -3778,14 +5048,15 @@ gf_cli3_1_top_volume_cbk (struct rpc_req *req, struct iovec *iov,
gf_log ("cli", GF_LOG_DEBUG, "Received resp to top");
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "Unable to decode response");
+ gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
if (rsp.op_ret) {
if (strcmp (rsp.op_errstr, ""))
- cli_out ("%s", rsp.op_errstr);
- cli_out ("volume top unsuccessful");
+ cli_err ("%s", rsp.op_errstr);
+ cli_err ("volume top unsuccessful");
ret = rsp.op_ret;
goto out;
}
@@ -3814,7 +5085,6 @@ gf_cli3_1_top_volume_cbk (struct rpc_req *req, struct iovec *iov,
goto out;
}
-#if (HAVE_LIB_XML)
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_vol_top (dict, rsp.op_ret,
rsp.op_errno,
@@ -3825,7 +5095,6 @@ gf_cli3_1_top_volume_cbk (struct rpc_req *req, struct iovec *iov,
}
goto out;
}
-#endif
ret = dict_get_int32 (dict, "count", &brick_count);
if (ret)
@@ -3834,13 +5103,35 @@ gf_cli3_1_top_volume_cbk (struct rpc_req *req, struct iovec *iov,
ret = dict_get_int32 (dict, key, (int32_t*)&top_op);
if (ret)
goto out;
+
+ clear_stats = dict_get_str_boolean (dict, "clear-stats", _gf_false);
+
while (i < brick_count) {
i++;
snprintf (brick, sizeof (brick), "%d-brick", i);
ret = dict_get_str (dict, brick, &bricks);
if (ret)
goto out;
- cli_out ("Brick: %s", bricks);
+
+ nfs = dict_get_str_boolean (dict, "nfs", _gf_false);
+
+ if (clear_stats) {
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "%d-stats-cleared", i);
+ ret = dict_get_int32 (dict, key, &stats_cleared);
+ if (ret)
+ goto out;
+ cli_out (stats_cleared ? "Cleared stats for %s %s" :
+ "Failed to clear stats for %s %s",
+ nfs ? "NFS server on" : "brick", bricks);
+ continue;
+ }
+
+ if (nfs)
+ cli_out ("NFS Server : %s", bricks);
+ else
+ cli_out ("Brick: %s", bricks);
+
snprintf(key, sizeof (key), "%d-members", i);
ret = dict_get_int32 (dict, key, &members);
@@ -3917,12 +5208,10 @@ gf_cli3_1_top_volume_cbk (struct rpc_req *req, struct iovec *iov,
ret = dict_get_int32 (dict, key, (int32_t *)&time_usec);
if (ret)
goto out;
- tm = localtime (&time_sec);
- if (!tm)
- goto out;
- strftime (timestr, 256, "%Y-%m-%d %H:%M:%S", tm);
- snprintf (timestr + strlen (timestr), 256 - strlen (timestr),
- ".%"GF_PRI_SUSECONDS, time_usec);
+ gf_time_fmt (timestr, sizeof timestr,
+ time_sec, gf_timefmt_FT);
+ snprintf (timestr + strlen (timestr), sizeof timestr - strlen (timestr),
+ ".%ld", time_usec);
if (strlen (filename) < VOL_TOP_PERF_FILENAME_DEF_WIDTH)
cli_out ("%*"PRIu64" %-*s %-*s",
VOL_TOP_PERF_SPEED_WIDTH,
@@ -3953,13 +5242,12 @@ out:
if (dict)
dict_unref (dict);
- if (rsp.dict.dict_val)
- free (rsp.dict.dict_val);
+ free (rsp.dict.dict_val);
return ret;
}
int32_t
-gf_cli3_1_top_volume (call_frame_t *frame, xlator_t *this, void *data)
+gf_cli_top_volume (call_frame_t *frame, xlator_t *this, void *data)
{
int ret = -1;
gf_cli_req req = {{0,}};
@@ -3973,32 +5261,20 @@ gf_cli3_1_top_volume (call_frame_t *frame, xlator_t *this, void *data)
goto out;
dict = data;
- ret = dict_allocate_and_serialize (dict,
- &req.dict.dict_val,
- (size_t *)&req.dict.dict_len);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "failed to serialize the data");
-
- goto out;
- }
-
-
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
- GLUSTER_CLI_PROFILE_VOLUME, NULL,
- this, gf_cli3_1_top_volume_cbk,
- (xdrproc_t) xdr_gf_cli_req);
+ ret = cli_to_glusterd (&req, frame, gf_cli_top_volume_cbk,
+ (xdrproc_t) xdr_gf_cli_req, dict,
+ GLUSTER_CLI_PROFILE_VOLUME, this, cli_rpc_prog,
+ NULL);
out:
gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
- if (req.dict.dict_val)
- GF_FREE (req.dict.dict_val);
+ GF_FREE (req.dict.dict_val);
return ret;
}
int
-gf_cli3_1_getwd_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_getwd_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf1_cli_getwd_rsp rsp = {0,};
@@ -4009,8 +5285,15 @@ gf_cli3_1_getwd_cbk (struct rpc_req *req, struct iovec *iov,
}
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf1_cli_getwd_rsp);
- if (ret < 0 || rsp.op_ret == -1) {
- gf_log ("", GF_LOG_ERROR, "error");
+ if (ret < 0) {
+ gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
+ goto out;
+ }
+
+ if (rsp.op_ret == -1) {
+ cli_err ("getwd failed");
+ ret = rsp.op_ret;
goto out;
}
@@ -4026,7 +5309,7 @@ out:
}
int32_t
-gf_cli3_1_getwd (call_frame_t *frame, xlator_t *this, void *data)
+gf_cli_getwd (call_frame_t *frame, xlator_t *this, void *data)
{
int ret = -1;
gf1_cli_getwd_req req = {0,};
@@ -4037,9 +5320,9 @@ gf_cli3_1_getwd (call_frame_t *frame, xlator_t *this, void *data)
if (!frame || !this)
goto out;
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
+ ret = cli_cmd_submit (NULL, &req, frame, cli_rpc_prog,
GLUSTER_CLI_GETWD, NULL,
- this, gf_cli3_1_getwd_cbk,
+ this, gf_cli_getwd_cbk,
(xdrproc_t) xdr_gf1_cli_getwd_req);
out:
@@ -4059,6 +5342,8 @@ cli_print_volume_status_mempool (dict_t *dict, char *prefix)
uint64_t paddedsizeof = 0;
uint64_t alloccount = 0;
int32_t maxalloc = 0;
+ uint64_t pool_misses = 0;
+ int32_t maxstdalloc = 0;
char key[1024] = {0,};
int i = 0;
@@ -4072,10 +5357,12 @@ cli_print_volume_status_mempool (dict_t *dict, char *prefix)
goto out;
cli_out ("Mempool Stats\n-------------");
- cli_out ("%-30s %9s %9s %12s %10s %8s", "Name", "HotCount","ColdCount",
- "PaddedSizeof", "AllocCount", "MaxAlloc");
- cli_out ("%-30s %9s %9s %12s %10s %8s", "----", "--------", "---------",
- "------------", "----------", "--------");
+ cli_out ("%-30s %9s %9s %12s %10s %8s %8s %12s", "Name", "HotCount",
+ "ColdCount", "PaddedSizeof", "AllocCount", "MaxAlloc",
+ "Misses", "Max-StdAlloc");
+ cli_out ("%-30s %9s %9s %12s %10s %8s %8s %12s", "----", "--------",
+ "---------", "------------", "----------",
+ "--------", "--------", "------------");
for (i = 0; i < mempool_count; i++) {
memset (key, 0, sizeof (key));
@@ -4115,9 +5402,21 @@ cli_print_volume_status_mempool (dict_t *dict, char *prefix)
if (ret)
goto out;
- cli_out ("%-30s %9d %9d %12"PRIu64" %10"PRIu64" %8d", name,
- hotcount, coldcount, paddedsizeof, alloccount,
- maxalloc);
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "%s.pool%d.max-stdalloc", prefix, i);
+ ret = dict_get_int32 (dict, key, &maxstdalloc);
+ if (ret)
+ goto out;
+
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "%s.pool%d.pool-misses", prefix, i);
+ ret = dict_get_uint64 (dict, key, &pool_misses);
+ if (ret)
+ goto out;
+
+ cli_out ("%-30s %9d %9d %12"PRIu64" %10"PRIu64" %8d %8"PRIu64
+ " %12d", name, hotcount, coldcount, paddedsizeof,
+ alloccount, maxalloc, pool_misses, maxstdalloc);
}
out:
@@ -4126,7 +5425,7 @@ out:
}
void
-cli_print_volume_status_mem (dict_t *dict)
+cli_print_volume_status_mem (dict_t *dict, gf_boolean_t notbrick)
{
int ret = -1;
char *volname = NULL;
@@ -4134,7 +5433,9 @@ cli_print_volume_status_mem (dict_t *dict)
char *path = NULL;
int online = -1;
char key[1024] = {0,};
- int brick_count = 0;
+ int brick_index_max = -1;
+ int other_count = 0;
+ int index_max = 0;
int val = 0;
int i = 0;
@@ -4145,24 +5446,32 @@ cli_print_volume_status_mem (dict_t *dict)
goto out;
cli_out ("Memory status for volume : %s", volname);
- ret = dict_get_int32 (dict, "count", &brick_count);
+ ret = dict_get_int32 (dict, "brick-index-max", &brick_index_max);
+ if (ret)
+ goto out;
+ ret = dict_get_int32 (dict, "other-count", &other_count);
if (ret)
goto out;
- for (i = 0; i < brick_count; i++) {
+ index_max = brick_index_max + other_count;
+
+ for (i = 0; i <= index_max; i++) {
cli_out ("----------------------------------------------");
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "brick%d.hostname", i);
ret = dict_get_str (dict, key, &hostname);
if (ret)
- goto out;
+ continue;
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "brick%d.path", i);
ret = dict_get_str (dict, key, &path);
if (ret)
- goto out;
- cli_out ("Brick : %s:%s", hostname, path);
+ continue;
+ if (notbrick)
+ cli_out ("%s : %s", hostname, path);
+ else
+ cli_out ("Brick : %s:%s", hostname, path);
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "brick%d.status", i);
@@ -4170,7 +5479,10 @@ cli_print_volume_status_mem (dict_t *dict)
if (ret)
goto out;
if (!online) {
- cli_out ("Brick is offline");
+ if (notbrick)
+ cli_out ("%s is offline", hostname);
+ else
+ cli_out ("Brick is offline");
continue;
}
@@ -4257,11 +5569,13 @@ out:
}
void
-cli_print_volume_status_clients (dict_t *dict)
+cli_print_volume_status_clients (dict_t *dict, gf_boolean_t notbrick)
{
int ret = -1;
char *volname = NULL;
- int brick_count = 0;
+ int brick_index_max = -1;
+ int other_count = 0;
+ int index_max = 0;
char *hostname = NULL;
char *path = NULL;
int online = -1;
@@ -4280,11 +5594,16 @@ cli_print_volume_status_clients (dict_t *dict)
goto out;
cli_out ("Client connections for volume %s", volname);
- ret = dict_get_int32 (dict, "count", &brick_count);
+ ret = dict_get_int32 (dict, "brick-index-max", &brick_index_max);
+ if (ret)
+ goto out;
+ ret = dict_get_int32 (dict, "other-count", &other_count);
if (ret)
goto out;
- for ( i = 0; i < brick_count; i++) {
+ index_max = brick_index_max + other_count;
+
+ for (i = 0; i <= index_max; i++) {
cli_out ("----------------------------------------------");
memset (key, 0, sizeof (key));
@@ -4297,7 +5616,11 @@ cli_print_volume_status_clients (dict_t *dict)
ret = dict_get_str (dict, key, &path);
if (ret)
goto out;
- cli_out ("Brick : %s:%s", hostname, path);
+
+ if (notbrick)
+ cli_out ("%s : %s", hostname, path);
+ else
+ cli_out ("Brick : %s:%s", hostname, path);
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "brick%d.status", i);
@@ -4305,7 +5628,10 @@ cli_print_volume_status_clients (dict_t *dict)
if (ret)
goto out;
if (!online) {
- cli_out ("Brick is offline");
+ if (notbrick)
+ cli_out ("%s is offline", hostname);
+ else
+ cli_out ("Brick is offline");
continue;
}
@@ -4316,6 +5642,9 @@ cli_print_volume_status_clients (dict_t *dict)
goto out;
cli_out ("Clients connected : %d", client_count);
+ if (client_count == 0)
+ continue;
+
cli_out ("%-48s %15s %15s", "Hostname", "BytesRead",
"BytesWritten");
cli_out ("%-48s %15s %15s", "--------", "---------",
@@ -4360,6 +5689,7 @@ cli_print_volume_status_inode_entry (dict_t *dict, char *prefix)
uint64_t nlookup = 0;
uint32_t ref = 0;
int ia_type = 0;
+ char inode_type;
GF_ASSERT (dict);
GF_ASSERT (prefix);
@@ -4388,8 +5718,35 @@ cli_print_volume_status_inode_entry (dict_t *dict, char *prefix)
if (ret)
goto out;
- cli_out ("%-40s %14"PRIu64" %14"PRIu32" %9d",
- gfid, nlookup, ref, ia_type);
+ switch (ia_type) {
+ case IA_IFREG:
+ inode_type = 'R';
+ break;
+ case IA_IFDIR:
+ inode_type = 'D';
+ break;
+ case IA_IFLNK:
+ inode_type = 'L';
+ break;
+ case IA_IFBLK:
+ inode_type = 'B';
+ break;
+ case IA_IFCHR:
+ inode_type = 'C';
+ break;
+ case IA_IFIFO:
+ inode_type = 'F';
+ break;
+ case IA_IFSOCK:
+ inode_type = 'S';
+ break;
+ default:
+ inode_type = 'I';
+ break;
+ }
+
+ cli_out ("%-40s %14"PRIu64" %14"PRIu32" %9c",
+ gfid, nlookup, ref, inode_type);
out:
return;
@@ -4470,11 +5827,13 @@ out:
}
void
-cli_print_volume_status_inode (dict_t *dict)
+cli_print_volume_status_inode (dict_t *dict, gf_boolean_t notbrick)
{
int ret = -1;
char *volname = NULL;
- int brick_count = 0;
+ int brick_index_max = -1;
+ int other_count = 0;
+ int index_max = 0;
char *hostname = NULL;
char *path = NULL;
int online = -1;
@@ -4490,11 +5849,16 @@ cli_print_volume_status_inode (dict_t *dict)
goto out;
cli_out ("Inode tables for volume %s", volname);
- ret = dict_get_int32 (dict, "count", &brick_count);
+ ret = dict_get_int32 (dict, "brick-index-max", &brick_index_max);
+ if (ret)
+ goto out;
+ ret = dict_get_int32 (dict, "other-count", &other_count);
if (ret)
goto out;
- for (i = 0; i < brick_count; i++) {
+ index_max = brick_index_max + other_count;
+
+ for (i = 0; i <= index_max; i++) {
cli_out ("----------------------------------------------");
memset (key, 0, sizeof (key));
@@ -4507,7 +5871,10 @@ cli_print_volume_status_inode (dict_t *dict)
ret = dict_get_str (dict, key, &path);
if (ret)
goto out;
- cli_out ("Brick : %s:%s", hostname, path);
+ if (notbrick)
+ cli_out ("%s : %s", hostname, path);
+ else
+ cli_out ("Brick : %s:%s", hostname, path);
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "brick%d.status", i);
@@ -4515,7 +5882,10 @@ cli_print_volume_status_inode (dict_t *dict)
if (ret)
goto out;
if (!online) {
- cli_out ("Brick is offline");
+ if (notbrick)
+ cli_out ("%s is offline", hostname);
+ else
+ cli_out ("Brick is offline");
continue;
}
@@ -4585,7 +5955,7 @@ cli_print_volume_status_fdtable (dict_t *dict, char *prefix)
if (ret)
goto out;
if (0 == openfds) {
- cli_out ("No open fds");
+ cli_err ("No open fds");
goto out;
}
@@ -4623,11 +5993,13 @@ out:
}
void
-cli_print_volume_status_fd (dict_t *dict)
+cli_print_volume_status_fd (dict_t *dict, gf_boolean_t notbrick)
{
int ret = -1;
char *volname = NULL;
- int brick_count = 0;
+ int brick_index_max = -1;
+ int other_count = 0;
+ int index_max = 0;
char *hostname = NULL;
char *path = NULL;
int online = -1;
@@ -4643,11 +6015,16 @@ cli_print_volume_status_fd (dict_t *dict)
goto out;
cli_out ("FD tables for volume %s", volname);
- ret = dict_get_int32 (dict, "count", &brick_count);
+ ret = dict_get_int32 (dict, "brick-index-max", &brick_index_max);
if (ret)
goto out;
+ ret = dict_get_int32 (dict, "other-count", &other_count);
+ if (ret)
+ goto out;
+
+ index_max = brick_index_max + other_count;
- for (i = 0; i < brick_count; i++) {
+ for (i = 0; i <= index_max; i++) {
cli_out ("----------------------------------------------");
memset (key, 0, sizeof (key));
@@ -4660,7 +6037,11 @@ cli_print_volume_status_fd (dict_t *dict)
ret = dict_get_str (dict, key, &path);
if (ret)
goto out;
- cli_out ("Brick : %s:%s", hostname, path);
+
+ if (notbrick)
+ cli_out ("%s : %s", hostname, path);
+ else
+ cli_out ("Brick : %s:%s", hostname, path);
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "brick%d.status", i);
@@ -4668,7 +6049,10 @@ cli_print_volume_status_fd (dict_t *dict)
if (ret)
goto out;
if (!online) {
- cli_out ("Brick is offline");
+ if (notbrick)
+ cli_out ("%s is offline", hostname);
+ else
+ cli_out ("Brick is offline");
continue;
}
@@ -4837,11 +6221,13 @@ cli_print_volume_status_call_stack (dict_t *dict, char *prefix)
}
void
-cli_print_volume_status_callpool (dict_t *dict)
+cli_print_volume_status_callpool (dict_t *dict, gf_boolean_t notbrick)
{
int ret = -1;
char *volname = NULL;
- int brick_count = 0;
+ int brick_index_max = -1;
+ int other_count = 0;
+ int index_max = 0;
char *hostname = NULL;
char *path = NULL;
int online = -1;
@@ -4857,11 +6243,16 @@ cli_print_volume_status_callpool (dict_t *dict)
goto out;
cli_out ("Pending calls for volume %s", volname);
- ret = dict_get_int32 (dict, "count", &brick_count);
+ ret = dict_get_int32 (dict, "brick-index-max", &brick_index_max);
+ if (ret)
+ goto out;
+ ret = dict_get_int32 (dict, "other-count", &other_count);
if (ret)
goto out;
- for (i = 0; i < brick_count; i++) {
+ index_max = brick_index_max + other_count;
+
+ for (i = 0; i <= index_max; i++) {
cli_out ("----------------------------------------------");
memset (key, 0, sizeof (key));
@@ -4874,7 +6265,11 @@ cli_print_volume_status_callpool (dict_t *dict)
ret = dict_get_str (dict, key, &path);
if (ret)
goto out;
- cli_out ("Brick : %s:%s", hostname, path);
+
+ if (notbrick)
+ cli_out ("%s : %s", hostname, path);
+ else
+ cli_out ("Brick : %s:%s", hostname, path);
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "brick%d.status", i);
@@ -4882,7 +6277,10 @@ cli_print_volume_status_callpool (dict_t *dict)
if (ret)
goto out;
if (!online) {
- cli_out ("Brick is offline");
+ if (notbrick)
+ cli_out ("%s is offline", hostname);
+ else
+ cli_out ("Brick is offline");
continue;
}
@@ -4911,37 +6309,194 @@ out:
return;
}
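+/* Print any in-progress volume tasks (rebalance, replace-brick,
+ * remove-brick) recorded in the dict under "task<i>.*" keys. */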
+static void
+cli_print_volume_status_tasks (dict_t *dict)
+{
+ int ret = -1;
+ int i = 0;
+ int j = 0;
+ int count = 0;
+ int task_count = 0;
+ int status = 0;
+ char *op = NULL;
+ char *task_id_str = NULL;
+ char *volname = NULL;
+ char key[1024] = {0,};
+ char task[1024] = {0,};
+ char *brick = NULL;
+ char *src_brick = NULL;
+ char *dest_brick = NULL;
+
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret)
+ goto out;
+
+ ret = dict_get_int32 (dict, "tasks", &task_count);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to get tasks count");
+ return;
+ }
+
+ cli_out ("Task Status of Volume %s", volname);
+ cli_print_line (CLI_BRICK_STATUS_LINE_LEN);
+
+ if (task_count == 0) {
+ cli_out ("There are no active volume tasks");
+ cli_out (" ");
+ return;
+ }
+
+ for (i = 0; i < task_count; i++) {
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "task%d.type", i);
+ ret = dict_get_str(dict, key, &op);
+ if (ret)
+ return;
+ cli_out ("%-20s : %-20s", "Task", op);
+
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "task%d.id", i);
+ ret = dict_get_str (dict, key, &task_id_str);
+ if (ret)
+ return;
+ cli_out ("%-20s : %-20s", "ID", task_id_str);
+
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "task%d.status", i);
+ ret = dict_get_int32 (dict, key, &status);
+ if (ret)
+ return;
+
+ snprintf (task, sizeof (task), "task%d", i);
+
+ /*
+ Replace brick only has two states - In progress and Complete
+ Ref: xlators/mgmt/glusterd/src/glusterd-replace-brick.c
+ */
+
+ if (!strcmp (op, "Replace brick")) {
+ if (status)
+ status = GF_DEFRAG_STATUS_COMPLETE;
+ else
+ status = GF_DEFRAG_STATUS_STARTED;
+
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "%s.src-brick", task);
+ ret = dict_get_str (dict, key, &src_brick);
+ if (ret)
+ goto out;
+
+ cli_out ("%-20s : %-20s", "Source Brick", src_brick);
+
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "%s.dst-brick", task);
+ ret = dict_get_str (dict, key, &dest_brick);
+ if (ret)
+ goto out;
+
+ cli_out ("%-20s : %-20s", "Destination Brick",
+ dest_brick);
+
+ } else if (!strcmp (op, "Remove brick")) {
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "%s.count", task);
+ ret = dict_get_int32 (dict, key, &count);
+ if (ret)
+ goto out;
+
+ cli_out ("%-20s", "Removed bricks:");
+
+ for (j = 1; j <= count; j++) {
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key),"%s.brick%d",
+ task, j);
+ ret = dict_get_str (dict, key, &brick);
+ if (ret)
+ goto out;
+
+ cli_out ("%-20s", brick);
+ }
+ }
+ cli_out ("%-20s : %-20s", "Status",
+ cli_vol_task_status_str[status]);
+ cli_out (" ");
+ }
+
+out:
+ return;
+}
+
static int
-gf_cli3_1_status_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_status_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
- int ret = -1;
- int i = 0;
- int pid = -1;
- uint32_t cmd = 0;
- char key[1024] = {0,};
- char *hostname = NULL;
- char *path = NULL;
- char *volname = NULL;
- dict_t *dict = NULL;
- gf_cli_rsp rsp = {0,};
- cli_volume_status_t status = {0};
+ int ret = -1;
+ int brick_index_max = -1;
+ int other_count = 0;
+ int index_max = 0;
+ int i = 0;
+ int pid = -1;
+ uint32_t cmd = 0;
+ gf_boolean_t notbrick = _gf_false;
+ char key[1024] = {0,};
+ char *hostname = NULL;
+ char *path = NULL;
+ char *volname = NULL;
+ dict_t *dict = NULL;
+ gf_cli_rsp rsp = {0,};
+ cli_volume_status_t status = {0};
+ cli_local_t *local = NULL;
+ gf_boolean_t wipe_local = _gf_false;
+ char msg[1024] = {0,};
if (req->rpc_status == -1)
goto out;
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log ("cli", GF_LOG_ERROR, "Volume status response error");
+ gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
gf_log ("cli", GF_LOG_DEBUG, "Received response to status cmd");
- if (rsp.op_ret && strcmp (rsp.op_errstr, ""))
- cli_out ("%s", rsp.op_errstr);
- else if (rsp.op_ret)
- cli_out ("Unable to obtain volume status information.");
+ local = ((call_frame_t *)myframe)->local;
+ if (!local) {
+ local = cli_local_get ();
+ if (!local) {
+ ret = -1;
+ gf_log ("cli", GF_LOG_ERROR, "Failed to get local");
+ goto out;
+ }
+ wipe_local = _gf_true;
+ }
+
+ if (rsp.op_ret) {
+ if (strcmp (rsp.op_errstr, ""))
+ snprintf (msg, sizeof (msg), "%s", rsp.op_errstr);
+ else
+ snprintf (msg, sizeof (msg), "Unable to obtain volume "
+ "status information.");
+
+ if (global_state->mode & GLUSTER_MODE_XML) {
+ if (!local->all)
+ cli_xml_output_str ("volStatus", msg,
+ rsp.op_ret, rsp.op_errno,
+ rsp.op_errstr);
+ ret = 0;
+ goto out;
+ }
+
+ cli_err ("%s", msg);
+ if (local && local->all) {
+ ret = 0;
+ cli_out (" ");
+ } else
+ ret = -1;
+
+ goto out;
+ }
dict = dict_new ();
if (!dict)
@@ -4958,51 +6513,85 @@ gf_cli3_1_status_cbk (struct rpc_req *req, struct iovec *iov,
goto out;
if ((cmd & GF_CLI_STATUS_ALL)) {
- ((call_frame_t *)myframe)->local = dict;
- ret = 0;
- goto out;
- }
- ret = dict_get_int32 (dict, "count", &count);
- if (ret)
- goto out;
- if (count == 0) {
- ret = -1;
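+ /* Stash the response dict in local->dict so that
+ * gf_cli_status_volume_all() can read vol_count and the
+ * volume names from it */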
+ if (local && local->dict) {
+ dict_ref (dict);
+ ret = dict_set_static_ptr (local->dict, "rsp-dict", dict);
+ ret = 0;
+ } else {
+ gf_log ("cli", GF_LOG_ERROR, "local not found");
+ ret = -1;
+ }
goto out;
}
-#if (HAVE_LIB_XML)
+ if ((cmd & GF_CLI_STATUS_NFS) || (cmd & GF_CLI_STATUS_SHD) ||
+ (cmd & GF_CLI_STATUS_QUOTAD))
+ notbrick = _gf_true;
+
if (global_state->mode & GLUSTER_MODE_XML) {
- ret = cli_xml_output_vol_status (dict, rsp.op_ret,
- rsp.op_errno, rsp.op_errstr);
- if (ret) {
- gf_log ("cli", GF_LOG_ERROR,
- "Error outputting to xml");
+ if (!local->all) {
+ ret = cli_xml_output_vol_status_begin (local,
+ rsp.op_ret,
+ rsp.op_errno,
+ rsp.op_errstr);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR,
+ "Error outputting to xml");
+ goto out;
+ }
+ }
+ if (cmd & GF_CLI_STATUS_TASKS) {
+ ret = cli_xml_output_vol_status_tasks_detail (local,
+ dict);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR,"Error outputting "
+ "to xml");
+ goto out;
+ }
+ } else {
+ ret = cli_xml_output_vol_status (local, dict);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR,
+ "Error outputting to xml");
+ goto out;
+ }
+ }
+
+ if (!local->all) {
+ ret = cli_xml_output_vol_status_end (local);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR,
+ "Error outputting to xml");
+ }
}
goto out;
}
-#endif
status.brick = GF_CALLOC (1, PATH_MAX + 256, gf_common_mt_strdup);
switch (cmd & GF_CLI_STATUS_MASK) {
case GF_CLI_STATUS_MEM:
- cli_print_volume_status_mem (dict);
+ cli_print_volume_status_mem (dict, notbrick);
goto cont;
break;
case GF_CLI_STATUS_CLIENTS:
- cli_print_volume_status_clients (dict);
+ cli_print_volume_status_clients (dict, notbrick);
goto cont;
break;
case GF_CLI_STATUS_INODE:
- cli_print_volume_status_inode (dict);
+ cli_print_volume_status_inode (dict, notbrick);
goto cont;
break;
case GF_CLI_STATUS_FD:
- cli_print_volume_status_fd (dict);
+ cli_print_volume_status_fd (dict, notbrick);
goto cont;
break;
case GF_CLI_STATUS_CALLPOOL:
- cli_print_volume_status_callpool (dict);
+ cli_print_volume_status_callpool (dict, notbrick);
+ goto cont;
+ break;
+ case GF_CLI_STATUS_TASKS:
+ cli_print_volume_status_tasks (dict);
goto cont;
break;
default:
@@ -5013,48 +6602,69 @@ gf_cli3_1_status_cbk (struct rpc_req *req, struct iovec *iov,
if (ret)
goto out;
- cli_out ("\nStatus of volume: %s", volname);
+ ret = dict_get_int32 (dict, "brick-index-max", &brick_index_max);
+ if (ret)
+ goto out;
+
+ ret = dict_get_int32 (dict, "other-count", &other_count);
+ if (ret)
+ goto out;
- if ((cmd & GF_CLI_STATUS_DETAIL) == 0)
- cli_out ("Brick\t\t\t\t\t\t\tPort\tOnline\tPid");
+ index_max = brick_index_max + other_count;
- for (i = 0; i < count; i++) {
+ cli_out ("Status of volume: %s", volname);
+
+ if ((cmd & GF_CLI_STATUS_DETAIL) == 0) {
+ cli_out ("Gluster process\t\t\t\t\t\tPort\tOnline\tPid");
cli_print_line (CLI_BRICK_STATUS_LINE_LEN);
+ }
+
+ for (i = 0; i <= index_max; i++) {
+
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "brick%d.hostname", i);
ret = dict_get_str (dict, key, &hostname);
if (ret)
- goto out;
+ continue;
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "brick%d.path", i);
ret = dict_get_str (dict, key, &path);
if (ret)
- goto out;
+ continue;
+ /* Brick/not-brick is handled separately here as all
+ * types of nodes are contained in the default output
+ */
memset (status.brick, 0, PATH_MAX + 255);
- snprintf (status.brick, PATH_MAX + 255, "%s:%s",
- hostname, path);
+ if (!strcmp (hostname, "NFS Server") ||
+ !strcmp (hostname, "Self-heal Daemon") ||
+ !strcmp (hostname, "Quota Daemon"))
+ snprintf (status.brick, PATH_MAX + 255, "%s on %s",
+ hostname, path);
+ else
+ snprintf (status.brick, PATH_MAX + 255, "Brick %s:%s",
+ hostname, path);
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "brick%d.port", i);
ret = dict_get_int32 (dict, key, &(status.port));
if (ret)
- goto out;
+ continue;
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "brick%d.status", i);
ret = dict_get_int32 (dict, key, &(status.online));
if (ret)
- goto out;
+ continue;
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "brick%d.pid", i);
ret = dict_get_int32 (dict, key, &pid);
if (ret)
- goto out;
+ continue;
if (pid == -1)
ret = gf_asprintf (&(status.pid_str), "%s", "N/A");
else
@@ -5067,26 +6677,34 @@ gf_cli3_1_status_cbk (struct rpc_req *req, struct iovec *iov,
ret = cli_get_detail_status (dict, i, &status);
if (ret)
goto out;
+ cli_print_line (CLI_BRICK_STATUS_LINE_LEN);
cli_print_detailed_status (&status);
} else {
cli_print_brick_status (&status);
}
}
+ cli_out (" ");
+ if ((cmd & GF_CLI_STATUS_MASK) == GF_CLI_STATUS_NONE)
+ cli_print_volume_status_tasks (dict);
cont:
ret = rsp.op_ret;
out:
- if (status.brick)
- GF_FREE (status.brick);
+ if (dict)
+ dict_unref (dict);
+ GF_FREE (status.brick);
+ if (local && wipe_local) {
+ cli_local_wipe (local);
+ }
cli_cmd_broadcast_response (ret);
return ret;
}
int32_t
-gf_cli3_1_status_volume (call_frame_t *frame, xlator_t *this,
+gf_cli_status_volume (call_frame_t *frame, xlator_t *this,
void *data)
{
gf_cli_req req = {{0,}};
@@ -5098,20 +6716,10 @@ gf_cli3_1_status_volume (call_frame_t *frame, xlator_t *this,
dict = data;
- ret = dict_allocate_and_serialize (dict,
- &req.dict.dict_val,
- (size_t *)&req.dict.dict_len);
- if (ret < 0) {
- gf_log ("cli", GF_LOG_ERROR,
- "failed to serialize the data");
-
- goto out;
- }
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
- GLUSTER_CLI_STATUS_VOLUME, NULL,
- this, gf_cli3_1_status_cbk,
- (xdrproc_t)xdr_gf_cli_req);
-
+ ret = cli_to_glusterd (&req, frame, gf_cli_status_cbk,
+ (xdrproc_t) xdr_gf_cli_req, dict,
+ GLUSTER_CLI_STATUS_VOLUME, this, cli_rpc_prog,
+ NULL);
out:
gf_log ("cli", GF_LOG_DEBUG, "Returning: %d", ret);
return ret;
@@ -5126,29 +6734,32 @@ gf_cli_status_volume_all (call_frame_t *frame, xlator_t *this, void *data)
uint32_t cmd = 0;
char key[1024] = {0};
char *volname = NULL;
- dict_t *vol_dict = NULL;
+ void *vol_dict = NULL;
dict_t *dict = NULL;
+ cli_local_t *local = NULL;
- dict = (dict_t *)data;
- ret = dict_get_uint32 (dict, "cmd", &cmd);
- if (ret)
+ if (frame->local) {
+ local = frame->local;
+ local->all = _gf_true;
+ } else
goto out;
- ret = gf_cli3_1_status_volume (frame, this, data);
+ ret = dict_get_uint32 (local->dict, "cmd", &cmd);
if (ret)
goto out;
- vol_dict = (dict_t *)(frame->local);
- ret = dict_get_int32 (vol_dict, "vol_count", &vol_count);
- if (ret) {
- cli_out ("Failed to get names of volumes");
+ ret = gf_cli_status_volume (frame, this, data);
+ if (ret)
goto out;
- }
- if (vol_count == 0) {
- cli_out ("No volumes present");
- ret = 0;
+ ret = dict_get_ptr (local->dict, "rsp-dict", &vol_dict);
+ if (ret)
+ goto out;
+
+ ret = dict_get_int32 ((dict_t *)vol_dict, "vol_count", &vol_count);
+ if (ret) {
+ cli_err ("Failed to get names of volumes");
goto out;
}
@@ -5156,6 +6767,22 @@ gf_cli_status_volume_all (call_frame_t *frame, xlator_t *this, void *data)
cmd &= ~GF_CLI_STATUS_ALL;
cmd |= GF_CLI_STATUS_VOL;
+ if (global_state->mode & GLUSTER_MODE_XML) {
+ //TODO: Pass proper op_* values
+ ret = cli_xml_output_vol_status_begin (local, 0, 0, NULL);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR,
+ "Error outputting to xml");
+ goto out;
+ }
+ }
+
+ if (vol_count == 0 && !(global_state->mode & GLUSTER_MODE_XML)) {
+ cli_err ("No volumes present");
+ ret = 0;
+ goto out;
+ }
+
for (i = 0; i < vol_count; i++) {
dict = dict_new ();
@@ -5168,7 +6795,7 @@ gf_cli_status_volume_all (call_frame_t *frame, xlator_t *this, void *data)
if (ret)
goto out;
- ret = dict_set_dynstr (dict, "volname", volname);
+ ret = dict_set_str (dict, "volname", volname);
if (ret)
goto out;
@@ -5176,23 +6803,38 @@ gf_cli_status_volume_all (call_frame_t *frame, xlator_t *this, void *data)
if (ret)
goto out;
- ret = gf_cli3_1_status_volume (frame, this, dict);
+ ret = gf_cli_status_volume (frame, this, dict);
if (ret)
goto out;
dict_unref (dict);
}
+ if (global_state->mode & GLUSTER_MODE_XML) {
+ ret = cli_xml_output_vol_status_end (local);
+ }
+
out:
if (ret)
gf_log ("cli", GF_LOG_ERROR, "status all failed");
+
+ if (vol_dict)
+ dict_unref (vol_dict);
+
if (ret && dict)
dict_unref (dict);
+
+ if (local)
+ cli_local_wipe (local);
+
+ if (frame)
+ frame->local = NULL;
+
return ret;
}
static int
-gf_cli3_1_mount_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_mount_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf1_cli_mount_rsp rsp = {0,};
@@ -5204,7 +6846,8 @@ gf_cli3_1_mount_cbk (struct rpc_req *req, struct iovec *iov,
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf1_cli_mount_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "error");
+ gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
@@ -5215,9 +6858,9 @@ gf_cli3_1_mount_cbk (struct rpc_req *req, struct iovec *iov,
cli_out ("%s", rsp.path);
} else {
/* weird sounding but easy to parse... */
- cli_out ("%d : failed with this errno (%s)",
+ cli_err ("%d : failed with this errno (%s)",
rsp.op_errno, strerror (rsp.op_errno));
- ret = 1;
+ ret = -1;
}
out:
@@ -5226,7 +6869,7 @@ out:
}
int32_t
-gf_cli3_1_mount (call_frame_t *frame, xlator_t *this, void *data)
+gf_cli_mount (call_frame_t *frame, xlator_t *this, void *data)
{
gf1_cli_mount_req req = {0,};
int ret = -1;
@@ -5242,15 +6885,15 @@ gf_cli3_1_mount (call_frame_t *frame, xlator_t *this, void *data)
req.label = label;
ret = dict_allocate_and_serialize (dict, &req.dict.dict_val,
- (size_t *)&req.dict.dict_len);
+ &req.dict.dict_len);
if (ret) {
ret = -1;
goto out;
}
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
+ ret = cli_cmd_submit (NULL, &req, frame, cli_rpc_prog,
GLUSTER_CLI_MOUNT, NULL,
- this, gf_cli3_1_mount_cbk,
+ this, gf_cli_mount_cbk,
(xdrproc_t)xdr_gf1_cli_mount_req);
out:
@@ -5259,7 +6902,7 @@ out:
}
static int
-gf_cli3_1_umount_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_umount_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf1_cli_umount_rsp rsp = {0,};
@@ -5271,7 +6914,8 @@ gf_cli3_1_umount_cbk (struct rpc_req *req, struct iovec *iov,
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf1_cli_umount_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "error");
+ gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
@@ -5280,8 +6924,8 @@ gf_cli3_1_umount_cbk (struct rpc_req *req, struct iovec *iov,
if (rsp.op_ret == 0)
ret = 0;
else {
- cli_out ("umount failed");
- ret = 1;
+ cli_err ("umount failed");
+ ret = -1;
}
out:
@@ -5290,7 +6934,7 @@ out:
}
int32_t
-gf_cli3_1_umount (call_frame_t *frame, xlator_t *this, void *data)
+gf_cli_umount (call_frame_t *frame, xlator_t *this, void *data)
{
gf1_cli_umount_req req = {0,};
int ret = -1;
@@ -5310,9 +6954,9 @@ gf_cli3_1_umount (call_frame_t *frame, xlator_t *this, void *data)
goto out;
}
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
+ ret = cli_cmd_submit (NULL, &req, frame, cli_rpc_prog,
GLUSTER_CLI_UMOUNT, NULL,
- this, gf_cli3_1_umount_cbk,
+ this, gf_cli_umount_cbk,
(xdrproc_t)xdr_gf1_cli_umount_req);
out:
@@ -5320,8 +6964,212 @@ gf_cli3_1_umount (call_frame_t *frame, xlator_t *this, void *data)
return ret;
}
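+/* Print per-brick self-heal crawl statistics from the
+ * "statistics_*-<brick>-<i>" keys: healed, split-brain and
+ * heal-failed counts for each crawl. */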
+void
+cmd_heal_volume_statistics_out (dict_t *dict, int brick)
+{
+
+ uint64_t num_entries = 0;
+ int ret = 0;
+ char key[256] = {0};
+ char *hostname = NULL;
+ uint64_t i = 0;
+ uint64_t healed_count = 0;
+ uint64_t split_brain_count = 0;
+ uint64_t heal_failed_count = 0;
+ char *start_time_str = NULL;
+ char *end_time_str = NULL;
+ char *crawl_type = NULL;
+ int progress = -1;
+
+ snprintf (key, sizeof key, "%d-hostname", brick);
+ ret = dict_get_str (dict, key, &hostname);
+ if (ret)
+ goto out;
+ cli_out ("------------------------------------------------");
+ cli_out ("\nCrawl statistics for brick no %d", brick);
+ cli_out ("Hostname of brick %s", hostname);
+
+ snprintf (key, sizeof key, "statistics-%d-count", brick);
+ ret = dict_get_uint64 (dict, key, &num_entries);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < num_entries; i++) {
+ snprintf (key, sizeof key, "statistics_crawl_type-%d-%"PRIu64,
+ brick, i);
+ ret = dict_get_str (dict, key, &crawl_type);
+ if (ret)
+ goto out;
+
+ snprintf (key, sizeof key, "statistics_healed_cnt-%d-%"PRIu64,
+ brick,i);
+ ret = dict_get_uint64 (dict, key, &healed_count);
+ if (ret)
+ goto out;
+
+ snprintf (key, sizeof key, "statistics_sb_cnt-%d-%"PRIu64,
+ brick, i);
+ ret = dict_get_uint64 (dict, key, &split_brain_count);
+ if (ret)
+ goto out;
+ snprintf (key, sizeof key, "statistics_heal_failed_cnt-%d-%"PRIu64,
+ brick, i);
+ ret = dict_get_uint64 (dict, key, &heal_failed_count);
+ if (ret)
+ goto out;
+ snprintf (key, sizeof key, "statistics_strt_time-%d-%"PRIu64,
+ brick, i);
+ ret = dict_get_str (dict, key, &start_time_str);
+ if (ret)
+ goto out;
+ snprintf (key, sizeof key, "statistics_end_time-%d-%"PRIu64,
+ brick, i);
+ ret = dict_get_str (dict, key, &end_time_str);
+ if (ret)
+ goto out;
+ snprintf (key, sizeof key, "statistics_inprogress-%d-%"PRIu64,
+ brick, i);
+ ret = dict_get_int32 (dict, key, &progress);
+ if (ret)
+ goto out;
+
+ cli_out ("\nStarting time of crawl: %s", start_time_str);
+ if (progress == 1)
+ cli_out ("Crawl is in progress");
+ else
+ cli_out ("Ending time of crawl: %s", end_time_str);
+
+ cli_out ("Type of crawl: %s", crawl_type);
+ cli_out ("No. of entries healed: %"PRIu64,
+ healed_count);
+ cli_out ("No. of entries in split-brain: %"PRIu64,
+ split_brain_count);
+ cli_out ("No. of heal failed entries: %"PRIu64,
+ heal_failed_count);
+
+ }
+
+out:
+ return;
+}
+
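+/* Print one brick's heal-info entries; when the self-heal daemon
+ * reported a status string ("<brick>-shd-status"), the entry list
+ * is skipped. */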
+void
+cmd_heal_volume_brick_out (dict_t *dict, int brick)
+{
+ uint64_t num_entries = 0;
+ int ret = 0;
+ char key[256] = {0};
+ char *hostname = NULL;
+ char *path = NULL;
+ char *status = NULL;
+ uint64_t i = 0;
+ uint32_t time = 0;
+ char timestr[32] = {0};
+ char *shd_status = NULL;
+
+ snprintf (key, sizeof key, "%d-hostname", brick);
+ ret = dict_get_str (dict, key, &hostname);
+ if (ret)
+ goto out;
+ snprintf (key, sizeof key, "%d-path", brick);
+ ret = dict_get_str (dict, key, &path);
+ if (ret)
+ goto out;
+ cli_out ("\nBrick %s:%s", hostname, path);
+
+ snprintf (key, sizeof key, "%d-status", brick);
+ ret = dict_get_str (dict, key, &status);
+ if (status && strlen (status))
+ cli_out ("Status: %s", status);
+
+ snprintf (key, sizeof key, "%d-shd-status", brick);
+ ret = dict_get_str (dict, key, &shd_status);
+
+ if (!shd_status) {
+ snprintf (key, sizeof key, "%d-count", brick);
+ ret = dict_get_uint64 (dict, key, &num_entries);
+ cli_out ("Number of entries: %"PRIu64, num_entries);
+
+ for (i = 0; i < num_entries; i++) {
+ snprintf (key, sizeof key, "%d-%"PRIu64, brick, i);
+ ret = dict_get_str (dict, key, &path);
+ if (ret)
+ continue;
+ time = 0;
+ snprintf (key, sizeof key, "%d-%"PRIu64"-time",
+ brick, i);
+ ret = dict_get_uint32 (dict, key, &time);
+ if (!time) {
+ cli_out ("%s", path);
+ } else {
+ gf_time_fmt (timestr, sizeof timestr,
+ time, gf_timefmt_FT);
+ if (i == 0) {
+ cli_out ("at path on brick");
+ cli_out ("-----------------------------------");
+ }
+ cli_out ("%s %s", timestr, path);
+ }
+ }
+ }
+
+out:
+ return;
+}
+
+
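+/* Prints, per brick, the number of entries pending heal as reported in the
+ * "<brick>-hardlinks" key of the response dict; the hostname, path and
+ * status keys mirror cmd_heal_volume_brick_out above.
+ */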
+void
+cmd_heal_volume_statistics_heal_count_out (dict_t *dict, int brick)
+{
+ uint64_t num_entries = 0;
+ int ret = 0;
+ char key[256] = {0};
+ char *hostname = NULL;
+ char *path = NULL;
+ char *status = NULL;
+ char *shd_status = NULL;
+
+ snprintf (key, sizeof key, "%d-hostname", brick);
+ ret = dict_get_str (dict, key, &hostname);
+ if (ret)
+ goto out;
+ snprintf (key, sizeof key, "%d-path", brick);
+ ret = dict_get_str (dict, key, &path);
+ if (ret)
+ goto out;
+ cli_out ("\nBrick %s:%s", hostname, path);
+
+ snprintf (key, sizeof key, "%d-status", brick);
+ ret = dict_get_str (dict, key, &status);
+ if (status && strlen (status))
+ cli_out ("Status: %s", status);
+
+        snprintf (key, sizeof key, "%d-shd-status", brick);
+        ret = dict_get_str (dict, key, &shd_status);
+
+        if (!shd_status) {
+ snprintf (key, sizeof key, "%d-hardlinks", brick);
+ ret = dict_get_uint64 (dict, key, &num_entries);
+ if (ret)
+                        cli_out ("Could not gather input for this brick");
+ else
+ cli_out ("Number of entries: %"PRIu64, num_entries);
+
+ }
+
+out:
+ return;
+}
+
+
int
-gf_cli3_1_heal_volume_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_heal_volume_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf_cli_rsp rsp = {0,};
@@ -5329,75 +7177,182 @@ gf_cli3_1_heal_volume_cbk (struct rpc_req *req, struct iovec *iov,
cli_local_t *local = NULL;
char *volname = NULL;
call_frame_t *frame = NULL;
+ dict_t *input_dict = NULL;
dict_t *dict = NULL;
+ int brick_count = 0;
+ int i = 0;
+ gf_xl_afr_op_t heal_op = GF_AFR_OP_INVALID;
+ char *operation = NULL;
+ char *substr = NULL;
+ char *heal_op_str = NULL;
if (-1 == req->rpc_status) {
goto out;
}
+ frame = myframe;
+
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log ("", GF_LOG_ERROR, "error");
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
- frame = myframe;
-
- if (frame) {
+ if (frame)
local = frame->local;
- frame->local = NULL;
+
+ if (local) {
+ input_dict = local->dict;
+ ret = dict_get_int32 (input_dict, "heal-op",
+ (int32_t*)&heal_op);
+ }
+//TODO: Proper XML output
+//#if (HAVE_LIB_XML)
+// if (global_state->mode & GLUSTER_MODE_XML) {
+// ret = cli_xml_output_dict ("volHeal", dict, rsp.op_ret,
+// rsp.op_errno, rsp.op_errstr);
+// if (ret)
+// gf_log ("cli", GF_LOG_ERROR,
+// "Error outputting to xml");
+// goto out;
+// }
+//#endif
+
+ ret = dict_get_str (input_dict, "volname", &volname);
+ if (ret) {
+ gf_log (frame->this->name, GF_LOG_ERROR, "failed to get volname");
+ goto out;
}
- if (local)
- dict = local->dict;
+ gf_log ("cli", GF_LOG_INFO, "Received resp to heal volume");
-#if (HAVE_LIB_XML)
- if (global_state->mode & GLUSTER_MODE_XML) {
- ret = cli_xml_output_dict ("volHeal", dict, rsp.op_ret,
- rsp.op_errno, rsp.op_errstr);
- if (ret)
- gf_log ("cli", GF_LOG_ERROR,
- "Error outputting to xml");
+ switch (heal_op) {
+ case GF_AFR_OP_HEAL_INDEX:
+ heal_op_str = "to perform index self heal";
+ break;
+ case GF_AFR_OP_HEAL_FULL:
+ heal_op_str = "to perform full self heal";
+ break;
+ case GF_AFR_OP_INDEX_SUMMARY:
+ heal_op_str = "list of entries to be healed";
+ break;
+ case GF_AFR_OP_HEALED_FILES:
+ heal_op_str = "list of healed entries";
+ break;
+ case GF_AFR_OP_HEAL_FAILED_FILES:
+ heal_op_str = "list of heal failed entries";
+ break;
+ case GF_AFR_OP_SPLIT_BRAIN_FILES:
+ heal_op_str = "list of split brain entries";
+ break;
+ case GF_AFR_OP_STATISTICS:
+ heal_op_str = "crawl statistics";
+ break;
+ case GF_AFR_OP_STATISTICS_HEAL_COUNT:
+ heal_op_str = "count of entries to be healed";
+ break;
+ case GF_AFR_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
+ heal_op_str = "count of entries to be healed per replica";
+ break;
+ case GF_AFR_OP_INVALID:
+ heal_op_str = "invalid heal op";
+ break;
+ }
+
+ if ((heal_op == GF_AFR_OP_HEAL_FULL) ||
+ (heal_op == GF_AFR_OP_HEAL_INDEX)) {
+ operation = "Launching heal operation";
+ substr = "\nUse heal info commands to check status";
+ } else {
+ operation = "Gathering";
+ substr = "";
+ }
+
+ if (rsp.op_ret) {
+ if (strcmp (rsp.op_errstr, "")) {
+ cli_err ("%s", rsp.op_errstr);
+ } else {
+ cli_err ("%s %s on volume %s has been unsuccessful",
+ operation, heal_op_str, volname);
+ }
+
+ ret = rsp.op_ret;
+ goto out;
+ } else {
+ cli_out ("%s %s on volume %s has been successful %s", operation,
+ heal_op_str, volname, substr);
+ }
+
+ ret = rsp.op_ret;
+ if ((heal_op == GF_AFR_OP_HEAL_FULL) ||
+ (heal_op == GF_AFR_OP_HEAL_INDEX))
+ goto out;
+
+ dict = dict_new ();
+ if (!dict) {
+ ret = -1;
goto out;
}
-#endif
- ret = dict_get_str (dict, "volname", &volname);
+ ret = dict_unserialize (rsp.dict.dict_val,
+ rsp.dict.dict_len,
+ &dict);
+
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "failed to get volname");
+                gf_log ("cli", GF_LOG_ERROR,
+                        "Failed to unserialize the response dictionary");
goto out;
+ } else {
+ dict->extra_stdfree = rsp.dict.dict_val;
}
+ ret = dict_get_int32 (dict, "count", &brick_count);
+ if (ret)
+ goto out;
- gf_log ("cli", GF_LOG_INFO, "Received resp to heal volume");
+ if (!brick_count) {
+ cli_err ("All bricks of volume %s are down.", volname);
+ goto out;
+ }
- if (rsp.op_ret && strcmp (rsp.op_errstr, ""))
- cli_out ("%s", rsp.op_errstr);
- else
- cli_out ("Starting heal on volume %s has been %s", volname,
- (rsp.op_ret) ? "unsuccessful": "successful");
+ switch (heal_op) {
+ case GF_AFR_OP_STATISTICS:
+ for (i = 0; i < brick_count; i++)
+ cmd_heal_volume_statistics_out (dict, i);
+ break;
+ case GF_AFR_OP_STATISTICS_HEAL_COUNT:
+ case GF_AFR_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
+ for (i = 0; i < brick_count; i++)
+ cmd_heal_volume_statistics_heal_count_out (dict,
+ i);
+ break;
+ case GF_AFR_OP_INDEX_SUMMARY:
+ case GF_AFR_OP_HEALED_FILES:
+ case GF_AFR_OP_HEAL_FAILED_FILES:
+ case GF_AFR_OP_SPLIT_BRAIN_FILES:
+ for (i = 0; i < brick_count; i++)
+ cmd_heal_volume_brick_out (dict, i);
+ break;
+ default:
+ break;
+ }
ret = rsp.op_ret;
out:
cli_cmd_broadcast_response (ret);
- if (local)
- cli_local_wipe (local);
- if (rsp.dict.dict_val)
- free (rsp.dict.dict_val);
- if (rsp.op_errstr)
- free (rsp.op_errstr);
+ free (rsp.op_errstr);
if (dict)
dict_unref (dict);
return ret;
}
int32_t
-gf_cli3_1_heal_volume (call_frame_t *frame, xlator_t *this,
+gf_cli_heal_volume (call_frame_t *frame, xlator_t *this,
void *data)
{
gf_cli_req req = {{0,}};
int ret = 0;
- cli_local_t *local = NULL;
dict_t *dict = NULL;
if (!frame || !this || !data) {
@@ -5406,39 +7361,22 @@ gf_cli3_1_heal_volume (call_frame_t *frame, xlator_t *this,
}
dict = data;
- local = cli_local_get ();
- if (local) {
- local->dict = dict_ref (dict);
- frame->local = local;
- }
-
- ret = dict_allocate_and_serialize (dict,
- &req.dict.dict_val,
- (size_t *)&req.dict.dict_len);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "failed to serialize the data");
-
- goto out;
- }
-
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
- GLUSTER_CLI_HEAL_VOLUME, NULL,
- this, gf_cli3_1_heal_volume_cbk,
- (xdrproc_t) xdr_gf_cli_req);
+ ret = cli_to_glusterd (&req, frame, gf_cli_heal_volume_cbk,
+ (xdrproc_t) xdr_gf_cli_req, dict,
+ GLUSTER_CLI_HEAL_VOLUME, this, cli_rpc_prog,
+ NULL);
out:
gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
- if (req.dict.dict_val)
- GF_FREE (req.dict.dict_val);
+ GF_FREE (req.dict.dict_val);
return ret;
}
int32_t
-gf_cli3_1_statedump_volume_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_statedump_volume_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf_cli_rsp rsp = {0,};
@@ -5450,16 +7388,16 @@ gf_cli3_1_statedump_volume_cbk (struct rpc_req *req, struct iovec *iov,
ret = xdr_to_generic (*iov, &rsp,
(xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log (THIS->name, GF_LOG_ERROR, "XDR decoding failed");
+ gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
- gf_log ("cli", GF_LOG_DEBUG, "Recieved response to statedump");
+ gf_log ("cli", GF_LOG_DEBUG, "Received response to statedump");
if (rsp.op_ret)
snprintf (msg, sizeof(msg), "%s", rsp.op_errstr);
else
- snprintf (msg, sizeof (msg), "Volume statedump sucessful");
+ snprintf (msg, sizeof (msg), "Volume statedump successful");
-#if (HAVE_LIB_XML)
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_str ("volStatedump", msg, rsp.op_ret,
rsp.op_errno, rsp.op_errstr);
@@ -5468,9 +7406,11 @@ gf_cli3_1_statedump_volume_cbk (struct rpc_req *req, struct iovec *iov,
"Error outputting to xml");
goto out;
}
-#endif
- cli_out ("%s", msg);
+ if (rsp.op_ret)
+ cli_err ("volume statedump: failed: %s", msg);
+ else
+ cli_out ("volume statedump: success");
ret = rsp.op_ret;
out:
@@ -5479,7 +7419,7 @@ out:
}
int32_t
-gf_cli3_1_statedump_volume (call_frame_t *frame, xlator_t *this,
+gf_cli_statedump_volume (call_frame_t *frame, xlator_t *this,
void *data)
{
gf_cli_req req = {{0,}};
@@ -5491,33 +7431,20 @@ gf_cli3_1_statedump_volume (call_frame_t *frame, xlator_t *this,
options = data;
- ret = dict_allocate_and_serialize (options,
- &req.dict.dict_val,
- (size_t *)&req.dict.dict_len);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "failed to serialize the data");
-
- goto out;
- }
-
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
- GLUSTER_CLI_STATEDUMP_VOLUME, NULL,
- this, gf_cli3_1_statedump_volume_cbk,
- (xdrproc_t)xdr_gf_cli_req);
+ ret = cli_to_glusterd (&req, frame, gf_cli_statedump_volume_cbk,
+ (xdrproc_t) xdr_gf_cli_req, options,
+ GLUSTER_CLI_STATEDUMP_VOLUME, this, cli_rpc_prog,
+ NULL);
out:
- if (options)
- dict_destroy (options);
gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
- if (req.dict.dict_val)
- GF_FREE (req.dict.dict_val);
+ GF_FREE (req.dict.dict_val);
return ret;
}
int32_t
-gf_cli3_1_list_volume_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_list_volume_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
int ret = -1;
@@ -5533,7 +7460,8 @@ gf_cli3_1_list_volume_cbk (struct rpc_req *req, struct iovec *iov,
ret = xdr_to_generic (*iov, &rsp,
(xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log (THIS->name, GF_LOG_ERROR, "XDR decoding failed");
+ gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
@@ -5549,7 +7477,6 @@ gf_cli3_1_list_volume_cbk (struct rpc_req *req, struct iovec *iov,
goto out;
}
-#if (HAVE_LIB_XML)
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_vol_list (dict, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
@@ -5558,28 +7485,25 @@ gf_cli3_1_list_volume_cbk (struct rpc_req *req, struct iovec *iov,
"Error outputting to xml");
goto out;
}
-#endif
+
if (rsp.op_ret)
- cli_out ("%s", rsp.op_errstr);
+ cli_err ("%s", rsp.op_errstr);
else {
ret = dict_get_int32 (dict, "count", &vol_count);
if (ret)
goto out;
if (vol_count == 0) {
- cli_out ("No volumes present in cluster");
+ cli_err ("No volumes present in cluster");
goto out;
}
- cli_out ("%d %s present in cluster", vol_count,
- ((vol_count == 1) ? "volume" : "volumes"));
-
for (i = 0; i < vol_count; i++) {
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "volume%d", i);
ret = dict_get_str (dict, key, &volname);
if (ret)
goto out;
- cli_out ("\t%d. %s", i+1, volname);
+ cli_out ("%s", volname);
}
}
@@ -5591,7 +7515,7 @@ out:
}
int32_t
-gf_cli3_1_list_volume (call_frame_t *frame, xlator_t *this, void *data)
+gf_cli_list_volume (call_frame_t *frame, xlator_t *this, void *data)
{
int ret = -1;
gf_cli_req req = {{0,}};
@@ -5599,9 +7523,9 @@ gf_cli3_1_list_volume (call_frame_t *frame, xlator_t *this, void *data)
if (!frame || !this)
goto out;
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
+ ret = cli_cmd_submit (NULL, &req, frame, cli_rpc_prog,
GLUSTER_CLI_LIST_VOLUME, NULL,
- this, gf_cli3_1_list_volume_cbk,
+ this, gf_cli_list_volume_cbk,
(xdrproc_t)xdr_gf_cli_req);
out:
@@ -5610,7 +7534,7 @@ out:
}
int32_t
-gf_cli3_1_clearlocks_volume_cbk (struct rpc_req *req, struct iovec *iov,
+gf_cli_clearlocks_volume_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf_cli_rsp rsp = {0,};
@@ -5624,19 +7548,19 @@ gf_cli3_1_clearlocks_volume_cbk (struct rpc_req *req, struct iovec *iov,
ret = xdr_to_generic (*iov, &rsp,
(xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
-
- gf_log ("cli", GF_LOG_ERROR, "XDR decoding failed");
+ gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
goto out;
}
gf_log ("cli", GF_LOG_DEBUG, "Received response to clear-locks");
if (rsp.op_ret) {
- cli_out ("Volume clear-locks unsuccessful");
- cli_out ("%s", rsp.op_errstr);
+ cli_err ("Volume clear-locks unsuccessful");
+ cli_err ("%s", rsp.op_errstr);
} else {
if (!rsp.dict.dict_len) {
- cli_out ("Possibly no locks cleared");
+ cli_err ("Possibly no locks cleared");
ret = 0;
goto out;
}
@@ -5679,12 +7603,14 @@ gf_cli3_1_clearlocks_volume_cbk (struct rpc_req *req, struct iovec *iov,
ret = rsp.op_ret;
out:
+ if (dict)
+ dict_unref (dict);
cli_cmd_broadcast_response (ret);
return ret;
}
int32_t
-gf_cli3_1_clearlocks_volume (call_frame_t *frame, xlator_t *this,
+gf_cli_clearlocks_volume (call_frame_t *frame, xlator_t *this,
void *data)
{
gf_cli_req req = {{0,}};
@@ -5696,67 +7622,1153 @@ gf_cli3_1_clearlocks_volume (call_frame_t *frame, xlator_t *this,
options = data;
- ret = dict_allocate_and_serialize (options,
- &req.dict.dict_val,
- (size_t *)&req.dict.dict_len);
+ ret = cli_to_glusterd (&req, frame, gf_cli_clearlocks_volume_cbk,
+ (xdrproc_t) xdr_gf_cli_req, options,
+ GLUSTER_CLI_CLRLOCKS_VOLUME, this, cli_rpc_prog,
+ NULL);
+out:
+ gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
+
+ GF_FREE (req.dict.dict_val);
+ return ret;
+}
+
+int32_t
+cli_snapshot_remove_reply (gf_cli_rsp *rsp, dict_t *dict, call_frame_t *frame)
+{
+ int32_t ret = -1;
+ char *snap_name = NULL;
+
+ GF_ASSERT (rsp);
+ GF_ASSERT (dict);
+ GF_ASSERT (frame);
+
+ if (rsp->op_ret) {
+                cli_err ("snapshot delete: failed: %s",
+ rsp->op_errstr ? rsp->op_errstr :
+ "Please check log file for details");
+ ret = rsp->op_ret;
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "snapname", &snap_name);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to get snapname");
+ goto out;
+ }
+
+ cli_out ("snapshot delete: %s: snap removed successfully",
+ snap_name);
+ ret = 0;
+
+out:
+ return ret;
+}
+
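+/* Renders the reply of "snapshot config". The dict keys consumed below are
+ * "config-command", the optional "volname", "snap-max-hard-limit",
+ * "snap-max-soft-limit", "voldisplaycount" and the per-volume
+ * "volume<i>-*" variants of the limit keys.
+ */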
+int
+cli_snapshot_config_display (dict_t *dict, gf_cli_rsp *rsp)
+{
+ char buf[PATH_MAX] = "";
+ char *volname = NULL;
+ int ret = -1;
+ int config_command = 0;
+ uint64_t value = 0;
+ uint64_t hard_limit = 0;
+ uint64_t soft_limit = 0;
+ uint64_t i = 0;
+ uint64_t voldisplaycount = 0;
+
+ GF_ASSERT (dict);
+ GF_ASSERT (rsp);
+
+ if (rsp->op_ret) {
+ cli_err ("Snapshot Config : failed: %s",
+ rsp->op_errstr ? rsp->op_errstr :
+ "Please check log file for details");
+ ret = rsp->op_ret;
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, "config-command", &config_command);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Could not fetch config type");
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "volname", &volname);
+ /* Ignore the error, as volname is optional */
+
+ if (!volname) {
+ volname = "System";
+ }
+
+ ret = dict_get_uint64 (dict, "snap-max-hard-limit", &hard_limit);
+ /* Ignore the error, as the key specified is optional */
+ ret = dict_get_uint64 (dict, "snap-max-soft-limit", &soft_limit);
+
+ if (!hard_limit && !soft_limit
+ && config_command != GF_SNAP_CONFIG_DISPLAY) {
+ ret = -1;
+                gf_log (THIS->name, GF_LOG_ERROR,
+ "Could not fetch config-key");
+ goto out;
+ }
+
+ switch (config_command) {
+ case GF_SNAP_CONFIG_TYPE_SET:
+ if (hard_limit && soft_limit) {
+ cli_out ("snapshot config: snap-max-hard-limit "
+ "& snap-max-soft-limit for system set "
+ "successfully");
+ } else if (hard_limit) {
+                        cli_out ("snapshot config: snap-max-hard-limit "
+                                 "for %s set successfully", volname);
+ } else if (soft_limit) {
+                        cli_out ("snapshot config: snap-max-soft-limit "
+                                 "for %s set successfully", volname);
+ }
+ break;
+
+ case GF_SNAP_CONFIG_DISPLAY:
+ cli_out ("\nSnapshot System Configuration:");
+ ret = dict_get_uint64 (dict, "snap-max-hard-limit",
+ &value);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Could not fetch "
+                                "snap-max-hard-limit for %s", volname);
+ ret = -1;
+ goto out;
+ }
+ cli_out ("snap-max-hard-limit : %"PRIu64, value);
+
+ ret = dict_get_uint64 (dict, "snap-max-soft-limit",
+ &soft_limit);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Could not fetch "
+ "snap-max-soft-limit for %s", volname);
+ ret = -1;
+ goto out;
+ }
+ cli_out ("snap-max-soft-limit : %"PRIu64"%%\n",
+ soft_limit);
+
+ cli_out ("Snapshot Volume Configuration:");
+
+ ret = dict_get_uint64 (dict, "voldisplaycount",
+ &voldisplaycount);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR,
+ "Could not fetch voldisplaycount");
+ ret = -1;
+ goto out;
+ }
+
+ for (i = 0; i < voldisplaycount; i++) {
+ snprintf (buf, sizeof(buf), "volume%"PRIu64"-volname", i);
+ ret = dict_get_str (dict, buf, &volname);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Could not fetch "
+                                        "%s", buf);
+ ret = -1;
+ goto out;
+ }
+ cli_out ("\nVolume : %s", volname);
+
+ snprintf (buf, sizeof(buf),
+ "volume%"PRIu64"-snap-max-hard-limit", i);
+ ret = dict_get_uint64 (dict, buf, &value);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Could not fetch "
+                                        "%s", buf);
+ ret = -1;
+ goto out;
+ }
+ cli_out ("snap-max-hard-limit : %"PRIu64, value);
+
+ snprintf (buf, sizeof(buf),
+ "volume%"PRIu64"-active-hard-limit", i);
+ ret = dict_get_uint64 (dict, buf, &value);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Could not fetch"
+                                        " effective snap-max-hard-limit for "
+ "%s", volname);
+ ret = -1;
+ goto out;
+ }
+ cli_out ("Effective snap-max-hard-limit : %"PRIu64,
+ value);
+
+ snprintf (buf, sizeof(buf),
+ "volume%"PRIu64"-snap-max-soft-limit", i);
+ ret = dict_get_uint64 (dict, buf, &value);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Could not fetch "
+                                        "%s", buf);
+ ret = -1;
+ goto out;
+ }
+ cli_out ("Effective snap-max-soft-limit : %"PRIu64" "
+ "(%"PRIu64"%%)", value, soft_limit);
+ }
+ break;
+ default:
+ break;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
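+/* Illustrative shape of the GF_SNAP_CONFIG_DISPLAY output produced above
+ * (the values are made up for the example):
+ *
+ *   Snapshot System Configuration:
+ *   snap-max-hard-limit : 256
+ *   snap-max-soft-limit : 90%
+ *
+ *   Snapshot Volume Configuration:
+ *
+ *   Volume : vol0
+ *   snap-max-hard-limit : 256
+ *   Effective snap-max-hard-limit : 256
+ *   Effective snap-max-soft-limit : 230 (90%)
+ */
+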
+/* This function is used to print the volume-related information
+ * of a snap.
+ *
+ * arg - 0, dict : Response Dictionary.
+ * arg - 1, prefix str : snaplist.snap{0..}.vol{0..}.*
+ */
+int
+cli_get_each_volinfo_in_snap (dict_t *dict, char *keyprefix,
+                              gf_boolean_t snap_driven)
+{
+ char key[PATH_MAX] = "";
+ char *get_buffer = NULL;
+ int value = 0;
+ int ret = -1;
+ char indent[5] = "\t";
+ char *volname = NULL;
+
+ GF_ASSERT (dict);
+ GF_ASSERT (keyprefix);
+
+ if (snap_driven) {
+ ret = snprintf (key, sizeof (key), "%s.volname", keyprefix);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = dict_get_str (dict, key, &get_buffer);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to get %s", key);
+ goto out;
+ }
+ cli_out ("%s" INDENT_MAIN_HEAD "%s", indent,
+ "Snap Volume Name", ":", get_buffer);
+
+ ret = snprintf (key, sizeof (key),
+ "%s.origin-volname", keyprefix);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = dict_get_str (dict, key, &volname);
+ if (ret) {
+                        gf_log ("cli", GF_LOG_WARNING, "Failed to get %s", key);
+                        /* avoid passing NULL to the print below */
+                        volname = "N/A";
+                }
+ cli_out ("%s" INDENT_MAIN_HEAD "%s", indent,
+ "Origin Volume name", ":", volname);
+
+ ret = snprintf (key, sizeof (key), "%s.snapcount",
+ keyprefix);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, key, &value);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to get %s", key);
+ goto out;
+ }
+ cli_out ("%s%s %s %s %d", indent, "Snaps taken for",
+ volname, ":", value);
+
+ ret = snprintf (key, sizeof (key), "%s.snaps-available",
+ keyprefix);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, key, &value);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to get %s", key);
+ goto out;
+ }
+ cli_out ("%s%s %s %s %d", indent, "Snaps available for",
+ volname, ":", value);
+ }
+
+ ret = snprintf (key, sizeof (key), "%s.vol-status", keyprefix);
if (ret < 0) {
- gf_log ("cli", GF_LOG_ERROR,
- "failed to serialize the data");
+ goto out;
+ }
+ ret = dict_get_str (dict, key, &get_buffer);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to get %s", key);
goto out;
}
+ cli_out ("%s" INDENT_MAIN_HEAD "%s", indent, "Status",
+ ":", get_buffer);
+out:
+ return ret;
+}
- ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
- GLUSTER_CLI_CLRLOCKS_VOLUME, NULL,
- this, gf_cli3_1_clearlocks_volume_cbk,
- (xdrproc_t)xdr_gf_cli_req);
+/* This function is used to print snap-related information
+ * arg - 0, dict : Response dictionary.
+ * arg - 1, prefix_str : snaplist.snap{0..}.*
+ */
+int
+cli_get_volinfo_in_snap (dict_t *dict, char *keyprefix)
+{
+
+ char key[PATH_MAX] = "";
+ int i = 0;
+ int volcount = 0;
+ int ret = -1;
+
+ GF_ASSERT (dict);
+ GF_ASSERT (keyprefix);
+
+ ret = snprintf (key, sizeof (key), "%s.vol-count", keyprefix);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, key, &volcount);
+ for (i = 1 ; i <= volcount ; i++) {
+ ret = snprintf (key, sizeof (key),
+ "%s.vol%d", keyprefix, i);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = cli_get_each_volinfo_in_snap (dict, key, _gf_true);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Could not list "
+ "details of volume in a snap");
+ goto out;
+ }
+ cli_out (" ");
+ }
+
+out:
+ return ret;
+}
+
+int
+cli_get_each_snap_info (dict_t *dict, char *prefix_str,
+                        gf_boolean_t snap_driven)
+{
+ char key_buffer[PATH_MAX] = "";
+ char *get_buffer = NULL;
+ int ret = -1;
+ char indent[5] = "";
+
+ GF_ASSERT (dict);
+ GF_ASSERT (prefix_str);
+
+ if (!snap_driven)
+ strcat (indent, "\t");
+
+ ret = snprintf (key_buffer, sizeof (key_buffer), "%s.snapname",
+ prefix_str);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = dict_get_str (dict, key_buffer, &get_buffer);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Unable to fetch snapname %s ",
+ key_buffer);
+ goto out;
+ }
+ cli_out ("%s" INDENT_MAIN_HEAD "%s", indent, "Snapshot",
+ ":", get_buffer);
+
+ ret = snprintf (key_buffer, sizeof (key_buffer), "%s.snap-id",
+ prefix_str);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = dict_get_str (dict, key_buffer, &get_buffer);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Unable to fetch snap-id %s ",
+ key_buffer);
+ goto out;
+ }
+ cli_out ("%s" INDENT_MAIN_HEAD "%s", indent, "Snap UUID",
+ ":", get_buffer);
+
+ ret = snprintf (key_buffer, sizeof (key_buffer), "%s.snap-desc",
+ prefix_str);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = dict_get_str (dict, key_buffer, &get_buffer);
+ if (!ret) {
+ /* Ignore error for description */
+ cli_out ("%s" INDENT_MAIN_HEAD "%s", indent,
+ "Description", ":", get_buffer);
+ }
+
+ ret = snprintf (key_buffer, sizeof (key_buffer), "%s.snap-time",
+ prefix_str);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = dict_get_str (dict, key_buffer, &get_buffer);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Unable to fetch snap-time %s ",
+ prefix_str);
+ goto out;
+ }
+ cli_out ("%s" INDENT_MAIN_HEAD "%s", indent, "Created",
+ ":", get_buffer);
+
+ if (snap_driven) {
+ cli_out ("%-12s", "Snap Volumes:\n");
+ ret = cli_get_volinfo_in_snap (dict, prefix_str);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Unable to list details "
+ "of the snaps");
+ goto out;
+ }
+ }
+out:
+ return ret;
+}
+
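+/* For reference, a snap-driven entry printed by cli_get_each_snap_info
+ * looks roughly like this (field values are examples only):
+ *
+ *   Snapshot                  : snap1
+ *   Snap UUID                 : <uuid>
+ *   Description               : <optional text>
+ *   Created                   : <timestamp>
+ *   Snap Volumes:
+ *       ... per-volume details ...
+ */
+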
+/* This is a generic function to print snap related information.
+ * arg - 0, dict : Response Dictionary
+ */
+int
+cli_call_snapshot_info (dict_t *dict, gf_boolean_t bool_snap_driven)
+{
+ int snap_count = 0;
+ char key[PATH_MAX] = "";
+ int ret = -1;
+ int i = 0;
+
+ GF_ASSERT (dict);
+
+ ret = dict_get_int32 (dict, "snap-count", &snap_count);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Unable to get snap-count");
+ goto out;
+ }
+
+ if (snap_count == 0) {
+ cli_out ("No snapshots present");
+ }
+
+ for (i = 1 ; i <= snap_count ; i++) {
+ ret = snprintf (key, sizeof (key), "snap%d", i);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = cli_get_each_snap_info (dict, key, bool_snap_driven);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR,
+ "Unable to print snap details");
+ goto out;
+ }
+ }
+out:
+ return ret;
+}
+
+int
+cli_get_snaps_in_volume (dict_t *dict)
+{
+ int ret = -1;
+ int i = 0;
+        int snap_avail = 0;
+        int snap_count = 0;
+ char key[PATH_MAX] = "";
+ char *get_buffer = NULL;
+
+ GF_ASSERT (dict);
+
+ ret = dict_get_str (dict, "origin-volname", &get_buffer);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Could not fetch origin-volname");
+ goto out;
+ }
+ cli_out (INDENT_MAIN_HEAD "%s", "Volume Name", ":", get_buffer);
+
+        ret = dict_get_int32 (dict, "snap-count", &snap_count);
+        if (ret) {
+                gf_log ("cli", GF_LOG_ERROR, "Could not fetch snap-count");
+                goto out;
+        }
+        cli_out (INDENT_MAIN_HEAD "%d", "Snaps Taken", ":", snap_count);
+
+        ret = dict_get_int32 (dict, "snaps-available", &snap_avail);
+        if (ret) {
+                gf_log ("cli", GF_LOG_ERROR, "Could not fetch snaps-available");
+                goto out;
+        }
+        cli_out (INDENT_MAIN_HEAD "%d", "Snaps Available", ":", snap_avail);
+
+        for (i = 1 ; i <= snap_count ; i++) {
+ snprintf (key, sizeof (key), "snap%d", i);
+ ret = cli_get_each_snap_info (dict, key, _gf_false);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR,
+ "Unable to print snap details");
+ goto out;
+ }
+
+ ret = snprintf (key, sizeof (key), "snap%d.vol1", i);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = cli_get_each_volinfo_in_snap (dict, key, _gf_false);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Could not get volume "
+ "related information");
+ goto out;
+ }
+
+ cli_out (" ");
+ }
+out:
+ return ret;
+}
+
+int
+cli_snapshot_list (dict_t *dict)
+{
+ int snapcount = 0;
+ char key[PATH_MAX] = "";
+ int ret = -1;
+ int i = 0;
+ char *get_buffer = NULL;
+
+ GF_ASSERT (dict);
+
+ ret = dict_get_int32 (dict, "snap-count", &snapcount);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Could not fetch snap count");
+ goto out;
+ }
+
+ if (snapcount == 0) {
+ cli_out ("No snapshots present");
+ }
+
+ for (i = 1 ; i <= snapcount ; i++) {
+ ret = snprintf (key, sizeof (key), "snapname%d", i);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = dict_get_str (dict, key, &get_buffer);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Could not get %s ", key);
+ goto out;
+ } else {
+ cli_out ("%s", get_buffer);
+ }
+ }
+out:
+ return ret;
+}
+
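+/* Prints brick-level status for one snap volume. Keys consumed below, all
+ * relative to key_prefix: ".brickcount" and, per brick i, ".brick<i>.path",
+ * ".brick<i>.vgname", ".brick<i>.status", ".brick<i>.pid", ".brick<i>.data"
+ * and ".brick<i>.lvsize"; missing values print as "N/A".
+ */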
+int
+cli_get_snap_volume_status (dict_t *dict, char *key_prefix)
+{
+ int ret = -1;
+ char key[PATH_MAX] = "";
+ char *buffer = NULL;
+ int brickcount = 0;
+ int i = 0;
+ int pid = 0;
+
+ GF_ASSERT (dict);
+ GF_ASSERT (key_prefix);
+
+ ret = snprintf (key, sizeof (key), "%s.brickcount", key_prefix);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = dict_get_int32 (dict, key, &brickcount);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to fetch brickcount");
+ goto out;
+ }
+
+ for (i = 0 ; i < brickcount ; i++) {
+ ret = snprintf (key, sizeof (key), "%s.brick%d.path",
+ key_prefix, i);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = dict_get_str (dict, key, &buffer);
+ if (ret) {
+ gf_log ("cli", GF_LOG_INFO,
+ "Unable to get Brick Path");
+ continue;
+ }
+ cli_out ("\n\t%-17s %s %s", "Brick Path", ":", buffer);
+
+ ret = snprintf (key, sizeof (key), "%s.brick%d.vgname",
+ key_prefix, i);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = dict_get_str (dict, key, &buffer);
+ if (ret) {
+ gf_log ("cli", GF_LOG_INFO,
+ "Unable to get Volume Group");
+ cli_out ("\t%-17s %s %s", "Volume Group", ":",
+ "N/A");
+ } else
+ cli_out ("\t%-17s %s %s", "Volume Group", ":",
+ buffer);
+
+ ret = snprintf (key, sizeof (key), "%s.brick%d.status",
+ key_prefix, i);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = dict_get_str (dict, key, &buffer);
+ if (ret) {
+ gf_log ("cli", GF_LOG_INFO,
+ "Unable to get Brick Running");
+ cli_out ("\t%-17s %s %s", "Brick Running", ":",
+ "N/A");
+ } else
+ cli_out ("\t%-17s %s %s", "Brick Running", ":",
+ buffer);
+
+ ret = snprintf (key, sizeof (key), "%s.brick%d.pid",
+ key_prefix, i);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, key, &pid);
+ if (ret) {
+ gf_log ("cli", GF_LOG_INFO,
+ "Unable to get pid");
+ cli_out ("\t%-17s %s %s", "Brick PID", ":", "N/A");
+ } else
+ cli_out ("\t%-17s %s %d", "Brick PID", ":", pid);
+
+ ret = snprintf (key, sizeof (key), "%s.brick%d.data",
+ key_prefix, i);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = dict_get_str (dict, key, &buffer);
+ if (ret) {
+ gf_log ("cli", GF_LOG_INFO,
+ "Unable to get Data Percent");
+ cli_out ("\t%-17s %s %s", "Data Percentage", ":",
+ "N/A");
+ } else
+ cli_out ("\t%-17s %s %s", "Data Percentage", ":",
+ buffer);
+
+ ret = snprintf (key, sizeof (key), "%s.brick%d.lvsize",
+ key_prefix, i);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = dict_get_str (dict, key, &buffer);
+ if (ret) {
+ gf_log ("cli", GF_LOG_INFO, "Unable to get LV Size");
+ cli_out ("\t%-17s %s %s", "LV Size", ":", "N/A");
+ } else
+ cli_out ("\t%-17s %s %s", "LV Size", ":", buffer);
+
+ }
+out:
+ return ret;
+}
+
+int
+cli_get_single_snap_status (dict_t *dict, char *keyprefix)
+{
+ int ret = -1;
+ char key[PATH_MAX] = "";
+ int i = 0;
+ int volcount = 0;
+ char *get_buffer = NULL;
+
+ GF_ASSERT (dict);
+ GF_ASSERT (keyprefix);
+
+ ret = snprintf (key, sizeof (key), "%s.snapname", keyprefix);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = dict_get_str (dict, key, &get_buffer);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Unable to get snapname");
+ goto out;
+ }
+ cli_out ("\nSnap Name : %s", get_buffer);
+
+ ret = snprintf (key, sizeof (key), "%s.uuid", keyprefix);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = dict_get_str (dict, key, &get_buffer);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Unable to get snap UUID");
+ goto out;
+ }
+ cli_out ("Snap UUID : %s", get_buffer);
+
+ ret = snprintf (key, sizeof (key), "%s.volcount", keyprefix);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, key, &volcount);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Unable to get volume count");
+ goto out;
+ }
+ for (i = 0 ; i < volcount ; i++) {
+ ret = snprintf (key, sizeof (key), "%s.vol%d", keyprefix, i);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = cli_get_snap_volume_status (dict, key);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR,
+ "Could not get snap volume status");
+ goto out;
+ }
+ }
+out:
+ return ret;
+}
+
+int
+cli_snap_status_all (dict_t *dict)
+{
+ int ret = -1;
+ char key[PATH_MAX] = "";
+ int snapcount = 0;
+ int i = 0;
+
+ GF_ASSERT (dict);
+
+ ret = dict_get_int32 (dict, "status.snapcount", &snapcount);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Could not get snapcount");
+ goto out;
+ }
+
+ if (snapcount == 0) {
+ cli_out ("No snapshots present");
+ }
+
+ for (i = 0 ; i < snapcount; i++) {
+                ret = snprintf (key, sizeof (key), "status.snap%d", i);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = cli_get_single_snap_status (dict, key);
+ }
+out:
+ return ret;
+}
+
+int
+cli_snapshot_status_display (dict_t *dict, gf_cli_rsp *rsp)
+{
+ char key[PATH_MAX] = "";
+ int ret = -1;
+ int status_cmd = -1;
+
+ GF_ASSERT (dict);
+ GF_ASSERT (rsp);
+
+ if (rsp->op_ret) {
+ cli_err ("Snapshot Status : failed: %s",
+ rsp->op_errstr ? rsp->op_errstr :
+ "Please check log file for details");
+ ret = rsp->op_ret;
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, "cmd", &status_cmd);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Could not fetch status type");
+ goto out;
+ }
+ switch (status_cmd) {
+ case GF_SNAP_STATUS_TYPE_ALL:
+ {
+ ret = cli_snap_status_all (dict);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Could not fetch "
+                               "status of all snaps");
+ goto out;
+ }
+ break;
+ }
+
+ case GF_SNAP_STATUS_TYPE_SNAP:
+ {
+ ret = snprintf (key, sizeof (key), "status.snap0");
+ if (ret < 0) {
+ goto out;
+ }
+ ret = cli_get_single_snap_status (dict, key);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Could not fetch "
+ "status of snap");
+ goto out;
+ }
+ break;
+ }
+
+ case GF_SNAP_STATUS_TYPE_VOL:
+ {
+ ret = cli_snap_status_all (dict);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Could not fetch "
+                                "status of snaps in a volume");
+ goto out;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+out:
+ return ret;
+}
+
+int
+gf_cli_snapshot_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ int ret = -1;
+ gf_cli_rsp rsp = {0, };
+ dict_t *dict = NULL;
+ char *snap_name = NULL;
+ int32_t type = 0;
+ call_frame_t *frame = NULL;
+ gf_boolean_t snap_driven = _gf_false;
+
+ if (req->rpc_status == -1) {
+ ret = -1;
+ goto out;
+ }
+
+ frame = myframe;
+
+ ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Failed to decode xdr response");
+ goto out;
+ }
+
+ dict = dict_new ();
+
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &dict);
+
+ if (ret)
+ goto out;
+
+ ret = dict_get_int32 (dict, "type", &type);
+ if (ret) {
+ gf_log (frame->this->name, GF_LOG_ERROR, "failed to get type");
+ goto out;
+ }
+
+ switch (type) {
+ case GF_SNAP_OPTION_TYPE_CREATE:
+ if (rsp.op_ret) {
+                        cli_err ("snapshot create: failed: %s",
+ rsp.op_errstr ? rsp.op_errstr :
+ "Please check log file for details");
+ ret = rsp.op_ret;
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "snapname", &snap_name);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR,
+ "Failed to get snap name");
+ goto out;
+ }
+ cli_out ("snapshot create: %s: snap created successfully",
+ snap_name);
+ break;
+
+ case GF_SNAP_OPTION_TYPE_RESTORE:
+ /* TODO: Check if rsp.op_ret needs to be checked here. Or is
+ * it ok to check this in the start of the function where we
+ * get rsp.*/
+ if (rsp.op_ret) {
+                        cli_err ("snapshot restore: failed: %s",
+ rsp.op_errstr ? rsp.op_errstr :
+ "Please check log file for details");
+ ret = rsp.op_ret;
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "snapname", &snap_name);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR,
+ "Failed to get snap name");
+ goto out;
+ }
+
+ cli_out ("Snapshot restore: %s: Snap restored "
+ "successfully", snap_name);
+
+ ret = 0;
+ break;
+
+ case GF_SNAP_OPTION_TYPE_INFO:
+ if (rsp.op_ret) {
+ cli_err ("Snapshot info : failed: %s",
+ rsp.op_errstr ? rsp.op_errstr :
+ "Please check log file for details");
+ ret = rsp.op_ret;
+ goto out;
+ }
+
+ snap_driven = dict_get_str_boolean (dict, "snap-driven",
+ _gf_false);
+ if (snap_driven == _gf_true) {
+ ret = cli_call_snapshot_info (dict, snap_driven);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR,
+ "Snapshot info failed");
+ goto out;
+ }
+ } else if (snap_driven == _gf_false) {
+ ret = cli_get_snaps_in_volume (dict);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR,
+ "Snapshot info failed");
+ goto out;
+ }
+ }
+ break;
+
+ case GF_SNAP_OPTION_TYPE_CONFIG:
+ ret = cli_snapshot_config_display (dict, &rsp);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to display "
+ "snapshot config output.");
+ goto out;
+ }
+ break;
+
+ case GF_SNAP_OPTION_TYPE_LIST:
+ if (rsp.op_ret) {
+ cli_err ("Snapshot list : failed: %s",
+ rsp.op_errstr ? rsp.op_errstr :
+ "Please check log file for details");
+ ret = rsp.op_ret;
+ goto out;
+ }
+
+ ret = cli_snapshot_list (dict);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to display "
+ "snapshot list");
+ goto out;
+ }
+ break;
+
+ case GF_SNAP_OPTION_TYPE_DELETE:
+ ret = cli_snapshot_remove_reply (&rsp, dict, frame);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR,
+ "Failed to delete snap");
+ goto out;
+ }
+ break;
+
+ case GF_SNAP_OPTION_TYPE_STATUS:
+ ret = cli_snapshot_status_display (dict, &rsp);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to display "
+ "snapshot status output.");
+ goto out;
+ }
+ break;
+
+ default:
+ cli_err ("Unknown command executed");
+ ret = -1;
+ goto out;
+ }
+out:
+ if (dict)
+ dict_unref (dict);
+ cli_cmd_broadcast_response (ret);
+
+ free (rsp.dict.dict_val);
+ free (rsp.op_errstr);
+
+ return ret;
+}
+
+int32_t
+gf_cli_snapshot (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gf_cli_req req = {{0,}};
+ dict_t *options = NULL;
+ int ret = -1;
+
+ if (!frame || !this || !data)
+ goto out;
+
+ options = data;
+
+ ret = cli_to_glusterd (&req, frame, gf_cli_snapshot_cbk,
+ (xdrproc_t) xdr_gf_cli_req, options,
+ GLUSTER_CLI_SNAP, this, cli_rpc_prog,
+ NULL);
out:
- if (options)
- dict_destroy (options);
gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
- if (req.dict.dict_val)
- GF_FREE (req.dict.dict_val);
+ GF_FREE (req.dict.dict_val);
+ return ret;
+}
+
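+/* Common request path for the rewired CLI ops above: rebuilds the original
+ * command line from frame->local->words into the "cmd-str" dict key
+ * (presumably so glusterd can log the exact command), serializes the dict
+ * into req->dict and submits the RPC with the supplied callback and xdrproc.
+ */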
+int
+cli_to_glusterd (gf_cli_req *req, call_frame_t *frame,
+ fop_cbk_fn_t cbkfn, xdrproc_t xdrproc, dict_t *dict,
+ int procnum, xlator_t *this, rpc_clnt_prog_t *prog,
+ struct iobref *iobref)
+{
+ int ret = 0;
+ size_t len = 0;
+ char *cmd = NULL;
+ int i = 0;
+ const char **words = NULL;
+ cli_local_t *local = NULL;
+
+ if (!this || !frame || !dict) {
+ ret = -1;
+ goto out;
+ }
+
+ if (!frame->local) {
+ ret = -1;
+ goto out;
+ }
+
+ local = frame->local;
+
+ if (!local->words) {
+ ret = -1;
+ goto out;
+ }
+
+ words = local->words;
+
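+        /* Total length of the joined command line: each word plus one byte
+         * for the separating space (the last one doubles as the NUL). */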
+ while (words[i])
+ len += strlen (words[i++]) + 1;
+
+ cmd = GF_CALLOC (1, len, gf_common_mt_char);
+
+ if (!cmd) {
+ ret = -1;
+ goto out;
+ }
+
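+        /* Re-join the words with single spaces into the buffer sized above. */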
+ for (i = 0; words[i]; i++) {
+ strncat (cmd, words[i], strlen (words[i]));
+ if (words[i+1] != NULL)
+ strncat (cmd, " ", strlen (" "));
+ }
+
+        cmd[len - 1] = '\0';
+
+ ret = dict_set_dynstr (dict, "cmd-str", cmd);
+ if (ret)
+ goto out;
+
+ ret = dict_allocate_and_serialize (dict, &(req->dict).dict_val,
+ &(req->dict).dict_len);
+
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to get serialized length of dict");
+ goto out;
+ }
+
+ ret = cli_cmd_submit (NULL, req, frame, prog, procnum, iobref, this,
+ cbkfn, (xdrproc_t) xdrproc);
+
+out:
return ret;
}
struct rpc_clnt_procedure gluster_cli_actors[GLUSTER_CLI_MAXVALUE] = {
[GLUSTER_CLI_NULL] = {"NULL", NULL },
- [GLUSTER_CLI_PROBE] = {"PROBE_QUERY", gf_cli3_1_probe},
- [GLUSTER_CLI_DEPROBE] = {"DEPROBE_QUERY", gf_cli3_1_deprobe},
- [GLUSTER_CLI_LIST_FRIENDS] = {"LIST_FRIENDS", gf_cli3_1_list_friends},
- [GLUSTER_CLI_CREATE_VOLUME] = {"CREATE_VOLUME", gf_cli3_1_create_volume},
- [GLUSTER_CLI_DELETE_VOLUME] = {"DELETE_VOLUME", gf_cli3_1_delete_volume},
- [GLUSTER_CLI_START_VOLUME] = {"START_VOLUME", gf_cli3_1_start_volume},
- [GLUSTER_CLI_STOP_VOLUME] = {"STOP_VOLUME", gf_cli3_1_stop_volume},
- [GLUSTER_CLI_RENAME_VOLUME] = {"RENAME_VOLUME", gf_cli3_1_rename_volume},
- [GLUSTER_CLI_DEFRAG_VOLUME] = {"DEFRAG_VOLUME", gf_cli3_1_defrag_volume},
- [GLUSTER_CLI_GET_VOLUME] = {"GET_VOLUME", gf_cli3_1_get_volume},
- [GLUSTER_CLI_GET_NEXT_VOLUME] = {"GET_NEXT_VOLUME", gf_cli3_1_get_next_volume},
- [GLUSTER_CLI_SET_VOLUME] = {"SET_VOLUME", gf_cli3_1_set_volume},
- [GLUSTER_CLI_ADD_BRICK] = {"ADD_BRICK", gf_cli3_1_add_brick},
- [GLUSTER_CLI_REMOVE_BRICK] = {"REMOVE_BRICK", gf_cli3_1_remove_brick},
- [GLUSTER_CLI_REPLACE_BRICK] = {"REPLACE_BRICK", gf_cli3_1_replace_brick},
- [GLUSTER_CLI_LOG_ROTATE] = {"LOG ROTATE", gf_cli3_1_log_rotate},
- [GLUSTER_CLI_GETSPEC] = {"GETSPEC", gf_cli3_1_getspec},
- [GLUSTER_CLI_PMAP_PORTBYBRICK] = {"PMAP PORTBYBRICK", gf_cli3_1_pmap_b2p},
- [GLUSTER_CLI_SYNC_VOLUME] = {"SYNC_VOLUME", gf_cli3_1_sync_volume},
- [GLUSTER_CLI_RESET_VOLUME] = {"RESET_VOLUME", gf_cli3_1_reset_volume},
- [GLUSTER_CLI_FSM_LOG] = {"FSM_LOG", gf_cli3_1_fsm_log},
- [GLUSTER_CLI_GSYNC_SET] = {"GSYNC_SET", gf_cli3_1_gsync_set},
- [GLUSTER_CLI_PROFILE_VOLUME] = {"PROFILE_VOLUME", gf_cli3_1_profile_volume},
- [GLUSTER_CLI_QUOTA] = {"QUOTA", gf_cli3_1_quota},
- [GLUSTER_CLI_TOP_VOLUME] = {"TOP_VOLUME", gf_cli3_1_top_volume},
- [GLUSTER_CLI_GETWD] = {"GETWD", gf_cli3_1_getwd},
- [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME", gf_cli3_1_status_volume},
+ [GLUSTER_CLI_PROBE] = {"PROBE_QUERY", gf_cli_probe},
+ [GLUSTER_CLI_DEPROBE] = {"DEPROBE_QUERY", gf_cli_deprobe},
+ [GLUSTER_CLI_LIST_FRIENDS] = {"LIST_FRIENDS", gf_cli_list_friends},
+ [GLUSTER_CLI_UUID_RESET] = {"UUID_RESET", gf_cli3_1_uuid_reset},
+ [GLUSTER_CLI_UUID_GET] = {"UUID_GET", gf_cli3_1_uuid_get},
+ [GLUSTER_CLI_CREATE_VOLUME] = {"CREATE_VOLUME", gf_cli_create_volume},
+ [GLUSTER_CLI_DELETE_VOLUME] = {"DELETE_VOLUME", gf_cli_delete_volume},
+ [GLUSTER_CLI_START_VOLUME] = {"START_VOLUME", gf_cli_start_volume},
+ [GLUSTER_CLI_STOP_VOLUME] = {"STOP_VOLUME", gf_cli_stop_volume},
+ [GLUSTER_CLI_RENAME_VOLUME] = {"RENAME_VOLUME", gf_cli_rename_volume},
+ [GLUSTER_CLI_DEFRAG_VOLUME] = {"DEFRAG_VOLUME", gf_cli_defrag_volume},
+ [GLUSTER_CLI_GET_VOLUME] = {"GET_VOLUME", gf_cli_get_volume},
+ [GLUSTER_CLI_GET_NEXT_VOLUME] = {"GET_NEXT_VOLUME", gf_cli_get_next_volume},
+ [GLUSTER_CLI_SET_VOLUME] = {"SET_VOLUME", gf_cli_set_volume},
+ [GLUSTER_CLI_ADD_BRICK] = {"ADD_BRICK", gf_cli_add_brick},
+ [GLUSTER_CLI_REMOVE_BRICK] = {"REMOVE_BRICK", gf_cli_remove_brick},
+ [GLUSTER_CLI_REPLACE_BRICK] = {"REPLACE_BRICK", gf_cli_replace_brick},
+ [GLUSTER_CLI_LOG_ROTATE] = {"LOG ROTATE", gf_cli_log_rotate},
+ [GLUSTER_CLI_GETSPEC] = {"GETSPEC", gf_cli_getspec},
+ [GLUSTER_CLI_PMAP_PORTBYBRICK] = {"PMAP PORTBYBRICK", gf_cli_pmap_b2p},
+ [GLUSTER_CLI_SYNC_VOLUME] = {"SYNC_VOLUME", gf_cli_sync_volume},
+ [GLUSTER_CLI_RESET_VOLUME] = {"RESET_VOLUME", gf_cli_reset_volume},
+ [GLUSTER_CLI_FSM_LOG] = {"FSM_LOG", gf_cli_fsm_log},
+ [GLUSTER_CLI_GSYNC_SET] = {"GSYNC_SET", gf_cli_gsync_set},
+ [GLUSTER_CLI_PROFILE_VOLUME] = {"PROFILE_VOLUME", gf_cli_profile_volume},
+ [GLUSTER_CLI_QUOTA] = {"QUOTA", gf_cli_quota},
+ [GLUSTER_CLI_TOP_VOLUME] = {"TOP_VOLUME", gf_cli_top_volume},
+ [GLUSTER_CLI_GETWD] = {"GETWD", gf_cli_getwd},
+ [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME", gf_cli_status_volume},
[GLUSTER_CLI_STATUS_ALL] = {"STATUS_ALL", gf_cli_status_volume_all},
- [GLUSTER_CLI_MOUNT] = {"MOUNT", gf_cli3_1_mount},
- [GLUSTER_CLI_UMOUNT] = {"UMOUNT", gf_cli3_1_umount},
- [GLUSTER_CLI_HEAL_VOLUME] = {"HEAL_VOLUME", gf_cli3_1_heal_volume},
- [GLUSTER_CLI_STATEDUMP_VOLUME] = {"STATEDUMP_VOLUME", gf_cli3_1_statedump_volume},
- [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", gf_cli3_1_list_volume},
- [GLUSTER_CLI_CLRLOCKS_VOLUME] = {"CLEARLOCKS_VOLUME", gf_cli3_1_clearlocks_volume},
+ [GLUSTER_CLI_MOUNT] = {"MOUNT", gf_cli_mount},
+ [GLUSTER_CLI_UMOUNT] = {"UMOUNT", gf_cli_umount},
+ [GLUSTER_CLI_HEAL_VOLUME] = {"HEAL_VOLUME", gf_cli_heal_volume},
+ [GLUSTER_CLI_STATEDUMP_VOLUME] = {"STATEDUMP_VOLUME", gf_cli_statedump_volume},
+ [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", gf_cli_list_volume},
+ [GLUSTER_CLI_CLRLOCKS_VOLUME] = {"CLEARLOCKS_VOLUME", gf_cli_clearlocks_volume},
+ [GLUSTER_CLI_COPY_FILE] = {"COPY_FILE", gf_cli_copy_file},
+ [GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", gf_cli_sys_exec},
+ [GLUSTER_CLI_SNAP] = {"SNAP", gf_cli_snapshot},
};
struct rpc_clnt_program cli_prog = {
@@ -5766,3 +8778,17 @@ struct rpc_clnt_program cli_prog = {
.numproc = GLUSTER_CLI_MAXVALUE,
.proctable = gluster_cli_actors,
};
+
+struct rpc_clnt_procedure cli_quotad_procs[GF_AGGREGATOR_MAXVALUE] = {
+ [GF_AGGREGATOR_NULL] = {"NULL", NULL},
+ [GF_AGGREGATOR_LOOKUP] = {"LOOKUP", NULL},
+ [GF_AGGREGATOR_GETLIMIT] = {"GETLIMIT", cli_quotad_getlimit},
+};
+
+struct rpc_clnt_program cli_quotad_clnt = {
+ .progname = "CLI Quotad client",
+ .prognum = GLUSTER_AGGREGATOR_PROGRAM,
+ .progver = GLUSTER_AGGREGATOR_VERSION,
+ .numproc = GF_AGGREGATOR_MAXVALUE,
+ .proctable = cli_quotad_procs,
+};