Diffstat (limited to 'cli/src/cli-cmd-volume.c')
| -rw-r--r-- | cli/src/cli-cmd-volume.c | 1007 |
1 file changed, 775 insertions, 232 deletions
diff --git a/cli/src/cli-cmd-volume.c b/cli/src/cli-cmd-volume.c index 6ab1515e3..53c94c687 100644 --- a/cli/src/cli-cmd-volume.c +++ b/cli/src/cli-cmd-volume.c @@ -1,22 +1,12 @@ /* - Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com> - This file is part of GlusterFS. - - GlusterFS is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published - by the Free Software Foundation; either version 3 of the License, - or (at your option) any later version. - - GlusterFS is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see - <http://www.gnu.org/licenses/>. -*/ + Copyright (c) 2010-2012 Red Hat, Inc. <http://www.redhat.com> + This file is part of GlusterFS. + This file is licensed to you under your choice of the GNU Lesser + General Public License, version 3 or any later version (LGPLv3 or + later), or the GNU General Public License, version 2 (GPLv2), in all + cases as published by the Free Software Foundation. +*/ #include <stdio.h> #include <string.h> #include <stdlib.h> @@ -38,10 +28,13 @@ #include "cli-mem-types.h" #include "cli1-xdr.h" #include "run.h" +#include "syscall.h" extern struct rpc_clnt *global_rpc; +extern struct rpc_clnt *global_quotad_rpc; extern rpc_clnt_prog_t *cli_rpc_prog; +extern rpc_clnt_prog_t cli_quotad_clnt; int cli_cmd_volume_help_cbk (struct cli_state *state, struct cli_cmd_word *in_word, @@ -105,8 +98,7 @@ out: cli_out ("Getting Volume information failed!"); } - if (frame) - FRAME_DESTROY (frame); + CLI_STACK_DESTROY (frame); return ret; @@ -122,6 +114,12 @@ cli_cmd_sync_volume_cbk (struct cli_state *state, struct cli_cmd_word *word, int sent = 0; int parse_error = 0; dict_t *dict = NULL; + cli_local_t *local = NULL; + gf_answer_t answer = GF_ANSWER_NO; + const char *question = "Sync volume may make data " + "inaccessible while the sync " + "is in progress. 
Do you want " + "to continue?"; if ((wordcount < 3) || (wordcount > 4)) { cli_usage_out (word->pattern); @@ -156,12 +154,22 @@ cli_cmd_sync_volume_cbk (struct cli_state *state, struct cli_cmd_word *word, goto out; } + if (!(state->mode & GLUSTER_MODE_SCRIPT)) { + answer = cli_cmd_get_confirmation (state, question); + if (GF_ANSWER_NO == answer) { + ret = 0; + goto out; + } + } + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_SYNC_VOLUME]; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; + CLI_LOCAL_INIT (local, words, frame, dict); + if (proc->fn) { ret = proc->fn (frame, THIS, dict); } @@ -173,8 +181,8 @@ out: cli_out ("Volume sync failed"); } - if (dict) - dict_unref (dict); + CLI_STACK_DESTROY (frame); + return ret; } @@ -315,13 +323,11 @@ found_bad_brick_order: out: ai_list_tmp2 = NULL; i = 0; - if (brick_list_dup) - GF_FREE (brick_list_dup); + GF_FREE (brick_list_dup); list_for_each_entry (ai_list_tmp1, &ai_list->list, list) { if (ai_list_tmp1->info) freeaddrinfo (ai_list_tmp1->info); - if (ai_list_tmp2) - free (ai_list_tmp2); + free (ai_list_tmp2); ai_list_tmp2 = ai_list_tmp1; } free (ai_list_tmp2); @@ -342,7 +348,12 @@ cli_cmd_volume_create_cbk (struct cli_state *state, struct cli_cmd_word *word, int32_t brick_count = 0; int32_t sub_count = 0; int32_t type = GF_CLUSTER_TYPE_NONE; - + cli_local_t *local = NULL; + char *trans_type = NULL; + char *question = "RDMA transport is" + " recommended only for testing purposes" + " in this release. Do you want to continue?"; + gf_answer_t answer = GF_ANSWER_NO; proc = &cli_rpc_prog->proctable[GLUSTER_CLI_CREATE_VOLUME]; @@ -388,19 +399,47 @@ cli_cmd_volume_create_cbk (struct cli_state *state, struct cli_cmd_word *word, goto out; } } + + + ret = dict_get_str (options, "transport", &trans_type); + if (ret) { + gf_log("cli", GF_LOG_ERROR, "Unable to get transport type"); + goto out; + } + + if (strcasestr (trans_type, "rdma")) { + answer = + cli_cmd_get_confirmation (state, question); + if (GF_ANSWER_NO == answer) { + ret = 0; + goto out; + } + } + + if (state->mode & GLUSTER_MODE_WIGNORE) { + ret = dict_set_int32 (options, "force", _gf_true); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Failed to set force " + "option"); + goto out; + } + } + + CLI_LOCAL_INIT (local, words, frame, options); + if (proc->fn) { ret = proc->fn (frame, THIS, options); } out: - if (options) - dict_unref (options); if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume create failed"); } + CLI_STACK_DESTROY (frame); + return ret; } @@ -417,6 +456,8 @@ cli_cmd_volume_delete_cbk (struct cli_state *state, struct cli_cmd_word *word, const char *question = NULL; int sent = 0; int parse_error = 0; + cli_local_t *local = NULL; + dict_t *dict = NULL; question = "Deleting volume will erase all information about the volume. 
" "Do you want to continue?"; @@ -426,6 +467,10 @@ cli_cmd_volume_delete_cbk (struct cli_state *state, struct cli_cmd_word *word, if (!frame) goto out; + dict = dict_new (); + if (!dict) + goto out; + if (wordcount != 3) { cli_usage_out (word->pattern); parse_error = 1; @@ -441,8 +486,17 @@ cli_cmd_volume_delete_cbk (struct cli_state *state, struct cli_cmd_word *word, volname = (char *)words[2]; + ret = dict_set_str (dict, "volname", volname); + + if (ret) { + gf_log (THIS->name, GF_LOG_WARNING, "dict set failed"); + goto out; + } + + CLI_LOCAL_INIT (local, words, frame, dict); + if (proc->fn) { - ret = proc->fn (frame, THIS, volname); + ret = proc->fn (frame, THIS, dict); } out: @@ -452,6 +506,8 @@ out: cli_out ("Volume delete failed"); } + CLI_STACK_DESTROY (frame); + return ret; } @@ -466,6 +522,7 @@ cli_cmd_volume_start_cbk (struct cli_state *state, struct cli_cmd_word *word, int parse_error = 0; dict_t *dict = NULL; int flags = 0; + cli_local_t *local = NULL; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) @@ -516,19 +573,21 @@ cli_cmd_volume_start_cbk (struct cli_state *state, struct cli_cmd_word *word, proc = &cli_rpc_prog->proctable[GLUSTER_CLI_START_VOLUME]; + CLI_LOCAL_INIT (local, words, frame, dict); + if (proc->fn) { ret = proc->fn (frame, THIS, dict); } out: - if (dict) - dict_unref (dict); if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume start failed"); } + CLI_STACK_DESTROY (frame); + return ret; } @@ -537,7 +596,7 @@ cli_cmd_get_confirmation (struct cli_state *state, const char *question) { char answer[5] = {'\0', }; char flush = '\0'; - int len = 0; + size_t len; if (state->mode & GLUSTER_MODE_SCRIPT) return GF_ANSWER_YES; @@ -551,7 +610,7 @@ cli_cmd_get_confirmation (struct cli_state *state, const char *question) len = strlen (answer); - if (answer [len - 1] == '\n'){ + if (len && answer [len - 1] == '\n'){ answer [--len] = '\0'; } else { do{ @@ -587,6 +646,7 @@ cli_cmd_volume_stop_cbk (struct cli_state *state, struct cli_cmd_word *word, int parse_error = 0; dict_t *dict = NULL; char *volname = NULL; + cli_local_t *local = NULL; const char *question = "Stopping volume will make its data inaccessible. 
" "Do you want to continue?"; @@ -636,6 +696,8 @@ cli_cmd_volume_stop_cbk (struct cli_state *state, struct cli_cmd_word *word, proc = &cli_rpc_prog->proctable[GLUSTER_CLI_STOP_VOLUME]; + CLI_LOCAL_INIT (local, words, frame, dict); + if (proc->fn) { ret = proc->fn (frame, THIS, dict); } @@ -646,8 +708,9 @@ out: if ((sent == 0) && (parse_error == 0)) cli_out ("Volume stop on '%s' failed", volname); } - if (dict) - dict_unref (dict); + + CLI_STACK_DESTROY (frame); + return ret; } @@ -704,6 +767,8 @@ out: cli_out ("Volume rename on '%s' failed", (char *)words[2]); } + CLI_STACK_DESTROY (frame); + return ret; } @@ -717,7 +782,7 @@ cli_cmd_volume_defrag_cbk (struct cli_state *state, struct cli_cmd_word *word, dict_t *dict = NULL; int sent = 0; int parse_error = 0; - int index = 0; + cli_local_t *local = NULL; #ifdef GF_SOLARIS_HOST_OS cli_out ("Command not supported on Solaris"); goto out; @@ -727,78 +792,30 @@ cli_cmd_volume_defrag_cbk (struct cli_state *state, struct cli_cmd_word *word, if (!frame) goto out; - dict = dict_new (); - if (!dict) - goto out; + ret = cli_cmd_volume_defrag_parse (words, wordcount, &dict); - if (!((wordcount == 4) || (wordcount == 5))) { + if (ret) { cli_usage_out (word->pattern); parse_error = 1; - goto out; - } - - if (wordcount == 4) { - index = 3; - } else { - if (strcmp (words[3], "fix-layout") && - strcmp (words[3], "start")) { - cli_usage_out (word->pattern); - parse_error = 1; - goto out; - } - index = 4; - } - - if (strcmp (words[index], "start") && strcmp (words[index], "stop") && - strcmp (words[index], "status") && strcmp (words[index], "force")) { - cli_usage_out (word->pattern); - parse_error = 1; - goto out; - } - - ret = dict_set_str (dict, "volname", (char *)words[2]); - if (ret) - goto out; - - if (wordcount == 4) { - ret = dict_set_str (dict, "command", (char *)words[3]); - if (ret) - goto out; - } - if (wordcount == 5) { - if ((strcmp (words[3], "fix-layout") || - strcmp (words[4], "start")) && - (strcmp (words[3], "start") || - strcmp (words[4], "force"))) { - cli_usage_out (word->pattern); - parse_error = 1; - goto out; - } - - ret = dict_set_str (dict, "option", (char *)words[4]); - if (ret) - goto out; - ret = dict_set_str (dict, "command", (char *)words[3]); - if (ret) - goto out; } proc = &cli_rpc_prog->proctable[GLUSTER_CLI_DEFRAG_VOLUME]; + CLI_LOCAL_INIT (local, words, frame, dict); + if (proc->fn) { ret = proc->fn (frame, THIS, dict); } out: - if (dict) - dict_destroy (dict); - if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume rebalance failed"); } + CLI_STACK_DESTROY (frame); + return ret; } @@ -808,11 +825,11 @@ cli_cmd_volume_reset_cbk (struct cli_state *state, struct cli_cmd_word *word, { int sent = 0; int parse_error = 0; - int ret = -1; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; dict_t *options = NULL; + cli_local_t *local = NULL; proc = &cli_rpc_prog->proctable[GLUSTER_CLI_RESET_VOLUME]; @@ -821,27 +838,27 @@ cli_cmd_volume_reset_cbk (struct cli_state *state, struct cli_cmd_word *word, goto out; ret = cli_cmd_volume_reset_parse (words, wordcount, &options); - if (ret) { cli_usage_out (word->pattern); parse_error = 1; goto out; } + CLI_LOCAL_INIT (local, words, frame, options); + if (proc->fn) { ret = proc->fn (frame, THIS, options); } out: - if (options) - dict_unref (options); - if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume reset failed"); } + CLI_STACK_DESTROY (frame); + return ret; } @@ -857,6 +874,7 @@ 
cli_cmd_volume_profile_cbk (struct cli_state *state, struct cli_cmd_word *word, rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; dict_t *options = NULL; + cli_local_t *local = NULL; ret = cli_cmd_volume_profile_parse (words, wordcount, &options); @@ -872,20 +890,21 @@ cli_cmd_volume_profile_cbk (struct cli_state *state, struct cli_cmd_word *word, if (!frame) goto out; + CLI_LOCAL_INIT (local, words, frame, options); + if (proc->fn) { ret = proc->fn (frame, THIS, options); } out: - if (options) - dict_unref (options); - if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume profile failed"); } + CLI_STACK_DESTROY (frame); + return ret; } @@ -901,6 +920,8 @@ cli_cmd_volume_set_cbk (struct cli_state *state, struct cli_cmd_word *word, rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; dict_t *options = NULL; + cli_local_t *local = NULL; + char *op_errstr = NULL; proc = &cli_rpc_prog->proctable[GLUSTER_CLI_SET_VOLUME]; @@ -908,28 +929,33 @@ cli_cmd_volume_set_cbk (struct cli_state *state, struct cli_cmd_word *word, if (!frame) goto out; - ret = cli_cmd_volume_set_parse (words, wordcount, &options); - + ret = cli_cmd_volume_set_parse (words, wordcount, &options, &op_errstr); if (ret) { - cli_usage_out (word->pattern); + if (op_errstr) { + cli_err ("%s", op_errstr); + GF_FREE (op_errstr); + } else + cli_usage_out (word->pattern); + parse_error = 1; goto out; } + CLI_LOCAL_INIT (local, words, frame, options); + if (proc->fn) { ret = proc->fn (frame, THIS, options); } out: - if (options) - dict_unref (options); - if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume set failed"); } + CLI_STACK_DESTROY (frame); + return ret; } @@ -945,35 +971,428 @@ cli_cmd_volume_add_brick_cbk (struct cli_state *state, dict_t *options = NULL; int sent = 0; int parse_error = 0; + gf_answer_t answer = GF_ANSWER_NO; + cli_local_t *local = NULL; + + const char *question = "Changing the 'stripe count' of the volume is " + "not a supported feature. In some cases it may result in data " + "loss on the volume. Also there may be issues with regular " + "filesystem operations on the volume after the change. Do you " + "really want to continue with 'stripe' count option ? 
"; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) goto out; ret = cli_cmd_volume_add_brick_parse (words, wordcount, &options); - if (ret) { cli_usage_out (word->pattern); parse_error = 1; goto out; } + /* TODO: there are challenges in supporting changing of + stripe-count, until it is properly supported give warning to user */ + if (dict_get (options, "stripe-count")) { + answer = cli_cmd_get_confirmation (state, question); + + if (GF_ANSWER_NO == answer) { + ret = 0; + goto out; + } + } + + if (state->mode & GLUSTER_MODE_WIGNORE) { + ret = dict_set_int32 (options, "force", _gf_true); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Failed to set force " + "option"); + goto out; + } + } + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_ADD_BRICK]; + CLI_LOCAL_INIT (local, words, frame, options); + if (proc->fn) { ret = proc->fn (frame, THIS, options); } out: - if (options) - dict_unref (options); - if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume add-brick failed"); } + CLI_STACK_DESTROY (frame); + + return ret; +} + +static int +gf_cli_create_auxiliary_mount (char *volname) +{ + int ret = -1; + char mountdir[PATH_MAX] = {0,}; + char pidfile_path[PATH_MAX] = {0,}; + char logfile[PATH_MAX] = {0,}; + + GLUSTERFS_GET_AUX_MOUNT_PIDFILE (pidfile_path, volname); + + if (gf_is_service_running (pidfile_path, NULL)) { + gf_log ("cli", GF_LOG_DEBUG, "Aux mount of volume %s is running" + " already", volname); + ret = 0; + goto out; + } + + GLUSTERD_GET_QUOTA_AUX_MOUNT_PATH (mountdir, volname, "/"); + ret = mkdir (mountdir, 0777); + if (ret && errno != EEXIST) { + gf_log ("cli", GF_LOG_ERROR, "Failed to create auxiliary mount " + "directory %s. Reason : %s", mountdir, + strerror (errno)); + goto out; + } + + snprintf (logfile, PATH_MAX-1, "%s/quota-mount-%s.log", + DEFAULT_LOG_FILE_DIRECTORY, volname); + + ret = runcmd (SBIN_DIR"/glusterfs", + "-s", "localhost", + "--volfile-id", volname, + "-l", logfile, + "-p", pidfile_path, + mountdir, + "--client-pid", "-42", NULL); + + if (ret) { + gf_log ("cli", GF_LOG_WARNING, "failed to mount glusterfs " + "client. Please check the log file %s for more details", + logfile); + ret = -1; + goto out; + } + + ret = 0; + +out: + return ret; +} + +static int +cli_stage_quota_op (char *volname, int op_code) +{ + int ret = -1; + + switch (op_code) { + case GF_QUOTA_OPTION_TYPE_ENABLE: + case GF_QUOTA_OPTION_TYPE_LIMIT_USAGE: + case GF_QUOTA_OPTION_TYPE_REMOVE: + case GF_QUOTA_OPTION_TYPE_LIST: + ret = gf_cli_create_auxiliary_mount (volname); + if (ret) { + cli_err ("quota: Could not start quota " + "auxiliary mount"); + goto out; + } + ret = 0; + break; + + default: + ret = 0; + break; + } + +out: + return ret; +} + +static void +print_quota_list_header (void) +{ + //Header + cli_out (" Path Hard-limit " + "Soft-limit Used Available Soft-limit exceeded?" + " Hard-limit exceeded?"); + cli_out ("-----------------------------------------------------" + "-----------------------------------------------------" + "-----------------"); +} + +int +cli_get_soft_limit (dict_t *options, const char **words, dict_t *xdata) +{ + call_frame_t *frame = NULL; + cli_local_t *local = NULL; + rpc_clnt_procedure_t *proc = NULL; + char *default_sl = NULL; + char *default_sl_dup = NULL; + int ret = -1; + + frame = create_frame (THIS, THIS->ctx->pool); + if (!frame) { + ret = -1; + goto out; + } + + //We need a ref on @options to prevent CLI_STACK_DESTROY + //from destroying it prematurely. 
+ dict_ref (options); + CLI_LOCAL_INIT (local, words, frame, options); + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_QUOTA]; + ret = proc->fn (frame, THIS, options); + + ret = dict_get_str (options, "default-soft-limit", &default_sl); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Failed to get default soft limit"); + goto out; + } + + default_sl_dup = gf_strdup (default_sl); + if (!default_sl_dup) { + ret = -1; + goto out; + } + + ret = dict_set_dynstr (xdata, "default-soft-limit", default_sl_dup); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Failed to set default soft limit"); + GF_FREE (default_sl_dup); + goto out; + } + +out: + CLI_STACK_DESTROY (frame); + return ret; +} + +#define QUOTA_CONF_HEADER \ + "GlusterFS Quota conf | version: v%d.%d\n" +int +cli_cmd_quota_conf_skip_header (int fd) +{ + char buf[PATH_MAX] = {0,}; + + snprintf (buf, sizeof(buf)-1, QUOTA_CONF_HEADER, 1, 1); + return gf_skip_header_section (fd, strlen (buf)); +} + +/* Checks if at least one limit has been set on the volume + * + * Returns true if at least one limit is set. Returns false otherwise. + */ +gf_boolean_t +_limits_set_on_volume (char *volname) { + gf_boolean_t limits_set = _gf_false; + int ret = -1; + char quota_conf_file[PATH_MAX] = {0,}; + int fd = -1; + char buf[16] = {0,}; + + /* TODO: fix hardcoding; Need to perform an RPC call to glusterd + * to fetch working directory + */ + sprintf (quota_conf_file, "/var/lib/glusterd/vols/%s/quota.conf", + volname); + fd = open (quota_conf_file, O_RDONLY); + if (fd == -1) + goto out; + + ret = cli_cmd_quota_conf_skip_header (fd); + if (ret) + goto out; + + /* Try to read atleast one gfid */ + ret = read (fd, (void *)buf, 16); + if (ret == 16) + limits_set = _gf_true; +out: + if (fd != -1) + close (fd); + return limits_set; +} + +/* Checks if the mount is connected to the bricks + * + * Returns true if connected and false if not + */ +gf_boolean_t +_quota_aux_mount_online (char *volname) +{ + int ret = 0; + char mount_path[PATH_MAX + 1] = {0,}; + struct stat buf = {0,}; + + GF_ASSERT (volname); + + /* Try to create the aux mount before checking if bricks are online */ + ret = gf_cli_create_auxiliary_mount (volname); + if (ret) { + cli_err ("quota: Could not start quota auxiliary mount"); + return _gf_false; + } + + GLUSTERD_GET_QUOTA_AUX_MOUNT_PATH (mount_path, volname, "/"); + + ret = sys_stat (mount_path, &buf); + if (ret) { + if (ENOTCONN == errno) { + cli_err ("quota: Cannot connect to bricks. Check if " + "bricks are online."); + } else { + cli_err ("quota: Error on quota auxiliary mount (%s).", + strerror (errno)); + } + return _gf_false; + } + return _gf_true; +} + +int +cli_cmd_quota_handle_list_all (const char **words, dict_t *options) +{ + int all_failed = 1; + int count = 0; + int ret = -1; + rpc_clnt_procedure_t *proc = NULL; + cli_local_t *local = NULL; + call_frame_t *frame = NULL; + dict_t *xdata = NULL; + char *gfid_str = NULL; + char *volname = NULL; + char *volname_dup = NULL; + unsigned char buf[16] = {0}; + int fd = -1; + char quota_conf_file[PATH_MAX] = {0}; + + xdata = dict_new (); + if (!xdata) { + ret = -1; + goto out; + } + + ret = dict_get_str (options, "volname", &volname); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Failed to get volume name"); + goto out; + } + + ret = cli_get_soft_limit (options, words, xdata); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Failed to fetch default " + "soft-limit"); + goto out; + } + + /* Check if at least one limit is set on volume. 
No need to check for + * quota enabled as cli_get_soft_limit() handles that + */ + if (!_limits_set_on_volume (volname)) { + cli_out ("quota: No quota configured on volume %s", volname); + ret = 0; + goto out; + } + + /* Check if the mount is online before doing any listing */ + if (!_quota_aux_mount_online (volname)) { + ret = -1; + goto out; + } + + frame = create_frame (THIS, THIS->ctx->pool); + if (!frame) { + ret = -1; + goto out; + } + + volname_dup = gf_strdup (volname); + if (!volname_dup) { + ret = -1; + goto out; + } + + ret = dict_set_dynstr (xdata, "volume-uuid", volname_dup); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Failed to set volume-uuid"); + GF_FREE (volname_dup); + goto out; + } + + //TODO: fix hardcoding; Need to perform an RPC call to glusterd + //to fetch working directory + sprintf (quota_conf_file, "/var/lib/glusterd/vols/%s/quota.conf", + volname); + fd = open (quota_conf_file, O_RDONLY); + if (fd == -1) { + //This may because no limits were yet set on the volume + gf_log ("cli", GF_LOG_TRACE, "Unable to open " + "quota.conf"); + ret = 0; + goto out; + } + + ret = cli_cmd_quota_conf_skip_header (fd); + if (ret) { + goto out; + } + CLI_LOCAL_INIT (local, words, frame, xdata); + proc = &cli_quotad_clnt.proctable[GF_AGGREGATOR_GETLIMIT]; + + print_quota_list_header (); + gfid_str = GF_CALLOC (1, gf_common_mt_char, 64); + if (!gfid_str) { + ret = -1; + goto out; + } + for (count = 0;; count++) { + ret = read (fd, (void*) buf, 16); + if (ret <= 0) { + //Finished reading all entries in the conf file + break; + } + if (ret < 16) { + //This should never happen. We must have a multiple of + //entry_sz bytes in our configuration file. + gf_log (THIS->name, GF_LOG_CRITICAL, "Quota " + "configuration store may be corrupt."); + goto out; + } + uuid_utoa_r (buf, gfid_str); + ret = dict_set_str (xdata, "gfid", gfid_str); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Failed to set gfid"); + goto out; + } + + ret = proc->fn (frame, THIS, xdata); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Failed to get quota " + "limits for %s", uuid_utoa ((unsigned char*)buf)); + } + + dict_del (xdata, "gfid"); + all_failed = all_failed && ret; + } + + if (count > 0) { + ret = all_failed? -1: 0; + } else { + ret = 0; + } +out: + if (fd != -1) { + close (fd); + } + + GF_FREE (gfid_str); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Could not fetch and display quota" + " limits"); + } + CLI_STACK_DESTROY (frame); return ret; } @@ -989,45 +1408,79 @@ cli_cmd_quota_cbk (struct cli_state *state, struct cli_cmd_word *word, call_frame_t *frame = NULL; dict_t *options = NULL; gf_answer_t answer = GF_ANSWER_NO; + cli_local_t *local = NULL; + int sent = 0; + char *volname = NULL; const char *question = "Disabling quota will delete all the quota " "configuration. 
Do you want to continue?"; - proc = &cli_rpc_prog->proctable[GLUSTER_CLI_QUOTA]; - if (proc == NULL) { - ret = -1; + //parse **words into options dictionary + ret = cli_cmd_quota_parse (words, wordcount, &options); + if (ret < 0) { + cli_usage_out (word->pattern); + parse_err = 1; + goto out; + } + + ret = dict_get_int32 (options, "type", &type); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Failed to get opcode"); + goto out; + } + + //handle quota-disable and quota-list-all different from others + switch (type) { + case GF_QUOTA_OPTION_TYPE_DISABLE: + answer = cli_cmd_get_confirmation (state, question); + if (answer == GF_ANSWER_NO) + goto out; + break; + case GF_QUOTA_OPTION_TYPE_LIST: + if (wordcount != 4) + break; + ret = cli_cmd_quota_handle_list_all (words, options); + goto out; + default: + break; + } + + ret = dict_get_str (options, "volname", &volname); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Failed to get volume name"); goto out; } + //create auxiliary mount need for quota commands that operate on path + ret = cli_stage_quota_op (volname, type); + if (ret) + goto out; + frame = create_frame (THIS, THIS->ctx->pool); if (!frame) { ret = -1; goto out; } - ret = cli_cmd_quota_parse (words, wordcount, &options); - if (ret < 0) { - cli_usage_out (word->pattern); - parse_err = 1; + CLI_LOCAL_INIT (local, words, frame, options); + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_QUOTA]; + if (proc == NULL) { + ret = -1; goto out; - } else if (dict_get_int32 (options, "type", &type) == 0 && - type == GF_QUOTA_OPTION_TYPE_DISABLE) { - answer = cli_cmd_get_confirmation (state, question); - if (answer == GF_ANSWER_NO) - goto out; } if (proc->fn) ret = proc->fn (frame, THIS, options); out: - if (options) - dict_unref (options); - - if (ret && parse_err == 0) - cli_out ("Quota command failed"); + if (ret) { + cli_cmd_sent_status_get (&sent); + if (sent == 0 && parse_err == 0) + cli_out ("Quota command failed. Please check the cli " + "logs for more details"); + } + CLI_STACK_DESTROY (frame); return ret; - } int @@ -1043,6 +1496,7 @@ cli_cmd_volume_remove_brick_cbk (struct cli_state *state, int sent = 0; int parse_error = 0; int need_question = 0; + cli_local_t *local = NULL; const char *question = "Removing brick(s) can result in data loss. " "Do you want to Continue?"; @@ -1053,7 +1507,6 @@ cli_cmd_volume_remove_brick_cbk (struct cli_state *state, ret = cli_cmd_volume_remove_brick_parse (words, wordcount, &options, &need_question); - if (ret) { cli_usage_out (word->pattern); parse_error = 1; @@ -1071,6 +1524,8 @@ cli_cmd_volume_remove_brick_cbk (struct cli_state *state, proc = &cli_rpc_prog->proctable[GLUSTER_CLI_REMOVE_BRICK]; + CLI_LOCAL_INIT (local, words, frame, options); + if (proc->fn) { ret = proc->fn (frame, THIS, options); } @@ -1082,8 +1537,8 @@ out: cli_out ("Volume remove-brick failed"); } - if (options) - dict_unref (options); + CLI_STACK_DESTROY (frame); + return ret; } @@ -1100,6 +1555,12 @@ cli_cmd_volume_replace_brick_cbk (struct cli_state *state, dict_t *options = NULL; int sent = 0; int parse_error = 0; + cli_local_t *local = NULL; + int replace_op = 0; + char *q = "All replace-brick commands except " + "commit force are deprecated. 
" + "Do you want to continue?"; + gf_answer_t answer = GF_ANSWER_NO; #ifdef GF_SOLARIS_HOST_OS cli_out ("Command not supported on Solaris"); @@ -1119,20 +1580,39 @@ cli_cmd_volume_replace_brick_cbk (struct cli_state *state, goto out; } + ret = dict_get_int32 (options, "operation", &replace_op); + if (replace_op != GF_REPLACE_OP_COMMIT_FORCE) { + answer = cli_cmd_get_confirmation (state, q); + if (GF_ANSWER_NO == answer) { + ret = 0; + goto out; + } + } + + if (state->mode & GLUSTER_MODE_WIGNORE) { + ret = dict_set_int32 (options, "force", _gf_true); + if (ret) { + gf_log ("cli", GF_LOG_ERROR, "Failed to set force" + "option"); + goto out; + } + } + + CLI_LOCAL_INIT (local, words, frame, options); + if (proc->fn) { ret = proc->fn (frame, THIS, options); } out: - if (options) - dict_unref (options); - if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume replace-brick failed"); } + CLI_STACK_DESTROY (frame); + return ret; } @@ -1157,6 +1637,7 @@ cli_cmd_volume_top_cbk (struct cli_state *state, struct cli_cmd_word *word, dict_t *options = NULL; int sent = 0; int parse_error = 0; + cli_local_t *local = NULL; ret = cli_cmd_volume_top_parse (words, wordcount, &options); @@ -1172,20 +1653,21 @@ cli_cmd_volume_top_cbk (struct cli_state *state, struct cli_cmd_word *word, if (!frame) goto out; + CLI_LOCAL_INIT (local, words, frame, options); + if (proc->fn) { ret = proc->fn (frame, THIS, options); } out: - if (options) - dict_unref (options); - if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume top failed"); } + CLI_STACK_DESTROY (frame); + return ret; } @@ -1201,6 +1683,7 @@ cli_cmd_log_rotate_cbk (struct cli_state *state, struct cli_cmd_word *word, dict_t *options = NULL; int sent = 0; int parse_error = 0; + cli_local_t *local = NULL; if (!((wordcount == 4) || (wordcount == 5))) { cli_usage_out (word->pattern); @@ -1208,6 +1691,13 @@ cli_cmd_log_rotate_cbk (struct cli_state *state, struct cli_cmd_word *word, goto out; } + if (!((strcmp ("rotate", words[2]) == 0) || + (strcmp ("rotate", words[3]) == 0))) { + cli_usage_out (word->pattern); + parse_error = 1; + goto out; + } + proc = &cli_rpc_prog->proctable[GLUSTER_CLI_LOG_ROTATE]; frame = create_frame (THIS, THIS->ctx->pool); @@ -1218,19 +1708,19 @@ cli_cmd_log_rotate_cbk (struct cli_state *state, struct cli_cmd_word *word, if (ret) goto out; + CLI_LOCAL_INIT (local, words, frame, options); + if (proc->fn) { ret = proc->fn (frame, THIS, options); } out: - if (options) - dict_destroy (options); - if (ret) { cli_cmd_sent_status_get (&sent); if ((sent == 0) && (parse_error == 0)) cli_out ("Volume log rotate failed"); } + CLI_STACK_DESTROY (frame); return ret; } @@ -1304,6 +1794,7 @@ cli_cmd_volume_gsync_set_cbk (struct cli_state *state, struct cli_cmd_word *word dict_t *options = NULL; rpc_clnt_procedure_t *proc = NULL; call_frame_t *frame = NULL; + cli_local_t *local = NULL; proc = &cli_rpc_prog->proctable [GLUSTER_CLI_GSYNC_SET]; if (proc == NULL) { @@ -1324,16 +1815,17 @@ cli_cmd_volume_gsync_set_cbk (struct cli_state *state, struct cli_cmd_word *word goto out; } + CLI_LOCAL_INIT (local, words, frame, options); + if (proc->fn) ret = proc->fn (frame, THIS, options); out: - if (options) - dict_unref (options); - if (ret && parse_err == 0) cli_out (GEOREP" command failed"); + CLI_STACK_DESTROY (frame); + return ret; } @@ -1347,6 +1839,7 @@ cli_cmd_volume_status_cbk (struct cli_state *state, call_frame_t *frame = NULL; dict_t *dict = NULL; uint32_t cmd = 0; + 
cli_local_t *local = NULL; ret = cli_cmd_volume_status_parse (words, wordcount, &dict); @@ -1374,11 +1867,13 @@ cli_cmd_volume_status_cbk (struct cli_state *state, if (!frame) goto out; + CLI_LOCAL_INIT (local, words, frame, dict); + ret = proc->fn (frame, THIS, dict); - out: - if (dict) - dict_unref (dict); +out: + CLI_STACK_DESTROY (frame); + return ret; } @@ -1386,16 +1881,14 @@ cli_cmd_volume_status_cbk (struct cli_state *state, int cli_get_detail_status (dict_t *dict, int i, cli_volume_status_t *status) { - uint64_t free = -1; - uint64_t total = -1; + uint64_t free = 0; + uint64_t total = 0; char key[1024] = {0}; int ret = 0; memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "brick%d.free", i); ret = dict_get_uint64 (dict, key, &free); - if (ret) - goto out; status->free = gf_uint64_2human_readable (free); if (!status->free) @@ -1404,70 +1897,65 @@ cli_get_detail_status (dict_t *dict, int i, cli_volume_status_t *status) memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "brick%d.total", i); ret = dict_get_uint64 (dict, key, &total); - if (ret) - goto out; status->total = gf_uint64_2human_readable (total); if (!status->total) goto out; +#ifdef GF_LINUX_HOST_OS memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "brick%d.device", i); ret = dict_get_str (dict, key, &(status->device)); if (ret) - goto out; + status->device = NULL; +#endif memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "brick%d.block_size", i); ret = dict_get_uint64 (dict, key, &(status->block_size)); - if (ret) - goto out; + if (ret) { + ret = 0; + status->block_size = 0; + } #ifdef GF_LINUX_HOST_OS memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "brick%d.mnt_options", i); ret = dict_get_str (dict, key, &(status->mount_options)); if (ret) - goto out; + status->mount_options = NULL; memset (key, 0, sizeof (key)); snprintf (key, sizeof (key), "brick%d.fs_name", i); ret = dict_get_str (dict, key, &(status->fs_name)); - if (ret) - goto out; -#endif - - if (IS_EXT_FS(status->fs_name) || - !strcmp (status->fs_name, "xfs")) { - -#ifdef GF_LINUX_HOST_OS - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.inode_size", i); - ret = dict_get_str (dict, key, &(status->inode_size)); - if (ret) - status->inode_size = NULL; -#endif - - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.total_inodes", i); - ret = dict_get_uint64 (dict, key, - &(status->total_inodes)); - if (ret) - goto out; + if (ret) { + ret = 0; + status->fs_name = NULL; + } - memset (key, 0, sizeof (key)); - snprintf (key, sizeof (key), "brick%d.free_inodes", i); - ret = dict_get_uint64 (dict, key, &(status->free_inodes)); - if (ret) - goto out; - } else { -#ifdef GF_LINUX_HOST_OS + memset (key, 0, sizeof (key)); + snprintf (key, sizeof (key), "brick%d.inode_size", i); + ret = dict_get_str (dict, key, &(status->inode_size)); + if (ret) status->inode_size = NULL; -#endif +#endif /* GF_LINUX_HOST_OS */ + + memset (key, 0, sizeof (key)); + snprintf (key, sizeof (key), "brick%d.total_inodes", i); + ret = dict_get_uint64 (dict, key, + &(status->total_inodes)); + if (ret) status->total_inodes = 0; + + memset (key, 0, sizeof (key)); + snprintf (key, sizeof (key), "brick%d.free_inodes", i); + ret = dict_get_uint64 (dict, key, &(status->free_inodes)); + if (ret) { + ret = 0; status->free_inodes = 0; } + out: return ret; } @@ -1476,13 +1964,23 @@ void cli_print_detailed_status (cli_volume_status_t *status) { cli_out ("%-20s : %-20s", "Brick", status->brick); - cli_out ("%-20s : %-20d", 
"Port", status->port); + if (status->online) + cli_out ("%-20s : %-20d", "Port", status->port); + else + cli_out ("%-20s : %-20s", "Port", "N/A"); cli_out ("%-20s : %-20c", "Online", (status->online) ? 'Y' : 'N'); cli_out ("%-20s : %-20s", "Pid", status->pid_str); #ifdef GF_LINUX_HOST_OS - cli_out ("%-20s : %-20s", "File System", status->fs_name); - cli_out ("%-20s : %-20s", "Device", status->device); + if (status->fs_name) + cli_out ("%-20s : %-20s", "File System", status->fs_name); + else + cli_out ("%-20s : %-20s", "File System", "N/A"); + + if (status->device) + cli_out ("%-20s : %-20s", "Device", status->device); + else + cli_out ("%-20s : %-20s", "Device", "N/A"); if (status->mount_options) { cli_out ("%-20s : %-20s", "Mount Options", @@ -1498,18 +1996,26 @@ cli_print_detailed_status (cli_volume_status_t *status) cli_out ("%-20s : %-20s", "Inode Size", "N/A"); } #endif - cli_out ("%-20s : %-20s", "Disk Space Free", status->free); - cli_out ("%-20s : %-20s", "Total Disk Space", status->total); + if (status->free) + cli_out ("%-20s : %-20s", "Disk Space Free", status->free); + else + cli_out ("%-20s : %-20s", "Disk Space Free", "N/A"); + + if (status->total) + cli_out ("%-20s : %-20s", "Total Disk Space", status->total); + else + cli_out ("%-20s : %-20s", "Total Disk Space", "N/A"); + if (status->total_inodes) { - cli_out ("%-20s : %-20ld", "Inode Count", + cli_out ("%-20s : %-20"GF_PRI_INODE, "Inode Count", status->total_inodes); } else { cli_out ("%-20s : %-20s", "Inode Count", "N/A"); } if (status->free_inodes) { - cli_out ("%-20s : %-20ld", "Free Inodes", + cli_out ("%-20s : %-20"GF_PRI_INODE, "Free Inodes", status->free_inodes); } else { cli_out ("%-20s : %-20s", "Free Inodes", "N/A"); @@ -1520,30 +2026,38 @@ int cli_print_brick_status (cli_volume_status_t *status) { int fieldlen = CLI_VOL_STATUS_BRICK_LEN; - char buf[80] = {0,}; int bricklen = 0; - int i = 0; char *p = NULL; int num_tabs = 0; - bricklen = strlen (status->brick); p = status->brick; + bricklen = strlen (p); while (bricklen > 0) { if (bricklen > fieldlen) { - i++; - strncpy (buf, p, fieldlen); - buf[strlen(buf) + 1] = '\0'; - cli_out ("%s", buf); - p = status->brick + i * fieldlen; + cli_out ("%.*s", fieldlen, p); + p += fieldlen; bricklen -= fieldlen; } else { num_tabs = (fieldlen - bricklen) / CLI_TAB_LENGTH + 1; printf ("%s", p); while (num_tabs-- != 0) printf ("\t"); - cli_out ("%d\t%c\t%s", - status->port, status->online?'Y':'N', - status->pid_str); + if (status->port) { + if (status->online) + cli_out ("%d\t%c\t%s", + status->port, + status->online?'Y':'N', + status->pid_str); + else + cli_out ("%s\t%c\t%s", + "N/A", + status->online?'Y':'N', + status->pid_str); + } + else + cli_out ("%s\t%c\t%s", + "N/A", status->online?'Y':'N', + status->pid_str); bricklen = 0; } } @@ -1560,32 +2074,34 @@ cli_cmd_volume_heal_cbk (struct cli_state *state, struct cli_cmd_word *word, call_frame_t *frame = NULL; int sent = 0; int parse_error = 0; - dict_t *dict = NULL; + dict_t *options = NULL; + xlator_t *this = NULL; + cli_local_t *local = NULL; - frame = create_frame (THIS, THIS->ctx->pool); + this = THIS; + frame = create_frame (this, this->ctx->pool); if (!frame) goto out; - if (wordcount != 3) { + if (wordcount < 3) { cli_usage_out (word->pattern); - parse_error = 1; + parse_error = 1; goto out; } - dict = dict_new (); - if (!dict) - goto out; - - ret = dict_set_str (dict, "volname", (char *) words[2]); + ret = cli_cmd_volume_heal_options_parse (words, wordcount, &options); if (ret) { - gf_log (THIS->name, GF_LOG_ERROR, 
"failed to set volname"); + cli_usage_out (word->pattern); + parse_error = 1; goto out; } proc = &cli_rpc_prog->proctable[GLUSTER_CLI_HEAL_VOLUME]; + CLI_LOCAL_INIT (local, words, frame, options); + if (proc->fn) { - ret = proc->fn (frame, THIS, dict); + ret = proc->fn (frame, THIS, options); } out: @@ -1595,8 +2111,7 @@ out: cli_out ("Volume heal failed"); } - if (dict) - dict_unref (dict); + CLI_STACK_DESTROY (frame); return ret; } @@ -1611,6 +2126,7 @@ cli_cmd_volume_statedump_cbk (struct cli_state *state, struct cli_cmd_word *word dict_t *options = NULL; int sent = 0; int parse_error = 0; + cli_local_t *local = NULL; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) @@ -1639,6 +2155,9 @@ cli_cmd_volume_statedump_cbk (struct cli_state *state, struct cli_cmd_word *word goto out; proc = &cli_rpc_prog->proctable[GLUSTER_CLI_STATEDUMP_VOLUME]; + + CLI_LOCAL_INIT (local, words, frame, options); + if (proc->fn) { ret = proc->fn (frame, THIS, options); } @@ -1650,6 +2169,8 @@ out: cli_out ("Volume statedump failed"); } + CLI_STACK_DESTROY (frame); + return ret; } @@ -1678,6 +2199,8 @@ out: cli_out ("Volume list failed"); } + CLI_STACK_DESTROY (frame); + return ret; } @@ -1692,6 +2215,7 @@ cli_cmd_volume_clearlocks_cbk (struct cli_state *state, dict_t *options = NULL; int sent = 0; int parse_error = 0; + cli_local_t *local = NULL; frame = create_frame (THIS, THIS->ctx->pool); if (!frame) @@ -1721,6 +2245,9 @@ cli_cmd_volume_clearlocks_cbk (struct cli_state *state, goto out; proc = &cli_rpc_prog->proctable[GLUSTER_CLI_CLRLOCKS_VOLUME]; + + CLI_LOCAL_INIT (local, words, frame, options); + if (proc->fn) { ret = proc->fn (frame, THIS, options); } @@ -1732,6 +2259,8 @@ out: cli_out ("Volume clear-locks failed"); } + CLI_STACK_DESTROY (frame); + return ret; } @@ -1740,7 +2269,13 @@ struct cli_cmd volume_cmds[] = { cli_cmd_volume_info_cbk, "list information of all volumes"}, - { "volume create <NEW-VOLNAME> [stripe <COUNT>] [replica <COUNT>] [transport <tcp|rdma|tcp,rdma>] <NEW-BRICK> ...", + { "volume create <NEW-VOLNAME> [stripe <COUNT>] [replica <COUNT>] " + "[transport <tcp|rdma|tcp,rdma>] <NEW-BRICK>" +#ifdef HAVE_BD_XLATOR + "?<vg_name>" +#endif + "... [force]", + cli_cmd_volume_create_cbk, "create a new volume of specified type with mentioned bricks"}, @@ -1760,19 +2295,20 @@ struct cli_cmd volume_cmds[] = { cli_cmd_volume_rename_cbk, "rename volume <VOLNAME> to <NEW-VOLNAME>"},*/ - { "volume add-brick <VOLNAME> [<stripe|replica> <COUNT>] <NEW-BRICK> ...", + { "volume add-brick <VOLNAME> [<stripe|replica> <COUNT>] <NEW-BRICK> ... [force]", cli_cmd_volume_add_brick_cbk, "add brick to volume <VOLNAME>"}, - { "volume remove-brick <VOLNAME> [replica <COUNT>] <BRICK> ... {start|pause|abort|status|commit|force}", + { "volume remove-brick <VOLNAME> [replica <COUNT>] <BRICK> ..." 
+ " <start|stop|status|commit|force>", cli_cmd_volume_remove_brick_cbk, "remove brick from volume <VOLNAME>"}, - { "volume rebalance <VOLNAME> [fix-layout] {start|stop|status} [force]", + { "volume rebalance <VOLNAME> {{fix-layout start} | {start [force]|stop|status}}", cli_cmd_volume_defrag_cbk, "rebalance operations"}, - { "volume replace-brick <VOLNAME> <BRICK> <NEW-BRICK> {start|pause|abort|status|commit [force]}", + { "volume replace-brick <VOLNAME> <BRICK> <NEW-BRICK> {start [force]|pause|abort|status|commit [force]}", cli_cmd_volume_replace_brick_cbk, "replace-brick operations"}, @@ -1788,10 +2324,15 @@ struct cli_cmd volume_cmds[] = { cli_cmd_volume_help_cbk, "display help for the volume command"}, - { "volume log rotate <VOLNAME> [BRICK]", + { "volume log <VOLNAME> rotate [BRICK]", cli_cmd_log_rotate_cbk, "rotate the log file for corresponding volume/brick"}, + { "volume log rotate <VOLNAME> [BRICK]", + cli_cmd_log_rotate_cbk, + "rotate the log file for corresponding volume/brick" + " NOTE: This is an old syntax, will be deprecated from next release."}, + { "volume sync <HOSTNAME> [all|<VOLNAME>]", cli_cmd_sync_volume_cbk, "sync the volume information from a peer"}, @@ -1801,37 +2342,39 @@ struct cli_cmd volume_cmds[] = { "reset all the reconfigured options"}, #if (SYNCDAEMON_COMPILE) - {"volume "GEOREP" [<VOLNAME>] [<SLAVE-URL>] {start|stop|config|status|log-rotate} [options...]", + {"volume "GEOREP" [<VOLNAME>] [<SLAVE-URL>] {create [push-pem] [force]" + "|start [force]|stop [force]|config|status [detail]|delete} [options...]", cli_cmd_volume_gsync_set_cbk, "Geo-sync operations", cli_cmd_check_gsync_exists_cbk}, #endif - { "volume profile <VOLNAME> {start|info|stop}", + { "volume profile <VOLNAME> {start|info [peek|incremental [peek]|cumulative|clear]|stop} [nfs]", cli_cmd_volume_profile_cbk, "volume profile operations"}, - { "volume quota <VOLNAME> <enable|disable|limit-usage|list|remove> [path] [value]", + { "volume quota <VOLNAME> {enable|disable|list [<path> ...]|remove <path>| default-soft-limit <percent>} |\n" + "volume quota <VOLNAME> {limit-usage <path> <size> [<percent>]} |\n" + "volume quota <VOLNAME> {alert-time|soft-timeout|hard-timeout} {<time>}", cli_cmd_quota_cbk, "quota translator specific operations"}, - { "volume top <VOLNAME> {[open|read|write|opendir|readdir] " - "|[read-perf|write-perf bs <size> count <count>]} " - " [brick <brick>] [list-cnt <count>]", + { "volume top <VOLNAME> {open|read|write|opendir|readdir|clear} [nfs|brick <brick>] [list-cnt <value>] |\n" + "volume top <VOLNAME> {read-perf|write-perf} [bs <size> count <count>] [brick <brick>] [list-cnt <value>]", cli_cmd_volume_top_cbk, "volume top operations"}, - { "volume status [all | <VOLNAME> [<BRICK>]]" - " [detail|clients|mem|inode|fd|callpool]", + { "volume status [all | <VOLNAME> [nfs|shd|<BRICK>|quotad]]" + " [detail|clients|mem|inode|fd|callpool|tasks]", cli_cmd_volume_status_cbk, "display status of all or specified volume(s)/brick"}, - { "volume heal <VOLNAME>", + { "volume heal <VOLNAME> [{full | statistics {heal-count {replica <hostname:brickname>}} |info {healed | heal-failed | split-brain}}]", cli_cmd_volume_heal_cbk, - "Start healing of volume specified by <VOLNAME>"}, + "self-heal commands on volume specified by <VOLNAME>"}, - {"volume statedump <VOLNAME> [nfs] [all|mem|iobuf|callpool|priv|fd|" - "inode]...", + {"volume statedump <VOLNAME> [nfs|quotad] [all|mem|iobuf|callpool|priv|fd|" + "inode|history]...", cli_cmd_volume_statedump_cbk, "perform statedump on bricks"}, |