author    Raghavendra Bhat <raghavendra@redhat.com>    2012-08-23 15:32:33 +0530
committer Anand Avati <avati@redhat.com>               2012-11-28 16:28:55 -0800
commit    fbfcb0ad2aac73c2b5ab8950770c1184352bbf24 (patch)
tree      d23af3c8a539da97e8f146ab19e4996cad6419c5 /cli
parent    fadc34e7ce82f9e7f98f20e995cb2bbf71a00b20 (diff)
glusterd, cli: implement gluster system uuid reset command
A commonly faced problem among glusterfs users is: after a fresh installation of glusterfs in a virtual machine, the VM image is cloned to make multiple instances of the server. This breaks glusterd, because glusterd creates the node UUID on its first boot right after installation, and that UUID gets inherited by every clone. The result is weird behavior at the time of peer probe, where glusterd does not (yet) deal with UUID collisions in a user-friendly way. To handle this, the gluster system:: uuid reset command is implemented, which upon execution changes the UUID of the local glusterd.

Change-Id: If207dd2ad93ab94ef1a3253f409c21c442975f87
BUG: 811493
Signed-off-by: Raghavendra Bhat <raghavendra@redhat.com>
Reviewed-on: http://review.gluster.org/3637
Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Anand Avati <avati@redhat.com>
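A minimal usage sketch of the new command, run on the freshly cloned node before any peer probe. The confirmation question and the success message are taken from the CLI code added in this patch; the shell prompt and the "(y/n)" suffix of the confirmation prompt are illustrative assumptions:

    # gluster system:: uuid reset
    Resetting uuid changes the uuid of local glusterd. Do you want to continue? (y/n) y
    resetting the peer uuid has been successful

Once the local UUID has been regenerated, the clone can be probed into the cluster without colliding with the UUID inherited from the original VM image.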
Diffstat (limited to 'cli')
-rw-r--r--  cli/src/cli-cmd-system.c   65
-rw-r--r--  cli/src/cli-cmd-volume.c   13
-rw-r--r--  cli/src/cli-cmd.h          13
-rw-r--r--  cli/src/cli-rpc-ops.c      77
4 files changed, 155 insertions(+), 13 deletions(-)
diff --git a/cli/src/cli-cmd-system.c b/cli/src/cli-cmd-system.c
index 255eb605e64..b969c227b31 100644
--- a/cli/src/cli-cmd-system.c
+++ b/cli/src/cli-cmd-system.c
@@ -278,6 +278,67 @@ cli_cmd_umount_cbk (struct cli_state *state, struct cli_cmd_word *word,
return ret;
}
+int
+cli_cmd_uuid_reset_cbk (struct cli_state *state, struct cli_cmd_word *word,
+ const char **words, int wordcount)
+{
+ int ret = -1;
+ rpc_clnt_procedure_t *proc = NULL;
+ call_frame_t *frame = NULL;
+ int sent = 0;
+ int parse_error = 0;
+ gf_answer_t answer = GF_ANSWER_NO;
+ char *question = NULL;
+ cli_local_t *local = NULL;
+ dict_t *dict = NULL;
+ xlator_t *this = NULL;
+
+ question = "Resetting uuid changes the uuid of local glusterd. "
+ "Do you want to continue?";
+
+ if (wordcount != 3) {
+ cli_usage_out (word->pattern);
+ parse_error = 1;
+ goto out;
+ }
+
+ proc = &cli_rpc_prog->proctable[GLUSTER_CLI_UUID_RESET];
+
+ this = THIS;
+ frame = create_frame (this, this->ctx->pool);
+ if (!frame)
+ goto out;
+
+ dict = dict_new ();
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+ CLI_LOCAL_INIT (local, words, frame, dict);
+ answer = cli_cmd_get_confirmation (state, question);
+
+ if (GF_ANSWER_NO == answer) {
+ ret = 0;
+ goto out;
+ }
+
+ /* pass the (empty) dict as argument; no key-value pairs are sent to glusterd */
+ if (proc->fn) {
+ ret = proc->fn (frame, this, dict);
+ }
+
+out:
+ if (ret) {
+ cli_cmd_sent_status_get (&sent);
+ if ((sent == 0) && (parse_error == 0))
+ cli_out ("uuid reset failed");
+ }
+
+ CLI_STACK_DESTROY (frame);
+
+ return ret;
+}
+
struct cli_cmd cli_system_cmds[] = {
{ "system:: getspec <VOLID>",
cli_cmd_getspec_cbk,
@@ -303,6 +364,10 @@ struct cli_cmd cli_system_cmds[] = {
cli_cmd_umount_cbk,
"request an umount"},
+ { "system:: uuid reset",
+ cli_cmd_uuid_reset_cbk,
+ "reset the uuid of glusterd"},
+
{ "system:: help",
cli_cmd_system_help_cbk,
"display help for system commands"},
diff --git a/cli/src/cli-cmd-volume.c b/cli/src/cli-cmd-volume.c
index f523579a6b2..5b02ef5d9fe 100644
--- a/cli/src/cli-cmd-volume.c
+++ b/cli/src/cli-cmd-volume.c
@@ -29,19 +29,6 @@
#include "cli1-xdr.h"
#include "run.h"
-#define CLI_LOCAL_INIT(local, words, frame, dictionary) \
- do { \
- local = cli_local_get (); \
- \
- if (local) { \
- local->words = words; \
- if (dictionary) \
- local->dict = dictionary; \
- if (frame) \
- frame->local = local; \
- } \
- } while (0)
-
extern struct rpc_clnt *global_rpc;
extern rpc_clnt_prog_t *cli_rpc_prog;
diff --git a/cli/src/cli-cmd.h b/cli/src/cli-cmd.h
index 0ec3167742f..a30a6cfe418 100644
--- a/cli/src/cli-cmd.h
+++ b/cli/src/cli-cmd.h
@@ -20,6 +20,19 @@
#include "cli.h"
#include "list.h"
+#define CLI_LOCAL_INIT(local, words, frame, dictionary) \
+ do { \
+ local = cli_local_get (); \
+ \
+ if (local) { \
+ local->words = words; \
+ if (dictionary) \
+ local->dict = dictionary; \
+ if (frame) \
+ frame->local = local; \
+ } \
+ } while (0)
+
#define CLI_STACK_DESTROY(_frame) \
do { \
if (_frame) { \
diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index 10ff6e7a90e..703289b5b54 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -892,6 +892,60 @@ out:
}
int
+gf_cli3_1_uuid_reset_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gf_cli_rsp rsp = {0,};
+ int ret = -1;
+ cli_local_t *local = NULL;
+ call_frame_t *frame = NULL;
+ dict_t *dict = NULL;
+
+ if (-1 == req->rpc_status) {
+ goto out;
+ }
+
+ ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ goto out;
+ }
+
+ frame = myframe;
+ local = frame->local;
+ frame->local = NULL;
+
+ gf_log ("cli", GF_LOG_INFO, "Received resp to uuid reset");
+
+ if (global_state->mode & GLUSTER_MODE_XML) {
+ ret = cli_xml_output_dict ("uuidReset", dict, rsp.op_ret,
+ rsp.op_errno, rsp.op_errstr);
+ if (ret)
+ gf_log ("cli", GF_LOG_ERROR,
+ "Error outputting to xml");
+ goto out;
+ }
+
+ if (rsp.op_ret && strcmp (rsp.op_errstr, ""))
+ cli_err ("%s", rsp.op_errstr);
+ else
+ cli_out ("resetting the peer uuid has been %s",
+ (rsp.op_ret) ? "unsuccessful": "successful");
+ ret = rsp.op_ret;
+
+out:
+ cli_cmd_broadcast_response (ret);
+ cli_local_wipe (local);
+ if (rsp.dict.dict_val)
+ free (rsp.dict.dict_val);
+ if (dict)
+ dict_unref (dict);
+
+ gf_log ("", GF_LOG_INFO, "Returning with %d", ret);
+ return ret;
+}
+
+int
gf_cli_start_volume_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
@@ -2509,6 +2563,28 @@ out:
return ret;
}
+int32_t
+gf_cli3_1_uuid_reset (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gf_cli_req req = {{0,}};
+ int ret = 0;
+ dict_t *dict = NULL;
+
+ if (!frame || !this || !data) {
+ ret = -1;
+ goto out;
+ }
+
+ dict = data;
+ ret = cli_to_glusterd (&req, frame, gf_cli3_1_uuid_reset_cbk,
+ (xdrproc_t)xdr_gf_cli_req, dict,
+ GLUSTER_CLI_UUID_RESET, this, cli_rpc_prog,
+ NULL);
+out:
+ gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
int32_t
gf_cli_create_volume (call_frame_t *frame, xlator_t *this,
@@ -6104,6 +6180,7 @@ struct rpc_clnt_procedure gluster_cli_actors[GLUSTER_CLI_MAXVALUE] = {
[GLUSTER_CLI_PROBE] = {"PROBE_QUERY", gf_cli_probe},
[GLUSTER_CLI_DEPROBE] = {"DEPROBE_QUERY", gf_cli_deprobe},
[GLUSTER_CLI_LIST_FRIENDS] = {"LIST_FRIENDS", gf_cli_list_friends},
+ [GLUSTER_CLI_UUID_RESET] = {"UUID_RESET", gf_cli3_1_uuid_reset},
[GLUSTER_CLI_CREATE_VOLUME] = {"CREATE_VOLUME", gf_cli_create_volume},
[GLUSTER_CLI_DELETE_VOLUME] = {"DELETE_VOLUME", gf_cli_delete_volume},
[GLUSTER_CLI_START_VOLUME] = {"START_VOLUME", gf_cli_start_volume},