Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-handler.c')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c | 4803
1 file changed, 2813 insertions(+), 1990 deletions(-)
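
Note on the pattern introduced below: the new glusterd_big_locked_notify() and glusterd_big_locked_handler() wrappers serialize every RPC notify and actor callback behind the single priv->big_lock synclock, so glusterd handlers never run concurrently. The listing that follows is a minimal, self-contained sketch of that wrapper shape only; it substitutes a plain pthread mutex for GlusterFS's synclock_t, and demo_conf_t, demo_actor_fn, handle_probe and main are illustrative stand-ins that do not appear in the patch.

    #include <pthread.h>
    #include <stdio.h>

    /* Illustrative stand-in for glusterd's private conf; the pthread
     * mutex plays the role of priv->big_lock (a synclock_t in glusterd). */
    typedef struct {
            pthread_mutex_t big_lock;
    } demo_conf_t;

    typedef int (*demo_actor_fn) (void *req);

    static demo_conf_t conf = { .big_lock = PTHREAD_MUTEX_INITIALIZER };

    /* Same shape as glusterd_big_locked_handler(): run every actor under
     * one daemon-wide lock so no two handlers execute concurrently. */
    static int
    demo_big_locked_handler (void *req, demo_actor_fn actor)
    {
            int ret = -1;

            pthread_mutex_lock (&conf.big_lock);
            ret = actor (req);
            pthread_mutex_unlock (&conf.big_lock);

            return ret;
    }

    /* Hypothetical actor, standing in for an __glusterd_handle_* function. */
    static int
    handle_probe (void *req)
    {
            printf ("handling request %p under the big lock\n", req);
            return 0;
    }

    int
    main (void)
    {
            int dummy_req = 0;
            return demo_big_locked_handler (&dummy_req, handle_probe);
    }

In the patch itself each public handler becomes a one-line call into this wrapper; for example glusterd_handle_cluster_lock() simply returns glusterd_big_locked_handler (req, __glusterd_handle_cluster_lock).
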
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c index b35427cbf..71d076624 100644 --- a/xlators/mgmt/glusterd/src/glusterd-handler.c +++ b/xlators/mgmt/glusterd/src/glusterd-handler.c @@ -1,22 +1,12 @@ /* - Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com> - This file is part of GlusterFS. - - GlusterFS is free software; you can redistribute it and/or modify - it under the terms of the GNU Affero General Public License as published - by the Free Software Foundation; either version 3 of the License, - or (at your option) any later version. - - GlusterFS is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Affero General Public License for more details. - - You should have received a copy of the GNU Affero General Public License - along with this program. If not, see - <http://www.gnu.org/licenses/>. -*/ + Copyright (c) 2006-2012 Red Hat, Inc. <http://www.redhat.com> + This file is part of GlusterFS. + This file is licensed to you under your choice of the GNU Lesser + General Public License, version 3 or any later version (LGPLv3 or + later), or the GNU General Public License, version 2 (GPLv2), in all + cases as published by the Free Software Foundation. +*/ #ifndef _CONFIG_H #define _CONFIG_H #include "config.h" @@ -36,17 +26,21 @@ #include "compat.h" #include "compat-errno.h" #include "statedump.h" +#include "run.h" #include "glusterd-mem-types.h" #include "glusterd.h" #include "glusterd-sm.h" #include "glusterd-op-sm.h" #include "glusterd-utils.h" #include "glusterd-store.h" +#include "glusterd-locks.h" -#include "glusterd1.h" -#include "cli1.h" -#include "rpc-clnt.h" #include "glusterd1-xdr.h" +#include "cli1-xdr.h" +#include "xdr-generic.h" +#include "rpc-clnt.h" +#include "glusterd-volgen.h" +#include "glusterd-mountbroker.h" #include <sys/resource.h> #include <inttypes.h> @@ -54,143 +48,67 @@ #include "defaults.c" #include "common-utils.h" -static int -glusterd_friend_find_by_hostname (const char *hoststr, - glusterd_peerinfo_t **peerinfo) -{ - int ret = -1; - glusterd_conf_t *priv = NULL; - glusterd_peerinfo_t *entry = NULL; - glusterd_peer_hostname_t *name = NULL; - struct addrinfo *addr, *p; - char *host = NULL; - struct sockaddr_in6 *s6 = NULL; - struct sockaddr_in *s4 = NULL; - struct in_addr *in_addr = NULL; - char hname[1024] = {0,}; - - GF_ASSERT (hoststr); - GF_ASSERT (peerinfo); - - *peerinfo = NULL; - priv = THIS->private; - - GF_ASSERT (priv); - - list_for_each_entry (entry, &priv->peers, uuid_list) { - list_for_each_entry (name, &entry->hostnames, hostname_list) { - if (!strncmp (name->hostname, hoststr, - 1024)) { - - gf_log ("glusterd", GF_LOG_NORMAL, - "Friend %s found.. 
state: %d", hoststr, - entry->state.state); - *peerinfo = entry; - return 0; - } - } - } - - ret = getaddrinfo(hoststr, NULL, NULL, &addr); - if (ret != 0) { - gf_log ("", GF_LOG_ERROR, "error in getaddrinfo: %s\n", - gai_strerror(ret)); - goto out; - } - - for (p = addr; p != NULL; p = p->ai_next) { - switch (p->ai_family) { - case AF_INET: - s4 = (struct sockaddr_in *) p->ai_addr; - in_addr = &s4->sin_addr; - break; - case AF_INET6: - s6 = (struct sockaddr_in6 *) p->ai_addr; - in_addr =(struct in_addr *) &s6->sin6_addr; - break; - default: ret = -1; - goto out; - } - host = inet_ntoa(*in_addr); +#include "globals.h" +#include "glusterd-syncop.h" - ret = getnameinfo (p->ai_addr, p->ai_addrlen, hname, - 1024, NULL, 0, 0); - if (ret) - goto out; +#ifdef HAVE_BD_XLATOR +#include <lvm2app.h> +#endif - list_for_each_entry (entry, &priv->peers, uuid_list) { - list_for_each_entry (name, &entry->hostnames, - hostname_list) { - if (!strncmp (name->hostname, host, - 1024) || !strncmp (name->hostname,hname, - 1024)) { - gf_log ("glusterd", GF_LOG_NORMAL, - "Friend %s found.. state: %d", - hoststr, entry->state.state); - *peerinfo = entry; - freeaddrinfo (addr); - return 0; - } - } - } - } +extern uuid_t global_txn_id; -out: - if (addr) - freeaddrinfo (addr); - return -1; +int glusterd_big_locked_notify (struct rpc_clnt *rpc, void *mydata, + rpc_clnt_event_t event, + void *data, rpc_clnt_notify_t notify_fn) +{ + glusterd_conf_t *priv = THIS->private; + int ret = -1; + synclock_lock (&priv->big_lock); + ret = notify_fn (rpc, mydata, event, data); + synclock_unlock (&priv->big_lock); + return ret; } -static int -glusterd_friend_find_by_uuid (uuid_t uuid, - glusterd_peerinfo_t **peerinfo) +int glusterd_big_locked_handler (rpcsvc_request_t *req, rpcsvc_actor actor_fn) { - int ret = -1; - glusterd_conf_t *priv = NULL; - glusterd_peerinfo_t *entry = NULL; - - GF_ASSERT (peerinfo); - - *peerinfo = NULL; - priv = THIS->private; - - GF_ASSERT (priv); + glusterd_conf_t *priv = THIS->private; + int ret = -1; - if (uuid_is_null (uuid)) - return -1; - - list_for_each_entry (entry, &priv->peers, uuid_list) { - if (!uuid_compare (entry->uuid, uuid)) { - - gf_log ("glusterd", GF_LOG_NORMAL, - "Friend found.. state: %d", - entry->state.state); - *peerinfo = entry; - return 0; - } - } + synclock_lock (&priv->big_lock); + ret = actor_fn (req); + synclock_unlock (&priv->big_lock); return ret; } static int glusterd_handle_friend_req (rpcsvc_request_t *req, uuid_t uuid, - char *hostname, int port, dict_t *dict) + char *hostname, int port, + gd1_mgmt_friend_req *friend_req) { int ret = -1; glusterd_peerinfo_t *peerinfo = NULL; glusterd_friend_sm_event_t *event = NULL; glusterd_friend_req_ctx_t *ctx = NULL; + char rhost[UNIX_PATH_MAX + 1] = {0}; + uuid_t friend_uuid = {0}; + dict_t *dict = NULL; + uuid_parse (uuid_utoa (uuid), friend_uuid); if (!port) - port = 6969; // TODO: use define values. 
+ port = GF_DEFAULT_BASE_PORT; - ret = glusterd_friend_find (uuid, hostname, &peerinfo); + ret = glusterd_remote_hostname_get (req, rhost, sizeof (rhost)); + ret = glusterd_friend_find (uuid, rhost, &peerinfo); if (ret) { - gf_log ("glusterd", GF_LOG_NORMAL, - "Unable to find peer"); - + ret = glusterd_xfer_friend_add_resp (req, hostname, rhost, port, + -1, GF_PROBE_UNKNOWN_PEER); + if (friend_req->vols.vols_val) { + free (friend_req->vols.vols_val); + friend_req->vols.vols_val = NULL; + } + goto out; } ret = glusterd_friend_sm_new_event @@ -215,12 +133,26 @@ glusterd_handle_friend_req (rpcsvc_request_t *req, uuid_t uuid, if (hostname) ctx->hostname = gf_strdup (hostname); ctx->req = req; - ctx->vols = dict; + dict = dict_new (); + if (!dict) { + ret = -1; + goto out; + } + + ret = dict_unserialize (friend_req->vols.vols_val, + friend_req->vols.vols_len, + &dict); + + if (ret) + goto out; + else + dict->extra_stdfree = friend_req->vols.vols_val; + + ctx->vols = dict; event->ctx = ctx; ret = glusterd_friend_sm_inject_event (event); - if (ret) { gf_log ("glusterd", GF_LOG_ERROR, "Unable to inject event %d, " "ret = %d", event->event, ret); @@ -233,16 +165,23 @@ out: if (0 != ret) { if (ctx && ctx->hostname) GF_FREE (ctx->hostname); - if (ctx && ctx->vols) - dict_destroy (ctx->vols); - if (ctx) - GF_FREE (ctx); + GF_FREE (ctx); + if (dict) { + if ((!dict->extra_stdfree) && + friend_req->vols.vols_val) + free (friend_req->vols.vols_val); + dict_unref (dict); + } else { + free (friend_req->vols.vols_val); + } + GF_FREE (event); + } else { + if (peerinfo && (0 == peerinfo->connected)) + ret = GLUSTERD_CONNECTION_AWAITED; } - return ret; } - static int glusterd_handle_unfriend_req (rpcsvc_request_t *req, uuid_t uuid, char *hostname, int port) @@ -253,14 +192,17 @@ glusterd_handle_unfriend_req (rpcsvc_request_t *req, uuid_t uuid, glusterd_friend_req_ctx_t *ctx = NULL; if (!port) - port = 6969; //TODO: use define'd macro + port = GF_DEFAULT_BASE_PORT; ret = glusterd_friend_find (uuid, hostname, &peerinfo); if (ret) { - gf_log ("glusterd", GF_LOG_NORMAL, - "Unable to find peer"); - + gf_log ("glusterd", GF_LOG_CRITICAL, + "Received remove-friend from unknown peer %s", + hostname); + ret = glusterd_xfer_friend_remove_resp (req, hostname, + port); + goto out; } ret = glusterd_friend_sm_new_event @@ -302,8 +244,7 @@ out: if (0 != ret) { if (ctx && ctx->hostname) GF_FREE (ctx->hostname); - if (ctx) - GF_FREE (ctx); + GF_FREE (ctx); } return ret; @@ -316,13 +257,14 @@ glusterd_add_peer_detail_to_dict (glusterd_peerinfo_t *peerinfo, int ret = -1; char key[256] = {0, }; + char *peer_uuid_str = NULL; GF_ASSERT (peerinfo); GF_ASSERT (friends); snprintf (key, 256, "friend%d.uuid", count); - uuid_unparse (peerinfo->uuid, peerinfo->uuid_str); - ret = dict_set_str (friends, key, peerinfo->uuid_str); + peer_uuid_str = gd_peer_uuid_str (peerinfo); + ret = dict_set_str (friends, key, peer_uuid_str); if (ret) goto out; @@ -336,8 +278,14 @@ glusterd_add_peer_detail_to_dict (glusterd_peerinfo_t *peerinfo, if (ret) goto out; + snprintf (key, 256, "friend%d.stateId", count); + ret = dict_set_int32 (friends, key, peerinfo->state.state); + if (ret) + goto out; + snprintf (key, 256, "friend%d.state", count); - ret = dict_set_int32 (friends, key, (int32_t)peerinfo->state.state); + ret = dict_set_str (friends, key, + glusterd_friend_sm_state_name_get(peerinfo->state.state)); if (ret) goto out; @@ -350,9 +298,34 @@ out: return ret; } +struct args_pack { + dict_t *dict; + int vol_count; + int opt_count; +}; + +static int 
+_build_option_key (dict_t *d, char *k, data_t *v, void *tmp) +{ + char reconfig_key[256] = {0, }; + struct args_pack *pack = NULL; + int ret = -1; + + pack = tmp; + if (strcmp (k, GLUSTERD_GLOBAL_OPT_VERSION) == 0) + return 0; + snprintf (reconfig_key, 256, "volume%d.option.%s", + pack->vol_count, k); + ret = dict_set_str (pack->dict, reconfig_key, v->data); + if (0 == ret) + pack->opt_count++; + + return 0; +} + int glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo, - dict_t *volumes, int count) + dict_t *volumes, int count) { int ret = -1; @@ -360,10 +333,23 @@ glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo, glusterd_brickinfo_t *brickinfo = NULL; char *buf = NULL; int i = 1; + dict_t *dict = NULL; + glusterd_conf_t *priv = NULL; + char *volume_id_str = NULL; + struct args_pack pack = {0,}; + xlator_t *this = NULL; +#ifdef HAVE_BD_XLATOR + int caps = 0; +#endif GF_ASSERT (volinfo); GF_ASSERT (volumes); + this = THIS; + priv = this->private; + + GF_ASSERT (priv); + snprintf (key, 256, "volume%d.name", count); ret = dict_set_str (volumes, key, volinfo->volname); if (ret) @@ -379,18 +365,131 @@ glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo, if (ret) goto out; + /* As of now, the snap volumes are also displayed as part of + volume info command. So this change is to display whether + the volume is original volume or the snap_volume. If + displaying of snap volumes in volume info o/p is not needed + this should be removed. + */ + snprintf (key, 256, "volume%d.snap_volume", count); + ret = dict_set_int32 (volumes, key, volinfo->is_snap_volume); + if (ret) { + gf_log (this->name, GF_LOG_WARNING, "failed to set whether " + "the volume is a snap volume or actual volume (%s)", + volinfo->volname); + goto out; + } + snprintf (key, 256, "volume%d.brick_count", count); ret = dict_set_int32 (volumes, key, volinfo->brick_count); if (ret) goto out; + snprintf (key, 256, "volume%d.dist_count", count); + ret = dict_set_int32 (volumes, key, volinfo->dist_leaf_count); + if (ret) + goto out; + + snprintf (key, 256, "volume%d.stripe_count", count); + ret = dict_set_int32 (volumes, key, volinfo->stripe_count); + if (ret) + goto out; + + snprintf (key, 256, "volume%d.replica_count", count); + ret = dict_set_int32 (volumes, key, volinfo->replica_count); + if (ret) + goto out; + snprintf (key, 256, "volume%d.transport", count); ret = dict_set_int32 (volumes, key, volinfo->transport_type); if (ret) goto out; + volume_id_str = gf_strdup (uuid_utoa (volinfo->volume_id)); + if (!volume_id_str) + goto out; + + snprintf (key, sizeof (key), "volume%d.volume_id", count); + ret = dict_set_dynstr (volumes, key, volume_id_str); + if (ret) + goto out; + + snprintf (key, 256, "volume%d.rebalance", count); + ret = dict_set_int32 (volumes, key, volinfo->rebal.defrag_cmd); + if (ret) + goto out; + +#ifdef HAVE_BD_XLATOR + if (volinfo->caps) { + caps = 0; + snprintf (key, 256, "volume%d.xlator0", count); + buf = GF_MALLOC (256, gf_common_mt_char); + if (!buf) { + ret = ENOMEM; + goto out; + } + if (volinfo->caps & CAPS_BD) + snprintf (buf, 256, "BD"); + ret = dict_set_dynstr (volumes, key, buf); + if (ret) { + GF_FREE (buf); + goto out; + } + + if (volinfo->caps & CAPS_THIN) { + snprintf (key, 256, "volume%d.xlator0.caps%d", count, + caps++); + buf = GF_MALLOC (256, gf_common_mt_char); + if (!buf) { + ret = ENOMEM; + goto out; + } + snprintf (buf, 256, "thin"); + ret = dict_set_dynstr (volumes, key, buf); + if (ret) { + GF_FREE (buf); + goto out; + } + } + + if (volinfo->caps & 
CAPS_OFFLOAD_COPY) { + snprintf (key, 256, "volume%d.xlator0.caps%d", count, + caps++); + buf = GF_MALLOC (256, gf_common_mt_char); + if (!buf) { + ret = ENOMEM; + goto out; + } + snprintf (buf, 256, "offload_copy"); + ret = dict_set_dynstr (volumes, key, buf); + if (ret) { + GF_FREE (buf); + goto out; + } + } + + if (volinfo->caps & CAPS_OFFLOAD_SNAPSHOT) { + snprintf (key, 256, "volume%d.xlator0.caps%d", count, + caps++); + buf = GF_MALLOC (256, gf_common_mt_char); + if (!buf) { + ret = ENOMEM; + goto out; + } + snprintf (buf, 256, "offload_snapshot"); + ret = dict_set_dynstr (volumes, key, buf); + if (ret) { + GF_FREE (buf); + goto out; + } + } + + } +#endif + list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) { char brick[1024] = {0,}; + char brick_uuid[64] = {0,}; snprintf (key, 256, "volume%d.brick%d", count, i); snprintf (brick, 1024, "%s:%s", brickinfo->hostname, brickinfo->path); @@ -398,8 +497,42 @@ glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo, ret = dict_set_dynstr (volumes, key, buf); if (ret) goto out; + snprintf (key, 256, "volume%d.brick%d.uuid", count, i); + snprintf (brick_uuid, 64, "%s", uuid_utoa (brickinfo->uuid)); + buf = gf_strdup (brick_uuid); + if (!buf) + goto out; + ret = dict_set_dynstr (volumes, key, buf); + if (ret) + goto out; + +#ifdef HAVE_BD_XLATOR + if (volinfo->caps & CAPS_BD) { + snprintf (key, 256, "volume%d.vg%d", count, i); + snprintf (brick, 1024, "%s", brickinfo->vg); + buf = gf_strdup (brick); + ret = dict_set_dynstr (volumes, key, buf); + if (ret) + goto out; + } +#endif i++; } + + dict = volinfo->dict; + if (!dict) { + ret = 0; + goto out; + } + + pack.dict = volumes; + pack.vol_count = count; + pack.opt_count = 0; + dict_foreach (dict, _build_option_key, (void *) &pack); + dict_foreach (priv->opts, _build_option_key, &pack); + + snprintf (key, 256, "volume%d.opt_count", pack.vol_count); + ret = dict_set_int32 (volumes, key, pack.opt_count); out: return ret; } @@ -409,13 +542,18 @@ glusterd_friend_find (uuid_t uuid, char *hostname, glusterd_peerinfo_t **peerinfo) { int ret = -1; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT (this); if (uuid) { ret = glusterd_friend_find_by_uuid (uuid, peerinfo); if (ret) { - gf_log ("glusterd", GF_LOG_NORMAL, - "Unable to find peer by uuid"); + gf_log (this->name, GF_LOG_DEBUG, + "Unable to find peer by uuid: %s", + uuid_utoa (uuid)); } else { goto out; } @@ -426,7 +564,7 @@ glusterd_friend_find (uuid_t uuid, char *hostname, ret = glusterd_friend_find_by_hostname (hostname, peerinfo); if (ret) { - gf_log ("glusterd", GF_LOG_NORMAL, + gf_log (this->name, GF_LOG_DEBUG, "Unable to find hostname: %s", hostname); } else { goto out; @@ -437,649 +575,705 @@ out: return ret; } -int -glusterd_handle_cluster_lock (rpcsvc_request_t *req) +int32_t +glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx, + char *err_str, size_t err_len) { - gd1_mgmt_cluster_lock_req lock_req = {{0},}; - int32_t ret = -1; - char str[50] = {0,}; - glusterd_op_lock_ctx_t *ctx = NULL; + int32_t ret = -1; + dict_t *dict = NULL; + xlator_t *this = NULL; + glusterd_conf_t *priv = NULL; + int32_t locked = 0; + char *tmp = NULL; + char *volname = NULL; + uuid_t *txn_id = NULL; + uuid_t *originator_uuid = NULL; + glusterd_op_info_t txn_op_info = {{0},}; + glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE; GF_ASSERT (req); + GF_ASSERT ((op > GD_OP_NONE) && (op < GD_OP_MAX)); + GF_ASSERT (NULL != ctx); - if (!gd_xdr_to_mgmt_cluster_lock_req (req->msg[0], &lock_req)) { - //failed to decode 
msg; - req->rpc_err = GARBAGE_ARGS; + this = THIS; + GF_ASSERT (this); + priv = this->private; + GF_ASSERT (priv); + + dict = ctx; + + /* Generate a transaction-id for this operation and + * save it in the dict. This transaction id distinguishes + * each transaction, and helps separate opinfos in the + * op state machine. */ + txn_id = GF_CALLOC (1, sizeof(uuid_t), gf_common_mt_uuid_t); + if (!txn_id) + goto out; + + uuid_generate (*txn_id); + + ret = dict_set_bin (dict, "transaction_id", + txn_id, sizeof(*txn_id)); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, + "Failed to set transaction id."); goto out; } - uuid_unparse (lock_req.uuid, str); - gf_log ("glusterd", GF_LOG_NORMAL, - "Received LOCK from uuid: %s", str); + gf_log (this->name, GF_LOG_DEBUG, + "Transaction_id = %s", uuid_utoa (*txn_id)); + /* Save the MY_UUID as the originator_uuid. This originator_uuid + * will be used by is_origin_glusterd() to determine if a node + * is the originator node for a command. */ + originator_uuid = GF_CALLOC (1, sizeof(uuid_t), + gf_common_mt_uuid_t); + if (!originator_uuid) { + ret = -1; + goto out; + } - ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t); + uuid_copy (*originator_uuid, MY_UUID); + ret = dict_set_bin (dict, "originator_uuid", + originator_uuid, sizeof (uuid_t)); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, + "Failed to set originator uuid."); + goto out; + } - if (!ctx) { - //respond here - return -1; + /* Based on the op_version, acquire a cluster or mgmt_v3 lock */ + if (priv->op_version < 3) { + ret = glusterd_lock (MY_UUID); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, + "Unable to acquire lock on localhost, ret: %d", + ret); + snprintf (err_str, err_len, + "Another transaction is in progress. " + "Please try again after sometime."); + goto out; + } + } else { + /* If no volname is given as a part of the command, locks will + * not be held */ + ret = dict_get_str (dict, "volname", &tmp); + if (ret) { + gf_log ("", GF_LOG_ERROR, "Failed to get volume " + "name"); + goto local_locking_done; + } else { + /* Use a copy of volname, as cli response will be + * sent before the unlock, and the volname in the + * dict, might be removed */ + volname = gf_strdup (tmp); + if (!volname) + goto out; + } + + ret = glusterd_mgmt_v3_lock (volname, MY_UUID, "vol"); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, + "Unable to acquire lock for %s", volname); + snprintf (err_str, err_len, + "Another transaction is in progress for %s. " + "Please try again after sometime.", volname); + goto out; + } } - uuid_copy (ctx->uuid, lock_req.uuid); - ctx->req = req; + locked = 1; + gf_log (this->name, GF_LOG_DEBUG, "Acquired lock on localhost"); + +local_locking_done: - ret = glusterd_op_sm_inject_event (GD_OP_EVENT_LOCK, ctx); + /* If no volname is given as a part of the command, locks will + * not be held, hence sending stage event. 
*/ + if (volname) + event_type = GD_OP_EVENT_START_LOCK; + else { + txn_op_info.state.state = GD_OP_STATE_LOCK_SENT; + event_type = GD_OP_EVENT_ALL_ACC; + } + + /* Save opinfo for this transaction with the transaction id */ + glusterd_txn_opinfo_init (&txn_op_info, NULL, &op, ctx, req); + + ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, + "Unable to set transaction's opinfo"); + if (ctx) + dict_unref (ctx); + goto out; + } + + ret = glusterd_op_sm_inject_event (event_type, txn_id, ctx); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, "Failed to acquire cluster" + " lock."); + goto out; + } out: - gf_log ("", GF_LOG_NORMAL, "Returning %d", ret); + if (locked && ret) { + /* Based on the op-version, we release the + * cluster or mgmt_v3 lock */ + if (priv->op_version < 3) + glusterd_unlock (MY_UUID); + else { + ret = glusterd_mgmt_v3_unlock (volname, MY_UUID, + "vol"); + if (ret) + gf_log (this->name, GF_LOG_ERROR, + "Unable to release lock for %s", + volname); + ret = -1; + } + } + if (volname) + GF_FREE (volname); + + gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret); return ret; } int -glusterd_handle_stage_op (rpcsvc_request_t *req) +__glusterd_handle_cluster_lock (rpcsvc_request_t *req) { - int32_t ret = -1; - char str[50] = {0,}; - gd1_mgmt_stage_op_req stage_req = {{0,}}; - glusterd_op_stage_ctx_t *ctx = NULL; + dict_t *op_ctx = NULL; + int32_t ret = -1; + gd1_mgmt_cluster_lock_req lock_req = {{0},}; + glusterd_op_lock_ctx_t *ctx = NULL; + glusterd_op_t op = GD_OP_EVENT_LOCK; + glusterd_peerinfo_t *peerinfo = NULL; + glusterd_op_info_t txn_op_info = {{0},}; + uuid_t *txn_id = &global_txn_id; + xlator_t *this = NULL; + this = THIS; + GF_ASSERT (this); GF_ASSERT (req); - if (!gd_xdr_to_mgmt_stage_op_req (req->msg[0], &stage_req)) { - //failed to decode msg; + ret = xdr_to_generic (req->msg[0], &lock_req, + (xdrproc_t)xdr_gd1_mgmt_cluster_lock_req); + if (ret < 0) { + gf_log (this->name, GF_LOG_ERROR, "Failed to decode lock " + "request received from peer"); req->rpc_err = GARBAGE_ARGS; goto out; } - uuid_unparse (stage_req.uuid, str); - gf_log ("glusterd", GF_LOG_NORMAL, - "Received stage op from uuid: %s", str); + gf_log (this->name, GF_LOG_DEBUG, "Received LOCK from uuid: %s", + uuid_utoa (lock_req.uuid)); - ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_stage_ctx_t); + if (glusterd_friend_find_by_uuid (lock_req.uuid, &peerinfo)) { + gf_log (this->name, GF_LOG_WARNING, "%s doesn't " + "belong to the cluster. 
Ignoring request.", + uuid_utoa (lock_req.uuid)); + ret = -1; + goto out; + } + + ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t); if (!ctx) { //respond here return -1; } - //CHANGE THIS - uuid_copy (ctx->stage_req.uuid, stage_req.uuid); - ctx->stage_req.op = stage_req.op; - ctx->stage_req.buf.buf_len = stage_req.buf.buf_len; - ctx->stage_req.buf.buf_val = GF_CALLOC (1, stage_req.buf.buf_len, - gf_gld_mt_string); - if (!ctx->stage_req.buf.buf_val) + uuid_copy (ctx->uuid, lock_req.uuid); + ctx->req = req; + ctx->dict = NULL; + + op_ctx = dict_new (); + if (!op_ctx) { + gf_log (this->name, GF_LOG_ERROR, + "Unable to set new dict"); goto out; + } - memcpy (ctx->stage_req.buf.buf_val, stage_req.buf.buf_val, - stage_req.buf.buf_len); + glusterd_txn_opinfo_init (&txn_op_info, NULL, &op, op_ctx, req); - ctx->req = req; + ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, + "Unable to set transaction's opinfo"); + dict_unref (txn_op_info.op_ctx); + goto out; + } - ret = glusterd_op_sm_inject_event (GD_OP_EVENT_STAGE_OP, ctx); + ret = glusterd_op_sm_inject_event (GD_OP_EVENT_LOCK, txn_id, ctx); + if (ret) + gf_log (this->name, GF_LOG_ERROR, + "Failed to inject event GD_OP_EVENT_LOCK"); out: - if (stage_req.buf.buf_val) - free (stage_req.buf.buf_val);//malloced by xdr + gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret); + + glusterd_friend_sm (); + glusterd_op_sm (); + return ret; } int -glusterd_handle_commit_op (rpcsvc_request_t *req) +glusterd_handle_cluster_lock (rpcsvc_request_t *req) { - int32_t ret = -1; - char str[50] = {0,}; - gd1_mgmt_commit_op_req commit_req = {{0},}; - glusterd_op_commit_ctx_t *ctx = NULL; - - GF_ASSERT (req); + return glusterd_big_locked_handler (req, + __glusterd_handle_cluster_lock); +} - if (!gd_xdr_to_mgmt_commit_op_req (req->msg[0], &commit_req)) { - //failed to decode msg; - req->rpc_err = GARBAGE_ARGS; - goto out; - } +int +glusterd_req_ctx_create (rpcsvc_request_t *rpc_req, + glusterd_op_t op, uuid_t uuid, + char *buf_val, size_t buf_len, + gf_gld_mem_types_t mem_type, + glusterd_req_ctx_t **req_ctx_out) +{ + int ret = -1; + char str[50] = {0,}; + glusterd_req_ctx_t *req_ctx = NULL; + dict_t *dict = NULL; + xlator_t *this = NULL; - uuid_unparse (commit_req.uuid, str); + this = THIS; + GF_ASSERT (this); - gf_log ("glusterd", GF_LOG_NORMAL, - "Received commit op from uuid: %s", str); + uuid_unparse (uuid, str); + gf_log (this->name, GF_LOG_DEBUG, "Received op from uuid %s", str); - ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_commit_ctx_t); + dict = dict_new (); + if (!dict) + goto out; - if (!ctx) { - //respond here - return -1; + req_ctx = GF_CALLOC (1, sizeof (*req_ctx), mem_type); + if (!req_ctx) { + goto out; } - ctx->req = req; - //CHANGE THIS - uuid_copy (ctx->stage_req.uuid, commit_req.uuid); - ctx->stage_req.op = commit_req.op; - ctx->stage_req.buf.buf_len = commit_req.buf.buf_len; - ctx->stage_req.buf.buf_val = GF_CALLOC (1, commit_req.buf.buf_len, - gf_gld_mt_string); - if (!ctx->stage_req.buf.buf_val) + uuid_copy (req_ctx->uuid, uuid); + req_ctx->op = op; + ret = dict_unserialize (buf_val, buf_len, &dict); + if (ret) { + gf_log (this->name, GF_LOG_WARNING, + "failed to unserialize the dictionary"); goto out; + } - memcpy (ctx->stage_req.buf.buf_val, commit_req.buf.buf_val, - commit_req.buf.buf_len); - - ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_OP, ctx); - + req_ctx->dict = dict; + req_ctx->req = rpc_req; + *req_ctx_out = req_ctx; + ret = 0; out: - if 
(commit_req.buf.buf_val) - free (commit_req.buf.buf_val);//malloced by xdr + if (ret) { + if (dict) + dict_unref (dict); + GF_FREE (req_ctx); + } return ret; } int -glusterd_handle_cli_probe (rpcsvc_request_t *req) +__glusterd_handle_stage_op (rpcsvc_request_t *req) { int32_t ret = -1; - gf1_cli_probe_req cli_req = {0,}; + glusterd_req_ctx_t *req_ctx = NULL; + gd1_mgmt_stage_op_req op_req = {{0},}; glusterd_peerinfo_t *peerinfo = NULL; + xlator_t *this = NULL; + uuid_t *txn_id = &global_txn_id; + glusterd_op_info_t txn_op_info = {{0},}; + glusterd_op_sm_state_info_t state; + + this = THIS; + GF_ASSERT (this); GF_ASSERT (req); - if (!gf_xdr_to_cli_probe_req (req->msg[0], &cli_req)) { - //failed to decode msg; - gf_log ("", GF_LOG_ERROR, "xdr decoding error"); + ret = xdr_to_generic (req->msg[0], &op_req, + (xdrproc_t)xdr_gd1_mgmt_stage_op_req); + if (ret < 0) { + gf_log (this->name, GF_LOG_ERROR, "Failed to decode stage " + "request received from peer"); req->rpc_err = GARBAGE_ARGS; goto out; } - gf_cmd_log ("peer probe", " on host %s:%d", cli_req.hostname, - cli_req.port); - gf_log ("glusterd", GF_LOG_NORMAL, "Received CLI probe req %s %d", - cli_req.hostname, cli_req.port); - - if (!(ret = glusterd_is_local_addr(cli_req.hostname))) { - glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_LOCALHOST, - cli_req.hostname, cli_req.port); + if (glusterd_friend_find_by_uuid (op_req.uuid, &peerinfo)) { + gf_log (this->name, GF_LOG_WARNING, "%s doesn't " + "belong to the cluster. Ignoring request.", + uuid_utoa (op_req.uuid)); + ret = -1; goto out; } - if (!(ret = glusterd_friend_find_by_hostname(cli_req.hostname, - &peerinfo))) { - if ((peerinfo->state.state != GD_FRIEND_STATE_REQ_RCVD) - || (peerinfo->state.state != GD_FRIEND_STATE_DEFAULT)) { - gf_log ("glusterd", GF_LOG_NORMAL, "Probe host %s port %d" - "already a friend", cli_req.hostname, cli_req.port); - glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_FRIEND, - cli_req.hostname, cli_req.port); + ret = glusterd_req_ctx_create (req, op_req.op, op_req.uuid, + op_req.buf.buf_val, op_req.buf.buf_len, + gf_gld_mt_op_stage_ctx_t, &req_ctx); + if (ret) + goto out; + + ret = dict_get_bin (req_ctx->dict, "transaction_id", (void **)&txn_id); + + gf_log ("", GF_LOG_DEBUG, "transaction ID = %s", uuid_utoa (*txn_id)); + + /* In cases where there is no volname, the receivers won't have a + * transaction opinfo created, as for those operations, the locking + * phase where the transaction opinfos are created, won't be called. */ + ret = glusterd_get_txn_opinfo (txn_id, &txn_op_info); + if (ret) { + gf_log (this->name, GF_LOG_DEBUG, + "No transaction's opinfo set"); + + state.state = GD_OP_STATE_LOCKED; + glusterd_txn_opinfo_init (&txn_op_info, &state, + &op_req.op, req_ctx->dict, req); + + ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, + "Unable to set transaction's opinfo"); + dict_unref (req_ctx->dict); goto out; } } - ret = glusterd_probe_begin (req, cli_req.hostname, cli_req.port); - gf_cmd_log ("peer probe","on host %s:%d %s",cli_req.hostname, cli_req.port, - (ret) ? 
"FAILED" : "SUCCESS"); -out: - if (cli_req.hostname) - free (cli_req.hostname);//its malloced by xdr + ret = glusterd_op_sm_inject_event (GD_OP_EVENT_STAGE_OP, + txn_id, req_ctx); + if (ret) + gf_log (this->name, GF_LOG_ERROR, + "Failed to inject event GD_OP_EVENT_STAGE_OP"); + + out: + free (op_req.buf.buf_val);//malloced by xdr + glusterd_friend_sm (); + glusterd_op_sm (); return ret; } int -glusterd_handle_cli_deprobe (rpcsvc_request_t *req) +glusterd_handle_stage_op (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __glusterd_handle_stage_op); +} + + +int +__glusterd_handle_commit_op (rpcsvc_request_t *req) { int32_t ret = -1; - gf1_cli_probe_req cli_req = {0,}; + glusterd_req_ctx_t *req_ctx = NULL; + gd1_mgmt_commit_op_req op_req = {{0},}; + glusterd_peerinfo_t *peerinfo = NULL; + xlator_t *this = NULL; + uuid_t *txn_id = &global_txn_id; + this = THIS; + GF_ASSERT (this); GF_ASSERT (req); - if (!gf_xdr_to_cli_probe_req (req->msg[0], &cli_req)) { - //failed to decode msg; + ret = xdr_to_generic (req->msg[0], &op_req, + (xdrproc_t)xdr_gd1_mgmt_commit_op_req); + if (ret < 0) { + gf_log (this->name, GF_LOG_ERROR, "Failed to decode commit " + "request received from peer"); req->rpc_err = GARBAGE_ARGS; goto out; } - gf_log ("glusterd", GF_LOG_NORMAL, "Received CLI deprobe req"); + if (glusterd_friend_find_by_uuid (op_req.uuid, &peerinfo)) { + gf_log (this->name, GF_LOG_WARNING, "%s doesn't " + "belong to the cluster. Ignoring request.", + uuid_utoa (op_req.uuid)); + ret = -1; + goto out; + } + + //the structures should always be equal + GF_ASSERT (sizeof (gd1_mgmt_commit_op_req) == sizeof (gd1_mgmt_stage_op_req)); + ret = glusterd_req_ctx_create (req, op_req.op, op_req.uuid, + op_req.buf.buf_val, op_req.buf.buf_len, + gf_gld_mt_op_commit_ctx_t, &req_ctx); + if (ret) + goto out; + + ret = dict_get_bin (req_ctx->dict, "transaction_id", (void **)&txn_id); + gf_log ("", GF_LOG_DEBUG, "transaction ID = %s", uuid_utoa (*txn_id)); - ret = glusterd_deprobe_begin (req, cli_req.hostname, cli_req.port); + ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_OP, + txn_id, req_ctx); - gf_cmd_log ("peer deprobe", "on host %s:%d %s", cli_req.hostname, - cli_req.port, (ret) ? 
"FAILED" : "SUCCESS"); out: - if (cli_req.hostname) - free (cli_req.hostname);//malloced by xdr + free (op_req.buf.buf_val);//malloced by xdr + glusterd_friend_sm (); + glusterd_op_sm (); return ret; } int -glusterd_handle_cli_list_friends (rpcsvc_request_t *req) +glusterd_handle_commit_op (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __glusterd_handle_commit_op); +} + +int +__glusterd_handle_cli_probe (rpcsvc_request_t *req) { int32_t ret = -1; - gf1_cli_peer_list_req cli_req = {0,}; - dict_t *dict = NULL; + gf_cli_req cli_req = {{0,},}; + glusterd_peerinfo_t *peerinfo = NULL; + gf_boolean_t run_fsm = _gf_true; + xlator_t *this = NULL; + char *bind_name = NULL; + dict_t *dict = NULL; + char *hostname = NULL; + int port = 0; GF_ASSERT (req); + this = THIS; - if (!gf_xdr_to_cli_peer_list_req (req->msg[0], &cli_req)) { + ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); + if (ret < 0) { //failed to decode msg; + gf_log ("", GF_LOG_ERROR, "xdr decoding error"); req->rpc_err = GARBAGE_ARGS; goto out; } - gf_log ("glusterd", GF_LOG_NORMAL, "Received cli list req"); - if (cli_req.dict.dict_len) { - /* Unserialize the dictionary */ - dict = dict_new (); + dict = dict_new (); ret = dict_unserialize (cli_req.dict.dict_val, - cli_req.dict.dict_len, - &dict); + cli_req.dict.dict_len, &dict); if (ret < 0) { - gf_log ("glusterd", GF_LOG_ERROR, - "failed to " + gf_log (this->name, GF_LOG_ERROR, "Failed to " "unserialize req-buffer to dictionary"); goto out; - } else { - dict->extra_stdfree = cli_req.dict.dict_val; } } - ret = glusterd_list_friends (req, dict, cli_req.flags); - -out: - if (dict) - dict_unref (dict); - return ret; -} - -int -glusterd_check_and_rebalance (glusterd_volinfo_t *volinfo, char *dir) -{ - int ret = -1; - int dst_fd = -1; - int src_fd = -1; - DIR *fd = NULL; - glusterd_defrag_info_t *defrag = NULL; - struct dirent *entry = NULL; - struct stat stbuf = {0,}; - struct stat new_stbuf = {0,}; - char full_path[1024] = {0,}; - char tmp_filename[1024] = {0,}; - char value[128] = {0,}; - - defrag = volinfo->defrag; - if (!defrag) + ret = dict_get_str (dict, "hostname", &hostname); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, "Failed to get hostname"); goto out; + } - - /* Fix files at this level */ - fd = opendir (dir); - if (!fd) + ret = dict_get_int32 (dict, "port", &port); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, "Failed to get port"); goto out; - while ((entry = readdir (fd))) { - if (!entry) - break; - - if (!strcmp (entry->d_name, ".") || !strcmp (entry->d_name, "..")) - continue; - - snprintf (full_path, 1024, "%s/%s", dir, entry->d_name); - - ret = stat (full_path, &stbuf); - if (ret == -1) - continue; - - if (S_ISDIR (stbuf.st_mode)) { - /* Fix the layout of the directory */ - getxattr (full_path, "trusted.distribute.fix.layout", - &value, 128); - continue; - } - if (S_ISREG (stbuf.st_mode) && ((stbuf.st_mode & 01000) == 01000)) { - /* TODO: run the defrag */ - snprintf (tmp_filename, 1024, "%s/.%s.gfs%llu", dir, - entry->d_name, - (unsigned long long)stbuf.st_size); - - dst_fd = creat (tmp_filename, (stbuf.st_mode & ~01000)); - if (dst_fd == -1) - continue; - - src_fd = open (full_path, O_RDONLY); - if (src_fd == -1) { - close (dst_fd); - continue; - } - - while (1) { - ret = read (src_fd, defrag->databuf, 131072); - if (!ret || (ret < 0)) { - close (dst_fd); - close (src_fd); - break; - } - ret = write (dst_fd, defrag->databuf, ret); - if (ret < 0) { - close (dst_fd); - close (src_fd); - break; - } - } - - ret = stat 
(full_path, &new_stbuf); - if (ret < 0) - continue; - if (new_stbuf.st_mtime != stbuf.st_mtime) - continue; - - ret = rename (tmp_filename, full_path); - if (ret != -1) { - LOCK (&defrag->lock); - { - defrag->total_files += 1; - defrag->total_data += stbuf.st_size; - } - UNLOCK (&defrag->lock); - } - } else { - LOCK (&defrag->lock); - { - if (S_ISREG (stbuf.st_mode)) - defrag->num_files_lookedup += 1; - } - UNLOCK (&defrag->lock); - } - - if (volinfo->defrag_status == GF_DEFRAG_STATUS_STOPED) { - closedir (fd); - goto out; - } } - closedir (fd); - /* Iterate over directories */ - fd = opendir (dir); - if (!fd) + if (glusterd_is_any_volume_in_server_quorum (this) && + !does_gd_meet_server_quorum (this)) { + glusterd_xfer_cli_probe_resp (req, -1, GF_PROBE_QUORUM_NOT_MET, + NULL, hostname, port, dict); + gf_log (this->name, GF_LOG_ERROR, "Quorum does not meet, " + "rejecting operation"); + ret = 0; goto out; - while ((entry = readdir (fd))) { - if (!entry) - break; - - if (!strcmp (entry->d_name, ".") || !strcmp (entry->d_name, "..")) - continue; - - snprintf (full_path, 1024, "%s/%s", dir, entry->d_name); - - ret = stat (full_path, &stbuf); - if (ret == -1) - continue; - - if (S_ISDIR (stbuf.st_mode)) { - /* iterate in subdirectories */ - ret = glusterd_check_and_rebalance (volinfo, full_path); - if (ret) - break; - } } - closedir (fd); + gf_log ("glusterd", GF_LOG_INFO, "Received CLI probe req %s %d", + hostname, port); - if (!entry) + if (dict_get_str(this->options,"transport.socket.bind-address", + &bind_name) == 0) { + gf_log ("glusterd", GF_LOG_DEBUG, + "only checking probe address vs. bind address"); + ret = gf_is_same_address (bind_name, hostname); + } + else { + ret = gf_is_local_addr (hostname); + } + if (ret) { + glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_LOCALHOST, + NULL, hostname, port, dict); ret = 0; -out: - return ret; -} + goto out; + } -void * -glusterd_defrag_start (void *data) -{ - glusterd_volinfo_t *volinfo = data; - glusterd_defrag_info_t *defrag = NULL; - char cmd_str[1024] = {0,}; - int ret = -1; - struct stat stbuf = {0,}; - char value[128] = {0,}; - - defrag = volinfo->defrag; - if (!defrag) - goto out; - - sleep (1); - ret = stat (defrag->mount, &stbuf); - if ((ret == -1) && (errno == ENOTCONN)) { - /* Wait for some more time before starting rebalance */ - sleep (2); - ret = stat (defrag->mount, &stbuf); - if (ret == -1) { - volinfo->defrag_status = GF_DEFRAG_STATUS_FAILED; - volinfo->rebalance_files = 0; - volinfo->rebalance_data = 0; - volinfo->lookedup_files = 0; + if (!(ret = glusterd_friend_find_by_hostname (hostname, &peerinfo))) { + if (strcmp (peerinfo->hostname, hostname) == 0) { + + gf_log ("glusterd", GF_LOG_DEBUG, "Probe host %s port " + "%d already a peer", hostname, port); + glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_FRIEND, + NULL, hostname, port, + dict); goto out; } } + ret = glusterd_probe_begin (req, hostname, port, dict); - /* Fix the root ('/') first */ - getxattr (defrag->mount, "trusted.distribute.fix.layout", &value, 128); - - ret = glusterd_check_and_rebalance (volinfo, defrag->mount); - - /* TODO: This should run in a thread, and finish the thread when - the task is complete. 
While defrag is running, keep updating - files */ - - volinfo->defrag_status = GF_DEFRAG_STATUS_COMPLETE; - volinfo->rebalance_files = defrag->total_files; - volinfo->rebalance_data = defrag->total_data; - volinfo->lookedup_files = defrag->num_files_lookedup; -out: - if (defrag) { - gf_log ("defrag", GF_LOG_NORMAL, "defrag on %s complete", - defrag->mount); - - snprintf (cmd_str, 1024, "umount -l %s", defrag->mount); - ret = system (cmd_str); - LOCK_DESTROY (&defrag->lock); - GF_FREE (defrag); + if (ret == GLUSTERD_CONNECTION_AWAITED) { + //fsm should be run after connection establishes + run_fsm = _gf_false; + ret = 0; } - volinfo->defrag = NULL; - - return NULL; -} -int -glusterd_defrag_stop (glusterd_volinfo_t *volinfo, - gf1_cli_defrag_vol_rsp *rsp) -{ - /* TODO: set a variaeble 'stop_defrag' here, it should be checked - in defrag loop */ - if (!volinfo || !volinfo->defrag) - goto out; +out: + free (cli_req.dict.dict_val); - LOCK (&volinfo->defrag->lock); - { - volinfo->defrag_status = GF_DEFRAG_STATUS_STOPED; - rsp->files = volinfo->defrag->total_files; - rsp->size = volinfo->defrag->total_data; + if (run_fsm) { + glusterd_friend_sm (); + glusterd_op_sm (); } - UNLOCK (&volinfo->defrag->lock); - rsp->op_ret = 0; -out: - return 0; + return ret; } int -glusterd_defrag_status_get (glusterd_volinfo_t *volinfo, - gf1_cli_defrag_vol_rsp *rsp) +glusterd_handle_cli_probe (rpcsvc_request_t *req) { - if (!volinfo) - goto out; - - if (volinfo->defrag) { - LOCK (&volinfo->defrag->lock); - { - rsp->files = volinfo->defrag->total_files; - rsp->size = volinfo->defrag->total_data; - rsp->lookedup_files = volinfo->defrag->num_files_lookedup; - } - UNLOCK (&volinfo->defrag->lock); - } else { - rsp->files = volinfo->rebalance_files; - rsp->size = volinfo->rebalance_data; - rsp->lookedup_files = volinfo->lookedup_files; - } - - rsp->op_errno = volinfo->defrag_status; - rsp->op_ret = 0; -out: - return 0; + return glusterd_big_locked_handler (req, __glusterd_handle_cli_probe); } int -glusterd_handle_defrag_volume (rpcsvc_request_t *req) +__glusterd_handle_cli_deprobe (rpcsvc_request_t *req) { - int32_t ret = -1; - gf1_cli_defrag_vol_req cli_req = {0,}; - glusterd_conf_t *priv = NULL; - char cmd_str[4096] = {0,}; - glusterd_volinfo_t *volinfo = NULL; - glusterd_defrag_info_t *defrag = NULL; - gf1_cli_defrag_vol_rsp rsp = {0,}; + int32_t ret = -1; + gf_cli_req cli_req = {{0,},}; + uuid_t uuid = {0}; + int op_errno = 0; + xlator_t *this = NULL; + glusterd_conf_t *priv = NULL; + dict_t *dict = NULL; + char *hostname = NULL; + int port = 0; + int flags = 0; + this = THIS; + GF_ASSERT (this); + priv = this->private; + GF_ASSERT (priv); GF_ASSERT (req); - priv = THIS->private; - if (!gf_xdr_to_cli_defrag_vol_req (req->msg[0], &cli_req)) { + ret = xdr_to_generic (req->msg[0], &cli_req, + (xdrproc_t)xdr_gf_cli_req); + if (ret < 0) { //failed to decode msg; req->rpc_err = GARBAGE_ARGS; goto out; } - switch (cli_req.cmd) { - case GF_DEFRAG_CMD_START: - gf_cmd_log ("Volume rebalance"," on volname: %s " - "cmd: start, attempted", cli_req.volname); - break; - case GF_DEFRAG_CMD_STOP: - gf_cmd_log ("Volume rebalance"," on volname: %s " - "cmd: stop, attempted", cli_req.volname); - break; - default: - break; + if (cli_req.dict.dict_len) { + dict = dict_new (); + + ret = dict_unserialize (cli_req.dict.dict_val, + cli_req.dict.dict_len, &dict); + if (ret < 0) { + gf_log (this->name, GF_LOG_ERROR, "Failed to " + "unserialize req-buffer to dictionary"); + goto out; + } } - gf_log ("glusterd", GF_LOG_NORMAL, "Received defrag 
volume on %s", - cli_req.volname); - rsp.volname = cli_req.volname; - rsp.op_ret = -1; - if (glusterd_volinfo_find(cli_req.volname, &volinfo)) { - gf_log ("glusterd", GF_LOG_NORMAL, "Received defrag on invalid" - " volname %s", cli_req.volname); + gf_log ("glusterd", GF_LOG_INFO, "Received CLI deprobe req"); + + ret = dict_get_str (dict, "hostname", &hostname); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, "Failed to get hostname"); goto out; } - if (volinfo->status != GLUSTERD_STATUS_STARTED) { - gf_log ("glusterd", GF_LOG_NORMAL, "Received defrag on stopped" - " volname %s", cli_req.volname); + ret = dict_get_int32 (dict, "port", &port); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, "Failed to get port"); goto out; } - switch (cli_req.cmd) { - case GF_DEFRAG_CMD_START: - { - if (volinfo->defrag) { - gf_log ("glusterd", GF_LOG_DEBUG, - "defrag on volume %s already started", - cli_req.volname); - goto out; - } - - volinfo->defrag = GF_CALLOC (1, sizeof (glusterd_defrag_info_t), - gf_gld_mt_defrag_info); - if (!volinfo->defrag) - goto out; + ret = dict_get_int32 (dict, "flags", &flags); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, "Failed to get flags"); + goto out; + } - defrag = volinfo->defrag; + ret = glusterd_hostname_to_uuid (hostname, uuid); + if (ret) { + op_errno = GF_DEPROBE_NOT_FRIEND; + goto out; + } - LOCK_INIT (&defrag->lock); - snprintf (defrag->mount, 1024, "%s/mount/%s", - priv->workdir, cli_req.volname); - /* Create a directory, mount glusterfs over it, start glusterfs-defrag */ - snprintf (cmd_str, 4096, "mkdir -p %s", defrag->mount); - ret = system (cmd_str); + if (!uuid_compare (uuid, MY_UUID)) { + op_errno = GF_DEPROBE_LOCALHOST; + ret = -1; + goto out; + } - if (ret) { - gf_log("glusterd", GF_LOG_DEBUG, "command: %s failed", cmd_str); - goto out; + if (!(flags & GF_CLI_FLAG_OP_FORCE)) { + if (!uuid_is_null (uuid)) { + /* Check if peers are connected, except peer being detached*/ + if (!glusterd_chk_peers_connected_befriended (uuid)) { + ret = -1; + op_errno = GF_DEPROBE_FRIEND_DOWN; + goto out; + } + ret = glusterd_all_volume_cond_check ( + glusterd_friend_brick_belongs, + -1, &uuid); + if (ret) { + op_errno = GF_DEPROBE_BRICK_EXIST; + goto out; + } } - snprintf (cmd_str, 4096, "%s/sbin/glusterfs -s localhost " - "--volfile-id %s --volume-name %s-quick-read " - "--xlator-option *dht.unhashed-sticky-bit=yes " - "--xlator-option *dht.use-readdirp=yes " - "--xlator-option *dht.lookup-unhashed=yes %s", - GFS_PREFIX, cli_req.volname, cli_req.volname, - defrag->mount); - ret = gf_system (cmd_str); - if (ret) { - gf_log("glusterd", GF_LOG_DEBUG, "command: %s failed", cmd_str); + if (glusterd_is_any_volume_in_server_quorum (this) && + !does_gd_meet_server_quorum (this)) { + gf_log (this->name, GF_LOG_ERROR, "Quorum does not " + "meet, rejecting operation"); + ret = -1; + op_errno = GF_DEPROBE_QUORUM_NOT_MET; goto out; } - - volinfo->defrag_status = GF_DEFRAG_STATUS_STARTED; - rsp.op_ret = 0; - - ret = pthread_create (&defrag->th, NULL, glusterd_defrag_start, - volinfo); - if (ret) { - snprintf (cmd_str, 1024, "umount -l %s", defrag->mount); - ret = system (cmd_str); - rsp.op_ret = -1; - } - break; } - case GF_DEFRAG_CMD_STOP: - ret = glusterd_defrag_stop (volinfo, &rsp); - break; - case GF_DEFRAG_CMD_STATUS: - ret = glusterd_defrag_status_get (volinfo, &rsp); - break; - default: - break; - } - if (ret) - gf_log("glusterd", GF_LOG_DEBUG, "command: %s failed",cmd_str); - if (cli_req.cmd != GF_DEFRAG_CMD_STATUS) { - gf_cmd_log ("volume rebalance"," on volname: %s %d 
%s", - cli_req.volname, - cli_req.cmd, ((ret)?"FAILED":"SUCCESS")); + if (!uuid_is_null (uuid)) { + ret = glusterd_deprobe_begin (req, hostname, port, uuid, dict); + } else { + ret = glusterd_deprobe_begin (req, hostname, port, NULL, dict); } out: + free (cli_req.dict.dict_val); + + if (ret) { + ret = glusterd_xfer_cli_deprobe_resp (req, ret, op_errno, NULL, + hostname, dict); + } + + glusterd_friend_sm (); + glusterd_op_sm (); - ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, - gf_xdr_serialize_cli_defrag_vol_rsp); - if (cli_req.volname) - free (cli_req.volname);//malloced by xdr return ret; } int -glusterd_handle_cli_get_volume (rpcsvc_request_t *req) +glusterd_handle_cli_deprobe (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, __glusterd_handle_cli_deprobe); +} + +int +__glusterd_handle_cli_list_friends (rpcsvc_request_t *req) { int32_t ret = -1; - gf1_cli_get_vol_req cli_req = {0,}; + gf1_cli_peer_list_req cli_req = {0,}; dict_t *dict = NULL; GF_ASSERT (req); - if (!gf_xdr_to_cli_get_vol_req (req->msg[0], &cli_req)) { + ret = xdr_to_generic (req->msg[0], &cli_req, + (xdrproc_t)xdr_gf1_cli_peer_list_req); + if (ret < 0) { //failed to decode msg; req->rpc_err = GARBAGE_ARGS; goto out; } - gf_log ("glusterd", GF_LOG_NORMAL, "Received get vol req"); + gf_log ("glusterd", GF_LOG_INFO, "Received cli list req"); if (cli_req.dict.dict_len) { /* Unserialize the dictionary */ @@ -1098,61 +1292,50 @@ glusterd_handle_cli_get_volume (rpcsvc_request_t *req) } } - ret = glusterd_get_volumes (req, dict, cli_req.flags); + ret = glusterd_list_friends (req, dict, cli_req.flags); out: if (dict) dict_unref (dict); + + glusterd_friend_sm (); + glusterd_op_sm (); + return ret; } int -glusterd_handle_create_volume (rpcsvc_request_t *req) -{ - int32_t ret = -1; - gf1_cli_create_vol_req cli_req = {0,}; - dict_t *dict = NULL; - glusterd_brickinfo_t *brickinfo = NULL; - char *brick = NULL; - char *bricks = NULL; - char *volname = NULL; - int brick_count = 0; - char *tmpptr = NULL; - int i = 0; - glusterd_peerinfo_t *peerinfo = NULL; - char *brick_list = NULL; - void *cli_rsp = NULL; - char err_str[1048]; - gf1_cli_create_vol_rsp rsp = {0,}; - glusterd_conf_t *priv = NULL; - int err_ret = 0; - xlator_t *this = NULL; - char *free_ptr = NULL; - char *trans_type = NULL; - uuid_t volume_id = {0,}; - char volid[64] = {0,}; - - GF_ASSERT (req); +glusterd_handle_cli_list_friends (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_cli_list_friends); +} - this = THIS; - GF_ASSERT(this); +int +__glusterd_handle_cli_get_volume (rpcsvc_request_t *req) +{ + int32_t ret = -1; + gf_cli_req cli_req = {{0,}}; + dict_t *dict = NULL; + int32_t flags = 0; - priv = this->private; + GF_ASSERT (req); - if (!gf_xdr_to_cli_create_vol_req (req->msg[0], &cli_req)) { + ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); + if (ret < 0) { //failed to decode msg; req->rpc_err = GARBAGE_ARGS; goto out; } - gf_log ("glusterd", GF_LOG_NORMAL, "Received create volume req"); + gf_log ("glusterd", GF_LOG_INFO, "Received get vol req"); - if (cli_req.bricks.bricks_len) { + if (cli_req.dict.dict_len) { /* Unserialize the dictionary */ dict = dict_new (); - ret = dict_unserialize (cli_req.bricks.bricks_val, - cli_req.bricks.bricks_len, + ret = dict_unserialize (cli_req.dict.dict_val, + cli_req.dict.dict_len, &dict); if (ret < 0) { gf_log ("glusterd", GF_LOG_ERROR, @@ -1160,526 +1343,413 @@ glusterd_handle_create_volume (rpcsvc_request_t *req) "unserialize req-buffer to 
dictionary"); goto out; } else { - dict->extra_stdfree = cli_req.bricks.bricks_val; + dict->extra_stdfree = cli_req.dict.dict_val; } } - ret = dict_get_str (dict, "volname", &volname); - - if (ret) { - gf_log ("", GF_LOG_ERROR, "Unable to get volume name"); - goto out; - } - gf_cmd_log ("Volume create", "on volname: %s attempted", volname); - - if ((ret = glusterd_check_volume_exists (volname))) { - snprintf(err_str, 1048, "Volname %s already exists", - volname); - gf_log ("glusterd", GF_LOG_ERROR, "%s", err_str); - err_ret = 1; - goto out; - } - - ret = dict_get_int32 (dict, "count", &brick_count); - if (ret) { - gf_log ("", GF_LOG_ERROR, "Unable to get count"); - goto out; - } - - ret = dict_get_str (dict, "transport", &trans_type); - if (ret) { - gf_log ("", GF_LOG_ERROR, "Unable to get transport-type"); - goto out; - } - ret = dict_get_str (dict, "bricks", &bricks); - if (ret) { - gf_log ("", GF_LOG_ERROR, "Unable to get bricks"); - goto out; - } - - uuid_generate (volume_id); - uuid_unparse (volume_id, volid); - free_ptr = gf_strdup (volid); - ret = dict_set_dynstr (dict, "volume-id", free_ptr); + ret = dict_get_int32 (dict, "flags", &flags); if (ret) { - gf_log ("", GF_LOG_ERROR, "unable to set volume-id"); + gf_log (THIS->name, GF_LOG_ERROR, "failed to get flags"); goto out; } - free_ptr = NULL; - - if (bricks) { - brick_list = gf_strdup (bricks); - free_ptr = brick_list; - } - gf_cmd_log ("Volume create", "on volname: %s type:%s count:%d bricks:%s", - cli_req.volname, ((cli_req.type == 0)? "DEFAULT": - ((cli_req.type == 1)? "STRIPE":"REPLICATE")), cli_req.count, - bricks); - - while ( i < brick_count) { - i++; - brick= strtok_r (brick_list, " \n", &tmpptr); - brick_list = tmpptr; - if (brickinfo) - glusterd_brickinfo_delete (brickinfo); - ret = glusterd_brickinfo_from_brick (brick, &brickinfo); - if (ret) - goto out; - - if(!(ret = glusterd_is_local_addr (brickinfo->hostname))) - goto brick_validation; //localhost, continue without validation - - ret = glusterd_friend_find_by_hostname (brickinfo->hostname, - &peerinfo); - if (ret) { - snprintf (err_str, 1048, "Host %s not a friend", - brickinfo->hostname); - gf_log ("glusterd", GF_LOG_ERROR, "%s", err_str); - err_ret = 1; - goto out; - } - if ((!peerinfo->connected) || - (peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)) { - snprintf(err_str, 1048, "Host %s not connected", - brickinfo->hostname); - gf_log ("glusterd", GF_LOG_ERROR, "%s", err_str); - err_ret = 1; - goto out; - } -brick_validation: - err_ret = glusterd_is_exisiting_brick (brickinfo->hostname, - brickinfo->path); - if (err_ret) { - snprintf(err_str, 1048, "Brick: %s already in use", - brick); - goto out; - } - } - ret = glusterd_create_volume (req, dict); - - gf_cmd_log ("Volume create", "on volname: %s %s", volname, - ((ret || err_ret) != 0) ? 
"FAILED": "SUCCESS"); + ret = glusterd_get_volumes (req, dict, flags); out: - if ((err_ret || ret) && dict) + if (dict) dict_unref (dict); - if (err_ret) { - rsp.op_ret = -1; - rsp.op_errno = 0; - rsp.volname = ""; - rsp.op_errstr = err_str; - cli_rsp = &rsp; - glusterd_submit_reply(req, cli_rsp, NULL, 0, NULL, - gf_xdr_serialize_cli_create_vol_rsp); - if (!glusterd_opinfo_unlock()) - gf_log ("glusterd", GF_LOG_ERROR, "Unlock on opinfo" - " failed"); - ret = 0; //Client response sent, prevent second response - } - - if (free_ptr) - GF_FREE(free_ptr); - if (brickinfo) - glusterd_brickinfo_delete (brickinfo); - if (cli_req.volname) - free (cli_req.volname); // its a malloced by xdr + + glusterd_friend_sm (); + glusterd_op_sm (); + return ret; } int -glusterd_handle_cli_start_volume (rpcsvc_request_t *req) +glusterd_handle_cli_get_volume (rpcsvc_request_t *req) { - int32_t ret = -1; - gf1_cli_start_vol_req cli_req = {0,}; - int32_t flags = 0; - - GF_ASSERT (req); - - if (!gf_xdr_to_cli_start_vol_req (req->msg[0], &cli_req)) { - //failed to decode msg; - req->rpc_err = GARBAGE_ARGS; - goto out; - } - - gf_log ("glusterd", GF_LOG_NORMAL, "Received start vol req" - "for volume %s", cli_req.volname); - - ret = glusterd_start_volume (req, cli_req.volname, flags); - - gf_cmd_log ("volume start","on volname: %s %s", cli_req.volname, - ((ret == 0) ? "SUCCESS": "FAILED")); - -out: - if (cli_req.volname) - free (cli_req.volname); //its malloced by xdr - return ret; + return glusterd_big_locked_handler (req, + __glusterd_handle_cli_get_volume); } - int -glusterd_handle_cli_stop_volume (rpcsvc_request_t *req) +__glusterd_handle_cli_uuid_reset (rpcsvc_request_t *req) { - int32_t ret = -1; - gf1_cli_stop_vol_req cli_req = {0,}; + int ret = -1; + dict_t *dict = NULL; + xlator_t *this = NULL; + glusterd_conf_t *priv = NULL; + uuid_t uuid = {0}; + gf_cli_rsp rsp = {0,}; + gf_cli_req cli_req = {{0,}}; + char msg_str[2048] = {0,}; GF_ASSERT (req); - if (!gf_xdr_to_cli_stop_vol_req (req->msg[0], &cli_req)) { + this = THIS; + priv = this->private; + GF_ASSERT (priv); + + ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); + if (ret < 0) { //failed to decode msg; req->rpc_err = GARBAGE_ARGS; goto out; } - gf_log ("glusterd", GF_LOG_NORMAL, "Received stop vol req" - "for volume %s", cli_req.volname); + gf_log ("glusterd", GF_LOG_DEBUG, "Received uuid reset req"); - ret = glusterd_stop_volume (req, cli_req.volname, cli_req.flags); + if (cli_req.dict.dict_len) { + /* Unserialize the dictionary */ + dict = dict_new (); - gf_cmd_log ("Volume stop","on volname: %s %s", cli_req.volname, - ((ret)?"FAILED":"SUCCESS")); + ret = dict_unserialize (cli_req.dict.dict_val, + cli_req.dict.dict_len, + &dict); + if (ret < 0) { + gf_log ("glusterd", GF_LOG_ERROR, + "failed to " + "unserialize req-buffer to dictionary"); + snprintf (msg_str, sizeof (msg_str), "Unable to decode " + "the buffer"); + goto out; + } else { + dict->extra_stdfree = cli_req.dict.dict_val; + } + } -out: - if (cli_req.volname) - free (cli_req.volname); //its malloced by xdr - return ret; -} + /* In the above section if dict_unserialize is successful, ret is set + * to zero. + */ + ret = -1; + // Do not allow peer reset if there are any volumes in the cluster + if (!list_empty (&priv->volumes)) { + snprintf (msg_str, sizeof (msg_str), "volumes are already " + "present in the cluster. 
Resetting uuid is not " + "allowed"); + gf_log (this->name, GF_LOG_WARNING, "%s", msg_str); + goto out; + } -int -glusterd_handle_cli_delete_volume (rpcsvc_request_t *req) -{ - int32_t ret = -1; - gf1_cli_delete_vol_req cli_req = {0,}; - int32_t flags = 0; + // Do not allow peer reset if trusted storage pool is already formed + if (!list_empty (&priv->peers)) { + snprintf (msg_str, sizeof (msg_str),"trusted storage pool " + "has been already formed. Please detach this peer " + "from the pool and reset its uuid."); + gf_log (this->name, GF_LOG_WARNING, "%s", msg_str); + goto out; + } - GF_ASSERT (req); + uuid_copy (uuid, priv->uuid); + ret = glusterd_uuid_generate_save (); - if (!gf_xdr_to_cli_delete_vol_req (req->msg[0], &cli_req)) { - //failed to decode msg; - req->rpc_err = GARBAGE_ARGS; + if (!uuid_compare (uuid, MY_UUID)) { + snprintf (msg_str, sizeof (msg_str), "old uuid and the new uuid" + " are same. Try gluster peer reset again"); + gf_log (this->name, GF_LOG_ERROR, "%s", msg_str); + ret = -1; goto out; } - gf_cmd_log ("Volume delete","on volname: %s attempted", cli_req.volname); - - gf_log ("glusterd", GF_LOG_NORMAL, "Received delete vol req" - "for volume %s", cli_req.volname); - ret = glusterd_delete_volume (req, cli_req.volname, flags); +out: + if (ret) { + rsp.op_ret = -1; + if (msg_str[0] == '\0') + snprintf (msg_str, sizeof (msg_str), "Operation " + "failed"); + rsp.op_errstr = msg_str; + ret = 0; + } else { + rsp.op_errstr = ""; + } - gf_cmd_log ("Volume delete", "on volname: %s %s", cli_req.volname, - ((ret) ? "FAILED" : "SUCCESS")); + glusterd_to_cli (req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gf_cli_rsp, dict); -out: - if (cli_req.volname) - free (cli_req.volname); //its malloced by xdr return ret; } int -glusterd_handle_add_brick (rpcsvc_request_t *req) +glusterd_handle_cli_uuid_reset (rpcsvc_request_t *req) { - int32_t ret = -1; - gf1_cli_add_brick_req cli_req = {0,}; - dict_t *dict = NULL; - glusterd_brickinfo_t *brickinfo = NULL; - char *brick = NULL; - char *bricks = NULL; - char *volname = NULL; - int brick_count = 0; - char *tmpptr = NULL; - int i = 0; - glusterd_peerinfo_t *peerinfo = NULL; - char *brick_list = NULL; - void *cli_rsp = NULL; - char err_str[1048]; - gf1_cli_add_brick_rsp rsp = {0,}; - glusterd_volinfo_t *volinfo = NULL; - int32_t err_ret = 0; - glusterd_conf_t *priv = NULL; - xlator_t *this = NULL; - char *free_ptr = NULL; - - this = THIS; - GF_ASSERT(this); + return glusterd_big_locked_handler (req, + __glusterd_handle_cli_uuid_reset); +} - priv = this->private; +int +__glusterd_handle_cli_uuid_get (rpcsvc_request_t *req) +{ + int ret = -1; + dict_t *dict = NULL; + dict_t *rsp_dict = NULL; + xlator_t *this = NULL; + glusterd_conf_t *priv = NULL; + gf_cli_rsp rsp = {0,}; + gf_cli_req cli_req = {{0,}}; + char msg_str[2048] = {0,}; + char uuid_str[64] = {0,}; GF_ASSERT (req); - if (!gf_xdr_to_cli_add_brick_req (req->msg[0], &cli_req)) { - //failed to decode msg; + this = THIS; + priv = this->private; + GF_ASSERT (priv); + + ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); + if (ret < 0) { req->rpc_err = GARBAGE_ARGS; goto out; } - gf_cmd_log ("Volume add-brick", "on volname: %s attempted", - cli_req.volname); - gf_log ("glusterd", GF_LOG_NORMAL, "Received add brick req"); + gf_log ("glusterd", GF_LOG_DEBUG, "Received uuid get req"); - if (cli_req.bricks.bricks_len) { - /* Unserialize the dictionary */ + if (cli_req.dict.dict_len) { dict = dict_new (); + if (!dict) { + ret = -1; + goto out; + } - ret = dict_unserialize 
(cli_req.bricks.bricks_val, - cli_req.bricks.bricks_len, + ret = dict_unserialize (cli_req.dict.dict_val, + cli_req.dict.dict_len, &dict); if (ret < 0) { gf_log ("glusterd", GF_LOG_ERROR, "failed to " "unserialize req-buffer to dictionary"); + snprintf (msg_str, sizeof (msg_str), "Unable to decode " + "the buffer"); goto out; + } else { - dict->extra_stdfree = cli_req.bricks.bricks_val; + dict->extra_stdfree = cli_req.dict.dict_val; + } } - ret = dict_get_str (dict, "volname", &volname); - - if (ret) { - gf_log ("", GF_LOG_ERROR, "Unable to get volume name"); + rsp_dict = dict_new (); + if (!rsp_dict) { + ret = -1; goto out; } - if (!(ret = glusterd_check_volume_exists (volname))) { - snprintf(err_str, 1048, "Volname %s does not exist", - volname); - gf_log ("glusterd", GF_LOG_ERROR, "%s", err_str); - err_ret = -1; + uuid_utoa_r (MY_UUID, uuid_str); + ret = dict_set_str (rsp_dict, "uuid", uuid_str); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, "Failed to set uuid in " + "dictionary."); goto out; } - ret = dict_get_int32 (dict, "count", &brick_count); + ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val, + &rsp.dict.dict_len); if (ret) { - gf_log ("", GF_LOG_ERROR, "Unable to get count"); + gf_log (this->name, GF_LOG_ERROR, "Failed to serialize " + "dictionary."); goto out; } + ret = 0; +out: + if (ret) { + rsp.op_ret = -1; + if (msg_str[0] == '\0') + snprintf (msg_str, sizeof (msg_str), "Operation " + "failed"); + rsp.op_errstr = msg_str; - if (!(ret = glusterd_volinfo_find (volname, &volinfo))) { - if (volinfo->type == GF_CLUSTER_TYPE_NONE) - goto brick_val; - if (!brick_count || !volinfo->sub_count) - goto brick_val; - - if (volinfo->brick_count < volinfo->sub_count) { - if ((volinfo->sub_count - volinfo->brick_count) == brick_count) - goto brick_val; - } - - if ((brick_count % volinfo->sub_count) != 0) { - snprintf(err_str, 2048, "Incorrect number of bricks" - " supplied %d for type %s with count %d", - brick_count, (volinfo->type == 1)? "STRIPE": - "REPLICATE", volinfo->sub_count); - gf_log("glusterd", GF_LOG_ERROR, "%s", err_str); - err_ret = 1; - goto out; - } } else { - gf_log("", GF_LOG_ERROR, "Unable to get volinfo for volname" - " %s", volname); - goto out; - } + rsp.op_errstr = ""; -brick_val: - ret = dict_get_str (dict, "bricks", &bricks); - if (ret) { - gf_log ("", GF_LOG_ERROR, "Unable to get bricks"); - goto out; } - if (bricks) - brick_list = gf_strdup (bricks); - if (!brick_list) { - gf_log ("", GF_LOG_ERROR, "Out of memory"); - ret = -1; - goto out; - } else { - free_ptr = brick_list; - } + glusterd_to_cli (req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gf_cli_rsp, dict); - gf_cmd_log ("Volume add-brick", "volname: %s type %s count:%d bricks:%s" - ,volname, ((volinfo->type == 0)? "DEFAULT" : ((volinfo->type - == 1)? 
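
Each public handler above is now only a trampoline into its __-prefixed actor through glusterd_big_locked_handler, so CLI and peer requests are serialized on one coarse lock. A self-contained sketch of that shape using a plain pthread mutex; this illustrates the pattern only and is not glusterd's own lock (build with -pthread):

#include <stdio.h>
#include <pthread.h>

typedef int (*actor_fn_t) (int req);

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* serialize entry into the real actor */
static int
big_locked_call (int req, actor_fn_t actor)
{
        int ret;

        pthread_mutex_lock (&big_lock);
        ret = actor (req);
        pthread_mutex_unlock (&big_lock);
        return ret;
}

/* the actor does the real work, always under the lock */
static int
__handle_request (int req)
{
        printf ("handling request %d\n", req);
        return 0;
}

/* the public entry point only forwards */
static int
handle_request (int req)
{
        return big_locked_call (req, __handle_request);
}

int
main (void)
{
        return handle_request (42);
}
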
"STRIPE": "REPLICATE")), brick_count, brick_list); + return 0; +} +int +glusterd_handle_cli_uuid_get (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_cli_uuid_get); +} - while ( i < brick_count) { - i++; - brick= strtok_r (brick_list, " \n", &tmpptr); - brick_list = tmpptr; - if (brickinfo) - glusterd_brickinfo_delete (brickinfo); - ret = glusterd_brickinfo_from_brick (brick, &brickinfo); +int +__glusterd_handle_cli_list_volume (rpcsvc_request_t *req) +{ + int ret = -1; + dict_t *dict = NULL; + glusterd_conf_t *priv = NULL; + glusterd_volinfo_t *volinfo = NULL; + int count = 0; + char key[1024] = {0,}; + gf_cli_rsp rsp = {0,}; + + GF_ASSERT (req); + + priv = THIS->private; + GF_ASSERT (priv); + + dict = dict_new (); + if (!dict) + goto out; + + list_for_each_entry (volinfo, &priv->volumes, vol_list) { + memset (key, 0, sizeof (key)); + snprintf (key, sizeof (key), "volume%d", count); + ret = dict_set_str (dict, key, volinfo->volname); if (ret) goto out; - if(!(ret = glusterd_is_local_addr(brickinfo->hostname))) - goto brick_validation; //localhost, continue without validation - ret = glusterd_friend_find_by_hostname(brickinfo->hostname, - &peerinfo); - if (ret) { - snprintf(err_str, 1048, "Host %s not a friend", - brickinfo->hostname); - gf_log ("glusterd", GF_LOG_ERROR, "%s", err_str); - err_ret = 1; - goto out; - } - if ((!peerinfo->connected) || - (peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)) { - snprintf(err_str, 1048, "Host %s not connected", - brickinfo->hostname); - gf_log ("glusterd", GF_LOG_ERROR, "%s", err_str); - err_ret = 1; - goto out; - } -brick_validation: - err_ret = glusterd_is_exisiting_brick (brickinfo->hostname, - brickinfo->path); - if (err_ret) { - snprintf(err_str, 1048, "Brick: %s already in use", - brick); - goto out; - } + count++; } - ret = glusterd_add_brick (req, dict); + ret = dict_set_int32 (dict, "count", count); + if (ret) + goto out; - gf_cmd_log ("Volume add-brick","on volname: %s %s", volname, - ((ret || err_ret) != 0)? 
"FAILED" : "SUCCESS"); + ret = dict_allocate_and_serialize (dict, &rsp.dict.dict_val, + &rsp.dict.dict_len); + if (ret) + goto out; + + ret = 0; out: - if ((err_ret || ret) && dict) + rsp.op_ret = ret; + if (ret) + rsp.op_errstr = "Error listing volumes"; + else + rsp.op_errstr = ""; + + glusterd_submit_reply (req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gf_cli_rsp); + ret = 0; + + if (dict) dict_unref (dict); - if (err_ret) { - rsp.op_ret = -1; - rsp.op_errno = 0; - rsp.volname = ""; - rsp.op_errstr = err_str; - cli_rsp = &rsp; - glusterd_submit_reply(req, cli_rsp, NULL, 0, NULL, - gf_xdr_serialize_cli_add_brick_rsp); - if (!glusterd_opinfo_unlock()) - gf_log ("glusterd", GF_LOG_ERROR, "Unlock on " - "opinfo failed"); - ret = 0; //sent error to cli, prevent second reply - } - if (brickinfo) - glusterd_brickinfo_delete (brickinfo); - if (free_ptr) - GF_FREE (free_ptr); - if (cli_req.volname) - free (cli_req.volname); //its malloced by xdr + glusterd_friend_sm (); + glusterd_op_sm (); + + return ret; +} + +int +glusterd_handle_cli_list_volume (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_cli_list_volume); +} + +int32_t +glusterd_op_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx, + char *err_str, size_t err_len) +{ + int ret = -1; + + ret = glusterd_op_txn_begin (req, op, ctx, err_str, err_len); + return ret; } int -glusterd_handle_replace_brick (rpcsvc_request_t *req) +__glusterd_handle_reset_volume (rpcsvc_request_t *req) { int32_t ret = -1; - gf1_cli_replace_brick_req cli_req = {0,}; + gf_cli_req cli_req = {{0,}}; dict_t *dict = NULL; - char *src_brick = NULL; - char *dst_brick = NULL; - int32_t op = 0; - char operation[8]; + glusterd_op_t cli_op = GD_OP_RESET_VOLUME; + char *volname = NULL; + char err_str[2048] = {0,}; + xlator_t *this = NULL; GF_ASSERT (req); + this = THIS; + GF_ASSERT (this); - if (!gf_xdr_to_cli_replace_brick_req (req->msg[0], &cli_req)) { - //failed to decode msg; + ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); + if (ret < 0) { + snprintf (err_str, sizeof (err_str), "Failed to decode request " + "received from cli"); + gf_log (this->name, GF_LOG_ERROR, "%s", err_str); req->rpc_err = GARBAGE_ARGS; goto out; } - gf_cmd_log ("Volume replace-brick","on volname: %s attempted", cli_req.volname); - - gf_log ("glusterd", GF_LOG_NORMAL, "Received replace brick req"); - - if (cli_req.bricks.bricks_len) { + if (cli_req.dict.dict_len) { /* Unserialize the dictionary */ dict = dict_new (); - ret = dict_unserialize (cli_req.bricks.bricks_val, - cli_req.bricks.bricks_len, + ret = dict_unserialize (cli_req.dict.dict_val, + cli_req.dict.dict_len, &dict); if (ret < 0) { - gf_log ("glusterd", GF_LOG_ERROR, - "failed to " - "unserialize req-buffer to dictionary"); + gf_log (this->name, GF_LOG_ERROR, "failed to " + "unserialize req-buffer to dictionary"); + snprintf (err_str, sizeof (err_str), "Unable to decode " + "the command"); goto out; } else { - dict->extra_stdfree = cli_req.bricks.bricks_val; + dict->extra_stdfree = cli_req.dict.dict_val; } } - ret = dict_get_int32 (dict, "operation", &op); + ret = dict_get_str (dict, "volname", &volname); if (ret) { - gf_log ("", GF_LOG_DEBUG, - "dict_get on operation failed"); + snprintf (err_str, sizeof (err_str), "Failed to get volume " + "name"); + gf_log (this->name, GF_LOG_ERROR, "%s", err_str); goto out; } + gf_log (this->name, GF_LOG_DEBUG, "Received volume reset request for " + "volume %s", volname); - ret = dict_get_str (dict, "src-brick", &src_brick); + ret 
= glusterd_op_begin_synctask (req, GD_OP_RESET_VOLUME, dict); +out: if (ret) { - gf_log ("", GF_LOG_ERROR, "Unable to get src brick"); - goto out; + if (err_str[0] == '\0') + snprintf (err_str, sizeof (err_str), + "Operation failed"); + ret = glusterd_op_send_cli_response (cli_op, ret, 0, req, + dict, err_str); } - gf_log ("", GF_LOG_DEBUG, - "src brick=%s", src_brick); - - ret = dict_get_str (dict, "dst-brick", &dst_brick); - if (ret) { - gf_log ("", GF_LOG_ERROR, "Unable to get dest brick"); - goto out; - } - - gf_log ("", GF_LOG_DEBUG, - "dst brick=%s", dst_brick); - - switch (op) { - case GF_REPLACE_OP_START: strcpy (operation, "start"); - break; - case GF_REPLACE_OP_COMMIT: strcpy (operation, "commit"); - break; - case GF_REPLACE_OP_PAUSE: strcpy (operation, "pause"); - break; - case GF_REPLACE_OP_ABORT: strcpy (operation, "abort"); - break; - case GF_REPLACE_OP_STATUS: strcpy (operation, "status"); - break; - default:strcpy (operation, "unknown"); - break; - } - - gf_cmd_log ("Volume replace-brick","volname: %s src_brick:%s" - " dst_brick:%s op:%s",cli_req.volname, src_brick, dst_brick - ,operation); - - - ret = glusterd_replace_brick (req, dict); - - gf_cmd_log ("Volume replace-brick","on volname: %s %s", cli_req.volname, - (ret) ? "FAILED" : "SUCCESS"); - -out: - if (ret && dict) - dict_unref (dict); - if (cli_req.volname) - free (cli_req.volname);//malloced by xdr return ret; } int -glusterd_handle_set_volume (rpcsvc_request_t *req) +glusterd_handle_reset_volume (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_reset_volume); +} + +int +__glusterd_handle_set_volume (rpcsvc_request_t *req) { - int32_t ret = -1; - gf1_cli_set_vol_req cli_req = {0,}; + int32_t ret = -1; + gf_cli_req cli_req = {{0,}}; dict_t *dict = NULL; + glusterd_op_t cli_op = GD_OP_SET_VOLUME; + char *key = NULL; + char *value = NULL; + char *volname = NULL; + char *op_errstr = NULL; + gf_boolean_t help = _gf_false; + char err_str[2048] = {0,}; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT (this); GF_ASSERT (req); - if (!gf_xdr_to_cli_set_vol_req (req->msg[0], &cli_req)) { - //failed to decode msg; + ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); + if (ret < 0) { + snprintf (err_str, sizeof (err_str), "Failed to decode " + "request received from cli"); + gf_log (this->name, GF_LOG_ERROR, "%s", err_str); req->rpc_err = GARBAGE_ARGS; goto out; } @@ -1692,528 +1762,382 @@ glusterd_handle_set_volume (rpcsvc_request_t *req) cli_req.dict.dict_len, &dict); if (ret < 0) { - gf_log ("glusterd", GF_LOG_ERROR, + gf_log (this->name, GF_LOG_ERROR, "failed to " "unserialize req-buffer to dictionary"); + snprintf (err_str, sizeof (err_str), "Unable to decode " + "the command"); goto out; + } else { + dict->extra_stdfree = cli_req.dict.dict_val; } } - ret = glusterd_set_volume (req, dict); + ret = dict_get_str (dict, "volname", &volname); + if (ret) { + snprintf (err_str, sizeof (err_str), "Failed to get volume " + "name while handling volume set command"); + gf_log (this->name, GF_LOG_ERROR, "%s", err_str); + goto out; + } + + if (strcmp (volname, "help") == 0 || + strcmp (volname, "help-xml") == 0) { + ret = glusterd_volset_help (dict, &op_errstr); + help = _gf_true; + goto out; + } + + ret = dict_get_str (dict, "key1", &key); + if (ret) { + snprintf (err_str, sizeof (err_str), "Failed to get key while" + " handling volume set for %s", volname); + gf_log (this->name, GF_LOG_ERROR, "%s", err_str); + goto out; + } + + ret = dict_get_str (dict, "value1", 
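
The list-volume handler above packs its reply as a "count" key plus "volume0" through "volume<count-1>" string keys before serializing the dict into rsp.dict. A short sketch of how a consumer could walk that encoding back out, assuming the same dict.h API used above (not part of the patch; printf needs <stdio.h>):

static int
print_volume_list (dict_t *dict)
{
        int32_t count = 0;
        int     i     = 0;
        char    key[64] = {0,};
        char   *name  = NULL;
        int     ret   = -1;

        ret = dict_get_int32 (dict, "count", &count);
        if (ret)
                return ret;

        for (i = 0; i < count; i++) {
                snprintf (key, sizeof (key), "volume%d", i);
                ret = dict_get_str (dict, key, &name);
                if (ret)
                        return ret;
                printf ("%s\n", name);
        }
        return 0;
}
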
&value); + if (ret) { + snprintf (err_str, sizeof (err_str), "Failed to get value while" + " handling volume set for %s", volname); + gf_log (this->name, GF_LOG_ERROR, "%s", err_str); + goto out; + } + gf_log (this->name, GF_LOG_DEBUG, "Received volume set request for " + "volume %s", volname); + + ret = glusterd_op_begin_synctask (req, GD_OP_SET_VOLUME, dict); out: + if (help) + ret = glusterd_op_send_cli_response (cli_op, ret, 0, req, dict, + (op_errstr)? op_errstr:""); + else if (ret) { + if (err_str[0] == '\0') + snprintf (err_str, sizeof (err_str), + "Operation failed"); + ret = glusterd_op_send_cli_response (cli_op, ret, 0, req, + dict, err_str); + } + if (op_errstr) + GF_FREE (op_errstr); + return ret; } int -glusterd_handle_remove_brick (rpcsvc_request_t *req) +glusterd_handle_set_volume (rpcsvc_request_t *req) { - int32_t ret = -1; - gf1_cli_remove_brick_req cli_req = {0,}; - dict_t *dict = NULL; - int32_t count = 0; - char *brick = NULL; - char key[256] = {0,}; - char *brick_list = NULL; - int i = 1; - glusterd_volinfo_t *volinfo = NULL; - glusterd_brickinfo_t *brickinfo = NULL; - int32_t pos = 0; - int32_t sub_volume = 0; - int32_t sub_volume_start = 0; - int32_t sub_volume_end = 0; - glusterd_brickinfo_t *tmp = NULL; - int32_t err_ret = 0; - char *err_str = NULL; - gf1_cli_remove_brick_rsp rsp = {0,}; - void *cli_rsp = NULL; - char vol_type[256] = {0,}; + return glusterd_big_locked_handler (req, __glusterd_handle_set_volume); +} + +int +__glusterd_handle_sync_volume (rpcsvc_request_t *req) +{ + int32_t ret = -1; + gf_cli_req cli_req = {{0,}}; + dict_t *dict = NULL; + gf_cli_rsp cli_rsp = {0.}; + char msg[2048] = {0,}; + char *volname = NULL; + gf1_cli_sync_volume flags = 0; + char *hostname = NULL; + xlator_t *this = NULL; GF_ASSERT (req); + this = THIS; + GF_ASSERT (this); - if (!gf_xdr_to_cli_remove_brick_req (req->msg[0], &cli_req)) { + ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); + if (ret < 0) { //failed to decode msg; req->rpc_err = GARBAGE_ARGS; goto out; } - gf_cmd_log ("Volume remove-brick","on volname: %s attempted",cli_req.volname); - gf_log ("glusterd", GF_LOG_NORMAL, "Received rem brick req"); - - if (cli_req.bricks.bricks_len) { + if (cli_req.dict.dict_len) { /* Unserialize the dictionary */ dict = dict_new (); - ret = dict_unserialize (cli_req.bricks.bricks_val, - cli_req.bricks.bricks_len, + ret = dict_unserialize (cli_req.dict.dict_val, + cli_req.dict.dict_len, &dict); if (ret < 0) { - gf_log ("glusterd", GF_LOG_ERROR, + gf_log (this->name, GF_LOG_ERROR, "failed to " "unserialize req-buffer to dictionary"); + snprintf (msg, sizeof (msg), "Unable to decode the " + "command"); goto out; } else { - dict->extra_stdfree = cli_req.bricks.bricks_val; + dict->extra_stdfree = cli_req.dict.dict_val; } } - ret = dict_get_int32 (dict, "count", &count); + ret = dict_get_str (dict, "hostname", &hostname); if (ret) { - gf_log ("", GF_LOG_ERROR, "Unable to get count"); + snprintf (msg, sizeof (msg), "Failed to get hostname"); + gf_log (this->name, GF_LOG_ERROR, "%s", msg); goto out; } - err_str = GF_MALLOC (2048 * sizeof(*err_str),gf_common_mt_char); - - if (!err_str) { - gf_log ("",GF_LOG_ERROR,"glusterd_handle_remove_brick: " - "Unable to get memory"); - ret = -1; - goto out; - } - - ret = glusterd_volinfo_find (cli_req.volname, &volinfo); + ret = dict_get_str (dict, "volname", &volname); if (ret) { - snprintf (err_str, 2048, "volname %s not found", - cli_req.volname); - gf_log ("", GF_LOG_ERROR, "%s", err_str); - err_ret = 1; - goto out; - } - - 
if (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) - strcpy (vol_type, "replica"); - else if (volinfo->type == GF_CLUSTER_TYPE_STRIPE) - strcpy (vol_type, "stripe"); - else - strcpy (vol_type, "distribute"); - - if ((volinfo->type == (GF_CLUSTER_TYPE_REPLICATE || - GF_CLUSTER_TYPE_STRIPE)) && - !(volinfo->brick_count <= volinfo->sub_count)) { - if (volinfo->sub_count && (count % volinfo->sub_count != 0)) { - snprintf (err_str, 2048, "Remove brick incorrect" - " brick count of %d for %s %d", - count, vol_type, volinfo->sub_count); - gf_log ("", GF_LOG_ERROR, "%s", err_str); - err_ret = 1; - ret = -1; + ret = dict_get_int32 (dict, "flags", (int32_t*)&flags); + if (ret) { + snprintf (msg, sizeof (msg), "Failed to get volume name" + " or flags"); + gf_log (this->name, GF_LOG_ERROR, "%s", msg); goto out; } - } - brick_list = GF_MALLOC (120000 * sizeof(*brick_list),gf_common_mt_char); + gf_log (this->name, GF_LOG_INFO, "Received volume sync req " + "for volume %s", (flags & GF_CLI_SYNC_ALL) ? "all" : volname); - if (!brick_list) { - gf_log ("",GF_LOG_ERROR,"glusterd_handle_remove_brick: " - "Unable to get memory"); + if (gf_is_local_addr (hostname)) { ret = -1; + snprintf (msg, sizeof (msg), "sync from localhost" + " not allowed"); + gf_log (this->name, GF_LOG_ERROR, "%s", msg); goto out; } - strcpy (brick_list, " "); - while ( i <= count) { - snprintf (key, 256, "brick%d", i); - ret = dict_get_str (dict, key, &brick); - if (ret) { - gf_log ("", GF_LOG_ERROR, "Unable to get %s", key); - goto out; - } - gf_log ("", GF_LOG_DEBUG, "Remove brick count %d brick: %s", - i, brick); - - ret = glusterd_brickinfo_get(brick, volinfo, &brickinfo); - if (ret) { - snprintf(err_str, 2048," Incorrect brick %s for volname" - " %s", brick, cli_req.volname); - gf_log ("", GF_LOG_ERROR, "%s", err_str); - err_ret = 1; - goto out; - } - strcat(brick_list, brick); - strcat(brick_list, " "); - - i++; - if ((volinfo->type == GF_CLUSTER_TYPE_NONE) || - (volinfo->brick_count <= volinfo->sub_count)) - continue; - - pos = 0; - list_for_each_entry (tmp, &volinfo->bricks, brick_list) { - - if ((!strcmp (tmp->hostname,brickinfo->hostname)) && - !strcmp (tmp->path, brickinfo->path)) { - gf_log ("", GF_LOG_NORMAL, "Found brick"); - if (!sub_volume && volinfo->sub_count) { - sub_volume = (pos / volinfo-> - sub_count) + 1; - sub_volume_start = volinfo->sub_count * - (sub_volume - 1); - sub_volume_end = (volinfo->sub_count * - sub_volume) -1 ; - } else { - if (pos < sub_volume_start || - pos >sub_volume_end) { - ret = -1; - snprintf(err_str, 2048,"Bricks" - " not from same subvol" - " for %s", vol_type); - gf_log ("",GF_LOG_ERROR, - "%s", err_str); - err_ret = 1; - goto out; - } - } - break; - } - pos++; - } - } - gf_cmd_log ("Volume remove-brick","volname: %s count:%d bricks:%s", - cli_req.volname, count, brick_list); - - ret = glusterd_remove_brick (req, dict); - - gf_cmd_log ("Volume remove-brick","on volname: %s %s",cli_req.volname, - (ret) ? 
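
The sync handler above rejects a sync whose source hostname is the local node, using gf_is_local_addr; that function's implementation is not part of this hunk. A self-contained illustration of that kind of check, resolving the name and comparing against addresses configured on local interfaces (POSIX, IPv4/IPv6):

#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <netdb.h>
#include <ifaddrs.h>
#include <sys/socket.h>
#include <netinet/in.h>

static bool
host_is_local (const char *host)
{
        struct addrinfo  hints = {0};
        struct addrinfo *res = NULL, *ai = NULL;
        struct ifaddrs  *ifas = NULL, *ifa = NULL;
        bool             local = false;

        hints.ai_family   = AF_UNSPEC;
        hints.ai_socktype = SOCK_STREAM;

        if (getaddrinfo (host, NULL, &hints, &res) != 0)
                return false;
        if (getifaddrs (&ifas) != 0)
                goto out;

        /* any resolved address that matches a local interface address
         * makes the host "local" */
        for (ai = res; ai && !local; ai = ai->ai_next) {
                for (ifa = ifas; ifa; ifa = ifa->ifa_next) {
                        if (!ifa->ifa_addr ||
                            ifa->ifa_addr->sa_family != ai->ai_family)
                                continue;
                        if (ai->ai_family == AF_INET &&
                            !memcmp (&((struct sockaddr_in *)(void *)ifa->ifa_addr)->sin_addr,
                                     &((struct sockaddr_in *)(void *)ai->ai_addr)->sin_addr,
                                     sizeof (struct in_addr))) {
                                local = true;
                                break;
                        }
                        if (ai->ai_family == AF_INET6 &&
                            !memcmp (&((struct sockaddr_in6 *)(void *)ifa->ifa_addr)->sin6_addr,
                                     &((struct sockaddr_in6 *)(void *)ai->ai_addr)->sin6_addr,
                                     sizeof (struct in6_addr))) {
                                local = true;
                                break;
                        }
                }
        }
        freeifaddrs (ifas);
out:
        freeaddrinfo (res);
        return local;
}

int
main (int argc, char **argv)
{
        const char *host = (argc > 1) ? argv[1] : "localhost";

        printf ("%s is %slocal\n", host, host_is_local (host) ? "" : "not ");
        return 0;
}
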
"FAILED" : "SUCCESS"); + ret = glusterd_op_begin_synctask (req, GD_OP_SYNC_VOLUME, dict); out: - if ((ret || err_ret) && dict) - dict_unref (dict); - if (err_ret) { - rsp.op_ret = -1; - rsp.op_errno = 0; - rsp.volname = ""; - rsp.op_errstr = err_str; - cli_rsp = &rsp; - glusterd_submit_reply(req, cli_rsp, NULL, 0, NULL, - gf_xdr_serialize_cli_remove_brick_rsp); - if (!glusterd_opinfo_unlock()) - gf_log ("glusterd", GF_LOG_ERROR, "Unlock on " - "opinfo failed"); + if (ret) { + cli_rsp.op_ret = -1; + cli_rsp.op_errstr = msg; + if (msg[0] == '\0') + snprintf (msg, sizeof (msg), "Operation failed"); + glusterd_to_cli (req, &cli_rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gf_cli_rsp, dict); ret = 0; //sent error to cli, prevent second reply - } - if (brick_list) - GF_FREE (brick_list); - if (err_str) - GF_FREE (err_str); - if (cli_req.volname) - free (cli_req.volname); //its malloced by xdr + return ret; } int -glusterd_handle_log_filename (rpcsvc_request_t *req) +glusterd_handle_sync_volume (rpcsvc_request_t *req) { - int32_t ret = -1; - gf1_cli_log_filename_req cli_req = {0,}; - dict_t *dict = NULL; + return glusterd_big_locked_handler (req, __glusterd_handle_sync_volume); +} - GF_ASSERT (req); +int +glusterd_fsm_log_send_resp (rpcsvc_request_t *req, int op_ret, + char *op_errstr, dict_t *dict) +{ - if (!gf_xdr_to_cli_log_filename_req (req->msg[0], &cli_req)) { - //failed to decode msg; - req->rpc_err = GARBAGE_ARGS; - goto out; - } + int ret = -1; + gf1_cli_fsm_log_rsp rsp = {0}; - gf_log ("glusterd", GF_LOG_NORMAL, "Received log filename req " - "for volume %s", cli_req.volname); + GF_ASSERT (req); + GF_ASSERT (op_errstr); - dict = dict_new (); - if (!dict) - goto out; + rsp.op_ret = op_ret; + rsp.op_errstr = op_errstr; + if (rsp.op_ret == 0) + ret = dict_allocate_and_serialize (dict, &rsp.fsm_log.fsm_log_val, + &rsp.fsm_log.fsm_log_len); - ret = dict_set_dynmstr (dict, "volname", cli_req.volname); - if (ret) - goto out; - ret = dict_set_dynmstr (dict, "brick", cli_req.brick); - if (ret) - goto out; - ret = dict_set_dynmstr (dict, "path", cli_req.path); - if (ret) - goto out; + ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gf1_cli_fsm_log_rsp); + GF_FREE (rsp.fsm_log.fsm_log_val); - ret = glusterd_log_filename (req, dict); + gf_log ("glusterd", GF_LOG_DEBUG, "Responded, ret: %d", ret); -out: - if (ret && dict) - dict_unref (dict); - return ret; + return 0; } int -glusterd_handle_log_locate (rpcsvc_request_t *req) +__glusterd_handle_fsm_log (rpcsvc_request_t *req) { - int32_t ret = -1; - gf1_cli_log_locate_req cli_req = {0,}; - gf1_cli_log_locate_rsp rsp = {0,}; - glusterd_conf_t *priv = NULL; - glusterd_volinfo_t *volinfo = NULL; - glusterd_brickinfo_t *brickinfo = NULL; - char tmp_str[PATH_MAX] = {0,}; + int32_t ret = -1; + gf1_cli_fsm_log_req cli_req = {0,}; + dict_t *dict = NULL; + glusterd_sm_tr_log_t *log = NULL; + xlator_t *this = NULL; + glusterd_conf_t *conf = NULL; + char msg[2048] = {0}; + glusterd_peerinfo_t *peerinfo = NULL; GF_ASSERT (req); - priv = THIS->private; - - if (!gf_xdr_to_cli_log_locate_req (req->msg[0], &cli_req)) { + ret = xdr_to_generic (req->msg[0], &cli_req, + (xdrproc_t)xdr_gf1_cli_fsm_log_req); + if (ret < 0) { //failed to decode msg; req->rpc_err = GARBAGE_ARGS; + snprintf (msg, sizeof (msg), "Garbage request"); goto out; } - gf_log ("glusterd", GF_LOG_NORMAL, "Received log locate req " - "for volume %s", cli_req.volname); - - if (strchr (cli_req.brick, ':')) { - /* TODO: need to get info of only that brick and then - tell what is the exact 
location */ - gf_log ("", GF_LOG_DEBUG, "brick : %s", cli_req.brick); + if (strcmp ("", cli_req.name) == 0) { + this = THIS; + conf = this->private; + log = &conf->op_sm_log; + } else { + ret = glusterd_friend_find_by_hostname (cli_req.name, + &peerinfo); + if (ret) { + snprintf (msg, sizeof (msg), "%s is not a peer", + cli_req.name); + goto out; + } + log = &peerinfo->sm_log; } - ret = glusterd_volinfo_find (cli_req.volname, &volinfo); - if (ret) { - rsp.path = "request sent on non-existent volume"; + dict = dict_new (); + if (!dict) { + ret = -1; goto out; } - list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) { - if (brickinfo->logfile) { - strcpy (tmp_str, brickinfo->logfile); - rsp.path = dirname (tmp_str); - } else { - snprintf (tmp_str, PATH_MAX, "%s/logs/bricks/", - priv->workdir); - rsp.path = tmp_str; - } - break; - } - - ret = 0; + ret = glusterd_sm_tr_log_add_to_dict (dict, log); out: - rsp.op_ret = ret; - if (!rsp.path) - rsp.path = ""; + (void)glusterd_fsm_log_send_resp (req, ret, msg, dict); + free (cli_req.name);//malloced by xdr + if (dict) + dict_unref (dict); - ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, - gf_xdr_serialize_cli_log_locate_rsp); + glusterd_friend_sm (); + glusterd_op_sm (); - if (cli_req.brick) - free (cli_req.brick); //its malloced by xdr - if (cli_req.volname) - free (cli_req.volname); //its malloced by xdr - return ret; + return 0;//send 0 to avoid double reply } int -glusterd_handle_log_rotate (rpcsvc_request_t *req) +glusterd_handle_fsm_log (rpcsvc_request_t *req) { - int32_t ret = -1; - gf1_cli_log_rotate_req cli_req = {0,}; - dict_t *dict = NULL; - - GF_ASSERT (req); - - if (!gf_xdr_to_cli_log_rotate_req (req->msg[0], &cli_req)) { - //failed to decode msg; - req->rpc_err = GARBAGE_ARGS; - goto out; - } - - gf_log ("glusterd", GF_LOG_NORMAL, "Received log rotate req " - "for volume %s", cli_req.volname); - - dict = dict_new (); - if (!dict) - goto out; - - ret = dict_set_dynmstr (dict, "volname", cli_req.volname); - if (ret) - goto out; - - ret = dict_set_dynmstr (dict, "brick", cli_req.brick); - if (ret) - goto out; - - ret = dict_set_uint64 (dict, "rotate-key", (uint64_t)time (NULL)); - if (ret) - goto out; - - ret = glusterd_log_rotate (req, dict); - -out: - if (ret && dict) - dict_unref (dict); - return ret; + return glusterd_big_locked_handler (req, __glusterd_handle_fsm_log); } int -glusterd_handle_sync_volume (rpcsvc_request_t *req) +glusterd_op_lock_send_resp (rpcsvc_request_t *req, int32_t status) { - int32_t ret = -1; - gf1_cli_sync_volume_req cli_req = {0,}; - dict_t *dict = NULL; - gf1_cli_sync_volume_rsp cli_rsp = {0.}; - char msg[2048] = {0,}; - gf_boolean_t free_hostname = _gf_true; - gf_boolean_t free_volname = _gf_true; - glusterd_volinfo_t *volinfo = NULL; - - GF_ASSERT (req); - if (!gf_xdr_to_cli_sync_volume_req (req->msg[0], &cli_req)) { - //failed to decode msg; - req->rpc_err = GARBAGE_ARGS; - goto out; - } - gf_log ("glusterd", GF_LOG_NORMAL, "Received volume sync req " - "for volume %s", - (cli_req.flags & GF_CLI_SYNC_ALL) ? 
"all" : cli_req.volname); + gd1_mgmt_cluster_lock_rsp rsp = {{0},}; + int ret = -1; - dict = dict_new (); - if (!dict) { - gf_log ("", GF_LOG_ERROR, "Can't allocate sync vol dict"); - goto out; - } + GF_ASSERT (req); + glusterd_get_uuid (&rsp.uuid); + rsp.op_ret = status; - if (!glusterd_is_local_addr (cli_req.hostname)) { - ret = -1; - snprintf (msg, sizeof (msg), "sync from localhost" - " not allowed"); - goto out; - } + ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gd1_mgmt_cluster_lock_rsp); - ret = dict_set_dynmstr (dict, "hostname", cli_req.hostname); - if (ret) { - gf_log ("", GF_LOG_ERROR, "hostname set failed"); - snprintf (msg, sizeof (msg), "hostname set failed"); - goto out; - } else { - free_hostname = _gf_false; - } + gf_log (THIS->name, GF_LOG_DEBUG, "Responded to lock, ret: %d", ret); - ret = dict_set_int32 (dict, "flags", cli_req.flags); - if (ret) { - gf_log ("", GF_LOG_ERROR, "volume flags set failed"); - snprintf (msg, sizeof (msg), "volume flags set failed"); - goto out; - } + return 0; +} - if (!cli_req.flags) { - ret = glusterd_volinfo_find (cli_req.volname, &volinfo); - if (!ret) { - snprintf (msg, sizeof (msg), "please delete the " - "volume: %s before sync", cli_req.volname); - ret = -1; - goto out; - } +int +glusterd_op_unlock_send_resp (rpcsvc_request_t *req, int32_t status) +{ - ret = dict_set_dynmstr (dict, "volname", cli_req.volname); - if (ret) { - gf_log ("", GF_LOG_ERROR, "volume name set failed"); - snprintf (msg, sizeof (msg), "volume name set failed"); - goto out; - } else { - free_volname = _gf_false; - } - } else { - free_volname = _gf_false; - if (glusterd_volume_count_get ()) { - snprintf (msg, sizeof (msg), "please delete all the " - "volumes before full sync"); - ret = -1; - goto out; - } - } + gd1_mgmt_cluster_unlock_rsp rsp = {{0},}; + int ret = -1; - ret = glusterd_sync_volume (req, dict); + GF_ASSERT (req); + rsp.op_ret = status; + glusterd_get_uuid (&rsp.uuid); -out: - if (ret) { - cli_rsp.op_ret = -1; - cli_rsp.op_errstr = msg; - glusterd_submit_reply(req, &cli_rsp, NULL, 0, NULL, - gf_xdr_from_cli_sync_volume_rsp); - if (free_hostname && cli_req.hostname) - free (cli_req.hostname); - if (free_volname && cli_req.volname) - free (cli_req.volname); - if (dict) - dict_unref (dict); - if (!glusterd_opinfo_unlock()) - gf_log ("glusterd", GF_LOG_ERROR, "Unlock on " - "opinfo failed"); + ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_rsp); - ret = 0; //sent error to cli, prevent second reply - } + gf_log (THIS->name, GF_LOG_DEBUG, "Responded to unlock, ret: %d", ret); return ret; } int -glusterd_op_lock_send_resp (rpcsvc_request_t *req, int32_t status) +glusterd_op_mgmt_v3_lock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id, + int32_t status) { - gd1_mgmt_cluster_lock_rsp rsp = {{0},}; - int ret = -1; + gd1_mgmt_v3_lock_rsp rsp = {{0},}; + int ret = -1; GF_ASSERT (req); + GF_ASSERT (txn_id); glusterd_get_uuid (&rsp.uuid); rsp.op_ret = status; + if (rsp.op_ret) + rsp.op_errno = errno; + uuid_copy (rsp.txn_id, *txn_id); ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, - gd_xdr_serialize_mgmt_cluster_lock_rsp); + (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp); - gf_log ("glusterd", GF_LOG_NORMAL, - "Responded, ret: %d", ret); + gf_log (THIS->name, GF_LOG_DEBUG, "Responded to mgmt_v3 lock, ret: %d", + ret); - return 0; + return ret; } int -glusterd_op_unlock_send_resp (rpcsvc_request_t *req, int32_t status) +glusterd_op_mgmt_v3_unlock_send_resp (rpcsvc_request_t *req, uuid_t 
*txn_id, + int32_t status) { - gd1_mgmt_cluster_unlock_rsp rsp = {{0},}; + gd1_mgmt_v3_unlock_rsp rsp = {{0},}; int ret = -1; GF_ASSERT (req); + GF_ASSERT (txn_id); rsp.op_ret = status; + if (rsp.op_ret) + rsp.op_errno = errno; glusterd_get_uuid (&rsp.uuid); + uuid_copy (rsp.txn_id, *txn_id); ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, - gd_xdr_serialize_mgmt_cluster_unlock_rsp); + (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp); - gf_log ("glusterd", GF_LOG_NORMAL, - "Responded to unlock, ret: %d", ret); + gf_log (THIS->name, GF_LOG_DEBUG, + "Responded to mgmt_v3 unlock, ret: %d", + ret); return ret; } int -glusterd_handle_cluster_unlock (rpcsvc_request_t *req) +__glusterd_handle_cluster_unlock (rpcsvc_request_t *req) { gd1_mgmt_cluster_unlock_req unlock_req = {{0}, }; int32_t ret = -1; - char str[50] = {0, }; glusterd_op_lock_ctx_t *ctx = NULL; + glusterd_peerinfo_t *peerinfo = NULL; + xlator_t *this = NULL; + uuid_t *txn_id = &global_txn_id; + this = THIS; + GF_ASSERT (this); GF_ASSERT (req); - if (!gd_xdr_to_mgmt_cluster_unlock_req (req->msg[0], &unlock_req)) { - //failed to decode msg; + ret = xdr_to_generic (req->msg[0], &unlock_req, + (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_req); + if (ret < 0) { + gf_log (this->name, GF_LOG_ERROR, "Failed to decode unlock " + "request received from peer"); req->rpc_err = GARBAGE_ARGS; goto out; } - uuid_unparse (unlock_req.uuid, str); - gf_log ("glusterd", GF_LOG_NORMAL, - "Received UNLOCK from uuid: %s", str); + gf_log (this->name, GF_LOG_DEBUG, + "Received UNLOCK from uuid: %s", uuid_utoa (unlock_req.uuid)); + + if (glusterd_friend_find_by_uuid (unlock_req.uuid, &peerinfo)) { + gf_log (this->name, GF_LOG_WARNING, "%s doesn't " + "belong to the cluster. Ignoring request.", + uuid_utoa (unlock_req.uuid)); + ret = -1; + goto out; + } ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t); @@ -2223,22 +2147,37 @@ glusterd_handle_cluster_unlock (rpcsvc_request_t *req) } uuid_copy (ctx->uuid, unlock_req.uuid); ctx->req = req; + ctx->dict = NULL; - ret = glusterd_op_sm_inject_event (GD_OP_EVENT_UNLOCK, ctx); + ret = glusterd_op_sm_inject_event (GD_OP_EVENT_UNLOCK, txn_id, ctx); out: + glusterd_friend_sm (); + glusterd_op_sm (); + return ret; } int -glusterd_op_stage_send_resp (rpcsvc_request_t *req, - int32_t op, int32_t status, char *op_errstr) +glusterd_handle_cluster_unlock (rpcsvc_request_t *req) { + return glusterd_big_locked_handler (req, + __glusterd_handle_cluster_unlock); +} - gd1_mgmt_stage_op_rsp rsp = {{0},}; - int ret = -1; +int +glusterd_op_stage_send_resp (rpcsvc_request_t *req, + int32_t op, int32_t status, + char *op_errstr, dict_t *rsp_dict) +{ + gd1_mgmt_stage_op_rsp rsp = {{0},}; + int ret = -1; + xlator_t *this = NULL; + this = THIS; + GF_ASSERT (this); GF_ASSERT (req); + rsp.op_ret = status; glusterd_get_uuid (&rsp.uuid); rsp.op = op; @@ -2247,11 +2186,19 @@ glusterd_op_stage_send_resp (rpcsvc_request_t *req, else rsp.op_errstr = ""; + ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val, + &rsp.dict.dict_len); + if (ret < 0) { + gf_log (this->name, GF_LOG_ERROR, + "failed to get serialized length of dict"); + return ret; + } + ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, - gd_xdr_serialize_mgmt_stage_op_rsp); + (xdrproc_t)xdr_gd1_mgmt_stage_op_rsp); - gf_log ("glusterd", GF_LOG_NORMAL, - "Responded to stage, ret: %d", ret); + gf_log (this->name, GF_LOG_DEBUG, "Responded to stage, ret: %d", ret); + GF_FREE (rsp.dict.dict_val); return ret; } @@ -2263,7 +2210,10 @@ glusterd_op_commit_send_resp 
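
Unlike the older cluster lock/unlock replies, the mgmt_v3 variants above copy a transaction id into the response, so the originator can tell which in-flight transaction an ack belongs to. The lookup on the receiving side is not in this hunk; a small standalone illustration with libuuid and a fixed table (link with -luuid):

#include <stddef.h>
#include <uuid/uuid.h>

struct txn {
        uuid_t id;
        int    in_use;
};

static struct txn txn_table[8];

/* find the pending transaction an incoming txn_id refers to */
static struct txn *
txn_lookup (uuid_t id)
{
        int i;

        for (i = 0; i < 8; i++) {
                if (txn_table[i].in_use &&
                    uuid_compare (txn_table[i].id, id) == 0)
                        return &txn_table[i];
        }
        return NULL;
}

int
main (void)
{
        uuid_t id;

        uuid_generate (id);
        uuid_copy (txn_table[0].id, id);
        txn_table[0].in_use = 1;

        return txn_lookup (id) ? 0 : 1;
}
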
(rpcsvc_request_t *req, { gd1_mgmt_commit_op_rsp rsp = {{0}, }; int ret = -1; + xlator_t *this = NULL; + this = THIS; + GF_ASSERT (this); GF_ASSERT (req); rsp.op_ret = status; glusterd_get_uuid (&rsp.uuid); @@ -2274,110 +2224,169 @@ glusterd_op_commit_send_resp (rpcsvc_request_t *req, else rsp.op_errstr = ""; - ret = dict_allocate_and_serialize (rsp_dict, - &rsp.dict.dict_val, - (size_t *)&rsp.dict.dict_len); - if (ret < 0) { - gf_log ("", GF_LOG_DEBUG, - "failed to get serialized length of dict"); - goto out; + if (rsp_dict) { + ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val, + &rsp.dict.dict_len); + if (ret < 0) { + gf_log (this->name, GF_LOG_ERROR, + "failed to get serialized length of dict"); + goto out; + } } ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, - gd_xdr_serialize_mgmt_commit_op_rsp); + (xdrproc_t)xdr_gd1_mgmt_commit_op_rsp); - gf_log ("glusterd", GF_LOG_NORMAL, - "Responded to commit, ret: %d", ret); + gf_log (this->name, GF_LOG_DEBUG, "Responded to commit, ret: %d", ret); out: - if (rsp.dict.dict_val) - GF_FREE (rsp.dict.dict_val); + GF_FREE (rsp.dict.dict_val); return ret; } int -glusterd_handle_incoming_friend_req (rpcsvc_request_t *req) +__glusterd_handle_incoming_friend_req (rpcsvc_request_t *req) { int32_t ret = -1; gd1_mgmt_friend_req friend_req = {{0},}; - char str[50] = {0,}; - dict_t *dict = NULL; + gf_boolean_t run_fsm = _gf_true; GF_ASSERT (req); - if (!gd_xdr_to_mgmt_friend_req (req->msg[0], &friend_req)) { + ret = xdr_to_generic (req->msg[0], &friend_req, + (xdrproc_t)xdr_gd1_mgmt_friend_req); + if (ret < 0) { //failed to decode msg; req->rpc_err = GARBAGE_ARGS; goto out; } - uuid_unparse (friend_req.uuid, str); - - gf_log ("glusterd", GF_LOG_NORMAL, - "Received probe from uuid: %s", str); - - dict = dict_new (); - if (!dict) { - ret = -1; - goto out; - } - - ret = dict_unserialize (friend_req.vols.vols_val, - friend_req.vols.vols_len, - &dict); - - if (ret) - goto out; - else - dict->extra_stdfree = friend_req.vols.vols_val; + gf_log ("glusterd", GF_LOG_INFO, + "Received probe from uuid: %s", uuid_utoa (friend_req.uuid)); ret = glusterd_handle_friend_req (req, friend_req.uuid, friend_req.hostname, friend_req.port, - dict); + &friend_req); + + if (ret == GLUSTERD_CONNECTION_AWAITED) { + //fsm should be run after connection establishes + run_fsm = _gf_false; + ret = 0; + } out: - if (ret && dict) - dict_unref (dict); - if (friend_req.hostname) - free (friend_req.hostname);//malloced by xdr + free (friend_req.hostname);//malloced by xdr + + if (run_fsm) { + glusterd_friend_sm (); + glusterd_op_sm (); + } return ret; } int -glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req) +glusterd_handle_incoming_friend_req (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_incoming_friend_req); +} + +int +__glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req) { int32_t ret = -1; gd1_mgmt_friend_req friend_req = {{0},}; - char str[50]; + char remote_hostname[UNIX_PATH_MAX + 1] = {0,}; GF_ASSERT (req); - if (!gd_xdr_to_mgmt_friend_req (req->msg[0], &friend_req)) { + ret = xdr_to_generic (req->msg[0], &friend_req, + (xdrproc_t)xdr_gd1_mgmt_friend_req); + if (ret < 0) { //failed to decode msg; req->rpc_err = GARBAGE_ARGS; goto out; } - uuid_unparse (friend_req.uuid, str); - gf_log ("glusterd", GF_LOG_NORMAL, - "Received unfriend from uuid: %s", str); + gf_log ("glusterd", GF_LOG_INFO, + "Received unfriend from uuid: %s", uuid_utoa (friend_req.uuid)); + ret = glusterd_remote_hostname_get (req, 
remote_hostname, + sizeof (remote_hostname)); + if (ret) { + gf_log ("", GF_LOG_ERROR, "Unable to get the remote hostname"); + goto out; + } ret = glusterd_handle_unfriend_req (req, friend_req.uuid, - friend_req.hostname, friend_req.port); + remote_hostname, friend_req.port); out: - if (friend_req.hostname) - free (friend_req.hostname);//malloced by xdr - if (friend_req.vols.vols_val) - free (friend_req.vols.vols_val);//malloced by xdr + free (friend_req.hostname);//malloced by xdr + free (friend_req.vols.vols_val);//malloced by xdr + + glusterd_friend_sm (); + glusterd_op_sm (); + return ret; } int -glusterd_handle_friend_update (rpcsvc_request_t *req) +glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_incoming_unfriend_req); + +} + +int +glusterd_handle_friend_update_delete (dict_t *dict) +{ + char *hostname = NULL; + int32_t ret = -1; + + GF_ASSERT (dict); + + ret = dict_get_str (dict, "hostname", &hostname); + if (ret) + goto out; + + ret = glusterd_friend_remove (NULL, hostname); + +out: + gf_log ("", GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} + +int +glusterd_friend_hostname_update (glusterd_peerinfo_t *peerinfo, + char *hostname, + gf_boolean_t store_update) +{ + char *new_hostname = NULL; + int ret = 0; + + GF_ASSERT (peerinfo); + GF_ASSERT (hostname); + + new_hostname = gf_strdup (hostname); + if (!new_hostname) { + ret = -1; + goto out; + } + + GF_FREE (peerinfo->hostname); + peerinfo->hostname = new_hostname; + if (store_update) + ret = glusterd_store_peerinfo (peerinfo); +out: + gf_log ("", GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} + +int +__glusterd_handle_friend_update (rpcsvc_request_t *req) { int32_t ret = -1; gd1_mgmt_friend_update friend_req = {{0},}; - char str[50] = {0,}; glusterd_peerinfo_t *peerinfo = NULL; glusterd_conf_t *priv = NULL; xlator_t *this = NULL; @@ -2390,6 +2399,8 @@ glusterd_handle_friend_update (rpcsvc_request_t *req) int i = 1; int count = 0; uuid_t uuid = {0,}; + glusterd_peerctx_args_t args = {0}; + int32_t op = 0; GF_ASSERT (req); @@ -2398,15 +2409,22 @@ glusterd_handle_friend_update (rpcsvc_request_t *req) priv = this->private; GF_ASSERT (priv); - if (!gd_xdr_to_mgmt_friend_update (req->msg[0], &friend_req)) { + ret = xdr_to_generic (req->msg[0], &friend_req, + (xdrproc_t)xdr_gd1_mgmt_friend_update); + if (ret < 0) { //failed to decode msg; req->rpc_err = GARBAGE_ARGS; goto out; } - uuid_unparse (friend_req.uuid, str); - gf_log ("glusterd", GF_LOG_NORMAL, - "Received friend update from uuid: %s", str); + ret = glusterd_friend_find (friend_req.uuid, NULL, &tmp); + if (ret) { + gf_log ("", GF_LOG_CRITICAL, "Received friend update request " + "from unknown peer %s", uuid_utoa (friend_req.uuid)); + goto out; + } + gf_log ("glusterd", GF_LOG_INFO, + "Received friend update from uuid: %s", uuid_utoa (friend_req.uuid)); if (friend_req.friends.friends_len) { /* Unserialize the dictionary */ @@ -2429,6 +2447,16 @@ glusterd_handle_friend_update (rpcsvc_request_t *req) if (ret) goto out; + ret = dict_get_int32 (dict, "op", &op); + if (ret) + goto out; + + if (GD_FRIEND_UPDATE_DEL == op) { + ret = glusterd_handle_friend_update_delete (dict); + goto out; + } + + args.mode = GD_MODE_ON; while ( i <= count) { snprintf (key, sizeof (key), "friend%d.uuid", i); ret = dict_get_str (dict, key, &uuid_buf); @@ -2440,11 +2468,17 @@ glusterd_handle_friend_update (rpcsvc_request_t *req) if (ret) goto out; - gf_log ("", GF_LOG_NORMAL, "Received uuid: %s, hostname:%s", + 
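
glusterd_friend_hostname_update above replaces an owned string by allocating the copy first and freeing the old value only on success, so the peerinfo never holds a dangling pointer if allocation fails. The same rule in a standalone form, as an illustration:

#include <stdlib.h>
#include <string.h>

struct peer {
        char *hostname;
};

static int
peer_set_hostname (struct peer *p, const char *hostname)
{
        char *copy = strdup (hostname);

        if (!copy)
                return -1;       /* old value stays intact */

        free (p->hostname);
        p->hostname = copy;
        return 0;
}

int
main (void)
{
        struct peer p   = { .hostname = strdup ("old-name") };
        int         ret = peer_set_hostname (&p, "new-name");

        free (p.hostname);
        return ret;
}
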
gf_log ("", GF_LOG_INFO, "Received uuid: %s, hostname:%s", uuid_buf, hostname); - if (!uuid_compare (uuid, priv->uuid)) { - gf_log ("", GF_LOG_NORMAL, "Received my uuid as Friend"); + if (uuid_is_null (uuid)) { + gf_log (this->name, GF_LOG_WARNING, "Updates mustn't " + "contain peer with 'null' uuid"); + continue; + } + + if (!uuid_compare (uuid, MY_UUID)) { + gf_log ("", GF_LOG_INFO, "Received my uuid as Friend"); i++; continue; } @@ -2452,744 +2486,1025 @@ glusterd_handle_friend_update (rpcsvc_request_t *req) ret = glusterd_friend_find (uuid, hostname, &tmp); if (!ret) { + if (strcmp (hostname, tmp->hostname) != 0) { + glusterd_friend_hostname_update (tmp, hostname, + _gf_true); + } i++; continue; } ret = glusterd_friend_add (hostname, friend_req.port, GD_FRIEND_STATE_BEFRIENDED, - &uuid, NULL, &peerinfo, 0); + &uuid, &peerinfo, 0, &args); i++; } out: - uuid_copy (rsp.uuid, priv->uuid); + uuid_copy (rsp.uuid, MY_UUID); ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, - gd_xdr_serialize_mgmt_friend_update_rsp); - if (dict) + (xdrproc_t)xdr_gd1_mgmt_friend_update_rsp); + if (dict) { + if (!dict->extra_stdfree && friend_req.friends.friends_val) + free (friend_req.friends.friends_val);//malloced by xdr dict_unref (dict); + } else { + free (friend_req.friends.friends_val);//malloced by xdr + } + + glusterd_friend_sm (); + glusterd_op_sm (); + return ret; } int -glusterd_handle_probe_query (rpcsvc_request_t *req) +glusterd_handle_friend_update (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_friend_update); +} + +int +__glusterd_handle_probe_query (rpcsvc_request_t *req) { - int32_t ret = -1; - char str[50]; - xlator_t *this = NULL; - glusterd_conf_t *conf = NULL; - gd1_mgmt_probe_req probe_req = {{0},}; - gd1_mgmt_probe_rsp rsp = {{0},}; - glusterd_peer_hostname_t *name = NULL; + int32_t ret = -1; + xlator_t *this = NULL; + glusterd_conf_t *conf = NULL; + gd1_mgmt_probe_req probe_req = {{0},}; + gd1_mgmt_probe_rsp rsp = {{0},}; glusterd_peerinfo_t *peerinfo = NULL; + glusterd_peerctx_args_t args = {0}; + int port = 0; char remote_hostname[UNIX_PATH_MAX + 1] = {0,}; GF_ASSERT (req); - if (!gd_xdr_to_mgmt_probe_req (req->msg[0], &probe_req)) { + ret = xdr_to_generic (req->msg[0], &probe_req, + (xdrproc_t)xdr_gd1_mgmt_probe_req); + if (ret < 0) { //failed to decode msg; req->rpc_err = GARBAGE_ARGS; goto out; } - this = THIS; conf = this->private; - uuid_unparse (probe_req.uuid, str); - - gf_log ("glusterd", GF_LOG_NORMAL, - "Received probe from uuid: %s", str); + if (probe_req.port) + port = probe_req.port; + else + port = GF_DEFAULT_BASE_PORT; + + gf_log ("glusterd", GF_LOG_INFO, + "Received probe from uuid: %s", uuid_utoa (probe_req.uuid)); + + /* Check for uuid collision and handle it in a user friendly way by + * sending the error. + */ + if (!uuid_compare (probe_req.uuid, MY_UUID)) { + gf_log (THIS->name, GF_LOG_ERROR, "Peer uuid %s is same as " + "local uuid. 
Please check the uuid of both the peers " + "from %s/%s", uuid_utoa (probe_req.uuid), + GLUSTERD_DEFAULT_WORKDIR, GLUSTERD_INFO_FILE); + rsp.op_ret = -1; + rsp.op_errno = GF_PROBE_SAME_UUID; + rsp.port = port; + goto respond; + } ret = glusterd_remote_hostname_get (req, remote_hostname, sizeof (remote_hostname)); if (ret) { - GF_ASSERT (0); + gf_log ("", GF_LOG_ERROR, "Unable to get the remote hostname"); goto out; } ret = glusterd_friend_find (probe_req.uuid, remote_hostname, &peerinfo); - if ((ret == 0 ) || list_empty (&conf->peers)) { - ret = glusterd_peer_hostname_new (probe_req.hostname, &name); - - if (ret) { - gf_log ("", GF_LOG_ERROR, "Unable to get new peer_hostname"); - } else { - list_add_tail (&name->hostname_list, &conf->hostnames); - } - - } else { + if ((ret != 0 ) && (!list_empty (&conf->peers))) { rsp.op_ret = -1; rsp.op_errno = GF_PROBE_ANOTHER_CLUSTER; + } else if (ret) { + gf_log ("glusterd", GF_LOG_INFO, "Unable to find peerinfo" + " for host: %s (%d)", remote_hostname, port); + args.mode = GD_MODE_ON; + ret = glusterd_friend_add (remote_hostname, port, + GD_FRIEND_STATE_PROBE_RCVD, + NULL, &peerinfo, 0, &args); + if (ret) { + gf_log ("", GF_LOG_ERROR, "Failed to add peer %s", + remote_hostname); + rsp.op_errno = GF_PROBE_ADD_FAILED; + } } - uuid_copy (rsp.uuid, conf->uuid); +respond: + uuid_copy (rsp.uuid, MY_UUID); rsp.hostname = probe_req.hostname; + rsp.op_errstr = ""; - ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, - gd_xdr_serialize_mgmt_probe_rsp); + glusterd_submit_reply (req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gd1_mgmt_probe_rsp); + ret = 0; - gf_log ("glusterd", GF_LOG_NORMAL, "Responded to %s, op_ret: %d, " - "op_errno: %d, ret: %d", probe_req.hostname, + gf_log ("glusterd", GF_LOG_INFO, "Responded to %s, op_ret: %d, " + "op_errno: %d, ret: %d", remote_hostname, rsp.op_ret, rsp.op_errno, ret); out: - if (probe_req.hostname) - free (probe_req.hostname);//malloced by xdr + free (probe_req.hostname);//malloced by xdr + + glusterd_friend_sm (); + glusterd_op_sm (); + return ret; } -int -glusterd_friend_remove (uuid_t uuid, char *hostname) +int glusterd_handle_probe_query (rpcsvc_request_t *req) { - int ret = 0; - glusterd_peerinfo_t *peerinfo = NULL; - - ret = glusterd_friend_find (uuid, hostname, &peerinfo); - if (ret) - goto out; - - ret = glusterd_friend_cleanup (peerinfo); -out: - gf_log ("", GF_LOG_DEBUG, "returning %d"); - return ret; + return glusterd_big_locked_handler (req, __glusterd_handle_probe_query); } int -glusterd_friend_add (const char *hoststr, int port, - glusterd_friend_sm_state_t state, - uuid_t *uuid, - struct rpc_clnt *rpc, - glusterd_peerinfo_t **friend, - gf_boolean_t restore) +__glusterd_handle_cli_profile_volume (rpcsvc_request_t *req) { - int ret = 0; - glusterd_conf_t *priv = NULL; - glusterd_peerinfo_t *peerinfo = NULL; - dict_t *options = NULL; - struct rpc_clnt_config rpc_cfg = {0,}; - glusterd_peer_hostname_t *name = NULL; - char *hostname = NULL; - - priv = THIS->private; - - peerinfo = GF_CALLOC (1, sizeof(*peerinfo), gf_gld_mt_peerinfo_t); + int32_t ret = -1; + gf_cli_req cli_req = {{0,}}; + dict_t *dict = NULL; + glusterd_op_t cli_op = GD_OP_PROFILE_VOLUME; + char *volname = NULL; + int32_t op = 0; + char err_str[2048] = {0,}; + xlator_t *this = NULL; - if (!peerinfo) - return -1; + GF_ASSERT (req); + this = THIS; + GF_ASSERT (this); - if (friend) - *friend = peerinfo; + ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); + if (ret < 0) { + //failed to decode msg; + req->rpc_err = 
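
The probe handler above now catches a node probing an address that resolves back to itself: it compares the incoming uuid with its own and answers with GF_PROBE_SAME_UUID. The check itself is just a uuid comparison; a standalone example with libuuid (link with -luuid):

#include <stdio.h>
#include <uuid/uuid.h>

int
main (void)
{
        uuid_t mine;
        uuid_t peer;

        uuid_generate (mine);
        uuid_copy (peer, mine);           /* simulate probing ourselves */

        if (uuid_compare (peer, mine) == 0)
                printf ("probe rejected: peer uuid matches local uuid\n");
        else
                printf ("probe allowed\n");
        return 0;
}
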
GARBAGE_ARGS; + goto out; + } - INIT_LIST_HEAD (&peerinfo->hostnames); - peerinfo->state.state = state; - if (hoststr) { - ret = glusterd_peer_hostname_new ((char *)hoststr, &name); - if (ret) + if (cli_req.dict.dict_len > 0) { + dict = dict_new(); + if (!dict) goto out; - list_add_tail (&peerinfo->hostnames, &name->hostname_list); - rpc_cfg.remote_host = gf_strdup (hoststr); - peerinfo->hostname = gf_strdup (hoststr); + dict_unserialize (cli_req.dict.dict_val, + cli_req.dict.dict_len, &dict); } - INIT_LIST_HEAD (&peerinfo->uuid_list); - list_add_tail (&peerinfo->uuid_list, &priv->peers); - - if (uuid) { - uuid_copy (peerinfo->uuid, *uuid); + ret = dict_get_str (dict, "volname", &volname); + if (ret) { + snprintf (err_str, sizeof (err_str), "Unable to get volume " + "name"); + gf_log (this->name, GF_LOG_ERROR, "%s", err_str); + goto out; } + gf_log (this->name, GF_LOG_INFO, "Received volume profile req " + "for volume %s", volname); + ret = dict_get_int32 (dict, "op", &op); + if (ret) { + snprintf (err_str, sizeof (err_str), "Unable to get operation"); + gf_log (this->name, GF_LOG_ERROR, "%s", err_str); + goto out; + } - if (hoststr) { - options = dict_new (); - if (!options) - return -1; - - hostname = gf_strdup((char*)hoststr); - if (!hostname) { - ret = -1; - goto out; - } - - ret = dict_set_dynstr (options, "remote-host", hostname); - if (ret) - goto out; + ret = glusterd_op_begin (req, cli_op, dict, err_str, sizeof (err_str)); +out: + glusterd_friend_sm (); + glusterd_op_sm (); - if (!port) - port = GLUSTERD_DEFAULT_PORT; + free (cli_req.dict.dict_val); - rpc_cfg.remote_port = port; + if (ret) { + if (err_str[0] == '\0') + snprintf (err_str, sizeof (err_str), + "Operation failed"); + ret = glusterd_op_send_cli_response (cli_op, ret, 0, req, + dict, err_str); + } - ret = dict_set_int32 (options, "remote-port", port); - if (ret) - goto out; + gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret); + return ret; +} - ret = dict_set_str (options, "transport.address-family", "inet"); - if (ret) - goto out; +int +glusterd_handle_cli_profile_volume (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_cli_profile_volume); +} - rpc = rpc_clnt_init (&rpc_cfg, options, THIS->ctx, THIS->name); +int +__glusterd_handle_getwd (rpcsvc_request_t *req) +{ + int32_t ret = -1; + gf1_cli_getwd_rsp rsp = {0,}; + glusterd_conf_t *priv = NULL; - if (!rpc) { - gf_log ("glusterd", GF_LOG_ERROR, - "rpc init failed for peer: %s!", hoststr); - ret = -1; - goto out; - } + GF_ASSERT (req); - ret = rpc_clnt_register_notify (rpc, glusterd_rpc_notify, - peerinfo); + priv = THIS->private; + GF_ASSERT (priv); - peerinfo->rpc = rpc; + gf_log ("glusterd", GF_LOG_INFO, "Received getwd req"); - } + rsp.wd = priv->workdir; - if (!restore) - ret = glusterd_store_update_peerinfo (peerinfo); + glusterd_submit_reply (req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gf1_cli_getwd_rsp); + ret = 0; + glusterd_friend_sm (); + glusterd_op_sm (); -out: - gf_log ("glusterd", GF_LOG_NORMAL, "connect returned %d", ret); - if (rpc_cfg.remote_host) - GF_FREE (rpc_cfg.remote_host); return ret; } - - int -glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr, int port) +glusterd_handle_getwd (rpcsvc_request_t *req) { - int ret = -1; - glusterd_peerinfo_t *peerinfo = NULL; - glusterd_friend_sm_event_t *event = NULL; - glusterd_probe_ctx_t *ctx = NULL; + return glusterd_big_locked_handler (req, __glusterd_handle_getwd); +} - GF_ASSERT (hoststr); +int +__glusterd_handle_mount (rpcsvc_request_t *req) +{ 
+ gf1_cli_mount_req mnt_req = {0,}; + gf1_cli_mount_rsp rsp = {0,}; + dict_t *dict = NULL; + int ret = 0; + glusterd_conf_t *priv = NULL; - ret = glusterd_friend_find (NULL, (char *)hoststr, &peerinfo); + GF_ASSERT (req); + priv = THIS->private; - if (ret) { - gf_log ("glusterd", GF_LOG_NORMAL, "Unable to find peerinfo" - " for host: %s (%d)", hoststr, port); - ret = glusterd_friend_add ((char *)hoststr, port, - GD_FRIEND_STATE_DEFAULT, - NULL, NULL, &peerinfo, 0); + ret = xdr_to_generic (req->msg[0], &mnt_req, + (xdrproc_t)xdr_gf1_cli_mount_req); + if (ret < 0) { + //failed to decode msg; + req->rpc_err = GARBAGE_ARGS; + rsp.op_ret = -1; + rsp.op_errno = EINVAL; + goto out; } - ret = glusterd_friend_sm_new_event - (GD_FRIEND_EVENT_PROBE, &event); - - if (ret) { - gf_log ("glusterd", GF_LOG_ERROR, "Unable to get new event"); - return ret; - } + gf_log ("glusterd", GF_LOG_INFO, "Received mount req"); - ctx = GF_CALLOC (1, sizeof(*ctx), gf_gld_mt_probe_ctx_t); + if (mnt_req.dict.dict_len) { + /* Unserialize the dictionary */ + dict = dict_new (); - if (!ctx) { - return ret; + ret = dict_unserialize (mnt_req.dict.dict_val, + mnt_req.dict.dict_len, + &dict); + if (ret < 0) { + gf_log ("glusterd", GF_LOG_ERROR, + "failed to " + "unserialize req-buffer to dictionary"); + rsp.op_ret = -1; + rsp.op_errno = -EINVAL; + goto out; + } else { + dict->extra_stdfree = mnt_req.dict.dict_val; + } } - ctx->hostname = gf_strdup (hoststr); - ctx->port = port; - ctx->req = req; + synclock_unlock (&priv->big_lock); + rsp.op_ret = glusterd_do_mount (mnt_req.label, dict, + &rsp.path, &rsp.op_errno); + synclock_lock (&priv->big_lock); - event->peerinfo = peerinfo; - event->ctx = ctx; - - ret = glusterd_friend_sm_inject_event (event); + out: + if (!rsp.path) + rsp.path = ""; - if (ret) { - gf_log ("glusterd", GF_LOG_ERROR, "Unable to inject event %d, " - "ret = %d", event->event, ret); - return ret; - } + glusterd_submit_reply (req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gf1_cli_mount_rsp); + ret = 0; - if (!peerinfo->connected) { - return GLUSTERD_CONNECTION_AWAITED; - } + if (dict) + dict_unref (dict); + if (*rsp.path) + GF_FREE (rsp.path); + glusterd_friend_sm (); + glusterd_op_sm (); return ret; } int -glusterd_deprobe_begin (rpcsvc_request_t *req, const char *hoststr, int port) +glusterd_handle_mount (rpcsvc_request_t *req) { - int ret = -1; - glusterd_peerinfo_t *peerinfo = NULL; - glusterd_friend_sm_event_t *event = NULL; - glusterd_probe_ctx_t *ctx = NULL; + return glusterd_big_locked_handler (req, __glusterd_handle_mount); +} - GF_ASSERT (hoststr); - GF_ASSERT (req); +int +__glusterd_handle_umount (rpcsvc_request_t *req) +{ + gf1_cli_umount_req umnt_req = {0,}; + gf1_cli_umount_rsp rsp = {0,}; + char *mountbroker_root = NULL; + char mntp[PATH_MAX] = {0,}; + char *path = NULL; + runner_t runner = {0,}; + int ret = 0; + xlator_t *this = THIS; + gf_boolean_t dir_ok = _gf_false; + char *pdir = NULL; + char *t = NULL; + glusterd_conf_t *priv = NULL; - ret = glusterd_friend_find (NULL, (char *)hoststr, &peerinfo); + GF_ASSERT (req); + GF_ASSERT (this); + priv = this->private; - if (ret) { - gf_log ("glusterd", GF_LOG_NORMAL, "Unable to find peerinfo" - " for host: %s %d", hoststr, port); + ret = xdr_to_generic (req->msg[0], &umnt_req, + (xdrproc_t)xdr_gf1_cli_umount_req); + if (ret < 0) { + //failed to decode msg; + req->rpc_err = GARBAGE_ARGS; + rsp.op_ret = -1; goto out; } - if (!peerinfo->rpc) { - //handle this case + gf_log ("glusterd", GF_LOG_INFO, "Received umount req"); + + if (dict_get_str (this->options, 
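
The mount handler above, and the umount handler that follows, release priv->big_lock around the call that can block (glusterd_do_mount here, runner_run for the external umount) and re-acquire it afterwards, so one slow mount cannot stall every other handler. The same idiom with a plain pthread mutex, shown only as an illustration (build with -pthread):

#include <pthread.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static int
do_blocking_work (void)
{
        /* e.g. run an external mount/umount command */
        return 0;
}

static int
handler (void)
{
        int ret;

        pthread_mutex_lock (&big_lock);
        /* ... quick state changes under the lock ... */

        pthread_mutex_unlock (&big_lock);  /* don't hold it while blocked */
        ret = do_blocking_work ();
        pthread_mutex_lock (&big_lock);

        /* ... record the result under the lock ... */
        pthread_mutex_unlock (&big_lock);
        return ret;
}

int
main (void)
{
        return handler ();
}
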
"mountbroker-root", + &mountbroker_root) != 0) { + rsp.op_errno = ENOENT; goto out; } - ret = glusterd_friend_sm_new_event - (GD_FRIEND_EVENT_INIT_REMOVE_FRIEND, &event); - - if (ret) { - gf_log ("glusterd", GF_LOG_ERROR, - "Unable to get new event"); - return ret; + /* check if it is allowed to umount path */ + path = gf_strdup (umnt_req.path); + if (!path) { + rsp.op_errno = ENOMEM; + goto out; } - - ctx = GF_CALLOC (1, sizeof(*ctx), gf_gld_mt_probe_ctx_t); - - if (!ctx) { + dir_ok = _gf_false; + pdir = dirname (path); + t = strtail (pdir, mountbroker_root); + if (t && *t == '/') { + t = strtail(++t, MB_HIVE); + if (t && !*t) + dir_ok = _gf_true; + } + GF_FREE (path); + if (!dir_ok) { + rsp.op_errno = EACCES; goto out; } - ctx->hostname = gf_strdup (hoststr); - ctx->port = port; - ctx->req = req; - - event->ctx = ctx; + runinit (&runner); + runner_add_args (&runner, "umount", umnt_req.path, NULL); + if (umnt_req.lazy) + runner_add_arg (&runner, "-l"); + synclock_unlock (&priv->big_lock); + rsp.op_ret = runner_run (&runner); + synclock_lock (&priv->big_lock); + if (rsp.op_ret == 0) { + if (realpath (umnt_req.path, mntp)) + rmdir (mntp); + else { + rsp.op_ret = -1; + rsp.op_errno = errno; + } + if (unlink (umnt_req.path) != 0) { + rsp.op_ret = -1; + rsp.op_errno = errno; + } + } - event->peerinfo = peerinfo; + out: + if (rsp.op_errno) + rsp.op_ret = -1; - ret = glusterd_friend_sm_inject_event (event); + glusterd_submit_reply (req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gf1_cli_umount_rsp); + ret = 0; - if (ret) { - gf_log ("glusterd", GF_LOG_ERROR, "Unable to inject event %d, " - "ret = %d", event->event, ret); - goto out; - } + glusterd_friend_sm (); + glusterd_op_sm (); -out: return ret; } - int -glusterd_xfer_friend_remove_resp (rpcsvc_request_t *req, char *hostname, int port) +glusterd_handle_umount (rpcsvc_request_t *req) { - gd1_mgmt_friend_rsp rsp = {{0}, }; - int32_t ret = -1; - xlator_t *this = NULL; - glusterd_conf_t *conf = NULL; - - GF_ASSERT (hostname); - - rsp.op_ret = 0; - this = THIS; - GF_ASSERT (this); - - conf = this->private; + return glusterd_big_locked_handler (req, __glusterd_handle_umount); +} - uuid_copy (rsp.uuid, conf->uuid); - rsp.hostname = hostname; - rsp.port = port; - ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, - gd_xdr_serialize_mgmt_friend_rsp); +int +glusterd_friend_remove (uuid_t uuid, char *hostname) +{ + int ret = 0; + glusterd_peerinfo_t *peerinfo = NULL; + ret = glusterd_friend_find (uuid, hostname, &peerinfo); + if (ret) + goto out; - gf_log ("glusterd", GF_LOG_NORMAL, - "Responded to %s (%d), ret: %d", hostname, port, ret); + ret = glusterd_friend_remove_cleanup_vols (peerinfo->uuid); + if (ret) + gf_log (THIS->name, GF_LOG_WARNING, "Volumes cleanup failed"); + ret = glusterd_friend_cleanup (peerinfo); +out: + gf_log ("", GF_LOG_DEBUG, "returning %d", ret); return ret; } int -glusterd_xfer_friend_add_resp (rpcsvc_request_t *req, char *hostname, int port, - int32_t op_ret, int32_t op_errno) +glusterd_rpc_create (struct rpc_clnt **rpc, + dict_t *options, + rpc_clnt_notify_t notify_fn, + void *notify_data) { - gd1_mgmt_friend_rsp rsp = {{0}, }; - int32_t ret = -1; - xlator_t *this = NULL; - glusterd_conf_t *conf = NULL; - - GF_ASSERT (hostname); + struct rpc_clnt *new_rpc = NULL; + int ret = -1; + xlator_t *this = NULL; this = THIS; GF_ASSERT (this); - conf = this->private; + GF_ASSERT (options); - uuid_copy (rsp.uuid, conf->uuid); - rsp.op_ret = op_ret; - rsp.op_errno = op_errno; - rsp.hostname = gf_strdup (hostname); - rsp.port = port; + 
/* TODO: is 32 enough? or more ? */ + new_rpc = rpc_clnt_new (options, this->ctx, this->name, 16); + if (!new_rpc) + goto out; - ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, - gd_xdr_serialize_mgmt_friend_rsp); + ret = rpc_clnt_register_notify (new_rpc, notify_fn, notify_data); + *rpc = new_rpc; + if (ret) + goto out; + ret = rpc_clnt_start (new_rpc); +out: + if (ret) { + if (new_rpc) { + (void) rpc_clnt_unref (new_rpc); + } + } - gf_log ("glusterd", GF_LOG_NORMAL, - "Responded to %s (%d), ret: %d", hostname, port, ret); - if (rsp.hostname) - GF_FREE (rsp.hostname) + gf_log (this->name, GF_LOG_DEBUG, "returning %d", ret); return ret; } int -glusterd_xfer_cli_probe_resp (rpcsvc_request_t *req, int32_t op_ret, - int32_t op_errno, char *hostname, int port) +glusterd_transport_keepalive_options_get (int *interval, int *time) { - gf1_cli_probe_rsp rsp = {0, }; - int32_t ret = -1; - - GF_ASSERT (req); - - rsp.op_ret = op_ret; - rsp.op_errno = op_errno; - rsp.hostname = hostname; - rsp.port = port; + int ret = 0; + xlator_t *this = NULL; - ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, - gf_xdr_serialize_cli_probe_rsp); - - gf_log ("glusterd", GF_LOG_NORMAL, "Responded to CLI, ret: %d",ret); + this = THIS; + GF_ASSERT (this); - return ret; + ret = dict_get_int32 (this->options, + "transport.socket.keepalive-interval", + interval); + ret = dict_get_int32 (this->options, + "transport.socket.keepalive-time", + time); + return 0; } int -glusterd_xfer_cli_deprobe_resp (rpcsvc_request_t *req, int32_t op_ret, - int32_t op_errno, char *hostname) +glusterd_transport_inet_options_build (dict_t **options, const char *hostname, + int port) { - gf1_cli_deprobe_rsp rsp = {0, }; - int32_t ret = -1; - - GF_ASSERT (req); - - rsp.op_ret = op_ret; - rsp.op_errno = op_errno; - rsp.hostname = hostname; - - ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, - gf_xdr_serialize_cli_deprobe_rsp); - - gf_log ("glusterd", GF_LOG_NORMAL, "Responded to CLI, ret: %d",ret); + dict_t *dict = NULL; + int32_t interval = -1; + int32_t time = -1; + int ret = 0; - return ret; -} -int32_t -glusterd_op_txn_begin () -{ - int32_t ret = -1; - glusterd_conf_t *priv = NULL; - int32_t locked = 0; + GF_ASSERT (options); + GF_ASSERT (hostname); - priv = THIS->private; - GF_ASSERT (priv); + if (!port) + port = GLUSTERD_DEFAULT_PORT; - ret = glusterd_lock (priv->uuid); + /* Build default transport options */ + ret = rpc_transport_inet_options_build (&dict, hostname, port); + if (ret) + goto out; + /* Set frame-timeout to 10mins. Default timeout of 30 mins is too long + * when compared to 2 mins for cli timeout. 
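
A few hunks above, __glusterd_handle_umount only unmounts a path whose parent directory is exactly <mountbroker-root>/<hive>, so a request cannot name an arbitrary mount point. A standalone sketch of that containment test; the directory names are placeholders, and glusterd's own version uses its strtail() helper rather than a strcmp:

#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <stdlib.h>
#include <libgen.h>

static bool
umount_path_allowed (const char *path, const char *root, const char *hive)
{
        char  want[4096] = {0,};
        char *copy = strdup (path);
        char *pdir = NULL;
        bool  ok   = false;

        if (!copy)
                return false;

        pdir = dirname (copy);                    /* parent directory */
        snprintf (want, sizeof (want), "%s/%s", root, hive);
        ok = (strcmp (pdir, want) == 0);          /* exactly <root>/<hive> */

        free (copy);
        return ok;
}

int
main (void)
{
        printf ("%d\n", umount_path_allowed ("/var/mountbroker-root/hive/m1",
                                             "/var/mountbroker-root", "hive"));
        printf ("%d\n", umount_path_allowed ("/etc/passwd",
                                             "/var/mountbroker-root", "hive"));
        return 0;
}
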
This ensures users don't + * wait too long after cli timesout before being able to resume normal + * operations + */ + ret = dict_set_int32 (dict, "frame-timeout", 600); if (ret) { gf_log ("glusterd", GF_LOG_ERROR, - "Unable to acquire local lock, ret: %d", ret); + "Failed to set frame-timeout"); goto out; } - locked = 1; - gf_log ("glusterd", GF_LOG_NORMAL, "Acquired local lock"); - - ret = glusterd_op_sm_inject_event (GD_OP_EVENT_START_LOCK, NULL); - - gf_log ("glusterd", GF_LOG_NORMAL, "Returning %d", ret); + /* Set keepalive options */ + glusterd_transport_keepalive_options_get (&interval, &time); + if ((interval > 0) || (time > 0)) + ret = rpc_transport_keepalive_options_set (dict, interval, time); + *options = dict; out: - if (locked && ret) - glusterd_unlock (priv->uuid); + gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret); return ret; } -int32_t -glusterd_create_volume (rpcsvc_request_t *req, dict_t *dict) +int +glusterd_friend_rpc_create (xlator_t *this, glusterd_peerinfo_t *peerinfo, + glusterd_peerctx_args_t *args) { - int32_t ret = -1; - data_t *data = NULL; + dict_t *options = NULL; + int ret = -1; + glusterd_peerctx_t *peerctx = NULL; + data_t *data = NULL; - GF_ASSERT (req); - GF_ASSERT (dict); - - glusterd_op_set_op (GD_OP_CREATE_VOLUME); - - glusterd_op_set_ctx (GD_OP_CREATE_VOLUME, dict); + peerctx = GF_CALLOC (1, sizeof (*peerctx), gf_gld_mt_peerctx_t); + if (!peerctx) + goto out; - glusterd_op_set_ctx_free (GD_OP_CREATE_VOLUME, _gf_true); + if (args) + peerctx->args = *args; - glusterd_op_set_req (req); + peerctx->peerinfo = peerinfo; - data = dict_get (dict, "volname"); - if (!data) + ret = glusterd_transport_inet_options_build (&options, + peerinfo->hostname, + peerinfo->port); + if (ret) goto out; - data = dict_get (dict, "type"); - if (!data) - goto out; + /* + * For simulated multi-node testing, we need to make sure that we + * create our RPC endpoint with the same address that the peer would + * use to reach us. + */ + if (this->options) { + data = dict_get(this->options,"transport.socket.bind-address"); + if (data) { + ret = dict_set(options, + "transport.socket.source-addr",data); + } + } - data = dict_get (dict, "count"); - if (!data) + ret = glusterd_rpc_create (&peerinfo->rpc, options, + glusterd_peer_rpc_notify, peerctx); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, "failed to create rpc for" + " peer %s", peerinfo->hostname); goto out; + } + peerctx = NULL; + ret = 0; +out: + GF_FREE (peerctx); + return ret; +} - data = dict_get (dict, "bricks"); - if (!data) - goto out; +int +glusterd_friend_add (const char *hoststr, int port, + glusterd_friend_sm_state_t state, + uuid_t *uuid, + glusterd_peerinfo_t **friend, + gf_boolean_t restore, + glusterd_peerctx_args_t *args) +{ + int ret = 0; + xlator_t *this = NULL; + glusterd_conf_t *conf = NULL; - data = dict_get (dict, "transport"); - if (!data) - goto out; + this = THIS; + conf = this->private; + GF_ASSERT (conf); + GF_ASSERT (hoststr); - data = dict_get (dict, "volume-id"); - if (!data) + ret = glusterd_peerinfo_new (friend, state, uuid, hoststr, port); + if (ret) { goto out; + } - ret = glusterd_op_txn_begin (); + /* + * We can't add to the list after calling glusterd_friend_rpc_create, + * even if it succeeds, because by then the callback to take it back + * off and free might have happened already (notably in the case of an + * invalid peer name). That would mean we're adding something that had + * just been free, and we're likely to crash later. 
+ */ + list_add_tail (&(*friend)->uuid_list, &conf->peers); + + //restore needs to first create the list of peers, then create rpcs + //to keep track of quorum in race-free manner. In restore for each peer + //rpc-create calls rpc_notify when the friend-list is partially + //constructed, leading to wrong quorum calculations. + if (!restore) { + ret = glusterd_store_peerinfo (*friend); + if (ret == 0) { + synclock_unlock (&conf->big_lock); + ret = glusterd_friend_rpc_create (this, *friend, args); + synclock_lock (&conf->big_lock); + } + else { + gf_log (this->name, GF_LOG_ERROR, + "Failed to store peerinfo"); + } + } + + if (ret) { + (void) glusterd_friend_cleanup (*friend); + *friend = NULL; + } out: + gf_log (this->name, GF_LOG_INFO, "connect returned %d", ret); return ret; } -int32_t -glusterd_start_volume (rpcsvc_request_t *req, char *volname, int flags) +int +glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr, int port, + dict_t *dict) { - int32_t ret = -1; - glusterd_op_start_volume_ctx_t *ctx = NULL; - - GF_ASSERT (req); - GF_ASSERT (volname); - - ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_start_volume_ctx_t); - - if (!ctx) - goto out; + int ret = -1; + glusterd_peerinfo_t *peerinfo = NULL; + glusterd_peerctx_args_t args = {0}; + glusterd_friend_sm_event_t *event = NULL; - strncpy (ctx->volume_name, volname, GD_VOLUME_NAME_MAX); + GF_ASSERT (hoststr); - glusterd_op_set_op (GD_OP_START_VOLUME); + ret = glusterd_friend_find (NULL, (char *)hoststr, &peerinfo); - glusterd_op_set_ctx (GD_OP_START_VOLUME, ctx); - glusterd_op_set_ctx_free (GD_OP_START_VOLUME, _gf_true); - glusterd_op_set_req (req); + if (ret) { + gf_log ("glusterd", GF_LOG_INFO, "Unable to find peerinfo" + " for host: %s (%d)", hoststr, port); + args.mode = GD_MODE_ON; + args.req = req; + args.dict = dict; + ret = glusterd_friend_add ((char *)hoststr, port, + GD_FRIEND_STATE_DEFAULT, + NULL, &peerinfo, 0, &args); + if ((!ret) && (!peerinfo->connected)) { + ret = GLUSTERD_CONNECTION_AWAITED; + } - ret = glusterd_op_txn_begin (); + } else if (peerinfo->connected && + (GD_FRIEND_STATE_BEFRIENDED == peerinfo->state.state)) { + ret = glusterd_friend_hostname_update (peerinfo, (char*)hoststr, + _gf_false); + if (ret) + goto out; + //this is just to rename so inject local acc for cluster update + ret = glusterd_friend_sm_new_event (GD_FRIEND_EVENT_LOCAL_ACC, + &event); + if (!ret) { + event->peerinfo = peerinfo; + ret = glusterd_friend_sm_inject_event (event); + glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_SUCCESS, + NULL, (char*)hoststr, + port, dict); + } + } else { + glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_FRIEND, NULL, + (char*)hoststr, port, dict); + } out: + gf_log ("", GF_LOG_DEBUG, "returning %d", ret); return ret; } -int32_t -glusterd_stop_volume (rpcsvc_request_t *req, char *volname, int flags) +int +glusterd_deprobe_begin (rpcsvc_request_t *req, const char *hoststr, int port, + uuid_t uuid, dict_t *dict) { - int32_t ret = -1; - dict_t *ctx = NULL; - char *dup_volname = NULL; + int ret = -1; + glusterd_peerinfo_t *peerinfo = NULL; + glusterd_friend_sm_event_t *event = NULL; + glusterd_probe_ctx_t *ctx = NULL; + GF_ASSERT (hoststr); GF_ASSERT (req); - GF_ASSERT (volname); - ctx = dict_new (); + ret = glusterd_friend_find (uuid, (char *)hoststr, &peerinfo); - if (!ctx) - goto out; - - dup_volname = gf_strdup(volname); - if (!dup_volname) + if (ret) { + gf_log ("glusterd", GF_LOG_INFO, "Unable to find peerinfo" + " for host: %s %d", hoststr, port); goto out; + } - ret = dict_set_dynstr (ctx, 
"volname", dup_volname); - if (ret) + if (!peerinfo->rpc) { + //handle this case goto out; + } - ret = dict_set_int32 (ctx, "flags", flags); - if (ret) - goto out; + ret = glusterd_friend_sm_new_event + (GD_FRIEND_EVENT_INIT_REMOVE_FRIEND, &event); - glusterd_op_set_op (GD_OP_STOP_VOLUME); + if (ret) { + gf_log ("glusterd", GF_LOG_ERROR, + "Unable to get new event"); + return ret; + } - glusterd_op_set_ctx (GD_OP_STOP_VOLUME, ctx); - glusterd_op_set_ctx_free (GD_OP_STOP_VOLUME, _gf_true); - glusterd_op_set_req (req); + ctx = GF_CALLOC (1, sizeof(*ctx), gf_gld_mt_probe_ctx_t); - ret = glusterd_op_txn_begin (); + if (!ctx) { + goto out; + } -out: - if (ret && ctx) - dict_unref (ctx); - return ret; -} + ctx->hostname = gf_strdup (hoststr); + ctx->port = port; + ctx->req = req; + ctx->dict = dict; -int32_t -glusterd_delete_volume (rpcsvc_request_t *req, char *volname, int flags) -{ - int32_t ret = -1; - glusterd_op_delete_volume_ctx_t *ctx = NULL; + event->ctx = ctx; - GF_ASSERT (req); - GF_ASSERT (volname); + event->peerinfo = peerinfo; - ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_delete_volume_ctx_t); + ret = glusterd_friend_sm_inject_event (event); - if (!ctx) + if (ret) { + gf_log ("glusterd", GF_LOG_ERROR, "Unable to inject event %d, " + "ret = %d", event->event, ret); goto out; - - strncpy (ctx->volume_name, volname, GD_VOLUME_NAME_MAX); - - glusterd_op_set_op (GD_OP_DELETE_VOLUME); - - glusterd_op_set_ctx (GD_OP_DELETE_VOLUME, ctx); - glusterd_op_set_ctx_free (GD_OP_DELETE_VOLUME, _gf_true); - glusterd_op_set_req (req); - - ret = glusterd_op_txn_begin (); + } out: return ret; } -int32_t -glusterd_add_brick (rpcsvc_request_t *req, dict_t *dict) + +int +glusterd_xfer_friend_remove_resp (rpcsvc_request_t *req, char *hostname, int port) { - int32_t ret = -1; + gd1_mgmt_friend_rsp rsp = {{0}, }; + int32_t ret = -1; + xlator_t *this = NULL; + glusterd_conf_t *conf = NULL; - GF_ASSERT (req); - GF_ASSERT (dict); + GF_ASSERT (hostname); - glusterd_op_set_op (GD_OP_ADD_BRICK); + rsp.op_ret = 0; + this = THIS; + GF_ASSERT (this); - glusterd_op_set_ctx (GD_OP_ADD_BRICK, dict); - glusterd_op_set_ctx_free (GD_OP_ADD_BRICK, _gf_true); - glusterd_op_set_req (req); + conf = this->private; - ret = glusterd_op_txn_begin (); + uuid_copy (rsp.uuid, MY_UUID); + rsp.hostname = hostname; + rsp.port = port; + ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gd1_mgmt_friend_rsp); + gf_log ("glusterd", GF_LOG_INFO, + "Responded to %s (%d), ret: %d", hostname, port, ret); return ret; } -int32_t -glusterd_replace_brick (rpcsvc_request_t *req, dict_t *dict) + +int +glusterd_xfer_friend_add_resp (rpcsvc_request_t *req, char *myhostname, + char *remote_hostname, int port, int32_t op_ret, + int32_t op_errno) { - int32_t ret = -1; + gd1_mgmt_friend_rsp rsp = {{0}, }; + int32_t ret = -1; + xlator_t *this = NULL; + glusterd_conf_t *conf = NULL; - GF_ASSERT (req); - GF_ASSERT (dict); + GF_ASSERT (myhostname); - glusterd_op_set_op (GD_OP_REPLACE_BRICK); + this = THIS; + GF_ASSERT (this); - glusterd_op_set_ctx (GD_OP_REPLACE_BRICK, dict); + conf = this->private; - glusterd_op_set_ctx_free (GD_OP_REPLACE_BRICK, _gf_true); - glusterd_op_set_req (req); + uuid_copy (rsp.uuid, MY_UUID); + rsp.op_ret = op_ret; + rsp.op_errno = op_errno; + rsp.hostname = gf_strdup (myhostname); + rsp.port = port; - ret = glusterd_op_txn_begin (); + ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gd1_mgmt_friend_rsp); + gf_log ("glusterd", GF_LOG_INFO, + "Responded to %s (%d), ret: %d", 
remote_hostname, port, ret); + GF_FREE (rsp.hostname); return ret; } -int32_t -glusterd_set_volume (rpcsvc_request_t *req, dict_t *dict) +static void +set_probe_error_str (int op_ret, int op_errno, char *op_errstr, char *errstr, + size_t len, char *hostname, int port) { - int32_t ret = -1; + if ((op_errstr) && (strcmp (op_errstr, ""))) { + snprintf (errstr, len, "%s", op_errstr); + return; + } - GF_ASSERT (req); - GF_ASSERT (dict); + if (!op_ret) { + switch (op_errno) { + case GF_PROBE_LOCALHOST: + snprintf (errstr, len, "Probe on localhost not " + "needed"); + break; - glusterd_op_set_op (GD_OP_SET_VOLUME); + case GF_PROBE_FRIEND: + snprintf (errstr, len, "Host %s port %d already" + " in peer list", hostname, port); + break; - glusterd_op_set_ctx (GD_OP_SET_VOLUME, dict); + default: + if (op_errno != 0) + snprintf (errstr, len, "Probe returned " + "with unknown errno %d", + op_errno); + break; + } + } else { + switch (op_errno) { + case GF_PROBE_ANOTHER_CLUSTER: + snprintf (errstr, len, "%s is already part of " + "another cluster", hostname); + break; - glusterd_op_set_ctx_free (GD_OP_SET_VOLUME, _gf_true); + case GF_PROBE_VOLUME_CONFLICT: + snprintf (errstr, len, "Atleast one volume on " + "%s conflicts with existing volumes " + "in the cluster", hostname); + break; - glusterd_op_set_cli_op (GD_MGMT_CLI_SET_VOLUME); + case GF_PROBE_UNKNOWN_PEER: + snprintf (errstr, len, "%s responded with " + "'unknown peer' error, this could " + "happen if %s doesn't have localhost " + "in its peer database", hostname, + hostname); + break; - glusterd_op_set_req (req); + case GF_PROBE_ADD_FAILED: + snprintf (errstr, len, "Failed to add peer " + "information on %s", hostname); + break; - ret = glusterd_op_txn_begin (); + case GF_PROBE_SAME_UUID: + snprintf (errstr, len, "Peer uuid (host %s) is " + "same as local uuid", hostname); + break; - return ret; + case GF_PROBE_QUORUM_NOT_MET: + snprintf (errstr, len, "Cluster quorum is not " + "met. Changing peers is not allowed " + "in this state"); + break; + + default: + snprintf (errstr, len, "Probe returned with " + "unknown errno %d", op_errno); + break; + } + } } -int32_t -glusterd_remove_brick (rpcsvc_request_t *req, dict_t *dict) +int +glusterd_xfer_cli_probe_resp (rpcsvc_request_t *req, int32_t op_ret, + int32_t op_errno, char *op_errstr, char *hostname, + int port, dict_t *dict) { - int32_t ret = -1; + gf_cli_rsp rsp = {0,}; + int32_t ret = -1; + char errstr[2048] = {0,}; + char *cmd_str = NULL; + xlator_t *this = THIS; GF_ASSERT (req); - GF_ASSERT (dict); + GF_ASSERT (this); + + (void) set_probe_error_str (op_ret, op_errno, op_errstr, errstr, + sizeof (errstr), hostname, port); + + if (dict) { + ret = dict_get_str (dict, "cmd-str", &cmd_str); + if (ret) + gf_log (this->name, GF_LOG_ERROR, "Failed to get " + "command string"); + } + + rsp.op_ret = op_ret; + rsp.op_errno = op_errno; + rsp.op_errstr = (errstr[0] != '\0') ? errstr : ""; - glusterd_op_set_op (GD_OP_REMOVE_BRICK); + gf_cmd_log ("", "%s : %s %s %s", cmd_str, + (op_ret) ? "FAILED" : "SUCCESS", + (errstr[0] != '\0') ? ":" : " ", + (errstr[0] != '\0') ? 
errstr : " "); - glusterd_op_set_ctx (GD_OP_REMOVE_BRICK, dict); - glusterd_op_set_ctx_free (GD_OP_REMOVE_BRICK, _gf_true); - glusterd_op_set_req (req); + ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gf_cli_rsp); - ret = glusterd_op_txn_begin (); + if (dict) + dict_unref (dict); + gf_log (this->name, GF_LOG_DEBUG, "Responded to CLI, ret: %d",ret); return ret; } -int32_t -glusterd_log_filename (rpcsvc_request_t *req, dict_t *dict) +static void +set_deprobe_error_str (int op_ret, int op_errno, char *op_errstr, char *errstr, + size_t len, char *hostname) { - int32_t ret = -1; + if ((op_errstr) && (strcmp (op_errstr, ""))) { + snprintf (errstr, len, "%s", op_errstr); + return; + } - GF_ASSERT (req); - GF_ASSERT (dict); + if (op_ret) { + switch (op_errno) { + case GF_DEPROBE_LOCALHOST: + snprintf (errstr, len, "%s is localhost", + hostname); + break; - glusterd_op_set_op (GD_OP_LOG_FILENAME); - glusterd_op_set_ctx (GD_OP_LOG_FILENAME, dict); - glusterd_op_set_ctx_free (GD_OP_LOG_FILENAME, _gf_true); - glusterd_op_set_req (req); + case GF_DEPROBE_NOT_FRIEND: + snprintf (errstr, len, "%s is not part of " + "cluster", hostname); + break; - ret = glusterd_op_txn_begin (); + case GF_DEPROBE_BRICK_EXIST: + snprintf (errstr, len, "Brick(s) with the peer " + "%s exist in cluster", hostname); + break; - return ret; + case GF_DEPROBE_FRIEND_DOWN: + snprintf (errstr, len, "One of the peers is " + "probably down. Check with " + "'peer status'"); + break; + + case GF_DEPROBE_QUORUM_NOT_MET: + snprintf (errstr, len, "Cluster quorum is not " + "met. Changing peers is not allowed " + "in this state"); + break; + + default: + snprintf (errstr, len, "Detach returned with " + "unknown errno %d", op_errno); + break; + + } + } } -int32_t -glusterd_log_rotate (rpcsvc_request_t *req, dict_t *dict) +int +glusterd_xfer_cli_deprobe_resp (rpcsvc_request_t *req, int32_t op_ret, + int32_t op_errno, char *op_errstr, + char *hostname, dict_t *dict) { - int32_t ret = -1; + gf_cli_rsp rsp = {0,}; + int32_t ret = -1; + char *cmd_str = NULL; + char errstr[2048] = {0,}; GF_ASSERT (req); - GF_ASSERT (dict); - - glusterd_op_set_op (GD_OP_LOG_ROTATE); - glusterd_op_set_ctx (GD_OP_LOG_ROTATE, dict); - glusterd_op_set_ctx_free (GD_OP_LOG_ROTATE, _gf_true); - glusterd_op_set_req (req); - ret = glusterd_op_txn_begin (); + (void) set_deprobe_error_str (op_ret, op_errno, op_errstr, errstr, + sizeof (errstr), hostname); - return ret; -} + if (dict) { + ret = dict_get_str (dict, "cmd-str", &cmd_str); + if (ret) + gf_log (THIS->name, GF_LOG_ERROR, "Failed to get " + "command string"); + } -int32_t -glusterd_sync_volume (rpcsvc_request_t *req, dict_t *ctx) -{ - int32_t ret = -1; + rsp.op_ret = op_ret; + rsp.op_errno = op_errno; + rsp.op_errstr = (errstr[0] != '\0') ? errstr : ""; - GF_ASSERT (req); - GF_ASSERT (ctx); + gf_cmd_log ("", "%s : %s %s %s", cmd_str, + (op_ret) ? "FAILED" : "SUCCESS", + (errstr[0] != '\0') ? ":" : " ", + (errstr[0] != '\0') ? 
errstr : " "); - glusterd_op_set_op (GD_OP_SYNC_VOLUME); - glusterd_op_set_ctx (GD_OP_SYNC_VOLUME, ctx); - glusterd_op_set_ctx_free (GD_OP_SYNC_VOLUME, _gf_true); - glusterd_op_set_req (req); + ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gf_cli_rsp); - ret = glusterd_op_txn_begin (); + gf_log (THIS->name, GF_LOG_DEBUG, "Responded to CLI, ret: %d",ret); return ret; } - int32_t glusterd_list_friends (rpcsvc_request_t *req, dict_t *dict, int32_t flags) { @@ -3199,39 +3514,52 @@ glusterd_list_friends (rpcsvc_request_t *req, dict_t *dict, int32_t flags) int32_t count = 0; dict_t *friends = NULL; gf1_cli_peer_list_rsp rsp = {0,}; + char my_uuid_str[64] = {0,}; + char key[256] = {0,}; priv = THIS->private; GF_ASSERT (priv); - if (!list_empty (&priv->peers)) { - friends = dict_new (); - if (!friends) { - gf_log ("", GF_LOG_WARNING, "Out of Memory"); - goto out; - } - } else { - ret = 0; + friends = dict_new (); + if (!friends) { + gf_log ("", GF_LOG_WARNING, "Out of Memory"); goto out; } - - if (flags == GF_CLI_LIST_ALL) { - list_for_each_entry (entry, &priv->peers, uuid_list) { - count++; - ret = glusterd_add_peer_detail_to_dict (entry, + if (!list_empty (&priv->peers)) { + list_for_each_entry (entry, &priv->peers, uuid_list) { + count++; + ret = glusterd_add_peer_detail_to_dict (entry, friends, count); - if (ret) - goto out; + if (ret) + goto out; + } + } - } + if (flags == GF_CLI_LIST_POOL_NODES) { + count++; + snprintf (key, 256, "friend%d.uuid", count); + uuid_utoa_r (MY_UUID, my_uuid_str); + ret = dict_set_str (friends, key, my_uuid_str); + if (ret) + goto out; - ret = dict_set_int32 (friends, "count", count); + snprintf (key, 256, "friend%d.hostname", count); + ret = dict_set_str (friends, key, "localhost"); + if (ret) + goto out; - if (ret) - goto out; + snprintf (key, 256, "friend%d.connected", count); + ret = dict_set_int32 (friends, key, 1); + if (ret) + goto out; } + ret = dict_set_int32 (friends, "count", count); + if (ret) + goto out; + ret = dict_allocate_and_serialize (friends, &rsp.friends.friends_val, - (size_t *)&rsp.friends.friends_len); + &rsp.friends.friends_len); if (ret) goto out; @@ -3244,10 +3572,10 @@ out: rsp.op_ret = ret; - ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, - gf_xdr_serialize_cli_peer_list_rsp); - if (rsp.friends.friends_val) - GF_FREE (rsp.friends.friends_val); + glusterd_submit_reply (req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gf1_cli_peer_list_rsp); + ret = 0; + GF_FREE (rsp.friends.friends_val); return ret; } @@ -3260,7 +3588,7 @@ glusterd_get_volumes (rpcsvc_request_t *req, dict_t *dict, int32_t flags) glusterd_volinfo_t *entry = NULL; int32_t count = 0; dict_t *volumes = NULL; - gf1_cli_get_vol_rsp rsp = {0,}; + gf_cli_rsp rsp = {0,}; char *volname = NULL; priv = THIS->private; @@ -3337,9 +3665,8 @@ respond: ret = dict_set_int32 (volumes, "count", count); if (ret) goto out; - - ret = dict_allocate_and_serialize (volumes, &rsp.volumes.volumes_val, - (size_t *)&rsp.volumes.volumes_len); + ret = dict_allocate_and_serialize (volumes, &rsp.dict.dict_val, + &rsp.dict.dict_len); if (ret) goto out; @@ -3348,61 +3675,435 @@ respond: out: rsp.op_ret = ret; - ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL, - gf_xdr_serialize_cli_peer_list_rsp); + rsp.op_errstr = ""; + glusterd_submit_reply (req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gf_cli_rsp); + ret = 0; if (volumes) dict_unref (volumes); - if (rsp.volumes.volumes_val) - GF_FREE (rsp.volumes.volumes_val); + GF_FREE (rsp.dict.dict_val); return ret; } int 
-glusterd_rpc_notify (struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event, - void *data) +__glusterd_handle_status_volume (rpcsvc_request_t *req) +{ + int32_t ret = -1; + uint32_t cmd = 0; + dict_t *dict = NULL; + char *volname = 0; + gf_cli_req cli_req = {{0,}}; + glusterd_op_t cli_op = GD_OP_STATUS_VOLUME; + char err_str[2048] = {0,}; + xlator_t *this = NULL; + + GF_ASSERT (req); + this = THIS; + GF_ASSERT (this); + + ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); + if (ret < 0) { + //failed to decode msg; + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + if (cli_req.dict.dict_len > 0) { + dict = dict_new(); + if (!dict) + goto out; + ret = dict_unserialize (cli_req.dict.dict_val, + cli_req.dict.dict_len, &dict); + if (ret < 0) { + gf_log (this->name, GF_LOG_ERROR, "failed to " + "unserialize buffer"); + snprintf (err_str, sizeof (err_str), "Unable to decode " + "the command"); + goto out; + } + + } + + ret = dict_get_uint32 (dict, "cmd", &cmd); + if (ret) + goto out; + + if (!(cmd & GF_CLI_STATUS_ALL)) { + ret = dict_get_str (dict, "volname", &volname); + if (ret) { + snprintf (err_str, sizeof (err_str), "Unable to get " + "volume name"); + gf_log (this->name, GF_LOG_ERROR, "%s", err_str); + goto out; + } + gf_log (this->name, GF_LOG_INFO, + "Received status volume req for volume %s", volname); + + } + + ret = glusterd_op_begin_synctask (req, GD_OP_STATUS_VOLUME, dict); + +out: + + if (ret) { + if (err_str[0] == '\0') + snprintf (err_str, sizeof (err_str), + "Operation failed"); + ret = glusterd_op_send_cli_response (cli_op, ret, 0, req, + dict, err_str); + } + free (cli_req.dict.dict_val); + + return ret; +} + +int +glusterd_handle_status_volume (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_status_volume); +} + +int +__glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req) +{ + int32_t ret = -1; + gf_cli_req cli_req = {{0,}}; + glusterd_op_t cli_op = GD_OP_CLEARLOCKS_VOLUME; + char *volname = NULL; + dict_t *dict = NULL; + char err_str[2048] = {0,}; + xlator_t *this = NULL; + + GF_ASSERT (req); + this = THIS; + GF_ASSERT (this); + + ret = -1; + ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); + if (ret < 0) { + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + if (cli_req.dict.dict_len) { + dict = dict_new (); + + ret = dict_unserialize (cli_req.dict.dict_val, + cli_req.dict.dict_len, + &dict); + if (ret < 0) { + gf_log (this->name, GF_LOG_ERROR, + "failed to unserialize req-buffer to" + " dictionary"); + snprintf (err_str, sizeof (err_str), "unable to decode " + "the command"); + goto out; + } + + } else { + ret = -1; + gf_log (this->name, GF_LOG_ERROR, "Empty cli request."); + goto out; + } + + ret = dict_get_str (dict, "volname", &volname); + if (ret) { + snprintf (err_str, sizeof (err_str), "Unable to get volume " + "name"); + gf_log (this->name, GF_LOG_ERROR, "%s", err_str); + goto out; + } + + gf_log (this->name, GF_LOG_INFO, "Received clear-locks volume req " + "for volume %s", volname); + + ret = glusterd_op_begin_synctask (req, GD_OP_CLEARLOCKS_VOLUME, dict); + +out: + if (ret) { + if (err_str[0] == '\0') + snprintf (err_str, sizeof (err_str), + "Operation failed"); + ret = glusterd_op_send_cli_response (cli_op, ret, 0, req, + dict, err_str); + } + free (cli_req.dict.dict_val); + + return ret; +} + +int +glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler (req, + __glusterd_handle_cli_clearlocks_volume); +} + +static int 
+get_brickinfo_from_brickid (char *brickid, glusterd_brickinfo_t **brickinfo) +{ + glusterd_volinfo_t *volinfo = NULL; + char *volid_str = NULL; + char *brick = NULL; + char *brickid_dup = NULL; + uuid_t volid = {0}; + int ret = -1; + + brickid_dup = gf_strdup (brickid); + if (!brickid_dup) + goto out; + + volid_str = brickid_dup; + brick = strchr (brickid_dup, ':'); + *brick = '\0'; + brick++; + if (!volid_str || !brick) + goto out; + + uuid_parse (volid_str, volid); + ret = glusterd_volinfo_find_by_volume_id (volid, &volinfo); + if (ret) { + /* Check if it a snapshot volume */ + ret = glusterd_snap_volinfo_find_by_volume_id (volid, &volinfo); + if (ret) + goto out; + } + + ret = glusterd_volume_brickinfo_get_by_brick (brick, volinfo, + brickinfo); + if (ret) + goto out; + + ret = 0; +out: + GF_FREE (brickid_dup); + return ret; +} + +int +__glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata, + rpc_clnt_event_t event, void *data) { xlator_t *this = NULL; - char *handshake = "on"; glusterd_conf_t *conf = NULL; int ret = 0; - glusterd_peerinfo_t *peerinfo = NULL; + char *brickid = NULL; + glusterd_brickinfo_t *brickinfo = NULL; + + brickid = mydata; + if (!brickid) + return 0; + + ret = get_brickinfo_from_brickid (brickid, &brickinfo); + if (ret) + return 0; - peerinfo = mydata; this = THIS; + GF_ASSERT (this); conf = this->private; - + GF_ASSERT (conf); switch (event) { case RPC_CLNT_CONNECT: - { + gf_log (this->name, GF_LOG_DEBUG, "Connected to %s:%s", + brickinfo->hostname, brickinfo->path); + glusterd_set_brick_status (brickinfo, GF_BRICK_STARTED); + ret = default_notify (this, GF_EVENT_CHILD_UP, NULL); + + break; + + case RPC_CLNT_DISCONNECT: + if (GF_BRICK_STARTED == brickinfo->status) + gf_log (this->name, GF_LOG_INFO, "Disconnected from " + "%s:%s", brickinfo->hostname, brickinfo->path); + + glusterd_set_brick_status (brickinfo, GF_BRICK_STOPPED); + if (rpc_clnt_is_disabled (rpc)) + GF_FREE (brickid); + break; + + default: + gf_log (this->name, GF_LOG_TRACE, + "got some other RPC event %d", event); + break; + } + + return ret; +} + +int +glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata, + rpc_clnt_event_t event, void *data) +{ + return glusterd_big_locked_notify (rpc, mydata, event, data, + __glusterd_brick_rpc_notify); +} + +int +__glusterd_nodesvc_rpc_notify (struct rpc_clnt *rpc, void *mydata, + rpc_clnt_event_t event, void *data) +{ + xlator_t *this = NULL; + glusterd_conf_t *conf = NULL; + char *server = NULL; + int ret = 0; + this = THIS; + GF_ASSERT (this); + conf = this->private; + GF_ASSERT (conf); + + server = mydata; + if (!server) + return 0; + + switch (event) { + case RPC_CLNT_CONNECT: gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_CONNECT"); - peerinfo->connected = 1; - glusterd_friend_sm (); - glusterd_op_sm (); + (void) glusterd_nodesvc_set_online_status (server, _gf_true); + ret = default_notify (this, GF_EVENT_CHILD_UP, NULL); - if ((ret < 0) || (strcasecmp (handshake, "on"))) { - //ret = glusterd_handshake (this, peerinfo->rpc); + break; - } else { - //conf->rpc->connected = 1; - ret = default_notify (this, GF_EVENT_CHILD_UP, NULL); + case RPC_CLNT_DISCONNECT: + gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_DISCONNECT"); + (void) glusterd_nodesvc_set_online_status (server, _gf_false); + break; + + default: + gf_log (this->name, GF_LOG_TRACE, + "got some other RPC event %d", event); + break; + } + + return ret; +} + +int +glusterd_nodesvc_rpc_notify (struct rpc_clnt *rpc, void *mydata, + rpc_clnt_event_t event, void *data) +{ + return 
glusterd_big_locked_notify (rpc, mydata, event, data, + __glusterd_nodesvc_rpc_notify); +} + +int +glusterd_friend_remove_notify (glusterd_peerctx_t *peerctx) +{ + int ret = -1; + glusterd_friend_sm_event_t *new_event = NULL; + glusterd_peerinfo_t *peerinfo = peerctx->peerinfo; + rpcsvc_request_t *req = peerctx->args.req; + char *errstr = peerctx->errstr; + dict_t *dict = NULL; + + GF_ASSERT (peerctx); + + peerinfo = peerctx->peerinfo; + req = peerctx->args.req; + dict = peerctx->args.dict; + errstr = peerctx->errstr; + + ret = glusterd_friend_sm_new_event (GD_FRIEND_EVENT_REMOVE_FRIEND, + &new_event); + if (!ret) { + if (!req) { + gf_log (THIS->name, GF_LOG_WARNING, + "Unable to find the request for responding " + "to User (%s)", peerinfo->hostname); + goto out; } + + glusterd_xfer_cli_probe_resp (req, -1, ENOTCONN, errstr, + peerinfo->hostname, + peerinfo->port, dict); + + new_event->peerinfo = peerinfo; + ret = glusterd_friend_sm_inject_event (new_event); + + } else { + gf_log ("glusterd", GF_LOG_ERROR, + "Unable to create event for removing peer %s", + peerinfo->hostname); + } + +out: + return ret; +} + +int +__glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata, + rpc_clnt_event_t event, void *data) +{ + xlator_t *this = NULL; + glusterd_conf_t *conf = NULL; + int ret = 0; + glusterd_peerinfo_t *peerinfo = NULL; + glusterd_peerctx_t *peerctx = NULL; + gf_boolean_t quorum_action = _gf_false; + glusterd_volinfo_t *volinfo = NULL; + + peerctx = mydata; + if (!peerctx) + return 0; + + peerinfo = peerctx->peerinfo; + this = THIS; + conf = this->private; + + switch (event) { + case RPC_CLNT_CONNECT: + { + gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_CONNECT"); + peerinfo->connected = 1; + peerinfo->quorum_action = _gf_true; + + ret = glusterd_peer_dump_version (this, rpc, peerctx); + if (ret) + gf_log ("", GF_LOG_ERROR, "glusterd handshake failed"); break; } case RPC_CLNT_DISCONNECT: + { + gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_DISCONNECT %d", + peerinfo->state.state); + + if (peerinfo->connected) { + list_for_each_entry (volinfo, &conf->volumes, vol_list) { + ret = glusterd_mgmt_v3_unlock (volinfo->volname, + peerinfo->uuid, + "vol"); + if (ret) + gf_log (this->name, GF_LOG_TRACE, + "Lock not released for %s", + volinfo->volname); + } - //Inject friend disconnected here + ret = 0; + } - gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_DISCONNECT"); - peerinfo->connected = 0; + if ((peerinfo->quorum_contrib != QUORUM_DOWN) && + (peerinfo->state.state == GD_FRIEND_STATE_BEFRIENDED)) { + peerinfo->quorum_contrib = QUORUM_DOWN; + quorum_action = _gf_true; + peerinfo->quorum_action = _gf_false; + } - //default_notify (this, GF_EVENT_CHILD_DOWN, NULL); - break; + /* Remove peer if it is not a friend and connection/handshake + * fails, and notify cli. Happens only during probe. 
+ */ + if (peerinfo->state.state == GD_FRIEND_STATE_DEFAULT) { + glusterd_friend_remove_notify (peerctx); + goto out; + } + peerinfo->connected = 0; + break; + } default: gf_log (this->name, GF_LOG_TRACE, "got some other RPC event %d", event); @@ -3410,5 +4111,127 @@ glusterd_rpc_notify (struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event, break; } +out: + glusterd_friend_sm (); + glusterd_op_sm (); + if (quorum_action) + glusterd_do_quorum_action (); return ret; } + +int +glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata, + rpc_clnt_event_t event, void *data) +{ + return glusterd_big_locked_notify (rpc, mydata, event, data, + __glusterd_peer_rpc_notify); +} + +int +glusterd_null (rpcsvc_request_t *req) +{ + + return 0; +} + +rpcsvc_actor_t gd_svc_mgmt_actors[] = { + [GLUSTERD_MGMT_NULL] = { "NULL", GLUSTERD_MGMT_NULL, glusterd_null, NULL, 0, DRC_NA}, + [GLUSTERD_MGMT_CLUSTER_LOCK] = { "CLUSTER_LOCK", GLUSTERD_MGMT_CLUSTER_LOCK, glusterd_handle_cluster_lock, NULL, 0, DRC_NA}, + [GLUSTERD_MGMT_CLUSTER_UNLOCK] = { "CLUSTER_UNLOCK", GLUSTERD_MGMT_CLUSTER_UNLOCK, glusterd_handle_cluster_unlock, NULL, 0, DRC_NA}, + [GLUSTERD_MGMT_STAGE_OP] = { "STAGE_OP", GLUSTERD_MGMT_STAGE_OP, glusterd_handle_stage_op, NULL, 0, DRC_NA}, + [GLUSTERD_MGMT_COMMIT_OP] = { "COMMIT_OP", GLUSTERD_MGMT_COMMIT_OP, glusterd_handle_commit_op, NULL, 0, DRC_NA}, +}; + +struct rpcsvc_program gd_svc_mgmt_prog = { + .progname = "GlusterD svc mgmt", + .prognum = GD_MGMT_PROGRAM, + .progver = GD_MGMT_VERSION, + .numactors = GLUSTERD_MGMT_MAXVALUE, + .actors = gd_svc_mgmt_actors, + .synctask = _gf_true, +}; + +rpcsvc_actor_t gd_svc_peer_actors[] = { + [GLUSTERD_FRIEND_NULL] = { "NULL", GLUSTERD_MGMT_NULL, glusterd_null, NULL, 0, DRC_NA}, + [GLUSTERD_PROBE_QUERY] = { "PROBE_QUERY", GLUSTERD_PROBE_QUERY, glusterd_handle_probe_query, NULL, 0, DRC_NA}, + [GLUSTERD_FRIEND_ADD] = { "FRIEND_ADD", GLUSTERD_FRIEND_ADD, glusterd_handle_incoming_friend_req, NULL, 0, DRC_NA}, + [GLUSTERD_FRIEND_REMOVE] = { "FRIEND_REMOVE", GLUSTERD_FRIEND_REMOVE, glusterd_handle_incoming_unfriend_req, NULL, 0, DRC_NA}, + [GLUSTERD_FRIEND_UPDATE] = { "FRIEND_UPDATE", GLUSTERD_FRIEND_UPDATE, glusterd_handle_friend_update, NULL, 0, DRC_NA}, +}; + +struct rpcsvc_program gd_svc_peer_prog = { + .progname = "GlusterD svc peer", + .prognum = GD_FRIEND_PROGRAM, + .progver = GD_FRIEND_VERSION, + .numactors = GLUSTERD_FRIEND_MAXVALUE, + .actors = gd_svc_peer_actors, + .synctask = _gf_false, +}; + + + +rpcsvc_actor_t gd_svc_cli_actors[] = { + [GLUSTER_CLI_PROBE] = { "CLI_PROBE", GLUSTER_CLI_PROBE, glusterd_handle_cli_probe, NULL, 0, DRC_NA}, + [GLUSTER_CLI_CREATE_VOLUME] = { "CLI_CREATE_VOLUME", GLUSTER_CLI_CREATE_VOLUME, glusterd_handle_create_volume, NULL, 0, DRC_NA}, + [GLUSTER_CLI_DEFRAG_VOLUME] = { "CLI_DEFRAG_VOLUME", GLUSTER_CLI_DEFRAG_VOLUME, glusterd_handle_defrag_volume, NULL, 0, DRC_NA}, + [GLUSTER_CLI_DEPROBE] = { "FRIEND_REMOVE", GLUSTER_CLI_DEPROBE, glusterd_handle_cli_deprobe, NULL, 0, DRC_NA}, + [GLUSTER_CLI_LIST_FRIENDS] = { "LIST_FRIENDS", GLUSTER_CLI_LIST_FRIENDS, glusterd_handle_cli_list_friends, NULL, 0, DRC_NA}, + [GLUSTER_CLI_UUID_RESET] = { "UUID_RESET", GLUSTER_CLI_UUID_RESET, glusterd_handle_cli_uuid_reset, NULL, 0, DRC_NA}, + [GLUSTER_CLI_UUID_GET] = { "UUID_GET", GLUSTER_CLI_UUID_GET, glusterd_handle_cli_uuid_get, NULL, 0, DRC_NA}, + [GLUSTER_CLI_START_VOLUME] = { "START_VOLUME", GLUSTER_CLI_START_VOLUME, glusterd_handle_cli_start_volume, NULL, 0, DRC_NA}, + [GLUSTER_CLI_STOP_VOLUME] = { "STOP_VOLUME", 
GLUSTER_CLI_STOP_VOLUME, glusterd_handle_cli_stop_volume, NULL, 0, DRC_NA}, + [GLUSTER_CLI_DELETE_VOLUME] = { "DELETE_VOLUME", GLUSTER_CLI_DELETE_VOLUME, glusterd_handle_cli_delete_volume, NULL, 0, DRC_NA}, + [GLUSTER_CLI_GET_VOLUME] = { "GET_VOLUME", GLUSTER_CLI_GET_VOLUME, glusterd_handle_cli_get_volume, NULL, 0, DRC_NA}, + [GLUSTER_CLI_ADD_BRICK] = { "ADD_BRICK", GLUSTER_CLI_ADD_BRICK, glusterd_handle_add_brick, NULL, 0, DRC_NA}, + [GLUSTER_CLI_REPLACE_BRICK] = { "REPLACE_BRICK", GLUSTER_CLI_REPLACE_BRICK, glusterd_handle_replace_brick, NULL, 0, DRC_NA}, + [GLUSTER_CLI_REMOVE_BRICK] = { "REMOVE_BRICK", GLUSTER_CLI_REMOVE_BRICK, glusterd_handle_remove_brick, NULL, 0, DRC_NA}, + [GLUSTER_CLI_LOG_ROTATE] = { "LOG FILENAME", GLUSTER_CLI_LOG_ROTATE, glusterd_handle_log_rotate, NULL, 0, DRC_NA}, + [GLUSTER_CLI_SET_VOLUME] = { "SET_VOLUME", GLUSTER_CLI_SET_VOLUME, glusterd_handle_set_volume, NULL, 0, DRC_NA}, + [GLUSTER_CLI_SYNC_VOLUME] = { "SYNC_VOLUME", GLUSTER_CLI_SYNC_VOLUME, glusterd_handle_sync_volume, NULL, 0, DRC_NA}, + [GLUSTER_CLI_RESET_VOLUME] = { "RESET_VOLUME", GLUSTER_CLI_RESET_VOLUME, glusterd_handle_reset_volume, NULL, 0, DRC_NA}, + [GLUSTER_CLI_FSM_LOG] = { "FSM_LOG", GLUSTER_CLI_FSM_LOG, glusterd_handle_fsm_log, NULL, 0, DRC_NA}, + [GLUSTER_CLI_GSYNC_SET] = { "GSYNC_SET", GLUSTER_CLI_GSYNC_SET, glusterd_handle_gsync_set, NULL, 0, DRC_NA}, + [GLUSTER_CLI_PROFILE_VOLUME] = { "STATS_VOLUME", GLUSTER_CLI_PROFILE_VOLUME, glusterd_handle_cli_profile_volume, NULL, 0, DRC_NA}, + [GLUSTER_CLI_QUOTA] = { "QUOTA", GLUSTER_CLI_QUOTA, glusterd_handle_quota, NULL, 0, DRC_NA}, + [GLUSTER_CLI_GETWD] = { "GETWD", GLUSTER_CLI_GETWD, glusterd_handle_getwd, NULL, 1, DRC_NA}, + [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME", GLUSTER_CLI_STATUS_VOLUME, glusterd_handle_status_volume, NULL, 0, DRC_NA}, + [GLUSTER_CLI_MOUNT] = { "MOUNT", GLUSTER_CLI_MOUNT, glusterd_handle_mount, NULL, 1, DRC_NA}, + [GLUSTER_CLI_UMOUNT] = { "UMOUNT", GLUSTER_CLI_UMOUNT, glusterd_handle_umount, NULL, 1, DRC_NA}, + [GLUSTER_CLI_HEAL_VOLUME] = { "HEAL_VOLUME", GLUSTER_CLI_HEAL_VOLUME, glusterd_handle_cli_heal_volume, NULL, 0, DRC_NA}, + [GLUSTER_CLI_STATEDUMP_VOLUME] = {"STATEDUMP_VOLUME", GLUSTER_CLI_STATEDUMP_VOLUME, glusterd_handle_cli_statedump_volume, NULL, 0, DRC_NA}, + [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", GLUSTER_CLI_LIST_VOLUME, glusterd_handle_cli_list_volume, NULL, 0, DRC_NA}, + [GLUSTER_CLI_CLRLOCKS_VOLUME] = {"CLEARLOCKS_VOLUME", GLUSTER_CLI_CLRLOCKS_VOLUME, glusterd_handle_cli_clearlocks_volume, NULL, 0, DRC_NA}, + [GLUSTER_CLI_COPY_FILE] = {"COPY_FILE", GLUSTER_CLI_COPY_FILE, glusterd_handle_copy_file, NULL, 0, DRC_NA}, + [GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", GLUSTER_CLI_SYS_EXEC, glusterd_handle_sys_exec, NULL, 0, DRC_NA}, + [GLUSTER_CLI_SNAP] = {"SNAP", GLUSTER_CLI_SNAP, glusterd_handle_snapshot, NULL, 0, DRC_NA}, +}; + +struct rpcsvc_program gd_svc_cli_prog = { + .progname = "GlusterD svc cli", + .prognum = GLUSTER_CLI_PROGRAM, + .progver = GLUSTER_CLI_VERSION, + .numactors = GLUSTER_CLI_MAXVALUE, + .actors = gd_svc_cli_actors, + .synctask = _gf_true, +}; + +/* This is a minimal RPC prog, which contains only the readonly RPC procs from + * the cli rpcsvc + */ +rpcsvc_actor_t gd_svc_cli_actors_ro[] = { + [GLUSTER_CLI_LIST_FRIENDS] = { "LIST_FRIENDS", GLUSTER_CLI_LIST_FRIENDS, glusterd_handle_cli_list_friends, NULL, 0, DRC_NA}, + [GLUSTER_CLI_UUID_GET] = { "UUID_GET", GLUSTER_CLI_UUID_GET, glusterd_handle_cli_uuid_get, NULL, 0, DRC_NA}, + [GLUSTER_CLI_GET_VOLUME] = { "GET_VOLUME", 
GLUSTER_CLI_GET_VOLUME, glusterd_handle_cli_get_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_GETWD] = { "GETWD", GLUSTER_CLI_GETWD, glusterd_handle_getwd, NULL, 1, DRC_NA},
+ [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME", GLUSTER_CLI_STATUS_VOLUME, glusterd_handle_status_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", GLUSTER_CLI_LIST_VOLUME, glusterd_handle_cli_list_volume, NULL, 0, DRC_NA},
+};
+
+struct rpcsvc_program gd_svc_cli_prog_ro = {
+ .progname = "GlusterD svc cli read-only",
+ .prognum = GLUSTER_CLI_PROGRAM,
+ .progver = GLUSTER_CLI_VERSION,
+ .numactors = GLUSTER_CLI_MAXVALUE,
+ .actors = gd_svc_cli_actors_ro,
+ .synctask = _gf_true,
+};
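
For reference, the path check in __glusterd_handle_umount above only lets the broker unmount paths whose parent directory is exactly <mountbroker-root>/<hive directory>; anything outside that subtree is rejected with EACCES. Below is a minimal standalone sketch of the same containment test written against plain libc instead of the tree's strtail() helper; the hive directory name and the paths used in main() are placeholders, not values taken from glusterd.

    #include <libgen.h>     /* dirname */
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Placeholder for the broker hive subdirectory; the real name comes
     * from glusterd-mountbroker.h. */
    #define HIVE_DIR "mb_hive"

    /* Return true only if 'path' sits directly inside <root>/<HIVE_DIR>. */
    static bool
    umount_path_allowed (const char *root, const char *path)
    {
            bool    ok   = false;
            char   *copy = strdup (path);   /* dirname() may modify its argument */
            char   *pdir = NULL;
            size_t  rlen = 0;

            if (!copy)
                    return false;

            pdir = dirname (copy);
            rlen = strlen (root);

            /* parent directory must be exactly "<root>/<HIVE_DIR>" */
            if (strncmp (pdir, root, rlen) == 0 &&
                pdir[rlen] == '/' &&
                strcmp (pdir + rlen + 1, HIVE_DIR) == 0)
                    ok = true;

            free (copy);
            return ok;
    }

    int
    main (void)
    {
            /* allowed: lives directly under the hive */
            printf ("%d\n", umount_path_allowed ("/var/mountbroker-root",
                            "/var/mountbroker-root/mb_hive/mnt-xyz"));
            /* rejected: outside the broker subtree */
            printf ("%d\n", umount_path_allowed ("/var/mountbroker-root",
                            "/tmp/evil"));
            return 0;
    }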
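The same handler drops glusterd's big lock around the blocking runner_run() call and re-acquires it before replying, so a slow external umount cannot stall every other serialized handler. A generic sketch of that unlock-around-blocking-work shape, using a plain pthread mutex and system(3) in place of the synclock and runner APIs (all names below are illustrative, not glusterd symbols):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Run a blocking external command without holding the big lock.
     * The caller holds big_lock on entry and gets it back on return. */
    static int
    run_unlocked (const char *cmd)
    {
            int status = -1;

            pthread_mutex_unlock (&big_lock);
            status = system (cmd);          /* blocking work happens unlocked */
            pthread_mutex_lock (&big_lock);

            return status;
    }

    int
    main (void)
    {
            int rc = -1;

            pthread_mutex_lock (&big_lock);
            rc = run_unlocked ("true");     /* placeholder command */
            pthread_mutex_unlock (&big_lock);

            return (rc == 0) ? EXIT_SUCCESS : EXIT_FAILURE;
    }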
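get_brickinfo_from_brickid() above splits a brick id at its first ':' into a volume UUID string and a brick identifier before looking both up (first among regular volumes, then among snapshot volumes). A small standalone sketch of that split, assuming the usual "<volume-uuid>:<rest>" layout and, unlike the hunk above, checking that the separator exists before dereferencing it; the sample id in main() is made up:

    #include <stdio.h>
    #include <string.h>

    /* Split "<volume-uuid>:<rest>" in place at the first ':'.
     * On success, *volid and *brick point into 'brickid_dup'. */
    static int
    split_brickid (char *brickid_dup, char **volid, char **brick)
    {
            char *sep = strchr (brickid_dup, ':');

            if (!sep || sep[1] == '\0')     /* no separator, or empty brick part */
                    return -1;

            *sep   = '\0';
            *volid = brickid_dup;
            *brick = sep + 1;
            return 0;
    }

    int
    main (void)
    {
            char  id[]  = "2b1e54b8-3f5a-4d0c-9a77-0e2f6c1d8b90:myhost:/bricks/b1";
            char *volid = NULL;
            char *brick = NULL;

            if (split_brickid (id, &volid, &brick) == 0)
                    printf ("volume id: %s\nbrick    : %s\n", volid, brick);
            return 0;
    }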
