author     Kaushal M <kaushal@redhat.com>                2015-01-08 19:24:59 +0530
committer  Krishnan Parthasarathi <kparthas@redhat.com>  2015-03-16 02:19:14 -0700
commit     c7785f78420c94220954eef538ed4698713ebcdb (patch)
tree       b10ad0468f21835121262463f517cad58614d49a /xlators/mgmt/glusterd/src/glusterd-handler.c
parent     7d8be3613f7384f5118f26e194fe7c64ea69d11c (diff)
glusterd: Protect the peer list and peerinfos with RCU.

The peer list and the peerinfo objects are now protected using RCU.
Design patterns described in Paul McKenney's RCU dissertation [1]
(sections 5 and 6) have been used to convert existing non-RCU protected
code to RCU protected code.

Currently, we are only targeting guaranteeing the existence of the
peerinfo objects, i.e., we are only looking to protect deletes, not all
updates. We chose this, as protecting all updates is a much more
complex task.

The steps used to accomplish this are:

1. Remove all long-lived direct references to peerinfo objects (apart
   from the peerinfo list). This includes references in
   glusterd_peerctx_t (RPC), glusterd_friend_sm_event_t (friend state
   machine) and others. This way no one holds a reference to a deleted
   peerinfo object.

2. Replace the direct references with indirect references, i.e., use
   the peer uuid and peer hostname as indirect references to the
   peerinfo object. Any reader or updater now uses the indirect
   references to get to the actual peerinfo object, using
   glusterd_peerinfo_find. Cases where a peerinfo cannot be found are
   handled gracefully.

3. Readers get and use the peerinfo object only within an RCU read
   critical section. This prevents the object from being deleted/freed
   while in actual use.

4. The deletion of a peerinfo object is done in an ordered manner
   (glusterd_peerinfo_destroy). The object is first removed from the
   peerinfo list using an atomic list remove, but the list head is not
   reset, to allow existing list readers to complete correctly. We wait
   for readers to complete before resetting the list head. This removes
   the object from the list completely. After this, no new readers can
   get a reference to the object, and it can be freed.

This change was developed on the git branch at [2]. This commit is a
combination of the following commits on the development branch.

  d7999b9 Protect the glusterd_conf_t->peers_list with RCU.
  0da85c4 Synchronize before INITing peerinfo list head after removing
          from list.
  32ec28a Add missing rcu_read_unlock
  8fed0b8 Correctly exit read critical section once peer is found.
  63db857 Free peerctx only on rpc destruction
  56eff26 Cleanup style issues
  e5f38b0 Indirection for events and friend_sm
  3c84ac4 In __glusterd_probe_cbk goto unlock only if peer already exists
  141d855 Address review comments on 9695/1
  aaeefed Protection during peer updates
  6eda33d Revert "Synchronize before INITing peerinfo list head after
          removing from list."
  f69db96 Remove unneeded line
  b43d2ec Address review comments on 9695/4
  7781921 Address review comments on 9695/5
  eb6467b Add some missing semi-colons
  328a47f Remove synchronize_rcu from glusterd_friend_sm_transition_state
  186e429 Run part of glusterd_friend_remove in critical section
  55c0a2e Fix gluster (peer status/ pool list) with no peers
  93f8dcf Use call_rcu to free peerinfo
  c36178c Introduce composite struct, gd_rcu_head

[1]: http://www.rdrop.com/~paulmck/RCU/RCUdissertation.2004.07.14e1.pdf
[2]: https://github.com/kshlm/glusterfs/tree/urcu

Change-Id: Ic1480e59c86d41d25a6a3d159aa3e11fbb3cbc7b
BUG: 1191030
Signed-off-by: Kaushal M <kaushal@redhat.com>
Reviewed-on: http://review.gluster.org/9695
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-by: Anand Nekkunti <anekkunt@redhat.com>
Reviewed-by: Krishnan Parthasarathi <kparthas@redhat.com>
Tested-by: Krishnan Parthasarathi <kparthas@redhat.com>
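
As a rough illustration of the read-side and ordered-deletion pattern
described above, here is a minimal sketch using the userspace-RCU
(liburcu) list API. The peer struct, peers list and function names are
simplified stand-ins for this sketch, not the actual glusterd types:

#include <stdlib.h>
#include <string.h>
#include <urcu-bp.h>        /* rcu_read_lock/unlock, synchronize_rcu */
#include <urcu/list.h>      /* struct cds_list_head, CDS_LIST_HEAD  */
#include <urcu/rculist.h>   /* cds_list_*_rcu helpers               */

/* Simplified stand-in for glusterd_peerinfo_t. */
struct peer {
        char                 uuid[37];
        struct cds_list_head list;
};

static CDS_LIST_HEAD (peers);   /* stand-in for conf->peers */

/* Reader: look the peer up and use it only inside a read-side critical
 * section, so a concurrent deleter cannot free it while it is in use. */
static int
peer_is_known (const char *uuid)
{
        struct peer *p     = NULL;
        int          found = 0;

        rcu_read_lock ();
        cds_list_for_each_entry_rcu (p, &peers, list) {
                if (strcmp (p->uuid, uuid) == 0) {
                        found = 1;
                        break;
                }
        }
        rcu_read_unlock ();

        return found;
}

/* Deleter: unlink from the list first, wait for pre-existing readers to
 * finish, and only then free. The wait and the free must happen outside
 * any read-side critical section. */
static void
peer_destroy (struct peer *p)
{
        cds_list_del_rcu (&p->list);
        synchronize_rcu ();     /* or defer the free with call_rcu() */
        free (p);
}

The key property is that a reader which started traversing before
cds_list_del_rcu() can still finish safely, because the memory is only
reclaimed after synchronize_rcu() (or, as the later commits in this
series do for peerinfo, from a call_rcu() callback).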
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-handler.c')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c  |  189
1 file changed, 150 insertions, 39 deletions
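
Much of the diff below replaces long-lived peerinfo pointers (in friend
state-machine events and peer RPC contexts) with a (uuid, hostname)
pair that is resolved on demand under the read lock, handling the case
where the peer has already been deleted. A minimal sketch of that
indirection, again with hypothetical stand-in types rather than the
real glusterd structs:

#include <string.h>
#include <urcu-bp.h>
#include <urcu/list.h>
#include <urcu/rculist.h>

/* Stand-ins; the real code uses glusterd_peerinfo_t,
 * glusterd_friend_sm_event_t and glusterd_peerinfo_find. */
struct peer {
        char                 uuid[37];
        char                 hostname[256];
        struct cds_list_head list;
};

static CDS_LIST_HEAD (peers);

struct event {
        char uuid[37];          /* indirect reference: peer id ...      */
        char hostname[256];     /* ... and peer hostname, not a pointer */
};

/* Must be called from within a read-side critical section. */
static struct peer *
peer_find (const char *uuid, const char *hostname)
{
        struct peer *p = NULL;

        cds_list_for_each_entry_rcu (p, &peers, list) {
                if ((uuid && strcmp (p->uuid, uuid) == 0) ||
                    (hostname && strcmp (p->hostname, hostname) == 0))
                        return p;
        }
        return NULL;
}

/* Consumer: resolve the indirect reference under the read lock and cope
 * gracefully with the peer having been deleted in the meantime. */
static int
handle_event (struct event *ev)
{
        struct peer *p   = NULL;
        int          ret = -1;

        rcu_read_lock ();
        p = peer_find (ev->uuid, ev->hostname);
        if (p != NULL) {
                /* use p only while the read lock is held */
                ret = 0;
        }
        rcu_read_unlock ();

        return ret;
}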
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index eaa05969656..cc97baf6f21 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -103,6 +103,9 @@ glusterd_handle_friend_req (rpcsvc_request_t *req, uuid_t uuid,
port = GF_DEFAULT_BASE_PORT;
ret = glusterd_remote_hostname_get (req, rhost, sizeof (rhost));
+
+ rcu_read_lock ();
+
peerinfo = glusterd_peerinfo_find (uuid, rhost);
if (peerinfo == NULL) {
@@ -120,10 +123,11 @@ glusterd_handle_friend_req (rpcsvc_request_t *req, uuid_t uuid,
if (ret) {
gf_log ("", GF_LOG_ERROR, "event generation failed: %d", ret);
- return ret;
+ goto out;
}
- event->peerinfo = peerinfo;
+ event->peername = gf_strdup (peerinfo->hostname);
+ uuid_copy (event->peerid, peerinfo->uuid);
ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_friend_req_ctx_t);
@@ -164,9 +168,13 @@ glusterd_handle_friend_req (rpcsvc_request_t *req, uuid_t uuid,
}
ret = 0;
+ if (peerinfo && (0 == peerinfo->connected))
+ ret = GLUSTERD_CONNECTION_AWAITED;
out:
- if (0 != ret) {
+ rcu_read_unlock ();
+
+ if (ret && (ret != GLUSTERD_CONNECTION_AWAITED)) {
if (ctx && ctx->hostname)
GF_FREE (ctx->hostname);
GF_FREE (ctx);
@@ -178,11 +186,12 @@ out:
} else {
free (friend_req->vols.vols_val);
}
+ if (event)
+ GF_FREE (event->peername);
GF_FREE (event);
- } else {
- if (peerinfo && (0 == peerinfo->connected))
- ret = GLUSTERD_CONNECTION_AWAITED;
}
+
+
return ret;
}
@@ -198,6 +207,8 @@ glusterd_handle_unfriend_req (rpcsvc_request_t *req, uuid_t uuid,
if (!port)
port = GF_DEFAULT_BASE_PORT;
+ rcu_read_lock ();
+
peerinfo = glusterd_peerinfo_find (uuid, hostname);
if (peerinfo == NULL) {
@@ -214,10 +225,11 @@ glusterd_handle_unfriend_req (rpcsvc_request_t *req, uuid_t uuid,
if (ret) {
gf_log ("", GF_LOG_ERROR, "event generation failed: %d", ret);
- return ret;
+ goto out;
}
- event->peerinfo = peerinfo;
+ event->peername = gf_strdup (hostname);
+ uuid_copy (event->peerid, uuid);
ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_friend_req_ctx_t);
@@ -245,10 +257,15 @@ glusterd_handle_unfriend_req (rpcsvc_request_t *req, uuid_t uuid,
ret = 0;
out:
+ rcu_read_unlock ();
+
if (0 != ret) {
if (ctx && ctx->hostname)
GF_FREE (ctx->hostname);
GF_FREE (ctx);
+ if (event)
+ GF_FREE (event->peername);
+ GF_FREE (event);
}
return ret;
@@ -698,7 +715,10 @@ __glusterd_handle_cluster_lock (rpcsvc_request_t *req)
gf_log (this->name, GF_LOG_DEBUG, "Received LOCK from uuid: %s",
uuid_utoa (lock_req.uuid));
- if (glusterd_peerinfo_find_by_uuid (lock_req.uuid) == NULL) {
+ rcu_read_lock ();
+ ret = (glusterd_peerinfo_find_by_uuid (lock_req.uuid) == NULL);
+ rcu_read_unlock ();
+ if (ret) {
gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
"belong to the cluster. Ignoring request.",
uuid_utoa (lock_req.uuid));
@@ -846,7 +866,10 @@ __glusterd_handle_stage_op (rpcsvc_request_t *req)
gf_log (this->name, GF_LOG_DEBUG, "transaction ID = %s",
uuid_utoa (*txn_id));
- if (glusterd_peerinfo_find_by_uuid (op_req.uuid) == NULL) {
+ rcu_read_lock ();
+ ret = (glusterd_peerinfo_find_by_uuid (op_req.uuid) == NULL);
+ rcu_read_unlock ();
+ if (ret) {
gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
"belong to the cluster. Ignoring request.",
uuid_utoa (op_req.uuid));
@@ -922,7 +945,10 @@ __glusterd_handle_commit_op (rpcsvc_request_t *req)
goto out;
}
- if (glusterd_peerinfo_find_by_uuid (op_req.uuid) == NULL) {
+ rcu_read_lock ();
+ ret = (glusterd_peerinfo_find_by_uuid (op_req.uuid) == NULL);
+ rcu_read_unlock ();
+ if (ret) {
gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
"belong to the cluster. Ignoring request.",
uuid_utoa (op_req.uuid));
@@ -1037,14 +1063,22 @@ __glusterd_handle_cli_probe (rpcsvc_request_t *req)
goto out;
}
+ rcu_read_lock ();
+
peerinfo = glusterd_peerinfo_find_by_hostname (hostname);
- if (peerinfo && gd_peer_has_address (peerinfo, hostname)) {
+ ret = (peerinfo && gd_peer_has_address (peerinfo, hostname));
+
+ rcu_read_unlock ();
+
+ if (ret) {
gf_log ("glusterd", GF_LOG_DEBUG, "Probe host %s port %d "
"already a peer", hostname, port);
glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_FRIEND, NULL,
hostname, port, dict);
+ ret = 0;
goto out;
}
+
ret = glusterd_probe_begin (req, hostname, port, dict, &op_errno);
if (ret == GLUSTERD_CONNECTION_AWAITED) {
@@ -1931,27 +1965,32 @@ __glusterd_handle_fsm_log (rpcsvc_request_t *req)
goto out;
}
+ dict = dict_new ();
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+
if (strcmp ("", cli_req.name) == 0) {
this = THIS;
conf = this->private;
- log = &conf->op_sm_log;
+ ret = glusterd_sm_tr_log_add_to_dict (dict, &conf->op_sm_log);
} else {
+ rcu_read_lock ();
+
peerinfo = glusterd_peerinfo_find_by_hostname (cli_req.name);
if (!peerinfo) {
+ ret = -1;
snprintf (msg, sizeof (msg), "%s is not a peer",
cli_req.name);
- goto out;
+ } else {
+ ret = glusterd_sm_tr_log_add_to_dict
+ (dict, &peerinfo->sm_log);
}
- log = &peerinfo->sm_log;
- }
- dict = dict_new ();
- if (!dict) {
- ret = -1;
- goto out;
+ rcu_read_unlock ();
}
- ret = glusterd_sm_tr_log_add_to_dict (dict, log);
out:
(void)glusterd_fsm_log_send_resp (req, ret, msg, dict);
free (cli_req.name);//malloced by xdr
@@ -2089,7 +2128,10 @@ __glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
gf_log (this->name, GF_LOG_DEBUG,
"Received UNLOCK from uuid: %s", uuid_utoa (unlock_req.uuid));
- if (glusterd_peerinfo_find_by_uuid (unlock_req.uuid) == NULL) {
+ rcu_read_lock ();
+ ret = (glusterd_peerinfo_find_by_uuid (unlock_req.uuid) == NULL);
+ rcu_read_unlock ();
+ if (ret) {
gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
"belong to the cluster. Ignoring request.",
uuid_utoa (unlock_req.uuid));
@@ -2370,12 +2412,18 @@ __glusterd_handle_friend_update (rpcsvc_request_t *req)
goto out;
}
+ ret = 0;
+ rcu_read_lock ();
if (glusterd_peerinfo_find (friend_req.uuid, NULL) == NULL) {
ret = -1;
+ }
+ rcu_read_unlock ();
+ if (ret) {
gf_log ("", GF_LOG_CRITICAL, "Received friend update request "
"from unknown peer %s", uuid_utoa (friend_req.uuid));
goto out;
}
+
gf_log ("glusterd", GF_LOG_INFO,
"Received friend update from uuid: %s", uuid_utoa (friend_req.uuid));
@@ -2428,6 +2476,7 @@ __glusterd_handle_friend_update (rpcsvc_request_t *req)
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "friend%d", i);
+ rcu_read_lock ();
peerinfo = glusterd_peerinfo_find (uuid, NULL);
if (peerinfo == NULL) {
/* Create a new peer and add it to the list as there is
@@ -2439,7 +2488,7 @@ __glusterd_handle_friend_update (rpcsvc_request_t *req)
gf_log (this->name, GF_LOG_ERROR,
"Could not create peerinfo from dict "
"for prefix %s", key);
- goto out;
+ goto unlock;
}
/* As this is a new peer, it should be added as a
@@ -2459,9 +2508,12 @@ __glusterd_handle_friend_update (rpcsvc_request_t *req)
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Failed to "
"update peer %s", peerinfo->hostname);
- goto out;
}
}
+unlock:
+ rcu_read_unlock ();
+ if (ret)
+ break;
peerinfo = NULL;
i++;
@@ -2549,6 +2601,8 @@ __glusterd_handle_probe_query (rpcsvc_request_t *req)
gf_log ("", GF_LOG_ERROR, "Unable to get the remote hostname");
goto out;
}
+
+ rcu_read_lock ();
peerinfo = glusterd_peerinfo_find (probe_req.uuid, remote_hostname);
if ((peerinfo == NULL) && (!cds_list_empty (&conf->peers))) {
rsp.op_ret = -1;
@@ -2566,6 +2620,7 @@ __glusterd_handle_probe_query (rpcsvc_request_t *req)
rsp.op_errno = GF_PROBE_ADD_FAILED;
}
}
+ rcu_read_unlock ();
respond:
uuid_copy (rsp.uuid, MY_UUID);
@@ -2882,6 +2937,8 @@ glusterd_friend_remove (uuid_t uuid, char *hostname)
int ret = -1;
glusterd_peerinfo_t *peerinfo = NULL;
+ rcu_read_lock ();
+
peerinfo = glusterd_peerinfo_find (uuid, hostname);
if (peerinfo == NULL)
goto out;
@@ -2889,6 +2946,11 @@ glusterd_friend_remove (uuid_t uuid, char *hostname)
ret = glusterd_friend_remove_cleanup_vols (peerinfo->uuid);
if (ret)
gf_log (THIS->name, GF_LOG_WARNING, "Volumes cleanup failed");
+
+ rcu_read_unlock ();
+ /* Giving up the critical section here as glusterd_peerinfo_cleanup must
+ * be called from outside a critical section
+ */
ret = glusterd_peerinfo_cleanup (peerinfo);
out:
gf_log ("", GF_LOG_DEBUG, "returning %d", ret);
@@ -3008,7 +3070,8 @@ glusterd_friend_rpc_create (xlator_t *this, glusterd_peerinfo_t *peerinfo,
if (args)
peerctx->args = *args;
- peerctx->peerinfo = peerinfo;
+ uuid_copy (peerctx->peerid, peerinfo->uuid);
+ peerctx->peername = gf_strdup (peerinfo->hostname);
ret = glusterd_transport_inet_options_build (&options,
peerinfo->hostname,
@@ -3079,7 +3142,7 @@ glusterd_friend_add (const char *hoststr, int port,
* invalid peer name). That would mean we're adding something that had
* just been free, and we're likely to crash later.
*/
- cds_list_add_tail (&(*friend)->uuid_list, &conf->peers);
+ cds_list_add_tail_rcu (&(*friend)->uuid_list, &conf->peers);
//restore needs to first create the list of peers, then create rpcs
//to keep track of quorum in race-free manner. In restore for each peer
@@ -3132,7 +3195,7 @@ glusterd_friend_add_from_peerinfo (glusterd_peerinfo_t *friend,
* invalid peer name). That would mean we're adding something that had
* just been free, and we're likely to crash later.
*/
- cds_list_add_tail (&friend->uuid_list, &conf->peers);
+ cds_list_add_tail_rcu (&friend->uuid_list, &conf->peers);
//restore needs to first create the list of peers, then create rpcs
//to keep track of quorum in race-free manner. In restore for each peer
@@ -3165,6 +3228,7 @@ glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
GF_ASSERT (hoststr);
+ rcu_read_lock ();
peerinfo = glusterd_peerinfo_find (NULL, hoststr);
if (peerinfo == NULL) {
@@ -3196,7 +3260,9 @@ glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
ret = glusterd_friend_sm_new_event (GD_FRIEND_EVENT_LOCAL_ACC,
&event);
if (!ret) {
- event->peerinfo = peerinfo;
+ event->peername = gf_strdup (peerinfo->hostname);
+ uuid_copy (event->peerid, peerinfo->uuid);
+
ret = glusterd_friend_sm_inject_event (event);
glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_SUCCESS,
NULL, (char*)hoststr,
@@ -3208,6 +3274,7 @@ glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
}
out:
+ rcu_read_unlock ();
gf_log ("", GF_LOG_DEBUG, "returning %d", ret);
return ret;
}
@@ -3224,8 +3291,9 @@ glusterd_deprobe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
GF_ASSERT (hoststr);
GF_ASSERT (req);
- peerinfo = glusterd_peerinfo_find (uuid, hoststr);
+ rcu_read_lock ();
+ peerinfo = glusterd_peerinfo_find (uuid, hoststr);
if (peerinfo == NULL) {
ret = -1;
gf_log ("glusterd", GF_LOG_INFO, "Unable to find peerinfo"
@@ -3251,7 +3319,7 @@ glusterd_deprobe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
if (ret) {
gf_log ("glusterd", GF_LOG_ERROR,
"Unable to get new event");
- return ret;
+ goto out;
}
ctx = GF_CALLOC (1, sizeof(*ctx), gf_gld_mt_probe_ctx_t);
@@ -3267,7 +3335,8 @@ glusterd_deprobe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
event->ctx = ctx;
- event->peerinfo = peerinfo;
+ event->peername = gf_strdup (hoststr);
+ uuid_copy (event->peerid, uuid);
ret = glusterd_friend_sm_inject_event (event);
@@ -3279,6 +3348,7 @@ glusterd_deprobe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
peerinfo->detaching = _gf_true;
out:
+ rcu_read_unlock ();
return ret;
}
@@ -3590,15 +3660,23 @@ glusterd_list_friends (rpcsvc_request_t *req, dict_t *dict, int32_t flags)
gf_log ("", GF_LOG_WARNING, "Out of Memory");
goto out;
}
+
+ /* Reset ret to 0, needed to prevent failure incase no peers exist */
+ ret = 0;
+ rcu_read_lock ();
if (!cds_list_empty (&priv->peers)) {
- cds_list_for_each_entry (entry, &priv->peers, uuid_list) {
+ cds_list_for_each_entry_rcu (entry, &priv->peers, uuid_list) {
count++;
ret = gd_add_peer_detail_to_dict (entry,
friends, count);
if (ret)
- goto out;
+ goto unlock;
}
}
+unlock:
+ rcu_read_unlock ();
+ if (ret)
+ goto out;
if (flags == GF_CLI_LIST_POOL_NODES) {
count++;
@@ -4417,14 +4495,23 @@ glusterd_friend_remove_notify (glusterd_peerctx_t *peerctx)
{
int ret = -1;
glusterd_friend_sm_event_t *new_event = NULL;
- glusterd_peerinfo_t *peerinfo = peerctx->peerinfo;
- rpcsvc_request_t *req = peerctx->args.req;
- char *errstr = peerctx->errstr;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ rpcsvc_request_t *req = NULL;
+ char *errstr = NULL;
dict_t *dict = NULL;
GF_ASSERT (peerctx);
- peerinfo = peerctx->peerinfo;
+ rcu_read_lock ();
+ peerinfo = glusterd_peerinfo_find (peerctx->peerid, peerctx->peername);
+ if (!peerinfo) {
+ gf_log (THIS->name, GF_LOG_DEBUG, "Could not find peer %s(%s). "
+ "Peer could have been deleted.", peerctx->peername,
+ uuid_utoa (peerctx->peerid));
+ ret = 0;
+ goto out;
+ }
+
req = peerctx->args.req;
dict = peerctx->args.dict;
errstr = peerctx->errstr;
@@ -4443,7 +4530,8 @@ glusterd_friend_remove_notify (glusterd_peerctx_t *peerctx)
peerinfo->hostname,
peerinfo->port, dict);
- new_event->peerinfo = peerinfo;
+ new_event->peername = gf_strdup (peerinfo->hostname);
+ uuid_copy (new_event->peerid, peerinfo->uuid);
ret = glusterd_friend_sm_inject_event (new_event);
} else {
@@ -4453,6 +4541,7 @@ glusterd_friend_remove_notify (glusterd_peerctx_t *peerctx)
}
out:
+ rcu_read_unlock ();
return ret;
}
@@ -4473,10 +4562,29 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
if (!peerctx)
return 0;
- peerinfo = peerctx->peerinfo;
this = THIS;
conf = this->private;
+ if (RPC_CLNT_DESTROY == event) {
+ GF_FREE (peerctx->errstr);
+ GF_FREE (peerctx->peername);
+ GF_FREE (peerctx);
+ return 0;
+ }
+
+ rcu_read_lock ();
+
+ peerinfo = glusterd_peerinfo_find (peerctx->peerid, peerctx->peername);
+ if (!peerinfo) {
+ /* Peerinfo should be available at this point. Not finding it
+ * means that something terrible has happened
+ */
+ gf_log (THIS->name, GF_LOG_CRITICAL, "Could not find peer "
+ "%s(%s)", peerctx->peername, uuid_utoa (peerctx->peerid));
+ ret = -1;
+ goto out;
+ }
+
switch (event) {
case RPC_CLNT_CONNECT:
{
@@ -4545,6 +4653,7 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
peerinfo->connected = 0;
break;
}
+
default:
gf_log (this->name, GF_LOG_TRACE,
"got some other RPC event %d", event);
@@ -4553,6 +4662,8 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
}
out:
+ rcu_read_unlock ();
+
glusterd_friend_sm ();
glusterd_op_sm ();
if (quorum_action)