path: root/xlators/mgmt/glusterd/src/glusterd-sm.c
author    Gaurav Kumar Garg <garg.gaurav52@gmail.com>  2015-12-28 11:46:54 +0530
committer Atin Mukherjee <amukherj@redhat.com>         2015-12-28 22:26:01 -0800
commit    c0cc93dfe6fc63caeae9448dc689adcf13ea3aae (patch)
tree      ca43fda901afb6ec405d87c84a553cb9841994eb /xlators/mgmt/glusterd/src/glusterd-sm.c
parent    15965dd310192cae8f05fddda309e77ff5c72ca2 (diff)
glusterd: reduce friend update flood
This patch is a backport of: http://review.gluster.org/#/c/12999/

When in the befriended state, glusterd would broadcast friend updates to all other peers whenever an ACC or LOCAL_ACC event occurred. When a downed glusterd came back up and re-established its connections, this led to a flood of friend updates on the order of N^2, where N is the number of peers in the cluster. In larger clusters this was problematic and could lead to very long times for the cluster to settle down when a peer came back up. Multiple peers coming back up at the same time would compound the problem.

Broadcasting friend updates doesn't have much use in places other than during a peer probe. Instead of broadcasting friend updates on connection re-establishment, updates can just be exchanged between the peers involved in the connection.

This patch changes the glusterd friend state machine to send updates only to the required peer for ACC or LOCAL_ACC events when in the befriended state. The number of updates sent is now on the order of N. For a 10-node cluster, the number of updates was reduced by about 5 times: when creating the 10-node cluster, the updates dropped from ~500 to ~150, and when a glusterd restarted, the number of exchanges dropped from ~160 to ~35.

>> BUG: 1292749
>> Change-Id: Ib6072090c7069b081d018cdaa3dc878819ab1d18
>> Signed-off-by: Kaushal M <kaushal@redhat.com>
>> Reviewed-on: http://review.gluster.org/12999
>> Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
>> Tested-by: NetBSD Build System <jenkins@build.gluster.org>
>> Tested-by: Gluster Build System <jenkins@build.gluster.com>

Change-Id: I389de2cc224f0ed627d98ae062209dd4f93e3b19
BUG: 1294410
Signed-off-by: Gaurav Kumar Garg <ggarg@redhat.com>
Signed-off-by: Kaushal M <kaushal@redhat.com>
Reviewed-on: http://review.gluster.org/13095
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Tested-by: NetBSD Build System <jenkins@build.gluster.org>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
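To make the scaling difference concrete, the following standalone C sketch (illustrative only, not GlusterFS code; the one-update-per-connection message model is a simplifying assumption) counts the friend updates generated when every peer in an N-peer cluster re-establishes its connections, under the old broadcast scheme versus the targeted exchange this patch introduces:

#include <stdio.h>

int
main (void)
{
        int n;

        for (n = 2; n <= 10; n++) {
                /* Old: each of the n peers broadcasts an update to the
                 * other n - 1 peers when a connection comes back up, so
                 * the total grows as O(n^2). */
                int broadcast = n * (n - 1);

                /* New: each peer exchanges updates only with the peer on
                 * the other end of the re-established connection, so the
                 * total grows as O(n). */
                int targeted = n;

                printf ("%2d peers: broadcast = %3d, targeted = %2d\n",
                        n, broadcast, targeted);
        }

        return 0;
}

For n = 10 this gives 90 broadcast updates against 10 targeted ones; the absolute numbers measured on a real cluster differ, but the O(N^2) versus O(N) trend is the same one reported above.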
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-sm.c')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-sm.c  98
1 file changed, 96 insertions(+), 2 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.c b/xlators/mgmt/glusterd/src/glusterd-sm.c
index 9c772ba4d4b..4f345bc2c79 100644
--- a/xlators/mgmt/glusterd/src/glusterd-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-sm.c
@@ -598,6 +598,100 @@ out:
         return ret;
 }
+/* ac_update_friend only sends friend update to the friend that caused this
+ * event to happen
+ */
+static int
+glusterd_ac_update_friend (glusterd_friend_sm_event_t *event, void *ctx)
+{
+        int                           ret          = 0;
+        glusterd_peerinfo_t          *cur_peerinfo = NULL;
+        glusterd_peerinfo_t          *peerinfo     = NULL;
+        rpc_clnt_procedure_t         *proc         = NULL;
+        xlator_t                     *this         = NULL;
+        glusterd_friend_update_ctx_t  ev_ctx       = {{0}};
+        glusterd_conf_t              *priv         = NULL;
+        dict_t                       *friends      = NULL;
+        char                          key[100]     = {0,};
+        int32_t                       count        = 0;
+
+        GF_ASSERT (event);
+
+        this = THIS;
+        priv = this->private;
+
+        GF_ASSERT (priv);
+
+        rcu_read_lock ();
+
+        cur_peerinfo = glusterd_peerinfo_find (event->peerid, event->peername);
+        if (!cur_peerinfo) {
+                gf_msg (this->name, GF_LOG_ERROR, 0,
+                        GD_MSG_PEER_NOT_FOUND, "Could not find peer %s(%s)",
+                        event->peername, uuid_utoa (event->peerid));
+                ret = -1;
+                goto out;
+        }
+
+        /* Bail out early if peer is not connected.
+         * We cannot send requests to the peer until we have established our
+         * client connection to it.
+         */
+        if (!cur_peerinfo->connected || !cur_peerinfo->peer) {
+                ret = 0;
+                goto out;
+        }
+
+        ev_ctx.op = GD_FRIEND_UPDATE_ADD;
+
+        friends = dict_new ();
+        if (!friends)
+                goto out;
+
+        snprintf (key, sizeof (key), "op");
+        ret = dict_set_int32 (friends, key, ev_ctx.op);
+        if (ret)
+                goto out;
+
+        cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
+                if (!glusterd_should_update_peer (peerinfo, cur_peerinfo))
+                        continue;
+
+                count++;
+
+                memset (key, 0, sizeof (key));
+                snprintf (key, sizeof (key), "friend%d", count);
+                ret = gd_add_friend_to_dict (peerinfo, friends, key);
+                if (ret)
+                        goto out;
+        }
+
+        ret = dict_set_int32 (friends, "count", count);
+        if (ret)
+                goto out;
+
+        ret = dict_set_static_ptr (friends, "peerinfo", cur_peerinfo);
+        if (ret) {
+                gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+                        "failed to set peerinfo");
+                goto out;
+        }
+
+        proc = &cur_peerinfo->peer->proctable[GLUSTERD_FRIEND_UPDATE];
+        if (proc->fn)
+                ret = proc->fn (NULL, this, friends);
+
+        gf_msg_debug (this->name, 0, "Returning with %d", ret);
+
+out:
+        rcu_read_unlock ();
+
+        if (friends)
+                dict_unref (friends);
+
+        return ret;
+}
+
 /* Clean up stale volumes on the peer being detached. The volumes which have
  * bricks on other peers are stale with respect to the detached peer.
  */
@@ -1043,8 +1137,8 @@ glusterd_sm_t glusterd_state_befriended [] = {
         {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_none}, //EVENT_NONE,
         {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_none}, //EVENT_PROBE,
         {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_none}, //EVENT_INIT_FRIEND_REQ,
-        {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_none}, //EVENT_RCVD_ACC
-        {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_send_friend_update}, //EVENT_RCVD_LOCAL_ACC
+        {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_update_friend}, //EVENT_RCVD_ACC
+        {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_update_friend}, //EVENT_RCVD_LOCAL_ACC
         {GD_FRIEND_STATE_REJECTED, glusterd_ac_none}, //EVENT_RCVD_RJT
         {GD_FRIEND_STATE_REJECTED, glusterd_ac_none}, //EVENT_RCVD_LOCAL_RJT
         {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_handle_friend_add_req}, //EVENT_RCV_FRIEND_REQ
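Each row of glusterd_state_befriended pairs an incoming friend event with a next state and an action callback; the patch leaves every transition in place and only swaps the handlers for EVENT_RCVD_ACC and EVENT_RCVD_LOCAL_ACC to the new targeted action. That action filters the peer list with glusterd_should_update_peer(). The sketch below is a hypothetical, self-contained model of such a filter, assuming it admits a peer only when it is the peer being updated itself or has completed the handshake; the real helper is defined elsewhere in the glusterd sources and its exact checks may differ:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for glusterd's friend state and peerinfo types. */
typedef enum {
        FRIEND_STATE_DEFAULT = 0,
        FRIEND_STATE_BEFRIENDED,
} friend_state_t;

typedef struct peer {
        const char     *hostname;
        friend_state_t  state;
} peer_t;

/* Hypothetical filter: include a peer in the update sent to cur_peer only
 * if it is cur_peer itself or it has reached the befriended state. */
static bool
should_update_peer (const peer_t *peer, const peer_t *cur_peer)
{
        return (peer == cur_peer) ||
               (peer->state == FRIEND_STATE_BEFRIENDED);
}

int
main (void)
{
        peer_t  peers[] = {
                { "peer1", FRIEND_STATE_BEFRIENDED },
                { "peer2", FRIEND_STATE_DEFAULT },  /* probe still in flight */
                { "peer3", FRIEND_STATE_BEFRIENDED },
        };
        peer_t *cur = &peers[2];  /* the peer whose ACC caused the event */
        size_t  i;

        for (i = 0; i < sizeof (peers) / sizeof (peers[0]); i++)
                printf ("%s: %s\n", peers[i].hostname,
                        should_update_peer (&peers[i], cur) ?
                        "include in update" : "skip");

        return 0;
}

Keeping the half-probed peer2 out of the payload mirrors the patch's intent: only fully befriended peers are worth advertising to the peer on the other end of the re-established connection.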