From 12e603bb9d8bc220eeea01c0ecc8ca63f9df4bc4 Mon Sep 17 00:00:00 2001
From: Vijay Bellur
Date: Thu, 2 Apr 2015 00:29:59 +0530
Subject: glusterd: Fix rebase errors introduced by 4b18fba

4b18fba 'glusterd: group server-quorum related code together', a
refactoring change, introduced some errors related to the usage of
userspace-rcu lists when rebasing. This patch fixes them.

Changes done include:
- Redo changes done by 673ba26 'glusterd: Replace libglusterfs lists
  with liburcu lists'
- Redo changes done by c7785f7 'glusterd: Protect the peer list and
  peerinfos with RCU.'
- Redo changes done by 891c7d0 'glusterd: Prevent possible deadlock due
  to glusterd_quorum_count'

Change-Id: I789e5bc8b209d9ed6dd951d609baa90e89817639
BUG: 1205592
Signed-off-by: Vijay Bellur
Signed-off-by: Kaushal M
Reviewed-on: http://review.gluster.org/10105
Reviewed-by: Atin Mukherjee
Reviewed-by: Niels de Vos
Tested-by: Gluster Build System
---
 xlators/mgmt/glusterd/src/glusterd-server-quorum.c | 24 ++++++++++++----------
 1 file changed, 13 insertions(+), 11 deletions(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
index cb707a0caae..6178f273629 100644
--- a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
+++ b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
@@ -201,7 +201,7 @@ _does_quorum_meet (int active_count, int quorum_count)
 int
 glusterd_get_quorum_cluster_counts (xlator_t *this, int *active_count,
                                     int *quorum_count,
-                                    struct list_head *peer_list,
+                                    struct cds_list_head *peer_list,
                                     gf_boolean_t _local_xaction_peers)
 {
         glusterd_peerinfo_t *peerinfo = NULL;
@@ -220,23 +220,24 @@ glusterd_get_quorum_cluster_counts (xlator_t *this, int *active_count,
 
         if (active_count)
                 *active_count = 1;
 
+        rcu_read_lock ();
         if (!peer_list) {
-                list_for_each_entry (peerinfo, &conf->peers, uuid_list) {
-                        glusterd_quorum_count(peerinfo, inquorum_count,
-                                              active_count, out);
+                cds_list_for_each_entry (peerinfo, &conf->peers, uuid_list) {
+                        GLUSTERD_QUORUM_COUNT (peerinfo, inquorum_count,
+                                               active_count, out);
                 }
         } else {
                 if (_local_xaction_peers) {
                         list_for_each_local_xaction_peers (peerinfo, peer_list) {
-                                glusterd_quorum_count(peerinfo, inquorum_count,
-                                                      active_count, out);
+                                GLUSTERD_QUORUM_COUNT (peerinfo, inquorum_count,
+                                                       active_count, out);
                         }
                 } else {
-                        list_for_each_entry (peerinfo, peer_list,
-                                             op_peers_list) {
-                                glusterd_quorum_count(peerinfo, inquorum_count,
-                                                      active_count, out);
+                        cds_list_for_each_entry (peerinfo, peer_list,
+                                                 op_peers_list) {
+                                GLUSTERD_QUORUM_COUNT (peerinfo, inquorum_count,
+                                                       active_count, out);
                         }
                 }
         }
@@ -256,6 +257,7 @@ glusterd_get_quorum_cluster_counts (xlator_t *this, int *active_count,
         *quorum_count = count;
         ret = 0;
 out:
+        rcu_read_unlock ();
         return ret;
 }
 
@@ -293,7 +295,7 @@ glusterd_is_any_volume_in_server_quorum (xlator_t *this)
 }
 
 gf_boolean_t
-does_gd_meet_server_quorum (xlator_t *this, struct list_head *peers_list,
+does_gd_meet_server_quorum (xlator_t *this, struct cds_list_head *peers_list,
                             gf_boolean_t _local_xaction_peers)
 {
         int quorum_count = 0;
--
cgit
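
As a quick illustration of the read-side pattern this patch restores, here is a
minimal, self-contained sketch using liburcu's list API. The types and names
below (struct peer, connected, peers, count_connected_peers) are hypothetical
stand-ins for illustration only, not glusterd's actual structures; the sketch
assumes the bulletproof RCU flavour (urcu-bp), which registers reader threads
automatically.

/* rcu_list_sketch.c - hypothetical example, not part of the patch.
 * Build: gcc rcu_list_sketch.c -lurcu-bp */
#include <stdio.h>
#include <urcu-bp.h>    /* rcu_read_lock()/rcu_read_unlock(), bp flavour */
#include <urcu/list.h>  /* struct cds_list_head, cds_list_for_each_entry() */

struct peer {
        int                  connected;  /* hypothetical field */
        struct cds_list_head uuid_list;  /* linkage into the peers list */
};

static CDS_LIST_HEAD (peers);           /* hypothetical global peer list */

static int
count_connected_peers (void)
{
        struct peer *p     = NULL;
        int          count = 0;

        /* Hold the RCU read lock across the whole traversal and release
         * it only after the last access to a list element -- the same
         * rule the patch enforces by unlocking at the 'out' label. */
        rcu_read_lock ();
        cds_list_for_each_entry (p, &peers, uuid_list) {
                if (p->connected)
                        count++;
        }
        rcu_read_unlock ();

        return count;
}

int
main (void)
{
        struct peer p1 = { .connected = 1 };
        struct peer p2 = { .connected = 0 };

        /* Writers would normally combine a lock with RCU-aware update
         * primitives; plain cds_list_add is enough for this single-
         * threaded demonstration. */
        cds_list_add (&p1.uuid_list, &peers);
        cds_list_add (&p2.uuid_list, &peers);

        printf ("connected peers: %d\n", count_connected_peers ());
        return 0;
}

Note how the patch places rcu_read_unlock () at the out: label rather than
after each loop: since GLUSTERD_QUORUM_COUNT takes 'out' as a jump target and
can leave the loop early, unlocking at the label keeps the read-side critical
section balanced on every exit path.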