path: root/xlators/mgmt/glusterd
author      Sanju Rakonde <srakonde@redhat.com>        2018-11-28 16:13:58 +0530
committer   Atin Mukherjee <amukherj@redhat.com>       2018-12-03 17:03:57 +0000
commit      2bb0e89e4bb113a93c6e786446a140cd99261af8 (patch)
tree        4e3ad012d934fb471b60bbd3b18fd61f4fc4c8cf /xlators/mgmt/glusterd
parent      220722b426f4014abdca0c719b2ca4e3aefeecc4 (diff)
glusterd: perform rcu_read_lock/unlock() under cleanup_lock mutex
Problem: glusterd should not try to acquire locks on any resources when it
has already received a SIGTERM and cleanup has started. Otherwise we might
hit a segfault, since the thread going through the cleanup path will be
freeing up the resources while some other thread might be trying to acquire
locks on the freed resources.

Solution: perform rcu_read_lock/unlock() under the cleanup_lock mutex.

fixes: bz#1654270
Change-Id: I87a97cfe4f272f74f246d688660934638911ce54
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
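For reference, the mechanism behind the fix: the cleanup path (triggered by
SIGTERM) is expected to hold ctx->cleanup_lock while tearing down resources,
and this patch makes every RCU read-side entry and exit synchronize on that
same mutex via two new macros in glusterd.h (the full hunk is at the end of
this diff). A minimal annotated sketch of those macros follows; the comments
are editorial, not part of the commit:

    #define RCU_READ_LOCK                                                 \
        pthread_mutex_lock(&(THIS->ctx)->cleanup_lock); /* blocks while   \
                                                           cleanup runs */ \
        {                                                                 \
            rcu_read_lock(); /* enter RCU read-side critical section */   \
        }                                                                 \
        pthread_mutex_unlock(&(THIS->ctx)->cleanup_lock);

    #define RCU_READ_UNLOCK                                               \
        pthread_mutex_lock(&(THIS->ctx)->cleanup_lock);                   \
        {                                                                 \
            rcu_read_unlock(); /* leave RCU read-side critical section */ \
        }                                                                 \
        pthread_mutex_unlock(&(THIS->ctx)->cleanup_lock);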
Diffstat (limited to 'xlators/mgmt/glusterd')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-brick-ops.c        8
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c         74
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handshake.c       32
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-mgmt.c            28
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c           30
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-peer-utils.c      40
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-replace-brick.c    4
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-reset-brick.c      4
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-rpc-ops.c         48
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-server-quorum.c    4
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-sm.c              64
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapshot.c         6
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-store.c            4
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-syncop.c          40
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c            8
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.h                 18
16 files changed, 213 insertions, 199 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
index f64237c4e18..6a015a88147 100644
--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
@@ -1904,7 +1904,7 @@ glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count,
continue;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_uuid(brickinfo->uuid);
if (!peerinfo) {
snprintf(msg, sizeof(msg),
@@ -1913,7 +1913,7 @@ glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count,
brick);
*errstr = gf_strdup(msg);
ret = -1;
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
goto out;
}
if (!peerinfo->connected) {
@@ -1923,10 +1923,10 @@ glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count,
brick);
*errstr = gf_strdup(msg);
ret = -1;
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
goto out;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
}
out:
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index ef1df3c3788..db58b3af8cf 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -103,7 +103,7 @@ glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
ret = glusterd_remote_hostname_get(req, rhost, sizeof(rhost));
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(uuid, rhost);
@@ -174,7 +174,7 @@ glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
ret = GLUSTERD_CONNECTION_AWAITED;
out:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret && (ret != GLUSTERD_CONNECTION_AWAITED)) {
if (ctx && ctx->hostname)
@@ -207,7 +207,7 @@ glusterd_handle_unfriend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
if (!port)
port = GF_DEFAULT_BASE_PORT;
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(uuid, hostname);
@@ -261,7 +261,7 @@ glusterd_handle_unfriend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
ret = 0;
out:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (0 != ret) {
if (ctx && ctx->hostname)
@@ -904,9 +904,9 @@ __glusterd_handle_cluster_lock(rpcsvc_request_t *req)
gf_msg_debug(this->name, 0, "Received LOCK from uuid: %s",
uuid_utoa(lock_req.uuid));
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find_by_uuid(lock_req.uuid) == NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
"%s doesn't "
@@ -1063,9 +1063,9 @@ __glusterd_handle_stage_op(rpcsvc_request_t *req)
ret = dict_get_bin(req_ctx->dict, "transaction_id", (void **)&txn_id);
gf_msg_debug(this->name, 0, "transaction ID = %s", uuid_utoa(*txn_id));
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find_by_uuid(op_req.uuid) == NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
"%s doesn't "
@@ -1144,9 +1144,9 @@ __glusterd_handle_commit_op(rpcsvc_request_t *req)
goto out;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find_by_uuid(op_req.uuid) == NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
"%s doesn't "
@@ -1267,12 +1267,12 @@ __glusterd_handle_cli_probe(rpcsvc_request_t *req)
goto out;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_hostname(hostname);
ret = (peerinfo && gd_peer_has_address(peerinfo, hostname));
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg_debug("glusterd", 0,
@@ -2286,7 +2286,7 @@ __glusterd_handle_fsm_log(rpcsvc_request_t *req)
conf = this->private;
ret = glusterd_sm_tr_log_add_to_dict(dict, &conf->op_sm_log);
} else {
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_hostname(cli_req.name);
if (!peerinfo) {
@@ -2296,7 +2296,7 @@ __glusterd_handle_fsm_log(rpcsvc_request_t *req)
ret = glusterd_sm_tr_log_add_to_dict(dict, &peerinfo->sm_log);
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
}
out:
@@ -2440,9 +2440,9 @@ __glusterd_handle_cluster_unlock(rpcsvc_request_t *req)
gf_msg_debug(this->name, 0, "Received UNLOCK from uuid: %s",
uuid_utoa(unlock_req.uuid));
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find_by_uuid(unlock_req.uuid) == NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
"%s doesn't "
@@ -2753,11 +2753,11 @@ __glusterd_handle_friend_update(rpcsvc_request_t *req)
}
ret = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
if (glusterd_peerinfo_find(friend_req.uuid, NULL) == NULL) {
ret = -1;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_REQ_FROM_UNKNOWN_PEER,
"Received friend update request "
@@ -2816,7 +2816,7 @@ __glusterd_handle_friend_update(rpcsvc_request_t *req)
snprintf(key, sizeof(key), "friend%d", i);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(uuid, NULL);
if (peerinfo == NULL) {
/* Create a new peer and add it to the list as there is
@@ -2861,7 +2861,7 @@ __glusterd_handle_friend_update(rpcsvc_request_t *req)
}
}
unlock:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret)
break;
@@ -2964,7 +2964,7 @@ __glusterd_handle_probe_query(rpcsvc_request_t *req)
goto out;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(probe_req.uuid, remote_hostname);
if ((peerinfo == NULL) && (!cds_list_empty(&conf->peers))) {
rsp.op_ret = -1;
@@ -2984,7 +2984,7 @@ __glusterd_handle_probe_query(rpcsvc_request_t *req)
rsp.op_errno = GF_PROBE_ADD_FAILED;
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
respond:
gf_uuid_copy(rsp.uuid, MY_UUID);
@@ -3334,11 +3334,11 @@ glusterd_friend_remove(uuid_t uuid, char *hostname)
int ret = -1;
glusterd_peerinfo_t *peerinfo = NULL;
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(uuid, hostname);
if (peerinfo == NULL) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
goto out;
}
@@ -3346,7 +3346,7 @@ glusterd_friend_remove(uuid_t uuid, char *hostname)
if (ret)
gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_VOL_CLEANUP_FAIL,
"Volumes cleanup failed");
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
/* Giving up the critical section here as glusterd_peerinfo_cleanup must
* be called from outside a critical section
*/
@@ -3657,7 +3657,7 @@ glusterd_probe_begin(rpcsvc_request_t *req, const char *hoststr, int port,
GF_ASSERT(hoststr);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(NULL, hoststr);
if (peerinfo == NULL) {
@@ -3702,7 +3702,7 @@ glusterd_probe_begin(rpcsvc_request_t *req, const char *hoststr, int port,
}
out:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
gf_msg_debug("glusterd", 0, "returning %d", ret);
return ret;
}
@@ -3719,7 +3719,7 @@ glusterd_deprobe_begin(rpcsvc_request_t *req, const char *hoststr, int port,
GF_ASSERT(hoststr);
GF_ASSERT(req);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(uuid, hoststr);
if (peerinfo == NULL) {
@@ -3780,7 +3780,7 @@ glusterd_deprobe_begin(rpcsvc_request_t *req, const char *hoststr, int port,
peerinfo->detaching = _gf_true;
out:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
return ret;
}
@@ -4145,7 +4145,7 @@ glusterd_list_friends(rpcsvc_request_t *req, dict_t *dict, int32_t flags)
/* Reset ret to 0, needed to prevent failure in case no peers exist */
ret = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
if (!cds_list_empty(&priv->peers)) {
cds_list_for_each_entry_rcu(entry, &priv->peers, uuid_list)
{
@@ -4156,7 +4156,7 @@ glusterd_list_friends(rpcsvc_request_t *req, dict_t *dict, int32_t flags)
}
}
unlock:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret)
goto out;
@@ -5609,7 +5609,7 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
if (priv->opts)
dict_foreach(priv->opts, glusterd_print_global_options, fp);
- rcu_read_lock();
+ RCU_READ_LOCK;
fprintf(fp, "\n[Peers]\n");
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
@@ -5639,7 +5639,7 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
count_bkp = 0;
fprintf(fp, "\n");
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
count = 0;
fprintf(fp, "\n[Volumes]\n");
@@ -6253,7 +6253,7 @@ glusterd_friend_remove_notify(glusterd_peerctx_t *peerctx, int32_t op_errno)
GF_ASSERT(peerctx);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
if (!peerinfo) {
gf_msg_debug(THIS->name, 0,
@@ -6293,7 +6293,7 @@ glusterd_friend_remove_notify(glusterd_peerctx_t *peerctx, int32_t op_errno)
}
out:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
return ret;
}
@@ -6340,7 +6340,7 @@ __glusterd_peer_rpc_notify(struct rpc_clnt *rpc, void *mydata,
event, peerctx->peername);
return 0;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
if (!peerinfo) {
@@ -6453,7 +6453,7 @@ __glusterd_peer_rpc_notify(struct rpc_clnt *rpc, void *mydata,
}
out:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
glusterd_friend_sm();
glusterd_op_sm();
diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c
index 53b500f4986..1466d8f0653 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handshake.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c
@@ -1189,9 +1189,9 @@ gd_validate_mgmt_hndsk_req(rpcsvc_request_t *req, dict_t *dict)
*/
if (!ret) {
gf_uuid_parse(uuid_str, peer_uuid);
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find(peer_uuid, NULL) != NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret)
return _gf_true;
}
@@ -1207,7 +1207,7 @@ gd_validate_mgmt_hndsk_req(rpcsvc_request_t *req, dict_t *dict)
* is available in the peerinfo list but the uuid has changed of the
* node due to a reinstall, in that case the validation should fail!
*/
- rcu_read_lock();
+ RCU_READ_LOCK;
if (!uuid_str) {
ret = (glusterd_peerinfo_find(NULL, hostname) == NULL);
} else {
@@ -1225,7 +1225,7 @@ gd_validate_mgmt_hndsk_req(rpcsvc_request_t *req, dict_t *dict)
ret = -1;
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HANDSHAKE_REQ_REJECTED,
"Rejecting management "
@@ -1768,7 +1768,7 @@ glusterd_event_connected_inject(glusterd_peerctx_t *peerctx)
goto out;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
if (!peerinfo) {
@@ -1796,7 +1796,7 @@ glusterd_event_connected_inject(glusterd_peerctx_t *peerctx)
"EVENT_CONNECTED ret = %d",
ret);
unlock:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
out:
gf_msg_debug("glusterd", 0, "returning %d", ret);
@@ -1870,7 +1870,7 @@ __glusterd_mgmt_hndsk_version_ack_cbk(struct rpc_req *req, struct iovec *iov,
frame = myframe;
peerctx = frame->local;
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
if (!peerinfo) {
gf_msg_debug(this->name, 0, "Could not find peer %s(%s)",
@@ -1930,7 +1930,7 @@ out:
if (ret != 0 && peerinfo)
rpc_transport_disconnect(peerinfo->rpc->conn.trans, _gf_false);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
frame->local = NULL;
STACK_DESTROY(frame->root);
@@ -1979,7 +1979,7 @@ __glusterd_mgmt_hndsk_version_cbk(struct rpc_req *req, struct iovec *iov,
frame = myframe;
peerctx = frame->local;
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
if (!peerinfo) {
@@ -2055,7 +2055,7 @@ out:
rpc_transport_disconnect(peerinfo->rpc->conn.trans, _gf_false);
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (rsp.hndsk.hndsk_val)
free(rsp.hndsk.hndsk_val);
@@ -2114,7 +2114,7 @@ glusterd_mgmt_handshake(xlator_t *this, glusterd_peerctx_t *peerctx)
GF_PROTOCOL_DICT_SERIALIZE(this, req_dict, (&req.hndsk.hndsk_val),
req.hndsk.hndsk_len, ret, out);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
if (!peerinfo) {
@@ -2129,7 +2129,7 @@ glusterd_mgmt_handshake(xlator_t *this, glusterd_peerctx_t *peerctx)
(xdrproc_t)xdr_gf_mgmt_hndsk_req);
ret = 0;
unlock:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
out:
if (ret && frame)
STACK_DESTROY(frame->root);
@@ -2244,7 +2244,7 @@ __glusterd_peer_dump_version_cbk(struct rpc_req *req, struct iovec *iov,
frame = myframe;
peerctx = frame->local;
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
if (!peerinfo) {
@@ -2320,7 +2320,7 @@ out:
if (ret != 0 && peerinfo)
rpc_transport_disconnect(peerinfo->rpc->conn.trans, _gf_false);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
glusterd_friend_sm();
glusterd_op_sm();
@@ -2369,7 +2369,7 @@ glusterd_peer_dump_version(xlator_t *this, struct rpc_clnt *rpc,
if (!peerctx)
goto out;
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
if (!peerinfo) {
@@ -2384,7 +2384,7 @@ glusterd_peer_dump_version(xlator_t *this, struct rpc_clnt *rpc,
peerinfo->rpc, &req, frame, &glusterd_dump_prog, GF_DUMP_DUMP, NULL,
this, glusterd_peer_dump_version_cbk, (xdrproc_t)xdr_gf_dump_req);
unlock:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
out:
if (ret && frame)
STACK_DESTROY(frame->root);
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
index c7e9193d8aa..ee358808236 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
@@ -51,14 +51,14 @@ gd_mgmt_v3_collate_errors(struct syncargs *args, int op_ret, int op_errno,
args->op_ret = op_ret;
args->op_errno = op_errno;
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(peerid, NULL);
if (peerinfo)
peer_str = gf_strdup(peerinfo->hostname);
else
peer_str = gf_strdup(uuid_utoa(uuid));
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
is_operrstr_blk = (op_errstr && strcmp(op_errstr, ""));
err_string = (is_operrstr_blk) ? op_errstr : err_str;
@@ -708,7 +708,7 @@ glusterd_mgmt_v3_initiate_lockdown(glusterd_op_t op, dict_t *dict,
synctask_barrier_init((&args));
peer_cnt = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -726,7 +726,7 @@ glusterd_mgmt_v3_initiate_lockdown(glusterd_op_t op, dict_t *dict,
gd_mgmt_v3_lock(op, dict, peerinfo, &args, MY_UUID, peer_uuid);
peer_cnt++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -1046,7 +1046,7 @@ glusterd_mgmt_v3_pre_validate(glusterd_op_t op, dict_t *req_dict,
synctask_barrier_init((&args));
peer_cnt = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -1065,7 +1065,7 @@ glusterd_mgmt_v3_pre_validate(glusterd_op_t op, dict_t *req_dict,
peer_uuid);
peer_cnt++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -1328,7 +1328,7 @@ glusterd_mgmt_v3_brick_op(glusterd_op_t op, dict_t *req_dict, char **op_errstr,
synctask_barrier_init((&args));
peer_cnt = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -1347,7 +1347,7 @@ glusterd_mgmt_v3_brick_op(glusterd_op_t op, dict_t *req_dict, char **op_errstr,
peer_uuid);
peer_cnt++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -1585,7 +1585,7 @@ glusterd_mgmt_v3_commit(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
synctask_barrier_init((&args));
peer_cnt = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -1617,7 +1617,7 @@ glusterd_mgmt_v3_commit(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
peer_uuid);
peer_cnt++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -1826,7 +1826,7 @@ glusterd_mgmt_v3_post_validate(glusterd_op_t op, int32_t op_ret, dict_t *dict,
synctask_barrier_init((&args));
peer_cnt = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -1845,7 +1845,7 @@ glusterd_mgmt_v3_post_validate(glusterd_op_t op, int32_t op_ret, dict_t *dict,
MY_UUID, peer_uuid);
peer_cnt++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -2010,7 +2010,7 @@ glusterd_mgmt_v3_release_peer_locks(glusterd_op_t op, dict_t *dict,
if (ret)
goto out;
peer_cnt = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -2028,7 +2028,7 @@ glusterd_mgmt_v3_release_peer_locks(glusterd_op_t op, dict_t *dict,
gd_mgmt_v3_unlock(op, dict, peerinfo, &args, MY_UUID, peer_uuid);
peer_cnt++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index e72bec4e55c..0e5b75bf5f0 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -1785,7 +1785,7 @@ glusterd_op_stage_sync_volume(dict_t *dict, char **op_errstr)
ret = 0;
}
} else {
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(NULL, hostname);
if (peerinfo == NULL) {
@@ -1802,7 +1802,7 @@ glusterd_op_stage_sync_volume(dict_t *dict, char **op_errstr)
ret = -1;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
}
out:
@@ -3861,7 +3861,7 @@ glusterd_op_ac_send_lock(glusterd_op_sm_event_t *event, void *ctx)
priv = this->private;
GF_ASSERT(priv);
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -3882,7 +3882,7 @@ glusterd_op_ac_send_lock(glusterd_op_sm_event_t *event, void *ctx)
if (proc->fn) {
ret = proc->fn(NULL, this, peerinfo);
if (ret) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
gf_msg(this->name, GF_LOG_WARNING, 0,
GD_MSG_LOCK_REQ_SEND_FAIL,
"Failed to send lock request "
@@ -3903,7 +3903,7 @@ glusterd_op_ac_send_lock(glusterd_op_sm_event_t *event, void *ctx)
if (proc->fn) {
ret = dict_set_static_ptr(dict, "peerinfo", peerinfo);
if (ret) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
"failed to set peerinfo");
dict_unref(dict);
@@ -3912,7 +3912,7 @@ glusterd_op_ac_send_lock(glusterd_op_sm_event_t *event, void *ctx)
ret = proc->fn(NULL, this, dict);
if (ret) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
gf_msg(this->name, GF_LOG_WARNING, 0,
GD_MSG_MGMTV3_LOCK_REQ_SEND_FAIL,
"Failed to send mgmt_v3 lock "
@@ -3928,7 +3928,7 @@ glusterd_op_ac_send_lock(glusterd_op_sm_event_t *event, void *ctx)
}
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
opinfo.pending_count = pending_count;
@@ -3964,7 +3964,7 @@ glusterd_op_ac_send_unlock(glusterd_op_sm_event_t *event, void *ctx)
priv = this->private;
GF_ASSERT(priv);
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -4036,7 +4036,7 @@ glusterd_op_ac_send_unlock(glusterd_op_sm_event_t *event, void *ctx)
}
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
opinfo.pending_count = pending_count;
@@ -4589,7 +4589,7 @@ glusterd_op_ac_send_stage_op(glusterd_op_sm_event_t *event, void *ctx)
goto out;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -4609,7 +4609,7 @@ glusterd_op_ac_send_stage_op(glusterd_op_sm_event_t *event, void *ctx)
if (proc->fn) {
ret = dict_set_static_ptr(dict, "peerinfo", peerinfo);
if (ret) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
"failed to "
"set peerinfo");
@@ -4629,7 +4629,7 @@ glusterd_op_ac_send_stage_op(glusterd_op_sm_event_t *event, void *ctx)
pending_count++;
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
opinfo.pending_count = pending_count;
out:
@@ -5216,7 +5216,7 @@ glusterd_op_ac_send_commit_op(glusterd_op_sm_event_t *event, void *ctx)
goto out;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -5236,7 +5236,7 @@ glusterd_op_ac_send_commit_op(glusterd_op_sm_event_t *event, void *ctx)
if (proc->fn) {
ret = dict_set_static_ptr(dict, "peerinfo", peerinfo);
if (ret) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
"failed to set peerinfo");
goto out;
@@ -5254,7 +5254,7 @@ glusterd_op_ac_send_commit_op(glusterd_op_sm_event_t *event, void *ctx)
pending_count++;
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
opinfo.pending_count = pending_count;
gf_msg_debug(this->name, 0,
diff --git a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
index 7d2d28520fc..9356ec30fb3 100644
--- a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
@@ -188,7 +188,7 @@ glusterd_peerinfo_find_by_uuid(uuid_t uuid)
if (gf_uuid_is_null(uuid))
return NULL;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(entry, &priv->peers, uuid_list)
{
if (!gf_uuid_compare(entry->uuid, uuid)) {
@@ -198,7 +198,7 @@ glusterd_peerinfo_find_by_uuid(uuid_t uuid)
break;
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (!found)
gf_msg_debug(this->name, 0, "Friend with uuid: %s, not found",
@@ -323,7 +323,7 @@ glusterd_chk_peers_connected_befriended(uuid_t skip_uuid)
priv = THIS->private;
GF_ASSERT(priv);
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
{
if (!gf_uuid_is_null(skip_uuid) &&
@@ -336,7 +336,7 @@ glusterd_chk_peers_connected_befriended(uuid_t skip_uuid)
break;
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
gf_msg_debug(THIS->name, 0, "Returning %s", (ret ? "TRUE" : "FALSE"));
return ret;
@@ -358,7 +358,7 @@ glusterd_uuid_to_hostname(uuid_t uuid)
if (!gf_uuid_compare(MY_UUID, uuid)) {
hostname = gf_strdup("localhost");
}
- rcu_read_lock();
+ RCU_READ_LOCK;
if (!cds_list_empty(&priv->peers)) {
cds_list_for_each_entry_rcu(entry, &priv->peers, uuid_list)
{
@@ -368,7 +368,7 @@ glusterd_uuid_to_hostname(uuid_t uuid)
}
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
return hostname;
}
@@ -399,15 +399,15 @@ glusterd_are_all_peers_up()
conf = this->private;
GF_VALIDATE_OR_GOTO(this->name, conf, out);
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
if (!peerinfo->connected) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
goto out;
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
peers_up = _gf_true;
@@ -428,7 +428,7 @@ glusterd_are_vol_all_peers_up(glusterd_volinfo_t *volinfo,
if (!gf_uuid_compare(brickinfo->uuid, MY_UUID))
continue;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, peers, uuid_list)
{
if (gf_uuid_compare(peerinfo->uuid, brickinfo->uuid))
@@ -441,11 +441,11 @@ glusterd_are_vol_all_peers_up(glusterd_volinfo_t *volinfo,
*down_peerstr = gf_strdup(peerinfo->hostname);
gf_msg_debug(THIS->name, 0, "Peer %s is down. ",
peerinfo->hostname);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
goto out;
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
}
ret = _gf_true;
@@ -644,7 +644,7 @@ gd_peerinfo_find_from_hostname(const char *hoststr)
GF_VALIDATE_OR_GOTO(this->name, (hoststr != NULL), out);
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peer, &priv->peers, uuid_list)
{
cds_list_for_each_entry_rcu(tmphost, &peer->hostnames, hostname_list)
@@ -659,7 +659,7 @@ gd_peerinfo_find_from_hostname(const char *hoststr)
}
}
unlock:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
out:
return found;
}
@@ -693,7 +693,7 @@ gd_peerinfo_find_from_addrinfo(const struct addrinfo *addr)
GF_VALIDATE_OR_GOTO(this->name, (addr != NULL), out);
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peer, &conf->peers, uuid_list)
{
cds_list_for_each_entry_rcu(address, &peer->hostnames, hostname_list)
@@ -725,7 +725,7 @@ gd_peerinfo_find_from_addrinfo(const struct addrinfo *addr)
}
}
unlock:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
out:
return found;
}
@@ -992,7 +992,7 @@ glusterd_peerinfo_find_by_generation(uint32_t generation)
GF_ASSERT(priv);
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(entry, &priv->peers, uuid_list)
{
if (entry->generation == generation) {
@@ -1002,7 +1002,7 @@ glusterd_peerinfo_find_by_generation(uint32_t generation)
break;
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (!found)
gf_msg_debug(this->name, 0,
@@ -1025,9 +1025,9 @@ glusterd_get_peers_count()
conf = this->private;
GF_VALIDATE_OR_GOTO(this->name, conf, out);
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peer, &conf->peers, uuid_list) count++;
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
out:
return count;
diff --git a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
index f14e79ecf5f..355391db69f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
+++ b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
@@ -268,7 +268,7 @@ glusterd_op_stage_replace_brick(dict_t *dict, char **op_errstr,
}
if (!gf_is_local_addr(host)) {
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(NULL, host);
if (peerinfo == NULL) {
@@ -292,7 +292,7 @@ glusterd_op_stage_replace_brick(dict_t *dict, char **op_errstr,
*op_errstr = gf_strdup(msg);
ret = -1;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret)
goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd-reset-brick.c b/xlators/mgmt/glusterd/src/glusterd-reset-brick.c
index 41adc40b5ce..83d3128f8f0 100644
--- a/xlators/mgmt/glusterd/src/glusterd-reset-brick.c
+++ b/xlators/mgmt/glusterd/src/glusterd-reset-brick.c
@@ -153,7 +153,7 @@ glusterd_reset_brick_prevalidate(dict_t *dict, char **op_errstr,
if (ret)
goto out;
} else {
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(NULL, host);
if (peerinfo == NULL) {
@@ -178,7 +178,7 @@ glusterd_reset_brick_prevalidate(dict_t *dict, char **op_errstr,
*op_errstr = gf_strdup(msg);
ret = -1;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret)
goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 40e22deff9b..49a3d3b5a32 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -266,7 +266,7 @@ __glusterd_probe_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(rsp.uuid, rsp.hostname);
if (peerinfo == NULL) {
ret = -1;
@@ -403,7 +403,7 @@ cont:
"Received resp to probe req");
unlock:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
out:
free(rsp.hostname); // malloced by xdr
@@ -467,7 +467,7 @@ __glusterd_friend_add_cbk(struct rpc_req *req, struct iovec *iov, int count,
(op_ret) ? "RJT" : "ACC", uuid_utoa(rsp.uuid), rsp.hostname,
rsp.port);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(rsp.uuid, rsp.hostname);
if (peerinfo == NULL) {
@@ -507,7 +507,7 @@ __glusterd_friend_add_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = glusterd_friend_sm_inject_event(event);
unlock:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
out:
ctx = ((call_frame_t *)myframe)->local;
((call_frame_t *)myframe)->local = NULL;
@@ -589,7 +589,7 @@ __glusterd_friend_remove_cbk(struct rpc_req *req, struct iovec *iov, int count,
rsp.port);
inject:
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(rsp.uuid, ctx->hostname);
if (peerinfo == NULL) {
@@ -622,7 +622,7 @@ inject:
op_ret = 0;
unlock:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
respond:
ret = glusterd_xfer_cli_deprobe_resp(ctx->req, op_ret, op_errno, NULL,
@@ -748,9 +748,9 @@ __glusterd_cluster_lock_cbk(struct rpc_req *req, struct iovec *iov, int count,
uuid_utoa(rsp.uuid));
}
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
@@ -865,9 +865,9 @@ glusterd_mgmt_v3_lock_peers_cbk_fn(struct rpc_req *req, struct iovec *iov,
uuid_utoa(rsp.uuid));
}
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
@@ -973,9 +973,9 @@ glusterd_mgmt_v3_unlock_peers_cbk_fn(struct rpc_req *req, struct iovec *iov,
uuid_utoa(rsp.uuid));
}
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_CLUSTER_UNLOCK_FAILED,
@@ -1079,9 +1079,9 @@ __glusterd_cluster_unlock_cbk(struct rpc_req *req, struct iovec *iov, int count,
uuid_utoa(rsp.uuid));
}
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_CLUSTER_UNLOCK_FAILED,
@@ -1203,7 +1203,7 @@ out:
uuid_utoa(rsp.uuid));
}
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(rsp.uuid, NULL);
if (peerinfo == NULL) {
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
@@ -1230,7 +1230,7 @@ out:
event_type = GD_OP_EVENT_RCVD_ACC;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
ret = glusterd_set_txn_opinfo(txn_id, &opinfo);
if (ret)
@@ -1357,7 +1357,7 @@ __glusterd_commit_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
uuid_utoa(*txn_id));
}
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(rsp.uuid, NULL);
if (peerinfo == NULL) {
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
@@ -1408,7 +1408,7 @@ __glusterd_commit_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
}
}
unlock:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
out:
@@ -1510,11 +1510,11 @@ glusterd_rpc_friend_add(call_frame_t *frame, xlator_t *this, void *data)
GF_ASSERT(priv);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
if (!peerinfo) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
ret = -1;
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
"Could not find peer %s(%s)", event->peername,
@@ -1526,7 +1526,7 @@ glusterd_rpc_friend_add(call_frame_t *frame, xlator_t *this, void *data)
req.hostname = gf_strdup(peerinfo->hostname);
req.port = peerinfo->port;
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
ret = glusterd_add_volumes_to_export_dict(&peer_data);
if (ret) {
@@ -1604,11 +1604,11 @@ glusterd_rpc_friend_remove(call_frame_t *frame, xlator_t *this, void *data)
GF_ASSERT(priv);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
if (!peerinfo) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
ret = -1;
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
"Could not find peer %s(%s)", event->peername,
@@ -1625,7 +1625,7 @@ glusterd_rpc_friend_remove(call_frame_t *frame, xlator_t *this, void *data)
glusterd_friend_remove_cbk,
(xdrproc_t)xdr_gd1_mgmt_friend_req);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
out:
GF_FREE(req.hostname);
diff --git a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
index cfa0cce0aba..66c6419b535 100644
--- a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
+++ b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
@@ -217,7 +217,7 @@ glusterd_get_quorum_cluster_counts(xlator_t *this, int *active_count,
if (active_count)
*active_count = 1;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
if (_is_contributing_to_quorum(peerinfo->quorum_contrib))
@@ -225,7 +225,7 @@ glusterd_get_quorum_cluster_counts(xlator_t *this, int *active_count,
if (active_count && (peerinfo->quorum_contrib == QUORUM_UP))
*active_count = *active_count + 1;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
ret = dict_get_str(conf->opts, GLUSTERD_QUORUM_RATIO_KEY, &val);
if (ret == 0) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.c b/xlators/mgmt/glusterd/src/glusterd-sm.c
index 35bc71455d2..f9b044fae47 100644
--- a/xlators/mgmt/glusterd/src/glusterd-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-sm.c
@@ -163,7 +163,7 @@ glusterd_broadcast_friend_delete(char *hostname, uuid_t uuid)
if (ret)
goto out;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
{
if (!peerinfo->connected || !peerinfo->peer)
@@ -186,7 +186,7 @@ glusterd_broadcast_friend_delete(char *hostname, uuid_t uuid)
}
}
unlock:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
gf_msg_debug("glusterd", 0, "Returning with %d", ret);
@@ -229,7 +229,7 @@ glusterd_ac_reverse_probe_begin(glusterd_friend_sm_event_t *event, void *ctx)
GF_ASSERT(event);
GF_ASSERT(ctx);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
if (!peerinfo) {
@@ -274,7 +274,7 @@ glusterd_ac_reverse_probe_begin(glusterd_friend_sm_event_t *event, void *ctx)
}
out:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
if (new_event)
@@ -305,7 +305,7 @@ glusterd_ac_friend_add(glusterd_friend_sm_event_t *event, void *ctx)
GF_ASSERT(conf);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
if (!peerinfo) {
@@ -328,7 +328,7 @@ glusterd_ac_friend_add(glusterd_friend_sm_event_t *event, void *ctx)
}
out:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret && frame)
STACK_DESTROY(frame->root);
@@ -361,7 +361,7 @@ glusterd_ac_friend_probe(glusterd_friend_sm_event_t *event, void *ctx)
GF_ASSERT(conf);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(NULL, probe_ctx->hostname);
if (peerinfo == NULL) {
// We should not reach this state ideally
@@ -407,7 +407,7 @@ glusterd_ac_friend_probe(glusterd_friend_sm_event_t *event, void *ctx)
}
out:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (dict)
dict_unref(dict);
@@ -440,7 +440,7 @@ glusterd_ac_send_friend_remove_req(glusterd_friend_sm_event_t *event,
GF_ASSERT(conf);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
if (!peerinfo) {
@@ -487,7 +487,7 @@ glusterd_ac_send_friend_remove_req(glusterd_friend_sm_event_t *event,
}
out:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
gf_msg_debug("glusterd", 0, "Returning with %d", ret);
@@ -534,7 +534,7 @@ glusterd_ac_send_friend_update(glusterd_friend_sm_event_t *event, void *ctx)
GF_ASSERT(priv);
- rcu_read_lock();
+ RCU_READ_LOCK;
cur_peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
if (!cur_peerinfo) {
@@ -597,7 +597,7 @@ glusterd_ac_send_friend_update(glusterd_friend_sm_event_t *event, void *ctx)
gf_msg_debug("glusterd", 0, "Returning with %d", ret);
out:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (friends)
dict_unref(friends);
@@ -632,7 +632,7 @@ glusterd_ac_update_friend(glusterd_friend_sm_event_t *event, void *ctx)
GF_ASSERT(priv);
- rcu_read_lock();
+ RCU_READ_LOCK;
cur_peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
if (!cur_peerinfo) {
@@ -694,7 +694,7 @@ glusterd_ac_update_friend(glusterd_friend_sm_event_t *event, void *ctx)
gf_msg_debug(this->name, 0, "Returning with %d", ret);
out:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (friends)
dict_unref(friends);
@@ -801,13 +801,13 @@ glusterd_ac_handle_friend_remove_req(glusterd_friend_sm_event_t *event,
ret = glusterd_xfer_friend_remove_resp(ev_ctx->req, ev_ctx->hostname,
ev_ctx->port);
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
{
ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_REMOVE_FRIEND,
&new_event);
if (ret) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
goto out;
}
@@ -816,13 +816,13 @@ glusterd_ac_handle_friend_remove_req(glusterd_friend_sm_event_t *event,
ret = glusterd_friend_sm_inject_event(new_event);
if (ret) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
goto out;
}
new_event = NULL;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
glusterd_peer_detach_cleanup(priv);
out:
@@ -842,14 +842,14 @@ glusterd_ac_friend_remove(glusterd_friend_sm_event_t *event, void *ctx)
GF_ASSERT(event);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
if (!peerinfo) {
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
"Could not find peer %s(%s)", event->peername,
uuid_utoa(event->peerid));
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
goto out;
}
ret = glusterd_friend_remove_cleanup_vols(peerinfo->uuid);
@@ -857,7 +857,7 @@ glusterd_ac_friend_remove(glusterd_friend_sm_event_t *event, void *ctx)
gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_VOL_CLEANUP_FAIL,
"Volumes cleanup failed");
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
/* Exiting read critical section as glusterd_peerinfo_cleanup calls
* synchronize_rcu before freeing the peerinfo
*/
@@ -905,14 +905,14 @@ glusterd_ac_handle_friend_add_req(glusterd_friend_sm_event_t *event, void *ctx)
ev_ctx = ctx;
gf_uuid_copy(uuid, ev_ctx->uuid);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
if (!peerinfo) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
"Could not find peer %s(%s)", event->peername,
uuid_utoa(event->peerid));
ret = -1;
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
goto out;
}
@@ -922,7 +922,7 @@ glusterd_ac_handle_friend_add_req(glusterd_friend_sm_event_t *event, void *ctx)
*/
gf_uuid_copy(peerinfo->uuid, ev_ctx->uuid);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
conf = this->private;
GF_ASSERT(conf);
@@ -1047,7 +1047,7 @@ glusterd_friend_sm_transition_state(uuid_t peerid, char *peername,
GF_ASSERT(state);
GF_ASSERT(peername);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(peerid, peername);
if (!peerinfo) {
goto out;
@@ -1061,7 +1061,7 @@ glusterd_friend_sm_transition_state(uuid_t peerid, char *peername,
ret = 0;
out:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
return ret;
}
@@ -1413,7 +1413,7 @@ glusterd_friend_sm()
cds_list_del_init(&event->list);
event_type = event->event;
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
if (!peerinfo) {
@@ -1423,7 +1423,7 @@ glusterd_friend_sm()
glusterd_friend_sm_event_name_get(event_type));
GF_FREE(event);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
continue;
}
gf_msg_debug("glusterd", 0, "Dequeued event of type: '%s'",
@@ -1431,7 +1431,7 @@ glusterd_friend_sm()
old_state = peerinfo->state.state;
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
/* Giving up read-critical section here as we only need
* the current state to call the handler.
*
@@ -1489,10 +1489,10 @@ glusterd_friend_sm()
/* We need to obtain peerinfo reference once again as we
* had exited the read critical section above.
*/
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
if (!peerinfo) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
/* A peer can only be deleted as a effect of
* this state machine, and two such state
* machines can never run at the same time.
@@ -1518,7 +1518,7 @@ glusterd_friend_sm()
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEERINFO_CREATE_FAIL,
"Failed to store peerinfo");
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
glusterd_destroy_friend_event_context(event);
GF_FREE(event);
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
index 543d677f420..48af5e8355a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
@@ -186,7 +186,7 @@ glusterd_find_missed_snap(dict_t *rsp_dict, glusterd_volinfo_t *vol,
continue;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, peers, uuid_list)
{
if (gf_uuid_compare(peerinfo->uuid, brickinfo->uuid)) {
@@ -208,12 +208,12 @@ glusterd_find_missed_snap(dict_t *rsp_dict, glusterd_volinfo_t *vol,
"info for %s:%s in the "
"rsp_dict",
brickinfo->hostname, brickinfo->path);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
goto out;
}
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
brick_count++;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
index 57ad7ca501d..be5f072de66 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.c
+++ b/xlators/mgmt/glusterd/src/glusterd-store.c
@@ -4657,14 +4657,14 @@ glusterd_store_retrieve_peers(xlator_t *this)
args.mode = GD_MODE_ON;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
{
ret = glusterd_friend_rpc_create(this, peerinfo, &args);
if (ret)
break;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
peerinfo = NULL;
out:
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index 0bf03358ffd..bdb4b137d6b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -52,13 +52,13 @@ gd_collate_errors(struct syncargs *args, int op_ret, int op_errno,
args->op_ret = op_ret;
args->op_errno = op_errno;
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(peerid, NULL);
if (peerinfo)
peer_str = gf_strdup(peerinfo->hostname);
else
peer_str = gf_strdup(uuid_utoa(uuid));
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (op_errstr && strcmp(op_errstr, "")) {
len = snprintf(err_str, sizeof(err_str) - 1, "Error: %s",
@@ -560,7 +560,7 @@ _gd_syncop_mgmt_lock_cbk(struct rpc_req *req, struct iovec *iov, int count,
gf_uuid_copy(args->uuid, rsp.uuid);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(*peerid, NULL);
if (peerinfo) {
/* Set peer as locked, so we unlock only the locked peers */
@@ -573,7 +573,7 @@ _gd_syncop_mgmt_lock_cbk(struct rpc_req *req, struct iovec *iov, int count,
"ID %s",
uuid_utoa(*peerid));
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
op_ret = rsp.op_ret;
op_errno = rsp.op_errno;
@@ -661,7 +661,7 @@ _gd_syncop_mgmt_unlock_cbk(struct rpc_req *req, struct iovec *iov, int count,
gf_uuid_copy(args->uuid, rsp.uuid);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(*peerid, NULL);
if (peerinfo) {
peerinfo->locked = _gf_false;
@@ -672,7 +672,7 @@ _gd_syncop_mgmt_unlock_cbk(struct rpc_req *req, struct iovec *iov, int count,
"ID %s",
uuid_utoa(*peerid));
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
op_ret = rsp.op_ret;
op_errno = rsp.op_errno;
@@ -770,9 +770,9 @@ _gd_syncop_stage_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
}
}
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
ret = -1;
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
@@ -1072,9 +1072,9 @@ _gd_syncop_commit_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
}
}
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == 0);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
ret = -1;
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
@@ -1185,7 +1185,7 @@ gd_lock_op_phase(glusterd_conf_t *conf, glusterd_op_t op, dict_t *op_ctx,
synctask_barrier_init((&args));
peer_cnt = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -1209,7 +1209,7 @@ gd_lock_op_phase(glusterd_conf_t *conf, glusterd_op_t op, dict_t *op_ctx,
peer_uuid, txn_id);
peer_cnt++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -1315,7 +1315,7 @@ stage_done:
synctask_barrier_init((&args));
peer_cnt = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -1334,7 +1334,7 @@ stage_done:
req_dict, op_ctx);
peer_cnt++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -1441,7 +1441,7 @@ commit_done:
synctask_barrier_init((&args));
peer_cnt = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were available before the
@@ -1460,7 +1460,7 @@ commit_done:
req_dict, op_ctx);
peer_cnt++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -1520,7 +1520,7 @@ gd_unlock_op_phase(glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
peer_cnt = 0;
if (cluster_lock) {
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were available before
@@ -1541,7 +1541,7 @@ gd_unlock_op_phase(glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
peer_cnt++;
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
} else {
ret = dict_get_int32(op_ctx, "hold_global_locks", &global);
if (!ret && global)
@@ -1549,7 +1549,7 @@ gd_unlock_op_phase(glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
else
type = "vol";
if (volname || global) {
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Only send requests to peers who were
@@ -1568,7 +1568,7 @@ gd_unlock_op_phase(glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
tmp_uuid, txn_id);
peer_cnt++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
}
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index cdac6d5b8bf..d58121f91e9 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -10828,7 +10828,7 @@ glusterd_volume_rebalance_use_rsp_dict(dict_t *aggr, dict_t *rsp_dict)
node_uuid_str = gf_strdup(node_uuid);
/* Finding the index of the node-uuid in the peer-list */
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
peer_uuid_str = gd_peer_uuid_str(peerinfo);
@@ -10837,7 +10837,7 @@ glusterd_volume_rebalance_use_rsp_dict(dict_t *aggr, dict_t *rsp_dict)
current_index++;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
/* Setting the largest index value as the total count. */
ret = dict_get_int32n(ctx_dict, "count", SLEN("count"), &count);
@@ -13771,7 +13771,7 @@ glusterd_count_connected_peers(int32_t *count)
*count = 1;
- rcu_read_lock();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
{
/* Find peer who is connected and is a friend */
@@ -13780,7 +13780,7 @@ glusterd_count_connected_peers(int32_t *count)
(*count)++;
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
ret = 0;
out:
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index b0a7d9a448d..be1eed04a68 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -925,6 +925,20 @@ typedef ssize_t (*gd_serialize_t)(struct iovec outmsg, void *args);
*snap_volname_ptr = '\0'; \
} while (0)
+#define RCU_READ_LOCK \
+ pthread_mutex_lock(&(THIS->ctx)->cleanup_lock); \
+ { \
+ rcu_read_lock(); \
+ } \
+ pthread_mutex_unlock(&(THIS->ctx)->cleanup_lock);
+
+#define RCU_READ_UNLOCK \
+ pthread_mutex_lock(&(THIS->ctx)->cleanup_lock); \
+ { \
+ rcu_read_unlock(); \
+ } \
+ pthread_mutex_unlock(&(THIS->ctx)->cleanup_lock);
+
#define GLUSTERD_DUMP_PEERS(head, member, xpeers) \
do { \
glusterd_peerinfo_t *_peerinfo = NULL; \
@@ -933,7 +947,7 @@ typedef ssize_t (*gd_serialize_t)(struct iovec outmsg, void *args);
\
key = xpeers ? "glusterd.xaction_peer" : "glusterd.peer"; \
\
- rcu_read_lock(); \
+ RCU_READ_LOCK; \
cds_list_for_each_entry_rcu(_peerinfo, head, member) \
{ \
glusterd_dump_peer(_peerinfo, key, index, xpeers); \
@@ -941,7 +955,7 @@ typedef ssize_t (*gd_serialize_t)(struct iovec outmsg, void *args);
glusterd_dump_peer_rpcstat(_peerinfo, key, index); \
index++; \
} \
- rcu_read_unlock(); \
+ RCU_READ_UNLOCK; \
\
} while (0)
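As a usage illustration, a hypothetical caller after this patch follows the
pattern repeated throughout the hunks above (peer lookup shown; the names are
taken from the diff, but the snippet itself is not part of the commit):

    RCU_READ_LOCK;                    /* was: rcu_read_lock(); */
    peerinfo = glusterd_peerinfo_find(uuid, hostname);
    if (peerinfo == NULL) {
        RCU_READ_UNLOCK;              /* every early exit must leave the section */
        goto out;
    }
    /* ... read-only access to peerinfo under RCU protection ... */
    RCU_READ_UNLOCK;                  /* was: rcu_read_unlock(); */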