summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
-rw-r--r--libglusterfs/src/mem-types.h1
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-handler.c20
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c4
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-mgmt.c319
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-op-sm.c67
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-op-sm.h3
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-peer-utils.c13
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-sm.h1
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-syncop.c250
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-syncop.h9
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-utils.c19
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-utils.h14
-rw-r--r--xlators/mgmt/glusterd/src/glusterd.h3
13 files changed, 404 insertions, 319 deletions
diff --git a/libglusterfs/src/mem-types.h b/libglusterfs/src/mem-types.h
index f4d3974f0b2..fc06d52239b 100644
--- a/libglusterfs/src/mem-types.h
+++ b/libglusterfs/src/mem-types.h
@@ -150,7 +150,6 @@ enum gf_common_mem_types_ {
gf_common_mt_nfs_exports = 131,
gf_common_mt_gf_brick_spec_t = 132,
gf_common_mt_gf_timer_entry_t = 133,
- gf_common_mt_list_head_t = 134,
gf_common_mt_end
};
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 469c95c9890..ac69fc8712d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -626,22 +626,6 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
gf_log (this->name, GF_LOG_DEBUG, "Acquired lock on localhost");
local_locking_done:
- txn_op_info.local_xaction_peers =
- GF_CALLOC (1, sizeof (struct cds_list_head),
- gf_common_mt_list_head_t);
- if (!txn_op_info.local_xaction_peers) {
- ret = -1;
- gf_log (this->name, GF_LOG_ERROR, "Out of memory");
- goto out;
- }
- CDS_INIT_LIST_HEAD (txn_op_info.local_xaction_peers);
-
- /* Maintain xaction_peers on per transaction basis */
- npeers = gd_build_local_xaction_peers_list
- (&priv->peers,
- txn_op_info.local_xaction_peers,
- op);
-
/* If no volname is given as a part of the command, locks will
* not be held, hence sending stage event. */
if (volname || (priv->op_version < GD_OP_VERSION_3_6_0))
@@ -898,8 +882,8 @@ __glusterd_handle_stage_op (rpcsvc_request_t *req)
"No transaction's opinfo set");
state.state = GD_OP_STATE_LOCKED;
- glusterd_txn_opinfo_init (&txn_op_info, &state,
- &op_req.op, req_ctx->dict, req);
+ glusterd_txn_opinfo_init (&txn_op_info, &state, &op_req.op,
+ req_ctx->dict, req);
ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
if (ret) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
index a200c8e1230..859690eee65 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
@@ -93,8 +93,8 @@ glusterd_op_state_machine_mgmt_v3_lock (rpcsvc_request_t *req,
GF_ASSERT (this);
GF_ASSERT (req);
- glusterd_txn_opinfo_init (&txn_op_info, NULL, &lock_req->op,
- ctx->dict, req);
+ glusterd_txn_opinfo_init (&txn_op_info, NULL, &lock_req->op, ctx->dict,
+ req);
ret = glusterd_set_txn_opinfo (&lock_req->txn_id, &txn_op_info);
if (ret) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
index 2a34c78b35b..4e4dd047281 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
@@ -383,9 +383,9 @@ out:
int
glusterd_mgmt_v3_initiate_lockdown (glusterd_op_t op, dict_t *dict,
- char **op_errstr, int npeers,
+ char **op_errstr,
gf_boolean_t *is_acquired,
- struct cds_list_head *peers)
+ uint64_t txn_generation)
{
char *volname = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
@@ -394,9 +394,13 @@ glusterd_mgmt_v3_initiate_lockdown (glusterd_op_t op, dict_t *dict,
struct syncargs args = {0};
uuid_t peer_uuid = {0};
xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
this = THIS;
GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
+
GF_ASSERT (dict);
GF_ASSERT (op_errstr);
GF_ASSERT (is_acquired);
@@ -411,20 +415,36 @@ glusterd_mgmt_v3_initiate_lockdown (glusterd_op_t op, dict_t *dict,
*is_acquired = _gf_true;
- if (!npeers) {
- ret = 0;
- goto out;
- }
-
/* Sending mgmt_v3 lock req to other nodes in the cluster */
gd_syncargs_init (&args, NULL);
synctask_barrier_init((&args));
peer_cnt = 0;
- list_for_each_local_xaction_peers (peerinfo, peers) {
+
+ rcu_read_lock ();
+ cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > txn_generation)
+ continue;
+
+ if (!peerinfo->connected)
+ continue;
+ if (op != GD_OP_SYNC_VOLUME &&
+ peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
gd_mgmt_v3_lock (op, dict, peerinfo, &args,
MY_UUID, peer_uuid);
peer_cnt++;
}
+ rcu_read_unlock ();
+
+ if (0 == peer_cnt) {
+ ret = 0;
+ goto out;
+ }
+
gd_synctask_barrier_wait((&args), peer_cnt);
if (args.errstr)
@@ -633,8 +653,8 @@ out:
int
glusterd_mgmt_v3_pre_validate (glusterd_op_t op, dict_t *req_dict,
- char **op_errstr, int npeers,
- struct cds_list_head *peers)
+ char **op_errstr,
+ uint64_t txn_generation)
{
int32_t ret = -1;
int32_t peer_cnt = 0;
@@ -643,9 +663,13 @@ glusterd_mgmt_v3_pre_validate (glusterd_op_t op, dict_t *req_dict,
struct syncargs args = {0};
uuid_t peer_uuid = {0};
xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
this = THIS;
GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
+
GF_ASSERT (req_dict);
GF_ASSERT (op_errstr);
@@ -691,20 +715,36 @@ glusterd_mgmt_v3_pre_validate (glusterd_op_t op, dict_t *req_dict,
dict_unref (rsp_dict);
rsp_dict = NULL;
- if (!npeers) {
- ret = 0;
- goto out;
- }
-
/* Sending Pre Validation req to other nodes in the cluster */
gd_syncargs_init (&args, req_dict);
synctask_barrier_init((&args));
peer_cnt = 0;
- list_for_each_local_xaction_peers (peerinfo, peers) {
+
+ rcu_read_lock ();
+ cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > txn_generation)
+ continue;
+
+ if (!peerinfo->connected)
+ continue;
+ if (op != GD_OP_SYNC_VOLUME &&
+ peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
gd_mgmt_v3_pre_validate_req (op, req_dict, peerinfo, &args,
MY_UUID, peer_uuid);
peer_cnt++;
}
+ rcu_read_unlock ();
+
+ if (0 == peer_cnt) {
+ ret = 0;
+ goto out;
+ }
+
gd_synctask_barrier_wait((&args), peer_cnt);
if (args.op_ret) {
@@ -865,9 +905,8 @@ out:
}
int
-glusterd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *req_dict,
- char **op_errstr, int npeers,
- struct cds_list_head *peers)
+glusterd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *req_dict, char **op_errstr,
+ uint64_t txn_generation)
{
int32_t ret = -1;
int32_t peer_cnt = 0;
@@ -876,9 +915,13 @@ glusterd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *req_dict,
struct syncargs args = {0};
uuid_t peer_uuid = {0};
xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
this = THIS;
GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
+
GF_ASSERT (req_dict);
GF_ASSERT (op_errstr);
@@ -915,20 +958,36 @@ glusterd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *req_dict,
dict_unref (rsp_dict);
rsp_dict = NULL;
- if (!npeers) {
- ret = 0;
- goto out;
- }
-
/* Sending brick op req to other nodes in the cluster */
gd_syncargs_init (&args, NULL);
synctask_barrier_init((&args));
peer_cnt = 0;
- list_for_each_local_xaction_peers (peerinfo, peers) {
+
+ rcu_read_lock ();
+ cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > txn_generation)
+ continue;
+
+ if (!peerinfo->connected)
+ continue;
+ if (op != GD_OP_SYNC_VOLUME &&
+ peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
gd_mgmt_v3_brick_op_req (op, req_dict, peerinfo, &args,
MY_UUID, peer_uuid);
peer_cnt++;
}
+ rcu_read_unlock ();
+
+ if (0 == peer_cnt) {
+ ret = 0;
+ goto out;
+ }
+
gd_synctask_barrier_wait((&args), peer_cnt);
if (args.op_ret) {
@@ -1084,9 +1143,8 @@ out:
}
int
-glusterd_mgmt_v3_commit (glusterd_op_t op, dict_t *op_ctx,
- dict_t *req_dict, char **op_errstr,
- int npeers, struct cds_list_head *peers)
+glusterd_mgmt_v3_commit (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
+ char **op_errstr, uint64_t txn_generation)
{
int32_t ret = -1;
int32_t peer_cnt = 0;
@@ -1095,9 +1153,13 @@ glusterd_mgmt_v3_commit (glusterd_op_t op, dict_t *op_ctx,
struct syncargs args = {0};
uuid_t peer_uuid = {0};
xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
this = THIS;
GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
+
GF_ASSERT (op_ctx);
GF_ASSERT (req_dict);
GF_ASSERT (op_errstr);
@@ -1144,20 +1206,36 @@ glusterd_mgmt_v3_commit (glusterd_op_t op, dict_t *op_ctx,
dict_unref (rsp_dict);
rsp_dict = NULL;
- if (!npeers) {
- ret = 0;
- goto out;
- }
-
/* Sending commit req to other nodes in the cluster */
gd_syncargs_init (&args, op_ctx);
synctask_barrier_init((&args));
peer_cnt = 0;
- list_for_each_local_xaction_peers (peerinfo, peers) {
+
+ rcu_read_lock ();
+ cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > txn_generation)
+ continue;
+
+ if (!peerinfo->connected)
+ continue;
+ if (op != GD_OP_SYNC_VOLUME &&
+ peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
gd_mgmt_v3_commit_req (op, req_dict, peerinfo, &args,
MY_UUID, peer_uuid);
peer_cnt++;
}
+ rcu_read_unlock ();
+
+ if (0 == peer_cnt) {
+ ret = 0;
+ goto out;
+ }
+
gd_synctask_barrier_wait((&args), peer_cnt);
if (args.op_ret) {
@@ -1282,8 +1360,8 @@ out:
int
glusterd_mgmt_v3_post_validate (glusterd_op_t op, int32_t op_ret, dict_t *dict,
- dict_t *req_dict, char **op_errstr, int npeers,
- struct cds_list_head *peers)
+ dict_t *req_dict, char **op_errstr,
+ uint64_t txn_generation)
{
int32_t ret = -1;
int32_t peer_cnt = 0;
@@ -1292,9 +1370,13 @@ glusterd_mgmt_v3_post_validate (glusterd_op_t op, int32_t op_ret, dict_t *dict,
struct syncargs args = {0};
uuid_t peer_uuid = {0};
xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
this = THIS;
GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
+
GF_ASSERT (dict);
GF_VALIDATE_OR_GOTO (this->name, req_dict, out);
GF_ASSERT (op_errstr);
@@ -1335,20 +1417,36 @@ glusterd_mgmt_v3_post_validate (glusterd_op_t op, int32_t op_ret, dict_t *dict,
dict_unref (rsp_dict);
rsp_dict = NULL;
- if (!npeers) {
- ret = 0;
- goto out;
- }
-
/* Sending Post Validation req to other nodes in the cluster */
gd_syncargs_init (&args, req_dict);
synctask_barrier_init((&args));
peer_cnt = 0;
- list_for_each_local_xaction_peers (peerinfo, peers) {
+
+ rcu_read_lock ();
+ cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > txn_generation)
+ continue;
+
+ if (!peerinfo->connected)
+ continue;
+ if (op != GD_OP_SYNC_VOLUME &&
+ peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
gd_mgmt_v3_post_validate_req (op, op_ret, req_dict, peerinfo,
&args, MY_UUID, peer_uuid);
peer_cnt++;
}
+ rcu_read_unlock ();
+
+ if (0 == peer_cnt) {
+ ret = 0;
+ goto out;
+ }
+
gd_synctask_barrier_wait((&args), peer_cnt);
if (args.op_ret) {
@@ -1468,11 +1566,10 @@ out:
}
int
-glusterd_mgmt_v3_release_peer_locks (glusterd_op_t op,
- dict_t *dict, int32_t op_ret,
- char **op_errstr, int npeers,
+glusterd_mgmt_v3_release_peer_locks (glusterd_op_t op, dict_t *dict,
+ int32_t op_ret, char **op_errstr,
gf_boolean_t is_acquired,
- struct cds_list_head *peers)
+ uint64_t txn_generation)
{
int32_t ret = -1;
int32_t peer_cnt = 0;
@@ -1480,9 +1577,13 @@ glusterd_mgmt_v3_release_peer_locks (glusterd_op_t op,
xlator_t *this = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
struct syncargs args = {0};
+ glusterd_conf_t *conf = NULL;
this = THIS;
GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
+
GF_ASSERT (dict);
GF_ASSERT (op_errstr);
@@ -1491,20 +1592,36 @@ glusterd_mgmt_v3_release_peer_locks (glusterd_op_t op,
if (!is_acquired)
goto out;
- if (!npeers) {
- ret = 0;
- goto out;
- }
-
/* Sending mgmt_v3 unlock req to other nodes in the cluster */
gd_syncargs_init (&args, NULL);
synctask_barrier_init((&args));
peer_cnt = 0;
- list_for_each_local_xaction_peers (peerinfo, peers) {
+
+ rcu_read_lock ();
+ cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > txn_generation)
+ continue;
+
+ if (!peerinfo->connected)
+ continue;
+ if (op != GD_OP_SYNC_VOLUME &&
+ peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
gd_mgmt_v3_unlock (op, dict, peerinfo, &args,
MY_UUID, peer_uuid);
peer_cnt++;
}
+ rcu_read_unlock ();
+
+ if (0 == peer_cnt) {
+ ret = 0;
+ goto out;
+ }
+
gd_synctask_barrier_wait((&args), peer_cnt);
if (args.op_ret) {
@@ -1530,7 +1647,6 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
{
int32_t ret = -1;
int32_t op_ret = -1;
- int32_t npeers = 0;
dict_t *req_dict = NULL;
dict_t *tmp_dict = NULL;
glusterd_conf_t *conf = NULL;
@@ -1538,7 +1654,7 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
xlator_t *this = NULL;
gf_boolean_t is_acquired = _gf_false;
uuid_t *originator_uuid = NULL;
- struct cds_list_head xaction_peers = {0,};
+ uint64_t txn_generation = 0;
this = THIS;
GF_ASSERT (this);
@@ -1547,14 +1663,14 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
conf = this->private;
GF_ASSERT (conf);
- CDS_INIT_LIST_HEAD (&xaction_peers);
- npeers = gd_build_local_xaction_peers_list (&conf->peers,
- &xaction_peers, op);
- if (npeers == -1) {
- gf_log (this->name, GF_LOG_ERROR, "building local peers list "
- "failed");
- goto rsp;
- }
+ /* Save the peer list generation */
+ txn_generation = conf->generation;
+ cmm_smp_rmb ();
+ /* This read memory barrier makes sure that this assignment happens here
+ * only and is not reordered and optimized by either the compiler or the
+ * processor.
+ */
+
/* Save the MY_UUID as the originator_uuid. This originator_uuid
* will be used by is_origin_glusterd() to determine if a node
@@ -1594,8 +1710,7 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
/* LOCKDOWN PHASE - Acquire mgmt_v3 locks */
ret = glusterd_mgmt_v3_initiate_lockdown (op, dict, &op_errstr,
- npeers, &is_acquired,
- &xaction_peers);
+ &is_acquired, txn_generation);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "mgmt_v3 lockdown failed.");
goto out;
@@ -1612,17 +1727,16 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
}
/* PRE-COMMIT VALIDATE PHASE */
- ret = glusterd_mgmt_v3_pre_validate (op, req_dict,
- &op_errstr, npeers,
- &xaction_peers);
+ ret = glusterd_mgmt_v3_pre_validate (op, req_dict, &op_errstr,
+ txn_generation);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Pre Validation Failed");
goto out;
}
/* COMMIT OP PHASE */
- ret = glusterd_mgmt_v3_commit (op, dict, req_dict,
- &op_errstr, npeers, &xaction_peers);
+ ret = glusterd_mgmt_v3_commit (op, dict, req_dict, &op_errstr,
+ txn_generation);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Commit Op Failed");
goto out;
@@ -1633,9 +1747,8 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
commands other than snapshot. So as of now, I am
sending 0 (op_ret as 0).
*/
- ret = glusterd_mgmt_v3_post_validate (op, 0, dict, req_dict,
- &op_errstr, npeers,
- &xaction_peers);
+ ret = glusterd_mgmt_v3_post_validate (op, 0, dict, req_dict, &op_errstr,
+ txn_generation);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Post Validation Failed");
goto out;
@@ -1645,10 +1758,9 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
out:
op_ret = ret;
/* UNLOCK PHASE FOR PEERS*/
- (void) glusterd_mgmt_v3_release_peer_locks (op, dict,
- op_ret, &op_errstr,
- npeers, is_acquired,
- &xaction_peers);
+ (void) glusterd_mgmt_v3_release_peer_locks (op, dict, op_ret,
+ &op_errstr, is_acquired,
+ txn_generation);
/* LOCAL VOLUME(S) UNLOCK */
if (is_acquired) {
@@ -1660,12 +1772,10 @@ out:
op_ret = ret;
}
}
-rsp:
+
/* SEND CLI RESPONSE */
glusterd_op_send_cli_response (op, op_ret, 0, req, dict, op_errstr);
- gd_cleanup_local_xaction_peers_list (&xaction_peers);
-
if (req_dict)
dict_unref (req_dict);
@@ -1748,7 +1858,6 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
{
int32_t ret = -1;
int32_t op_ret = -1;
- int32_t npeers = 0;
dict_t *req_dict = NULL;
dict_t *tmp_dict = NULL;
glusterd_conf_t *conf = NULL;
@@ -1758,7 +1867,7 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
uuid_t *originator_uuid = NULL;
gf_boolean_t success = _gf_false;
char *cli_errstr = NULL;
- struct cds_list_head xaction_peers = {0,};
+ uint64_t txn_generation = 0;
this = THIS;
GF_ASSERT (this);
@@ -1767,14 +1876,13 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
conf = this->private;
GF_ASSERT (conf);
- CDS_INIT_LIST_HEAD (&xaction_peers);
- npeers = gd_build_local_xaction_peers_list (&conf->peers,
- &xaction_peers, op);
- if (npeers == -1) {
- gf_log (this->name, GF_LOG_ERROR, "building local peers list "
- "failed");
- goto rsp;
- }
+ /* Save the peer list generation */
+ txn_generation = conf->generation;
+ cmm_smp_rmb ();
+ /* This read memory barrier makes sure that this assignment happens here
+ * only and is not reordered and optimized by either the compiler or the
+ * processor.
+ */
/* Save the MY_UUID as the originator_uuid. This originator_uuid
* will be used by is_origin_glusterd() to determine if a node
@@ -1814,8 +1922,7 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
/* LOCKDOWN PHASE - Acquire mgmt_v3 locks */
ret = glusterd_mgmt_v3_initiate_lockdown (op, dict, &op_errstr,
- npeers, &is_acquired,
- &xaction_peers);
+ &is_acquired, txn_generation);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "mgmt_v3 lockdown failed.");
goto out;
@@ -1832,8 +1939,8 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
}
/* PRE-COMMIT VALIDATE PHASE */
- ret = glusterd_mgmt_v3_pre_validate (op, req_dict,
- &op_errstr, npeers, &xaction_peers);
+ ret = glusterd_mgmt_v3_pre_validate (op, req_dict, &op_errstr,
+ txn_generation);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Pre Validation Failed");
goto out;
@@ -1857,8 +1964,8 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
goto out;
}
- ret = glusterd_mgmt_v3_brick_op (op, req_dict,
- &op_errstr, npeers, &xaction_peers);
+ ret = glusterd_mgmt_v3_brick_op (op, req_dict, &op_errstr,
+ txn_generation);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Brick Ops Failed");
goto unbarrier;
@@ -1888,8 +1995,8 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
goto unbarrier;
}
- ret = glusterd_mgmt_v3_commit (op, dict, req_dict,
- &op_errstr, npeers, &xaction_peers);
+ ret = glusterd_mgmt_v3_commit (op, dict, req_dict, &op_errstr,
+ txn_generation);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Commit Op Failed");
/* If the main op fails, we should save the error string.
@@ -1914,8 +2021,8 @@ unbarrier:
goto out;
}
- ret = glusterd_mgmt_v3_brick_op (op, req_dict,
- &op_errstr, npeers, &xaction_peers);
+ ret = glusterd_mgmt_v3_brick_op (op, req_dict, &op_errstr,
+ txn_generation);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Brick Ops Failed");
@@ -1943,18 +2050,16 @@ out:
/* POST-COMMIT VALIDATE PHASE */
ret = glusterd_mgmt_v3_post_validate (op, op_ret, dict, req_dict,
- &op_errstr, npeers,
- &xaction_peers);
+ &op_errstr, txn_generation);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Post Validation Failed");
op_ret = -1;
}
/* UNLOCK PHASE FOR PEERS*/
- (void) glusterd_mgmt_v3_release_peer_locks (op, dict,
- op_ret, &op_errstr,
- npeers, is_acquired,
- &xaction_peers);
+ (void) glusterd_mgmt_v3_release_peer_locks (op, dict, op_ret,
+ &op_errstr, is_acquired,
+ txn_generation);
/* If the commit op (snapshot taking) failed, then the error is stored
in cli_errstr and unbarrier is called. Suppose, if unbarrier also
@@ -1978,12 +2083,10 @@ out:
op_ret = ret;
}
}
-rsp:
+
/* SEND CLI RESPONSE */
glusterd_op_send_cli_response (op, op_ret, 0, req, dict, op_errstr);
- gd_cleanup_local_xaction_peers_list (&xaction_peers);
-
if (req_dict)
dict_unref (req_dict);
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index d7694258301..5bfdb0bb43e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -119,13 +119,16 @@ glusterd_txn_opinfo_dict_fini ()
void
glusterd_txn_opinfo_init (glusterd_op_info_t *opinfo,
- glusterd_op_sm_state_info_t *state,
- glusterd_op_t *op,
- dict_t *op_ctx,
- rpcsvc_request_t *req)
+ glusterd_op_sm_state_info_t *state, glusterd_op_t *op,
+ dict_t *op_ctx, rpcsvc_request_t *req)
{
+ glusterd_conf_t *conf = NULL;
+
GF_ASSERT (opinfo);
+ conf = THIS->private;
+ GF_ASSERT (conf);
+
if (state)
opinfo->state = *state;
@@ -140,6 +143,9 @@ glusterd_txn_opinfo_init (glusterd_op_info_t *opinfo,
if (req)
opinfo->req = req;
+ opinfo->txn_generation = conf->generation;
+ cmm_smp_rmb ();
+
return;
}
@@ -314,9 +320,6 @@ glusterd_clear_txn_opinfo (uuid_t *txn_id)
dict_del(priv->glusterd_txn_opinfo, uuid_utoa (*txn_id));
- if (txn_op_info.local_xaction_peers)
- GF_FREE (txn_op_info.local_xaction_peers);
-
gf_log ("", GF_LOG_DEBUG,
"Successfully cleared opinfo for transaction ID : %s",
uuid_utoa (*txn_id));
@@ -2919,9 +2922,13 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
priv = this->private;
GF_ASSERT (priv);
- list_for_each_local_xaction_peers (peerinfo,
- opinfo.local_xaction_peers) {
- GF_ASSERT (peerinfo);
+ rcu_read_lock ();
+ cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > opinfo.txn_generation)
+ continue;
if (!peerinfo->connected || !peerinfo->mgmt)
continue;
@@ -2936,6 +2943,7 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
if (proc->fn) {
ret = proc->fn (NULL, this, peerinfo);
if (ret) {
+ rcu_read_unlock ();
gf_log (this->name, GF_LOG_WARNING,
"Failed to send lock request "
"for operation 'Volume %s' to "
@@ -2958,6 +2966,7 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
ret = dict_set_static_ptr (dict, "peerinfo",
peerinfo);
if (ret) {
+ rcu_read_unlock ();
gf_log (this->name, GF_LOG_ERROR,
"failed to set peerinfo");
dict_unref (dict);
@@ -2966,6 +2975,7 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
ret = proc->fn (NULL, this, dict);
if (ret) {
+ rcu_read_unlock ();
gf_log (this->name, GF_LOG_WARNING,
"Failed to send mgmt_v3 lock "
"request for operation "
@@ -2981,6 +2991,7 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
}
}
}
+ rcu_read_unlock ();
opinfo.pending_count = pending_count;
if (!opinfo.pending_count)
@@ -3009,9 +3020,13 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
priv = this->private;
GF_ASSERT (priv);
- list_for_each_local_xaction_peers (peerinfo,
- opinfo.local_xaction_peers) {
- GF_ASSERT (peerinfo);
+ rcu_read_lock ();
+ cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > opinfo.txn_generation)
+ continue;
if (!peerinfo->connected || !peerinfo->mgmt ||
!peerinfo->locked)
@@ -3083,6 +3098,7 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
}
}
}
+ rcu_read_unlock ();
opinfo.pending_count = pending_count;
if (!opinfo.pending_count)
@@ -3562,9 +3578,13 @@ glusterd_op_ac_send_stage_op (glusterd_op_sm_event_t *event, void *ctx)
if (op == GD_OP_REPLACE_BRICK)
glusterd_rb_use_rsp_dict (NULL, rsp_dict);
- list_for_each_local_xaction_peers (peerinfo,
- opinfo.local_xaction_peers) {
- GF_ASSERT (peerinfo);
+ rcu_read_lock ();
+ cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > opinfo.txn_generation)
+ continue;
if (!peerinfo->connected || !peerinfo->mgmt)
continue;
@@ -3577,6 +3597,7 @@ glusterd_op_ac_send_stage_op (glusterd_op_sm_event_t *event, void *ctx)
if (proc->fn) {
ret = dict_set_static_ptr (dict, "peerinfo", peerinfo);
if (ret) {
+ rcu_read_unlock ();
gf_log (this->name, GF_LOG_ERROR, "failed to "
"set peerinfo");
goto out;
@@ -3593,6 +3614,7 @@ glusterd_op_ac_send_stage_op (glusterd_op_sm_event_t *event, void *ctx)
pending_count++;
}
}
+ rcu_read_unlock ();
opinfo.pending_count = pending_count;
out:
@@ -4212,9 +4234,13 @@ glusterd_op_ac_send_commit_op (glusterd_op_sm_event_t *event, void *ctx)
goto out;
}
- list_for_each_local_xaction_peers (peerinfo,
- opinfo.local_xaction_peers) {
- GF_ASSERT (peerinfo);
+ rcu_read_lock ();
+ cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > opinfo.txn_generation)
+ continue;
if (!peerinfo->connected || !peerinfo->mgmt)
continue;
@@ -4227,6 +4253,7 @@ glusterd_op_ac_send_commit_op (glusterd_op_sm_event_t *event, void *ctx)
if (proc->fn) {
ret = dict_set_static_ptr (dict, "peerinfo", peerinfo);
if (ret) {
+ rcu_read_unlock ();
gf_log (this->name, GF_LOG_ERROR,
"failed to set peerinfo");
goto out;
@@ -4242,6 +4269,7 @@ glusterd_op_ac_send_commit_op (glusterd_op_sm_event_t *event, void *ctx)
pending_count++;
}
}
+ rcu_read_unlock ();
opinfo.pending_count = pending_count;
gf_log (this->name, GF_LOG_DEBUG, "Sent commit op req for 'Volume %s' "
@@ -4528,7 +4556,6 @@ glusterd_op_txn_complete (uuid_t *txn_id)
glusterd_op_clear_op ();
glusterd_op_reset_ctx ();
glusterd_op_clear_errstr ();
- gd_cleanup_local_xaction_peers_list (opinfo.local_xaction_peers);
/* Based on the op-version, we release the cluster or mgmt_v3 lock */
if (priv->op_version < GD_OP_VERSION_3_6_0) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.h b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
index 69bfd4c92a5..f6eaa372f35 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.h
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
@@ -103,8 +103,7 @@ struct glusterd_op_info_ {
int32_t op_errno;
char *op_errstr;
struct cds_list_head pending_bricks;
- struct cds_list_head *local_xaction_peers;
-
+ uint64_t txn_generation;
};
typedef struct glusterd_op_info_ glusterd_op_info_t;
diff --git a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
index 5f098839398..f3241e918f7 100644
--- a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
@@ -258,8 +258,15 @@ glusterd_peerinfo_t *
glusterd_peerinfo_new (glusterd_friend_sm_state_t state, uuid_t *uuid,
const char *hostname, int port)
{
- glusterd_peerinfo_t *new_peer = NULL;
- int ret = -1;
+ glusterd_peerinfo_t *new_peer = NULL;
+ int ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
new_peer = GF_CALLOC (1, sizeof (*new_peer), gf_gld_mt_peerinfo_t);
if (!new_peer)
@@ -297,6 +304,8 @@ glusterd_peerinfo_new (glusterd_friend_sm_state_t state, uuid_t *uuid,
new_peer->port = port;
pthread_mutex_init (&new_peer->delete_lock, NULL);
+
+ new_peer->generation = uatomic_add_return (&conf->generation, 1);
out:
if (ret && new_peer) {
glusterd_peerinfo_cleanup (new_peer);
diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.h b/xlators/mgmt/glusterd/src/glusterd-sm.h
index be137802a64..dceaa3e46b9 100644
--- a/xlators/mgmt/glusterd/src/glusterd-sm.h
+++ b/xlators/mgmt/glusterd/src/glusterd-sm.h
@@ -104,6 +104,7 @@ struct glusterd_peerinfo_ {
/* Members required for proper cleanup using RCU */
gd_rcu_head rcu_head;
pthread_mutex_t delete_lock;
+ uint64_t generation;
};
typedef struct glusterd_peerinfo_ glusterd_peerinfo_t;
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index a00438e6d96..c5066b015a3 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -1067,89 +1067,36 @@ out:
int
-gd_build_peers_list (struct cds_list_head *peers,
- struct cds_list_head *xact_peers, glusterd_op_t op)
+gd_lock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, dict_t *op_ctx,
+ char **op_errstr, uuid_t txn_id,
+ glusterd_op_info_t *txn_opinfo)
{
- glusterd_peerinfo_t *peerinfo = NULL;
- int npeers = 0;
+ int ret = -1;
+ int peer_cnt = 0;
+ uuid_t peer_uuid = {0};
+ xlator_t *this = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ struct syncargs args = {0};
- GF_ASSERT (peers);
- GF_ASSERT (xact_peers);
+ this = THIS;
+ synctask_barrier_init((&args));
+ peer_cnt = 0;
rcu_read_lock ();
- cds_list_for_each_entry_rcu (peerinfo, peers, uuid_list) {
- if (!peerinfo->connected)
+ cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > txn_opinfo->txn_generation)
continue;
- if (op != GD_OP_SYNC_VOLUME &&
- peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
- continue;
-
- cds_list_add_tail (&peerinfo->op_peers_list, xact_peers);
- npeers++;
- }
- rcu_read_unlock ();
-
- return npeers;
-}
-int
-gd_build_local_xaction_peers_list (struct cds_list_head *peers,
- struct cds_list_head *xact_peers,
- glusterd_op_t op)
-{
- glusterd_peerinfo_t *peerinfo = NULL;
- glusterd_local_peers_t *local_peers = NULL;
- int npeers = 0;
-
- GF_ASSERT (peers);
- GF_ASSERT (xact_peers);
-
- rcu_read_lock ();
- cds_list_for_each_entry_rcu (peerinfo, peers, uuid_list) {
if (!peerinfo->connected)
continue;
if (op != GD_OP_SYNC_VOLUME &&
peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
continue;
- local_peers = GF_CALLOC (1, sizeof (*local_peers),
- gf_gld_mt_local_peers_t);
- if (!local_peers) {
- npeers = -1;
- goto unlock;
- }
- CDS_INIT_LIST_HEAD (&local_peers->op_peers_list);
- local_peers->peerinfo = peerinfo;
- cds_list_add_tail (&local_peers->op_peers_list, xact_peers);
- npeers++;
- }
-unlock:
- rcu_read_unlock ();
-
- return npeers;
-}
-int
-gd_lock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, dict_t *op_ctx,
- char **op_errstr, int npeers, uuid_t txn_id,
- struct cds_list_head *peers)
-{
- int ret = -1;
- int peer_cnt = 0;
- uuid_t peer_uuid = {0};
- xlator_t *this = NULL;
- glusterd_peerinfo_t *peerinfo = NULL;
- struct syncargs args = {0};
-
- if (!npeers) {
- ret = 0;
- goto out;
- }
-
- this = THIS;
- synctask_barrier_init((&args));
- peer_cnt = 0;
- list_for_each_local_xaction_peers (peerinfo, peers) {
if (conf->op_version < GD_OP_VERSION_3_6_0) {
/* Reset lock status */
peerinfo->locked = _gf_false;
@@ -1160,6 +1107,13 @@ gd_lock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, dict_t *op_ctx,
MY_UUID, peer_uuid, txn_id);
peer_cnt++;
}
+ rcu_read_unlock ();
+
+ if (0 == peer_cnt) {
+ ret = 0;
+ goto out;
+ }
+
gd_synctask_barrier_wait((&args), peer_cnt);
if (args.op_ret) {
@@ -1187,15 +1141,15 @@ out:
}
int
-gd_stage_op_phase (struct cds_list_head *peers, glusterd_op_t op,
- dict_t *op_ctx, dict_t *req_dict, char **op_errstr,
- int npeers)
+gd_stage_op_phase (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
+ char **op_errstr, glusterd_op_info_t *txn_opinfo)
{
int ret = -1;
int peer_cnt = 0;
dict_t *rsp_dict = NULL;
char *hostname = NULL;
xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
uuid_t tmp_uuid = {0};
char *errstr = NULL;
@@ -1204,6 +1158,8 @@ gd_stage_op_phase (struct cds_list_head *peers, glusterd_op_t op,
this = THIS;
GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
rsp_dict = dict_new ();
if (!rsp_dict)
@@ -1252,21 +1208,36 @@ stage_done:
goto out;
}
- if (!npeers) {
- ret = 0;
- goto out;
- }
-
gd_syncargs_init (&args, aggr_dict);
synctask_barrier_init((&args));
peer_cnt = 0;
- list_for_each_local_xaction_peers (peerinfo, peers) {
+ rcu_read_lock ();
+ cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > txn_opinfo->txn_generation)
+ continue;
+
+ if (!peerinfo->connected)
+ continue;
+ if (op != GD_OP_SYNC_VOLUME &&
+ peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
ret = gd_syncop_mgmt_stage_op (peerinfo, &args,
MY_UUID, tmp_uuid,
op, req_dict, op_ctx);
peer_cnt++;
}
+ rcu_read_unlock ();
+
+ if (0 == peer_cnt) {
+ ret = 0;
+ goto out;
+ }
+
gf_log (this->name, GF_LOG_DEBUG, "Sent stage op req for 'Volume %s' "
"to %d peers", gd_op_list[op], peer_cnt);
@@ -1295,9 +1266,8 @@ out:
}
int
-gd_commit_op_phase (struct cds_list_head *peers, glusterd_op_t op,
- dict_t *op_ctx, dict_t *req_dict, char **op_errstr,
- int npeers)
+gd_commit_op_phase (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
+ char **op_errstr, glusterd_op_info_t *txn_opinfo)
{
dict_t *rsp_dict = NULL;
int peer_cnt = -1;
@@ -1305,12 +1275,17 @@ gd_commit_op_phase (struct cds_list_head *peers, glusterd_op_t op,
char *hostname = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
uuid_t tmp_uuid = {0};
char *errstr = NULL;
struct syncargs args = {0};
int type = GF_QUOTA_OPTION_TYPE_NONE;
this = THIS;
+ GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
+
rsp_dict = dict_new ();
if (!rsp_dict) {
ret = -1;
@@ -1359,21 +1334,36 @@ commit_done:
goto out;
}
- if (!npeers) {
- ret = 0;
- goto out;
- }
-
gd_syncargs_init (&args, op_ctx);
synctask_barrier_init((&args));
peer_cnt = 0;
- list_for_each_local_xaction_peers (peerinfo, peers) {
+ rcu_read_lock ();
+ cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > txn_opinfo->txn_generation)
+ continue;
+
+ if (!peerinfo->connected)
+ continue;
+ if (op != GD_OP_SYNC_VOLUME &&
+ peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
ret = gd_syncop_mgmt_commit_op (peerinfo, &args,
MY_UUID, tmp_uuid,
op, req_dict, op_ctx);
peer_cnt++;
}
+ rcu_read_unlock ();
+
+ if (0 == peer_cnt) {
+ ret = 0;
+ goto out;
+ }
+
gd_synctask_barrier_wait((&args), peer_cnt);
ret = args.op_ret;
if (args.errstr)
@@ -1399,8 +1389,8 @@ out:
int
gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
rpcsvc_request_t *req, dict_t *op_ctx, char *op_errstr,
- int npeers, char *volname, gf_boolean_t is_acquired,
- uuid_t txn_id, struct cds_list_head *peers)
+ char *volname, gf_boolean_t is_acquired, uuid_t txn_id,
+ glusterd_op_info_t *txn_opinfo)
{
glusterd_peerinfo_t *peerinfo = NULL;
uuid_t tmp_uuid = {0};
@@ -1412,11 +1402,6 @@ gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
this = THIS;
GF_ASSERT (this);
- if (!npeers) {
- ret = 0;
- goto out;
- }
-
/* If the lock has not been held during this
* transaction, do not send unlock requests */
if (!is_acquired) {
@@ -1428,7 +1413,21 @@ gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
peer_cnt = 0;
if (conf->op_version < GD_OP_VERSION_3_6_0) {
- list_for_each_local_xaction_peers (peerinfo, peers) {
+ rcu_read_lock ();
+ cds_list_for_each_entry_rcu (peerinfo, &conf->peers,
+ uuid_list) {
+ /* Only send requests to peers who were available before
+ * the transaction started
+ */
+ if (peerinfo->generation > txn_opinfo->txn_generation)
+ continue;
+
+ if (!peerinfo->connected)
+ continue;
+ if (op != GD_OP_SYNC_VOLUME &&
+ peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
/* Only unlock peers that were locked */
if (peerinfo->locked) {
gd_syncop_mgmt_unlock (peerinfo, &args,
@@ -1436,16 +1435,39 @@ gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
peer_cnt++;
}
}
+ rcu_read_unlock ();
} else {
if (volname) {
- list_for_each_local_xaction_peers (peerinfo, peers) {
+ rcu_read_lock ();
+ cds_list_for_each_entry_rcu (peerinfo, &conf->peers,
+ uuid_list) {
+ /* Only send requests to peers who were
+ * available before the transaction started
+ */
+ if (peerinfo->generation >
+ txn_opinfo->txn_generation)
+ continue;
+
+ if (!peerinfo->connected)
+ continue;
+ if (op != GD_OP_SYNC_VOLUME &&
+ peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
gd_syncop_mgmt_v3_unlock (op_ctx, peerinfo,
&args, MY_UUID,
tmp_uuid, txn_id);
peer_cnt++;
}
+ rcu_read_unlock ();
}
}
+
+ if (0 == peer_cnt) {
+ ret = 0;
+ goto out;
+ }
+
gd_synctask_barrier_wait((&args), peer_cnt);
ret = args.op_ret;
@@ -1585,7 +1607,6 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
{
int ret = -1;
int op_ret = -1;
- int npeers = 0;
dict_t *req_dict = NULL;
glusterd_conf_t *conf = NULL;
glusterd_op_t op = 0;
@@ -1596,7 +1617,6 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
xlator_t *this = NULL;
gf_boolean_t is_acquired = _gf_false;
uuid_t *txn_id = NULL;
- struct cds_list_head xaction_peers = {0,};
glusterd_op_info_t txn_opinfo = {{0},};
this = THIS;
@@ -1604,8 +1624,6 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
conf = this->private;
GF_ASSERT (conf);
- CDS_INIT_LIST_HEAD (&xaction_peers);
-
ret = dict_get_int32 (op_ctx, GD_SYNC_OPCODE_KEY, &tmp_op);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Failed to get volume "
@@ -1686,20 +1704,11 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
local_locking_done:
- /* Maintain xaction_peers on per transaction basis */
- npeers = gd_build_local_xaction_peers_list (&conf->peers,
- &xaction_peers, op);
- if (npeers == -1) {
- gf_log (this->name, GF_LOG_ERROR, "building local peers list "
- "failed");
- goto out;
- }
-
/* If no volname is given as a part of the command, locks will
* not be held */
if (volname || (conf->op_version < GD_OP_VERSION_3_6_0)) {
- ret = gd_lock_op_phase (conf, op, op_ctx, &op_errstr,
- npeers, *txn_id, &xaction_peers);
+ ret = gd_lock_op_phase (conf, op, op_ctx, &op_errstr, *txn_id,
+ &txn_opinfo);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Locking Peers Failed.");
@@ -1716,8 +1725,7 @@ local_locking_done:
goto out;
}
- ret = gd_stage_op_phase (&xaction_peers, op, op_ctx, req_dict,
- &op_errstr, npeers);
+ ret = gd_stage_op_phase (op, op_ctx, req_dict, &op_errstr, &txn_opinfo);
if (ret)
goto out;
@@ -1725,8 +1733,8 @@ local_locking_done:
if (ret)
goto out;
- ret = gd_commit_op_phase (&xaction_peers, op, op_ctx, req_dict,
- &op_errstr, npeers);
+ ret = gd_commit_op_phase (op, op_ctx, req_dict, &op_errstr,
+ &txn_opinfo);
if (ret)
goto out;
@@ -1734,11 +1742,9 @@ local_locking_done:
out:
op_ret = ret;
if (txn_id) {
- (void) gd_unlock_op_phase (conf, op, &op_ret, req,
- op_ctx, op_errstr,
- npeers, volname,
- is_acquired, *txn_id,
- &xaction_peers);
+ (void) gd_unlock_op_phase (conf, op, &op_ret, req, op_ctx,
+ op_errstr, volname, is_acquired,
+ *txn_id, &txn_opinfo);
/* Clearing the transaction opinfo */
ret = glusterd_clear_txn_opinfo (txn_id);
@@ -1751,8 +1757,6 @@ out:
glusterd_op_send_cli_response (op, op_ret, 0, req, op_ctx, op_errstr);
- gd_cleanup_local_xaction_peers_list (&xaction_peers);
-
if (volname)
GF_FREE (volname);
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.h b/xlators/mgmt/glusterd/src/glusterd-syncop.h
index d86a5ba2131..87a3c76f9fb 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.h
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.h
@@ -59,15 +59,6 @@ void
gd_synctask_barrier_wait (struct syncargs *args, int count);
int
-gd_build_peers_list (struct cds_list_head *peers,
- struct cds_list_head *xact_peers, glusterd_op_t op);
-
-int
-gd_build_local_xaction_peers_list (struct cds_list_head *peers,
- struct cds_list_head *xact_peers,
- glusterd_op_t op);
-
-int
gd_brick_op_phase (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
char **op_errstr);
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index aa27ebb18d3..afc3faaefb5 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -9557,22 +9557,3 @@ glusterd_list_add_order (struct cds_list_head *new, struct cds_list_head *head,
cds_list_add_rcu (new, rcu_dereference (pos->prev));
}
-
-void
-gd_cleanup_local_xaction_peers_list (struct cds_list_head *xact_peers)
-{
- glusterd_local_peers_t *local_peers = NULL;
- glusterd_local_peers_t *tmp = NULL;
-
- GF_ASSERT (xact_peers);
-
- if (cds_list_empty (xact_peers))
- return;
-
- cds_list_for_each_entry_safe (local_peers, tmp, xact_peers,
- op_peers_list) {
- GF_FREE (local_peers);
- /* local_peers->peerinfo need not be freed because it does not
- * ownership of peerinfo, but merely refer it */
- }
-}
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index 80c7c19d508..d2dbddec3f1 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -37,16 +37,6 @@
volinfo->volname, brickid);\
} while (0)
-#define list_for_each_local_xaction_peers(xact_peer, xact_peers_head) \
- glusterd_local_peers_t *pos = NULL; \
- for (pos = cds_list_entry ((xact_peers_head)->next, \
- glusterd_local_peers_t, op_peers_list), \
- xact_peer = pos->peerinfo; \
- &pos->op_peers_list != (xact_peers_head); \
- pos = cds_list_entry(pos->op_peers_list.next, \
- glusterd_local_peers_t, op_peers_list), \
- xact_peer = pos->peerinfo)
-
struct glusterd_lock_ {
uuid_t owner;
time_t timestamp;
@@ -670,8 +660,4 @@ void
glusterd_list_add_order (struct cds_list_head *new, struct cds_list_head *head,
int (*compare)(struct cds_list_head *,
struct cds_list_head *));
-
-void
-gd_cleanup_local_xaction_peers_list (struct cds_list_head *peers);
-
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index 480cfc66269..ff63cce2234 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -171,7 +171,8 @@ typedef struct {
char *snap_bricks_directory;
gf_store_handle_t *missed_snaps_list_shandle;
struct cds_list_head missed_snaps_list;
- int ping_timeout;
+ int ping_timeout;
+ uint64_t generation;
} glusterd_conf_t;
href='/cgit/glusterfs.git/tag/?h=v3.3.0qa27'>v3.3.0qa27commit 152a0194e7...Vijay Bellur14 years v3.2.6commit fafd5c17c0...Vijay Bellur14 years v3.2.6qa6commit fafd5c17c0...Vijay Bellur14 years v3.2.6qa5commit e657569da2...Vijay Bellur14 years v3.3.0qa26commit f6a779ffc5...Vijay Bellur14 years v3.2.6qa4commit 8127a6f35e...Vijay Bellur14 years v3.3.0qa25commit 468768d280...Vijay Bellur14 years v3.3.0qa24commit 88c6c11813...Vijay Bellur14 years v3.3.0qa23commit 42cc043875...Vijay Bellur14 years v3.3.0qa22commit c8d47f056e...Vijay Bellur14 years v3.2.6qa3commit cd3ad588f2...Anand Avati14 years v3.2.6qa2commit fa580e9299...Anand Avati14 years v3.3.0qa21commit 83a3daf7c2...Vijay Bellur14 years v3.3.0qa20commit 0694749c3e...Vijay Bellur14 years v3.2.6qa1commit 1020a3dfe9...Anand Avati14 years v3.3.0qa19commit be003fbb3a...Vijay Bellur14 years v3.3.0qa18commit d7d9f3d400...Vijay Bellur14 years v3.3.0qa17commit 0074f20844...Vijay Bellur14 years v3.3.0qa16commit 7235e5b1af...Vijay Bellur14 years v3.3.0qa15commit 289c2902d6...Vijay Bellur14 years v3.2.5commit edf9551b38...Vijay Bellur14 years v3.2.5qa9commit edf9551b38...Vijay Bellur14 years v3.2.5qa8commit 252c9e5cf2...Vijay Bellur14 years v3.2.5qa7commit d2a05724a6...Vijay Bellur14 years v3.2.5qa6commit 51601b2bff...Vijay Bellur14 years v3.2.5qa5commit 8668da9744...Vijay Bellur14 years v3.2.5qa4commit bca358604d...Vijay Bellur14 years v3.2.5qa3commit 3b0eecb53f...Vijay Bellur14 years v3.2.5qa2commit 7dcc94cf1f...Vijay Bellur14 years v3.2.5qa1commit 449f31c8ae...Vijay Bellur14 years v3.3.0qa14commit 4235f7a74e...Vijay Bellur15 years v3.2.4commit da73b31942...Vijay Bellur15 years v3.3.0qa13commit 795c8996c1...Vijay Bellur15 years v3.2.4qa5commit 6c5d3e40a6...Vijay Bellur15 years v3.3.0qa12commit 16b7e3bf20...Vijay Bellur15 years v3.2.4qa4commit edd9461647...Vijay Bellur15 years v3.3.0qa11commit 7658047903...Vijay Bellur15 years v3.3.0qa10commit 4765dd1a1c...Vijay Bellur15 years v3.2.4qa3commit 9564e09e53...Vijay Bellur15 
years v3.2.4qa2commit 0f9502d5eb...Vijay Bellur15 years v3.2.4qa1commit 6fe790ee35...Vijay Bellur15 years v3.3.0qa9commit b827cdb230...Vijay Bellur15 years v3.1.7commit a2739b842b...Vijay Bellur15 years v3.1.7qa4commit a2739b842b...Vijay Bellur15 years v3.1.7qa3commit f9fa468090...Vijay Bellur15 years v3.1.7qa2commit d120020fd5...Vijay Bellur15 years v3.1.7qa1commit 561bba7ae4...Vijay Bellur15 years v3.2.3commit 1acef91232...Vijay Bellur15 years v3.3beta2commit b827cdb230...Vijay Bellur15 years v3.3.0qa8commit b827cdb230...Vijay Bellur15 years v3.3.0qa7commit 601f5725a0...Vijay Bellur15 years v3.2.3qa6commit 1acef91232...Vijay Bellur15 years v3.3.0qa6commit b6e3e9c480...Vijay Bellur15 years v3.3.0qa5commit 5ace31ac21...Vijay Bellur15 years v3.2.3qa5commit 10f69943c4...Vijay Bellur15 years v3.3.0qa4commit 350ae611ca...Vijay Bellur15 years v3.2.3qa4commit 0564d1198b...Vijay Bellur15 years v3.2.3qa3commit 2f53b7857c...Vijay Bellur15 years v3.3.0qa3commit 6073fc29bf...Vijay Bellur15 years v3.3.0qa2commit a0071bdf2a...Vijay Bellur15 years v3.1.6commit 98a487f842...Vijay Bellur15 years v3.1.6qa8commit ef517191c5...Vijay Bellur15 years v3.3.0qa1commit 1b5a860f15...Vijay Bellur15 years v3.1.6qa7commit 05e3dcc9b1...Vijay Bellur15 years v3.2.3qa1commit 62adb4d1c2...Vijay Bellur15 years v3.1.6qa6commit c92f45c742...Anand Avati15 years v3.1.6qa5commit 0c01d96a06...Vijay Bellur15 years v3.1.6qa4commit dfc317a77f...Anand Avati15 years v3.1.6qa3commit 967199adb1...Anand Avati15 years v3.1.6qa2commit 7382534ac1...Anand Avati15 years v3.3beta1commit fd60df8798...Anand Avati15 years v3.2.2commit c82a9d438b...Anand Avati15 years v3.2.2qa8commit c82a9d438b...Anand Avati15 years v3.1.6qa1commit 0c9648c1a0...Anand Avati15 years v3.2.2qa7commit 972c4a3c34...Anand Avati15 years v3.2.2qa5commit 7685cec583...Anand Avati15 years v3.2.2qa4commit 817bda650c...Anand Avati15 years v3.2.2qa3commit 1b01b64894...Anand Avati15 years v3.2.2qa2commit 5c20eb3bbf...Vijay Bellur15 years v3.2.2qa1commit 
6ca8604204...Anand Avati15 years v3.1.5commit a64d1a8157...Anand Avati15 years v3.1.5qa4commit a64d1a8157...Vijay Bellur15 years v3.1.5qa3commit 5bcb4ddca3...Anand Avati15 years v3.1.5qa2commit 25da481bc5...Anand Avati15 years v3.2.1commit c5321286e5...Anand Avati15 years v3.2.1qa5commit c5321286e5...Anand Avati15 years v3.2.1qa4commit 8dee45b3a7...Anand Avati15 years v3.2.1qa3commit c51b2f7c6c...Anand Avati15 years v3.2.1qa2commit 05c4dced82...Anand Avati15 years v3.2.1qa1commit ef39bf9d23...Anand Avati15 years v3.1.5qa1commit 5f1efbc32d...Vijay Bellur15 years v3.0.8commit ee744e0908...Vijay Bellur15 years v3.0.8qa1commit ee744e0908...Vijay Bellur15 years v3.2.0commit 77f485dc30...Anand Avati15 years branchpoint-3.2commit 1f06da6875...Anand Avati15 years v3.2.0qa16commit 625f779dba...Anand Avati15 years v3.2.0qa15commit b5848ed21b...Anand Avati15 years v3.2.0qa14commit 72b57e311f...Anand Avati15 years v3.2.0qa13commit da66edbe92...Vijay Bellur15 years v3.2.0qa12commit 1c5706c43d...Anand Avati15 years v3.2.0qa11commit 902478bf9e...Anand Avati15 years v3.1.4commit 7b368061ea...Anand Vishweshwaran Avati15 years v3.2.0qa10commit 6db2b422f0...Vijay Bellur15 years v3.1.4qa3commit 7b368061ea...Vijay Bellur15 years v3.2.0qa9commit 56814fefa0...Vijay Bellur15 years v3.2.0qa8commit 35dea20e40...Vijay Bellur15 years v3.1.4qa2commit 2b55a49045...Vijay Bellur15 years v3.2.0qa7commit f338193a70...Vijay Bellur15 years v3.2.0qa6commit 498dbbc506...Vijay Bellur15 years v3.2.0qa5commit 408a2b0298...Vijay Bellur15 years v3.1.3solariscommit 9c0d73d37b...Anand V. 
Avati15 years v3.2.0qa4commit bd132d8e41...Vijay Bellur15 years v3.1.3commit 1641d8bb4c...Vijay Bellur15 years v3.1.3qa8commit c549807c23...Vijay Bellur15 years v3.1.3qa7commit 5017098718...Vijay Bellur15 years v3.1.3qa6commit 93845ea7cc...Vijay Bellur15 years v3.1.3qa5commit cad088fe3a...Vijay Bellur15 years v3.1.3qa4commit 135aca330b...Pranith K15 years v3.1.3qa3commit 5b909c83de...Vijay Bellur15 years v3.1.3qa2commit 77d82df9d5...Rahul15 years v3.1.3qa1commit b99e0e0678...Vijay Bellur15 years v3.1.2gsyncqa6commit 3bad56d0d3...Amar Tumballi15 years v3.1.2gsyncqa5commit a139e43f48...Mohammed Junaid Ahmed15 years v3.1.2gsyncqa4commit cbd61752ff...Raghavendra G15 years v3.1.2commit f2a067c4fe...Vijay Bellur15 years v3.1.2qa4commit 5368b898fa...Raghavendra G15 years v3.1.2qa3commit cbba1c3f55...Shehjar Tikoo15 years v3.1.2qa2commit df5f71b401...Amar Tumballi15 years v3.1.2qa1commit 147b20c4a4...Anand Avati15 years v3.0.7commit 6da4cc87ff...Anand V. Avati15 years v3.0.7qa2commit 6da4cc87ff...Raghavendra Bhat15 years v3.0.7qa1commit e602c69bed...Vijay Bellur15 years v3.1.1commit 69a62d2a6d...Anand V. 
Avati15 years v3.1.1qa11commit c0be54cfcd...Anand Avati15 years v3.1.1qa10commit b605865986...Shehjar Tikoo15 years v3.1.1qa9commit f6785d2b49...Anand Avati15 years v3.1.1qa8commit ce9f328aa9...Anand Avati15 years v3.1.1qa7commit 961fc917e8...shishir gowda15 years v3.1.1qa6commit d6f1f04ef0...Raghavendra G15 years v3.1.1qa5commit eaf0618e47...Anand Avati15 years v3.1.1qa4commit 8ca96737a9...shishir gowda15 years v3.1.1qa3commit 1b4613936e...Raghavendra Bhat15 years v3.1.1qa2commit c65be2d304...Shehjar Tikoo15 years v3.1.1qa1commit b2f195720b...Vijay Bellur15 years v3.0.6commit 5cbc81a8d3...Vijay Bellur15 years v3.0.6rc2commit 5cbc81a8d3...Pavan Sondur15 years v3.0.6rc1commit ef4005be3a...Vijay Bellur15 years v3.1.0commit 6e6b4b4fd0...Vijay Bellur16 years v3.1.0qa46commit f182151cf3...Vijay Bellur16 years v3.1.0qa45commit 27c8b7a369...Vijay Bellur16 years v3.1.0qa44commit 2eb9861cbc...Kaushik BV16 years v3.1.0qa43commit 13f1fff6da...Kaushik BV16 years v3.1.0qa42commit cd5c9df4b6...Pavan Sondur16 years v3.1.0qa41commit 4c7ca7ec15...Pranith K16 years v3.1.0qa40commit ca8615173f...Pranith K16 years v3.1.0qa39commit 609a89ceac...Kaushik BV16 years v3.1.0qa38commit 365c814f7b...Pranith K16 years v3.1.0qa37commit 17295c37f9...Amar Tumballi16 years v3.1.0qa36commit 760daf2889...Amar Tumballi16 years v3.1.0qa35commit 6686ddc227...Vijay Bellur16 years v3.1.0qa34commit dbbec1261e...Amar Tumballi16 years v3.1.0qa33commit 336e2df7b7...Shehjar Tikoo16 years v3.1.0qa32commit 0b68f788a8...Vijay Bellur16 years v3.1.0qa31commit 6e952607f1...Raghavendra G16 years v3.1.0betacommit c5a5fea9e6...Pavan Sondur16 years v3.1.0qa30commit c5a5fea9e6...Pavan Sondur16 years v3.1.0qa29commit 7f645c3ac3...Amar Tumballi16 years v3.1.0qa28commit 435603caeb...Amar Tumballi16 years v3.1.0qa27commit 6dbd618548...Raghavendra G16 years v3.1.0qa26commit 4e6fb304ce...Shehjar Tikoo16 years v3.1.0qa25commit 47bc630dca...Shehjar Tikoo16 years v3.1.0qa24commit 0e2c2f46dd...Raghavendra Bhat16 years 
v3.1.0qa23commit e7535ad313...Pranith Kumar K16 years v3.1.0qa22commit a9cbdd2916...Amar Tumballi16 years v3.1.0qa21commit 993edcc972...Balamurugan Arumugam16 years v3.1.0alphacommit 288040196c...Vijay Bellur16 years v3.1.0qa20commit c1f4f9ba17...Raghavendra Bhat16 years v3.1.0qa19commit 9b226cc588...Vijay Bellur16 years v3.1.0qa18commit 440ffb55f0...Pavan Sondur16 years v3.1.0qa17commit 37f01b2714...Raghavendra G16 years v3.1.0qa16commit 1e99540dc0...Pranith Kumar K16 years v3.1.0qa15commit b3a4a0e885...Vijay Bellur16 years v3.1.0qa14commit c02661a69d...Vijay Bellur16 years v3.1.0qa13commit 780023f5e5...Vijay Bellur16 years v3.1.0qa12commit e1afe36eb3...Amar Tumballi16 years v3.1.0qa11commit fb3cb751f1...Amar Tumballi16 years v3.1.0qa10commit 4a62b116ef...Vijay Bellur16 years v3.1.0qa9commit d13ddaf872...Anand V. Avati16 years v3.1.0qa8commit df4a7d7576...Anand V. Avati16 years v3.1.0prealpha4commit 12e997d863...Anand V. Avati16 years v3.1.0prealpha3commit f51252fa0d...Anand V. Avati16 years v3.1.0prealpha2commit 03df087149...Anand V. Avati16 years v3.1.0prealpha1commit 7e6b5454ad...Anand V. Avati16 years v3.1.0qa7commit ab72e06f7b...Anand V. Avati16 years v3.1.0qa6commit 0ec245abd6...Anand V. Avati16 years v3.1.0qa5commit 9349f559dc...Anand V. Avati16 years v3.1.0qa4commit 4f4dcb98a7...Pavan Sondur16 years v3.1.0qa3commit 543f9ef575...Anand V. Avati16 years v3.1.0qa2commit 931a59e2b9...Anand V. Avati16 years v3.0.5commit 002d35bfb1...Anand V. Avati16 years v3.0.5rc9commit 2e35a3eef6...Anand Avati16 years v3.0.5rc8commit e5d4a9bac5...Pavan Sondur16 years v3.0.5rc7commit da1123b9d8...Pavan Sondur16 years v3.0.5rc6commit 4437568045...Vijay Bellur16 years v3.0.5rc5commit c9676d181d...Anand V. Avati16 years v3.0.5rc4commit e338603747...Raghavendra G16 years v3.0.5rc3commit af5ac6eb2e...Anand V. 
Avati16 years v3.0.5rc2commit 6d9b11dba6...Raghavendra G16 years v2.0.10rc3commit b8f058432a...Pavan Sondur16 years v3.0.5rc1commit f55b20076b...Raghavendra Bhat16 years v2.0.10rc2commit 6607f92f57...Vijay Bellur16 years v3.0.4commit aaeddc5084...Anand V. Avati16 years v3.0.4rc5commit aaeddc5084...Anand Avati16 years v3.0.4rc4commit 6f67027d78...Vijay Bellur16 years v3.0.4rc3commit 391023ddc5...Raghavendra G16 years v3.0.4rc2commit 4cb614047e...Amar Tumballi16 years v3.0.4rc1commit 9aed760471...Vikas Gorur16 years v3.0.3commit 029062c103...Anand V. Avati16 years v3.0.3rc2commit 029062c103...Pavan Sondur16 years v3.0.3rc1commit 789a2aa227...Raghavendra G16 years v3.0.2commit 15043b6d97...Anand V. Avati16 years v2.0.10rc1commit 2d85ef645f...Raghavendra G16 years v3.0.2rc1commit c15449aaae...Harshavardhana Ranganath16 years v3.0.1commit 4c20b5377e...Anand V. Avati16 years v3.0.1rc5commit 899b89a8c2...Raghavendra G16 years v3.0.1rc4commit 546168723f...Anand Avati16 years v3.0.1rc3commit 375f08e1b5...Anand Avati16 years v3.0.1rc2commit 84fe79c086...Vikas Gorur16 years v3.0.1rc1commit e6f074f931...Amar Tumballi16 years v2.0.9commit 7e1ba386dd...Anand V. Avati16 years v3.0.0commit 8379edd978...Anand V. Avati16 years v2.0.8commit 1a53a5a4bf...Anand V. Avati16 years v3.0.0pre1commit f8a56c6322...Vijay Bellur16 years v2.0.7commit 7ba890140f...Anand V. Avati17 years v2.0.6commit 8dfdde57b3...Anand V. Avati17 years v2.0.5commit 683fda4bf0...Anand V. Avati17 years v2.0.4commit 55f476455c...Anand V. Avati17 years v2.0.3commit b470684cbf...Anand V. Avati17 years v2.0.2commit 01b9e59055...Vikas Gorur17 years tag-release-2.0commit 4d4cfc6e45...Anand V. Avati17 years 2.0.0commit 7b2e459db6...Anand V. Avati17 years 2.0.1commit 5c1d9108c1...Anand V. 
Avati17 years 2.0.0rc9commit 689347f278...Vikas Gorur17 years 2.0.0rc8commit 82394d4848...Vikas Gorur17 years 2.0.0rc7commit 4e5c297d7c...Raghavendra G17 years 2.0.0rc6commit 270621b34a...Amar Tumballi17 years 2.0.0rc5commit c20359b5b2...Amar Tumballi17 years 2.0.0rc3commit b6bf3b8d6e...Harshavardhana17 years 2.0.0rc2commit d47eb5d681...Anand V. Avati17 years