From 3b1ecc6a7fd961c709e82862fd4760b223365863 Mon Sep 17 00:00:00 2001
From: Krishnan Parthasarathi
Date: Mon, 22 Apr 2013 12:27:07 +0530
Subject: glusterd: Removed 'proactive' failing of volume op

Volume operations were failed 'proactively' on the first disconnect of
a peer that was participating in the transaction. The reason behind
having this kludgey code in the first place was to 'abort' an ongoing
volume operation as soon as the first disconnect was perceived. But the
rpc callbacks themselves are capable of injecting the appropriate state
machine events, which set things in motion for an eventual abort of the
transaction.

Change-Id: Iad7cb2bd076f22d89a793dfcd08c2d208b39c4be
BUG: 847214
Signed-off-by: Krishnan Parthasarathi
Reviewed-on: http://review.gluster.org/4869
Reviewed-by: Jeff Darcy
Tested-by: Gluster Build System
Reviewed-by: Vijay Bellur
---
 xlators/mgmt/glusterd/src/glusterd-handler.c | 56 ++--------------------------------------------------
 1 file changed, 3 insertions(+), 53 deletions(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index faba30221fd..e21b67e6d11 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -3423,7 +3423,6 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
         int                   ret           = 0;
         glusterd_peerinfo_t  *peerinfo      = NULL;
         glusterd_peerctx_t   *peerctx       = NULL;
-        uuid_t               *peer_uuid     = NULL;
         gf_boolean_t          quorum_action = _gf_false;
 
         peerctx = mydata;
@@ -3459,64 +3458,15 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
                         peerinfo->quorum_action = _gf_false;
                 }
 
-                // Remove peer if it is not a friend and connection/handshake
-                // fails, and notify cli. Happens only during probe.
+                /* Remove peer if it is not a friend and connection/handshake
+                 * fails, and notify cli. Happens only during probe.
+                 */
                 if (peerinfo->state.state == GD_FRIEND_STATE_DEFAULT) {
                         glusterd_friend_remove_notify (peerctx);
                         goto out;
                 }
 
-                /*
-                   local glusterd (thinks that it) is the owner of the cluster
-                   lock and 'fails' the operation on the first disconnect from
-                   a peer.
-                */
-                if (peerinfo->connected) {
-                        /*TODO: The following is needed till all volume
-                         * operations are synctaskized.
-                         * */
-                        if (is_origin_glusterd ()) {
-                                switch (glusterd_op_get_op ()) {
-                                case GD_OP_START_VOLUME:
-                                case GD_OP_ADD_BRICK:
-                                case GD_OP_REMOVE_BRICK:
-                                case GD_OP_STATUS_VOLUME:
-                                        break;
-
-                                default:
-                                        ret = glusterd_op_sm_inject_event
-                                              (GD_OP_EVENT_START_UNLOCK, NULL);
-                                        if (ret)
-                                                gf_log (this->name,
-                                                        GF_LOG_ERROR,
-                                                        "Unable to enqueue "
-                                                        "cluster unlock event");
-
-                                        break;
-                                }
-
-                        } else {
-                                peer_uuid = GF_CALLOC (1, sizeof (*peer_uuid),
-                                                       gf_common_mt_char);
-
-                                if (peer_uuid) {
-                                        uuid_copy (*peer_uuid, peerinfo->uuid);
-                                        ret = glusterd_op_sm_inject_event
-                                              (GD_OP_EVENT_LOCAL_UNLOCK_NO_RESP,
-                                               peer_uuid);
-                                        if (ret)
-                                                gf_log (this->name,
-                                                        GF_LOG_ERROR,
-                                                        "Unable to enqueue "
-                                                        "local lock flush "
-                                                        "event.");
-                                }
-                        }
-
-                }
-
                 peerinfo->connected = 0;
-                //default_notify (this, GF_EVENT_CHILD_DOWN, NULL);
                 break;
         }
         default:
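
The mechanism the commit message leans on can be hard to picture without
the rest of glusterd in view. The toy C program below is a minimal sketch
of the same pattern: the rpc disconnect callback does not fail the
operation directly; it only injects an event into an op state machine,
and draining the machine's queue produces the eventual abort. Every name
in it (op_sm_inject_event, op_sm_run, the OP_EVENT_*/OP_STATE_* values,
peer_rpc_notify_disconnect) is an illustrative stand-in, not glusterd's
actual API.

/* Minimal sketch, not glusterd code: a disconnect callback that only
 * injects an event, leaving the abort to the state machine. */

#include <stdio.h>

typedef enum {
        OP_EVENT_START = 0,     /* transaction begins */
        OP_EVENT_RCVD_ACC,      /* peer accepted the request */
        OP_EVENT_RCVD_RJT       /* peer rejected, or its rpc went down */
} op_event_t;

typedef enum {
        OP_STATE_DEFAULT = 0,
        OP_STATE_IN_PROGRESS,
        OP_STATE_COMMITTED,
        OP_STATE_ABORTED
} op_state_t;

#define QSIZE 16

static op_state_t cur_state = OP_STATE_DEFAULT;
static op_event_t queue[QSIZE];
static int        qhead, qtail;

/* Stand-in for an event-injection primitive: enqueue only, no state
 * transition happens here. */
static int
op_sm_inject_event (op_event_t event)
{
        if ((qtail + 1) % QSIZE == qhead)
                return -1;      /* queue full */
        queue[qtail] = event;
        qtail = (qtail + 1) % QSIZE;
        return 0;
}

/* Drain the queue, applying one transition per event. */
static void
op_sm_run (void)
{
        while (qhead != qtail) {
                op_event_t event = queue[qhead];

                qhead = (qhead + 1) % QSIZE;
                switch (event) {
                case OP_EVENT_START:
                        cur_state = OP_STATE_IN_PROGRESS;
                        break;
                case OP_EVENT_RCVD_ACC:
                        if (cur_state == OP_STATE_IN_PROGRESS)
                                cur_state = OP_STATE_COMMITTED;
                        break;
                case OP_EVENT_RCVD_RJT:
                        /* the "eventual abort" the message refers to */
                        cur_state = OP_STATE_ABORTED;
                        break;
                }
        }
}

/* What a peer rpc DISCONNECT notification does in this scheme:
 * no direct failure of the op, just an event injection. */
static void
peer_rpc_notify_disconnect (void)
{
        if (op_sm_inject_event (OP_EVENT_RCVD_RJT))
                fprintf (stderr, "Unable to enqueue abort event\n");
}

int
main (void)
{
        op_sm_inject_event (OP_EVENT_START);
        peer_rpc_notify_disconnect (); /* peer dies mid-transaction */
        op_sm_run ();

        printf ("final state: %s\n",
                cur_state == OP_STATE_ABORTED ? "ABORTED" : "not aborted");
        return 0;
}

The appeal of the indirection, as the commit message argues, is that a
disconnect becomes just one more input to the same machine that handles
every other transaction event, serialized with in-flight responses,
which is what makes the removed 'proactive' failure path unnecessary.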