author     Atin Mukherjee <amukherj@redhat.com>   2016-07-26 16:01:56 +0530
committer  Atin Mukherjee <amukherj@redhat.com>   2016-08-22 06:10:44 -0700
commit     ca18f4bccd090e98ee5342ca05d3c0f9f94e9e2c (patch)
tree       4ea51c5424f1b0d927292c8b51177da99d1cd213 /xlators/mgmt/glusterd
parent     fcb5b70b1099d0379b40c81f35750df8bb9545a5 (diff)
glusterd: Add async events
As the eventing framework is already in the code, this patch captures the asynchronous
glusterd events that are important to notify to the higher layers which consume the
eventing framework. The work is split into two patches; this patch set covers the first
set of events.

Change-Id: Ie1bd4f6fa84117b26ccb4c75bc4dc68e6ef19134
BUG: 1360809
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: http://review.gluster.org/15015
Smoke: Gluster Build System <jenkins@build.gluster.org>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Rohan Kanade <rkanade@redhat.com>
Reviewed-by: Samikshan Bairagya <samikshan@gmail.com>
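
Every hunk in this patch follows the same pattern: on a failure (or notable state-change)
path, emit an asynchronous notification through gf_event() with a semicolon-separated
key=value payload, next to the existing gf_msg() log call. Below is a minimal sketch of
that pattern, assuming the libglusterfs eventing headers are available; start_service()
is a hypothetical stand-in for the real operation, and only the gf_event() call mirrors
what the patch actually adds.

#include "events.h"        /* gf_event() and the generated EVENT_* ids */

/* hypothetical operation standing in for the real service-manager call */
static int
start_service (const char *svc_name)
{
        (void) svc_name;
        return -1;         /* simulate a failure */
}

static int
manage_service (const char *svc_name)
{
        int ret = start_service (svc_name);

        if (ret)
                /* notify consumers of the eventing framework with a
                 * key=value payload, in addition to the usual log entry */
                gf_event (EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc_name);

        return ret;
}

Keeping the gf_event() call on the same branch as the corresponding gf_msg(), as the
hunks below do, keeps the emitted event and the log entry consistent.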
Diffstat (limited to 'xlators/mgmt/glusterd')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-bitd-svc.c      3
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c      59
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handshake.c     3
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-nfs-svc.c       3
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c        11
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-pmap.c          2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-quotad-svc.c    3
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-rebalance.c     5
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-svc-helper.c    4
9 files changed, 78 insertions(+), 15 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-bitd-svc.c b/xlators/mgmt/glusterd/src/glusterd-bitd-svc.c
index ee96ccbff80..f9294019234 100644
--- a/xlators/mgmt/glusterd/src/glusterd-bitd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-bitd-svc.c
@@ -106,6 +106,9 @@ glusterd_bitdsvc_manager (glusterd_svc_t *svc, void *data, int flags)
}
out:
+ if (ret)
+ gf_event (EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);
+
gf_msg_debug (THIS->name, 0, "Returning %d", ret);
return ret;
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 91ae6237c54..0085dce5d4c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -2771,6 +2771,8 @@ __glusterd_handle_friend_update (rpcsvc_request_t *req)
GD_MSG_REQ_FROM_UNKNOWN_PEER,
"Received friend update request "
"from unknown peer %s", uuid_utoa (friend_req.uuid));
+ gf_event (EVENT_UNKNOWN_PEER, "peer=%s",
+ uuid_utoa (friend_req.uuid));
goto out;
}
@@ -2867,10 +2869,13 @@ __glusterd_handle_friend_update (rpcsvc_request_t *req)
goto unlock;
}
ret = glusterd_store_peerinfo (peerinfo);
- if (ret)
+ if (ret) {
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_PEERINFO_CREATE_FAIL,
"Failed to store peerinfo");
+ gf_event (EVENT_PEER_STORE_FAILURE, "peer=%s",
+ peerinfo->hostname);
+ }
}
unlock:
rcu_read_unlock ();
@@ -3525,6 +3530,8 @@ glusterd_friend_rpc_create (xlator_t *this, glusterd_peerinfo_t *peerinfo,
GD_MSG_RPC_CREATE_FAIL,
"failed to create rpc for"
" peer %s", peerinfo->hostname);
+ gf_event (EVENT_PEER_RPC_CREATE_FAILED, "peer=%s",
+ peerinfo->hostname);
goto out;
}
peerctx = NULL;
@@ -3580,6 +3587,8 @@ glusterd_friend_add (const char *hoststr, int port,
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_PEERINFO_CREATE_FAIL,
"Failed to store peerinfo");
+ gf_event (EVENT_PEER_STORE_FAILURE, "peer=%s",
+ (*friend)->hostname);
}
}
@@ -3635,6 +3644,8 @@ glusterd_friend_add_from_peerinfo (glusterd_peerinfo_t *friend,
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_PEERINFO_CREATE_FAIL,
"Failed to store peerinfo");
+ gf_event (EVENT_PEER_STORE_FAILURE, "peer=%s",
+ friend->hostname);
}
}
@@ -4999,6 +5010,14 @@ __glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
switch (event) {
case RPC_CLNT_CONNECT:
+ ret = get_volinfo_from_brickid (brickid, &volinfo);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_VOLINFO_GET_FAIL,
+ "Failed to get volinfo from "
+ "brickid(%s)", brickid);
+ goto out;
+ }
/* If a node on coming back up, already starts a brick
* before the handshake, and the notification comes after
* the handshake is done, then we need to check if this
@@ -5012,15 +5031,6 @@ __glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
"Hence not starting the brick",
brickinfo->hostname,
brickinfo->path);
- ret = get_volinfo_from_brickid (brickid, &volinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLINFO_GET_FAIL,
- "Failed to get volinfo from "
- "brickid(%s)", brickid);
- goto out;
- }
-
ret = glusterd_brick_stop (volinfo, brickinfo,
_gf_false);
if (ret) {
@@ -5037,17 +5047,34 @@ __glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
gf_msg_debug (this->name, 0, "Connected to %s:%s",
brickinfo->hostname, brickinfo->path);
glusterd_set_brick_status (brickinfo, GF_BRICK_STARTED);
+ gf_event (EVENT_BRICK_CONNECTED, "peer=%s;volume=%s;brick=%s",
+ brickinfo->hostname, volinfo->volname,
+ brickinfo->path);
+
ret = default_notify (this, GF_EVENT_CHILD_UP, NULL);
break;
case RPC_CLNT_DISCONNECT:
rpc_clnt_unset_connected (&rpc->conn);
- if (glusterd_is_brick_started (brickinfo))
+ if (glusterd_is_brick_started (brickinfo)) {
gf_msg (this->name, GF_LOG_INFO, 0,
GD_MSG_BRICK_DISCONNECTED,
"Brick %s:%s has disconnected from glusterd.",
brickinfo->hostname, brickinfo->path);
+ ret = get_volinfo_from_brickid (brickid, &volinfo);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_VOLINFO_GET_FAIL,
+ "Failed to get volinfo from "
+ "brickid(%s)", brickid);
+ goto out;
+ }
+ gf_event (EVENT_BRICK_DISCONNECTED,
+ "peer=%s;volume=%s;brick=%s",
+ brickinfo->hostname, volinfo->volname,
+ brickinfo->path);
+ }
glusterd_set_brick_status (brickinfo, GF_BRICK_STOPPED);
break;
@@ -5174,6 +5201,11 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
"%s(%s)", peerctx->peername,
uuid_utoa (peerctx->peerid));
+ if (RPC_CLNT_CONNECT == event) {
+ gf_event (EVENT_PEER_NOT_FOUND, "peer=%s;uuid=%s",
+ peerctx->peername,
+ uuid_utoa (peerctx->peerid));
+ }
ret = -1;
goto out;
}
@@ -5188,6 +5220,8 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
peerinfo->generation = uatomic_add_return
(&conf->generation, 1);
peerctx->peerinfo_gen = peerinfo->generation;
+ gf_event (EVENT_PEER_CONNECT, "host=%s;uuid=%s",
+ peerinfo->hostname, uuid_utoa (peerinfo->uuid));
ret = glusterd_peer_dump_version (this, rpc, peerctx);
if (ret)
@@ -5212,6 +5246,9 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
"from glusterd.",
peerinfo->hostname, uuid_utoa (peerinfo->uuid),
glusterd_friend_sm_state_name_get (peerinfo->state.state));
+ gf_event (EVENT_PEER_DISCONNECT, "peer=%s;uuid=%s;state=%s",
+ peerinfo->hostname, uuid_utoa (peerinfo->uuid),
+ glusterd_friend_sm_state_name_get (peerinfo->state.state));
if (peerinfo->connected) {
if (conf->op_version < GD_OP_VERSION_3_6_0) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c
index 0ea66a027bf..9f162d8afc2 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handshake.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c
@@ -984,6 +984,7 @@ __server_event_notify (rpcsvc_request_t *req)
gf_msg ("glusterd", GF_LOG_ERROR, EINVAL,
GD_MSG_OP_UNSUPPORTED, "Unknown op received in event "
"notify");
+ gf_event (EVENT_NOTIFY_UNKNOWN_OP, "op=%d", args.op);
ret = -1;
break;
}
@@ -1118,6 +1119,8 @@ gd_validate_mgmt_hndsk_req (rpcsvc_request_t *req, dict_t *dict)
GD_MSG_HANDSHAKE_REQ_REJECTED, "Rejecting management "
"handshake request from unknown peer %s",
req->trans->peerinfo.identifier);
+ gf_event (EVENT_PEER_REJECT, "peer=%s",
+ req->trans->peerinfo.identifier);
return _gf_false;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c b/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c
index 60b792ffac2..c6ab0c5d521 100644
--- a/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c
@@ -95,6 +95,9 @@ glusterd_nfssvc_manager (glusterd_svc_t *svc, void *data, int flags)
goto out;
}
out:
+ if (ret)
+ gf_event (EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);
+
gf_msg_debug (THIS->name, 0, "Returning %d", ret);
return ret;
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index b06a5978540..a0904fb9634 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -2194,8 +2194,13 @@ glusterd_stop_bricks (glusterd_volinfo_t *volinfo)
cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
/*TODO: Need to change @del_brick in brick_stop to _gf_true
* once we enable synctask in peer rpc prog */
- if (glusterd_brick_stop (volinfo, brickinfo, _gf_false))
+ if (glusterd_brick_stop (volinfo, brickinfo, _gf_false)) {
+ gf_event (EVENT_BRICK_STOP_FAILED,
+ "peer=%s;volume=%s;brick=%s",
+ brickinfo->hostname, volinfo->volname,
+ brickinfo->path);
return -1;
+ }
}
return 0;
@@ -2217,6 +2222,10 @@ glusterd_start_bricks (glusterd_volinfo_t *volinfo)
"Failed to start %s:%s for %s",
brickinfo->hostname, brickinfo->path,
volinfo->volname);
+ gf_event (EVENT_BRICK_START_FAILED,
+ "peer=%s;volume=%s;brick=%s",
+ brickinfo->hostname, volinfo->volname,
+ brickinfo->path);
goto out;
}
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-pmap.c b/xlators/mgmt/glusterd/src/glusterd-pmap.c
index 6c65da79392..2c27473f190 100644
--- a/xlators/mgmt/glusterd/src/glusterd-pmap.c
+++ b/xlators/mgmt/glusterd/src/glusterd-pmap.c
@@ -217,7 +217,7 @@ pmap_assign_port (xlator_t *this, int old_port, const char *path)
GF_PMAP_PORT_BRICKSERVER, NULL);
if (ret) {
gf_msg (this->name, GF_LOG_WARNING,
- GD_MSG_PMAP_REGISTRY_REMOVE_FAIL, 0, "Failed toi"
+ GD_MSG_PMAP_REGISTRY_REMOVE_FAIL, 0, "Failed to"
"remove pmap registry for older signin for path"
" %s", path);
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-quotad-svc.c b/xlators/mgmt/glusterd/src/glusterd-quotad-svc.c
index f3475a3f0ec..3c457be96e5 100644
--- a/xlators/mgmt/glusterd/src/glusterd-quotad-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-quotad-svc.c
@@ -108,6 +108,9 @@ glusterd_quotadsvc_manager (glusterd_svc_t *svc, void *data, int flags)
}
}
out:
+ if (ret)
+ gf_event (EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);
+
gf_msg_debug (THIS->name, 0, "Returning %d", ret);
return ret;
diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
index 35fa4627d04..86e21f396ba 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
@@ -1124,10 +1124,13 @@ glusterd_defrag_event_notify_handle (dict_t *dict)
ret = glusterd_defrag_volume_status_update (volinfo, dict);
- if (ret)
+ if (ret) {
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_DEFRAG_STATUS_UPDATE_FAIL,
"Failed to update status");
+ gf_event (EVENT_DEFRAG_STATUS_UPDATE_FAILED, "volume=%s",
+ volinfo->volname);
+ }
out:
return ret;
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
index 44ee6d08d68..72f00922667 100644
--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
@@ -52,11 +52,13 @@ glusterd_svcs_reconfigure ()
ret = glusterd_bitdsvc_reconfigure ();
if (ret)
goto out;
-
ret = glusterd_scrubsvc_reconfigure ();
if (ret)
goto out;
out:
+ if (ret)
+ gf_event (EVENT_SVC_RECONFIGURE_FAILED, "");
+
return ret;
}