author:    Atin Mukherjee <amukherj@redhat.com>  2016-08-12 10:22:17 +0530
committer: Atin Mukherjee <amukherj@redhat.com>  2016-08-23 07:48:01 -0700
commit:    063a234e6265265606425449da1d6c2f97fbf457
tree:      66f0cc09c46c37da8d077d3315f69687339b87c7 /xlators/mgmt
parent:    1c4c75c358df745cc73b73bf2ee08e5c5d0b598f
glusterd: add async events (part 2)
Change-Id: I7a5687143713c283f0051aac2383f780e3e43646
BUG: 1360809
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: http://review.gluster.org/15153
Smoke: Gluster Build System <jenkins@build.gluster.org>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Samikshan Bairagya <samikshan@gmail.com>
Diffstat (limited to 'xlators/mgmt')
 xlators/mgmt/glusterd/src/glusterd-rebalance.c     |  2
 xlators/mgmt/glusterd/src/glusterd-scrub-svc.c     |  2
 xlators/mgmt/glusterd/src/glusterd-server-quorum.c |  2
 xlators/mgmt/glusterd/src/glusterd-shd-svc.c       |  2
 xlators/mgmt/glusterd/src/glusterd-snapd-svc.c     | 42
 xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c      |  3
 xlators/mgmt/glusterd/src/glusterd-utils.c         | 60
 7 files changed, 86 insertions(+), 27 deletions(-)
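This commit wires more of glusterd's failure and connect/disconnect paths into the gf_events framework: the service managers, server-quorum transitions, snapd and common RPC notifications, friend-volume import, brick start and rebalance restart now raise an async event alongside the existing gf_msg() log. A minimal sketch of the recurring failure-path pattern follows; the header names are assumed from libglusterfs of this era, and example_svc_manager()/example_svc_start() are hypothetical stand-ins, not functions from this patch.

/* Minimal sketch (not part of the patch) of the failure-path eventing
 * pattern this change applies across glusterd.  Headers and the
 * gf_event() API are assumed from libglusterfs; example_svc_manager()
 * and example_svc_start() are hypothetical stand-ins for the real
 * per-service manager callbacks touched below. */
#include "events.h"             /* gf_event(), EVENT_SVC_MANAGER_FAILED */
#include "logging.h"            /* gf_msg_debug(), THIS */
#include "glusterd-svc-mgmt.h"  /* glusterd_svc_t */

/* hypothetical stand-in for the real per-service start routine */
static int
example_svc_start (glusterd_svc_t *svc, int flags)
{
        (void) svc;
        (void) flags;
        return 0;
}

static int
example_svc_manager (glusterd_svc_t *svc, void *data, int flags)
{
        int ret = -1;

        (void) data;

        ret = example_svc_start (svc, flags);
        if (ret)
                goto out;

        /* ... the real managers also (re)configure and stop services ... */

out:
        if (ret)
                /* key=value payload, picked up by the gf_events framework */
                gf_event (EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);

        gf_msg_debug (THIS->name, 0, "Returning %d", ret);
        return ret;
}

The same "if (ret) gf_event (...)" shape at the out: label recurs in each manager changed below, with key=value payloads (volume, svc_name, peer, brick) so event consumers can identify the affected object.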
diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
index 86e21f396ba..d77e2057cbd 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
@@ -1128,7 +1128,7 @@ glusterd_defrag_event_notify_handle (dict_t *dict)
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_DEFRAG_STATUS_UPDATE_FAIL,
"Failed to update status");
- gf_event (EVENT_DEFRAG_STATUS_UPDATE_FAILED, "volume=%s",
+ gf_event (EVENT_REBALANCE_STATUS_UPDATE_FAILED, "volume=%s",
volinfo->volname);
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-scrub-svc.c b/xlators/mgmt/glusterd/src/glusterd-scrub-svc.c
index 3761dbadfd1..7544529f785 100644
--- a/xlators/mgmt/glusterd/src/glusterd-scrub-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-scrub-svc.c
@@ -101,6 +101,8 @@ glusterd_scrubsvc_manager (glusterd_svc_t *svc, void *data, int flags)
}
out:
+ if (ret)
+ gf_event (EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);
gf_msg_debug (THIS->name, 0, "Returning %d", ret);
return ret;
diff --git a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
index ecf9d53b71e..35f6ad19f71 100644
--- a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
+++ b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
@@ -339,11 +339,13 @@ glusterd_do_volume_quorum_action (xlator_t *this, glusterd_volinfo_t *volinfo,
GD_MSG_SERVER_QUORUM_MET_STARTING_BRICKS,
"Server quorum regained for volume %s. Starting local "
"bricks.", volinfo->volname);
+ gf_event (EVENT_QUORUM_REGAINED, "volume=%s", volinfo->volname);
} else if (quorum_status == DOESNT_MEET_QUORUM) {
gf_msg (this->name, GF_LOG_CRITICAL, 0,
GD_MSG_SERVER_QUORUM_LOST_STOPPING_BRICKS,
"Server quorum lost for volume %s. Stopping local "
"bricks.", volinfo->volname);
+ gf_event (EVENT_QUORUM_LOST, "volume=%s", volinfo->volname);
}
list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
index 0e664b5c786..e0135ea2be3 100644
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
@@ -136,6 +136,8 @@ glusterd_shdsvc_manager (glusterd_svc_t *svc, void *data, int flags)
}
}
out:
+ if (ret)
+ gf_event (EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);
gf_msg_debug (THIS->name, 0, "Returning %d", ret);
return ret;
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c
index 36e4a196845..acb24fff187 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c
@@ -204,6 +204,10 @@ glusterd_snapdsvc_manager (glusterd_svc_t *svc, void *data, int flags)
}
out:
+ if (ret) {
+ gf_event (EVENT_SVC_MANAGER_FAILED, "volume=%s;svc_name=%s",
+ volinfo->volname, svc->name);
+ }
gf_msg_debug (THIS->name, 0, "Returning %d", ret);
return ret;
@@ -347,6 +351,9 @@ glusterd_snapdsvc_restart ()
GD_MSG_SNAPD_START_FAIL,
"Couldn't resolve snapd for "
"vol: %s on restart", volinfo->volname);
+ gf_event (EVENT_SVC_MANAGER_FAILED,
+ "volume=%s;svc_name=%s",
+ volinfo->volname, svc->name);
goto out;
}
}
@@ -373,11 +380,28 @@ glusterd_snapdsvc_rpc_notify (glusterd_conn_t *conn, rpc_clnt_event_t event)
GD_MSG_SVC_GET_FAIL, "Failed to get the service");
return -1;
}
+ snapd = cds_list_entry (svc, glusterd_snapdsvc_t, svc);
+ if (!snapd) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_SNAPD_OBJ_GET_FAIL, "Failed to get the "
+ "snapd object");
+ return -1;
+ }
+
+ volinfo = cds_list_entry (snapd, glusterd_volinfo_t, snapd);
+ if (!volinfo) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_VOLINFO_GET_FAIL, "Failed to get the "
+ "volinfo object");
+ return -1;
+ }
switch (event) {
case RPC_CLNT_CONNECT:
gf_msg_debug (this->name, 0, "%s has connected with "
"glusterd.", svc->name);
+ gf_event (EVENT_SVC_CONNECTED, "volume=%s;svc_name=%s",
+ volinfo->volname, svc->name);
svc->online = _gf_true;
break;
@@ -386,26 +410,14 @@ glusterd_snapdsvc_rpc_notify (glusterd_conn_t *conn, rpc_clnt_event_t event)
gf_msg (this->name, GF_LOG_INFO, 0,
GD_MSG_NODE_DISCONNECTED, "%s has disconnected "
"from glusterd.", svc->name);
+ gf_event (EVENT_SVC_DISCONNECTED,
+ "volume=%s;svc_name=%s", volinfo->volname,
+ svc->name);
svc->online = _gf_false;
}
break;
case RPC_CLNT_DESTROY:
- snapd = cds_list_entry (svc, glusterd_snapdsvc_t, svc);
- if (!snapd) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SNAPD_OBJ_GET_FAIL, "Failed to get the "
- "snapd object");
- return -1;
- }
-
- volinfo = cds_list_entry (snapd, glusterd_volinfo_t, snapd);
- if (!volinfo) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLINFO_GET_FAIL, "Failed to get the "
- "volinfo object");
- return -1;
- }
glusterd_volinfo_unref (volinfo);
default:
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
index 454c2a453b2..d6e57a432cd 100644
--- a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
@@ -316,6 +316,7 @@ glusterd_svc_common_rpc_notify (glusterd_conn_t *conn,
case RPC_CLNT_CONNECT:
gf_msg_debug (this->name, 0, "%s has connected with "
"glusterd.", svc->name);
+ gf_event (EVENT_SVC_CONNECTED, "svc_name=%s", svc->name);
svc->online = _gf_true;
break;
@@ -324,6 +325,8 @@ glusterd_svc_common_rpc_notify (glusterd_conn_t *conn,
gf_msg (this->name, GF_LOG_INFO, 0,
GD_MSG_NODE_DISCONNECTED, "%s has disconnected "
"from glusterd.", svc->name);
+ gf_event (EVENT_SVC_DISCONNECTED, "svc_name=%s",
+ svc->name);
svc->online = _gf_false;
}
break;
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 0e0e36e6d21..a458a1c9245 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -2985,6 +2985,10 @@ glusterd_compare_friend_volume (dict_t *peer_data, int32_t count,
*status = GLUSTERD_VOL_COMP_SCS;
out:
+ if (ret) {
+ gf_event (EVENT_COMPARE_FRIEND_VOLUME_FAILED, "volume=%s",
+ volinfo->volname);
+ }
gf_msg_debug (this->name, 0, "Returning with ret: %d, status: %d",
ret, *status);
return ret;
@@ -3203,9 +3207,12 @@ glusterd_import_new_brick (dict_t *peer_data, int32_t vol_count,
*brickinfo = new_brickinfo;
out:
- if (msg[0])
+ if (msg[0]) {
gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_BRICK_IMPORT_FAIL, "%s", msg);
+ gf_event (EVENT_IMPORT_BRICK_FAILED, "peer=%s;brick=%s",
+ new_brickinfo->hostname, new_brickinfo->path);
+ }
gf_msg_debug ("glusterd", 0, "Returning with %d", ret);
return ret;
}
@@ -3811,9 +3818,12 @@ glusterd_import_volinfo (dict_t *peer_data, int count,
*volinfo = new_volinfo;
out:
- if (msg[0])
+ if (msg[0]) {
gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_VOLINFO_IMPORT_FAIL, "%s", msg);
+ gf_event (EVENT_IMPORT_VOLUME_FAILED, "volume=%s",
+ new_volinfo->volname);
+ }
gf_msg_debug ("glusterd", 0, "Returning with %d", ret);
return ret;
}
@@ -4126,11 +4136,17 @@ glusterd_import_friend_volume (dict_t *peer_data, size_t count)
}
if (glusterd_is_volume_started (new_volinfo)) {
- (void) glusterd_start_bricks (new_volinfo);
+ if (glusterd_start_bricks (new_volinfo)) {
+ gf_event (EVENT_BRICKS_START_FAILED, "volume=%s",
+ new_volinfo->volname);
+ }
if (glusterd_is_snapd_enabled (new_volinfo)) {
svc = &(new_volinfo->snapd.svc);
- (void) svc->manager (svc, new_volinfo,
- PROC_START_NO_WAIT);
+ if (svc->manager (svc, new_volinfo,
+ PROC_START_NO_WAIT)){
+ gf_event (EVENT_SVC_MANAGER_FAILED,
+ "svc_name=%s", svc->name);
+ }
}
}
@@ -4156,6 +4172,8 @@ glusterd_import_friend_volume (dict_t *peer_data, size_t count)
" ret: %d for volume %s ganesha.enable %s",
ret, new_volinfo->volname,
value);
+ gf_event (EVENT_NFS_GANESHA_EXPORT_FAILED, "volume=%s",
+ new_volinfo->volname);
goto out;
}
}
@@ -4173,9 +4191,11 @@ glusterd_import_friend_volume (dict_t *peer_data, size_t count)
ret = glusterd_import_quota_conf (peer_data, count,
new_volinfo, "volume");
- if (ret)
+ if (ret) {
+ gf_event (EVENT_IMPORT_QUOTA_CONF_FAILED, "volume=%s",
+ new_volinfo->volname);
goto out;
-
+ }
glusterd_list_add_order (&new_volinfo->vol_list, &priv->volumes,
glusterd_compare_volume_name);
@@ -4392,7 +4412,9 @@ glusterd_compare_friend_data (dict_t *peer_data, int32_t *status,
if (ret)
goto out;
- glusterd_svcs_manager (NULL);
+ if (glusterd_svcs_manager (NULL)) {
+ gf_event (EVENT_SVC_MANAGER_FAILED, "");
+ }
}
out:
@@ -4860,6 +4882,10 @@ glusterd_brick_start (glusterd_volinfo_t *volinfo,
GD_MSG_RESOLVE_BRICK_FAIL,
FMTSTR_RESOLVE_BRICK,
brickinfo->hostname, brickinfo->path);
+ gf_event (EVENT_BRICKPATH_RESOLVE_FAILED,
+ "peer=%s;volume=%s;brick=%s",
+ brickinfo->hostname, volinfo->volname,
+ brickinfo->path);
goto out;
}
}
@@ -4874,6 +4900,9 @@ glusterd_brick_start (glusterd_volinfo_t *volinfo,
GD_MSG_BRICK_DISCONNECTED,
"Unable to start brick %s:%s",
brickinfo->hostname, brickinfo->path);
+ gf_event (EVENT_BRICK_START_FAILED,
+ "peer=%s;volume=%s;brick=%s", brickinfo->hostname,
+ volinfo->volname, brickinfo->path);
goto out;
}
@@ -4914,7 +4943,9 @@ glusterd_restart_bricks (glusterd_conf_t *conf)
continue;
if (start_svcs == _gf_false) {
start_svcs = _gf_true;
- glusterd_svcs_manager (NULL);
+ if (glusterd_svcs_manager (NULL)) {
+ gf_event (EVENT_SVC_MANAGER_FAILED, "");
+ }
}
gf_msg_debug (this->name, 0, "starting the volume %s",
volinfo->volname);
@@ -4964,7 +4995,9 @@ glusterd_restart_bricks (glusterd_conf_t *conf)
}
if (start_svcs == _gf_false) {
start_svcs = _gf_true;
- glusterd_svcs_manager (volinfo);
+ if (glusterd_svcs_manager (volinfo)) {
+ gf_event (EVENT_SVC_MANAGER_FAILED, "");
+ }
}
start_svcs = _gf_true;
gf_msg_debug (this->name, 0, "starting the snap "
@@ -7369,6 +7402,8 @@ glusterd_volume_defrag_restart (glusterd_volinfo_t *volinfo, char *op_errstr,
"Failed to initialize defrag."
"Not starting rebalance process for "
"%s.", volinfo->volname);
+ gf_event (EVENT_REBALANCE_START_FAILED,
+ "volume=%s", volinfo->volname);
goto out;
}
ret = glusterd_rebalance_rpc_create (volinfo, _gf_true);
@@ -7377,8 +7412,11 @@ glusterd_volume_defrag_restart (glusterd_volinfo_t *volinfo, char *op_errstr,
case GF_DEFRAG_STATUS_NOT_STARTED:
ret = glusterd_handle_defrag_start (volinfo, op_errstr, len,
cmd, cbk, volinfo->rebal.op);
- if (ret)
+ if (ret) {
volinfo->rebal.defrag_status = GF_DEFRAG_STATUS_FAILED;
+ gf_event (EVENT_REBALANCE_START_FAILED,
+ "volume=%s", volinfo->volname);
+ }
break;
default:
gf_msg (this->name, GF_LOG_ERROR, 0,