author    Nandaja Varma <nandaja.varma@gmail.com>    2015-06-25 00:57:00 +0530
committer Kaushal M <kaushal@redhat.com>             2015-06-26 23:56:09 -0700
commit    8708953fa3d9187997dc6d484dae663b4469c7ca (patch)
tree      38e9ef4db291ee0c1c50805bc08aee6fa3c7780f /xlators/mgmt/glusterd/src/glusterd-handler.c
parent    08586ee518de438fe2bbbaa74ae4c9a02a5d88cf (diff)
glusterd: Porting left out log messages to new framework
This is a backport of http://review.gluster.org/11388, cherry-picked
from commit 23c1e6dc0fa86c014e1a8b6aa5729675f6d69017.

>Change-Id: I70d40ae3b5f49a21e1b93f82885cd58fa2723647
>BUG: 1235538
>Signed-off-by: Nandaja Varma <nandaja.varma@gmail.com>

Change-Id: I70d40ae3b5f49a21e1b93f82885cd58fa2723647
BUG: 1217722
Signed-off-by: Nandaja Varma <nandaja.varma@gmail.com>
Reviewed-on: http://review.gluster.org/11422
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Tested-by: NetBSD Build System <jenkins@build.gluster.org>
Reviewed-by: Anand Nekkunti <anekkunt@redhat.com>
Reviewed-by: Kaushal M <kaushal@redhat.com>
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-handler.c')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c | 93
1 file changed, 65 insertions(+), 28 deletions(-)
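For context, the patch applies one mechanical pattern throughout the file: each
plain gf_log() call gains an errno value and a component message ID (GD_MSG_*)
and becomes a gf_msg() call, while TRACE-level calls become gf_msg_trace().
The stand-alone sketch below illustrates only the call-shape difference;
gf_msg_mock() is a hypothetical stand-in written for this note (the real
gf_msg() lives in libglusterfs, and the GD_MSG_* IDs are defined in glusterd's
message-ID header), so only the argument layout mirrors the patch.

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for gf_msg(): shows the two arguments the new
 * framework adds over gf_log() -- an errnum and a numeric message ID. */
static void
gf_msg_mock (const char *dom, const char *lvl, int errnum, int msgid,
             const char *fmt, ...)
{
        va_list ap;

        fprintf (stderr, "[%s] %s [MSGID: %d] ", dom, lvl, msgid);
        va_start (ap, fmt);
        vfprintf (stderr, fmt, ap);
        va_end (ap);
        if (errnum)
                fprintf (stderr, " [%s]", strerror (errnum));
        fputc ('\n', stderr);
}

int
main (void)
{
        /* Before: gf_log ("glusterd", GF_LOG_INFO,
         *                 "Received CLI probe req %s %d", hostname, port);
         * After:  gf_msg ("glusterd", GF_LOG_INFO, 0, GD_MSG_CLI_REQ_RECVD,
         *                 "Received CLI probe req %s %d", hostname, port);
         */
        gf_msg_mock ("glusterd", "I", 0, 106001 /* hypothetical ID value */,
                     "Received CLI probe req %s %d", "host1", 24007);
        return 0;
}

In the hunks that follow, the errnum slot is 0 where no errno is meaningful
(the probe, deprobe, and friend-request handlers) and errno where a failing
call supplies one (the dict_get_str() case in glusterd_op_txn_begin).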
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index ab2f310b6a0..b8e02ba1332 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -697,7 +697,8 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
* not be held */
ret = dict_get_str (dict, "volname", &tmp);
if (ret) {
- gf_log (this->name, GF_LOG_INFO,
+ gf_msg (this->name, GF_LOG_INFO, errno,
+ GD_MSG_DICT_GET_FAILED,
"No Volume name present. "
"Locks not being held.");
goto local_locking_done;
@@ -1162,7 +1163,9 @@ __glusterd_handle_cli_probe (rpcsvc_request_t *req)
goto out;
}
- gf_log ("glusterd", GF_LOG_INFO, "Received CLI probe req %s %d",
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_CLI_REQ_RECVD,
+ "Received CLI probe req %s %d",
hostname, port);
if (dict_get_str(this->options,"transport.socket.bind-address",
@@ -1273,7 +1276,9 @@ __glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
}
}
- gf_log ("glusterd", GF_LOG_INFO, "Received CLI deprobe req");
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_CLI_REQ_RECVD,
+ "Received CLI deprobe req");
ret = dict_get_str (dict, "hostname", &hostname);
if (ret) {
@@ -1392,7 +1397,9 @@ __glusterd_handle_cli_list_friends (rpcsvc_request_t *req)
goto out;
}
- gf_log ("glusterd", GF_LOG_INFO, "Received cli list req");
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_CLI_REQ_RECVD,
+ "Received cli list req");
if (cli_req.dict.dict_len) {
/* Unserialize the dictionary */
@@ -1451,7 +1458,9 @@ __glusterd_handle_cli_get_volume (rpcsvc_request_t *req)
goto out;
}
- gf_log ("glusterd", GF_LOG_INFO, "Received get vol req");
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_GET_VOL_REQ_RCVD,
+ "Received get vol req");
if (cli_req.dict.dict_len) {
/* Unserialize the dictionary */
@@ -1843,7 +1852,7 @@ __glusterd_handle_ganesha_cmd (rpcsvc_request_t *req)
}
}
- gf_log (this->name, GF_LOG_TRACE, "Received global option request");
+ gf_msg_trace (this->name, 0, "Received global option request");
ret = glusterd_op_begin_synctask (req, GD_OP_GANESHA, dict);
out:
@@ -2123,7 +2132,8 @@ __glusterd_handle_sync_volume (rpcsvc_request_t *req)
}
}
- gf_log (this->name, GF_LOG_INFO, "Received volume sync req "
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_VOL_SYNC_REQ_RCVD, "Received volume sync req "
"for volume %s", (flags & GF_CLI_SYNC_ALL) ? "all" : volname);
if (gf_is_local_addr (hostname)) {
@@ -2515,7 +2525,8 @@ __glusterd_handle_incoming_friend_req (rpcsvc_request_t *req)
goto out;
}
- gf_log ("glusterd", GF_LOG_INFO,
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_PROBE_RCVD,
"Received probe from uuid: %s", uuid_utoa (friend_req.uuid));
ret = glusterd_handle_friend_req (req, friend_req.uuid,
friend_req.hostname, friend_req.port,
@@ -2564,7 +2575,8 @@ __glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req)
goto out;
}
- gf_log ("glusterd", GF_LOG_INFO,
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_UNFRIEND_REQ_RCVD,
"Received unfriend from uuid: %s", uuid_utoa (friend_req.uuid));
ret = glusterd_remote_hostname_get (req, remote_hostname,
@@ -2689,7 +2701,8 @@ __glusterd_handle_friend_update (rpcsvc_request_t *req)
goto out;
}
- gf_log ("glusterd", GF_LOG_INFO,
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_FRIEND_UPDATE_RCVD,
"Received friend update from uuid: %s", uuid_utoa (friend_req.uuid));
if (friend_req.friends.friends_len) {
@@ -2733,7 +2746,8 @@ __glusterd_handle_friend_update (rpcsvc_request_t *req)
gf_uuid_parse (uuid_buf, uuid);
if (!gf_uuid_compare (uuid, MY_UUID)) {
- gf_log (this->name, GF_LOG_INFO,
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_UUID_RECEIVED,
"Received my uuid as Friend");
i++;
continue;
@@ -2856,7 +2870,8 @@ __glusterd_handle_probe_query (rpcsvc_request_t *req)
else
port = GF_DEFAULT_BASE_PORT;
- gf_log ("glusterd", GF_LOG_INFO,
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_PROBE_RCVD,
"Received probe from uuid: %s", uuid_utoa (probe_req.uuid));
/* Check for uuid collision and handle it in a user friendly way by
@@ -2889,7 +2904,9 @@ __glusterd_handle_probe_query (rpcsvc_request_t *req)
rsp.op_ret = -1;
rsp.op_errno = GF_PROBE_ANOTHER_CLUSTER;
} else if (peerinfo == NULL) {
- gf_log ("glusterd", GF_LOG_INFO, "Unable to find peerinfo"
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_PEER_NOT_FOUND,
+ "Unable to find peerinfo"
" for host: %s (%d)", remote_hostname, port);
args.mode = GD_MODE_ON;
ret = glusterd_friend_add (remote_hostname, port,
@@ -2915,7 +2932,8 @@ respond:
(xdrproc_t)xdr_gd1_mgmt_probe_rsp);
ret = 0;
- gf_log ("glusterd", GF_LOG_INFO, "Responded to %s, op_ret: %d, "
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_RESPONSE_INFO, "Responded to %s, op_ret: %d, "
"op_errno: %d, ret: %d", remote_hostname,
rsp.op_ret, rsp.op_errno, ret);
@@ -2976,7 +2994,9 @@ __glusterd_handle_cli_profile_volume (rpcsvc_request_t *req)
goto out;
}
- gf_log (this->name, GF_LOG_INFO, "Received volume profile req "
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_VOL_PROFILE_REQ_RCVD,
+ "Received volume profile req "
"for volume %s", volname);
ret = dict_get_int32 (dict, "op", &op);
if (ret) {
@@ -3025,7 +3045,8 @@ __glusterd_handle_getwd (rpcsvc_request_t *req)
priv = THIS->private;
GF_ASSERT (priv);
- gf_log ("glusterd", GF_LOG_INFO, "Received getwd req");
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_GETWD_REQ_RCVD, "Received getwd req");
rsp.wd = priv->workdir;
@@ -3070,7 +3091,9 @@ __glusterd_handle_mount (rpcsvc_request_t *req)
goto out;
}
- gf_log ("glusterd", GF_LOG_INFO, "Received mount req");
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_MOUNT_REQ_RCVD,
+ "Received mount req");
if (mnt_req.dict.dict_len) {
/* Unserialize the dictionary */
@@ -3154,7 +3177,9 @@ __glusterd_handle_umount (rpcsvc_request_t *req)
goto out;
}
- gf_log ("glusterd", GF_LOG_INFO, "Received umount req");
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_UMOUNT_REQ_RCVD,
+ "Received umount req");
if (dict_get_str (this->options, "mountbroker-root",
&mountbroker_root) != 0) {
@@ -3476,7 +3501,8 @@ glusterd_friend_add (const char *hoststr, int port,
}
out:
- gf_log (this->name, GF_LOG_INFO, "connect returned %d", ret);
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_CONNECT_RETURNED, "connect returned %d", ret);
return ret;
}
@@ -3525,7 +3551,9 @@ glusterd_friend_add_from_peerinfo (glusterd_peerinfo_t *friend,
}
out:
- gf_log (this->name, GF_LOG_INFO, "connect returned %d", ret);
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_CONNECT_RETURNED,
+ "connect returned %d", ret);
return ret;
}
@@ -3544,7 +3572,8 @@ glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
peerinfo = glusterd_peerinfo_find (NULL, hoststr);
if (peerinfo == NULL) {
- gf_log ("glusterd", GF_LOG_INFO, "Unable to find peerinfo"
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_PEER_NOT_FOUND, "Unable to find peerinfo"
" for host: %s (%d)", hoststr, port);
args.mode = GD_MODE_ON;
args.req = req;
@@ -3608,7 +3637,8 @@ glusterd_deprobe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
peerinfo = glusterd_peerinfo_find (uuid, hoststr);
if (peerinfo == NULL) {
ret = -1;
- gf_log ("glusterd", GF_LOG_INFO, "Unable to find peerinfo"
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_PEER_NOT_FOUND, "Unable to find peerinfo"
" for host: %s %d", hoststr, port);
goto out;
}
@@ -3689,7 +3719,8 @@ glusterd_xfer_friend_remove_resp (rpcsvc_request_t *req, char *hostname, int por
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
(xdrproc_t)xdr_gd1_mgmt_friend_rsp);
- gf_log ("glusterd", GF_LOG_INFO,
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_RESPONSE_INFO,
"Responded to %s (%d), ret: %d", hostname, port, ret);
return ret;
}
@@ -3721,7 +3752,8 @@ glusterd_xfer_friend_add_resp (rpcsvc_request_t *req, char *myhostname,
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
(xdrproc_t)xdr_gd1_mgmt_friend_rsp);
- gf_log ("glusterd", GF_LOG_INFO,
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_RESPONSE_INFO,
"Responded to %s (%d), ret: %d", remote_hostname, port, ret);
GF_FREE (rsp.hostname);
return ret;
@@ -4207,7 +4239,8 @@ __glusterd_handle_status_volume (rpcsvc_request_t *req)
GD_MSG_VOL_NOT_FOUND, "%s", err_str);
goto out;
}
- gf_log (this->name, GF_LOG_INFO,
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_STATUS_VOL_REQ_RCVD,
"Received status volume req for volume %s", volname);
}
@@ -4330,7 +4363,8 @@ __glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req)
goto out;
}
- gf_log (this->name, GF_LOG_INFO, "Received clear-locks volume req "
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_CLRCLK_VOL_REQ_RCVD, "Received clear-locks volume req "
"for volume %s", volname);
ret = glusterd_op_begin_synctask (req, GD_OP_CLEARLOCKS_VOLUME, dict);
@@ -4452,7 +4486,9 @@ __glusterd_handle_barrier (rpcsvc_request_t *req)
"dict");
goto out;
}
- gf_log (this->name, GF_LOG_INFO, "Received barrier volume request for "
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_BARRIER_VOL_REQ_RCVD,
+ "Received barrier volume request for "
"volume %s", volname);
ret = glusterd_op_begin_synctask (req, GD_OP_BARRIER, dict);
@@ -4807,7 +4843,8 @@ __glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
* need to stop the brick
*/
if (brickinfo->snap_status == -1) {
- gf_log (this->name, GF_LOG_INFO,
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_SNAPSHOT_PENDING,
"Snapshot is pending on %s:%s. "
"Hence not starting the brick",
brickinfo->hostname,