author     Nandaja Varma <nandaja.varma@gmail.com>    2015-06-25 00:57:00 +0530
committer  Kaushal M <kaushal@redhat.com>             2015-06-26 23:32:01 -0700
commit     911e9228f31e89fe5df6e2282ce449b2a94c42b1 (patch)
tree       517bb1fd3d56c6016b21bc394956064ee56294ab
parent     2b9b3ef3b646989bbc0412dca187b3f5fcad3283 (diff)
glusterd: Porting left out log messages to new framework
Change-Id: I70d40ae3b5f49a21e1b93f82885cd58fa2723647
BUG: 1235538
Signed-off-by: Nandaja Varma <nandaja.varma@gmail.com>
Reviewed-on: http://review.gluster.org/11388
Tested-by: NetBSD Build System <jenkins@build.gluster.org>
Reviewed-by: Anand Nekkunti <anekkunt@redhat.com>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Kaushal M <kaushal@redhat.com>
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-bitd-svc.c       |   8
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-bitrot.c         |  87
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-brick-ops.c      |  54
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-ganesha.c        |  90
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c        |  93
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-hooks.c          |  50
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-log-ops.c        |   4
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-messages.h       | 830
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c          |  80
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-peer-utils.c     |   4
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-quota.c          |  20
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-rebalance.c      |  11
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-replace-brick.c  |  37
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-rpc-ops.c        |  30
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-scrub-svc.c      |   7
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-sm.c             |   5
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c |  42
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapshot.c       |   3
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-store.c          |  28
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-syncop.c         |  10
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c          | 189
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volgen.c         | 349
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-ops.c     |  35
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-set.c     |  19
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.c                |  68
25 files changed, 1677 insertions(+), 476 deletions(-)
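The hunks below apply one mechanical conversion across glusterd: plain gf_log() calls move to the message-ID based gf_msg() framework, and DEBUG/TRACE logs move to gf_msg_debug()/gf_msg_trace(). The following is a condensed sketch of that pattern, distilled from the call sites changed in this patch; it is illustrative only and is not additional code in the commit.

/* Before: legacy logger -- no message ID, and any errno detail was
 * folded into the format string by hand (e.g. via strerror()). */
gf_log (this->name, GF_LOG_ERROR, "Failed to create volfile");

/* After: gf_msg() takes an errno value and a unique message ID that is
 * #defined in glusterd-messages.h. */
gf_msg (this->name, GF_LOG_ERROR, 0,
        GD_MSG_VOLFILE_CREATE_FAIL,
        "Failed to create volfile");

/* DEBUG and TRACE messages carry no message ID; they switch to the
 * dedicated helpers instead. */
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);   /* before */
gf_msg_debug (this->name, 0, "Returning %d", ret);        /* after  */

Where the old call interpolated strerror(errno) into the message (as in the glusterd-hooks.c hunks), the new call passes errno directly as the third argument and drops the string interpolation.
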
diff --git a/xlators/mgmt/glusterd/src/glusterd-bitd-svc.c b/xlators/mgmt/glusterd/src/glusterd-bitd-svc.c
index c1acd40184d..1b71b6ca7a5 100644
--- a/xlators/mgmt/glusterd/src/glusterd-bitd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-bitd-svc.c
@@ -43,12 +43,14 @@ glusterd_bitdsvc_create_volfile ()
ret = glusterd_create_global_volfile (build_bitd_graph,
filepath, NULL);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to create volfile");
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_VOLFILE_CREATE_FAIL,
+ "Failed to create volfile");
goto out;
}
out:
- gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ gf_msg_debug (this->name, 0, "Returning %d", ret);
return ret;
}
@@ -84,7 +86,7 @@ glusterd_bitdsvc_manager (glusterd_svc_t *svc, void *data, int flags)
}
out:
- gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+ gf_msg_debug (THIS->name, 0, "Returning %d", ret);
return ret;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-bitrot.c b/xlators/mgmt/glusterd/src/glusterd-bitrot.c
index c32aa1e3ff3..c9cf9297bb8 100644
--- a/xlators/mgmt/glusterd/src/glusterd-bitrot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-bitrot.c
@@ -21,6 +21,7 @@
#include "byte-order.h"
#include "compat-errno.h"
#include "glusterd-scrub-svc.h"
+#include "glusterd-messages.h"
#include <sys/wait.h>
#include <dlfcn.h>
@@ -70,7 +71,8 @@ __glusterd_handle_bitrot (rpcsvc_request_t *req)
cli_req.dict.dict_len,
&dict);
if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR, "failed to "
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_UNSERIALIZE_FAIL, "failed to "
"unserialize req-buffer to dictionary");
snprintf (msg, sizeof (msg), "Unable to decode the "
"command");
@@ -83,7 +85,8 @@ __glusterd_handle_bitrot (rpcsvc_request_t *req)
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
snprintf (msg, sizeof (msg), "Unable to get volume name");
- gf_log (this->name, GF_LOG_ERROR, "Unable to get volume name, "
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED, "Unable to get volume name, "
"while handling bitrot command");
goto out;
}
@@ -91,7 +94,8 @@ __glusterd_handle_bitrot (rpcsvc_request_t *req)
ret = dict_get_int32 (dict, "type", &type);
if (ret) {
snprintf (msg, sizeof (msg), "Unable to get type of command");
- gf_log (this->name, GF_LOG_ERROR, "Unable to get type of cmd, "
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED, "Unable to get type of cmd, "
"while handling bitrot command");
goto out;
}
@@ -138,7 +142,8 @@ glusterd_bitrot_scrub_throttle (glusterd_volinfo_t *volinfo, dict_t *dict,
ret = dict_get_str (dict, "scrub-throttle-value", &scrub_throttle);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Unable to fetch scrub-"
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED, "Unable to fetch scrub-"
"throttle value");
goto out;
}
@@ -146,14 +151,17 @@ glusterd_bitrot_scrub_throttle (glusterd_volinfo_t *volinfo, dict_t *dict,
option = gf_strdup (scrub_throttle);
ret = dict_set_dynstr (volinfo->dict, key, option);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to set option %s",
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_SET_FAILED, "Failed to set option %s",
key);
goto out;
}
ret = glusterd_scrubsvc_reconfigure ();
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to reconfigure scrub "
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_SCRUBSVC_RECONF_FAIL,
+ "Failed to reconfigure scrub "
"services");
goto out;
}
@@ -176,7 +184,8 @@ glusterd_bitrot_scrub_freq (glusterd_volinfo_t *volinfo, dict_t *dict,
ret = dict_get_str (dict, "scrub-frequency-value", &scrub_freq);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Unable to fetch scrub-"
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED, "Unable to fetch scrub-"
"freq value");
goto out;
}
@@ -184,14 +193,17 @@ glusterd_bitrot_scrub_freq (glusterd_volinfo_t *volinfo, dict_t *dict,
option = gf_strdup (scrub_freq);
ret = dict_set_dynstr (volinfo->dict, key, option);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to set option %s",
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_SET_FAILED, "Failed to set option %s",
key);
goto out;
}
ret = glusterd_scrubsvc_reconfigure ();
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to reconfigure scrub "
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_SCRUBSVC_RECONF_FAIL,
+ "Failed to reconfigure scrub "
"services");
goto out;
}
@@ -214,7 +226,8 @@ glusterd_bitrot_scrub (glusterd_volinfo_t *volinfo, dict_t *dict,
ret = dict_get_str (dict, "scrub-value", &scrub_value);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Unable to fetch scrub"
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_GET_FAILED, "Unable to fetch scrub"
"value");
goto out;
}
@@ -227,14 +240,17 @@ glusterd_bitrot_scrub (glusterd_volinfo_t *volinfo, dict_t *dict,
ret = dict_set_dynstr (volinfo->dict, key, option);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to set option %s",
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_SET_FAILED, "Failed to set option %s",
key);
goto out;
}
ret = glusterd_scrubsvc_reconfigure ();
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to reconfigure scrub "
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_SCRUBSVC_RECONF_FAIL,
+ "Failed to reconfigure scrub "
"services");
goto out;
}
@@ -257,7 +273,8 @@ glusterd_bitrot_expiry_time (glusterd_volinfo_t *volinfo, dict_t *dict,
ret = dict_get_uint32 (dict, "expiry-time", &expiry_time);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Unable to get bitrot expiry"
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED, "Unable to get bitrot expiry"
" timer value.");
goto out;
}
@@ -266,14 +283,17 @@ glusterd_bitrot_expiry_time (glusterd_volinfo_t *volinfo, dict_t *dict,
ret = dict_set_dynstr_with_alloc (volinfo->dict, key, dkey);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to set option %s",
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_SET_FAILED, "Failed to set option %s",
key);
goto out;
}
ret = glusterd_bitdsvc_reconfigure ();
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to reconfigure bitrot"
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_BITDSVC_RECONF_FAIL,
+ "Failed to reconfigure bitrot"
"services");
goto out;
}
@@ -310,7 +330,8 @@ glusterd_bitrot_enable (glusterd_volinfo_t *volinfo, char **op_errstr)
ret = dict_set_dynstr_with_alloc (volinfo->dict, VKEY_FEATURES_BITROT,
"on");
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "dict set failed");
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_SET_FAILED, "dict set failed");
goto out;
}
@@ -318,7 +339,8 @@ glusterd_bitrot_enable (glusterd_volinfo_t *volinfo, char **op_errstr)
ret = dict_set_dynstr_with_alloc (volinfo->dict, "features.scrub",
"Active");
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to set option "
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_SET_FAILED, "Failed to set option "
"features.scrub value");
goto out;
}
@@ -346,7 +368,8 @@ glusterd_bitrot_disable (glusterd_volinfo_t *volinfo, char **op_errstr)
ret = dict_set_dynstr_with_alloc (volinfo->dict, VKEY_FEATURES_BITROT,
"off");
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "dict set failed");
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_SET_FAILED, "dict set failed");
goto out;
}
@@ -354,7 +377,8 @@ glusterd_bitrot_disable (glusterd_volinfo_t *volinfo, char **op_errstr)
ret = dict_set_dynstr_with_alloc (volinfo->dict, "features.scrub",
"Inactive");
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to set "
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_SET_FAILED, "Failed to set "
"features.scrub value");
goto out;
}
@@ -456,7 +480,8 @@ glusterd_op_bitrot (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Unable to get volume name");
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED, "Unable to get volume name");
goto out;
}
@@ -468,7 +493,8 @@ glusterd_op_bitrot (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
ret = dict_get_int32 (dict, "type", &type);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Unable to get type from "
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED, "Unable to get type from "
"dict");
goto out;
}
@@ -531,7 +557,8 @@ glusterd_op_bitrot (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
ret = glusterd_create_volfiles_and_notify_services (volinfo);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Unable to re-create "
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_VOLFILE_CREATE_FAIL, "Unable to re-create "
"volfiles");
ret = -1;
goto out;
@@ -540,7 +567,7 @@ glusterd_op_bitrot (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
ret = glusterd_store_volinfo (volinfo,
GLUSTERD_VOLINFO_VER_AC_INCREMENT);
if (ret) {
- gf_log (this->name, GF_LOG_DEBUG, "Failed to store volinfo for "
+ gf_msg_debug (this->name, 0, "Failed to store volinfo for "
"bitrot");
goto out;
}
@@ -572,7 +599,8 @@ glusterd_op_stage_bitrot (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Unable to get volume name");
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED, "Unable to get volume name");
goto out;
}
@@ -591,7 +619,8 @@ glusterd_op_stage_bitrot (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
ret = dict_get_int32 (dict, "type", &type);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Unable to get type for "
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED, "Unable to get type for "
"operation");
*op_errstr = gf_strdup ("Staging stage failed for bitrot "
@@ -614,7 +643,8 @@ glusterd_op_stage_bitrot (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
if (!ret) {
ret = dict_get_str (dict, "scrub-value", &scrub_cmd);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Unable to "
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED, "Unable to "
"get scrub-value");
*op_errstr = gf_strdup ("Staging failed for "
"bitrot operation. "
@@ -641,8 +671,9 @@ glusterd_op_stage_bitrot (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
out:
if (ret && op_errstr && *op_errstr)
- gf_log (this->name, GF_LOG_ERROR, "%s", *op_errstr);
- gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_OP_STAGE_BITROT_FAIL, "%s", *op_errstr);
+ gf_msg_debug (this->name, 0, "Returning %d", ret);
return ret;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
index 984a24523f0..6b5bdafd582 100644
--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
@@ -125,7 +125,8 @@ gd_addbr_validate_stripe_count (glusterd_volinfo_t *volinfo, int stripe_count,
if ((volinfo->brick_count * stripe_count) == total_bricks) {
/* Change the volume type */
*type = GF_CLUSTER_TYPE_STRIPE;
- gf_log (THIS->name, GF_LOG_INFO,
+ gf_msg (THIS->name, GF_LOG_INFO, 0,
+ GD_MSG_VOL_TYPE_CHANGING_INFO,
"Changing the type of volume %s from "
"'distribute' to 'stripe'", volinfo->volname);
ret = 0;
@@ -144,7 +145,8 @@ gd_addbr_validate_stripe_count (glusterd_volinfo_t *volinfo, int stripe_count,
if (!(total_bricks % (volinfo->replica_count * stripe_count))) {
/* Change the volume type */
*type = GF_CLUSTER_TYPE_STRIPE_REPLICATE;
- gf_log (THIS->name, GF_LOG_INFO,
+ gf_msg (THIS->name, GF_LOG_INFO, 0,
+ GD_MSG_VOL_TYPE_CHANGING_INFO,
"Changing the type of volume %s from "
"'replicate' to 'replicate-stripe'",
volinfo->volname);
@@ -188,7 +190,8 @@ gd_addbr_validate_stripe_count (glusterd_volinfo_t *volinfo, int stripe_count,
volinfo->replica_count)) ==
(total_bricks * volinfo->dist_leaf_count)) {
/* Change the dist_leaf_count */
- gf_log (THIS->name, GF_LOG_INFO,
+ gf_msg (THIS->name, GF_LOG_INFO, 0,
+ GD_MSG_STRIPE_COUNT_CHANGE_INFO,
"Changing the stripe count of "
"volume %s from %d to %d",
volinfo->volname,
@@ -224,7 +227,8 @@ gd_addbr_validate_replica_count (glusterd_volinfo_t *volinfo, int replica_count,
if ((volinfo->brick_count * replica_count) == total_bricks) {
/* Change the volume type */
*type = GF_CLUSTER_TYPE_REPLICATE;
- gf_log (THIS->name, GF_LOG_INFO,
+ gf_msg (THIS->name, GF_LOG_INFO, 0,
+ GD_MSG_VOL_TYPE_CHANGING_INFO,
"Changing the type of volume %s from "
"'distribute' to 'replica'", volinfo->volname);
ret = 0;
@@ -289,7 +293,8 @@ gd_addbr_validate_replica_count (glusterd_volinfo_t *volinfo, int replica_count,
(volinfo->brick_count * (replica_count *
volinfo->stripe_count))) {
/* Change the dist_leaf_count */
- gf_log (THIS->name, GF_LOG_INFO,
+ gf_msg (THIS->name, GF_LOG_INFO, 0,
+ GD_MSG_REPLICA_COUNT_CHANGE_INFO,
"Changing the replica count of "
"volume %s from %d to %d",
volinfo->volname, volinfo->replica_count,
@@ -421,7 +426,8 @@ __glusterd_handle_add_brick (rpcsvc_request_t *req)
goto out;
}
- gf_log (this->name, GF_LOG_INFO, "Received add brick req");
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_ADD_BRICK_REQ_RECVD, "Received add brick req");
if (cli_req.dict.dict_len) {
/* Unserialize the dictionary */
@@ -505,13 +511,15 @@ __glusterd_handle_add_brick (rpcsvc_request_t *req)
if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
snprintf (err_str, sizeof (err_str),
"Volume %s is already a tier.", volname);
- gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_VOL_ALREADY_TIER, "%s", err_str);
ret = -1;
goto out;
}
if (glusterd_is_tiering_supported(err_str) == _gf_false) {
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_VERSION_UNSUPPORTED,
"Tiering not supported at this version");
ret = -1;
goto out;
@@ -519,7 +527,8 @@ __glusterd_handle_add_brick (rpcsvc_request_t *req)
ret = dict_get_int32 (dict, "hot-type", &type);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED,
"failed to get type from dictionary");
goto out;
}
@@ -531,7 +540,8 @@ __glusterd_handle_add_brick (rpcsvc_request_t *req)
if (ret) {
snprintf (err_str, sizeof (err_str), "Add-brick operation is "
"not supported on a tiered volume %s", volname);
- gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_OP_UNSUPPORTED, "%s", err_str);
goto out;
}
@@ -799,7 +809,9 @@ __glusterd_handle_remove_brick (rpcsvc_request_t *req)
goto out;
}
- gf_log (this->name, GF_LOG_INFO, "Received rem brick req");
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_REM_BRICK_REQ_RECVD,
+ "Received rem brick req");
if (cli_req.dict.dict_len) {
/* Unserialize the dictionary */
@@ -848,7 +860,8 @@ __glusterd_handle_remove_brick (rpcsvc_request_t *req)
if ((volinfo->type == GF_CLUSTER_TYPE_TIER) &&
(glusterd_is_tiering_supported(err_str) == _gf_false)) {
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_VERSION_UNSUPPORTED,
"Tiering not supported at this version");
ret = -1;
goto out;
@@ -858,7 +871,8 @@ __glusterd_handle_remove_brick (rpcsvc_request_t *req)
if (ret) {
snprintf (err_str, sizeof (err_str), "Unable to get cmd "
"ccommand");
- gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED, "%s", err_str);
goto out;
}
@@ -866,7 +880,8 @@ __glusterd_handle_remove_brick (rpcsvc_request_t *req)
if (ret) {
snprintf (err_str, sizeof (err_str),
"Removing brick from a Tier volume is not allowed");
- gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_OP_UNSUPPORTED, "%s", err_str);
goto out;
}
@@ -1730,7 +1745,8 @@ glusterd_op_stage_remove_brick (dict_t *dict, char **op_errstr)
snprintf (msg, sizeof(msg), "volume %s is not a tier "
"volume", volinfo->volname);
errstr = gf_strdup (msg);
- gf_log (this->name, GF_LOG_ERROR, "%s", errstr);
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_VOL_NOT_TIER, "%s", errstr);
goto out;
}
@@ -1828,7 +1844,8 @@ glusterd_op_stage_remove_brick (dict_t *dict, char **op_errstr)
snprintf (msg, sizeof(msg), "volume %s is not a tier "
"volume", volinfo->volname);
errstr = gf_strdup (msg);
- gf_log (this->name, GF_LOG_ERROR, "%s", errstr);
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_VOL_NOT_TIER, "%s", errstr);
goto out;
}
@@ -1887,7 +1904,8 @@ glusterd_op_stage_remove_brick (dict_t *dict, char **op_errstr)
snprintf (msg, sizeof(msg), "volume %s is not a tier "
"volume", volinfo->volname);
errstr = gf_strdup (msg);
- gf_log (this->name, GF_LOG_ERROR, "%s", errstr);
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_VOL_NOT_TIER, "%s", errstr);
goto out;
}
case GF_OP_CMD_COMMIT_FORCE:
@@ -2068,7 +2086,7 @@ glusterd_op_add_brick (dict_t *dict, char **op_errstr)
}
if (dict_get(dict, "attach-tier")) {
- gf_log (THIS->name, GF_LOG_DEBUG, "Adding tier");
+ gf_msg_debug (THIS->name, 0, "Adding tier");
glusterd_op_perform_attach_tier (dict, volinfo, count, bricks);
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
index 2638e105408..73e54d627bf 100644
--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c
+++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
@@ -16,6 +16,7 @@
#include "glusterd-utils.h"
#include "glusterd-nfs-svc.h"
#include "glusterd-volgen.h"
+#include "glusterd-messages.h"
#define MAXBUF 1024
#define DELIM "=\""
#define SHARED_STORAGE_MNT "/var/run/gluster/shared_storage/nfs-ganesha"
@@ -75,7 +76,7 @@ manage_service (char *action)
while (sc_list[i].binary != NULL) {
ret = stat (sc_list[i].binary, &stbuf);
if (ret == 0) {
- gf_log (THIS->name, GF_LOG_DEBUG,
+ gf_msg_debug (THIS->name, 0,
"%s found.", sc_list[i].binary);
if (strcmp (sc_list[i].binary, "/usr/bin/systemctl") == 0)
ret = sc_systemctl_action (&sc_list[i], action);
@@ -86,7 +87,8 @@ manage_service (char *action)
}
i++;
}
- gf_log (THIS->name, GF_LOG_ERROR,
+ gf_msg (THIS->name, GF_LOG_ERROR, 0,
+ GD_MSG_UNRECOGNIZED_SVC_MNGR,
"Could not %s NFS-Ganesha.Service manager for distro"
" not recognized.", action);
return ret;
@@ -103,7 +105,7 @@ glusterd_check_ganesha_export (glusterd_volinfo_t *volinfo) {
ret = glusterd_volinfo_get (volinfo, "ganesha.enable", &value);
if ((ret == 0) && value) {
if (strcmp (value, "on") == 0) {
- gf_log (THIS->name, GF_LOG_DEBUG, "ganesha.enable set"
+ gf_msg_debug (THIS->name, 0, "ganesha.enable set"
" to %s", value);
is_exported = _gf_true;
}
@@ -132,7 +134,9 @@ glusterd_check_ganesha_cmd (char *key, char *value, char **errstr, dict_t *dict)
}
ret = glusterd_handle_ganesha_op (dict, errstr, key, value);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Handling NFS-Ganesha"
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_NFS_GNS_OP_HANDLE_FAIL,
+ "Handling NFS-Ganesha"
" op failed.");
}
}
@@ -164,7 +168,8 @@ glusterd_op_stage_set_ganesha (dict_t *dict, char **op_errstr)
value = dict_get_str_boolean (dict, "value", _gf_false);
if (value == -1) {
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED,
"value not present.");
goto out;
}
@@ -172,7 +177,8 @@ glusterd_op_stage_set_ganesha (dict_t *dict, char **op_errstr)
/*Ignoring the ret value and proceeding */
ret = dict_get_str (priv->opts, GLUSTERD_STORE_KEY_GANESHA_GLOBAL, &str);
if (ret == -1) {
- gf_log (this->name, GF_LOG_WARNING, "Global dict not present.");
+ gf_msg (this->name, GF_LOG_WARNING, errno,
+ GD_MSG_DICT_GET_FAILED, "Global dict not present.");
ret = 0;
goto out;
}
@@ -188,7 +194,8 @@ glusterd_op_stage_set_ganesha (dict_t *dict, char **op_errstr)
if (value) {
ret = start_ganesha (op_errstr);
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
+ gf_msg (THIS->name, GF_LOG_ERROR, 0,
+ GD_MSG_NFS_GNS_START_FAIL,
"Could not start NFS-Ganesha");
}
@@ -199,11 +206,11 @@ out:
if (ret) {
if (!(*op_errstr)) {
*op_errstr = gf_strdup ("Error, Validation Failed");
- gf_log (this->name, GF_LOG_DEBUG,
+ gf_msg_debug (this->name, 0,
"Error, Cannot Validate option :%s",
GLUSTERD_STORE_KEY_GANESHA_GLOBAL);
} else {
- gf_log (this->name, GF_LOG_DEBUG,
+ gf_msg_debug (this->name, 0,
"Error, Cannot Validate option");
}
}
@@ -231,22 +238,25 @@ glusterd_op_set_ganesha (dict_t *dict, char **errstr)
ret = dict_get_str (dict, "key", &key);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Couldn't get key in global option set");
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED,
+ "Couldn't get key in global option set");
goto out;
}
ret = dict_get_str (dict, "value", &value);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED,
"Couldn't get value in global option set");
goto out;
}
ret = glusterd_handle_ganesha_op (dict, errstr, key, value);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Initial NFS-Ganesha set up failed");
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_NFS_GNS_SETUP_FAIL,
+ "Initial NFS-Ganesha set up failed");
ret = -1;
goto out;
}
@@ -254,14 +264,15 @@ glusterd_op_set_ganesha (dict_t *dict, char **errstr)
GLUSTERD_STORE_KEY_GANESHA_GLOBAL,
value);
if (ret) {
- gf_log (this->name, GF_LOG_WARNING, "Failed to set"
+ gf_msg (this->name, GF_LOG_WARNING, errno,
+ GD_MSG_DICT_SET_FAILED, "Failed to set"
" nfs-ganesha in dict.");
goto out;
}
ret = glusterd_get_next_global_opt_version_str (priv->opts,
&next_version);
if (ret) {
- gf_log (THIS->name, GF_LOG_DEBUG, "Could not fetch "
+ gf_msg_debug (THIS->name, 0, "Could not fetch "
" global op version");
goto out;
}
@@ -272,13 +283,13 @@ glusterd_op_set_ganesha (dict_t *dict, char **errstr)
ret = glusterd_store_options (this, priv->opts);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Failed to store options");
- goto out;
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_STORE_FAIL, "Failed to store options");
+ goto out;
}
out:
- gf_log (this->name, GF_LOG_DEBUG, "returning %d", ret);
+ gf_msg_debug (this->name, 0, "returning %d", ret);
return ret;
}
@@ -307,7 +318,8 @@ is_ganesha_host (void)
fp = fopen (GANESHA_HA_CONF, "r");
if (fp == NULL) {
- gf_log (this->name, GF_LOG_INFO, "couldn't open the file %s",
+ gf_msg (this->name, GF_LOG_INFO, errno,
+ GD_MSG_FILE_OP_FAILED, "couldn't open the file %s",
GANESHA_HA_CONF);
return _gf_false;
}
@@ -326,7 +338,9 @@ is_ganesha_host (void)
ret = gf_is_local_addr (host_from_file);
if (ret) {
- gf_log (this->name, GF_LOG_INFO, "ganesha host found "
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_NFS_GNS_HOST_FOUND,
+ "ganesha host found "
"Hostname is %s", host_from_file);
}
@@ -355,7 +369,8 @@ check_host_list (void)
fp = fopen (GANESHA_HA_CONF, "r");
if (fp == NULL) {
- gf_log (this->name, GF_LOG_INFO, "couldn't open the file %s",
+ gf_msg (this->name, GF_LOG_INFO, errno,
+ GD_MSG_FILE_OP_FAILED, "couldn't open the file %s",
GANESHA_HA_CONF);
return 0;
}
@@ -377,7 +392,9 @@ check_host_list (void)
while (hostname != NULL) {
ret = gf_is_local_addr (hostname);
if (ret) {
- gf_log (this->name, GF_LOG_INFO, "ganesha host found "
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_NFS_GNS_HOST_FOUND,
+ "ganesha host found "
"Hostname is %s", hostname);
break;
}
@@ -435,19 +452,22 @@ ganesha_manage_export (dict_t *dict, char *value, char **op_errstr)
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED,
"Unable to get volume name");
goto out;
}
ret = gf_string2boolean (value, &option);
if (ret == -1) {
- gf_log (this->name, GF_LOG_ERROR, "invalid value.");
+ gf_msg (this->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_INVALID_ENTRY, "invalid value.");
goto out;
}
ret = glusterd_volinfo_find (volname, &volinfo);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_VOL_NOT_FOUND,
FMTSTR_CHECK_VOL_EXISTS, volname);
goto out;
}
@@ -470,7 +490,7 @@ ganesha_manage_export (dict_t *dict, char *value, char **op_errstr)
ret = dict_get_str_boolean (priv->opts,
GLUSTERD_STORE_KEY_GANESHA_GLOBAL, _gf_false);
if (ret == -1) {
- gf_log (this->name, GF_LOG_DEBUG, "Failed to get "
+ gf_msg_debug (this->name, 0, "Failed to get "
"global option dict.");
gf_asprintf (op_errstr, "The option "
"nfs-ganesha should be "
@@ -489,7 +509,9 @@ ganesha_manage_export (dict_t *dict, char *value, char **op_errstr)
if (option) {
ret = create_export_config (volname, op_errstr);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to create"
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_EXPORT_FILE_CREATE_FAIL,
+ "Failed to create"
"export file for NFS-Ganesha\n");
goto out;
}
@@ -580,7 +602,7 @@ teardown (char **op_errstr)
"cleanup", CONFDIR, NULL);
ret = runner_run (&runner);
if (ret)
- gf_log (THIS->name, GF_LOG_DEBUG, "Could not clean up"
+ gf_msg_debug (THIS->name, 0, "Could not clean up"
" NFS-Ganesha related config");
cds_list_for_each_entry (volinfo, &priv->volumes, vol_list) {
@@ -589,7 +611,8 @@ teardown (char **op_errstr)
unexported, hence setting the appropriate key */
ret = dict_set_str (vol_opts, "ganesha.enable", "off");
if (ret)
- gf_log (THIS->name, GF_LOG_WARNING,
+ gf_msg (THIS->name, GF_LOG_WARNING, errno,
+ GD_MSG_DICT_SET_FAILED,
"Could not set ganesha.enable to off");
}
out:
@@ -670,8 +693,9 @@ pre_setup (char **op_errstr)
ret = mkdir (SHARED_STORAGE_MNT, 0775);
if ((-1 == ret) && (EEXIST != errno)) {
- gf_log ("THIS->name", GF_LOG_ERROR, "mkdir() failed on path %s,"
- "errno: %s", SHARED_STORAGE_MNT, strerror (errno));
+ gf_msg ("THIS->name", GF_LOG_ERROR, errno,
+ GD_MSG_CREATE_DIR_FAILED, "mkdir() failed on path %s,",
+ SHARED_STORAGE_MNT);
goto out;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index e8742c42f70..6fccad5cd9b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -693,7 +693,8 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
* not be held */
ret = dict_get_str (dict, "volname", &tmp);
if (ret) {
- gf_log (this->name, GF_LOG_INFO,
+ gf_msg (this->name, GF_LOG_INFO, errno,
+ GD_MSG_DICT_GET_FAILED,
"No Volume name present. "
"Locks not being held.");
goto local_locking_done;
@@ -1158,7 +1159,9 @@ __glusterd_handle_cli_probe (rpcsvc_request_t *req)
goto out;
}
- gf_log ("glusterd", GF_LOG_INFO, "Received CLI probe req %s %d",
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_CLI_REQ_RECVD,
+ "Received CLI probe req %s %d",
hostname, port);
if (dict_get_str(this->options,"transport.socket.bind-address",
@@ -1269,7 +1272,9 @@ __glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
}
}
- gf_log ("glusterd", GF_LOG_INFO, "Received CLI deprobe req");
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_CLI_REQ_RECVD,
+ "Received CLI deprobe req");
ret = dict_get_str (dict, "hostname", &hostname);
if (ret) {
@@ -1388,7 +1393,9 @@ __glusterd_handle_cli_list_friends (rpcsvc_request_t *req)
goto out;
}
- gf_log ("glusterd", GF_LOG_INFO, "Received cli list req");
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_CLI_REQ_RECVD,
+ "Received cli list req");
if (cli_req.dict.dict_len) {
/* Unserialize the dictionary */
@@ -1447,7 +1454,9 @@ __glusterd_handle_cli_get_volume (rpcsvc_request_t *req)
goto out;
}
- gf_log ("glusterd", GF_LOG_INFO, "Received get vol req");
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_GET_VOL_REQ_RCVD,
+ "Received get vol req");
if (cli_req.dict.dict_len) {
/* Unserialize the dictionary */
@@ -1839,7 +1848,7 @@ __glusterd_handle_ganesha_cmd (rpcsvc_request_t *req)
}
}
- gf_log (this->name, GF_LOG_TRACE, "Received global option request");
+ gf_msg_trace (this->name, 0, "Received global option request");
ret = glusterd_op_begin_synctask (req, GD_OP_GANESHA, dict);
out:
@@ -2119,7 +2128,8 @@ __glusterd_handle_sync_volume (rpcsvc_request_t *req)
}
}
- gf_log (this->name, GF_LOG_INFO, "Received volume sync req "
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_VOL_SYNC_REQ_RCVD, "Received volume sync req "
"for volume %s", (flags & GF_CLI_SYNC_ALL) ? "all" : volname);
if (gf_is_local_addr (hostname)) {
@@ -2511,7 +2521,8 @@ __glusterd_handle_incoming_friend_req (rpcsvc_request_t *req)
goto out;
}
- gf_log ("glusterd", GF_LOG_INFO,
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_PROBE_RCVD,
"Received probe from uuid: %s", uuid_utoa (friend_req.uuid));
ret = glusterd_handle_friend_req (req, friend_req.uuid,
friend_req.hostname, friend_req.port,
@@ -2560,7 +2571,8 @@ __glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req)
goto out;
}
- gf_log ("glusterd", GF_LOG_INFO,
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_UNFRIEND_REQ_RCVD,
"Received unfriend from uuid: %s", uuid_utoa (friend_req.uuid));
ret = glusterd_remote_hostname_get (req, remote_hostname,
@@ -2685,7 +2697,8 @@ __glusterd_handle_friend_update (rpcsvc_request_t *req)
goto out;
}
- gf_log ("glusterd", GF_LOG_INFO,
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_FRIEND_UPDATE_RCVD,
"Received friend update from uuid: %s", uuid_utoa (friend_req.uuid));
if (friend_req.friends.friends_len) {
@@ -2729,7 +2742,8 @@ __glusterd_handle_friend_update (rpcsvc_request_t *req)
gf_uuid_parse (uuid_buf, uuid);
if (!gf_uuid_compare (uuid, MY_UUID)) {
- gf_log (this->name, GF_LOG_INFO,
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_UUID_RECEIVED,
"Received my uuid as Friend");
i++;
continue;
@@ -2852,7 +2866,8 @@ __glusterd_handle_probe_query (rpcsvc_request_t *req)
else
port = GF_DEFAULT_BASE_PORT;
- gf_log ("glusterd", GF_LOG_INFO,
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_PROBE_RCVD,
"Received probe from uuid: %s", uuid_utoa (probe_req.uuid));
/* Check for uuid collision and handle it in a user friendly way by
@@ -2885,7 +2900,9 @@ __glusterd_handle_probe_query (rpcsvc_request_t *req)
rsp.op_ret = -1;
rsp.op_errno = GF_PROBE_ANOTHER_CLUSTER;
} else if (peerinfo == NULL) {
- gf_log ("glusterd", GF_LOG_INFO, "Unable to find peerinfo"
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_PEER_NOT_FOUND,
+ "Unable to find peerinfo"
" for host: %s (%d)", remote_hostname, port);
args.mode = GD_MODE_ON;
ret = glusterd_friend_add (remote_hostname, port,
@@ -2911,7 +2928,8 @@ respond:
(xdrproc_t)xdr_gd1_mgmt_probe_rsp);
ret = 0;
- gf_log ("glusterd", GF_LOG_INFO, "Responded to %s, op_ret: %d, "
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_RESPONSE_INFO, "Responded to %s, op_ret: %d, "
"op_errno: %d, ret: %d", remote_hostname,
rsp.op_ret, rsp.op_errno, ret);
@@ -2972,7 +2990,9 @@ __glusterd_handle_cli_profile_volume (rpcsvc_request_t *req)
goto out;
}
- gf_log (this->name, GF_LOG_INFO, "Received volume profile req "
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_VOL_PROFILE_REQ_RCVD,
+ "Received volume profile req "
"for volume %s", volname);
ret = dict_get_int32 (dict, "op", &op);
if (ret) {
@@ -3021,7 +3041,8 @@ __glusterd_handle_getwd (rpcsvc_request_t *req)
priv = THIS->private;
GF_ASSERT (priv);
- gf_log ("glusterd", GF_LOG_INFO, "Received getwd req");
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_GETWD_REQ_RCVD, "Received getwd req");
rsp.wd = priv->workdir;
@@ -3066,7 +3087,9 @@ __glusterd_handle_mount (rpcsvc_request_t *req)
goto out;
}
- gf_log ("glusterd", GF_LOG_INFO, "Received mount req");
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_MOUNT_REQ_RCVD,
+ "Received mount req");
if (mnt_req.dict.dict_len) {
/* Unserialize the dictionary */
@@ -3150,7 +3173,9 @@ __glusterd_handle_umount (rpcsvc_request_t *req)
goto out;
}
- gf_log ("glusterd", GF_LOG_INFO, "Received umount req");
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_UMOUNT_REQ_RCVD,
+ "Received umount req");
if (dict_get_str (this->options, "mountbroker-root",
&mountbroker_root) != 0) {
@@ -3472,7 +3497,8 @@ glusterd_friend_add (const char *hoststr, int port,
}
out:
- gf_log (this->name, GF_LOG_INFO, "connect returned %d", ret);
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_CONNECT_RETURNED, "connect returned %d", ret);
return ret;
}
@@ -3521,7 +3547,9 @@ glusterd_friend_add_from_peerinfo (glusterd_peerinfo_t *friend,
}
out:
- gf_log (this->name, GF_LOG_INFO, "connect returned %d", ret);
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_CONNECT_RETURNED,
+ "connect returned %d", ret);
return ret;
}
@@ -3540,7 +3568,8 @@ glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
peerinfo = glusterd_peerinfo_find (NULL, hoststr);
if (peerinfo == NULL) {
- gf_log ("glusterd", GF_LOG_INFO, "Unable to find peerinfo"
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_PEER_NOT_FOUND, "Unable to find peerinfo"
" for host: %s (%d)", hoststr, port);
args.mode = GD_MODE_ON;
args.req = req;
@@ -3604,7 +3633,8 @@ glusterd_deprobe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
peerinfo = glusterd_peerinfo_find (uuid, hoststr);
if (peerinfo == NULL) {
ret = -1;
- gf_log ("glusterd", GF_LOG_INFO, "Unable to find peerinfo"
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_PEER_NOT_FOUND, "Unable to find peerinfo"
" for host: %s %d", hoststr, port);
goto out;
}
@@ -3685,7 +3715,8 @@ glusterd_xfer_friend_remove_resp (rpcsvc_request_t *req, char *hostname, int por
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
(xdrproc_t)xdr_gd1_mgmt_friend_rsp);
- gf_log ("glusterd", GF_LOG_INFO,
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_RESPONSE_INFO,
"Responded to %s (%d), ret: %d", hostname, port, ret);
return ret;
}
@@ -3717,7 +3748,8 @@ glusterd_xfer_friend_add_resp (rpcsvc_request_t *req, char *myhostname,
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
(xdrproc_t)xdr_gd1_mgmt_friend_rsp);
- gf_log ("glusterd", GF_LOG_INFO,
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_RESPONSE_INFO,
"Responded to %s (%d), ret: %d", remote_hostname, port, ret);
GF_FREE (rsp.hostname);
return ret;
@@ -4203,7 +4235,8 @@ __glusterd_handle_status_volume (rpcsvc_request_t *req)
GD_MSG_VOL_NOT_FOUND, "%s", err_str);
goto out;
}
- gf_log (this->name, GF_LOG_INFO,
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_STATUS_VOL_REQ_RCVD,
"Received status volume req for volume %s", volname);
}
@@ -4326,7 +4359,8 @@ __glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req)
goto out;
}
- gf_log (this->name, GF_LOG_INFO, "Received clear-locks volume req "
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_CLRCLK_VOL_REQ_RCVD, "Received clear-locks volume req "
"for volume %s", volname);
ret = glusterd_op_begin_synctask (req, GD_OP_CLEARLOCKS_VOLUME, dict);
@@ -4448,7 +4482,9 @@ __glusterd_handle_barrier (rpcsvc_request_t *req)
"dict");
goto out;
}
- gf_log (this->name, GF_LOG_INFO, "Received barrier volume request for "
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_BARRIER_VOL_REQ_RCVD,
+ "Received barrier volume request for "
"volume %s", volname);
ret = glusterd_op_begin_synctask (req, GD_OP_BARRIER, dict);
@@ -4803,7 +4839,8 @@ __glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
* need to stop the brick
*/
if (brickinfo->snap_status == -1) {
- gf_log (this->name, GF_LOG_INFO,
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_SNAPSHOT_PENDING,
"Snapshot is pending on %s:%s. "
"Hence not starting the brick",
brickinfo->hostname,
diff --git a/xlators/mgmt/glusterd/src/glusterd-hooks.c b/xlators/mgmt/glusterd/src/glusterd-hooks.c
index e3c2ee0ad78..48a9bc4f79b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-hooks.c
+++ b/xlators/mgmt/glusterd/src/glusterd-hooks.c
@@ -23,6 +23,7 @@
#include "glusterd-utils.h"
#include "glusterd-store.h"
#include "glusterd-hooks.h"
+#include "glusterd-messages.h"
#include <fnmatch.h>
@@ -84,16 +85,18 @@ glusterd_hooks_create_hooks_directory (char *basedir)
snprintf (path, sizeof (path), "%s/hooks", basedir);
ret = mkdir_p (path, 0777, _gf_true);
if (ret) {
- gf_log (THIS->name, GF_LOG_CRITICAL, "Unable to create %s due"
- "to %s", path, strerror (errno));
+ gf_msg (THIS->name, GF_LOG_CRITICAL, errno,
+ GD_MSG_CREATE_DIR_FAILED, "Unable to create %s",
+ path);
goto out;
}
GLUSTERD_GET_HOOKS_DIR (version_dir, GLUSTERD_HOOK_VER, priv);
ret = mkdir_p (version_dir, 0777, _gf_true);
if (ret) {
- gf_log (THIS->name, GF_LOG_CRITICAL, "Unable to create %s due "
- "to %s", version_dir, strerror (errno));
+ gf_msg (THIS->name, GF_LOG_CRITICAL, errno,
+ GD_MSG_CREATE_DIR_FAILED, "Unable to create %s",
+ version_dir);
goto out;
}
@@ -106,9 +109,10 @@ glusterd_hooks_create_hooks_directory (char *basedir)
cmd_subdir);
ret = mkdir_p (path, 0777, _gf_true);
if (ret) {
- gf_log (THIS->name, GF_LOG_CRITICAL,
- "Unable to create %s due to %s",
- path, strerror (errno));
+ gf_msg (THIS->name, GF_LOG_CRITICAL, errno,
+ GD_MSG_CREATE_DIR_FAILED,
+ "Unable to create %s",
+ path);
goto out;
}
@@ -118,9 +122,10 @@ glusterd_hooks_create_hooks_directory (char *basedir)
version_dir, cmd_subdir, type_subdir[type]);
ret = mkdir_p (path, 0777, _gf_true);
if (ret) {
- gf_log (THIS->name, GF_LOG_CRITICAL,
- "Unable to create %s due to %s",
- path, strerror (errno));
+ gf_msg (THIS->name, GF_LOG_CRITICAL, errno,
+ GD_MSG_CREATE_DIR_FAILED,
+ "Unable to create %s",
+ path);
goto out;
}
}
@@ -171,10 +176,10 @@ glusterd_hooks_add_custom_args (dict_t *dict, runner_t *runner)
ret = dict_get_str (dict, "hooks_args", &hooks_args);
if (ret)
- gf_log (this->name, GF_LOG_DEBUG,
+ gf_msg_debug (this->name, 0,
"No Hooks Arguments.");
else
- gf_log (this->name, GF_LOG_DEBUG,
+ gf_msg_debug (this->name, 0,
"Hooks Args = %s", hooks_args);
if (hooks_args)
@@ -332,7 +337,8 @@ glusterd_hooks_run_hooks (char *hooks_path, glusterd_op_t op, dict_t *op_ctx,
ret = dict_get_str (op_ctx, "volname", &volname);
if (ret) {
- gf_log (this->name, GF_LOG_CRITICAL, "Failed to get volname "
+ gf_msg (this->name, GF_LOG_CRITICAL, errno,
+ GD_MSG_DICT_GET_FAILED, "Failed to get volname "
"from operation context");
goto out;
}
@@ -340,8 +346,10 @@ glusterd_hooks_run_hooks (char *hooks_path, glusterd_op_t op, dict_t *op_ctx,
hookdir = opendir (hooks_path);
if (!hookdir) {
ret = -1;
- gf_log (this->name, GF_LOG_ERROR, "Failed to open dir %s, due "
- "to %s", hooks_path, strerror (errno));
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DIR_OP_FAILED,
+ "Failed to open dir %s",
+ hooks_path);
goto out;
}
@@ -385,7 +393,8 @@ glusterd_hooks_run_hooks (char *hooks_path, glusterd_op_t op, dict_t *op_ctx,
runner_argprintf (&runner, "--volname=%s", volname);
ret = glusterd_hooks_add_op_args (&runner, op, op_ctx, type);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to add "
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_ADD_OP_ARGS_FAIL, "Failed to add "
"command specific arguments");
goto out;
}
@@ -475,7 +484,8 @@ glusterd_hooks_stub_init (glusterd_hooks_stub_t **stub, char *scriptdir,
ret = 0;
out:
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "Failed to initialize "
+ gf_msg (THIS->name, GF_LOG_ERROR, 0,
+ GD_MSG_POST_HOOK_STUB_INIT_FAIL, "Failed to initialize "
"post hooks stub");
glusterd_hooks_stub_cleanup (hooks_stub);
}
@@ -487,7 +497,8 @@ void
glusterd_hooks_stub_cleanup (glusterd_hooks_stub_t *stub)
{
if (!stub) {
- gf_log_callingfn (THIS->name, GF_LOG_WARNING,
+ gf_msg_callingfn (THIS->name, GF_LOG_WARNING, 0,
+ GD_MSG_HOOK_STUB_NULL,
"hooks_stub is NULL");
return;
}
@@ -577,7 +588,8 @@ glusterd_hooks_spawn_worker (xlator_t *this)
ret = pthread_create (&hooks_priv->worker, NULL, hooks_worker,
(void *)this);
if (ret)
- gf_log (this->name, GF_LOG_CRITICAL, "Failed to spawn post "
+ gf_msg (this->name, GF_LOG_CRITICAL, errno,
+ GD_MSG_SPAWN_THREADS_FAIL, "Failed to spawn post "
"hooks worker thread");
out:
return ret;
diff --git a/xlators/mgmt/glusterd/src/glusterd-log-ops.c b/xlators/mgmt/glusterd/src/glusterd-log-ops.c
index 561d354b72c..938a066e9a0 100644
--- a/xlators/mgmt/glusterd/src/glusterd-log-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-log-ops.c
@@ -67,7 +67,9 @@ __glusterd_handle_log_rotate (rpcsvc_request_t *req)
goto out;
}
- gf_log (this->name, GF_LOG_INFO, "Received log rotate req "
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_LOG_ROTATE_REQ_RECVD,
+ "Received log rotate req "
"for volume %s", volname);
ret = dict_set_uint64 (dict, "rotate-key", (uint64_t)time (NULL));
diff --git a/xlators/mgmt/glusterd/src/glusterd-messages.h b/xlators/mgmt/glusterd/src/glusterd-messages.h
index 6cb1a09afe1..cd2c6f8e818 100644
--- a/xlators/mgmt/glusterd/src/glusterd-messages.h
+++ b/xlators/mgmt/glusterd/src/glusterd-messages.h
@@ -40,7 +40,7 @@
*/
#define GLUSTERD_COMP_BASE GLFS_MSGID_GLUSTERD
-#define GLFS_NUM_MESSAGES 459
+#define GLFS_NUM_MESSAGES 563
#define GLFS_MSGID_END (GLUSTERD_COMP_BASE + GLFS_NUM_MESSAGES + 1)
/* Messaged with message IDs */
#define glfs_msg_start_x GLFS_COMP_BASE, "Invalid: Start of messages"
@@ -3723,7 +3723,833 @@
* @recommendedaction
*
*/
+#define GD_MSG_BITROT_NOT_RUNNING (GLUSTERD_COMP_BASE + 460)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_SCRUBBER_NOT_RUNNING (GLUSTERD_COMP_BASE + 461)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_SRC_BRICK_PORT_UNAVAIL (GLUSTERD_COMP_BASE + 462)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_BITD_INIT_FAIL (GLUSTERD_COMP_BASE + 463)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_SCRUB_INIT_FAIL (GLUSTERD_COMP_BASE + 464)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_VAR_RUN_DIR_INIT_FAIL (GLUSTERD_COMP_BASE + 465)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_VAR_RUN_DIR_FIND_FAIL (GLUSTERD_COMP_BASE + 466)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_SCRUBSVC_RECONF_FAIL (GLUSTERD_COMP_BASE + 467)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_BITDSVC_RECONF_FAIL (GLUSTERD_COMP_BASE + 468)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_NFS_GNS_START_FAIL (GLUSTERD_COMP_BASE + 469)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_NFS_GNS_SETUP_FAIL (GLUSTERD_COMP_BASE + 470)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_UNRECOGNIZED_SVC_MNGR (GLUSTERD_COMP_BASE + 471)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_NFS_GNS_OP_HANDLE_FAIL (GLUSTERD_COMP_BASE + 472)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_EXPORT_FILE_CREATE_FAIL (GLUSTERD_COMP_BASE + 473)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_NFS_GNS_HOST_FOUND (GLUSTERD_COMP_BASE + 474)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_REBALANCE_CMD_IN_TIER_VOL (GLUSTERD_COMP_BASE + 475)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_INCOMPATIBLE_VALUE (GLUSTERD_COMP_BASE + 476)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_GENERATED_UUID (GLUSTERD_COMP_BASE + 477)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_FILE_DESC_LIMIT_SET (GLUSTERD_COMP_BASE + 478)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_CURR_WORK_DIR_INFO (GLUSTERD_COMP_BASE + 479)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_STRIPE_COUNT_CHANGE_INFO (GLUSTERD_COMP_BASE + 480)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_REPLICA_COUNT_CHANGE_INFO (GLUSTERD_COMP_BASE + 481)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_ADD_BRICK_REQ_RECVD (GLUSTERD_COMP_BASE + 482)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_VOL_ALREADY_TIER (GLUSTERD_COMP_BASE + 483)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_REM_BRICK_REQ_RECVD (GLUSTERD_COMP_BASE + 484)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_VOL_NOT_TIER (GLUSTERD_COMP_BASE + 485)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_LOG_ROTATE_REQ_RECVD (GLUSTERD_COMP_BASE + 486)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_CLI_REQ_RECVD (GLUSTERD_COMP_BASE + 487)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_GET_VOL_REQ_RCVD (GLUSTERD_COMP_BASE + 488)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_VOL_SYNC_REQ_RCVD (GLUSTERD_COMP_BASE + 489)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_PROBE_RCVD (GLUSTERD_COMP_BASE + 490)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_UNFRIEND_REQ_RCVD (GLUSTERD_COMP_BASE + 491)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_FRIEND_UPDATE_RCVD (GLUSTERD_COMP_BASE + 492)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_RESPONSE_INFO (GLUSTERD_COMP_BASE + 493)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_VOL_PROFILE_REQ_RCVD (GLUSTERD_COMP_BASE + 494)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_GETWD_REQ_RCVD (GLUSTERD_COMP_BASE + 495)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_MOUNT_REQ_RCVD (GLUSTERD_COMP_BASE + 496)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_UMOUNT_REQ_RCVD (GLUSTERD_COMP_BASE + 497)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_CONNECT_RETURNED (GLUSTERD_COMP_BASE + 498)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_STATUS_VOL_REQ_RCVD (GLUSTERD_COMP_BASE + 499)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_CLRCLK_VOL_REQ_RCVD (GLUSTERD_COMP_BASE + 500)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_BARRIER_VOL_REQ_RCVD (GLUSTERD_COMP_BASE + 501)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_UUID_RECEIVED (GLUSTERD_COMP_BASE + 502)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_REPLACE_BRK_COMMIT_FORCE_REQ_RCVD (GLUSTERD_COMP_BASE + 503)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_BRK_PORT_NO_ADD_INDO (GLUSTERD_COMP_BASE + 504)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_REPLACE_BRK_REQ_RCVD (GLUSTERD_COMP_BASE + 505)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_ADD_OP_ARGS_FAIL (GLUSTERD_COMP_BASE + 506)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_POST_HOOK_STUB_INIT_FAIL (GLUSTERD_COMP_BASE + 507)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_HOOK_STUB_NULL (GLUSTERD_COMP_BASE + 508)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_SPAWN_THREADS_FAIL (GLUSTERD_COMP_BASE + 509)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_STALE_VOL_DELETE_INFO (GLUSTERD_COMP_BASE + 510)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_PROBE_REQ_RESP_RCVD (GLUSTERD_COMP_BASE + 511)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_HOST_PRESENT_ALREADY (GLUSTERD_COMP_BASE + 512)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_OP_VERS_INFO (GLUSTERD_COMP_BASE + 513)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_OP_VERS_SET_INFO (GLUSTERD_COMP_BASE + 514)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_NEW_NODE_STATE_CREATION (GLUSTERD_COMP_BASE + 515)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_ALREADY_MOUNTED (GLUSTERD_COMP_BASE + 516)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_SHARED_STRG_VOL_OPT_VALIDATE_FAIL (GLUSTERD_COMP_BASE + 517)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_NFS_GNS_STOP_FAIL (GLUSTERD_COMP_BASE + 518)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_NFS_GNS_RESET_FAIL (GLUSTERD_COMP_BASE + 519)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_SHARED_STRG_SET_FAIL (GLUSTERD_COMP_BASE + 520)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_VOL_TRANSPORT_TYPE_CHANGE (GLUSTERD_COMP_BASE + 521)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_PEER_COUNT_GET_FAIL (GLUSTERD_COMP_BASE + 522)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_INSUFFICIENT_UP_NODES (GLUSTERD_COMP_BASE + 523)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_OP_STAGE_STATS_VOL_FAIL (GLUSTERD_COMP_BASE + 524)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_VOL_ID_SET_FAIL (GLUSTERD_COMP_BASE + 525)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_OP_STAGE_RESET_VOL_FAIL (GLUSTERD_COMP_BASE + 526)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_OP_STAGE_BITROT_FAIL (GLUSTERD_COMP_BASE + 527)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_OP_STAGE_QUOTA_FAIL (GLUSTERD_COMP_BASE + 528)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_OP_STAGE_DELETE_VOL_FAIL (GLUSTERD_COMP_BASE + 529)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_HANDLE_HEAL_CMD_FAIL (GLUSTERD_COMP_BASE + 530)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_CLRCLK_SND_CMD_FAIL (GLUSTERD_COMP_BASE + 531)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_DISPERSE_CLUSTER_FOUND (GLUSTERD_COMP_BASE + 532)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_HEAL_VOL_REQ_RCVD (GLUSTERD_COMP_BASE + 533)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_STATEDUMP_VOL_REQ_RCVD (GLUSTERD_COMP_BASE + 534)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_THINPOOLS_FOR_THINLVS (GLUSTERD_COMP_BASE + 535)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_OP_STAGE_CREATE_VOL_FAIL (GLUSTERD_COMP_BASE + 536)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_OP_STAGE_START_VOL_FAIL (GLUSTERD_COMP_BASE + 537)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_NFS_GNS_UNEXPRT_VOL_FAIL (GLUSTERD_COMP_BASE + 538)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_TASK_ID_INFO (GLUSTERD_COMP_BASE + 539)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_DEREGISTER_SUCCESS (GLUSTERD_COMP_BASE + 540)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_STATEDUMP_OPTS_RCVD (GLUSTERD_COMP_BASE + 541)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_STATEDUMP_INFO (GLUSTERD_COMP_BASE + 542)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_RECOVERING_CORRUPT_CONF (GLUSTERD_COMP_BASE + 543)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_RETRIEVED_UUID (GLUSTERD_COMP_BASE + 544)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_XLATOR_CREATE_FAIL (GLUSTERD_COMP_BASE + 545)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_GRAPH_ENTRY_ADD_FAIL (GLUSTERD_COMP_BASE + 546)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_ERROR_ENCOUNTERED (GLUSTERD_COMP_BASE + 547)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_FILTER_RUN_FAILED (GLUSTERD_COMP_BASE + 548)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_DEFAULT_OPT_INFO (GLUSTERD_COMP_BASE + 549)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_MARKER_STATUS_GET_FAIL (GLUSTERD_COMP_BASE + 550)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_MARKER_DISABLE_FAIL (GLUSTERD_COMP_BASE + 551)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_GRAPH_FEATURE_ADD_FAIL (GLUSTERD_COMP_BASE + 552)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_XLATOR_SET_OPT_FAIL (GLUSTERD_COMP_BASE + 553)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_BUILD_GRAPH_FAILED (GLUSTERD_COMP_BASE + 554)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_XML_TEXT_WRITE_FAIL (GLUSTERD_COMP_BASE + 555)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_XML_DOC_START_FAIL (GLUSTERD_COMP_BASE + 556)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_XML_ELE_CREATE_FAIL (GLUSTERD_COMP_BASE + 557)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_VOLUME_INCONSISTENCY (GLUSTERD_COMP_BASE + 558)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_XLATOR_LINK_FAIL (GLUSTERD_COMP_BASE + 559)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_REMOTE_HOST_GET_FAIL (GLUSTERD_COMP_BASE + 560)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_GRAPH_SET_OPT_FAIL (GLUSTERD_COMP_BASE + 561)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_ROOT_SQUASH_ENABLED (GLUSTERD_COMP_BASE + 562)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_ROOT_SQUASH_FAILED (GLUSTERD_COMP_BASE + 563)
+
/*------------*/
#define glfs_msg_end_x GLFS_MSGID_END, "Invalid: End of messages"
-
#endif /* !_GLUSTERD_MESSAGES_H_ */
+
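The bulk of this patch is the glusterd-messages.h block above: each newly ported log message gets its own ID, allocated as an offset from the component base, and GLFS_NUM_MESSAGES is bumped from 459 to 563 so it continues to name the highest offset in use (the 104 new IDs occupy offsets 460 through 563). A minimal sketch of the allocation scheme, paraphrased from the header rather than quoted from it:

/* GLUSTERD_COMP_BASE reserves a contiguous ID range for glusterd;
 * its value comes from the shared glfs message-ID registry. */
#define GLUSTERD_COMP_BASE   GLFS_MSGID_GLUSTERD
#define GLFS_NUM_MESSAGES    563     /* highest offset currently in use */
#define GLFS_MSGID_END       (GLUSTERD_COMP_BASE + GLFS_NUM_MESSAGES + 1)

/* Every message ID is base + a never-reused offset, e.g. the last one
 * added by this patch: */
#define GD_MSG_ROOT_SQUASH_FAILED   (GLUSTERD_COMP_BASE + 563)
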
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index a7720c47713..bb1ecbb7df3 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -231,7 +231,8 @@ glusterd_get_txn_opinfo (uuid_t *txn_id, glusterd_op_info_t *opinfo)
GF_ASSERT (priv);
if (!txn_id || !opinfo) {
- gf_log_callingfn (this->name, GF_LOG_ERROR,
+ gf_msg_callingfn (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_TRANS_ID_GET_FAIL,
"Empty transaction id or opinfo received.");
ret = -1;
goto out;
@@ -241,7 +242,8 @@ glusterd_get_txn_opinfo (uuid_t *txn_id, glusterd_op_info_t *opinfo)
uuid_utoa (*txn_id),
(void **) &opinfo_obj);
if (ret) {
- gf_log_callingfn (this->name, GF_LOG_ERROR,
+ gf_msg_callingfn (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED,
"Unable to get transaction opinfo "
"for transaction ID : %s",
uuid_utoa (*txn_id));
@@ -274,7 +276,8 @@ glusterd_set_txn_opinfo (uuid_t *txn_id, glusterd_op_info_t *opinfo)
GF_ASSERT (priv);
if (!txn_id) {
- gf_log_callingfn (this->name, GF_LOG_ERROR,
+ gf_msg_callingfn (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_TRANS_ID_GET_FAIL,
"Empty transaction id received.");
ret = -1;
goto out;
@@ -295,7 +298,8 @@ glusterd_set_txn_opinfo (uuid_t *txn_id, glusterd_op_info_t *opinfo)
uuid_utoa (*txn_id), opinfo_obj,
sizeof(glusterd_txn_opinfo_obj));
if (ret) {
- gf_log_callingfn (this->name, GF_LOG_ERROR,
+ gf_msg_callingfn (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_SET_FAILED,
"Unable to set opinfo for transaction"
" ID : %s", uuid_utoa (*txn_id));
goto out;
@@ -780,7 +784,8 @@ glusterd_validate_shared_storage (char *key, char *value, char *errstr)
snprintf (errstr, PATH_MAX,
"Invalid option(%s). Valid options "
"are 'enable' and 'disable'", value);
- gf_log (this->name, GF_LOG_ERROR, "%s", errstr);
+ gf_msg (this->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_INVALID_ENTRY, "%s", errstr);
ret = -1;
goto out;
}
@@ -794,7 +799,8 @@ glusterd_validate_shared_storage (char *key, char *value, char *errstr)
snprintf (errstr, PATH_MAX,
"Shared storage volume("GLUSTER_SHARED_STORAGE
") already exists.");
- gf_log (this->name, GF_LOG_ERROR, "%s", errstr);
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_VOL_ALREADY_EXIST, "%s", errstr);
ret = -1;
goto out;
}
@@ -803,7 +809,8 @@ glusterd_validate_shared_storage (char *key, char *value, char *errstr)
if (ret) {
snprintf (errstr, PATH_MAX,
"Failed to calculate number of connected peers.");
- gf_log (this->name, GF_LOG_ERROR, "%s", errstr);
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_PEER_COUNT_GET_FAIL, "%s", errstr);
goto out;
}
@@ -811,7 +818,8 @@ glusterd_validate_shared_storage (char *key, char *value, char *errstr)
snprintf (errstr, PATH_MAX,
"More than one node should "
"be up/present in the cluster to enable this option");
- gf_log (this->name, GF_LOG_ERROR, "%s", errstr);
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_INSUFFICIENT_UP_NODES, "%s", errstr);
ret = -1;
goto out;
}
@@ -1164,7 +1172,8 @@ glusterd_op_stage_set_volume (dict_t *dict, char **op_errstr)
ret = glusterd_validate_shared_storage (key, value, errstr);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_SHARED_STRG_VOL_OPT_VALIDATE_FAIL,
"Failed to validate shared "
"storage volume options");
goto out;
@@ -1429,7 +1438,8 @@ out:
GF_FREE (key_fixed);
if (msg[0] != '\0') {
- gf_log (this->name, GF_LOG_ERROR, "%s", msg);
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_OP_STAGE_RESET_VOL_FAIL, "%s", msg);
*op_errstr = gf_strdup (msg);
}
@@ -1758,7 +1768,8 @@ glusterd_op_stage_stats_volume (dict_t *dict, char **op_errstr)
ret = 0;
out:
if (msg[0] != '\0') {
- gf_log ("glusterd", GF_LOG_ERROR, "%s", msg);
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_OP_STAGE_STATS_VOL_FAIL, "%s", msg);
*op_errstr = gf_strdup (msg);
}
gf_msg_debug ("glusterd", 0, "Returning %d", ret);
@@ -1936,11 +1947,13 @@ glusterd_op_reset_all_volume_options (xlator_t *this, dict_t *dict)
if (option) {
ret = tear_down_cluster();
if (ret == -1)
- gf_log (THIS->name, GF_LOG_WARNING,
+ gf_msg (THIS->name, GF_LOG_WARNING, errno,
+ GD_MSG_DICT_GET_FAILED,
"Could not tear down NFS-Ganesha cluster");
ret = stop_ganesha (&op_errstr);
if (ret)
- gf_log (THIS->name, GF_LOG_WARNING,
+ gf_msg (THIS->name, GF_LOG_WARNING, 0,
+ GD_MSG_NFS_GNS_STOP_FAIL,
"Could not stop NFS-Ganesha service");
}
@@ -2055,7 +2068,8 @@ glusterd_op_reset_volume (dict_t *dict, char **op_rspstr)
if (ret) {
ret = ganesha_manage_export (dict, "off", op_rspstr);
if (ret) {
- gf_log (THIS->name, GF_LOG_WARNING,
+ gf_msg (THIS->name, GF_LOG_WARNING, 0,
+ GD_MSG_NFS_GNS_RESET_FAIL,
"Could not reset ganesha.enable key");
ret = 0;
}
@@ -2166,7 +2180,8 @@ glusterd_op_set_all_volume_options (xlator_t *this, dict_t *dict,
ret = glusterd_set_shared_storage (dict, key, value, op_errstr);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_SHARED_STRG_SET_FAIL,
"Failed to set shared storage option");
goto out;
}
@@ -2283,7 +2298,8 @@ glusterd_set_shared_storage (dict_t *dict, char *key, char *value,
"storage brick(%s). "
"Reason: %s", ss_brick_path,
strerror (errno));
- gf_log (this->name, GF_LOG_ERROR, "%s", errstr);
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DIR_OP_FAILED, "%s", errstr);
ret = -1;
goto out;
}
@@ -2295,7 +2311,8 @@ glusterd_set_shared_storage (dict_t *dict, char *key, char *value,
"storage brick(%s). "
"Reason: %s", ss_brick_path,
strerror (errno));
- gf_log (this->name, GF_LOG_ERROR, "%s", errstr);
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_CREATE_DIR_FAILED, "%s", errstr);
goto out;
}
@@ -2311,7 +2328,8 @@ glusterd_set_shared_storage (dict_t *dict, char *key, char *value,
ret = dict_set_dynstr_with_alloc (dict, "hooks_args", hooks_args);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to set"
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_SET_FAILED, "Failed to set"
" hooks_args in dict.");
goto out;
}
@@ -2433,7 +2451,8 @@ glusterd_op_set_volume (dict_t *dict, char **errstr)
}
if (strcmp (key, "config.transport") == 0) {
- gf_log (this->name, GF_LOG_INFO,
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_VOL_TRANSPORT_TYPE_CHANGE,
"changing transport-type for volume %s to %s",
volname, value);
ret = 0;
@@ -3586,7 +3605,8 @@ glusterd_op_ac_lock (glusterd_op_sm_event_t *event, void *ctx)
ret = glusterd_mgmt_v3_lock (globalname, lock_ctx->uuid,
&op_errno, "global");
if (ret)
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_MGMTV3_LOCK_GET_FAIL,
"Unable to acquire lock for %s",
globalname);
@@ -3649,7 +3669,8 @@ glusterd_op_ac_unlock (glusterd_op_sm_event_t *event, void *ctx)
ret = glusterd_mgmt_v3_unlock (globalname, lock_ctx->uuid,
"global");
if (ret)
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_MGMTV3_UNLOCK_FAIL,
"Unable to release lock for %s",
globalname);
@@ -3741,7 +3762,8 @@ glusterd_dict_set_volid (dict_t *dict, char *volname, char **op_errstr)
}
out:
if (msg[0] != '\0') {
- gf_log (this->name, GF_LOG_ERROR, "%s", msg);
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_VOL_ID_SET_FAIL, "%s", msg);
*op_errstr = gf_strdup (msg);
}
return ret;
@@ -3981,7 +4003,8 @@ glusterd_op_build_payload (dict_t **req, char **op_errstr, dict_t *op_ctx)
if (do_common) {
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
- gf_log (this->name, GF_LOG_CRITICAL,
+ gf_msg (this->name, GF_LOG_CRITICAL, -ret,
+ GD_MSG_DICT_GET_FAILED,
"volname is not present in "
"operation ctx");
goto out;
@@ -5637,14 +5660,15 @@ glusterd_bricks_select_remove_brick (dict_t *dict, char **op_errstr,
ret = dict_get_int32 (dict, "count", &count);
if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ gf_msg ("glusterd", GF_LOG_ERROR, -ret,
GD_MSG_DICT_GET_FAILED, "Unable to get count");
goto out;
}
ret = dict_get_int32 (dict, "command", &command);
if (ret) {
- gf_log ("glusterd", GF_LOG_ERROR, "Unable to get command");
+ gf_msg ("glusterd", GF_LOG_ERROR, -ret,
+ GD_MSG_DICT_GET_FAILED, "Unable to get command");
goto out;
}
@@ -6551,7 +6575,8 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
ret = 0;
} else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
if (!priv->bitd_svc.online) {
- gf_log (this->name, GF_LOG_ERROR, "Bitrot is not "
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_BITROT_NOT_RUNNING, "Bitrot is not "
"running");
ret = -1;
goto out;
@@ -6570,7 +6595,8 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
ret = 0;
} else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) {
if (!priv->scrub_svc.online) {
- gf_log (this->name, GF_LOG_ERROR, "Scrubber is not "
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_SCRUBBER_NOT_RUNNING, "Scrubber is not "
"running");
ret = -1;
goto out;
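The remaining hunks repeat one conversion pattern: error/warning/info messages gain an errno slot and a GD_MSG_* identifier via gf_msg(), while debug and trace messages move to gf_msg_debug()/gf_msg_trace() without an ID. The sketch below paraphrases the signatures as they appear at the call sites in this patch; the *_DEMO macros are stand-ins, not the real logging API.

/* sketch of the old vs. new call shapes seen in these hunks */
#include <stdio.h>

/* old style: no message ID, no errno slot */
#define GF_LOG_DEMO(domain, level, fmt, ...) \
        fprintf (stderr, "[%s] %s: " fmt "\n", domain, level, ##__VA_ARGS__)

/* new style: errno and a message identifier travel with the text */
#define GF_MSG_DEMO(domain, level, errnum, msgid, fmt, ...) \
        fprintf (stderr, "[%s] %s [MSGID: %d] [errno: %d] " fmt "\n", \
                 domain, level, msgid, errnum, ##__VA_ARGS__)

/* debug variant keeps no message ID, matching the gf_msg_debug() calls */
#define GF_MSG_DEBUG_DEMO(domain, errnum, fmt, ...) \
        fprintf (stderr, "[%s] D [errno: %d] " fmt "\n", \
                 domain, errnum, ##__VA_ARGS__)

int
main (void)
{
        GF_LOG_DEMO ("glusterd", "E", "Unable to get count");
        GF_MSG_DEMO ("glusterd", "E", 0, 106541 /* illustrative msgid */,
                     "Unable to get count");
        GF_MSG_DEBUG_DEMO ("glusterd", 0, "Returning %d", 0);
        return 0;
}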
diff --git a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
index 51003fb44eb..607ad3d38be 100644
--- a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
@@ -990,7 +990,7 @@ glusterd_peerinfo_find_by_generation (uint32_t generation) {
cds_list_for_each_entry_rcu (entry, &priv->peers, uuid_list) {
if (entry->generation == generation) {
- gf_log (this->name, GF_LOG_DEBUG,
+ gf_msg_debug (this->name, 0,
"Friend found... state: %s",
glusterd_friend_sm_state_name_get (entry->state.state));
found = entry; /* Probably should be rcu_dereferenced */
@@ -1000,7 +1000,7 @@ glusterd_peerinfo_find_by_generation (uint32_t generation) {
rcu_read_unlock ();
if (!found)
- gf_log (this->name, GF_LOG_DEBUG,
+ gf_msg_debug (this->name, 0,
"Friend with generation: %"PRIu32", not found",
generation);
return found;
diff --git a/xlators/mgmt/glusterd/src/glusterd-quota.c b/xlators/mgmt/glusterd/src/glusterd-quota.c
index 6332fb774c0..074c767654b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-quota.c
+++ b/xlators/mgmt/glusterd/src/glusterd-quota.c
@@ -400,7 +400,9 @@ glusterd_inode_quota_enable (glusterd_volinfo_t *volinfo, char **op_errstr,
ret = dict_set_dynstr_with_alloc (volinfo->dict,
VKEY_FEATURES_INODE_QUOTA, "on");
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "dict set failed");
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_SET_FAILED,
+ "dict set failed");
goto out;
}
@@ -466,7 +468,8 @@ glusterd_quota_enable (glusterd_volinfo_t *volinfo, char **op_errstr,
"features.quota-deem-statfs",
"on");
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "setting quota-deem-statfs"
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_SET_FAILED, "setting quota-deem-statfs"
"in volinfo failed");
goto out;
}
@@ -838,21 +841,25 @@ out:
} else if (!ret) {
ret = gf_store_rename_tmppath (volinfo->quota_conf_shandle);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to rename "
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_FILE_OP_FAILED,
+ "Failed to rename "
"quota conf file");
return ret;
}
ret = glusterd_compute_cksum (volinfo, _gf_true);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to "
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_CKSUM_COMPUTE_FAIL, "Failed to "
"compute cksum for quota conf file");
return ret;
}
ret = glusterd_store_save_quota_version_and_cksum (volinfo);
if (ret)
- gf_log (this->name, GF_LOG_ERROR, "Failed to "
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_QUOTA_CKSUM_VER_STORE_FAIL, "Failed to "
"store quota version and cksum");
}
@@ -1823,7 +1830,8 @@ glusterd_op_stage_quota (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
out:
if (ret && op_errstr && *op_errstr)
- gf_log (this->name, GF_LOG_ERROR, "%s", *op_errstr);
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_OP_STAGE_QUOTA_FAIL, "%s", *op_errstr);
gf_msg_debug (this->name, 0, "Returning %d", ret);
return ret;
diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
index ef003c2bb8d..03cc53c6cce 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
@@ -422,7 +422,9 @@ glusterd_rebalance_cmd_validate (int cmd, char *volname,
ret = glusterd_disallow_op_for_tier (*volinfo, GD_OP_REBALANCE, cmd);
if (ret) {
- gf_log ("glusterd", GF_LOG_ERROR, "Received rebalance command "
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_REBALANCE_CMD_IN_TIER_VOL,
+ "Received rebalance command "
"on Tier volume %s", volname);
snprintf (op_errstr, len, "Rebalance operations are not "
"supported on a tiered volume");
@@ -548,13 +550,16 @@ glusterd_brick_validation (dict_t *dict, char *key, data_t *value,
ret = glusterd_volume_brickinfo_get_by_brick (value->data, volinfo,
&brickinfo);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Incorrect brick %s for "
+ gf_msg (this->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_BRICK_NOT_FOUND,
+ "Incorrect brick %s for "
"volume %s", value->data, volinfo->volname);
return ret;
}
if (!brickinfo->decommissioned) {
- gf_log (this->name, GF_LOG_ERROR, "Incorrect brick %s for "
+ gf_msg (this->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_BRICK_NOT_FOUND, "Incorrect brick %s for "
"volume %s", value->data, volinfo->volname);
ret = -1;
return ret;
diff --git a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
index 1dbd82c4364..c4533a13946 100644
--- a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
+++ b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
@@ -135,7 +135,9 @@ __glusterd_handle_replace_brick (rpcsvc_request_t *req)
goto out;
}
- gf_log (this->name, GF_LOG_INFO, "Received replace brick req");
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_REPLACE_BRK_REQ_RCVD,
+ "Received replace brick req");
if (cli_req.dict.dict_len) {
/* Unserialize the dictionary */
@@ -192,7 +194,9 @@ __glusterd_handle_replace_brick (rpcsvc_request_t *req)
}
gf_msg_debug (this->name, 0, "dst brick=%s", dst_brick);
- gf_log (this->name, GF_LOG_INFO, "Received replace brick commit-force "
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_REPLACE_BRK_COMMIT_FORCE_REQ_RCVD,
+ "Received replace brick commit-force "
"request operation");
ret = glusterd_mgmt_v3_initiate_replace_brick_cmd_phases (req,
@@ -387,7 +391,7 @@ glusterd_op_stage_replace_brick (dict_t *dict, char **op_errstr,
ret = dict_set_int32 (rsp_dict, "src-brick-port",
src_brickinfo->port);
if (ret) {
- gf_msg_debug ("", 0,
+ gf_msg_debug (this->name, 0,
"Could not set src-brick-port=%d",
src_brickinfo->port);
}
@@ -426,7 +430,8 @@ glusterd_op_stage_replace_brick (dict_t *dict, char **op_errstr,
if (ret) {
*op_errstr = gf_strdup (msg);
ret = -1;
- gf_log (this->name, GF_LOG_ERROR, "%s", *op_errstr);
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_BRICK_VALIDATE_FAIL, "%s", *op_errstr);
goto out;
}
@@ -562,7 +567,8 @@ rb_update_srcbrick_port (glusterd_volinfo_t *volinfo,
src_brickinfo->port = src_port;
if (gf_is_local_addr (src_brickinfo->hostname)) {
- gf_log (this->name, GF_LOG_INFO,
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_BRK_PORT_NO_ADD_INDO,
"adding src-brick port no");
if (volinfo->transport_type == GF_TRANSPORT_RDMA) {
@@ -575,7 +581,8 @@ rb_update_srcbrick_port (glusterd_volinfo_t *volinfo,
src_brickinfo->port = pmap_registry_search (this,
brickname, GF_PMAP_PORT_BRICKSERVER);
if (!src_brickinfo->port) {
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_SRC_BRICK_PORT_UNAVAIL,
"Src brick port not available");
ret = -1;
goto out;
@@ -585,7 +592,7 @@ rb_update_srcbrick_port (glusterd_volinfo_t *volinfo,
ret = dict_set_int32 (rsp_dict, "src-brick-port",
src_brickinfo->port);
if (ret) {
- gf_log (this->name, GF_LOG_DEBUG,
+ gf_msg_debug (this->name, 0,
"Could not set src-brick port no");
goto out;
}
@@ -595,7 +602,7 @@ rb_update_srcbrick_port (glusterd_volinfo_t *volinfo,
ret = dict_set_int32 (req_dict, "src-brick-port",
src_brickinfo->port);
if (ret) {
- gf_log (this->name, GF_LOG_DEBUG,
+ gf_msg_debug (this->name, 0,
"Could not set src-brick port no");
goto out;
}
@@ -621,14 +628,15 @@ rb_update_dstbrick_port (glusterd_brickinfo_t *dst_brickinfo, dict_t *rsp_dict,
dst_brickinfo->port = dst_port;
if (gf_is_local_addr (dst_brickinfo->hostname)) {
- gf_log ("", GF_LOG_INFO,
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_BRK_PORT_NO_ADD_INDO,
"adding dst-brick port no");
if (rsp_dict) {
ret = dict_set_int32 (rsp_dict, "dst-brick-port",
dst_brickinfo->port);
if (ret) {
- gf_log ("", GF_LOG_DEBUG,
+ gf_msg_debug ("glusterd", 0,
"Could not set dst-brick port no in rsp dict");
goto out;
}
@@ -638,7 +646,7 @@ rb_update_dstbrick_port (glusterd_brickinfo_t *dst_brickinfo, dict_t *rsp_dict,
ret = dict_set_int32 (req_dict, "dst-brick-port",
dst_brickinfo->port);
if (ret) {
- gf_log ("", GF_LOG_DEBUG,
+ gf_msg_debug ("glusterd", 0,
"Could not set dst-brick port no");
goto out;
}
@@ -732,7 +740,7 @@ glusterd_op_perform_replace_brick (glusterd_volinfo_t *volinfo,
out:
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning %d", ret);
return ret;
}
@@ -837,7 +845,7 @@ glusterd_op_replace_brick (dict_t *dict, dict_t *rsp_dict)
}
if (gf_is_local_addr (dst_brickinfo->hostname)) {
- gf_log (this->name, GF_LOG_DEBUG, "I AM THE DESTINATION HOST");
+ gf_msg_debug (this->name, 0, "I AM THE DESTINATION HOST");
ret = rb_kill_destination_brick (volinfo, dst_brickinfo);
if (ret) {
gf_msg (this->name, GF_LOG_CRITICAL, 0,
@@ -849,7 +857,8 @@ glusterd_op_replace_brick (dict_t *dict, dict_t *rsp_dict)
ret = glusterd_svcs_stop (volinfo);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_NFS_SERVER_STOP_FAIL,
"Unable to stop nfs server, ret: %d", ret);
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 2a0fba23496..00389feb5d8 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -243,7 +243,8 @@ __glusterd_probe_cbk (struct rpc_req *req, struct iovec *iov,
goto out;
}
- gf_log (this->name, GF_LOG_INFO,
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_PROBE_REQ_RESP_RCVD,
"Received probe resp from uuid: %s, host: %s",
uuid_utoa (rsp.uuid), rsp.hostname);
if (rsp.op_ret != 0) {
@@ -270,7 +271,8 @@ __glusterd_probe_cbk (struct rpc_req *req, struct iovec *iov,
peerinfo = glusterd_peerinfo_find (rsp.uuid, rsp.hostname);
if (peerinfo == NULL) {
ret = -1;
- gf_log (this->name, GF_LOG_ERROR, "Could not find peerd %s(%s)",
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_PEER_NOT_FOUND, "Could not find peerd %s(%s)",
rsp.hostname, uuid_utoa (rsp.uuid));
goto unlock;
}
@@ -353,7 +355,8 @@ reply:
goto unlock;
} else if (strncasecmp (rsp.hostname, peerinfo->hostname, 1024)) {
- gf_log (THIS->name, GF_LOG_INFO, "Host: %s with uuid: %s "
+ gf_msg (THIS->name, GF_LOG_INFO, 0,
+ GD_MSG_HOST_PRESENT_ALREADY, "Host: %s with uuid: %s "
"already present in cluster with alias hostname: %s",
rsp.hostname, uuid_utoa (rsp.uuid), peerinfo->hostname);
@@ -402,7 +405,8 @@ cont:
ret = glusterd_friend_sm_inject_event (event);
- gf_log ("glusterd", GF_LOG_INFO, "Received resp to probe req");
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_PROBE_REQ_RESP_RCVD, "Received resp to probe req");
unlock:
rcu_read_unlock ();
@@ -463,7 +467,8 @@ __glusterd_friend_add_cbk (struct rpc_req * req, struct iovec *iov,
op_ret = rsp.op_ret;
op_errno = rsp.op_errno;
- gf_log ("glusterd", GF_LOG_INFO,
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_RESPONSE_INFO,
"Received %s from uuid: %s, host: %s, port: %d",
(op_ret)?"RJT":"ACC", uuid_utoa (rsp.uuid), rsp.hostname, rsp.port);
@@ -580,7 +585,8 @@ __glusterd_friend_remove_cbk (struct rpc_req * req, struct iovec *iov,
op_ret = rsp.op_ret;
op_errno = rsp.op_errno;
- gf_log ("glusterd", GF_LOG_INFO,
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_RESPONSE_INFO,
"Received %s from uuid: %s, host: %s, port: %d",
(op_ret)?"RJT":"ACC", uuid_utoa (rsp.uuid), rsp.hostname, rsp.port);
@@ -676,7 +682,8 @@ __glusterd_friend_update_cbk (struct rpc_req *req, struct iovec *iov,
ret = 0;
out:
- gf_log (this->name, GF_LOG_INFO, "Received %s from uuid: %s",
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_RESPONSE_INFO, "Received %s from uuid: %s",
(ret)?"RJT":"ACC", uuid_utoa (rsp.uuid));
GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe));
@@ -1516,7 +1523,8 @@ glusterd_rpc_friend_add (call_frame_t *frame, xlator_t *this,
if (!peerinfo) {
rcu_read_unlock ();
ret = -1;
- gf_log (this->name, GF_LOG_ERROR, "Could not find peer %s(%s)",
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_PEER_NOT_FOUND, "Could not find peer %s(%s)",
event->peername, uuid_utoa (event->peerid));
goto out;
}
@@ -1540,7 +1548,8 @@ glusterd_rpc_friend_add (call_frame_t *frame, xlator_t *this,
"hostname_in_cluster",
peerinfo->hostname);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_SET_FAILED,
"Unable to add hostname of the peer");
goto out;
}
@@ -1613,7 +1622,8 @@ glusterd_rpc_friend_remove (call_frame_t *frame, xlator_t *this,
if (!peerinfo) {
rcu_read_unlock ();
ret = -1;
- gf_log (this->name, GF_LOG_ERROR, "Could not find peer %s(%s)",
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_PEER_NOT_FOUND, "Could not find peer %s(%s)",
event->peername, uuid_utoa (event->peerid));
goto out;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-scrub-svc.c b/xlators/mgmt/glusterd/src/glusterd-scrub-svc.c
index 8d1133f9f55..36863e2a248 100644
--- a/xlators/mgmt/glusterd/src/glusterd-scrub-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-scrub-svc.c
@@ -44,12 +44,13 @@ glusterd_scrubsvc_create_volfile ()
ret = glusterd_create_global_volfile (build_scrub_graph,
filepath, NULL);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to create volfile");
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_VOLFILE_CREATE_FAIL, "Failed to create volfile");
goto out;
}
out:
- gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ gf_msg_debug (this->name, 0, "Returning %d", ret);
return ret;
}
@@ -80,7 +81,7 @@ glusterd_scrubsvc_manager (glusterd_svc_t *svc, void *data, int flags)
}
out:
- gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+ gf_msg_debug (THIS->name, 0, "Returning %d", ret);
return ret;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.c b/xlators/mgmt/glusterd/src/glusterd-sm.c
index 10b3ff91882..6acf56fd2d8 100644
--- a/xlators/mgmt/glusterd/src/glusterd-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-sm.c
@@ -615,7 +615,8 @@ glusterd_peer_detach_cleanup (glusterd_conf_t *priv)
*/
if (!glusterd_friend_contains_vol_bricks (volinfo,
MY_UUID)) {
- gf_log (THIS->name, GF_LOG_INFO,
+ gf_msg (THIS->name, GF_LOG_INFO, 0,
+ GD_MSG_STALE_VOL_DELETE_INFO,
"Deleting stale volume %s", volinfo->volname);
ret = glusterd_delete_volume (volinfo);
if (ret) {
@@ -866,7 +867,7 @@ glusterd_ac_handle_friend_add_req (glusterd_friend_sm_event_t *event, void *ctx)
ret = dict_get_str (ev_ctx->vols, "hostname_in_cluster",
&hostname);
if (ret || !hostname) {
- gf_log (this->name, GF_LOG_DEBUG,
+ gf_msg_debug (this->name, 0,
"Unable to fetch local hostname from peer");
} else
strncpy (local_node_hostname, hostname,
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
index ae2521c570b..cf1c8f3be2a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
@@ -1992,7 +1992,7 @@ glusterd_snap_config_use_rsp_dict (dict_t *dst, dict_t *src)
uint64_t voldisplaycount = 0;
if (!dst || !src) {
- gf_msg ("", GF_LOG_ERROR, 0,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_DICT_EMPTY, "Source or Destination "
"dict is empty.");
goto out;
@@ -2000,7 +2000,7 @@ glusterd_snap_config_use_rsp_dict (dict_t *dst, dict_t *src)
ret = dict_get_int32 (dst, "config-command", &config_command);
if (ret) {
- gf_msg ("", GF_LOG_ERROR, 0,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_DICT_GET_FAILED,
"failed to get config-command type");
goto out;
@@ -2016,7 +2016,7 @@ glusterd_snap_config_use_rsp_dict (dict_t *dst, dict_t *src)
GLUSTERD_STORE_KEY_SNAP_MAX_HARD_LIMIT,
hard_limit);
if (ret) {
- gf_msg ("", GF_LOG_ERROR, 0,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_DICT_SET_FAILED,
"Unable to set snap_max_hard_limit");
goto out;
@@ -2031,7 +2031,7 @@ glusterd_snap_config_use_rsp_dict (dict_t *dst, dict_t *src)
GLUSTERD_STORE_KEY_SNAP_MAX_SOFT_LIMIT,
&soft_limit);
if (ret) {
- gf_msg ("", GF_LOG_ERROR, 0,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_DICT_GET_FAILED,
"Unable to get snap_max_soft_limit");
goto out;
@@ -2041,7 +2041,7 @@ glusterd_snap_config_use_rsp_dict (dict_t *dst, dict_t *src)
GLUSTERD_STORE_KEY_SNAP_MAX_SOFT_LIMIT,
soft_limit);
if (ret) {
- gf_msg ("", GF_LOG_ERROR, 0,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_DICT_SET_FAILED,
"Unable to set snap_max_soft_limit");
goto out;
@@ -2050,7 +2050,7 @@ glusterd_snap_config_use_rsp_dict (dict_t *dst, dict_t *src)
ret = dict_get_uint64 (src, "voldisplaycount",
&voldisplaycount);
if (ret) {
- gf_msg ("", GF_LOG_ERROR, 0,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_DICT_GET_FAILED,
"Unable to get voldisplaycount");
goto out;
@@ -2059,7 +2059,7 @@ glusterd_snap_config_use_rsp_dict (dict_t *dst, dict_t *src)
ret = dict_set_uint64 (dst, "voldisplaycount",
voldisplaycount);
if (ret) {
- gf_msg ("", GF_LOG_ERROR, 0,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_DICT_SET_FAILED,
"Unable to set voldisplaycount");
goto out;
@@ -2070,14 +2070,14 @@ glusterd_snap_config_use_rsp_dict (dict_t *dst, dict_t *src)
"volume%"PRIu64"-volname", i);
ret = dict_get_str (src, buf, &volname);
if (ret) {
- gf_msg ("", GF_LOG_ERROR, 0,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_DICT_GET_FAILED,
"Unable to get %s", buf);
goto out;
}
ret = dict_set_str (dst, buf, volname);
if (ret) {
- gf_msg ("", GF_LOG_ERROR, 0,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_DICT_SET_FAILED,
"Unable to set %s", buf);
goto out;
@@ -2087,14 +2087,14 @@ glusterd_snap_config_use_rsp_dict (dict_t *dst, dict_t *src)
"volume%"PRIu64"-snap-max-hard-limit", i);
ret = dict_get_uint64 (src, buf, &value);
if (ret) {
- gf_msg ("", GF_LOG_ERROR, 0,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_DICT_GET_FAILED,
"Unable to get %s", buf);
goto out;
}
ret = dict_set_uint64 (dst, buf, value);
if (ret) {
- gf_msg ("", GF_LOG_ERROR, 0,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_DICT_SET_FAILED,
"Unable to set %s", buf);
goto out;
@@ -2104,14 +2104,14 @@ glusterd_snap_config_use_rsp_dict (dict_t *dst, dict_t *src)
"volume%"PRIu64"-active-hard-limit", i);
ret = dict_get_uint64 (src, buf, &value);
if (ret) {
- gf_msg ("", GF_LOG_ERROR, 0,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_DICT_GET_FAILED,
"Unable to get %s", buf);
goto out;
}
ret = dict_set_uint64 (dst, buf, value);
if (ret) {
- gf_msg ("", GF_LOG_ERROR, 0,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_DICT_SET_FAILED,
"Unable to set %s", buf);
goto out;
@@ -2121,14 +2121,14 @@ glusterd_snap_config_use_rsp_dict (dict_t *dst, dict_t *src)
"volume%"PRIu64"-snap-max-soft-limit", i);
ret = dict_get_uint64 (src, buf, &value);
if (ret) {
- gf_msg ("", GF_LOG_ERROR, 0,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_DICT_GET_FAILED,
"Unable to get %s", buf);
goto out;
}
ret = dict_set_uint64 (dst, buf, value);
if (ret) {
- gf_msg ("", GF_LOG_ERROR, 0,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_DICT_SET_FAILED,
"Unable to set %s", buf);
goto out;
@@ -2142,7 +2142,7 @@ glusterd_snap_config_use_rsp_dict (dict_t *dst, dict_t *src)
ret = 0;
out:
- gf_msg_debug ("", 0, "Returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning %d", ret);
return ret;
}
@@ -2379,7 +2379,7 @@ glusterd_snap_use_rsp_dict (dict_t *dst, dict_t *src)
int32_t snap_command = 0;
if (!dst || !src) {
- gf_msg ("", GF_LOG_ERROR, 0,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_DICT_EMPTY, "Source or Destination "
"dict is empty.");
goto out;
@@ -2387,7 +2387,7 @@ glusterd_snap_use_rsp_dict (dict_t *dst, dict_t *src)
ret = dict_get_int32 (dst, "type", &snap_command);
if (ret) {
- gf_msg ("", GF_LOG_ERROR, 0,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_DICT_GET_FAILED, "unable to get the type of "
"the snapshot command");
goto out;
@@ -2399,7 +2399,7 @@ glusterd_snap_use_rsp_dict (dict_t *dst, dict_t *src)
case GF_SNAP_OPTION_TYPE_CLONE:
ret = glusterd_snap_create_use_rsp_dict (dst, src);
if (ret) {
- gf_msg ("", GF_LOG_ERROR, 0,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_RSP_DICT_USE_FAIL,
"Unable to use rsp dict");
goto out;
@@ -2408,7 +2408,7 @@ glusterd_snap_use_rsp_dict (dict_t *dst, dict_t *src)
case GF_SNAP_OPTION_TYPE_CONFIG:
ret = glusterd_snap_config_use_rsp_dict (dst, src);
if (ret) {
- gf_msg ("", GF_LOG_ERROR, 0,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_RSP_DICT_USE_FAIL,
"Unable to use rsp dict");
goto out;
@@ -2423,7 +2423,7 @@ glusterd_snap_use_rsp_dict (dict_t *dst, dict_t *src)
ret = 0;
out:
- gf_msg_debug ("", 0, "Returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning %d", ret);
return ret;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
index f7d9a4fb621..155484b5fbd 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
@@ -2252,7 +2252,8 @@ glusterd_snapshot_clone_prevalidate (dict_t *dict, char **op_errstr,
out:
if (ret && err_str[0] != '\0') {
- gf_log (this->name, loglevel, "%s", err_str);
+ gf_msg (this->name, loglevel, 0,
+ GD_MSG_SNAP_CLONE_PREVAL_FAILED, "%s", err_str);
*op_errstr = gf_strdup (err_str);
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
index 125d1430ee5..8d6e3d04cb8 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.c
+++ b/xlators/mgmt/glusterd/src/glusterd-store.c
@@ -2026,7 +2026,8 @@ glusterd_restore_op_version (xlator_t *this)
goto out;
}
conf->op_version = op_version;
- gf_log ("glusterd", GF_LOG_INFO,
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_OP_VERS_INFO,
"retrieved op-version: %d", conf->op_version);
goto out;
}
@@ -2046,11 +2047,15 @@ glusterd_restore_op_version (xlator_t *this)
*/
ret = glusterd_retrieve_uuid();
if (ret) {
- gf_log (this->name, GF_LOG_INFO, "Detected new install. Setting"
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_OP_VERS_SET_INFO,
+ "Detected new install. Setting"
" op-version to maximum : %d", GD_OP_VERSION_MAX);
conf->op_version = GD_OP_VERSION_MAX;
} else {
- gf_log (this->name, GF_LOG_INFO, "Upgrade detected. Setting"
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_OP_VERS_SET_INFO,
+ "Upgrade detected. Setting"
" op-version to minimum : %d", GD_OP_VERSION_MIN);
conf->op_version = GD_OP_VERSION_MIN;
}
@@ -2808,7 +2813,8 @@ glusterd_store_retrieve_volume (char *volname, glusterd_snap_t *snap)
/* Initialize the snapd service */
ret = glusterd_snapdsvc_init (volinfo);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to initialize snapd "
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_SNAPD_INIT_FAIL, "Failed to initialize snapd "
"service for volume %s", volinfo->volname);
goto out;
}
@@ -3023,7 +3029,8 @@ glusterd_store_retrieve_volumes (xlator_t *this, glusterd_snap_t *snap)
ret = glusterd_store_retrieve_node_state (volinfo);
if (ret) {
/* Backward compatibility */
- gf_log (this->name, GF_LOG_INFO,
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_NEW_NODE_STATE_CREATION,
"Creating a new node_state "
"for volume: %s.", entry->d_name);
glusterd_store_create_nodestate_sh_on_absence (volinfo);
@@ -3127,7 +3134,8 @@ glusterd_mount_brick_paths (char *brick_mount_path,
entry = glusterd_get_mnt_entry_info (brick_mount_path, buff,
sizeof (buff), &save_entry);
if (entry) {
- gf_log (this->name, GF_LOG_INFO,
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_ALREADY_MOUNTED,
"brick_mount_path (%s) already mounted.",
brick_mount_path);
ret = 0;
@@ -4455,7 +4463,9 @@ glusterd_quota_conf_write_header (int fd)
out:
if (ret < 0)
- gf_log_callingfn ("quota", GF_LOG_ERROR, "failed to write "
+ gf_msg_callingfn ("quota", GF_LOG_ERROR, 0,
+ GD_MSG_QUOTA_CONF_WRITE_FAIL,
+ "failed to write "
"header to a quota conf");
return ret;
@@ -4493,7 +4503,9 @@ glusterd_quota_conf_write_gfid (int fd, void *buf, char type)
out:
if (ret < 0)
- gf_log_callingfn ("quota", GF_LOG_ERROR, "failed to write "
+ gf_msg_callingfn ("quota", GF_LOG_ERROR, 0,
+ GD_MSG_QUOTA_CONF_WRITE_FAIL,
+ "failed to write "
"gfid %s to a quota conf", uuid_utoa (buf));
return ret;
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index 37daa9d1e8f..9d7b91336ed 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -569,7 +569,9 @@ _gd_syncop_mgmt_lock_cbk (struct rpc_req *req, struct iovec *iov,
peerinfo->locked = _gf_true;
} else {
rsp.op_ret = -1;
- gf_log (this->name, GF_LOG_ERROR, "Could not find peer with "
+ gf_msg (this->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_PEER_NOT_FOUND,
+ "Could not find peer with "
"ID %s", uuid_utoa (*peerid));
}
rcu_read_unlock ();
@@ -662,7 +664,8 @@ _gd_syncop_mgmt_unlock_cbk (struct rpc_req *req, struct iovec *iov,
peerinfo->locked = _gf_false;
} else {
rsp.op_ret = -1;
- gf_log (this->name, GF_LOG_ERROR, "Could not find peer with "
+ gf_msg (this->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_PEER_NOT_FOUND, "Could not find peer with "
"ID %s", uuid_utoa (*peerid));
}
rcu_read_unlock ();
@@ -1839,7 +1842,8 @@ global:
ret = glusterd_mgmt_v3_lock (global, MY_UUID, &op_errno,
"global");
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_MGMTV3_LOCK_GET_FAIL,
"Unable to acquire lock for %s", global);
gf_asprintf (&op_errstr,
"Another transaction is in progress "
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 04a181ff874..d21862c76db 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -1271,7 +1271,7 @@ glusterd_volume_brickinfo_get_by_brick (char *brick,
brickinfo);
(void) glusterd_brickinfo_delete (tmp_brickinfo);
out:
- gf_msg_debug ("", 0, "Returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning %d", ret);
return ret;
}
@@ -1359,7 +1359,9 @@ glusterd_service_stop (const char *service, char *pidfile, int sig,
GF_ASSERT (this);
if (!gf_is_service_running (pidfile, &pid)) {
ret = 0;
- gf_log (this->name, GF_LOG_INFO, "%s already stopped", service);
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_ALREADY_STOPPED,
+ "%s already stopped", service);
goto out;
}
gf_msg_debug (this->name, 0, "Stopping gluster %s running in pid: "
@@ -1482,7 +1484,7 @@ glusterd_brick_connect (glusterd_volinfo_t *volinfo,
}
out:
- gf_msg_debug ("", 0, "Returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning %d", ret);
return ret;
}
@@ -1536,7 +1538,8 @@ glusterd_volume_start_glusterfs (glusterd_volinfo_t *volinfo,
GF_ASSERT (priv);
if (brickinfo->snap_status == -1) {
- gf_log (this->name, GF_LOG_INFO,
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_SNAPSHOT_PENDING,
"Snapshot is pending on %s:%s. "
"Hence not starting the brick",
brickinfo->hostname,
@@ -1650,7 +1653,7 @@ glusterd_volume_start_glusterfs (glusterd_volinfo_t *volinfo,
if (volinfo->memory_accounting)
runner_add_arg (&runner, "--mem-accounting");
- runner_log (&runner, "", GF_LOG_DEBUG, "Starting GlusterFS");
+ runner_log (&runner, "", 0, "Starting GlusterFS");
if (wait) {
synclock_unlock (&priv->big_lock);
ret = runner_run (&runner);
@@ -1711,7 +1714,8 @@ glusterd_brick_disconnect (glusterd_brickinfo_t *brickinfo)
GF_ASSERT (brickinfo);
if (!brickinfo) {
- gf_log_callingfn ("glusterd", GF_LOG_WARNING, "!brickinfo");
+ gf_msg_callingfn ("glusterd", GF_LOG_WARNING, EINVAL,
+ GD_MSG_BRICK_NOT_FOUND, "!brickinfo");
return -1;
}
@@ -2735,7 +2739,7 @@ out:
if (msg[0])
gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_IMPORT_PRDICT_DICT, "%s", msg);
- gf_msg_debug ("", 0, "Returning with %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning with %d", ret);
return ret;
}
@@ -2814,7 +2818,7 @@ out:
if (msg[0])
gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_VOL_OPTS_IMPORT_FAIL, "%s", msg);
- gf_msg_debug ("", 0, "Returning with %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning with %d", ret);
return ret;
}
@@ -2902,7 +2906,7 @@ out:
if (msg[0])
gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_BRICK_IMPORT_FAIL, "%s", msg);
- gf_msg_debug ("", 0, "Returning with %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning with %d", ret);
return ret;
}
@@ -2942,7 +2946,7 @@ glusterd_import_bricks (dict_t *peer_data, int32_t vol_count,
}
ret = 0;
out:
- gf_msg_debug ("", 0, "Returning with %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning with %d", ret);
return ret;
}
@@ -3256,7 +3260,7 @@ glusterd_import_volinfo (dict_t *peer_data, int count,
ret = dict_get_int32 (peer_data, key,
&new_volinfo->tier_info.hot_brick_count);
if (ret)
- gf_log (THIS->name, GF_LOG_DEBUG,
+ gf_msg_debug (THIS->name, 0,
"peer is possibly old version");
/* not having a 'hot_type' key is not a error
@@ -3266,7 +3270,7 @@ glusterd_import_volinfo (dict_t *peer_data, int count,
ret = dict_get_int32 (peer_data, key,
&new_volinfo->tier_info.hot_type);
if (ret)
- gf_log (THIS->name, GF_LOG_DEBUG,
+ gf_msg_debug (THIS->name, 0,
"peer is possibly old version");
/* not having a 'hot_replica_count' key is not a error
@@ -3276,7 +3280,7 @@ glusterd_import_volinfo (dict_t *peer_data, int count,
ret = dict_get_int32 (peer_data, key,
&new_volinfo->tier_info.hot_replica_count);
if (ret)
- gf_log (THIS->name, GF_LOG_DEBUG,
+ gf_msg_debug (THIS->name, 0,
"peer is possibly old version");
/* not having a 'cold_brick_count' key is not a error
@@ -3286,7 +3290,7 @@ glusterd_import_volinfo (dict_t *peer_data, int count,
ret = dict_get_int32 (peer_data, key,
&new_volinfo->tier_info.cold_brick_count);
if (ret)
- gf_log (THIS->name, GF_LOG_DEBUG,
+ gf_msg_debug (THIS->name, 0,
"peer is possibly old version");
/* not having a 'cold_type' key is not a error
@@ -3296,7 +3300,7 @@ glusterd_import_volinfo (dict_t *peer_data, int count,
ret = dict_get_int32 (peer_data, key,
&new_volinfo->tier_info.cold_type);
if (ret)
- gf_log (THIS->name, GF_LOG_DEBUG,
+ gf_msg_debug (THIS->name, 0,
"peer is possibly old version");
/* not having a 'cold_replica_count' key is not a error
@@ -3306,7 +3310,7 @@ glusterd_import_volinfo (dict_t *peer_data, int count,
ret = dict_get_int32 (peer_data, key,
&new_volinfo->tier_info.cold_replica_count);
if (ret)
- gf_log (THIS->name, GF_LOG_DEBUG,
+ gf_msg_debug (THIS->name, 0,
"peer is possibly old version");
/* not having a 'cold_disperse_count' key is not a error
@@ -3316,7 +3320,7 @@ glusterd_import_volinfo (dict_t *peer_data, int count,
ret = dict_get_int32 (peer_data, key,
&new_volinfo->tier_info.cold_disperse_count);
if (ret)
- gf_log (THIS->name, GF_LOG_DEBUG,
+ gf_msg_debug (THIS->name, 0,
"peer is possibly old version");
/* not having a 'cold_redundancy_count' key is not a error
@@ -3327,7 +3331,7 @@ glusterd_import_volinfo (dict_t *peer_data, int count,
ret = dict_get_int32 (peer_data, key,
&new_volinfo->tier_info.cold_redundancy_count);
if (ret)
- gf_log (THIS->name, GF_LOG_DEBUG,
+ gf_msg_debug (THIS->name, 0,
"peer is possibly old version");
/* not having a 'cold_dist_count' key is not a error
@@ -3337,7 +3341,7 @@ glusterd_import_volinfo (dict_t *peer_data, int count,
ret = dict_get_int32 (peer_data, key,
&new_volinfo->tier_info.cold_dist_leaf_count);
if (ret)
- gf_log (THIS->name, GF_LOG_DEBUG,
+ gf_msg_debug (THIS->name, 0,
"peer is possibly old version");
new_volinfo->subvol_count = new_volinfo->brick_count/
@@ -3493,7 +3497,7 @@ out:
if (msg[0])
gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_VOLINFO_IMPORT_FAIL, "%s", msg);
- gf_msg_debug ("", 0, "Returning with %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning with %d", ret);
return ret;
}
@@ -3600,7 +3604,7 @@ glusterd_volinfo_stop_stale_bricks (glusterd_volinfo_t *new_volinfo,
}
ret = 0;
out:
- gf_msg_debug ("", 0, "Returning with %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning with %d", ret);
return ret;
}
@@ -3780,7 +3784,8 @@ glusterd_import_friend_volume (dict_t *peer_data, size_t count)
ret = glusterd_snapdsvc_init (new_volinfo);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to initialize "
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_SNAPD_INIT_FAIL, "Failed to initialize "
"snapdsvc for volume %s", new_volinfo->volname);
goto out;
}
@@ -3795,7 +3800,8 @@ glusterd_import_friend_volume (dict_t *peer_data, size_t count)
ret = glusterd_store_volinfo (new_volinfo, GLUSTERD_VOLINFO_VER_AC_NONE);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to store "
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_VOLINFO_STORE_FAIL, "Failed to store "
"volinfo for volume %s", new_volinfo->volname);
goto out;
}
@@ -3813,7 +3819,7 @@ glusterd_import_friend_volume (dict_t *peer_data, size_t count)
glusterd_compare_volume_name);
out:
- gf_msg_debug ("", 0, "Returning with ret: %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning with ret: %d", ret);
return ret;
}
@@ -3838,7 +3844,7 @@ glusterd_import_friend_volumes (dict_t *peer_data)
}
out:
- gf_msg_debug ("", 0, "Returning with %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning with %d", ret);
return ret;
}
@@ -4100,42 +4106,54 @@ void
glusterd_nfs_pmap_deregister ()
{
if (pmap_unset (MOUNT_PROGRAM, MOUNTV3_VERSION))
- gf_log ("", GF_LOG_INFO, "De-registered MOUNTV3 successfully");
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_DEREGISTER_SUCCESS,
+ "De-registered MOUNTV3 successfully");
else
gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_PMAP_UNSET_FAIL,
"De-register MOUNTV3 is unsuccessful");
if (pmap_unset (MOUNT_PROGRAM, MOUNTV1_VERSION))
- gf_log ("", GF_LOG_INFO, "De-registered MOUNTV1 successfully");
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_DEREGISTER_SUCCESS,
+ "De-registered MOUNTV1 successfully");
else
gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_PMAP_UNSET_FAIL,
"De-register MOUNTV1 is unsuccessful");
if (pmap_unset (NFS_PROGRAM, NFSV3_VERSION))
- gf_log ("", GF_LOG_INFO, "De-registered NFSV3 successfully");
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_DEREGISTER_SUCCESS,
+ "De-registered NFSV3 successfully");
else
gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_PMAP_UNSET_FAIL,
"De-register NFSV3 is unsuccessful");
if (pmap_unset (NLM_PROGRAM, NLMV4_VERSION))
- gf_log ("", GF_LOG_INFO, "De-registered NLM v4 successfully");
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_DEREGISTER_SUCCESS,
+ "De-registered NLM v4 successfully");
else
gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_PMAP_UNSET_FAIL,
"De-registration of NLM v4 failed");
if (pmap_unset (NLM_PROGRAM, NLMV1_VERSION))
- gf_log ("", GF_LOG_INFO, "De-registered NLM v1 successfully");
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_DEREGISTER_SUCCESS,
+ "De-registered NLM v1 successfully");
else
gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_PMAP_UNSET_FAIL,
"De-registration of NLM v1 failed");
if (pmap_unset (ACL_PROGRAM, ACLV3_VERSION))
- gf_log ("", GF_LOG_INFO, "De-registered ACL v3 successfully");
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_DEREGISTER_SUCCESS,
+ "De-registered ACL v3 successfully");
else
gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_PMAP_UNSET_FAIL,
@@ -4360,7 +4378,7 @@ glusterd_volume_count_get (void)
}
- gf_msg_debug ("", 0, "Returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning %d", ret);
return ret;
}
@@ -4611,7 +4629,8 @@ _local_gsyncd_start (dict_t *this, char *key, data_t *value, void *data)
* not started, do not restart the geo-rep session */
if ((!strcmp (buf, "Created")) ||
(!strcmp (buf, "Stopped"))) {
- gf_log (this1->name, GF_LOG_INFO,
+ gf_msg (this1->name, GF_LOG_INFO, 0,
+ GD_MSG_GEO_REP_START_FAILED,
"Geo-Rep Session was not started between "
"%s and %s::%s. Not Restarting", volinfo->volname,
slave_url, slave_vol);
@@ -4619,7 +4638,8 @@ _local_gsyncd_start (dict_t *this, char *key, data_t *value, void *data)
} else if (strstr(buf, "Paused")) {
is_paused = _gf_true;
} else if ((!strcmp (buf, "Config Corrupted"))) {
- gf_log (this1->name, GF_LOG_INFO,
+ gf_msg (this1->name, GF_LOG_INFO, 0,
+ GD_MSG_RECOVERING_CORRUPT_CONF,
"Recovering from a corrupted config. "
"Not Restarting. Use start (force) to "
"start the session between %s and %s::%s.",
@@ -5381,7 +5401,7 @@ glusterd_all_volume_cond_check (glusterd_condition_func func, int status,
}
ret = 0;
out:
- gf_msg_debug ("", 0, "returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "returning %d", ret);
return ret;
}
@@ -5566,7 +5586,7 @@ glusterd_rb_check_bricks (glusterd_volinfo_t *volinfo,
if (strcmp (rb->src_brick->hostname, src->hostname) ||
strcmp (rb->src_brick->path, src->path)) {
- gf_msg("", GF_LOG_ERROR, 0,
+ gf_msg("glusterd", GF_LOG_ERROR, 0,
GD_MSG_RB_SRC_BRICKS_MISMATCH,
"Replace brick src bricks differ");
return -1;
@@ -5793,7 +5813,7 @@ glusterd_sm_tr_log_transition_add_to_dict (dict_t *dict,
goto out;
out:
- gf_msg_debug ("", 0, "returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "returning %d", ret);
return ret;
}
@@ -5834,7 +5854,7 @@ glusterd_sm_tr_log_add_to_dict (dict_t *dict,
ret = dict_set_int32 (dict, key, log->count);
out:
- gf_msg_debug ("", 0, "returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "returning %d", ret);
return ret;
}
@@ -5865,7 +5885,7 @@ glusterd_sm_tr_log_init (glusterd_sm_tr_log_t *log,
ret = 0;
out:
- gf_msg_debug ("", 0, "returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "returning %d", ret);
return ret;
}
@@ -6026,7 +6046,7 @@ glusterd_get_local_brickpaths (glusterd_volinfo_t *volinfo, char **pathlist)
path_tokens = GF_CALLOC (sizeof(char*), volinfo->brick_count,
gf_gld_mt_charptr);
if (!path_tokens) {
- gf_msg_debug ("", 0, "Could not allocate memory.");
+ gf_msg_debug ("glusterd", 0, "Could not allocate memory.");
ret = -1;
goto out;
}
@@ -6043,7 +6063,7 @@ glusterd_get_local_brickpaths (glusterd_volinfo_t *volinfo, char **pathlist)
path[sizeof(path)-1] = '\0';
path_tokens[count] = gf_strdup (path);
if (!path_tokens[count]) {
- gf_msg_debug ("", 0,
+ gf_msg_debug ("glusterd", 0,
"Could not allocate memory.");
ret = -1;
goto out;
@@ -6055,7 +6075,7 @@ glusterd_get_local_brickpaths (glusterd_volinfo_t *volinfo, char **pathlist)
tmp_path_list = GF_CALLOC (sizeof(char), total_len + 1,
gf_gld_mt_char);
if (!tmp_path_list) {
- gf_msg_debug ("", 0, "Could not allocate memory.");
+ gf_msg_debug ("glusterd", 0, "Could not allocate memory.");
ret = -1;
goto out;
}
@@ -6078,12 +6098,12 @@ out:
path_tokens = NULL;
if (ret == 0) {
- gf_msg_debug ("", 0, "No Local Bricks Present.");
+ gf_msg_debug ("glusterd", 0, "No Local Bricks Present.");
GF_FREE (tmp_path_list);
tmp_path_list = NULL;
}
- gf_msg_debug ("", 0, "Returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning %d", ret);
return ret;
}
@@ -6111,7 +6131,7 @@ glusterd_start_gsync (glusterd_volinfo_t *master_vol, char *slave,
if (!path_list) {
ret = 0;
- gf_msg_debug ("", 0, "No Bricks in this node."
+ gf_msg_debug ("glusterd", 0, "No Bricks in this node."
" Not starting gsyncd.");
goto out;
}
@@ -6174,7 +6194,7 @@ out:
"the " GEOREP " session");
}
- gf_msg_debug ("", 0, "Returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning %d", ret);
return ret;
}
@@ -6328,7 +6348,9 @@ glusterd_set_dump_options (char *dumpoptions_path, char *options,
goto out;
}
dup_options = gf_strdup (options);
- gf_log ("", GF_LOG_INFO, "Received following statedump options: %s",
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_STATEDUMP_OPTS_RCVD,
+ "Received following statedump options: %s",
dup_options);
option = strtok_r (dup_options, " ", &tmpptr);
while (option) {
@@ -6419,7 +6441,9 @@ glusterd_brick_statedump (glusterd_volinfo_t *volinfo,
goto out;
}
- gf_log ("", GF_LOG_INFO, "Performing statedump on brick with pid %d",
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_STATEDUMP_INFO,
+ "Performing statedump on brick with pid %d",
pid);
kill (pid, SIGUSR1);
@@ -6497,7 +6521,9 @@ glusterd_nfs_statedump (char *options, int option_cnt, char **op_errstr)
goto out;
}
- gf_log ("", GF_LOG_INFO, "Performing statedump on nfs server with "
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_STATEDUMP_INFO,
+ "Performing statedump on nfs server with "
"pid %d", pid);
kill (pid, SIGUSR1);
@@ -6576,7 +6602,9 @@ glusterd_quotad_statedump (char *options, int option_cnt, char **op_errstr)
goto out;
}
- gf_log (this->name, GF_LOG_INFO, "Performing statedump on quotad with "
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_STATEDUMP_INFO,
+ "Performing statedump on quotad with "
"pid %d", pid);
kill (pid, SIGUSR1);
@@ -6642,7 +6670,8 @@ glusterd_friend_remove_cleanup_vols (uuid_t uuid)
cds_list_for_each_entry_safe (volinfo, tmp_volinfo, &priv->volumes,
vol_list) {
if (glusterd_friend_contains_vol_bricks (volinfo, uuid) == 2) {
- gf_log (THIS->name, GF_LOG_INFO,
+ gf_msg (THIS->name, GF_LOG_INFO, 0,
+ GD_MSG_STALE_VOL_DELETE_INFO,
"Deleting stale volume %s", volinfo->volname);
ret = glusterd_delete_volume (volinfo);
if (ret) {
@@ -6936,12 +6965,12 @@ glusterd_defrag_volume_status_update (glusterd_volinfo_t *volinfo,
ret = dict_get_uint64 (rsp_dict, "promoted", &promoted);
if (ret)
- gf_log (this->name, GF_LOG_TRACE,
+ gf_msg_trace (this->name, 0,
"failed to get promoted count");
ret = dict_get_uint64 (rsp_dict, "demoted", &demoted);
if (ret)
- gf_log (this->name, GF_LOG_TRACE,
+ gf_msg_trace (this->name, 0,
"failed to get demoted count");
ret = dict_get_double (rsp_dict, "run-time", &run_time);
@@ -7240,7 +7269,7 @@ glusterd_append_status_dicts (dict_t *dst, dict_t *src)
ret = dict_get_int32 (src, "gsync-count", &src_count);
if (ret || !src_count) {
- gf_msg_debug ("", 0, "Source brick empty");
+ gf_msg_debug ("glusterd", 0, "Source brick empty");
ret = 0;
goto out;
}
@@ -7274,7 +7303,7 @@ glusterd_append_status_dicts (dict_t *dst, dict_t *src)
ret = dict_set_int32 (dst, "gsync-count", dst_count+src_count);
out:
- gf_msg_debug ("", 0, "Returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning %d", ret);
return ret;
}
@@ -7378,7 +7407,7 @@ glusterd_gsync_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict, char *op_errstr)
ret = 0;
out:
- gf_msg_debug ("", 0, "Returning %d ", ret);
+ gf_msg_debug ("glusterd", 0, "Returning %d ", ret);
return ret;
}
@@ -7410,13 +7439,13 @@ glusterd_rb_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict)
if (rsp_dict) {
ret = dict_get_int32 (rsp_dict, "src-brick-port", &src_port);
if (ret == 0) {
- gf_msg_debug ("", 0,
+ gf_msg_debug ("glusterd", 0,
"src-brick-port=%d found", src_port);
}
ret = dict_get_int32 (rsp_dict, "dst-brick-port", &dst_port);
if (ret == 0) {
- gf_msg_debug ("", 0,
+ gf_msg_debug ("glusterd", 0,
"dst-brick-port=%d found", dst_port);
}
@@ -7434,7 +7463,7 @@ glusterd_rb_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict)
ret = dict_set_int32 (ctx, "src-brick-port",
src_port);
if (ret) {
- gf_msg_debug ("", 0,
+ gf_msg_debug ("glusterd", 0,
"Could not set src-brick");
goto out;
}
@@ -7444,7 +7473,7 @@ glusterd_rb_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict)
ret = dict_set_int32 (ctx, "dst-brick-port",
dst_port);
if (ret) {
- gf_msg_debug ("", 0,
+ gf_msg_debug ("glusterd", 0,
"Could not set dst-brick");
goto out;
}
@@ -7888,28 +7917,32 @@ glusterd_volume_status_copy_to_op_ctx_dict (dict_t *aggr, dict_t *rsp_dict)
ret = dict_get_int32 (rsp_dict, "hot_brick_count", &hot_brick_count);
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
+ gf_msg (THIS->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED,
"Failed to get hot brick count from rsp_dict");
goto out;
}
ret = dict_set_int32 (ctx_dict, "hot_brick_count", hot_brick_count);
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
+ gf_msg (THIS->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_SET_FAILED,
"Failed to update hot_brick_count");
goto out;
}
ret = dict_get_int32 (rsp_dict, "type", &type);
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
+ gf_msg (THIS->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED,
"Failed to get type from rsp_dict");
goto out;
}
ret = dict_set_int32 (ctx_dict, "type", type);
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
+ gf_msg (THIS->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_SET_FAILED,
"Failed to update type");
goto out;
}
@@ -8121,7 +8154,7 @@ glusterd_volume_rebalance_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict)
snprintf (key, 256, "demoted-%d", current_index);
ret = dict_set_uint64 (ctx_dict, key, value);
if (ret) {
- gf_log (THIS->name, GF_LOG_DEBUG,
+ gf_msg_debug (THIS->name, 0,
"failed to set demoted count");
}
}
@@ -8133,7 +8166,7 @@ glusterd_volume_rebalance_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict)
snprintf (key, 256, "promoted-%d", current_index);
ret = dict_set_uint64 (ctx_dict, key, value);
if (ret) {
- gf_log (THIS->name, GF_LOG_DEBUG,
+ gf_msg_debug (THIS->name, 0,
"failed to set promoted count");
}
}
@@ -8167,7 +8200,7 @@ glusterd_sys_exec_output_rsp_dict (dict_t *dst, dict_t *src)
ret = dict_get_int32 (src, "output_count", &src_output_count);
if (ret) {
- gf_msg_debug ("", 0, "No output from source");
+ gf_msg_debug ("glusterd", 0, "No output from source");
ret = 0;
goto out;
}
@@ -8201,7 +8234,7 @@ glusterd_sys_exec_output_rsp_dict (dict_t *dst, dict_t *src)
ret = dict_set_int32 (dst, "output_count",
dst_output_count+src_output_count);
out:
- gf_msg_debug ("", 0, "Returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning %d", ret);
return ret;
}
@@ -8782,14 +8815,16 @@ glusterd_defrag_volume_node_rsp (dict_t *req_dict, dict_t *rsp_dict,
snprintf (key, 256, "promoted-%d", i);
ret = dict_set_uint64 (op_ctx, key, volinfo->tier_info.promoted);
if (ret)
- gf_log (THIS->name, GF_LOG_ERROR,
+ gf_msg (THIS->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_SET_FAILED,
"failed to set lookedup file count");
memset (key, 0 , 256);
snprintf (key, 256, "demoted-%d", i);
ret = dict_set_uint64 (op_ctx, key, volinfo->tier_info.demoted);
if (ret)
- gf_log (THIS->name, GF_LOG_ERROR,
+ gf_msg (THIS->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_SET_FAILED,
"failed to set lookedup file count");
out:
@@ -8828,7 +8863,7 @@ glusterd_handle_node_rsp (dict_t *req_dict, void *pending_entry,
break;
}
- gf_msg_debug ("", 0, "Returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning %d", ret);
return ret;
}
@@ -8922,7 +8957,8 @@ glusterd_generate_and_set_task_id (dict_t *dict, char *key)
key);
goto out;
}
- gf_log (this->name, GF_LOG_INFO, "Generated task-id %s for key %s",
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_TASK_ID_INFO, "Generated task-id %s for key %s",
uuid_str, key);
out:
@@ -9509,7 +9545,8 @@ glusterd_enable_default_options (glusterd_volinfo_t *volinfo, char *option)
ret = dict_set_dynstr_with_alloc (volinfo->dict,
"performance.readdir-ahead", "on");
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_SET_FAILED,
"Failed to set option "
"'performance.readdir-ahead' on volume "
"%s", volinfo->volname);
@@ -9983,7 +10020,7 @@ glusterd_disallow_op_for_tier (glusterd_volinfo_t *volinfo, glusterd_op_t op,
case GD_OP_ADD_BRICK:
case GD_OP_REPLACE_BRICK:
ret = -1;
- gf_log (this->name, GF_LOG_DEBUG, "Operation not "
+ gf_msg_debug (this->name, 0, "Operation not "
"permitted on tiered volume %s",
volinfo->volname);
break;
@@ -9997,7 +10034,7 @@ glusterd_disallow_op_for_tier (glusterd_volinfo_t *volinfo, glusterd_op_t op,
ret = 0;
break;
default:
- gf_log (this->name, GF_LOG_DEBUG,
+ gf_msg_debug (this->name, 0,
"Rebalance Operation not permitted"
" on tiered volume %s",
volinfo->volname);
@@ -10014,7 +10051,7 @@ glusterd_disallow_op_for_tier (glusterd_volinfo_t *volinfo, glusterd_op_t op,
ret = 0;
break;
default:
- gf_log (this->name, GF_LOG_DEBUG,
+ gf_msg_debug (this->name, 0,
"Remove brick operation not "
"permitted on tiered volume %s",
volinfo->volname);
diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
index 75d19911333..0164cbe1de8 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
@@ -83,7 +83,9 @@ xlator_instantiate_va (const char *type, const char *format, va_list arg)
return xl;
error:
- gf_log ("", GF_LOG_ERROR, "creating xlator of type %s failed",
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_XLATOR_CREATE_FAIL,
+ "creating xlator of type %s failed",
type);
GF_FREE (volname);
if (xl)
@@ -114,7 +116,8 @@ volgen_xlator_link (xlator_t *pxl, xlator_t *cxl)
ret = glusterfs_xlator_link (pxl, cxl);
if (ret == -1) {
- gf_log ("", GF_LOG_ERROR,
+ gf_msg ("glusterd", GF_LOG_ERROR, ENOMEM,
+ GD_MSG_NO_MEMORY,
"Out of memory, cannot link xlators %s <- %s",
pxl->name, cxl->name);
}
@@ -131,7 +134,9 @@ volgen_graph_link (volgen_graph_t *graph, xlator_t *xl)
if (graph->graph.first)
ret = volgen_xlator_link (xl, graph->graph.first);
if (ret == -1) {
- gf_log ("", GF_LOG_ERROR, "failed to add graph entry %s",
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_GRAPH_ENTRY_ADD_FAIL,
+ "failed to add graph entry %s",
xl->name);
return -1;
@@ -206,7 +211,8 @@ xlator_set_option (xlator_t *xl, char *key, char *value)
dval = gf_strdup (value);
if (!dval) {
- gf_log ("", GF_LOG_ERROR,
+ gf_msg ("glusterd", GF_LOG_ERROR, errno,
+ GD_MSG_NO_MEMORY,
"failed to set xlator opt: %s[%s] = %s",
xl->name, key, value);
@@ -577,7 +583,8 @@ volgen_dict_get (dict_t *dict, char *key, char **value)
ret = volgen_graph_set_options_generic (NULL, dict, &vme,
&optget_option_handler);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Out of memory");
+ gf_msg ("glusterd", GF_LOG_ERROR, ENOMEM,
+ GD_MSG_NO_MEMORY, "Out of memory");
return -1;
}
@@ -638,7 +645,9 @@ glusterd_volinfo_get_boolean (glusterd_volinfo_t *volinfo, char *key)
if (val)
ret = gf_string2boolean (val, &enabled);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "value for %s option is not valid", key);
+ gf_msg ("glusterd", GF_LOG_ERROR, EINVAL,
+ GD_MSG_INVALID_ENTRY,
+ "value for %s option is not valid", key);
return -1;
}
@@ -745,7 +754,8 @@ glusterd_check_option_exists (char *key, char **completion)
if (completion) {
ret = option_complete (key, completion);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, ENOMEM,
+ GD_MSG_NO_MEMORY,
"Out of memory");
return -1;
}
@@ -772,7 +782,8 @@ glusterd_check_option_exists (char *key, char **completion)
trie:
ret = volopt_trie (key, completion);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_ERROR_ENCOUNTERED,
"Some error occurred during keyword hinting");
}
@@ -789,7 +800,8 @@ glusterd_volopt_validate (glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
xlator_t *this = THIS;
if (!dict || !key || !value) {
- gf_log_callingfn (this->name, GF_LOG_WARNING, "Invalid "
+ gf_msg_callingfn (this->name, GF_LOG_WARNING, EINVAL,
+ GD_MSG_INVALID_ENTRY, "Invalid "
"Arguments (dict=%p, key=%s, value=%s)", dict,
key, value);
return -1;
@@ -824,7 +836,8 @@ glusterd_get_trans_type_rb (gf_transport_type ttype)
gf_asprintf (&trans_type, "tcp");
break;
default:
- gf_log (THIS->name, GF_LOG_ERROR, "Unknown "
+ gf_msg (THIS->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_INVALID_ENTRY, "Unknown "
"transport type");
}
@@ -844,7 +857,7 @@ _xl_link_children (xlator_t *parent, xlator_t *children, size_t child_count)
for (trav = children; --seek; trav = trav->next);
for (; child_count--; trav = trav->prev) {
ret = volgen_xlator_link (parent, trav);
- gf_log (THIS->name, GF_LOG_DEBUG, "%s:%s", parent->name,
+ gf_msg_debug (THIS->name, 0, "%s:%s", parent->name,
trav->name);
if (ret)
goto out;
@@ -923,8 +936,10 @@ volgen_apply_filters (char *orig_volfile)
goto free_fp;
}
if (runcmd(filterpath,orig_volfile,NULL)) {
- gf_log("",GF_LOG_ERROR,"failed to run filter %.*s",
- (int)sizeof(entry.d_name), entry.d_name);
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_FILTER_RUN_FAILED,
+ "failed to run filter %.*s",
+ (int)sizeof(entry.d_name), entry.d_name);
}
free_fp:
GF_FREE(filterpath);
@@ -951,8 +966,8 @@ volgen_write_volfile (volgen_graph_t *graph, char *filename)
fd = creat (ftmp, S_IRUSR | S_IWUSR);
if (fd < 0) {
- gf_log (this->name, GF_LOG_ERROR, "%s",
- strerror (errno));
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_FILE_OP_FAILED, "file creation failed");
goto error;
}
@@ -966,8 +981,9 @@ volgen_write_volfile (volgen_graph_t *graph, char *filename)
goto error;
if (fclose (f) != 0) {
- gf_log (THIS->name, GF_LOG_ERROR, "fclose on the file %s "
- "failed (%s)", ftmp, strerror (errno));
+ gf_msg (THIS->name, GF_LOG_ERROR, errno,
+ GD_MSG_FILE_OP_FAILED, "fclose on the file %s "
+ "failed", ftmp);
/*
* Even though fclose has failed here, we have to set f to NULL.
* Otherwise when the code path goes to error, there again we
@@ -995,7 +1011,8 @@ volgen_write_volfile (volgen_graph_t *graph, char *filename)
if (f)
fclose (f);
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_VOLFILE_CREATE_FAIL,
"failed to create volfile %s", filename);
return -1;
@@ -1093,7 +1110,8 @@ get_vol_nfs_transport_type (glusterd_volinfo_t *volinfo, char *tt)
{
if (volinfo->transport_type == GF_TRANSPORT_BOTH_TCP_RDMA) {
strcpy (tt, "tcp");
- gf_log ("glusterd", GF_LOG_INFO,
+ gf_msg ("glusterd", GF_LOG_INFO, 0,
+ GD_MSG_DEFAULT_OPT_INFO,
"The default transport type for tcp,rdma volume "
"is tcp if option is not defined by the user ");
} else
@@ -1200,7 +1218,9 @@ server_check_marker_off (volgen_graph_t *graph, struct volopt_map_entry *vme,
ret = glusterd_volinfo_get_boolean (volinfo, VKEY_MARKER_XTIME);
if (ret < 0) {
- gf_log ("", GF_LOG_WARNING, "failed to get the marker status");
+ gf_msg ("glusterd", GF_LOG_WARNING, 0,
+ GD_MSG_MARKER_STATUS_GET_FAIL,
+ "failed to get the marker status");
ret = -1;
goto out;
}
@@ -1210,7 +1230,9 @@ server_check_marker_off (volgen_graph_t *graph, struct volopt_map_entry *vme,
glusterd_check_geo_rep_configured (volinfo, &enabled);
if (enabled) {
- gf_log ("", GF_LOG_WARNING, GEOREP" sessions active"
+ gf_msg ("glusterd", GF_LOG_WARNING, 0,
+ GD_MSG_MARKER_DISABLE_FAIL,
+ GEOREP" sessions active"
"for the volume %s, cannot disable marker "
,volinfo->volname);
set_graph_errstr (graph,
@@ -1223,7 +1245,7 @@ server_check_marker_off (volgen_graph_t *graph, struct volopt_map_entry *vme,
ret = 0;
out:
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning %d", ret);
return ret;
}
@@ -1347,8 +1369,9 @@ volgen_graph_set_xl_options (volgen_graph_t *graph, dict_t *dict)
for (trav = first_of (graph); trav; trav = trav->next) {
if (fnmatch(xlator_match, trav->type, FNM_NOESCAPE) == 0) {
- gf_log ("glusterd", GF_LOG_DEBUG, "Setting log level for xlator: %s",
- trav->type);
+ gf_msg_debug ("glusterd", 0,
+ "Setting log level for xlator: %s",
+ trav->type);
ret = xlator_set_option (trav, "log-level", loglevel);
if (ret)
break;
@@ -1887,7 +1910,8 @@ brick_graph_add_ro (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
if (dict_get_str_boolean (set_dict, "features.read-only", 0) &&
dict_get_str_boolean (set_dict, "features.worm", 0)) {
- gf_log (THIS->name, GF_LOG_ERROR,
+ gf_msg (THIS->name, GF_LOG_ERROR, 0,
+ GD_MSG_INCOMPATIBLE_VALUE,
"read-only and worm cannot be set together");
ret = -1;
goto out;
@@ -1918,7 +1942,8 @@ brick_graph_add_worm (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
if (dict_get_str_boolean (set_dict, "features.read-only", 0) &&
dict_get_str_boolean (set_dict, "features.worm", 0)) {
- gf_log (THIS->name, GF_LOG_ERROR,
+ gf_msg (THIS->name, GF_LOG_ERROR, 0,
+ GD_MSG_INCOMPATIBLE_VALUE,
"read-only and worm cannot be set together");
ret = -1;
goto out;
@@ -1996,7 +2021,8 @@ brick_graph_add_upcall (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
xl = volgen_graph_add (graph, "features/upcall", volinfo->volname);
if (!xl) {
- gf_log ("glusterd", GF_LOG_WARNING,
+ gf_msg ("glusterd", GF_LOG_WARNING, 0,
+ GD_MSG_GRAPH_FEATURE_ADD_FAIL,
"failed to add features/upcall to graph");
goto out;
}
@@ -2048,7 +2074,8 @@ brick_graph_add_server (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
if (dict_get_str (set_dict, SSL_CERT_DEPTH_OPT, &value) == 0) {
ret = xlator_set_option (xl, "ssl-cert-depth", value);
if (ret) {
- gf_log ("glusterd", GF_LOG_WARNING,
+ gf_msg ("glusterd", GF_LOG_WARNING, 0,
+ GD_MSG_XLATOR_SET_OPT_FAIL,
"failed to set ssl-cert-depth");
return -1;
}
@@ -2057,7 +2084,8 @@ brick_graph_add_server (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
if (dict_get_str (set_dict, SSL_CIPHER_LIST_OPT, &value) == 0) {
ret = xlator_set_option (xl, "ssl-cipher-list", value);
if (ret) {
- gf_log ("glusterd", GF_LOG_WARNING,
+ gf_msg ("glusterd", GF_LOG_WARNING, 0,
+ GD_MSG_XLATOR_SET_OPT_FAIL,
"failed to set ssl-cipher-list");
return -1;
}
@@ -2140,7 +2168,8 @@ brick_graph_add_pump (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
if (dict_get_str (set_dict, SSL_CERT_DEPTH_OPT, &value) == 0) {
ret = xlator_set_option (rbxl, "ssl-cert-depth", value);
if (ret) {
- gf_log ("glusterd", GF_LOG_WARNING,
+ gf_msg ("glusterd", GF_LOG_WARNING, errno,
+ GD_MSG_DICT_GET_FAILED,
"failed to set ssl-cert-depth");
return -1;
}
@@ -2150,7 +2179,8 @@ brick_graph_add_pump (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
ret = xlator_set_option (rbxl, "ssl-cipher-list",
value);
if (ret) {
- gf_log ("glusterd", GF_LOG_WARNING,
+ gf_msg ("glusterd", GF_LOG_WARNING, errno,
+ GD_MSG_DICT_GET_FAILED,
"failed to set ssl-cipher-list");
return -1;
}
@@ -2332,7 +2362,8 @@ server_graph_builder (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
ret = server_graph_table[i].builder (graph, volinfo, set_dict,
param);
if (ret) {
- gf_log ("glusterd", GF_LOG_ERROR, "Builing graph "
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_BUILD_GRAPH_FAILED, "Building graph "
"failed for server graph table entry: %d", i);
goto out;
}
@@ -2353,7 +2384,8 @@ server_graph_builder (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
if (!ret) {
ret = dict_get_str (set_dict, "loglevel", &loglevel);
if (ret) {
- gf_log ("glusterd", GF_LOG_ERROR, "could not get both"
+ gf_msg ("glusterd", GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED, "could not get both"
" translator name and loglevel for log level request");
goto out;
}
@@ -2439,21 +2471,23 @@ end_sethelp_xml_doc (xmlTextWriterPtr writer)
ret = xmlTextWriterEndElement(writer);
if (ret < 0) {
- gf_log ("glusterd", GF_LOG_ERROR, "Could not end an "
- "xmlElemetnt");
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_XML_TEXT_WRITE_FAIL, "Could not end an "
+ "xmlElement");
ret = -1;
goto out;
}
ret = xmlTextWriterEndDocument (writer);
if (ret < 0) {
- gf_log ("glusterd", GF_LOG_ERROR, "Could not end an "
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_XML_TEXT_WRITE_FAIL, "Could not end an "
"xmlDocument");
ret = -1;
goto out;
}
ret = 0;
out:
- gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning %d", ret);
return ret;
}
@@ -2465,7 +2499,8 @@ init_sethelp_xml_doc (xmlTextWriterPtr *writer, xmlBufferPtr *buf)
*buf = xmlBufferCreateSize (8192);
if (buf == NULL) {
- gf_log ("glusterd", GF_LOG_ERROR, "Error creating the xml "
+ gf_msg ("glusterd", GF_LOG_ERROR, ENOMEM,
+ GD_MSG_NO_MEMORY, "Error creating the xml "
"buffer");
ret = -1;
goto out;
@@ -2475,7 +2510,8 @@ init_sethelp_xml_doc (xmlTextWriterPtr *writer, xmlBufferPtr *buf)
*writer = xmlNewTextWriterMemory(*buf, 0);
if (writer == NULL) {
- gf_log ("glusterd", GF_LOG_ERROR, " Error creating the xml "
+ gf_msg ("glusterd", GF_LOG_ERROR, ENOMEM,
+ GD_MSG_NO_MEMORY, " Error creating the xml "
"writer");
ret = -1;
goto out;
@@ -2483,15 +2519,17 @@ init_sethelp_xml_doc (xmlTextWriterPtr *writer, xmlBufferPtr *buf)
ret = xmlTextWriterStartDocument(*writer, "1.0", "UTF-8", "yes");
if (ret < 0) {
- gf_log ("glusterd", GF_LOG_ERROR, "Error While starting the "
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_XML_DOC_START_FAIL, "Error while starting the "
"xmlDoc");
goto out;
}
ret = xmlTextWriterStartElement(*writer, (xmlChar *)"options");
if (ret < 0) {
- gf_log ("glusterd", GF_LOG_ERROR, "Could not create an "
- "xmlElemetnt");
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_XML_ELE_CREATE_FAIL, "Could not create an "
+ "xmlElement");
ret = -1;
goto out;
}
@@ -2500,7 +2538,7 @@ init_sethelp_xml_doc (xmlTextWriterPtr *writer, xmlBufferPtr *buf)
ret = 0;
out:
- gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning %d", ret);
return ret;
}
@@ -2516,7 +2554,8 @@ xml_add_volset_element (xmlTextWriterPtr writer, const char *name,
ret = xmlTextWriterStartElement(writer, (xmlChar *) "option");
if (ret < 0) {
- gf_log ("glusterd", GF_LOG_ERROR, "Could not create an "
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_XML_ELE_CREATE_FAIL, "Could not create an "
"xmlElemetnt");
ret = -1;
goto out;
@@ -2525,7 +2564,8 @@ xml_add_volset_element (xmlTextWriterPtr writer, const char *name,
ret = xmlTextWriterWriteFormatElement(writer, (xmlChar*)"defaultValue",
"%s", def_val);
if (ret < 0) {
- gf_log ("glusterd", GF_LOG_ERROR, "Could not create an "
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_XML_ELE_CREATE_FAIL, "Could not create an "
"xmlElemetnt");
ret = -1;
goto out;
@@ -2534,7 +2574,8 @@ xml_add_volset_element (xmlTextWriterPtr writer, const char *name,
ret = xmlTextWriterWriteFormatElement(writer, (xmlChar *)"description",
"%s", dscrpt );
if (ret < 0) {
- gf_log ("glusterd", GF_LOG_ERROR, "Could not create an "
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_XML_ELE_CREATE_FAIL, "Could not create an "
"xmlElemetnt");
ret = -1;
goto out;
@@ -2543,7 +2584,8 @@ xml_add_volset_element (xmlTextWriterPtr writer, const char *name,
ret = xmlTextWriterWriteFormatElement(writer, (xmlChar *) "name", "%s",
name);
if (ret < 0) {
- gf_log ("glusterd", GF_LOG_ERROR, "Could not create an "
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_XML_ELE_CREATE_FAIL, "Could not create an "
"xmlElemetnt");
ret = -1;
goto out;
@@ -2551,7 +2593,8 @@ xml_add_volset_element (xmlTextWriterPtr writer, const char *name,
ret = xmlTextWriterEndElement(writer);
if (ret < 0) {
- gf_log ("glusterd", GF_LOG_ERROR, "Could not end an "
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_XML_ELE_CREATE_FAIL, "Could not end an "
"xmlElemetnt");
ret = -1;
goto out;
@@ -2559,7 +2602,7 @@ xml_add_volset_element (xmlTextWriterPtr writer, const char *name,
ret = 0;
out:
- gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning %d", ret);
return ret;
}
@@ -2602,10 +2645,11 @@ _get_xlator_opt_key_from_vme ( struct volopt_map_entry *vme, char **key)
}
}
if (ret)
- gf_log ("glusterd", GF_LOG_ERROR, "Wrong entry found in "
+ gf_msg ("glusterd", GF_LOG_ERROR, EINVAL,
+ GD_MSG_INVALID_ENTRY, "Wrong entry found in "
"glusterd_volopt_map entry %s", vme->key);
else
- gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning %d", ret);
return ret;
}
@@ -2702,7 +2746,8 @@ volgen_graph_build_client (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
if (dict_get_str (set_dict, SSL_CERT_DEPTH_OPT, &value) == 0) {
ret = xlator_set_option (xl, "ssl-cert-depth", value);
if (ret) {
- gf_log ("glusterd", GF_LOG_WARNING,
+ gf_msg ("glusterd", GF_LOG_WARNING, errno,
+ GD_MSG_DICT_GET_FAILED,
"failed to set ssl-cert-depth");
goto err;
}
@@ -2711,7 +2756,8 @@ volgen_graph_build_client (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
if (dict_get_str (set_dict, SSL_CIPHER_LIST_OPT, &value) == 0) {
ret = xlator_set_option (xl, "ssl-cipher-list", value);
if (ret) {
- gf_log ("glusterd", GF_LOG_WARNING,
+ gf_msg ("glusterd", GF_LOG_WARNING, errno,
+ GD_MSG_DICT_GET_FAILED,
"failed to set ssl-cipher-list");
goto err;
}
@@ -2736,7 +2782,8 @@ volgen_graph_build_clients (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
volname = volinfo->volname;
if (volinfo->brick_count == 0) {
- gf_log ("", GF_LOG_ERROR,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_VOLUME_INCONSISTENCY,
"volume inconsistency: brick count is 0");
goto out;
}
@@ -2744,7 +2791,8 @@ volgen_graph_build_clients (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
if ((volinfo->type != GF_CLUSTER_TYPE_TIER) &&
(volinfo->dist_leaf_count < volinfo->brick_count) &&
((volinfo->brick_count % volinfo->dist_leaf_count) != 0)) {
- gf_log ("", GF_LOG_ERROR,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_VOLUME_INCONSISTENCY,
"volume inconsistency: "
"total number of bricks (%d) is not divisible with "
"number of bricks per cluster (%d) in a multi-cluster "
@@ -2773,7 +2821,8 @@ volgen_graph_build_clients (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
}
if (i != volinfo->brick_count) {
- gf_log ("", GF_LOG_ERROR,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_VOLUME_INCONSISTENCY,
"volume inconsistency: actual number of bricks (%d) "
"differs from brick count (%d)", i,
volinfo->brick_count);
@@ -2916,14 +2965,16 @@ volgen_graph_build_snapview_client (volgen_graph_t *graph,
**/
ret = volgen_xlator_link (graph->graph.first, prev_top);
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "failed to link the "
+ gf_msg (THIS->name, GF_LOG_ERROR, 0,
+ GD_MSG_XLATOR_LINK_FAIL, "failed to link the "
"snapview-client to distribute");
goto out;
}
ret = volgen_xlator_link (graph->graph.first, prot_clnt);
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "failed to link the "
+ gf_msg (THIS->name, GF_LOG_ERROR, 0,
+ GD_MSG_XLATOR_LINK_FAIL, "failed to link the "
"snapview-client to snapview-server");
goto out;
}
@@ -2944,14 +2995,18 @@ _xl_is_client_decommissioned (xlator_t *xl, glusterd_volinfo_t *volinfo)
ret = xlator_get_option (xl, "remote-host", &hostname);
if (ret) {
GF_ASSERT (0);
- gf_log ("glusterd", GF_LOG_ERROR, "Failed to get remote-host "
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_REMOTE_HOST_GET_FAIL,
+ "Failed to get remote-host "
"from client %s", xl->name);
goto out;
}
ret = xlator_get_option (xl, "remote-subvolume", &path);
if (ret) {
GF_ASSERT (0);
- gf_log ("glusterd", GF_LOG_ERROR, "Failed to get remote-host "
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_REMOTE_HOST_GET_FAIL,
+ "Failed to get remote-host "
"from client %s", xl->name);
goto out;
}
@@ -3041,7 +3096,8 @@ volgen_graph_build_dht_cluster (volgen_graph_t *graph,
/* NUFA and Switch section */
if (dict_get_str_boolean (volinfo->dict, "cluster.nufa", 0) &&
dict_get_str_boolean (volinfo->dict, "cluster.switch", 0)) {
- gf_log (THIS->name, GF_LOG_ERROR,
+ gf_msg (THIS->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED,
"nufa and switch cannot be set together");
ret = -1;
goto out;
@@ -3238,7 +3294,9 @@ volume_volgen_graph_build_clusters (volgen_graph_t *graph,
break;
default:
- gf_log ("", GF_LOG_ERROR, "volume inconsistency: "
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_VOLUME_INCONSISTENCY,
+ "volume inconsistency: "
"unrecognized clustering type");
goto out;
}
@@ -3293,38 +3351,50 @@ graph_set_generic_options (xlator_t *this, volgen_graph_t *graph,
&loglevel_option_handler);
if (ret)
- gf_log (this->name, GF_LOG_WARNING, "changing %s log level"
+ gf_msg (this->name, GF_LOG_WARNING, 0,
+ GD_MSG_GRAPH_SET_OPT_FAIL,
+ "changing %s log level"
" failed", identifier);
ret = volgen_graph_set_options_generic (graph, set_dict, "client",
&sys_loglevel_option_handler);
if (ret)
- gf_log (this->name, GF_LOG_WARNING, "changing %s syslog "
+ gf_msg (this->name, GF_LOG_WARNING, 0,
+ GD_MSG_GRAPH_SET_OPT_FAIL,
+ "changing %s syslog "
"level failed", identifier);
ret = volgen_graph_set_options_generic (graph, set_dict, "client",
&logger_option_handler);
if (ret)
- gf_log (this->name, GF_LOG_WARNING, "changing %s logger"
+ gf_msg (this->name, GF_LOG_WARNING, 0,
+ GD_MSG_GRAPH_SET_OPT_FAIL,
+ "changing %s logger"
" failed", identifier);
ret = volgen_graph_set_options_generic (graph, set_dict, "client",
&log_format_option_handler);
if (ret)
- gf_log (this->name, GF_LOG_WARNING, "changing %s log format"
+ gf_msg (this->name, GF_LOG_WARNING, 0,
+ GD_MSG_GRAPH_SET_OPT_FAIL,
+ "changing %s log format"
" failed", identifier);
ret = volgen_graph_set_options_generic (graph, set_dict, "client",
&log_buf_size_option_handler);
if (ret)
- gf_log (this->name, GF_LOG_WARNING, "Failed to change "
+ gf_msg (this->name, GF_LOG_WARNING, 0,
+ GD_MSG_GRAPH_SET_OPT_FAIL,
+ "Failed to change "
"log-buf-size option");
ret = volgen_graph_set_options_generic (graph, set_dict, "client",
&log_flush_timeout_option_handler);
if (ret)
- gf_log (this->name, GF_LOG_WARNING, "Failed to change "
+ gf_msg (this->name, GF_LOG_WARNING, 0,
+ GD_MSG_GRAPH_SET_OPT_FAIL,
+ "Failed to change "
"log-flush-timeout option");
return 0;
}
@@ -3499,7 +3569,9 @@ client_graph_builder (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
if (volinfo->is_snap_volume) {
xl = volgen_graph_add (graph, "features/read-only", volname);
if (!xl) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to add "
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_GRAPH_FEATURE_ADD_FAIL,
+ "Failed to add "
"read-only feature to the graph of %s "
"snapshot with %s origin volume",
volname, volinfo->parent_volname);
@@ -3577,7 +3649,8 @@ client_graph_builder (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
ob = _gf_false;
ret = gf_string2boolean (tmp, &ob);
if (!ret && ob) {
- gf_log (this->name, GF_LOG_WARNING,
+ gf_msg (this->name, GF_LOG_WARNING, 0,
+ GD_MSG_ROOT_SQUASH_ENABLED,
"root-squash is enabled. Please turn it"
" off to change read-after-open "
"option");
@@ -3646,7 +3719,9 @@ client_graph_builder (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
ret = 0;
}
if (ret) {
- gf_log (this->name, GF_LOG_WARNING, "setting "
+ gf_msg (this->name, GF_LOG_WARNING, 0,
+ GD_MSG_ROOT_SQUASH_FAILED,
+ "setting "
"open behind option as part of root "
"squash failed");
goto out;
@@ -3658,8 +3733,9 @@ client_graph_builder (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
ret = dict_set_str (set_dict, "client.send-gids",
ret ? "false" : "true");
if (ret)
- gf_log (THIS->name, GF_LOG_WARNING, "changing client"
- " protocol option failed");
+ gf_msg (THIS->name, GF_LOG_WARNING, errno,
+ GD_MSG_DICT_SET_FAILED, "changing client"
+ " protocol option failed");
}
ret = client_graph_set_perf_options(graph, volinfo, set_dict);
@@ -3689,7 +3765,8 @@ client_graph_builder (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
ret = dict_get_str_boolean (set_dict, "ganesha.enable", _gf_false);
if (ret == -1) {
- gf_log (this->name, GF_LOG_WARNING, "setting ganesha.enable"
+ gf_msg (this->name, GF_LOG_WARNING, errno,
+ GD_MSG_DICT_GET_FAILED, "setting ganesha.enable"
"option failed.");
goto out;
}
@@ -3698,8 +3775,10 @@ client_graph_builder (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
xl = volgen_graph_add (graph, "features/ganesha", volname);
if (!xl) {
- gf_log (this->name, GF_LOG_ERROR, "failed to add"
- "add features/ganesha to graph");
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_GRAPH_FEATURE_ADD_FAIL,
+ "failed to add"
+ "add features/ganesha to graph");
ret = -1;
goto out;
}
@@ -4246,7 +4325,9 @@ build_nfs_graph (volgen_graph_t *graph, dict_t *mod_dict)
set_dict = dict_new ();
if (!set_dict) {
- gf_log ("", GF_LOG_ERROR, "Out of memory");
+ gf_msg ("glusterd", GF_LOG_ERROR, ENOMEM,
+ GD_MSG_NO_MEMORY,
+ "Out of memory");
return -1;
}
@@ -4277,7 +4358,8 @@ build_nfs_graph (volgen_graph_t *graph, dict_t *mod_dict)
ret = gf_asprintf (&skey, "rpc-auth.addr.%s.allow",
voliter->volname);
if (ret == -1) {
- gf_log ("", GF_LOG_ERROR, "Out of memory");
+ gf_msg ("glusterd", GF_LOG_ERROR, ENOMEM,
+ GD_MSG_NO_MEMORY, "Out of memory");
goto out;
}
ret = xlator_set_option (nfsxl, skey, "*");
@@ -4288,7 +4370,8 @@ build_nfs_graph (volgen_graph_t *graph, dict_t *mod_dict)
ret = gf_asprintf (&skey, "nfs3.%s.volume-id",
voliter->volname);
if (ret == -1) {
- gf_log ("", GF_LOG_ERROR, "Out of memory");
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_NO_MEMORY, "Out of memory");
goto out;
}
ret = xlator_set_option (nfsxl, skey, uuid_utoa (voliter->volume_id));
@@ -4376,12 +4459,13 @@ build_nfs_graph (volgen_graph_t *graph, dict_t *mod_dict)
}
if (ret)
- gf_log ("glusterd", GF_LOG_WARNING, "Could not set "
+ gf_msg ("glusterd", GF_LOG_WARNING, 0,
+ GD_MSG_GRAPH_SET_OPT_FAIL, "Could not set "
"vol-options for the volume %s", voliter->volname);
}
out:
- gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning %d", ret);
dict_destroy (set_dict);
return ret;
@@ -4427,14 +4511,18 @@ glusterd_is_valid_volfpath (char *volname, char *brick)
ret = glusterd_brickinfo_new_from_brick (brick, &brickinfo);
if (ret) {
- gf_log (this->name, GF_LOG_WARNING, "Failed to create brickinfo"
+ gf_msg (this->name, GF_LOG_WARNING, 0,
+ GD_MSG_BRICKINFO_CREATE_FAIL,
+ "Failed to create brickinfo"
" for brick %s", brick );
ret = 0;
goto out;
}
ret = glusterd_volinfo_new (&volinfo);
if (ret) {
- gf_log (this->name, GF_LOG_WARNING, "Failed to create volinfo");
+ gf_msg (this->name, GF_LOG_WARNING, 0,
+ GD_MSG_VOLINFO_STORE_FAIL,
+ "Failed to create volinfo");
ret = 0;
goto out;
}
@@ -4522,7 +4610,8 @@ build_quotad_graph (volgen_graph_t *graph, dict_t *mod_dict)
ret = gf_asprintf(&skey, "%s.volume-id", voliter->volname);
if (ret == -1) {
- gf_log("", GF_LOG_ERROR, "Out of memory");
+ gf_msg ("glusterd", GF_LOG_ERROR, ENOMEM,
+ GD_MSG_NO_MEMORY, "Out of memory");
goto out;
}
ret = xlator_set_option(quotad_xl, skey, voliter->volname);
@@ -4627,13 +4716,15 @@ generate_brick_volfiles (glusterd_volinfo_t *volinfo)
if (ret) {
ret = open (tstamp_file, O_WRONLY|O_CREAT|O_EXCL, 0600);
if (ret == -1 && errno == EEXIST) {
- gf_log (this->name, GF_LOG_DEBUG,
+ gf_msg_debug (this->name, 0,
"timestamp file exist");
ret = -2;
}
if (ret == -1) {
- gf_log (this->name, GF_LOG_ERROR, "failed to create "
- "%s (%s)", tstamp_file, strerror (errno));
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_FILE_OP_FAILED,
+ "failed to create "
+ "%s", tstamp_file);
return -1;
}
if (ret >= 0) {
@@ -4649,7 +4740,8 @@ generate_brick_volfiles (glusterd_volinfo_t *volinfo)
ret = gf_set_timestamp (parent_tstamp_file,
tstamp_file);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_TSTAMP_SET_FAIL,
"Unable to set atime and mtime"
" of %s as of %s", tstamp_file,
parent_tstamp_file);
@@ -4662,14 +4754,16 @@ generate_brick_volfiles (glusterd_volinfo_t *volinfo)
if (ret == -1 && errno == ENOENT)
ret = 0;
if (ret == -1) {
- gf_log (this->name, GF_LOG_ERROR, "failed to unlink "
- "%s (%s)", tstamp_file, strerror (errno));
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_FILE_OP_FAILED,
+ "failed to unlink "
+ "%s", tstamp_file);
return -1;
}
}
cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- gf_log (this->name, GF_LOG_DEBUG,
+ gf_msg_debug (this->name, 0,
"Found a brick - %s:%s", brickinfo->hostname,
brickinfo->path);
@@ -4682,7 +4776,7 @@ generate_brick_volfiles (glusterd_volinfo_t *volinfo)
ret = 0;
out:
- gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ gf_msg_debug (this->name, 0, "Returning %d", ret);
return ret;
}
@@ -4758,7 +4852,8 @@ generate_client_volfiles (glusterd_volinfo_t *volinfo,
type);
}
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_INVALID_ENTRY,
"Received invalid transport-type");
goto out;
}
@@ -4773,7 +4868,8 @@ generate_client_volfiles (glusterd_volinfo_t *volinfo,
/* Generate volfile for rebalance process */
ret = dict_set_int32 (dict, "rebalance-volfile-creation", _gf_true);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_SET_FAILED,
"Failed to set rebalance-volfile-creation");
goto out;
}
@@ -4784,7 +4880,8 @@ generate_client_volfiles (glusterd_volinfo_t *volinfo,
filepath,
dict);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_VOLFILE_CREATE_FAIL,
"Failed to create rebalance volfile for %s",
volinfo->volname);
goto out;
@@ -4794,7 +4891,7 @@ out:
if (dict)
dict_unref (dict);
- gf_log ("", GF_LOG_TRACE, "Returning %d", ret);
+ gf_msg_trace ("glusterd", 0, "Returning %d", ret);
return ret;
}
@@ -4820,7 +4917,8 @@ glusterd_snapdsvc_generate_volfile (volgen_graph_t *graph,
if (!ret) {
ret = dict_get_str (set_dict, "loglevel", &loglevel);
if (ret) {
- gf_log ("glusterd", GF_LOG_ERROR, "could not get both"
+ gf_msg ("glusterd", GF_LOG_ERROR, errno,
+ GD_MSG_DICT_GET_FAILED, "could not get both"
" translator name and loglevel for log level "
"request");
return -1;
@@ -4857,7 +4955,8 @@ glusterd_snapdsvc_generate_volfile (volgen_graph_t *graph,
if (dict_get_str (set_dict, SSL_CERT_DEPTH_OPT, &value) == 0) {
ret = xlator_set_option (xl, "ssl-cert-depth", value);
if (ret) {
- gf_log ("glusterd", GF_LOG_WARNING,
+ gf_msg ("glusterd", GF_LOG_WARNING, 0,
+ GD_MSG_XLATOR_SET_OPT_FAIL,
"failed to set ssl-cert-depth");
return -1;
}
@@ -4866,7 +4965,8 @@ glusterd_snapdsvc_generate_volfile (volgen_graph_t *graph,
if (dict_get_str (set_dict, SSL_CIPHER_LIST_OPT, &value) == 0) {
ret = xlator_set_option (xl, "ssl-cipher-list", value);
if (ret) {
- gf_log ("glusterd", GF_LOG_WARNING,
+ gf_msg ("glusterd", GF_LOG_WARNING, 0,
+ GD_MSG_XLATOR_SET_OPT_FAIL,
"failed to set ssl-cipher-list");
return -1;
}
@@ -5268,21 +5368,24 @@ glusterd_create_volfiles (glusterd_volinfo_t *volinfo)
ret = generate_brick_volfiles (volinfo);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_VOLFILE_CREATE_FAIL,
"Could not generate volfiles for bricks");
goto out;
}
ret = generate_client_volfiles (volinfo, GF_CLIENT_TRUSTED);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_VOLFILE_CREATE_FAIL,
"Could not generate trusted client volfiles");
goto out;
}
ret = generate_client_volfiles (volinfo, GF_CLIENT_OTHER);
if (ret)
- gf_log (this->name, GF_LOG_ERROR,
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_VOLFILE_CREATE_FAIL,
"Could not generate client volfiles");
out:
@@ -5337,8 +5440,10 @@ glusterd_delete_volfile (glusterd_volinfo_t *volinfo,
get_brick_filepath (filename, volinfo, brickinfo);
ret = unlink (filename);
if (ret)
- gf_log ("glusterd", GF_LOG_ERROR, "failed to delete file: %s, "
- "reason: %s", filename, strerror (errno));
+ gf_msg ("glusterd", GF_LOG_ERROR, errno,
+ GD_MSG_FILE_OP_FAILED,
+ "failed to delete file: %s",
+ filename);
return ret;
}
@@ -5365,7 +5470,7 @@ validate_shdopts (glusterd_volinfo_t *volinfo,
volgen_graph_free (&graph);
- gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning %d", ret);
out:
dict_del (val_dict, "graph-check");
return ret;
@@ -5394,7 +5499,8 @@ validate_nfsopts (glusterd_volinfo_t *volinfo,
snprintf (err_str, sizeof (err_str), "Changing nfs "
"transport type is allowed only for volumes "
"of transport type tcp,rdma");
- gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_OP_UNSUPPORTED, "%s", err_str);
*op_errstr = gf_strdup (err_str);
ret = -1;
goto out;
@@ -5410,7 +5516,8 @@ validate_nfsopts (glusterd_volinfo_t *volinfo,
ret = dict_set_str (val_dict, "volume-name", volinfo->volname);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to set volume name");
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_SET_FAILED, "Failed to set volume name");
goto out;
}
@@ -5423,7 +5530,7 @@ validate_nfsopts (glusterd_volinfo_t *volinfo,
out:
if (dict_get (val_dict, "volume-name"))
dict_del (val_dict, "volume-name");
- gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ gf_msg_debug (this->name, 0, "Returning %d", ret);
return ret;
}
@@ -5446,7 +5553,7 @@ validate_clientopts (glusterd_volinfo_t *volinfo,
volgen_graph_free (&graph);
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning %d", ret);
return ret;
}
@@ -5469,7 +5576,7 @@ validate_brickopts (glusterd_volinfo_t *volinfo,
volgen_graph_free (&graph);
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning %d", ret);
return ret;
}
@@ -5482,7 +5589,7 @@ glusterd_validate_brickreconf (glusterd_volinfo_t *volinfo,
int ret = -1;
cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- gf_log ("", GF_LOG_DEBUG,
+ gf_msg_debug ("glusterd", 0,
"Validating %s", brickinfo->hostname);
ret = validate_brickopts (volinfo, brickinfo, val_dict,
@@ -5525,32 +5632,32 @@ glusterd_validate_globalopts (glusterd_volinfo_t *volinfo,
ret = glusterd_validate_brickreconf (volinfo, val_dict, op_errstr);
if (ret) {
- gf_log ("", GF_LOG_DEBUG,
+ gf_msg_debug ("glusterd", 0,
"Could not Validate bricks");
goto out;
}
ret = validate_clientopts (volinfo, val_dict, op_errstr);
if (ret) {
- gf_log ("", GF_LOG_DEBUG,
+ gf_msg_debug ("glusterd", 0,
"Could not Validate client");
goto out;
}
ret = validate_nfsopts (volinfo, val_dict, op_errstr);
if (ret) {
- gf_log ("", GF_LOG_DEBUG, "Could not Validate nfs");
+ gf_msg_debug ("glusterd", 0, "Could not Validate nfs");
goto out;
}
ret = validate_shdopts (volinfo, val_dict, op_errstr);
if (ret) {
- gf_log ("", GF_LOG_DEBUG, "Could not Validate self-heald");
+ gf_msg_debug ("glusterd", 0, "Could not Validate self-heald");
goto out;
}
out:
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning %d", ret);
return ret;
}
@@ -5582,34 +5689,34 @@ glusterd_validate_reconfopts (glusterd_volinfo_t *volinfo, dict_t *val_dict,
ret = glusterd_validate_brickreconf (volinfo, val_dict, op_errstr);
if (ret) {
- gf_log ("", GF_LOG_DEBUG,
+ gf_msg_debug ("glusterd", 0,
"Could not Validate bricks");
goto out;
}
ret = validate_clientopts (volinfo, val_dict, op_errstr);
if (ret) {
- gf_log ("", GF_LOG_DEBUG,
+ gf_msg_debug ("glusterd", 0,
"Could not Validate client");
goto out;
}
ret = validate_nfsopts (volinfo, val_dict, op_errstr);
if (ret) {
- gf_log ("", GF_LOG_DEBUG, "Could not Validate nfs");
+ gf_msg_debug ("glusterd", 0, "Could not Validate nfs");
goto out;
}
ret = validate_shdopts (volinfo, val_dict, op_errstr);
if (ret) {
- gf_log ("", GF_LOG_DEBUG, "Could not Validate self-heald");
+ gf_msg_debug ("glusterd", 0, "Could not Validate self-heald");
goto out;
}
out:
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning %d", ret);
return ret;
}
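
For reference, the conversion applied across glusterd-volgen.c boils down to the pattern sketched below (a minimal sketch, assuming the GD_MSG_* IDs declared in glusterd-messages.h; the variable names and the particular errno/message ID shown here are illustrative and depend on the call site):

    /* old framework: no message ID, errno folded into the format string */
    gf_log (this->name, GF_LOG_ERROR, "failed to create %s (%s)",
            filename, strerror (errno));

    /* new framework: errno passed as a separate argument, message keyed by a GD_MSG_* ID */
    gf_msg (this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED,
            "failed to create %s", filename);

    /* debug and trace messages carry no message ID and use the dedicated helpers */
    gf_msg_debug (this->name, 0, "Returning %d", ret);
    gf_msg_trace ("glusterd", 0, "Returning %d", ret);
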
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 65043736ea5..26650934026 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -162,7 +162,8 @@ glusterd_check_brick_order(dict_t *dict, char *err_str)
" not retrieve disperse count");
goto out;
}
- gf_log (this->name, GF_LOG_INFO, "Disperse cluster type"
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_DISPERSE_CLUSTER_FOUND, "Disperse cluster type"
" found. Checking brick order.");
}
@@ -777,7 +778,8 @@ __glusterd_handle_cli_heal_volume (rpcsvc_request_t *req)
goto out;
}
- gf_log (this->name, GF_LOG_INFO, "Received heal vol req "
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_HEAL_VOL_REQ_RCVD, "Received heal vol req "
"for volume %s", volname);
ret = glusterd_volinfo_find (volname, &volinfo);
@@ -912,7 +914,8 @@ __glusterd_handle_cli_statedump_volume (rpcsvc_request_t *req)
goto out;
}
- gf_log (this->name, GF_LOG_INFO, "Received statedump request for "
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_STATEDUMP_VOL_REQ_RCVD, "Received statedump request for "
"volume %s with options %s", volname, options);
ret = glusterd_op_begin_synctask (req, GD_OP_STATEDUMP_VOLUME, dict);
@@ -1013,7 +1016,9 @@ next:
continue;
if (!strcmp (prop.value.string, "thin-pool")) {
brick->caps |= CAPS_THIN;
- gf_log (THIS->name, GF_LOG_INFO, "Thin Pool "
+ gf_msg (THIS->name, GF_LOG_INFO, 0,
+ GD_MSG_THINPOOLS_FOR_THINLVS,
+ "Thin Pool "
"\"%s\" will be used for thin LVs",
lvm_lv_get_name (lv_list->lv));
break;
@@ -1254,7 +1259,8 @@ out:
glusterd_brickinfo_delete (brick_info);
if (msg[0] != '\0') {
- gf_log (this->name, GF_LOG_ERROR, "%s", msg);
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_OP_STAGE_CREATE_VOL_FAIL, "%s", msg);
*op_errstr = gf_strdup (msg);
}
gf_msg_debug (this->name, 0, "Returning %d", ret);
@@ -1502,7 +1508,8 @@ glusterd_op_stage_start_volume (dict_t *dict, char **op_errstr,
ret = 0;
out:
if (ret && (msg[0] != '\0')) {
- gf_log (this->name, GF_LOG_ERROR, "%s", msg);
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_OP_STAGE_START_VOL_FAIL, "%s", msg);
*op_errstr = gf_strdup (msg);
}
return ret;
@@ -1573,7 +1580,8 @@ glusterd_op_stage_stop_volume (dict_t *dict, char **op_errstr)
if (ret) {
ret = ganesha_manage_export(dict, "off", op_errstr);
if (ret) {
- gf_log (THIS->name, GF_LOG_WARNING, "Could not "
+ gf_msg (THIS->name, GF_LOG_WARNING, 0,
+ GD_MSG_NFS_GNS_UNEXPRT_VOL_FAIL, "Could not "
"unexport volume via NFS-Ganesha");
ret = 0;
}
@@ -1658,7 +1666,8 @@ glusterd_op_stage_delete_volume (dict_t *dict, char **op_errstr)
out:
if (msg[0] != '\0') {
- gf_log (this->name, GF_LOG_ERROR, "%s", msg);
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_OP_STAGE_DELETE_VOL_FAIL, "%s", msg);
*op_errstr = gf_strdup (msg);
}
gf_msg_debug (this->name, 0, "Returning %d", ret);
@@ -1743,7 +1752,8 @@ glusterd_handle_heal_cmd (xlator_t *this, glusterd_volinfo_t *volinfo,
}
out:
if (ret)
- gf_log (this->name, GF_LOG_WARNING, "%s", *op_errstr);
+ gf_msg (this->name, GF_LOG_WARNING, 0,
+ GD_MSG_HANDLE_HEAL_CMD_FAIL, "%s", *op_errstr);
return ret;
}
@@ -2904,7 +2914,9 @@ glusterd_op_clearlocks_volume (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
if (ret)
ret = 0;
- gf_log (THIS->name, GF_LOG_INFO, "Received clear-locks request for "
+ gf_msg (THIS->name, GF_LOG_INFO, 0,
+ GD_MSG_CLRCLK_VOL_REQ_RCVD,
+ "Received clear-locks request for "
"volume %s with kind %s type %s and options %s", volname,
kind, type, opts);
@@ -2961,7 +2973,8 @@ glusterd_op_clearlocks_volume (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
ret = glusterd_clearlocks_send_cmd (volinfo, cmd_str, path, result,
msg, sizeof (msg), mntpt);
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
+ gf_msg (THIS->name, GF_LOG_ERROR, 0,
+ GD_MSG_CLRCLK_SND_CMD_FAIL, "%s", msg);
goto umount;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 78220ef1db7..22abeefe32e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -27,7 +27,8 @@ validate_tier (glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
snprintf (errstr, sizeof (errstr), "Volume %s is not a tier "
"volume. Option %s is only valid for tier volume.",
volinfo->volname, key);
- gf_log (this->name, GF_LOG_ERROR, "%s", errstr);
+ gf_msg (this->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_INCOMPATIBLE_VALUE, "%s", errstr);
*op_errstr = gf_strdup (errstr);
ret = -1;
goto out;
@@ -44,7 +45,8 @@ validate_tier (glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
snprintf (errstr, sizeof (errstr), "%s is not a compatible "
"value. %s expects an integer value.",
value, key);
- gf_log (this->name, GF_LOG_ERROR, "%s", errstr);
+ gf_msg (this->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_INCOMPATIBLE_VALUE, "%s", errstr);
*op_errstr = gf_strdup (errstr);
ret = -1;
goto out;
@@ -57,7 +59,8 @@ validate_tier (glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
"compatible value. %s expects a positive "
"integer value.",
value, key);
- gf_log (this->name, GF_LOG_ERROR, "%s", errstr);
+ gf_msg (this->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_INCOMPATIBLE_VALUE, "%s", errstr);
*op_errstr = gf_strdup (errstr);
ret = -1;
goto out;
@@ -68,7 +71,8 @@ validate_tier (glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
"compatible value. %s expects a non-negative"
" integer value.",
value, key);
- gf_log (this->name, GF_LOG_ERROR, "%s", errstr);
+ gf_msg (this->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_INCOMPATIBLE_VALUE, "%s", errstr);
*op_errstr = gf_strdup (errstr);
ret = -1;
goto out;
@@ -76,7 +80,7 @@ validate_tier (glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
}
out:
- gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ gf_msg_debug (this->name, 0, "Returning %d", ret);
return ret;
}
@@ -159,11 +163,12 @@ validate_defrag_throttle_option (glusterd_volinfo_t *volinfo, dict_t *dict,
ret = -1;
snprintf (errstr, sizeof (errstr), "%s should be "
"{lazy|normal|aggressive}", key);
- gf_log (this->name, GF_LOG_ERROR, "%s", errstr);
+ gf_msg (this->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_INVALID_ENTRY, "%s", errstr);
*op_errstr = gf_strdup (errstr);
}
- gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ gf_msg_debug (this->name, 0, "Returning %d", ret);
return ret;
}
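
The validators in glusterd-volume-set.c follow the same conversion, with the formatted error additionally handed back to the CLI through op_errstr. A hedged sketch of that error path, using the names visible in the hunks above (the errstr buffer size is illustrative):

    char errstr[2048] = {0,};

    /* build the user-facing message, log it under a message ID, then
     * propagate the same text back to the CLI via op_errstr */
    snprintf (errstr, sizeof (errstr), "%s should be "
              "{lazy|normal|aggressive}", key);
    gf_msg (this->name, GF_LOG_ERROR, EINVAL,
            GD_MSG_INVALID_ENTRY, "%s", errstr);
    *op_errstr = gf_strdup (errstr);
    ret = -1;
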
diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c
index 9b86f9bdb94..bdd141d24ae 100644
--- a/xlators/mgmt/glusterd/src/glusterd.c
+++ b/xlators/mgmt/glusterd/src/glusterd.c
@@ -148,7 +148,8 @@ glusterd_uuid_init ()
ret = glusterd_retrieve_uuid ();
if (ret == 0) {
- gf_log (this->name, GF_LOG_INFO,
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_RETRIEVED_UUID,
"retrieved UUID: %s", uuid_utoa (priv->uuid));
return 0;
}
@@ -179,7 +180,8 @@ glusterd_uuid_generate_save ()
gf_uuid_generate (priv->uuid);
- gf_log (this->name, GF_LOG_INFO, "generated UUID: %s",
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_GENERATED_UUID, "generated UUID: %s",
uuid_utoa (priv->uuid));
ret = glusterd_store_global_info (this);
@@ -391,7 +393,7 @@ glusterd_rpcsvc_options_build (dict_t *options)
goto out;
}
- gf_msg_debug ("", 0, "listen-backlog value: %d", backlog);
+ gf_msg_debug ("glusterd", 0, "listen-backlog value: %d", backlog);
out:
return ret;
@@ -474,7 +476,7 @@ group_write_allow (char *path, gid_t gid)
out:
if (ret == -1)
- gf_msg ("", GF_LOG_CRITICAL, errno,
+ gf_msg ("glusterd", GF_LOG_CRITICAL, errno,
GD_MSG_WRITE_ACCESS_GRANT_FAIL,
"failed to set up write access to %s for group %d (%s)",
path, gid, strerror (errno));
@@ -586,7 +588,7 @@ glusterd_crt_georep_folders (char *georepdir, glusterd_conf_t *conf)
}
out:
- gf_msg_debug ("", 0, "Returning %d", ret);
+ gf_msg_debug ("glusterd", 0, "Returning %d", ret);
return ret;
}
@@ -838,7 +840,7 @@ check_prepare_mountbroker_root (char *mountbroker_root)
ret = fstat (dfd, &st);
}
if (ret == -1 || !S_ISDIR (st.st_mode)) {
- gf_msg ("", GF_LOG_ERROR, errno,
+ gf_msg ("glusterd", GF_LOG_ERROR, errno,
GD_MSG_DIR_OP_FAILED,
"cannot access mountbroker-root directory %s",
mountbroker_root);
@@ -847,7 +849,7 @@ check_prepare_mountbroker_root (char *mountbroker_root)
}
if (st.st_uid != 0 ||
(st.st_mode & (S_IWGRP|S_IWOTH))) {
- gf_msg ("", GF_LOG_ERROR, 0,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_DIR_PERM_LIBERAL,
"permissions on mountbroker-root directory %s are "
"too liberal", mountbroker_root);
@@ -855,7 +857,7 @@ check_prepare_mountbroker_root (char *mountbroker_root)
goto out;
}
if (!(st.st_mode & (S_IXGRP|S_IXOTH))) {
- gf_msg ("", GF_LOG_WARNING, 0,
+ gf_msg ("glusterd", GF_LOG_WARNING, 0,
GD_MSG_DIR_PERM_STRICT,
"permissions on mountbroker-root directory %s are "
"probably too strict", mountbroker_root);
@@ -870,7 +872,7 @@ check_prepare_mountbroker_root (char *mountbroker_root)
ret = fstat (dfd2, &st2);
}
if (ret == -1) {
- gf_msg ("", GF_LOG_ERROR, errno,
+ gf_msg ("glusterd", GF_LOG_ERROR, errno,
GD_MSG_DIR_OP_FAILED,
"error while checking mountbroker-root ancestors "
"%d (%s)", errno, strerror (errno));
@@ -883,7 +885,7 @@ check_prepare_mountbroker_root (char *mountbroker_root)
if (st2.st_uid != 0 ||
((st2.st_mode & (S_IWGRP|S_IWOTH)) &&
!(st2.st_mode & S_ISVTX))) {
- gf_msg ("", GF_LOG_ERROR, 0,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_DIR_PERM_LIBERAL,
"permissions on ancestors of mountbroker-root "
"directory are too liberal");
@@ -891,7 +893,7 @@ check_prepare_mountbroker_root (char *mountbroker_root)
goto out;
}
if (!(st.st_mode & (S_IXGRP|S_IXOTH))) {
- gf_msg ("", GF_LOG_WARNING, 0,
+ gf_msg ("glusterd", GF_LOG_WARNING, 0,
GD_MSG_DIR_PERM_STRICT,
"permissions on ancestors of mountbroker-root "
"directory are probably too strict");
@@ -908,7 +910,7 @@ check_prepare_mountbroker_root (char *mountbroker_root)
if (ret != -1)
ret = sys_fstatat (dfd0, MB_HIVE, &st, AT_SYMLINK_NOFOLLOW);
if (ret == -1 || st.st_mode != (S_IFDIR|0711)) {
- gf_msg ("", GF_LOG_ERROR, errno,
+ gf_msg ("glusterd", GF_LOG_ERROR, errno,
GD_MSG_CREATE_DIR_FAILED,
"failed to set up mountbroker-root directory %s",
mountbroker_root);
@@ -1000,7 +1002,7 @@ _install_mount_spec (dict_t *opts, char *key, data_t *value, void *data)
return 0;
err:
- gf_msg ("", GF_LOG_ERROR, 0,
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_MOUNT_SPEC_INSTALL_FAIL,
"adding %smount spec failed: label: %s desc: %s",
georep ? GEOREP" " : "", label, pdesc);
@@ -1155,7 +1157,7 @@ glusterd_stop_listener (xlator_t *this)
conf = this->private;
GF_VALIDATE_OR_GOTO (this->name, conf, out);
- gf_log (this->name, GF_LOG_DEBUG,
+ gf_msg_debug (this->name, 0,
"%s function called ", __func__);
for (i = 0; i < gd_inet_programs_count; i++) {
@@ -1307,19 +1309,21 @@ glusterd_svc_init_all ()
/* Init BitD svc */
ret = glusterd_bitdsvc_init (&(priv->bitd_svc));
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "Failed to initialized BitD "
+ gf_msg (THIS->name, GF_LOG_ERROR, 0,
+ GD_MSG_BITD_INIT_FAIL, "Failed to initialize BitD "
"service");
goto out;
}
- gf_log (THIS->name, GF_LOG_DEBUG, "BitD service initialized");
+ gf_msg_debug (THIS->name, 0, "BitD service initialized");
ret = glusterd_scrubsvc_init (&(priv->scrub_svc));
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "Failed to initialized scrub "
+ gf_msg (THIS->name, GF_LOG_ERROR, 0,
+ GD_MSG_SCRUB_INIT_FAIL, "Failed to initialize scrub "
"service");
goto out;
}
- gf_log (THIS->name, GF_LOG_DEBUG, "scrub service initialized");
+ gf_msg_debug (THIS->name, 0, "scrub service initialized");
out:
return ret;
@@ -1361,9 +1365,10 @@ init (xlator_t *this)
gf_msg (this->name, GF_LOG_ERROR, errno,
GD_MSG_SETXATTR_FAIL,
"Failed to set 'ulimit -n "
- " 65536': %s", strerror(errno));
+ " 65536'");
} else {
- gf_log (this->name, GF_LOG_INFO,
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_FILE_DESC_LIMIT_SET,
"Maximum allowed open file descriptors "
"set to 65536");
}
@@ -1412,12 +1417,14 @@ init (xlator_t *this)
}
setenv ("GLUSTERD_WORKDIR", workdir, 1);
- gf_log (this->name, GF_LOG_INFO, "Using %s as working directory",
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_CURR_WORK_DIR_INFO, "Using %s as working directory",
workdir);
ret = glusterd_find_correct_var_run_dir (this, var_run_dir);
if (ret) {
- gf_log (this->name, GF_LOG_CRITICAL, "Unable to find "
+ gf_msg (this->name, GF_LOG_CRITICAL, 0,
+ GD_MSG_VAR_RUN_DIR_FIND_FAIL, "Unable to find "
"the correct var run dir");
exit (1);
}
@@ -1437,7 +1444,8 @@ init (xlator_t *this)
ret = glusterd_init_var_run_dirs (this, var_run_dir,
GLUSTER_SHARED_STORAGE_BRICK_DIR);
if (ret) {
- gf_log (this->name, GF_LOG_CRITICAL, "Unable to create "
+ gf_msg (this->name, GF_LOG_CRITICAL, 0,
+ GD_MSG_VAR_RUN_DIR_INIT_FAIL, "Unable to create "
"shared storage brick");
exit (1);
}
@@ -1515,18 +1523,20 @@ init (xlator_t *this)
snprintf (storedir, PATH_MAX, "%s/bitd", workdir);
ret = mkdir (storedir, 0777);
if ((-1 == ret) && (errno != EEXIST)) {
- gf_log (this->name, GF_LOG_CRITICAL,
- "Unable to create bitrot directory %s"
- " ,errno = %d", storedir, errno);
+ gf_msg (this->name, GF_LOG_CRITICAL, errno,
+ GD_MSG_CREATE_DIR_FAILED,
+ "Unable to create bitrot directory %s",
+ storedir);
exit (1);
}
snprintf (storedir, PATH_MAX, "%s/scrub", workdir);
ret = mkdir (storedir, 0777);
if ((-1 == ret) && (errno != EEXIST)) {
- gf_log (this->name, GF_LOG_CRITICAL,
- "Unable to create scrub directory %s"
- " ,errno = %d", storedir, errno);
+ gf_msg (this->name, GF_LOG_CRITICAL, errno,
+ GD_MSG_CREATE_DIR_FAILED,
+ "Unable to create scrub directory %s",
+ storedir);
exit (1);
}