summary | refs | log | tree | commit | diff | stats
path: root/xlators/mgmt
diff options
context:
space:
mode:
authorAvra Sengupta <asengupt@redhat.com>2014-03-27 00:50:32 +0000
committerRajesh Joseph <rjoseph@redhat.com>2014-03-27 09:25:56 -0700
commit42c98f4f1180e8cf91068a70646ca94a70400f2a (patch)
tree29850c7d81186e08954e6087f52f46c02d86c543 /xlators/mgmt
parent3b2408904dbbfb7987a4792fa71ba3fe61f18398 (diff)
Review Comments
Change-Id: Ifce90b0b617bc0b43a9af0bd692a7290820ac62c
Signed-off-by: Avra Sengupta <asengupt@redhat.com>
Reviewed-on: http://review.gluster.org/7358
Reviewed-by: Rajesh Joseph <rjoseph@redhat.com>
Tested-by: Rajesh Joseph <rjoseph@redhat.com>
Diffstat (limited to 'xlators/mgmt')
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-handshake.c6
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-locks.c29
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c29
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-mgmt.c459
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-snapshot.c2
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-utils.c7
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-volgen.c2
7 files changed, 345 insertions, 189 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c
index 98f2a0d7e..fee627f52 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handshake.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c
@@ -1113,12 +1113,6 @@ glusterd_set_clnt_mgmt_program (glusterd_peerinfo_t *peerinfo,
ret = 0;
}
- if ((gd_mgmt_v3_prog.prognum == trav->prognum) &&
- (gd_mgmt_v3_prog.progver == trav->progver)) {
- peerinfo->mgmt_v3 = &gd_mgmt_v3_prog;
- ret = 0;
- }
-
if (ret) {
gf_log ("", GF_LOG_DEBUG,
"%s (%"PRId64":%"PRId64") not supported",
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.c b/xlators/mgmt/glusterd/src/glusterd-locks.c
index 36da3273f..531e0ba6c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.c
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.c
@@ -28,7 +28,7 @@
#define MAX_LOCKING_ENTITIES 2
-/* Valid entities that the mgt_v3 lock can hold locks upon *
+/* Valid entities that the mgmt_v3 lock can hold locks upon *
* To add newer entities to be locked, we can just add more *
* entries to this table along with the type and default value */
valid_entities valid_types[] = {
@@ -128,12 +128,11 @@ glusterd_release_multiple_locks_per_entity (dict_t *dict, uuid_t uuid,
int32_t ret = -1;
xlator_t *this = NULL;
- GF_ASSERT(THIS);
+ this = THIS;
+ GF_ASSERT(this);
GF_ASSERT (dict);
GF_ASSERT (type);
- this = THIS;
-
if (locked_count == 0) {
gf_log (this->name, GF_LOG_DEBUG,
"No %s locked as part of this transaction",
@@ -186,12 +185,11 @@ glusterd_acquire_multiple_locks_per_entity (dict_t *dict, uuid_t uuid,
int32_t locked_count = 0;
xlator_t *this = NULL;
- GF_ASSERT(THIS);
+ this = THIS;
+ GF_ASSERT(this);
GF_ASSERT (dict);
GF_ASSERT (type);
- this = THIS;
-
/* Locking one element after other */
for (i = 0; i < count; i++) {
snprintf (name_buf, sizeof(name_buf),
@@ -255,12 +253,11 @@ glusterd_mgmt_v3_unlock_entity (dict_t *dict, uuid_t uuid, char *type,
gf_boolean_t hold_locks = _gf_false;
xlator_t *this = NULL;
- GF_ASSERT(THIS);
+ this = THIS;
+ GF_ASSERT(this);
GF_ASSERT (dict);
GF_ASSERT (type);
- this = THIS;
-
snprintf (name_buf, sizeof(name_buf), "hold_%s_locks", type);
hold_locks = dict_get_str_boolean (dict, name_buf, default_value);
@@ -328,12 +325,11 @@ glusterd_mgmt_v3_lock_entity (dict_t *dict, uuid_t uuid, char *type,
gf_boolean_t hold_locks = _gf_false;
xlator_t *this = NULL;
- GF_ASSERT(THIS);
+ this = THIS;
+ GF_ASSERT(this);
GF_ASSERT (dict);
GF_ASSERT (type);
- this = THIS;
-
snprintf (name_buf, sizeof(name_buf), "hold_%s_locks", type);
hold_locks = dict_get_str_boolean (dict, name_buf, default_value);
@@ -395,8 +391,8 @@ glusterd_multiple_mgmt_v3_unlock (dict_t *dict, uuid_t uuid)
int32_t op_ret = 0;
xlator_t *this = NULL;
- GF_ASSERT(THIS);
this = THIS;
+ GF_ASSERT(this);
if (!dict) {
gf_log (this->name, GF_LOG_ERROR, "dict is null.");
@@ -433,8 +429,8 @@ glusterd_multiple_mgmt_v3_lock (dict_t *dict, uuid_t uuid)
int32_t locked_count = 0;
xlator_t *this = NULL;
- GF_ASSERT(THIS);
this = THIS;
+ GF_ASSERT(this);
if (!dict) {
gf_log (this->name, GF_LOG_ERROR, "dict is null.");
@@ -449,7 +445,8 @@ glusterd_multiple_mgmt_v3_lock (dict_t *dict, uuid_t uuid)
valid_types[i].type,
valid_types[i].default_value);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Unable to lock all %s",
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to lock all %s",
valid_types[i].type);
break;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
index a2546ca94..a5f38ce9c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
@@ -100,8 +100,6 @@ glusterd_op_state_machine_mgmt_v3_lock (rpcsvc_request_t *req,
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Unable to set transaction's opinfo");
- if (ctx->dict)
- dict_unref (ctx->dict);
goto out;
}
@@ -188,11 +186,13 @@ out:
if (ret) {
if (ctx->dict)
- dict_destroy (ctx->dict);
+ dict_unref (ctx->dict);
if (ctx)
GF_FREE (ctx);
}
+ free (lock_req.dict.dict_val);
+
gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
@@ -254,7 +254,8 @@ glusterd_handle_pre_validate_fn (rpcsvc_request_t *req)
ret = xdr_to_generic (req->msg[0], &op_req,
(xdrproc_t)xdr_gd1_mgmt_v3_pre_val_req);
if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to decode pre validation "
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to decode pre validation "
"request received from peer");
req->rpc_err = GARBAGE_ARGS;
goto out;
@@ -294,7 +295,6 @@ glusterd_handle_pre_validate_fn (rpcsvc_request_t *req)
gf_log (this->name, GF_LOG_ERROR,
"Pre Validation failed on operation %s",
gd_op_list[op_req.op]);
- goto out;
}
ret = glusterd_mgmt_v3_pre_validate_send_resp (req, op_req.op,
@@ -314,6 +314,9 @@ out:
free (op_req.dict.dict_val);
+ if (dict)
+ dict_unref (dict);
+
if (rsp_dict)
dict_unref (rsp_dict);
@@ -418,7 +421,6 @@ glusterd_handle_brick_op_fn (rpcsvc_request_t *req)
gf_log (this->name, GF_LOG_ERROR,
"Brick Op failed on operation %s",
gd_op_list[op_req.op]);
- goto out;
}
ret = glusterd_mgmt_v3_brick_op_send_resp (req, op_req.op,
@@ -438,6 +440,9 @@ out:
free (op_req.dict.dict_val);
+ if (dict)
+ dict_unref (dict);
+
if (rsp_dict)
dict_unref (rsp_dict);
@@ -541,7 +546,6 @@ glusterd_handle_commit_fn (rpcsvc_request_t *req)
gf_log (this->name, GF_LOG_ERROR,
"commit failed on operation %s",
gd_op_list[op_req.op]);
- goto out;
}
ret = glusterd_mgmt_v3_commit_send_resp (req, op_req.op,
@@ -561,6 +565,9 @@ out:
free (op_req.dict.dict_val);
+ if (dict)
+ dict_unref (dict);
+
if (rsp_dict)
dict_unref (rsp_dict);
@@ -666,7 +673,6 @@ glusterd_handle_post_validate_fn (rpcsvc_request_t *req)
gf_log (this->name, GF_LOG_ERROR,
"Post Validation failed on operation %s",
gd_op_list[op_req.op]);
- goto out;
}
ret = glusterd_mgmt_v3_post_validate_send_resp (req, op_req.op,
@@ -686,6 +692,9 @@ out:
free (op_req.dict.dict_val);
+ if (dict)
+ dict_unref (dict);
+
if (rsp_dict)
dict_unref (rsp_dict);
@@ -842,11 +851,13 @@ out:
if (ret) {
if (ctx->dict)
- dict_destroy (ctx->dict);
+ dict_unref (ctx->dict);
if (ctx)
GF_FREE (ctx);
}
+ free (lock_req.dict.dict_val);
+
gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
index e24faa0be..1e5353591 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
@@ -25,13 +25,20 @@ extern struct rpc_clnt_program gd_mgmt_v3_prog;
static void
gd_mgmt_v3_collate_errors (struct syncargs *args, int op_ret, int op_errno,
- char *op_errstr, int op_code,
- glusterd_peerinfo_t *peerinfo, u_char *uuid)
+ char *op_errstr, int op_code,
+ glusterd_peerinfo_t *peerinfo, u_char *uuid)
{
- char err_str[PATH_MAX] = "Please check log file for details.";
- char op_err[PATH_MAX] = "";
- int len = -1;
- char *peer_str = NULL;
+ char *peer_str = NULL;
+ char err_str[PATH_MAX] = "Please check log file for details.";
+ char op_err[PATH_MAX] = "";
+ int32_t len = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (args);
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (uuid);
if (op_ret) {
args->op_ret = op_ret;
@@ -105,7 +112,7 @@ gd_mgmt_v3_collate_errors (struct syncargs *args, int op_ret, int op_errno,
"%s", op_err);
err_str[len] = '\0';
- gf_log ("", GF_LOG_ERROR, "%s", op_err);
+ gf_log (this->name, GF_LOG_ERROR, "%s", op_err);
args->errstr = gf_strdup (err_str);
}
@@ -114,32 +121,52 @@ gd_mgmt_v3_collate_errors (struct syncargs *args, int op_ret, int op_errno,
int32_t
gd_mgmt_v3_pre_validate_fn (glusterd_op_t op, dict_t *dict,
- char **op_errstr, dict_t *rsp_dict)
+ char **op_errstr, dict_t *rsp_dict)
{
- int ret = -1;
- xlator_t *this = THIS;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (rsp_dict);
switch (op) {
case GD_OP_SNAP:
{
ret = glusterd_snapshot_prevalidate (dict, op_errstr,
rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Snapshot Prevalidate Failed");
+ goto out;
+ }
+
break;
}
default:
break;
}
+ ret = 0;
+out:
gf_log (this->name, GF_LOG_DEBUG, "OP = %d. Returning %d", op, ret);
return ret;
}
int32_t
gd_mgmt_v3_brick_op_fn (glusterd_op_t op, dict_t *dict,
- char **op_errstr, dict_t *rsp_dict)
+ char **op_errstr, dict_t *rsp_dict)
{
- int ret = -1;
- xlator_t *this = THIS;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (rsp_dict);
switch (op) {
case GD_OP_SNAP:
@@ -155,6 +182,7 @@ gd_mgmt_v3_brick_op_fn (glusterd_op_t op, dict_t *dict,
default:
break;
}
+
ret = 0;
out:
gf_log (this->name, GF_LOG_TRACE, "OP = %d. Returning %d", op, ret);
@@ -163,24 +191,33 @@ out:
int32_t
gd_mgmt_v3_commit_fn (glusterd_op_t op, dict_t *dict,
- char **op_errstr, dict_t *rsp_dict)
+ char **op_errstr, dict_t *rsp_dict)
{
- int ret = -1;
- xlator_t *this = THIS;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (rsp_dict);
switch (op) {
case GD_OP_SNAP:
{
ret = glusterd_snapshot (dict, op_errstr, rsp_dict);
- if (ret)
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Snapshot Commit Failed");
goto out;
+ }
break;
}
default:
break;
}
- ret = 0;
+ ret = 0;
out:
gf_log (this->name, GF_LOG_DEBUG, "OP = %d. Returning %d", op, ret);
return ret;
@@ -188,12 +225,16 @@ out:
int32_t
gd_mgmt_v3_post_validate_fn (glusterd_op_t op, int32_t op_ret, dict_t *dict,
- char **op_errstr, dict_t *rsp_dict)
+ char **op_errstr, dict_t *rsp_dict)
{
- int ret = -1;
- xlator_t *this = THIS;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ this = THIS;
GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (rsp_dict);
switch (op) {
case GD_OP_SNAP:
@@ -223,16 +264,19 @@ int32_t
gd_mgmt_v3_lock_cbk_fn (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
- int ret = -1;
+ int32_t ret = -1;
struct syncargs *args = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
gd1_mgmt_v3_lock_rsp rsp = {{0},};
call_frame_t *frame = NULL;
- int op_ret = -1;
- int op_errno = -1;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ xlator_t *this = NULL;
- GF_ASSERT(req);
- GF_ASSERT(myframe);
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (myframe);
/* Even though the lock command has failed, while collating the errors
(gd_mgmt_v3_collate_errors), args->op_ret and args->op_errno will be
@@ -254,7 +298,7 @@ gd_mgmt_v3_lock_cbk_fn (struct rpc_req *req, struct iovec *iov,
}
if (!iov) {
- gf_log (THIS->name, GF_LOG_ERROR, "iov is NULL");
+ gf_log (this->name, GF_LOG_ERROR, "iov is NULL");
op_errno = EINVAL;
goto out;
}
@@ -273,6 +317,7 @@ out:
gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
GLUSTERD_MGMT_V3_LOCK,
peerinfo, rsp.uuid);
+ free (rsp.dict.dict_val);
STACK_DESTROY (frame->root);
synctask_barrier_wake(args);
return 0;
@@ -292,13 +337,16 @@ gd_mgmt_v3_lock (glusterd_op_t op, dict_t *op_ctx,
struct syncargs *args, uuid_t my_uuid,
uuid_t recv_uuid)
{
- int ret = -1;
gd1_mgmt_v3_lock_req req = {{0},};
glusterd_conf_t *conf = THIS->private;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
- GF_ASSERT(op_ctx);
- GF_ASSERT(peerinfo);
- GF_ASSERT(args);
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (op_ctx);
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (args);
ret = dict_allocate_and_serialize (op_ctx,
&req.dict.dict_val,
@@ -316,31 +364,38 @@ gd_mgmt_v3_lock (glusterd_op_t op, dict_t *op_ctx,
(xdrproc_t) xdr_gd1_mgmt_v3_lock_req);
synclock_lock (&conf->big_lock);
out:
- gf_log ("", GF_LOG_TRACE, "Returning %d", ret);
+ GF_FREE (req.dict.dict_val);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
int
glusterd_mgmt_v3_initiate_lockdown (glusterd_conf_t *conf, glusterd_op_t op,
- dict_t *dict, char **op_errstr, int npeers,
- gf_boolean_t *is_acquired)
+ dict_t *dict, char **op_errstr, int npeers,
+ gf_boolean_t *is_acquired)
{
- int ret = -1;
- int peer_cnt = 0;
char *volname = NULL;
- uuid_t peer_uuid = {0};
- xlator_t *this = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
struct syncargs args = {0};
struct list_head *peers = NULL;
+ uuid_t peer_uuid = {0};
+ xlator_t *this = NULL;
this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (conf);
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (is_acquired);
+
peers = &conf->xaction_peers;
/* Trying to acquire multiple mgmt_v3 locks on local node */
ret = glusterd_multiple_mgmt_v3_lock (dict, MY_UUID);
if (ret) {
- gf_log ("", GF_LOG_ERROR,
+ gf_log (this->name, GF_LOG_ERROR,
"Failed to acquire mgmt_v3 locks on localhost");
goto out;
}
@@ -379,11 +434,11 @@ out:
if (volname)
ret = gf_asprintf (op_errstr,
"Another transaction is in progress "
- "for %s. Please try again after sometime.",
- volname);
+ "for %s. Please try again after "
+ "sometime.", volname);
else
ret = gf_asprintf (op_errstr,
- "Another transaction is in progress. "
+ "Another transaction is in progress "
"Please try again after sometime.");
if (ret == -1)
@@ -398,17 +453,27 @@ out:
int
glusterd_pre_validate_aggr_rsp_dict (glusterd_op_t op, dict_t *aggr, dict_t *rsp)
{
- int ret = 0;
+ int32_t ret = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (aggr);
+ GF_ASSERT (rsp);
switch (op) {
case GD_OP_SNAP:
ret = glusterd_snap_pre_validate_use_rsp_dict (aggr, rsp);
- if (ret)
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to aggregate prevalidate "
+ "response dictionaries.");
goto out;
+ }
break;
default:
ret = -1;
- gf_log ("", GF_LOG_ERROR, "Invalid op (%s)", gd_op_list[op]);
+ gf_log (this->name, GF_LOG_ERROR, "Invalid op (%s)", gd_op_list[op]);
break;
}
@@ -420,17 +485,20 @@ int32_t
gd_mgmt_v3_pre_validate_cbk_fn (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
- int ret = -1;
+ int32_t ret = -1;
struct syncargs *args = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
gd1_mgmt_v3_pre_val_rsp rsp = {{0},};
call_frame_t *frame = NULL;
- int op_ret = -1;
- int op_errno = -1;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
dict_t *rsp_dict = NULL;
+ xlator_t *this = NULL;
- GF_ASSERT(req);
- GF_ASSERT(myframe);
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (myframe);
frame = myframe;
args = frame->local;
@@ -444,7 +512,7 @@ gd_mgmt_v3_pre_validate_cbk_fn (struct rpc_req *req, struct iovec *iov,
}
if (!iov) {
- gf_log (THIS->name, GF_LOG_ERROR, "iov is NULL");
+ gf_log (this->name, GF_LOG_ERROR, "iov is NULL");
op_errno = EINVAL;
}
@@ -477,7 +545,7 @@ gd_mgmt_v3_pre_validate_cbk_fn (struct rpc_req *req, struct iovec *iov,
pthread_mutex_unlock (&args->lock_dict);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "%s",
+ gf_log (this->name, GF_LOG_ERROR, "%s",
"Failed to aggregate response from "
" node/brick");
if (!rsp.op_ret)
@@ -498,6 +566,12 @@ out:
gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
GLUSTERD_MGMT_V3_PRE_VALIDATE,
peerinfo, rsp.uuid);
+
+ if (rsp.op_errstr)
+ free (rsp.op_errstr);
+
+ free (rsp.dict.dict_val);
+
STACK_DESTROY (frame->root);
synctask_barrier_wake(args);
return 0;
@@ -505,25 +579,28 @@ out:
int32_t
gd_mgmt_v3_pre_validate_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+ int count, void *myframe)
{
return glusterd_big_locked_cbk (req, iov, count, myframe,
gd_mgmt_v3_pre_validate_cbk_fn);
}
int
-gd_mgmt_v3_pre_validate (glusterd_op_t op, dict_t *op_ctx,
+gd_mgmt_v3_pre_validate_req (glusterd_op_t op, dict_t *op_ctx,
glusterd_peerinfo_t *peerinfo,
struct syncargs *args, uuid_t my_uuid,
uuid_t recv_uuid)
{
- int ret = -1;
- gd1_mgmt_v3_pre_val_req req = {{0},};
- glusterd_conf_t *conf = THIS->private;
+ int32_t ret = -1;
+ gd1_mgmt_v3_pre_val_req req = {{0},};
+ glusterd_conf_t *conf = THIS->private;
+ xlator_t *this = NULL;
- GF_ASSERT(op_ctx);
- GF_ASSERT(peerinfo);
- GF_ASSERT(args);
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (op_ctx);
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (args);
ret = dict_allocate_and_serialize (op_ctx,
&req.dict.dict_val,
@@ -541,7 +618,8 @@ gd_mgmt_v3_pre_validate (glusterd_op_t op, dict_t *op_ctx,
(xdrproc_t) xdr_gd1_mgmt_v3_pre_val_req);
synclock_lock (&conf->big_lock);
out:
- gf_log ("", GF_LOG_TRACE, "Returning %d", ret);
+ GF_FREE (req.dict.dict_val);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
@@ -549,8 +627,8 @@ int
glusterd_mgmt_v3_pre_validate (glusterd_conf_t *conf, glusterd_op_t op,
dict_t *req_dict, char **op_errstr, int npeers)
{
- int ret = -1;
- int peer_cnt = 0;
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
dict_t *rsp_dict = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
struct syncargs args = {0};
@@ -559,6 +637,11 @@ glusterd_mgmt_v3_pre_validate (glusterd_conf_t *conf, glusterd_op_t op,
xlator_t *this = NULL;
this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (conf);
+ GF_ASSERT (req_dict);
+ GF_ASSERT (op_errstr);
+
peers = &conf->xaction_peers;
rsp_dict = dict_new ();
@@ -594,7 +677,7 @@ glusterd_mgmt_v3_pre_validate (glusterd_conf_t *conf, glusterd_op_t op,
ret = glusterd_pre_validate_aggr_rsp_dict (op, req_dict,
rsp_dict);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "%s",
+ gf_log (this->name, GF_LOG_ERROR, "%s",
"Failed to aggregate response from "
" node/brick");
goto out;
@@ -613,7 +696,7 @@ glusterd_mgmt_v3_pre_validate (glusterd_conf_t *conf, glusterd_op_t op,
synctask_barrier_init((&args));
peer_cnt = 0;
list_for_each_entry (peerinfo, peers, op_peers_list) {
- gd_mgmt_v3_pre_validate (op, req_dict, peerinfo, &args,
+ gd_mgmt_v3_pre_validate_req (op, req_dict, peerinfo, &args,
MY_UUID, peer_uuid);
peer_cnt++;
}
@@ -637,22 +720,22 @@ out:
int
glusterd_mgmt_v3_build_payload (dict_t **req, char **op_errstr, dict_t *dict,
- glusterd_op_t op)
+ glusterd_op_t op)
{
- int ret = -1;
+ int32_t ret = -1;
dict_t *req_dict = NULL;
xlator_t *this = NULL;
- GF_ASSERT (req);
-
this = THIS;
GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (dict);
req_dict = dict_new ();
if (!req_dict)
goto out;
-
switch (op) {
case GD_OP_SNAP:
dict_copy (dict, req_dict);
@@ -663,25 +746,27 @@ glusterd_mgmt_v3_build_payload (dict_t **req, char **op_errstr, dict_t *dict,
*req = req_dict;
ret = 0;
-
out:
return ret;
}
int32_t
gd_mgmt_v3_brick_op_cbk_fn (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+ int count, void *myframe)
{
- int ret = -1;
+ int32_t ret = -1;
struct syncargs *args = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
gd1_mgmt_v3_brick_op_rsp rsp = {{0},};
call_frame_t *frame = NULL;
- int op_ret = -1;
- int op_errno = -1;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ xlator_t *this = NULL;
- GF_ASSERT(req);
- GF_ASSERT(myframe);
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (myframe);
frame = myframe;
args = frame->local;
@@ -699,7 +784,7 @@ gd_mgmt_v3_brick_op_cbk_fn (struct rpc_req *req, struct iovec *iov,
}
if (!iov) {
- gf_log (THIS->name, GF_LOG_ERROR, "iov is NULL");
+ gf_log (this->name, GF_LOG_ERROR, "iov is NULL");
op_errno = EINVAL;
goto out;
}
@@ -718,6 +803,12 @@ out:
gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
GLUSTERD_MGMT_V3_BRICK_OP,
peerinfo, rsp.uuid);
+
+ if (rsp.op_errstr)
+ free (rsp.op_errstr);
+
+ free (rsp.dict.dict_val);
+
STACK_DESTROY (frame->root);
synctask_barrier_wake(args);
return 0;
@@ -725,25 +816,28 @@ out:
int32_t
gd_mgmt_v3_brick_op_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+ int count, void *myframe)
{
return glusterd_big_locked_cbk (req, iov, count, myframe,
gd_mgmt_v3_brick_op_cbk_fn);
}
int
-gd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *op_ctx,
+gd_mgmt_v3_brick_op_req (glusterd_op_t op, dict_t *op_ctx,
glusterd_peerinfo_t *peerinfo,
struct syncargs *args, uuid_t my_uuid,
uuid_t recv_uuid)
{
- int ret = -1;
+ int32_t ret = -1;
gd1_mgmt_v3_brick_op_req req = {{0},};
- glusterd_conf_t *conf = THIS->private;
+ glusterd_conf_t *conf = THIS->private;
+ xlator_t *this = NULL;
- GF_ASSERT(op_ctx);
- GF_ASSERT(peerinfo);
- GF_ASSERT(args);
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (op_ctx);
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (args);
ret = dict_allocate_and_serialize (op_ctx,
&req.dict.dict_val,
@@ -761,16 +855,17 @@ gd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *op_ctx,
(xdrproc_t) xdr_gd1_mgmt_v3_brick_op_req);
synclock_lock (&conf->big_lock);
out:
- gf_log ("", GF_LOG_TRACE, "Returning %d", ret);
+ GF_FREE (req.dict.dict_val);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
int
glusterd_mgmt_v3_brick_op (glusterd_conf_t *conf, glusterd_op_t op,
- dict_t *req_dict, char **op_errstr, int npeers)
+ dict_t *req_dict, char **op_errstr, int npeers)
{
- int ret = -1;
- int peer_cnt = 0;
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
dict_t *rsp_dict = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
struct syncargs args = {0};
@@ -779,6 +874,11 @@ glusterd_mgmt_v3_brick_op (glusterd_conf_t *conf, glusterd_op_t op,
xlator_t *this = NULL;
this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (conf);
+ GF_ASSERT (req_dict);
+ GF_ASSERT (op_errstr);
+
peers = &conf->xaction_peers;
rsp_dict = dict_new ();
@@ -824,7 +924,7 @@ glusterd_mgmt_v3_brick_op (glusterd_conf_t *conf, glusterd_op_t op,
synctask_barrier_init((&args));
peer_cnt = 0;
list_for_each_entry (peerinfo, peers, op_peers_list) {
- gd_mgmt_v3_brick_op (op, req_dict, peerinfo, &args,
+ gd_mgmt_v3_brick_op_req (op, req_dict, peerinfo, &args,
MY_UUID, peer_uuid);
peer_cnt++;
}
@@ -848,19 +948,22 @@ out:
int32_t
gd_mgmt_v3_commit_cbk_fn (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+ int count, void *myframe)
{
- int ret = -1;
+ int32_t ret = -1;
struct syncargs *args = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
gd1_mgmt_v3_commit_rsp rsp = {{0},};
call_frame_t *frame = NULL;
- int op_ret = -1;
- int op_errno = -1;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
dict_t *rsp_dict = NULL;
+ xlator_t *this = NULL;
- GF_ASSERT(req);
- GF_ASSERT(myframe);
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (myframe);
frame = myframe;
args = frame->local;
@@ -874,7 +977,7 @@ gd_mgmt_v3_commit_cbk_fn (struct rpc_req *req, struct iovec *iov,
}
if (!iov) {
- gf_log (THIS->name, GF_LOG_ERROR, "iov is NULL");
+ gf_log (this->name, GF_LOG_ERROR, "iov is NULL");
op_errno = EINVAL;
goto out;
}
@@ -908,7 +1011,7 @@ gd_mgmt_v3_commit_cbk_fn (struct rpc_req *req, struct iovec *iov,
pthread_mutex_unlock (&args->lock_dict);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "%s",
+ gf_log (this->name, GF_LOG_ERROR, "%s",
"Failed to aggregate response from "
" node/brick");
if (!rsp.op_ret)
@@ -923,9 +1026,14 @@ gd_mgmt_v3_commit_cbk_fn (struct rpc_req *req, struct iovec *iov,
}
out:
+ if (rsp_dict)
+ dict_unref (rsp_dict);
+
gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
GLUSTERD_MGMT_V3_COMMIT,
peerinfo, rsp.uuid);
+
+ free (rsp.dict.dict_val);
STACK_DESTROY (frame->root);
synctask_barrier_wake(args);
return 0;
@@ -933,25 +1041,28 @@ out:
int32_t
gd_mgmt_v3_commit_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+ int count, void *myframe)
{
return glusterd_big_locked_cbk (req, iov, count, myframe,
gd_mgmt_v3_commit_cbk_fn);
}
int
-gd_mgmt_v3_commit (glusterd_op_t op, dict_t *op_ctx,
+gd_mgmt_v3_commit_req (glusterd_op_t op, dict_t *op_ctx,
glusterd_peerinfo_t *peerinfo,
struct syncargs *args, uuid_t my_uuid,
uuid_t recv_uuid)
{
- int ret = -1;
- gd1_mgmt_v3_commit_req req = {{0},};
+ int32_t ret = -1;
+ gd1_mgmt_v3_commit_req req = {{0},};
glusterd_conf_t *conf = THIS->private;
+ xlator_t *this = NULL;
- GF_ASSERT(op_ctx);
- GF_ASSERT(peerinfo);
- GF_ASSERT(args);
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (op_ctx);
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (args);
ret = dict_allocate_and_serialize (op_ctx,
&req.dict.dict_val,
@@ -969,17 +1080,18 @@ gd_mgmt_v3_commit (glusterd_op_t op, dict_t *op_ctx,
(xdrproc_t) xdr_gd1_mgmt_v3_commit_req);
synclock_lock (&conf->big_lock);
out:
- gf_log ("", GF_LOG_TRACE, "Returning %d", ret);
+ GF_FREE (req.dict.dict_val);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
int
glusterd_mgmt_v3_commit (glusterd_conf_t *conf, glusterd_op_t op,
- dict_t *op_ctx, dict_t *req_dict,
- char **op_errstr, int npeers)
+ dict_t *op_ctx, dict_t *req_dict,
+ char **op_errstr, int npeers)
{
- int ret = -1;
- int peer_cnt = 0;
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
dict_t *rsp_dict = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
struct syncargs args = {0};
@@ -988,6 +1100,12 @@ glusterd_mgmt_v3_commit (glusterd_conf_t *conf, glusterd_op_t op,
xlator_t *this = NULL;
this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (conf);
+ GF_ASSERT (op_ctx);
+ GF_ASSERT (req_dict);
+ GF_ASSERT (op_errstr);
+
peers = &conf->xaction_peers;
rsp_dict = dict_new ();
@@ -1023,7 +1141,7 @@ glusterd_mgmt_v3_commit (glusterd_conf_t *conf, glusterd_op_t op,
ret = glusterd_syncop_aggr_rsp_dict (op, op_ctx,
rsp_dict);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "%s",
+ gf_log (this->name, GF_LOG_ERROR, "%s",
"Failed to aggregate response from "
" node/brick");
goto out;
@@ -1042,7 +1160,7 @@ glusterd_mgmt_v3_commit (glusterd_conf_t *conf, glusterd_op_t op,
synctask_barrier_init((&args));
peer_cnt = 0;
list_for_each_entry (peerinfo, peers, op_peers_list) {
- gd_mgmt_v3_commit (op, req_dict, peerinfo, &args,
+ gd_mgmt_v3_commit_req (op, req_dict, peerinfo, &args,
MY_UUID, peer_uuid);
peer_cnt++;
}
@@ -1066,18 +1184,21 @@ out:
int32_t
gd_mgmt_v3_post_validate_cbk_fn (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+ int count, void *myframe)
{
- int ret = -1;
+ int32_t ret = -1;
struct syncargs *args = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
gd1_mgmt_v3_post_val_rsp rsp = {{0},};
call_frame_t *frame = NULL;
- int op_ret = -1;
- int op_errno = -1;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ xlator_t *this = NULL;
- GF_ASSERT(req);
- GF_ASSERT(myframe);
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (myframe);
frame = myframe;
args = frame->local;
@@ -1091,7 +1212,7 @@ gd_mgmt_v3_post_validate_cbk_fn (struct rpc_req *req, struct iovec *iov,
}
if (!iov) {
- gf_log (THIS->name, GF_LOG_ERROR, "iov is NULL");
+ gf_log (this->name, GF_LOG_ERROR, "iov is NULL");
op_errno = EINVAL;
goto out;
}
@@ -1110,6 +1231,10 @@ out:
gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
GLUSTERD_MGMT_V3_POST_VALIDATE,
peerinfo, rsp.uuid);
+ if (rsp.op_errstr)
+ free (rsp.op_errstr);
+
+ free (rsp.dict.dict_val);
STACK_DESTROY (frame->root);
synctask_barrier_wake(args);
return 0;
@@ -1117,25 +1242,28 @@ out:
int32_t
gd_mgmt_v3_post_validate_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+ int count, void *myframe)
{
return glusterd_big_locked_cbk (req, iov, count, myframe,
gd_mgmt_v3_post_validate_cbk_fn);
}
int
-gd_mgmt_v3_post_validate (glusterd_op_t op, int32_t op_ret, dict_t *op_ctx,
- glusterd_peerinfo_t *peerinfo,
- struct syncargs *args, uuid_t my_uuid,
- uuid_t recv_uuid)
+gd_mgmt_v3_post_validate_req (glusterd_op_t op, int32_t op_ret, dict_t *op_ctx,
+ glusterd_peerinfo_t *peerinfo,
+ struct syncargs *args, uuid_t my_uuid,
+ uuid_t recv_uuid)
{
- int ret = -1;
+ int32_t ret = -1;
gd1_mgmt_v3_post_val_req req = {{0},};
- glusterd_conf_t *conf = THIS->private;
+ glusterd_conf_t *conf = THIS->private;
+ xlator_t *this = NULL;
- GF_ASSERT(op_ctx);
- GF_ASSERT(peerinfo);
- GF_ASSERT(args);
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (op_ctx);
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (args);
ret = dict_allocate_and_serialize (op_ctx,
&req.dict.dict_val,
@@ -1154,7 +1282,8 @@ gd_mgmt_v3_post_validate (glusterd_op_t op, int32_t op_ret, dict_t *op_ctx,
(xdrproc_t) xdr_gd1_mgmt_v3_post_val_req);
synclock_lock (&conf->big_lock);
out:
- gf_log ("", GF_LOG_TRACE, "Returning %d", ret);
+ GF_FREE (req.dict.dict_val);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
@@ -1163,8 +1292,8 @@ glusterd_mgmt_v3_post_validate (glusterd_conf_t *conf, glusterd_op_t op,
int32_t op_ret, dict_t *req_dict,
char **op_errstr, int npeers)
{
- int ret = -1;
- int peer_cnt = 0;
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
dict_t *rsp_dict = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
struct syncargs args = {0};
@@ -1173,6 +1302,11 @@ glusterd_mgmt_v3_post_validate (glusterd_conf_t *conf, glusterd_op_t op,
xlator_t *this = NULL;
this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (conf);
+ GF_ASSERT (req_dict);
+ GF_ASSERT (op_errstr);
+
peers = &conf->xaction_peers;
rsp_dict = dict_new ();
@@ -1218,8 +1352,8 @@ glusterd_mgmt_v3_post_validate (glusterd_conf_t *conf, glusterd_op_t op,
synctask_barrier_init((&args));
peer_cnt = 0;
list_for_each_entry (peerinfo, peers, op_peers_list) {
- gd_mgmt_v3_post_validate (op, op_ret, req_dict, peerinfo, &args,
- MY_UUID, peer_uuid);
+ gd_mgmt_v3_post_validate_req (op, op_ret, req_dict, peerinfo,
+ &args, MY_UUID, peer_uuid);
peer_cnt++;
}
gd_synctask_barrier_wait((&args), peer_cnt);
@@ -1244,16 +1378,19 @@ int32_t
gd_mgmt_v3_unlock_cbk_fn (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
- int ret = -1;
+ int32_t ret = -1;
struct syncargs *args = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
gd1_mgmt_v3_unlock_rsp rsp = {{0},};
call_frame_t *frame = NULL;
- int op_ret = -1;
- int op_errno = -1;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ xlator_t *this = NULL;
- GF_ASSERT(req);
- GF_ASSERT(myframe);
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (myframe);
frame = myframe;
args = frame->local;
@@ -1267,7 +1404,7 @@ gd_mgmt_v3_unlock_cbk_fn (struct rpc_req *req, struct iovec *iov,
}
if (!iov) {
- gf_log (THIS->name, GF_LOG_ERROR, "iov is NULL");
+ gf_log (this->name, GF_LOG_ERROR, "iov is NULL");
op_errno = EINVAL;
goto out;
}
@@ -1286,6 +1423,7 @@ out:
gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
GLUSTERD_MGMT_V3_UNLOCK,
peerinfo, rsp.uuid);
+ free (rsp.dict.dict_val);
STACK_DESTROY (frame->root);
synctask_barrier_wake(args);
return 0;
@@ -1305,13 +1443,16 @@ gd_mgmt_v3_unlock (glusterd_op_t op, dict_t *op_ctx,
struct syncargs *args, uuid_t my_uuid,
uuid_t recv_uuid)
{
- int ret = -1;
+ int32_t ret = -1;
gd1_mgmt_v3_unlock_req req = {{0},};
glusterd_conf_t *conf = THIS->private;
+ xlator_t *this = NULL;
- GF_ASSERT(op_ctx);
- GF_ASSERT(peerinfo);
- GF_ASSERT(args);
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (op_ctx);
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (args);
ret = dict_allocate_and_serialize (op_ctx,
&req.dict.dict_val,
@@ -1329,7 +1470,8 @@ gd_mgmt_v3_unlock (glusterd_op_t op, dict_t *op_ctx,
(xdrproc_t) xdr_gd1_mgmt_v3_unlock_req);
synclock_lock (&conf->big_lock);
out:
- gf_log ("", GF_LOG_TRACE, "Returning %d", ret);
+ GF_FREE (req.dict.dict_val);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
@@ -1339,8 +1481,8 @@ glusterd_mgmt_v3_release_peer_locks (glusterd_conf_t *conf, glusterd_op_t op,
char **op_errstr, int npeers,
gf_boolean_t is_acquired)
{
- int ret = -1;
- int peer_cnt = 0;
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
uuid_t peer_uuid = {0};
xlator_t *this = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
@@ -1348,6 +1490,11 @@ glusterd_mgmt_v3_release_peer_locks (glusterd_conf_t *conf, glusterd_op_t op,
struct list_head *peers = NULL;
this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (conf);
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+
peers = &conf->xaction_peers;
/* If the lock has not been held during this
@@ -1392,9 +1539,9 @@ int32_t
glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
dict_t *dict)
{
- int ret = -1;
- int op_ret = -1;
- int npeers = 0;
+ int32_t ret = -1;
+ int32_t op_ret = -1;
+ int32_t npeers = 0;
dict_t *req_dict = NULL;
dict_t *tmp_dict = NULL;
glusterd_conf_t *conf = NULL;
@@ -1405,6 +1552,8 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
this = THIS;
GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (dict);
conf = this->private;
GF_ASSERT (conf);
@@ -1532,11 +1681,11 @@ out:
int32_t
glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
- dict_t *dict)
+ dict_t *dict)
{
- int ret = -1;
- int op_ret = -1;
- int npeers = 0;
+ int32_t ret = -1;
+ int32_t op_ret = -1;
+ int32_t npeers = 0;
dict_t *req_dict = NULL;
dict_t *tmp_dict = NULL;
glusterd_conf_t *conf = NULL;
@@ -1549,6 +1698,8 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
this = THIS;
GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (dict);
conf = this->private;
GF_ASSERT (conf);
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
index 90f0c6c22..ba4b05b99 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ Copyright (c) 2013-2014 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index a3685dbeb..206a5ecc3 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -2920,6 +2920,7 @@ glusterd_import_volinfo (dict_t *vols, int count,
char *rb_id_str = NULL;
int op_version = 0;
int client_op_version = 0;
+ uint32_t is_snap_volume = 0;
GF_ASSERT (vols);
GF_ASSERT (volinfo);
@@ -2933,14 +2934,14 @@ glusterd_import_volinfo (dict_t *vols, int count,
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "volume%d.is_snap_volume", count);
- ret = dict_get_uint32 (vols, key, &new_volinfo->is_snap_volume);
+ ret = dict_get_uint32 (vols, key, &is_snap_volume);
if (ret) {
snprintf (msg, sizeof (msg), "%s missing in payload for %s",
key, volname);
goto out;
}
- if (new_volinfo->is_snap_volume == _gf_true) {
+ if (is_snap_volume == _gf_true) {
gf_log (THIS->name, GF_LOG_DEBUG,
"Not syncing snap volume %s", volname);
ret = 0;
@@ -3072,6 +3073,8 @@ glusterd_import_volinfo (dict_t *vols, int count,
goto out;
}
+ new_volinfo->is_snap_volume = is_snap_volume;
+
snprintf (key, sizeof (key), "volume%d.snap-max-hard-limit", count);
ret = dict_get_uint64 (vols, key, &new_volinfo->snap_max_hard_limit);
if (ret) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
index 4c4dbd735..621e2fda9 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
@@ -4107,7 +4107,7 @@ gd_is_boolean_option (char *key)
return _gf_false;
}
-/* This function will restore origin volume to it it's snap.
+/* This function will restore origin volume to it's snap.
* The restore operation will simply replace the Gluster origin
* volume with the snap volume.
* TODO: Multi-volume delete to be done.