Diffstat (limited to 'xlators/mgmt')
 xlators/mgmt/glusterd/src/glusterd-locks.c        | 465
 xlators/mgmt/glusterd/src/glusterd-locks.h        |   8
 xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c |  74
 xlators/mgmt/glusterd/src/glusterd-mgmt.c         |  78
 xlators/mgmt/glusterd/src/glusterd-snapshot.c     |   2
 5 files changed, 420 insertions(+), 207 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.c b/xlators/mgmt/glusterd/src/glusterd-locks.c
index f5636a0f6..a099c7b1e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.c
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.c
@@ -26,8 +26,16 @@
#include <signal.h>
-/* Valid entities that the mgt_v3 lock can hold locks upon */
-char *valid_types[] = { "vol", "snap", NULL };
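+/* Must match the number of real entries in valid_types below */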
+#define MAX_LOCKING_ENTITIES 2
+
+/* Valid entities that the mgmt_v3 lock can hold locks upon *
+ * To add newer entities to be locked, just add more *
+ * entries to this table along with the type and default value */
+valid_entities valid_types[] = {
+ { "vol", _gf_true },
+ { "snap", _gf_false },
+ { NULL },
+};
static dict_t *mgmt_v3_lock;
@@ -40,8 +48,8 @@ glusterd_mgmt_v3_is_type_valid (char *type)
GF_ASSERT (type);
- for (i = 0; valid_types[i]; i++) {
- if (!strcmp (type, valid_types[i])) {
+ for (i = 0; valid_types[i].type; i++) {
+ if (!strcmp (type, valid_types[i].type)) {
ret = _gf_true;
break;
}
@@ -83,9 +91,13 @@ glusterd_get_mgmt_v3_lock_owner (char *key, uuid_t *uuid)
int32_t ret = -1;
mgmt_v3_lock_obj *lock_obj = NULL;
uuid_t no_owner = {"\0"};
+ xlator_t *this = NULL;
+
+ GF_ASSERT(THIS);
+ this = THIS;
if (!key || !uuid) {
- gf_log ("", GF_LOG_ERROR, "key or uuid is null.");
+ gf_log (this->name, GF_LOG_ERROR, "key or uuid is null.");
ret = -1;
goto out;
}
@@ -98,134 +110,373 @@ glusterd_get_mgmt_v3_lock_owner (char *key, uuid_t *uuid)
ret = 0;
out:
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
-int32_t
-glusterd_multiple_mgmt_v3_unlock (dict_t *dict, uuid_t uuid)
+/* This function is called with the locked_count and type to *
+ * release all the acquired locks. */
+static int32_t
+glusterd_release_multiple_locks_per_entity (dict_t *dict, uuid_t uuid,
+ int32_t locked_count,
+ char *type)
{
+ char name_buf[PATH_MAX] = "";
+ char *name = NULL;
+ int32_t i = -1;
+ int32_t op_ret = 0;
int32_t ret = -1;
- int32_t op_ret = 0;
+ xlator_t *this = NULL;
+
+ GF_ASSERT(THIS);
+ GF_ASSERT (dict);
+ GF_ASSERT (type);
+
+ this = THIS;
+
+ if (locked_count == 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "No %s locked as part of this transaction",
+ type);
+ goto out;
+ }
+
+ /* Release all the locks held */
+ for (i = 0; i < locked_count; i++) {
+ snprintf (name_buf, sizeof(name_buf),
+ "%sname%d", type, i+1);
+
+ /* Looking for volname1, volname2 or snapname1, *
+ * snapname2 as key in the dict */
+ ret = dict_get_str (dict, name_buf, &name);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to get %s locked_count = %d",
+ name_buf, locked_count);
+ op_ret = ret;
+ continue;
+ }
+
+ ret = glusterd_mgmt_v3_unlock (name, uuid, type);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to release lock for %s.",
+ name);
+ op_ret = ret;
+ }
+ }
+
+out:
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", op_ret);
+ return op_ret;
+}
+
+/* Given the count and type of the entity, this function acquires *
+ * locks on multiple elements of the same entity. For example: *
+ * if type is "vol", this function tries to acquire locks on multiple *
+ * volumes */
+static int32_t
+glusterd_acquire_multiple_locks_per_entity (dict_t *dict, uuid_t uuid,
+ int32_t count, char *type)
+{
+ char name_buf[PATH_MAX] = "";
+ char *name = NULL;
int32_t i = -1;
- int32_t volcount = -1;
- char volname_buf[PATH_MAX] = "";
- char *volname = NULL;
+ int32_t ret = -1;
+ int32_t locked_count = 0;
+ xlator_t *this = NULL;
- if (!dict) {
- gf_log ("", GF_LOG_ERROR, "dict is null.");
- ret = -1;
+ GF_ASSERT(THIS);
+ GF_ASSERT (dict);
+ GF_ASSERT (type);
+
+ this = THIS;
+
+ /* Locking one element after another */
+ for (i = 0; i < count; i++) {
+ snprintf (name_buf, sizeof(name_buf),
+ "%sname%d", type, i+1);
+
+ /* Looking for volname1, volname2 or snapname1, *
+ * snapname2 as key in the dict */
+ ret = dict_get_str (dict, name_buf, &name);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to get %s count = %d",
+ name_buf, count);
+ break;
+ }
+
+ ret = glusterd_mgmt_v3_lock (name, uuid, type);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to acquire lock for %s %s "
+ "on behalf of %s. Reversing "
+ "this transaction", type, name,
+ uuid_utoa(uuid));
+ break;
+ }
+ locked_count++;
+ }
+
+ if (count == locked_count) {
+ /* If all locking ops went successfully, return success */
+ ret = 0;
goto out;
}
- ret = dict_get_int32 (dict, "volcount", &volcount);
+ /* If we failed to lock one element, unlock others and return failure */
+ ret = glusterd_release_multiple_locks_per_entity (dict, uuid,
+ locked_count,
+ type);
if (ret) {
- gf_log ("", GF_LOG_DEBUG, "Failed to get volcount"
- "name");
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to release multiple %s locks",
+ type);
+ }
+ ret = -1;
+out:
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+/* Given the type of entity, this function figures out if it should unlock a *
+ * single element or multiple elements of the said entity. For example: *
+ * if the type is "vol", this function will accordingly unlock a single volume *
+ * or multiple volumes */
+static int32_t
+glusterd_mgmt_v3_unlock_entity (dict_t *dict, uuid_t uuid, char *type,
+ gf_boolean_t default_value)
+{
+ char name_buf[PATH_MAX] = "";
+ char *name = NULL;
+ int32_t count = -1;
+ int32_t ret = -1;
+ gf_boolean_t hold_locks = _gf_false;
+ xlator_t *this = NULL;
+
+ GF_ASSERT(THIS);
+ GF_ASSERT (dict);
+ GF_ASSERT (type);
+
+ this = THIS;
+
+ snprintf (name_buf, sizeof(name_buf), "hold_%s_locks", type);
+ hold_locks = dict_get_str_boolean (dict, name_buf, default_value);
+
+ if (hold_locks == _gf_false) {
+ /* Locks were not held for this particular entity. *
+ * Hence nothing to release */
+ ret = 0;
goto out;
}
- /* Unlocking one volume after other */
- for (i = 1; i <= volcount; i++) {
- ret = snprintf (volname_buf, sizeof(volname_buf) - 1,
- "volname%d", i);
- volname_buf[ret] = '\0';
+ /* Looking for volcount or snapcount in the dict */
+ snprintf (name_buf, sizeof(name_buf), "%scount", type);
+ ret = dict_get_int32 (dict, name_buf, &count);
+ if (ret) {
+ /* count is not present. Only one *
+ * element name needs to be unlocked */
+ snprintf (name_buf, sizeof(name_buf), "%sname",
+ type);
+ ret = dict_get_str (dict, name_buf, &name);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to fetch %sname", type);
+ goto out;
+ }
+
+ ret = glusterd_mgmt_v3_unlock (name, uuid, type);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to release lock for %s %s "
+ "on behalf of %s.", type, name,
+ uuid_utoa(uuid));
+ goto out;
+ }
+ } else {
+ /* Unlocking one element name after another */
+ ret = glusterd_release_multiple_locks_per_entity (dict,
+ uuid,
+ count,
+ type);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to release all %s locks", type);
+ goto out;
+ }
+ }
+
+ ret = 0;
+out:
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
- ret = dict_get_str (dict, volname_buf, &volname);
+/* Given the type of entity, this function figures out if it should lock a *
+ * single element or multiple elements of the said entity. For example: *
+ * if the type is "vol", this function will accordingly lock a single volume *
+ * or multiple volumes */
+static int32_t
+glusterd_mgmt_v3_lock_entity (dict_t *dict, uuid_t uuid, char *type,
+ gf_boolean_t default_value)
+{
+ char name_buf[PATH_MAX] = "";
+ char *name = NULL;
+ int32_t count = -1;
+ int32_t ret = -1;
+ gf_boolean_t hold_locks = _gf_false;
+ xlator_t *this = NULL;
+
+ GF_ASSERT(THIS);
+ GF_ASSERT (dict);
+ GF_ASSERT (type);
+
+ this = THIS;
+
+ snprintf (name_buf, sizeof(name_buf), "hold_%s_locks", type);
+ hold_locks = dict_get_str_boolean (dict, name_buf, default_value);
+
+ if (hold_locks == _gf_false) {
+ /* Not holding locks for this particular entity */
+ ret = 0;
+ goto out;
+ }
+
+ /* Looking for volcount or snapcount in the dict */
+ snprintf (name_buf, sizeof(name_buf), "%scount", type);
+ ret = dict_get_int32 (dict, name_buf, &count);
+ if (ret) {
+ /* count is not present. Only one *
+ * element name needs to be locked */
+ snprintf (name_buf, sizeof(name_buf), "%sname",
+ type);
+ ret = dict_get_str (dict, name_buf, &name);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to get %s Volcount = %d",
- volname_buf, volcount);
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to fetch %sname", type);
goto out;
}
- ret = glusterd_mgmt_v3_unlock (volname, uuid, "vol");
+ ret = glusterd_mgmt_v3_lock (name, uuid, type);
if (ret) {
- gf_log ("", GF_LOG_ERROR,
- "Failed to release lock for %s. ", volname);
- op_ret = ret;
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to acquire lock for %s %s "
+ "on behalf of %s.", type, name,
+ uuid_utoa(uuid));
+ goto out;
+ }
+ } else {
+ /* Locking one element name after another */
+ ret = glusterd_acquire_multiple_locks_per_entity (dict,
+ uuid,
+ count,
+ type);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to acquire all %s locks", type);
+ goto out;
}
}
- ret = op_ret;
+ ret = 0;
out:
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
+/* Try to release locks on multiple entities like *
+ * volumes, snaps etc. */
int32_t
-glusterd_multiple_mgmt_v3_lock (dict_t *dict, uuid_t uuid)
+glusterd_multiple_mgmt_v3_unlock (dict_t *dict, uuid_t uuid)
{
- int32_t ret = -1;
int32_t i = -1;
- int32_t volcount = -1;
- char volname_buf[PATH_MAX] = "";
- char *volname = NULL;
- int32_t locked_volcount = 0;
+ int32_t ret = -1;
+ int32_t op_ret = 0;
+ xlator_t *this = NULL;
+
+ GF_ASSERT(THIS);
+ this = THIS;
if (!dict) {
- gf_log ("", GF_LOG_ERROR, "dict is null.");
+ gf_log (this->name, GF_LOG_ERROR, "dict is null.");
ret = -1;
goto out;
}
- ret = dict_get_int32 (dict, "volcount", &volcount);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Failed to get volcount"
- "name");
- goto out;
+ for (i = 0; valid_types[i].type; i++) {
+ ret = glusterd_mgmt_v3_unlock_entity
+ (dict, uuid,
+ valid_types[i].type,
+ valid_types[i].default_value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to unlock all %s",
+ valid_types[i].type);
+ op_ret = ret;
+ }
}
- /* Locking one volume after other */
- for (i = 1; i <= volcount; i++) {
- ret = snprintf (volname_buf, sizeof(volname_buf) - 1,
- "volname%d", i);
- volname_buf[ret] = '\0';
+ ret = op_ret;
+out:
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
- ret = dict_get_str (dict, volname_buf, &volname);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to get %s Volcount = %d",
- volname_buf, volcount);
- goto out;
- }
+/* Try to acquire locks on multiple entities like *
+ * volumes, snaps etc. */
+int32_t
+glusterd_multiple_mgmt_v3_lock (dict_t *dict, uuid_t uuid)
+{
+ int32_t i = -1;
+ int32_t ret = -1;
+ int32_t locked_count = 0;
+ xlator_t *this = NULL;
- ret = glusterd_mgmt_v3_lock (volname, uuid, "vol");
+ GF_ASSERT(THIS);
+ this = THIS;
+
+ if (!dict) {
+ gf_log (this->name, GF_LOG_ERROR, "dict is null.");
+ ret = -1;
+ goto out;
+ }
+
+ /* Locking one entity after another */
+ for (i = 0; valid_types[i].type; i++) {
+ ret = glusterd_mgmt_v3_lock_entity
+ (dict, uuid,
+ valid_types[i].type,
+ valid_types[i].default_value);
if (ret) {
- gf_log ("", GF_LOG_ERROR,
- "Failed to acquire lock for %s "
- "on behalf of %s. Reversing "
- "this transaction", volname,
- uuid_utoa(uuid));
+ gf_log (this->name, GF_LOG_ERROR, "Unable to lock all %s",
+ valid_types[i].type);
break;
}
- locked_volcount ++;
+ locked_count++;
}
- /* If we failed to lock one volume, unlock others and return failure */
- if (volcount != locked_volcount) {
- for (i = 1; i <= locked_volcount; i++) {
- ret = snprintf (volname_buf, sizeof(volname_buf) - 1,
- "volname%d", i);
- volname_buf[ret] = '\0';
-
- ret = dict_get_str (dict, volname_buf, &volname);
- if (ret) {
- gf_log ("", GF_LOG_ERROR,
- "Unable to get %s lockd_volcount = %d",
- volname_buf, volcount);
- goto out;
- }
-
- ret = glusterd_mgmt_v3_unlock (volname, uuid, "vol");
- if (ret)
- gf_log ("", GF_LOG_ERROR,
- "Failed to release lock for %s.",
- volname);
- }
- ret = -1;
+ if (locked_count == MAX_LOCKING_ENTITIES) {
+ /* If all locking ops went successfully, return success */
+ ret = 0;
+ goto out;
}
+ /* If we failed to lock one entity, unlock others and return failure */
+ for (i = 0; i < locked_count; i++) {
+ ret = glusterd_mgmt_v3_unlock_entity
+ (dict, uuid,
+ valid_types[i].type,
+ valid_types[i].default_value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to unlock all %s",
+ valid_types[i].type);
+ }
+ }
+ ret = -1;
out:
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
@@ -237,16 +488,20 @@ glusterd_mgmt_v3_lock (const char *name, uuid_t uuid, char *type)
mgmt_v3_lock_obj *lock_obj = NULL;
gf_boolean_t is_valid = _gf_true;
uuid_t owner = {0};
+ xlator_t *this = NULL;
+
+ GF_ASSERT(THIS);
+ this = THIS;
if (!name || !type) {
- gf_log (THIS->name, GF_LOG_ERROR, "name or type is null.");
+ gf_log (this->name, GF_LOG_ERROR, "name or type is null.");
ret = -1;
goto out;
}
is_valid = glusterd_mgmt_v3_is_type_valid (type);
if (is_valid != _gf_true) {
- gf_log ("", GF_LOG_ERROR,
+ gf_log (this->name, GF_LOG_ERROR,
"Invalid entity. Cannot perform locking "
"operation on %s types", type);
ret = -1;
@@ -256,17 +511,17 @@ glusterd_mgmt_v3_lock (const char *name, uuid_t uuid, char *type)
ret = snprintf (key, sizeof(key), "%s_%s", name, type);
if (ret != strlen(name) + 1 + strlen(type)) {
ret = -1;
- gf_log (THIS->name, GF_LOG_ERROR, "Unable to create key");
+ gf_log (this->name, GF_LOG_ERROR, "Unable to create key");
goto out;
}
- gf_log (THIS->name, GF_LOG_DEBUG,
+ gf_log (this->name, GF_LOG_DEBUG,
"Trying to acquire lock of %s %s for %s as %s",
type, name, uuid_utoa (uuid), key);
ret = glusterd_get_mgmt_v3_lock_owner (key, &owner);
if (ret) {
- gf_log (THIS->name, GF_LOG_DEBUG,
+ gf_log (this->name, GF_LOG_DEBUG,
"Unable to get mgmt_v3 lock owner");
goto out;
}
@@ -274,7 +529,7 @@ glusterd_mgmt_v3_lock (const char *name, uuid_t uuid, char *type)
/* If the lock has already been held for the given volume
* we fail */
if (!uuid_is_null (owner)) {
- gf_log (THIS->name, GF_LOG_ERROR, "Lock for %s held by %s",
+ gf_log (this->name, GF_LOG_ERROR, "Lock for %s held by %s",
name, uuid_utoa (owner));
ret = -1;
goto out;
@@ -292,20 +547,20 @@ glusterd_mgmt_v3_lock (const char *name, uuid_t uuid, char *type)
ret = dict_set_bin (mgmt_v3_lock, key, lock_obj,
sizeof(*lock_obj));
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
+ gf_log (this->name, GF_LOG_ERROR,
"Unable to set lock owner in mgmt_v3 lock");
if (lock_obj)
GF_FREE (lock_obj);
goto out;
}
- gf_log (THIS->name, GF_LOG_DEBUG,
+ gf_log (this->name, GF_LOG_DEBUG,
"Lock for %s %s successfully held by %s",
type, name, uuid_utoa (uuid));
ret = 0;
out:
- gf_log (THIS->name, GF_LOG_TRACE, "Returning %d", ret);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
@@ -316,16 +571,20 @@ glusterd_mgmt_v3_unlock (const char *name, uuid_t uuid, char *type)
int32_t ret = -1;
gf_boolean_t is_valid = _gf_true;
uuid_t owner = {0};
+ xlator_t *this = NULL;
+
+ GF_ASSERT(THIS);
+ this = THIS;
if (!name || !type) {
- gf_log (THIS->name, GF_LOG_ERROR, "name is null.");
+ gf_log (this->name, GF_LOG_ERROR, "name or type is null.");
ret = -1;
goto out;
}
is_valid = glusterd_mgmt_v3_is_type_valid (type);
if (is_valid != _gf_true) {
- gf_log ("", GF_LOG_ERROR,
+ gf_log (this->name, GF_LOG_ERROR,
"Invalid entity. Cannot perform unlocking "
"operation on %s types", type);
ret = -1;
@@ -335,24 +594,24 @@ glusterd_mgmt_v3_unlock (const char *name, uuid_t uuid, char *type)
ret = snprintf (key, sizeof(key), "%s_%s",
name, type);
if (ret != strlen(name) + 1 + strlen(type)) {
- gf_log (THIS->name, GF_LOG_ERROR, "Unable to create key");
+ gf_log (this->name, GF_LOG_ERROR, "Unable to create key");
ret = -1;
goto out;
}
- gf_log (THIS->name, GF_LOG_DEBUG,
+ gf_log (this->name, GF_LOG_DEBUG,
"Trying to release lock of %s %s for %s as %s",
type, name, uuid_utoa (uuid), key);
ret = glusterd_get_mgmt_v3_lock_owner (key, &owner);
if (ret) {
- gf_log (THIS->name, GF_LOG_DEBUG,
+ gf_log (this->name, GF_LOG_DEBUG,
"Unable to get mgmt_v3 lock owner");
goto out;
}
if (uuid_is_null (owner)) {
- gf_log (THIS->name, GF_LOG_ERROR,
+ gf_log (this->name, GF_LOG_ERROR,
"Lock for %s %s not held", type, name);
ret = -1;
goto out;
@@ -361,7 +620,7 @@ glusterd_mgmt_v3_unlock (const char *name, uuid_t uuid, char *type)
ret = uuid_compare (uuid, owner);
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "Lock owner mismatch. "
+ gf_log (this->name, GF_LOG_ERROR, "Lock owner mismatch. "
"Lock for %s %s held by %s",
type, name, uuid_utoa (owner));
goto out;
@@ -370,12 +629,12 @@ glusterd_mgmt_v3_unlock (const char *name, uuid_t uuid, char *type)
/* Removing the mgmt_v3 lock from the global list */
dict_del (mgmt_v3_lock, key);
- gf_log (THIS->name, GF_LOG_DEBUG,
+ gf_log (this->name, GF_LOG_DEBUG,
"Lock for %s %s successfully released",
type, name);
ret = 0;
out:
- gf_log (THIS->name, GF_LOG_TRACE, "Returning %d", ret);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
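The two generic entry points above walk the valid_types table and consume a dict whose keys are derived from the entity type, per the snprintf formats in the code: "<type>count" plus "<type>name1".."<type>nameN" for multiple elements, a bare "<type>name" for a single element, and an optional "hold_<type>_locks" boolean that overrides the table default. A minimal caller-side sketch of that layout (illustrative only, not part of this patch; error handling elided):

    int32_t ret = -1;
    dict_t *dict = dict_new ();

    /* Two volumes: "volcount" plus volname1..volnameN */
    ret = dict_set_int32 (dict, "volcount", 2);
    ret = dict_set_str (dict, "volname1", "vol-a");
    ret = dict_set_str (dict, "volname2", "vol-b");

    /* One snap: no "snapcount", so the single-name path reads "snapname".
     * Snap locks default to _gf_false in valid_types, so the transaction
     * must opt in via "hold_snap_locks". */
    ret = dict_set_str (dict, "snapname", "snap-1");
    ret = dict_set_str (dict, "hold_snap_locks", "true");

    ret = glusterd_multiple_mgmt_v3_lock (dict, MY_UUID);
    /* ... perform the transaction ... */
    ret = glusterd_multiple_mgmt_v3_unlock (dict, MY_UUID);
    dict_unref (dict);

Because "vol" defaults to _gf_true and "snap" to _gf_false, existing volume transactions keep their locking behavior unchanged while snapshot transactions opt in explicitly.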
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.h b/xlators/mgmt/glusterd/src/glusterd-locks.h
index 9ca332fe9..83eb8c997 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.h
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.h
@@ -19,6 +19,14 @@ typedef struct mgmt_v3_lock_object_ {
uuid_t lock_owner;
} mgmt_v3_lock_obj;
+typedef struct mgmt_v3_lock_valid_entities {
+ char *type; /* Entity type like vol, snap */
+ gf_boolean_t default_value; /* The default value that *
+ * determines if the locks *
+ * should be held for that *
+ * entity */
+} valid_entities;
+
int32_t
glusterd_mgmt_v3_lock_init ();
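With locking now table-driven, supporting another lockable entity is one more row in valid_types (glusterd-locks.c), as the comment at the top of that table notes. A hypothetical sketch, using an invented "dev" entity purely for illustration:

    /* Hypothetical: a third lockable entity "dev", skipped by *
     * default unless the dict sets "hold_dev_locks" */
    valid_entities valid_types[] = {
            { "vol",  _gf_true  },
            { "snap", _gf_false },
            { "dev",  _gf_false },
            { NULL },
    };

MAX_LOCKING_ENTITIES (currently 2) would have to be bumped in step, since glusterd_multiple_mgmt_v3_lock () treats locked_count == MAX_LOCKING_ENTITIES as full success.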
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
index 5077a0092..27d40b3a7 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
@@ -30,9 +30,12 @@ static int
glusterd_mgmt_v3_lock_send_resp (rpcsvc_request_t *req, int32_t status)
{
- gd1_mgmt_v3_lock_rsp rsp = {{0},};
- int ret = -1;
+ gd1_mgmt_v3_lock_rsp rsp = {{0},};
+ int ret = -1;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT (this);
GF_ASSERT (req);
rsp.op_ret = status;
@@ -44,7 +47,7 @@ glusterd_mgmt_v3_lock_send_resp (rpcsvc_request_t *req, int32_t status)
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
(xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
- gf_log (THIS->name, GF_LOG_DEBUG,
+ gf_log (this->name, GF_LOG_DEBUG,
"Responded to mgmt_v3 lock, ret: %d", ret);
return ret;
@@ -56,9 +59,7 @@ glusterd_synctasked_mgmt_v3_lock (rpcsvc_request_t *req,
glusterd_op_lock_ctx_t *ctx)
{
int32_t ret = -1;
- int32_t volcount = -1;
xlator_t *this = NULL;
- char *volname = NULL;
this = THIS;
GF_ASSERT (this);
@@ -66,29 +67,13 @@ glusterd_synctasked_mgmt_v3_lock (rpcsvc_request_t *req,
GF_ASSERT (ctx);
GF_ASSERT (ctx->dict);
- ret = dict_get_int32 (ctx->dict, "volcount", &volcount);
- if (ret) {
- ret = dict_get_str (ctx->dict, "volname", &volname);
- if (ret) {
- gf_log ("", GF_LOG_ERROR,
- "Failed to get volname");
- goto out;
- }
- ret = glusterd_mgmt_v3_lock (volname, ctx->uuid, "vol");
-
- if (ret)
- gf_log (this->name, GF_LOG_ERROR,
- "Unable to acquire local lock for %s", volname);
- } else {
- /* Trying to acquire multiple mgmt_v3 locks */
- ret = glusterd_multiple_mgmt_v3_lock (ctx->dict, ctx->uuid);
- if (ret)
- gf_log ("", GF_LOG_ERROR,
- "Failed to acquire mgmt_v3 locks for %s",
- uuid_utoa (ctx->uuid));
- }
+ /* Trying to acquire multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_lock (ctx->dict, ctx->uuid);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to acquire mgmt_v3 locks for %s",
+ uuid_utoa (ctx->uuid));
-out:
ret = glusterd_mgmt_v3_lock_send_resp (req, ret);
gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
@@ -712,9 +697,12 @@ static int
glusterd_mgmt_v3_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
{
- gd1_mgmt_v3_unlock_rsp rsp = {{0},};
- int ret = -1;
+ gd1_mgmt_v3_unlock_rsp rsp = {{0},};
+ int ret = -1;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT (this);
GF_ASSERT (req);
rsp.op_ret = status;
@@ -726,7 +714,7 @@ glusterd_mgmt_v3_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
(xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
- gf_log (THIS->name, GF_LOG_DEBUG,
+ gf_log (this->name, GF_LOG_DEBUG,
"Responded to mgmt_v3 unlock, ret: %d", ret);
return ret;
@@ -738,37 +726,21 @@ glusterd_synctasked_mgmt_v3_unlock (rpcsvc_request_t *req,
glusterd_op_lock_ctx_t *ctx)
{
int32_t ret = -1;
- int32_t volcount = -1;
xlator_t *this = NULL;
- char *volname = NULL;
this = THIS;
GF_ASSERT (this);
GF_ASSERT (req);
GF_ASSERT (ctx);
- ret = dict_get_int32 (ctx->dict, "volcount", &volcount);
+ /* Trying to release multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_unlock (ctx->dict, ctx->uuid);
if (ret) {
- ret = dict_get_str (ctx->dict, "volname", &volname);
- if (ret) {
- gf_log ("", GF_LOG_ERROR,
- "Failed to get volname");
- goto out;
- }
- ret = glusterd_mgmt_v3_unlock (volname, ctx->uuid, "vol");
- if (ret)
- gf_log (this->name, GF_LOG_ERROR,
- "Unable to release lock for %s", volname);
- } else {
- /* Trying to release multiple mgmt_v3 locks */
- ret = glusterd_multiple_mgmt_v3_unlock (ctx->dict, ctx->uuid);
- if (ret)
- gf_log ("", GF_LOG_ERROR,
- "Failed to release mgmt_v3 locks for %s",
- uuid_utoa(ctx->uuid));
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to release mgmt_v3 locks for %s",
+ uuid_utoa(ctx->uuid));
}
-out:
ret = glusterd_mgmt_v3_unlock_send_resp (req, ret);
gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
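Routing both handlers unconditionally through the multiple-lock API stays backward compatible with old-style requests: a dict that carries only "volname" and no "volcount" takes the single-name path in glusterd_mgmt_v3_lock_entity (), which falls back to the bare "%sname" key. A sketch of such a legacy-shaped request dict (illustrative only; error handling elided):

    int32_t ret = -1;
    dict_t *dict = dict_new ();
    ret = dict_set_str (dict, "volname", "legacy-vol");

    /* No "volcount" present: the single-name path is taken and the
     * lock is registered under the key "legacy-vol_vol" ("%s_%s" of
     * name and type). */
    ret = glusterd_multiple_mgmt_v3_lock (dict, ctx->uuid);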
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
index e895f6e4f..cdc51849f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
@@ -302,23 +302,12 @@ glusterd_mgmt_v3_initiate_lockdown (glusterd_conf_t *conf, glusterd_op_t op,
this = THIS;
peers = &conf->xaction_peers;
- /* mgmt_v3 lock on local node */
- ret = dict_get_str (dict, "volname", &volname);
+ /* Trying to acquire multiple mgmt_v3 locks on local node */
+ ret = glusterd_multiple_mgmt_v3_lock (dict, MY_UUID);
if (ret) {
- /* Trying to acquire multiple mgmt_v3 locks */
- ret = glusterd_multiple_mgmt_v3_lock (dict, MY_UUID);
- if (ret) {
- gf_log ("", GF_LOG_ERROR,
- "Failed to acquire mgmt_v3 locks on localhost");
- goto out;
- }
- } else {
- ret = glusterd_mgmt_v3_lock (volname, MY_UUID, "vol");
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Unable to acquire local lock for %s", volname);
- goto out;
- }
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to acquire mgmt_v3 locks on localhost");
+ goto out;
}
*is_acquired = _gf_true;
@@ -1277,8 +1266,9 @@ out:
int
glusterd_mgmt_v3_release_peer_locks (glusterd_conf_t *conf, glusterd_op_t op,
- dict_t *dict, char **op_errstr, int npeers,
- gf_boolean_t is_acquired)
+ dict_t *dict, int32_t op_ret,
+ char **op_errstr, int npeers,
+ gf_boolean_t is_acquired)
{
int ret = -1;
int peer_cnt = 0;
@@ -1316,7 +1306,7 @@ glusterd_mgmt_v3_release_peer_locks (glusterd_conf_t *conf, glusterd_op_t op,
gf_log (this->name, GF_LOG_ERROR,
"Unlock failed on peers");
- if (args.errstr)
+ if (!op_ret && args.errstr)
*op_errstr = gf_strdup (args.errstr);
}
@@ -1339,7 +1329,6 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
dict_t *tmp_dict = NULL;
glusterd_conf_t *conf = NULL;
char *op_errstr = NULL;
- char *volname = NULL;
xlator_t *this = NULL;
gf_boolean_t is_acquired = _gf_false;
uuid_t *originator_uuid = NULL;
@@ -1434,8 +1423,9 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
ret = 0;
out:
/* UNLOCK PHASE FOR PEERS*/
- (void) glusterd_mgmt_v3_release_peer_locks (conf, op, dict, &op_errstr,
- npeers, is_acquired);
+ (void) glusterd_mgmt_v3_release_peer_locks (conf, op, dict,
+ ret, &op_errstr,
+ npeers, is_acquired);
/* SEND CLI RESPONSE */
glusterd_op_send_cli_response (op, ret, 0, req, dict, op_errstr);
@@ -1444,19 +1434,11 @@ out:
if (!is_acquired)
goto cleanup;
- ret = dict_get_str (tmp_dict, "volname", &volname);
- if (ret) {
- /* Trying to release multiple mgmt_v3 locks */
- ret = glusterd_multiple_mgmt_v3_unlock (tmp_dict, MY_UUID);
- if (ret)
- gf_log ("", GF_LOG_ERROR,
- "Failed to release mgmt_v3 locks on localhost");
- } else {
- ret = glusterd_mgmt_v3_unlock (volname, MY_UUID, "vol");
- if (ret)
- gf_log (this->name, GF_LOG_ERROR,
- "Unable to release local lock for %s", volname);
- }
+ /* Trying to release multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_unlock (tmp_dict, MY_UUID);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to release mgmt_v3 locks on localhost");
cleanup:
if (req_dict)
@@ -1483,7 +1465,6 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
dict_t *tmp_dict = NULL;
glusterd_conf_t *conf = NULL;
char *op_errstr = NULL;
- char *volname = NULL;
xlator_t *this = NULL;
gf_boolean_t is_acquired = _gf_false;
uuid_t *originator_uuid = NULL;
@@ -1537,7 +1518,7 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
/* LOCKDOWN PHASE - Acquire mgmt_v3 locks */
ret = glusterd_mgmt_v3_initiate_lockdown (conf, op, dict, &op_errstr,
- npeers, &is_acquired);
+ npeers, &is_acquired);
if (ret) {
gf_log ("", GF_LOG_ERROR, "mgmt_v3 lockdown failed.");
goto out;
@@ -1612,8 +1593,9 @@ unbarrier:
out:
/* UNLOCK PHASE FOR PEERS*/
- (void) glusterd_mgmt_v3_release_peer_locks (conf, op, dict, &op_errstr,
- npeers, is_acquired);
+ (void) glusterd_mgmt_v3_release_peer_locks (conf, op, dict,
+ ret, &op_errstr,
+ npeers, is_acquired);
/* If the commit op (snapshot taking) failed, then the error is stored
in tmp_errstr and unbarrier is called. Suppose, if unbarrier also
@@ -1638,19 +1620,11 @@ out:
if (!is_acquired)
goto cleanup;
- ret = dict_get_str (tmp_dict, "volname", &volname);
- if (ret) {
- /* Trying to release multiple mgmt_v3 locks */
- ret = glusterd_multiple_mgmt_v3_unlock (tmp_dict, MY_UUID);
- if (ret)
- gf_log ("", GF_LOG_ERROR,
- "Failed to release mgmt_v3 locks on localhost");
- } else {
- ret = glusterd_mgmt_v3_unlock (volname, MY_UUID, "vol");
- if (ret)
- gf_log (this->name, GF_LOG_ERROR,
- "Unable to release local lock for %s", volname);
- }
+ /* Trying to release multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_unlock (tmp_dict, MY_UUID);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to release mgmt_v3 locks on localhost");
cleanup:
if (req_dict)
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
index 05ae2fc12..7960e84ac 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
@@ -251,7 +251,7 @@ glusterd_snapshot_restore_prevalidate (dict_t *dict, char **op_errstr,
ret = dict_get_str (dict, "snapname", &snapname);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Failed to get "
- "snap name");
+ "snap name");
goto out;
}
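For reference, the behavior of the core primitives that the per-entity wrappers build on, as implemented in glusterd-locks.c above (a behavioral sketch; uuid_generate () from libuuid is assumed for the owner ids):

    int32_t ret = -1;
    uuid_t peer1, peer2;
    uuid_generate (peer1);
    uuid_generate (peer2);

    ret = glusterd_mgmt_v3_lock ("vol-a", peer1, "vol");    /* 0: key "vol-a_vol" owned by peer1 */
    ret = glusterd_mgmt_v3_lock ("vol-a", peer2, "vol");    /* -1: lock already held by peer1 */
    ret = glusterd_mgmt_v3_unlock ("vol-a", peer2, "vol");  /* -1: lock owner mismatch */
    ret = glusterd_mgmt_v3_unlock ("vol-a", peer1, "vol");  /* 0: lock released */
    ret = glusterd_mgmt_v3_lock ("vol-a", peer1, "brick");  /* -1: "brick" not in valid_types */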