author     Avra Sengupta <asengupt@redhat.com>    2013-10-09 22:13:34 +0530
committer  Avra Sengupta <asengupt@redhat.com>    2013-10-10 14:35:47 +0530
commit     ff10de25c08e3f5af85f5154a71750c21c80eab2 (patch)
tree       a951d472305cef817738cf3a5e67595d2fc83062
parent     b0ffb89c34fc26828861443d39c379b058b1f01d (diff)
glusterd/Jarvis/locks: Adding support for multiple volume locks
Also linking snap create command to Jarvis.

Change-Id: If2ed29be072e10d0b0bd271d53e48eeaa6501ed7
Signed-off-by: Avra Sengupta <asengupt@redhat.com>
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-jarvis-handler.c |  40
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-jarvis.c         | 161
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-jarvis.h         |   4
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-locks.c          | 126
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-locks.h          |   6
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapshot.c       |  14
6 files changed, 246 insertions, 105 deletions
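
Note on the new multi-volume convention: the helpers added in glusterd-locks.c expect the dict to carry an integer "volcount" plus 1-based "volname1" .. "volname<N>" keys, and they release any volumes already locked if a later lock fails. Below is a minimal caller sketch under that assumption; the wrapper function and volume names are hypothetical, while the dict keys, MY_UUID, and the lock/unlock helpers are the ones this patch introduces.

#include "dict.h"            /* dict_new / dict_set_* / dict_unref */
#include "glusterd.h"        /* MY_UUID */
#include "glusterd-locks.h"  /* glusterd_multiple_volumes_(un)lock */

/* Hypothetical caller: lock two volumes in one transaction. */
static int
lock_two_volumes_example (void)
{
        int     ret  = -1;
        dict_t *dict = dict_new ();

        if (!dict)
                goto out;

        /* Keys are 1-based: volname1 .. volname<volcount> */
        ret = dict_set_int32 (dict, "volcount", 2);
        if (ret)
                goto out;
        ret = dict_set_str (dict, "volname1", "vol_a");
        if (ret)
                goto out;
        ret = dict_set_str (dict, "volname2", "vol_b");
        if (ret)
                goto out;

        /* Locks vol_a, then vol_b; if any lock fails, the volumes
         * already locked by this call are released and -1 returned. */
        ret = glusterd_multiple_volumes_lock (dict, MY_UUID);
        if (ret)
                goto out;

        /* ... transaction work on both volumes ... */

        ret = glusterd_multiple_volumes_unlock (dict, MY_UUID);

out:
        if (dict)
                dict_unref (dict);
        return ret;
}

The single-volume path through glusterd_volume_lock/unlock remains; as the handler changes below show, the dict-driven path is taken only when the plain "volname" key is absent.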
diff --git a/xlators/mgmt/glusterd/src/glusterd-jarvis-handler.c b/xlators/mgmt/glusterd/src/glusterd-jarvis-handler.c
index f07b54ac6..0e1961e7c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-jarvis-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-jarvis-handler.c
@@ -98,19 +98,21 @@ glusterd_handle_vol_lock_fn (rpcsvc_request_t *req)
}
ret = dict_get_str (dict, "volname", &volname);
- if (ret)
- gf_log (this->name, GF_LOG_ERROR,
- "Unable to acquire volname");
- else {
- ret = glusterd_volume_lock (volname, lock_req.uuid);
+ if (ret) {
+ /* Trying to acquire volume locks on multiple volumes */
+ ret = glusterd_multiple_volumes_lock (dict, MY_UUID);
+ if (ret)
+ gf_log ("", GF_LOG_ERROR,
+ "Failed to acquire volume locks on localhost");
+ } else {
+ ret = glusterd_volume_lock (volname, MY_UUID);
if (ret)
gf_log (this->name, GF_LOG_ERROR,
- "Unable to acquire lock for %s",
- volname);
-
- glusterd_jarvis_vol_lock_send_resp (req, ret);
+ "Unable to acquire local lock for %s", volname);
}
+ glusterd_jarvis_vol_lock_send_resp (req, ret);
+
out:
if (dict)
dict_unref (dict);
@@ -688,19 +690,21 @@ glusterd_handle_vol_unlock_fn (rpcsvc_request_t *req)
}
ret = dict_get_str (dict, "volname", &volname);
- if (ret)
- gf_log (this->name, GF_LOG_ERROR,
- "Unable to acquire volname");
- else {
- ret = glusterd_volume_unlock (volname, unlock_req.uuid);
+ if (ret) {
+ /* Trying to release volume locks on multiple volumes */
+ ret = glusterd_multiple_volumes_unlock (dict, MY_UUID);
+ if (ret)
+ gf_log ("", GF_LOG_ERROR,
+ "Failed to release volume locks on localhost");
+ } else {
+ ret = glusterd_volume_unlock (volname, MY_UUID);
if (ret)
gf_log (this->name, GF_LOG_ERROR,
- "Unable to release lock for %s",
- volname);
-
- glusterd_jarvis_vol_unlock_send_resp (req, ret);
+ "Unable to acquire local lock for %s", volname);
}
+ glusterd_jarvis_vol_unlock_send_resp (req, ret);
+
out:
if (dict)
dict_unref (dict);
diff --git a/xlators/mgmt/glusterd/src/glusterd-jarvis.c b/xlators/mgmt/glusterd/src/glusterd-jarvis.c
index 5d921c12f..84dc6c7a7 100644
--- a/xlators/mgmt/glusterd/src/glusterd-jarvis.c
+++ b/xlators/mgmt/glusterd/src/glusterd-jarvis.c
@@ -249,11 +249,12 @@ out:
int
glusterd_jarvis_initiate_lockdown (glusterd_conf_t *conf, glusterd_op_t op,
- dict_t *dict, char *volname, char **op_errstr,
- int npeers, gf_boolean_t *is_acquired)
+ dict_t *dict, char **op_errstr, int npeers,
+ gf_boolean_t *is_acquired)
{
int ret = -1;
int peer_cnt = 0;
+ char *volname = NULL;
uuid_t peer_uuid = {0};
xlator_t *this = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
@@ -263,21 +264,32 @@ glusterd_jarvis_initiate_lockdown (glusterd_conf_t *conf, glusterd_op_t op,
this = THIS;
peers = &conf->xaction_peers;
- if (!npeers) {
- ret = 0;
- goto out;
- }
-
- /* Volume lock on local node */
- ret = glusterd_volume_lock (volname, MY_UUID);
+ /* Volume(s) lock on local node */
+ ret = dict_get_str (dict, "volname", &volname);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Unable to acquire local lock for %s", volname);
- goto out;
+ /* Trying to acquire volume locks on multiple volumes */
+ ret = glusterd_multiple_volumes_lock (dict, MY_UUID);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Failed to acquire volume locks on localhost");
+ goto out;
+ }
+ } else {
+ ret = glusterd_volume_lock (volname, MY_UUID);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to acquire local lock for %s", volname);
+ goto out;
+ }
}
*is_acquired = _gf_true;
+ if (!npeers) {
+ ret = 0;
+ goto out;
+ }
+
/* Sending Volume lock req to other nodes in the cluster */
synctask_barrier_init((&args));
peer_cnt = 0;
@@ -417,11 +429,6 @@ glusterd_jarvis_pre_validate (glusterd_conf_t *conf, glusterd_op_t op,
this = THIS;
peers = &conf->xaction_peers;
- if (!npeers) {
- ret = 0;
- goto out;
- }
-
rsp_dict = dict_new ();
if (!rsp_dict) {
gf_log (this->name, GF_LOG_ERROR,
@@ -454,6 +461,11 @@ glusterd_jarvis_pre_validate (glusterd_conf_t *conf, glusterd_op_t op,
dict_unref (rsp_dict);
rsp_dict = NULL;
+ if (!npeers) {
+ ret = 0;
+ goto out;
+ }
+
/* Sending Pre Validation req to other nodes in the cluster */
synctask_barrier_init((&args));
peer_cnt = 0;
@@ -613,11 +625,6 @@ glusterd_jarvis_brick_op (glusterd_conf_t *conf, glusterd_op_t op,
this = THIS;
peers = &conf->xaction_peers;
- if (!npeers) {
- ret = 0;
- goto out;
- }
-
rsp_dict = dict_new ();
if (!rsp_dict) {
gf_log (this->name, GF_LOG_ERROR,
@@ -650,6 +657,11 @@ glusterd_jarvis_brick_op (glusterd_conf_t *conf, glusterd_op_t op,
dict_unref (rsp_dict);
rsp_dict = NULL;
+ if (!npeers) {
+ ret = 0;
+ goto out;
+ }
+
/* Sending brick op req to other nodes in the cluster */
synctask_barrier_init((&args));
peer_cnt = 0;
@@ -779,11 +791,6 @@ glusterd_jarvis_commit (glusterd_conf_t *conf, glusterd_op_t op,
this = THIS;
peers = &conf->xaction_peers;
- if (!npeers) {
- ret = 0;
- goto out;
- }
-
rsp_dict = dict_new ();
if (!rsp_dict) {
gf_log (this->name, GF_LOG_ERROR,
@@ -816,6 +823,11 @@ glusterd_jarvis_commit (glusterd_conf_t *conf, glusterd_op_t op,
dict_unref (rsp_dict);
rsp_dict = NULL;
+ if (!npeers) {
+ ret = 0;
+ goto out;
+ }
+
/* Sending commit req to other nodes in the cluster */
synctask_barrier_init((&args));
peer_cnt = 0;
@@ -945,11 +957,6 @@ glusterd_jarvis_post_validate (glusterd_conf_t *conf, glusterd_op_t op,
this = THIS;
peers = &conf->xaction_peers;
- if (!npeers) {
- ret = 0;
- goto out;
- }
-
rsp_dict = dict_new ();
if (!rsp_dict) {
gf_log (this->name, GF_LOG_ERROR,
@@ -982,6 +989,11 @@ glusterd_jarvis_post_validate (glusterd_conf_t *conf, glusterd_op_t op,
dict_unref (rsp_dict);
rsp_dict = NULL;
+ if (!npeers) {
+ ret = 0;
+ goto out;
+ }
+
/* Sending Post Validation req to other nodes in the cluster */
synctask_barrier_init((&args));
peer_cnt = 0;
@@ -1097,8 +1109,8 @@ out:
int
glusterd_jarvis_release_peer_locks (glusterd_conf_t *conf, glusterd_op_t op,
- dict_t *dict, char *volname, char **op_errstr,
- int npeers, gf_boolean_t is_acquired)
+ dict_t *dict, char **op_errstr, int npeers,
+ gf_boolean_t is_acquired)
{
int ret = -1;
int peer_cnt = 0;
@@ -1111,16 +1123,16 @@ glusterd_jarvis_release_peer_locks (glusterd_conf_t *conf, glusterd_op_t op,
this = THIS;
peers = &conf->xaction_peers;
- if (!npeers) {
- ret = 0;
- goto out;
- }
-
/* If the lock has not been held during this
* transaction, do not send unlock requests */
if (!is_acquired)
goto out;
+ if (!npeers) {
+ ret = 0;
+ goto out;
+ }
+
/* Sending Volume unlock req to other nodes in the cluster */
synctask_barrier_init((&args));
peer_cnt = 0;
@@ -1155,9 +1167,9 @@ glusterd_jarvis_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
int ret = -1;
int npeers = 0;
dict_t *req_dict = NULL;
+ dict_t *tmp_dict = NULL;
glusterd_conf_t *conf = NULL;
char *op_errstr = NULL;
- char *tmp = NULL;
char *volname = NULL;
xlator_t *this = NULL;
gf_boolean_t is_acquired = _gf_false;
@@ -1187,28 +1199,23 @@ glusterd_jarvis_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
goto out;
}
+ /* Use a copy for the local unlock, as the cli response will be sent before
+ * the unlock and the volname in the dict might be removed */
+ tmp_dict = dict_new();
+ if (!tmp_dict) {
+ gf_log ("", GF_LOG_ERROR, "Unable to create dict");
+ goto out;
+ }
+ dict_copy (dict, tmp_dict);
/* BUILD PEERS LIST */
INIT_LIST_HEAD (&conf->xaction_peers);
npeers = gd_build_peers_list (&conf->peers, &conf->xaction_peers, op);
- ret = dict_get_str (dict, "volname", &tmp);
- if (ret) {
- gf_log ("", GF_LOG_DEBUG, "Failed to get volume "
- "name");
- goto out;
- } else {
- /* Use a copy of volname, as cli response will be
- * sent before the unlock, and the volname in the
- * dict, might be removed */
- volname = gf_strdup (tmp);
- if (!volname)
- goto out;
- }
-
- /* LOCKDOWN PHASE */
- ret = glusterd_jarvis_initiate_lockdown (conf, op, dict, volname,
- &op_errstr, npeers, &is_acquired);
+ /* LOCKDOWN PHASE - Based on the number of volumes either single
+ * or multiple volume locks are acquired */
+ ret = glusterd_jarvis_initiate_lockdown (conf, op, dict, &op_errstr,
+ npeers, &is_acquired);
if (ret) {
gf_log ("", GF_LOG_ERROR, "Volume lockdown failed.");
goto out;
@@ -1259,35 +1266,37 @@ glusterd_jarvis_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
ret = 0;
out:
/* UNLOCK PHASE FOR PEERS*/
- (void) glusterd_jarvis_release_peer_locks (conf, op, dict, volname,
- &op_errstr, npeers, is_acquired);
+ (void) glusterd_jarvis_release_peer_locks (conf, op, dict, &op_errstr,
+ npeers, is_acquired);
/* SEND CLI RESPONSE */
glusterd_op_send_cli_response (op, ret, 0, req, dict, op_errstr);
- /* Volume unlock on local node */
- ret = glusterd_volume_unlock (volname, MY_UUID);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Unable to release local lock for %s", volname);
- if (op_errstr == NULL) {
- ret = gf_asprintf (&op_errstr,
- "Failed to release lock "
- "on localhost");
- if (ret == -1)
- op_errstr = NULL;
+ /* LOCAL VOLUME(S) UNLOCK */
+ if (!is_acquired)
+ goto cleanup;
- ret = -1;
- }
- goto out;
+ ret = dict_get_str (tmp_dict, "volname", &volname);
+ if (ret) {
+ /* Trying to release volume locks on multiple volumes */
+ ret = glusterd_multiple_volumes_unlock (tmp_dict, MY_UUID);
+ if (ret)
+ gf_log ("", GF_LOG_ERROR,
+ "Failed to release volume locks on localhost");
+ } else {
+ ret = glusterd_volume_unlock (volname, MY_UUID);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to release local lock for %s", volname);
}
- if (volname)
- GF_FREE (volname);
-
+cleanup:
if (req_dict)
dict_unref (req_dict);
+ if (tmp_dict)
+ dict_unref (tmp_dict);
+
if (op_errstr) {
GF_FREE (op_errstr);
op_errstr = NULL;
diff --git a/xlators/mgmt/glusterd/src/glusterd-jarvis.h b/xlators/mgmt/glusterd/src/glusterd-jarvis.h
index 032632e86..d9e4f0af1 100644
--- a/xlators/mgmt/glusterd/src/glusterd-jarvis.h
+++ b/xlators/mgmt/glusterd/src/glusterd-jarvis.h
@@ -31,4 +31,8 @@ int32_t
gd_jarvis_post_validate_fn (glusterd_op_t op, dict_t *dict,
char **op_errstr, dict_t *rsp_dict);
+int32_t
+glusterd_jarvis_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
+ dict_t *dict);
+
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.c b/xlators/mgmt/glusterd/src/glusterd-locks.c
index c09ba33a7..f0658da3a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.c
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.c
@@ -81,6 +81,132 @@ out:
}
int32_t
+glusterd_multiple_volumes_unlock (dict_t *dict, uuid_t uuid)
+{
+ int32_t ret = -1;
+ int32_t op_ret = 0;
+ int32_t i = -1;
+ int32_t volcount = -1;
+ char volname_buf[PATH_MAX] = "";
+ char *volname = NULL;
+
+ if (!dict) {
+ gf_log ("", GF_LOG_ERROR, "dict is null.");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, "volcount", &volcount);
+ if (ret) {
+ gf_log ("", GF_LOG_DEBUG, "Failed to get volcount"
+ "name");
+ goto out;
+ }
+
+ /* Unlocking one volume after another */
+ for (i = 1; i <= volcount; i++) {
+ ret = snprintf (volname_buf, sizeof(volname_buf) - 1,
+ "volname%d", i);
+ volname_buf[ret] = '\0';
+
+ ret = dict_get_str (dict, volname_buf, &volname);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to get %s Volcount = %d",
+ volname_buf, volcount);
+ goto out;
+ }
+
+ ret = glusterd_volume_unlock (volname, uuid);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Failed to release lock for %s. ", volname);
+ op_ret = ret;
+ }
+ }
+
+ ret = op_ret;
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
+glusterd_multiple_volumes_lock (dict_t *dict, uuid_t uuid)
+{
+ int32_t ret = -1;
+ int32_t i = -1;
+ int32_t volcount = -1;
+ char volname_buf[PATH_MAX] = "";
+ char *volname = NULL;
+ int32_t locked_volcount = 0;
+
+ if (!dict) {
+ gf_log ("", GF_LOG_ERROR, "dict is null.");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, "volcount", &volcount);
+ if (ret) {
+ gf_log ("", GF_LOG_DEBUG, "Failed to get volcount"
+ "name");
+ goto out;
+ }
+
+ /* Locking one volume after another */
+ for (i = 1; i <= volcount; i++) {
+ ret = snprintf (volname_buf, sizeof(volname_buf) - 1,
+ "volname%d", i);
+ volname_buf[ret] = '\0';
+
+ ret = dict_get_str (dict, volname_buf, &volname);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to get %s Volcount = %d",
+ volname_buf, volcount);
+ goto out;
+ }
+
+ ret = glusterd_volume_lock (volname, uuid);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Failed to acquire lock for %s. "
+ "Unlocking other volumes locked "
+ "by this transaction", volname);
+ break;
+ }
+ locked_volcount ++;
+ }
+
+ /* If we failed to lock one volume, unlock others and return failure */
+ if (volcount != locked_volcount) {
+ for (i = 1; i <= locked_volcount; i++) {
+ ret = snprintf (volname_buf, sizeof(volname_buf) - 1,
+ "volname%d", i);
+ volname_buf[ret] = '\0';
+
+ ret = dict_get_str (dict, volname_buf, &volname);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Unable to get %s lockd_volcount = %d",
+ volname_buf, volcount);
+ goto out;
+ }
+
+ ret = glusterd_volume_unlock (volname, uuid);
+ if (ret)
+ gf_log ("", GF_LOG_ERROR,
+ "Failed to release lock for %s.",
+ volname);
+ }
+ ret = -1;
+ }
+
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
glusterd_volume_lock (char *volname, uuid_t uuid)
{
int32_t ret = -1;
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.h b/xlators/mgmt/glusterd/src/glusterd-locks.h
index 2a8cc20ed..956ae7565 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.h
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.h
@@ -35,4 +35,10 @@ glusterd_volume_lock (char *volname, uuid_t uuid);
int32_t
glusterd_volume_unlock (char *volname, uuid_t uuid);
+int32_t
+glusterd_multiple_volumes_lock (dict_t *dict, uuid_t uuid);
+
+int32_t
+glusterd_multiple_volumes_unlock (dict_t *dict, uuid_t uuid);
+
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
index 0b84a5075..044c4ceed 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
@@ -32,6 +32,7 @@
#include "glusterd-store.h"
#include "run.h"
#include "glusterd-volgen.h"
+#include "glusterd-jarvis.h"
#include "syscall.h"
#include "cli1-xdr.h"
@@ -345,7 +346,6 @@ glusterd_handle_snapshot_fn (rpcsvc_request_t *req)
dict_t *dict = NULL;
gf_cli_req cli_req = {{0},};
glusterd_op_t cli_op = GD_OP_SNAP;
- char operation[256] = {0,};
int type = 0;
glusterd_conf_t *priv = NULL;
char *host_uuid = NULL;
@@ -406,20 +406,12 @@ glusterd_handle_snapshot_fn (rpcsvc_request_t *req)
switch (type) {
case GF_SNAP_OPTION_TYPE_CREATE:
- strncpy (operation, "create", sizeof (operation));
+ ret = glusterd_jarvis_initiate_all_phases (req, cli_op, dict);
break;
}
- //ret = glusterd_op_begin_synctask (req, cli_op, dict);
-
out:
- /* Temporary Will be removed by the glusterd syncop framework */
- ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
- dict, err_str);
-
-/* Commenting out the code coz the syncop framework
- should take care of this.
if (ret) {
if (err_str[0] == '\0')
snprintf (err_str, sizeof (err_str),
@@ -427,7 +419,7 @@ out:
ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
dict, err_str);
}
-*/
+
return ret;
}