author    Atin Mukherjee <amukherj@redhat.com>    2016-06-09 18:22:43 +0530
committer Kaushal M <kaushal@redhat.com>          2016-06-10 00:30:57 -0700
commit    5016cc548d4368b1c180459d6fa8ae012bb21d6e (patch)
tree      165d66e6007ed247ff4ce0d9e175b72e258e3242
parent    c62493efadbcf5085bbd65a409eed9391301c154 (diff)
glusterd: fail volume delete if one of the nodes is down
Deleting a volume on a cluster while one of the nodes is down is buggy, because once that node comes back up it resyncs the same volume into the cluster. Until the soft-delete feature tracked in http://review.gluster.org/12963 is available, this change acts as a safeguard that blocks the volume deletion.

Change-Id: I9c13869c4a7e7a947f88842c6dc6f231c0eeda6c
BUG: 1344407
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: http://review.gluster.org/14681
Smoke: Gluster Build System <jenkins@build.gluster.com>
CentOS-regression: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Kaushal M <kaushal@redhat.com>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
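In practice the guard surfaces at the CLI: a delete attempted while any peer's glusterd is down is now rejected at the staging step. A minimal sketch of the expected interaction (the volume name vol1 and the exact CLI failure line are illustrative; only the error text "Some of the peers are down" comes from this patch):

    # on node 1, with glusterd stopped on node 2
    gluster volume stop vol1
    gluster volume delete vol1
    # expected to fail at staging, e.g.:
    #   volume delete: vol1: failed: Some of the peers are down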
-rwxr-xr-x  tests/bugs/glusterd/bug-1344407-volume-delete-on-node-down.t  20
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-peer-utils.c                29
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-peer-utils.h                 3
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-ops.c                 6
4 files changed, 58 insertions(+), 0 deletions(-)
diff --git a/tests/bugs/glusterd/bug-1344407-volume-delete-on-node-down.t b/tests/bugs/glusterd/bug-1344407-volume-delete-on-node-down.t
new file mode 100755
index 00000000000..623b37c7ac0
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1344407-volume-delete-on-node-down.t
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+TEST launch_cluster 2;
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0
+TEST $CLI_1 volume start $V0
+
+TEST kill_glusterd 2
+TEST ! $CLI_1 volume delete $V0
+
+cleanup;
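The new regression test follows the conventions of the tests/ harness: it launches a two-node cluster, kills glusterd on the second node, and asserts that the delete fails. A sketch of running it standalone from a glusterfs source checkout (the prove invocation is the usual convention for these .t files, not something this patch adds):

    # from the root of the glusterfs source tree, as root
    prove -vf tests/bugs/glusterd/bug-1344407-volume-delete-on-node-down.t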
diff --git a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
index 607ad3d38be..4131296ef12 100644
--- a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
@@ -394,6 +394,35 @@ gd_peer_uuid_str (glusterd_peerinfo_t *peerinfo)
}
gf_boolean_t
+glusterd_are_all_peers_up ()
+{
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ gf_boolean_t peers_up = _gf_false;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO ("glusterd", this, out);
+
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO (this->name, conf, out);
+
+ rcu_read_lock ();
+ cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
+ if (!peerinfo->connected) {
+ rcu_read_unlock ();
+ goto out;
+ }
+ }
+ rcu_read_unlock ();
+
+ peers_up = _gf_true;
+
+out:
+ return peers_up;
+}
+
+gf_boolean_t
glusterd_are_vol_all_peers_up (glusterd_volinfo_t *volinfo,
struct cds_list_head *peers,
char **down_peerstr)
diff --git a/xlators/mgmt/glusterd/src/glusterd-peer-utils.h b/xlators/mgmt/glusterd/src/glusterd-peer-utils.h
index bd30e335f69..9332cf2ea02 100644
--- a/xlators/mgmt/glusterd/src/glusterd-peer-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-peer-utils.h
@@ -43,6 +43,9 @@ char*
gd_peer_uuid_str (glusterd_peerinfo_t *peerinfo);
gf_boolean_t
+glusterd_are_all_peers_up ();
+
+gf_boolean_t
glusterd_are_vol_all_peers_up (glusterd_volinfo_t *volinfo,
struct cds_list_head *peers,
char **down_peerstr);
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 7b833a9a737..60577505cfc 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -1781,6 +1781,12 @@ glusterd_op_stage_delete_volume (dict_t *dict, char **op_errstr)
goto out;
}
+ if (!glusterd_are_all_peers_up ()) {
+ ret = -1;
+ snprintf (msg, sizeof(msg), "Some of the peers are down");
+ goto out;
+ }
+
ret = 0;
out:
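Since glusterd_are_all_peers_up() only checks peerinfo->connected, the delete becomes possible again as soon as every peer reconnects. A sketch of the recovery path an admin would follow (vol1 is illustrative, the peer-status output is abridged, and the systemctl invocation assumes a systemd-based node):

    # bring the downed glusterd back, then confirm every peer is connected
    systemctl start glusterd        # on the node that was down
    gluster peer status             # each peer should report:
                                    #   State: Peer in Cluster (Connected)

    # with all peers up, the staging check passes and the delete proceeds
    gluster volume delete vol1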