author     Gaurav Yadav <gyadav@redhat.com>       2017-03-16 14:56:39 +0530
committer  Atin Mukherjee <amukherj@redhat.com>   2017-03-31 21:53:10 -0400
commit     1c92f83ec041176ad7c42ef83525cda7d3eda3c5
tree       1826ab841b94ceec21be71297b8cf4d1e5a09c0a
parent     4c3aa910e7913c34db24f864a33dfb6d1e0234a4
glusterd : Disallow peer detach if snapshot bricks exist on it
Problem:
- Deploy gluster on 2 nodes, one brick each, one replicated volume
- Create a snapshot
- Lose one server
- Add a replacement peer and a new brick with a new IP address
- replace-brick the missing brick onto the new server (wait for replication to finish)
- peer detach the old server
After the above steps, glusterd fails to restart.

Solution: With this fix, peer detach reports an error: "N2 is part of existing snapshots. Remove those snapshots before proceeding". This forces the user either to keep that peer or to delete all snapshots first.

Change-Id: I3699afb9b2a5f915768b77f885e783bd9b51818c
BUG: 1322145
Signed-off-by: Gaurav Yadav <gyadav@redhat.com>
Reviewed-on: https://review.gluster.org/16907
Smoke: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
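To make the new behaviour concrete, below is a minimal standalone C sketch of the guard this patch introduces; the names and types (snap_model, peer_hosts_snap_brick) are illustrative only, not glusterd's actual structures. It models the scenario from the problem description: one snapshot with bricks on peers N1 and N2, and an attempt to detach N2.

/* Minimal standalone model of the detach guard (illustrative types, not
 * glusterd's): refuse to detach a peer that still hosts snapshot bricks. */
#include <stdio.h>
#include <string.h>

struct snap_model {
        const char *name;
        const char *brick_peers[8];   /* peer that hosts each brick */
        int         brick_count;
};

/* Returns 1 if the peer hosts at least one snapshot brick, 0 otherwise. */
static int
peer_hosts_snap_brick (const struct snap_model *snaps, int nsnaps,
                       const char *peer)
{
        for (int s = 0; s < nsnaps; s++)
                for (int b = 0; b < snaps[s].brick_count; b++)
                        if (strcmp (snaps[s].brick_peers[b], peer) == 0)
                                return 1;
        return 0;
}

int
main (void)
{
        struct snap_model snaps[] = {
                { "snap1", { "N1", "N2" }, 2 },
        };

        const char *detach_target = "N2";

        if (peer_hosts_snap_brick (snaps, 1, detach_target))
                printf ("%s is part of existing snapshots. Remove those "
                        "snapshots before proceeding\n", detach_target);
        else
                printf ("peer detach %s can proceed\n", detach_target);
        return 0;
}

The actual implementation, shown in the diff below, walks priv->snapshots and each snapshot volume's brick list, comparing brickinfo->uuid against the uuid of the peer being detached.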
Diffstat (limited to 'xlators/mgmt')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c | 19
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c   | 36
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.h   |  4
3 files changed, 57 insertions(+), 2 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index a630d55d9a3..8368354e65f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -1318,6 +1318,8 @@ __glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
int flags = 0;
glusterd_volinfo_t *volinfo = NULL;
glusterd_volinfo_t *tmp = NULL;
+ glusterd_snap_t *snapinfo = NULL;
+ glusterd_snap_t *tmpsnap = NULL;
this = THIS;
GF_ASSERT (this);
@@ -1402,14 +1404,21 @@ __glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
*/
cds_list_for_each_entry_safe (volinfo, tmp, &priv->volumes,
vol_list) {
- ret = glusterd_friend_contains_vol_bricks (volinfo,
- uuid);
+ ret = glusterd_friend_contains_vol_bricks (volinfo, uuid);
if (ret == 1) {
op_errno = GF_DEPROBE_BRICK_EXIST;
goto out;
}
}
+ cds_list_for_each_entry_safe (snapinfo, tmpsnap, &priv->snapshots,
+ snap_list) {
+ ret = glusterd_friend_contains_snap_bricks (snapinfo, uuid);
+ if (ret == 1) {
+ op_errno = GF_DEPROBE_SNAP_BRICK_EXIST;
+ goto out;
+ }
+ }
if (!(flags & GF_CLI_FLAG_OP_FORCE)) {
if (glusterd_is_any_volume_in_server_quorum (this) &&
!does_gd_meet_server_quorum (this)) {
@@ -3954,6 +3963,12 @@ set_deprobe_error_str (int op_ret, int op_errno, char *op_errstr, char *errstr,
"%s exist in cluster", hostname);
break;
+ case GF_DEPROBE_SNAP_BRICK_EXIST:
+ snprintf (errstr, len, "%s is part of existing "
+ "snapshots. Remove those snapshots "
+ "before proceeding", hostname);
+ break;
+
case GF_DEPROBE_FRIEND_DOWN:
snprintf (errstr, len, "One of the peers is "
"probably down. Check with "
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index fbec4fcafb8..89bfa3d7358 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -7777,6 +7777,42 @@ glusterd_friend_contains_vol_bricks (glusterd_volinfo_t *volinfo,
return ret;
}
+/* Checks if the given peer hosts bricks belonging to the given snapshot's
+ * volumes. Returns,
+ * 1 - if peer hosts at least 1 snapshot brick
+ * 0 - if peer hosts no snapshot bricks
+ * -1 - on validation failure (snapinfo is NULL)
+ */
+int
+glusterd_friend_contains_snap_bricks (glusterd_snap_t *snapinfo,
+ uuid_t friend_uuid)
+{
+ int ret = -1;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ int count = 0;
+
+ GF_VALIDATE_OR_GOTO ("glusterd", snapinfo, out);
+
+ cds_list_for_each_entry (volinfo, &snapinfo->volumes, vol_list) {
+ cds_list_for_each_entry (brickinfo, &volinfo->bricks,
+ brick_list) {
+ if (!gf_uuid_compare (brickinfo->uuid, friend_uuid)) {
+ count++;
+ }
+ }
+ }
+
+ if (count > 0)
+ ret = 1;
+ else
+ ret = 0;
+
+out:
+ gf_msg_debug (THIS->name, 0, "Returning %d", ret);
+ return ret;
+}
+
/* Cleanup the stale volumes left behind in the cluster. The volumes which are
* contained completely within the detached peer are stale with respect to the
* cluster.
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index db13c4c8ad4..1a0376da5cd 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -411,6 +411,10 @@ glusterd_is_brick_decommissioned (glusterd_volinfo_t *volinfo, char *hostname,
int
glusterd_friend_contains_vol_bricks (glusterd_volinfo_t *volinfo,
uuid_t friend_uuid);
+
+int
+glusterd_friend_contains_snap_bricks (glusterd_snap_t *snapinfo,
+ uuid_t friend_uuid);
int
glusterd_friend_remove_cleanup_vols (uuid_t uuid);
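
A closing note on the helper declared above: in __glusterd_handle_cli_deprobe only a return value of 1 blocks the detach; 0 and -1 (validation failure) fall through. The tiny standalone mock below, using hypothetical names and types rather than glusterd's, illustrates that contract.

#include <stdio.h>

/* Hypothetical stand-in mirroring the helper's return contract:
 * -1 on invalid input, 1 if the peer owns a brick, 0 otherwise. */
static int
contains_snap_bricks_mock (const int *brick_owners, int nbricks, int peer_id)
{
        if (!brick_owners)
                return -1;                      /* validation failure */
        for (int i = 0; i < nbricks; i++)
                if (brick_owners[i] == peer_id)
                        return 1;               /* at least one brick on peer */
        return 0;                               /* no bricks on peer */
}

int
main (void)
{
        int owners[] = { 1, 2 };

        /* Mirrors the "if (ret == 1)" check in the deprobe handler above. */
        printf ("%d %d %d\n",
                contains_snap_bricks_mock (owners, 2, 2),   /* 1: block detach */
                contains_snap_bricks_mock (owners, 2, 3),   /* 0: allow detach */
                contains_snap_bricks_mock (NULL, 0, 2));    /* -1 */
        return 0;
}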