From 1c92f83ec041176ad7c42ef83525cda7d3eda3c5 Mon Sep 17 00:00:00 2001
From: Gaurav Yadav
Date: Thu, 16 Mar 2017 14:56:39 +0530
Subject: glusterd: Disallow peer detach if snapshot bricks exist on it

Problem:
- Deploy gluster on 2 nodes, one brick each, one volume replicated.
- Create a snapshot.
- Lose one server.
- Add a replacement peer and a new brick with a new IP address.
- replace-brick the missing brick onto the new server (wait for
  replication to finish).
- peer detach the old server.
- After the above steps, glusterd fails to restart.

Solution:
With this fix, peer detach fails with an error: "N2 is part of
existing snapshots. Remove those snapshots before proceeding". This
forces the user either to keep that peer or to delete all snapshots
that reference its bricks.

Change-Id: I3699afb9b2a5f915768b77f885e783bd9b51818c
BUG: 1322145
Signed-off-by: Gaurav Yadav
Reviewed-on: https://review.gluster.org/16907
Smoke: Gluster Build System
Reviewed-by: Atin Mukherjee
NetBSD-regression: NetBSD Build System
CentOS-regression: Gluster Build System
---
 xlators/mgmt/glusterd/src/glusterd-handler.c | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

(limited to 'xlators/mgmt/glusterd/src/glusterd-handler.c')

diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index a630d55d9a3..8368354e65f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -1318,6 +1318,8 @@ __glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
         int flags = 0;
         glusterd_volinfo_t *volinfo = NULL;
         glusterd_volinfo_t *tmp = NULL;
+        glusterd_snap_t *snapinfo = NULL;
+        glusterd_snap_t *tmpsnap = NULL;
 
         this = THIS;
         GF_ASSERT (this);
@@ -1402,14 +1404,21 @@
          */
         cds_list_for_each_entry_safe (volinfo, tmp, &priv->volumes,
                                       vol_list) {
-                ret = glusterd_friend_contains_vol_bricks (volinfo,
-                                                           uuid);
+                ret = glusterd_friend_contains_vol_bricks (volinfo, uuid);
                 if (ret == 1) {
                         op_errno = GF_DEPROBE_BRICK_EXIST;
                         goto out;
                 }
         }
 
+        cds_list_for_each_entry_safe (snapinfo, tmpsnap, &priv->snapshots,
+                                      snap_list) {
+                ret = glusterd_friend_contains_snap_bricks (snapinfo, uuid);
+                if (ret == 1) {
+                        op_errno = GF_DEPROBE_SNAP_BRICK_EXIST;
+                        goto out;
+                }
+        }
         if (!(flags & GF_CLI_FLAG_OP_FORCE)) {
                 if (glusterd_is_any_volume_in_server_quorum (this) &&
                     !does_gd_meet_server_quorum (this)) {
@@ -3954,6 +3963,12 @@ set_deprobe_error_str (int op_ret, int op_errno, char *op_errstr, char *errstr,
                           "%s exist in cluster", hostname);
                 break;
 
+        case GF_DEPROBE_SNAP_BRICK_EXIST:
+                snprintf (errstr, len, "%s is part of existing "
+                          "snapshots. Remove those snapshots "
+                          "before proceeding", hostname);
+                break;
+
         case GF_DEPROBE_FRIEND_DOWN:
                 snprintf (errstr, len, "One of the peers is "
                           "probably down. Check with "
--
cgit
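
For illustration, a rough sketch of the user-visible behavior after this
patch. The hostnames (N1, N2) and snapshot name (snap1) are hypothetical,
and the CLI output shapes are approximated, not taken from the patch:

    # On the surviving peer N1, a snapshot still references bricks on N2.
    $ gluster snapshot list
    snap1

    # Detach is now refused while snapshot bricks exist on that peer:
    $ gluster peer detach N2
    peer detach: failed: N2 is part of existing snapshots. Remove those
    snapshots before proceeding

    # Deleting the snapshots that reference N2 allows the detach
    # (snapshot delete asks for confirmation before proceeding):
    $ gluster snapshot delete snap1
    $ gluster peer detach N2
    peer detach: success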