author     Sunny Kumar <sunkumar@redhat.com>       2017-08-16 14:04:45 +0530
committer  Amar Tumballi <amarts@redhat.com>       2017-10-23 10:05:02 +0000
commit     2b3b3edee2d849b4aee314048987dc995d9679a1 (patch)
tree       9523ebf6ad226735b9a2eac40c2701653be307bf /xlators
parent     87bd25b64ae34cce95e87e724acfeab4c13d60a4 (diff)
snapshot: Issue with other processes accessing the mounted brick
Added code to unmount an activated snapshot brick during the snapshot
deactivation process, which makes sense because the mount point for a
deactivated brick should not exist.

Removed the code that mounted a newly created snapshot, as a newly created
snapshot should not be mounted until it is activated.

Added code for mount point creation and snapshot mounting during snapshot
activation.

Added validation during glusterd init so that only snapshots whose status is
either STARTED or RESTORED are mounted.

During snapshot restore, the mount point for a stopped snap should exist, as
it is required to set an extended attribute.

During handshake, after getting updates from a friend, the mount point should
exist for an activated snapshot and should not exist for a deactivated one.

While getting snap status we should show relevant information for deactivated
snapshots; after this patch the 'gluster snap status' command will show output
like:

    Snap Name        : snap1
    Snap UUID        : snap-uuid
    Brick Path       : server1:/run/gluster/snaps/snap-vol-name/brick
    Volume Group     : N/A (Deactivated Snapshot)
    Brick Running    : No
    Brick PID        : N/A
    Data Percentage  : N/A
    LV Size          : N/A

Fixes: #276
Change-Id: I65783488e35fac43632615ce1b8ff7b8e84834dc
BUG: 1482023
Signed-off-by: Sunny Kumar <sunkumar@redhat.com>
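For illustration, the retry-based unmount that deactivation now relies on looks
roughly like the self-contained sketch below. It is only a model of the
approach: it shells out to the external umount command (as the patch does,
because umount2() leaves the mtab entry behind) and uses an assumed mount path
and retry count rather than the glusterd helper's actual arguments.

    /* Minimal sketch of the retry-based unmount used during snapshot
     * deactivation. Illustrative only: the path, retry count, and function
     * names are assumptions, not the glusterd implementation. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    static int
    unmount_with_retries (const char *mount_path, int max_tries)
    {
            char cmd[4096];
            int  i;
            int  ret = -1;

            /* Use the external umount command so the mtab entry is cleaned
             * up, which umount2(2) would leave behind. */
            snprintf (cmd, sizeof (cmd), "umount %s", mount_path);

            for (i = 1; i <= max_tries; i++) {
                    ret = system (cmd);
                    if (ret == 0)
                            break;
                    fprintf (stderr, "umount failed for %s, retry %d of %d\n",
                             mount_path, i, max_tries);
                    sleep (3); /* brick process may still be shutting down */
            }
            return ret;
    }

    int
    main (void)
    {
            /* Example path layout used by snapshot bricks (illustrative). */
            return unmount_with_retries
                    ("/run/gluster/snaps/snap-vol-name/brick1", 3);
    }

In the patch itself this logic lives in glusterd_snap_unmount(), which walks
the bricks of the snapshot volume and skips bricks that do not belong to this
node.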
Diffstat (limited to 'xlators')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c    85
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h     3
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapshot.c          145
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-store.c              25
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-store.h               4
5 files changed, 218 insertions, 44 deletions
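Two behavioural changes in the diff below are easiest to see side by side:
bricks of a newly created snapshot are no longer mounted at create time unless
the snapshot is a clone or the activate-on-create option is enabled, and brick
mounts are otherwise created only when the snapshot is activated. A minimal,
self-contained sketch of that decision, using illustrative names rather than
the glusterd APIs, is:

    /* Sketch of the new brick-creation policy at snapshot-take time.
     * All names and types here are illustrative, not glusterd code. */
    #include <stdbool.h>
    #include <stdio.h>

    static void
    create_snap_brick (const char *snapname)
    {
            printf ("creating brick mount for %s\n", snapname);
    }

    static void
    take_brick_snapshot (const char *snapname, bool clone,
                         bool activate_on_create)
    {
            /* The LVM snapshot of the origin brick would be taken here. */
            if (clone || activate_on_create)
                    create_snap_brick (snapname);
            else
                    printf ("%s: brick mount deferred until activation\n",
                            snapname);
    }

    int
    main (void)
    {
            take_brick_snapshot ("snap1", false, false); /* deferred      */
            take_brick_snapshot ("snap2", false, true);  /* created now   */
            return 0;
    }

The real check reads the activate-on-create setting from priv->opts via
dict_get_str_boolean() with the GLUSTERD_STORE_KEY_SNAP_ACTIVATE key, as shown
in glusterd_take_brick_snapshot() below.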
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
index 38fea9dbaa4..f1a2bbb4c35 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
@@ -1678,10 +1678,31 @@ glusterd_import_friend_snap (dict_t *peer_data, int32_t snap_count,
"for snap %s", peer_snap_name);
goto out;
}
+ /* During handshake, after getting updates from a friend, the mount
+ * point for an activated snapshot should exist and should not exist
+ * for a deactivated snapshot.
+ */
if (glusterd_is_volume_started (snap_vol)) {
+ ret = glusterd_recreate_vol_brick_mounts (this,
+ snap_vol);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_BRK_MNT_RECREATE_FAIL,
+ "Failed to recreate brick mounts"
+ " for %s", snap->snapname);
+ goto out;
+ }
+
(void) glusterd_start_bricks (snap_vol);
} else {
(void) glusterd_stop_bricks(snap_vol);
+ ret = glusterd_snap_unmount(this, snap_vol);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_GLUSTERD_UMOUNT_FAIL,
+ "Failed to unmounts for %s",
+ snap->snapname);
+ }
}
ret = glusterd_import_quota_conf (peer_data, i,
@@ -3337,6 +3358,70 @@ out:
return ret;
}
+/* This function unmounts the bricks of a snapshot volume on this node.
+ */
+int32_t
+glusterd_snap_unmount (xlator_t *this, glusterd_volinfo_t *volinfo)
+{
+ char *brick_mount_path = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ int32_t ret = -1;
+ int retry_count = 0;
+
+ GF_ASSERT (this);
+ GF_ASSERT (volinfo);
+
+ cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+ /* If the brick is not of this node, we continue */
+ if (gf_uuid_compare (brickinfo->uuid, MY_UUID)) {
+ continue;
+ }
+ /* If snapshot is pending, we continue */
+ if (brickinfo->snap_status == -1) {
+ continue;
+ }
+
+ /* Fetch the brick mount path from the brickinfo->path */
+ ret = glusterd_get_brick_root (brickinfo->path,
+ &brick_mount_path);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_BRICK_PATH_UNMOUNTED,
+ "Failed to find brick_mount_path for %s",
+ brickinfo->path);
+ /* There is a chance that the brick path is already
+ * unmounted. */
+ ret = 0;
+ goto out;
+ }
+ /* unmount cannot be done while the brick process is still
+ * shutting down, so retry up to three times.
+ */
+ retry_count = 0;
+ while (retry_count <= 2) {
+ retry_count++;
+ /* The umount2 system call doesn't clean up the mtab entry
+ * after unmounting, so use the external umount command.
+ */
+ ret = glusterd_umount(brick_mount_path);
+ if (!ret)
+ break;
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_GLUSTERD_UMOUNT_FAIL, "umount failed "
+ "for path %s (brick: %s): %s. Retry(%d)",
+ brick_mount_path, brickinfo->path,
+ strerror (errno), retry_count);
+ sleep (3);
+ }
+ }
+
+out:
+ if (brick_mount_path)
+ GF_FREE(brick_mount_path);
+
+ return ret;
+}
+
int32_t
glusterd_umount (const char *path)
{
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h
index b13493d2dff..d619f1d3106 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h
@@ -76,6 +76,9 @@ int32_t
glusterd_umount (const char *path);
int32_t
+glusterd_snap_unmount (xlator_t *this, glusterd_volinfo_t *volinfo);
+
+int32_t
glusterd_add_snapshots_to_export_dict (dict_t *peer_data);
int32_t
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
index d315b6582df..a8436e31c9f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
@@ -872,6 +872,17 @@ glusterd_snapshot_restore (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
goto out;
}
}
+ /* During snapshot restore, the mount point for a stopped snap
+ * should exist, as it is required to set an extended attribute.
+ */
+ ret = glusterd_recreate_vol_brick_mounts (this, snap_volinfo);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_BRK_MNT_RECREATE_FAIL,
+ "Failed to recreate brick mounts for %s",
+ snap->snapname);
+ goto out;
+ }
ret = gd_restore_snap_volume (dict, rsp_dict, parent_volinfo,
snap_volinfo, volcount);
@@ -5116,13 +5127,17 @@ glusterd_take_brick_snapshot (dict_t *dict, glusterd_volinfo_t *snap_vol,
char *origin_brick_path = NULL;
char key[PATH_MAX] = "";
int32_t ret = -1;
+ gf_boolean_t snap_activate = _gf_false;
xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
this = THIS;
+ priv = this->private;
GF_ASSERT (this);
GF_ASSERT (dict);
GF_ASSERT (snap_vol);
GF_ASSERT (brickinfo);
+ GF_ASSERT (priv);
if (strlen(brickinfo->device_path) == 0) {
gf_msg (this->name, GF_LOG_ERROR, EINVAL,
@@ -5166,16 +5181,23 @@ glusterd_take_brick_snapshot (dict_t *dict, glusterd_volinfo_t *snap_vol,
*/
}
- /* create the complete brick here */
- ret = glusterd_snap_brick_create (snap_vol, brickinfo,
- brick_count, clone);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_CREATION_FAIL, "not able to"
- " create the brick for the snap %s"
- ", volume %s", snap_vol->snapshot->snapname,
- snap_vol->volname);
- goto out;
+ /* create the complete brick here in case of clone and
+ * activate-on-create configuration.
+ */
+ snap_activate = dict_get_str_boolean (priv->opts,
+ GLUSTERD_STORE_KEY_SNAP_ACTIVATE,
+ _gf_false);
+ if (clone || snap_activate) {
+ ret = glusterd_snap_brick_create (snap_vol, brickinfo,
+ brick_count, clone);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_BRICK_CREATION_FAIL, "not able to "
+ "create the brick for the snap %s, volume %s",
+ snap_vol->snapshot->snapname,
+ snap_vol->volname);
+ goto out;
+ }
}
out:
@@ -6040,8 +6062,10 @@ glusterd_snapshot_activate_commit (dict_t *dict, char **op_errstr,
char *snapname = NULL;
glusterd_snap_t *snap = NULL;
glusterd_volinfo_t *snap_volinfo = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
xlator_t *this = NULL;
- int flags = 0;
+ int flags = 0;
+ int brick_count = -1;
this = THIS;
GF_ASSERT (this);
@@ -6092,6 +6116,24 @@ glusterd_snapshot_activate_commit (dict_t *dict, char **op_errstr,
goto out;
}
+ /* create the complete brick here */
+ cds_list_for_each_entry (brickinfo, &snap_volinfo->bricks,
+ brick_list) {
+ brick_count++;
+ if (gf_uuid_compare (brickinfo->uuid, MY_UUID))
+ continue;
+ ret = glusterd_snap_brick_create (snap_volinfo, brickinfo,
+ brick_count, _gf_false);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_BRICK_CREATION_FAIL, "not able to "
+ "create the brick for the snap %s, volume %s",
+ snap_volinfo->snapshot->snapname,
+ snap_volinfo->volname);
+ goto out;
+ }
+ }
+
ret = glusterd_start_volume (snap_volinfo, flags, _gf_true);
if (ret) {
@@ -6177,6 +6219,13 @@ glusterd_snapshot_deactivate_commit (dict_t *dict, char **op_errstr,
goto out;
}
+ ret = glusterd_snap_unmount(this, snap_volinfo);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_GLUSTERD_UMOUNT_FAIL,
+ "Failed to unmounts for %s", snap->snapname);
+ }
+
ret = dict_set_dynstr_with_alloc (rsp_dict, "snapuuid",
uuid_utoa (snap->snap_id));
if (ret) {
@@ -6821,6 +6870,7 @@ glusterd_snapshot_create_commit (dict_t *dict, char **op_errstr,
int64_t i = 0;
int64_t volcount = 0;
int32_t snap_activate = 0;
+ int32_t flags = 0;
char *snapname = NULL;
char *volname = NULL;
char *tmp_name = NULL;
@@ -6829,7 +6879,6 @@ glusterd_snapshot_create_commit (dict_t *dict, char **op_errstr,
glusterd_snap_t *snap = NULL;
glusterd_volinfo_t *origin_vol = NULL;
glusterd_volinfo_t *snap_vol = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
glusterd_conf_t *priv = NULL;
this = THIS;
@@ -6968,30 +7017,21 @@ glusterd_snapshot_create_commit (dict_t *dict, char **op_errstr,
goto out;
}
- cds_list_for_each_entry (snap_vol, &snap->volumes, vol_list) {
- cds_list_for_each_entry (brickinfo, &snap_vol->bricks,
- brick_list) {
- ret = glusterd_brick_start (snap_vol, brickinfo,
- _gf_false);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_BRICK_DISCONNECTED, "starting "
- "the brick %s:%s for the snap %s "
- "(volume: %s) failed",
- brickinfo->hostname, brickinfo->path,
- snap_vol->snapshot->snapname,
- snap_vol->volname);
- goto out;
- }
- }
+ /* Activate created bricks in case of activate-on-create config. */
+ ret = dict_get_int32 (dict, "flags", &flags);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_GET_FAILED, "Unable to get flags");
+ goto out;
+ }
- snap_vol->status = GLUSTERD_STATUS_STARTED;
- ret = glusterd_store_volinfo (snap_vol,
- GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ cds_list_for_each_entry (snap_vol, &snap->volumes, vol_list) {
+ ret = glusterd_start_volume (snap_vol, flags, _gf_true);
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLINFO_SET_FAIL, "Failed to store "
- "snap volinfo %s", snap_vol->volname);
+ GD_MSG_SNAP_ACTIVATE_FAIL,
+ "Failed to activate snap volume %s of the "
+ "snap %s", snap_vol->volname, snap->snapname);
goto out;
}
}
@@ -7533,6 +7573,30 @@ glusterd_get_single_brick_status (char **op_errstr, dict_t *rsp_dict,
if (ret < 0) {
goto out;
}
+ /* While getting snap status we should show relevant information
+ * for deactivated snaps.
+ */
+ if (snap_volinfo->status == GLUSTERD_STATUS_STOPPED) {
+ /* Setting vgname as "Deactivated Snapshot" */
+ value = gf_strdup ("N/A (Deactivated Snapshot)");
+ if (!value) {
+ ret = -1;
+ goto out;
+ }
+
+ snprintf (key, sizeof (key), "%s.brick%d.vgname",
+ keyprefix, index);
+ ret = dict_set_dynstr (rsp_dict, key, value);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_SET_FAILED,
+ "Could not save vgname ");
+ goto out;
+ }
+
+ ret = 0;
+ goto out;
+ }
ret = glusterd_get_brick_lvm_details (rsp_dict, brickinfo,
snap_volinfo->volname,
@@ -9114,6 +9178,19 @@ glusterd_snapshot_restore_postop (dict_t *dict, int32_t op_ret,
snap->snapname);
goto out;
}
+
+ /* After a failed restore, we have to remove the mount points for
+ * deactivated snaps that were created at the start of the restore op.
+ */
+ if (volinfo->status == GLUSTERD_STATUS_STOPPED) {
+ ret = glusterd_snap_unmount(this, volinfo);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_GLUSTERD_UMOUNT_FAIL,
+ "Failed to unmounts for %s",
+ snap->snapname);
+ }
+ }
}
ret = 0;
@@ -9879,7 +9956,7 @@ gd_restore_snap_volume (dict_t *dict, dict_t *rsp_dict,
glusterd_conf_t *conf = NULL;
glusterd_volinfo_t *temp_volinfo = NULL;
glusterd_volinfo_t *voliter = NULL;
- gf_boolean_t conf_present = _gf_false;
+ gf_boolean_t conf_present = _gf_false;
this = THIS;
GF_ASSERT (this);
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
index 862cba4f9cc..6747c038773 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.c
+++ b/xlators/mgmt/glusterd/src/glusterd-store.c
@@ -3515,7 +3515,7 @@ out:
return ret;
}
-static int32_t
+int32_t
glusterd_recreate_vol_brick_mounts (xlator_t *this,
glusterd_volinfo_t *volinfo)
{
@@ -4521,17 +4521,22 @@ glusterd_recreate_all_snap_brick_mounts (xlator_t *this)
}
}
- /* Recreate bricks of snapshot volumes */
+ /* Recreate bricks of snapshot volumes
+ * We are not creating brick mounts for stopped snaps.
+ */
cds_list_for_each_entry (snap, &priv->snapshots, snap_list) {
cds_list_for_each_entry (volinfo, &snap->volumes, vol_list) {
- ret = glusterd_recreate_vol_brick_mounts (this,
- volinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BRK_MNT_RECREATE_FAIL,
- "Failed to recreate brick mounts "
- "for %s", snap->snapname);
- goto out;
+ if (volinfo->status != GLUSTERD_STATUS_STOPPED) {
+ ret = glusterd_recreate_vol_brick_mounts
+ (this, volinfo);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_BRK_MNT_RECREATE_FAIL,
+ "Failed to recreate brick "
+ "mounts for %s",
+ snap->snapname);
+ goto out;
+ }
}
}
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.h b/xlators/mgmt/glusterd/src/glusterd-store.h
index 603151a361d..381676078cb 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.h
+++ b/xlators/mgmt/glusterd/src/glusterd-store.h
@@ -202,4 +202,8 @@ glusterd_quota_conf_write_header (int fd);
int32_t
glusterd_quota_conf_write_gfid (int fd, void *buf, char type);
+int32_t
+glusterd_recreate_vol_brick_mounts (xlator_t *this,
+ glusterd_volinfo_t *volinfo);
+
#endif