author    Arthy Loganathan <aloganat@redhat.com>    2020-10-01 01:45:18 +0530
committer Bala Konda Reddy M <bala12352@gmail.com>  2020-10-06 07:18:24 +0000
commit    71aa8da8f0207d7d66c37983e75a96c31d6dde5d (patch)
tree      e9be5de928898b4fd950a3fcd6f81f840780bd23
parent    8ef5df7ea27f245180d034a24d3f5d6b893eae4f (diff)
[Test] Replica 3 to arbiter conversion with ongoing I/O

Change-Id: I3920be66ac84fe700c4d0d6a1d2c1750efb43335
Signed-off-by: Arthy Loganathan <aloganat@redhat.com>
-rwxr-xr-x  tests/functional/arbiter/brick_cases/test_replica3_to_arbiter.py  119
1 file changed, 106 insertions(+), 13 deletions(-)
diff --git a/tests/functional/arbiter/brick_cases/test_replica3_to_arbiter.py b/tests/functional/arbiter/brick_cases/test_replica3_to_arbiter.py
index 4f6bba95a..24c014502 100755
--- a/tests/functional/arbiter/brick_cases/test_replica3_to_arbiter.py
+++ b/tests/functional/arbiter/brick_cases/test_replica3_to_arbiter.py
@@ -22,6 +22,12 @@ from glustolibs.gluster.volume_libs import (
     expand_volume, wait_for_volume_process_to_be_online,
     verify_all_process_of_volume_are_online, shrink_volume, get_subvols)
 from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.io.utils import run_linux_untar
+from glustolibs.gluster.heal_libs import (monitor_heal_completion,
+                                          is_heal_complete,
+                                          is_volume_in_split_brain)
+from glustolibs.gluster.glusterdir import mkdir
+from glustolibs.gluster.heal_ops import trigger_heal
 @runs_on([['replicated', 'distributed-replicated'],
@@ -37,22 +43,29 @@ class GlusterArbiterVolumeTypeClass(GlusterBaseClass):
         # Calling GlusterBaseClass setUp
         self.get_super_method(self, 'setUp')()
+        # Set I/O flag to false
+        self.is_io_running = False
+
         # Setup Volume
-        g.log.info("Starting to Setup Volume")
-        ret = self.setup_volume()
+        g.log.info("Starting to Setup and Mount Volume")
+        # Creating Volume and mounting the volume
+        ret = self.setup_volume_and_mount_volume([self.mounts[0]])
         if not ret:
-            raise ExecutionError("Failed to Setup_Volume")
-        g.log.info("Successful in Setup Volume")
+            raise ExecutionError("Volume creation or mount failed: %s"
+                                 % self.volname)
         self.subvols = get_subvols(self.mnode, self.volname)['volume_subvols']
     def tearDown(self):
-        # Cleanup Volume
-        g.log.info("Starting to Unmount Volume and Cleanup Volume")
-        ret = self.cleanup_volume()
+        # Wait for I/O if not completed
+        if self.is_io_running:
+            if not self._wait_for_untar_completion():
+                g.log.error("I/O failed to stop on clients")
+
+        # Unmounting and cleaning volume
+        ret = self.unmount_volume_and_cleanup_volume([self.mounts[0]])
         if not ret:
-            raise ExecutionError("Failed to Cleanup Volume")
-        g.log.info("Successful Cleanup Volume")
+            raise ExecutionError("Unable to delete volume %s" % self.volname)
         # Calling GlusterBaseClass tearDown
         self.get_super_method(self, 'tearDown')()
@@ -67,10 +80,22 @@ class GlusterArbiterVolumeTypeClass(GlusterBaseClass):
             g.log.info('Clearing brick %s is successful', brick)
         g.log.info('Clearing for all brick is successful')
-    def test_replicated_to_arbiter_volume(self):
+    def _wait_for_untar_completion(self):
+        """Wait for untar to complete.
+
+        Returns True if every untar process finished successfully,
+        else False.
+        """
+        has_process_stopped = []
+        for proc in self.io_process:
+            try:
+                ret, _, _ = proc.async_communicate()
+                # A zero exit status means the untar completed cleanly
+                has_process_stopped.append(not ret)
+            except ValueError:
+                # Output was already collected, so the process
+                # has stopped
+                has_process_stopped.append(True)
+        return all(has_process_stopped)
+
+    def _convert_replicated_to_arbiter_volume(self):
         """
-        Description:-
-        Reduce the replica count from replica 3 to arbiter
+        Helper method to convert a replicated volume to an arbiter
+        volume.
         """
         # pylint: disable=too-many-statements
         # Remove brick to reduce the replica count from replica 3
@@ -99,7 +124,8 @@ class GlusterArbiterVolumeTypeClass(GlusterBaseClass):
         g.log.info("Adding bricks to convert to Arbiter Volume")
         replica_arbiter = {'replica_count': 1, 'arbiter_count': 1}
         ret = expand_volume(self.mnode, self.volname, self.servers,
-                            self.all_servers_info, **replica_arbiter)
+                            self.all_servers_info, force=True,
+                            **replica_arbiter)
         self.assertTrue(ret, "Failed to expand the volume %s" % self.volname)
         g.log.info("Changing volume to arbiter volume is successful %s",
                    self.volname)
@@ -118,3 +144,70 @@ class GlusterArbiterVolumeTypeClass(GlusterBaseClass):
         self.assertTrue(ret, "Volume %s : All process are not online"
                         % self.volname)
         g.log.info("Volume %s : All process are online", self.volname)
+
+    def test_replicated_to_arbiter_volume(self):
+        """
+        Description: Reduce the replica count from replica 3 to
+        arbiter.
+        """
+        # pylint: disable=too-many-statements
+        self._convert_replicated_to_arbiter_volume()
+
+    def test_replica_to_arbiter_volume_with_io(self):
+        """
+        Description: Replica 3 to arbiter conversion with ongoing I/O
+
+        Steps:
+        1) Create a replica 3 volume and start the volume.
+        2) Turn client-side self-heal off.
+        3) FUSE mount the volume.
+        4) Create directory dir1 and write data, for example by
+           untarring a Linux kernel tarball from the client into dir1.
+        5) While I/O is running, execute a remove-brick command to
+           convert the replica 3 volume to replica 2.
+        6) Execute an add-brick command, providing the path of the new
+           arbiter brick, to convert it to an arbiter volume.
+        7) Issue gluster volume heal.
+        8) Heal should complete with no files in split-brain.
+        """
+        # pylint: disable=too-many-statements
+        # Create a dir to start untar
+        self.linux_untar_dir = "{}/{}".format(self.mounts[0].mountpoint,
+                                              "linuxuntar")
+        ret = mkdir(self.clients[0], self.linux_untar_dir)
+        self.assertTrue(ret, "Failed to create dir linuxuntar for untar")
+
+        # Start linux untar on dir linuxuntar
+        self.io_process = run_linux_untar(self.clients[0],
+                                          self.mounts[0].mountpoint,
+                                          dirs=tuple(['linuxuntar']))
+        self.is_io_running = True
+
+        # Convert the replicated volume to an arbiter volume
+        self._convert_replicated_to_arbiter_volume()
+
+        # Wait for I/O to complete
+        ret = self._wait_for_untar_completion()
+        self.assertTrue(ret, "I/O didn't complete or failed on client")
+        self.is_io_running = False
+
+        # Start healing
+        ret = trigger_heal(self.mnode, self.volname)
+        self.assertTrue(ret, 'Heal is not started')
+        g.log.info('Healing is started')
+
+        # Monitor heal completion
+        ret = monitor_heal_completion(self.mnode, self.volname,
+                                      timeout_period=3600)
+        self.assertTrue(ret, 'Heal has not yet completed')
+
+        # Check if heal is completed
+        ret = is_heal_complete(self.mnode, self.volname)
+        self.assertTrue(ret, 'Heal is not complete')
+        g.log.info('Heal is completed successfully')
+
+        # Check for split-brain
+        ret = is_volume_in_split_brain(self.mnode, self.volname)
+        self.assertFalse(ret, 'Volume is in split-brain state')
+        g.log.info('Volume is not in split-brain state')
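
For reference, the conversion flow the new test drives can be condensed into a single standalone helper built from the same glustolibs calls this patch imports. This is a minimal sketch, not part of the patch: the remove-brick step lives inside _convert_replicated_to_arbiter_volume() and is not visible in the hunks above, so the shrink_volume(..., replica_num=0) call (drop one replica from each subvolume) is an assumption about how that step is performed, while the expand_volume kwargs mirror the ones shown in the diff.

    from glustolibs.gluster.volume_libs import expand_volume, shrink_volume
    from glustolibs.gluster.heal_ops import trigger_heal
    from glustolibs.gluster.heal_libs import (monitor_heal_completion,
                                              is_volume_in_split_brain)


    def replica3_to_arbiter(mnode, volname, servers, all_servers_info):
        """Convert a replica 3 volume to replica 2 + arbiter, then heal."""
        # remove-brick: drop one replica from each subvolume, leaving a
        # replica 2 volume (replica_num=0 picks the first replica; this
        # argument is an assumption, not shown in the diff above)
        if not shrink_volume(mnode, volname, replica_num=0):
            return False

        # add-brick: add one arbiter brick per subvolume, converting the
        # volume to replica 3 arbiter 1 (kwargs as used in the patch)
        if not expand_volume(mnode, volname, servers, all_servers_info,
                             force=True, replica_count=1, arbiter_count=1):
            return False

        # Heal the empty arbiter bricks and confirm there is no split-brain
        if not trigger_heal(mnode, volname):
            return False
        if not monitor_heal_completion(mnode, volname, timeout_period=3600):
            return False
        return not is_volume_in_split_brain(mnode, volname)

The force=True passed to expand_volume is likely what lets the add-brick succeed when the new arbiter brick lands on a node that already hosts a data brick of the same subvolume, which gluster would otherwise warn about and refuse.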