author    Bala Konda Reddy M <bala12352@gmail.com>    2020-06-22 17:20:40 +0530
committer Bala Konda Reddy M <bala12352@gmail.com>    2020-07-17 11:09:24 +0000
commit    89e632000d85e1a90a1eb8504d21ac504c738cf0 (patch)
tree      39b9965e662900f090e4c3743c02a6e71625481e
parent    f39fb79e9aaf97c1acad64b4abd104a2a279cf04 (diff)
[Test] mv and ls operations with bricks offline
Test Steps:
1. Create a volume and mount it on 3 clients.
2. Bring two bricks offline in each subvol.
3. On client1: under dir1, create files f{1..10000} in the background.
4. On client2: under the root dir of the mountpoint, touch x{1..1000}.
5. On client3: start creating x{1001..10000}.
6. Bring the offline bricks back online (2 in each of the two subvols).
7. While the IO on client1 and client3 is still running, move all the
   x* files on client2 into dir1.
8. Perform lookup from client3.

Change-Id: Ib72648af783535557e20cea7e64ea68036b23121
Signed-off-by: Bala Konda Reddy M <bala12352@gmail.com>
-rw-r--r--    tests/functional/disperse/test_ec_lookup_and_move_operations.py    123
1 file changed, 121 insertions(+), 2 deletions(-)
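For context before the diff: steps 2 and 6 of the commit message map onto the
glustolibs helpers this patch imports. A minimal sketch of that brick
offline/online cycle, where mnode and volname are placeholders rather than
part of the patch:

    from random import sample

    from glustolibs.gluster.brick_libs import (bring_bricks_offline,
                                               are_bricks_online)
    from glustolibs.gluster.volume_libs import get_subvols
    from glustolibs.gluster.volume_ops import volume_start

    # Step 2: pick two bricks per subvol and bring them offline
    subvols = get_subvols(mnode, volname)['volume_subvols']
    offline = [b for subvol in subvols for b in sample(subvol, 2)]
    bring_bricks_offline(volname, offline)

    # Steps 3-5: client IO runs while the bricks are down

    # Step 6: "gluster volume start <volname> force" restarts the killed
    # bricks without disturbing the ones already running
    volume_start(mnode, volname, force=True)
    assert are_bricks_online(mnode, volname, offline)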
diff --git a/tests/functional/disperse/test_ec_lookup_and_move_operations.py b/tests/functional/disperse/test_ec_lookup_and_move_operations.py
index 3ecec11..c6bc873 100644
--- a/tests/functional/disperse/test_ec_lookup_and_move_operations.py
+++ b/tests/functional/disperse/test_ec_lookup_and_move_operations.py
@@ -14,16 +14,22 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-from random import choice
+from random import choice, sample
import os
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.brick_libs import (bring_bricks_offline,
+                                           are_bricks_offline,
+                                           are_bricks_online)
+from glustolibs.gluster.heal_libs import monitor_heal_completion
from glustolibs.gluster.glusterdir import mkdir
from glustolibs.gluster.mount_ops import create_mount_objs
+from glustolibs.gluster.volume_ops import volume_start
+from glustolibs.gluster.volume_libs import get_subvols
from glustolibs.misc.misc_libs import upload_scripts
-from glustolibs.io.utils import validate_io_procs
+from glustolibs.io.utils import (validate_io_procs, wait_for_io_to_complete)
@runs_on([['dispersed', 'distributed-dispersed'], ['glusterfs', 'nfs']])
@@ -75,6 +81,12 @@ class TestEcLookupAndMoveOperations(GlusterBaseClass):
        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()
+        if self.mount_procs:
+            ret = wait_for_io_to_complete(self.mount_procs, self.mounts)
+            if not ret:
+                raise ExecutionError(
+                    "Wait for IO completion failed on some of the clients")
+
        # Unmount and cleanup the volume
        if not self.unmount_volume_and_cleanup_volume(self.mounts):
            raise ExecutionError("Unable to unmount and cleanup volume")
@@ -153,3 +165,110 @@ class TestEcLookupAndMoveOperations(GlusterBaseClass):
        self.assertTrue(validate_io_procs(self.mount_procs, self.mounts),
                        "IO failed on the clients")
+
+    def test_ec_lookup_and_move_operations_few_bricks_are_offline(self):
+        """
+        Test Steps:
+        1. Mount the volume on 3 mount points: c1, c2, and c3
+        2. Bring two bricks offline in each subvol
+        3. On client1: under dir1, create files f{1..10000} in the background
+        4. On client2: under the root dir of the mountpoint, touch x{1..1000}
+        5. On client3: once step 4 completes, start creating x{1001..10000}
+        6. Bring the offline bricks back online (2 in each of the two
+           subvols)
+        7. While the IO on client1 and client3 is still running, move all
+           the x* files on client2 into dir1
+        8. Perform lookup from client3
+        """
+        # Pick two bricks from each subvol
+        all_subvols_dict = get_subvols(self.mnode, self.volname)
+        subvols = all_subvols_dict['volume_subvols']
+        bricks_to_bring_offline = []
+        for subvol in subvols:
+            self.assertTrue(subvol, "Subvol brick list is empty")
+            bricks_to_bring_offline.extend(sample(subvol, 2))
+
+        # Bring two bricks of each subvol offline
+        ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
+        self.assertTrue(ret, "Failed to bring bricks offline")
+        g.log.info("Bricks are offline %s", bricks_to_bring_offline)
+
+        # Validate that the bricks are offline
+        ret = are_bricks_offline(self.mnode, self.volname,
+                                 bricks_to_bring_offline)
+        self.assertTrue(ret, "Few of the bricks are still online in"
+                        " {}".format(bricks_to_bring_offline))
+        g.log.info("%s bricks are offline as expected",
+                   bricks_to_bring_offline)
+
+        # Create directory on client1
+        dir_on_mount = self.mounts[0].mountpoint + '/dir1'
+        ret = mkdir(self.mounts[0].client_system, dir_on_mount)
+        self.assertTrue(ret, "Unable to create directory on client1"
+                        " {}".format(self.mounts[0].client_system))
+        g.log.info("Dir1 created on %s successfully",
+                   self.mounts[0].client_system)
+
+        # The next IO runs in the background, so collect the procs started
+        # with run_async in mount_procs.
+        self.mount_procs = []
+
+        # On client1: under dir1, create files f{1..10000} in the background
+        self._run_create_files(file_count=10000, base_name="f_",
+                               mpoint=dir_on_mount,
+                               client=self.mounts[0].client_system)
+
+        # On client2: under the root dir of the mountpoint touch x{1..1000}
+        cmd = ("/usr/bin/env python {} create_files -f 1000 --fixed-file-size"
+               " 10k --base-file-name x {}".format(self.script_upload_path,
+                                                   self.mounts[1].mountpoint))
+        ret, _, err = g.run(self.mounts[1].client_system, cmd)
+        self.assertEqual(ret, 0, "File creation failed on {} with {}".
+                         format(self.mounts[1].client_system, err))
+        g.log.info("File creation successful on %s",
+                   self.mounts[1].client_system)
+
+        # On client3: start creating x{1001..10000}
+        cmd = ("cd {}; for i in `seq 1001 10000`; do touch x$i; done; "
+               "cd -".format(self.mounts[2].mountpoint))
+        proc = g.run_async(self.mounts[2].client_system, cmd)
+        self.mount_procs.append(proc)
+
+        # Bring the offline bricks online with volume start force
+        ret, _, err = volume_start(self.mnode, self.volname, force=True)
+        self.assertEqual(ret, 0, err)
+        g.log.info("Volume: %s started successfully", self.volname)
+
+        # Check whether the bricks are online
+        ret = are_bricks_online(self.mnode, self.volname,
+                                bricks_to_bring_offline)
+        self.assertTrue(ret, "Bricks {} are still offline".
+                        format(bricks_to_bring_offline))
+        g.log.info("Bricks %s are online now", bricks_to_bring_offline)
+
+        # From client2, move all the files whose names start with x into dir1
+        cmd = ("for i in `seq 0 999`; do mv {}/x$i.txt {}; "
+               "done".format(self.mounts[1].mountpoint, dir_on_mount))
+        proc = g.run_async(self.mounts[1].client_system, cmd)
+        self.mount_procs.append(proc)
+
+        # Perform a lookup from client3 in a loop for 20 iterations
+        cmd = "ls -R {}".format(self.mounts[2].mountpoint)
+        for count in range(1, 21):
+            ret, _, err = g.run(self.mounts[2].client_system, cmd)
+            self.assertEqual(ret, 0, "ls failed while the mv operation was"
+                             " in progress, with {}".format(err))
+            g.log.debug("ls successful for the %s time", count)
+
+        self.assertTrue(validate_io_procs(self.mount_procs, self.mounts),
+                        "IO failed on the clients")
+        # Empty mount_procs so that tearDown does not wait on the IO again
+        self.mount_procs = []
+
+        # Wait for heal to complete
+        ret = monitor_heal_completion(self.mnode, self.volname)
+        self.assertTrue(ret, "Heal didn't complete in the expected time")
+        g.log.info("Heal completed successfully on %s volume", self.volname)