Diffstat (limited to 'tests/functional')
-rw-r--r-- tests/functional/disperse/test_ec_remove_brick.py | 166
1 file changed, 166 insertions(+), 0 deletions(-)
diff --git a/tests/functional/disperse/test_ec_remove_brick.py b/tests/functional/disperse/test_ec_remove_brick.py
new file mode 100644
index 000000000..cb8238831
--- /dev/null
+++ b/tests/functional/disperse/test_ec_remove_brick.py
@@ -0,0 +1,166 @@
+# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import (GlusterBaseClass,
+                                                   runs_on)
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.misc.misc_libs import upload_scripts
+from glustolibs.io.utils import validate_io_procs
+from glustolibs.gluster.volume_libs import (
+    log_volume_info_and_status, shrink_volume,
+    wait_for_volume_process_to_be_online)
+
+
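+# runs_on parametrizes the test class over the listed volume types and
+# mount protocols: here, distributed-dispersed volumes on a glusterfs
+# (FUSE) mount.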
+@runs_on([['distributed-dispersed'],
+          ['glusterfs']])
+class DispersedWithRemoveBrick(GlusterBaseClass):
+    """
+    Test case validating remove-brick and the implicit rebalance on a
+    distributed-dispersed volume
+    """
+    @classmethod
+    def setUpClass(cls):
+        """
+        Setup volume and mount volume
+        Calling GlusterBaseClass setUpClass
+        """
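+        # .im_func invokes the unbound base-class setUpClass directly,
+        # the Python 2 idiom used for super calls in this framework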
+        GlusterBaseClass.setUpClass.im_func(cls)
+
+        # Setup Volume and Mount Volume
+        g.log.info("Starting to Setup and Mount Volume %s", cls.volname)
+        ret = cls.setup_volume_and_mount_volume(mounts=cls.mounts)
+        if not ret:
+            raise ExecutionError("Failed to Setup_Volume "
+                                 "and Mount_Volume %s" % cls.volname)
+        g.log.info("Successful in Setup and Mount Volume %s", cls.volname)
+        # Upload IO scripts for running IO on mounts
+        g.log.info("Upload IO scripts to clients %s for running IO on "
+                   "mounts", cls.clients)
+        cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+                                  "file_dir_ops.py")
+        ret = upload_scripts(cls.clients, cls.script_upload_path)
+        if not ret:
+            raise ExecutionError("Failed to upload IO scripts to clients %s" %
+                                 cls.clients)
+        g.log.info("Successfully uploaded IO scripts to clients %s",
+                   cls.clients)
+
+    def test_disperse_removebrick(self):
+
+        # pylint: disable=too-many-branches,too-many-statements,too-many-locals
+        """
+        - Write IO
+        - Start remove-brick (shrink_volume waits for the implicit
+          rebalance to finish and commits the removal)
+        - Wait for volume processes to be online
+        - Validate the in-flight IO
+        - Start IO again and validate it
+        """
+
+        # Write IO
+        all_mounts_procs = []
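+        # Stagger --dirname-start-num per mount so each client writes a
+        # non-overlapping directory tree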
+        count = 1
+        for mount_obj in self.mounts:
+            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
+                       mount_obj.mountpoint)
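+            # create_deep_dirs_with_files (from file_dir_ops.py) builds a
+            # directory tree of the given depth and breadth and fills it
+            # with files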
+            cmd = ("python %s create_deep_dirs_with_files "
+                   "--dirname-start-num %d "
+                   "--dir-depth 2 "
+                   "--dir-length 10 "
+                   "--max-num-of-dirs 5 "
+                   "--num-of-files 5 %s" % (self.script_upload_path, count,
+                                            mount_obj.mountpoint))
+            proc = g.run_async(mount_obj.client_system, cmd,
+                               user=mount_obj.user)
+            all_mounts_procs.append(proc)
+            count = count + 10
+
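+        # NOTE: shrink_volume is expected to pick a removable subvolume,
+        # run remove-brick start, wait for the data migration to finish
+        # and then commit the removal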
+        # Start remove-brick (subvolume decrease)
+        g.log.info("Start removing bricks from volume")
+        ret = shrink_volume(self.mnode, self.volname)
+        self.assertTrue(ret, "Remove brick operation failed on "
+                        "volume %s" % self.volname)
+        g.log.info("Remove brick operation is successful on "
+                   "volume %s", self.volname)
+
+        # Log volume info and status after shrinking the volume
+        g.log.info("Logging volume info and status after shrinking volume")
+        ret = log_volume_info_and_status(self.mnode, self.volname)
+        self.assertTrue(ret, "Logging volume info and status failed on "
+                        "volume %s" % self.volname)
+        g.log.info("Successful in logging volume info and status of "
+                   "volume %s", self.volname)
+
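+        # The brick layout changed above, so confirm the remaining brick
+        # and auxiliary processes (e.g. self-heal daemon) are back online
+        # before trusting the IO results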
+        # Wait for volume processes to be online
+        g.log.info("Wait for volume processes to be online")
+        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
+        self.assertTrue(ret, "Few processes of volume %s are not online"
+                        % self.volname)
+        g.log.info("All volume %s processes are now online", self.volname)
+
+        # Validate the in-flight IO and wait for it to complete
+        g.log.info("Validating IO on mounts")
+        ret = validate_io_procs(all_mounts_procs, self.mounts)
+        self.assertTrue(ret, "IO failed on some of the clients")
+        g.log.info("Successfully validated all IO")
+
+        # Start IO on all mounts again after the remove-brick (and its
+        # rebalance) completes
+        all_mounts_procs = []
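+        # Continue the --dirname-start-num sequence beyond the first round
+        # (which used 1 and 11, assuming the usual two-client setup) to
+        # avoid directory-name collisions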
+        count = 21
+        for mount_obj in self.mounts:
+            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
+                       mount_obj.mountpoint)
+            cmd = ("python %s create_deep_dirs_with_files "
+                   "--dirname-start-num %d "
+                   "--dir-depth 2 "
+                   "--dir-length 10 "
+                   "--max-num-of-dirs 5 "
+                   "--num-of-files 5 %s" % (self.script_upload_path, count,
+                                            mount_obj.mountpoint))
+            proc = g.run_async(mount_obj.client_system, cmd,
+                               user=mount_obj.user)
+            all_mounts_procs.append(proc)
+            count = count + 10
+
+        # Validate the second round of IO
+        g.log.info("Validating IO on mounts")
+        ret = validate_io_procs(all_mounts_procs, self.mounts)
+        self.assertTrue(ret, "IO failed on some of the clients")
+        g.log.info("Successfully validated all IO")
+
+    def tearDown(self):
+        """
+        Clean up the volume and unmount it from the clients
+        """
+
+        # Unmount volume from clients
+        # The test needs to continue even if unmount fails, so no assert here
+        ret = self.unmount_volume(self.mounts)
+        if ret:
+            g.log.info("Successfully unmounted all the volumes")
+        else:
+            g.log.error("Failed to unmount volumes")
+
+        # Cleanup volume
+        ret = self.cleanup_volume()
+        if not ret:
+            raise ExecutionError("Failed to Cleanup Volume")
+        g.log.info("Cleanup volume %s Completed Successfully", self.volname)