author    root <root@localhost.localdomain>  2017-12-29 12:10:53 +0530
committer Vitalii Koriakov <vkoriako@redhat.com>  2018-05-11 12:52:06 +0300
commit    615d7886ee4d8cd6b9b9ab21e4e2b486fb4bb2fd (patch)
tree      8ad18690c98cbb6154fbf1f3519a1db6502e7488 /tests/functional/arbiter
parent    8055694d4b956ba2cae9b3a6fb9ae2168f20475f (diff)
TestCase: Testing VolumeType change from replicated to Arbiter volume along
with volume operations add-brick, remove-brick, replace-brick post volume
type change

Change-Id: I44a1ff6fab3228736ae9c83fe67b16c2e8c40adc
Signed-off-by: Karan Sandha <ksandha@redhat.com>
Signed-off-by: Vitalii Koriakov <vkoriako@redhat.com>
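In outline, the patch converts a plain replica volume to an arbiter volume and then exercises add-brick/rebalance, replace-brick and remove-brick against it. A condensed sketch of that flow, reusing only the glustolibs calls and arguments that appear in the patch below (mnode, volname, servers and all_servers_info stand in for the test fixtures):

    # Condensed sketch of the test flow; calls and arguments mirror the patch.
    from glustolibs.gluster.volume_libs import (
        expand_volume, shrink_volume, replace_brick_from_volume)
    from glustolibs.gluster.rebalance_ops import (
        rebalance_start, wait_for_rebalance_to_complete)
    from glustolibs.gluster.heal_libs import monitor_heal_completion

    # 1. replica 2 -> arbiter: add an arbiter brick to each replica set
    expand_volume(mnode, volname, servers[2:], all_servers_info,
                  replica_count=1, arbiter_count=1)
    monitor_heal_completion(mnode, volname)

    # 2. add-brick (subvolume increase) followed by a rebalance
    expand_volume(mnode, volname, servers, all_servers_info)
    rebalance_start(mnode, volname)
    wait_for_rebalance_to_complete(mnode, volname)

    # 3. replace-brick, wait for heal, then remove-brick (shrink)
    replace_brick_from_volume(mnode, volname, servers, all_servers_info)
    monitor_heal_completion(mnode, volname)
    shrink_volume(mnode, volname)

In the test proper, each step is interleaved with status logging, waits for volume processes to come online, and IO validation on the mounts.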
Diffstat (limited to 'tests/functional/arbiter')
-rwxr-xr-x  tests/functional/arbiter/brick_cases/test_brickcases.py  394
1 file changed, 394 insertions, 0 deletions
diff --git a/tests/functional/arbiter/brick_cases/test_brickcases.py b/tests/functional/arbiter/brick_cases/test_brickcases.py
new file mode 100755
index 0000000..5d3a038
--- /dev/null
+++ b/tests/functional/arbiter/brick_cases/test_brickcases.py
@@ -0,0 +1,394 @@
+# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+""" Test Arbiter Specific Cases"""
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import (GlusterBaseClass, runs_on)
+from glustolibs.gluster.volume_libs import (
+ log_volume_info_and_status, replace_brick_from_volume,
+ expand_volume, wait_for_volume_process_to_be_online,
+ verify_all_process_of_volume_are_online, shrink_volume)
+from glustolibs.gluster.rebalance_ops import (
+ rebalance_start, rebalance_status, wait_for_rebalance_to_complete)
+from glustolibs.gluster.heal_libs import monitor_heal_completion
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.io.utils import (validate_io_procs,
+ list_all_files_and_dirs_mounts,
+ wait_for_io_to_complete)
+from glustolibs.misc.misc_libs import upload_scripts
+
+
+@runs_on([['replicated', 'distributed-replicated'],
+ ['glusterfs', 'cifs', 'nfs']])
+class GlusterArbiterVolumeTypeChangeClass(GlusterBaseClass):
+ """Class for testing Volume Type Change from replicated to
+ Arbitered volume
+ """
+ @classmethod
+ def setUpClass(cls):
+ # Calling GlusterBaseClass setUpClass
+ GlusterBaseClass.setUpClass.im_func(cls)
+
+ # Overriding the volume type to specifically test the volume type
+ # change from replicated to arbiter
+ if cls.volume_type == "replicated":
+ cls.volume['voltype'] = {
+ 'type': 'replicated',
+ 'replica_count': 2,
+ 'dist_count': 1,
+ 'transport': 'tcp'}
+
+ if cls.volume_type == "distributed-replicated":
+ cls.volume['voltype'] = {
+ 'type': 'distributed-replicated',
+ 'dist_count': 2,
+ 'replica_count': 2,
+ 'transport': 'tcp'}
+
+ # Upload io scripts for running IO on mounts
+ g.log.info("Upload io scripts to clients %s for running IO on mounts",
+ cls.clients)
+ script_local_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(cls.clients, [script_local_path])
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts to clients %s"
+ % cls.clients)
+ g.log.info("Successfully uploaded IO scripts to clients %s",
+ cls.clients)
+
+ cls.counter = 1
+        # int: Value of counter is used for the --dirname-start-num argument
+        # of file_dir_ops.py create_deep_dirs_with_files.
+
+        # The --dir-length argument of file_dir_ops.py
+        # create_deep_dirs_with_files controls how many top level dirs each
+        # mount creates (refer to the cmds in the test method; it is set to
+        # 15 for the first IO run and 35 for the second). For every
+        # mountpoint/testcase to create a new set of dirs, the counter is
+        # incremented by the --dir-length value used for that run.
+
+        # If the --dir-length is changed to a new value, ensure the counter
+        # is also incremented by that value so a new set of files/dirs is
+        # created.
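+        # A hypothetical illustration of the scheme: with --dir-length 15
+        # and two mounts, mount #1 runs create_deep_dirs_with_files with
+        # --dirname-start-num 1 and mount #2 with --dirname-start-num 16,
+        # so each mount (and each later run) works on a disjoint set of
+        # top level directories.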
+
+ def setUp(self):
+ """
+ - Setup Volume and Mount Volume
+ """
+        # Calling GlusterBaseClass setUp
+        GlusterBaseClass.setUp.im_func(self)
+
+ self.all_mounts_procs = []
+ self.io_validation_complete = False
+
+ # Setup Volume and Mount Volume
+ g.log.info("Starting to Setup Volume and Mount Volume")
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
+ g.log.info("Successful in Setup Volume and Mount Volume")
+
+ def tearDown(self):
+ """If test method failed before validating IO, tearDown waits for the
+ IO's to complete and checks for the IO exit status
+
+ Unmount Volume and Cleanup Volume
+ """
+ # Wait for IO to complete if io validation is not executed in the
+ # test method
+ if not self.io_validation_complete:
+ g.log.info("Wait for IO to complete as IO validation did not "
+ "succeed in test method")
+ ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
+ if not ret:
+ raise ExecutionError("IO failed on some of the clients")
+ g.log.info("IO is successful on all mounts")
+
+ # List all files and dirs created
+ g.log.info("List all files and directories:")
+ ret = list_all_files_and_dirs_mounts(self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to list all files and dirs")
+ g.log.info("Listing all files and directories is successful")
+
+ # Unmount Volume and Cleanup Volume
+ g.log.info("Starting to Unmount Volume and Cleanup Volume")
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Unmount Volume and Cleanup Volume")
+ g.log.info("Successful in Unmount Volume and Cleanup Volume")
+
+ # Calling GlusterBaseClass tearDown
+ GlusterBaseClass.tearDown.im_func(self)
+
+ def test_replicated_to_arbiter_volume_change_with_volume_ops(self):
+ """
+ - Change the volume type from replicated to arbiter
+        - Perform add-brick and rebalance on the arbiter volume
+        - Perform replace-brick on the arbiter volume
+        - Perform remove-brick on the arbiter volume
+ """
+ # pylint: disable=too-many-statements
+
+ # Start IO on mounts
+ g.log.info("Starting IO on all mounts...")
+ self.all_mounts_procs = []
+ for mount_obj in self.mounts:
+ g.log.info("Starting IO on %s:%s", mount_obj.client_system,
+ mount_obj.mountpoint)
+ cmd = ("python %s create_deep_dirs_with_files "
+ "--dirname-start-num %d "
+ "--dir-depth 2 "
+ "--dir-length 15 "
+ "--max-num-of-dirs 5 "
+ "--num-of-files 5 %s" % (self.script_upload_path,
+ self.counter,
+ mount_obj.mountpoint))
+ proc = g.run_async(mount_obj.client_system, cmd,
+ user=mount_obj.user)
+ self.all_mounts_procs.append(proc)
+            # Advance by the --dir-length used above so runs don't overlap
+            self.counter = self.counter + 15
+ self.io_validation_complete = False
+
+ # Validate IO
+ g.log.info("Wait for IO to complete and validate IO ...")
+ ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.io_validation_complete = True
+ self.assertTrue(ret, "IO failed on some of the clients")
+ g.log.info("IO is successful on all mounts")
+
+ # Adding bricks to make an Arbiter Volume
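+        # Assumption (not stated in the patch): expand_volume() with
+        # replica_count=1 and arbiter_count=1 adds one brick per replica
+        # set, roughly equivalent to the CLI
+        #     gluster volume add-brick <volname> replica 3 arbiter 1 \
+        #         <server>:<brick>
+        # which converts the replica 2 volume into an arbiter volume.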
+ g.log.info("Adding bricks to convert to Arbiter Volume")
+ ret = expand_volume(self.mnode, self.volname, self.servers[2:],
+ self.all_servers_info, replica_count=1,
+ arbiter_count=1)
+ self.assertTrue(ret, ("Failed to expand the volume %s", self.volname))
+ g.log.info("Changing volume to arbiter volume is successfull %s",
+ self.volname)
+
+ # Log Volume Info and Status after changing the volume type from
+        # replicated to arbiter
+        g.log.info("Logging volume info and status after changing to "
+                   "arbiter volume")
+ ret = log_volume_info_and_status(self.mnode, self.volname)
+ self.assertTrue(ret, ("Logging volume info and status failed on "
+ "volume %s", self.volname))
+ g.log.info("Successful in logging volume info and status of volume %s",
+ self.volname)
+
+ # Wait for volume processes to be online
+ g.log.info("Wait for volume processes to be online")
+ ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
+ self.assertTrue(ret, ("Failed to wait for volume %s processes to "
+ "be online", self.volname))
+ g.log.info("Successful in waiting for volume %s processes to be "
+ "online", self.volname)
+
+ # Verifying all bricks online
+ g.log.info("Verifying volume's all process are online")
+ ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
+ self.assertTrue(ret, ("Volume %s : All process are not online",
+ self.volname))
+ g.log.info("Volume %s : All process are online", self.volname)
+
+        # Check that heals finish after changing the volume type
+        # from replicated to arbiter volume
+        g.log.info("Wait for self-heal to complete after changing the "
+                   "volume type from replicated to arbiter volume")
+        ret = monitor_heal_completion(self.mnode, self.volname)
+        self.assertTrue(ret, ("Self-heal didn't complete even after waiting "
+                              "for 20 minutes."))
+        g.log.info("Self-heal is successful after changing the volume type "
+                   "from replicated to arbiter volume")
+
+ # Start IO on mounts
+ g.log.info("Starting IO on all mounts...")
+ self.all_mounts_procs = []
+ for mount_obj in self.mounts:
+ g.log.info("Starting IO on %s:%s", mount_obj.client_system,
+ mount_obj.mountpoint)
+ cmd = ("python %s create_deep_dirs_with_files "
+ "--dirname-start-num %d "
+ "--dir-depth 2 "
+ "--dir-length 35 "
+ "--max-num-of-dirs 5 "
+ "--num-of-files 5 %s" % (self.script_upload_path,
+ self.counter,
+ mount_obj.mountpoint))
+ proc = g.run_async(mount_obj.client_system, cmd,
+ user=mount_obj.user)
+ self.all_mounts_procs.append(proc)
+            # Advance by the --dir-length used above so runs don't overlap
+            self.counter = self.counter + 35
+ self.io_validation_complete = False
+
+ # Log Volume Info and Status before expanding the volume.
+ g.log.info("Logging volume info and Status before expanding volume")
+ ret = log_volume_info_and_status(self.mnode, self.volname)
+ self.assertTrue(ret, ("Logging volume info and status failed on "
+ "volume %s", self.volname))
+ g.log.info("Successful in logging volume info and status of volume %s",
+ self.volname)
+
+ # Start add-brick (subvolume-increase)
+ g.log.info("Start adding bricks to volume when IO in progress")
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, ("Failed to expand the volume when IO in "
+ "progress on volume %s", self.volname))
+ g.log.info("Expanding volume when IO in progress is successful on "
+ "volume %s", self.volname)
+
+ # Log Volume Info and Status after expanding the volume
+ g.log.info("Logging volume info and Status after expanding volume")
+ ret = log_volume_info_and_status(self.mnode, self.volname)
+ self.assertTrue(ret, ("Logging volume info and status failed on "
+ "volume %s", self.volname))
+ g.log.info("Successful in logging volume info and status of volume %s",
+ self.volname)
+
+ # Wait for volume processes to be online
+ g.log.info("Wait for volume processes to be online")
+ ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
+ self.assertTrue(ret, ("Failed to wait for volume %s processes to "
+ "be online", self.volname))
+ g.log.info("Successful in waiting for volume %s processes to be "
+ "online", self.volname)
+
+        # Verify all processes of the volume are online
+        g.log.info("Verifying all processes of the volume are online")
+        ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
+        self.assertTrue(ret, ("Volume %s : All processes are not online",
+                              self.volname))
+        g.log.info("Volume %s : All processes are online", self.volname)
+
+ # Start Rebalance
+ g.log.info("Starting Rebalance on the volume")
+ ret, _, _ = rebalance_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, ("Failed to start rebalance on the volume "
+ "%s", self.volname))
+ g.log.info("Successfully started rebalance on the volume %s",
+ self.volname)
+
+ # Log Rebalance status
+ g.log.info("Log Rebalance status")
+ _, _, _ = rebalance_status(self.mnode, self.volname)
+
+ # Wait for rebalance to complete
+ g.log.info("Waiting for rebalance to complete")
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
+ self.assertTrue(ret, "Rebalance did not start "
+ "despite waiting for 5 mins")
+ g.log.info("Rebalance is successfully complete on the volume %s",
+ self.volname)
+
+ # Log Volume Info and Status before replacing brick from the volume.
+ g.log.info("Logging volume info and Status before replacing brick "
+ "from the volume %s", self.volname)
+ ret = log_volume_info_and_status(self.mnode, self.volname)
+ self.assertTrue(ret, ("Logging volume info and status failed on "
+ "volume %s", self.volname))
+ g.log.info("Successful in logging volume info and status of volume %s",
+ self.volname)
+
+ # Replace brick from a sub-volume
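+        # Assumption (not stated in the patch): replace_brick_from_volume()
+        # swaps one brick for a freshly formed one, roughly the CLI
+        #     gluster volume replace-brick <volname> <old-brick> \
+        #         <new-brick> commit force
+        # after which self-heal repopulates the new brick.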
+ g.log.info("Replace a faulty brick from the volume")
+ ret = replace_brick_from_volume(self.mnode, self.volname,
+ self.servers, self.all_servers_info)
+ self.assertTrue(ret, "Failed to replace faulty brick from the volume")
+ g.log.info("Successfully replaced faulty brick from the volume")
+
+ # Log Volume Info and Status after replacing the brick
+ g.log.info("Logging volume info and Status after replacing brick "
+ "from the volume %s", self.volname)
+ ret = log_volume_info_and_status(self.mnode, self.volname)
+ self.assertTrue(ret, ("Logging volume info and status failed on "
+ "volume %s", self.volname))
+ g.log.info("Successful in logging volume info and status of volume %s",
+ self.volname)
+
+ # Wait for volume processes to be online
+ g.log.info("Wait for volume processes to be online")
+ ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
+ self.assertTrue(ret, ("Failed to wait for volume %s processes to "
+ "be online", self.volname))
+ g.log.info("Successful in waiting for volume %s processes to be "
+ "online", self.volname)
+
+        # Verify all processes of the volume are online
+        g.log.info("Verifying all processes of the volume are online")
+        ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
+        self.assertTrue(ret, ("Volume %s : All processes are not online",
+                              self.volname))
+        g.log.info("Volume %s : All processes are online", self.volname)
+
+ # Wait for self-heal to complete
+ g.log.info("Wait for self-heal to complete")
+ ret = monitor_heal_completion(self.mnode, self.volname)
+ self.assertTrue(ret, "Self heal didn't complete even after waiting "
+ "for 20 minutes. 20 minutes is too much a time for "
+ "current test workload")
+ g.log.info("self-heal is successful after replace-brick operation")
+
+ # Log Volume Info and Status before shrinking the volume.
+ g.log.info("Logging volume info and Status before shrinking volume")
+ ret = log_volume_info_and_status(self.mnode, self.volname)
+ self.assertTrue(ret, ("Logging volume info and status failed on "
+ "volume %s", self.volname))
+ g.log.info("Successful in logging volume info and status of volume %s",
+ self.volname)
+
+ # Shrinking volume by removing bricks from volume when IO in progress
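+        # Assumption (not stated in the patch): shrink_volume() removes one
+        # subvolume (replica set), roughly the CLI sequence
+        #     gluster volume remove-brick <volname> <bricks...> start
+        #     gluster volume remove-brick <volname> <bricks...> commit
+        # with data rebalanced off the removed bricks before the commit.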
+ g.log.info("Start removing bricks from volume when IO in progress")
+ ret = shrink_volume(self.mnode, self.volname)
+ self.assertTrue(ret, ("Failed to shrink the volume when IO in "
+ "progress on volume %s", self.volname))
+ g.log.info("Shrinking volume when IO in progress is successful on "
+ "volume %s", self.volname)
+
+ # Log Volume Info and Status after shrinking the volume
+ g.log.info("Logging volume info and Status after shrinking volume")
+ ret = log_volume_info_and_status(self.mnode, self.volname)
+ self.assertTrue(ret, ("Logging volume info and status failed on "
+ "volume %s", self.volname))
+ g.log.info("Successful in logging volume info and status of volume %s",
+ self.volname)
+
+ # Wait for volume processes to be online
+ g.log.info("Wait for volume processes to be online")
+ ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
+ self.assertTrue(ret, ("Failed to wait for volume %s processes to "
+ "be online", self.volname))
+ g.log.info("Successful in waiting for volume %s processes to be "
+ "online", self.volname)
+
+        # Verify all processes of the volume are online
+        g.log.info("Verifying all processes of the volume are online after "
+                   "shrinking the volume")
+        ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
+        self.assertTrue(ret, ("Volume %s : All processes are not online",
+                              self.volname))
+        g.log.info("Volume %s : All processes are online after shrinking "
+                   "the volume", self.volname)
+
+ # Validate IO
+ g.log.info("Wait for IO to complete and validate IO ...")
+ ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(ret, "IO failed on some of the clients")
+ self.io_validation_complete = True
+ g.log.info("IO is successful on all mounts")