author    | Nigel Babu <nigelb@redhat.com> | 2018-03-05 15:49:23 +0530
committer | Nigel Babu <nigelb@redhat.com> | 2018-03-27 16:05:15 +0530
commit    | fb5145be2db1a7c96b008af8a40e3b7b18df9673
tree      | 3ca087e0996bfd975e97b4f0235421a37d2e4767 /tests/functional/afr
parent    | 8804c9499e9ed0d37823dc55d03eb7792907cf0b
Fix up coding style issues in tests
Change-Id: I14609030983d4485dbce5a4ffed1e0353e3d1bc7
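
Most of the hunks below apply the same handful of mechanical fixes: `g.log` calls stop pre-formatting messages with `%` and instead hand the arguments to the logger, unused values unpacked from calls such as `g.run()` and `rebalance_start()` are bound to `_`, class-body triple-quoted strings that served as comments become `#` comments, per-class setup is folded into `setUp`/`tearDown`, and constants such as `GLUSTERSHD` are renamed to lower case. The sketch below illustrates the two most frequent patterns; it is not code from this patch — it uses the stdlib `logging` module and a stub `run()` helper as stand-ins for glusto's `g.log` and `g.run`, and the volume name and command are illustrative only.

```python
import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)

volname = "testvol"

# Before: eager %-formatting builds the message string even when the
# record is discarded, and pylint flags it as logging-not-lazy.
log.info("Volume %s : All process are online" % volname)

# After: pass the arguments to the logger so formatting is deferred.
log.info("Volume %s : All process are online", volname)


def run(cmd):
    """Stub standing in for g.run(): returns (retcode, stdout, stderr)."""
    return 0, "file0.txt\nfile1.txt\n", ""


# Before: ``out`` and ``err`` are bound but never used (unused-variable).
ret, out, err = run("ls /bricks/brick0")

# After: keep only what is used and bind the rest to ``_``.
ret, file_list, _ = run("ls /bricks/brick0")
if ret == 0:
    log.info("First entry on the brick: %s", file_list.splitlines()[0])
```

Deferred formatting matters when a test run logs at WARNING or above: the message string is only built if the record is actually emitted. These two substitutions account for most of the inserted and deleted lines reported in the diffstat below.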
Diffstat (limited to 'tests/functional/afr')
-rw-r--r-- | tests/functional/afr/heal/__init__.py | 0
-rw-r--r-- | tests/functional/afr/heal/test_heal_info_while_accessing_file.py (renamed from tests/functional/afr/test_heal_info_while_accessing_file.py) | 50
-rw-r--r-- [-rwxr-xr-x] | tests/functional/afr/heal/test_self_heal.py | 150
-rw-r--r-- | tests/functional/afr/heal/test_self_heal_daemon_process.py (renamed from tests/functional/afr/test_self_heal_daemon_process.py) | 144
-rw-r--r-- | tests/functional/afr/test_client_side_quorum.py | 158
5 files changed, 229 insertions(+), 273 deletions(-)
diff --git a/tests/functional/afr/heal/__init__.py b/tests/functional/afr/heal/__init__.py new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/tests/functional/afr/heal/__init__.py diff --git a/tests/functional/afr/test_heal_info_while_accessing_file.py b/tests/functional/afr/heal/test_heal_info_while_accessing_file.py index 316880318..965adbdc1 100644 --- a/tests/functional/afr/test_heal_info_while_accessing_file.py +++ b/tests/functional/afr/heal/test_heal_info_while_accessing_file.py @@ -44,8 +44,8 @@ class TestSelfHeal(GlusterBaseClass): GlusterBaseClass.setUpClass.im_func(cls) # Upload io scripts for running IO on mounts - g.log.info("Upload io scripts to clients %s for running IO on mounts" - % cls.clients) + g.log.info("Upload io scripts to clients %s for running IO on mounts", + cls.clients) script_local_path = ("/usr/share/glustolibs/io/scripts/" "file_dir_ops.py") cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/" @@ -54,23 +54,22 @@ class TestSelfHeal(GlusterBaseClass): if not ret: raise ExecutionError("Failed to upload IO scripts to clients %s" % cls.clients) - g.log.info("Successfully uploaded IO scripts to clients %s" - % cls.clients) + g.log.info("Successfully uploaded IO scripts to clients %s", + cls.clients) cls.counter = 1 - """int: Value of counter is used for dirname-start-num argument for - file_dir_ops.py create_deep_dirs_with_files. - - The --dir-length argument value for - file_dir_ops.py create_deep_dirs_with_files is set to 10 - (refer to the cmd in setUp method). This means every mount will create - 10 top level dirs. For every mountpoint/testcase to create new set of - dirs, we are incrementing the counter by --dir-length value i.e 10 - in this test suite. - - If we are changing the --dir-length to new value, ensure the counter - is also incremented by same value to create new set of files/dirs. - """ + # int: Value of counter is used for dirname-start-num argument for + # file_dir_ops.py create_deep_dirs_with_files. + + # The --dir-length argument value for file_dir_ops.py + # create_deep_dirs_with_files is set to 10 (refer to the cmd in setUp + # method). This means every mount will create + # 10 top level dirs. For every mountpoint/testcase to create new set of + # dirs, we are incrementing the counter by --dir-length value i.e 10 in + # this test suite. + + # If we are changing the --dir-length to new value, ensure the counter + # is also incremented by same value to create new set of files/dirs. def setUp(self): # Calling GlusterBaseClass setUp @@ -135,7 +134,7 @@ class TestSelfHeal(GlusterBaseClass): # Bring 1-st brick offline brick_to_bring_offline = [self.bricks_list[0]] - g.log.info('Bringing bricks %s offline...' 
% brick_to_bring_offline) + g.log.info('Bringing bricks %s offline...', brick_to_bring_offline) ret = bring_bricks_offline(self.volname, brick_to_bring_offline) self.assertTrue(ret, 'Failed to bring bricks %s offline' % brick_to_bring_offline) @@ -144,13 +143,13 @@ class TestSelfHeal(GlusterBaseClass): brick_to_bring_offline) self.assertTrue(ret, 'Bricks %s are not offline' % brick_to_bring_offline) - g.log.info('Bringing bricks %s offline is successful' - % brick_to_bring_offline) + g.log.info('Bringing bricks %s offline is successful', + brick_to_bring_offline) # Creating files on client side for mount_obj in self.mounts: - g.log.info("Generating data for %s:%s" - % (mount_obj.client_system, mount_obj.mountpoint)) + g.log.info("Generating data for %s:%s", + mount_obj.client_system, mount_obj.mountpoint) # Creating files cmd = ("python %s create_files -f 100 %s" @@ -184,7 +183,7 @@ class TestSelfHeal(GlusterBaseClass): # Get first brick server and brick path # and get first file from filelist subvol_mnode, mnode_brick = subvol_without_offline_brick[0].split(':') - ret, file_list, err = g.run(subvol_mnode, 'ls %s' % mnode_brick) + ret, file_list, _ = g.run(subvol_mnode, 'ls %s' % mnode_brick) file_to_edit = file_list.splitlines()[0] # Access and modify the file @@ -200,9 +199,8 @@ class TestSelfHeal(GlusterBaseClass): proc = g.run_async(mount_obj.client_system, cmd, user=mount_obj.user) self.all_mounts_procs.append(proc) - g.log.info("IO on %s:%s is modified successfully" - % (mount_obj.client_system, - mount_obj.mountpoint)) + g.log.info("IO on %s:%s is modified successfully", + mount_obj.client_system, mount_obj.mountpoint) self.io_validation_complete = False # Get entries while accessing file diff --git a/tests/functional/afr/heal/test_self_heal.py b/tests/functional/afr/heal/test_self_heal.py index 7837d958c..b2e52e392 100755..100644 --- a/tests/functional/afr/heal/test_self_heal.py +++ b/tests/functional/afr/heal/test_self_heal.py @@ -55,8 +55,8 @@ class TestSelfHeal(GlusterBaseClass): GlusterBaseClass.setUpClass.im_func(cls) # Upload io scripts for running IO on mounts - g.log.info("Upload io scripts to clients %s for running IO on mounts" - % cls.clients) + g.log.info("Upload io scripts to clients %s for running IO on mounts", + cls.clients) script_local_path = ("/usr/share/glustolibs/io/scripts/" "file_dir_ops.py") cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/" @@ -65,23 +65,22 @@ class TestSelfHeal(GlusterBaseClass): if not ret: raise ExecutionError("Failed to upload IO scripts to clients %s" % cls.clients) - g.log.info("Successfully uploaded IO scripts to clients %s" - % cls.clients) + g.log.info("Successfully uploaded IO scripts to clients %s", + cls.clients) cls.counter = 1 - """int: Value of counter is used for dirname-start-num argument for - file_dir_ops.py create_deep_dirs_with_files. - - The --dir-length argument value for - file_dir_ops.py create_deep_dirs_with_files is set to 10 - (refer to the cmd in setUp method). This means every mount will create - 10 top level dirs. For every mountpoint/testcase to create new set of - dirs, we are incrementing the counter by --dir-length value i.e 10 - in this test suite. - - If we are changing the --dir-length to new value, ensure the counter - is also incremented by same value to create new set of files/dirs. - """ + # int: Value of counter is used for dirname-start-num argument for + # file_dir_ops.py create_deep_dirs_with_files. 
+ + # The --dir-length argument value for file_dir_ops.py + # create_deep_dirs_with_files is set to 10 (refer to the cmd in setUp + # method). This means every mount will create + # 10 top level dirs. For every mountpoint/testcase to create new set of + # dirs, we are incrementing the counter by --dir-length value i.e 10 + # in this test suite. + + # If we are changing the --dir-length to new value, ensure the counter + # is also incremented by same value to create new set of files/dirs. def setUp(self): # Calling GlusterBaseClass setUp @@ -129,13 +128,6 @@ class TestSelfHeal(GlusterBaseClass): # Calling GlusterBaseClass teardown GlusterBaseClass.tearDown.im_func(self) - @classmethod - def tearDownClass(cls): - """tearDownClass. This will be executed once per class. - """ - # Calling GlusterBaseClass tearDownClass. - GlusterBaseClass.tearDownClass.im_func(cls) - def test_data_self_heal_daemon_off(self): """ Test Data-Self-Heal (heal command) @@ -166,22 +158,22 @@ class TestSelfHeal(GlusterBaseClass): in cycle - validate IO """ + # pylint: disable=too-many-statements # Setting options g.log.info('Setting options...') options = {"metadata-self-heal": "off", "entry-self-heal": "off", - "data-self-heal": "off", - } + "data-self-heal": "off"} ret = set_volume_options(self.mnode, self.volname, options) self.assertTrue(ret, 'Failed to set options %s' % options) - g.log.info("Successfully set %s for volume %s" - % (options, self.volname)) + g.log.info("Successfully set %s for volume %s", + options, self.volname) # Creating files on client side for mount_obj in self.mounts: - g.log.info("Generating data for %s:%s" - % (mount_obj.client_system, mount_obj.mountpoint)) + g.log.info("Generating data for %s:%s", + mount_obj.client_system, mount_obj.mountpoint) # Create files g.log.info('Creating files...') command = ("python %s create_files -f 100 --fixed-file-size 1k %s" @@ -217,12 +209,12 @@ class TestSelfHeal(GlusterBaseClass): bricks_to_bring_offline_dict = (select_bricks_to_bring_offline( self.mnode, self.volname)) bricks_to_bring_offline = filter(None, ( - bricks_to_bring_offline_dict['hot_tier_bricks'] + - bricks_to_bring_offline_dict['cold_tier_bricks'] + - bricks_to_bring_offline_dict['volume_bricks'])) + bricks_to_bring_offline_dict['hot_tier_bricks'] + + bricks_to_bring_offline_dict['cold_tier_bricks'] + + bricks_to_bring_offline_dict['volume_bricks'])) # Bring brick offline - g.log.info('Bringing bricks %s offline...' 
% bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline...', bricks_to_bring_offline) ret = bring_bricks_offline(self.volname, bricks_to_bring_offline) self.assertTrue(ret, 'Failed to bring bricks %s offline' % bricks_to_bring_offline) @@ -231,8 +223,8 @@ class TestSelfHeal(GlusterBaseClass): bricks_to_bring_offline) self.assertTrue(ret, 'Bricks %s are not offline' % bricks_to_bring_offline) - g.log.info('Bringing bricks %s offline is successful' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline is successful', + bricks_to_bring_offline) # Get areequal after getting bricks offline g.log.info('Getting areequal after getting bricks offline...') @@ -252,8 +244,8 @@ class TestSelfHeal(GlusterBaseClass): # Modify the data self.all_mounts_procs = [] for mount_obj in self.mounts: - g.log.info("Modifying data for %s:%s" % - (mount_obj.client_system, mount_obj.mountpoint)) + g.log.info("Modifying data for %s:%s", mount_obj.client_system, + mount_obj.mountpoint) # Create files g.log.info('Creating files...') command = ("python %s create_files -f 100 --fixed-file-size 10k %s" @@ -272,13 +264,13 @@ class TestSelfHeal(GlusterBaseClass): g.log.info("IO is successful on all mounts") # Bring brick online - g.log.info('Bringing bricks %s online...' % bricks_to_bring_offline) + g.log.info('Bringing bricks %s online...', bricks_to_bring_offline) ret = bring_bricks_online(self.mnode, self.volname, bricks_to_bring_offline) self.assertTrue(ret, 'Failed to bring bricks %s online' % bricks_to_bring_offline) - g.log.info('Bringing bricks %s online is successful' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s online is successful', + bricks_to_bring_offline) # Setting options g.log.info('Setting options...') @@ -300,7 +292,7 @@ class TestSelfHeal(GlusterBaseClass): ret = verify_all_process_of_volume_are_online(self.mnode, self.volname) self.assertTrue(ret, ("Volume %s : All process are not online" % self.volname)) - g.log.info("Volume %s : All process are online" % self.volname) + g.log.info("Volume %s : All process are online", self.volname) # Wait for self-heal-daemons to be online g.log.info("Waiting for self-heal-daemons to be online") @@ -333,10 +325,10 @@ class TestSelfHeal(GlusterBaseClass): self.all_servers_info) self.assertTrue(ret, ("Failed to expand the volume %s", self.volname)) g.log.info("Expanding volume is successful on " - "volume %s" % self.volname) + "volume %s", self.volname) # Do rebalance - ret, out, err = rebalance_start(self.mnode, self.volname) + ret, _, _ = rebalance_start(self.mnode, self.volname) self.assertEqual(ret, 0, 'Failed to start rebalance') g.log.info('Rebalance is started') @@ -347,8 +339,8 @@ class TestSelfHeal(GlusterBaseClass): # Create 1k files self.all_mounts_procs = [] for mount_obj in self.mounts: - g.log.info("Modifying data for %s:%s" % - (mount_obj.client_system, mount_obj.mountpoint)) + g.log.info("Modifying data for %s:%s", mount_obj.client_system, + mount_obj.mountpoint) # Create files g.log.info('Creating files...') command = ("python %s create_files -f 1000 %s" @@ -363,7 +355,7 @@ class TestSelfHeal(GlusterBaseClass): bricks_list = get_all_bricks(self.mnode, self.volname) for brick in bricks_list: # Bring brick offline - g.log.info('Bringing bricks %s offline' % brick) + g.log.info('Bringing bricks %s offline', brick) ret = bring_bricks_offline(self.volname, [brick]) self.assertTrue(ret, 'Failed to bring bricks %s offline' % brick) @@ -371,17 +363,17 @@ class TestSelfHeal(GlusterBaseClass): [brick]) 
self.assertTrue(ret, 'Bricks %s are not offline' % brick) - g.log.info('Bringing bricks %s offline is successful' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline is successful', + bricks_to_bring_offline) # Bring brick online - g.log.info('Bringing bricks %s online...' % brick) + g.log.info('Bringing bricks %s online...', brick) ret = bring_bricks_online(self.mnode, self.volname, [brick]) self.assertTrue(ret, 'Failed to bring bricks %s online' % bricks_to_bring_offline) - g.log.info('Bringing bricks %s online is successful' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s online is successful', + bricks_to_bring_offline) # Wait for volume processes to be online g.log.info("Wait for volume processes to be online") @@ -398,7 +390,7 @@ class TestSelfHeal(GlusterBaseClass): self.volname) self.assertTrue(ret, ("Volume %s : All process are not online" % self.volname)) - g.log.info("Volume %s : All process are online" % self.volname) + g.log.info("Volume %s : All process are online", self.volname) # Wait for self-heal-daemons to be online g.log.info("Waiting for self-heal-daemons to be online") @@ -442,13 +434,13 @@ class TestSelfHeal(GlusterBaseClass): - get areequal after getting bricks online and compare with arequal before bringing bricks online """ + # pylint: disable=too-many-statements # Setting options g.log.info('Setting options...') options = {"metadata-self-heal": "off", "entry-self-heal": "off", - "data-self-heal": "off", - } + "data-self-heal": "off"} ret = set_volume_options(self.mnode, self.volname, options) self.assertTrue(ret, 'Failed to set options %s' % options) g.log.info("Options " @@ -461,9 +453,8 @@ class TestSelfHeal(GlusterBaseClass): g.log.info("Starting IO on all mounts...") self.all_mounts_procs = [] for mount_obj in self.mounts: - g.log.info("Starting IO on %s:%s" - % (mount_obj.client_system, - mount_obj.mountpoint)) + g.log.info("Starting IO on %s:%s", mount_obj.client_system, + mount_obj.mountpoint) cmd = ("python %s create_deep_dirs_with_files " "--dirname-start-num %d " "--dir-length 2 " @@ -476,9 +467,8 @@ class TestSelfHeal(GlusterBaseClass): user=mount_obj.user) self.all_mounts_procs.append(proc) self.counter = self.counter + 10 - g.log.info("IO on %s:%s is started successfully" - % (mount_obj.client_system, - mount_obj.mountpoint)) + g.log.info("IO on %s:%s is started successfully", + mount_obj.client_system, mount_obj.mountpoint) self.io_validation_complete = False # Validate IO @@ -493,8 +483,7 @@ class TestSelfHeal(GlusterBaseClass): cmd_list = ["python %s create_files -f 20 %s", "python %s mv -i '.trashcan' %s", "python %s copy --dest-dir new_dir %s", - "python %s delete %s", - ] + "python %s delete %s"] for cmd in cmd_list: # Get areequal before getting bricks offline @@ -506,8 +495,7 @@ class TestSelfHeal(GlusterBaseClass): # Setting options g.log.info('Setting options...') - options = {"self-heal-daemon": "off", - } + options = {"self-heal-daemon": "off"} ret = set_volume_options(self.mnode, self.volname, options) self.assertTrue(ret, 'Failed to set options %s' % options) g.log.info("Option 'self-heal-daemon' " @@ -517,13 +505,13 @@ class TestSelfHeal(GlusterBaseClass): bricks_to_bring_offline_dict = (select_bricks_to_bring_offline( self.mnode, self.volname)) bricks_to_bring_offline = filter(None, ( - bricks_to_bring_offline_dict['hot_tier_bricks'] + - bricks_to_bring_offline_dict['cold_tier_bricks'] + - bricks_to_bring_offline_dict['volume_bricks'])) + bricks_to_bring_offline_dict['hot_tier_bricks'] + + 
bricks_to_bring_offline_dict['cold_tier_bricks'] + + bricks_to_bring_offline_dict['volume_bricks'])) # Bring brick offline - g.log.info('Bringing bricks %s offline...' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline...', + bricks_to_bring_offline) ret = bring_bricks_offline(self.volname, bricks_to_bring_offline) self.assertTrue(ret, 'Failed to bring bricks %s offline' % bricks_to_bring_offline) @@ -532,8 +520,8 @@ class TestSelfHeal(GlusterBaseClass): bricks_to_bring_offline) self.assertTrue(ret, 'Bricks %s are not offline' % bricks_to_bring_offline) - g.log.info('Bringing bricks %s offline is successful' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline is successful', + bricks_to_bring_offline) # Get areequal after getting bricks offline g.log.info('Getting areequal after getting bricks offline...') @@ -559,9 +547,8 @@ class TestSelfHeal(GlusterBaseClass): proc = g.run_async(mount_obj.client_system, cmd, user=mount_obj.user) self.all_mounts_procs.append(proc) - g.log.info("IO on %s:%s is modified successfully" - % (mount_obj.client_system, - mount_obj.mountpoint)) + g.log.info("IO on %s:%s is modified successfully", + mount_obj.client_system, mount_obj.mountpoint) self.io_validation_complete = False # Validate IO @@ -586,19 +573,18 @@ class TestSelfHeal(GlusterBaseClass): g.log.info("Listing all files and directories is successful") # Bring brick online - g.log.info('Bringing bricks %s online...' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s online...', + bricks_to_bring_offline) ret = bring_bricks_online(self.mnode, self.volname, bricks_to_bring_offline) self.assertTrue(ret, 'Failed to bring bricks %s online' % bricks_to_bring_offline) - g.log.info('Bringing bricks %s online is successful' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s online is successful', + bricks_to_bring_offline) # Setting options g.log.info('Setting options...') - options = {"self-heal-daemon": "on", - } + options = {"self-heal-daemon": "on"} ret = set_volume_options(self.mnode, self.volname, options) self.assertTrue(ret, 'Failed to set options %s' % options) g.log.info("Option 'self-heal-daemon' is set to 'on' successfully") @@ -618,7 +604,7 @@ class TestSelfHeal(GlusterBaseClass): self.volname) self.assertTrue(ret, ("Volume %s : All process are not online" % self.volname)) - g.log.info("Volume %s : All process are online" % self.volname) + g.log.info("Volume %s : All process are online", self.volname) # Wait for self-heal-daemons to be online g.log.info("Waiting for self-heal-daemons to be online") diff --git a/tests/functional/afr/test_self_heal_daemon_process.py b/tests/functional/afr/heal/test_self_heal_daemon_process.py index f3c416687..3412c1b49 100644 --- a/tests/functional/afr/test_self_heal_daemon_process.py +++ b/tests/functional/afr/heal/test_self_heal_daemon_process.py @@ -48,63 +48,45 @@ class SelfHealDaemonProcessTests(GlusterBaseClass): SelfHealDaemonProcessTests contains tests which verifies the self-heal daemon process of the nodes """ - @classmethod - def setUpClass(cls): + def setUp(self): """ setup volume, mount volume and initialize necessary variables which is used in tests """ # calling GlusterBaseClass setUpClass - GlusterBaseClass.setUpClass.im_func(cls) + GlusterBaseClass.setUp.im_func(self) # Setup Volume and Mount Volume g.log.info("Starting to Setup Volume and Mount Volume") - ret = cls.setup_volume_and_mount_volume(mounts=cls.mounts) + ret = self.setup_volume_and_mount_volume(mounts=self.mounts) if not ret: 
raise ExecutionError("Failed to Setup_Volume and Mount_Volume") g.log.info("Successful in Setup Volume and Mount Volume") # Verfiy glustershd process releases its parent process - ret = is_shd_daemonized(cls.servers) + ret = is_shd_daemonized(self.servers) if not ret: raise ExecutionError("Self Heal Daemon process was still" " holding parent process.") g.log.info("Self Heal Daemon processes are online") - cls.GLUSTERSHD = "/var/lib/glusterd/glustershd/glustershd-server.vol" - - def setUp(self): - """ - setUp method for every test - """ - - # calling GlusterBaseClass setUp - GlusterBaseClass.setUp.im_func(self) + self.glustershd = "/var/lib/glusterd/glustershd/glustershd-server.vol" def tearDown(self): """ - tearDown for every test - """ - - # Calling GlusterBaseClass tearDown - GlusterBaseClass.tearDown.im_func(self) - - @classmethod - def tearDownClass(cls): - """ Clean up the volume and umount volume from client """ # stopping the volume g.log.info("Starting to Unmount Volume and Cleanup Volume") - ret = cls.unmount_volume_and_cleanup_volume(mounts=cls.mounts) + ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts) if not ret: raise ExecutionError("Failed to Unmount Volume and Cleanup Volume") g.log.info("Successful in Unmount Volume and Cleanup Volume") # calling GlusterBaseClass tearDownClass - GlusterBaseClass.tearDownClass.im_func(cls) + GlusterBaseClass.tearDown.im_func(self) def test_glustershd_with_add_remove_brick(self): """ @@ -126,40 +108,40 @@ class SelfHealDaemonProcessTests(GlusterBaseClass): in glustershd-server.vol file """ - + # pylint: disable=too-many-statements nodes = self.volume['servers'] bricks_list = [] glustershd_pids = {} # check the self-heal daemon process g.log.info("Starting to get self-heal daemon process on " - "nodes %s" % nodes) + "nodes %s", nodes) ret, pids = get_self_heal_daemon_pid(nodes) self.assertTrue(ret, ("Either No self heal daemon process found or " "more than One self heal daemon process " - "found : %s" % pids)) + "found : %s", pids)) g.log.info("Successful in getting Single self heal daemon process" " on all nodes %s", nodes) glustershd_pids = pids # get the bricks for the volume - g.log.info("Fetching bricks for the volume : %s" % self.volname) + g.log.info("Fetching bricks for the volume : %s", self.volname) bricks_list = get_all_bricks(self.mnode, self.volname) - g.log.info("Brick List : %s" % bricks_list) + g.log.info("Brick List : %s", bricks_list) # validate the bricks present in volume info with # glustershd server volume file g.log.info("Starting parsing file %s on " - "node %s" % (self.GLUSTERSHD, self.mnode)) + "node %s", self.glustershd, self.mnode) ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, bricks_list) self.assertTrue(ret, ("Brick List from volume info is different " "from glustershd server volume file. 
" "Please check log file for details")) - g.log.info("Successfully parsed %s file" % self.GLUSTERSHD) + g.log.info("Successfully parsed %s file", self.glustershd) # expanding volume - g.log.info("Start adding bricks to volume %s" % self.volname) + g.log.info("Start adding bricks to volume %s", self.volname) ret = expand_volume(self.mnode, self.volname, self.servers, self.all_servers_info) self.assertTrue(ret, ("Failed to add bricks to " @@ -185,7 +167,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass): # Start Rebalance g.log.info("Starting Rebalance on the volume") - ret, out, err = rebalance_start(self.mnode, self.volname) + ret, _, err = rebalance_start(self.mnode, self.volname) self.assertEqual(ret, 0, ("Failed to start rebalance on " "the volume %s with error %s" % (self.volname, err))) @@ -214,17 +196,17 @@ class SelfHealDaemonProcessTests(GlusterBaseClass): # Check the self-heal daemon process after adding bricks g.log.info("Starting to get self-heal daemon process on " - "nodes %s" % nodes) + "nodes %s", nodes) glustershd_pids_after_expanding = {} ret, pids = get_self_heal_daemon_pid(nodes) self.assertTrue(ret, ("Either No self heal daemon process found or " "more than One self heal daemon process found")) g.log.info("Successfull in getting self-heal daemon process " - "on nodes %s" % nodes) + "on nodes %s", nodes) glustershd_pids_after_expanding = pids g.log.info("Self Heal Daemon Process ID's afetr expanding " - "volume: %s" % glustershd_pids_after_expanding) + "volume: %s", glustershd_pids_after_expanding) self.assertNotEqual(glustershd_pids, glustershd_pids_after_expanding, @@ -236,11 +218,11 @@ class SelfHealDaemonProcessTests(GlusterBaseClass): # get the bricks for the volume after expanding bricks_list_after_expanding = get_all_bricks(self.mnode, self.volname) g.log.info("Brick List after expanding " - "volume: %s" % bricks_list_after_expanding) + "volume: %s", bricks_list_after_expanding) # validate the bricks present in volume info # with glustershd server volume file after adding bricks - g.log.info("Starting parsing file %s" % self.GLUSTERSHD) + g.log.info("Starting parsing file %s", self.glustershd) ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, bricks_list_after_expanding) @@ -248,7 +230,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass): "from glustershd server volume file after " "expanding bricks. 
Please check log file " "for details")) - g.log.info("Successfully parsed %s file" % self.GLUSTERSHD) + g.log.info("Successfully parsed %s file", self.glustershd) # shrink the volume g.log.info("Starting volume shrink") @@ -269,7 +251,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass): # get the bricks after shrinking the volume bricks_list_after_shrinking = get_all_bricks(self.mnode, self.volname) g.log.info("Brick List after shrinking " - "volume: %s" % bricks_list_after_shrinking) + "volume: %s", bricks_list_after_shrinking) self.assertEqual(len(bricks_list_after_shrinking), len(bricks_list), "Brick Count is mismatched after " @@ -284,7 +266,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass): # check the self-heal daemon process after removing bricks g.log.info("Starting to get self-heal daemon process " - "on nodes %s" % nodes) + "on nodes %s", nodes) glustershd_pids_after_shrinking = {} ret, pids = get_self_heal_daemon_pid(nodes) glustershd_pids_after_shrinking = pids @@ -297,14 +279,14 @@ class SelfHealDaemonProcessTests(GlusterBaseClass): # validate bricks present in volume info # with glustershd server volume file after removing bricks - g.log.info("Starting parsing file %s" % self.GLUSTERSHD) + g.log.info("Starting parsing file %s", self.glustershd) ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, bricks_list_after_shrinking) self.assertTrue(ret, ("Brick List from volume info is different " "from glustershd server volume file after " "removing bricks. Please check log file " "for details")) - g.log.info("Successfully parsed %s file" % self.GLUSTERSHD) + g.log.info("Successfully parsed %s file", self.glustershd) def test_glustershd_with_restarting_glusterd(self): """ @@ -322,23 +304,23 @@ class SelfHealDaemonProcessTests(GlusterBaseClass): * brought up the brick """ - + # pylint: disable=too-many-statements nodes = self.volume['servers'] # stop the volume - g.log.info("Stopping the volume %s" % self.volname) + g.log.info("Stopping the volume %s", self.volname) ret = volume_stop(self.mnode, self.volname) self.assertTrue(ret, ("Failed to stop volume %s" % self.volname)) - g.log.info("Successfully stopped volume %s" % self.volname) + g.log.info("Successfully stopped volume %s", self.volname) # check the self heal daemon process after stopping the volume g.log.info("Verifying the self heal daemon process for " - "volume %s" % self.volname) + "volume %s", self.volname) ret = are_all_self_heal_daemons_are_online(self.mnode, self.volname) self.assertFalse(ret, ("Self Heal Daemon process is still running " "even after stopping volume %s" % self.volname)) g.log.info("Self Heal Daemon is not running after stopping " - "volume %s" % self.volname) + "volume %s", self.volname) # restart glusterd service on all the servers g.log.info("Restarting glusterd on all servers %s", nodes) @@ -350,30 +332,30 @@ class SelfHealDaemonProcessTests(GlusterBaseClass): # check the self heal daemon process after restarting glusterd process g.log.info("Starting to get self-heal daemon process on" - " nodes %s" % nodes) + " nodes %s", nodes) ret = are_all_self_heal_daemons_are_online(self.mnode, self.volname) self.assertFalse(ret, ("Self Heal Daemon process is running after " "glusterd restart with volume %s in " "stop state" % self.volname)) g.log.info("Self Heal Daemon is not running after stopping " - "volume and restarting glusterd %s" % self.volname) + "volume and restarting glusterd %s", self.volname) # start the volume - g.log.info("Starting the volume %s" % self.volname) + 
g.log.info("Starting the volume %s", self.volname) ret = volume_start(self.mnode, self.volname) self.assertTrue(ret, ("Failed to start volume %s" % self.volname)) - g.log.info("Volume %s started successfully" % self.volname) + g.log.info("Volume %s started successfully", self.volname) # Verfiy glustershd process releases its parent process g.log.info("Checking whether glustershd process is daemonized or not") ret = is_shd_daemonized(nodes) self.assertTrue(ret, ("Either No self heal daemon process found or " "more than One self heal daemon process found")) - g.log.info("Single self heal daemon process on all nodes %s" % nodes) + g.log.info("Single self heal daemon process on all nodes %s", nodes) # get the self heal daemon pids after starting volume g.log.info("Starting to get self-heal daemon process " - "on nodes %s" % nodes) + "on nodes %s", nodes) ret, pids = get_self_heal_daemon_pid(nodes) self.assertTrue(ret, ("Either No self heal daemon process found or " "more than One self heal daemon process found")) @@ -381,20 +363,20 @@ class SelfHealDaemonProcessTests(GlusterBaseClass): glustershd_pids = pids # get the bricks for the volume - g.log.info("Fetching bricks for the volume : %s" % self.volname) + g.log.info("Fetching bricks for the volume : %s", self.volname) bricks_list = get_all_bricks(self.mnode, self.volname) - g.log.info("Brick List : %s" % bricks_list) + g.log.info("Brick List : %s", bricks_list) # validate the bricks present in volume info # with glustershd server volume file g.log.info("Starting parsing file %s on " - "node %s" % (self.GLUSTERSHD, self.mnode)) + "node %s", self.glustershd, self.mnode) ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, bricks_list) self.assertTrue(ret, ("Brick List from volume info is different from " "glustershd server volume file. " "Please check log file for details.")) - g.log.info("Successfully parsed %s file" % self.GLUSTERSHD) + g.log.info("Successfully parsed %s file", self.glustershd) # restart glusterd service on all the servers g.log.info("Restarting glusterd on all servers %s", nodes) @@ -421,7 +403,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass): # check the self heal daemon process after starting volume and # restarting glusterd process g.log.info("Starting to get self-heal daemon process " - "on nodes %s" % nodes) + "on nodes %s", nodes) ret, pids = get_self_heal_daemon_pid(nodes) self.assertTrue(ret, ("Either No self heal daemon process found or " "more than One self heal daemon process found")) @@ -444,12 +426,12 @@ class SelfHealDaemonProcessTests(GlusterBaseClass): # bring bricks offline g.log.info("Going to bring down the brick process " - "for %s" % bricks_to_bring_offline) + "for %s", bricks_to_bring_offline) ret = bring_bricks_offline(self.volname, bricks_to_bring_offline) self.assertTrue(ret, ("Failed to bring down the bricks. 
Please " "check the log file for more details.")) g.log.info("Brought down the brick process " - "for %s succesfully" % bricks_to_bring_offline) + "for %s succesfully", bricks_to_bring_offline) # restart glusterd after brought down the brick g.log.info("Restart glusterd on all servers %s", nodes) @@ -476,7 +458,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass): # check the self heal daemon process after killing brick and # restarting glusterd process g.log.info("Starting to get self-heal daemon process " - "on nodes %s" % nodes) + "on nodes %s", nodes) ret, pids = get_self_heal_daemon_pid(nodes) self.assertTrue(ret, ("Either No self heal daemon process found or " "more than One self heal daemon process found")) @@ -490,7 +472,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass): "brick, restarting the glusterd process") # brought the brick online - g.log.info("bringing up the bricks : %s online" % + g.log.info("bringing up the bricks : %s online", bricks_to_bring_offline) ret = bring_bricks_online(self.mnode, self.volname, bricks_to_bring_offline) @@ -528,7 +510,7 @@ class ImpactOfReplaceBrickForGlustershdTests(GlusterBaseClass): 'arbiter_count': 1, 'transport': 'tcp'} - cls.GLUSTERSHD = "/var/lib/glusterd/glustershd/glustershd-server.vol" + cls.glustershd = "/var/lib/glusterd/glustershd/glustershd-server.vol" def setUp(self): """ @@ -542,7 +524,7 @@ class ImpactOfReplaceBrickForGlustershdTests(GlusterBaseClass): self.io_validation_complete = False # Setup Volume and Mount Volume - g.log.info("Starting to Setup Volume %s" % self.volname) + g.log.info("Starting to Setup Volume %s", self.volname) ret = self.setup_volume_and_mount_volume(self.mounts, volume_create_force=False) if not ret: @@ -571,7 +553,7 @@ class ImpactOfReplaceBrickForGlustershdTests(GlusterBaseClass): # check the self-heal daemon process g.log.info("Starting to get self-heal daemon process on " - "nodes %s" % nodes) + "nodes %s", nodes) ret, pids = get_self_heal_daemon_pid(nodes) self.assertTrue(ret, ("Either No self heal daemon process found or " "more than One self heal daemon process " @@ -581,31 +563,31 @@ class ImpactOfReplaceBrickForGlustershdTests(GlusterBaseClass): glustershd_pids = pids # get the bricks for the volume - g.log.info("Fetching bricks for the volume : %s" % self.volname) + g.log.info("Fetching bricks for the volume : %s", self.volname) bricks_list = get_all_bricks(self.mnode, self.volname) - g.log.info("Brick List : %s" % bricks_list) + g.log.info("Brick List : %s", bricks_list) # validate the bricks present in volume info with # glustershd server volume file g.log.info("Starting parsing file %s on " - "node %s" % (self.GLUSTERSHD, self.mnode)) + "node %s", self.glustershd, self.mnode) ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, bricks_list) self.assertTrue(ret, ("Brick List from volume info is different " "from glustershd server volume file. 
" "Please check log file for details")) - g.log.info("Successfully parsed %s file" % self.GLUSTERSHD) + g.log.info("Successfully parsed %s file", self.glustershd) # replace brick brick_to_replace = bricks_list[-1] new_brick = brick_to_replace + 'new' - g.log.info("Replacing the brick %s for the volume : %s" - % (brick_to_replace, self.volname)) - ret, out, err = replace_brick(self.mnode, self.volname, - brick_to_replace, new_brick) + g.log.info("Replacing the brick %s for the volume : %s", + brick_to_replace, self.volname) + ret, _, err = replace_brick(self.mnode, self.volname, + brick_to_replace, new_brick) self.assertFalse(ret, err) - g.log.info('Replaced brick %s to %s successfully' - % (brick_to_replace, new_brick)) + g.log.info('Replaced brick %s to %s successfully', + brick_to_replace, new_brick) # check bricks bricks_list = get_all_bricks(self.mnode, self.volname) @@ -628,7 +610,7 @@ class ImpactOfReplaceBrickForGlustershdTests(GlusterBaseClass): # check the self-heal daemon process g.log.info("Starting to get self-heal daemon process on " - "nodes %s" % nodes) + "nodes %s", nodes) ret, pids = get_self_heal_daemon_pid(nodes) self.assertTrue(ret, ("Either No self heal daemon process found or " "more than One self heal daemon process " @@ -648,11 +630,11 @@ class ImpactOfReplaceBrickForGlustershdTests(GlusterBaseClass): # get the bricks for the volume after replacing bricks_list_after_replacing = get_all_bricks(self.mnode, self.volname) g.log.info("Brick List after expanding " - "volume: %s" % bricks_list_after_replacing) + "volume: %s", bricks_list_after_replacing) # validate the bricks present in volume info # with glustershd server volume file after replacing bricks - g.log.info("Starting parsing file %s" % self.GLUSTERSHD) + g.log.info("Starting parsing file %s", self.glustershd) ret = do_bricks_exist_in_shd_volfile(self.mnode, self.volname, bricks_list_after_replacing) @@ -660,4 +642,4 @@ class ImpactOfReplaceBrickForGlustershdTests(GlusterBaseClass): "from glustershd server volume file after " "replacing bricks. Please check log file " "for details")) - g.log.info("Successfully parsed %s file" % self.GLUSTERSHD) + g.log.info("Successfully parsed %s file", self.glustershd) diff --git a/tests/functional/afr/test_client_side_quorum.py b/tests/functional/afr/test_client_side_quorum.py index 2512faee3..ba0aaa772 100644 --- a/tests/functional/afr/test_client_side_quorum.py +++ b/tests/functional/afr/test_client_side_quorum.py @@ -18,6 +18,7 @@ Test Cases in this module tests the client side quorum. 
""" +import tempfile from glusto.core import Glusto as g from glustolibs.gluster.exceptions import ExecutionError @@ -33,10 +34,8 @@ from glustolibs.gluster.brick_libs import (bring_bricks_offline, from glustolibs.io.utils import (validate_io_procs, is_io_procs_fail_with_rofs, list_all_files_and_dirs_mounts, - wait_for_io_to_complete - ) + wait_for_io_to_complete) from glustolibs.gluster.mount_ops import mount_volume, umount_volume -import tempfile @runs_on([['replicated', 'distributed-replicated'], @@ -74,7 +73,7 @@ class ClientSideQuorumTests(GlusterBaseClass): GlusterBaseClass.setUp.im_func(self) # Setup Volume and Mount Volume - g.log.info("Starting to Setup Volume %s" % self.volname) + g.log.info("Starting to Setup Volume %s", self.volname) ret = self.setup_volume_and_mount_volume(self.mounts) if not ret: raise ExecutionError("Failed to Setup_Volume and Mount_Volume") @@ -93,6 +92,7 @@ class ClientSideQuorumTests(GlusterBaseClass): g.log.info("Successful in Unmount Volume and Cleanup Volume") # Calling GlusterBaseClass tearDown + GlusterBaseClass.tearDown.im_func(self) def test_client_side_quorum_with_auto_option(self): @@ -105,19 +105,19 @@ class ClientSideQuorumTests(GlusterBaseClass): * perform ops """ + # pylint: disable=too-many-branches,too-many-statements # set cluster.quorum-type to auto options = {"cluster.quorum-type": "auto"} g.log.info("setting cluster.quorum-type to auto on " - "volume %s" % self.volname) + "volume %s", self.volname) ret = set_volume_options(self.mnode, self.volname, options) self.assertTrue(ret, ("Unable to set volume option %s for" "volume %s" % (options, self.volname))) - g.log.info("Sucessfully set %s for volume %s" - % (options, self.volname)) + g.log.info("Sucessfully set %s for volume %s", options, self.volname) # write files on all mounts g.log.info("Starting IO on all mounts...") - g.log.info("mounts: %s" % self.mounts) + g.log.info("mounts: %s", self.mounts) all_mounts_procs = [] for mount_obj in self.mounts: cmd = ("python %s create_files " @@ -134,28 +134,27 @@ class ClientSideQuorumTests(GlusterBaseClass): g.log.info("IO is successful on all mounts") # get the subvolumes - g.log.info("Starting to get sub-volumes for volume %s" % self.volname) + g.log.info("Starting to get sub-volumes for volume %s", self.volname) subvols_dict = get_subvols(self.mnode, self.volname) num_subvols = len(subvols_dict['volume_subvols']) - g.log.info("Number of subvolumes in volume %s:" % num_subvols) + g.log.info("Number of subvolumes in volume %s:", num_subvols) # bring bricks offline( 2 bricks ) for all the subvolumes for i in range(0, num_subvols): subvol_brick_list = subvols_dict['volume_subvols'][i] - g.log.info("sub-volume %s brick list : %s" - % (i, subvol_brick_list)) + g.log.info("sub-volume %s brick list : %s", i, subvol_brick_list) # For volume type: 1 * 2, bring 1 brick offline if len(subvol_brick_list) == 2: bricks_to_bring_offline = subvol_brick_list[0:1] else: bricks_to_bring_offline = subvol_brick_list[0:2] g.log.info("Going to bring down the brick process " - "for %s" % bricks_to_bring_offline) + "for %s", bricks_to_bring_offline) ret = bring_bricks_offline(self.volname, bricks_to_bring_offline) self.assertTrue(ret, ("Failed to bring down the bricks. 
Please " "check the log file for more details.")) g.log.info("Brought down the brick process " - "for %s succesfully" % bricks_to_bring_offline) + "for %s succesfully", bricks_to_bring_offline) # create 2 files named newfile0.txt and newfile1.txt g.log.info("Start creating 2 files on all mounts...") @@ -198,7 +197,7 @@ class ClientSideQuorumTests(GlusterBaseClass): for mount_obj in self.mounts: cmd = "ln %s/file0.txt %s/file0.txt_hwlink" \ % (mount_obj.mountpoint, mount_obj.mountpoint) - ret, out, err = g.run(mount_obj.client_system, cmd) + ret, _, err = g.run(mount_obj.client_system, cmd) self.assertTrue(ret, ("Unexpected error and creating hard link" " successful on read-only filesystem")) self.assertIn("Read-only file system", @@ -211,7 +210,7 @@ class ClientSideQuorumTests(GlusterBaseClass): for mount_obj in self.mounts: cmd = "ln -s %s/file1.txt %s/file1.txt_swlink" %\ (mount_obj.mountpoint, mount_obj.mountpoint) - ret, out, err = g.run(mount_obj.client_system, cmd) + ret, _, err = g.run(mount_obj.client_system, cmd) self.assertTrue(ret, ("Unexpected error and creating soft link" " successful on read-only filesystem")) self.assertIn("Read-only file system", @@ -224,7 +223,7 @@ class ClientSideQuorumTests(GlusterBaseClass): for mount_obj in self.mounts: cmd = "cat %s/file0.txt >> %s/file1.txt" %\ (mount_obj.mountpoint, mount_obj.mountpoint) - ret, out, err = g.run(mount_obj.client_system, cmd) + ret, _, err = g.run(mount_obj.client_system, cmd) self.assertTrue(ret, ("Unexpected error and append successful" " on read-only filesystem")) self.assertIn("Read-only file system", @@ -237,7 +236,7 @@ class ClientSideQuorumTests(GlusterBaseClass): for mount_obj in self.mounts: cmd = "echo 'Modify Contents' > %s/file1.txt"\ % (mount_obj.mountpoint) - ret, out, err = g.run(mount_obj.client_system, cmd) + ret, _, err = g.run(mount_obj.client_system, cmd) self.assertTrue(ret, ("Unexpected error and modifying successful" " on read-only filesystem")) self.assertIn("Read-only file system", @@ -249,7 +248,7 @@ class ClientSideQuorumTests(GlusterBaseClass): g.log.info("Truncating file1.txt on all mounts") for mount_obj in self.mounts: cmd = "truncate -s 0 %s/file1.txt" % (mount_obj.mountpoint) - ret, out, err = g.run(mount_obj.client_system, cmd) + ret, _, err = g.run(mount_obj.client_system, cmd) self.assertTrue(ret, ("Unexpected error and truncating file" " successful on read-only filesystem")) self.assertIn("Read-only file system", @@ -277,7 +276,7 @@ class ClientSideQuorumTests(GlusterBaseClass): g.log.info("stat on file1.txt on all mounts") for mount_obj in self.mounts: cmd = "stat %s/file1.txt" % (mount_obj.mountpoint) - ret, out, err = g.run(mount_obj.client_system, cmd) + ret, _, err = g.run(mount_obj.client_system, cmd) self.assertFalse(ret, ("Unexpected error and stat on file fails" " on read-only filesystem")) g.log.info("stat on file is successfull on read-only filesystem") @@ -287,7 +286,7 @@ class ClientSideQuorumTests(GlusterBaseClass): for mount_obj in self.mounts: cmd = ("python %s stat %s" % (self.script_upload_path, mount_obj.mountpoint)) - ret, out, err = g.run(mount_obj.client_system, cmd) + ret, _, err = g.run(mount_obj.client_system, cmd) self.assertFalse(ret, ("Unexpected error and stat on directory" " fails on read-only filesystem")) g.log.info("stat on dir is successfull on read-only filesystem") @@ -297,7 +296,7 @@ class ClientSideQuorumTests(GlusterBaseClass): for mount_obj in self.mounts: cmd = ("python %s ls %s" % (self.script_upload_path, mount_obj.mountpoint)) - ret, out, 
err = g.run(mount_obj.client_system, cmd) + ret, _, err = g.run(mount_obj.client_system, cmd) self.assertFalse(ret, ("Unexpected error and listing file fails" " on read-only filesystem")) g.log.info("listing files is successfull on read-only filesystem") @@ -316,33 +315,31 @@ class ClientSideQuorumTests(GlusterBaseClass): # set cluster.quorum-type to fixed options = {"cluster.quorum-type": "fixed"} - g.log.info("setting %s for the volume %s" % (options, self.volname)) + g.log.info("setting %s for the volume %s", options, self.volname) ret = set_volume_options(self.mnode, self.volname, options) self.assertTrue(ret, ("Unable to set %s for volume %s" % (options, self.volname))) - g.log.info("Successfully set %s for volume %s" - % (options, self.volname)) + g.log.info("Successfully set %s for volume %s", options, self.volname) # get the subvolumes - g.log.info("Starting to get sub-volumes for volume %s" % self.volname) + g.log.info("Starting to get sub-volumes for volume %s", self.volname) subvols_dict = get_subvols(self.mnode, self.volname) num_subvols = len(subvols_dict['volume_subvols']) - g.log.info("Number of subvolumes in volume %s is %s" - % (self.volname, num_subvols)) + g.log.info("Number of subvolumes in volume %s is %s", self.volname, + num_subvols) # get the number of bricks in replica set num_bricks_in_subvol = len(subvols_dict['volume_subvols'][0]) - g.log.info("Number of bricks in each replica set : %s" - % num_bricks_in_subvol) + g.log.info("Number of bricks in each replica set : %s", + num_bricks_in_subvol) # set cluster.quorum-count to higher value than the number of bricks in # repliac set start_range = num_bricks_in_subvol + 1 end_range = num_bricks_in_subvol + 30 for i in range(start_range, end_range): - options = {"cluster.quorum-count": "%s" % start_range} - g.log.info("setting %s for the volume %s" % - (options, self.volname)) + options = {"cluster.quorum-count": "%s" % i} + g.log.info("setting %s for the volume %s", options, self.volname) ret = set_volume_options(self.mnode, self.volname, options) self.assertFalse(ret, ("Able to set %s for volume %s, quorum-count" " should not be greater than number of" @@ -350,7 +347,7 @@ class ClientSideQuorumTests(GlusterBaseClass): % (options, self.volname))) g.log.info("Expected: Unable to set %s for volume %s, " "quorum-count should be less than number of bricks " - "in replica set" % (options, self.volname)) + "in replica set", options, self.volname) @runs_on([['distributed-replicated'], @@ -363,8 +360,8 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): GlusterBaseClass.setUpClass.im_func(cls) # Upload io scripts for running IO on mounts - g.log.info("Upload io scripts to clients %s for running IO on mounts" - % cls.clients) + g.log.info("Upload io scripts to clients %s for running IO on mounts", + cls.clients) script_local_path = ("/usr/share/glustolibs/io/scripts/" "file_dir_ops.py") cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/" @@ -373,23 +370,22 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): if not ret: raise ExecutionError("Failed to upload IO scripts to clients %s" % cls.clients) - g.log.info("Successfully uploaded IO scripts to clients %s" - % cls.clients) + g.log.info("Successfully uploaded IO scripts to clients %s", + cls.clients) cls.counter = 1 - """int: Value of counter is used for dirname-start-num argument for - file_dir_ops.py create_deep_dirs_with_files. 
- - The --dir-length argument value for - file_dir_ops.py create_deep_dirs_with_files is set to 10 - (refer to the cmd in setUp method). This means every mount will create - 10 top level dirs. For every mountpoint/testcase to create new set of - dirs, we are incrementing the counter by --dir-length value i.e 10 - in this test suite. - - If we are changing the --dir-length to new value, ensure the counter - is also incremented by same value to create new set of files/dirs. - """ + # int: Value of counter is used for dirname-start-num argument for + # file_dir_ops.py create_deep_dirs_with_files. + + # The --dir-length argument value for file_dir_ops.py + # create_deep_dirs_with_files is set to 10 (refer to the cmd in setUp + # method). This means every mount will create + # 10 top level dirs. For every mountpoint/testcase to create new set of + # dirs, we are incrementing the counter by --dir-length value i.e 10 in + # this test suite. + + # If we are changing the --dir-length to new value, ensure the counter + # is also incremented by same value to create new set of files/dirs. # Setup Volumes if cls.volume_type == "distributed-replicated": @@ -407,8 +403,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): {'name': 'testvol_%s_%d' % (cls.volume['voltype']['type'], i), 'servers': cls.servers, - 'voltype': cls.volume['voltype'] - }) + 'voltype': cls.volume['voltype']}) # Define two 2x3 distributed-replicated volumes for i in range(1, 3): @@ -422,8 +417,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): {'name': 'testvol_%s_%d' % (cls.volume['voltype']['type'], i+2), 'servers': cls.servers, - 'voltype': cls.volume['voltype'] - }) + 'voltype': cls.volume['voltype']}) # Define distributed volume cls.volume['voltype'] = { @@ -435,8 +429,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): {'name': 'testvol_%s' % cls.volume['voltype']['type'], 'servers': cls.servers, - 'voltype': cls.volume['voltype'] - }) + 'voltype': cls.volume['voltype']}) # Create and mount volumes cls.mount_points = [] @@ -450,7 +443,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): force=False) if not ret: raise ExecutionError("Failed to setup Volume" - " %s", volume_config['name']) + " %s" % volume_config['name']) g.log.info("Successful in setting volume %s", volume_config['name']) @@ -468,8 +461,8 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): raise ExecutionError( "Failed to do gluster mount on volume %s " % cls.volname) - g.log.info("Successfully mounted %s on client %s" - % (cls.volname, cls.client)) + g.log.info("Successfully mounted %s on client %s", + cls.volname, cls.client) def setUp(self): # Calling GlusterBaseClass setUp @@ -515,7 +508,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): ret = cleanup_volume(cls.mnode, volume) if not ret: raise ExecutionError("Failed to cleanup Volume %s" % volume) - g.log.info("Volume: %s cleanup is done" % volume) + g.log.info("Volume: %s cleanup is done", volume) g.log.info("Successfully Cleanedup all Volumes") # umount all volumes @@ -525,8 +518,8 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): raise ExecutionError( "Failed to umount on volume %s " % cls.volname) - g.log.info("Successfully umounted %s on client %s" - % (cls.volname, cls.client)) + g.log.info("Successfully umounted %s on client %s", cls.volname, + cls.client) # calling GlusterBaseClass tearDownClass GlusterBaseClass.tearDownClass.im_func(cls) @@ -545,12 +538,13 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): 
- bring down b0 on vol1 and b0 and b1 on vol3 - try to create files on all vols and check for result """ + # pylint: disable=too-many-locals,too-many-statements # Creating files for all volumes for mount_point in self.mount_points: self.all_mounts_procs = [] for mount_obj in self.mounts: - g.log.info("Generating data for %s:%s" - % (mount_obj.client_system, mount_point)) + g.log.info("Generating data for %s:%s", + mount_obj.client_system, mount_point) # Create files g.log.info('Creating files...') command = ("python %s create_files -f 50 " @@ -576,19 +570,17 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): % vol_number) options = {"cluster.quorum-type": "auto"} g.log.info("setting cluster.quorum-type to auto on " - "volume testvol_distributed-replicated_%s" - % vol_number) + "volume testvol_distributed-replicated_%s", vol_number) ret = set_volume_options(self.mnode, vol_name, options) self.assertTrue(ret, ("Unable to set volume option %s for " "volume %s" % (options, vol_name))) - g.log.info("Sucessfully set %s for volume %s" - % (options, vol_name)) + g.log.info("Sucessfully set %s for volume %s", options, vol_name) # check is options are set correctly volume_list = get_volume_list(self.mnode) for volume in volume_list: - g.log.info('Checking for cluster.quorum-type option for %s' - % volume) + g.log.info('Checking for cluster.quorum-type option for %s', + volume) volume_options_dict = get_volume_options(self.mnode, volume, 'cluster.quorum-type') @@ -599,16 +591,14 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): 'Option cluster.quorum-type ' 'is not AUTO for %s' % volume) - g.log.info('Option cluster.quorum-type is AUTO for %s' - % volume) + g.log.info('Option cluster.quorum-type is AUTO for %s', volume) else: self.assertEqual(volume_options_dict['cluster.quorum-type'], 'none', 'Option cluster.quorum-type ' 'is not NONE for %s' % volume) - g.log.info('Option cluster.quorum-type is NONE for %s' - % volume) + g.log.info('Option cluster.quorum-type is NONE for %s', volume) # Get first brick server and brick path # and get first file from filelist then delete it from volume @@ -616,11 +606,11 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): for volume in volume_list: brick_list = get_all_bricks(self.mnode, volume) brick_server, brick_path = brick_list[0].split(':') - ret, file_list, err = g.run(brick_server, 'ls %s' % brick_path) + ret, file_list, _ = g.run(brick_server, 'ls %s' % brick_path) self.assertFalse(ret, 'Failed to ls files on %s' % brick_server) file_from_vol = file_list.splitlines()[0] - ret, out, err = g.run(brick_server, 'rm -rf %s/%s' - % (brick_path, file_from_vol)) + ret, _, _ = g.run(brick_server, 'rm -rf %s/%s' + % (brick_path, file_from_vol)) self.assertFalse(ret, 'Failed to rm file on %s' % brick_server) vols_file_list[volume] = file_from_vol @@ -629,7 +619,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): volname = 'testvol_distributed-replicated_1' brick_list = get_all_bricks(self.mnode, volname) bricks_to_bring_offline = brick_list[0:1] - g.log.info('Bringing bricks %s offline...' 
% bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline...', bricks_to_bring_offline) ret = bring_bricks_offline(volname, bricks_to_bring_offline) self.assertTrue(ret, 'Failed to bring bricks %s offline' % bricks_to_bring_offline) @@ -638,14 +628,14 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): bricks_to_bring_offline) self.assertTrue(ret, 'Bricks %s are not offline' % bricks_to_bring_offline) - g.log.info('Bringing bricks %s offline is successful' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline is successful', + bricks_to_bring_offline) # bring first two bricks for testvol_distributed-replicated_3 volname = 'testvol_distributed-replicated_3' brick_list = get_all_bricks(self.mnode, volname) bricks_to_bring_offline = brick_list[0:2] - g.log.info('Bringing bricks %s offline...' % bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline...', bricks_to_bring_offline) ret = bring_bricks_offline(volname, bricks_to_bring_offline) self.assertTrue(ret, 'Failed to bring bricks %s offline' % bricks_to_bring_offline) @@ -654,8 +644,8 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass): bricks_to_bring_offline) self.assertTrue(ret, 'Bricks %s are not offline' % bricks_to_bring_offline) - g.log.info('Bringing bricks %s offline is successful' - % bricks_to_bring_offline) + g.log.info('Bringing bricks %s offline is successful', + bricks_to_bring_offline) # merge two dicts (volname: file_to_delete) and (volname: mountpoint) temp_dict = [vols_file_list, self.mount_points_and_volnames] |