author     Nigel Babu <nigelb@redhat.com>  2018-07-27 13:02:05 +0530
committer  Nigel Babu <nigelb@redhat.com>  2018-08-07 19:48:57 +0530
commit     82c94a926c1f1961932798257d18a2fc306f9393 (patch)
tree       a5d5aa43613182aa4d545bc4c0d10c123cf16fa2 /tests/functional/afr
parent     e0bb79503c37843d02082b93425230f2afbbbde2 (diff)
Fix spelling mistake across the codebase
Change-Id: I46fc2feffe6443af6913785d67bf310838532421
Diffstat (limited to 'tests/functional/afr')
10 files changed, 44 insertions, 44 deletions
diff --git a/tests/functional/afr/heal/test_self_heal_daemon_process.py b/tests/functional/afr/heal/test_self_heal_daemon_process.py
index ed71e4f2b..15cd43951 100644
--- a/tests/functional/afr/heal/test_self_heal_daemon_process.py
+++ b/tests/functional/afr/heal/test_self_heal_daemon_process.py
@@ -176,7 +176,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
                                    self.all_servers_info)
         self.assertTrue(ret, ("Failed to add bricks to "
                               "volume %s " % self.volname))
-        g.log.info("Add brick successfull")
+        g.log.info("Add brick successful")
         # Log Volume Info and Status after expanding the volume
         g.log.info("Logging volume info and Status after expanding volume")
@@ -231,11 +231,11 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
         ret, pids = get_self_heal_daemon_pid(nodes)
         self.assertTrue(ret, ("Either No self heal daemon process found or "
                               "more than One self heal daemon process found"))
-        g.log.info("Successfull in getting self-heal daemon process "
+        g.log.info("Successful in getting self-heal daemon process "
                    "on nodes %s", nodes)
         glustershd_pids_after_expanding = pids
-        g.log.info("Self Heal Daemon Process ID's afetr expanding "
+        g.log.info("Self Heal Daemon Process ID's after expanding "
                    "volume: %s", glustershd_pids_after_expanding)
         self.assertNotEqual(glustershd_pids,
@@ -389,7 +389,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
         ret, pids = get_self_heal_daemon_pid(nodes)
         self.assertTrue(ret, ("Either No self heal daemon process found or "
                               "more than One self heal daemon process found"))
-        g.log.info("Succesfull in getting self heal daemon pids")
+        g.log.info("Successful in getting self heal daemon pids")
         glustershd_pids = pids
         # get the bricks for the volume
@@ -461,7 +461,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
         self.assertTrue(ret, ("Failed to bring down the bricks. Please "
                               "check the log file for more details."))
         g.log.info("Brought down the brick process "
-                   "for %s succesfully", bricks_to_bring_offline)
+                   "for %s successfully", bricks_to_bring_offline)
         # restart glusterd after brought down the brick
         g.log.info("Restart glusterd on all servers %s", nodes)
@@ -922,7 +922,7 @@ class SelfHealDaemonProcessTestsWithHealing(GlusterBaseClass):
         self.assertTrue(ret, ("Failed to bring down the bricks. Please "
                               "check the log file for more details."))
         g.log.info("Brought down the brick process "
-                   "for %s succesfully", bricks_to_bring_offline)
+                   "for %s successfully", bricks_to_bring_offline)
         # get the bricks which are running
         g.log.info("getting the brick list which are online")
@@ -996,9 +996,9 @@ class SelfHealDaemonProcessTestsWithHealing(GlusterBaseClass):
         self.assertTrue(ret, ("Failed to bring down the bricks. Please "
                               "check the log file for more details."))
         g.log.info("Brought down the brick process "
-                   "for %s succesfully", bricks_to_bring_offline)
+                   "for %s successfully", bricks_to_bring_offline)
-        # wait for 60 sec and brought up the brick agian
+        # wait for 60 sec and brought up the brick again
         time.sleep(60)
         g.log.info("Bring bricks: %s online", bricks_to_bring_offline)
         ret = bring_bricks_online(self.mnode, self.volname,
@@ -1182,7 +1182,7 @@ class SelfHealDaemonProcessTestsWithMultipleVolumes(GlusterBaseClass):
                                    self.all_servers_info)
         self.assertTrue(ret, ("Failed to add bricks to "
                               "volume %s " % volume))
-        g.log.info("Add brick successfull")
+        g.log.info("Add brick successful")
         # Log Volume Info and Status after expanding the volume
         g.log.info("Logging volume info and Status after "
diff --git a/tests/functional/afr/test_client_side_quorum.py b/tests/functional/afr/test_client_side_quorum.py
index 0432a13ab..1dcb96ef5 100644
--- a/tests/functional/afr/test_client_side_quorum.py
+++ b/tests/functional/afr/test_client_side_quorum.py
@@ -114,7 +114,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
         ret = set_volume_options(self.mnode, self.volname, options)
         self.assertTrue(ret, ("Unable to set volume option %s for"
                               "volume %s" % (options, self.volname)))
-        g.log.info("Sucessfully set %s for volume %s", options, self.volname)
+        g.log.info("Successfully set %s for volume %s", options, self.volname)
         # write files on all mounts
         g.log.info("Starting IO on all mounts...")
@@ -155,7 +155,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
         self.assertTrue(ret, ("Failed to bring down the bricks. Please "
                               "check the log file for more details."))
         g.log.info("Brought down the brick process "
-                   "for %s succesfully", bricks_to_bring_offline)
+                   "for %s successfully", bricks_to_bring_offline)
         # create 2 files named newfile0.txt and newfile1.txt
         g.log.info("Start creating 2 files on all mounts...")
@@ -172,7 +172,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
         g.log.info("Validating whether IO failed with read-only filesystem")
         ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
                                             self.mounts)
-        self.assertTrue(ret, ("Unexpected error and IO successfull"
+        self.assertTrue(ret, ("Unexpected error and IO successful"
                               " on read-only filesystem"))
         g.log.info("EXPECTED: Read-only file system in IO while creating file")
@@ -190,7 +190,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
         g.log.info("Validating whether IO failed with read-only filesystem")
         ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
                                             self.mounts)
-        self.assertTrue(ret, ("Unexpected error and IO successfull"
+        self.assertTrue(ret, ("Unexpected error and IO successful"
                               " on read-only filesystem"))
         g.log.info("EXPECTED: Read-only file system in IO while"
                    " creating directory")
@@ -282,7 +282,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
             ret, _, err = g.run(mount_obj.client_system, cmd)
             self.assertFalse(ret, ("Unexpected error and stat on file fails"
                                    " on read-only filesystem"))
-        g.log.info("stat on file is successfull on read-only filesystem")
+        g.log.info("stat on file is successful on read-only filesystem")
         # stat on dir
         g.log.info("stat on directory on all mounts")
@@ -292,7 +292,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
             ret, _, err = g.run(mount_obj.client_system, cmd)
             self.assertFalse(ret, ("Unexpected error and stat on directory"
                                    " fails on read-only filesystem"))
-        g.log.info("stat on dir is successfull on read-only filesystem")
+        g.log.info("stat on dir is successful on read-only filesystem")
         # ls on mount point
         g.log.info("ls on mount point on all mounts")
@@ -302,7 +302,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
             ret, _, err = g.run(mount_obj.client_system, cmd)
             self.assertFalse(ret, ("Unexpected error and listing file fails"
                                    " on read-only filesystem"))
-        g.log.info("listing files is successfull on read-only filesystem")
+        g.log.info("listing files is successful on read-only filesystem")
     def test_client_side_quorum_with_fixed_validate_max_bricks(self):
         """
@@ -402,7 +402,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
         self.assertEqual(option_dict['cluster.quorum-count'], '(null)',
                          ("Default value for %s is not null"
                           " for volume %s" % (option, self.volname)))
-        g.log.info("Succesfull in getting %s for the volume %s",
+        g.log.info("Successful in getting %s for the volume %s",
                    option, self.volname)
         # set cluster.quorum-type to fixed and cluster.quorum-count to 1
@@ -452,7 +452,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
         self.assertTrue(ret, ("Failed to bring down the bricks. Please "
                               "check the log file for more details."))
         g.log.info("Brought down the brick process "
-                   "for %s succesfully", bricks_to_bring_offline)
+                   "for %s successfully", bricks_to_bring_offline)
         # create files
         g.log.info("Starting IO on all mounts...")
@@ -609,7 +609,7 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
         ret = set_volume_options(self.mnode, self.volname, options)
         self.assertTrue(ret, ("Unable to set volume option %s for"
                               "volume %s" % (options, self.volname)))
-        g.log.info("Sucessfully set %s for volume %s", options, self.volname)
+        g.log.info("Successfully set %s for volume %s", options, self.volname)
         # Start IO on mounts
         g.log.info("Starting IO on all mounts...")
@@ -991,7 +991,7 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
         g.log.info("Validating whether IO failed with Read Only File System")
         ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
                                             self.mounts)
-        self.assertTrue(ret, ("Unexpected Error and IO successfull"
+        self.assertTrue(ret, ("Unexpected Error and IO successful"
                               " on Read-Only File System"))
         g.log.info("EXPECTED Read-only file system in IO while creating file")
@@ -1062,7 +1062,7 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
         self.assertTrue(ret, ("Failed to bring down the bricks. Please "
                               "check the log file for more details."))
         g.log.info("Brought down the brick process "
-                   "for %s succesfully", subvolumes_second_brick_list)
+                   "for %s successfully", subvolumes_second_brick_list)
         # start I/0 ( write and read ) - read must pass, write will fail
         g.log.info("Start creating files on all mounts...")
@@ -1079,7 +1079,7 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
         g.log.info("Validating whether IO failed with Read Only File System")
         ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
                                             self.mounts)
-        self.assertTrue(ret, ("Unexpected Error and IO successfull"
+        self.assertTrue(ret, ("Unexpected Error and IO successful"
                               " on Read-Only File System"))
         g.log.info("EXPECTED Read-only file system in IO while creating file")
@@ -1203,7 +1203,7 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
         self.assertTrue(ret, ("Failed to bring down the bricks. Please "
                               "check the log file for more details."))
         g.log.info("Brought down the brick process "
-                   "for %s succesfully", subvolumes_first_brick_list)
+                   "for %s successfully", subvolumes_first_brick_list)
         # start I/0 ( write and read ) - read must pass, write will fail
         g.log.info("Start creating files on all mounts...")
@@ -1220,7 +1220,7 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
         g.log.info("Validating whether IO failed with Read Only File System")
         ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
                                             self.mounts)
-        self.assertTrue(ret, ("Unexpected Error and IO successfull"
+        self.assertTrue(ret, ("Unexpected Error and IO successful"
                               " on Read-Only File System"))
         g.log.info("EXPECTED Read-only file system in IO while creating file")
@@ -1508,7 +1508,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass):
             ret = set_volume_options(self.mnode, vol_name, options)
             self.assertTrue(ret, ("Unable to set volume option %s for "
                                   "volume %s" % (options, vol_name)))
-            g.log.info("Sucessfully set %s for volume %s", options, vol_name)
+            g.log.info("Successfully set %s for volume %s", options, vol_name)
         # check is options are set correctly
         volume_list = get_volume_list(self.mnode)
@@ -1612,7 +1612,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass):
         g.log.info("Validating if IO failed with read-only filesystem")
         ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
                                             self.mounts)
-        self.assertTrue(ret, ("Unexpected error and IO successfull"
+        self.assertTrue(ret, ("Unexpected error and IO successful"
                               " on read-only filesystem"))
         g.log.info("EXPECTED: "
                    "Read-only file system in IO while creating file")
@@ -1803,7 +1803,7 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
             self.assertTrue(ret, ("Failed to bring down the bricks. Please "
                                   "check the log file for more details."))
             g.log.info("Brought down the brick process "
-                       "for %s succesfully", brick_to_bring_offline1)
+                       "for %s successfully", brick_to_bring_offline1)
             offline_brick1_from_replicasets.append(brick_to_bring_offline1)
         # start I/0 ( write and read ) - must succeed
@@ -1853,7 +1853,7 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
             self.assertTrue(ret, ("Failed to bring down the bricks. Please "
                                   "check the log file for more details."))
             g.log.info("Brought down the brick process "
-                       "for %s succesfully", brick_to_bring_offline2)
+                       "for %s successfully", brick_to_bring_offline2)
             offline_brick2_from_replicasets.append(brick_to_bring_offline2)
         # start I/0 ( write and read ) - must succeed
@@ -1957,7 +1957,7 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
         g.log.info("Validating whether IO failed with Read Only File System")
         ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
                                             self.mounts)
-        self.assertTrue(ret, ("Unexpected Error and IO successfull"
+        self.assertTrue(ret, ("Unexpected Error and IO successful"
                               " on Read-Only File System"))
         g.log.info("EXPECTED Read-only file system in IO while creating file")
@@ -2120,7 +2120,7 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
         self.assertTrue(ret, ("Failed to bring down the bricks. Please "
                               "check the log file for more details."))
         g.log.info("Brought down the brick process "
-                   "for %s succesfully", bricks_to_bring_offline)
+                   "for %s successfully", bricks_to_bring_offline)
         # start I/0 ( write and read ) - read must pass, write will fail
         g.log.info("Start creating files on all mounts...")
@@ -2137,7 +2137,7 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
         g.log.info("Validating whether IO failed with Read-only file system")
         ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
                                             self.mounts)
-        self.assertTrue(ret, ("Unexpected error and IO successfull"
+        self.assertTrue(ret, ("Unexpected error and IO successful"
                               " on Read-only file system"))
         g.log.info("EXPECTED: Read-only file system in IO while creating file")
@@ -2181,7 +2181,7 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
         g.log.info("Validating whether IO failed with Read-only file system")
         ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
                                             self.mounts)
-        self.assertTrue(ret, ("Unexpected error and IO successfull"
+        self.assertTrue(ret, ("Unexpected error and IO successful"
                               " on Read-only file system"))
         g.log.info("EXPECTED: Read-only file system in IO while creating file")
@@ -2225,7 +2225,7 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
         g.log.info("Validating whether IO failed with Read-only file system")
         ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
                                             self.mounts)
-        self.assertTrue(ret, ("Unexpected error and IO successfull"
+        self.assertTrue(ret, ("Unexpected error and IO successful"
                               " on Read-only file system"))
         g.log.info("EXPECTED: Read-only file system in IO while creating file")
diff --git a/tests/functional/afr/test_conservative_merge_of_files_heal_command.py b/tests/functional/afr/test_conservative_merge_of_files_heal_command.py
index 3a6c62069..06514b972 100644
--- a/tests/functional/afr/test_conservative_merge_of_files_heal_command.py
+++ b/tests/functional/afr/test_conservative_merge_of_files_heal_command.py
@@ -139,7 +139,7 @@ class VerifySelfHealTriggersHealCommand(GlusterBaseClass):
         ret = set_volume_options(self.mnode, self.volname, options)
         self.assertTrue(ret, ("Unable to set volume option %s for"
                               "volume %s" % (options, self.volname)))
-        g.log.info("Sucessfully set %s for volume %s", options, self.volname)
+        g.log.info("Successfully set %s for volume %s", options, self.volname)
         # Bring brick 0 offline
         g.log.info('Bringing bricks %s offline', bricks_list[0])
diff --git a/tests/functional/afr/test_gfid_assignment_on_dist_rep_vol.py b/tests/functional/afr/test_gfid_assignment_on_dist_rep_vol.py
index e815fa0b6..0015f079e 100644
--- a/tests/functional/afr/test_gfid_assignment_on_dist_rep_vol.py
+++ b/tests/functional/afr/test_gfid_assignment_on_dist_rep_vol.py
@@ -112,7 +112,7 @@ class AssignGfidsOnAllSubvols(GlusterBaseClass):
         # Verify gfids are same on all the bricks
         self.verify_gfid("dir1")
-        # Creat a new directory on all the bricks directly
+        # Create a new directory on all the bricks directly
         bricks_list = get_all_bricks(self.mnode, self.volname)
         for brick in bricks_list:
             brick_node, brick_path = brick.split(":")
diff --git a/tests/functional/afr/test_gfid_heal.py b/tests/functional/afr/test_gfid_heal.py
index 76d423e2d..589a420a0 100644
--- a/tests/functional/afr/test_gfid_heal.py
+++ b/tests/functional/afr/test_gfid_heal.py
@@ -129,7 +129,7 @@ class HealGfidTest(GlusterBaseClass):
         - Create a 1x3 volume and fuse mount it.
         - Create 1 directory with 1 file inside it directly on each brick.
         - Access the directories from the mount.
-        - Launch heals ans verify that the heals are over.
+        - Launch heals and verify that the heals are over.
         - Verify that the files and directories have gfid assigned.
         """
         # pylint: disable=too-many-statements
diff --git a/tests/functional/afr/test_gfid_split_brain_resolution.py b/tests/functional/afr/test_gfid_split_brain_resolution.py
index 0d6b0e220..a73ee407d 100644
--- a/tests/functional/afr/test_gfid_split_brain_resolution.py
+++ b/tests/functional/afr/test_gfid_split_brain_resolution.py
@@ -104,7 +104,7 @@ class TestSelfHeal(GlusterBaseClass):
         self.assertTrue(ret, ("Failed to bring down the bricks. Please "
                               "check the log file for more details."))
         g.log.info("Brought down the brick process "
-                   "for %s succesfully", brick_list)
+                   "for %s successfully", brick_list)
         ret = are_bricks_offline(self.mnode, self.volname, brick_list)
         self.assertTrue(ret, 'Bricks %s are not offline' % brick_list)
@@ -157,7 +157,7 @@ class TestSelfHeal(GlusterBaseClass):
         ret = set_volume_options(self.mnode, self.volname, options)
         self.assertTrue(ret, ("Unable to set volume option %s for "
                               "volume %s" % (options, self.volname)))
-        g.log.info("Sucessfully set %s for volume %s", options, self.volname)
+        g.log.info("Successfully set %s for volume %s", options, self.volname)
         # Create dir inside which I/O will be performed.
         ret = mkdir(self.mounts[0].client_system, "%s/test_gfid_split_brain"
diff --git a/tests/functional/afr/test_quota_limit_entry_heal.py b/tests/functional/afr/test_quota_limit_entry_heal.py
index 56388c157..033d326f4 100644
--- a/tests/functional/afr/test_quota_limit_entry_heal.py
+++ b/tests/functional/afr/test_quota_limit_entry_heal.py
@@ -130,7 +130,7 @@ class QuotaEntrySelfHealTest(GlusterBaseClass):
         ret = set_volume_options(self.mnode, self.volname, options)
         self.assertTrue(ret, ("Unable to set volume option %s for "
                               "volume %s" % (options, self.volname)))
-        g.log.info("Sucessfully set %s for volume %s", options, self.volname)
+        g.log.info("Successfully set %s for volume %s", options, self.volname)
         # Create directory on mount
         ret = mkdir(self.mounts[0].client_system, "%s/dir"
diff --git a/tests/functional/afr/test_self_heal_when_dir_quota_exceeded.py b/tests/functional/afr/test_self_heal_when_dir_quota_exceeded.py
index 92568baa9..4648c0f68 100644
--- a/tests/functional/afr/test_self_heal_when_dir_quota_exceeded.py
+++ b/tests/functional/afr/test_self_heal_when_dir_quota_exceeded.py
@@ -101,7 +101,7 @@ class HealFilesWhenDirQuotaExceeded(GlusterBaseClass):
                                               path))
         ret = g.run(mount_object.client_system, cmd)
         self.assertTrue(ret, "Failed to create directory on mountpoint")
-        g.log.info("Directory created succesfully on mountpoint")
+        g.log.info("Directory created successfully on mountpoint")
         # Enable Quota
         g.log.info("Enabling quota on the volume %s", self.volname)
@@ -142,7 +142,7 @@ class HealFilesWhenDirQuotaExceeded(GlusterBaseClass):
                "count=20; done" % (mount_object.mountpoint, path))
         ret, _, _ = g.run(mount_object.client_system, cmd)
         self.assertEqual(ret, 0, ("Failed to create files on %s", path))
-        g.log.info("Files created succesfully on mountpoint")
+        g.log.info("Files created successfully on mountpoint")
         bricks_list = get_all_bricks(self.mnode, self.volname)
diff --git a/tests/functional/afr/test_self_heal_with_quota_object_limit.py b/tests/functional/afr/test_self_heal_with_quota_object_limit.py
index 6fe45f7b5..ff308c3f6 100644
--- a/tests/functional/afr/test_self_heal_with_quota_object_limit.py
+++ b/tests/functional/afr/test_self_heal_with_quota_object_limit.py
@@ -101,7 +101,7 @@ class HealFilesWhenQuotaObjectLimitExceeded(GlusterBaseClass):
                                               path))
         ret = g.run(mount_object.client_system, cmd)
         self.assertTrue(ret, "Failed to create directory on mountpoint")
-        g.log.info("Directory created succesfully on mountpoint")
+        g.log.info("Directory created successfully on mountpoint")
         # Enable Quota
         g.log.info("Enabling quota on the volume %s", self.volname)
@@ -141,7 +141,7 @@ class HealFilesWhenQuotaObjectLimitExceeded(GlusterBaseClass):
               % (self.script_upload_path, mount_object.mountpoint, path))
         ret, _, _ = g.run(mount_object.client_system, cmd)
         self.assertEqual(ret, 0, ("Failed to create files on %s", path))
-        g.log.info("Files created succesfully on mountpoint")
+        g.log.info("Files created successfully on mountpoint")
         bricks_list = get_all_bricks(self.mnode, self.volname)
diff --git a/tests/functional/afr/test_write_io_mount_point_resumed_quorum_restored.py b/tests/functional/afr/test_write_io_mount_point_resumed_quorum_restored.py
index 96d5d7864..18125933a 100755
--- a/tests/functional/afr/test_write_io_mount_point_resumed_quorum_restored.py
+++ b/tests/functional/afr/test_write_io_mount_point_resumed_quorum_restored.py
@@ -142,7 +142,7 @@ class ClientSideQuorumRestored(GlusterBaseClass):
         ret = set_volume_options(self.mnode, self.volname, options)
         self.assertTrue(ret, ("Unable to set volume option %s for"
                               "volume %s" % (options, self.volname)))
-        g.log.info("Sucessfully set %s for volume %s",
+        g.log.info("Successfully set %s for volume %s",
                    options, self.volname)
         # Creating files on client side