-rw-r--r--  glustolibs-gluster/glustolibs/gluster/brick_libs.py | 2
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/brickmux_ops.py | 6
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/dht_test_utils.py | 4
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/exceptions.py | 4
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/heal_libs.py | 2
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/lib_utils.py | 8
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/mount_ops.py | 6
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/samba_libs.py | 2
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/volume_libs.py | 28
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/volume_ops.py | 2
-rwxr-xr-x  glustolibs-io/shared_files/scripts/fd_writes.py | 2
-rwxr-xr-x  glustolibs-io/shared_files/scripts/file_dir_ops.py | 4
-rw-r--r--  glustolibs-io/shared_files/scripts/generate_io.py | 2
-rwxr-xr-x  glustolibs-misc/glustolibs/misc/misc_libs.py | 16
-rw-r--r--  tests/functional/afr/heal/test_self_heal_daemon_process.py | 18
-rw-r--r--  tests/functional/afr/test_client_side_quorum.py | 48
-rw-r--r--  tests/functional/afr/test_conservative_merge_of_files_heal_command.py | 2
-rw-r--r--  tests/functional/afr/test_gfid_assignment_on_dist_rep_vol.py | 2
-rw-r--r--  tests/functional/afr/test_gfid_heal.py | 2
-rw-r--r--  tests/functional/afr/test_gfid_split_brain_resolution.py | 4
-rw-r--r--  tests/functional/afr/test_quota_limit_entry_heal.py | 2
-rw-r--r--  tests/functional/afr/test_self_heal_when_dir_quota_exceeded.py | 4
-rw-r--r--  tests/functional/afr/test_self_heal_with_quota_object_limit.py | 4
-rwxr-xr-x  tests/functional/afr/test_write_io_mount_point_resumed_quorum_restored.py | 2
-rwxr-xr-x  tests/functional/arbiter/brick_cases/test_brickcases.py | 2
-rw-r--r--  tests/functional/arbiter/brick_cases/test_replica3_to_arbiter.py | 4
-rwxr-xr-x  tests/functional/arbiter/test_mount_point_while_deleting_files.py | 2
-rw-r--r--  tests/functional/authentication/test_fusereject.py | 2
-rw-r--r--  tests/functional/authentication/test_vol_auth.py | 2
-rw-r--r--  tests/functional/bvt/test_cvt.py | 2
-rw-r--r--  tests/functional/dht/test_dht_hash_value.py | 4
-rw-r--r--  tests/functional/dht/test_stop_glusterd_while_rebalance_in_progress.py | 4
-rw-r--r--  tests/functional/disperse/test_ec_validate_volume_creation.py | 14
-rw-r--r--  tests/functional/fuse_subdir/test_fusesubdir_with_quota_objects.py | 6
-rw-r--r--  tests/functional/fuse_subdir/test_fusesubdir_with_removebrick.py | 2
-rw-r--r--  tests/functional/fuse_subdir/test_leveldownsubdir_with_multiclient.py | 4
-rw-r--r--  tests/functional/fuse_subdir/test_multisubdir_with_multiclient.py | 4
-rw-r--r--  tests/functional/fuse_subdir/test_quota_limits_fusesubdir_.py | 4
-rw-r--r--  tests/functional/glusterd/test_brick_port_after_stop_glusterd_modify_volume.py | 2
-rw-r--r--  tests/functional/glusterd/test_brick_status_when_quorum_not_met.py | 2
-rw-r--r--  tests/functional/glusterd/test_concurrent_set.py | 4
-rw-r--r--  tests/functional/glusterd/test_create_vol_with_used_bricks.py | 2
-rw-r--r--  tests/functional/glusterd/test_nfs_quorum.py | 2
-rw-r--r--  tests/functional/glusterd/test_peer_detach.py | 2
-rw-r--r--  tests/functional/glusterd/test_peer_probe_while_snapd_running.py | 2
-rw-r--r--  tests/functional/glusterd/test_probe_glusterd.py | 2
-rw-r--r--  tests/functional/glusterd/test_probe_hostname.py | 12
-rw-r--r--  tests/functional/glusterd/test_quorum_remove_brick.py | 2
-rw-r--r--  tests/functional/glusterd/test_rebalance_hang.py | 4
-rw-r--r--  tests/functional/glusterd/test_rebalance_spurious.py | 4
-rw-r--r--  tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py | 4
-rw-r--r--  tests/functional/glusterd/test_volume_get.py | 4
-rw-r--r--  tests/functional/glusterd/test_volume_network_ping_timeout.py | 2
-rw-r--r--  tests/functional/glusterd/test_volume_reset.py | 6
-rw-r--r--  tests/functional/glusterd/test_volume_status.py | 8
-rw-r--r--  tests/functional/glusterd/test_volume_status_fd.py | 2
-rw-r--r--  tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py | 2
-rw-r--r--  tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py | 4
-rw-r--r--  tests/functional/quota/list_path_values.py | 2
-rw-r--r--  tests/functional/quota/test_limit_usage_deep_dir.py | 2
-rw-r--r--  tests/functional/quota/test_multi_value_limit.py | 6
-rw-r--r--  tests/functional/quota/test_quota_add_brick.py | 2
-rwxr-xr-x  tests/functional/quota/test_quota_limit_dir_breadth.py | 2
-rw-r--r--  tests/functional/quota/test_quota_unique_soft_limit.py | 2
-rw-r--r--  tests/functional/quota/test_quota_volume_subdir_limits.py | 2
-rw-r--r--  tests/functional/snapshot/test_256_snapshots.py | 6
-rw-r--r--  tests/functional/snapshot/test_clone_snap.py | 4
-rw-r--r--  tests/functional/snapshot/test_mount_snap.py | 2
-rw-r--r--  tests/functional/snapshot/test_snap_delete_existing_scheduler.py | 2
-rw-r--r--  tests/functional/snapshot/test_snap_list_after_restart.py | 2
-rw-r--r--  tests/functional/snapshot/test_snap_uss.py | 6
-rw-r--r--  tests/functional/snapshot/test_snapshot_create.py | 4
-rw-r--r--  tests/functional/snapshot/test_snapshot_restore.py | 4
-rw-r--r--  tests/functional/snapshot/test_uss_brick_down.py | 4
-rw-r--r--  tests/functional/snapshot/test_uss_snap_active_deactive.py | 2
-rw-r--r--  tests/functional/snapshot/test_validate_snap_scheduler.py | 4
-rw-r--r--  tests/functional/snapshot/test_validate_snapshot_max_limit.py | 10
-rw-r--r--  tests/functional/snapshot/test_validate_snapshot_rebalance.py | 4
78 files changed, 188 insertions, 188 deletions
diff --git a/glustolibs-gluster/glustolibs/gluster/brick_libs.py b/glustolibs-gluster/glustolibs/gluster/brick_libs.py
index 67496b88a..270b378c1 100644
--- a/glustolibs-gluster/glustolibs/gluster/brick_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/brick_libs.py
@@ -29,7 +29,7 @@ from glustolibs.gluster.volume_libs import (get_subvols, is_tiered_volume,
def get_all_bricks(mnode, volname):
"""Get list of all the bricks of the specified volume.
- If the volume is 'Tier' volume, the list will contian both
+ If the volume is 'Tier' volume, the list will contain both
'hot tier' and 'cold tier' bricks.
Args:
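For context, a minimal usage sketch of get_all_bricks() as documented in the hunk above; the host and volume names below are placeholders, and the "host:path" brick format is the one used throughout these tests.

    from glustolibs.gluster.brick_libs import get_all_bricks

    # get_all_bricks() returns a list of "host:brick_path" entries, or None on
    # failure; for a 'Tier' volume it contains both hot and cold tier bricks.
    bricks = get_all_bricks("server1.example.com", "testvol")
    if bricks is None:
        raise RuntimeError("could not fetch the brick list of testvol")
    for brick in bricks:
        brick_node, brick_path = brick.split(":")
        print(brick_node, brick_path)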
diff --git a/glustolibs-gluster/glustolibs/gluster/brickmux_ops.py b/glustolibs-gluster/glustolibs/gluster/brickmux_ops.py
index 3fcb4ee59..0a88d4b22 100644
--- a/glustolibs-gluster/glustolibs/gluster/brickmux_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/brickmux_ops.py
@@ -94,9 +94,9 @@ def check_brick_pid_matches_glusterfsd_pid(mnode, volname):
glusterfsd_pid = pid.split()[1]
if glusterfsd_pid != brick_pid:
- g.log.eror("Brick pid %s doesn't macth glusterfsd "
- "pid %s of the node %s", brick_pid,
- glusterfsd_pid, brick_node)
+ g.log.error("Brick pid %s doesn't match glusterfsd "
+ "pid %s of the node %s", brick_pid,
+ glusterfsd_pid, brick_node)
_rc = False
return _rc
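A hedged usage sketch of check_brick_pid_matches_glusterfsd_pid(), which returns the boolean _rc flag set in the hunk above; the host and volume names are placeholders.

    from glustolibs.gluster.brickmux_ops import (
        check_brick_pid_matches_glusterfsd_pid)

    # False means at least one brick pid did not match its glusterfsd pid.
    if not check_brick_pid_matches_glusterfsd_pid("server1.example.com",
                                                  "testvol"):
        print("brick pid mismatch detected on testvol")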
diff --git a/glustolibs-gluster/glustolibs/gluster/dht_test_utils.py b/glustolibs-gluster/glustolibs/gluster/dht_test_utils.py
index 0ed52c457..692f09baf 100644
--- a/glustolibs-gluster/glustolibs/gluster/dht_test_utils.py
+++ b/glustolibs-gluster/glustolibs/gluster/dht_test_utils.py
@@ -186,7 +186,7 @@ def find_hashed_subvol(subvols, parent_path, name):
name: file or directory name
- Retrun Values:
+ Return Values:
hashed_subvol object: An object of type BrickDir type representing
the hashed subvolume
@@ -224,7 +224,7 @@ def find_nonhashed_subvol(subvols, parent_path, name):
name: file or directory name
- Retrun Values:
+ Return Values:
nonhashed_subvol object: An object of type BrickDir type
representing the nonhashed subvolume
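A sketch of how the corrected docstrings are exercised; get_subvols() comes from volume_libs (imported in the brick_libs hunk above), while the 'volume_subvols' key and the exact return shape of find_hashed_subvol() are assumptions, since the hunk only shows part of the docstring.

    from glustolibs.gluster.dht_test_utils import find_hashed_subvol
    from glustolibs.gluster.volume_libs import get_subvols

    # Placeholder host/volume; the 'volume_subvols' key name is assumed.
    subvols = get_subvols("server1.example.com", "testvol")['volume_subvols']
    hashed = find_hashed_subvol(subvols, "/", "file1.txt")
    # Per the docstring this describes the BrickDir the name hashes to;
    # inspect the returned value before unpacking it in a test.
    print(hashed)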
diff --git a/glustolibs-gluster/glustolibs/gluster/exceptions.py b/glustolibs-gluster/glustolibs/gluster/exceptions.py
index c52dcee59..8ee443841 100644
--- a/glustolibs-gluster/glustolibs/gluster/exceptions.py
+++ b/glustolibs-gluster/glustolibs/gluster/exceptions.py
@@ -107,8 +107,8 @@ class ExecutionParseError(Exception):
For example, the output of a command when has to be parsed, can have three
states. First, the output was as expected. Second, didn't get the expected
- ouput after the parsing result and Third, didn't get the expected result as
- the command itself failed.
+ output after the parsing result and Third, didn't get the expected result
+ as the command itself failed.
'''
pass
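A short illustration of the third failure mode the docstring describes; the parsing helper and the command output it consumes are hypothetical.

    from glustolibs.gluster.exceptions import ExecutionParseError

    def parse_peer_count(status_output):
        """Hypothetical parser that raises ExecutionParseError when the
        command output cannot be parsed."""
        try:
            return int(status_output.split(":")[1].strip())
        except (IndexError, ValueError):
            raise ExecutionParseError("unable to parse peer count from: %s"
                                      % status_output)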
diff --git a/glustolibs-gluster/glustolibs/gluster/heal_libs.py b/glustolibs-gluster/glustolibs/gluster/heal_libs.py
index 1de540836..e71835d78 100644
--- a/glustolibs-gluster/glustolibs/gluster/heal_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/heal_libs.py
@@ -60,7 +60,7 @@ def is_heal_disabled(mnode, volname):
volname : Name of the volume
Returns:
- bool : True if heal is diabled on volume. False otherwise.
+ bool : True if heal is disabled on volume. False otherwise.
NoneType: None if unable to get the volume status shd or parse error.
"""
cmd = "gluster volume status %s shd --xml" % volname
diff --git a/glustolibs-gluster/glustolibs/gluster/lib_utils.py b/glustolibs-gluster/glustolibs/gluster/lib_utils.py
index dcfb297c5..682152f5f 100644
--- a/glustolibs-gluster/glustolibs/gluster/lib_utils.py
+++ b/glustolibs-gluster/glustolibs/gluster/lib_utils.py
@@ -65,7 +65,7 @@ def append_string_to_file(mnode, filename, str_to_add_in_file,
return True
except IOError:
- g.log.error("Exception occured while adding string to "
+ g.log.error("Exception occurred while adding string to "
"file %s in append_string_to_file()", filename)
return False
finally:
@@ -90,7 +90,7 @@ def search_pattern_in_file(mnode, search_pattern, filename, start_str_to_parse,
file from which this method will check
if the given search string is present.
end_str_to_parse (str): this will be as end string in the
- file whithin which this method will check
+ file within which this method will check
if the given search string is present.
Returns:
@@ -288,7 +288,7 @@ def list_files(mnode, dir_path, parse_str="", user="root"):
filepaths.append(filepath)
return filepaths
except StopIteration:
- g.log.error("Exception occured in list_files()")
+ g.log.error("Exception occurred in list_files()")
return None
finally:
@@ -788,7 +788,7 @@ def is_core_file_created(nodes, testrun_timestamp,
List of nodes need to pass from test method
testrun_timestamp:
This time stamp need to pass from test method
- test case runing started time, time format is EPOCH
+ test case running started time, time format is EPOCH
time format, use below command for getting timestamp
of test case 'date +%s'
paths(list):
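A minimal sketch of append_string_to_file(), which returns True on success and False on the IOError path logged above; the node and file path are placeholders.

    from glustolibs.gluster.lib_utils import append_string_to_file

    ok = append_string_to_file("server1.example.com", "/var/tmp/test_run.log",
                               "test run started")
    if not ok:
        print("failed to append to /var/tmp/test_run.log")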
diff --git a/glustolibs-gluster/glustolibs/gluster/mount_ops.py b/glustolibs-gluster/glustolibs/gluster/mount_ops.py
index c0c7cc08e..637af24b1 100644
--- a/glustolibs-gluster/glustolibs/gluster/mount_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/mount_ops.py
@@ -322,7 +322,7 @@ def mount_volume(volname, mtype, mpoint, mserver, mclient, options='',
ret, out, err = g.run(mclient, mcmd, user=user)
if ret != 0:
- g.log.error("net use comand failed on windows client %s "
+ g.log.error("net use command failed on windows client %s "
"failed: %s" % (mclient, err))
return (ret, out, err)
@@ -332,7 +332,7 @@ def mount_volume(volname, mtype, mpoint, mserver, mclient, options='',
"Share is : %s" % (mclient, drv_ltr))
return (ret, drv_ltr, err)
- g.log.error("net use comand successful but error in mount of samba "
+ g.log.error("net use command successful but error in mount of samba "
" share for windows client %s for reason %s" %
(mclient, err))
return (1, out, err)
@@ -370,7 +370,7 @@ def mount_volume(volname, mtype, mpoint, mserver, mclient, options='',
if "Santiago" in name:
cifs_options = "sec=ntlmssp"
except Exception as e:
- g.log.error("Exception occured while getting the platform "
+ g.log.error("Exception occurred while getting the platform "
"of node %s: %s", mclient, str(e))
return (1, '', '')
finally:
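mount_volume() returns the (ret, out, err) tuple seen in the hunks above; a hedged sketch with placeholder hosts, volume and mount point:

    from glustolibs.gluster.mount_ops import mount_volume

    ret, out, err = mount_volume("testvol", mtype="glusterfs",
                                 mpoint="/mnt/testvol",
                                 mserver="server1.example.com",
                                 mclient="client1.example.com")
    if ret != 0:
        print("mount failed: %s" % err)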
diff --git a/glustolibs-gluster/glustolibs/gluster/samba_libs.py b/glustolibs-gluster/glustolibs/gluster/samba_libs.py
index 8c70b6774..bffe6a12c 100644
--- a/glustolibs-gluster/glustolibs/gluster/samba_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/samba_libs.py
@@ -276,7 +276,7 @@ def share_volume_over_smb(mnode, volname, smb_users_info):
cmd = ("smbclient -L localhost -U | grep -i -Fw gluster-%s " % volname)
ret, _, _ = g.run(mnode, cmd)
if ret != 0:
- g.log.error("volume '%s' not accessable via SMB/CIFS share", volname)
+ g.log.error("volume '%s' not accessible via SMB/CIFS share", volname)
return False
g.log.info("volume '%s' can be accessed from SMB/CIFS share", volname)
diff --git a/glustolibs-gluster/glustolibs/gluster/volume_libs.py b/glustolibs-gluster/glustolibs/gluster/volume_libs.py
index e5a37de1c..784e61697 100644
--- a/glustolibs-gluster/glustolibs/gluster/volume_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/volume_libs.py
@@ -143,7 +143,7 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
if 'dist_count' in volume_config['voltype']:
kwargs['dist_count'] = (volume_config['voltype']['dist_count'])
else:
- g.log.error("Distibute Count not specified in the volume config")
+ g.log.error("Distribute count not specified in the volume config")
return False
number_of_bricks = kwargs['dist_count']
@@ -166,7 +166,7 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
if 'dist_count' in volume_config['voltype']:
kwargs['dist_count'] = (volume_config['voltype']['dist_count'])
else:
- g.log.error("Distibute Count not specified in the volume config")
+ g.log.error("Distribute count not specified in the volume config")
return False
if 'replica_count' in volume_config['voltype']:
@@ -203,7 +203,7 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
if 'dist_count' in volume_config['voltype']:
kwargs['dist_count'] = (volume_config['voltype']['dist_count'])
else:
- g.log.error("Distibute Count not specified in the volume config")
+ g.log.error("Distribute Count not specified in the volume config")
return False
if 'disperse_count' in volume_config['voltype']:
@@ -613,7 +613,7 @@ def is_tiered_volume(mnode, volname):
Returns:
bool : True if the volume is tiered volume. False otherwise
- NoneType: None if volume doesnot exist.
+ NoneType: None if volume does not exist.
"""
volinfo = get_volume_info(mnode, volname)
if volinfo is None:
@@ -636,7 +636,7 @@ def is_distribute_volume(mnode, volname):
Returns:
bool : True if the volume is distributed volume. False otherwise
- NoneType: None if volume doesnot exist.
+ NoneType: None if volume does not exist.
"""
volume_type_info = get_volume_type_info(mnode, volname)
if volume_type_info is None:
@@ -666,7 +666,7 @@ def get_volume_type_info(mnode, volname):
mnode (str): Node on which commands are executed.
volname (str): Name of the volume.
- Retunrs:
+ Returns:
dict : Dict containing the keys, values defining the volume type:
Example:
volume_type_info = {
@@ -751,7 +751,7 @@ def get_cold_tier_type_info(mnode, volname):
mnode (str): Node on which commands are executed.
volname (str): Name of the volume.
- Retunrs:
+ Returns:
dict : Dict containing the keys, values defining the cold tier type:
Example:
cold_tier_type_info = {
@@ -805,7 +805,7 @@ def get_hot_tier_type_info(mnode, volname):
mnode (str): Node on which commands are executed.
volname (str): Name of the volume.
- Retunrs:
+ Returns:
dict : Dict containing the keys, values defining the hot tier type:
Example:
hot_tier_type_info = {
@@ -865,7 +865,7 @@ def get_num_of_bricks_per_subvol(mnode, volname):
'volume_num_of_bricks_per_subvol': None
}
- NoneType: None if volume doesnot exist or is a tiered volume.
+ NoneType: None if volume does not exist or is a tiered volume.
"""
bricks_per_subvol_dict = {
'is_tier': False,
@@ -899,7 +899,7 @@ def get_cold_tier_num_of_bricks_per_subvol(mnode, volname):
Returns:
int : Number of bricks per subvol on cold tier.
- NoneType: None if volume doesnot exist or not a tiered volume.
+ NoneType: None if volume does not exist or not a tiered volume.
"""
if not is_tiered_volume(mnode, volname):
g.log.error("Volume %s is not a tiered volume", volname)
@@ -920,7 +920,7 @@ def get_hot_tier_num_of_bricks_per_subvol(mnode, volname):
Returns:
int : Number of bricks per subvol on hot tier.
- NoneType: None if volume doesnot exist or not a tiered volume.
+ NoneType: None if volume does not exist or not a tiered volume.
"""
if not is_tiered_volume(mnode, volname):
g.log.error("Volume %s is not a tiered volume", volname)
@@ -1708,8 +1708,8 @@ def shrink_volume(mnode, volname, subvol_num=None, replica_num=None,
g.log.error("Failed to commit remove-brick of bricks %s on volume %s",
bricks_list_to_remove, volname)
return False
- g.log.info("Successfully commited remove-bricks of bricks %s on volume %s",
- bricks_list_to_remove, volname)
+ g.log.info("Successfully committed remove-bricks of bricks %s on volume "
+ "%s", bricks_list_to_remove, volname)
# Delete the removed bricks
if delete_bricks:
@@ -1941,7 +1941,7 @@ def get_client_quorum_info(mnode, volname):
'quorum_count': None
}
}
- NoneType: None if volume doesnot exist.
+ NoneType: None if volume does not exist.
"""
client_quorum_dict = {
'is_tier': False,
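Most of the helpers touched here return either a dict or None when the volume does not exist, so callers check for None first; a sketch using get_volume_type_info() with placeholder values:

    from glustolibs.gluster.volume_libs import get_volume_type_info

    type_info = get_volume_type_info("server1.example.com", "testvol")
    if type_info is None:
        print("volume testvol does not exist")
    else:
        print(type_info)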
diff --git a/glustolibs-gluster/glustolibs/gluster/volume_ops.py b/glustolibs-gluster/glustolibs/gluster/volume_ops.py
index 4302f2780..e478ed979 100644
--- a/glustolibs-gluster/glustolibs/gluster/volume_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/volume_ops.py
@@ -377,7 +377,7 @@ def get_volume_status(mnode, volname='all', service='', options=''):
Kwargs:
volname (str): volume name. Defaults to 'all'
service (str): name of the service to get status.
- serivce can be, [nfs|shd|<BRICK>|quotad]], If not given,
+ service can be, [nfs|shd|<BRICK>|quotad]], If not given,
the function returns all the services
options (str): options can be,
[detail|clients|mem|inode|fd|callpool|tasks]. If not given,
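A hedged sketch of get_volume_status() with the service kwarg described above; the host name is a placeholder and the None-on-error return is an assumption based on similar helpers in this library.

    from glustolibs.gluster.volume_ops import get_volume_status

    status = get_volume_status("server1.example.com", volname="testvol",
                               service="shd")
    if status is None:
        print("could not fetch the shd status of testvol")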
diff --git a/glustolibs-io/shared_files/scripts/fd_writes.py b/glustolibs-io/shared_files/scripts/fd_writes.py
index ba46a5f7d..fc9273633 100755
--- a/glustolibs-io/shared_files/scripts/fd_writes.py
+++ b/glustolibs-io/shared_files/scripts/fd_writes.py
@@ -192,7 +192,7 @@ def fd_writes(args):
if __name__ == "__main__":
parser = argparse.ArgumentParser(
- description="Program to perfrom fd based writes on files for time t",
+ description="Program to perform fd based writes on files for time t",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-f', '--file-sizes-list',
diff --git a/glustolibs-io/shared_files/scripts/file_dir_ops.py b/glustolibs-io/shared_files/scripts/file_dir_ops.py
index 5bcc422ef..ba6e0b07f 100755
--- a/glustolibs-io/shared_files/scripts/file_dir_ops.py
+++ b/glustolibs-io/shared_files/scripts/file_dir_ops.py
@@ -785,7 +785,7 @@ if __name__ == "__main__":
create_deep_dir_parser = subparsers.add_parser(
'create_deep_dir',
help=("Create deep dirs under 'dir' with depth 'dir_depth'."
- "In each level creates sub-dirs max upto 'max_num_of_dirs'."),
+ "In each level creates sub-dirs max up to 'max_num_of_dirs'."),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
create_deep_dir_parser.add_argument(
'-d', '--dir-depth',
@@ -814,7 +814,7 @@ if __name__ == "__main__":
create_deep_dir_with_files_parser = subparsers.add_parser(
'create_deep_dirs_with_files',
help=("Create deep dirs under 'dir' with depth 'dir_depth'. "
- "In each level creates sub-dirs max upto 'max_num_of_dirs'. "
+ "In each level creates sub-dirs max up to 'max_num_of_dirs'. "
"Creates specified 'num_of_files' in each dir created."),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
create_deep_dir_with_files_parser.add_argument(
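The sub-commands above are normally driven from tests over g.run; a sketch follows. The client host, the script path and every flag except --dir-depth are assumptions taken from the help text, not from this hunk.

    from glusto.core import Glusto as g

    # Script path, extra flags and mount point are assumed for illustration.
    cmd = ("python /usr/share/glustolibs/io/scripts/file_dir_ops.py "
           "create_deep_dirs_with_files --dir-depth 2 "
           "--max-num-of-dirs 5 --num-of-files 10 /mnt/testvol")
    ret, out, err = g.run("client1.example.com", cmd)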
diff --git a/glustolibs-io/shared_files/scripts/generate_io.py b/glustolibs-io/shared_files/scripts/generate_io.py
index 1b28983a0..c9836ba80 100644
--- a/glustolibs-io/shared_files/scripts/generate_io.py
+++ b/glustolibs-io/shared_files/scripts/generate_io.py
@@ -338,7 +338,7 @@ def stop_populate_data(proc_list, proc_queue, mevent=None):
proc.terminate()
return True
except Exception as e:
- print("Exception occured in stop_populate_data(): %s"
+ print("Exception occurred in stop_populate_data(): %s"
% e)
return False
diff --git a/glustolibs-misc/glustolibs/misc/misc_libs.py b/glustolibs-misc/glustolibs/misc/misc_libs.py
index fe117f1a4..fb4bb49f1 100755
--- a/glustolibs-misc/glustolibs/misc/misc_libs.py
+++ b/glustolibs-misc/glustolibs/misc/misc_libs.py
@@ -98,7 +98,7 @@ def upload_scripts(list_of_nodes, list_of_scripts_abs_path,
user (optional[str]): The user to use for the remote connection.
Returns:
- bool: True if uploading scripts is sucessful on all nodes.
+ bool: True if uploading scripts is successful on all nodes.
False otherwise.
"""
if not isinstance(list_of_nodes, list):
@@ -160,7 +160,7 @@ def yum_add_repos(list_of_nodes, list_of_yum_repos):
list_of_yum_repos (list): List of yum repos
Returns:
- bool: True if adding yum repo files is sucessful on all nodes.
+ bool: True if adding yum repo files is successful on all nodes.
False otherwise.
"""
if not isinstance(list_of_nodes, list):
@@ -193,7 +193,7 @@ def yum_install_packages(list_of_nodes, yum_packages):
yum_packages (list): List of yum packages.
Returns:
- bool: True if installation of packages is sucessful on all nodes.
+ bool: True if installation of packages is successful on all nodes.
False otherwise.
"""
if not isinstance(list_of_nodes, list):
@@ -224,7 +224,7 @@ def yum_remove_packages(list_of_nodes, yum_packages):
yum_packages (list): List of yum packages.
Returns:
- bool: True if removing packages is sucessful on all nodes.
+ bool: True if removing packages is successful on all nodes.
False otherwise.
"""
if not isinstance(list_of_nodes, list):
@@ -256,7 +256,7 @@ def pip_install_packages(list_of_nodes, python_packages):
python_packages (list): List of python packages.
Returns:
- bool: True if installation of packages is sucessful on all nodes.
+ bool: True if installation of packages is successful on all nodes.
False otherwise.
"""
if not isinstance(list_of_nodes, list):
@@ -290,7 +290,7 @@ def install_testing_tools(list_of_nodes, testing_tools):
- arequal
Returns:
- bool: True if installation of all testing tools is sucessful on
+ bool: True if installation of all testing tools is successful on
all nodes. False otherwise.
"""
if not isinstance(list_of_nodes, list):
@@ -471,7 +471,7 @@ def reboot_nodes_and_wait_to_come_online(nodes, timeout=300):
The second element 'reboot_results' is of type dictonary and it
contains the node and corresponding result for reboot. If reboot is
- successfull on node, then result contains True else False.
+ successful on node, then result contains True else False.
"""
_rc = reboot_nodes(nodes)
reboot_results = {}
@@ -551,7 +551,7 @@ def drop_caches(hosts):
dropped (Servers/ Clients)
Returns:
- bool : True , post succesful completion.Else,False.
+ bool : True , post successful completion.Else,False.
"""
cmd = "echo 3 > /proc/sys/vm/drop_caches"
results = g.run_parallel(hosts, cmd)
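reboot_nodes_and_wait_to_come_online() returns an overall flag plus the per-node reboot_results dict described above; a sketch with placeholder host names:

    from glustolibs.misc.misc_libs import reboot_nodes_and_wait_to_come_online

    ok, reboot_results = reboot_nodes_and_wait_to_come_online(
        ["server1.example.com", "server2.example.com"], timeout=300)
    for node, rebooted in reboot_results.items():
        print(node, "is back online" if rebooted else "did not come back")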
diff --git a/tests/functional/afr/heal/test_self_heal_daemon_process.py b/tests/functional/afr/heal/test_self_heal_daemon_process.py
index ed71e4f2b..15cd43951 100644
--- a/tests/functional/afr/heal/test_self_heal_daemon_process.py
+++ b/tests/functional/afr/heal/test_self_heal_daemon_process.py
@@ -176,7 +176,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
self.all_servers_info)
self.assertTrue(ret, ("Failed to add bricks to "
"volume %s " % self.volname))
- g.log.info("Add brick successfull")
+ g.log.info("Add brick successful")
# Log Volume Info and Status after expanding the volume
g.log.info("Logging volume info and Status after expanding volume")
@@ -231,11 +231,11 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
ret, pids = get_self_heal_daemon_pid(nodes)
self.assertTrue(ret, ("Either No self heal daemon process found or "
"more than One self heal daemon process found"))
- g.log.info("Successfull in getting self-heal daemon process "
+ g.log.info("Successful in getting self-heal daemon process "
"on nodes %s", nodes)
glustershd_pids_after_expanding = pids
- g.log.info("Self Heal Daemon Process ID's afetr expanding "
+ g.log.info("Self Heal Daemon Process ID's after expanding "
"volume: %s", glustershd_pids_after_expanding)
self.assertNotEqual(glustershd_pids,
@@ -389,7 +389,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
ret, pids = get_self_heal_daemon_pid(nodes)
self.assertTrue(ret, ("Either No self heal daemon process found or "
"more than One self heal daemon process found"))
- g.log.info("Succesfull in getting self heal daemon pids")
+ g.log.info("Successful in getting self heal daemon pids")
glustershd_pids = pids
# get the bricks for the volume
@@ -461,7 +461,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
self.assertTrue(ret, ("Failed to bring down the bricks. Please "
"check the log file for more details."))
g.log.info("Brought down the brick process "
- "for %s succesfully", bricks_to_bring_offline)
+ "for %s successfully", bricks_to_bring_offline)
# restart glusterd after brought down the brick
g.log.info("Restart glusterd on all servers %s", nodes)
@@ -922,7 +922,7 @@ class SelfHealDaemonProcessTestsWithHealing(GlusterBaseClass):
self.assertTrue(ret, ("Failed to bring down the bricks. Please "
"check the log file for more details."))
g.log.info("Brought down the brick process "
- "for %s succesfully", bricks_to_bring_offline)
+ "for %s successfully", bricks_to_bring_offline)
# get the bricks which are running
g.log.info("getting the brick list which are online")
@@ -996,9 +996,9 @@ class SelfHealDaemonProcessTestsWithHealing(GlusterBaseClass):
self.assertTrue(ret, ("Failed to bring down the bricks. Please "
"check the log file for more details."))
g.log.info("Brought down the brick process "
- "for %s succesfully", bricks_to_bring_offline)
+ "for %s successfully", bricks_to_bring_offline)
- # wait for 60 sec and brought up the brick agian
+ # wait for 60 sec and brought up the brick again
time.sleep(60)
g.log.info("Bring bricks: %s online", bricks_to_bring_offline)
ret = bring_bricks_online(self.mnode, self.volname,
@@ -1182,7 +1182,7 @@ class SelfHealDaemonProcessTestsWithMultipleVolumes(GlusterBaseClass):
self.all_servers_info)
self.assertTrue(ret, ("Failed to add bricks to "
"volume %s " % volume))
- g.log.info("Add brick successfull")
+ g.log.info("Add brick successful")
# Log Volume Info and Status after expanding the volume
g.log.info("Logging volume info and Status after "
diff --git a/tests/functional/afr/test_client_side_quorum.py b/tests/functional/afr/test_client_side_quorum.py
index 0432a13ab..1dcb96ef5 100644
--- a/tests/functional/afr/test_client_side_quorum.py
+++ b/tests/functional/afr/test_client_side_quorum.py
@@ -114,7 +114,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
ret = set_volume_options(self.mnode, self.volname, options)
self.assertTrue(ret, ("Unable to set volume option %s for"
"volume %s" % (options, self.volname)))
- g.log.info("Sucessfully set %s for volume %s", options, self.volname)
+ g.log.info("Successfully set %s for volume %s", options, self.volname)
# write files on all mounts
g.log.info("Starting IO on all mounts...")
@@ -155,7 +155,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
self.assertTrue(ret, ("Failed to bring down the bricks. Please "
"check the log file for more details."))
g.log.info("Brought down the brick process "
- "for %s succesfully", bricks_to_bring_offline)
+ "for %s successfully", bricks_to_bring_offline)
# create 2 files named newfile0.txt and newfile1.txt
g.log.info("Start creating 2 files on all mounts...")
@@ -172,7 +172,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
g.log.info("Validating whether IO failed with read-only filesystem")
ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
self.mounts)
- self.assertTrue(ret, ("Unexpected error and IO successfull"
+ self.assertTrue(ret, ("Unexpected error and IO successful"
" on read-only filesystem"))
g.log.info("EXPECTED: Read-only file system in IO while creating file")
@@ -190,7 +190,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
g.log.info("Validating whether IO failed with read-only filesystem")
ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
self.mounts)
- self.assertTrue(ret, ("Unexpected error and IO successfull"
+ self.assertTrue(ret, ("Unexpected error and IO successful"
" on read-only filesystem"))
g.log.info("EXPECTED: Read-only file system in IO while"
" creating directory")
@@ -282,7 +282,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
ret, _, err = g.run(mount_obj.client_system, cmd)
self.assertFalse(ret, ("Unexpected error and stat on file fails"
" on read-only filesystem"))
- g.log.info("stat on file is successfull on read-only filesystem")
+ g.log.info("stat on file is successful on read-only filesystem")
# stat on dir
g.log.info("stat on directory on all mounts")
@@ -292,7 +292,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
ret, _, err = g.run(mount_obj.client_system, cmd)
self.assertFalse(ret, ("Unexpected error and stat on directory"
" fails on read-only filesystem"))
- g.log.info("stat on dir is successfull on read-only filesystem")
+ g.log.info("stat on dir is successful on read-only filesystem")
# ls on mount point
g.log.info("ls on mount point on all mounts")
@@ -302,7 +302,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
ret, _, err = g.run(mount_obj.client_system, cmd)
self.assertFalse(ret, ("Unexpected error and listing file fails"
" on read-only filesystem"))
- g.log.info("listing files is successfull on read-only filesystem")
+ g.log.info("listing files is successful on read-only filesystem")
def test_client_side_quorum_with_fixed_validate_max_bricks(self):
"""
@@ -402,7 +402,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
self.assertEqual(option_dict['cluster.quorum-count'], '(null)',
("Default value for %s is not null"
" for volume %s" % (option, self.volname)))
- g.log.info("Succesfull in getting %s for the volume %s",
+ g.log.info("Successful in getting %s for the volume %s",
option, self.volname)
# set cluster.quorum-type to fixed and cluster.quorum-count to 1
@@ -452,7 +452,7 @@ class ClientSideQuorumTests(GlusterBaseClass):
self.assertTrue(ret, ("Failed to bring down the bricks. Please "
"check the log file for more details."))
g.log.info("Brought down the brick process "
- "for %s succesfully", bricks_to_bring_offline)
+ "for %s successfully", bricks_to_bring_offline)
# create files
g.log.info("Starting IO on all mounts...")
@@ -609,7 +609,7 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
ret = set_volume_options(self.mnode, self.volname, options)
self.assertTrue(ret, ("Unable to set volume option %s for"
"volume %s" % (options, self.volname)))
- g.log.info("Sucessfully set %s for volume %s", options, self.volname)
+ g.log.info("Successfully set %s for volume %s", options, self.volname)
# Start IO on mounts
g.log.info("Starting IO on all mounts...")
@@ -991,7 +991,7 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
g.log.info("Validating whether IO failed with Read Only File System")
ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
self.mounts)
- self.assertTrue(ret, ("Unexpected Error and IO successfull"
+ self.assertTrue(ret, ("Unexpected Error and IO successful"
" on Read-Only File System"))
g.log.info("EXPECTED Read-only file system in IO while creating file")
@@ -1062,7 +1062,7 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
self.assertTrue(ret, ("Failed to bring down the bricks. Please "
"check the log file for more details."))
g.log.info("Brought down the brick process "
- "for %s succesfully", subvolumes_second_brick_list)
+ "for %s successfully", subvolumes_second_brick_list)
# start I/0 ( write and read ) - read must pass, write will fail
g.log.info("Start creating files on all mounts...")
@@ -1079,7 +1079,7 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
g.log.info("Validating whether IO failed with Read Only File System")
ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
self.mounts)
- self.assertTrue(ret, ("Unexpected Error and IO successfull"
+ self.assertTrue(ret, ("Unexpected Error and IO successful"
" on Read-Only File System"))
g.log.info("EXPECTED Read-only file system in IO while creating file")
@@ -1203,7 +1203,7 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
self.assertTrue(ret, ("Failed to bring down the bricks. Please "
"check the log file for more details."))
g.log.info("Brought down the brick process "
- "for %s succesfully", subvolumes_first_brick_list)
+ "for %s successfully", subvolumes_first_brick_list)
# start I/0 ( write and read ) - read must pass, write will fail
g.log.info("Start creating files on all mounts...")
@@ -1220,7 +1220,7 @@ class ClientSideQuorumCross2Tests(GlusterBaseClass):
g.log.info("Validating whether IO failed with Read Only File System")
ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
self.mounts)
- self.assertTrue(ret, ("Unexpected Error and IO successfull"
+ self.assertTrue(ret, ("Unexpected Error and IO successful"
" on Read-Only File System"))
g.log.info("EXPECTED Read-only file system in IO while creating file")
@@ -1508,7 +1508,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass):
ret = set_volume_options(self.mnode, vol_name, options)
self.assertTrue(ret, ("Unable to set volume option %s for "
"volume %s" % (options, vol_name)))
- g.log.info("Sucessfully set %s for volume %s", options, vol_name)
+ g.log.info("Successfully set %s for volume %s", options, vol_name)
# check is options are set correctly
volume_list = get_volume_list(self.mnode)
@@ -1612,7 +1612,7 @@ class ClientSideQuorumTestsMultipleVols(GlusterBaseClass):
g.log.info("Validating if IO failed with read-only filesystem")
ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
self.mounts)
- self.assertTrue(ret, ("Unexpected error and IO successfull"
+ self.assertTrue(ret, ("Unexpected error and IO successful"
" on read-only filesystem"))
g.log.info("EXPECTED: "
"Read-only file system in IO while creating file")
@@ -1803,7 +1803,7 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
self.assertTrue(ret, ("Failed to bring down the bricks. Please "
"check the log file for more details."))
g.log.info("Brought down the brick process "
- "for %s succesfully", brick_to_bring_offline1)
+ "for %s successfully", brick_to_bring_offline1)
offline_brick1_from_replicasets.append(brick_to_bring_offline1)
# start I/0 ( write and read ) - must succeed
@@ -1853,7 +1853,7 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
self.assertTrue(ret, ("Failed to bring down the bricks. Please "
"check the log file for more details."))
g.log.info("Brought down the brick process "
- "for %s succesfully", brick_to_bring_offline2)
+ "for %s successfully", brick_to_bring_offline2)
offline_brick2_from_replicasets.append(brick_to_bring_offline2)
# start I/0 ( write and read ) - must succeed
@@ -1957,7 +1957,7 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
g.log.info("Validating whether IO failed with Read Only File System")
ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
self.mounts)
- self.assertTrue(ret, ("Unexpected Error and IO successfull"
+ self.assertTrue(ret, ("Unexpected Error and IO successful"
" on Read-Only File System"))
g.log.info("EXPECTED Read-only file system in IO while creating file")
@@ -2120,7 +2120,7 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
self.assertTrue(ret, ("Failed to bring down the bricks. Please "
"check the log file for more details."))
g.log.info("Brought down the brick process "
- "for %s succesfully", bricks_to_bring_offline)
+ "for %s successfully", bricks_to_bring_offline)
# start I/0 ( write and read ) - read must pass, write will fail
g.log.info("Start creating files on all mounts...")
@@ -2137,7 +2137,7 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
g.log.info("Validating whether IO failed with Read-only file system")
ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
self.mounts)
- self.assertTrue(ret, ("Unexpected error and IO successfull"
+ self.assertTrue(ret, ("Unexpected error and IO successful"
" on Read-only file system"))
g.log.info("EXPECTED: Read-only file system in IO while creating file")
@@ -2181,7 +2181,7 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
g.log.info("Validating whether IO failed with Read-only file system")
ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
self.mounts)
- self.assertTrue(ret, ("Unexpected error and IO successfull"
+ self.assertTrue(ret, ("Unexpected error and IO successful"
" on Read-only file system"))
g.log.info("EXPECTED: Read-only file system in IO while creating file")
@@ -2225,7 +2225,7 @@ class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
g.log.info("Validating whether IO failed with Read-only file system")
ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
self.mounts)
- self.assertTrue(ret, ("Unexpected error and IO successfull"
+ self.assertTrue(ret, ("Unexpected error and IO successful"
" on Read-only file system"))
g.log.info("EXPECTED: Read-only file system in IO while creating file")
diff --git a/tests/functional/afr/test_conservative_merge_of_files_heal_command.py b/tests/functional/afr/test_conservative_merge_of_files_heal_command.py
index 3a6c62069..06514b972 100644
--- a/tests/functional/afr/test_conservative_merge_of_files_heal_command.py
+++ b/tests/functional/afr/test_conservative_merge_of_files_heal_command.py
@@ -139,7 +139,7 @@ class VerifySelfHealTriggersHealCommand(GlusterBaseClass):
ret = set_volume_options(self.mnode, self.volname, options)
self.assertTrue(ret, ("Unable to set volume option %s for"
"volume %s" % (options, self.volname)))
- g.log.info("Sucessfully set %s for volume %s", options, self.volname)
+ g.log.info("Successfully set %s for volume %s", options, self.volname)
# Bring brick 0 offline
g.log.info('Bringing bricks %s offline', bricks_list[0])
diff --git a/tests/functional/afr/test_gfid_assignment_on_dist_rep_vol.py b/tests/functional/afr/test_gfid_assignment_on_dist_rep_vol.py
index e815fa0b6..0015f079e 100644
--- a/tests/functional/afr/test_gfid_assignment_on_dist_rep_vol.py
+++ b/tests/functional/afr/test_gfid_assignment_on_dist_rep_vol.py
@@ -112,7 +112,7 @@ class AssignGfidsOnAllSubvols(GlusterBaseClass):
# Verify gfids are same on all the bricks
self.verify_gfid("dir1")
- # Creat a new directory on all the bricks directly
+ # Create a new directory on all the bricks directly
bricks_list = get_all_bricks(self.mnode, self.volname)
for brick in bricks_list:
brick_node, brick_path = brick.split(":")
diff --git a/tests/functional/afr/test_gfid_heal.py b/tests/functional/afr/test_gfid_heal.py
index 76d423e2d..589a420a0 100644
--- a/tests/functional/afr/test_gfid_heal.py
+++ b/tests/functional/afr/test_gfid_heal.py
@@ -129,7 +129,7 @@ class HealGfidTest(GlusterBaseClass):
- Create a 1x3 volume and fuse mount it.
- Create 1 directory with 1 file inside it directly on each brick.
- Access the directories from the mount.
- - Launch heals ans verify that the heals are over.
+ - Launch heals and verify that the heals are over.
- Verify that the files and directories have gfid assigned.
"""
# pylint: disable=too-many-statements
diff --git a/tests/functional/afr/test_gfid_split_brain_resolution.py b/tests/functional/afr/test_gfid_split_brain_resolution.py
index 0d6b0e220..a73ee407d 100644
--- a/tests/functional/afr/test_gfid_split_brain_resolution.py
+++ b/tests/functional/afr/test_gfid_split_brain_resolution.py
@@ -104,7 +104,7 @@ class TestSelfHeal(GlusterBaseClass):
self.assertTrue(ret, ("Failed to bring down the bricks. Please "
"check the log file for more details."))
g.log.info("Brought down the brick process "
- "for %s succesfully", brick_list)
+ "for %s successfully", brick_list)
ret = are_bricks_offline(self.mnode, self.volname, brick_list)
self.assertTrue(ret, 'Bricks %s are not offline' % brick_list)
@@ -157,7 +157,7 @@ class TestSelfHeal(GlusterBaseClass):
ret = set_volume_options(self.mnode, self.volname, options)
self.assertTrue(ret, ("Unable to set volume option %s for "
"volume %s" % (options, self.volname)))
- g.log.info("Sucessfully set %s for volume %s", options, self.volname)
+ g.log.info("Successfully set %s for volume %s", options, self.volname)
# Create dir inside which I/O will be performed.
ret = mkdir(self.mounts[0].client_system, "%s/test_gfid_split_brain"
diff --git a/tests/functional/afr/test_quota_limit_entry_heal.py b/tests/functional/afr/test_quota_limit_entry_heal.py
index 56388c157..033d326f4 100644
--- a/tests/functional/afr/test_quota_limit_entry_heal.py
+++ b/tests/functional/afr/test_quota_limit_entry_heal.py
@@ -130,7 +130,7 @@ class QuotaEntrySelfHealTest(GlusterBaseClass):
ret = set_volume_options(self.mnode, self.volname, options)
self.assertTrue(ret, ("Unable to set volume option %s for "
"volume %s" % (options, self.volname)))
- g.log.info("Sucessfully set %s for volume %s", options, self.volname)
+ g.log.info("Successfully set %s for volume %s", options, self.volname)
# Create directory on mount
ret = mkdir(self.mounts[0].client_system, "%s/dir"
diff --git a/tests/functional/afr/test_self_heal_when_dir_quota_exceeded.py b/tests/functional/afr/test_self_heal_when_dir_quota_exceeded.py
index 92568baa9..4648c0f68 100644
--- a/tests/functional/afr/test_self_heal_when_dir_quota_exceeded.py
+++ b/tests/functional/afr/test_self_heal_when_dir_quota_exceeded.py
@@ -101,7 +101,7 @@ class HealFilesWhenDirQuotaExceeded(GlusterBaseClass):
path))
ret = g.run(mount_object.client_system, cmd)
self.assertTrue(ret, "Failed to create directory on mountpoint")
- g.log.info("Directory created succesfully on mountpoint")
+ g.log.info("Directory created successfully on mountpoint")
# Enable Quota
g.log.info("Enabling quota on the volume %s", self.volname)
@@ -142,7 +142,7 @@ class HealFilesWhenDirQuotaExceeded(GlusterBaseClass):
"count=20; done" % (mount_object.mountpoint, path))
ret, _, _ = g.run(mount_object.client_system, cmd)
self.assertEqual(ret, 0, ("Failed to create files on %s", path))
- g.log.info("Files created succesfully on mountpoint")
+ g.log.info("Files created successfully on mountpoint")
bricks_list = get_all_bricks(self.mnode, self.volname)
diff --git a/tests/functional/afr/test_self_heal_with_quota_object_limit.py b/tests/functional/afr/test_self_heal_with_quota_object_limit.py
index 6fe45f7b5..ff308c3f6 100644
--- a/tests/functional/afr/test_self_heal_with_quota_object_limit.py
+++ b/tests/functional/afr/test_self_heal_with_quota_object_limit.py
@@ -101,7 +101,7 @@ class HealFilesWhenQuotaObjectLimitExceeded(GlusterBaseClass):
path))
ret = g.run(mount_object.client_system, cmd)
self.assertTrue(ret, "Failed to create directory on mountpoint")
- g.log.info("Directory created succesfully on mountpoint")
+ g.log.info("Directory created successfully on mountpoint")
# Enable Quota
g.log.info("Enabling quota on the volume %s", self.volname)
@@ -141,7 +141,7 @@ class HealFilesWhenQuotaObjectLimitExceeded(GlusterBaseClass):
% (self.script_upload_path, mount_object.mountpoint, path))
ret, _, _ = g.run(mount_object.client_system, cmd)
self.assertEqual(ret, 0, ("Failed to create files on %s", path))
- g.log.info("Files created succesfully on mountpoint")
+ g.log.info("Files created successfully on mountpoint")
bricks_list = get_all_bricks(self.mnode, self.volname)
diff --git a/tests/functional/afr/test_write_io_mount_point_resumed_quorum_restored.py b/tests/functional/afr/test_write_io_mount_point_resumed_quorum_restored.py
index 96d5d7864..18125933a 100755
--- a/tests/functional/afr/test_write_io_mount_point_resumed_quorum_restored.py
+++ b/tests/functional/afr/test_write_io_mount_point_resumed_quorum_restored.py
@@ -142,7 +142,7 @@ class ClientSideQuorumRestored(GlusterBaseClass):
ret = set_volume_options(self.mnode, self.volname, options)
self.assertTrue(ret, ("Unable to set volume option %s for"
"volume %s" % (options, self.volname)))
- g.log.info("Sucessfully set %s for volume %s",
+ g.log.info("Successfully set %s for volume %s",
options, self.volname)
# Creating files on client side
diff --git a/tests/functional/arbiter/brick_cases/test_brickcases.py b/tests/functional/arbiter/brick_cases/test_brickcases.py
index 9cce9af03..a5e33f018 100755
--- a/tests/functional/arbiter/brick_cases/test_brickcases.py
+++ b/tests/functional/arbiter/brick_cases/test_brickcases.py
@@ -177,7 +177,7 @@ class GlusterArbiterVolumeTypeChangeClass(GlusterBaseClass):
self.all_servers_info, replica_count=1,
arbiter_count=1)
self.assertTrue(ret, ("Failed to expand the volume %s", self.volname))
- g.log.info("Changing volume to arbiter volume is successfull %s",
+ g.log.info("Changing volume to arbiter volume is successful %s",
self.volname)
# Log Volume Info and Status after changing the volume type from
diff --git a/tests/functional/arbiter/brick_cases/test_replica3_to_arbiter.py b/tests/functional/arbiter/brick_cases/test_replica3_to_arbiter.py
index 4fe2a8ba8..8d4130d1f 100644
--- a/tests/functional/arbiter/brick_cases/test_replica3_to_arbiter.py
+++ b/tests/functional/arbiter/brick_cases/test_replica3_to_arbiter.py
@@ -124,14 +124,14 @@ class GlusterArbiterVolumeTypeClass(GlusterBaseClass):
self.volname))
g.log.info("Volume %s : All process are online", self.volname)
- # Adding the bricks to amke arbiter brick
+ # Adding the bricks to make arbiter brick
g.log.info("Adding bricks to convert to Arbiter Volume")
replica_arbiter = {'replica_count': 3, 'arbiter_count': 1}
ret = expand_volume(self.mnode, self.volname, self.servers[2:],
self.all_servers_info, add_to_hot_tier=False,
**replica_arbiter)
self.assertTrue(ret, ("Failed to expand the volume %s", self.volname))
- g.log.info("Changing volume to arbiter volume is successfull %s",
+ g.log.info("Changing volume to arbiter volume is successful %s",
self.volname)
# Log Volume Info and Status after expanding the volume
diff --git a/tests/functional/arbiter/test_mount_point_while_deleting_files.py b/tests/functional/arbiter/test_mount_point_while_deleting_files.py
index c8e4804ee..1bbdf279f 100755
--- a/tests/functional/arbiter/test_mount_point_while_deleting_files.py
+++ b/tests/functional/arbiter/test_mount_point_while_deleting_files.py
@@ -229,7 +229,7 @@ class VolumeSetDataSelfHealTests(GlusterBaseClass):
ret = bring_bricks_offline(volname, bricks_to_bring_offline)
self.assertTrue(ret, ("Failed to bring down the bricks. Please "
"check the log file for more details."))
- g.log.info("Brought down the brick process for %s succesfully",
+ g.log.info("Brought down the brick process for %s successfully",
bricks_to_bring_offline)
# delete files on all mounts
diff --git a/tests/functional/authentication/test_fusereject.py b/tests/functional/authentication/test_fusereject.py
index 44410a5d9..5b403f4ed 100644
--- a/tests/functional/authentication/test_fusereject.py
+++ b/tests/functional/authentication/test_fusereject.py
@@ -129,7 +129,7 @@ class AuthRejectVol(GlusterBaseClass):
if (ret == 0) & (not out):
g.log.error("Mount executed successfully due to bug 1586036")
elif (ret == 1) & (not out):
- g.log.info("Expected:Mounting has failed sucessfully")
+ g.log.info("Expected:Mounting has failed successfully")
else:
raise ExecutionError("Unexpected Mounting of Volume %s successful"
% self.volname)
diff --git a/tests/functional/authentication/test_vol_auth.py b/tests/functional/authentication/test_vol_auth.py
index e8180d1e4..bb3da6f96 100644
--- a/tests/functional/authentication/test_vol_auth.py
+++ b/tests/functional/authentication/test_vol_auth.py
@@ -118,7 +118,7 @@ class AuthRejectVol(GlusterBaseClass):
if (ret == 0) & (not out):
g.log.error("Mount executed successfully due to bug 1586036")
elif (ret == 1) & (not out):
- g.log.info("Expected:Mounting has failed sucessfully")
+ g.log.info("Expected:Mounting has failed successfully")
else:
raise ExecutionError("Unexpected Mounting of Volume %s"
"successful" % self.volname)
diff --git a/tests/functional/bvt/test_cvt.py b/tests/functional/bvt/test_cvt.py
index f1250b764..05578b400 100644
--- a/tests/functional/bvt/test_cvt.py
+++ b/tests/functional/bvt/test_cvt.py
@@ -663,7 +663,7 @@ class TestGlusterReplaceBrickSanity(GlusterBasicFeaturesSanityBaseClass):
# This test is disabled on nfs because of bug 1473668. A patch to apply the
-# workaround mentiond on the bug could not make this test green either.
+# workaround mentioned on the bug could not make this test green either.
@runs_on([['replicated', 'distributed-replicated', 'dispersed',
'distributed-dispersed'],
['glusterfs', 'cifs']])
diff --git a/tests/functional/dht/test_dht_hash_value.py b/tests/functional/dht/test_dht_hash_value.py
index fd83576cf..c5f08dcfa 100644
--- a/tests/functional/dht/test_dht_hash_value.py
+++ b/tests/functional/dht/test_dht_hash_value.py
@@ -187,12 +187,12 @@ class TestDHTHashValue(GlusterBaseClass):
mountpoint)
attributes = get_fattr_list(client_host, mountpoint)
self.assertFalse('trusted.gfid' in attributes,
- "Expected: Mount point should't display xattr:"
+ "Expected: Mount point shouldn't display xattr:"
"{xattr}. Actual: xattrs {xattr} is "
"presented on mount point".
format(xattr='trusted.gfid'))
self.assertFalse('trusted.glusterfs.dht' in attributes,
- "Expected: Mount point should't display xattr:"
+ "Expected: Mount point shouldn't display xattr:"
"{xattr}. Actual: xattrs {xattr} is "
"presented on mount point".
format(xattr='trusted.glusterfs.dht'))
diff --git a/tests/functional/dht/test_stop_glusterd_while_rebalance_in_progress.py b/tests/functional/dht/test_stop_glusterd_while_rebalance_in_progress.py
index bbe4eb52e..c9b60103a 100644
--- a/tests/functional/dht/test_stop_glusterd_while_rebalance_in_progress.py
+++ b/tests/functional/dht/test_stop_glusterd_while_rebalance_in_progress.py
@@ -134,7 +134,7 @@ class RebalanceValidation(GlusterBaseClass):
self.volname))
g.log.info("Volume %s: Started rebalance", self.volname)
- # Wait for atleast one file to be lookedup/scanned on the nodes
+ # Wait for at least one file to be lookedup/scanned on the nodes
status_info = get_rebalance_status(self.mnode, self.volname)
count = 0
while count < 100:
@@ -146,7 +146,7 @@ class RebalanceValidation(GlusterBaseClass):
lookups_start_count += 1
sleep(5)
if lookups_start_count == len(self.servers):
- g.log.info("Volume %s: Atleast one file is lookedup/scanned "
+ g.log.info("Volume %s: At least one file is lookedup/scanned "
"on all nodes", self.volname)
break
count += 1
diff --git a/tests/functional/disperse/test_ec_validate_volume_creation.py b/tests/functional/disperse/test_ec_validate_volume_creation.py
index 99637f20f..d9e490caf 100644
--- a/tests/functional/disperse/test_ec_validate_volume_creation.py
+++ b/tests/functional/disperse/test_ec_validate_volume_creation.py
@@ -96,7 +96,7 @@ class EcValidateVolumeCreate(GlusterBaseClass):
# Setup Volume and Mount Volume
g.log.info("Starting to Setup Volume and Mount Volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
- self.assertFalse(ret, ("Volume Setup and Mount succeded"
+ self.assertFalse(ret, ("Volume Setup and Mount succeeded"
" for volume %s", self.volname))
g.log.info("Successfully verified invalid input parameters")
@@ -108,8 +108,8 @@ class EcValidateVolumeCreate(GlusterBaseClass):
# Setup Volume and Mount Volume
g.log.info("Starting to Setup Volume and Mount Volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
- self.assertFalse(ret, ("Volume Setup and Mount succeded for volume %s",
- self.volname))
+ self.assertFalse(ret, ("Volume Setup and Mount succeeded for volume "
+ "%s", self.volname))
g.log.info("Successfully verified invalid input parameters")
def test_invalid_usecase_three(self):
@@ -120,8 +120,8 @@ class EcValidateVolumeCreate(GlusterBaseClass):
# Setup Volume and Mount Volume
g.log.info("Starting to Setup Volume and Mount Volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
- self.assertFalse(ret, ("Volume Setup and Mount succeded for volume %s",
- self.volname))
+ self.assertFalse(ret, ("Volume Setup and Mount succeeded for volume "
+ "%s", self.volname))
g.log.info("Successfully verified invalid input parameters")
def test_invalid_usecase_four(self):
@@ -132,6 +132,6 @@ class EcValidateVolumeCreate(GlusterBaseClass):
# Setup Volume and Mount Volume
g.log.info("Starting to Setup Volume and Mount Volume")
ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
- self.assertFalse(ret, ("Volume Setup and Mount succeded for volume %s",
- self.volname))
+ self.assertFalse(ret, ("Volume Setup and Mount succeeded for volume "
+ "%s", self.volname))
g.log.info("Successfully verified invalid input parameters")
diff --git a/tests/functional/fuse_subdir/test_fusesubdir_with_quota_objects.py b/tests/functional/fuse_subdir/test_fusesubdir_with_quota_objects.py
index 10dc0ddb3..01ca474f2 100644
--- a/tests/functional/fuse_subdir/test_fusesubdir_with_quota_objects.py
+++ b/tests/functional/fuse_subdir/test_fusesubdir_with_quota_objects.py
@@ -171,7 +171,7 @@ class SubdirWithQuotaObject(GlusterBaseClass):
% (mount_object.mountpoint))
ret, _, _ = g.run(mount_object.client_system, cmd)
self.assertEqual(ret, 0, "Failed to create files on mountpoint")
- g.log.info("Files created succesfully on mountpoint")
+ g.log.info("Files created successfully on mountpoint")
# Fetch Quota List object on the volume
g.log.info("Get Quota list on the volume %s",
@@ -215,7 +215,7 @@ class SubdirWithQuotaObject(GlusterBaseClass):
% (mount_object.mountpoint, i), parents=True)
self.assertTrue(ret, "Failed to create directories"
"on mountpoint")
- g.log.info("Directories created succesfully on mountpoint")
+ g.log.info("Directories created successfully on mountpoint")
# Get Quota List on the volume
g.log.info("Get Quota list on the volume %s",
@@ -240,7 +240,7 @@ class SubdirWithQuotaObject(GlusterBaseClass):
else:
self.assertTrue(ret, "Directory creation got failed"
"on volume")
- g.log.info("Direction creation successfull on volume")
+ g.log.info("Direction creation successful on volume")
def tearDown(self):
"""
diff --git a/tests/functional/fuse_subdir/test_fusesubdir_with_removebrick.py b/tests/functional/fuse_subdir/test_fusesubdir_with_removebrick.py
index 8e060271d..30ac9eefb 100644
--- a/tests/functional/fuse_subdir/test_fusesubdir_with_removebrick.py
+++ b/tests/functional/fuse_subdir/test_fusesubdir_with_removebrick.py
@@ -220,7 +220,7 @@ class SubdirWithRemoveBrick(GlusterBaseClass):
ret, _, _ = umount_volume(client, self.mpoint,
self.mount_type)
if ret != 0:
- raise ExecutionError("Unmounting the moint point %s failed"
+ raise ExecutionError("Unmounting the mount point %s failed"
% self.mpoint)
g.log.info("Unmount Volume Successful")
cmd = ("rm -rf %s") % self.mpoint
diff --git a/tests/functional/fuse_subdir/test_leveldownsubdir_with_multiclient.py b/tests/functional/fuse_subdir/test_leveldownsubdir_with_multiclient.py
index f7115cedd..eef05896c 100644
--- a/tests/functional/fuse_subdir/test_leveldownsubdir_with_multiclient.py
+++ b/tests/functional/fuse_subdir/test_leveldownsubdir_with_multiclient.py
@@ -60,7 +60,7 @@ class SubdirLevelDownDirMapping(GlusterBaseClass):
ret, _, _ = g.run(self.mounts[0].client_system, cmd)
self.assertEqual(ret, 0, "Failed to create Nested directories"
"on mountpoint")
- g.log.info("Nested Directories created succesfully on mountpoint")
+ g.log.info("Nested Directories created successfully on mountpoint")
# unmount volume
ret = self.unmount_volume(self.mounts)
@@ -134,7 +134,7 @@ class SubdirLevelDownDirMapping(GlusterBaseClass):
ret, _, _ = umount_volume(client, self.mpoint,
self.mount_type)
if ret == 1:
- raise ExecutionError("Unmounting the moint point %s failed"
+ raise ExecutionError("Unmounting the mount point %s failed"
% self.mpoint)
g.log.info("Unmount Volume Successful")
cmd = ("rm -rf %s") % self.mpoint
diff --git a/tests/functional/fuse_subdir/test_multisubdir_with_multiclient.py b/tests/functional/fuse_subdir/test_multisubdir_with_multiclient.py
index deb59520d..ccd513e15 100644
--- a/tests/functional/fuse_subdir/test_multisubdir_with_multiclient.py
+++ b/tests/functional/fuse_subdir/test_multisubdir_with_multiclient.py
@@ -59,7 +59,7 @@ class MultipleDirMappingClient(GlusterBaseClass):
self.mounts[0].mountpoint))
ret, _, _ = g.run(self.mounts[0].client_system, cmd)
self.assertEqual(ret, 0, "Failed to create directories on mountpoint")
- g.log.info("Directories created succesfully on mountpoint")
+ g.log.info("Directories created successfully on mountpoint")
# unmount volume
ret = self.unmount_volume(self.mounts)
@@ -127,7 +127,7 @@ class MultipleDirMappingClient(GlusterBaseClass):
ret, _, _ = umount_volume(client, self.mpoint,
self.mount_type)
if ret == 1:
- raise ExecutionError("Unmounting the moint point %s failed"
+ raise ExecutionError("Unmounting the mount point %s failed"
% self.mpoint)
g.log.info("Unmount Volume Successful")
cmd = ("rm -rf %s") % self.mpoint
diff --git a/tests/functional/fuse_subdir/test_quota_limits_fusesubdir_.py b/tests/functional/fuse_subdir/test_quota_limits_fusesubdir_.py
index 4c6e0cf4a..b8b9206e4 100644
--- a/tests/functional/fuse_subdir/test_quota_limits_fusesubdir_.py
+++ b/tests/functional/fuse_subdir/test_quota_limits_fusesubdir_.py
@@ -200,7 +200,7 @@ class FuseSubdirQuotaTest(GlusterBaseClass):
"count=1;done" % (mount_object.mountpoint))
ret, _, _ = g.run(mount_object.client_system, cmd)
self.assertEqual(ret, 0, "Failed to create files on mountpoint")
- g.log.info("Files created succesfully on mountpoint")
+ g.log.info("Files created successfully on mountpoint")
# Again Get Quota List on the volume
@@ -254,7 +254,7 @@ class FuseSubdirQuotaTest(GlusterBaseClass):
ret, _, _ = g.run(self.clients[1], cmd)
self.assertEqual(ret, 0, ("Failed to create files on %s"
% self.clients[1]))
- g.log.info("Files created succesfully on %s:%s",
+ g.log.info("Files created successfully on %s:%s",
self.clients[1], self.subdir_mounts[1].mountpoint)
def tearDown(self):
diff --git a/tests/functional/glusterd/test_brick_port_after_stop_glusterd_modify_volume.py b/tests/functional/glusterd/test_brick_port_after_stop_glusterd_modify_volume.py
index da80f67f4..e44514aff 100644
--- a/tests/functional/glusterd/test_brick_port_after_stop_glusterd_modify_volume.py
+++ b/tests/functional/glusterd/test_brick_port_after_stop_glusterd_modify_volume.py
@@ -104,7 +104,7 @@ class TestBrickPortAfterModifyVolume(GlusterBaseClass):
bricks_list, force=False)
self.assertEqual(ret[0], 0, ("Unable"
"to create volume %s" % self.volname))
- g.log.info("Volume created successfuly %s", self.volname)
+ g.log.info("Volume created successfully %s", self.volname)
ret, _, _ = volume_start(self.mnode, self.volname)
self.assertEqual(ret, 0, ("Failed to start the "
diff --git a/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py b/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py
index 6cb3ee075..cc77b3ea5 100644
--- a/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py
+++ b/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py
@@ -132,7 +132,7 @@ class TestBrickStatusWhenQuorumNotMet(GlusterBaseClass):
# Verfiying node count in volume status after glusterd
# started on servers, Its not possible to check the brick status
- # immediately after glusterd start, thats why verifying that all
+ # immediately after glusterd start, that's why verifying that all
# glusterd started nodes available in gluster volume status or not
count = 0
while count < 50:
diff --git a/tests/functional/glusterd/test_concurrent_set.py b/tests/functional/glusterd/test_concurrent_set.py
index 7c753ea78..4b432b784 100644
--- a/tests/functional/glusterd/test_concurrent_set.py
+++ b/tests/functional/glusterd/test_concurrent_set.py
@@ -60,7 +60,7 @@ class TestConcurrentSet(GlusterBaseClass):
self.brick_list, force=False)
self.assertEqual(ret[0], 0, ("Unable"
"to create volume %s" % self.volname))
- g.log.info("Volume created successfuly %s", self.volname)
+ g.log.info("Volume created successfully %s", self.volname)
# Create a volume
self.volname = "second-vol"
@@ -72,7 +72,7 @@ class TestConcurrentSet(GlusterBaseClass):
self.brick_list, force=False)
self.assertEqual(ret[0], 0, ("Unable"
"to create volume %s" % self.volname))
- g.log.info("Volume created successfuly %s", self.volname)
+ g.log.info("Volume created successfully %s", self.volname)
cmd1 = ("for i in `seq 1 100`; do gluster volume set first-vol "
"read-ahead on; done")
diff --git a/tests/functional/glusterd/test_create_vol_with_used_bricks.py b/tests/functional/glusterd/test_create_vol_with_used_bricks.py
index 940d7a451..1be34f734 100644
--- a/tests/functional/glusterd/test_create_vol_with_used_bricks.py
+++ b/tests/functional/glusterd/test_create_vol_with_used_bricks.py
@@ -111,7 +111,7 @@ class TestCreateVolWithUsedBricks(GlusterBaseClass):
# Mounting volume
ret = self.mount_volume(self.mounts)
self.assertTrue(ret, "Volume mount failed for %s" % self.volname)
- g.log.info("Volume mounted sucessfully : %s", self.volname)
+ g.log.info("Volume mounted successfully : %s", self.volname)
# run IOs
g.log.info("Starting IO on all mounts...")
diff --git a/tests/functional/glusterd/test_nfs_quorum.py b/tests/functional/glusterd/test_nfs_quorum.py
index ced5b719f..62d2ce24a 100644
--- a/tests/functional/glusterd/test_nfs_quorum.py
+++ b/tests/functional/glusterd/test_nfs_quorum.py
@@ -82,7 +82,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
# Mounting a NFS volume
ret = self.mount_volume(self.mounts)
self.assertTrue(ret, "NFS volume mount failed for %s" % self.volname)
- g.log.info("Volume mounted sucessfully : %s", self.volname)
+ g.log.info("Volume mounted successfully : %s", self.volname)
# unmounting NFS Volume
ret = self.unmount_volume(self.mounts)
diff --git a/tests/functional/glusterd/test_peer_detach.py b/tests/functional/glusterd/test_peer_detach.py
index 2bae76d2a..633036927 100644
--- a/tests/functional/glusterd/test_peer_detach.py
+++ b/tests/functional/glusterd/test_peer_detach.py
@@ -68,7 +68,7 @@ class PeerDetachVerification(GlusterBaseClass):
# Assigning non existing host to variable
self.non_exist_host = '256.256.256.256'
- # Assigning invalid ip to vaiable
+ # Assigning invalid ip to variable
self.invalid_ip = '10.11.a'
# Peer detach to specified server
diff --git a/tests/functional/glusterd/test_peer_probe_while_snapd_running.py b/tests/functional/glusterd/test_peer_probe_while_snapd_running.py
index c35b3eaeb..aff015638 100644
--- a/tests/functional/glusterd/test_peer_probe_while_snapd_running.py
+++ b/tests/functional/glusterd/test_peer_probe_while_snapd_running.py
@@ -97,7 +97,7 @@ class TestPeerProbeWhileSnapdRunning(GlusterBaseClass):
# Checking snapd running or not
ret = is_snapd_running(self.mnode, self.volname)
- self.assertTrue(ret, "Snapd not runnig for volume %s" % self.volname)
+ self.assertTrue(ret, "Snapd not running for volume %s" % self.volname)
g.log.info("snapd running for volume %s", self.volname)
# Probing new node
diff --git a/tests/functional/glusterd/test_probe_glusterd.py b/tests/functional/glusterd/test_probe_glusterd.py
index d14991dbd..54b99eec2 100644
--- a/tests/functional/glusterd/test_probe_glusterd.py
+++ b/tests/functional/glusterd/test_probe_glusterd.py
@@ -62,7 +62,7 @@ class PeerProbeInvalidIpNonExistingHost(GlusterBaseClass):
# Assigning non existing ip to variable
self.non_exist_ip = '256.256.256.256'
- # Assigning invalid ip to vaiable
+ # Assigning invalid ip to variable
self.invalid_ip = '10.11.a'
# Assigning non existing host to variable
diff --git a/tests/functional/glusterd/test_probe_hostname.py b/tests/functional/glusterd/test_probe_hostname.py
index 55476edc0..6e7d87b53 100644
--- a/tests/functional/glusterd/test_probe_hostname.py
+++ b/tests/functional/glusterd/test_probe_hostname.py
@@ -103,14 +103,14 @@ class TestPeerProbe(GlusterBaseClass):
self.brick_list, force=False)
self.assertEqual(ret, 0, "Unable"
"to create volume % s" % self.volname)
- g.log.info("Volume created successfuly % s", self.volname)
+ g.log.info("Volume created successfully % s", self.volname)
# Start a volume
g.log.info("Start a volume")
ret, _, _ = volume_start(self.mnode, self.volname)
self.assertEqual(ret, 0, "Unable"
"to start volume % s" % self.volname)
- g.log.info("Volume started successfuly % s", self.volname)
+ g.log.info("Volume started successfully % s", self.volname)
# Get volume info
g.log.info("get volume info")
@@ -127,7 +127,7 @@ class TestPeerProbe(GlusterBaseClass):
ret, _, _ = volume_stop(self.mnode, self.volname)
self.assertEqual(ret, 0, "Unable"
"to stop volume % s" % self.volname)
- g.log.info("Volume stopped successfuly % s", self.volname)
+ g.log.info("Volume stopped successfully % s", self.volname)
# Create a volume
self.volname = "test-vol-fqdn"
@@ -154,14 +154,14 @@ class TestPeerProbe(GlusterBaseClass):
my_brick_list, force=False)
self.assertEqual(ret, 0, "Unable"
"to create volume % s" % self.volname)
- g.log.info("Volume created successfuly % s", self.volname)
+ g.log.info("Volume created successfully % s", self.volname)
# Start a volume
g.log.info("Start a volume")
ret, _, _ = volume_start(self.mnode, self.volname)
self.assertEqual(ret, 0, "Unable"
"to start volume % s" % self.volname)
- g.log.info("Volume started successfuly % s", self.volname)
+ g.log.info("Volume started successfully % s", self.volname)
# Get volume info
g.log.info("get volume info")
@@ -178,4 +178,4 @@ class TestPeerProbe(GlusterBaseClass):
ret, _, _ = volume_stop(self.mnode, self.volname)
self.assertEqual(ret, 0, "Unable"
"to stop volume % s" % self.volname)
- g.log.info("Volume stopped successfuly % s", self.volname)
+ g.log.info("Volume stopped successfully % s", self.volname)
diff --git a/tests/functional/glusterd/test_quorum_remove_brick.py b/tests/functional/glusterd/test_quorum_remove_brick.py
index 6d5e45b11..4429d8231 100644
--- a/tests/functional/glusterd/test_quorum_remove_brick.py
+++ b/tests/functional/glusterd/test_quorum_remove_brick.py
@@ -120,7 +120,7 @@ class TestServerQuorumNotMet(GlusterBaseClass):
% self.random_server)
g.log.info("Glusterd stopped successfully on %s", self.random_server)
- # Forming brick list for perfroming remove brick operation
+ # Forming brick list for performing remove brick operation
remove_brick_list = form_bricks_list_to_remove_brick(self.mnode,
self.volname)
self.assertIsNotNone(remove_brick_list, "Failed to get brick list for "
diff --git a/tests/functional/glusterd/test_rebalance_hang.py b/tests/functional/glusterd/test_rebalance_hang.py
index d96a4043a..a826703c1 100644
--- a/tests/functional/glusterd/test_rebalance_hang.py
+++ b/tests/functional/glusterd/test_rebalance_hang.py
@@ -114,7 +114,7 @@ class TestRebalanceHang(GlusterBaseClass):
bricks_list, force=False)
self.assertEqual(ret, 0, ("Unable"
"to create volume %s" % self.volname))
- g.log.info("Volume created successfuly %s", self.volname)
+ g.log.info("Volume created successfully %s", self.volname)
ret, _, _ = volume_start(self.mnode, self.volname, False)
self.assertEqual(ret, 0, ("Failed to start the "
@@ -130,7 +130,7 @@ class TestRebalanceHang(GlusterBaseClass):
mserver=self.mnode,
mclient=self.mounts[0].client_system)
self.assertEqual(ret, 0, ("Volume %s is not mounted") % self.volname)
- g.log.info("Volume mounted sucessfully : %s", self.volname)
+ g.log.info("Volume mounted successfully : %s", self.volname)
self.all_mounts_procs = []
# Creating files
diff --git a/tests/functional/glusterd/test_rebalance_spurious.py b/tests/functional/glusterd/test_rebalance_spurious.py
index 9b7318812..ad16a0039 100644
--- a/tests/functional/glusterd/test_rebalance_spurious.py
+++ b/tests/functional/glusterd/test_rebalance_spurious.py
@@ -114,7 +114,7 @@ class TestSpuriousRebalance(GlusterBaseClass):
bricks_list, force=False)
self.assertEqual(ret, 0, ("Unable"
"to create volume %s" % self.volname))
- g.log.info("Volume created successfuly %s", self.volname)
+ g.log.info("Volume created successfully %s", self.volname)
ret, _, _ = volume_start(self.mnode, self.volname, False)
self.assertEqual(ret, 0, ("Failed to start the "
@@ -130,7 +130,7 @@ class TestSpuriousRebalance(GlusterBaseClass):
mserver=self.mnode,
mclient=self.mounts[0].client_system)
self.assertEqual(ret, 0, ("Volume %s is not mounted") % self.volname)
- g.log.info("Volume mounted sucessfully : %s", self.volname)
+ g.log.info("Volume mounted successfully : %s", self.volname)
remove_brick_list = []
remove_brick_list.append(bricks_list[2])
ret, _, _ = remove_brick(self.mnode, self.volname, remove_brick_list,
diff --git a/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py b/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py
index 217eae5dc..f84c7dba8 100644
--- a/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py
+++ b/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py
@@ -112,7 +112,7 @@ class TestRemoveBrickAfterRestartGlusterd(GlusterBaseClass):
bricks_list, force=False, **kwargs)
self.assertEqual(ret[0], 0, ("Unable"
"to create volume %s" % self.volname))
- g.log.info("Volume created successfuly %s", self.volname)
+ g.log.info("Volume created successfully %s", self.volname)
ret, _, _ = volume_start(self.mnode, self.volname, False)
self.assertEqual(ret, 0, ("Failed to start the "
@@ -128,7 +128,7 @@ class TestRemoveBrickAfterRestartGlusterd(GlusterBaseClass):
mserver=self.mnode,
mclient=self.mounts[0].client_system)
self.assertEqual(ret, 0, ("Volume %s is not mounted") % self.volname)
- g.log.info("Volume mounted sucessfully : %s", self.volname)
+ g.log.info("Volume mounted successfully : %s", self.volname)
self.all_mounts_procs = []
# Creating files
diff --git a/tests/functional/glusterd/test_volume_get.py b/tests/functional/glusterd/test_volume_get.py
index 228b15209..5746d5d59 100644
--- a/tests/functional/glusterd/test_volume_get.py
+++ b/tests/functional/glusterd/test_volume_get.py
@@ -161,7 +161,7 @@ class TestVolumeGet(GlusterBaseClass):
"existing volume %s with non existing option",
self.volname)
- # perfroming gluster volume get volname all
+ # performing gluster volume get volname all
ret = get_volume_options(self.mnode, self.volname, "all")
self.assertIsNotNone(ret, "gluster volume get %s all command "
@@ -194,7 +194,7 @@ class TestVolumeGet(GlusterBaseClass):
"performance.low-prio-threads value is not correct")
g.log.info("performance.low-prio-threads value is correct")
- # perfroming gluster volume get volname all
+ # performing gluster volume get volname all
ret = get_volume_options(self.mnode, self.volname, "all")
self.assertIsNotNone(ret, "gluster volume get %s all command "
"failed" % self.volname)
diff --git a/tests/functional/glusterd/test_volume_network_ping_timeout.py b/tests/functional/glusterd/test_volume_network_ping_timeout.py
index b897c4a7a..7d72d8ab2 100644
--- a/tests/functional/glusterd/test_volume_network_ping_timeout.py
+++ b/tests/functional/glusterd/test_volume_network_ping_timeout.py
@@ -96,7 +96,7 @@ class CheckVolumeChecksumAfterChangingNetworkPingTimeOut(GlusterBaseClass):
# Mounting volume as glusterfs
ret = self.mount_volume(self.mounts)
self.assertTrue(ret, "volume mount failed for %s" % self.volname)
- g.log.info("Volume mounted sucessfully : %s", self.volname)
+ g.log.info("Volume mounted successfully : %s", self.volname)
# Checking volume mounted or not
ret = is_mounted(self.volname, self.mounts[0].mountpoint, self.mnode,
diff --git a/tests/functional/glusterd/test_volume_reset.py b/tests/functional/glusterd/test_volume_reset.py
index f61fdaaba..9720c3e92 100644
--- a/tests/functional/glusterd/test_volume_reset.py
+++ b/tests/functional/glusterd/test_volume_reset.py
@@ -124,10 +124,10 @@ class GlusterdVolumeReset(GlusterBaseClass):
cmd = "gluster volume reset " + self.volname
ret, _, _ = g.run(self.mnode, cmd)
self.assertEqual(ret, 0, "volume reset failed for : %s" % self.volname)
- g.log.info("volume resetted succefully :%s", self.volname)
+ g.log.info("Volume reset successfully :%s", self.volname)
# After volume reset snap daemon will not be running,
- # bitd and scrub deamons will be in running state.
+ # bitd and scrub daemons will be in running state.
g.log.info("checking snapshot, scrub and bitrot daemons\
running or not after volume reset")
for mnode in self.servers:
@@ -157,7 +157,7 @@ class GlusterdVolumeReset(GlusterBaseClass):
ret, _, _ = g.run(self.mnode, cmd)
self.assertEqual(ret, 0, "volume reset fail\
for : %s" % self.volname)
- g.log.info("Volume reset sucessfully with force option :%s",
+ g.log.info("Volume reset successfully with force option :%s",
self.volname)
# After volume reset bitd, snapd, scrub daemons will not be running,
diff --git a/tests/functional/glusterd/test_volume_status.py b/tests/functional/glusterd/test_volume_status.py
index ff5d5752f..109586f35 100644
--- a/tests/functional/glusterd/test_volume_status.py
+++ b/tests/functional/glusterd/test_volume_status.py
@@ -110,17 +110,17 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass):
command on all clusters randomly.
"gluster volume status volname inode" command should not get
hang while IOs in progress.
- Then check that IOs completed successfullly or not on mount point.
+ Then check whether IOs completed successfully on the mount point.
Check that files in mount point listing properly or not.
'''
# Mounting a volume
ret = self.mount_volume(self.mounts)
self.assertTrue(ret, "Volume mount failed for %s" % self.volname)
- g.log.info("Volume mounted sucessfully : %s", self.volname)
+ g.log.info("Volume mounted successfully : %s", self.volname)
- # After Mounting immediately writting IO's are failing some times,
- # thats why keeping sleep for 10 secs
+ # Immediately after mounting, writing IO's sometimes fails,
+ # which is why we sleep for 10 secs
sleep(10)
# run IOs
diff --git a/tests/functional/glusterd/test_volume_status_fd.py b/tests/functional/glusterd/test_volume_status_fd.py
index 2765325c5..415c96de7 100644
--- a/tests/functional/glusterd/test_volume_status_fd.py
+++ b/tests/functional/glusterd/test_volume_status_fd.py
@@ -94,7 +94,7 @@ class VolumeStatusFdWhenIOInProgress(GlusterBaseClass):
-> Mount the volume on 2 clients
-> Run I/O's on mountpoint
-> While I/O's are in progress
- -> Perfrom gluster volume status fd repeatedly
+ -> Perform gluster volume status fd repeatedly
-> List all files and dirs listed
'''
diff --git a/tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py b/tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py
index a00c22a5a..05caf4a43 100644
--- a/tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py
+++ b/tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py
@@ -75,7 +75,7 @@ class TestNfsGaneshaSanity(NfsGaneshaVolumeBaseClass):
"tar xvf linux-2.6.1.tar.gz" % (mount_obj.mountpoint))
ret, out, err = g.run(mount_obj.client_system, cmd)
if ret == 0:
- g.log.info("Succesfully untared the tarball!")
+ g.log.info("Successfully untared the tarball!")
else:
g.log.error("ERROR ! Kernel untar errored out!")
self.assertEqual(ret, 0, "Kernel untar failed!")
diff --git a/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py b/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py
index 8a8b28cc3..ead84dbc5 100644
--- a/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py
+++ b/tests/functional/nfs_ganesha/test_nfs_ganesha_volume_exports.py
@@ -69,7 +69,7 @@ class TestNfsGaneshaVolumeExports(NfsGaneshaVolumeBaseClass):
g.log.info("Testing nfs ganesha export after volume stop/start."
"Count : %s", str(i))
- # Stoping volume
+ # Stopping volume
ret = volume_stop(self.mnode, self.volname)
self.assertTrue(ret, ("Failed to stop volume %s" % self.volname))
@@ -485,7 +485,7 @@ class TestNfsGaneshaSubDirExportsWithIO(NfsGaneshaIOBaseClass):
export_file)))
# Stop and start volume to take the modified export file to effect.
- # Stoping volume
+ # Stopping volume
ret = volume_stop(self.mnode, self.volname)
self.assertTrue(ret, ("Failed to stop volume %s" % self.volname))
diff --git a/tests/functional/quota/list_path_values.py b/tests/functional/quota/list_path_values.py
index 5c9bf81d4..292370473 100644
--- a/tests/functional/quota/list_path_values.py
+++ b/tests/functional/quota/list_path_values.py
@@ -112,7 +112,7 @@ class QuotaListPathValues(GlusterBaseClass):
"count=1;done" % (mount_object.mountpoint))
ret, _, _ = g.run(mount_object.client_system, cmd)
self.assertEqual(ret, 0, "Failed to create files on mountpoint")
- g.log.info("Files created succesfully on mountpoint")
+ g.log.info("Files created successfully on mountpoint")
# Get Quota list without specifying the path
g.log.info("Get Quota list for the volume %s", self.volname)
diff --git a/tests/functional/quota/test_limit_usage_deep_dir.py b/tests/functional/quota/test_limit_usage_deep_dir.py
index f066441e2..974249c79 100644
--- a/tests/functional/quota/test_limit_usage_deep_dir.py
+++ b/tests/functional/quota/test_limit_usage_deep_dir.py
@@ -200,7 +200,7 @@ class LimitUsageDeepDir(GlusterBaseClass):
dir_name)
g.log.info("Quota list validate and file created successful "
"for %s", dir_name)
- g.log.info("Files created and quota validated succesfully")
+ g.log.info("Files created and quota validated successfully")
# Deleting data and validating quota
self.all_mounts_procs = []
diff --git a/tests/functional/quota/test_multi_value_limit.py b/tests/functional/quota/test_multi_value_limit.py
index 08bf6d259..3991c614b 100644
--- a/tests/functional/quota/test_multi_value_limit.py
+++ b/tests/functional/quota/test_multi_value_limit.py
@@ -154,7 +154,7 @@ class QuotaMultiValueLimits(GlusterBaseClass):
"count=1 ; "
"done" % mount_dir)
ret, _, _ = g.run(client, cmd)
- self.assertEqual(ret, 1, "Failed: Files created successfully inspite "
+ self.assertEqual(ret, 1, "Failed: Files created successfully in spite "
"of crossing hard-limit")
g.log.info("Files creation stopped on mountpoint once exceeded "
"hard limit")
@@ -202,7 +202,7 @@ class QuotaMultiValueLimits(GlusterBaseClass):
"count=1 ; "
"done" % mount_dir)
ret, _, _ = g.run(client, cmd)
- self.assertEqual(ret, 1, "Failed: Files created successfully inspite "
+ self.assertEqual(ret, 1, "Failed: Files created successfully in spite "
"of crossing hard-limit")
g.log.info("Files creation stopped on mountpoint once exceeded "
"hard limit")
@@ -250,7 +250,7 @@ class QuotaMultiValueLimits(GlusterBaseClass):
"count=1 ; "
"done" % mount_dir)
ret, _, _ = g.run(client, cmd)
- self.assertEqual(ret, 1, "Failed: Files created successfully inspite "
+ self.assertEqual(ret, 1, "Failed: Files created successfully in spite "
"of crossing hard-limit")
g.log.info("Files creation stopped on mountpoint once exceeded "
"hard limit")
diff --git a/tests/functional/quota/test_quota_add_brick.py b/tests/functional/quota/test_quota_add_brick.py
index 58e9610ed..1078ceff1 100644
--- a/tests/functional/quota/test_quota_add_brick.py
+++ b/tests/functional/quota/test_quota_add_brick.py
@@ -120,7 +120,7 @@ class QuotaAddBrick(GlusterBaseClass):
% (mount_dir))
ret, _, _ = g.run(client, cmd)
self.assertEqual(ret, 0, ("Failed to create files"))
- g.log.info("Files created succesfully")
+ g.log.info("Files created successfully")
# Quota validate
ret = quota_validate(self.mnode, self.volname,
diff --git a/tests/functional/quota/test_quota_limit_dir_breadth.py b/tests/functional/quota/test_quota_limit_dir_breadth.py
index b70c1187f..952e34134 100755
--- a/tests/functional/quota/test_quota_limit_dir_breadth.py
+++ b/tests/functional/quota/test_quota_limit_dir_breadth.py
@@ -165,7 +165,7 @@ class QuotaLimitDirBreadth(GlusterBaseClass):
ret, _, _ = g.run(mount_object.client_system, cmd)
self.assertFalse(ret, "Failed to create files in %s"
% dir_name)
- g.log.info("Files created succesfully in %s", dir_name)
+ g.log.info("Files created successfully in %s", dir_name)
# Get Quota list on Volume
g.log.info("Get Quota list for every directory on the volume %s",
diff --git a/tests/functional/quota/test_quota_unique_soft_limit.py b/tests/functional/quota/test_quota_unique_soft_limit.py
index f7f726df8..b175f12a2 100644
--- a/tests/functional/quota/test_quota_unique_soft_limit.py
+++ b/tests/functional/quota/test_quota_unique_soft_limit.py
@@ -156,7 +156,7 @@ class QuotaUniqueSoftLimit(GlusterBaseClass):
ret, _, _ = g.run(client, cmd)
self.assertEqual(ret, 0, ("Failed to create files on %s",
dir_name))
- g.log.info("Files created succesfully on %s", dir_name)
+ g.log.info("Files created successfully on %s", dir_name)
time.sleep(1)
diff --git a/tests/functional/quota/test_quota_volume_subdir_limits.py b/tests/functional/quota/test_quota_volume_subdir_limits.py
index 1291796a1..7e4245c16 100644
--- a/tests/functional/quota/test_quota_volume_subdir_limits.py
+++ b/tests/functional/quota/test_quota_volume_subdir_limits.py
@@ -153,7 +153,7 @@ class QuotaVolumeAndSubdirLimits(GlusterBaseClass):
ret, _, _ = g.run(client, cmd)
self.assertEqual(ret, 0, ("Failed to create files in /foo%s",
var1))
- g.log.info("Files created succesfully in /foo%s", var1)
+ g.log.info("Files created successfully in /foo%s", var1)
g.log.info("Files creation is successful on all directories of the "
"volume %s", self.volname)
diff --git a/tests/functional/snapshot/test_256_snapshots.py b/tests/functional/snapshot/test_256_snapshots.py
index f3e6e4b0e..9c5ceaf99 100644
--- a/tests/functional/snapshot/test_256_snapshots.py
+++ b/tests/functional/snapshot/test_256_snapshots.py
@@ -38,7 +38,7 @@ class SanpCreate256(GlusterBaseClass):
4. Varify IO
5. modify max snap limit to default to 256.
6. Create 256 snapshots
- 7. Varify 256 created sucessfully
+ 7. Verify 256 created successfully
8. Create 257th snapshot - check for failure
-- it should fail.
9. Cleanup
@@ -92,7 +92,7 @@ class SanpCreate256(GlusterBaseClass):
ret = cls.unmount_volume_and_cleanup_volume(cls.mounts)
if not ret:
raise ExecutionError("Failed to cleanup volume and mount")
- g.log.info("Cleanup successfull for the volume and mount")
+ g.log.info("Cleanup successful for the volume and mount")
GlusterBaseClass.tearDownClass.im_func(cls)
@@ -155,7 +155,7 @@ class SanpCreate256(GlusterBaseClass):
for i in range(1, 257, 1):
self.assertTrue(("snapy%s" % i in snap_list), "%s snap not "
"found " % ("snapy%s" % i))
- g.log.info("Sucessfully validated names of snap")
+ g.log.info("Successfully validated names of snap")
# Try to create 257th snapshot
cmd_str = "gluster snapshot create %s %s %s" % ("snap", self.volname,
diff --git a/tests/functional/snapshot/test_clone_snap.py b/tests/functional/snapshot/test_clone_snap.py
index a7ebd965c..6311a282a 100644
--- a/tests/functional/snapshot/test_clone_snap.py
+++ b/tests/functional/snapshot/test_clone_snap.py
@@ -198,7 +198,7 @@ class CloneSnapshot(GlusterBaseClass):
"do touch file$i; done; cd;") % mount_obj
proc = g.run(self.clients[0], cmd)
all_mounts_procs.append(proc)
- g.log.info("I/O on mountpoint is Successfull")
+ g.log.info("I/O on mountpoint is successful")
# create snapshot
g.log.info("Starting to Create snapshot of clone volume")
@@ -226,7 +226,7 @@ class CloneSnapshot(GlusterBaseClass):
ret, _, _ = umount_volume(self.clients[0], self.mpoint,
self.mount_type)
if ret == 1:
- raise ExecutionError("Unmounting the moint point %s failed"
+ raise ExecutionError("Unmounting the mount point %s failed"
% self.mpoint)
g.log.info("Mount point %s deleted successfully", self.mpoint)
g.log.info("Unmount Volume Successful")
diff --git a/tests/functional/snapshot/test_mount_snap.py b/tests/functional/snapshot/test_mount_snap.py
index e01be9dd8..67b8a43cd 100644
--- a/tests/functional/snapshot/test_mount_snap.py
+++ b/tests/functional/snapshot/test_mount_snap.py
@@ -177,7 +177,7 @@ class TestSnapMountSnapshot(GlusterBaseClass):
# validate io should fail
self.assertFalse(
validate_io_procs(all_mounts_procs, self.mounts),
- "Unexpected: IO Successfull on all clients"
+ "Unexpected: IO Successful on all clients"
)
g.log.info("Expected: IO failed on clients")
diff --git a/tests/functional/snapshot/test_snap_delete_existing_scheduler.py b/tests/functional/snapshot/test_snap_delete_existing_scheduler.py
index 436e7f214..a321dcd39 100644
--- a/tests/functional/snapshot/test_snap_delete_existing_scheduler.py
+++ b/tests/functional/snapshot/test_snap_delete_existing_scheduler.py
@@ -186,7 +186,7 @@ class SnapshotDeleteExistingScheduler(GlusterBaseClass):
count += 2
self.assertEqual(status.strip().split(":")[2], ' Enabled',
"Failed to check status of scheduler")
- g.log.info("Successfuly checked scheduler status")
+ g.log.info("Successfully checked scheduler status")
# write files on all mounts
g.log.info("Starting IO on all mounts...")
diff --git a/tests/functional/snapshot/test_snap_list_after_restart.py b/tests/functional/snapshot/test_snap_list_after_restart.py
index 1eb83751c..52f34c14a 100644
--- a/tests/functional/snapshot/test_snap_list_after_restart.py
+++ b/tests/functional/snapshot/test_snap_list_after_restart.py
@@ -122,7 +122,7 @@ class SnapshotGlusterddown(GlusterBaseClass):
g.log.info("Starting to list all snapshots")
for server in self.servers[0:]:
out = get_snap_list(server)
- self.assertIsNotNone(out, "Falied to list snap in node"
+ self.assertIsNotNone(out, "Failed to list snap in node"
"%s" % server)
self.assertEqual(len(out), 2, "Failed to validate snap list"
"on node %s" % server)
diff --git a/tests/functional/snapshot/test_snap_uss.py b/tests/functional/snapshot/test_snap_uss.py
index aad6d897c..60e06f359 100644
--- a/tests/functional/snapshot/test_snap_uss.py
+++ b/tests/functional/snapshot/test_snap_uss.py
@@ -197,7 +197,7 @@ class SnapshotUssSnap(GlusterBaseClass):
for mount_obj in self.mounts:
ret, _, _ = uss_list_snaps(mount_obj.client_system,
mount_obj.mountpoint)
- self.assertEqual(ret, 0, "Falied to list snapshot Informations")
+ self.assertEqual(ret, 0, "Failed to list snapshot information")
g.log.info("Successfully Listed snapshots Created")
# Disable USS running
@@ -211,8 +211,8 @@ class SnapshotUssSnap(GlusterBaseClass):
for mount_obj in self.mounts:
ret, out, _ = uss_list_snaps(mount_obj.client_system,
mount_obj.mountpoint)
- self.assertEqual(ret, 0, "Failed to list snapshot Informations")
- g.log.info("Successfully Listed snapshots Created")
+ self.assertEqual(ret, 0, "Failed to list snapshot information")
+ g.log.info("Successfully listed snapshots Created")
# Validate after disabling USS, all files should be same
g.log.info("Validate files after disabling uss")
diff --git a/tests/functional/snapshot/test_snapshot_create.py b/tests/functional/snapshot/test_snapshot_create.py
index 5ffb91681..e1bc9c272 100644
--- a/tests/functional/snapshot/test_snapshot_create.py
+++ b/tests/functional/snapshot/test_snapshot_create.py
@@ -101,7 +101,7 @@ class SnapCreate(GlusterBaseClass):
ret = cls.unmount_volume_and_cleanup_volume(cls.mounts)
if not ret:
raise ExecutionError("Failed to cleanup volume and mount")
- g.log.info("Cleanup successfull for the volume and mount")
+ g.log.info("Cleanup successful for the volume and mount")
GlusterBaseClass.tearDownClass.im_func(cls)
@@ -202,4 +202,4 @@ class SnapCreate(GlusterBaseClass):
for i in range(0, 5):
self.assertIn("snapy%s" % i, snap_list, "%s snap not "
"found " % ("snapy%s" % i))
- g.log.info("Sucessfully validated names of snap")
+ g.log.info("Successfully validated names of snap")
diff --git a/tests/functional/snapshot/test_snapshot_restore.py b/tests/functional/snapshot/test_snapshot_restore.py
index cbec67e04..966551b58 100644
--- a/tests/functional/snapshot/test_snapshot_restore.py
+++ b/tests/functional/snapshot/test_snapshot_restore.py
@@ -106,7 +106,7 @@ class SnapRestore(GlusterBaseClass):
ret = cls.unmount_volume_and_cleanup_volume(cls.mounts)
if not ret:
raise ExecutionError("Failed to cleanup volume and mount")
- g.log.info("Cleanup successfull for the volume and mount")
+ g.log.info("Cleanup successful for the volume and mount")
GlusterBaseClass.tearDownClass.im_func(cls)
@@ -157,7 +157,7 @@ class SnapRestore(GlusterBaseClass):
% self.volname))
g.log.info("Volume options for%s is set successfully", self.volname)
- # Get brick list befor taking snap_restore
+ # Get brick list before taking snap_restore
bricks_before_snap_restore = get_all_bricks(self.mnode, self.volname)
g.log.info("Brick List before snap restore "
"volume: %s", bricks_before_snap_restore)
diff --git a/tests/functional/snapshot/test_uss_brick_down.py b/tests/functional/snapshot/test_uss_brick_down.py
index 94b48c043..172b0f291 100644
--- a/tests/functional/snapshot/test_uss_brick_down.py
+++ b/tests/functional/snapshot/test_uss_brick_down.py
@@ -98,7 +98,7 @@ class SnapUssBrickDown(GlusterBaseClass):
* Mount volume
* Perform I/O on mounts
* Bring down one brick
- * Enbale USS
+ * Enable USS
* Validate USS is enabled
* Bring the brick online using gluster v start force
* Create 2 snapshots snapy1 & snapy2
@@ -164,7 +164,7 @@ class SnapUssBrickDown(GlusterBaseClass):
g.log.info("Bring the brick online using gluster v start force")
ret, _, _ = volume_start(self.mnode, self.volname, force=True)
self.assertEqual(ret, 0, "Volume start with force failed")
- g.log.info("Volume start with force successfull")
+ g.log.info("Volume start with force successful")
# Create 2 snapshot
g.log.info("Creating 2 snapshots for volume %s", self.volname)
diff --git a/tests/functional/snapshot/test_uss_snap_active_deactive.py b/tests/functional/snapshot/test_uss_snap_active_deactive.py
index c28cc1d60..93c0231ee 100644
--- a/tests/functional/snapshot/test_uss_snap_active_deactive.py
+++ b/tests/functional/snapshot/test_uss_snap_active_deactive.py
@@ -100,7 +100,7 @@ class SnapUssActiveD(GlusterBaseClass):
* Perform I/O on mounts
* Create 2 snapshots snapy1 & snapy2
* Validate snap created
- * Enbale USS
+ * Enable USS
* Validate USS is enabled
* Validate snapd is running
* Activate snapy1 & snapy2
diff --git a/tests/functional/snapshot/test_validate_snap_scheduler.py b/tests/functional/snapshot/test_validate_snap_scheduler.py
index ea54f26d2..c8513ca22 100644
--- a/tests/functional/snapshot/test_validate_snap_scheduler.py
+++ b/tests/functional/snapshot/test_validate_snap_scheduler.py
@@ -127,7 +127,7 @@ class SnapshotSchedulerBehaviour(GlusterBaseClass):
count += 2
self.assertEqual(status.strip().split(":")[2], ' Enabled',
"Failed to check status of scheduler")
- g.log.info("Successfuly checked scheduler status")
+ g.log.info("Successfully checked scheduler status")
# Disable snap scheduler
g.log.info("Starting to disable snapshot scheduler on all nodes")
@@ -148,4 +148,4 @@ class SnapshotSchedulerBehaviour(GlusterBaseClass):
count += 2
self.assertEqual(status.strip().split(":")[2], ' Disabled',
"Failed to check status of scheduler")
- g.log.info("Successfuly checked scheduler status")
+ g.log.info("Successfully checked scheduler status")
diff --git a/tests/functional/snapshot/test_validate_snapshot_max_limit.py b/tests/functional/snapshot/test_validate_snapshot_max_limit.py
index 75582e4ed..3df2a98dc 100644
--- a/tests/functional/snapshot/test_validate_snapshot_max_limit.py
+++ b/tests/functional/snapshot/test_validate_snapshot_max_limit.py
@@ -26,7 +26,7 @@ Steps :
5. modify max snap limit to default to 10.
6. modify soft-limit to 50%
6. Create 5 snapshots
- 7. Varify 5 created sucessfully
+ 7. Verify 5 created successfully
8. Create 6th snapshot - check for warning
-- it should not fail.
9. modify soft-limit to 100%
@@ -65,7 +65,7 @@ class SnapCreateMax(GlusterBaseClass):
5. modify max snap limit to default to 10.
6. modify soft-limit to 50%
6. Create 5 snapshots
- 7. Varify 5 created sucessfully
+ 7. Verify 5 created successfully
8. Create 6th snapshot - check for warning
-- it should not fail.
9. modify soft-limit to 100%
@@ -125,7 +125,7 @@ class SnapCreateMax(GlusterBaseClass):
ret = self.unmount_volume_and_cleanup_volume(self.mounts)
if not ret:
raise ExecutionError("Failed to cleanup volume and mount")
- g.log.info("Cleanup successfull for the volume and mount")
+ g.log.info("Cleanup successful for the volume and mount")
def test_validate_snaps_max_limit(self):
# pylint: disable=too-many-statements
@@ -194,7 +194,7 @@ class SnapCreateMax(GlusterBaseClass):
for i in range(1, 6):
self.assertTrue(("snapy%s" % i in snap_list), "%s snap not "
"found " % ("snapy%s" % i))
- g.log.info("Sucessfully validated names of snapshots")
+ g.log.info("Successfully validated names of snapshots")
# create 6th snapshot
cmd_str = "gluster snapshot create %s %s %s" % ("snapy6", self.volname,
@@ -246,7 +246,7 @@ class SnapCreateMax(GlusterBaseClass):
cmd_str = "gluster snapshot create %s %s %s" % ("snap", self.volname,
"no-timestamp")
ret, _, _ = g.run(self.mnode, cmd_str)
- self.assertNotEqual(ret, 0, ("Unexpected: sucessfully created 'snap' "
+ self.assertNotEqual(ret, 0, ("Unexpected: successfully created 'snap' "
"for %s" % self.volname))
g.log.info("Expected: Snapshot 'snap' not created as it is 11th snap")
diff --git a/tests/functional/snapshot/test_validate_snapshot_rebalance.py b/tests/functional/snapshot/test_validate_snapshot_rebalance.py
index 7eb674475..f9e018d64 100644
--- a/tests/functional/snapshot/test_validate_snapshot_rebalance.py
+++ b/tests/functional/snapshot/test_validate_snapshot_rebalance.py
@@ -55,7 +55,7 @@ class SnapCreateRebal(GlusterBaseClass):
during rebalance
9. After rebalance is completed, create snapshots with the same name as
in Step 7
- -- this operation should be sucessfull
+ -- this operation should be successful
10. Cleanup
"""
@@ -103,7 +103,7 @@ class SnapCreateRebal(GlusterBaseClass):
ret = self.unmount_volume_and_cleanup_volume(self.mounts)
if not ret:
raise ExecutionError("Failed to cleanup volume and mount")
- g.log.info("Cleanup successfull for the volume and mount")
+ g.log.info("Cleanup successful for the volume and mount")
def test_snapshot_while_rebalance(self):
# pylint: disable=too-many-statements, missing-docstring