Diffstat (limited to 'tests/functional/glusterd')
-rw-r--r--  tests/functional/glusterd/test_brick_port_after_stop_glusterd_modify_volume.py   2
-rw-r--r--  tests/functional/glusterd/test_brick_status_when_quorum_not_met.py               2
-rw-r--r--  tests/functional/glusterd/test_concurrent_set.py                                 4
-rw-r--r--  tests/functional/glusterd/test_create_vol_with_used_bricks.py                    2
-rw-r--r--  tests/functional/glusterd/test_nfs_quorum.py                                     2
-rw-r--r--  tests/functional/glusterd/test_peer_detach.py                                    2
-rw-r--r--  tests/functional/glusterd/test_peer_probe_while_snapd_running.py                 2
-rw-r--r--  tests/functional/glusterd/test_probe_glusterd.py                                 2
-rw-r--r--  tests/functional/glusterd/test_probe_hostname.py                                12
-rw-r--r--  tests/functional/glusterd/test_quorum_remove_brick.py                            2
-rw-r--r--  tests/functional/glusterd/test_rebalance_hang.py                                 4
-rw-r--r--  tests/functional/glusterd/test_rebalance_spurious.py                             4
-rw-r--r--  tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py            4
-rw-r--r--  tests/functional/glusterd/test_volume_get.py                                     4
-rw-r--r--  tests/functional/glusterd/test_volume_network_ping_timeout.py                    2
-rw-r--r--  tests/functional/glusterd/test_volume_reset.py                                   6
-rw-r--r--  tests/functional/glusterd/test_volume_status.py                                  8
-rw-r--r--  tests/functional/glusterd/test_volume_status_fd.py                               2
18 files changed, 33 insertions, 33 deletions
diff --git a/tests/functional/glusterd/test_brick_port_after_stop_glusterd_modify_volume.py b/tests/functional/glusterd/test_brick_port_after_stop_glusterd_modify_volume.py
index da80f67f4..e44514aff 100644
--- a/tests/functional/glusterd/test_brick_port_after_stop_glusterd_modify_volume.py
+++ b/tests/functional/glusterd/test_brick_port_after_stop_glusterd_modify_volume.py
@@ -104,7 +104,7 @@ class TestBrickPortAfterModifyVolume(GlusterBaseClass):
bricks_list, force=False)
self.assertEqual(ret[0], 0, ("Unable"
"to create volume %s" % self.volname))
- g.log.info("Volume created successfuly %s", self.volname)
+ g.log.info("Volume created successfully %s", self.volname)
ret, _, _ = volume_start(self.mnode, self.volname)
self.assertEqual(ret, 0, ("Failed to start the "
diff --git a/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py b/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py
index 6cb3ee075..cc77b3ea5 100644
--- a/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py
+++ b/tests/functional/glusterd/test_brick_status_when_quorum_not_met.py
@@ -132,7 +132,7 @@ class TestBrickStatusWhenQuorumNotMet(GlusterBaseClass):
# Verifying node count in volume status after glusterd
# started on servers. It's not possible to check the brick status
- # immediately after glusterd start, thats why verifying that all
+ # immediately after glusterd start, that's why verifying that all
# glusterd started nodes available in gluster volume status or not
count = 0
while count < 50:
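For context, the polling pattern this hunk's comment describes looks roughly like the sketch below. It is a minimal illustration, assuming glustolibs' get_volume_status helper (which returns a parsed status dict keyed by volume, then node), not the test's exact code:

    from time import sleep

    from glustolibs.gluster.volume_ops import get_volume_status

    def wait_for_nodes_in_volume_status(mnode, volname, expected, attempts=50):
        """Poll until `gluster volume status` lists all expected nodes."""
        for _ in range(attempts):
            status = get_volume_status(mnode, volname)
            # Brick status is unreliable right after a glusterd start, so
            # just count the nodes that have re-registered.
            if status and len(status[volname]) >= expected:
                return True
            sleep(2)
        return False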
diff --git a/tests/functional/glusterd/test_concurrent_set.py b/tests/functional/glusterd/test_concurrent_set.py
index 7c753ea78..4b432b784 100644
--- a/tests/functional/glusterd/test_concurrent_set.py
+++ b/tests/functional/glusterd/test_concurrent_set.py
@@ -60,7 +60,7 @@ class TestConcurrentSet(GlusterBaseClass):
self.brick_list, force=False)
self.assertEqual(ret[0], 0, ("Unable"
"to create volume %s" % self.volname))
- g.log.info("Volume created successfuly %s", self.volname)
+ g.log.info("Volume created successfully %s", self.volname)
# Create a volume
self.volname = "second-vol"
@@ -72,7 +72,7 @@ class TestConcurrentSet(GlusterBaseClass):
self.brick_list, force=False)
self.assertEqual(ret[0], 0, ("Unable"
"to create volume %s" % self.volname))
- g.log.info("Volume created successfuly %s", self.volname)
+ g.log.info("Volume created successfully %s", self.volname)
cmd1 = ("for i in `seq 1 100`; do gluster volume set first-vol "
"read-ahead on; done")
diff --git a/tests/functional/glusterd/test_create_vol_with_used_bricks.py b/tests/functional/glusterd/test_create_vol_with_used_bricks.py
index 940d7a451..1be34f734 100644
--- a/tests/functional/glusterd/test_create_vol_with_used_bricks.py
+++ b/tests/functional/glusterd/test_create_vol_with_used_bricks.py
@@ -111,7 +111,7 @@ class TestCreateVolWithUsedBricks(GlusterBaseClass):
# Mounting volume
ret = self.mount_volume(self.mounts)
self.assertTrue(ret, "Volume mount failed for %s" % self.volname)
- g.log.info("Volume mounted sucessfully : %s", self.volname)
+ g.log.info("Volume mounted successfully : %s", self.volname)
# run IOs
g.log.info("Starting IO on all mounts...")
diff --git a/tests/functional/glusterd/test_nfs_quorum.py b/tests/functional/glusterd/test_nfs_quorum.py
index ced5b719f..62d2ce24a 100644
--- a/tests/functional/glusterd/test_nfs_quorum.py
+++ b/tests/functional/glusterd/test_nfs_quorum.py
@@ -82,7 +82,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
# Mounting a NFS volume
ret = self.mount_volume(self.mounts)
self.assertTrue(ret, "NFS volume mount failed for %s" % self.volname)
- g.log.info("Volume mounted sucessfully : %s", self.volname)
+ g.log.info("Volume mounted successfully : %s", self.volname)
# unmounting NFS Volume
ret = self.unmount_volume(self.mounts)
diff --git a/tests/functional/glusterd/test_peer_detach.py b/tests/functional/glusterd/test_peer_detach.py
index 2bae76d2a..633036927 100644
--- a/tests/functional/glusterd/test_peer_detach.py
+++ b/tests/functional/glusterd/test_peer_detach.py
@@ -68,7 +68,7 @@ class PeerDetachVerification(GlusterBaseClass):
# Assigning non existing host to variable
self.non_exist_host = '256.256.256.256'
- # Assigning invalid ip to vaiable
+ # Assigning invalid ip to variable
self.invalid_ip = '10.11.a'
# Peer detach to specified server
diff --git a/tests/functional/glusterd/test_peer_probe_while_snapd_running.py b/tests/functional/glusterd/test_peer_probe_while_snapd_running.py
index c35b3eaeb..aff015638 100644
--- a/tests/functional/glusterd/test_peer_probe_while_snapd_running.py
+++ b/tests/functional/glusterd/test_peer_probe_while_snapd_running.py
@@ -97,7 +97,7 @@ class TestPeerProbeWhileSnapdRunning(GlusterBaseClass):
# Checking snapd running or not
ret = is_snapd_running(self.mnode, self.volname)
- self.assertTrue(ret, "Snapd not runnig for volume %s" % self.volname)
+ self.assertTrue(ret, "Snapd not running for volume %s" % self.volname)
g.log.info("snapd running for volume %s", self.volname)
# Probing new node
diff --git a/tests/functional/glusterd/test_probe_glusterd.py b/tests/functional/glusterd/test_probe_glusterd.py
index d14991dbd..54b99eec2 100644
--- a/tests/functional/glusterd/test_probe_glusterd.py
+++ b/tests/functional/glusterd/test_probe_glusterd.py
@@ -62,7 +62,7 @@ class PeerProbeInvalidIpNonExistingHost(GlusterBaseClass):
# Assigning non existing ip to variable
self.non_exist_ip = '256.256.256.256'
- # Assigning invalid ip to vaiable
+ # Assigning invalid ip to variable
self.invalid_ip = '10.11.a'
# Assigning non existing host to variable
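A minimal sketch of the negative probe this setup feeds, assuming glustolibs' peer_probe wrapper returns the CLI's (ret, out, err) triple:

    from glustolibs.gluster.peer_ops import peer_probe

    ret, _, err = peer_probe(mnode, '10.11.a')  # mnode assumed in scope
    assert ret != 0, "peer probe of an invalid IP should have failed"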
diff --git a/tests/functional/glusterd/test_probe_hostname.py b/tests/functional/glusterd/test_probe_hostname.py
index 55476edc0..6e7d87b53 100644
--- a/tests/functional/glusterd/test_probe_hostname.py
+++ b/tests/functional/glusterd/test_probe_hostname.py
@@ -103,14 +103,14 @@ class TestPeerProbe(GlusterBaseClass):
self.brick_list, force=False)
self.assertEqual(ret, 0, "Unable"
"to create volume % s" % self.volname)
- g.log.info("Volume created successfuly % s", self.volname)
+ g.log.info("Volume created successfully % s", self.volname)
# Start a volume
g.log.info("Start a volume")
ret, _, _ = volume_start(self.mnode, self.volname)
self.assertEqual(ret, 0, "Unable"
"to start volume % s" % self.volname)
- g.log.info("Volume started successfuly % s", self.volname)
+ g.log.info("Volume started successfully % s", self.volname)
# Get volume info
g.log.info("get volume info")
@@ -127,7 +127,7 @@ class TestPeerProbe(GlusterBaseClass):
ret, _, _ = volume_stop(self.mnode, self.volname)
self.assertEqual(ret, 0, "Unable"
"to stop volume % s" % self.volname)
- g.log.info("Volume stopped successfuly % s", self.volname)
+ g.log.info("Volume stopped successfully % s", self.volname)
# Create a volume
self.volname = "test-vol-fqdn"
@@ -154,14 +154,14 @@ class TestPeerProbe(GlusterBaseClass):
my_brick_list, force=False)
self.assertEqual(ret, 0, "Unable"
"to create volume % s" % self.volname)
- g.log.info("Volume created successfuly % s", self.volname)
+ g.log.info("Volume created successfully % s", self.volname)
# Start a volume
g.log.info("Start a volume")
ret, _, _ = volume_start(self.mnode, self.volname)
self.assertEqual(ret, 0, "Unable"
"to start volume % s" % self.volname)
- g.log.info("Volume started successfuly % s", self.volname)
+ g.log.info("Volume started successfully % s", self.volname)
# Get volume info
g.log.info("get volume info")
@@ -178,4 +178,4 @@ class TestPeerProbe(GlusterBaseClass):
ret, _, _ = volume_stop(self.mnode, self.volname)
self.assertEqual(ret, 0, "Unable"
"to stop volume % s" % self.volname)
- g.log.info("Volume stopped successfuly % s", self.volname)
+ g.log.info("Volume stopped successfully % s", self.volname)
diff --git a/tests/functional/glusterd/test_quorum_remove_brick.py b/tests/functional/glusterd/test_quorum_remove_brick.py
index 6d5e45b11..4429d8231 100644
--- a/tests/functional/glusterd/test_quorum_remove_brick.py
+++ b/tests/functional/glusterd/test_quorum_remove_brick.py
@@ -120,7 +120,7 @@ class TestServerQuorumNotMet(GlusterBaseClass):
% self.random_server)
g.log.info("Glusterd stopped successfully on %s", self.random_server)
- # Forming brick list for perfroming remove brick operation
+ # Forming brick list for performing remove brick operation
remove_brick_list = form_bricks_list_to_remove_brick(self.mnode,
self.volname)
self.assertIsNotNone(remove_brick_list, "Failed to get brick list for "
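The step that follows this brick-list setup is the negative remove-brick. A hedged sketch, assuming glustolibs' remove_brick wrapper:

    from glustolibs.gluster.brick_ops import remove_brick

    # With glusterd down on one server, server quorum is lost, so the
    # remove-brick start should be rejected.
    ret, _, err = remove_brick(mnode, volname, remove_brick_list, 'start')
    assert ret != 0, "remove-brick must fail when server quorum is not met"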
diff --git a/tests/functional/glusterd/test_rebalance_hang.py b/tests/functional/glusterd/test_rebalance_hang.py
index d96a4043a..a826703c1 100644
--- a/tests/functional/glusterd/test_rebalance_hang.py
+++ b/tests/functional/glusterd/test_rebalance_hang.py
@@ -114,7 +114,7 @@ class TestRebalanceHang(GlusterBaseClass):
bricks_list, force=False)
self.assertEqual(ret, 0, ("Unable"
"to create volume %s" % self.volname))
- g.log.info("Volume created successfuly %s", self.volname)
+ g.log.info("Volume created successfully %s", self.volname)
ret, _, _ = volume_start(self.mnode, self.volname, False)
self.assertEqual(ret, 0, ("Failed to start the "
@@ -130,7 +130,7 @@ class TestRebalanceHang(GlusterBaseClass):
mserver=self.mnode,
mclient=self.mounts[0].client_system)
self.assertEqual(ret, 0, ("Volume %s is not mounted") % self.volname)
- g.log.info("Volume mounted sucessfully : %s", self.volname)
+ g.log.info("Volume mounted successfully : %s", self.volname)
self.all_mounts_procs = []
# Creating files
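The file-creation step above feeds the actual rebalance check. A rough sketch of that step, assuming glustolibs' rebalance_ops wrappers:

    from glustolibs.gluster.rebalance_ops import (
        rebalance_start, wait_for_rebalance_to_complete)

    ret, _, _ = rebalance_start(mnode, volname)
    assert ret == 0, "failed to start rebalance on %s" % volname
    # The test's point: rebalance must finish rather than hang.
    assert wait_for_rebalance_to_complete(mnode, volname), "rebalance hung"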
diff --git a/tests/functional/glusterd/test_rebalance_spurious.py b/tests/functional/glusterd/test_rebalance_spurious.py
index 9b7318812..ad16a0039 100644
--- a/tests/functional/glusterd/test_rebalance_spurious.py
+++ b/tests/functional/glusterd/test_rebalance_spurious.py
@@ -114,7 +114,7 @@ class TestSpuriousRebalance(GlusterBaseClass):
bricks_list, force=False)
self.assertEqual(ret, 0, ("Unable"
"to create volume %s" % self.volname))
- g.log.info("Volume created successfuly %s", self.volname)
+ g.log.info("Volume created successfully %s", self.volname)
ret, _, _ = volume_start(self.mnode, self.volname, False)
self.assertEqual(ret, 0, ("Failed to start the "
@@ -130,7 +130,7 @@ class TestSpuriousRebalance(GlusterBaseClass):
mserver=self.mnode,
mclient=self.mounts[0].client_system)
self.assertEqual(ret, 0, ("Volume %s is not mounted") % self.volname)
- g.log.info("Volume mounted sucessfully : %s", self.volname)
+ g.log.info("Volume mounted successfully : %s", self.volname)
remove_brick_list = []
remove_brick_list.append(bricks_list[2])
ret, _, _ = remove_brick(self.mnode, self.volname, remove_brick_list,
diff --git a/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py b/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py
index 217eae5dc..f84c7dba8 100644
--- a/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py
+++ b/tests/functional/glusterd/test_remove_brick_after_restart_glusterd.py
@@ -112,7 +112,7 @@ class TestRemoveBrickAfterRestartGlusterd(GlusterBaseClass):
bricks_list, force=False, **kwargs)
self.assertEqual(ret[0], 0, ("Unable"
"to create volume %s" % self.volname))
- g.log.info("Volume created successfuly %s", self.volname)
+ g.log.info("Volume created successfully %s", self.volname)
ret, _, _ = volume_start(self.mnode, self.volname, False)
self.assertEqual(ret, 0, ("Failed to start the "
@@ -128,7 +128,7 @@ class TestRemoveBrickAfterRestartGlusterd(GlusterBaseClass):
mserver=self.mnode,
mclient=self.mounts[0].client_system)
self.assertEqual(ret, 0, ("Volume %s is not mounted") % self.volname)
- g.log.info("Volume mounted sucessfully : %s", self.volname)
+ g.log.info("Volume mounted successfully : %s", self.volname)
self.all_mounts_procs = []
# Creating files
diff --git a/tests/functional/glusterd/test_volume_get.py b/tests/functional/glusterd/test_volume_get.py
index 228b15209..5746d5d59 100644
--- a/tests/functional/glusterd/test_volume_get.py
+++ b/tests/functional/glusterd/test_volume_get.py
@@ -161,7 +161,7 @@ class TestVolumeGet(GlusterBaseClass):
"existing volume %s with non existing option",
self.volname)
- # perfroming gluster volume get volname all
+ # performing gluster volume get volname all
ret = get_volume_options(self.mnode, self.volname, "all")
self.assertIsNotNone(ret, "gluster volume get %s all command "
@@ -194,7 +194,7 @@ class TestVolumeGet(GlusterBaseClass):
"performance.low-prio-threads value is not correct")
g.log.info("performance.low-prio-threads value is correct")
- # perfroming gluster volume get volname all
+ # performing gluster volume get volname all
ret = get_volume_options(self.mnode, self.volname, "all")
self.assertIsNotNone(ret, "gluster volume get %s all command "
"failed" % self.volname)
diff --git a/tests/functional/glusterd/test_volume_network_ping_timeout.py b/tests/functional/glusterd/test_volume_network_ping_timeout.py
index b897c4a7a..7d72d8ab2 100644
--- a/tests/functional/glusterd/test_volume_network_ping_timeout.py
+++ b/tests/functional/glusterd/test_volume_network_ping_timeout.py
@@ -96,7 +96,7 @@ class CheckVolumeChecksumAfterChangingNetworkPingTimeOut(GlusterBaseClass):
# Mounting volume as glusterfs
ret = self.mount_volume(self.mounts)
self.assertTrue(ret, "volume mount failed for %s" % self.volname)
- g.log.info("Volume mounted sucessfully : %s", self.volname)
+ g.log.info("Volume mounted successfully : %s", self.volname)
# Checking volume mounted or not
ret = is_mounted(self.volname, self.mounts[0].mountpoint, self.mnode,
diff --git a/tests/functional/glusterd/test_volume_reset.py b/tests/functional/glusterd/test_volume_reset.py
index f61fdaaba..9720c3e92 100644
--- a/tests/functional/glusterd/test_volume_reset.py
+++ b/tests/functional/glusterd/test_volume_reset.py
@@ -124,10 +124,10 @@ class GlusterdVolumeReset(GlusterBaseClass):
cmd = "gluster volume reset " + self.volname
ret, _, _ = g.run(self.mnode, cmd)
self.assertEqual(ret, 0, "volume reset failed for : %s" % self.volname)
- g.log.info("volume resetted succefully :%s", self.volname)
+ g.log.info("Volume reset successfully :%s", self.volname)
# After volume reset snap daemon will not be running,
- # bitd and scrub deamons will be in running state.
+ # bitd and scrub daemons will be in running state.
g.log.info("checking snapshot, scrub and bitrot daemons\
running or not after volume reset")
for mnode in self.servers:
@@ -157,7 +157,7 @@ class GlusterdVolumeReset(GlusterBaseClass):
ret, _, _ = g.run(self.mnode, cmd)
self.assertEqual(ret, 0, "volume reset fail\
for : %s" % self.volname)
- g.log.info("Volume reset sucessfully with force option :%s",
+ g.log.info("Volume reset successfully with force option :%s",
self.volname)
# After volume reset bitd, snapd, scrub daemons will not be running,
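A rough sketch of the daemon check these comments describe; the pgrep patterns are an assumption for illustration, not the test's own helpers:

    from glusto.core import Glusto as g

    def daemon_running(server, name):
        """True if a gluster daemon ('snapd', 'bitd', 'scrub') is up."""
        ret, _, _ = g.run(server, "pgrep -f 'gluster.*%s'" % name)
        return ret == 0

    # Plain `gluster volume reset <vol>` stops snapd but leaves the
    # bitrot daemons running; reset with force stops all three.
    for server in servers:  # servers assumed in scope
        assert not daemon_running(server, "snapd")
        assert daemon_running(server, "bitd")
        assert daemon_running(server, "scrub")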
diff --git a/tests/functional/glusterd/test_volume_status.py b/tests/functional/glusterd/test_volume_status.py
index ff5d5752f..109586f35 100644
--- a/tests/functional/glusterd/test_volume_status.py
+++ b/tests/functional/glusterd/test_volume_status.py
@@ -110,17 +110,17 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass):
command on all clusters randomly.
"gluster volume status volname inode" command should not get
hang while IOs in progress.
- Then check that IOs completed successfullly or not on mount point.
+ Then check that IOs completed successfully or not on mount point.
Check that files in mount point listing properly or not.
'''
# Mounting a volume
ret = self.mount_volume(self.mounts)
self.assertTrue(ret, "Volume mount failed for %s" % self.volname)
- g.log.info("Volume mounted sucessfully : %s", self.volname)
+ g.log.info("Volume mounted successfully : %s", self.volname)
- # After Mounting immediately writting IO's are failing some times,
- # thats why keeping sleep for 10 secs
+ # After Mounting immediately writing IO's are failing some times,
+ # that's why keeping sleep for 10 secs
sleep(10)
# run IOs
diff --git a/tests/functional/glusterd/test_volume_status_fd.py b/tests/functional/glusterd/test_volume_status_fd.py
index 2765325c5..415c96de7 100644
--- a/tests/functional/glusterd/test_volume_status_fd.py
+++ b/tests/functional/glusterd/test_volume_status_fd.py
@@ -94,7 +94,7 @@ class VolumeStatusFdWhenIOInProgress(GlusterBaseClass):
-> Mount the volume on 2 clients
-> Run I/O's on mountpoint
-> While I/O's are in progress
- -> Perfrom gluster volume status fd repeatedly
+ -> Perform gluster volume status fd repeatedly
-> List all files and dirs listed
'''
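A minimal sketch of the repeated-status step from the docstring above, assuming glusto's g.run helper (used elsewhere in this commit) with mnode and volname in scope:

    from glusto.core import Glusto as g

    # Hammer `volume status fd` while the I/O processes are still running;
    # the command should keep succeeding rather than hang.
    for _ in range(20):
        ret, _, _ = g.run(mnode, "gluster volume status %s fd" % volname)
        assert ret == 0, "volume status fd failed while I/O was in progress"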