author    Nigel Babu <nigelb@redhat.com>    2018-03-05 15:49:23 +0530
committer Nigel Babu <nigelb@redhat.com>    2018-03-27 16:05:15 +0530
commit    fb5145be2db1a7c96b008af8a40e3b7b18df9673 (patch)
tree      3ca087e0996bfd975e97b4f0235421a37d2e4767 /tests/functional/glusterd
parent    8804c9499e9ed0d37823dc55d03eb7792907cf0b (diff)
Fix up coding style issues in tests
Change-Id: I14609030983d4485dbce5a4ffed1e0353e3d1bc7
Diffstat (limited to 'tests/functional/glusterd')
-rw-r--r-- tests/functional/glusterd/test_add_brick.py (renamed from tests/functional/glusterd/test_add_brick_functionality.py) | 26
-rw-r--r-- tests/functional/glusterd/test_concurrent_set.py | 23
-rw-r--r-- tests/functional/glusterd/test_nfs_quorum.py (renamed from tests/functional/glusterd/test_nfs_quorum_on_all_vol_types.py) | 39
-rw-r--r-- tests/functional/glusterd/test_peer_detach.py | 39
-rw-r--r-- tests/functional/glusterd/test_probe_glusterd.py | 10
-rw-r--r-- tests/functional/glusterd/test_quorum_syslog.py (renamed from tests/functional/glusterd/test_quorum_related_messages_in_syslog.py) | 48
-rw-r--r-- tests/functional/glusterd/test_rebalance_new_node.py (renamed from tests/functional/glusterd/test_rebalance_status_from_new_node.py) | 10
-rw-r--r-- tests/functional/glusterd/test_volume_create.py | 24
-rw-r--r-- tests/functional/glusterd/test_volume_delete.py | 10
-rw-r--r-- tests/functional/glusterd/test_volume_get.py | 45
-rw-r--r-- tests/functional/glusterd/test_volume_op.py | 148
-rw-r--r-- tests/functional/glusterd/test_volume_operations.py | 125
-rw-r--r-- tests/functional/glusterd/test_volume_reset.py | 116
-rw-r--r-- tests/functional/glusterd/test_volume_status.py | 29
14 files changed, 311 insertions, 381 deletions
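Most of the churn in the hunks below is mechanical: %-formatted strings inside g.log calls become lazy logging arguments, unused command outputs are rebound to "_", module-level imports move to the top of each file, and redundant parentheses around conditions are dropped. A minimal standalone sketch of the logging and unused-value pattern follows; the logger name, volume name, and result tuple are illustrative stand-ins, not taken from any one test in this patch.

    import logging

    log = logging.getLogger("glustolibs")   # stand-in for glusto's g.log
    volname = "first_volume"                # illustrative volume name

    # Old style flagged by pylint (logging-not-lazy): the message is
    # %-formatted eagerly, even when the log level would discard it.
    log.info("Volume created and started successfully : %s" % volname)

    # New style used throughout this patch: the logger formats lazily,
    # only when the record is actually emitted.
    log.info("Volume created and started successfully : %s", volname)

    # Unused stdout/stderr from command helpers are bound to "_" instead
    # of out/err (a plain tuple stands in for g.run()/add_brick() here).
    ret, _, _ = (0, "", "")
    assert ret == 0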
diff --git a/tests/functional/glusterd/test_add_brick_functionality.py b/tests/functional/glusterd/test_add_brick.py
index bd6ce3ea0..aa3b6aedf 100644
--- a/tests/functional/glusterd/test_add_brick_functionality.py
+++ b/tests/functional/glusterd/test_add_brick.py
@@ -14,6 +14,7 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+import random
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.exceptions import ExecutionError
@@ -22,7 +23,6 @@ from glustolibs.gluster.volume_ops import (get_volume_list)
from glustolibs.gluster.brick_ops import add_brick
from glustolibs.gluster.lib_utils import form_bricks_list
from glustolibs.gluster.rebalance_ops import rebalance_start
-import random
@runs_on([['distributed-replicated'], ['glusterfs']])
@@ -50,16 +50,16 @@ class TestVolumeCreate(GlusterBaseClass):
ret = cleanup_volume(self.mnode, volume)
if not ret:
raise ExecutionError("Unable to delete volume % s" % volume)
- g.log.info("Volume deleted successfully : %s" % volume)
+ g.log.info("Volume deleted successfully : %s", volume)
GlusterBaseClass.tearDown.im_func(self)
def test_add_brick_functionality(self):
ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
- self.assertTrue(ret, ("Failed to create and start volume %s"
- % self.volname))
- g.log.info("Volume created and started succssfully")
+ self.assertTrue(ret, "Failed to create and start volume %s"
+ % self.volname)
+ g.log.info("Volume created and started successfully")
# form bricks list to test add brick functionality
@@ -75,7 +75,7 @@ class TestVolumeCreate(GlusterBaseClass):
# of bricks
bricks_list_to_add = [bricks_list[0]]
- ret, out, err = add_brick(self.mnode, self.volname, bricks_list_to_add)
+ ret, _, _ = add_brick(self.mnode, self.volname, bricks_list_to_add)
self.assertNotEqual(ret, 0, "Expected: It should fail to add a single"
"brick to a replicated volume. Actual: "
"Successfully added single brick to volume")
@@ -94,8 +94,8 @@ class TestVolumeCreate(GlusterBaseClass):
non_existing_brick = complete_brick + "/non_existing_brick"
bricks_list_to_add[index_of_non_existing_brick] = non_existing_brick
- ret, out, err = add_brick(self.mnode, self.volname,
- bricks_list_to_add, False, **kwargs)
+ ret, _, _ = add_brick(self.mnode, self.volname,
+ bricks_list_to_add, False, **kwargs)
self.assertNotEqual(ret, 0, "Expected: It should fail to add non"
"existing brick to a volume. Actual: "
"Successfully added non existing brick to volume")
@@ -110,8 +110,8 @@ class TestVolumeCreate(GlusterBaseClass):
complete_brick = bricks_list_to_add[index_of_node].split(":")
complete_brick[0] = "abc.def.ghi.jkl"
bricks_list_to_add[index_of_node] = ":".join(complete_brick)
- ret, out, err = add_brick(self.mnode, self.volname,
- bricks_list_to_add, False, **kwargs)
+ ret, _, _ = add_brick(self.mnode, self.volname,
+ bricks_list_to_add, False, **kwargs)
self.assertNotEqual(ret, 0, "Expected: It should fail to add brick "
"from a node which is not part of a cluster."
"Actual:Successfully added bricks from node which"
@@ -124,11 +124,11 @@ class TestVolumeCreate(GlusterBaseClass):
bricks_list_to_add = bricks_list[(2 * replica_count_of_volume) + 1:
(3 * replica_count_of_volume) + 1]
- ret, out, err = add_brick(self.mnode, self.volname,
- bricks_list_to_add, False, **kwargs)
+ ret, _, _ = add_brick(self.mnode, self.volname,
+ bricks_list_to_add, False, **kwargs)
self.assertEqual(ret, 0, "Failed to add the bricks to the volume")
g.log.info("Successfully added bricks to volume")
# Perform rebalance start operation
- ret, out, err = rebalance_start(self.mnode, self.volname)
+ ret, _, _ = rebalance_start(self.mnode, self.volname)
self.assertEqual(ret, 0, "Rebalance start is success")
diff --git a/tests/functional/glusterd/test_concurrent_set.py b/tests/functional/glusterd/test_concurrent_set.py
index 91cfe659c..7c753ea78 100644
--- a/tests/functional/glusterd/test_concurrent_set.py
+++ b/tests/functional/glusterd/test_concurrent_set.py
@@ -29,17 +29,12 @@ class TestConcurrentSet(GlusterBaseClass):
@classmethod
def setUpClass(cls):
GlusterBaseClass.setUpClass.im_func(cls)
- g.log.info("Starting %s " % cls.__name__)
- '''
- checking for peer status from every node, if peers are in not
- connected state, performing peer probe.
- '''
+ g.log.info("Starting %s ", cls.__name__)
ret = cls.validate_peers_are_connected()
if not ret:
raise ExecutionError("Nodes are not in peer probe state")
def tearDown(self):
-
'''
clean up all volumes and detaches peers from cluster
'''
@@ -47,7 +42,7 @@ class TestConcurrentSet(GlusterBaseClass):
for volume in vol_list:
ret = cleanup_volume(self.mnode, volume)
self.assertTrue(ret, "Failed to Cleanup the Volume %s" % volume)
- g.log.info("Volume deleted successfully : %s" % volume)
+ g.log.info("Volume deleted successfully : %s", volume)
GlusterBaseClass.tearDown.im_func(self)
@@ -64,8 +59,8 @@ class TestConcurrentSet(GlusterBaseClass):
ret = volume_create(self.mnode, self.volname,
self.brick_list, force=False)
self.assertEqual(ret[0], 0, ("Unable"
- "to create volume % s" % self.volname))
- g.log.info("Volume created successfuly % s" % self.volname)
+ "to create volume %s" % self.volname))
+ g.log.info("Volume created successfuly %s", self.volname)
# Create a volume
self.volname = "second-vol"
@@ -76,8 +71,8 @@ class TestConcurrentSet(GlusterBaseClass):
ret = volume_create(self.mnode, self.volname,
self.brick_list, force=False)
self.assertEqual(ret[0], 0, ("Unable"
- "to create volume % s" % self.volname))
- g.log.info("Volume created successfuly % s" % self.volname)
+ "to create volume %s" % self.volname))
+ g.log.info("Volume created successfuly %s", self.volname)
cmd1 = ("for i in `seq 1 100`; do gluster volume set first-vol "
"read-ahead on; done")
@@ -87,8 +82,8 @@ class TestConcurrentSet(GlusterBaseClass):
proc1 = g.run_async(random.choice(self.servers), cmd1)
proc2 = g.run_async(random.choice(self.servers), cmd2)
- ret1, out1, err1 = proc1.async_communicate()
- ret2, out2, err2 = proc2.async_communicate()
+ ret1, _, _ = proc1.async_communicate()
+ ret2, _, _ = proc2.async_communicate()
self.assertEqual(ret1, 0, "Concurrent volume set on different volumes "
"simultaneously failed")
@@ -98,7 +93,7 @@ class TestConcurrentSet(GlusterBaseClass):
g.log.info("Setting options on different volumes @ same time "
"successfully completed")
ret = is_core_file_created(self.servers, test_timestamp)
- if (ret):
+ if ret:
g.log.info("No core file found, glusterd service "
"running successfully")
else:
diff --git a/tests/functional/glusterd/test_nfs_quorum_on_all_vol_types.py b/tests/functional/glusterd/test_nfs_quorum.py
index 64526b0ee..ced5b719f 100644
--- a/tests/functional/glusterd/test_nfs_quorum_on_all_vol_types.py
+++ b/tests/functional/glusterd/test_nfs_quorum.py
@@ -14,11 +14,6 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-""" Description:
- Test Cases for performing NFS disable, enable and
- performing NFS mount and unmoount on all volumes,
- performing different types quorum settings
-"""
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
@@ -28,10 +23,16 @@ from glustolibs.gluster.volume_ops import set_volume_options
@runs_on([['distributed', 'replicated', 'distributed-replicated',
'dispersed', 'distributed-dispersed'], ['nfs']])
class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
+ """
+ Test Cases for performing NFS disable, enable and
+ performing NFS mount and unmoount on all volumes,
+ performing different types quorum settings
+ """
+
@classmethod
def setUpClass(cls):
GlusterBaseClass.setUpClass.im_func(cls)
- g.log.info("Starting %s " % cls.__name__)
+ g.log.info("Starting %s ", cls.__name__)
# checking for peer status from every node
ret = cls.validate_peers_are_connected()
@@ -50,7 +51,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
ret = self.setup_volume()
if not ret:
raise ExecutionError("Volume creation failed: %s" % self.volname)
- g.log.info("Volme created successfully : %s" % self.volname)
+ g.log.info("Volme created successfully : %s", self.volname)
def tearDown(self):
"""
@@ -60,7 +61,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
ret = self.cleanup_volume()
if not ret:
raise ExecutionError("Failed Cleanup the Volume %s" % self.volname)
- g.log.info("Volume deleted successfully : %s" % self.volname)
+ g.log.info("Volume deleted successfully : %s", self.volname)
# Calling GlusterBaseClass tearDown
GlusterBaseClass.tearDown.im_func(self)
@@ -81,7 +82,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
# Mounting a NFS volume
ret = self.mount_volume(self.mounts)
self.assertTrue(ret, "NFS volume mount failed for %s" % self.volname)
- g.log.info("Volume mounted sucessfully : %s" % self.volname)
+ g.log.info("Volume mounted sucessfully : %s", self.volname)
# unmounting NFS Volume
ret = self.unmount_volume(self.mounts)
@@ -94,14 +95,14 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
self.assertTrue(ret, "gluster volume set %s nfs.disable "
"enable failed" % self.volname)
g.log.info("gluster volume set %s nfs.disable "
- "enabled successfully" % self.volname)
+ "enabled successfully", self.volname)
# Mounting a NFS volume
ret = self.mount_volume(self.mounts)
self.assertFalse(ret, "Volume mount should fail for %s, but volume "
"mounted successfully after nfs.disable on"
% self.volname)
- g.log.info("Volume mount failed : %s" % self.volname)
+ g.log.info("Volume mount failed : %s", self.volname)
# performing nfs.disable disable
self.nfs_options['nfs.disable'] = 'disable'
@@ -109,7 +110,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
self.assertTrue(ret, "gluster volume set %s nfs.disable "
"disable failed" % self.volname)
g.log.info("gluster volume set %s nfs.disable "
- "disabled successfully" % self.volname)
+ "disabled successfully", self.volname)
# Enabling server quorum
self.quorum_options = {'cluster.server-quorum-type': 'server'}
@@ -117,7 +118,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
self.assertTrue(ret, "gluster volume set %s cluster.server-quorum-type"
" server Failed" % self.volname)
g.log.info("gluster volume set %s cluster.server-quorum-type server "
- "enabled successfully" % self.volname)
+ "enabled successfully", self.volname)
# Setting Quorum ratio in percentage
self.quorum_perecent = {'cluster.server-quorum-ratio': '51%'}
@@ -125,7 +126,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
self.assertTrue(ret, "gluster volume set all cluster.server-quorum-rat"
"io percentage Failed :%s" % self.servers)
g.log.info("gluster volume set all cluster.server-quorum-ratio 51 "
- "percentage enabled successfully on :%s" % self.servers)
+ "percentage enabled successfully on :%s", self.servers)
# Setting quorum ration in numbers
self.quorum_perecent['cluster.server-quorum-ratio'] = "50"
@@ -133,7 +134,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
self.assertTrue(ret, "gluster volume set all cluster.server-quorum-rat"
"io 50 Failed on :%s" % self.servers)
g.log.info("gluster volume set all cluster.server-quorum-ratio 50 enab"
- "led successfully 0n :%s" % self.servers)
+ "led successfully 0n :%s", self.servers)
# Setting quorum ration in negative numbers
self.quorum_perecent['cluster.server-quorum-ratio'] = "-50"
@@ -142,7 +143,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
"tio should fail for negative numbers on :%s" %
self.servers)
g.log.info("gluster volume set all cluster.server-quorum-ratio Failed "
- "for negative number on :%s" % self.servers)
+ "for negative number on :%s", self.servers)
# Setting quorum ration in negative percentage
self.quorum_perecent['cluster.server-quorum-ratio'] = "-51%"
@@ -151,7 +152,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
"ratio should fail for negative percentage on"
":%s" % self.servers)
g.log.info("gluster volume set all cluster.server-quorum-ratio Failed "
- "for negtive percentage on :%s" % self.servers)
+ "for negtive percentage on :%s", self.servers)
# Setting quorum ration in fraction numbers
self.quorum_perecent['cluster.server-quorum-ratio'] = "1/2"
@@ -160,7 +161,7 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
"ratio should fail for fraction numbers :%s"
% self.servers)
g.log.info("gluster volume set all cluster.server-quorum-ratio "
- "Failed for fraction number :%s" % self.servers)
+ "Failed for fraction number :%s", self.servers)
# Setting quorum ration in negative fraction numbers
self.quorum_perecent['cluster.server-quorum-ratio'] = "-1/2"
@@ -169,4 +170,4 @@ class TestNfsMountAndServerQuorumSettings(GlusterBaseClass):
"ratio should fail for negative fraction numbers"
" :%s" % self.servers)
g.log.info("gluster volume set all cluster.server-quorum-ratio Failed "
- "for negative fraction number :%s" % self.servers)
+ "for negative fraction number :%s", self.servers)
diff --git a/tests/functional/glusterd/test_peer_detach.py b/tests/functional/glusterd/test_peer_detach.py
index 0ed0e678c..2bae76d2a 100644
--- a/tests/functional/glusterd/test_peer_detach.py
+++ b/tests/functional/glusterd/test_peer_detach.py
@@ -14,8 +14,8 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-""" Description:
- Test Cases in this module related to Glusterd peer detach.
+"""
+Test Cases in this module related to Glusterd peer detach.
"""
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
@@ -28,6 +28,9 @@ from glustolibs.gluster.lib_utils import is_core_file_created
@runs_on([['distributed', 'replicated', 'distributed-replicated',
'dispersed', 'distributed-dispersed'], ['glusterfs']])
class PeerDetachVerification(GlusterBaseClass):
+ """
+ Test that peer detach works as expected
+ """
@classmethod
def setUpClass(cls):
GlusterBaseClass.setUpClass.im_func(cls)
@@ -38,14 +41,14 @@ class PeerDetachVerification(GlusterBaseClass):
raise ExecutionError("Peer probe failed ")
else:
g.log.info("All server peers are already in connected state "
- "%s:" % cls.servers)
+ "%s:", cls.servers)
@classmethod
def tearDownClass(cls):
# stopping the volume and Cleaning up the volume
ret = cls.cleanup_volume()
if ret:
- g.log.info("Volume deleted successfully : %s" % cls.volname)
+ g.log.info("Volume deleted successfully : %s", cls.volname)
else:
raise ExecutionError("Failed Cleanup the Volume %s" % cls.volname)
@@ -69,33 +72,33 @@ class PeerDetachVerification(GlusterBaseClass):
self.invalid_ip = '10.11.a'
# Peer detach to specified server
- g.log.info("Start detach specified server :%s" % self.servers[1])
- ret, out, _ = peer_detach(self.mnode, self.servers[1])
+ g.log.info("Start detach specified server :%s", self.servers[1])
+ ret, _, _ = peer_detach(self.mnode, self.servers[1])
self.assertEqual(ret, 0, "Failed to detach server :%s"
% self.servers[1])
# Detached server detaching again, Expected to fail detach
g.log.info("Start detached server detaching "
- "again : %s" % self.servers[1])
- ret, out, _ = peer_detach(self.mnode, self.servers[1])
+ "again : %s", self.servers[1])
+ ret, _, _ = peer_detach(self.mnode, self.servers[1])
self.assertNotEqual(ret, 0, "Detach server should "
"fail :%s" % self.servers[1])
# Probing detached server
- g.log.info("Start probing detached server : %s" % self.servers[1])
+ g.log.info("Start probing detached server : %s", self.servers[1])
ret = peer_probe_servers(self.mnode, self.servers[1])
self.assertTrue(ret, "Peer probe failed from %s to other "
"server : %s" % (self.mnode, self.servers[1]))
# Detach invalid host
- g.log.info("Start detaching invalid host :%s " % self.invalid_ip)
- ret, out, _ = peer_detach(self.mnode, self.invalid_ip)
+ g.log.info("Start detaching invalid host :%s ", self.invalid_ip)
+ ret, _, _ = peer_detach(self.mnode, self.invalid_ip)
self.assertNotEqual(ret, 0, "Detach invalid host should "
"fail :%s" % self.invalid_ip)
# Detach non exist host
- g.log.info("Start detaching non exist host : %s" % self.non_exist_host)
- ret, out, _ = peer_detach(self.mnode, self.non_exist_host)
+ g.log.info("Start detaching non exist host : %s", self.non_exist_host)
+ ret, _, _ = peer_detach(self.mnode, self.non_exist_host)
self.assertNotEqual(ret, 0, "Detach non existing host "
"should fail :%s" % self.non_exist_host)
@@ -107,14 +110,14 @@ class PeerDetachVerification(GlusterBaseClass):
"successfully")
# Creating Volume
- g.log.info("Started creating volume: %s" % self.volname)
+ g.log.info("Started creating volume: %s", self.volname)
ret = self.setup_volume()
self.assertTrue(ret, "Volume creation failed: %s" % self.volname)
# Peer detach one node which contains the bricks of the volume created
g.log.info("Start detaching server %s which is hosting "
- "bricks of a volume" % self.servers[1])
- ret, out, err = peer_detach(self.mnode, self.servers[1])
+ "bricks of a volume", self.servers[1])
+ ret, _, err = peer_detach(self.mnode, self.servers[1])
self.assertNotEqual(ret, 0, "detach server should fail: %s"
% self.servers[1])
msg = ('peer detach: failed: Brick(s) with the peer ' +
@@ -124,8 +127,8 @@ class PeerDetachVerification(GlusterBaseClass):
# Peer detach force a node which is hosting bricks of a volume
g.log.info("start detaching server %s with force option "
- "which is hosting bricks of a volume" % self.servers[1])
- ret, out, err = peer_detach(self.mnode, self.servers[1], force=True)
+ "which is hosting bricks of a volume", self.servers[1])
+ ret, _, err = peer_detach(self.mnode, self.servers[1], force=True)
self.assertNotEqual(ret, 0, "detach server should fail with force "
"option : %s" % self.servers[1])
msg = ('peer detach: failed: Brick(s) with the peer ' +
diff --git a/tests/functional/glusterd/test_probe_glusterd.py b/tests/functional/glusterd/test_probe_glusterd.py
index 0b035c933..d14991dbd 100644
--- a/tests/functional/glusterd/test_probe_glusterd.py
+++ b/tests/functional/glusterd/test_probe_glusterd.py
@@ -29,7 +29,7 @@ class PeerProbeInvalidIpNonExistingHost(GlusterBaseClass):
@classmethod
def setUpClass(cls):
GlusterBaseClass.setUpClass.im_func(cls)
- g.log.info("Starting %s " % cls.__name__)
+ g.log.info("Starting %s ", cls.__name__)
def setUp(self):
"""
@@ -57,7 +57,7 @@ class PeerProbeInvalidIpNonExistingHost(GlusterBaseClass):
'''
ret, test_timestamp, _ = g.run_local('date +%s')
test_timestamp = test_timestamp.strip()
- g.log.info("Running Test : %s" % self.id())
+ g.log.info("Running Test : %s", self.id())
# Assigning non existing ip to variable
self.non_exist_ip = '256.256.256.256'
@@ -70,21 +70,21 @@ class PeerProbeInvalidIpNonExistingHost(GlusterBaseClass):
# Peer probe checks for non existing host
g.log.info("peer probe checking for non existing host")
- ret, out, msg = peer_probe(self.mnode, self.non_exist_host)
+ ret, _, _ = peer_probe(self.mnode, self.non_exist_host)
self.assertNotEqual(ret, 0, "peer probe should fail for "
"non existhost: %s" % self.non_exist_host)
g.log.info("peer probe failed for non existing host")
# Peer probe checks for invalid ip
g.log.info("peer probe checking for invalid ip")
- ret, out, msg = peer_probe(self.mnode, self.invalid_ip)
+ ret, _, _ = peer_probe(self.mnode, self.invalid_ip)
self.assertNotEqual(ret, 0, "peer probe shouldfail for "
"invalid ip: %s" % self.invalid_ip)
g.log.info("peer probe failed for invalid_ip")
# peer probe checks for non existing ip
g.log.info("peer probe checking for non existing ip")
- ret, out, msg = peer_probe(self.mnode, self.non_exist_ip)
+ ret, _, _ = peer_probe(self.mnode, self.non_exist_ip)
self.assertNotEqual(ret, 0, "peer probe should fail for non exist "
"ip :%s" % self.non_exist_ip)
g.log.info("peer probe failed for non existing ip")
diff --git a/tests/functional/glusterd/test_quorum_related_messages_in_syslog.py b/tests/functional/glusterd/test_quorum_syslog.py
index 2b21a2a29..cefa328b8 100644
--- a/tests/functional/glusterd/test_quorum_related_messages_in_syslog.py
+++ b/tests/functional/glusterd/test_quorum_syslog.py
@@ -14,10 +14,8 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-""" Description:
- Test Cases in this module related to quorum
- related messages in syslog, when there are more volumes.
-"""
+from time import sleep
+import re
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
@@ -26,13 +24,15 @@ from glustolibs.gluster.volume_libs import (setup_volume, cleanup_volume)
from glustolibs.gluster.volume_ops import set_volume_options
from glustolibs.gluster.gluster_init import (stop_glusterd, start_glusterd,
is_glusterd_running)
-from time import sleep
-import re
@runs_on([['distributed', 'replicated', 'distributed-replicated',
'dispersed', 'distributed-dispersed'], ['glusterfs']])
class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
+ """
+ Test Cases in this module related to quorum
+ related messages in syslog, when there are more volumes.
+ """
@classmethod
def setUpClass(cls):
GlusterBaseClass.setUpClass.im_func(cls)
@@ -77,8 +77,8 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
# Checking glusterd service running or not
ret = is_glusterd_running(self.servers[1])
- if (ret == 0):
- g.log.info("glusterd running on :%s" % self.servers[1])
+ if ret == 0:
+ g.log.info("glusterd running on :%s", self.servers[1])
else:
raise ExecutionError("glusterd not running on :%s"
% self.servers[1])
@@ -90,14 +90,14 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
# deleting volumes
peers_not_connected = True
count = 0
- while(count < 10):
+ while count < 10:
ret = self.validate_peers_are_connected()
if ret:
peers_not_connected = False
break
count += 1
sleep(5)
- if (peers_not_connected):
+ if peers_not_connected:
raise ExecutionError("Servers are not in peer probed state")
# stopping the volume and Cleaning up the volume
@@ -106,7 +106,7 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
if not ret:
raise ExecutionError("Failed to Cleanup the "
"Volume %s" % volume)
- g.log.info("Volume deleted successfully : %s" % volume)
+ g.log.info("Volume deleted successfully : %s", volume)
# Calling GlusterBaseClass tearDown
GlusterBaseClass.tearDown.im_func(self)
@@ -125,6 +125,8 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
for both the volumes in /var/log/messages and
/var/log/glusterfs/glusterd.log
"""
+ # pylint: disable=too-many-locals
+ # pylint: disable=too-many-statements
self.log_messages = "/var/log/messages"
self.log_glusterd = "/var/log/glusterfs/glusterd.log"
@@ -136,7 +138,7 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
self.assertTrue(ret, "gluster volume set %s cluster.server"
"-quorum-type server Failed" % self.volname)
g.log.info("gluster volume set %s cluster.server-quorum"
- "-type server enabled successfully" % self.volname)
+ "-type server enabled successfully", self.volname)
# Setting Quorum ratio in percentage
self.quorum_perecent = {'cluster.server-quorum-ratio': '91%'}
@@ -144,7 +146,7 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
self.assertTrue(ret, "gluster volume set all cluster.server-quorum-"
"ratio percentage Failed :%s" % self.servers)
g.log.info("gluster volume set all cluster.server-quorum-ratio 91 "
- "percentage enabled successfully :%s" % self.servers)
+ "percentage enabled successfully :%s", self.servers)
# counting quorum regain messages-id '106002' in /var/log/messages
# file, before glusterd services stop
@@ -169,8 +171,8 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
self.glusterd_service = False
self.assertTrue(ret, "Failed stop glusterd services : %s"
% self.servers[1])
- g.log.info("Stopped glusterd services successfully on: %s"
- % self.servers[1])
+ g.log.info("Stopped glusterd services successfully on: %s",
+ self.servers[1])
# checking glusterd service stopped or not
ret = is_glusterd_running(self.servers[1])
@@ -181,7 +183,7 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
count = 0
msg_count = False
expected_msg_id_count = int(before_glusterd_stop_msgid_count) + 2
- while (count <= 10):
+ while count <= 10:
ret, after_glusterd_stop_msgid_count, _ = g.run(self.mnode,
cmd_messages)
if(re.search(r'\b' + str(expected_msg_id_count) + r'\b',
@@ -190,8 +192,8 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
break
sleep(5)
count += 1
- self.assertTrue(msg_count, "Failed to grep quorum regain message-id "
- "106002 count in :%s" % self.log_messages)
+ self.assertTrue(msg_count, "Failed to grep quorum regain message-id "
+ "106002 count in :%s" % self.log_messages)
# counting quorum regain messages-id '106002' in
# /var/log/glusterfs/glusterd.log file after glusterd services stop
@@ -209,7 +211,7 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
"in : %s" % self.log_messages)
g.log.info("regain messages recorded for two volumes "
"successfully after glusterd services stop "
- ":%s" % self.log_messages)
+ ":%s", self.log_messages)
# Finding quorum regain message-id count difference between before
# and after glusterd services stop in /var/log/glusterfs/glusterd.log
@@ -218,7 +220,7 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
self.assertEqual(count_diff, 2, "Failed to record regain messages in "
": %s" % self.log_glusterd)
g.log.info("regain messages recorded for two volumes successfully "
- "after glusterd services stop :%s" % self.log_glusterd)
+ "after glusterd services stop :%s", self.log_glusterd)
# counting quorum messages-id '106003' in a /var/log/messages file
# before glusterd services start
@@ -253,7 +255,7 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
count = 0
expected_msg_id_count = int(before_glusterd_start_msgid_count) + 2
msg_count = False
- while(count <= 10):
+ while count <= 10:
ret, after_glusterd_start_msgid_count, _ = g.run(self.mnode,
cmd_messages)
if (re.search(r'\b' + str(expected_msg_id_count) + r'\b',
@@ -280,7 +282,7 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
self.assertEqual(count_diff, 2, "Failed to record regain "
"messages in :%s" % self.log_messages)
g.log.info("regain messages recorded for two volumes successfully "
- "after glusterd services start in :%s" % self.log_messages)
+ "after glusterd services start in :%s", self.log_messages)
# Finding quorum regain message-id count difference between before
# and after glusterd services start in /var/log/glusterfs/glusterd.log
@@ -289,4 +291,4 @@ class TestQuorumRelatedMessagesInSyslog(GlusterBaseClass):
self.assertEqual(count_diff, 2, "Failed to record regain messages "
"in : %s" % self.log_glusterd)
g.log.info("regain messages recorded for two volumes successfully "
- "after glusterd services start :%s" % self.log_glusterd)
+ "after glusterd services start :%s", self.log_glusterd)
diff --git a/tests/functional/glusterd/test_rebalance_status_from_new_node.py b/tests/functional/glusterd/test_rebalance_new_node.py
index dd71bcc3d..a9cd0fea6 100644
--- a/tests/functional/glusterd/test_rebalance_status_from_new_node.py
+++ b/tests/functional/glusterd/test_rebalance_new_node.py
@@ -57,7 +57,7 @@ class TestRebalanceStatus(GlusterBaseClass):
"file_dir_ops.py")
ret = upload_scripts(self.clients, script_local_path)
if not ret:
- raise ExecutionError("Failed to upload IO scripts to clients %s",
+ raise ExecutionError("Failed to upload IO scripts to clients %s" %
self.clients)
g.log.info("Successfully uploaded IO scripts to clients %s",
self.clients)
@@ -76,7 +76,7 @@ class TestRebalanceStatus(GlusterBaseClass):
for volume in vol_list:
ret = cleanup_volume(self.mnode, volume)
if ret is True:
- g.log.info("Volume deleted successfully : %s" % volume)
+ g.log.info("Volume deleted successfully : %s", volume)
else:
raise ExecutionError("Failed Cleanup the"
" Volume %s" % volume)
@@ -122,8 +122,8 @@ class TestRebalanceStatus(GlusterBaseClass):
self.mounts[0].client_system, self.mount_type)
self.assertTrue(ret, "Volume not mounted on mount point: %s"
% self.mounts[0].mountpoint)
- g.log.info("Volume %s mounted on %s" % (self.volname,
- self.mounts[0].mountpoint))
+ g.log.info("Volume %s mounted on %s", self.volname,
+ self.mounts[0].mountpoint)
# run IOs
g.log.info("Starting IO on all mounts...")
@@ -148,7 +148,7 @@ class TestRebalanceStatus(GlusterBaseClass):
brick_to_add = form_bricks_list(self.mnode, self.volname, 1,
self.servers[0:3],
servers_info_from_three_nodes)
- ret, out, err = add_brick(self.mnode, self.volname, brick_to_add)
+ ret, _, _ = add_brick(self.mnode, self.volname, brick_to_add)
self.assertEqual(ret, 0, "Failed to add a brick to %s" % self.volname)
ret, _, _ = rebalance_start(self.mnode, self.volname)
diff --git a/tests/functional/glusterd/test_volume_create.py b/tests/functional/glusterd/test_volume_create.py
index ad4997925..3a181597b 100644
--- a/tests/functional/glusterd/test_volume_create.py
+++ b/tests/functional/glusterd/test_volume_create.py
@@ -14,6 +14,7 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+import random
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.exceptions import ExecutionError
@@ -28,11 +29,13 @@ from glustolibs.gluster.peer_ops import (peer_detach_servers, peer_probe,
peer_detach)
from glustolibs.gluster.lib_utils import form_bricks_list
from glustolibs.gluster.gluster_init import start_glusterd, stop_glusterd
-import random
@runs_on([['distributed'], ['glusterfs']])
class TestVolumeCreate(GlusterBaseClass):
+ '''
+ Test glusterd behavior with the gluster volume create command
+ '''
@classmethod
def setUpClass(cls):
@@ -68,12 +71,19 @@ class TestVolumeCreate(GlusterBaseClass):
ret = cleanup_volume(self.mnode, volume)
if not ret:
raise ExecutionError("Unable to delete volume % s" % volume)
- g.log.info("Volume deleted successfully : %s" % volume)
+ g.log.info("Volume deleted successfully : %s", volume)
GlusterBaseClass.tearDown.im_func(self)
def test_volume_create(self):
-
+ '''
+ In this test case, volume create operations such as creating volume
+ with non existing brick path, already used brick, already existing
+ volume name, bring the bricks to online with volume start force,
+ creating a volume with bricks in another cluster, creating a volume
+ when one of the brick node is down are validated.
+ '''
+ # pylint: disable=too-many-statements
# create and start a volume
self.volume['name'] = "first_volume"
self.volname = "first_volume"
@@ -157,15 +167,15 @@ class TestVolumeCreate(GlusterBaseClass):
ret, _, _ = peer_probe(self.servers[0], self.servers[1])
self.assertEqual(ret, 0, "Peer probe from %s to %s is failed"
% (self.servers[0], self.servers[1]))
- g.log.info("Peer probe is success from %s to %s"
- % (self.servers[0], self.servers[1]))
+ g.log.info("Peer probe is success from %s to %s",
+ self.servers[0], self.servers[1])
# form cluster 2
ret, _, _ = peer_probe(self.servers[2], self.servers[3])
self.assertEqual(ret, 0, "Peer probe from %s to %s is failed"
% (self.servers[2], self.servers[3]))
- g.log.info("Peer probe is success from %s to %s"
- % (self.servers[2], self.servers[3]))
+ g.log.info("Peer probe is success from %s to %s",
+ self.servers[2], self.servers[3])
# Creating a volume with bricks which are part of another
# cluster should fail
diff --git a/tests/functional/glusterd/test_volume_delete.py b/tests/functional/glusterd/test_volume_delete.py
index 4aa6dca24..e207bb4b0 100644
--- a/tests/functional/glusterd/test_volume_delete.py
+++ b/tests/functional/glusterd/test_volume_delete.py
@@ -14,6 +14,8 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+import re
+import random
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
@@ -23,8 +25,6 @@ from glustolibs.gluster.volume_ops import (volume_stop)
from glustolibs.gluster.brick_libs import get_all_bricks
from glustolibs.gluster.gluster_init import stop_glusterd, start_glusterd
from glustolibs.gluster.peer_ops import peer_probe_servers, is_peer_connected
-import re
-import random
@runs_on([['distributed', 'replicated', 'distributed-replicated', 'dispersed',
@@ -66,7 +66,7 @@ class TestVolumeDelete(GlusterBaseClass):
ret = cleanup_volume(self.mnode, volume)
if not ret:
raise ExecutionError("Unable to delete volume % s" % volume)
- g.log.info("Volume deleted successfully : %s" % volume)
+ g.log.info("Volume deleted successfully : %s", volume)
GlusterBaseClass.tearDown.im_func(self)
@@ -104,8 +104,8 @@ class TestVolumeDelete(GlusterBaseClass):
self.assertEqual(ret, 0, "Volume stop failed")
# try to delete the volume, it should fail
- ret, out, err = g.run(self.mnode, "gluster volume delete %s "
- "--mode=script" % self.volname)
+ ret, _, err = g.run(self.mnode, "gluster volume delete %s "
+ "--mode=script" % self.volname)
self.assertNotEqual(ret, 0, "Volume delete succeeded when one of the"
" brick node is down")
if re.search(r'Some of the peers are down', err):
diff --git a/tests/functional/glusterd/test_volume_get.py b/tests/functional/glusterd/test_volume_get.py
index 75a155774..228b15209 100644
--- a/tests/functional/glusterd/test_volume_get.py
+++ b/tests/functional/glusterd/test_volume_get.py
@@ -14,8 +14,8 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-""" Description:
- Test Cases in this module related to Gluster volume get functionality
+"""
+Test Cases in this module related to Gluster volume get functionality
"""
from glusto.core import Glusto as g
@@ -85,6 +85,7 @@ class TestVolumeGet(GlusterBaseClass):
gluster volume get <vol-name> all
12. Check for any cores in "cd /"
"""
+ # pylint: disable=too-many-statements
# time stamp of current test case
ret, test_timestamp, _ = g.run_local('date +%s')
@@ -92,8 +93,8 @@ class TestVolumeGet(GlusterBaseClass):
# performing gluster volume get command for non exist volume io-cache
self.non_exist_volume = "abc99"
- ret, out, err = g.run(self.mnode, "gluster volume get %s io-cache"
- % self.non_exist_volume)
+ ret, _, err = g.run(self.mnode, "gluster volume get %s io-cache"
+ % self.non_exist_volume)
self.assertNotEqual(ret, 0, "gluster volume get command should fail "
"for non existing volume with io-cache "
"option :%s" % self.non_exist_volume)
@@ -103,11 +104,11 @@ class TestVolumeGet(GlusterBaseClass):
% self.non_exist_volume)
g.log.info("gluster volume get command failed successfully for non "
"existing volume with io-cache option"
- ":%s" % self.non_exist_volume)
+ ":%s", self.non_exist_volume)
# performing gluster volume get all command for non exist volume
- ret, out, err = g.run(self.mnode, "gluster volume get "
- "%s all" % self.non_exist_volume)
+ ret, _, err = g.run(self.mnode, "gluster volume get %s all" %
+ self.non_exist_volume)
self.assertNotEqual(ret, 0, "gluster volume get command should fail "
"for non existing volume %s with all "
"option" % self.non_exist_volume)
@@ -115,12 +116,12 @@ class TestVolumeGet(GlusterBaseClass):
"volume with all option:%s"
% self.non_exist_volume)
g.log.info("gluster volume get command failed successfully for non "
- "existing volume with all option :%s"
- % self.non_exist_volume)
+ "existing volume with all option :%s",
+ self.non_exist_volume)
# performing gluster volume get command for non exist volume
- ret, out, err = g.run(self.mnode, "gluster volume get "
- "%s" % self.non_exist_volume)
+ ret, _, err = g.run(self.mnode, "gluster volume get "
+ "%s" % self.non_exist_volume)
self.assertNotEqual(ret, 0, "gluster volume get command should "
"fail for non existing volume :%s"
% self.non_exist_volume)
@@ -128,10 +129,10 @@ class TestVolumeGet(GlusterBaseClass):
self.assertIn(msg, err, "No proper error message for non existing "
"volume :%s" % self.non_exist_volume)
g.log.info("gluster volume get command failed successfully for non "
- "existing volume :%s" % self.non_exist_volume)
+ "existing volume :%s", self.non_exist_volume)
# performing gluster volume get command without any volume name given
- ret, out, err = g.run(self.mnode, "gluster volume get")
+ ret, _, err = g.run(self.mnode, "gluster volume get")
self.assertNotEqual(ret, 0, "gluster volume get command should fail")
self.assertIn(msg, err, "No proper error message for gluster "
"volume get command")
@@ -139,7 +140,7 @@ class TestVolumeGet(GlusterBaseClass):
# performing gluster volume get io-cache command
# without any volume name given
- ret, out, err = g.run(self.mnode, "gluster volume get io-cache")
+ ret, _, err = g.run(self.mnode, "gluster volume get io-cache")
self.assertNotEqual(ret, 0, "gluster volume get io-cache command "
"should fail")
self.assertIn(msg, err, "No proper error message for gluster volume "
@@ -147,8 +148,8 @@ class TestVolumeGet(GlusterBaseClass):
g.log.info("gluster volume get io-cache command failed successfully")
# gluster volume get volname with non existing option
- ret, out, err = g.run(self.mnode, "gluster volume "
- "get %s temp.key" % self.volname)
+ ret, _, err = g.run(self.mnode, "gluster volume "
+ "get %s temp.key" % self.volname)
self.assertNotEqual(ret, 0, "gluster volume get command should fail "
"for existing volume %s with non-existing "
"option" % self.volname)
@@ -157,8 +158,8 @@ class TestVolumeGet(GlusterBaseClass):
"volume %s with non-existing option"
% self.volname)
g.log.info("gluster volume get command failed successfully for "
- "existing volume %s with non existing option"
- % self.volname)
+ "existing volume %s with non existing option",
+ self.volname)
# perfroming gluster volume get volname all
@@ -166,7 +167,7 @@ class TestVolumeGet(GlusterBaseClass):
self.assertIsNotNone(ret, "gluster volume get %s all command "
"failed" % self.volname)
g.log.info("gluster volume get %s all command executed "
- "successfully" % self.volname)
+ "successfully", self.volname)
# performing gluster volume get volname io-cache
ret = get_volume_options(self.mnode, self.volname, "io-cache")
@@ -182,8 +183,8 @@ class TestVolumeGet(GlusterBaseClass):
self.assertTrue(ret, "gluster volume set %s performance.low-prio-"
"threads failed" % self.volname)
g.log.info("gluster volume set %s "
- "performance.low-prio-threads executed successfully"
- % self.volname)
+ "performance.low-prio-threads executed successfully",
+ self.volname)
# Performing gluster volume get all, checking low-prio threads value
ret = get_volume_options(self.mnode, self.volname, "all")
@@ -198,7 +199,7 @@ class TestVolumeGet(GlusterBaseClass):
self.assertIsNotNone(ret, "gluster volume get %s all command "
"failed" % self.volname)
g.log.info("gluster volume get %s all command executed "
- "successfully" % self.volname)
+ "successfully", self.volname)
# Checking core file created or not in "/" directory
ret = is_core_file_created(self.servers, test_timestamp)
diff --git a/tests/functional/glusterd/test_volume_op.py b/tests/functional/glusterd/test_volume_op.py
deleted file mode 100644
index 93851e011..000000000
--- a/tests/functional/glusterd/test_volume_op.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright (C) 2016-2017 Red Hat, Inc. <http://www.redhat.com>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-from glusto.core import Glusto as g
-from glustolibs.gluster.exceptions import ExecutionError
-from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
-from glustolibs.gluster.volume_ops import (volume_create, volume_start,
- volume_stop, volume_delete,
- get_volume_list, get_volume_info)
-from glustolibs.gluster.volume_libs import (setup_volume, cleanup_volume)
-from glustolibs.gluster.peer_ops import (peer_probe, peer_detach)
-from glustolibs.gluster.lib_utils import form_bricks_list
-
-
-@runs_on([['distributed', 'replicated', 'distributed-replicated', 'dispersed',
- 'distributed-dispersed'], ['glusterfs']])
-class TestVolumeOperations(GlusterBaseClass):
-
- @classmethod
- def setUpClass(cls):
-
- # Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
-
- # check whether peers are in connected state
- ret = cls.validate_peers_are_connected()
- if not ret:
- raise ExecutionError("Peers are not in connected state")
-
- def tearDown(self):
-
- vol_list = get_volume_list(self.mnode)
- if vol_list is None:
- raise ExecutionError("Failed to get the volume list")
-
- for volume in vol_list:
- ret = cleanup_volume(self.mnode, volume)
- if not ret:
- raise ExecutionError("Unable to delete volume % s" % volume)
- g.log.info("Volume deleted successfully : %s" % volume)
-
- GlusterBaseClass.tearDown.im_func(self)
-
- def test_volume_op(self):
-
- # Starting a non existing volume should fail
- ret, _, _ = volume_start(self.mnode, "no_vol", force=True)
- self.assertNotEqual(ret, 0, "Expected: It should fail to Start a non"
- " existing volume. Actual: Successfully started "
- "a non existing volume")
- g.log.info("Starting a non existing volume is failed")
-
- # Stopping a non existing volume should fail
- ret, _, _ = volume_stop(self.mnode, "no_vol", force=True)
- self.assertNotEqual(ret, 0, "Expected: It should fail to stop "
- "non-existing volume. Actual: Successfully "
- "stopped a non existing volume")
- g.log.info("Stopping a non existing volume is failed")
-
- # Deleting a non existing volume should fail
- ret = volume_delete(self.mnode, "no_vol")
- self.assertTrue(ret, "Expected: It should fail to delete a "
- "non existing volume. Actual:Successfully deleted "
- "a non existing volume")
- g.log.info("Deleting a non existing volume is failed")
-
- # Detach a server and try to create volume with node
- # which is not in cluster
- ret, _, _ = peer_detach(self.mnode, self.servers[1])
- self.assertEqual(ret, 0, ("Peer detach is failed"))
- g.log.info("Peer detach is successful")
-
- num_of_bricks = len(self.servers)
- bricks_list = form_bricks_list(self.mnode, self.volname, num_of_bricks,
- self.servers, self.all_servers_info)
-
- ret, _, _ = volume_create(self.mnode, self.volname, bricks_list)
- self.assertNotEqual(ret, 0, "Successfully created volume with brick "
- "from which is not a part of node")
- g.log.info("Creating a volume with brick from node which is not part "
- "of cluster is failed")
-
- # Peer probe the detached server
- ret, _, _ = peer_probe(self.mnode, self.servers[1])
- self.assertEqual(ret, 0, ("Peer probe is failed"))
- g.log.info("Peer probe is successful")
-
- # Create and start a volume
- ret = setup_volume(self.mnode, self.all_servers_info, self.volume,
- force=True)
- self.assertTrue(ret, "Failed to create the volume")
- g.log.info("Successfully created and started the volume")
-
- # Starting already started volume should fail
- ret, _, _ = volume_start(self.mnode, self.volname)
- self.assertNotEqual(ret, 0, "Expected: It should fail to start a "
- "already started volume. Actual:Successfully"
- " started a already started volume ")
- g.log.info("Starting a already started volume is Failed.")
-
- # Deleting a volume without stopping should fail
- ret = volume_delete(self.mnode, self.volname)
- self.assertFalse(ret, ("Expected: It should fail to delete a volume"
- " without stopping. Actual: Successfully "
- "deleted a volume without stopping it"))
- g.log.error("Failed to delete a volume without stopping it")
-
- # Stopping a volume should succeed
- ret, _, _ = volume_stop(self.mnode, self.volname)
- self.assertEqual(ret, 0, ("volume stop is failed"))
- g.log.info("Volume stop is success")
-
- # Stopping a already stopped volume should fail
- ret, _, _ = volume_stop(self.mnode, self.volname)
- self.assertNotEqual(ret, 0, "Expected: It should fail to stop a "
- "already stopped volume . Actual: Successfully"
- "stopped a already stopped volume")
- g.log.info("Volume stop is failed on already stopped volume")
-
- # Deleting a volume should succeed
- ret = volume_delete(self.mnode, self.volname)
- self.assertTrue(ret, ("Volume delete is failed"))
- g.log.info("Volume delete is success")
-
- # Deleting a non existing volume should fail
- ret = volume_delete(self.mnode, self.volname)
- self.assertTrue(ret, "Expected: It should fail to delete a non "
- "existing volume. Actual:Successfully deleted a "
- "non existing volume")
- g.log.info("Volume delete is failed for non existing volume")
-
- # Volume info command should succeed
- ret = get_volume_info(self.mnode)
- self.assertIsNotNone(ret, "volume info command failed")
- g.log.info("Volume info command is success")
diff --git a/tests/functional/glusterd/test_volume_operations.py b/tests/functional/glusterd/test_volume_operations.py
index a8e75ad8b..fc8d8b0b6 100644
--- a/tests/functional/glusterd/test_volume_operations.py
+++ b/tests/functional/glusterd/test_volume_operations.py
@@ -14,27 +14,26 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+import random
+import re
+import os
+
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_ops import (volume_create, volume_start,
- get_volume_list)
+ get_volume_list, volume_stop,
+ volume_delete, get_volume_info)
+
from glustolibs.gluster.brick_libs import (are_bricks_online)
-from glustolibs.gluster.volume_libs import cleanup_volume
+from glustolibs.gluster.volume_libs import cleanup_volume, setup_volume
+from glustolibs.gluster.peer_ops import (peer_probe, peer_detach)
from glustolibs.gluster.lib_utils import form_bricks_list
from glustolibs.gluster.exceptions import ExecutionError
-import random
-import re
-import os
@runs_on([['distributed'], ['glusterfs']])
class TestVolumeCreate(GlusterBaseClass):
- @classmethod
- def setUpClass(cls):
- # Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
-
def setUp(self):
GlusterBaseClass.setUp.im_func(self)
# check whether peers are in connected state
@@ -52,17 +51,11 @@ class TestVolumeCreate(GlusterBaseClass):
for volume in vol_list:
ret = cleanup_volume(self.mnode, volume)
if not ret:
- raise ExecutionError("Unable to delete volume % s" % volume)
- g.log.info("Volume deleted successfully : %s" % volume)
+ raise ExecutionError("Unable to delete volume %s" % volume)
+ g.log.info("Volume deleted successfully : %s", volume)
GlusterBaseClass.tearDown.im_func(self)
- @classmethod
- def tearDownClass(cls):
-
- # Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDownClass.im_func(cls)
-
def test_volume_start_force(self):
# get the brick list and create a volume
@@ -97,6 +90,7 @@ class TestVolumeCreate(GlusterBaseClass):
g.log.info("Volume start force didn't bring the brick online")
def test_volume_create_on_brick_root(self):
+ # pylint: disable=too-many-locals
# try to create a volume on brick root path without using force and
# with using force
@@ -154,7 +148,7 @@ class TestVolumeCreate(GlusterBaseClass):
ret, _, _ = g.run(server, cmd1)
self.assertEqual(ret, 0, "Failed to delete the files")
g.log.info("Successfully deleted the files")
- ret, out, err = g.run(server, cmd2)
+ ret, out, _ = g.run(server, cmd2)
if re.search("trusted.glusterfs.volume-id", out):
ret, _, _ = g.run(server, cmd3)
self.assertEqual(ret, 0, "Failed to delete the xattrs")
@@ -167,3 +161,96 @@ class TestVolumeCreate(GlusterBaseClass):
# creation of volume should succeed
ret, _, _ = volume_create(self.mnode, self.volname, same_bricks_list)
self.assertEqual(ret, 0, "Failed to create volume")
+
+ def test_volume_op(self):
+
+ # Starting a non existing volume should fail
+ ret, _, _ = volume_start(self.mnode, "no_vol", force=True)
+ self.assertNotEqual(ret, 0, "Expected: It should fail to Start a non"
+ " existing volume. Actual: Successfully started "
+ "a non existing volume")
+ g.log.info("Starting a non existing volume is failed")
+
+ # Stopping a non existing volume should fail
+ ret, _, _ = volume_stop(self.mnode, "no_vol", force=True)
+ self.assertNotEqual(ret, 0, "Expected: It should fail to stop "
+ "non-existing volume. Actual: Successfully "
+ "stopped a non existing volume")
+ g.log.info("Stopping a non existing volume is failed")
+
+ # Deleting a non existing volume should fail
+ ret = volume_delete(self.mnode, "no_vol")
+ self.assertTrue(ret, "Expected: It should fail to delete a "
+ "non existing volume. Actual:Successfully deleted "
+ "a non existing volume")
+ g.log.info("Deleting a non existing volume is failed")
+
+ # Detach a server and try to create volume with node
+ # which is not in cluster
+ ret, _, _ = peer_detach(self.mnode, self.servers[1])
+ self.assertEqual(ret, 0, ("Peer detach is failed"))
+ g.log.info("Peer detach is successful")
+
+ num_of_bricks = len(self.servers)
+ bricks_list = form_bricks_list(self.mnode, self.volname, num_of_bricks,
+ self.servers, self.all_servers_info)
+
+ ret, _, _ = volume_create(self.mnode, self.volname, bricks_list)
+ self.assertNotEqual(ret, 0, "Successfully created volume with brick "
+ "from which is not a part of node")
+ g.log.info("Creating a volume with brick from node which is not part "
+ "of cluster is failed")
+
+ # Peer probe the detached server
+ ret, _, _ = peer_probe(self.mnode, self.servers[1])
+ self.assertEqual(ret, 0, ("Peer probe is failed"))
+ g.log.info("Peer probe is successful")
+
+ # Create and start a volume
+ ret = setup_volume(self.mnode, self.all_servers_info, self.volume,
+ force=True)
+ self.assertTrue(ret, "Failed to create the volume")
+ g.log.info("Successfully created and started the volume")
+
+ # Starting already started volume should fail
+ ret, _, _ = volume_start(self.mnode, self.volname)
+ self.assertNotEqual(ret, 0, "Expected: It should fail to start a "
+ "already started volume. Actual:Successfully"
+ " started a already started volume ")
+ g.log.info("Starting a already started volume is Failed.")
+
+ # Deleting a volume without stopping should fail
+ ret = volume_delete(self.mnode, self.volname)
+ self.assertFalse(ret, ("Expected: It should fail to delete a volume"
+ " without stopping. Actual: Successfully "
+ "deleted a volume without stopping it"))
+ g.log.error("Failed to delete a volume without stopping it")
+
+ # Stopping a volume should succeed
+ ret, _, _ = volume_stop(self.mnode, self.volname)
+ self.assertEqual(ret, 0, ("volume stop is failed"))
+ g.log.info("Volume stop is success")
+
+ # Stopping a already stopped volume should fail
+ ret, _, _ = volume_stop(self.mnode, self.volname)
+ self.assertNotEqual(ret, 0, "Expected: It should fail to stop a "
+ "already stopped volume . Actual: Successfully"
+ "stopped a already stopped volume")
+ g.log.info("Volume stop is failed on already stopped volume")
+
+ # Deleting a volume should succeed
+ ret = volume_delete(self.mnode, self.volname)
+ self.assertTrue(ret, ("Volume delete is failed"))
+ g.log.info("Volume delete is success")
+
+ # Deleting a non existing volume should fail
+ ret = volume_delete(self.mnode, self.volname)
+ self.assertTrue(ret, "Expected: It should fail to delete a non "
+ "existing volume. Actual:Successfully deleted a "
+ "non existing volume")
+ g.log.info("Volume delete is failed for non existing volume")
+
+ # Volume info command should succeed
+ ret = get_volume_info(self.mnode)
+ self.assertIsNotNone(ret, "volume info command failed")
+ g.log.info("Volume info command is success")
diff --git a/tests/functional/glusterd/test_volume_reset.py b/tests/functional/glusterd/test_volume_reset.py
index 2bb8c4c24..f61fdaaba 100644
--- a/tests/functional/glusterd/test_volume_reset.py
+++ b/tests/functional/glusterd/test_volume_reset.py
@@ -15,13 +15,10 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
""" Description:
- Test Cases in this module related to Glusterd volume reset validation
- with bitd, scrub and snapd daemons running or not
"""
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
-from glustolibs.gluster.peer_ops import peer_probe_servers
from glustolibs.gluster.volume_libs import cleanup_volume
from glustolibs.gluster.bitrot_ops import (enable_bitrot, is_bitd_running,
is_scrub_process_running)
@@ -31,33 +28,20 @@ from glustolibs.gluster.uss_ops import enable_uss, is_snapd_running
@runs_on([['distributed', 'replicated', 'distributed-replicated',
'dispersed', 'distributed-dispersed'], ['glusterfs']])
class GlusterdVolumeReset(GlusterBaseClass):
+ '''
+ Test Cases in this module related to Glusterd volume reset validation
+ with bitd, scrub and snapd daemons running or not
+ '''
@classmethod
def setUpClass(cls):
GlusterBaseClass.setUpClass.im_func(cls)
- g.log.info("Starting %s " % cls.__name__)
- '''
- checking for peer status from every node, if peers are in not
- connected state, performing peer probe.
- '''
- ret = cls.validate_peers_are_connected()
- if not ret:
- ret = peer_probe_servers(cls.mnode, cls.servers)
- if ret:
- g.log.info("peers are connected successfully from %s to other \
- servers in severlist %s:" % (cls.mnode, cls.servers))
- else:
- g.log.error("Peer probe failed from %s to other \
- servers in severlist %s:" % (cls.mnode, cls.servers))
- raise ExecutionError("Peer probe failed ")
- else:
- g.log.info("All server peers are already in connected state\
- %s:" % cls.servers)
+ g.log.info("Starting %s ", cls.__name__)
# Creating Volume
g.log.info("Started creating volume")
ret = cls.setup_volume()
if ret:
- g.log.info("Volme created successfully : %s" % cls.volname)
+ g.log.info("Volme created successfully : %s", cls.volname)
else:
raise ExecutionError("Volume creation failed: %s" % cls.volname)
@@ -71,9 +55,9 @@ class GlusterdVolumeReset(GlusterBaseClass):
# command for volume reset
g.log.info("started resetting volume")
cmd = "gluster volume reset " + self.volname
- ret, out, _ = g.run(self.mnode, cmd)
- if (ret == 0):
- g.log.info("volume restted successfully :%s" % self.volname)
+ ret, _, _ = g.run(self.mnode, cmd)
+ if ret == 0:
+ g.log.info("volume reset successfully :%s", self.volname)
else:
raise ExecutionError("Volume reset Failed :%s" % self.volname)
@@ -89,7 +73,7 @@ class GlusterdVolumeReset(GlusterBaseClass):
# stopping the volume and Cleaning up the volume
ret = cleanup_volume(cls.mnode, cls.volname)
if ret:
- g.log.info("Volume deleted successfully : %s" % cls.volname)
+ g.log.info("Volume deleted successfully : %s", cls.volname)
else:
raise ExecutionError("Failed Cleanup the Volume %s" % cls.volname)
@@ -103,52 +87,47 @@ class GlusterdVolumeReset(GlusterBaseClass):
-> Eanble Uss on same volume
-> Reset the volume with force
-> Verify all the daemons(BitD, Scrub & Uss) are running or not
- :return:
'''
# enable bitrot and scrub on volume
g.log.info("Enabling bitrot")
- ret, out, _ = enable_bitrot(self.mnode, self.volname)
- self.assertEqual(ret, 0, "Failed to enable bitrot on\
- volume: %s" % self.volname)
- g.log.info("Bitd and scrub daemons enabled\
- successfully on volume :%s" % self.volname)
+ ret, _, _ = enable_bitrot(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to enable bitrot on volume: %s" %
+ self.volname)
+ g.log.info("Bitd and scrub daemons enabled successfully on volume :%s",
+ self.volname)
# enable uss on volume
g.log.info("Enabling snaphot(uss)")
- ret, out, _ = enable_uss(self.mnode, self.volname)
- self.assertEqual(ret, 0, "Failed to enable uss on\
- volume: %s" % self.volname)
- g.log.info("uss enabled successfully on volume :%s" % self.volname)
+ ret, _, _ = enable_uss(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to enable uss on volume: %s" %
+ self.volname)
+ g.log.info("uss enabled successfully on volume :%s", self.volname)
# Checks bitd, snapd, scrub daemons running or not
g.log.info("checking snapshot, scrub and bitrot\
daemons running or not")
for mnode in self.servers:
ret = is_bitd_running(mnode, self.volname)
- self.assertTrue(ret, "Bitrot Daemon\
- not running on %s server:" % mnode)
+ self.assertTrue(ret, "Bitrot Daemon not running on %s server:"
+ % mnode)
ret = is_scrub_process_running(mnode, self.volname)
- self.assertTrue(ret, "Scrub Daemon\
- not running on %s server:" % mnode)
+ self.assertTrue(ret, "Scrub Daemon not running on %s server:"
+ % mnode)
ret = is_snapd_running(mnode, self.volname)
- self.assertTrue(ret, "Snap Daemon\
- not running %s server:" % mnode)
- g.log.info("bitd, scrub and snapd running\
- successflly on volume :%s" % self.volname)
+ self.assertTrue(ret, "Snap Daemon not running %s server:" % mnode)
+ g.log.info("bitd, scrub and snapd running successflly on volume :%s",
+ self.volname)
# command for volume reset
g.log.info("started resetting volume")
cmd = "gluster volume reset " + self.volname
- ret, out, _ = g.run(self.mnode, cmd)
- self.assertEqual(ret, 0, "volume reset failed\
- for : %s" % self.volname)
- g.log.info("volume resetted succefully :%s" % self.volname)
+ ret, _, _ = g.run(self.mnode, cmd)
+ self.assertEqual(ret, 0, "volume reset failed for : %s" % self.volname)
+ g.log.info("volume resetted succefully :%s", self.volname)
- '''
- After volume reset snap daemon will not be running,
- bitd and scrub deamons will be in running state.
- '''
+ # After volume reset the snap daemon will not be running;
+ # the bitd and scrub daemons will still be in running state.
g.log.info("checking snapshot, scrub and bitrot daemons\
running or not after volume reset")
for mnode in self.servers:
@@ -159,31 +138,30 @@ class GlusterdVolumeReset(GlusterBaseClass):
self.assertTrue(ret, "Scrub Daemon\
not running on %s server:" % mnode)
ret = is_snapd_running(mnode, self.volname)
- self.assertFalse(ret, "Snap Daemon should not be\
- running on %s server after volume reset:" % mnode)
- g.log.info("bitd and scrub daemons are running after volume reset\
- snapd is not running as expected on volume :%s" % self.volname)
+ self.assertFalse(ret, "Snap Daemon should not be running on %s "
+ "server after volume reset:" % mnode)
+ g.log.info("bitd and scrub daemons are running after volume reset "
+ "snapd is not running as expected on volume :%s",
+ self.volname)
# enable uss on volume
g.log.info("Enabling snaphot(uss)")
- ret, out, _ = enable_uss(self.mnode, self.volname)
- self.assertEqual(ret, 0, "Failed to enable\
- uss on volume: %s" % self.volname)
- g.log.info("uss enabled successfully on volume :%s" % self.volname)
+ ret, _, _ = enable_uss(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to enable uss on volume: %s" %
+ self.volname)
+ g.log.info("uss enabled successfully on volume :%s", self.volname)
# command for volume reset with force
g.log.info("started resetting volume with force option")
cmd = "gluster volume reset " + self.volname + " force"
- ret, out, _ = g.run(self.mnode, cmd)
+ ret, _, _ = g.run(self.mnode, cmd)
self.assertEqual(ret, 0, "volume reset fail\
for : %s" % self.volname)
- g.log.info("Volume resetted sucessfully with\
- force option :%s" % self.volname)
+ g.log.info("Volume reset sucessfully with force option :%s",
+ self.volname)
- '''
- After volume reset bitd, snapd, scrub daemons will not be running,
- all three daemons will get die
- '''
+ # After volume reset with force, the bitd, snapd and scrub daemons
+ # will not be running; all three daemons get killed.
g.log.info("checking snapshot, scrub and bitrot daemons\
running or not after volume reset with force")
for mnode in self.servers:
@@ -196,5 +174,5 @@ class GlusterdVolumeReset(GlusterBaseClass):
ret = is_snapd_running(mnode, self.volname)
self.assertFalse(ret, "Snap Daemon should not be\
running on %s server after volume reset force:" % mnode)
- g.log.info("After volume reset bitd, scrub and snapd are not running after\
- volume reset with force on volume :%s" % self.volname)
+ g.log.info("After volume reset bitd, scrub and snapd are not running "
+ "after volume reset with force on volume :%s", self.volname)
diff --git a/tests/functional/glusterd/test_volume_status.py b/tests/functional/glusterd/test_volume_status.py
index a1c0d1710..acfceb23b 100644
--- a/tests/functional/glusterd/test_volume_status.py
+++ b/tests/functional/glusterd/test_volume_status.py
@@ -14,18 +14,18 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-""" Description:
- Test Cases in this module related to Glusterd volume status while
- IOs in progress
"""
+Test cases in this module are related to Glusterd volume status while
+IO is in progress
+"""
+import random
+from time import sleep
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.misc.misc_libs import upload_scripts
from glustolibs.io.utils import (validate_io_procs, wait_for_io_to_complete,
list_all_files_and_dirs_mounts)
-import random
-from time import sleep
@runs_on([['distributed', 'replicated', 'distributed-replicated',
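
The import hunk above moves the standard-library imports to the top of the file instead of leaving them below the glusto/glustolibs ones, matching the usual PEP 8 grouping that checks such as pylint's wrong-import-order enforce. A sketch of the intended ordering (the exact module list is only illustrative):

# Standard library
import random
from time import sleep

# Third-party / project libraries
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
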
@@ -42,7 +42,7 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass):
raise ExecutionError("Peer probe failed ")
else:
g.log.info("All server peers are already in connected state "
- "%s:" % cls.servers)
+ "%s:", cls.servers)
# Uploading file_dir script in all client direcotries
g.log.info("Upload io scripts to clients %s for running IO on "
@@ -53,7 +53,7 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass):
"file_dir_ops.py")
ret = upload_scripts(cls.clients, script_local_path)
if not ret:
- raise ExecutionError("Failed to upload IO scripts to clients %s",
+ raise ExecutionError("Failed to upload IO scripts to clients %s" %
cls.clients)
g.log.info("Successfully uploaded IO scripts to clients %s",
cls.clients)
@@ -69,7 +69,7 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass):
g.log.info("Started creating volume")
ret = self.setup_volume()
if ret:
- g.log.info("Volme created successfully : %s" % self.volname)
+ g.log.info("Volme created successfully : %s", self.volname)
else:
raise ExecutionError("Volume creation failed: %s" % self.volname)
@@ -95,7 +95,7 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass):
# unmounting the volume and Cleaning up the volume
ret = self.unmount_volume_and_cleanup_volume(self.mounts)
if ret:
- g.log.info("Volume deleted successfully : %s" % self.volname)
+ g.log.info("Volume deleted successfully : %s", self.volname)
else:
raise ExecutionError("Failed Cleanup the Volume %s" % self.volname)
@@ -117,7 +117,7 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass):
# Mounting a volume
ret = self.mount_volume(self.mounts)
self.assertTrue(ret, "Volume mount failed for %s" % self.volname)
- g.log.info("Volume mounted sucessfully : %s" % self.volname)
+ g.log.info("Volume mounted sucessfully : %s", self.volname)
# After Mounting immediately writting IO's are failing some times,
# thats why keeping sleep for 10 secs
@@ -147,14 +147,15 @@ class VolumeStatusWhenIOInProgress(GlusterBaseClass):
# performing "gluster volume status volname inode" command on
# all cluster servers randomly while io is in progress,
# this command should not get hang while io is in progress
+ # pylint: disable=unused-variable
for i in range(20):
- ret, out, err = g.run(random.choice(self.servers),
- "gluster --timeout=12000 volume status %s "
- "inode" % self.volname)
+ ret, _, _ = g.run(random.choice(self.servers),
+ "gluster --timeout=12000 volume status %s "
+ "inode" % self.volname)
self.assertEqual(ret, 0, ("Volume status 'inode' failed on "
"volume %s" % self.volname))
g.log.info("Successful in logging volume status"
- "'inode' of volume %s" % self.volname)
+ "'inode' of volume %s", self.volname)
# Validate IO
g.log.info("Wait for IO to complete and validate IO ...")