Diffstat (limited to 'tests/functional/glusterd/test_volume_reset.py')
-rw-r--r-- tests/functional/glusterd/test_volume_reset.py | 116
1 file changed, 47 insertions(+), 69 deletions(-)
diff --git a/tests/functional/glusterd/test_volume_reset.py b/tests/functional/glusterd/test_volume_reset.py
index 2bb8c4c24..f61fdaaba 100644
--- a/tests/functional/glusterd/test_volume_reset.py
+++ b/tests/functional/glusterd/test_volume_reset.py
@@ -15,13 +15,10 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
""" Description:
- Test Cases in this module related to Glusterd volume reset validation
- with bitd, scrub and snapd daemons running or not
"""
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
-from glustolibs.gluster.peer_ops import peer_probe_servers
from glustolibs.gluster.volume_libs import cleanup_volume
from glustolibs.gluster.bitrot_ops import (enable_bitrot, is_bitd_running,
is_scrub_process_running)
@@ -31,33 +28,20 @@ from glustolibs.gluster.uss_ops import enable_uss, is_snapd_running
@runs_on([['distributed', 'replicated', 'distributed-replicated',
'dispersed', 'distributed-dispersed'], ['glusterfs']])
class GlusterdVolumeReset(GlusterBaseClass):
+ '''
+ Test Cases in this module related to Glusterd volume reset validation
+ with bitd, scrub and snapd daemons running or not
+ '''
@classmethod
def setUpClass(cls):
GlusterBaseClass.setUpClass.im_func(cls)
- g.log.info("Starting %s " % cls.__name__)
- '''
- checking for peer status from every node, if peers are in not
- connected state, performing peer probe.
- '''
- ret = cls.validate_peers_are_connected()
- if not ret:
- ret = peer_probe_servers(cls.mnode, cls.servers)
- if ret:
- g.log.info("peers are connected successfully from %s to other \
- servers in severlist %s:" % (cls.mnode, cls.servers))
- else:
- g.log.error("Peer probe failed from %s to other \
- servers in severlist %s:" % (cls.mnode, cls.servers))
- raise ExecutionError("Peer probe failed ")
- else:
- g.log.info("All server peers are already in connected state\
- %s:" % cls.servers)
+ g.log.info("Starting %s ", cls.__name__)
# Creating Volume
g.log.info("Started creating volume")
ret = cls.setup_volume()
if ret:
- g.log.info("Volme created successfully : %s" % cls.volname)
+ g.log.info("Volme created successfully : %s", cls.volname)
else:
raise ExecutionError("Volume creation failed: %s" % cls.volname)
@@ -71,9 +55,9 @@ class GlusterdVolumeReset(GlusterBaseClass):
# command for volume reset
g.log.info("started resetting volume")
cmd = "gluster volume reset " + self.volname
- ret, out, _ = g.run(self.mnode, cmd)
- if (ret == 0):
- g.log.info("volume restted successfully :%s" % self.volname)
+ ret, _, _ = g.run(self.mnode, cmd)
+ if ret == 0:
+ g.log.info("volume reset successfully :%s", self.volname)
else:
raise ExecutionError("Volume reset Failed :%s" % self.volname)
@@ -89,7 +73,7 @@ class GlusterdVolumeReset(GlusterBaseClass):
# stopping the volume and Cleaning up the volume
ret = cleanup_volume(cls.mnode, cls.volname)
if ret:
- g.log.info("Volume deleted successfully : %s" % cls.volname)
+ g.log.info("Volume deleted successfully : %s", cls.volname)
else:
raise ExecutionError("Failed Cleanup the Volume %s" % cls.volname)
@@ -103,52 +87,47 @@ class GlusterdVolumeReset(GlusterBaseClass):
-> Eanble Uss on same volume
-> Reset the volume with force
-> Verify all the daemons(BitD, Scrub & Uss) are running or not
- :return:
'''
# enable bitrot and scrub on volume
g.log.info("Enabling bitrot")
- ret, out, _ = enable_bitrot(self.mnode, self.volname)
- self.assertEqual(ret, 0, "Failed to enable bitrot on\
- volume: %s" % self.volname)
- g.log.info("Bitd and scrub daemons enabled\
- successfully on volume :%s" % self.volname)
+ ret, _, _ = enable_bitrot(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to enable bitrot on volume: %s" %
+ self.volname)
+ g.log.info("Bitd and scrub daemons enabled successfully on volume :%s",
+ self.volname)
# enable uss on volume
g.log.info("Enabling snaphot(uss)")
- ret, out, _ = enable_uss(self.mnode, self.volname)
- self.assertEqual(ret, 0, "Failed to enable uss on\
- volume: %s" % self.volname)
- g.log.info("uss enabled successfully on volume :%s" % self.volname)
+ ret, _, _ = enable_uss(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to enable uss on volume: %s" %
+ self.volname)
+ g.log.info("uss enabled successfully on volume :%s", self.volname)
# Checks bitd, snapd, scrub daemons running or not
g.log.info("checking snapshot, scrub and bitrot\
daemons running or not")
for mnode in self.servers:
ret = is_bitd_running(mnode, self.volname)
- self.assertTrue(ret, "Bitrot Daemon\
- not running on %s server:" % mnode)
+ self.assertTrue(ret, "Bitrot Daemon not running on %s server:"
+ % mnode)
ret = is_scrub_process_running(mnode, self.volname)
- self.assertTrue(ret, "Scrub Daemon\
- not running on %s server:" % mnode)
+ self.assertTrue(ret, "Scrub Daemon not running on %s server:"
+ % mnode)
ret = is_snapd_running(mnode, self.volname)
- self.assertTrue(ret, "Snap Daemon\
- not running %s server:" % mnode)
- g.log.info("bitd, scrub and snapd running\
- successflly on volume :%s" % self.volname)
+ self.assertTrue(ret, "Snap Daemon not running %s server:" % mnode)
+ g.log.info("bitd, scrub and snapd running successflly on volume :%s",
+ self.volname)
# command for volume reset
g.log.info("started resetting volume")
cmd = "gluster volume reset " + self.volname
- ret, out, _ = g.run(self.mnode, cmd)
- self.assertEqual(ret, 0, "volume reset failed\
- for : %s" % self.volname)
- g.log.info("volume resetted succefully :%s" % self.volname)
+ ret, _, _ = g.run(self.mnode, cmd)
+ self.assertEqual(ret, 0, "volume reset failed for : %s" % self.volname)
+ g.log.info("volume resetted succefully :%s", self.volname)
- '''
- After volume reset snap daemon will not be running,
- bitd and scrub deamons will be in running state.
- '''
+ # After volume reset snap daemon will not be running,
+ # bitd and scrub daemons will be in running state.
g.log.info("checking snapshot, scrub and bitrot daemons\
running or not after volume reset")
for mnode in self.servers:
@@ -159,31 +138,30 @@ class GlusterdVolumeReset(GlusterBaseClass):
self.assertTrue(ret, "Scrub Daemon\
not running on %s server:" % mnode)
ret = is_snapd_running(mnode, self.volname)
- self.assertFalse(ret, "Snap Daemon should not be\
- running on %s server after volume reset:" % mnode)
- g.log.info("bitd and scrub daemons are running after volume reset\
- snapd is not running as expected on volume :%s" % self.volname)
+ self.assertFalse(ret, "Snap Daemon should not be running on %s "
+ "server after volume reset:" % mnode)
+ g.log.info("bitd and scrub daemons are running after volume reset "
+ "snapd is not running as expected on volume :%s",
+ self.volname)
# enable uss on volume
g.log.info("Enabling snaphot(uss)")
- ret, out, _ = enable_uss(self.mnode, self.volname)
- self.assertEqual(ret, 0, "Failed to enable\
- uss on volume: %s" % self.volname)
- g.log.info("uss enabled successfully on volume :%s" % self.volname)
+ ret, _, _ = enable_uss(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to enable uss on volume: %s" %
+ self.volname)
+ g.log.info("uss enabled successfully on volume :%s", self.volname)
# command for volume reset with force
g.log.info("started resetting volume with force option")
cmd = "gluster volume reset " + self.volname + " force"
- ret, out, _ = g.run(self.mnode, cmd)
+ ret, _, _ = g.run(self.mnode, cmd)
self.assertEqual(ret, 0, "volume reset fail\
for : %s" % self.volname)
- g.log.info("Volume resetted sucessfully with\
- force option :%s" % self.volname)
+ g.log.info("Volume reset sucessfully with force option :%s",
+ self.volname)
- '''
- After volume reset bitd, snapd, scrub daemons will not be running,
- all three daemons will get die
- '''
+ # After volume reset bitd, snapd, scrub daemons will not be running,
+ # all three daemons will be killed.
g.log.info("checking snapshot, scrub and bitrot daemons\
running or not after volume reset with force")
for mnode in self.servers:
@@ -196,5 +174,5 @@ class GlusterdVolumeReset(GlusterBaseClass):
ret = is_snapd_running(mnode, self.volname)
self.assertFalse(ret, "Snap Daemon should not be\
running on %s server after volume reset force:" % mnode)
- g.log.info("After volume reset bitd, scrub and snapd are not running after\
- volume reset with force on volume :%s" % self.volname)
+ g.log.info("After volume reset bitd, scrub and snapd are not running "
+ "after volume reset with force on volume :%s", self.volname)