-rw-r--r--  glustolibs-gluster/glustolibs/gluster/gluster_base_class.py  75
-rw-r--r--  tests/bvt/test_bvt_lite_and_plus.py  95
2 files changed, 54 insertions(+), 116 deletions(-)
diff --git a/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py b/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
index 8ab513d00..23274e97f 100644
--- a/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
+++ b/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
@@ -25,6 +25,12 @@ import unittest
 from glusto.core import Glusto as g
 import os
 import random
+from glustolibs.gluster.gluster_init import start_glusterd
+from glustolibs.gluster.peer_ops import (peer_probe_servers, is_peer_connected,
+                                         peer_status)
+from glustolibs.gluster.volume_libs import setup_volume, cleanup_volume
+from glustolibs.gluster.volume_ops import volume_info, volume_status
+import time
 
 class runs_on(g.CarteTestClass):
     """Decorator providing runs_on capability for standard unittest script"""
@@ -255,36 +261,57 @@ class GlusterVolumeBaseClass(GlusterBaseClass):
     def setUpClass(cls):
         GlusterBaseClass.setUpClass.im_func(cls)
 
-        # Start Glusterd
-        from glustolibs.gluster.gluster_init import start_glusterd
-        ret = start_glusterd(servers=cls.servers)
-        if not ret:
-            g.log.error("glusterd did not start on at least one server")
-            return False
-
-        # PeerProbe servers
-        from glustolibs.gluster.peer_ops import peer_probe_servers
-        ret = peer_probe_servers(mnode=cls.servers[0], servers=cls.servers[1:])
-        if not ret:
-            g.log.error("Unable to peer probe one or more servers")
-            return False
-
-        from glustolibs.gluster.volume_libs import setup_volume
+        # Validate that peers are connected from all the servers
+        for server in cls.servers:
+            ret = is_peer_connected(server, cls.servers)
+            assert (ret == True), "Validating peers to be in cluster failed"
+
+        # Print Peer Status from mnode
+        _, _, _ = peer_status(cls.mnode)
+
+        # Setup Volume
         ret = setup_volume(mnode=cls.mnode,
                            all_servers_info=cls.all_servers_info,
-                           volume_config=cls.volume)
-        if not ret:
-            g.log.error("Setup volume %s failed" % cls.volname)
-            return False
+                           volume_config=cls.volume, force=True)
+        assert (ret == True), "Setup volume %s failed" % cls.volname
+        time.sleep(10)
+
+        # Print Volume Info and Status
+        _, _, _ = volume_info(cls.mnode, cls.volname)
+
+        _, _, _ = volume_status(cls.mnode, cls.volname)
+
+        # Validate whether the volume is exported
+        if 'nfs' in cls.mount_type:
+            cmd = "showmount -e localhost"
+            _, _, _ = g.run(cls.mnode, cmd)
+
+            cmd = "showmount -e localhost | grep %s" % cls.volname
+            ret, _, _ = g.run(cls.mnode, cmd)
+            assert (ret == 0), "Volume %s not exported" % cls.volname
+
+        if 'cifs' in cls.mount_type:
+            cmd = "smbclient -L localhost"
+            _, _, _ = g.run(cls.mnode, cmd)
+
+            cmd = ("smbclient -L localhost -U | grep -i -Fw gluster-%s " %
+                   cls.volname)
+            ret, _, _ = g.run(cls.mnode, cmd)
+            assert (ret == 0), ("Volume %s not accessible via SMB/CIFS share" %
+                                cls.volname)
 
         # Create Mounts
+        rc = True
         for mount_obj in cls.mounts:
             ret = mount_obj.mount()
             if not ret:
                 g.log.error("Unable to mount volume '%s:%s' on '%s:%s'" %
                             (mount_obj.server_system, mount_obj.volname,
                              mount_obj.client_system, mount_obj.mountpoint))
-                return False
+                rc = False
+        assert (rc == True), ("Mounting volume %s on a few clients failed" %
+                              cls.volname)
+
 
     @classmethod
     def tearDownClass(cls, umount_vol=True, cleanup_vol=True):
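The new setup path pauses with a flat time.sleep(10) after setup_volume before querying status. A common alternative, not part of this patch, is to poll with a bounded timeout; a minimal sketch using only the standard library (the wait_until helper is hypothetical, not a glustolibs API):

    import time

    def wait_until(predicate, timeout=60, interval=2):
        """Poll `predicate` until it returns True or `timeout` seconds pass.

        Hypothetical helper: replaces a fixed sleep so healthy clusters
        proceed as soon as the condition holds, while slow ones get the
        full timeout before the caller gives up.
        """
        deadline = time.time() + timeout
        while time.time() < deadline:
            if predicate():
                return True
            time.sleep(interval)
        return False

Called as wait_until(lambda: volume_status(cls.mnode, cls.volname)[0] == 0), assuming volume_status returns the (retcode, stdout, stderr) triple it is unpacked into above, this keeps fast clusters fast while giving slow ones up to a minute.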
@@ -304,11 +331,5 @@ class GlusterVolumeBaseClass(GlusterBaseClass):
 
         # Cleanup volume
         if cleanup_vol:
-            from glustolibs.gluster.volume_libs import cleanup_volume
             ret = cleanup_volume(mnode=cls.mnode, volname=cls.volname)
-            if not ret:
-                g.log.error("cleanup volume %s failed" % cls.volname)
-                return False
-
-            g.log.info("TEARDOWN GLUSTER VOLUME: %s on %s" % (cls.volume_type,
-                                                              cls.mount_type))
+            assert (ret == True), ("cleanup volume %s failed" % cls.volname)
diff --git a/tests/bvt/test_bvt_lite_and_plus.py b/tests/bvt/test_bvt_lite_and_plus.py
index e47c13477..a77b6beea 100644
--- a/tests/bvt/test_bvt_lite_and_plus.py
+++ b/tests/bvt/test_bvt_lite_and_plus.py
@@ -18,19 +18,15 @@
 import pytest
 import os
 from glusto.core import Glusto as g
-from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
-from glustolibs.gluster.gluster_init import start_glusterd
-from glustolibs.gluster.peer_ops import (peer_probe_servers, is_peer_connected,
-                                         peer_status)
-from glustolibs.gluster.volume_libs import setup_volume, cleanup_volume
-from glustolibs.gluster.volume_ops import volume_info, volume_status
+from glustolibs.gluster.gluster_base_class import (GlusterVolumeBaseClass,
+                                                   runs_on)
 import time
 
 
 @runs_on([['replicated', 'distributed', 'distributed-replicated',
            'dispersed', 'distributed-dispersed'],
           ['glusterfs', 'nfs', 'cifs']])
-class BvtTestsClass(GlusterBaseClass):
+class BvtTestsClass(GlusterVolumeBaseClass):
     """Class containing case for : BVT Lite and BVT Plus.
 
     BVT Lite: Run the case on dis-rep volume with glusterfs, nfs, cifs
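runs_on is defined in the base-class file above as a g.CarteTestClass decorator, so the two lists presumably fan this one class out across the cartesian product of volume types and mount types. A rough illustration of that enumeration (the real decorator generates test-class variants rather than printing):

    import itertools

    volume_types = ['replicated', 'distributed', 'distributed-replicated',
                    'dispersed', 'distributed-dispersed']
    mount_types = ['glusterfs', 'nfs', 'cifs']

    # 5 volume types x 3 mount types = 15 configurations of the same class.
    for volume_type, mount_type in itertools.product(volume_types, mount_types):
        print("BVT run: %s volume mounted via %s" % (volume_type, mount_type))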
@@ -41,73 +37,10 @@ class BvtTestsClass(GlusterBaseClass):
     """
     @classmethod
     def setUpClass(cls):
-        """Following are the setps in setupclass
-            - Start glusterd on all servers
-            - Peer Probe
-            - Setup the volume
-            - Mount the volume
+        """Setup Volume and Mounts.
         """
-        GlusterBaseClass.setUpClass.im_func(cls)
         g.log.info("Starting %s:" % cls.__name__)
-
-        # Start Glusterd
-        ret = start_glusterd(servers=cls.servers)
-        assert (ret == True), "glusterd did not start on at least one server"
-
-        # PeerProbe servers
-        ret = peer_probe_servers(mnode=cls.servers[0], servers=cls.servers[1:])
-        assert (ret == True), "Unable to peer probe one or more servers"
-
-        # Validate if peer is connected from all the servers
-        for server in cls.servers:
-            ret = is_peer_connected(server, cls.servers)
-            assert (ret == True), "Validating Peers to be in Cluster Failed"
-
-        # Print Peer Status from mnode
-        _, _, _ = peer_status(cls.mnode)
-
-        # Setup Volume
-        ret = setup_volume(mnode=cls.mnode,
-                           all_servers_info=cls.all_servers_info,
-                           volume_config=cls.volume, force=True)
-        assert (ret == True), "Setup volume %s failed" % cls.volname
-        time.sleep(10)
-
-        # Print Volume Info and Status
-        _, _, _ = volume_info(cls.mnode, cls.volname)
-
-        _, _, _ = volume_status(cls.mnode, cls.volname)
-
-        # Validate if volume is exported or not
-        if 'nfs' in cls.mount_type:
-            cmd = "showmount -e localhost"
-            _, _, _ = g.run(cls.mnode, cmd)
-
-            cmd = "showmount -e localhost | grep %s" % cls.volname
-            ret, _, _ = g.run(cls.mnode, cmd)
-            assert (ret == 0), "Volume %s not exported" % cls.volname
-
-        if 'cifs' in cls.mount_type:
-            cmd = "smbclient -L localhost"
-            _, _, _ = g.run(cls.mnode, cmd)
-
-            cmd = ("smbclient -L localhost -U | grep -i -Fw gluster-%s " %
-                   cls.volname)
-            ret, _, _ = g.run(cls.mnode, cmd)
-            assert (ret == 0), ("Volume %s not accessable via SMB/CIFS share" %
-                                cls.volname)
-
-        # Create Mounts
-        rc = True
-        for mount_obj in cls.mounts:
-            ret = mount_obj.mount()
-            if not ret:
-                g.log.error("Unable to mount volume '%s:%s' on '%s:%s'" %
-                            (mount_obj.server_system, mount_obj.volname,
-                             mount_obj.client_system, mount_obj.mountpoint))
-                rc = False
-        assert (rc == True), ("Mounting volume %s on few clients failed" %
-                              cls.volname)
+        GlusterVolumeBaseClass.setUpClass.im_func(cls)
 
         # Upload io scripts
         cls.script_local_path = ("/usr/share/glustolibs/io/"
@@ -194,20 +127,4 @@
     def tearDownClass(cls):
         """Cleanup mount and Cleanup the volume
         """
-        GlusterBaseClass.tearDownClass.im_func(cls)
-
-        # Unmount mounts
-        rc = True
-        for mount_obj in cls.mounts:
-            ret = mount_obj.unmount()
-            if not ret:
-                g.log.error("Unable to unmount volume '%s:%s' on '%s:%s'" %
-                            (mount_obj.server_system, mount_obj.volname,
-                             mount_obj.client_system, mount_obj.mountpoint))
-                rc = False
-        assert (rc == True), ("UnMounting volume %s on few clients failed" %
-                              cls.volname)
-
-        # Cleanup Volume
-        ret = cleanup_volume(mnode=cls.mnode, volname=cls.volname)
-        assert (ret == True), ("cleanup volume %s failed" % cls.volname)
+        GlusterVolumeBaseClass.tearDownClass.im_func(cls)
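With both files patched, the setup/teardown lifecycle (peer validation, volume creation, export checks, mounting, unmount and cleanup) lives entirely in GlusterVolumeBaseClass, so a new test module shrinks to its test logic. A minimal sketch of such a consumer in the same idiom as the patch; the class name and test body are illustrative only:

    from glusto.core import Glusto as g
    from glustolibs.gluster.gluster_base_class import (GlusterVolumeBaseClass,
                                                       runs_on)


    @runs_on([['replicated', 'distributed-replicated'],
              ['glusterfs', 'nfs']])
    class ExampleSanityTest(GlusterVolumeBaseClass):
        """Illustrative subclass: setUpClass and tearDownClass are inherited,
        so the class body holds nothing but the actual test steps."""

        def test_mounts_are_writable(self):
            # self.mounts (set up by the base setUpClass) holds one object
            # per client mount; write a file through each to prove access.
            for mount_obj in self.mounts:
                cmd = "touch %s/sanity_file" % mount_obj.mountpoint
                ret, _, _ = g.run(mount_obj.client_system, cmd)
                assert (ret == 0), ("Write failed on %s:%s" %
                                    (mount_obj.client_system,
                                     mount_obj.mountpoint))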