author    Shwetha Panduranga <spandura@redhat.com>  2016-10-03 15:38:38 +0530
committer Shwetha Panduranga <spandura@redhat.com>  2016-10-26 00:11:27 +0530
commit    01a5f8306b9f772fa1f5f46c07355b747a2c0af6 (patch)
tree      acbddfde982c2eb9893e9d62264eb4671df0aa70 /tests
parent    0e0f958776091fb155822323467efd114d7326b5 (diff)
Adding a BVT testcase

Change-Id: Ide7e3bac46fbaf354a2a5c8baef8510b4aefec78
Signed-off-by: Shwetha Panduranga <spandura@redhat.com>
Diffstat (limited to 'tests')
-rw-r--r--  tests/bvt/test_bvt_lite_and_plus.py | 213
-rw-r--r--  tests/gluster_tests_config.yml      | 269
2 files changed, 482 insertions, 0 deletions
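
To run this test case against the bundled config, an invocation along the
following lines should work (a sketch, not verified against this exact
revision: '-c' selects the glusto config file and '--pytest' forwards
arguments to pytest):

    glusto -c 'tests/gluster_tests_config.yml' \
        --pytest='-v tests/bvt/test_bvt_lite_and_plus.py'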
diff --git a/tests/bvt/test_bvt_lite_and_plus.py b/tests/bvt/test_bvt_lite_and_plus.py
new file mode 100644
index 000000000..e47c13477
--- /dev/null
+++ b/tests/bvt/test_bvt_lite_and_plus.py
@@ -0,0 +1,213 @@
+#!/usr/bin/env python
+# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os
+import time
+
+import pytest
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.gluster_init import start_glusterd
+from glustolibs.gluster.peer_ops import (peer_probe_servers, is_peer_connected,
+                                         peer_status)
+from glustolibs.gluster.volume_libs import setup_volume, cleanup_volume
+from glustolibs.gluster.volume_ops import volume_info, volume_status
+
+
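+# runs_on: the framework executes this class once for every applicable
+# (volume type, mount protocol) combination listed in the decorator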
+@runs_on([['replicated', 'distributed', 'distributed-replicated',
+ 'dispersed', 'distributed-dispersed'],
+ ['glusterfs', 'nfs', 'cifs']])
+class BvtTestsClass(GlusterBaseClass):
+ """Class containing case for : BVT Lite and BVT Plus.
+
+ BVT Lite: Run the case on dis-rep volume with glusterfs, nfs, cifs
+ protocols
+
+ BVT Plus: Run the case on all volume types and all protocol types
+ combinations
+ """
+ @classmethod
+ def setUpClass(cls):
+ """Following are the setps in setupclass
+ - Start glusterd on all servers
+ - Peer Probe
+ - Setup the volume
+ - Mount the volume
+ """
+ GlusterBaseClass.setUpClass.im_func(cls)
+ g.log.info("Starting %s:" % cls.__name__)
+
+ # Start Glusterd
+ ret = start_glusterd(servers=cls.servers)
+ assert (ret == True), "glusterd did not start on at least one server"
+
+ # PeerProbe servers
+ ret = peer_probe_servers(mnode=cls.servers[0], servers=cls.servers[1:])
+ assert (ret == True), "Unable to peer probe one or more servers"
+
+        # Validate that every server sees all peers as connected
+        for server in cls.servers:
+            ret = is_peer_connected(server, cls.servers)
+            assert (ret == True), ("Peer validation failed: not all peers "
+                                   "are connected from %s" % server)
+
+ # Print Peer Status from mnode
+ _, _, _ = peer_status(cls.mnode)
+
+ # Setup Volume
+ ret = setup_volume(mnode=cls.mnode,
+ all_servers_info=cls.all_servers_info,
+ volume_config=cls.volume, force=True)
+ assert (ret == True), "Setup volume %s failed" % cls.volname
+ time.sleep(10)
+
+ # Print Volume Info and Status
+ _, _, _ = volume_info(cls.mnode, cls.volname)
+
+ _, _, _ = volume_status(cls.mnode, cls.volname)
+
+ # Validate if volume is exported or not
+ if 'nfs' in cls.mount_type:
+ cmd = "showmount -e localhost"
+ _, _, _ = g.run(cls.mnode, cmd)
+
+ cmd = "showmount -e localhost | grep %s" % cls.volname
+ ret, _, _ = g.run(cls.mnode, cmd)
+ assert (ret == 0), "Volume %s not exported" % cls.volname
+
+        if 'cifs' in cls.mount_type:
+            cmd = "smbclient -L localhost -N"
+            _, _, _ = g.run(cls.mnode, cmd)
+
+            cmd = ("smbclient -L localhost -N | grep -i -Fw gluster-%s" %
+                   cls.volname)
+            ret, _, _ = g.run(cls.mnode, cmd)
+            assert (ret == 0), ("Volume %s not accessible via SMB/CIFS "
+                                "share" % cls.volname)
+
+ # Create Mounts
+ rc = True
+ for mount_obj in cls.mounts:
+ ret = mount_obj.mount()
+ if not ret:
+ g.log.error("Unable to mount volume '%s:%s' on '%s:%s'" %
+ (mount_obj.server_system, mount_obj.volname,
+ mount_obj.client_system, mount_obj.mountpoint))
+ rc = False
+ assert (rc == True), ("Mounting volume %s on few clients failed" %
+ cls.volname)
+
+ # Upload io scripts
+ cls.script_local_path = ("/usr/share/glustolibs/io/"
+ "scripts/file_dir_ops.py")
+ cls.script_upload_path = "/tmp/file_dir_ops.py"
+ ret = os.path.exists(cls.script_local_path)
+ assert (ret == True), ("Unable to find the io scripts")
+
+ for client in cls.clients:
+ g.upload(client, cls.script_local_path, cls.script_upload_path)
+ g.run(client, "ls -l %s" % cls.script_upload_path)
+ g.run(client, "chmod +x %s" % cls.script_upload_path)
+ g.run(client, "ls -l %s" % cls.script_upload_path)
+
+ def setUp(self):
+ pass
+
+ def test_bvt(self):
+ """Test IO from the mounts.
+ """
+ g.log.info("Starting Test: %s on %s %s" %
+ (self.id(), self.volume_type, self.mount_type))
+
+        # Log mount, df, ls and stat details of each mount before the IO
+ for mount_obj in self.mounts:
+ cmd = "mount | grep %s" % mount_obj.mountpoint
+ ret, out, err = g.run(mount_obj.client_system, cmd)
+ cmd = "df -h %s" % mount_obj.mountpoint
+ ret, out, err = g.run(mount_obj.client_system, cmd)
+ cmd = "ls -ld %s" % mount_obj.mountpoint
+ ret, out, err = g.run(mount_obj.client_system, cmd)
+ cmd = "stat %s" % mount_obj.mountpoint
+ ret, out, err = g.run(mount_obj.client_system, cmd)
+
+ # Start IO on all mounts.
+ all_mounts_procs = []
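+        # Give each mount a distinct --dirname-start-num offset so the
+        # clients create disjoint directory trees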
+ count = 1
+ for mount_obj in self.mounts:
+ cmd = ("python %s create_deep_dirs_with_files "
+ "--dirname-start-num %d "
+ "--dir-depth 2 "
+ "--dir-length 10 "
+ "--max-num-of-dirs 5 "
+ "--num-of-files 5 %s" % (self.script_upload_path,
+ count, mount_obj.mountpoint))
+ proc = g.run_async(mount_obj.client_system, cmd,
+ user=mount_obj.user)
+ all_mounts_procs.append(proc)
+ count = count + 10
+
+ # Get IO status
+ rc = True
+ for i, proc in enumerate(all_mounts_procs):
+ ret, _, _ = proc.async_communicate()
+ if ret != 0:
+ g.log.error("IO Failed on %s:%s" %
+ (self.mounts[i].client_system,
+ self.mounts[i].mountpoint))
+ rc = False
+ assert (rc == True), "IO failed on some of the clients"
+
+ # Get stat of all the files/dirs created.
+ all_mounts_procs = []
+ for mount_obj in self.mounts:
+ cmd = ("python %s stat "
+ "-R %s" % (self.script_upload_path, mount_obj.mountpoint))
+ proc = g.run_async(mount_obj.client_system, cmd,
+ user=mount_obj.user)
+ all_mounts_procs.append(proc)
+ rc = True
+ for i, proc in enumerate(all_mounts_procs):
+ ret, _, _ = proc.async_communicate()
+ if ret != 0:
+ g.log.error("Stat of files and dirs under %s:%s Failed" %
+ (self.mounts[i].client_system,
+ self.mounts[i].mountpoint))
+ rc = False
+ assert (rc == True), "Stat failed on some of the clients"
+
+ def tearDown(self):
+ pass
+
+ @classmethod
+ def tearDownClass(cls):
+ """Cleanup mount and Cleanup the volume
+ """
+ GlusterBaseClass.tearDownClass.im_func(cls)
+
+ # Unmount mounts
+ rc = True
+ for mount_obj in cls.mounts:
+ ret = mount_obj.unmount()
+ if not ret:
+ g.log.error("Unable to unmount volume '%s:%s' on '%s:%s'" %
+ (mount_obj.server_system, mount_obj.volname,
+ mount_obj.client_system, mount_obj.mountpoint))
+ rc = False
+ assert (rc == True), ("UnMounting volume %s on few clients failed" %
+ cls.volname)
+
+ # Cleanup Volume
+ ret = cleanup_volume(mnode=cls.mnode, volname=cls.volname)
+ assert (ret == True), ("cleanup volume %s failed" % cls.volname)
diff --git a/tests/gluster_tests_config.yml b/tests/gluster_tests_config.yml
new file mode 100644
index 000000000..e0fcc8209
--- /dev/null
+++ b/tests/gluster_tests_config.yml
@@ -0,0 +1,269 @@
+log_file: /var/log/tests/gluster_tests.log
+log_level: DEBUG
+
+servers:
+ - server-vm1
+ - server-vm2
+ - server-vm3
+ - server-vm4
+ - server-vm5
+ - server-vm6
+ - server-vm7
+ - server-vm8
+ - server-vm9
+ - server-vm10
+ - server-vm11
+ - server-vm12
+
+clients:
+ - client-vm1
+ - client-vm2
+ - client-vm3
+ - client-vm4
+
+servers_info:
+ server-vm1: &server1
+ host: server-vm1
+ devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
+ brick_root: "/bricks"
+ server-vm2: &server2
+ host: server-vm2
+ devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
+ brick_root: "/bricks"
+ server-vm3: &server3
+ host: server-vm3
+ devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
+ brick_root: "/bricks"
+ server-vm4: &server4
+ host: server-vm4
+ devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
+ brick_root: "/bricks"
+ server-vm5: &server5
+ host: server-vm5
+ devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
+ brick_root: "/bricks"
+ server-vm6: &server6
+ host: server-vm6
+ devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
+ brick_root: "/bricks"
+ server-vm7: &server7
+ host: server-vm7
+ devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
+ brick_root: "/bricks"
+ server-vm8: &server8
+ host: server-vm8
+ devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
+ brick_root: "/bricks"
+ server-vm9: &server9
+ host: server-vm9
+ devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
+ brick_root: "/bricks"
+ server-vm10: &server10
+ host: server-vm10
+ devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
+ brick_root: "/bricks"
+ server-vm11: &server11
+ host: server-vm11
+ devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
+ brick_root: "/bricks"
+ server-vm12: &server12
+ host: server-vm12
+ devices: ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
+ brick_root: "/bricks"
+
+clients_info:
+ client-vm1: &client1
+ host: client-vm1
+ client-vm2: &client2
+ host: client-vm2
+ super_user: 'root'
+ client-vm3: &client3
+ host: client-vm3
+ super_user: 'Admin'
+ platform: 'windows'
+ client-vm4: &client4
+ host: client-vm4
+ super_user: 'Administrator'
+ platform: 'windows'
+
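+# The &serverN / &clientN entries above define YAML anchors; later sections
+# reference them with * aliases (for example, mounts use *client1)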
+gluster:
+ running_on_volumes: []
+ running_on_mounts: []
+
+ cluster_config:
+ smb:
+ enable: False
+ users_info:
+ 'root':
+ password: 'foobar'
+ acl: ''
+ 'user1':
+ password: 'xyz'
+ acl: ''
+ 'user2':
+ password: 'abc'
+ acl: ''
+ ctdb_setup: True
+ ctdb_servers: []
+ ctdb_vips:
+ - vip: vip1
+ routing_prefix: '23'
+ interface: 'eth0'
+ - vip: vip2
+ routing_prefix: '22'
+ interface: 'eth0'
+ ctdb_metavol_brick_path: ''
+
+ nfs_ganesha:
+ enable: False
+ num_of_nfs_ganesha_nodes: 4
+ vips: []
+
+ volume_types:
+ distributed: &distributed
+ type: distributed
+ dist_count: 4
+ transport: tcp
+ replicated: &replicated
+ type: replicated
+ replica_count: 3
+ arbiter_count: 1
+ transport: tcp
+ distributed-replicated: &distributed-replicated
+ type: distributed-replicated
+ dist_count: 2
+ replica_count: 3
+ transport: tcp
+ dispersed: &dispersed
+ type: dispersed
+ disperse_count: 6
+ redundancy_count: 2
+ transport: tcp
+ distributed-dispersed: &distributed-dispersed
+ type: distributed-dispersed
+ dist_count: 2
+ disperse_count: 6
+ redundancy_count: 2
+ transport: tcp
+
+ slave_volumes:
+ - &slave_vol1
+ voltype: *distributed-replicated
+ servers: [ server-vm5, server-vm6, server-vm7, server-vm8 ]
+ extra_servers: []
+ quota:
+ enable: False
+ limit_usage:
+ path: "/"
+ size: 100GB
+ percent:
+ limit_objects:
+ path: "/"
+ number:
+ percent:
+ alert_time:
+ soft_timeout:
+ hard_timeout:
+ inode_quota:
+ enable: False
+ bitrot:
+ enable: False
+ scrub_throttle:
+ scrub_frequency:
+
+ volumes:
+ - &vol1
+ name: testvol
+ voltype: *distributed-dispersed
+ servers: [ server-vm1, server-vm2, server-vm3, server-vm4 ]
+ extra_servers: [ server-vm9, server-vm10, server-vm11, server-vm12 ]
+ tier:
+ create_tier: False
+ tier_type: *distributed-replicated
+ quota:
+ enable: False
+ limit_usage:
+ path: "/"
+ size: 100GB
+ percent:
+ limit_objects:
+ path: "/"
+ number:
+ percent:
+ alert_time: 0
+ soft_timeout: 0
+ hard_timeout: 0
+ inode_quota:
+ enable: False
+ bitrot:
+ enable: False
+ scrub_throttle: 'aggressive'
+ scrub_frequency: 'hourly'
+ geo_rep:
+ create_geo_rep_session: False
+ slave_volumes: [ *slave_vol1 ]
+ user: 'root'
+ group: 'geogroup'
+ sync_mode: 'rsync'
+ options:
+ performance.readdir-ahead: "on"
+ snapshot:
+ use_snapshot: True
+ snap_jobname: 'snap_job'
+ snap_schedule: 2
+ uss:
+ enable: False
+ nfs_ganesha:
+ enable: False
+ smb:
+ enable: False
+
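+  # Each mount's 'client' references a clients_info entry via a *clientN
+  # alias, or spells the client fields out inline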
+ mounts:
+ - &mount1
+ protocol: 'glusterfs'
+ server: server-vm1
+ volname: testvol
+ client: &client1
+ mountpoint: ''
+ options: ''
+ - &mount2
+ protocol: 'nfs'
+ server: server-vm1
+ volname: testvol
+ client:
+ host: client-vm2
+ mountpoint: ''
+ options: ''
+ num_of_mounts: 1
+ - &mount3
+ protocol: 'smb'
+ server: server-vm2
+ volname: testvol
+ client: &client3
+ mountpoint: ''
+ options: ''
+ smbuser: 'user1'
+ smbpasswd: 'xyz'
+ num_of_mounts: 3
+ - &mount4
+ protocol: 'smb'
+ server: server-vm4
+ volname: testvol
+ client:
+ host: client-vm4
+ super_user: 'Administrator'
+ platform: 'windows'
+ mountpoint: ''
+ options: ''
+ smbuser: 'user2'
+ smbpasswd: 'abc'
+ - &mount5
+ protocol: 'cifs'
+ server: server-vm1
+ volname: testvol
+ client:
+ host: client-vm2
+ mountpoint: ''
+ options: ''
+ smbuser: 'user2'
+ smbpasswd: 'abc'