-rw-r--r--  glustolibs-gluster/glustolibs/gluster/gluster_base_class.py  148
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py  2
-rwxr-xr-x  glustolibs-io/glustolibs/io/utils.py  2
-rwxr-xr-x  glustolibs-io/shared_files/scripts/fd_writes.py  2
-rwxr-xr-x  glustolibs-io/shared_files/scripts/file_dir_ops.py  2
-rwxr-xr-x [-rw-r--r--]  glustolibs-io/shared_files/scripts/generate_io.py  30
-rw-r--r--  tests/functional/afr/test_client_side_quorum.py  791
-rwxr-xr-x  tests/functional/afr/test_client_side_quorum_with_fixed_validate_max_bricks.py  130
-rwxr-xr-x [-rw-r--r--]  tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py  20
9 files changed, 233 insertions(+), 894 deletions(-)
diff --git a/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py b/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
index d5864c0b1..73a6c4c1e 100644
--- a/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
+++ b/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
@@ -1202,83 +1202,83 @@ class GlusterBlockBaseClass(GlusterBaseClass):
"""
# Get gluster block info from config file
if g.config.get('gluster_block_args_info'):
- cls.gluster_block_args_info = {}
- blocks_count = 0
- each_block_info = g.config['gluster_block_args_info']
+ cls.gluster_block_args_info = {}
+ blocks_count = 0
+ each_block_info = g.config['gluster_block_args_info']
# for i, each_block_info in enumerate(
# g.config['gluster_block_args_info']):
# volname
- block_on_volume = cls.volname
- if each_block_info.get('volname'):
- block_on_volume = each_block_info['volname']
-
- # Block name
- block_base_name = "gluster_block"
- if each_block_info.get('blockname'):
- block_base_name = each_block_info['blockname']
-
- # servers
- block_servers = cls.servers
- if each_block_info.get('servers'):
- block_servers = each_block_info['servers']
- if not filter(None, block_servers):
- block_servers = cls.servers
-
- # Block size
- block_size = "1GiB"
- if each_block_info.get('size'):
- block_size = each_block_info['size']
-
- # HA
- block_ha = 3
- if each_block_info.get('ha'):
- block_ha = each_block_info['ha']
-
- # auth
- block_auth = None
- if each_block_info.get('auth'):
- block_auth = each_block_info['auth']
-
- # prealloc
- block_prealloc = None
- if each_block_info.get('prealloc'):
- block_prealloc = each_block_info['prealloc']
-
- # ring-buffer
- block_ring_buffer = None
- if each_block_info.get('ring-buffer'):
- block_ring_buffer = each_block_info['ring-buffer']
-
- # Number of blocks
- num_of_blocks = 1
- if each_block_info.get('num_of_blocks'):
- num_of_blocks = int(each_block_info['num_of_blocks'])
-
- # for count in range(blocks_count,num_of_blocks +blocks_count):
- for count in range(blocks_count, num_of_blocks):
- # blocks_count = int(count) + i
-
- if block_ha:
- selected_block_servers = random.sample(block_servers,
- block_ha)
- else:
- selected_block_servers = random.choice(block_servers)
-
- block_name = "_".join([block_base_name,
- str(count + 1)])
-
- cls.gluster_block_args_info[block_name] = (
- {'volname': block_on_volume,
- 'blockname': block_name,
- 'servers': cls.get_ip_from_hostname(
- selected_block_servers),
- 'size': block_size,
- 'ha': block_ha,
- 'auth': block_auth,
- 'prealloc': block_prealloc,
- 'storage': None,
- 'ring-buffer': block_ring_buffer}
- )
+ block_on_volume = cls.volname
+ if each_block_info.get('volname'):
+ block_on_volume = each_block_info['volname']
+
+ # Block name
+ block_base_name = "gluster_block"
+ if each_block_info.get('blockname'):
+ block_base_name = each_block_info['blockname']
+
+ # servers
+ block_servers = cls.servers
+ if each_block_info.get('servers'):
+ block_servers = each_block_info['servers']
+ if not filter(None, block_servers):
+ block_servers = cls.servers
+
+ # Block size
+ block_size = "1GiB"
+ if each_block_info.get('size'):
+ block_size = each_block_info['size']
+
+ # HA
+ block_ha = 3
+ if each_block_info.get('ha'):
+ block_ha = each_block_info['ha']
+
+ # auth
+ block_auth = None
+ if each_block_info.get('auth'):
+ block_auth = each_block_info['auth']
+
+ # prealloc
+ block_prealloc = None
+ if each_block_info.get('prealloc'):
+ block_prealloc = each_block_info['prealloc']
+
+ # ring-buffer
+ block_ring_buffer = None
+ if each_block_info.get('ring-buffer'):
+ block_ring_buffer = each_block_info['ring-buffer']
+
+ # Number of blocks
+ num_of_blocks = 1
+ if each_block_info.get('num_of_blocks'):
+ num_of_blocks = int(each_block_info['num_of_blocks'])
+
+ # for count in range(blocks_count,num_of_blocks +blocks_count):
+ for count in range(blocks_count, num_of_blocks):
+ # blocks_count = int(count) + i
+
+ if block_ha:
+ selected_block_servers = random.sample(block_servers,
+ block_ha)
+ else:
+ selected_block_servers = random.choice(block_servers)
+
+ block_name = "_".join([block_base_name,
+ str(count + 1)])
+
+ cls.gluster_block_args_info[block_name] = (
+ {'volname': block_on_volume,
+ 'blockname': block_name,
+ 'servers': cls.get_ip_from_hostname(
+ selected_block_servers),
+ 'size': block_size,
+ 'ha': block_ha,
+ 'auth': block_auth,
+ 'prealloc': block_prealloc,
+ 'storage': None,
+ 'ring-buffer': block_ring_buffer}
+ )
for key in cls.gluster_block_args_info.keys():
value = cls.gluster_block_args_info[key]
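A note on the logic preserved by this re-indentation: the emptiness check `if not filter(None, block_servers)` only behaves as intended on Python 2, where filter() returns a list; on Python 3 filter() returns a lazy iterator that is always truthy, so the fallback to cls.servers would never fire. Below is a minimal standalone sketch of the same selection logic that works on both versions (pick_block_servers is a hypothetical helper name, not part of the library):

import random

def pick_block_servers(configured_servers, default_servers, ha_count):
    # Drop blank/None entries, then fall back to the defaults if nothing is left.
    servers = [s for s in (configured_servers or []) if s]
    if not servers:
        servers = default_servers
    if ha_count:
        # random.sample picks ha_count *distinct* servers; it raises
        # ValueError when ha_count exceeds the number of servers.
        return random.sample(servers, ha_count)
    return [random.choice(servers)]

# Example: pick_block_servers(['', None], ['s1', 's2', 's3'], 2) -> two of s1..s3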
diff --git a/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py b/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py
index fd7d8f4a5..863ba40d7 100644
--- a/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py
@@ -142,7 +142,7 @@ class NfsGaneshaClusterSetupClass(GlusterBaseClass):
if is_nfs_ganesha_cluster_in_healthy_state(
cls.servers_in_nfs_ganesha_cluster[0]):
-                g.log.info("Nfs-ganesha Cluster exists and is in healthy state")
+            g.log.info("Nfs-ganesha Cluster exists and is in healthy state")
else:
raise ExecutionError("Nfs-ganesha Cluster setup Failed")
diff --git a/glustolibs-io/glustolibs/io/utils.py b/glustolibs-io/glustolibs/io/utils.py
index b6325faa1..b6e1f627c 100755
--- a/glustolibs-io/glustolibs/io/utils.py
+++ b/glustolibs-io/glustolibs/io/utils.py
@@ -317,7 +317,7 @@ def cleanup_mounts(mounts):
mount_obj.mountpoint)
if (not mount_obj.mountpoint or
(os.path.realpath(os.path.abspath(mount_obj.mountpoint))
- is '/')):
+ == '/')):
g.log.error("%s on %s is not a valid mount point",
mount_obj.mountpoint, mount_obj.client_system)
continue
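The one-character fix above is the substance of this and the next two hunks: `is` tests object identity, not string equality, and whether a computed string shares an object with a literal is a CPython implementation detail (CPython 3.8+ even emits a SyntaxWarning for `is` against a literal). A quick illustration of why `==` is the correct comparison:

a = ''.join(['ab', 'ab'])   # builds 'abab' at runtime
print(a == 'abab')          # True: compares values
print(a is 'abab')          # False on CPython: equal values, distinct objects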
diff --git a/glustolibs-io/shared_files/scripts/fd_writes.py b/glustolibs-io/shared_files/scripts/fd_writes.py
index 0911c3f11..87358f45a 100755
--- a/glustolibs-io/shared_files/scripts/fd_writes.py
+++ b/glustolibs-io/shared_files/scripts/fd_writes.py
@@ -34,7 +34,7 @@ def is_root(path):
Returns:
True if path is '/' , False otherwise
"""
- if os.path.realpath(os.path.abspath(path)) is '/':
+ if os.path.realpath(os.path.abspath(path)) == '/':
print ("Directory '%s' is the root of filesystem. "
"Not performing any operations on the root of filesystem" %
os.path.abspath(path))
diff --git a/glustolibs-io/shared_files/scripts/file_dir_ops.py b/glustolibs-io/shared_files/scripts/file_dir_ops.py
index 2ea6a96ab..96e53262d 100755
--- a/glustolibs-io/shared_files/scripts/file_dir_ops.py
+++ b/glustolibs-io/shared_files/scripts/file_dir_ops.py
@@ -48,7 +48,7 @@ def is_root(path):
Returns:
True if path is '/' , False otherwise
"""
- if os.path.realpath(os.path.abspath(path)) is '/':
+ if os.path.realpath(os.path.abspath(path)) == '/':
print ("Directory '%s' is the root of filesystem. "
"Not performing any operations on the root of filesystem" %
os.path.abspath(path))
diff --git a/glustolibs-io/shared_files/scripts/generate_io.py b/glustolibs-io/shared_files/scripts/generate_io.py
index c9836ba80..d07bda7b0 100644..100755
--- a/glustolibs-io/shared_files/scripts/generate_io.py
+++ b/glustolibs-io/shared_files/scripts/generate_io.py
@@ -275,21 +275,21 @@ def start_populate_data(mount_point, io_dict,
proc_queue = []
for each_io in io_dict.keys():
- q = multiprocessing.Queue()
- proc_queue.append(q)
- workload_type = io_dict[each_io]['workload_type']
- proc = multiprocessing.Process(target=(io_dict[each_io]
- ['function_addr']),
- args=(q,
- (io_dict[each_io]
- ['script_path']),
- dirname,
- (io_dict[each_io]['job_files']
- [workload_type]),
- io_dict[each_io]['log_file']))
- proc_list.append(proc)
- time.sleep(5)
- proc.start()
+ q = multiprocessing.Queue()
+ proc_queue.append(q)
+ workload_type = io_dict[each_io]['workload_type']
+ proc = multiprocessing.Process(target=(io_dict[each_io]
+ ['function_addr']),
+ args=(q,
+ (io_dict[each_io]
+ ['script_path']),
+ dirname,
+ (io_dict[each_io]['job_files']
+ [workload_type]),
+ io_dict[each_io]['log_file']))
+ proc_list.append(proc)
+ time.sleep(5)
+ proc.start()
p = multiprocessing.Process(
target=run_check_if_percent_to_fill_or_timeout_is_met,
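For readers skimming the re-indented loop: it implements a worker-per-workload pattern, spawning one multiprocessing.Process per entry in io_dict and handing each its own Queue so results can be collected independently of process exit order. A self-contained sketch of that pattern, with illustrative names (run_workload and the workload list are not the module's API):

import multiprocessing

def run_workload(queue, name):
    # Stand-in for the io_dict[each_io]['function_addr'] callables.
    queue.put((name, 'done'))

if __name__ == '__main__':
    procs, queues = [], []
    for name in ('fio', 'dd'):
        q = multiprocessing.Queue()
        queues.append(q)
        p = multiprocessing.Process(target=run_workload, args=(q, name))
        procs.append(p)
        p.start()
    for p in procs:
        p.join()
    print([q.get() for q in queues])   # [('fio', 'done'), ('dd', 'done')]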
diff --git a/tests/functional/afr/test_client_side_quorum.py b/tests/functional/afr/test_client_side_quorum.py
deleted file mode 100644
index 66f2ee281..000000000
--- a/tests/functional/afr/test_client_side_quorum.py
+++ /dev/null
@@ -1,791 +0,0 @@
-# Copyright (C) 2016-2017 Red Hat, Inc. <http://www.redhat.com>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-""" Description:
-    Test cases in this module test client-side quorum.
-"""
-
-from glusto.core import Glusto as g
-from glustolibs.gluster.exceptions import ExecutionError
-from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
-from glustolibs.gluster.volume_libs import (
- set_volume_options, get_subvols)
-from glustolibs.misc.misc_libs import upload_scripts
-from glustolibs.gluster.brick_libs import (bring_bricks_offline,
- bring_bricks_online)
-from glustolibs.io.utils import (validate_io_procs,
- is_io_procs_fail_with_rofs)
-
-
-@runs_on([['replicated', 'distributed-replicated'],
- ['glusterfs', 'nfs', 'cifs']])
-class ClientSideQuorumTests(GlusterBaseClass):
- """
-    ClientSideQuorumTests contains tests which verify the
-    client-side quorum test cases
- """
- @classmethod
- def setUpClass(cls):
- """
- Upload the necessary scripts to run tests.
- """
-
- # calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
-
- # Upload io scripts for running IO on mounts
- g.log.info("Upload io scripts to clients %s for running IO on "
- "mounts", cls.clients)
- script_abs_path = "/usr/share/glustolibs/io/scripts/file_dir_ops.py"
- cls.script_upload_path = "/usr/share/glustolibs/io/scripts/" \
- "file_dir_ops.py"
- ret = upload_scripts(cls.clients, script_abs_path)
- if not ret:
- raise ExecutionError("Failed to upload IO scripts to clients")
-
- def setUp(self):
- """
- setUp method for every test
- """
-
- # calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
-
- # Setup Volume and Mount Volume
- g.log.info("Starting to Setup Volume %s", self.volname)
- ret = self.setup_volume_and_mount_volume(self.mounts)
- if not ret:
- raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
- g.log.info("Successful in Setup Volume and Mount Volume")
-
- def tearDown(self):
- """
- tearDown for every test
- """
-
- # stopping the volume
- g.log.info("Starting to Unmount Volume and Cleanup Volume")
- ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
- if not ret:
- raise ExecutionError("Failed to Unmount Volume and Cleanup Volume")
- g.log.info("Successful in Unmount Volume and Cleanup Volume")
-
- # Calling GlusterBaseClass tearDown
-
- GlusterBaseClass.tearDown.im_func(self)
-
- def test_client_side_quorum_with_fixed_validate_max_bricks(self):
- """
-        Test that client-side quorum with quorum-type 'fixed' validates
-        the maximum number of bricks it accepts for quorum-count
-
-        * set cluster.quorum-type to fixed
-        * set cluster.quorum-count to a number greater than the number
-          of replicas in a sub-volume
-        * the above step should fail
-
- """
-
- # set cluster.quorum-type to fixed
- options = {"cluster.quorum-type": "fixed"}
- g.log.info("setting %s for the volume %s", options, self.volname)
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertTrue(ret, ("Unable to set %s for volume %s"
- % (options, self.volname)))
- g.log.info("Successfully set %s for volume %s", options, self.volname)
-
- # get the subvolumes
- g.log.info("Starting to get sub-volumes for volume %s", self.volname)
- subvols_dict = get_subvols(self.mnode, self.volname)
- num_subvols = len(subvols_dict['volume_subvols'])
- g.log.info("Number of subvolumes in volume %s is %s", self.volname,
- num_subvols)
-
- # get the number of bricks in replica set
- num_bricks_in_subvol = len(subvols_dict['volume_subvols'][0])
- g.log.info("Number of bricks in each replica set : %s",
- num_bricks_in_subvol)
-
-        # set cluster.quorum-count to a higher value than the number of
-        # bricks in the replica set
- start_range = num_bricks_in_subvol + 1
- end_range = num_bricks_in_subvol + 30
- for i in range(start_range, end_range):
- options = {"cluster.quorum-count": "%s" % i}
- g.log.info("setting %s for the volume %s", options, self.volname)
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertFalse(ret, ("Able to set %s for volume %s, quorum-count"
- " should not be greater than number of"
- " bricks in replica set"
- % (options, self.volname)))
- g.log.info("Expected: Unable to set %s for volume %s, "
- "quorum-count should be less than number of bricks "
- "in replica set", options, self.volname)
-
-
-@runs_on([['replicated', 'distributed-replicated'], ['glusterfs']])
-class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):
- """
-    ClientSideQuorumTestsWithSingleVolumeCross3 contains tests which
-    verify the client-side quorum test cases with a cross-3 volume.
- """
- @classmethod
- def setUpClass(cls):
- """
- Upload the necessary scripts to run tests.
- """
-
- # calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
-
- # Overriding the volume type to specifically test the volume type
- if cls.volume_type == "replicated":
- cls.volume['voltype'] = {
- 'type': 'replicated',
- 'replica_count': 3,
- 'dist_count': 1,
- 'transport': 'tcp'
- }
-
- # Upload io scripts for running IO on mounts
- g.log.info("Upload io scripts to clients %s for running IO on "
- "mounts", cls.clients)
- script_abs_path = "/usr/share/glustolibs/io/scripts/file_dir_ops.py"
- cls.script_upload_path = "/usr/share/glustolibs/io/scripts/" \
- "file_dir_ops.py"
- ret = upload_scripts(cls.clients, script_abs_path)
- if not ret:
- raise ExecutionError("Failed to upload IO scripts to clients")
-
- def setUp(self):
- """
- setUp method for every test
- """
-
- # calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
-
- # Setup Volume and Mount Volume
- g.log.info("Starting to Setup Volume %s", self.volname)
- ret = self.setup_volume_and_mount_volume(self.mounts)
- if not ret:
- raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
- g.log.info("Successful in Setup Volume and Mount Volume")
-
- def tearDown(self):
- """
- tearDown for every test
- """
-
- # stopping the volume
- g.log.info("Starting to Unmount Volume and Cleanup Volume")
- ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
- if not ret:
- raise ExecutionError("Failed to Unmount Volume "
- "and Cleanup Volume")
- g.log.info("Successful in Unmount Volume and Cleanup Volume")
-
- # Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
-
- def test_client_side_quorum_with_fixed_for_cross3(self):
- """
-        Test script to verify client-side quorum with quorum-type fixed
-        on a cross-3 (replica 3) volume
-
-        * disable the self-heal daemon
-        * set cluster.quorum-type to fixed
-        * start I/O (write and read) from the mount point - must succeed
-        * bring down brick1
-        * start I/O (write and read) - must succeed
-        * bring down brick2
-        * start I/O (write and read) - must succeed
-        * set cluster.quorum-count to 1
-        * start I/O (write and read) - must succeed
-        * set cluster.quorum-count to 2
-        * start I/O (write and read) - read must pass, write will fail
-        * bring brick1 back online
-        * start I/O (write and read) - must succeed
-        * bring brick2 back online
-        * start I/O (write and read) - must succeed
-        * set cluster.quorum-type to auto
-        * start I/O (write and read) - must succeed
-        * bring down brick1 and brick2
-        * start I/O (write and read) - read must pass, write will fail
-        * set cluster.quorum-count to 1
-        * start I/O (write and read) - read must pass, write will fail
-        * set cluster.quorum-count to 3
-        * start I/O (write and read) - read must pass, write will fail
-        * set cluster.quorum-type to none
-        * start I/O (write and read) - must succeed
-
- """
- # pylint: disable=too-many-locals,too-many-statements,too-many-branches
- # Disable self heal daemon
- options = {"cluster.self-heal-daemon": "off"}
- g.log.info("setting %s for the volume %s", options, self.volname)
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertTrue(ret, ("Unable to set %s for volume %s"
- % (options, self.volname)))
- g.log.info("Successfully set %s for volume %s",
- options, self.volname)
-
- # set cluster.quorum-type to fixed
- options = {"cluster.quorum-type": "fixed"}
- g.log.info("setting %s for the volume %s", options, self.volname)
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertTrue(ret, ("Unable to set %s for volume %s"
- % (options, self.volname)))
- g.log.info("Successfully set %s for volume %s",
- options, self.volname)
-
- # start I/O( write ) - must succeed
- g.log.info("Starting IO on all mounts...")
- g.log.info("mounts: %s", self.mounts)
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s create_files "
- "-f 10 --base-file-name file %s"
- % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "IO failed on some of the clients"
- )
-
- # read the file
- g.log.info("Start reading files on all mounts")
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s read "
- "%s" % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "Reads failed on some of the clients"
- )
-
- # get the subvolumes
- g.log.info("Starting to get sub-volumes for volume %s", self.volname)
- subvols_dict = get_subvols(self.mnode, self.volname)
- num_subvols = len(subvols_dict['volume_subvols'])
-        g.log.info("Number of subvolumes in volume %s is %s",
-                   self.volname, num_subvols)
-
- # bring down brick1 for all the subvolumes
- offline_brick1_from_replicasets = []
- for i in range(0, num_subvols):
- subvol_brick_list = subvols_dict['volume_subvols'][i]
- g.log.info("sub-volume %s brick list : %s",
- i, subvol_brick_list)
- brick_to_bring_offline1 = subvol_brick_list[0]
- g.log.info("Going to bring down the brick process "
- "for %s", brick_to_bring_offline1)
- ret = bring_bricks_offline(self.volname, brick_to_bring_offline1)
- self.assertTrue(ret, ("Failed to bring down the bricks. Please "
- "check the log file for more details."))
- g.log.info("Brought down the brick process "
- "for %s successfully", brick_to_bring_offline1)
- offline_brick1_from_replicasets.append(brick_to_bring_offline1)
-
-        # start I/O (write and read) - must succeed
- g.log.info("Starting IO on all mounts...")
- g.log.info("mounts: %s", self.mounts)
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s create_files "
- "-f 10 --base-file-name testfile %s"
- % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "IO failed on some of the clients"
- )
-
- # read the file
- g.log.info("Start reading files on all mounts")
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s read "
- "%s" % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "Reads failed on some of the clients"
- )
-
- # bring down brick2 for all the subvolumes
- offline_brick2_from_replicasets = []
- for i in range(0, num_subvols):
- subvol_brick_list = subvols_dict['volume_subvols'][i]
- g.log.info("sub-volume %s brick list : %s",
- i, subvol_brick_list)
- brick_to_bring_offline2 = subvol_brick_list[1]
- g.log.info("Going to bring down the brick process "
- "for %s", brick_to_bring_offline2)
- ret = bring_bricks_offline(self.volname, brick_to_bring_offline2)
- self.assertTrue(ret, ("Failed to bring down the bricks. Please "
- "check the log file for more details."))
- g.log.info("Brought down the brick process "
- "for %s successfully", brick_to_bring_offline2)
- offline_brick2_from_replicasets.append(brick_to_bring_offline2)
-
-        # start I/O (write and read) - must succeed
- g.log.info("Starting IO on all mounts...")
- g.log.info("mounts: %s", self.mounts)
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s create_files "
- "-f 10 --base-file-name newfile %s"
- % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "IO failed on some of the clients"
- )
-
- # read the file
- g.log.info("Start reading files on all mounts")
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s read "
- "%s" % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "Reads failed on some of the clients"
- )
-
- # set the cluster.quorum-count to 1
- options = {"cluster.quorum-count": "1"}
- g.log.info("setting %s for the volume %s", options, self.volname)
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertTrue(ret, ("Unable to set %s for volume %s"
- % (options, self.volname)))
- g.log.info("Successfully set %s for volume %s",
- options, self.volname)
-
-        # start I/O (write and read) - must succeed
- g.log.info("Starting IO on all mounts...")
- g.log.info("mounts: %s", self.mounts)
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s create_files "
- "-f 10 --base-file-name filename %s"
- % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "IO failed on some of the clients"
- )
-
- # read the file
- g.log.info("Start reading files on all mounts")
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s read "
- "%s" % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "Reads failed on some of the clients"
- )
-
- # set the cluster.quorum-count to 2
- options = {"cluster.quorum-count": "2"}
- g.log.info("setting %s for the volume %s", options, self.volname)
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertTrue(ret, ("Unable to set %s for volume %s"
- % (options, self.volname)))
- g.log.info("Successfully set %s for volume %s",
- options, self.volname)
-
-        # start I/O (write and read) - read must pass, write will fail
- g.log.info("Starting IO on all mounts......")
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s create_files "
- "-f 10 --base-file-name testfilename %s" %
- (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- g.log.info("Validating whether IO failed with Read Only File System")
- ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
- self.mounts)
- self.assertTrue(ret, ("Unexpected Error and IO successful"
- " on Read-Only File System"))
- g.log.info("EXPECTED Read-only file system in IO while creating file")
-
- # read the file
- g.log.info("Start reading files on all mounts")
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s read "
- "%s" % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "Reads failed on some of the clients"
- )
-
- # bring back the brick1 online for all subvolumes
- g.log.info("bringing up the brick : %s online",
- offline_brick1_from_replicasets)
- ret = bring_bricks_online(self.mnode, self.volname,
- offline_brick1_from_replicasets)
-        self.assertTrue(ret, ("Failed to bring the brick %s online"
- % offline_brick1_from_replicasets))
- g.log.info("Successfully brought the brick %s online",
- offline_brick1_from_replicasets)
-
-        # start I/O (write and read) - must succeed
- g.log.info("Starting IO on all mounts...")
- g.log.info("mounts: %s", self.mounts)
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s create_files "
- "-f 10 --base-file-name newfilename %s"
- % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "IO failed on some of the clients"
- )
-
- # read the file
- g.log.info("Start reading files on all mounts")
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s read "
- "%s" % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "Reads failed on some of the clients"
- )
-
- # Bring back brick2 online
- g.log.info("bringing up the brick : %s online",
- offline_brick2_from_replicasets)
- ret = bring_bricks_online(self.mnode, self.volname,
- offline_brick2_from_replicasets)
-        self.assertTrue(ret, ("Failed to bring the brick %s online"
- % offline_brick2_from_replicasets))
- g.log.info("Successfully brought the brick %s online",
- offline_brick2_from_replicasets)
-
-        # start I/O (write and read) - must succeed
- g.log.info("Starting IO on all mounts...")
- g.log.info("mounts: %s", self.mounts)
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s create_files "
- "-f 10 --base-file-name textfile %s"
- % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "IO failed on some of the clients"
- )
-
- # read the file
- g.log.info("Start reading files on all mounts")
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s read "
- "%s" % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "Reads failed on some of the clients"
- )
-
- # set cluster.quorum-type to auto
- options = {"cluster.quorum-type": "auto"}
- g.log.info("setting %s for the volume %s", options, self.volname)
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertTrue(ret, ("Unable to set %s for volume %s"
- % (options, self.volname)))
- g.log.info("Successfully set %s for volume %s",
- options, self.volname)
-
-        # start I/O (write and read) - must succeed
- g.log.info("Starting IO on all mounts...")
- g.log.info("mounts: %s", self.mounts)
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s create_files "
- "-f 10 --base-file-name newtextfile %s"
- % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "IO failed on some of the clients"
- )
-
- # read the file
- g.log.info("Start reading files on all mounts")
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s read "
- "%s" % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "Reads failed on some of the clients"
- )
-
- # bring down brick1 and brick2 for all the subvolumes
- for i in range(0, num_subvols):
- subvol_brick_list = subvols_dict['volume_subvols'][i]
- g.log.info("sub-volume %s brick list : %s",
- i, subvol_brick_list)
- bricks_to_bring_offline = subvol_brick_list[0:2]
- g.log.info("Going to bring down the brick process "
- "for %s", bricks_to_bring_offline)
- ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
- self.assertTrue(ret, ("Failed to bring down the bricks. Please "
- "check the log file for more details."))
- g.log.info("Brought down the brick process "
- "for %s successfully", bricks_to_bring_offline)
-
-        # start I/O (write and read) - read must pass, write will fail
- g.log.info("Start creating files on all mounts...")
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s create_files "
- "-f 10 --base-file-name newtestfile %s" %
- (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- g.log.info("Validating whether IO failed with Read-only file system")
- ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
- self.mounts)
- self.assertTrue(ret, ("Unexpected error and IO successful"
- " on Read-only file system"))
- g.log.info("EXPECTED: Read-only file system in IO while creating file")
-
- # read the file
- g.log.info("Start reading files on all mounts")
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s read "
- "%s" % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "Reads failed on some of the clients"
- )
-
- # set the cluster.quorum-count to 1
- options = {"cluster.quorum-count": "1"}
- g.log.info("setting %s for the volume %s", options, self.volname)
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertTrue(ret, ("Unable to set %s for volume %s"
- % (options, self.volname)))
- g.log.info("Successfully set %s for volume %s",
- options, self.volname)
-
-        # start I/O (write and read) - read must pass, write will fail
- g.log.info("Start creating files on all mounts...")
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s create_files "
- "-f 10 --base-file-name newtestfilename %s" %
- (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- g.log.info("Validating whether IO failed with Read-only file system")
- ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
- self.mounts)
- self.assertTrue(ret, ("Unexpected error and IO successful"
- " on Read-only file system"))
- g.log.info("EXPECTED: Read-only file system in IO while creating file")
-
- # read the file
- g.log.info("Start reading files on all mounts")
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s read "
- "%s" % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "Reads failed on some of the clients"
- )
-
- # set the cluster.quorum-count to 3
- options = {"cluster.quorum-count": "3"}
- g.log.info("setting %s for the volume %s", options, self.volname)
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertTrue(ret, ("Unable to set %s for volume %s"
- % (options, self.volname)))
- g.log.info("Successfully set %s for volume %s",
- options, self.volname)
-
-        # start I/O (write and read) - read must pass, write will fail
- g.log.info("Start creating files on all mounts...")
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s create_files "
- "-f 10 --base-file-name textfilename %s" %
- (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- g.log.info("Validating whether IO failed with Read-only file system")
- ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
- self.mounts)
- self.assertTrue(ret, ("Unexpected error and IO successful"
- " on Read-only file system"))
- g.log.info("EXPECTED: Read-only file system in IO while creating file")
-
- # read the file
- g.log.info("Start reading files on all mounts")
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s read "
- "%s" % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "Reads failed on some of the clients"
- )
-
- # set the quorum-type to none
- options = {"cluster.quorum-type": "none"}
- g.log.info("setting %s for the volume %s", options, self.volname)
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertTrue(ret, ("Unable to set %s for volume %s"
- % (options, self.volname)))
- g.log.info("Successfully set %s for volume %s",
- options, self.volname)
-
-        # start I/O (write and read) - must succeed
- g.log.info("Starting IO on all mounts...")
- g.log.info("mounts: %s", self.mounts)
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s create_files "
- "-f 10 --base-file-name lastfile %s"
- % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "IO failed on some of the clients"
- )
-
- # read the file
- g.log.info("Start reading files on all mounts")
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s read "
- "%s" % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "Reads failed on some of the clients"
- )
diff --git a/tests/functional/afr/test_client_side_quorum_with_fixed_validate_max_bricks.py b/tests/functional/afr/test_client_side_quorum_with_fixed_validate_max_bricks.py
new file mode 100755
index 000000000..cf699b7d8
--- /dev/null
+++ b/tests/functional/afr/test_client_side_quorum_with_fixed_validate_max_bricks.py
@@ -0,0 +1,130 @@
+# Copyright (C) 2016-2017 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+""" Description:
+    Test cases in this module test client-side quorum.
+"""
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.volume_libs import (
+ set_volume_options, get_subvols)
+from glustolibs.misc.misc_libs import upload_scripts
+
+
+@runs_on([['replicated', 'distributed-replicated'],
+ ['glusterfs', 'nfs', 'cifs']])
+class ClientSideQuorumTests(GlusterBaseClass):
+ """
+    ClientSideQuorumTests contains tests which verify the
+    client-side quorum test cases
+ """
+ @classmethod
+ def setUpClass(cls):
+ """
+ Upload the necessary scripts to run tests.
+ """
+
+ # calling GlusterBaseClass setUpClass
+ GlusterBaseClass.setUpClass.im_func(cls)
+
+ # Upload io scripts for running IO on mounts
+ g.log.info("Upload io scripts to clients %s for running IO on "
+ "mounts", cls.clients)
+ script_abs_path = "/usr/share/glustolibs/io/scripts/file_dir_ops.py"
+ cls.script_upload_path = script_abs_path
+ ret = upload_scripts(cls.clients, script_abs_path)
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts to clients")
+
+ def setUp(self):
+ """
+ setUp method for every test
+ """
+
+ # calling GlusterBaseClass setUp
+ GlusterBaseClass.setUp.im_func(self)
+
+ # Setup Volume and Mount Volume
+ g.log.info("Starting to Setup Volume %s", self.volname)
+ ret = self.setup_volume_and_mount_volume(self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
+ g.log.info("Successful in Setup Volume and Mount Volume")
+
+ def tearDown(self):
+ """
+ tearDown for every test
+ """
+
+ # stopping the volume
+ g.log.info("Starting to Unmount Volume and Cleanup Volume")
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Unmount Volume and Cleanup Volume")
+ g.log.info("Successful in Unmount Volume and Cleanup Volume")
+
+ # Calling GlusterBaseClass tearDown
+ GlusterBaseClass.tearDown.im_func(self)
+
+ def test_client_side_quorum_with_fixed_validate_max_bricks(self):
+ """
+        Test that client-side quorum with quorum-type 'fixed' validates
+        the maximum number of bricks it accepts for quorum-count
+
+        * set cluster.quorum-type to fixed
+        * set cluster.quorum-count to a number greater than the number
+          of replicas in a sub-volume
+        * the above step should fail
+
+ """
+
+ # set cluster.quorum-type to fixed
+ options = {"cluster.quorum-type": "fixed"}
+ g.log.info("setting %s for the volume %s", options, self.volname)
+ ret = set_volume_options(self.mnode, self.volname, options)
+ self.assertTrue(ret, ("Unable to set %s for volume %s"
+ % (options, self.volname)))
+ g.log.info("Successfully set %s for volume %s", options, self.volname)
+
+ # get the subvolumes
+ g.log.info("Starting to get sub-volumes for volume %s", self.volname)
+ subvols_dict = get_subvols(self.mnode, self.volname)
+ num_subvols = len(subvols_dict['volume_subvols'])
+ g.log.info("Number of subvolumes in volume %s is %s", self.volname,
+ num_subvols)
+
+ # get the number of bricks in replica set
+ num_bricks_in_subvol = len(subvols_dict['volume_subvols'][0])
+ g.log.info("Number of bricks in each replica set : %s",
+ num_bricks_in_subvol)
+
+        # set cluster.quorum-count to a higher value than the number of
+        # bricks in the replica set
+ start_range = num_bricks_in_subvol + 1
+ end_range = num_bricks_in_subvol + 30
+ for i in range(start_range, end_range):
+ options = {"cluster.quorum-count": "%s" % i}
+ g.log.info("setting %s for the volume %s", options, self.volname)
+ ret = set_volume_options(self.mnode, self.volname, options)
+ self.assertFalse(ret, ("Able to set %s for volume %s, quorum-count"
+ " should not be greater than number of"
+ " bricks in replica set"
+ % (options, self.volname)))
+ g.log.info("Expected: Unable to set %s for volume %s, "
+ "quorum-count should be less than number of bricks "
+ "in replica set", options, self.volname)
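At the gluster CLI level, the rejection this test expects looks roughly like the following for a replica 3 volume ('testvol' is a placeholder and the exact error text varies across Gluster releases):

# gluster volume set testvol cluster.quorum-count 4
volume set: failed: 4 is out of range [1 - 3] for option cluster.quorum-count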
diff --git a/tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py b/tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py
index 05caf4a43..1e0956508 100644..100755
--- a/tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py
+++ b/tests/functional/nfs_ganesha/test_nfs_ganesha_sanity.py
@@ -69,16 +69,16 @@ class TestNfsGaneshaSanity(NfsGaneshaVolumeBaseClass):
         # Running kernel untar now, single loop for the sanity test
g.log.info("Running kernel untars now")
for mount_obj in self.mounts:
- cmd = ("cd %s ;mkdir $(hostname);cd $(hostname);"
- "wget https://www.kernel.org/pub/linux/kernel/v2.6"
- "/linux-2.6.1.tar.gz;"
- "tar xvf linux-2.6.1.tar.gz" % (mount_obj.mountpoint))
- ret, out, err = g.run(mount_obj.client_system, cmd)
- if ret == 0:
-                g.log.info("Successfully untarred the tarball!")
-            else:
-                g.log.error("ERROR! Kernel untar errored out!")
- self.assertEqual(ret, 0, "Kernel untar failed!")
+ cmd = ("cd %s ;mkdir $(hostname);cd $(hostname);"
+ "wget https://www.kernel.org/pub/linux/kernel/v2.6"
+ "/linux-2.6.1.tar.gz;"
+ "tar xvf linux-2.6.1.tar.gz" % mount_obj.mountpoint)
+ ret, out, err = g.run(mount_obj.client_system, cmd)
+ if ret == 0:
+                g.log.info("Successfully untarred the tarball!")
+            else:
+                g.log.error("ERROR! Kernel untar errored out!")
+ self.assertEqual(ret, 0, "Kernel untar failed!")
# Check for crashes after kernel untar
g.log.info("Checking for Cluster Status after kernel untar")