summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorVitalii Koriakov <vkoriako@redhat.com>2019-01-25 11:56:17 +0200
committerVijay Avuthu <vavuthu@redhat.com>2019-01-25 12:07:16 +0000
commit182610940d242457b8718438924b586a54e40159 (patch)
tree3d9e9bc66ba601586e1a6652547316b4f476e584
parent1438fe88f709e035eb311ab052067e39e31d7770 (diff)
Delete test_client_side_quorum_with_auto_option and test_client_side_quorum_with_auto_option_overwrite_fixed
Change-Id: I5e22228eaf8574f2ccb1ae38cb98ec01e6493fdf Signed-off-by: Vitalii Koriakov <vkoriako@redhat.com>
-rw-r--r--tests/functional/afr/test_client_side_quorum.py360
1 files changed, 0 insertions, 360 deletions
diff --git a/tests/functional/afr/test_client_side_quorum.py b/tests/functional/afr/test_client_side_quorum.py
index 46f944d..66f2ee2 100644
--- a/tests/functional/afr/test_client_side_quorum.py
+++ b/tests/functional/afr/test_client_side_quorum.py
@@ -23,7 +23,6 @@ from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_libs import (
set_volume_options, get_subvols)
-from glustolibs.gluster.volume_ops import get_volume_options
from glustolibs.misc.misc_libs import upload_scripts
from glustolibs.gluster.brick_libs import (bring_bricks_offline,
bring_bricks_online)
@@ -88,214 +87,6 @@ class ClientSideQuorumTests(GlusterBaseClass):
GlusterBaseClass.tearDown.im_func(self)
- def test_client_side_quorum_with_auto_option(self):
- """
- Test Script to verify the Client Side Quorum with auto option
-
- * set cluster.quorum-type to auto.
- * start I/O from the mount point.
- * kill 2 of the brick process from the each and every replica set
- * perform ops
-
- """
- # pylint: disable=too-many-branches,too-many-statements
- # set cluster.quorum-type to auto
- options = {"cluster.quorum-type": "auto"}
- g.log.info("setting cluster.quorum-type to auto on "
- "volume %s", self.volname)
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertTrue(ret, ("Unable to set volume option %s for"
- "volume %s" % (options, self.volname)))
- g.log.info("Successfully set %s for volume %s", options, self.volname)
-
- # write files on all mounts
- g.log.info("Starting IO on all mounts...")
- g.log.info("mounts: %s", self.mounts)
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s create_files "
- "-f 10 --base-file-name file %s" % (self.script_upload_path,
- mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "IO failed on some of the clients"
- )
-
- # get the subvolumes
- g.log.info("Starting to get sub-volumes for volume %s", self.volname)
- subvols_dict = get_subvols(self.mnode, self.volname)
- num_subvols = len(subvols_dict['volume_subvols'])
- g.log.info("Number of subvolumes in volume %s:", num_subvols)
-
- # bring bricks offline( 2 bricks ) for all the subvolumes
- for i in range(0, num_subvols):
- subvol_brick_list = subvols_dict['volume_subvols'][i]
- g.log.info("sub-volume %s brick list : %s", i, subvol_brick_list)
- # For volume type: 1 * 2, bring 1 brick offline
- if len(subvol_brick_list) == 2:
- bricks_to_bring_offline = subvol_brick_list[0:1]
- else:
- bricks_to_bring_offline = subvol_brick_list[0:2]
- g.log.info("Going to bring down the brick process "
- "for %s", bricks_to_bring_offline)
- ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
- self.assertTrue(ret, ("Failed to bring down the bricks. Please "
- "check the log file for more details."))
- g.log.info("Brought down the brick process "
- "for %s successfully", bricks_to_bring_offline)
-
- # create 2 files named newfile0.txt and newfile1.txt
- g.log.info("Start creating 2 files on all mounts...")
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s create_files "
- "-f 2 --base-file-name newfile %s" %
- (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- g.log.info("Validating whether IO failed with read-only filesystem")
- ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
- self.mounts)
- self.assertTrue(ret, ("Unexpected error and IO successful"
- " on read-only filesystem"))
- g.log.info("EXPECTED: Read-only file system in IO while creating file")
-
- # create directory user1
- g.log.info("Start creating directory on all mounts...")
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s create_deep_dir "
- "%s" % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- g.log.info("Validating whether IO failed with read-only filesystem")
- ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
- self.mounts)
- self.assertTrue(ret, ("Unexpected error and IO successful"
- " on read-only filesystem"))
- g.log.info("EXPECTED: Read-only file system in IO while"
- " creating directory")
-
- # create h/w link to file
- g.log.info("Start creating hard link for file0.txt on all mounts")
- for mount_obj in self.mounts:
- cmd = "ln %s/file0.txt %s/file0.txt_hwlink" \
- % (mount_obj.mountpoint, mount_obj.mountpoint)
- ret, _, err = g.run(mount_obj.client_system, cmd)
- self.assertTrue(ret, ("Unexpected error and creating hard link"
- " successful on read-only filesystem"))
- self.assertIn("Read-only file system",
- err, "Read-only filesystem not found in "
- "IO while truncating file")
- g.log.info("EXPECTED: Read-only file system in IO")
-
- # create s/w link
- g.log.info("Start creating soft link for file1.txt on all mounts")
- for mount_obj in self.mounts:
- cmd = "ln -s %s/file1.txt %s/file1.txt_swlink" %\
- (mount_obj.mountpoint, mount_obj.mountpoint)
- ret, _, err = g.run(mount_obj.client_system, cmd)
- self.assertTrue(ret, ("Unexpected error and creating soft link"
- " successful on read-only filesystem"))
- self.assertIn("Read-only file system",
- err, "Read-only filesystem not found in "
- "IO while truncating file")
- g.log.info("EXPECTED: Read-only file system in IO")
-
- # append to file
- g.log.info("Appending to file1.txt on all mounts")
- for mount_obj in self.mounts:
- cmd = "cat %s/file0.txt >> %s/file1.txt" %\
- (mount_obj.mountpoint, mount_obj.mountpoint)
- ret, _, err = g.run(mount_obj.client_system, cmd)
- self.assertTrue(ret, ("Unexpected error and append successful"
- " on read-only filesystem"))
- self.assertIn("Read-only file system",
- err, "Read-only filesystem not found in "
- "IO while truncating file")
- g.log.info("EXPECTED: Read-only file system in IO")
-
- # modify the file
- g.log.info("Modifying file1.txt on all mounts")
- for mount_obj in self.mounts:
- cmd = "echo 'Modify Contents' > %s/file1.txt"\
- % (mount_obj.mountpoint)
- ret, _, err = g.run(mount_obj.client_system, cmd)
- self.assertTrue(ret, ("Unexpected error and modifying successful"
- " on read-only filesystem"))
- self.assertIn("Read-only file system",
- err, "Read-only filesystem not found in "
- "IO while truncating file")
- g.log.info("EXPECTED: Read-only file system in IO")
-
- # truncate the file
- g.log.info("Truncating file1.txt on all mounts")
- for mount_obj in self.mounts:
- cmd = "truncate -s 0 %s/file1.txt" % (mount_obj.mountpoint)
- ret, _, err = g.run(mount_obj.client_system, cmd)
- self.assertTrue(ret, ("Unexpected error and truncating file"
- " successful on read-only filesystem"))
- self.assertIn("Read-only file system",
- err, "Read-only filesystem not found in "
- "IO while truncating file")
- g.log.info("EXPECTED: Read-only file system in IO")
-
- # read the file
- g.log.info("Starting reading files on all mounts")
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s read "
- "%s" % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "Reads failed on some of the clients"
- )
-
- # stat on file
- g.log.info("stat on file1.txt on all mounts")
- for mount_obj in self.mounts:
- cmd = "stat %s/file1.txt" % (mount_obj.mountpoint)
- ret, _, err = g.run(mount_obj.client_system, cmd)
- self.assertFalse(ret, ("Unexpected error and stat on file fails"
- " on read-only filesystem"))
- g.log.info("stat on file is successful on read-only filesystem")
-
- # stat on dir
- g.log.info("stat on directory on all mounts")
- for mount_obj in self.mounts:
- cmd = ("python %s stat %s"
- % (self.script_upload_path, mount_obj.mountpoint))
- ret, _, err = g.run(mount_obj.client_system, cmd)
- self.assertFalse(ret, ("Unexpected error and stat on directory"
- " fails on read-only filesystem"))
- g.log.info("stat on dir is successful on read-only filesystem")
-
- # ls on mount point
- g.log.info("ls on mount point on all mounts")
- for mount_obj in self.mounts:
- cmd = ("python %s ls %s"
- % (self.script_upload_path, mount_obj.mountpoint))
- ret, _, err = g.run(mount_obj.client_system, cmd)
- self.assertFalse(ret, ("Unexpected error and listing file fails"
- " on read-only filesystem"))
- g.log.info("listing files is successful on read-only filesystem")
-
def test_client_side_quorum_with_fixed_validate_max_bricks(self):
"""
Test Script with Client Side Quorum with fixed should validate
@@ -344,157 +135,6 @@ class ClientSideQuorumTests(GlusterBaseClass):
"quorum-count should be less than number of bricks "
"in replica set", options, self.volname)
- def test_client_side_quorum_with_auto_option_overwrite_fixed(self):
- """
- Test Script to verify the Client Side Quorum with auto option
-
- * check the default value of cluster.quorum-type
- * try to set any junk value to cluster.quorum-type
- other than {none,auto,fixed}
- * check the default value of cluster.quorum-count
- * set cluster.quorum-type to fixed and cluster.quorum-count to 1
- * start I/O from the mount point
- * kill 2 of the brick process from the each replica set.
- * set cluster.quorum-type to auto
-
- """
- # pylint: disable=too-many-locals,too-many-lines,too-many-statements
- # check the default value of cluster.quorum-type
- option = "cluster.quorum-type"
- g.log.info("Getting %s for the volume %s", option, self.volname)
- option_dict = get_volume_options(self.mnode, self.volname, option)
- self.assertIsNotNone(option_dict, ("Failed to get %s volume option"
- " for volume %s"
- % (option, self.volname)))
- self.assertEqual(option_dict['cluster.quorum-type'], 'auto',
- ("Default value for %s is not auto"
- " for volume %s" % (option, self.volname)))
- g.log.info("Succesfully verified default value of %s for volume %s",
- option, self.volname)
-
- # set the junk value to cluster.quorum-type
- junk_values = ["123", "abcd", "fixxed", "Aauto"]
- for each_junk_value in junk_values:
- options = {"cluster.quorum-type": "%s" % each_junk_value}
- g.log.info("setting %s for the volume "
- "%s", options, self.volname)
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertFalse(ret, ("Able to set junk value %s for "
- "volume %s" % (options, self.volname)))
- g.log.info("Expected: Unable to set junk value %s "
- "for volume %s", options, self.volname)
-
- # check the default value of cluster.quorum-count
- option = "cluster.quorum-count"
- g.log.info("Getting %s for the volume %s", option, self.volname)
- option_dict = get_volume_options(self.mnode, self.volname, option)
- self.assertIsNotNone(option_dict, ("Failed to get %s volume option"
- " for volume %s"
- % (option, self.volname)))
- self.assertEqual(option_dict['cluster.quorum-count'], '(null)',
- ("Default value for %s is not null"
- " for volume %s" % (option, self.volname)))
- g.log.info("Successful in getting %s for the volume %s",
- option, self.volname)
-
- # set cluster.quorum-type to fixed and cluster.quorum-count to 1
- options = {"cluster.quorum-type": "fixed",
- "cluster.quorum-count": "1"}
- g.log.info("setting %s for the volume %s", options, self.volname)
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertTrue(ret, ("Unable to set %s for volume %s"
- % (options, self.volname)))
- g.log.info("Successfully set %s for volume %s",
- options, self.volname)
-
- # create files
- g.log.info("Starting IO on all mounts...")
- g.log.info("mounts: %s", self.mounts)
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s create_files "
- "-f 10 --base-file-name file %s"
- % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "IO failed on some of the clients"
- )
-
- # get the subvolumes
- g.log.info("starting to get subvolumes for volume %s", self.volname)
- subvols_dict = get_subvols(self.mnode, self.volname)
- num_subvols = len(subvols_dict['volume_subvols'])
- g.log.info("Number of subvolumes in volume %s is %s",
- self.volname, num_subvols)
-
- # bring bricks offline( 2 bricks ) for all the subvolumes
- for i in range(0, num_subvols):
- subvol_brick_list = subvols_dict['volume_subvols'][i]
- g.log.info("sub-volume %s brick list : %s",
- i, subvol_brick_list)
- bricks_to_bring_offline = subvol_brick_list[0:2]
- g.log.info("Going to bring down the brick process "
- "for %s", bricks_to_bring_offline)
- ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
- self.assertTrue(ret, ("Failed to bring down the bricks. Please "
- "check the log file for more details."))
- g.log.info("Brought down the brick process "
- "for %s successfully", bricks_to_bring_offline)
-
- # create files
- g.log.info("Starting IO on all mounts...")
- g.log.info("mounts: %s", self.mounts)
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s create_files "
- "-f 10 --base-file-name second_file %s"
- % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # Validate IO
- self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "IO failed on some of the clients"
- )
-
- # set cluster.quorum-type to auto
- options = {"cluster.quorum-type": "auto"}
- g.log.info("setting %s for volume %s", options, self.volname)
- ret = set_volume_options(self.mnode, self.volname, options)
- self.assertTrue(ret, ("Unable to set volume option %s for "
- "volume %s" % (options, self.volname)))
- g.log.info("Successfully set %s for volume %s",
- options, self.volname)
-
- # create files
- g.log.info("Starting IO on all mounts...")
- g.log.info("mounts: %s", self.mounts)
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s create_deep_dirs_with_files --dir-depth 2 "
- "--dir-length 2 --max-num-of-dirs 3 --num-of-files 7 %s"
- % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
-
- # check IO failed with Read Only File System error
- g.log.info("Wait for IO to complete and validate IO.....")
- ret, _ = is_io_procs_fail_with_rofs(self, all_mounts_procs,
- self.mounts)
- self.assertTrue(ret, ("Unexpected error and IO successful "
- "on Read-only file system. Please check the "
- "logs for more details"))
- g.log.info("EXPECTED : Read-only file system in IO while "
- "creating files")
-
@runs_on([['replicated', 'distributed-replicated'], ['glusterfs']])
class ClientSideQuorumTestsWithSingleVolumeCross3(GlusterBaseClass):