Diffstat (limited to 'tests/functional/arbiter/brick_cases')
-rwxr-xr-x  tests/functional/arbiter/brick_cases/test_brickcases.py                             | 177
-rwxr-xr-x  tests/functional/arbiter/brick_cases/test_cyclic_brick_kill_list.py                 |  16
-rw-r--r--  tests/functional/arbiter/brick_cases/test_impact_of_replace_brick_for_glustershd.py |  21
-rwxr-xr-x  tests/functional/arbiter/brick_cases/test_replica3_to_arbiter.py                    | 139
-rwxr-xr-x  tests/functional/arbiter/brick_cases/test_rmvrf_files.py                            |  28
5 files changed, 180 insertions(+), 201 deletions(-)
diff --git a/tests/functional/arbiter/brick_cases/test_brickcases.py b/tests/functional/arbiter/brick_cases/test_brickcases.py
index ec5ac0a0e..766012bd5 100755
--- a/tests/functional/arbiter/brick_cases/test_brickcases.py
+++ b/tests/functional/arbiter/brick_cases/test_brickcases.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -19,20 +19,20 @@
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import (GlusterBaseClass, runs_on)
from glustolibs.gluster.volume_libs import (
- log_volume_info_and_status, replace_brick_from_volume,
- expand_volume, wait_for_volume_process_to_be_online,
- verify_all_process_of_volume_are_online, shrink_volume)
+ replace_brick_from_volume, expand_volume, shrink_volume,
+ wait_for_volume_process_to_be_online,
+ verify_all_process_of_volume_are_online)
from glustolibs.gluster.rebalance_ops import (
rebalance_start, rebalance_status, wait_for_rebalance_to_complete)
-from glustolibs.gluster.heal_libs import monitor_heal_completion
+from glustolibs.gluster.heal_libs import (monitor_heal_completion,
+ is_heal_complete)
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.io.utils import (validate_io_procs,
- list_all_files_and_dirs_mounts,
wait_for_io_to_complete)
from glustolibs.misc.misc_libs import upload_scripts
-@runs_on([['replicated', 'distributed-replicated'],
+@runs_on([['arbiter', 'distributed-arbiter'],
['glusterfs', 'cifs', 'nfs']])
class GlusterArbiterVolumeTypeChangeClass(GlusterBaseClass):
"""Class for testing Volume Type Change from replicated to
@@ -41,32 +41,14 @@ class GlusterArbiterVolumeTypeChangeClass(GlusterBaseClass):
@classmethod
def setUpClass(cls):
# Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
-
- # Overriding the volume type to specifically test the volume type
- # change from replicated to arbiter
- if cls.volume_type == "replicated":
- cls.volume['voltype'] = {
- 'type': 'replicated',
- 'replica_count': 2,
- 'dist_count': 1,
- 'transport': 'tcp'}
-
- if cls.volume_type == "distributed-replicated":
- cls.volume['voltype'] = {
- 'type': 'distributed-replicated',
- 'dist_count': 2,
- 'replica_count': 2,
- 'transport': 'tcp'}
+ cls.get_super_method(cls, 'setUpClass')()
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on mounts",
cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, [script_local_path])
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s"
% cls.clients)
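
[Note] On the recurring im_func change: Python 3 removed unbound methods, so
GlusterBaseClass.setUpClass.im_func(cls) only works on Python 2, and
get_super_method is the framework's version-agnostic replacement. A minimal
standalone sketch of the idea (illustrative names, not the glustolibs
implementation):

    class Base(object):
        @classmethod
        def setUpClass(cls):
            print("base setUpClass for", cls.__name__)

    class Child(Base):
        @classmethod
        def setUpClass(cls):
            # Py2-only spelling was Base.setUpClass.im_func(cls);
            # this form works on both Python 2 and 3:
            super(Child, cls).setUpClass()

    Child.setUpClass()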
@@ -91,8 +73,8 @@ class GlusterArbiterVolumeTypeChangeClass(GlusterBaseClass):
"""
- Setup Volume and Mount Volume
"""
- # Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(self)
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
self.all_mounts_procs = []
self.io_validation_complete = False
@@ -120,13 +102,6 @@ class GlusterArbiterVolumeTypeChangeClass(GlusterBaseClass):
raise ExecutionError("IO failed on some of the clients")
g.log.info("IO is successful on all mounts")
- # List all files and dirs created
- g.log.info("List all files and directories:")
- ret = list_all_files_and_dirs_mounts(self.mounts)
- if not ret:
- raise ExecutionError("Failed to list all files and dirs")
- g.log.info("Listing all files and directories is successful")
-
# Unmount Volume and Cleanup Volume
g.log.info("Starting to Unmount Volume and Cleanup Volume")
ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
@@ -135,7 +110,7 @@ class GlusterArbiterVolumeTypeChangeClass(GlusterBaseClass):
g.log.info("Successful in Unmount Volume and Cleanup Volume")
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_replicated_to_arbiter_volume_change_with_volume_ops(self):
"""
@@ -147,29 +122,25 @@ class GlusterArbiterVolumeTypeChangeClass(GlusterBaseClass):
# pylint: disable=too-many-statements
# Start IO on mounts
- g.log.info("Starting IO on all mounts...")
self.all_mounts_procs = []
- for mount_obj in self.mounts:
- g.log.info("Starting IO on %s:%s", mount_obj.client_system,
- mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
- "--dirname-start-num %d "
- "--dir-depth 2 "
- "--dir-length 15 "
- "--max-num-of-dirs 5 "
- "--num-of-files 5 %s" % (self.script_upload_path,
- self.counter,
- mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- self.all_mounts_procs.append(proc)
- self.counter = self.counter + 10
+ g.log.info("Starting IO on %s:%s", self.mounts[0].client_system,
+ self.mounts[0].mountpoint)
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
+ "--dirname-start-num 10 --dir-depth 1 --dir-length 1 "
+ "--max-num-of-dirs 1 --num-of-files 5 %s" % (
+ self.script_upload_path,
+ self.mounts[0].mountpoint))
+ proc = g.run_async(self.mounts[0].client_system, cmd,
+ user=self.mounts[0].user)
+ self.all_mounts_procs.append(proc)
self.io_validation_complete = False
# Validate IO
- ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.assertTrue(
+ validate_io_procs(self.all_mounts_procs, self.mounts[0]),
+ "IO failed on some of the clients"
+ )
self.io_validation_complete = True
- self.assertTrue(ret, "IO failed on some of the clients")
# Adding bricks to make an Arbiter Volume
g.log.info("Adding bricks to convert to Arbiter Volume")
@@ -180,16 +151,6 @@ class GlusterArbiterVolumeTypeChangeClass(GlusterBaseClass):
g.log.info("Changing volume to arbiter volume is successful %s",
self.volname)
- # Log Volume Info and Status after changing the volume type from
- # replicated to arbitered
- g.log.info("Logging volume info and Status after changing to "
- "arbitered volume")
- ret = log_volume_info_and_status(self.mnode, self.volname)
- self.assertTrue(ret, ("Logging volume info and status failed on "
- "volume %s", self.volname))
- g.log.info("Successful in logging volume info and status of volume %s",
- self.volname)
-
# Wait for volume processes to be online
g.log.info("Wait for volume processes to be online")
ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
@@ -215,34 +176,25 @@ class GlusterArbiterVolumeTypeChangeClass(GlusterBaseClass):
g.log.info("self-heal is successful after changing the volume type "
"from replicated to arbitered volume")
+ # Check if heal is completed
+ ret = is_heal_complete(self.mnode, self.volname)
+ self.assertTrue(ret, 'Heal is not complete')
+ g.log.info('Heal is completed successfully')
+
# Start IO on mounts
- g.log.info("Starting IO on all mounts...")
self.all_mounts_procs = []
- for mount_obj in self.mounts:
- g.log.info("Starting IO on %s:%s", mount_obj.client_system,
- mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
- "--dirname-start-num %d "
- "--dir-depth 2 "
- "--dir-length 35 "
- "--max-num-of-dirs 5 "
- "--num-of-files 5 %s" % (self.script_upload_path,
- self.counter,
- mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- self.all_mounts_procs.append(proc)
- self.counter = self.counter + 10
+ g.log.info("Starting IO on %s:%s", self.mounts[0].client_system,
+ self.mounts[0].mountpoint)
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
+ "--dirname-start-num 10 --dir-depth 1 --dir-length 1 "
+ "--max-num-of-dirs 1 --num-of-files 5 %s" % (
+ self.script_upload_path,
+ self.mounts[0].mountpoint))
+ proc = g.run_async(self.mounts[0].client_system, cmd,
+ user=self.mounts[0].user)
+ self.all_mounts_procs.append(proc)
self.io_validation_complete = False
- # Log Volume Info and Status before expanding the volume.
- g.log.info("Logging volume info and Status before expanding volume")
- ret = log_volume_info_and_status(self.mnode, self.volname)
- self.assertTrue(ret, ("Logging volume info and status failed on "
- "volume %s", self.volname))
- g.log.info("Successful in logging volume info and status of volume %s",
- self.volname)
-
# Start add-brick (subvolume-increase)
g.log.info("Start adding bricks to volume when IO in progress")
ret = expand_volume(self.mnode, self.volname, self.servers,
@@ -252,14 +204,6 @@ class GlusterArbiterVolumeTypeChangeClass(GlusterBaseClass):
g.log.info("Expanding volume when IO in progress is successful on "
"volume %s", self.volname)
- # Log Volume Info and Status after expanding the volume
- g.log.info("Logging volume info and Status after expanding volume")
- ret = log_volume_info_and_status(self.mnode, self.volname)
- self.assertTrue(ret, ("Logging volume info and status failed on "
- "volume %s", self.volname))
- g.log.info("Successful in logging volume info and status of volume %s",
- self.volname)
-
# Wait for volume processes to be online
g.log.info("Wait for volume processes to be online")
ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
@@ -295,15 +239,6 @@ class GlusterArbiterVolumeTypeChangeClass(GlusterBaseClass):
g.log.info("Rebalance is successfully complete on the volume %s",
self.volname)
- # Log Volume Info and Status before replacing brick from the volume.
- g.log.info("Logging volume info and Status before replacing brick "
- "from the volume %s", self.volname)
- ret = log_volume_info_and_status(self.mnode, self.volname)
- self.assertTrue(ret, ("Logging volume info and status failed on "
- "volume %s", self.volname))
- g.log.info("Successful in logging volume info and status of volume %s",
- self.volname)
-
# Replace brick from a sub-volume
g.log.info("Replace a faulty brick from the volume")
ret = replace_brick_from_volume(self.mnode, self.volname,
@@ -311,15 +246,6 @@ class GlusterArbiterVolumeTypeChangeClass(GlusterBaseClass):
self.assertTrue(ret, "Failed to replace faulty brick from the volume")
g.log.info("Successfully replaced faulty brick from the volume")
- # Log Volume Info and Status after replacing the brick
- g.log.info("Logging volume info and Status after replacing brick "
- "from the volume %s", self.volname)
- ret = log_volume_info_and_status(self.mnode, self.volname)
- self.assertTrue(ret, ("Logging volume info and status failed on "
- "volume %s", self.volname))
- g.log.info("Successful in logging volume info and status of volume %s",
- self.volname)
-
# Wait for volume processes to be online
g.log.info("Wait for volume processes to be online")
ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
@@ -343,13 +269,10 @@ class GlusterArbiterVolumeTypeChangeClass(GlusterBaseClass):
"current test workload")
g.log.info("self-heal is successful after replace-brick operation")
- # Log Volume Info and Status before shrinking the volume.
- g.log.info("Logging volume info and Status before shrinking volume")
- ret = log_volume_info_and_status(self.mnode, self.volname)
- self.assertTrue(ret, ("Logging volume info and status failed on "
- "volume %s", self.volname))
- g.log.info("Successful in logging volume info and status of volume %s",
- self.volname)
+ # Check if heal is completed
+ ret = is_heal_complete(self.mnode, self.volname)
+ self.assertTrue(ret, 'Heal is not complete')
+ g.log.info('Heal is completed successfully')
# Shrinking volume by removing bricks from volume when IO in progress
g.log.info("Start removing bricks from volume when IO in progress")
@@ -359,14 +282,6 @@ class GlusterArbiterVolumeTypeChangeClass(GlusterBaseClass):
g.log.info("Shrinking volume when IO in progress is successful on "
"volume %s", self.volname)
- # Log Volume Info and Status after shrinking the volume
- g.log.info("Logging volume info and Status after shrinking volume")
- ret = log_volume_info_and_status(self.mnode, self.volname)
- self.assertTrue(ret, ("Logging volume info and status failed on "
- "volume %s", self.volname))
- g.log.info("Successful in logging volume info and status of volume %s",
- self.volname)
-
# Wait for volume processes to be online
g.log.info("Wait for volume processes to be online")
ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
@@ -386,7 +301,7 @@ class GlusterArbiterVolumeTypeChangeClass(GlusterBaseClass):
# Validate IO
self.assertTrue(
- validate_io_procs(self.all_mounts_procs, self.mounts),
+ validate_io_procs(self.all_mounts_procs, self.mounts[0]),
"IO failed on some of the clients"
)
self.io_validation_complete = True
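
[Note] With the logging detours removed, the remaining volume operations in
this test run in a fixed order while IO is in flight. A compressed sketch
using only calls visible in this diff (arguments abbreviated, library
defaults assumed):

    from glustolibs.gluster.volume_libs import (
        expand_volume, replace_brick_from_volume, shrink_volume,
        wait_for_volume_process_to_be_online)
    from glustolibs.gluster.rebalance_ops import (
        rebalance_start, wait_for_rebalance_to_complete)
    from glustolibs.gluster.heal_libs import (monitor_heal_completion,
                                              is_heal_complete)

    def exercise_volume_ops(mnode, volname, servers, all_servers_info):
        # add-brick (subvolume increase), then wait for brick processes
        assert expand_volume(mnode, volname, servers, all_servers_info)
        assert wait_for_volume_process_to_be_online(mnode, volname)
        # rebalance data onto the new subvolume
        rebalance_start(mnode, volname)
        assert wait_for_rebalance_to_complete(mnode, volname)
        # replace one brick and confirm self-heal finishes
        assert replace_brick_from_volume(mnode, volname, servers,
                                         all_servers_info)
        assert monitor_heal_completion(mnode, volname)
        assert is_heal_complete(mnode, volname)
        # remove-brick (subvolume decrease)
        assert shrink_volume(mnode, volname)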
diff --git a/tests/functional/arbiter/brick_cases/test_cyclic_brick_kill_list.py b/tests/functional/arbiter/brick_cases/test_cyclic_brick_kill_list.py
index 8dbf0bcac..642c6f011 100755
--- a/tests/functional/arbiter/brick_cases/test_cyclic_brick_kill_list.py
+++ b/tests/functional/arbiter/brick_cases/test_cyclic_brick_kill_list.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -15,7 +15,9 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import time
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_libs import log_volume_info_and_status
from glustolibs.gluster.brick_libs import (
@@ -41,16 +43,14 @@ class ListMount(GlusterBaseClass):
@classmethod
def setUpClass(cls):
# Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on mounts",
cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "fd_writes.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"fd_writes.py")
- ret = upload_scripts(cls.clients, [script_local_path])
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s"
% cls.clients)
@@ -73,7 +73,7 @@ class ListMount(GlusterBaseClass):
def setUp(self):
# Calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
self.all_mounts_procs = []
self.io_validation_complete = False
@@ -117,7 +117,7 @@ class ListMount(GlusterBaseClass):
g.log.info("Successful in Unmount Volume and Cleanup Volume")
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_files_on_mount(self):
"""""
@@ -134,7 +134,7 @@ class ListMount(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s "
+ cmd = ("/usr/bin/env python %s "
"--file-sizes-list 1G "
"--chunk-sizes-list 128 "
"--write-time 900 "
diff --git a/tests/functional/arbiter/brick_cases/test_impact_of_replace_brick_for_glustershd.py b/tests/functional/arbiter/brick_cases/test_impact_of_replace_brick_for_glustershd.py
index a8a222c24..56d2edbd9 100644
--- a/tests/functional/arbiter/brick_cases/test_impact_of_replace_brick_for_glustershd.py
+++ b/tests/functional/arbiter/brick_cases/test_impact_of_replace_brick_for_glustershd.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2016-2017 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2016-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -30,7 +30,7 @@ from glustolibs.gluster.heal_libs import (get_self_heal_daemon_pid,
is_shd_daemonized)
-@runs_on([['replicated', 'distributed-replicated'],
+@runs_on([['arbiter', 'distributed-arbiter'],
['glusterfs', 'nfs']])
class ImpactOfReplaceBrickForGlustershdTests(GlusterBaseClass):
"""
@@ -41,18 +41,7 @@ class ImpactOfReplaceBrickForGlustershdTests(GlusterBaseClass):
@classmethod
def setUpClass(cls):
# Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
-
- # Override Volumes
- if cls.volume_type == "distributed-replicated":
- # Define distributed-replicated volume
- cls.volume['voltype'] = {
- 'type': 'distributed-replicated',
- 'dist_count': 2,
- 'replica_count': 3,
- 'arbiter_count': 1,
- 'transport': 'tcp'}
-
+ cls.get_super_method(cls, 'setUpClass')()
cls.glustershd = "/var/lib/glusterd/glustershd/glustershd-server.vol"
def setUp(self):
@@ -61,7 +50,7 @@ class ImpactOfReplaceBrickForGlustershdTests(GlusterBaseClass):
"""
# calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
self.all_mounts_procs = []
self.io_validation_complete = False
@@ -88,7 +77,7 @@ class ImpactOfReplaceBrickForGlustershdTests(GlusterBaseClass):
g.log.info("Successful in umounting the volume and Cleanup")
# Calling GlusterBaseClass teardown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_impact_of_replace_brick_for_glustershd(self):
# pylint: disable=too-many-statements,too-many-branches,too-many-locals
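
[Note] With 'arbiter' and 'distributed-arbiter' available as first-class
volume types, the per-class voltype overrides deleted above become
unnecessary; the decorator alone drives the test matrix. Usage as in this
diff (class name illustrative):

    from glustolibs.gluster.gluster_base_class import (GlusterBaseClass,
                                                       runs_on)

    @runs_on([['arbiter', 'distributed-arbiter'],  # volume types to generate
              ['glusterfs', 'nfs']])               # mount protocols to pair
    class ExampleArbiterTest(GlusterBaseClass):
        pass

Each (volume type, mount protocol) combination is run as its own generated
test class.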
diff --git a/tests/functional/arbiter/brick_cases/test_replica3_to_arbiter.py b/tests/functional/arbiter/brick_cases/test_replica3_to_arbiter.py
index 33e92e9ee..24c014502 100755
--- a/tests/functional/arbiter/brick_cases/test_replica3_to_arbiter.py
+++ b/tests/functional/arbiter/brick_cases/test_replica3_to_arbiter.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2015-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2015-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -22,6 +22,12 @@ from glustolibs.gluster.volume_libs import (
expand_volume, wait_for_volume_process_to_be_online,
verify_all_process_of_volume_are_online, shrink_volume, get_subvols)
from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.io.utils import run_linux_untar
+from glustolibs.gluster.heal_libs import (monitor_heal_completion,
+ is_heal_complete,
+ is_volume_in_split_brain)
+from glustolibs.gluster.glusterdir import mkdir
+from glustolibs.gluster.heal_ops import trigger_heal
@runs_on([['replicated', 'distributed-replicated'],
@@ -30,45 +36,39 @@ class GlusterArbiterVolumeTypeClass(GlusterBaseClass):
"""Class for testing Volume Type Change from replicated to
Arbitered volume
"""
- @classmethod
- def setUpClass(cls):
- # Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
-
- # Overriding the volume type to specifically test the volume type
-
- if cls.volume_type == "replicated":
- cls.volume['voltype'] = {
- 'type': 'replicated',
- 'replica_count': 3,
- 'transport': 'tcp'}
-
def setUp(self):
"""
Setup Volume
"""
- # Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(self)
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Set I/O flag to false
+ self.is_io_running = False
# Setup Volume
- g.log.info("Starting to Setup Volume")
- ret = self.setup_volume()
+ g.log.info("Starting to Setup and Mount Volume")
+ # Creating Volume and mounting the volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
if not ret:
- raise ExecutionError("Failed to Setup_Volume")
- g.log.info("Successful in Setup Volume")
+ raise ExecutionError("Volume creation or mount failed: %s"
+ % self.volname)
self.subvols = get_subvols(self.mnode, self.volname)['volume_subvols']
def tearDown(self):
- # Cleanup Volume
- g.log.info("Starting to Unmount Volume and Cleanup Volume")
- ret = self.cleanup_volume()
+ # Wait for I/O if not completed
+ if self.is_io_running:
+ if not self._wait_for_untar_completion():
+ g.log.error("I/O failed to stop on clients")
+
+ # Unmounting and cleaning volume
+ ret = self.unmount_volume_and_cleanup_volume([self.mounts[0]])
if not ret:
- raise ExecutionError("Failed to Cleanup Volume")
- g.log.info("Successful Cleanup Volume")
+ raise ExecutionError("Unable to delete volume %s" % self.volname)
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
# Clearing bricks
for subvol in self.subvols:
@@ -80,10 +80,22 @@ class GlusterArbiterVolumeTypeClass(GlusterBaseClass):
g.log.info('Clearing brick %s is successful', brick)
g.log.info('Clearing for all brick is successful')
- def test_replicated_to_arbiter_volume(self):
+ def _wait_for_untar_completion(self):
+ """Wait for untar to complete"""
+ has_process_stopped = []
+ for proc in self.io_process:
+ try:
+ ret, _, _ = proc.async_communicate()
+ if not ret:
+ has_process_stopped.append(False)
+ has_process_stopped.append(True)
+ except ValueError:
+ has_process_stopped.append(True)
+ return all(has_process_stopped)
+
+ def _convert_replicated_to_arbiter_volume(self):
"""
- Description:-
- Reduce the replica count from replica 3 to arbiter
+ Helper method to convert a replicated volume to an arbiter volume.
"""
# pylint: disable=too-many-statements
# Remove brick to reduce the replica count from replica 3
@@ -112,7 +124,7 @@ class GlusterArbiterVolumeTypeClass(GlusterBaseClass):
g.log.info("Adding bricks to convert to Arbiter Volume")
replica_arbiter = {'replica_count': 1, 'arbiter_count': 1}
ret = expand_volume(self.mnode, self.volname, self.servers,
- self.all_servers_info, add_to_hot_tier=False,
+ self.all_servers_info, force=True,
**replica_arbiter)
self.assertTrue(ret, "Failed to expand the volume %s" % self.volname)
g.log.info("Changing volume to arbiter volume is successful %s",
@@ -132,3 +144,70 @@ class GlusterArbiterVolumeTypeClass(GlusterBaseClass):
self.assertTrue(ret, "Volume %s : All process are not online"
% self.volname)
g.log.info("Volume %s : All process are online", self.volname)
+
+ def test_replicated_to_arbiter_volume(self):
+ """
+ Description:-
+ Reduce the replica count from replica 3 to arbiter
+ """
+ # pylint: disable=too-many-statements
+ self._convert_replicated_to_arbiter_volume()
+
+ def test_replica_to_arbiter_volume_with_io(self):
+ """
+ Description: Replica 3 to arbiter conversion with ongoing IO's
+
+ Steps :
+ 1) Create a replica 3 volume and start volume.
+ 2) Set client side self heal off.
+ 3) Fuse mount the volume.
+ 4) Create directory dir1 and write data.
+ Example: untar linux tar from the client into the dir1
+ 5) When IO's is running, execute remove-brick command,
+ and convert replica 3 to replica 2 volume
+ 6) Execute add-brick command and convert to arbiter volume,
+ provide the path of new arbiter brick.
+ 7) Issue gluster volume heal.
+ 8) Heal should be completed with no files in split-brain.
+ """
+ """
+
+ # pylint: disable=too-many-statements
+ # Create a dir to start untar
+ self.linux_untar_dir = "{}/{}".format(self.mounts[0].mountpoint,
+ "linuxuntar")
+ ret = mkdir(self.clients[0], self.linux_untar_dir)
+ self.assertTrue(ret, "Failed to create dir linuxuntar for untar")
+
+ # Start linux untar on dir linuxuntar
+ self.io_process = run_linux_untar(self.clients[0],
+ self.mounts[0].mountpoint,
+ dirs=tuple(['linuxuntar']))
+ self.is_io_running = True
+
+ # Convert replicated to arbiter volume
+ self._convert_replicated_to_arbiter_volume()
+
+ # Wait for IO to complete.
+ ret = self._wait_for_untar_completion()
+ self.assertFalse(ret, "IO didn't complete or failed on client")
+ self.is_io_running = False
+
+ # Start healing
+ ret = trigger_heal(self.mnode, self.volname)
+ self.assertTrue(ret, 'Heal is not started')
+ g.log.info('Healing is started')
+
+ # Monitor heal completion
+ ret = monitor_heal_completion(self.mnode, self.volname,
+ timeout_period=3600)
+ self.assertTrue(ret, 'Heal has not yet completed')
+
+ # Check if heal is completed
+ ret = is_heal_complete(self.mnode, self.volname)
+ self.assertTrue(ret, 'Heal is not complete')
+ g.log.info('Heal is completed successfully')
+
+ # Check for split-brain
+ ret = is_volume_in_split_brain(self.mnode, self.volname)
+ self.assertFalse(ret, 'Volume is in split-brain state')
+ g.log.info('Volume is not in split-brain state')
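
[Note] The tail of the new test is a self-contained heal-verification
sequence; isolated below with the same calls and arguments as above:

    from glustolibs.gluster.heal_ops import trigger_heal
    from glustolibs.gluster.heal_libs import (monitor_heal_completion,
                                              is_heal_complete,
                                              is_volume_in_split_brain)

    def verify_heal(mnode, volname):
        # issue 'gluster volume heal <vol>', wait for pending entries to
        # drain, then confirm nothing is left pending or split-brained
        assert trigger_heal(mnode, volname), "Heal is not started"
        assert monitor_heal_completion(mnode, volname, timeout_period=3600)
        assert is_heal_complete(mnode, volname)
        assert not is_volume_in_split_brain(mnode, volname)

The long timeout_period allows for healing a full kernel-tree untar.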
diff --git a/tests/functional/arbiter/brick_cases/test_rmvrf_files.py b/tests/functional/arbiter/brick_cases/test_rmvrf_files.py
index 5d8e87ed5..8d7304b0b 100755
--- a/tests/functional/arbiter/brick_cases/test_rmvrf_files.py
+++ b/tests/functional/arbiter/brick_cases/test_rmvrf_files.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -15,6 +15,7 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from glusto.core import Glusto as g
+
from glustolibs.gluster.gluster_base_class import (GlusterBaseClass, runs_on)
from glustolibs.gluster.volume_libs import (
log_volume_info_and_status)
@@ -30,7 +31,7 @@ from glustolibs.io.utils import (validate_io_procs,
from glustolibs.misc.misc_libs import upload_scripts
-@runs_on([['replicated', 'distributed-replicated'],
+@runs_on([['arbiter', 'distributed-arbiter'],
['glusterfs', 'cifs', 'nfs']])
class TestRmrfMount(GlusterBaseClass):
"""
@@ -40,16 +41,14 @@ class TestRmrfMount(GlusterBaseClass):
@classmethod
def setUpClass(cls):
# Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on mounts",
cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, [script_local_path])
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s"
% cls.clients)
@@ -72,7 +71,7 @@ class TestRmrfMount(GlusterBaseClass):
def setUp(self):
# Calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
self.all_mounts_procs = []
self.io_validation_complete = False
@@ -110,7 +109,7 @@ class TestRmrfMount(GlusterBaseClass):
g.log.info("Successful in Unmount Volume and Cleanup Volume")
# Calling GlusterBaseClass tearDown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_self_heal(self):
"""
@@ -130,14 +129,14 @@ class TestRmrfMount(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
- cmd = ("python %s create_deep_dirs_with_files "
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
"--dir-length 35 "
"--max-num-of-dirs 5 "
- "--num-of-files 5 %s" % (self.script_upload_path,
- self.counter,
- mount_obj.mountpoint))
+ "--num-of-files 5 %s" % (
+ self.script_upload_path,
+ self.counter, mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
self.all_mounts_procs.append(proc)
@@ -146,10 +145,7 @@ class TestRmrfMount(GlusterBaseClass):
# Select bricks to bring offline
bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
self.mnode, self.volname))
- bricks_to_bring_offline = filter(None, (
- bricks_to_bring_offline_dict['hot_tier_bricks'] +
- bricks_to_bring_offline_dict['cold_tier_bricks'] +
- bricks_to_bring_offline_dict['volume_bricks']))
+ bricks_to_bring_offline = bricks_to_bring_offline_dict['volume_bricks']
# Killing one brick from the volume set
g.log.info("Bringing bricks: %s offline", bricks_to_bring_offline)