Diffstat (limited to 'tests/functional/dht')
-rw-r--r--  tests/functional/dht/test_accessing_file_when_dht_layout_is_stale.py                  | 181
-rw-r--r--  tests/functional/dht/test_add_brick_rebalance_revised.py                              |   2
-rw-r--r--  tests/functional/dht/test_add_brick_rebalance_with_self_heal_in_progress.py           | 136
-rw-r--r--  tests/functional/dht/test_add_brick_rebalance_with_symlink_pointing_out_of_gluster.py | 133
-rw-r--r--  tests/functional/dht/test_add_brick_replace_brick_fix_layout.py                       |   2
-rw-r--r--  tests/functional/dht/test_rebalance_multiple_expansions.py                            | 100
-rw-r--r--  tests/functional/dht/test_rebalance_multiple_shrinks.py                               |  87
-rw-r--r--  tests/functional/dht/test_rebalance_nested_dir.py                                     |  99
-rw-r--r--  tests/functional/dht/test_rebalance_peer_probe.py                                     | 130
-rw-r--r--  tests/functional/dht/test_rebalance_preserve_user_permissions.py                      | 112
-rw-r--r--  tests/functional/dht/test_rebalance_two_volumes.py                                     | 163
-rw-r--r--  tests/functional/dht/test_rebalance_with_acl_set_to_files.py                          | 129
-rw-r--r--  tests/functional/dht/test_time_taken_for_ls.py                                        | 105
-rw-r--r--  tests/functional/dht/test_verify_permissions_on_root_dir_when_brick_down.py           | 134
14 files changed, 1456 insertions, 57 deletions
diff --git a/tests/functional/dht/test_accessing_file_when_dht_layout_is_stale.py b/tests/functional/dht/test_accessing_file_when_dht_layout_is_stale.py
new file mode 100644
index 000000000..e7f89d84e
--- /dev/null
+++ b/tests/functional/dht/test_accessing_file_when_dht_layout_is_stale.py
@@ -0,0 +1,181 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.glusterdir import mkdir
+from glustolibs.gluster.glusterfile import get_fattr, set_fattr
+from glustolibs.gluster.volume_libs import get_subvols
+from glustolibs.io.utils import collect_mounts_arequal
+
+
+# pylint: disable=too-many-locals
+@runs_on([['distributed'], ['glusterfs']])
+class TestAccessFileStaleLayout(GlusterBaseClass):
+ def setUp(self):
+ self.get_super_method(self, 'setUp')()
+
+ self.volume['voltype']['dist_count'] = 2
+ ret = self.setup_volume_and_mount_volume(self.mounts)
+ if not ret:
+ raise ExecutionError('Failed to setup and mount volume')
+
+ def tearDown(self):
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError('Failed to umount and cleanup Volume')
+
+ self.get_super_method(self, 'tearDown')()
+
+ def _get_brick_node_and_path(self):
+ '''Yields list containing brick node and path from first brick of each
+ subvol
+ '''
+ subvols = get_subvols(self.mnode, self.volname)
+ for subvol in subvols['volume_subvols']:
+ subvol[0] += self.dir_path
+ yield subvol[0].split(':')
+
+ def _assert_file_lookup(self, node, fqpath, when, result):
+ '''Perform `stat` on `fqpath` from `node` and validate against `result`
+ '''
+ cmd = ('stat {}'.format(fqpath))
+ ret, _, _ = g.run(node, cmd)
+ assert_method = self.assertNotEqual
+ assert_msg = 'fail'
+ if result:
+ assert_method = self.assertEqual
+ assert_msg = 'pass'
+ assert_method(
+ ret, 0, 'Lookup on {} from {} should {} {} layout '
+ 'change'.format(fqpath, node, assert_msg, when))
+
+ def test_accessing_file_when_dht_layout_is_stale(self):
+ '''
+ Description : Checks if a file can be opened and accessed if the dht
+ layout has become stale.
+
+ Steps:
+ 1. Create, start and mount a volume consisting of 2 subvols on 2 clients
+ 2. Create a dir `dir` and file `dir/file` from client0
+ 3. Take note of layouts of `brick1`/dir and `brick2`/dir of the volume
+ 4. Validate a successful lookup of the file from only one brick path
+ 5. Re-assign layouts i.e., brick1/dir to brick2/dir and vice-versa
+ 6. Remove `dir/file` from client0 and recreate same file from client0
+ and client1
+ 7. Validate a successful lookup from only the other brick path (as the
+ layout has changed, the file creation path also changes)
+ 8. Validate that the checksums from both clients match
+ '''
+
+ # Will be used in _get_brick_node_and_path
+ self.dir_path = '/dir'
+
+ # Will be used in argument to _assert_file_lookup
+ file_name = '/file'
+
+ dir_path = self.mounts[0].mountpoint + self.dir_path
+ file_path = dir_path + file_name
+
+ client0, client1 = self.clients[0], self.clients[1]
+ fattr = 'trusted.glusterfs.dht'
+ io_cmd = ('cat /dev/urandom | tr -dc [:space:][:print:] | '
+ 'head -c 1K > {}'.format(file_path))
+
+ # Create a dir from client0
+ ret = mkdir(self.clients[0], dir_path)
+ self.assertTrue(ret, 'Unable to create a directory from mount point')
+
+ # Create a file with 1K of random data from client0
+ ret, _, _ = g.run(client0, io_cmd)
+ self.assertEqual(ret, 0, 'Failed to create a file on mount')
+
+ # Yields `node` and `brick-path` from first brick of each subvol
+ gen = self._get_brick_node_and_path()
+
+ # Take note of newly created directory's layout from org_subvol1
+ node1, fqpath1 = next(gen)
+ layout1 = get_fattr(node1, fqpath1, fattr)
+ self.assertIsNotNone(layout1,
+ '{} is not present on {}'.format(fattr, fqpath1))
+
+ # Lookup on file from node1 should fail as `dir/file` will always get
+ # hashed to node2 in a 2-brick distribute volume by default
+ self._assert_file_lookup(node1,
+ fqpath1 + file_name,
+ when='before',
+ result=False)
+
+ # Take note of newly created directory's layout from org_subvol2
+ node2, fqpath2 = next(gen)
+ layout2 = get_fattr(node2, fqpath2, fattr)
+ self.assertIsNotNone(layout2,
+ '{} is not present on {}'.format(fattr, fqpath2))
+
+ # Lookup on file from node2 should pass
+ self._assert_file_lookup(node2,
+ fqpath2 + file_name,
+ when='before',
+ result=True)
+
+ # Set org_subvol2 directory layout to org_subvol1 and vice-versa
+ for node, fqpath, layout, vol in ((node1, fqpath1, layout2, (2, 1)),
+ (node2, fqpath2, layout1, (1, 2))):
+ ret = set_fattr(node, fqpath, fattr, layout)
+ self.assertTrue(
+ ret, 'Failed to set layout of org_subvol{} on '
+ 'brick {} of org_subvol{}'.format(vol[0], fqpath, vol[1]))
+
+ # Remove file after layout change from client0
+ cmd = 'rm -f {}'.format(file_path)
+ ret, _, _ = g.run(client0, cmd)
+ self.assertEqual(ret, 0, 'Failed to delete file after layout change')
+
+ # Create file with same name as above after layout change from client0
+ # and client1
+ for client in (client0, client1):
+ ret, _, _ = g.run(client, io_cmd)
+ self.assertEqual(
+ ret, 0, 'Failed to create file from '
+ '{} after layout change'.format(client))
+
+ # After layout change lookup on file from node1 should pass
+ self._assert_file_lookup(node1,
+ fqpath1 + file_name,
+ when='after',
+ result=True)
+
+ # After layout change lookup on file from node2 should fail
+ self._assert_file_lookup(node2,
+ fqpath2 + file_name,
+ when='after',
+ result=False)
+
+ # Take note of checksum from client0 and client1
+ checksums = [None] * 2
+ for index, mount in enumerate(self.mounts):
+ ret, checksums[index] = collect_mounts_arequal(mount, dir_path)
+ self.assertTrue(
+ ret, 'Failed to get arequal on client {}'.format(
+ mount.client_system))
+
+ # Validate no checksum mismatch
+ self.assertEqual(checksums[0], checksums[1],
+ 'Checksum mismatch between client0 and client1')
+
+ g.log.info('Pass: Test accessing file on stale layout is complete.')
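The layout swap in the test above works by rewriting the trusted.glusterfs.dht extended attribute directly on the brick backends. A minimal sketch of just that step, reusing the get_fattr/set_fattr helpers the test already imports; the brick hostnames and backend paths below are illustrative placeholders, not values from the patch:

from glustolibs.gluster.glusterfile import get_fattr, set_fattr

FATTR = 'trusted.glusterfs.dht'

# Hypothetical first bricks of the two subvols of a 2-way distribute volume
brick1_node, brick1_dir = 'server1.example.com', '/bricks/brick1/dir'
brick2_node, brick2_dir = 'server2.example.com', '/bricks/brick2/dir'

# Read the layout range each brick currently advertises for the directory
layout1 = get_fattr(brick1_node, brick1_dir, FATTR)
layout2 = get_fattr(brick2_node, brick2_dir, FATTR)

# Swap the layouts so each brick advertises the other's hash range;
# clients still holding the old layout in cache now have a stale view
set_fattr(brick1_node, brick1_dir, FATTR, layout2)
set_fattr(brick2_node, brick2_dir, FATTR, layout1)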
diff --git a/tests/functional/dht/test_add_brick_rebalance_revised.py b/tests/functional/dht/test_add_brick_rebalance_revised.py
index 69d807d97..cc749f47a 100644
--- a/tests/functional/dht/test_add_brick_rebalance_revised.py
+++ b/tests/functional/dht/test_add_brick_rebalance_revised.py
@@ -109,7 +109,7 @@ class TestAddBrickRebalanceRevised(GlusterBaseClass):
("ln file_$i hardfile_$i",
"Failed to create hard links for files"))
- # Create 50 files on the mount point and create 40 hard links
+ # Create 50 files on the mount point and create 50 hard links
# for the files.
for operation, msg in ops:
self._run_command_50_times(operation, msg)
diff --git a/tests/functional/dht/test_add_brick_rebalance_with_self_heal_in_progress.py b/tests/functional/dht/test_add_brick_rebalance_with_self_heal_in_progress.py
new file mode 100644
index 000000000..6fb7fe4f0
--- /dev/null
+++ b/tests/functional/dht/test_add_brick_rebalance_with_self_heal_in_progress.py
@@ -0,0 +1,136 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from random import choice
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.brick_libs import get_all_bricks, bring_bricks_online
+from glustolibs.gluster.heal_libs import monitor_heal_completion
+from glustolibs.gluster.rebalance_ops import (
+ rebalance_start, wait_for_rebalance_to_complete)
+from glustolibs.gluster.volume_libs import expand_volume
+from glustolibs.io.utils import (collect_mounts_arequal, validate_io_procs,
+ wait_for_io_to_complete)
+from glustolibs.misc.misc_libs import kill_process
+
+
+@runs_on([['distributed-replicated', 'distributed-arbiter'], ['glusterfs']])
+class TestAddBrickRebalanceWithSelfHeal(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume
+ if not self.setup_volume_and_mount_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Setup and mount volume")
+
+ self.is_io_running = False
+
+ def tearDown(self):
+
+ # If I/O processes are running, wait for them to complete
+ if self.is_io_running:
+ if not wait_for_io_to_complete(self.list_of_io_processes,
+ [self.mounts[0]]):
+ raise ExecutionError("Failed to wait for I/O to complete")
+
+ if not self.unmount_volume_and_cleanup_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Cleanup Volume")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_add_brick_rebalance_with_self_heal_in_progress(self):
+ """
+ Test case:
+ 1. Create a volume, start it and mount it.
+ 2. Start creating a few files on mount point.
+ 3. While file creation is going on, kill one of the bricks
+ in the replica pair.
+ 4. After file creation is complete collect arequal checksum
+ on mount point.
+ 5. Bring back the brick online by starting volume with force.
+ 6. Check if all bricks are online and if heal is in progress.
+ 7. Add bricks to the volume and start rebalance.
+ 8. Wait for rebalance and heal to complete on volume.
+ 9. Collect arequal checksum on mount point and compare
+ it with the one taken in step 4.
+ """
+ # Start I/O from mount point and wait for it to complete
+ cmd = ("cd %s; for i in {1..1000} ; do "
+ "dd if=/dev/urandom of=file$i bs=10M count=1; done"
+ % self.mounts[0].mountpoint)
+ self.list_of_io_processes = [
+ g.run_async(self.mounts[0].client_system, cmd)]
+ self.is_io_running = True
+
+ # Get a list of all the bricks to kill brick
+ brick_list = get_all_bricks(self.mnode, self.volname)
+ self.assertIsNotNone(brick_list, "Failed to get the list of bricks")
+
+ # Kill the brick process of a randomly chosen brick
+ brick = choice(brick_list)
+ node, _ = brick.split(":")
+ ret = kill_process(node, process_names="glusterfsd")
+ self.assertTrue(ret, "Failed to kill brick process of brick %s"
+ % brick)
+
+ # Validate if I/O was successful or not.
+ ret = validate_io_procs(self.list_of_io_processes, self.mounts)
+ self.assertTrue(ret, "IO failed on some of the clients")
+ self.is_io_running = False
+
+ # Collect arequal checksum before ops
+ arequal_checksum_before = collect_mounts_arequal(self.mounts[0])
+
+ # Bring back the brick online by starting volume with force
+ ret = bring_bricks_online(self.mnode, self.volname, brick_list,
+ bring_bricks_online_methods=[
+ 'volume_start_force'])
+ self.assertTrue(ret, "Error in bringing back brick online")
+ g.log.info('All bricks are online now')
+
+ # Add brick to volume
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, "Failed to add brick on volume %s"
+ % self.volname)
+
+ # Trigger rebalance and wait for it to complete
+ ret, _, _ = rebalance_start(self.mnode, self.volname,
+ force=True)
+ self.assertEqual(ret, 0, "Failed to start rebalance on the volume %s"
+ % self.volname)
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
+ timeout=1200)
+ self.assertTrue(ret, "Rebalance is not yet complete on the volume "
+ "%s" % self.volname)
+ g.log.info("Rebalance successfully completed")
+
+ # Wait for heal to complete
+ ret = monitor_heal_completion(self.mnode, self.volname)
+ self.assertTrue(ret, "heal has not yet completed")
+ g.log.info("Self heal completed")
+
+ # Check for data loss by comparing arequal before and after ops
+ arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
+ self.assertEqual(arequal_checksum_before, arequal_checksum_after,
+ "arequal checksum is NOT MATCHNG")
+ g.log.info("arequal checksum is SAME")
diff --git a/tests/functional/dht/test_add_brick_rebalance_with_symlink_pointing_out_of_gluster.py b/tests/functional/dht/test_add_brick_rebalance_with_symlink_pointing_out_of_gluster.py
new file mode 100644
index 000000000..92135b3b4
--- /dev/null
+++ b/tests/functional/dht/test_add_brick_rebalance_with_symlink_pointing_out_of_gluster.py
@@ -0,0 +1,133 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.rebalance_ops import (
+ rebalance_start, wait_for_rebalance_to_complete)
+from glustolibs.gluster.glusterfile import get_md5sum
+from glustolibs.gluster.volume_libs import expand_volume
+from glustolibs.io.utils import (validate_io_procs, wait_for_io_to_complete)
+
+
+@runs_on([['distributed-replicated', 'distributed-arbiter'], ['glusterfs']])
+class TestAddBrickRebalanceWithSymlinkPointingOutOfGluster(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume
+ if not self.setup_volume_and_mount_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Setup and mount volume")
+
+ self.is_io_running = False
+
+ def tearDown(self):
+
+ # Remove the temporary dir created for test
+ ret, _, _ = g.run(self.mounts[0].client_system, "rm -rf /mnt/tmp/")
+ if ret:
+ raise ExecutionError("Failed to remove /mnt/tmp create for test")
+
+ # If I/O processes are running, wait for them to complete
+ if self.is_io_running:
+ if not wait_for_io_to_complete(self.list_of_io_processes,
+ [self.mounts[0]]):
+ raise ExecutionError("Failed to wait for I/O to complete")
+
+ if not self.unmount_volume_and_cleanup_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Cleanup Volume")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_add_brick_rebalance_with_symlink_pointing_out_of_volume(self):
+ """
+ Test case:
+ 1. Create a volume, start it and mount it.
+ 2. Create symlinks on the volume such that the files for the symlink
+ are outside the volume.
+ 3. Once all the symlinks are created, create a data file using dd:
+ dd if=/dev/urandom of=FILE bs=1024 count=100
+ 4. Start copying the file's data to all the symlinks.
+ 5. While data is being copied to all files through the symlinks, add a
+ brick and start rebalance.
+ 6. Once rebalance is complete, check the md5sum of each file through its
+ symlink and compare it with that of the original file.
+ """
+ # Create symlinks on volume pointing outside volume
+ cmd = ("cd %s; mkdir -p /mnt/tmp;for i in {1..100};do "
+ "touch /mnt/tmp/file$i; ln -sf /mnt/tmp/file$i link$i;done"
+ % self.mounts[0].mountpoint)
+ ret, _, _ = g.run(self.mounts[0].client_system, cmd)
+ self.assertFalse(
+ ret, "Failed to create symlinks pointing outside volume")
+
+ # Create a data file using dd inside mount point
+ cmd = ("cd %s; dd if=/dev/urandom of=FILE bs=1024 count=100"
+ % self.mounts[0].mountpoint)
+ ret, _, _ = g.run(self.mounts[0].client_system, cmd)
+ self.assertFalse(ret, "Failed to create data file on mount point")
+
+ # Start copying data from file to symlinks
+ cmd = ("cd %s;for i in {1..100};do cat FILE >> link$i;done"
+ % self.mounts[0].mountpoint)
+ self.list_of_io_processes = [
+ g.run_async(self.mounts[0].client_system, cmd)]
+ self.is_io_running = True
+
+ # Add brick to volume
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, "Failed to add brick on volume %s"
+ % self.volname)
+
+ # Trigger rebalance and wait for it to complete
+ ret, _, _ = rebalance_start(self.mnode, self.volname,
+ force=True)
+ self.assertEqual(ret, 0, "Failed to start rebalance on the volume %s"
+ % self.volname)
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
+ timeout=1200)
+ self.assertTrue(ret, "Rebalance is not yet complete on the volume "
+ "%s" % self.volname)
+ g.log.info("Rebalance successfully completed")
+
+ # Validate if I/O was successful or not.
+ ret = validate_io_procs(self.list_of_io_processes, self.mounts)
+ self.assertTrue(ret, "IO failed on some of the clients")
+ self.is_io_running = False
+
+ # Get md5sum of the original file and compare it with that of
+ # all files through the symlink
+ original_file_md5sum = get_md5sum(self.mounts[0].client_system,
+ "{}/FILE".format(
+ self.mounts[0].mountpoint))
+ self.assertIsNotNone(original_file_md5sum,
+ 'Failed to get md5sum of original file')
+ for number in range(1, 101):
+ symlink_md5sum = get_md5sum(self.mounts[0].client_system,
+ "{}/link{}".format(
+ self.mounts[0].mountpoint, number))
+ self.assertEqual(original_file_md5sum.split(' ')[0],
+ symlink_md5sum.split(' ')[0],
+ "Original file and symlink checksum not equal"
+ " for link%s" % number)
+ g.log.info("Symlink and original file checksum same on all symlinks")
diff --git a/tests/functional/dht/test_add_brick_replace_brick_fix_layout.py b/tests/functional/dht/test_add_brick_replace_brick_fix_layout.py
index 66f39ff24..783ca1800 100644
--- a/tests/functional/dht/test_add_brick_replace_brick_fix_layout.py
+++ b/tests/functional/dht/test_add_brick_replace_brick_fix_layout.py
@@ -82,7 +82,7 @@ class TestAddBrickReplaceBrickFixLayout(GlusterBaseClass):
1. Create a volume, start it and mount it.
2. Create files and dirs on the mount point.
3. Add bricks to the volume.
- 4. Replace 2 old brick to the volume.
+ 4. Replace 2 old bricks in the volume.
5. Trigger rebalance fix layout and wait for it to complete.
6. Check layout on all the bricks through trusted.glusterfs.dht.
"""
diff --git a/tests/functional/dht/test_rebalance_multiple_expansions.py b/tests/functional/dht/test_rebalance_multiple_expansions.py
new file mode 100644
index 000000000..e96d88d56
--- /dev/null
+++ b/tests/functional/dht/test_rebalance_multiple_expansions.py
@@ -0,0 +1,100 @@
+# Copyright (C) 2021 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.rebalance_ops import (
+ rebalance_start, wait_for_rebalance_to_complete)
+from glustolibs.gluster.volume_libs import expand_volume
+from glustolibs.io.utils import collect_mounts_arequal
+
+
+@runs_on([['distributed', 'distributed-replicated'],
+ ['glusterfs']])
+class TestRebalanceMultipleExpansions(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume
+ if not self.setup_volume_and_mount_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Setup and mount volume")
+
+ self.first_client = self.mounts[0].client_system
+
+ def tearDown(self):
+
+ # Unmount and clean volume
+ if not self.unmount_volume_and_cleanup_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Cleanup Volume")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_rebalance_multiple_expansions(self):
+ """
+ Test case:
+ 1. Create a volume, start it and mount it
+ 2. Create some file on mountpoint
+ 3. Collect arequal checksum on mount point pre-rebalance
+ 4. Do the following 3 times:
+ 5. Expand the volume
+ 6. Start rebalance and wait for it to finish
+ 7. Collect arequal checksum on mount point post-rebalance
+ and compare with value from step 3
+ """
+
+ # Create some file on mountpoint
+ cmd = ("cd %s; for i in {1..500} ; do "
+ "dd if=/dev/urandom of=file$i bs=10M count=1; done"
+ % self.mounts[0].mountpoint)
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertEqual(ret, 0, "IO failed on volume %s"
+ % self.volname)
+
+ # Collect arequal checksum before rebalance
+ arequal_checksum_before = collect_mounts_arequal(self.mounts[0])
+
+ for _ in range(3):
+ # Add brick to volume
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, "Failed to add brick on volume %s"
+ % self.volname)
+
+ # Trigger rebalance and wait for it to complete
+ ret, _, _ = rebalance_start(self.mnode, self.volname,
+ force=True)
+ self.assertEqual(ret, 0, "Failed to start rebalance on "
+ "volume %s" % self.volname)
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
+ timeout=1200)
+ self.assertTrue(ret, "Rebalance is not yet complete on the volume "
+ "%s" % self.volname)
+ g.log.info("Rebalance successfully completed")
+
+ # Collect arequal checksum after rebalance
+ arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
+
+ # Check for data loss by comparing arequal before and after
+ # rebalance
+ self.assertEqual(arequal_checksum_before, arequal_checksum_after,
+ "arequal checksum is NOT MATCHNG")
+ g.log.info("arequal checksum is SAME")
diff --git a/tests/functional/dht/test_rebalance_multiple_shrinks.py b/tests/functional/dht/test_rebalance_multiple_shrinks.py
new file mode 100644
index 000000000..a95cdf141
--- /dev/null
+++ b/tests/functional/dht/test_rebalance_multiple_shrinks.py
@@ -0,0 +1,87 @@
+# Copyright (C) 2021 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.volume_libs import shrink_volume
+from glustolibs.io.utils import collect_mounts_arequal
+
+
+@runs_on([['distributed'], ['glusterfs']])
+class TestRebalanceMultipleShrinks(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Changing dist_count to 6
+ self.volume['voltype']['dist_count'] = 6
+
+ # Setup Volume
+ if not self.setup_volume_and_mount_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Setup and mount volume")
+
+ self.first_client = self.mounts[0].client_system
+
+ def tearDown(self):
+
+ # Unmount and clean volume
+ if not self.unmount_volume_and_cleanup_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Cleanup Volume")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_rebalance_multiple_shrinks(self):
+ """
+ Test case:
+ 1. Modify the distribution count of a volume
+ 2. Create a volume, start it and mount it
+ 3. Create some file on mountpoint
+ 4. Collect arequal checksum on mount point pre-rebalance
+ 5. Do the following 3 times:
+ 6. Shrink the volume
+ 7. Collect arequal checksum on mount point post-rebalance
+ and compare with value from step 4
+ """
+
+ # Create some file on mountpoint
+ cmd = ("cd %s; for i in {1..500} ; do "
+ "dd if=/dev/urandom of=file$i bs=10M count=1; done"
+ % self.mounts[0].mountpoint)
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertEqual(ret, 0, "IO failed on volume %s"
+ % self.volname)
+
+ # Collect arequal checksum before rebalance
+ arequal_checksum_before = collect_mounts_arequal(self.mounts[0])
+
+ for _ in range(3):
+ # Shrink volume
+ ret = shrink_volume(self.mnode, self.volname,
+ rebalance_timeout=16000)
+ self.assertTrue(ret, "Failed to remove-brick from volume")
+ g.log.info("Remove-brick rebalance successful")
+
+ # Collect arequal checksum after rebalance
+ arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
+
+ # Check for data loss by comparing arequal before and after
+ # rebalance
+ self.assertEqual(arequal_checksum_before, arequal_checksum_after,
+ "arequal checksum is NOT MATCHNG")
+ g.log.info("arequal checksum is SAME")
diff --git a/tests/functional/dht/test_rebalance_nested_dir.py b/tests/functional/dht/test_rebalance_nested_dir.py
new file mode 100644
index 000000000..77f099ad3
--- /dev/null
+++ b/tests/functional/dht/test_rebalance_nested_dir.py
@@ -0,0 +1,99 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.rebalance_ops import (
+ rebalance_start, wait_for_rebalance_to_complete)
+from glustolibs.gluster.volume_libs import expand_volume
+from glustolibs.io.utils import collect_mounts_arequal
+
+
+@runs_on([['distributed', 'distributed-replicated'],
+ ['glusterfs']])
+class TestRebalanceNestedDir(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume
+ if not self.setup_volume_and_mount_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Setup and mount volume")
+
+ self.first_client = self.mounts[0].client_system
+
+ def tearDown(self):
+
+ # Unmount and clean volume
+ if not self.unmount_volume_and_cleanup_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Cleanup Volume")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_rebalance_nested_dir(self):
+ """
+ Test case:
+ 1. Create a volume, start it and mount it
+ 2. On mount point, create a large nested dir structure with
+ files in the inner-most dir
+ 3. Collect arequal checksum on mount point pre-rebalance
+ 4. Expand the volume
+ 5. Start rebalance and wait for it to finish
+ 6. Collect arequal checksum on mount point post-rebalance
+ and compare with value from step 3
+ """
+
+ # create a large nested dir structure with files in the inner-most dir
+ cmd = ("cd %s; for i in {1..100} ; do mkdir $i; cd $i; done;"
+ "for j in {1..100} ; do "
+ "dd if=/dev/urandom of=file$j bs=10M count=1; done"
+ % self.mounts[0].mountpoint)
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertEqual(ret, 0, "IO failed on volume %s"
+ % self.volname)
+
+ # Collect arequal checksum before rebalance
+ arequal_checksum_before = collect_mounts_arequal(self.mounts[0])
+
+ # Add brick to volume
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, "Failed to add brick on volume %s"
+ % self.volname)
+
+ # Trigger rebalance and wait for it to complete
+ ret, _, _ = rebalance_start(self.mnode, self.volname,
+ force=True)
+ self.assertEqual(ret, 0, "Failed to start rebalance on the volume %s"
+ % self.volname)
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
+ timeout=1200)
+ self.assertTrue(ret, "Rebalance is not yet complete on the volume "
+ "%s" % self.volname)
+ g.log.info("Rebalance successfully completed")
+
+ # Collect arequal checksum after rebalance
+ arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
+
+ # Check for data loss by comparing arequal before and after rebalance
+ self.assertEqual(arequal_checksum_before, arequal_checksum_after,
+ "arequal checksum is NOT MATCHNG")
+ g.log.info("arequal checksum is SAME")
diff --git a/tests/functional/dht/test_rebalance_peer_probe.py b/tests/functional/dht/test_rebalance_peer_probe.py
new file mode 100644
index 000000000..7ffc9ca63
--- /dev/null
+++ b/tests/functional/dht/test_rebalance_peer_probe.py
@@ -0,0 +1,130 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from time import sleep
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.rebalance_ops import (
+ rebalance_start, wait_for_rebalance_to_complete)
+from glustolibs.gluster.volume_libs import expand_volume
+from glustolibs.io.utils import collect_mounts_arequal
+from glustolibs.gluster.peer_ops import (peer_probe_servers, peer_detach)
+
+
+@runs_on([['distributed'], ['glusterfs']])
+class TestRebalancePeerProbe(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume
+ if not self.setup_volume_and_mount_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Setup and mount volume")
+
+ self.first_client = self.mounts[0].client_system
+ self.is_peer_detached = False
+
+ def tearDown(self):
+
+ # Unmount and clean volume
+ if not self.unmount_volume_and_cleanup_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Cleanup Volume")
+
+ # Probe detached node in case it's still detached
+ if self.is_peer_detached:
+ if not peer_probe_servers(self.mnode, self.servers[5]):
+ raise ExecutionError("Failed to probe detached "
+ "servers %s" % self.servers)
+ g.log.info("Peer probe success for detached "
+ "servers %s", self.servers)
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_rebalance_peer_probe(self):
+ """
+ Test case:
+ 1. Detach a peer
+ 2. Create a volume, start it and mount it
+ 3. Start creating a few files on mount point
+ 4. Collect arequal checksum on mount point pre-rebalance
+ 5. Expand the volume
+ 6. Start rebalance
+ 7. While rebalance is going on, probe a peer and check if
+ the peer was probed successfully
+ 8. Collect arequal checksum on mount point post-rebalance
+ and compare with value from step 4
+ """
+
+ # Detach a peer
+ ret, _, _ = peer_detach(self.mnode, self.servers[5])
+ self.assertEqual(ret, 0, "Failed to detach peer %s"
+ % self.servers[5])
+
+ self.is_peer_detached = True
+
+ # Start I/O from mount point and wait for it to complete
+ cmd = ("cd %s; for i in {1..1000} ; do "
+ "dd if=/dev/urandom of=file$i bs=10M count=1; done"
+ % self.mounts[0].mountpoint)
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertEqual(ret, 0, "IO failed on volume %s"
+ % self.volname)
+
+ # Collect arequal checksum before rebalance
+ arequal_checksum_before = collect_mounts_arequal(self.mounts[0])
+
+ # Add brick to volume
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, "Failed to add brick on volume %s"
+ % self.volname)
+
+ # Trigger rebalance and wait for it to complete
+ ret, _, _ = rebalance_start(self.mnode, self.volname,
+ force=True)
+ self.assertEqual(ret, 0, "Failed to start rebalance on the volume %s"
+ % self.volname)
+
+ # Let rebalance run for a while
+ sleep(5)
+
+ # Add new node to the cluster
+ ret = peer_probe_servers(self.mnode, self.servers[5])
+ self.assertTrue(ret, "Failed to peer probe server : %s"
+ % self.servers[5])
+ g.log.info("Peer probe success for %s and all peers are in "
+ "connected state", self.servers[5])
+
+ self.is_peer_detached = False
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
+ timeout=1200)
+ self.assertTrue(ret, "Rebalance is not yet complete on the volume "
+ "%s" % self.volname)
+ g.log.info("Rebalance successfully completed")
+
+ # Collect arequal checksum after rebalance
+ arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
+
+ # Check for data loss by comparing arequal before and after rebalance
+ self.assertEqual(arequal_checksum_before, arequal_checksum_after,
+ "arequal checksum is NOT MATCHNG")
+ g.log.info("arequal checksum is SAME")
diff --git a/tests/functional/dht/test_rebalance_preserve_user_permissions.py b/tests/functional/dht/test_rebalance_preserve_user_permissions.py
index 6bffeb8d7..59327f329 100644
--- a/tests/functional/dht/test_rebalance_preserve_user_permissions.py
+++ b/tests/functional/dht/test_rebalance_preserve_user_permissions.py
@@ -25,7 +25,6 @@ from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.rebalance_ops import (
rebalance_start,
- get_rebalance_status,
wait_for_rebalance_to_complete)
from glustolibs.gluster.volume_libs import (
expand_volume,
@@ -40,9 +39,7 @@ from glustolibs.gluster.glusterfile import (
@runs_on([['distributed', 'distributed-replicated'],
['glusterfs']])
class TestRebalancePreserveUserPermissions(GlusterBaseClass):
-
def setUp(self):
-
self.get_super_method(self, 'setUp')()
# Creating Volume and mounting the volume
@@ -60,7 +57,6 @@ class TestRebalancePreserveUserPermissions(GlusterBaseClass):
raise ExecutionError("Failed to add user")
def tearDown(self):
-
ret = del_user(self.client, self.user)
if not ret:
raise ExecutionError("Failed to delete user")
@@ -73,12 +69,45 @@ class TestRebalancePreserveUserPermissions(GlusterBaseClass):
self.get_super_method(self, 'tearDown')()
+ def _start_rebalance_and_wait(self):
+ """Start rebalance and wait"""
+ # Start Rebalance
+ ret, _, _ = rebalance_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, ("Failed to start rebalance on the volume "
+ "%s", self.volname))
+ g.log.info("Successfully started rebalance on the volume %s",
+ self.volname)
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
+ self.assertTrue(ret, ("Rebalance is not yet complete on the volume "
+ "%s", self.volname))
+ g.log.info("Rebalance is successfully complete on the volume %s",
+ self.volname)
+
+ def _get_arequal_and_check_if_equal_to_before(self):
+ """Check if arequal checksum is equal or not"""
+ self.arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
+ self.assertEqual(
+ self.arequal_checksum_before, self.arequal_checksum_after,
+ "arequal checksum is NOT MATCHNG")
+ g.log.info("arequal checksum is SAME")
+
+ def _logged_vol_info(self):
+ """Log volume info and status"""
+ ret = log_volume_info_and_status(self.mnode, self.volname)
+ self.assertTrue(ret, ("Logging volume info and status failed on "
+ "volume %s", self.volname))
+
def _check_user_permission(self):
"""
Verify permissions on MP and file
"""
stat_mp_dict = get_file_stat(self.client, self.mountpoint)
- self.assertEqual(stat_mp_dict['access'], '777', "Expected 777 "
+ self.assertIsNotNone(stat_mp_dict, "stat on %s failed"
+ % self.mountpoint)
+ self.assertEqual(stat_mp_dict['access'], '777',
+ "Expected 777 "
"but found %s" % stat_mp_dict['access'])
g.log.info("File permissions for mountpoint is 777 as expected")
@@ -92,9 +121,9 @@ class TestRebalancePreserveUserPermissions(GlusterBaseClass):
self.assertEqual(stat_dict['groupname'], self.user,
"Expected %s but found %s"
% (self.user, stat_dict['groupname']))
- g.log.info("User and Group are 'glusto_user' as expected")
+ g.log.info("User and Group are %s as expected", self.user)
- def test_rebalance_preserve_user_permissions(self):
+ def _testcase(self, number_of_expands=1):
"""
Test case:
1. Create a volume start it and mount on the client.
@@ -102,7 +131,7 @@ class TestRebalancePreserveUserPermissions(GlusterBaseClass):
3. Add new user to the client.
4. As the new user create dirs/files.
5. Compute arequal checksum and check permission on / and subdir.
- 6. Add brick into the volume and start rebalance.
+ 6. Expand the cluster according to number_of_expands and start rebalance.
7. After rebalance is completed:
7.1 check arequal checksum
7.2 verfiy no change in / and sub dir permissions.
@@ -126,57 +155,24 @@ class TestRebalancePreserveUserPermissions(GlusterBaseClass):
# check permission on / and subdir
self._check_user_permission()
- # Log the volume info and status before rebalance
- ret = log_volume_info_and_status(self.mnode, self.volname)
- self.assertTrue(ret, ("Logging volume info and status failed on "
- "volume %s", self.volname))
-
- # Get arequal checksum before starting fix-layout
- g.log.info("Getting arequal checksum before rebalance")
- arequal_cksum_pre_rebalance = collect_mounts_arequal(self.mounts[0])
-
- # Expand the volume
- ret = expand_volume(self.mnode, self.volname, self.servers,
- self.all_servers_info)
- self.assertTrue(ret, ("Failed to expand the volume %s", self.volname))
- g.log.info("Expanding volume is successful on "
- "volume %s", self.volname)
-
- # Log the volume info after expanding volume.
- ret = log_volume_info_and_status(self.mnode, self.volname)
- self.assertTrue(ret, ("Logging volume info and status failed on "
- "volume %s", self.volname))
+ # get arequal checksum before expand
+ self.arequal_checksum_before = collect_mounts_arequal(self.mounts[0])
- # Start Rebalance
- ret, _, _ = rebalance_start(self.mnode, self.volname)
- self.assertEqual(ret, 0, ("Failed to start rebalance on the volume "
- "%s", self.volname))
- g.log.info("Successfully started rebalance on the volume %s",
- self.volname)
+ self._logged_vol_info()
- # Check rebalance is in progress
- rebalance_status = get_rebalance_status(self.mnode, self.volname)
- ret = rebalance_status['aggregate']['statusStr']
- self.assertEqual(ret, "in progress", ("Rebalance is not in "
- "'in progress' state, either "
- "rebalance is in completed state"
- " or failed to get rebalance "
- "status"))
- g.log.info("Rebalance is in 'in progress' state")
+ # expand the volume
+ for i in range(number_of_expands):
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, ("Failed to expand iter %d volume %s",
+ i, self.volname))
- # Wait for rebalance to complete
- ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
- self.assertTrue(ret, ("Rebalance is not yet complete on the volume "
- "%s", self.volname))
- g.log.info("Rebalance is successfully complete on the volume %s",
- self.volname)
+ self._logged_vol_info()
+ # Start Rebalance and wait for completion
+ self._start_rebalance_and_wait()
- # Compare arequals checksum pre/post rebalance
- arequal_cksum_post_rebalance = collect_mounts_arequal(self.mounts[0])
- self.assertEqual(arequal_cksum_pre_rebalance,
- arequal_cksum_post_rebalance,
- "arequal checksum is NOT MATCHNG")
- g.log.info("arequal checksum is SAME")
+ # compare arequals checksum before and after rebalance
+ self._get_arequal_and_check_if_equal_to_before()
# permissions check on / and sub dir
self._check_user_permission()
@@ -190,3 +186,9 @@ class TestRebalancePreserveUserPermissions(GlusterBaseClass):
self.assertEqual(ret, 0, ("User %s failed to create files", self.user))
g.log.info("IO as %s is successful", self.user)
+
+ def test_rebalance_preserve_user_permissions(self):
+ self._testcase()
+
+ def test_rebalance_preserve_user_permissions_multi_expands(self):
+ self._testcase(2)
diff --git a/tests/functional/dht/test_rebalance_two_volumes.py b/tests/functional/dht/test_rebalance_two_volumes.py
new file mode 100644
index 000000000..c96f75586
--- /dev/null
+++ b/tests/functional/dht/test_rebalance_two_volumes.py
@@ -0,0 +1,163 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.rebalance_ops import (
+ rebalance_start, wait_for_rebalance_to_complete)
+from glustolibs.gluster.volume_libs import expand_volume
+from glustolibs.io.utils import collect_mounts_arequal
+from glustolibs.gluster.mount_ops import mount_volume
+from glustolibs.gluster.volume_ops import (volume_create, volume_start,
+ volume_stop, volume_delete)
+from glustolibs.gluster.lib_utils import form_bricks_list
+
+
+@runs_on([['distributed', 'distributed-replicated'], ['glusterfs']])
+class TestRebalanceTwoVolumes(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume
+ if not self.setup_volume_and_mount_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Setup and mount volume")
+
+ self.first_client = self.mounts[0].client_system
+
+ self.second_vol_name = "second_volume"
+ self.second_mountpoint = "/mnt/{}".format(self.second_vol_name)
+ self.is_second_volume_created = False
+
+ def tearDown(self):
+
+ # Unmount and clean volume
+ if not self.unmount_volume_and_cleanup_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Cleanup Volume")
+
+ if self.is_second_volume_created:
+ # Stop the 2nd volume
+ ret, _, _ = volume_stop(self.mnode, self.second_vol_name)
+ self.assertEqual(ret, 0, ("volume stop failed for %s"
+ % self.second_vol_name))
+ g.log.info("Volume %s stopped", self.second_vol_name)
+
+ # Delete the 2nd volume
+ ret = volume_delete(self.mnode, self.second_vol_name)
+ self.assertTrue(ret, ("Failed to cleanup the Volume "
+ "%s", self.second_vol_name))
+ g.log.info("Volume deleted successfully : %s",
+ self.second_vol_name)
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_rebalance_two_volumes(self):
+ """
+ Test case:
+ 1. Create a volume, start it and mount it
+ 2. Create a 2nd volume, start it and mount it
+ 3. Create files on mount points
+ 4. Collect arequal checksum on mount point pre-rebalance
+ 5. Expand the volumes
+ 6. Start rebalance simultaneously on the 2 volumes
+ 7. Wait for rebalance to complete
+ 8. Collect arequal checksum on mount point post-rebalance
+ and compare with value from step 4
+ """
+
+ # Get brick list
+ bricks_list = form_bricks_list(self.mnode, self.volname, 3,
+ self.servers, self.all_servers_info)
+ self.assertIsNotNone(bricks_list, "Bricks list is None")
+
+ # Create 2nd volume
+ ret, _, _ = volume_create(self.mnode, self.second_vol_name,
+ bricks_list)
+ self.assertEqual(ret, 0, ("Failed to create volume %s") % (
+ self.second_vol_name))
+ g.log.info("Volume %s created successfully", self.second_vol_name)
+
+ # Start 2nd volume
+ ret, _, _ = volume_start(self.mnode, self.second_vol_name)
+ self.assertEqual(ret, 0, ("Failed to start volume %s") % (
+ self.second_vol_name))
+ g.log.info("Started volume %s", self.second_vol_name)
+
+ self.is_second_volume_created = True
+
+ # Mount 2nd volume
+ for mount_obj in self.mounts:
+ ret, _, _ = mount_volume(self.second_vol_name,
+ mtype=self.mount_type,
+ mpoint=self.second_mountpoint,
+ mserver=self.mnode,
+ mclient=mount_obj.client_system)
+ self.assertEqual(ret, 0, ("Failed to mount volume %s") % (
+ self.second_vol_name))
+ g.log.info("Volume mounted successfully : %s",
+ self.second_vol_name)
+
+ # Start I/O from mount point for volume 1 and wait for it to complete
+ cmd = ("cd %s; for i in {1..1000} ; do "
+ "dd if=/dev/urandom of=file$i bs=10M count=1; done"
+ % self.mounts[0].mountpoint)
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertEqual(ret, 0, "IO failed on volume %s"
+ % self.volname)
+
+ # Start I/O from mount point for volume 2 and wait for it to complete
+ cmd = ("cd %s; for i in {1..1000} ; do "
+ "dd if=/dev/urandom of=file$i bs=10M count=1; done"
+ % self.second_mountpoint)
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertEqual(ret, 0, "IO failed on volume %s"
+ % self.second_vol_name)
+
+ # Collect arequal checksum before rebalance
+ arequal_checksum_before = collect_mounts_arequal(self.mounts[0])
+
+ # Add bricks to volumes
+ for volume in (self.volname, self.second_vol_name):
+ ret = expand_volume(self.mnode, volume, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, "Failed to add brick on volume %s"
+ % volume)
+
+ # Trigger rebalance
+ for volume in (self.volname, self.second_vol_name):
+ ret, _, _ = rebalance_start(self.mnode, volume,
+ force=True)
+ self.assertEqual(ret, 0, "Failed to start rebalance on the"
+ " volume %s" % volume)
+
+ # Wait for rebalance to complete
+ for volume in (self.volname, self.second_vol_name):
+ ret = wait_for_rebalance_to_complete(self.mnode, volume,
+ timeout=1200)
+ self.assertTrue(ret, "Rebalance is not yet complete on the volume"
+ " %s" % volume)
+ g.log.info("Rebalance successfully completed")
+
+ # Collect arequal checksum after rebalance
+ arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
+
+ # Check for data loss by comparing arequal before and after rebalance
+ self.assertEqual(arequal_checksum_before, arequal_checksum_after,
+ "arequal checksum is NOT MATCHNG")
+ g.log.info("arequal checksum is SAME")
diff --git a/tests/functional/dht/test_rebalance_with_acl_set_to_files.py b/tests/functional/dht/test_rebalance_with_acl_set_to_files.py
new file mode 100644
index 000000000..d290ae56a
--- /dev/null
+++ b/tests/functional/dht/test_rebalance_with_acl_set_to_files.py
@@ -0,0 +1,129 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.glusterfile import set_acl, get_acl
+from glustolibs.gluster.lib_utils import add_user, del_user
+from glustolibs.gluster.mount_ops import mount_volume
+from glustolibs.gluster.rebalance_ops import (
+ rebalance_start, wait_for_rebalance_to_complete)
+from glustolibs.gluster.volume_libs import expand_volume
+from glustolibs.io.utils import collect_mounts_arequal
+
+
+@runs_on([['distributed-replicated', 'distributed-arbiter', 'distributed',
+ 'replicated', 'arbiter', 'distributed-dispersed',
+ 'dispersed'], ['glusterfs']])
+class TestRebalanceWithAclSetToFiles(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume
+ if not self.setup_volume():
+ raise ExecutionError("Failed to Setup volume")
+
+ self.first_client = self.mounts[0].client_system
+ self.mount_point = self.mounts[0].mountpoint
+
+ # Mount volume with -o acl option
+ ret, _, _ = mount_volume(self.volname, self.mount_type,
+ self.mount_point, self.mnode,
+ self.first_client, options='acl')
+ if ret:
+ raise ExecutionError("Failed to mount volume")
+
+ # Create a non-root user
+ if not add_user(self.first_client, 'joker'):
+ raise ExecutionError("Failed to create user joker")
+
+ def tearDown(self):
+
+ # Remove non-root user created for test
+ if not del_user(self.first_client, 'joker'):
+ raise ExecutionError("Failed to remove user joker")
+
+ if not self.unmount_volume_and_cleanup_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Cleanup Volume")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def _check_acl_set_to_files(self):
+ """Check acl values set to files"""
+ for number in range(1, 11):
+ ret = get_acl(self.first_client, self.mount_point,
+ 'file{}'.format(str(number)))
+ self.assertIn('user:joker:rwx', ret['rules'],
+ "Rule not present in getfacl output")
+
+ def test_add_brick_rebalance_with_acl_set_to_files(self):
+ """
+ Test case:
+ 1. Create a volume, start it and mount it to a client.
+ 2. Create 10 files on the mount point and set acls on the files.
+ 3. Check the acl value and collect arequal-checksum.
+ 4. Add bricks to the volume and start rebalance.
+ 5. Check the value of acl(it should be same as step 3),
+ collect and compare arequal-checksum with the one collected
+ in step 3
+ """
+ # Create 10 files on the mount point.
+ cmd = ("cd {}; for i in `seq 1 10`;do touch file$i;done"
+ .format(self.mount_point))
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertFalse(ret, "Failed to create files on mount point")
+
+ for number in range(1, 11):
+ ret = set_acl(self.first_client, 'u:joker:rwx', '{}/file{}'
+ .format(self.mount_point, str(number)))
+ self.assertTrue(ret, "Failed to set acl on files")
+
+ # Collect arequal on mount point and check acl value
+ arequal_checksum_before = collect_mounts_arequal(self.mounts[0])
+ self._check_acl_set_to_files()
+ g.log.info("Files created and acl set to files properly")
+
+ # Add brick to volume
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, "Failed to add brick on volume %s"
+ % self.volname)
+
+ # Trigger rebalance and wait for it to complete
+ ret, _, _ = rebalance_start(self.mnode, self.volname,
+ force=True)
+ self.assertEqual(ret, 0, "Failed to start rebalance on the volume %s"
+ % self.volname)
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
+ timeout=1200)
+ self.assertTrue(ret, "Rebalance is not yet complete on the volume "
+ "%s" % self.volname)
+ g.log.info("Rebalance successfully completed")
+
+ # Check acl value if it's same as before rebalance
+ self._check_acl_set_to_files()
+
+ # Check for data loss by comparing arequal before and after ops
+ arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
+ self.assertEqual(arequal_checksum_before, arequal_checksum_after,
+ "arequal checksum is NOT MATCHNG")
+ g.log.info("arequal checksum and acl value are SAME")
diff --git a/tests/functional/dht/test_time_taken_for_ls.py b/tests/functional/dht/test_time_taken_for_ls.py
new file mode 100644
index 000000000..7c9653999
--- /dev/null
+++ b/tests/functional/dht/test_time_taken_for_ls.py
@@ -0,0 +1,105 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+
+
+@runs_on([['distributed-replicated', 'distributed-arbiter',
+ 'distributed-dispersed'], ['glusterfs']])
+class TestTimeForls(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume
+ if not self.setup_volume_and_mount_volume(self.mounts):
+ raise ExecutionError("Failed to Setup and mount volume")
+
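+        # Flag checked by tearDown so that any async I/O still in progress
+        # is collected even if the test fails before calling _validate_io()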
+ self.is_io_running = False
+
+ def tearDown(self):
+
+ if self.is_io_running:
+ self._validate_io()
+
+ if not self.unmount_volume_and_cleanup_volume(self.mounts):
+ raise ExecutionError("Failed to Cleanup Volume")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def _validate_io(self):
+ """Validare I/O threads running on mount point"""
+ io_success = []
+ for proc in self.proc_list:
+ try:
+ ret, _, _ = proc.async_communicate()
+ if ret:
+ io_success.append(False)
+ break
+ io_success.append(True)
+ except ValueError:
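+                # A ValueError typically means the process output was already
+                # collected (its streams are closed), so treat the I/O as
+                # having completed successfully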
+ io_success.append(True)
+ return all(io_success)
+
+ def test_time_taken_for_ls(self):
+ """
+ Test case:
+ 1. Create a volume of type distributed-replicated or
+ distributed-arbiter or distributed-dispersed and start it.
+ 2. Mount the volume to clients and create 2000 directories
+ and 10 files inside each directory.
+ 3. Wait for I/O to complete on mount point and perform ls
+ (ls should complete within 10 seconds).
+ """
+ # Creating 2000 directories on the mount point
+ ret, _, _ = g.run(self.mounts[0].client_system,
+ "cd %s; for i in {1..2000};do mkdir dir$i;done"
+ % self.mounts[0].mountpoint)
+ self.assertFalse(ret, 'Failed to create 2000 dirs on mount point')
+
+        # Create 10 files inside each of the 2000 directories, splitting the
+        # directories into batches that are processed asynchronously
+        dirs = ('{1..100}', '{101..200}', '{201..300}', '{301..400}',
+                '{401..500}', '{501..600}', '{601..700}', '{701..800}',
+                '{801..900}', '{901..1000}', '{1001..1100}', '{1101..1200}',
+                '{1201..1300}', '{1301..1400}', '{1401..1500}', '{1501..1600}',
+                '{1601..1700}', '{1701..1800}', '{1801..1900}', '{1901..2000}')
+        self.proc_list, counter = [], 0
+        while counter < len(dirs):
+ for mount_obj in self.mounts:
+ ret = g.run_async(mount_obj.client_system,
+ "cd %s;for i in %s;do "
+ "touch dir$i/file{1..10};done"
+ % (mount_obj.mountpoint, dirs[counter]))
+ self.proc_list.append(ret)
+ counter += 1
+ self.is_io_running = True
+
+ # Check if I/O is successful or not
+ ret = self._validate_io()
+ self.assertTrue(ret, "Failed to create Files and dirs on mount point")
+ self.is_io_running = False
+ g.log.info("Successfully created files and dirs needed for the test")
+
+ # Run ls on mount point which should get completed within 10 seconds
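+        # 'timeout 10 ls' exits non-zero (124) when ls does not finish
+        # within 10 seconds, which makes the assertion below fail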
+ ret, _, _ = g.run(self.mounts[0].client_system,
+ "cd %s; timeout 10 ls"
+ % self.mounts[0].mountpoint)
+        self.assertFalse(ret, 'ls took more than 10 seconds to complete')
+ g.log.info("ls completed in under 10 seconds")
diff --git a/tests/functional/dht/test_verify_permissions_on_root_dir_when_brick_down.py b/tests/functional/dht/test_verify_permissions_on_root_dir_when_brick_down.py
new file mode 100644
index 000000000..f6228c122
--- /dev/null
+++ b/tests/functional/dht/test_verify_permissions_on_root_dir_when_brick_down.py
@@ -0,0 +1,134 @@
+# Copyright (C) 2021 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.glusterfile import set_file_permissions
+from glustolibs.gluster.brick_libs import (get_all_bricks,
+ bring_bricks_offline,
+ bring_bricks_online)
+
+
+@runs_on([['distributed', 'distributed-replicated', 'distributed-dispersed',
+ 'distributed-arbiter'],
+ ['glusterfs']])
+class TestVerifyPermissionChanges(GlusterBaseClass):
+ def setUp(self):
+ """
+ Setup and mount volume
+ """
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume
+ if not self.setup_volume_and_mount_volume(mounts=[self.mounts[0]]):
+ raise ExecutionError("Failed to Setup and Mount Volume")
+
+ def _set_root_dir_permission(self, permission):
+ """ Sets the root dir permission to the given value"""
+ m_point = self.mounts[0].mountpoint
+ ret = set_file_permissions(self.clients[0], m_point, permission)
+ self.assertTrue(ret, "Failed to set root dir permissions")
+
+ def _get_dir_permissions(self, host, directory):
+ """ Returns dir permissions"""
+ cmd = 'stat -c "%a" {}'.format(directory)
+ ret, out, _ = g.run(host, cmd)
+ self.assertEqual(ret, 0, "Failed to get permission on {}".format(host))
+ return out.strip()
+
+ def _get_root_dir_permission(self, expected=None):
+ """ Returns the root dir permission """
+ permission = self._get_dir_permissions(self.mounts[0].client_system,
+ self.mounts[0].mountpoint)
+ if not expected:
+ return permission.strip()
+        self.assertEqual(permission, expected, "The permissions don't match")
+ return True
+
+ def _bring_a_brick_offline(self):
+ """ Brings down a brick from the volume"""
+ brick_to_kill = get_all_bricks(self.mnode, self.volname)[-1]
+ ret = bring_bricks_offline(self.volname, brick_to_kill)
+ self.assertTrue(ret, "Failed to bring brick offline")
+ return brick_to_kill
+
+ def _bring_back_brick_online(self, brick):
+ """ Brings back down brick from the volume"""
+ ret = bring_bricks_online(self.mnode, self.volname, brick)
+ self.assertTrue(ret, "Failed to bring brick online")
+
+ def _verify_mount_dir_and_brick_dir_permissions(self, expected,
+ down_brick=None):
+ """ Verifies the mount directory and brick dir permissions are same"""
+ # Get root dir permission and verify
+ self._get_root_dir_permission(expected)
+
+ # Verify brick dir permission
+ brick_list = get_all_bricks(self.mnode, self.volname)
+ for brick in brick_list:
+ brick_node, brick_path = brick.split(":")
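+            # The offline brick is skipped: the permission change made on
+            # the mount cannot reach its backend directory until the brick
+            # is brought back online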
+            if not down_brick or down_brick.split(":")[-1] != brick_path:
+                actual_perm = self._get_dir_permissions(brick_node,
+                                                        brick_path)
+                self.assertEqual(actual_perm, expected,
+                                 "The permissions are not the same")
+
+ def test_verify_root_dir_permission_changes(self):
+ """
+        1. Create a distributed type volume and start it
+        2. Mount the volume on a client
+        3. Check the default permission on the root dir (should be 755)
+        4. Change the permission to 444 and verify it
+        5. Kill a brick
+        6. Change the root dir permission back to 755
+        7. Verify the permission change on all bricks except the down brick
+        8. Bring the brick back online and verify the change is reflected
+ """
+
+ # Verify the default permission on root dir is 755
+ self._verify_mount_dir_and_brick_dir_permissions("755")
+
+ # Change root permission to 444
+ self._set_root_dir_permission("444")
+
+ # Verify the changes were successful
+ self._verify_mount_dir_and_brick_dir_permissions("444")
+
+ # Kill a brick
+ offline_brick = self._bring_a_brick_offline()
+
+ # Change root permission to 755
+ self._set_root_dir_permission("755")
+
+ # Verify the permission changed to 755 on mount and brick dirs
+ self._verify_mount_dir_and_brick_dir_permissions("755", offline_brick)
+
+ # Bring brick online
+ self._bring_back_brick_online(offline_brick)
+
+ # Verify the permission changed to 755 on mount and brick dirs
+ self._verify_mount_dir_and_brick_dir_permissions("755")
+
+ def tearDown(self):
+ # Unmount and cleanup original volume
+ if not self.unmount_volume_and_cleanup_volume(mounts=[self.mounts[0]]):
+ raise ExecutionError("Failed to umount the vol & cleanup Volume")
+ g.log.info("Successful in umounting the volume and Cleanup")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()