Diffstat (limited to 'tests/functional/dht')
-rw-r--r--  tests/functional/dht/test_access_file_with_stale_linkto_xattr.py | 169
-rw-r--r--  tests/functional/dht/test_accessing_file_when_dht_layout_is_stale.py | 181
-rw-r--r--  tests/functional/dht/test_add_brick_rebalance_revised.py | 171
-rw-r--r--  tests/functional/dht/test_add_brick_rebalance_with_rsync_in_progress.py | 151
-rw-r--r--  tests/functional/dht/test_add_brick_rebalance_with_self_heal_in_progress.py | 136
-rw-r--r--  tests/functional/dht/test_add_brick_rebalance_with_symlink_pointing_out_of_gluster.py | 133
-rw-r--r--  tests/functional/dht/test_add_brick_remove_brick_with_lookups_and_kernal_untar.py | 162
-rw-r--r--  tests/functional/dht/test_add_brick_replace_brick_fix_layout.py | 124
-rw-r--r--  tests/functional/dht/test_brick_full_add_brick_rebalance.py | 120
-rw-r--r--  tests/functional/dht/test_brick_full_add_brick_remove_brick.py | 111
-rw-r--r--  tests/functional/dht/test_copy_dir_subvol_down.py | 308
-rw-r--r--  tests/functional/dht/test_copy_file_subvol_down.py | 336
-rw-r--r--  tests/functional/dht/test_copy_huge_file_with_remove_brick_in_progress.py | 111
-rw-r--r--  tests/functional/dht/test_custom_xattr_healing_for_dir.py | 332
-rw-r--r--  tests/functional/dht/test_delete_dir_with_self_pointing_linkto_files.py | 140
-rw-r--r--  tests/functional/dht/test_delete_file_picked_for_migration.py | 165
-rw-r--r--  tests/functional/dht/test_dht_create_dir.py | 124
-rw-r--r--  tests/functional/dht/test_dht_custom_xattr.py | 17
-rw-r--r--  tests/functional/dht/test_dht_file_rename_when_dest_is_hashed_or_cached_to_diff_subvol_combinations.py | 919
-rw-r--r--  tests/functional/dht/test_dht_file_rename_when_destination_file_exists.py | 540
-rw-r--r--  tests/functional/dht/test_directory_custom_extended_attributes.py | 21
-rw-r--r--  tests/functional/dht/test_disable_readdirp_data_loss.py | 103
-rw-r--r--  tests/functional/dht/test_file_creation.py | 494
-rw-r--r--  tests/functional/dht/test_file_rename_when_destination_file_doesnot_exist.py | 450
-rw-r--r--  tests/functional/dht/test_file_rename_when_destination_file_stored_on_source_file_hashed_subvol.py | 639
-rw-r--r--  tests/functional/dht/test_invalid_memory_read_after_freed.py | 102
-rw-r--r--  tests/functional/dht/test_kill_brick_with_remove_brick.py | 128
-rw-r--r--  tests/functional/dht/test_nuke_happy_path.py | 95
-rw-r--r--  tests/functional/dht/test_one_brick_full_add_brick_rebalance.py | 139
-rw-r--r--  tests/functional/dht/test_open_file_migration.py | 131
-rw-r--r--  tests/functional/dht/test_pipe_character_and_block_device_files.py | 328
-rw-r--r--  tests/functional/dht/test_readdirp_with_rebalance.py | 173
-rw-r--r--  tests/functional/dht/test_rebalance_add_brick_and_lookup.py | 113
-rw-r--r--  tests/functional/dht/test_rebalance_dir_file_from_multiple_clients.py | 2
-rw-r--r--  tests/functional/dht/test_rebalance_files_with_holes.py | 128
-rw-r--r--  tests/functional/dht/test_rebalance_multiple_expansions.py | 100
-rw-r--r--  tests/functional/dht/test_rebalance_multiple_shrinks.py | 87
-rw-r--r--  tests/functional/dht/test_rebalance_nested_dir.py | 99
-rw-r--r--  tests/functional/dht/test_rebalance_peer_probe.py | 130
-rw-r--r--  tests/functional/dht/test_rebalance_preserve_user_permissions.py | 194
-rw-r--r--  tests/functional/dht/test_rebalance_remove_brick_with_quota.py | 160
-rw-r--r--  tests/functional/dht/test_rebalance_rename.py | 181
-rw-r--r--  tests/functional/dht/test_rebalance_two_volumes.py | 163
-rw-r--r--  tests/functional/dht/test_rebalance_with_acl_set_to_files.py | 129
-rw-r--r--  tests/functional/dht/test_rebalance_with_brick_down.py | 171
-rw-r--r--  tests/functional/dht/test_rebalance_with_hidden_files.py | 8
-rw-r--r--  tests/functional/dht/test_rebalance_with_quota.py | 188
-rw-r--r--  tests/functional/dht/test_rebalance_with_quota_on_subdirectory.py | 195
-rw-r--r--  tests/functional/dht/test_rebalance_with_special_files.py | 158
-rw-r--r--  tests/functional/dht/test_remove_brick_command_opitons.py | 113
-rw-r--r--  tests/functional/dht/test_remove_brick_no_commit_followed_by_rebalance.py | 169
-rw-r--r--  tests/functional/dht/test_remove_brick_with_open_fd.py | 107
-rw-r--r--  tests/functional/dht/test_rename_with_brick_min_free_limit_crossed.py | 82
-rw-r--r--  tests/functional/dht/test_restart_glusterd_after_rebalance.py | 3
-rwxr-xr-x[-rw-r--r--]  tests/functional/dht/test_rmdir_subvol_down.py | 3
-rw-r--r--  tests/functional/dht/test_sparse_file_creation_and_deletion.py | 156
-rw-r--r--  tests/functional/dht/test_stack_overflow.py | 131
-rw-r--r--  tests/functional/dht/test_stop_glusterd_while_rebalance_in_progress.py | 2
-rw-r--r--  tests/functional/dht/test_time_taken_for_ls.py | 105
-rw-r--r--  tests/functional/dht/test_verify_create_hash.py | 2
-rw-r--r--  tests/functional/dht/test_verify_permissions_on_root_dir_when_brick_down.py | 134
-rw-r--r--  tests/functional/dht/test_volume_start_stop_while_rebalance_in_progress.py | 4
-rw-r--r--  tests/functional/dht/test_wipe_out_directory_permissions.py | 132
63 files changed, 10497 insertions(+), 105 deletions(-)
diff --git a/tests/functional/dht/test_access_file_with_stale_linkto_xattr.py b/tests/functional/dht/test_access_file_with_stale_linkto_xattr.py
new file mode 100644
index 000000000..c40d33935
--- /dev/null
+++ b/tests/functional/dht/test_access_file_with_stale_linkto_xattr.py
@@ -0,0 +1,169 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.lib_utils import add_user, del_user, set_passwd
+from glustolibs.gluster.volume_ops import (set_volume_options,
+ reset_volume_option)
+from glustolibs.gluster.volume_libs import get_subvols
+from glustolibs.gluster.dht_test_utils import find_new_hashed
+from glustolibs.gluster.glusterfile import move_file, is_linkto_file
+from glustolibs.gluster.glusterfile import set_file_permissions
+
+
+@runs_on([['distributed', 'distributed-arbiter',
+ 'distributed-replicated', 'distributed-dispersed'],
+ ['glusterfs']])
+class TestAccessFileWithStaleLinktoXattr(GlusterBaseClass):
+ def setUp(self):
+ """
+ Setup and mount volume or raise ExecutionError
+ """
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Failed to Setup and Mount Volume")
+
+ # Add a new user to the clients
+ ret = add_user(self.clients[0], "test_user1")
+ if ret is not True:
+ raise ExecutionError("Failed to add user")
+
+ # Set password for user "test_user1"
+ ret = set_passwd(self.clients[0], "test_user1", "red123")
+ if ret is not True:
+ raise ExecutionError("Failed to set password")
+
+        # Generate ssh key on local host
+ cmd = 'echo -e "n" | ssh-keygen -f ~/.ssh/id_rsa -q -N ""'
+ ret, out, _ = g.run_local(cmd)
+ if ret and "already exists" not in out:
+ raise ExecutionError("Failed to generate ssh-key")
+ g.log.info("Successfully generated ssh-key")
+
+ # Perform ssh-copy-id
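+        # (passwordless ssh as test_user1 lets g.run execute the final
+        # lookup in this test as the non-root user)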
+ cmd = ('sshpass -p "red123" ssh-copy-id -o StrictHostKeyChecking=no'
+ ' test_user1@{}'.format(self.clients[0]))
+ ret, _, _ = g.run_local(cmd)
+ if ret:
+ raise ExecutionError("Failed to perform ssh-copy-id")
+ g.log.info("Successfully performed ssh-copy-id")
+
+ def tearDown(self):
+ # Delete the added user
+ ret = del_user(self.clients[0], "test_user1")
+ if ret is not True:
+ raise ExecutionError("Failed to delete user")
+
+ # Reset the volume options set inside the test
+ for opt in ('performance.parallel-readdir',
+ 'performance.readdir-ahead'):
+ ret, _, _ = reset_volume_option(self.mnode, self.volname, opt)
+ if ret:
+ raise ExecutionError("Failed to reset the volume option %s"
+ % opt)
+ g.log.info("Successfully reset the volume options")
+
+ # Unmount and cleanup original volume
+ ret = self.unmount_volume_and_cleanup_volume(mounts=[self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Failed to umount the vol & cleanup Volume")
+ g.log.info("Successful in umounting the volume and Cleanup")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_access_file_with_stale_linkto_xattr(self):
+ """
+        Description: Checks if the files are accessible by a non-root user if
+        the files have a stale linkto xattr.
+ Steps:
+ 1) Create a volume and start it.
+ 2) Mount the volume on client node using FUSE.
+ 3) Create a file.
+ 4) Enable performance.parallel-readdir and
+ performance.readdir-ahead on the volume.
+ 5) Rename the file in order to create
+ a linkto file.
+ 6) Force the linkto xattr values to become stale by changing the dht
+ subvols in the graph
+        7) Log in as a non-root user and access the file.
+ """
+ # pylint: disable=protected-access
+
+ # Set permissions on the mount-point
+ m_point = self.mounts[0].mountpoint
+ ret = set_file_permissions(self.clients[0], m_point, "-R 777")
+ self.assertTrue(ret, "Failed to set file permissions")
+ g.log.info("Successfully set file permissions on mount-point")
+
+ # Creating a file on the mount-point
+ cmd = 'dd if=/dev/urandom of={}/FILE-1 count=1 bs=16k'.format(
+ m_point)
+ ret, _, _ = g.run(self.clients[0], cmd)
+ self.assertEqual(ret, 0, "File to create file")
+
+ # Enable performance.parallel-readdir and
+ # performance.readdir-ahead on the volume
+ options = {"performance.parallel-readdir": "enable",
+ "performance.readdir-ahead": "enable"}
+ ret = set_volume_options(self.mnode, self.volname, options)
+ self.assertTrue(ret, "Failed to set volume options")
+ g.log.info("Successfully set volume options")
+
+ # Finding a file name such that renaming source file to it will form a
+ # linkto file
+ subvols = (get_subvols(self.mnode, self.volname))['volume_subvols']
+ newhash = find_new_hashed(subvols, "/", "FILE-1")
+ new_name = str(newhash.newname)
+ new_host = str(newhash.hashedbrickobject._host)
+ new_name_path = str(newhash.hashedbrickobject._fqpath)[:-1]
+
+ # Move file such that it hashes to some other subvol and forms linkto
+ # file
+ ret = move_file(self.clients[0], "{}/FILE-1".format(m_point),
+ "{}/{}".format(m_point, new_name))
+ self.assertTrue(ret, "Rename failed")
+ g.log.info('Renamed file %s to %s',
+ "{}/FILE-1".format(m_point),
+ "{}/{}".format(m_point, new_name))
+
+ # Check if "dst_file" is linkto file
+ ret = is_linkto_file(new_host,
+ '{}{}'.format(new_name_path, new_name))
+ self.assertTrue(ret, "File is not a linkto file")
+ g.log.info("File is linkto file")
+
+ # Force the linkto xattr values to become stale by changing the dht
+ # subvols in the graph; for that:
+ # disable performance.parallel-readdir and
+ # performance.readdir-ahead on the volume
+ options = {"performance.parallel-readdir": "disable",
+ "performance.readdir-ahead": "disable"}
+ ret = set_volume_options(self.mnode, self.volname, options)
+ self.assertTrue(ret, "Failed to disable volume options")
+ g.log.info("Successfully disabled volume options")
+
+ # Access the file as non-root user
+ cmd = "ls -lR {}".format(m_point)
+ ret, _, _ = g.run(self.mounts[0].client_system, cmd,
+ user="test_user1")
+ self.assertEqual(ret, 0, "Lookup failed ")
+ g.log.info("Lookup successful")
diff --git a/tests/functional/dht/test_accessing_file_when_dht_layout_is_stale.py b/tests/functional/dht/test_accessing_file_when_dht_layout_is_stale.py
new file mode 100644
index 000000000..e7f89d84e
--- /dev/null
+++ b/tests/functional/dht/test_accessing_file_when_dht_layout_is_stale.py
@@ -0,0 +1,181 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.glusterdir import mkdir
+from glustolibs.gluster.glusterfile import get_fattr, set_fattr
+from glustolibs.gluster.volume_libs import get_subvols
+from glustolibs.io.utils import collect_mounts_arequal
+
+
+# pylint: disable=too-many-locals
+@runs_on([['distributed'], ['glusterfs']])
+class TestAccessFileStaleLayout(GlusterBaseClass):
+ def setUp(self):
+ self.get_super_method(self, 'setUp')()
+
+ self.volume['voltype']['dist_count'] = 2
+ ret = self.setup_volume_and_mount_volume(self.mounts)
+ if not ret:
+ raise ExecutionError('Failed to setup and mount volume')
+
+ def tearDown(self):
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError('Failed to umount and cleanup Volume')
+
+ self.get_super_method(self, 'tearDown')()
+
+ def _get_brick_node_and_path(self):
+ '''Yields list containing brick node and path from first brick of each
+ subvol
+ '''
+ subvols = get_subvols(self.mnode, self.volname)
+ for subvol in subvols['volume_subvols']:
+ subvol[0] += self.dir_path
+ yield subvol[0].split(':')
+
+ def _assert_file_lookup(self, node, fqpath, when, result):
+ '''Perform `stat` on `fqpath` from `node` and validate against `result`
+ '''
+ cmd = ('stat {}'.format(fqpath))
+ ret, _, _ = g.run(node, cmd)
+ assert_method = self.assertNotEqual
+ assert_msg = 'fail'
+ if result:
+ assert_method = self.assertEqual
+ assert_msg = 'pass'
+ assert_method(
+ ret, 0, 'Lookup on {} from {} should {} {} layout '
+ 'change'.format(fqpath, node, assert_msg, when))
+
+ def test_accessing_file_when_dht_layout_is_stale(self):
+ '''
+ Description : Checks if a file can be opened and accessed if the dht
+ layout has become stale.
+
+ Steps:
+        1. Create, start and mount a volume consisting of 2 subvols on 2
+           clients
+        2. Create a dir `dir` and file `dir/file` from client0
+        3. Take note of layouts of `brick1`/dir and `brick2`/dir of the volume
+        4. Validate that lookup succeeds from only one brick path
+        5. Re-assign layouts, i.e., brick1/dir to brick2/dir and vice-versa
+        6. Remove `dir/file` from client0 and recreate the same file from
+           client0 and client1
+        7. Validate that lookup succeeds from only one brick path (as the
+           layout changed, the file creation path changes too)
+        8. Validate that checksums match from both the clients
+ '''
+
+ # Will be used in _get_brick_node_and_path
+ self.dir_path = '/dir'
+
+ # Will be used in argument to _assert_file_lookup
+ file_name = '/file'
+
+ dir_path = self.mounts[0].mountpoint + self.dir_path
+ file_path = dir_path + file_name
+
+ client0, client1 = self.clients[0], self.clients[1]
+ fattr = 'trusted.glusterfs.dht'
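+        # trusted.glusterfs.dht on a directory holds the hash range the
+        # subvol serves for it; swapping the values between the bricks is
+        # what makes the layouts stale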
+ io_cmd = ('cat /dev/urandom | tr -dc [:space:][:print:] | '
+ 'head -c 1K > {}'.format(file_path))
+
+ # Create a dir from client0
+ ret = mkdir(self.clients[0], dir_path)
+ self.assertTrue(ret, 'Unable to create a directory from mount point')
+
+ # Touch a file with data from client0
+ ret, _, _ = g.run(client0, io_cmd)
+ self.assertEqual(ret, 0, 'Failed to create a file on mount')
+
+ # Yields `node` and `brick-path` from first brick of each subvol
+ gen = self._get_brick_node_and_path()
+
+ # Take note of newly created directory's layout from org_subvol1
+ node1, fqpath1 = next(gen)
+ layout1 = get_fattr(node1, fqpath1, fattr)
+ self.assertIsNotNone(layout1,
+ '{} is not present on {}'.format(fattr, fqpath1))
+
+ # Lookup on file from node1 should fail as `dir/file` will always get
+ # hashed to node2 in a 2-brick distribute volume by default
+ self._assert_file_lookup(node1,
+ fqpath1 + file_name,
+ when='before',
+ result=False)
+
+ # Take note of newly created directory's layout from org_subvol2
+ node2, fqpath2 = next(gen)
+ layout2 = get_fattr(node2, fqpath2, fattr)
+ self.assertIsNotNone(layout2,
+ '{} is not present on {}'.format(fattr, fqpath2))
+
+ # Lookup on file from node2 should pass
+ self._assert_file_lookup(node2,
+ fqpath2 + file_name,
+ when='before',
+ result=True)
+
+ # Set org_subvol2 directory layout to org_subvol1 and vice-versa
+ for node, fqpath, layout, vol in ((node1, fqpath1, layout2, (2, 1)),
+ (node2, fqpath2, layout1, (1, 2))):
+ ret = set_fattr(node, fqpath, fattr, layout)
+ self.assertTrue(
+ ret, 'Failed to set layout of org_subvol{} on '
+ 'brick {} of org_subvol{}'.format(vol[0], fqpath, vol[1]))
+
+ # Remove file after layout change from client0
+ cmd = 'rm -f {}'.format(file_path)
+ ret, _, _ = g.run(client0, cmd)
+ self.assertEqual(ret, 0, 'Failed to delete file after layout change')
+
+ # Create file with same name as above after layout change from client0
+ # and client1
+ for client in (client0, client1):
+ ret, _, _ = g.run(client, io_cmd)
+ self.assertEqual(
+ ret, 0, 'Failed to create file from '
+ '{} after layout change'.format(client))
+
+ # After layout change lookup on file from node1 should pass
+ self._assert_file_lookup(node1,
+ fqpath1 + file_name,
+ when='after',
+ result=True)
+
+ # After layout change lookup on file from node2 should fail
+ self._assert_file_lookup(node2,
+ fqpath2 + file_name,
+ when='after',
+ result=False)
+
+ # Take note of checksum from client0 and client1
+ checksums = [None] * 2
+ for index, mount in enumerate(self.mounts):
+ ret, checksums[index] = collect_mounts_arequal(mount, dir_path)
+ self.assertTrue(
+ ret, 'Failed to get arequal on client {}'.format(
+ mount.client_system))
+
+ # Validate no checksum mismatch
+ self.assertEqual(checksums[0], checksums[1],
+ 'Checksum mismatch between client0 and client1')
+
+ g.log.info('Pass: Test accessing file on stale layout is complete.')
diff --git a/tests/functional/dht/test_add_brick_rebalance_revised.py b/tests/functional/dht/test_add_brick_rebalance_revised.py
new file mode 100644
index 000000000..cc749f47a
--- /dev/null
+++ b/tests/functional/dht/test_add_brick_rebalance_revised.py
@@ -0,0 +1,171 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.rebalance_ops import (
+ rebalance_start, wait_for_rebalance_to_complete, get_rebalance_status)
+from glustolibs.gluster.volume_libs import expand_volume
+from glustolibs.io.utils import collect_mounts_arequal
+
+
+@runs_on([['distributed-replicated', 'distributed-arbiter',
+ 'distributed-dispersed', 'distributed'], ['glusterfs']])
+class TestAddBrickRebalanceRevised(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Creating Volume and mounting the volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Volume creation or mount failed: %s"
+ % self.volname)
+ self.first_client = self.mounts[0].client_system
+
+ def tearDown(self):
+
+ # Unmounting and cleaning volume
+ ret = self.unmount_volume_and_cleanup_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Unable to delete volume %s" % self.volname)
+
+ self.get_super_method(self, 'tearDown')()
+
+ def _run_command_50_times(self, operation, msg):
+ """
+        Run a command 50 times on the mount point; display msg on failure
+ """
+ cmd = ("cd %s; for i in {1..50}; do %s;done"
+ % (self.mounts[0].mountpoint, operation))
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertFalse(ret, msg)
+
+ def _add_bricks_to_volume(self):
+ """Add bricks to the volume"""
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, "Failed to add brick on volume %s"
+ % self.volname)
+
+ def _trigger_rebalance_and_wait(self, rebal_force=False):
+ """Start rebalance with or without force and wait"""
+ # Trigger rebalance on volume
+ ret, _, _ = rebalance_start(self.mnode, self.volname,
+ force=rebal_force)
+ self.assertEqual(ret, 0, "Failed to start rebalance on the volume %s"
+ % self.volname)
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
+ timeout=1200)
+ self.assertTrue(ret, "Rebalance is not yet complete on the volume "
+ "%s" % self.volname)
+ g.log.info("Rebalance successfully completed")
+
+ def _check_if_files_are_skipped_or_not(self):
+ """Check if files are skipped or not"""
+ rebalance_status = get_rebalance_status(self.mnode, self.volname)
+ ret = int(rebalance_status['aggregate']['skipped'])
+        self.assertNotEqual(ret, 0, "No files were skipped during rebalance")
+
+ def _check_arequal_checksum_is_equal_before_and_after(self):
+ """Check if arequal checksum is equal or not"""
+ self.assertEqual(
+ self.arequal_checksum_before, self.arequal_checksum_after,
+ "arequal checksum is NOT MATCHNG")
+ g.log.info("arequal checksum is SAME")
+
+ def test_add_brick_rebalance_with_hardlinks(self):
+ """
+ Test case:
+ 1. Create a volume, start it and mount it using fuse.
+ 2. Create 50 files on the mount point and create 50 hardlinks for the
+ files.
+ 3. After the files and hard links creation is complete, add bricks to
+ the volume and trigger rebalance on the volume.
+ 4. Wait for rebalance to complete and check if files are skipped
+ or not.
+ 5. Trigger rebalance on the volume with force and repeat step 4.
+ """
+ # Tuple of ops to be done
+ ops = (("dd if=/dev/urandom of=file_$i bs=1M count=1",
+ "Failed to create 50 files"),
+ ("ln file_$i hardfile_$i",
+ "Failed to create hard links for files"))
+
+ # Create 50 files on the mount point and create 50 hard links
+ # for the files.
+ for operation, msg in ops:
+ self._run_command_50_times(operation, msg)
+
+ # Collect arequal checksum before add brick op
+ self.arequal_checksum_before = collect_mounts_arequal(self.mounts[0])
+
+ # After the file creation is complete, add bricks to the volume
+ self._add_bricks_to_volume()
+
+ # Trigger rebalance on the volume, wait for it to complete
+ self._trigger_rebalance_and_wait()
+
+ # Check if hardlinks are skipped or not
+ self._check_if_files_are_skipped_or_not()
+
+ # Trigger rebalance with force on the volume, wait for it to complete
+ self._trigger_rebalance_and_wait(rebal_force=True)
+
+ # Check if hardlinks are skipped or not
+ self._check_if_files_are_skipped_or_not()
+
+ # Compare arequals checksum before and after rebalance
+ self.arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
+ self._check_arequal_checksum_is_equal_before_and_after()
+
+ def test_add_brick_rebalance_with_sticky_bit(self):
+ """
+ Test case:
+ 1. Create a volume, start it and mount it using fuse.
+ 2. Create 50 files on the mount point and set sticky bit to the files.
+ 3. After the files creation and sticky bit addition is complete,
+ add bricks to the volume and trigger rebalance on the volume.
+ 4. Wait for rebalance to complete.
+ 5. Check for data corruption by comparing arequal before and after.
+ """
+ # Tuple of ops to be done
+ ops = (("dd if=/dev/urandom of=file_$i bs=1M count=1",
+ "Failed to create 50 files"),
+ ("chmod +t file_$i",
+ "Failed to enable sticky bit for files"))
+
+ # Create 50 files on the mount point and enable sticky bit.
+ for operation, msg in ops:
+ self._run_command_50_times(operation, msg)
+
+ # Collect arequal checksum before add brick op
+ self.arequal_checksum_before = collect_mounts_arequal(self.mounts[0])
+
+        # After the file creation and sticky bit addition is complete,
+ # add bricks to the volume
+ self._add_bricks_to_volume()
+
+ # Trigger rebalance on the volume, wait for it to complete
+ self._trigger_rebalance_and_wait()
+
+ # Compare arequals checksum before and after rebalance
+ self.arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
+ self._check_arequal_checksum_is_equal_before_and_after()
diff --git a/tests/functional/dht/test_add_brick_rebalance_with_rsync_in_progress.py b/tests/functional/dht/test_add_brick_rebalance_with_rsync_in_progress.py
new file mode 100644
index 000000000..799ce1a60
--- /dev/null
+++ b/tests/functional/dht/test_add_brick_rebalance_with_rsync_in_progress.py
@@ -0,0 +1,151 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.glusterdir import mkdir
+from glustolibs.gluster.rebalance_ops import (
+ rebalance_start, wait_for_rebalance_to_complete)
+from glustolibs.gluster.volume_libs import expand_volume
+from glustolibs.io.utils import collect_mounts_arequal, run_linux_untar
+
+
+@runs_on([['distributed-replicated', 'distributed-arbiter',
+ 'distributed-dispersed', 'distributed'], ['glusterfs']])
+class TestAddBrickRebalanceWithRsyncInProgress(GlusterBaseClass):
+
+ def setUp(self):
+ # calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Changing dist_count to 3
+ self.volume['voltype']['dist_count'] = 3
+
+ # Set I/O flag to false
+ self.is_io_running = False
+
+ # Creating Volume and mounting the volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Volume creation or mount failed: %s"
+ % self.volname)
+
+ def tearDown(self):
+
+ # Wait for I/O if not completed
+ if self.is_io_running:
+ if not self._wait_for_untar_and_rsync_completion():
+ g.log.error("I/O failed to stop on clients")
+
+ # Unmounting and cleaning volume
+ ret = self.unmount_volume_and_cleanup_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Unable to delete volume % s" % self.volname)
+
+ self.get_super_method(self, 'tearDown')()
+
+ def _wait_for_untar_and_rsync_completion(self):
+ """Wait for untar and rsync to complete"""
+ has_process_stopped = []
+ for proc in self.list_of_io_processes:
+ try:
+ ret, _, _ = proc.async_communicate()
+                # A zero exit status means the I/O process completed
+                # successfully
+                has_process_stopped.append(ret == 0)
+ except ValueError:
+ has_process_stopped.append(True)
+ return all(has_process_stopped)
+
+ def test_add_brick_rebalance_with_rsync_in_progress(self):
+ """
+ Test case:
+ 1. Create, start and mount a volume.
+        2. Create a directory on the mount point and start linux untar.
+ 3. Create another directory on the mount point and start rsync of
+ linux untar directory.
+ 4. Add bricks to the volume
+ 5. Trigger rebalance on the volume.
+ 6. Wait for rebalance to complete on volume.
+ 7. Wait for I/O to complete.
+        8. Validate that the checksums of the untar and rsync dirs match.
+ """
+ # List of I/O processes
+ self.list_of_io_processes = []
+
+ # Create a dir to start untar
+ self.linux_untar_dir = "{}/{}".format(self.mounts[0].mountpoint,
+ "linuxuntar")
+ ret = mkdir(self.clients[0], self.linux_untar_dir)
+ self.assertTrue(ret, "Failed to create dir linuxuntar for untar")
+
+ # Start linux untar on dir linuxuntar
+ ret = run_linux_untar(self.clients[0], self.mounts[0].mountpoint,
+ dirs=tuple(['linuxuntar']))
+ self.list_of_io_processes += ret
+ self.is_io_running = True
+
+ # Create a new directory and start rsync
+ self.rsync_dir = "{}/{}".format(self.mounts[0].mountpoint,
+ 'rsyncuntarlinux')
+ ret = mkdir(self.clients[0], self.rsync_dir)
+ self.assertTrue(ret, "Failed to create dir rsyncuntarlinux for rsync")
+
+ # Start rsync for linux untar on mount point
+ cmd = ("for i in `seq 1 3`; do rsync -azr {} {};sleep 120;done"
+ .format(self.linux_untar_dir, self.rsync_dir))
+ ret = g.run_async(self.clients[0], cmd)
+ self.list_of_io_processes.append(ret)
+
+ # Add bricks to the volume
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, "Failed to add brick with rsync on volume %s"
+ % self.volname)
+
+ # Trigger rebalance on the volume
+ ret, _, _ = rebalance_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to start rebalance on the volume %s"
+ % self.volname)
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
+ timeout=6000)
+ self.assertTrue(ret, "Rebalance is not yet complete on the volume "
+ "%s" % self.volname)
+
+ # Wait for IO to complete.
+        ret = self._wait_for_untar_and_rsync_completion()
+        self.assertTrue(ret, "IO didn't complete or failed on client")
+ self.is_io_running = False
+
+        # As we are running rsync and untar together, some of the new files
+        # created by linux untar may not have been synced through rsync yet,
+        # which would cause the checksums to differ. To take care of this
+        # corner case we rerun rsync once more.
+ cmd = "rsync -azr {} {}".format(self.linux_untar_dir, self.rsync_dir)
+ ret, _, _ = g.run(self.clients[0], cmd)
+ self.assertEqual(ret, 0, "Failed sync left behind files")
+
+        # Check data consistency on both the directories
+ rsync_checksum = collect_mounts_arequal(
+ self.mounts[0], path='rsyncuntarlinux/linuxuntar/')
+ untar_checksum = collect_mounts_arequal(self.mounts[0],
+ path='linuxuntar')
+ self.assertEqual(
+ rsync_checksum, untar_checksum,
+ "Checksum on untar dir and checksum on rsync dir didn't match")
diff --git a/tests/functional/dht/test_add_brick_rebalance_with_self_heal_in_progress.py b/tests/functional/dht/test_add_brick_rebalance_with_self_heal_in_progress.py
new file mode 100644
index 000000000..6fb7fe4f0
--- /dev/null
+++ b/tests/functional/dht/test_add_brick_rebalance_with_self_heal_in_progress.py
@@ -0,0 +1,136 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from random import choice
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.brick_libs import get_all_bricks, bring_bricks_online
+from glustolibs.gluster.heal_libs import monitor_heal_completion
+from glustolibs.gluster.rebalance_ops import (
+ rebalance_start, wait_for_rebalance_to_complete)
+from glustolibs.gluster.volume_libs import expand_volume
+from glustolibs.io.utils import (collect_mounts_arequal, validate_io_procs,
+ wait_for_io_to_complete)
+from glustolibs.misc.misc_libs import kill_process
+
+
+@runs_on([['distributed-replicated', 'distributed-arbiter'], ['glusterfs']])
+class TestAddBrickRebalanceWithSelfHeal(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume
+ if not self.setup_volume_and_mount_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Setup and mount volume")
+
+ self.is_io_running = False
+
+ def tearDown(self):
+
+        # If I/O processes are running, wait for them to complete
+ if self.is_io_running:
+ if not wait_for_io_to_complete(self.list_of_io_processes,
+ [self.mounts[0]]):
+ raise ExecutionError("Failed to wait for I/O to complete")
+
+ if not self.unmount_volume_and_cleanup_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Cleanup Volume")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_add_brick_rebalance_with_self_heal_in_progress(self):
+ """
+ Test case:
+ 1. Create a volume, start it and mount it.
+ 2. Start creating a few files on mount point.
+ 3. While file creation is going on, kill one of the bricks
+ in the replica pair.
+        4. After file creation is complete, collect arequal checksum
+ on mount point.
+ 5. Bring back the brick online by starting volume with force.
+ 6. Check if all bricks are online and if heal is in progress.
+ 7. Add bricks to the volume and start rebalance.
+ 8. Wait for rebalance and heal to complete on volume.
+ 9. Collect arequal checksum on mount point and compare
+ it with the one taken in step 4.
+ """
+ # Start I/O from mount point and wait for it to complete
+ cmd = ("cd %s; for i in {1..1000} ; do "
+ "dd if=/dev/urandom of=file$i bs=10M count=1; done"
+ % self.mounts[0].mountpoint)
+ self.list_of_io_processes = [
+ g.run_async(self.mounts[0].client_system, cmd)]
+        self.is_io_running = True
+
+ # Get a list of all the bricks to kill brick
+ brick_list = get_all_bricks(self.mnode, self.volname)
+ self.assertIsNotNone(brick_list, "Empty present brick list")
+
+        # Kill the brick process of a randomly chosen brick
+ brick = choice(brick_list)
+ node, _ = brick.split(":")
+ ret = kill_process(node, process_names="glusterfsd")
+ self.assertTrue(ret, "Failed to kill brick process of brick %s"
+ % brick)
+
+ # Validate if I/O was successful or not.
+ ret = validate_io_procs(self.list_of_io_processes, self.mounts)
+ self.assertTrue(ret, "IO failed on some of the clients")
+        self.is_io_running = False
+
+ # Collect arequal checksum before ops
+ arequal_checksum_before = collect_mounts_arequal(self.mounts[0])
+
+ # Bring back the brick online by starting volume with force
+ ret = bring_bricks_online(self.mnode, self.volname, brick_list,
+ bring_bricks_online_methods=[
+ 'volume_start_force'])
+ self.assertTrue(ret, "Error in bringing back brick online")
+ g.log.info('All bricks are online now')
+
+ # Add brick to volume
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, "Failed to add brick on volume %s"
+ % self.volname)
+
+ # Trigger rebalance and wait for it to complete
+ ret, _, _ = rebalance_start(self.mnode, self.volname,
+ force=True)
+ self.assertEqual(ret, 0, "Failed to start rebalance on the volume %s"
+ % self.volname)
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
+ timeout=1200)
+ self.assertTrue(ret, "Rebalance is not yet complete on the volume "
+ "%s" % self.volname)
+ g.log.info("Rebalance successfully completed")
+
+ # Wait for heal to complete
+ ret = monitor_heal_completion(self.mnode, self.volname)
+ self.assertTrue(ret, "heal has not yet completed")
+ g.log.info("Self heal completed")
+
+ # Check for data loss by comparing arequal before and after ops
+ arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
+ self.assertEqual(arequal_checksum_before, arequal_checksum_after,
+ "arequal checksum is NOT MATCHNG")
+ g.log.info("arequal checksum is SAME")
diff --git a/tests/functional/dht/test_add_brick_rebalance_with_symlink_pointing_out_of_gluster.py b/tests/functional/dht/test_add_brick_rebalance_with_symlink_pointing_out_of_gluster.py
new file mode 100644
index 000000000..92135b3b4
--- /dev/null
+++ b/tests/functional/dht/test_add_brick_rebalance_with_symlink_pointing_out_of_gluster.py
@@ -0,0 +1,133 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.rebalance_ops import (
+ rebalance_start, wait_for_rebalance_to_complete)
+from glustolibs.gluster.glusterfile import get_md5sum
+from glustolibs.gluster.volume_libs import expand_volume
+from glustolibs.io.utils import (validate_io_procs, wait_for_io_to_complete)
+
+
+@runs_on([['distributed-replicated', 'distributed-arbiter'], ['glusterfs']])
+class TestAddBrickRebalanceWithSymlinkPointingOutOfGluster(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume
+ if not self.setup_volume_and_mount_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Setup and mount volume")
+
+ self.is_io_running = False
+
+ def tearDown(self):
+
+ # Remove the temporary dir created for test
+ ret, _, _ = g.run(self.mounts[0].client_system, "rm -rf /mnt/tmp/")
+ if ret:
+ raise ExecutionError("Failed to remove /mnt/tmp create for test")
+
+        # If I/O processes are running, wait for them to complete
+ if self.is_io_running:
+ if not wait_for_io_to_complete(self.list_of_io_processes,
+ [self.mounts[0]]):
+ raise ExecutionError("Failed to wait for I/O to complete")
+
+ if not self.unmount_volume_and_cleanup_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Cleanup Volume")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_add_brick_rebalance_with_symlink_pointing_out_of_volume(self):
+ """
+ Test case:
+ 1. Create a volume, start it and mount it.
+ 2. Create symlinks on the volume such that the files for the symlink
+ are outside the volume.
+        3. Once all the symlinks are created, create a data file using dd:
+           dd if=/dev/urandom of=FILE bs=1024 count=100
+        4. Start copying the file's data to all the symlinks.
+        5. While data is being copied through the symlinks, add bricks
+           and start rebalance.
+        6. Once rebalance is complete, check the md5sum of each file through
+           its symlink and compare it with that of the original file.
+ """
+ # Create symlinks on volume pointing outside volume
+ cmd = ("cd %s; mkdir -p /mnt/tmp;for i in {1..100};do "
+ "touch /mnt/tmp/file$i; ln -sf /mnt/tmp/file$i link$i;done"
+ % self.mounts[0].mountpoint)
+ ret, _, _ = g.run(self.mounts[0].client_system, cmd)
+ self.assertFalse(
+ ret, "Failed to create symlinks pointing outside volume")
+
+ # Create a data file using dd inside mount point
+ cmd = ("cd %s; dd if=/dev/urandom of=FILE bs=1024 count=100"
+ % self.mounts[0].mountpoint)
+ ret, _, _ = g.run(self.mounts[0].client_system, cmd)
+ self.assertFalse(ret, "Failed to create data file on mount point")
+
+        # Start copying data from file to symlinks
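+        # (every write lands in /mnt/tmp outside the volume, while the
+        # symlinks themselves stay on the volume during rebalance)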
+ cmd = ("cd %s;for i in {1..100};do cat FILE >> link$i;done"
+ % self.mounts[0].mountpoint)
+ self.list_of_io_processes = [
+ g.run_async(self.mounts[0].client_system, cmd)]
+        self.is_io_running = True
+
+ # Add brick to volume
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, "Failed to add brick on volume %s"
+ % self.volname)
+
+ # Trigger rebalance and wait for it to complete
+ ret, _, _ = rebalance_start(self.mnode, self.volname,
+ force=True)
+ self.assertEqual(ret, 0, "Failed to start rebalance on the volume %s"
+ % self.volname)
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
+ timeout=1200)
+ self.assertTrue(ret, "Rebalance is not yet complete on the volume "
+ "%s" % self.volname)
+ g.log.info("Rebalance successfully completed")
+
+ # Validate if I/O was successful or not.
+ ret = validate_io_procs(self.list_of_io_processes, self.mounts)
+ self.assertTrue(ret, "IO failed on some of the clients")
+        self.is_io_running = False
+
+ # Get md5sum of the original file and compare it with that of
+ # all files through the symlink
+ original_file_md5sum = get_md5sum(self.mounts[0].client_system,
+ "{}/FILE".format(
+ self.mounts[0].mountpoint))
+ self.assertIsNotNone(original_file_md5sum,
+ 'Failed to get md5sum of original file')
+ for number in range(1, 101):
+ symlink_md5sum = get_md5sum(self.mounts[0].client_system,
+ "{}/link{}".format(
+ self.mounts[0].mountpoint, number))
+ self.assertEqual(original_file_md5sum.split(' ')[0],
+ symlink_md5sum.split(' ')[0],
+ "Original file and symlink checksum not equal"
+ " for link%s" % number)
+ g.log.info("Symlink and original file checksum same on all symlinks")
diff --git a/tests/functional/dht/test_add_brick_remove_brick_with_lookups_and_kernal_untar.py b/tests/functional/dht/test_add_brick_remove_brick_with_lookups_and_kernal_untar.py
new file mode 100644
index 000000000..4e185733e
--- /dev/null
+++ b/tests/functional/dht/test_add_brick_remove_brick_with_lookups_and_kernal_untar.py
@@ -0,0 +1,162 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from random import choice
+from unittest import skip, SkipTest
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.brick_libs import get_all_bricks
+from glustolibs.gluster.glusterdir import mkdir
+from glustolibs.gluster.volume_libs import expand_volume, shrink_volume
+from glustolibs.gluster.brickmux_ops import enable_brick_mux, disable_brick_mux
+from glustolibs.misc.misc_libs import upload_scripts, kill_process
+from glustolibs.io.utils import (run_linux_untar, validate_io_procs,
+ wait_for_io_to_complete)
+
+
+@runs_on([['distributed-replicated', 'distributed-dispersed'], ['glusterfs']])
+class TestAddBrickRemoveBrickWithlookupsAndKernaluntar(GlusterBaseClass):
+
+ @classmethod
+ def setUpClass(cls):
+ # Calling GlusterBaseClass setUpClass
+ cls.get_super_method(cls, 'setUpClass')()
+
+        # Check for availability of at least 4 clients
+        if len(cls.clients) < 4:
+            raise SkipTest("This test requires at least 4 clients")
+
+ # Upload io scripts for running IO on mounts
+ cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts to clients %s" %
+ cls.clients)
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Enable brickmux on cluster
+ if not enable_brick_mux(self.mnode):
+ raise ExecutionError("Failed to enable brickmux on cluster")
+
+ # Changing dist_count to 3
+ self.volume['voltype']['dist_count'] = 3
+
+ # Creating Volume and mounting the volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Volume creation or mount failed: %s"
+ % self.volname)
+
+ self.list_of_io_processes = []
+ self.is_io_running = False
+
+ def tearDown(self):
+
+ # Disable brickmux on cluster
+ if not disable_brick_mux(self.mnode):
+ raise ExecutionError("Failed to disable brickmux on cluster")
+
+        # If I/O processes are running, wait for them to complete
+ if self.is_io_running:
+ if not wait_for_io_to_complete(self.list_of_io_processes,
+ self.mounts):
+ raise ExecutionError("Failed to wait for I/O to complete")
+
+ # Unmounting and cleaning volume
+ ret = self.unmount_volume_and_cleanup_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Unable to delete volume %s" % self.volname)
+
+ self.get_super_method(self, 'tearDown')()
+
+ @skip('Skipping due to Bug 1571317')
+ def test_add_brick_remove_brick_with_lookups_and_kernal_untar(self):
+ """
+ Test case:
+ 1. Enable brickmux on cluster, create a volume, start it and mount it.
+ 2. Start the below I/O from 4 clients:
+ From client-1 : run script to create folders and files continuously
+ From client-2 : start linux kernel untar
+ From client-3 : while true;do find;done
+ From client-4 : while true;do ls -lRt;done
+ 3. Kill brick process on one of the nodes.
+ 4. Add brick to the volume.
+ 5. Remove bricks from the volume.
+ 6. Validate if I/O was successful or not.
+ """
+        # Get the list of bricks (used below to kill a brick process)
+        bricks = get_all_bricks(self.mnode, self.volname)
+
+ # Create a dir to start untar
+ self.linux_untar_dir = "{}/{}".format(self.mounts[0].mountpoint,
+ "linuxuntar")
+ ret = mkdir(self.clients[0], self.linux_untar_dir)
+ self.assertTrue(ret, "Failed to create dir linuxuntar for untar")
+
+ # Start linux untar on dir linuxuntar
+ ret = run_linux_untar(self.clients[0], self.mounts[0].mountpoint,
+ dirs=tuple(['linuxuntar']))
+ self.list_of_io_processes += ret
+ self.is_io_running = True
+
+ # Run script to create folders and files continuously
+ cmd = ("/usr/bin/env python {} create_deep_dirs_with_files "
+ "--dirname-start-num 758 --dir-depth 2 "
+ "--dir-length 100 --max-num-of-dirs 10 --num-of-files 105 {}"
+ .format(self.script_upload_path, self.mounts[1].mountpoint))
+ ret = g.run_async(self.mounts[1].client_system, cmd)
+ self.list_of_io_processes += [ret]
+
+ # Run lookup operations from 2 clients
+ cmd = ("cd {}; for i in `seq 1 1000000`;do find .; done"
+ .format(self.mounts[2].mountpoint))
+ ret = g.run_async(self.mounts[2].client_system, cmd)
+ self.list_of_io_processes += [ret]
+
+ cmd = ("cd {}; for i in `seq 1 1000000`;do ls -lRt; done"
+ .format(self.mounts[3].mountpoint))
+ ret = g.run_async(self.mounts[3].client_system, cmd)
+ self.list_of_io_processes += [ret]
+
+ # Kill brick process of one of the nodes.
+ brick = choice(bricks)
+ node, _ = brick.split(":")
+ ret = kill_process(node, process_names="glusterfsd")
+ self.assertTrue(ret, "Failed to kill brick process of brick %s"
+ % brick)
+
+ # Add brick to volume
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, "Failed to add brick on volume %s"
+ % self.volname)
+ g.log.info("Add brick to volume successful")
+
+ # Remove bricks from the volume
+ ret = shrink_volume(self.mnode, self.volname, rebalance_timeout=2400)
+ self.assertTrue(ret, "Failed to remove-brick from volume")
+ g.log.info("Remove-brick rebalance successful")
+
+ # Validate if I/O was successful or not.
+ ret = validate_io_procs(self.list_of_io_processes, self.mounts)
+ self.assertTrue(ret, "IO failed on some of the clients")
+ self.is_io_running = False
diff --git a/tests/functional/dht/test_add_brick_replace_brick_fix_layout.py b/tests/functional/dht/test_add_brick_replace_brick_fix_layout.py
new file mode 100644
index 000000000..783ca1800
--- /dev/null
+++ b/tests/functional/dht/test_add_brick_replace_brick_fix_layout.py
@@ -0,0 +1,124 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from random import choice
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.brick_ops import add_brick
+from glustolibs.gluster.brick_libs import get_all_bricks
+from glustolibs.gluster.glusterfile import get_fattr
+from glustolibs.gluster.rebalance_ops import (rebalance_start,
+ wait_for_fix_layout_to_complete)
+from glustolibs.gluster.volume_libs import (form_bricks_list_to_add_brick,
+ replace_brick_from_volume)
+
+
+@runs_on([['distributed-replicated', 'distributed-arbiter'], ['glusterfs']])
+class TestAddBrickReplaceBrickFixLayout(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Changing dist_count to 3
+ self.volume['voltype']['dist_count'] = 3
+
+ # Creating Volume and mounting the volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Volume creation or mount failed: %s"
+ % self.volname)
+
+ self.first_client = self.mounts[0].client_system
+
+ def tearDown(self):
+
+ # Unmounting and cleaning volume
+ ret = self.unmount_volume_and_cleanup_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Unable to delete volume %s" % self.volname)
+
+ self.get_super_method(self, 'tearDown')()
+
+ def _replace_a_old_added_brick(self, brick_to_be_replaced):
+ """Replace a old brick from the volume"""
+ ret = replace_brick_from_volume(self.mnode, self.volname,
+ self.servers, self.all_servers_info,
+ src_brick=brick_to_be_replaced)
+ self.assertTrue(ret, "Failed to replace brick %s "
+ % brick_to_be_replaced)
+ g.log.info("Successfully replaced brick %s", brick_to_be_replaced)
+
+ def _check_trusted_glusterfs_dht_on_all_bricks(self):
+ """Check trusted.glusterfs.dht xattr on the backend bricks"""
+ bricks = get_all_bricks(self.mnode, self.volname)
+ fattr_value = []
+ for brick_path in bricks:
+ node, path = brick_path.split(":")
+ ret = get_fattr(node, "{}".format(path), "trusted.glusterfs.dht")
+ fattr_value += [ret]
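+        # 3 original subvols plus the added one should yield 4 distinct
+        # layout ranges across the bricks after the fix-layout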
+ self.assertEqual(len(set(fattr_value)), 4,
+ "Value of trusted.glusterfs.dht is not as expected")
+ g.log.info("Successfully checked value of trusted.glusterfs.dht.")
+
+ def test_add_brick_replace_brick_fix_layout(self):
+ """
+ Test case:
+ 1. Create a volume, start it and mount it.
+ 2. Create files and dirs on the mount point.
+ 3. Add bricks to the volume.
+        4. Replace 2 of the old bricks in the volume.
+ 5. Trigger rebalance fix layout and wait for it to complete.
+ 6. Check layout on all the bricks through trusted.glusterfs.dht.
+ """
+ # Create directories with some files on mount point
+ cmd = ("cd %s; for i in {1..10}; do mkdir dir$i; for j in {1..5};"
+ " do dd if=/dev/urandom of=dir$i/file$j bs=1M count=1; done;"
+ " done" % self.mounts[0].mountpoint)
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertFalse(ret, "Failed to create dirs and files.")
+
+        # Original brick list before add brick
+ brick_list = get_all_bricks(self.mnode, self.volname)
+ self.assertIsNotNone(brick_list, "Empty present brick list")
+
+ # Add bricks to the volume
+ add_brick_list = form_bricks_list_to_add_brick(
+ self.mnode, self.volname, self.servers, self.all_servers_info)
+ self.assertIsNotNone(add_brick_list, "Empty add brick list")
+
+ ret, _, _ = add_brick(self.mnode, self.volname, add_brick_list)
+ self.assertFalse(ret, "Failed to add bricks to the volume")
+ g.log.info("Successfully added bricks to the volume")
+
+        # Replace 2 old bricks in the volume
+ for _ in range(0, 2):
+ brick = choice(brick_list)
+ self._replace_a_old_added_brick(brick)
+ brick_list.remove(brick)
+
+ # Start rebalance and wait for it to complete
+ ret, _, _ = rebalance_start(self.mnode, self.volname, fix_layout=True)
+ self.assertFalse(ret, "Failed to start rebalance on volume")
+
+ ret = wait_for_fix_layout_to_complete(self.mnode, self.volname,
+ timeout=800)
+ self.assertTrue(ret, "Rebalance failed on volume")
+
+ # Check layout on all the bricks through trusted.glusterfs.dht
+ self._check_trusted_glusterfs_dht_on_all_bricks()
diff --git a/tests/functional/dht/test_brick_full_add_brick_rebalance.py b/tests/functional/dht/test_brick_full_add_brick_rebalance.py
new file mode 100644
index 000000000..e67115220
--- /dev/null
+++ b/tests/functional/dht/test_brick_full_add_brick_rebalance.py
@@ -0,0 +1,120 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import string
+from random import choice
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.brick_libs import get_all_bricks
+from glustolibs.gluster.dht_test_utils import find_hashed_subvol
+from glustolibs.gluster.lib_utils import get_usable_size_per_disk
+from glustolibs.gluster.rebalance_ops import (rebalance_start,
+ wait_for_rebalance_to_complete)
+from glustolibs.gluster.volume_libs import get_subvols, expand_volume
+from glustolibs.gluster.volume_ops import set_volume_options
+
+
+@runs_on([['distributed-replicated', 'distributed-arbiter'], ['glusterfs']])
+class TestBrickFullAddBrickRebalance(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Creating Volume and mounting the volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Volume creation or mount failed: %s"
+ % self.volname)
+
+ def tearDown(self):
+
+ # Unmounting and cleaning volume
+ ret = self.unmount_volume_and_cleanup_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Unable to delete volume %s" % self.volname)
+
+ self.get_super_method(self, 'tearDown')()
+
+ @staticmethod
+ def _get_random_string():
+ letters = string.ascii_lowercase
+ return ''.join(choice(letters) for _ in range(5))
+
+ def test_brick_full_add_brick_rebalance(self):
+ """
+ Test case:
+ 1. Create a volume, start it and mount it.
+ 2. Create a data set on the client node such that all the available
+ space is used and "No space left on device" error is generated.
+ 3. Set cluster.min-free-disk to 30%.
+ 4. Add bricks to the volume, trigger rebalance and wait for rebalance
+ to complete.
+ """
+ # Create a data set on the client node such that all the available
+ # space is used and "No space left on device" error is generated
+ bricks = get_all_bricks(self.mnode, self.volname)
+
+ # Calculate the usable size and fill till it reaches
+ # min free limit
+ usable_size = get_usable_size_per_disk(bricks[0])
+ subvols = get_subvols(self.mnode, self.volname)['volume_subvols']
+ filename = "abc"
+ for subvol in subvols:
+ # Pick a name that hashes to this subvol so the file created
+ # below lands on it and fills it till the min-free limit
+ while (subvols[find_hashed_subvol(subvols, "/", filename)[1]] !=
+ subvol):
+ filename = self._get_random_string()
+ ret, _, _ = g.run(self.mounts[0].client_system,
+ "fallocate -l {}G {}/{}".format(
+ usable_size, self.mounts[0].mountpoint,
+ filename))
+ self.assertFalse(ret, "Failed to fill disk to min free limit")
+ g.log.info("Disk filled up to min free limit")
+
+ # Try to perform I/O from the mount point (this should fail)
+ ret, _, _ = g.run(self.mounts[0].client_system,
+ "fallocate -l 5G {}/mfile".format(
+ self.mounts[0].mountpoint))
+ self.assertTrue(ret,
+ "Unexpected: Able to do I/O even when disks are "
+ "filled to min free limit")
+ g.log.info("Expected: Unable to perfrom I/O as min free disk is hit")
+
+ # Set cluster.min-free-disk to 30%
+ ret = set_volume_options(self.mnode, self.volname,
+ {'cluster.min-free-disk': '30%'})
+ self.assertTrue(ret, "Failed to set cluster.min-free-disk to 30%")
+
+ # Add brick to volume
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, "Failed to add brick on volume %s"
+ % self.volname)
+
+ # Trigger rebalance and wait for it to complete
+ ret, _, _ = rebalance_start(self.mnode, self.volname,
+ force=True)
+ self.assertEqual(ret, 0, "Failed to start rebalance on the volume %s"
+ % self.volname)
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
+ timeout=1200)
+ self.assertTrue(ret, "Rebalance is not yet complete on the volume "
+ "%s" % self.volname)
+ g.log.info("Rebalance successfully completed")
diff --git a/tests/functional/dht/test_brick_full_add_brick_remove_brick.py b/tests/functional/dht/test_brick_full_add_brick_remove_brick.py
new file mode 100644
index 000000000..eaf7dafb4
--- /dev/null
+++ b/tests/functional/dht/test_brick_full_add_brick_remove_brick.py
@@ -0,0 +1,111 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+ # You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import string
+from random import choice
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.brick_libs import get_all_bricks
+from glustolibs.gluster.dht_test_utils import find_hashed_subvol
+from glustolibs.gluster.lib_utils import get_usable_size_per_disk
+from glustolibs.gluster.volume_libs import (get_subvols, expand_volume,
+ shrink_volume)
+from glustolibs.gluster.volume_ops import set_volume_options
+from glustolibs.io.utils import collect_mounts_arequal
+
+
+@runs_on([['distributed-replicated', 'distributed-arbiter'], ['glusterfs']])
+class TestBrickFullAddBrickRemoveBrickRebalance(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Creating Volume and mounting the volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Volume creation or mount failed: %s"
+ % self.volname)
+
+ def tearDown(self):
+
+ # Unmounting and cleaning volume
+ ret = self.unmount_volume_and_cleanup_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Unable to delete volume %s" % self.volname)
+
+ self.get_super_method(self, 'tearDown')()
+
+ @staticmethod
+ def _get_random_string():
+ letters = string.ascii_lowercase
+ return ''.join(choice(letters) for _ in range(5))
+
+ def test_brick_full_add_brick_remove_brick(self):
+ """
+ Test case:
+ 1. Create a volume, start it and mount it.
+ 2. Fill a few bricks till the min-free-limit is reached.
+ 3. Add a brick to the volume. (This should pass.)
+ 4. Set cluster.min-free-disk to 30%.
+ 5. Remove bricks from the volume. (This should pass.)
+ 6. Check for data loss by comparing arequal before and after.
+ """
+ # Fill a few bricks till they are full
+ bricks = get_all_bricks(self.mnode, self.volname)
+
+ # Calculate the usable size and fill till it reaches
+ # min free limit
+ usable_size = get_usable_size_per_disk(bricks[0])
+ subvols = get_subvols(self.mnode, self.volname)['volume_subvols']
+ filename = "abc"
+ for _ in range(0, usable_size):
+ # Pick a name that hashes to the first subvol so every 1G
+ # file lands on it and fills it till the min-free limit
+ while (subvols[find_hashed_subvol(subvols, "/", filename)[1]]
+ != subvols[0]):
+ filename = self._get_random_string()
+ ret, _, _ = g.run(self.mounts[0].client_system,
+ "fallocate -l 1G {}/{}".format(
+ self.mounts[0].mountpoint, filename))
+ self.assertFalse(ret, "Failed to fill disk to min free limit")
+ filename = self._get_random_string()
+ g.log.info("Disk filled up to min free limit")
+
+ # Collect arequal checksum before ops
+ arequal_checksum_before = collect_mounts_arequal(self.mounts[0])
+
+ # Add brick to volume
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, "Failed to add brick on volume %s"
+ % self.volname)
+
+ # Set cluster.min-free-disk to 30%
+ ret = set_volume_options(self.mnode, self.volname,
+ {'cluster.min-free-disk': '30%'})
+ self.assertTrue(ret, "Failed to set cluster.min-free-disk to 30%")
+
+ # Remove bricks from the volume
+ ret = shrink_volume(self.mnode, self.volname, rebalance_timeout=1800)
+ self.assertTrue(ret, "Failed to remove-brick from volume")
+ g.log.info("Remove-brick rebalance successful")
+
+ # Check for data loss by comparing arequal before and after ops
+ arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
+ self.assertEqual(arequal_checksum_before, arequal_checksum_after,
+ "arequal checksum is NOT MATCHNG")
+ g.log.info("arequal checksum is SAME")
diff --git a/tests/functional/dht/test_copy_dir_subvol_down.py b/tests/functional/dht/test_copy_dir_subvol_down.py
new file mode 100644
index 000000000..8835bcada
--- /dev/null
+++ b/tests/functional/dht/test_copy_dir_subvol_down.py
@@ -0,0 +1,308 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.glusterdir import mkdir
+from glustolibs.io.utils import collect_mounts_arequal, validate_io_procs
+from glustolibs.misc.misc_libs import upload_scripts
+from glustolibs.gluster.dht_test_utils import (find_hashed_subvol,
+ find_new_hashed)
+from glustolibs.gluster.volume_libs import get_subvols
+from glustolibs.gluster.brick_libs import bring_bricks_offline
+
+
+@runs_on([['distributed', 'distributed-replicated',
+ 'distributed-arbiter', 'distributed-dispersed'],
+ ['glusterfs']])
+class TestCopyDirSubvolDown(GlusterBaseClass):
+ @classmethod
+ def setUpClass(cls):
+ cls.get_super_method(cls, 'setUpClass')()
+
+ # Check for the default dist_count value and override it if required
+ if cls.default_volume_type_config['distributed']['dist_count'] <= 2:
+ cls.default_volume_type_config['distributed']['dist_count'] = 4
+ else:
+ cls.default_volume_type_config[cls.voltype]['dist_count'] = 3
+
+ # Upload io scripts for running IO on mounts
+ cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts "
+ "to clients %s" % cls.clients)
+ g.log.info("Successfully uploaded IO scripts to clients %s",
+ cls.clients)
+
+ def setUp(self):
+
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume and Mount Volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
+ g.log.info("Successful in Setup Volume and Mount Volume")
+
+ def tearDown(self):
+
+ # Unmount and cleanup original volume
+ ret = self.unmount_volume_and_cleanup_volume(mounts=[self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Failed to umount the vol & cleanup Volume")
+ g.log.info("Successful in umounting the volume and Cleanup")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def _create_src(self, m_point):
+ """
+ Create the source directory and files under the
+ source directory.
+ """
+ # Create source dir
+ ret = mkdir(self.mounts[0].client_system, "{}/src_dir".format(m_point))
+ self.assertTrue(ret, "mkdir of src_dir failed")
+
+ # Create files inside source dir
+ cmd = ("/usr/bin/env python %s create_files "
+ "-f 100 %s/src_dir/" % (
+ self.script_upload_path, m_point))
+ proc = g.run_async(self.mounts[0].client_system,
+ cmd, user=self.mounts[0].user)
+ g.log.info("IO on %s:%s is started successfully",
+ self.mounts[0].client_system, m_point)
+
+ # Validate IO
+ self.assertTrue(
+ validate_io_procs([proc], self.mounts[0]),
+ "IO failed on some of the clients"
+ )
+
+ def _copy_files_check_contents(self, m_point, dest_dir):
+ """
+ Copy files from source directory to destination
+ directory when it hashes to up-subvol and check
+ if all the files are copied properly.
+ """
+ # pylint: disable=protected-access
+ # collect arequal checksum on src dir
+ ret, src_checksum = collect_mounts_arequal(
+ self.mounts[0], '{}/src_dir'.format(m_point))
+ self.assertTrue(ret, ("Failed to get arequal on client"
+ " {}".format(self.clients[0])))
+
+ # copy src_dir to dest_dir
+ command = "cd {}; cp -r src_dir {}".format(m_point, dest_dir)
+ ret, _, _ = g.run(self.mounts[0].client_system, command)
+ self.assertEqual(ret, 0, "Failed to copy of src dir to"
+ " dest dir")
+ g.log.info("Successfully copied src dir to dest dir.")
+
+ # collect arequal checksum on destination dir
+ ret, dest_checksum = collect_mounts_arequal(
+ self.mounts[0], '{}/{}'.format(m_point, dest_dir))
+ self.assertTrue(ret, ("Failed to get arequal on client"
+ " {}".format(self.mounts[0])))
+
+ # Check if the contents of src dir are copied to
+ # dest dir
+ self.assertEqual(src_checksum,
+ dest_checksum,
+ 'All the contents of src dir are not'
+ ' copied to dest dir')
+ g.log.info('Successfully copied the contents of src dir'
+ ' to dest dir')
+
+ def _copy_when_dest_hash_down(self, m_point, dest_dir):
+ """
+ Copy files from source directory to destination
+ directory when it hashes to down-subvol.
+ """
+ # pylint: disable=protected-access
+ # copy src_dir to dest_dir (should fail as hash subvol for dest
+ # dir is down)
+ command = "cd {}; cp -r src_dir {}".format(m_point, dest_dir)
+ ret, _, _ = g.run(self.mounts[0].client_system, command)
+ self.assertEqual(ret, 1, "Unexpected : Copy of src dir to"
+ " dest dir passed")
+ g.log.info("Copy of src dir to dest dir failed as expected.")
+
+ def test_copy_existing_dir_dest_subvol_down(self):
+ """
+ Case 1:
+ - Create a source directory from the mount point.
+ - Bring down the dht sub-volume to which the destination
+ directory hashes.
+ - Copy the directory and make sure the copy fails and the
+ destination dir does not exist.
+ """
+ # pylint: disable=protected-access
+ m_point = self.mounts[0].mountpoint
+
+ # Create source dir
+ ret = mkdir(self.mounts[0].client_system, "{}/src_dir".format(m_point))
+ self.assertTrue(ret, "mkdir of src_dir failed")
+ g.log.info("Directory src_dir created successfully")
+
+ # Get subvol list
+ subvols = (get_subvols(self.mnode, self.volname))['volume_subvols']
+ self.assertIsNotNone(subvols, "Failed to get subvols")
+
+ # Find out the destination dir name such that it hashes to
+ # different subvol
+ newdir = find_new_hashed(subvols, "/", "src_dir")
+ dest_dir = str(newdir.newname)
+ dest_count = newdir.subvol_count
+
+ # Kill the brick/subvol to which the destination dir hashes
+ ret = bring_bricks_offline(
+ self.volname, subvols[dest_count])
+ self.assertTrue(ret, ('Error in bringing down subvolume %s',
+ subvols[dest_count]))
+ g.log.info('DHT subvol %s is offline', subvols[dest_count])
+
+ # Copy src_dir to dest_dir (should fail as hash subvol for dest
+ # dir is down)
+ self._copy_when_dest_hash_down(m_point, dest_dir)
+
+ def test_copy_existing_dir_dest_subvol_up(self):
+ """
+ Case 2:
+ - Create files and directories from the mount point.
+ - Bring down a dht sub-volume to which neither the source nor
+ the destination directory hashes.
+ - Copy the dir and make sure all of its contents reach the
+ destination dir.
+ """
+ # pylint: disable=protected-access
+ m_point = self.mounts[0].mountpoint
+
+ # Create source dir and create files inside it
+ self._create_src(m_point)
+
+ # Get subvol list
+ subvols = (get_subvols(self.mnode, self.volname))['volume_subvols']
+ self.assertIsNotNone(subvols, "Failed to get subvols")
+
+ # Find out hashed brick/subvol for src dir
+ src_subvol, src_count = find_hashed_subvol(subvols, "/", "src_dir")
+ self.assertIsNotNone(src_subvol, "Could not find srchashed")
+ g.log.info("Hashed subvol for src_dir is %s", src_subvol._path)
+
+ # Find out the destination dir name such that it hashes to
+ # different subvol
+ newdir = find_new_hashed(subvols, "/", "src_dir")
+ dest_dir = str(newdir.newname)
+ dest_count = newdir.subvol_count
+
+ # Remove the hashed subvol for dest and src dir from the
+ # subvol list
+ for item in (subvols[src_count], subvols[dest_count]):
+ subvols.remove(item)
+
+ # Bring down a DHT subvol
+ ret = bring_bricks_offline(self.volname, subvols[0])
+ self.assertTrue(ret, ('Error in bringing down subvolume %s',
+ subvols[0]))
+ g.log.info('DHT subvol %s is offline', subvols[0])
+
+ # Create files on source dir and
+ # perform copy of src_dir to dest_dir
+ self._copy_files_check_contents(m_point, dest_dir)
+
+ def test_copy_new_dir_dest_subvol_up(self):
+ """
+ Case 3:
+ - Bring down a dht sub-volume to which neither the source nor
+ the destination directory hashes.
+ - Create files and directories from the mount point.
+ - Copy the dir and make sure all of its contents reach the
+ destination dir.
+ """
+ # pylint: disable=protected-access
+ # pylint: disable=too-many-statements
+ m_point = self.mounts[0].mountpoint
+
+ # Get subvols
+ subvols = (get_subvols(self.mnode, self.volname))['volume_subvols']
+ self.assertIsNotNone(subvols, "Failed to get subvols")
+
+ # Find out hashed brick/subvol for src dir
+ src_subvol, src_count = find_hashed_subvol(
+ subvols, "/", "src_dir")
+ self.assertIsNotNone(src_subvol, "Could not find srchashed")
+ g.log.info("Hashed subvol for src_dir is %s", src_subvol._path)
+
+ # Find out the destination dir name such that it hashes to
+ # different subvol
+ newdir = find_new_hashed(subvols, "/", "src_dir")
+ dest_dir = str(newdir.newname)
+ dest_count = newdir.subvol_count
+
+ # Remove the hashed subvol for dest and src dir from the
+ # subvol list
+ for item in (subvols[src_count], subvols[dest_count]):
+ subvols.remove(item)
+
+ # Bring down a dht subvol
+ ret = bring_bricks_offline(self.volname, subvols[0])
+ self.assertTrue(ret, ('Error in bringing down subvolume %s',
+ subvols[0]))
+ g.log.info('DHT subvol %s is offline', subvols[0])
+
+ # Create source dir and create files inside it
+ self._create_src(m_point)
+
+ # Create files on source dir and
+ # perform copy of src_dir to dest_dir
+ self._copy_files_check_contents(m_point, dest_dir)
+
+ def test_copy_new_dir_dest_subvol_down(self):
+ """
+ Case 4:
+ - Bring down the dht sub-volume to which the destination
+ directory hashes.
+ - Create the source directory from the mount point.
+ - Copy the directory and make sure the copy fails and the
+ destination dir does not exist.
+ """
+ # pylint: disable=protected-access
+ m_point = self.mounts[0].mountpoint
+
+ # Get subvol list
+ subvols = (get_subvols(self.mnode, self.volname))['volume_subvols']
+ self.assertIsNotNone(subvols, "Failed to get subvols")
+
+ # Find out the destination dir name such that it hashes to
+ # different subvol
+ newdir = find_new_hashed(subvols, "/", "src_dir")
+ dest_dir = str(newdir.newname)
+ dest_count = newdir.subvol_count
+
+ # Bring down the hashed-subvol for dest dir
+ ret = bring_bricks_offline(self.volname, subvols[dest_count])
+ self.assertTrue(ret, ('Error in bringing down subvolume %s',
+ subvols[dest_count]))
+ g.log.info('DHT subvol %s is offline', subvols[dest_count])
+
+ # Create source dir
+ ret = mkdir(self.mounts[0].client_system, "{}/src_dir".format(m_point))
+ self.assertTrue(ret, "mkdir of src_dir failed")
+ g.log.info("Directory src_dir created successfully")
+
+ # Copy src_dir to dest_dir (should fail as hash subvol for dest
+ # dir is down)
+ self._copy_when_dest_hash_down(m_point, dest_dir)
diff --git a/tests/functional/dht/test_copy_file_subvol_down.py b/tests/functional/dht/test_copy_file_subvol_down.py
new file mode 100644
index 000000000..afb06ac3c
--- /dev/null
+++ b/tests/functional/dht/test_copy_file_subvol_down.py
@@ -0,0 +1,336 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+# pylint: disable=protected-access
+# pylint: disable=too-many-statements
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.brickdir import BrickDir
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.volume_libs import get_subvols
+from glustolibs.gluster.dht_test_utils import (find_hashed_subvol,
+ find_new_hashed,
+ find_specific_hashed)
+from glustolibs.gluster.brick_libs import bring_bricks_offline
+from glustolibs.gluster.glusterfile import move_file
+
+
+@runs_on([['distributed', 'distributed-dispersed',
+ 'distributed-arbiter', 'distributed-replicated'],
+ ['glusterfs']])
+class TestCopyFileSubvolDown(GlusterBaseClass):
+ @classmethod
+ def setUpClass(cls):
+ cls.get_super_method(cls, 'setUpClass')()
+
+ # Override the default dist_count value
+ cls.default_volume_type_config[cls.voltype]['dist_count'] = 4
+
+ def setUp(self):
+
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume and Mount Volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
+ g.log.info("Successful in Setup Volume and Mount Volume")
+
+ self.client, self.m_point = (self.mounts[0].client_system,
+ self.mounts[0].mountpoint)
+
+ self.subvols = (get_subvols(
+ self.mnode, self.volname))['volume_subvols']
+ self.assertIsNotNone(self.subvols, "Failed to get subvols")
+
+ def tearDown(self):
+
+ # Unmount and cleanup original volume
+ ret = self.unmount_volume_and_cleanup_volume(mounts=[self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Failed to umount the vol & cleanup Volume")
+ g.log.info("Successful in umounting the volume and Cleanup")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def _create_src_file(self):
+ """Create a srcfile"""
+ cmd = "touch {}/srcfile".format(self.m_point)
+ ret, _, _ = g.run(self.client, cmd)
+ self.assertEqual(ret, 0, "Failed to create srcfile")
+ g.log.info("Successfully created srcfile")
+
+ def _find_hash_for_src_file(self):
+ """Find a new hashsubvol which is different from hash of srcfile"""
+ src_hash_subvol = find_new_hashed(self.subvols, "/", "srcfile")
+ new_src_name = str(src_hash_subvol.newname)
+ src_hash_subvol_count = src_hash_subvol.subvol_count
+ return new_src_name, src_hash_subvol_count
+
+ def _find_cache_for_src_file(self):
+ """Find out hash subvol for srcfile which after rename will become
+ cache subvol"""
+ src_cache_subvol, src_cache_subvol_count = find_hashed_subvol(
+ self.subvols, "/", "srcfile")
+ self.assertIsNotNone(src_cache_subvol, "Could not find src cached")
+ g.log.info("Cached subvol for srcfile is %s", src_cache_subvol._path)
+ return src_cache_subvol_count
+
+ def _rename_src(self, new_src_name):
+ """Rename the srcfile to a new name such that it hashes and
+ caches to different subvols"""
+ ret = move_file(self.client, "{}/srcfile".format(self.m_point),
+ ("{}/".format(self.m_point) + new_src_name))
+ self.assertTrue(ret, ("Failed to move file srcfile and {}".format(
+ new_src_name)))
+
+ def _create_dest_file_find_hash(
+ self, src_cache_subvol_count, src_hash_subvol_count):
+ """Find a name for dest file such that it hashed to a subvol different
+ from the src file's hash and cache subvol"""
+ # Get subvol list
+ subvol_list = (get_subvols(self.mnode, self.volname))['volume_subvols']
+ self.assertIsNotNone(subvol_list, "Failed to get subvols")
+ for item in (subvol_list[src_hash_subvol_count],
+ subvol_list[src_cache_subvol_count]):
+ subvol_list.remove(item)
+
+ # Find name for dest file
+ dest_subvol = BrickDir(subvol_list[0][0] + "/")
+ dest_file = find_specific_hashed(self.subvols, "/", dest_subvol)
+ self.assertIsNotNone(dest_file, "Could not find hashed for destfile")
+
+ # Create dest file
+ cmd = "touch {}/{}".format(self.m_point, dest_file.newname)
+ ret, _, _ = g.run(self.client, cmd)
+ self.assertEqual(ret, 0, "Failed to create destfile")
+ g.log.info("Successfully created destfile")
+ return dest_file.newname, dest_file.subvol_count
+
+ def _kill_subvol(self, subvol_count):
+ """Bring down the subvol as the subvol_count"""
+ ret = bring_bricks_offline(
+ self.volname, self.subvols[subvol_count])
+ self.assertTrue(ret, ('Error in bringing down subvolume %s',
+ self.subvols[subvol_count]))
+ g.log.info('DHT subvol %s is offline',
+ self.subvols[subvol_count])
+
+ def _copy_src_file_to_dest_file(
+ self, src_file, dest_file, expected="pass"):
+ """
+ Copy the src file to the dest file; it will either pass or
+ fail as per the scenario
+ """
+ command = "cd {}; cp -r {} {}".format(
+ self.m_point, src_file, dest_file)
+ expected_ret = 0 if expected == "pass" else 1
+ ret, _, _ = g.run(self.client, command)
+ self.assertEqual(ret, expected_ret,
+ "Unexpected, Copy of Src file to dest "
+ "file status : %s" % (expected))
+ g.log.info("Copy of src file to dest file returned as expected")
+
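+ # Editorial sketch (assumption, not part of the original change):
+ # after the rename, the old subvol keeps the data (cache) while the
+ # new hashed subvol holds a dht linkto stub; a plain getfattr on
+ # the brick path is one way to confirm the stub.
+ def _has_linkto_xattr(self, host, fqpath):
+ """Return True if fqpath carries trusted.glusterfs.dht.linkto."""
+ cmd = ("getfattr -n trusted.glusterfs.dht.linkto "
+ "--absolute-names {}".format(fqpath))
+ ret, _, _ = g.run(host, cmd)
+ return ret == 0
+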
+ def test_copy_srchash_up_desthash_up(self):
+ """
+ Case 1:
+ 1) Create a volume and start it
+ 2) Create a src file and a dest file
+ 3) All subvols are up
+ 4) Copy src file to dest file
+ """
+ # Create a src file
+ self._create_src_file()
+
+ # Find out cache subvol for src file
+ src_cache_count = self._find_cache_for_src_file()
+
+ # Find new hash for src file
+ src_file_new, src_hash_count = self._find_hash_for_src_file()
+
+ # Rename src file so it hash and cache to different subvol
+ self._rename_src(src_file_new)
+
+ # Create dest file and find its hash subvol
+ dest_file, _ = self._create_dest_file_find_hash(
+ src_cache_count, src_hash_count)
+
+ # Copy src file to dest file
+ self._copy_src_file_to_dest_file(src_file_new, dest_file)
+
+ def test_copy_srccache_down_srchash_up_desthash_down(self):
+ """
+ Case 2:
+ 1) Create a volume and start it
+ 2) Create a src file and a dest file
+ 3) Bring down the cache subvol for src file
+ 4) Bring down the hash subvol for dest file
+ 5) Copy src file to dest file
+ """
+ # Create a src file
+ self._create_src_file()
+
+ # Find out cache subvol for src file
+ src_cache_count = self._find_cache_for_src_file()
+
+ # Find new hash for src file
+ src_file_new, src_hash_count = self._find_hash_for_src_file()
+
+ # Rename src file so it hash and cache to different subvol
+ self._rename_src(src_file_new)
+
+ # Create dest file and find its hash subvol
+ dest_file, dest_hash_count = self._create_dest_file_find_hash(
+ src_cache_count, src_hash_count)
+
+ # kill src cache subvol
+ self._kill_subvol(src_cache_count)
+
+ # Kill dest hash subvol
+ self._kill_subvol(dest_hash_count)
+
+ # Copy src file to dest file
+ self._copy_src_file_to_dest_file(src_file_new, dest_file,
+ expected="fail")
+
+ def test_copy_srccache_down_srchash_up_desthash_up(self):
+ """
+ Case 3:
+ 1) Create a volume and start it
+ 2) Create a src file and a dest file
+ 3) Bring down the cache subvol for src file
+ 4) Copy src file to dest file
+ """
+ # Create a src file
+ self._create_src_file()
+
+ # Find out cache subvol for src file
+ src_cache_count = self._find_cache_for_src_file()
+
+ # Find new hash for src file
+ src_file_new, src_hash_count = self._find_hash_for_src_file()
+
+ # Rename src file so it hash and cache to different subvol
+ self._rename_src(src_file_new)
+
+ # Create dest file and find its hash subvol
+ dest_file, _ = self._create_dest_file_find_hash(
+ src_cache_count, src_hash_count)
+
+ # kill src cache subvol
+ self._kill_subvol(src_cache_count)
+
+ # Copy src file to dest file
+ self._copy_src_file_to_dest_file(src_file_new, dest_file,
+ expected="fail")
+
+ def test_copy_srchash_down_desthash_down(self):
+ """
+ Case 4:
+ 1) Create a volume and start it
+ 2) Create a src file and a dest file
+ 3) Bring down the hash subvol for src file
+ 4) Bring down the hash subvol for dest file
+ 5) Copy src file to dest file
+ """
+ # Create a src file
+ self._create_src_file()
+
+ # Find out cache subvol for src file
+ src_cache_count = self._find_cache_for_src_file()
+
+ # Find new hash for src file
+ src_file_new, src_hash_count = self._find_hash_for_src_file()
+
+ # Rename src file so it hash and cache to different subvol
+ self._rename_src(src_file_new)
+
+ # Create dest file and find its hash subvol
+ dest_file, dest_hash_count = self._create_dest_file_find_hash(
+ src_cache_count, src_hash_count)
+
+ # Kill the hashed subvol for src file
+ self._kill_subvol(src_hash_count)
+
+ # Kill the hashed subvol for dest file
+ self._kill_subvol(dest_hash_count)
+
+ # Copy src file to dest file
+ self._copy_src_file_to_dest_file(src_file_new, dest_file,
+ expected="fail")
+
+ def test_copy_srchash_down_desthash_up(self):
+ """
+ Case 5:
+ 1) Create a volume and start it
+ 2) Create a src file and a dest file
+ 3) Bring down the hash subvol for src file
+ 4) Copy src file to dest file
+ """
+ # Create a src file
+ self._create_src_file()
+
+ # Find out cache subvol for src file
+ src_cache_count = self._find_cache_for_src_file()
+
+ # Find new hash for src file
+ src_file_new, src_hash_count = self._find_hash_for_src_file()
+
+ # Rename src file so it hash and cache to different subvol
+ self._rename_src(src_file_new)
+
+ # Create dest file and find its hash subvol
+ dest_file, _ = self._create_dest_file_find_hash(
+ src_cache_count, src_hash_count)
+
+ # Kill the hashed subvol for src file
+ self._kill_subvol(src_hash_count)
+
+ # Copy src file to dest file
+ self._copy_src_file_to_dest_file(src_file_new, dest_file)
+
+ def test_copy_srchash_up_desthash_down(self):
+ """
+ Case 6:
+ 1) Create a volume and start it
+ 2) Create a src file and a dest file
+ 3) Bring down the hash subvol for dest file
+ 4) Copy src file to dest file
+ """
+ # Create a src file
+ self._create_src_file()
+
+ # Find out cache subvol for src file
+ src_cache_count = self._find_cache_for_src_file()
+
+ # Find new hash for src file
+ src_file_new, src_hash_count = self._find_hash_for_src_file()
+
+ # Rename src file so it hash and cache to different subvol
+ self._rename_src(src_file_new)
+
+ # Create dest file and find its hash subvol
+ dest_file, dest_hash_count = self._create_dest_file_find_hash(
+ src_cache_count, src_hash_count)
+
+ # Kill the hashed subvol for dest file
+ self._kill_subvol(dest_hash_count)
+
+ # Copy src file to dest file
+ self._copy_src_file_to_dest_file(src_file_new, dest_file,
+ expected="fail")
diff --git a/tests/functional/dht/test_copy_huge_file_with_remove_brick_in_progress.py b/tests/functional/dht/test_copy_huge_file_with_remove_brick_in_progress.py
new file mode 100644
index 000000000..f142637f2
--- /dev/null
+++ b/tests/functional/dht/test_copy_huge_file_with_remove_brick_in_progress.py
@@ -0,0 +1,111 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+ # You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.glusterfile import get_md5sum
+from glustolibs.gluster.volume_libs import shrink_volume
+from glustolibs.io.utils import validate_io_procs, wait_for_io_to_complete
+
+
+@runs_on([['distributed-replicated', 'distributed-arbiter',
+ 'distributed-dispersed', 'distributed'], ['glusterfs']])
+class TestCopyHugeFileWithRemoveBrickInProgress(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Creating Volume and mounting the volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Volume creation or mount failed: %s"
+ % self.volname)
+
+ self.first_client = self.mounts[0].client_system
+
+ # Initialize the flag checked in tearDown so an early failure
+ # before the copy starts cannot raise AttributeError
+ self.cp_running = False
+
+ def tearDown(self):
+
+ # If cp is running then wait for it to complete
+ if self.cp_running:
+ if not wait_for_io_to_complete(self.io_proc, [self.mounts[0]]):
+ g.log.error("I/O failed to stop on clients")
+ ret, _, _ = g.run(self.first_client, "rm -rf /mnt/huge_file.txt")
+ if ret:
+ g.log.error("Failed to remove huge file from /mnt.")
+
+ # Unmounting and cleaning volume
+ ret = self.unmount_volume_and_cleanup_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Unable to delete volume %s" % self.volname)
+
+ self.get_super_method(self, 'tearDown')()
+
+ def test_copy_huge_file_with_remove_brick_in_progress(self):
+ """
+ Test case:
+ 1. Create a volume, start it and mount it.
+ 2. Create files and dirs on the mount point.
+ 3. Start remove-brick and copy huge file when remove-brick is
+ in progress.
+ 4. Commit remove-brick and check checksum of original and copied file.
+ """
+ # Create a directory with some files inside
+ cmd = ("cd %s; for i in {1..10}; do mkdir dir$i; for j in {1..5};"
+ " do dd if=/dev/urandom of=dir$i/file$j bs=1M count=1; done;"
+ " done" % self.mounts[0].mountpoint)
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertFalse(ret,
+ "Failed to create dirs and files.")
+
+ # Create a huge file under /mnt dir
+ ret, _, _ = g.run(self.first_client,
+ "fallocate -l 10G /mnt/huge_file.txt")
+ self.assertFalse(ret, "Failed to create hug file at /mnt")
+
+ # Copy a huge file when remove-brick is in progress
+ self.cp_running = False
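+ # The async copy below sleeps 60s first so that it runs while the
+ # remove-brick rebalance started further down is in progress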
+ cmd = ("sleep 60; cd %s;cp ../huge_file.txt ."
+ % self.mounts[0].mountpoint)
+ self.io_proc = [g.run_async(self.first_client, cmd)]
+ self.cp_running = True
+
+ # Start remove-brick on volume and wait for it to complete
+ ret = shrink_volume(self.mnode, self.volname, rebalance_timeout=1000)
+ self.assertTrue(ret, "Failed to remove-brick from volume")
+ g.log.info("Remove-brick rebalance successful")
+
+ # Validate if copy was successful or not
+ ret = validate_io_procs(self.io_proc, [self.mounts[0]])
+ self.assertTrue(ret, "dir rename failed on mount point")
+ self.cp_running = False
+
+ # Check checksum of original and copied file
+ original_file_checksum = get_md5sum(self.first_client,
+ "/mnt/huge_file.txt")
+ copied_file_checksum = get_md5sum(self.first_client,
+ "{}/huge_file.txt"
+ .format(self.mounts[0].mountpoint))
+ self.assertEqual(original_file_checksum.split(" ")[0],
+ copied_file_checksum.split(" ")[0],
+ "md5 checksum of original and copied file are"
+ " different")
+ g.log.info("md5 checksum of original and copied file are same.")
+
+ # Remove original huge file
+ ret, _, _ = g.run(self.first_client, "rm -rf /mnt/huge_file.txt")
+ self.assertFalse(ret, "Failed to remove huge_file from mount point")
diff --git a/tests/functional/dht/test_custom_xattr_healing_for_dir.py b/tests/functional/dht/test_custom_xattr_healing_for_dir.py
new file mode 100644
index 000000000..d5bca0fb3
--- /dev/null
+++ b/tests/functional/dht/test_custom_xattr_healing_for_dir.py
@@ -0,0 +1,332 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+# pylint: disable=protected-access
+# pylint: disable=too-many-statements
+
+from time import sleep
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.glusterfile import (get_fattr, set_fattr,
+ delete_fattr)
+from glustolibs.gluster.glusterdir import mkdir
+from glustolibs.gluster.volume_libs import get_subvols
+from glustolibs.gluster.dht_test_utils import (find_hashed_subvol,
+ find_new_hashed)
+from glustolibs.gluster.brick_libs import (get_online_bricks_list,
+ bring_bricks_offline)
+from glustolibs.gluster.volume_ops import volume_start
+
+
+@runs_on([['distributed', 'distributed-dispersed',
+ 'distributed-arbiter', 'distributed-replicated'],
+ ['glusterfs']])
+class TestCustomXattrHealingForDir(GlusterBaseClass):
+ def setUp(self):
+
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume and Mount Volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
+ g.log.info("Successful in Setup Volume and Mount Volume")
+
+ self.client, self.m_point = (self.mounts[0].client_system,
+ self.mounts[0].mountpoint)
+
+ def tearDown(self):
+
+ # Unmount and cleanup original volume
+ ret = self.unmount_volume_and_cleanup_volume(mounts=[self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Failed to umount the vol & cleanup Volume")
+ g.log.info("Successful in umounting the volume and Cleanup")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def _set_xattr_value(self, fattr_value="bar2"):
+ """Set the xattr 'user.foo' as per the value on dir1"""
+ # Set the xattr on the dir1
+ ret = set_fattr(self.client, '{}/dir1'.format(self.m_point),
+ 'user.foo', fattr_value)
+ self.assertTrue(ret, "Failed to set the xattr on dir1")
+ g.log.info("Successfully set the xattr user.foo with value:"
+ " %s on dir1", fattr_value)
+
+ def _check_xattr_value_on_mnt(self, expected_value=None):
+ """Check if the expected value for 'user.foo'
+ is present for dir1 on mountpoint"""
+ ret = get_fattr(self.client, '{}/dir1'.format(self.m_point),
+ 'user.foo', encode="text")
+ self.assertEqual(ret, expected_value, "Failed to get the xattr"
+ " on:{}".format(self.client))
+ g.log.info(
+ "The xattr user.foo for dir1 is displayed on mointpoint"
+ " and has value:%s", expected_value)
+
+ def _check_xattr_value_on_bricks(self, online_bricks, expected_value=None):
+ """Check if the expected value for 'user.foo'is present
+ for dir1 on backend bricks"""
+ for brick in online_bricks:
+ host, brick_path = brick.split(':')
+ ret = get_fattr(host, '{}/dir1'.format(brick_path),
+ 'user.foo', encode="text")
+ self.assertEqual(ret, expected_value, "Failed to get the xattr"
+ " on:{}".format(brick_path))
+ g.log.info("The xattr user.foo is displayed for dir1 on "
+ "brick:%s and has value:%s",
+ brick_path, expected_value)
+
+ def _create_dir(self, dir_name=None):
+ """Create a directory on the mountpoint"""
+ ret = mkdir(self.client, "{}/{}".format(self.m_point, dir_name))
+ self.assertTrue(ret, "mkdir of {} failed".format(dir_name))
+
+ def _perform_lookup(self):
+ """Perform lookup on mountpoint"""
+ cmd = ("ls -lR {}/dir1".format(self.m_point))
+ ret, _, _ = g.run(self.client, cmd)
+ self.assertEqual(ret, 0, "Failed to lookup")
+ g.log.info("Lookup successful")
+ sleep(5)
+
+ def _create_xattr_check_self_heal(self):
+ """Create custom xattr and check if its healed"""
+ # Set the xattr on the dir1
+ self._set_xattr_value(fattr_value="bar2")
+
+ # Get online brick list
+ online_bricks = get_online_bricks_list(self.mnode, self.volname)
+ self.assertIsNotNone(online_bricks, "Failed to get online bricks")
+
+ # Check if the custom xattr is being displayed on the
+ # mount-point for dir1
+ self._check_xattr_value_on_mnt(expected_value="bar2")
+
+ # Check if the xattr is being displayed on the online-bricks
+ # for dir1
+ self._check_xattr_value_on_bricks(online_bricks, expected_value="bar2")
+
+ # Modify custom xattr value on dir1
+ self._set_xattr_value(fattr_value="ABC")
+
+ # Lookup on mount point to refresh the value of xattr
+ self._perform_lookup()
+
+ # Check if the modified custom xattr is being displayed
+ # on the mount-point for dir1
+ self._check_xattr_value_on_mnt(expected_value="ABC")
+
+ # Check if the modified custom xattr is being
+ # displayed on the bricks for dir1
+ self._check_xattr_value_on_bricks(online_bricks, expected_value="ABC")
+
+ # Remove the custom xattr from the mount point for dir1
+ ret = delete_fattr(self.client,
+ '{}/dir1'.format(self.m_point), 'user.foo')
+ self.assertTrue(ret, "Failed to delete the xattr for "
+ "dir1 on mountpoint")
+ g.log.info(
+ "Successfully deleted the xattr for dir1 from mountpoint")
+
+ # Lookup on mount point to refresh the value of xattr
+ self._perform_lookup()
+
+ # Check that the custom xattr is no longer displayed
+ # for dir1 on the mountpoint
+ ret = get_fattr(self.client, '{}/dir1'.format(self.m_point),
+ 'user.foo', encode="text")
+ self.assertEqual(ret, None, "Xattr for dir1 is not removed"
+ " on:{}".format(self.client))
+ g.log.info("Success: xattr is removed for dir1 on mointpoint")
+
+ # Check that the custom xattr is no longer displayed
+ # for dir1 on the backend bricks
+ for brick in online_bricks:
+ host, brick_path = brick.split(':')
+ ret = get_fattr(host, '{}/dir1'.format(brick_path),
+ 'user.foo', encode="text")
+ self.assertEqual(ret, None, "Xattr for dir1 is not removed"
+ " on:{}".format(brick_path))
+ g.log.info("Xattr for dir1 is removed from "
+ "brick:%s", brick_path)
+
+ # Check if the trusted.glusterfs.pathinfo is displayed
+ # for dir1 on the mountpoint
+ ret = get_fattr(self.client, '{}/dir1'.format(self.m_point),
+ 'trusted.glusterfs.pathinfo')
+ self.assertIsNotNone(ret, "Failed to get the xattr"
+ " on:{}".format(self.client))
+ g.log.info("The xattr trusted.glusterfs.pathinfo"
+ " is displayed on mointpoint for dir1")
+
+ # Set the xattr on the dir1
+ self._set_xattr_value(fattr_value="star1")
+
+ # Bring back the bricks online
+ ret, _, _ = volume_start(self.mnode, self.volname, force=True)
+ self.assertFalse(ret, 'Failed to start volume %s with "force" option'
+ % self.volname)
+ g.log.info('Successfully started volume %s with "force" option',
+ self.volname)
+
+ # Execute lookup on the mountpoint
+ self._perform_lookup()
+
+ # Get online brick list
+ online_bricks = get_online_bricks_list(self.mnode, self.volname)
+ self.assertIsNotNone(online_bricks, "Failed to get online bricks")
+
+ # Check if the custom xattr is being displayed
+ # on the mount-point for dir1
+ self._check_xattr_value_on_mnt(expected_value="star1")
+
+ # Check if the custom xattr is displayed on all the bricks
+ self._check_xattr_value_on_bricks(online_bricks,
+ expected_value="star1")
+
+ def test_custom_xattr_with_subvol_down_dir_exists(self):
+ """
+ Description:
+ Steps:
+ 1) Create directories from mount point.
+ 2) Bring one or more(not all) dht sub-volume(s) down by killing
+ processes on that server
+ 3) Create a custom xattr for dir hashed to down sub-volume and also for
+ another dir not hashing to down sub-volumes
+ # setfattr -n user.foo -v bar2 <dir>
+ 4) Verify that custom xattr for directory is displayed on mount point
+ and bricks for both directories
+ # getfattr -n user.foo <dir>
+ # getfattr -n user.foo <brick_path>/<dir>
+ 5) Modify custom xattr value and verify that custom xattr for directory
+ is displayed on mount point and all up bricks
+ # setfattr -n user.foo -v ABC <dir>
+ 6) Verify that custom xattr is not displayed once you remove it on
+ mount point and all up bricks
+ 7) Verify that mount point shows pathinfo xattr for dir hashed to down
+ sub-volume and also for dir not hashed to down sub-volumes
+ # getfattr -n trusted.glusterfs.pathinfo <dir>
+ 8) Again create a custom xattr for dir not hashing to down sub-volumes
+ # setfattr -n user.foo -v star1 <dir>
+ 9) Bring up the sub-volumes
+ 10) Execute lookup on parent directory of both <dir> from mount point
+ 11) Verify Custom extended attributes for dir1 on all bricks
+ """
+ # pylint: disable=protected-access
+ # Create dir1 on client0
+ self._create_dir(dir_name="dir1")
+
+ # Get subvol list
+ subvols = (get_subvols(self.mnode, self.volname))['volume_subvols']
+ self.assertIsNotNone(subvols, "Failed to get subvols")
+
+ # Finding a dir name such that it hashes to a different subvol
+ newhash = find_new_hashed(subvols, "/", "dir1")
+ new_name = str(newhash.newname)
+ new_subvol_count = newhash.subvol_count
+
+ # Create a dir with the new name
+ self._create_dir(dir_name=new_name)
+
+ # Kill the brick/subvol to which the new dir hashes
+ ret = bring_bricks_offline(
+ self.volname, subvols[new_subvol_count])
+ self.assertTrue(ret, ('Error in bringing down subvolume %s',
+ subvols[new_subvol_count]))
+ g.log.info('DHT subvol %s is offline', subvols[new_subvol_count])
+
+ # Set the xattr on dir hashing to down subvol
+ ret = set_fattr(self.client, '{}/{}'.format(self.m_point, new_name),
+ 'user.foo', 'bar2')
+ self.assertFalse(ret, "Unexpected: custom xattr set successfully"
+ " for dir hashing to down subvol")
+ g.log.info("Expected: Failed to set xattr on dir:%s"
+ " which hashes to down subvol due to error: Transport"
+ " endpoint not connected", new_name)
+
+ # Check if the trusted.glusterfs.pathinfo is displayed
+ # for dir hashing to down subvol on the mountpoint
+ ret = get_fattr(self.client, '{}/{}'.format(
+ self.m_point, new_name), 'trusted.glusterfs.pathinfo')
+ self.assertIsNotNone(ret, "Failed to get the xattr"
+ " on:{}".format(self.client))
+ g.log.info("The xattr trusted.glusterfs.pathinfo"
+ " is displayed on mointpoint for %s", new_name)
+
+ # Set the xattr on dir hashing to down subvol
+ ret = set_fattr(self.client, '{}/{}'.format(self.m_point, new_name),
+ 'user.foo', 'star1')
+ self.assertFalse(ret, "Unexpected: custom xattr set successfully"
+ " for dir hashing to down subvol")
+ g.log.info("Expected: Tansport endpoint not connected")
+
+ # Calling the local function
+ self._create_xattr_check_self_heal()
+
+ def test_custom_xattr_with_subvol_down_dir_doesnt_exists(self):
+ """
+ Description:
+ Steps:
+ 1) Bring one or more(not all) dht sub-volume(s) down by killing
+ processes on that server
+ 2) Create a directory from mount point such that it
+ hashes to up subvol.
+ 3) Create a custom xattr for dir
+ # setfattr -n user.foo -v bar2 <dir>
+ 4) Verify that custom xattr for directory is displayed on mount point
+ and bricks for directory
+ # getfattr -n user.foo <dir>
+ # getfattr -n user.foo <brick_path>/<dir>
+ 5) Modify custom xattr value and verify that custom xattr for directory
+ is displayed on mount point and all up bricks
+ # setfattr -n user.foo -v ABC <dir>
+ 6) Verify that custom xattr is not displayed once you remove it on
+ mount point and all up bricks
+ 7) Verify that mount point shows pathinfo xattr for dir
+ 8) Again create a custom xattr for dir
+ # setfattr -n user.foo -v star1 <dir>
+ 9) Bring up the sub-volumes
+ 10) Execute lookup on the parent directory of <dir> from mount point
+ 11) Verify Custom extended attributes for dir1 on all bricks
+ """
+ # Get subvol list
+ subvols = (get_subvols(self.mnode, self.volname))['volume_subvols']
+ self.assertIsNotNone(subvols, "Failed to get subvols")
+
+ # Find out the hashed subvol for dir1
+ hashed_subvol, subvol_count = find_hashed_subvol(subvols, "/", "dir1")
+ self.assertIsNotNone(hashed_subvol, "Could not find srchashed")
+ g.log.info("Hashed subvol for dir1 is %s", hashed_subvol._path)
+
+ # Remove the hashed_subvol from subvol list
+ subvols.remove(subvols[subvol_count])
+
+ # Bring down a dht subvol
+ ret = bring_bricks_offline(self.volname, subvols[0])
+ self.assertTrue(ret, ('Error in bringing down subvolume %s',
+ subvols[0]))
+ g.log.info('DHT subvol %s is offline', subvols[0])
+
+ # Create the dir1
+ self._create_dir(dir_name="dir1")
+
+ # Calling the local function
+ self._create_xattr_check_self_heal()
diff --git a/tests/functional/dht/test_delete_dir_with_self_pointing_linkto_files.py b/tests/functional/dht/test_delete_dir_with_self_pointing_linkto_files.py
new file mode 100644
index 000000000..f4541e2e8
--- /dev/null
+++ b/tests/functional/dht/test_delete_dir_with_self_pointing_linkto_files.py
@@ -0,0 +1,140 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+ # You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.brick_ops import remove_brick
+from glustolibs.gluster.glusterdir import mkdir, get_dir_contents
+from glustolibs.gluster.glusterfile import set_fattr, get_dht_linkto_xattr
+from glustolibs.gluster.rebalance_ops import wait_for_remove_brick_to_complete
+from glustolibs.gluster.volume_libs import form_bricks_list_to_remove_brick
+
+
+@runs_on([['distributed'], ['glusterfs']])
+class TestDeleteDirWithSelfPointingLinktoFiles(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Changing dist_count to 2
+ self.volume['voltype']['dist_count'] = 2
+
+ # Creating Volume and mounting the volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Volume creation or mount failed: %s"
+ % self.volname)
+
+ # Assign a variable for the first_client
+ self.first_client = self.mounts[0].client_system
+
+ def tearDown(self):
+
+ # Unmounting and cleaning volume
+ ret = self.unmount_volume_and_cleanup_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Unable to delete volume %s" % self.volname)
+
+ self.get_super_method(self, 'tearDown')()
+
+ def test_delete_dir_with_self_pointing_linkto_files(self):
+ """
+ Test case:
+ 1. Create a pure distribute volume with 2 bricks, start and mount it.
+ 2. Create dir0/dir1/dir2, create 1000 files inside it and
+ rename all the files.
+ 3. Start remove-brick operation on the volume.
+ 4. Check remove-brick status till status is completed.
+ 5. When remove-brick status is completed stop it.
+ 6. Go to the brick used for remove-brick and perform lookup on the files.
+ 7. Change the linkto xattr value for every file in the brick used for
+ remove-brick to point to itself.
+ 8. Perform rm -rf * from the mount point.
+ """
+ # Create dir /dir0/dir1/dir2
+ self.dir_path = "{}/dir0/dir1/dir2/".format(self.mounts[0].mountpoint)
+ ret = mkdir(self.first_client, self.dir_path, parents=True)
+ self.assertTrue(ret, "Failed to create /dir0/dir1/dir2/ dir")
+
+ # Create 1000 files inside /dir0/dir1/dir2
+ ret, _, _ = g.run(self.first_client,
+ 'cd %s;for i in {1..1000}; do echo "Test file" '
+ '> tfile-$i; done' % self.dir_path)
+ self.assertFalse(ret,
+ "Failed to create 1000 files inside /dir0/dir1/dir2")
+
+ # Rename 1000 files present inside /dir0/dir1/dir2
+ ret, _, _ = g.run(self.first_client,
+ "cd %s;for i in {1..1000};do mv tfile-$i "
+ "ntfile-$i;done" % self.dir_path)
+ self.assertFalse(ret,
+ "Failed to rename 1000 files inside /dir0/dir1/dir2")
+ g.log.info("I/O successful on mount point.")
+
+ # Start remove-brick operation on the volume
+ brick = form_bricks_list_to_remove_brick(self.mnode, self.volname,
+ subvol_num=1)
+ self.assertIsNotNone(brick, "Brick_list is empty")
+ ret, _, _ = remove_brick(self.mnode, self.volname, brick, 'start')
+ self.assertFalse(ret, "Failed to start remov-brick on volume")
+
+ # Check remove-brick status till status is completed
+ ret = wait_for_remove_brick_to_complete(self.mnode, self.volname,
+ brick)
+ self.assertTrue(ret, "Remove-brick didn't complete on volume")
+
+ # When remove-brick status is completed stop it
+ ret, _, _ = remove_brick(self.mnode, self.volname, brick, 'stop')
+ self.assertFalse(ret, "Failed to start remov-brick on volume")
+ g.log.info("Successfully started and stopped remove-brick")
+
+ # Go to brick used for remove brick and perform lookup on the files
+ node, path = brick[0].split(":")
+ path = "{}/dir0/dir1/dir2/".format(path)
+ ret, _, _ = g.run(node, 'ls {}*'.format(path))
+ self.assertFalse(ret, "Failed to do lookup on %s" % brick[0])
+
+ # Change the linkto xattr value for every file in brick used for
+ # remove brick to point to itself
+ ret = get_dir_contents(node, path)
+ self.assertIsNotNone(ret,
+ "Unable to get files present in dir0/dir1/dir2")
+
+ ret = get_dht_linkto_xattr(node, "{}{}".format(path, ret[0]))
+ self.assertIsNotNone(ret, "Unable to fetch dht linkto xattr")
+
+ # Change trusted.glusterfs.dht.linkto from dist-client-0 to
+ # dist-client-1 or vice versa according to initial value
+ dht_linkto_xattr = ret.split("-")
+ if int(dht_linkto_xattr[2]):
+ dht_linkto_xattr[2] = "0"
+ else:
+ dht_linkto_xattr[2] = "1"
+ linkto_value = "-".join(dht_linkto_xattr)
+
+ # Set xattr trusted.glusterfs.dht.linkto on all the linkto files
+ ret = set_fattr(node, '{}*'.format(path),
+ 'trusted.glusterfs.dht.linkto', linkto_value)
+ self.assertTrue(ret,
+ "Failed to change linkto file to point to itself")
+
+ # Perfrom rm -rf * from mount point
+ ret, _, _ = g.run(self.first_client,
+ "rm -rf {}/*".format(self.mounts[0].mountpoint))
+ self.assertFalse(ret, "Failed to run rm -rf * on mount point")
+ g.log.info("rm -rf * successful on mount point")
diff --git a/tests/functional/dht/test_delete_file_picked_for_migration.py b/tests/functional/dht/test_delete_file_picked_for_migration.py
new file mode 100644
index 000000000..2d66ec63b
--- /dev/null
+++ b/tests/functional/dht/test_delete_file_picked_for_migration.py
@@ -0,0 +1,165 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.rebalance_ops import (
+ get_rebalance_status, rebalance_start)
+from glustolibs.gluster.volume_libs import (get_subvols,
+ form_bricks_list_to_add_brick,
+ log_volume_info_and_status)
+from glustolibs.gluster.dht_test_utils import find_new_hashed
+from glustolibs.gluster.glusterfile import move_file, is_linkto_file
+from glustolibs.gluster.brick_ops import add_brick
+from glustolibs.gluster.brick_libs import get_all_bricks
+
+
+@runs_on([['distributed', 'distributed-replicated',
+ 'distributed-dispersed', 'distributed-arbiter'],
+ ['glusterfs']])
+class DeleteFileInMigration(GlusterBaseClass):
+ def setUp(self):
+ """
+ Setup and mount volume or raise ExecutionError
+ """
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Failed to Setup and Mount Volume")
+
+ # Form brick list for add-brick operation
+ self.add_brick_list = form_bricks_list_to_add_brick(
+ self.mnode, self.volname, self.servers, self.all_servers_info,
+ distribute_count=1)
+ if not self.add_brick_list:
+ raise ExecutionError("Volume %s: Failed to form bricks list for"
+ " add-brick" % self.volname)
+ g.log.info("Volume %s: Formed bricks list for add-brick operation",
+ (self.add_brick_list, self.volname))
+
+ def tearDown(self):
+
+ # Unmount Volume and Cleanup Volume
+ ret = self.unmount_volume_and_cleanup_volume(mounts=[self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Failed to Unmount Volume and Cleanup Volume")
+ g.log.info("Successful in Unmount Volume and Cleanup Volume")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_delete_file_in_migration(self):
+ """
+ Verify that if a file is picked for migration and then deleted, the
+ file should be removed successfully.
+ * First create a big data file of 10GB.
+ * Rename that file, such that after rename a linkto file is created
+ (we are doing this to make sure that file is picked for migration.)
+ * Add bricks to the volume and trigger rebalance using force option.
+ * When the file has been picked for migration, delete that file from
+ the mount point.
+ * Check whether the file has been deleted or not on the mount-point
+ as well as the back-end bricks.
+ """
+
+ # pylint: disable=too-many-statements
+ # pylint: disable=too-many-locals
+ # pylint: disable=protected-access
+
+ mountpoint = self.mounts[0].mountpoint
+
+ # Location of source file
+ src_file = mountpoint + '/file1'
+
+ # Finding a file name such that renaming source file to it will form a
+ # linkto file
+ subvols = (get_subvols(self.mnode, self.volname))['volume_subvols']
+ newhash = find_new_hashed(subvols, "/", "file1")
+ new_name = str(newhash.newname)
+ new_host = str(newhash.hashedbrickobject._host)
+ new_name_path = str(newhash.hashedbrickobject._fqpath)[:-2]
+
+ # Location of destination file to which source file will be renamed
+ dst_file = '{}/{}'.format(mountpoint, new_name)
+ # Create a 10GB source file
+ cmd = ("dd if=/dev/urandom of={} bs=1024K count=10000".format(
+ src_file))
+ ret, _, _ = g.run(self.clients[0], cmd)
+ self.assertEqual(ret, 0, ("File {} creation failed".format(src_file)))
+
+ # Move file such that it hashes to some other subvol and forms linkto
+ # file
+ ret = move_file(self.clients[0], src_file, dst_file)
+ self.assertTrue(ret, "Rename failed")
+ g.log.info('Renamed file %s to %s', src_file, dst_file)
+
+ # Check if "file_two" is linkto file
+ ret = is_linkto_file(new_host,
+ '{}/{}'.format(new_name_path, new_name))
+ self.assertTrue(ret, "File is not a linkto file")
+ g.log.info("File is linkto file")
+
+ # Expanding volume by adding bricks to the volume
+ ret, _, _ = add_brick(self.mnode, self.volname,
+ self.add_brick_list, force=True)
+ self.assertEqual(ret, 0, ("Volume {}: Add-brick failed".format
+ (self.volname)))
+ g.log.info("Volume %s: add-brick successful", self.volname)
+
+ # Log Volume Info and Status after expanding the volume
+ log_volume_info_and_status(self.mnode, self.volname)
+
+ # Start Rebalance
+ ret, _, _ = rebalance_start(self.mnode, self.volname, force=True)
+ self.assertEqual(ret, 0, ("Volume {}: Failed to start rebalance".format
+ (self.volname)))
+ g.log.info("Volume %s : Rebalance started ", self.volname)
+
+ # Check if rebalance is running and delete the file
+ status_info = get_rebalance_status(self.mnode, self.volname)
+ status = status_info['aggregate']['statusStr']
+ self.assertEqual(status, 'in progress', "Rebalance is not running")
+ ret, _, _ = g.run(self.clients[0], (" rm -rf {}".format(dst_file)))
+ self.assertEqual(ret, 0, ("Cannot delete file {}".format
+ (dst_file)))
+ g.log.info("File is deleted")
+
+ # Check if the file is present on the mount point
+ ret, _, _ = g.run(self.clients[0], ("ls -l {}".format(dst_file)))
+ self.assertEqual(ret, 2, ("Failed to delete file {}".format
+ (dst_file)))
+
+ # Check if the file is present on the backend bricks
+ bricks = get_all_bricks(self.mnode, self.volname)
+ for brick in bricks:
+ node, brick_path = brick.split(':')
+ ret, _, _ = g.run(node, "ls -l {}/{}".format
+ (brick_path, new_name))
+ self.assertEqual(ret, 2, "File is still present on"
+ " back-end brick: {}".format(
+ brick_path))
+ g.log.info("File is deleted from back-end brick: %s", brick_path)
+
+ # Check if rebalance process is still running
+ for server in self.servers:
+ ret, _, _ = g.run(server, "pgrep rebalance")
+ self.assertEqual(ret, 1, ("Rebalance process is still"
+ " running on server {}".format
+ (server)))
+ g.log.info("Rebalance process is not running")
diff --git a/tests/functional/dht/test_dht_create_dir.py b/tests/functional/dht/test_dht_create_dir.py
index f39e0d473..d3604dcbc 100644
--- a/tests/functional/dht/test_dht_create_dir.py
+++ b/tests/functional/dht/test_dht_create_dir.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -17,6 +17,7 @@
# pylint: disable=too-many-statements, undefined-loop-variable
# pylint: disable=too-many-branches,too-many-locals,pointless-string-statement
+from re import search
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
@@ -33,10 +34,9 @@ Description: tests to check the dht layouts of files and directories,
"""
-@runs_on([['replicated',
- 'distributed',
- 'distributed-replicated',
- 'dispersed', 'distributed-dispersed'],
+@runs_on([['distributed', 'distributed-replicated',
+ 'dispersed', 'distributed-dispersed', 'replicated',
+ 'arbiter', 'distributed-arbiter'],
['glusterfs']])
class TestDhtClass(GlusterBaseClass):
@@ -44,15 +44,13 @@ class TestDhtClass(GlusterBaseClass):
Description: tests to check the dht layouts of files and directories,
along with their symlinks.
"""
- @classmethod
- def setUpClass(cls):
+ def setUp(self):
- # Calling GlusterBaseClass setUpClass
- cls.get_super_method(cls, 'setUpClass')()
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
# Setup Volume and Mount Volume
- g.log.info("Starting to Setup Volume and Mount Volume")
- ret = cls.setup_volume_and_mount_volume(cls.mounts)
+ ret = self.setup_volume_and_mount_volume(self.mounts)
if not ret:
raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
g.log.info("Successful in Setup Volume and Mount Volume")
@@ -60,7 +58,6 @@ class TestDhtClass(GlusterBaseClass):
def tearDown(self):
# Unmount and cleanup original volume
- g.log.info("Starting to Unmount Volume and Cleanup Volume")
ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
if not ret:
raise ExecutionError("Failed to umount the vol & cleanup Volume")
@@ -71,7 +68,6 @@ class TestDhtClass(GlusterBaseClass):
def test_create_directory(self):
- g.log.info("creating multiple,multilevel directories")
m_point = self.mounts[0].mountpoint
command = 'mkdir -p ' + m_point + '/root_dir/test_dir{1..3}'
ret, _, _ = g.run(self.mounts[0].client_system, command)
@@ -82,8 +78,6 @@ class TestDhtClass(GlusterBaseClass):
self.assertEqual(ret, 0, "ls failed on parent directory:root_dir")
g.log.info("ls on parent directory: successful")
- g.log.info("creating files at different directory levels inside %s",
- self.mounts[0].mountpoint)
command = 'touch ' + m_point + \
'/root_dir/test_file{1..5} ' + m_point + \
'/root_dir/test_dir{1..3}/test_file{1..5}'
@@ -91,7 +85,7 @@ class TestDhtClass(GlusterBaseClass):
self.assertEqual(ret, 0, "File creation: failed")
command = 'ls ' + m_point + '/root_dir'
ret, out, _ = g.run(self.mounts[0].client_system, command)
- self.assertEqual(ret, 0, "can't list the created directories")
+ self.assertEqual(ret, 0, "Failed to list the created directories")
list_of_files_and_dirs = out.split('\n')
flag = True
for x_count in range(3):
@@ -104,25 +98,20 @@ class TestDhtClass(GlusterBaseClass):
flag = False
self.assertTrue(flag, "ls command didn't list all the "
"directories and files")
- g.log.info("creation of files at multiple levels successful")
+ g.log.info("Creation of files at multiple levels successful")
- g.log.info("creating a list of all directories")
command = 'cd ' + m_point + ';find root_dir -type d -print'
ret, out, _ = g.run(self.mounts[0].client_system, command)
- self.assertEqual(ret, 0, "creation of directory list failed")
+ self.assertEqual(ret, 0, "Creation of directory list failed")
list_of_all_dirs = out.split('\n')
del list_of_all_dirs[-1]
- g.log.info("verifying that all the directories are present on "
- "every brick and the layout ranges are correct")
flag = validate_files_in_dir(self.clients[0],
m_point + '/root_dir',
test_type=k.TEST_LAYOUT_IS_COMPLETE)
self.assertTrue(flag, "Layout has some holes or overlaps")
g.log.info("Layout is completely set")
- g.log.info("Checking if gfid xattr of directories is displayed and"
- "is same on all the bricks on the server node")
brick_list = get_all_bricks(self.mnode, self.volname)
for direc in list_of_all_dirs:
list_of_gfid = []
@@ -139,13 +128,11 @@ class TestDhtClass(GlusterBaseClass):
for x_count in range(len(list_of_gfid) - 1):
if list_of_gfid[x_count] != list_of_gfid[x_count + 1]:
flag = False
- self.assertTrue(flag, ("the gfid for the directory %s is not "
+ self.assertTrue(flag, ("The gfid for the directory %s is not "
"same on all the bricks", direc))
- g.log.info("the gfid for each directory is the same on all the "
+ g.log.info("The gfid for each directory is the same on all the "
"bricks")
- g.log.info("Verify that for all directories mount point "
- "should not display xattr")
for direc in list_of_all_dirs:
list_of_xattrs = get_fattr_list(self.mounts[0].client_system,
self.mounts[0].mountpoint
@@ -157,13 +144,11 @@ class TestDhtClass(GlusterBaseClass):
g.log.info("Verified : mount point not displaying important "
"xattrs")
- g.log.info("Verifying that for all directories only mount point "
- "shows pathinfo xattr")
for direc in list_of_all_dirs:
fattr = get_fattr(self.mounts[0].client_system,
self.mounts[0].mountpoint+'/'+direc,
'trusted.glusterfs.pathinfo')
- self.assertTrue(fattr, ("pathinfo not displayed for the "
+ self.assertTrue(fattr, ("Pathinfo not displayed for the "
"directory %s on mount point", direc))
brick_list = get_all_bricks(self.mnode, self.volname)
for direc in list_of_all_dirs:
@@ -178,118 +163,97 @@ class TestDhtClass(GlusterBaseClass):
def test_create_link_for_directory(self):
- g.log.info("creating a directory at mount point")
m_point = self.mounts[0].mountpoint
- test_dir_path = 'test_dir'
- fqpath = m_point + '/' + test_dir_path
+ fqpath_for_test_dir = m_point + '/test_dir'
+ flag = mkdir(self.clients[0], fqpath_for_test_dir, True)
+ self.assertTrue(flag, "Failed to create a directory")
+ fqpath = m_point + '/test_dir/dir{1..3}'
flag = mkdir(self.clients[0], fqpath, True)
- self.assertTrue(flag, "failed to create a directory")
- fqpath = m_point + '/' + test_dir_path + '/dir{1..3}'
- flag = mkdir(self.clients[0], fqpath, True)
- self.assertTrue(flag, "failed to create sub directories")
+ self.assertTrue(flag, "Failed to create sub directories")
flag = validate_files_in_dir(self.clients[0],
- m_point + '/test_dir',
+ fqpath_for_test_dir,
test_type=k.TEST_LAYOUT_IS_COMPLETE)
- self.assertTrue(flag, "layout of test directory is complete")
- g.log.info("directory created successfully")
+ self.assertTrue(flag, "Layout of test directory is not complete")
+ g.log.info("Layout for directory is complete")
- g.log.info("creating a symlink for test_dir")
sym_link_path = m_point + '/' + 'test_sym_link'
- command = 'ln -s ' + m_point + '/test_dir ' + sym_link_path
+ command = 'ln -s ' + fqpath_for_test_dir + ' ' + sym_link_path
ret, _, _ = g.run(self.mounts[0].client_system, command)
- self.assertEqual(ret, 0, "failed to create symlink for test_dir")
+ self.assertEqual(ret, 0, "Failed to create symlink for test_dir")
command = 'stat ' + sym_link_path
ret, out, _ = g.run(self.mounts[0].client_system, command)
- self.assertEqual(ret, 0, "stat command didn't return the details "
+ self.assertEqual(ret, 0, "Stat command didn't return the details "
"correctly")
flag = False
- g.log.info("checking if the link is symbolic")
if 'symbolic link' in out:
flag = True
- self.assertTrue(flag, "the type of the link is not symbolic")
- g.log.info("the link is symbolic")
- g.log.info("checking if the sym link points to right directory")
- index_start = out.find('->') + 6
- index_end = out.find("\n") - 3
- dir_pointed = out[index_start:index_end]
+ self.assertTrue(flag, "The type of the link is not symbolic")
+ g.log.info("The link is symbolic")
flag = False
- if dir_pointed == m_point + '/' + test_dir_path:
+ if search(fqpath_for_test_dir, out):
flag = True
self.assertTrue(flag, "sym link does not point to correct "
"location")
g.log.info("sym link points to right directory")
g.log.info("The details of the symlink are correct")
- g.log.info("verifying that inode number of the test_dir "
- "and its sym link are different")
- command = 'ls -id ' + m_point + '/' + \
- test_dir_path + ' ' + sym_link_path
+ command = 'ls -id ' + fqpath_for_test_dir + ' ' + sym_link_path
ret, out, _ = g.run(self.mounts[0].client_system, command)
- self.assertEqual(ret, 0, "inode numbers not retrieved by the "
+ self.assertEqual(ret, 0, "Inode numbers not retrieved by the "
"ls command")
list_of_inode_numbers = out.split('\n')
- flag = True
if (list_of_inode_numbers[0].split(' ')[0] ==
list_of_inode_numbers[1].split(' ')[0]):
flag = False
- self.assertTrue(flag, "the inode numbers of the dir and sym link "
+ self.assertTrue(flag, "The inode numbers of the dir and sym link "
"are same")
- g.log.info("verified: inode numbers of the test_dir "
+ g.log.info("Verified: inode numbers of the test_dir "
"and its sym link are different")
- g.log.info("listing the contents of the test_dir from its sym "
- "link")
command = 'ls ' + sym_link_path
ret, out1, _ = g.run(self.mounts[0].client_system, command)
- self.assertEqual(ret, 0, "failed to list the contents using the "
+ self.assertEqual(ret, 0, "Failed to list the contents using the "
"sym link")
- command = 'ls ' + m_point + '/' + test_dir_path
+ command = 'ls ' + fqpath_for_test_dir
ret, out2, _ = g.run(self.mounts[0].client_system, command)
- self.assertEqual(ret, 0, "failed to list the contents of the "
+ self.assertEqual(ret, 0, "Failed to list the contents of the "
"test_dir using ls command")
flag = False
if out1 == out2:
flag = True
- self.assertTrue(flag, "the contents listed using the sym link "
+ self.assertTrue(flag, "The contents listed using the sym link "
"are not the same")
- g.log.info("the contents listed using the symlink are"
+ g.log.info("The contents listed using the symlink are"
" the same as that of the test_dir")
- g.log.info("verifying that mount point doesn't display important "
- "xattrs using the symlink")
command = 'getfattr -d -m . -e hex ' + sym_link_path
ret, out, _ = g.run(self.mounts[0].client_system, command)
self.assertEqual(ret, 0, "failed to retrieve xattrs")
list_xattrs = ['trusted.gfid', 'trusted.glusterfs.dht']
- flag = True
for xattr in list_xattrs:
if xattr in out:
flag = False
- self.assertTrue(flag, "important xattrs are being compromised"
+ self.assertTrue(flag, "Important xattrs are being compromised"
" using the symlink at the mount point")
- g.log.info("verified: mount point doesn't display important "
+ g.log.info("Verified: mount point doesn't display important "
"xattrs using the symlink")
- g.log.info("verifying that mount point shows path info xattr for the"
- " test_dir and sym link and is same for both")
path_info_1 = get_pathinfo(self.mounts[0].client_system,
- m_point + '/' + test_dir_path)
+ fqpath_for_test_dir)
path_info_2 = get_pathinfo(self.mounts[0].client_system,
sym_link_path)
if path_info_1 == path_info_2:
flag = True
- self.assertTrue(flag, "pathinfos for test_dir and its sym link "
+ self.assertTrue(flag, "Pathinfos for test_dir and its sym link "
"are not same")
- g.log.info("pathinfos for test_dir and its sym link are same")
+ g.log.info("Pathinfos for test_dir and its sym link are same")
- g.log.info("verifying readlink on sym link at mount point returns "
- "the name of the directory")
command = 'readlink ' + sym_link_path
ret, out, _ = g.run(self.mounts[0].client_system, command)
self.assertEqual(ret, 0, "readlink command returned an error")
flag = False
- if out.rstrip() == m_point + '/' + test_dir_path:
+ if out.rstrip() == fqpath_for_test_dir:
flag = True
self.assertTrue(flag, "readlink did not return the path of the "
"test_dir")
diff --git a/tests/functional/dht/test_dht_custom_xattr.py b/tests/functional/dht/test_dht_custom_xattr.py
index a0a17958b..fa2ad8cdb 100644
--- a/tests/functional/dht/test_dht_custom_xattr.py
+++ b/tests/functional/dht/test_dht_custom_xattr.py
@@ -44,7 +44,8 @@ class TestDhtCustomXattrClass(GlusterBaseClass):
for mount_object in self.mounts:
for fname in self.files_and_soft_links:
attr_val = get_fattr(mount_object.client_system,
- fname, 'user.foo')
+ fname, 'user.foo',
+ encode='text')
self.assertEqual(attr_val, xattr_val,
"Custom xattr not found from mount.")
g.log.info("Custom xattr found on mount point.")
@@ -59,7 +60,7 @@ class TestDhtCustomXattrClass(GlusterBaseClass):
for fname in files:
attr_val = get_fattr(node,
"{}/{}".format(brick_path, fname),
- 'user.foo')
+ 'user.foo', encode='text')
self.assertEqual(attr_val, xattr_val,
"Custom xattr not visible on bricks")
g.log.info("Custom xattr found on bricks.")
@@ -104,7 +105,7 @@ class TestDhtCustomXattrClass(GlusterBaseClass):
for mount_object in self.mounts:
for fname in list_of_all_files:
ret = get_fattr(mount_object.client_system,
- fname, 'user.foo')
+ fname, 'user.foo', encode='text')
self.assertIsNone(ret,
"Custom attribute visible at mount "
"point even after deletion")
@@ -118,7 +119,7 @@ class TestDhtCustomXattrClass(GlusterBaseClass):
if fname.split('/')[3] in files_on_bricks]
for fname in files:
ret = get_fattr(node, "{}/{}".format(brick_path, fname),
- 'user.foo')
+ 'user.foo', encode='text')
self.assertIsNone(ret,
"Custom attribute visible on "
"brick even after deletion")
@@ -216,13 +217,13 @@ class TestDhtCustomXattrClass(GlusterBaseClass):
self.set_xattr_user_foo(self.list_of_files, 'bar2')
# Check if custom xattr is set to all the regular files
- self.check_custom_xattr_visible('bar2')
+ self.check_custom_xattr_visible("bar2")
# Change the custom xattr on all the regular files
self.set_xattr_user_foo(self.list_of_files, 'ABC')
# Check if xattr is set to all the regular files
- self.check_custom_xattr_visible('ABC')
+ self.check_custom_xattr_visible("ABC")
# Delete Custom xattr from all regular files
self.delete_xattr_user_foo(self.list_of_files)
@@ -238,13 +239,13 @@ class TestDhtCustomXattrClass(GlusterBaseClass):
self.set_xattr_user_foo(list_of_softlinks, 'bar2')
# Check if custom xattr is set to all the regular files
- self.check_custom_xattr_visible('bar2')
+ self.check_custom_xattr_visible("bar2")
# Change the custom xattr on all the regular files
self.set_xattr_user_foo(list_of_softlinks, 'ABC')
# Check if xattr is set to all the regular files
- self.check_custom_xattr_visible('ABC')
+ self.check_custom_xattr_visible("ABC")
# Delete Custom xattr from all regular files
self.delete_xattr_user_foo(list_of_softlinks)
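The encode='text' additions matter because get_fattr otherwise returns the value in an encoded form, so comparing against the literal string silently fails. A minimal sketch of the round trip, with an illustrative client host and file path:

    from glustolibs.gluster.glusterfile import get_fattr, set_fattr

    # Illustrative client host and file path.
    client, fname = 'client1.example.com', '/mnt/glusterfs/regfile-1'

    set_fattr(client, fname, 'user.foo', 'bar2')

    # With encode='text' the raw string comes back and compares cleanly;
    # without it the helper would return an encoded form (e.g. the hex
    # string '0x62617232').
    assert get_fattr(client, fname, 'user.foo', encode='text') == 'bar2'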
diff --git a/tests/functional/dht/test_dht_file_rename_when_dest_is_hashed_or_cached_to_diff_subvol_combinations.py b/tests/functional/dht/test_dht_file_rename_when_dest_is_hashed_or_cached_to_diff_subvol_combinations.py
new file mode 100644
index 000000000..884b55a2a
--- /dev/null
+++ b/tests/functional/dht/test_dht_file_rename_when_dest_is_hashed_or_cached_to_diff_subvol_combinations.py
@@ -0,0 +1,919 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import re
+from glusto.core import Glusto as g
+from glustolibs.gluster.glusterfile import get_file_stat
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.dht_test_utils import (find_hashed_subvol,
+ create_brickobjectlist,
+ find_new_hashed,
+ find_specific_hashed)
+from glustolibs.gluster.volume_libs import get_subvols, parse_vol_file
+from glustolibs.gluster.glusterfile import (move_file,
+ is_linkto_file,
+ get_dht_linkto_xattr)
+
+
+@runs_on([['distributed-arbiter', 'distributed',
+ 'distributed-replicated',
+ 'distributed-dispersed'],
+ ['glusterfs']])
+class DhtFileRenameWithDestFile(GlusterBaseClass):
+
+ def setUp(self):
+ """
+ Setup Volume and Mount Volume
+ """
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Change the dist count to 4 in case of 'distributed-replicated',
+ # 'distributed-dispersed' and 'distributed-arbiter'
+ if self.volume_type in ("distributed-replicated",
+ "distributed-dispersed",
+ "distributed-arbiter"):
+ self.volume['voltype']['dist_count'] = 4
+
+ # Setup Volume and Mount Volume
+ ret = self.setup_volume_and_mount_volume(mounts=[self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
+
+ self.mount_point = self.mounts[0].mountpoint
+
+ self.subvols = (get_subvols(
+ self.mnode, self.volname))['volume_subvols']
+ self.assertIsNotNone(self.subvols, "failed to get subvols")
+
+ def tearDown(self):
+ """
+ Unmount Volume and Cleanup Volume
+ """
+ # Unmount Volume and Cleanup Volume
+ ret = self.unmount_volume_and_cleanup_volume(mounts=[self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Unmount Volume and Cleanup Volume: Fail")
+ g.log.info("Unmount Volume and Cleanup Volume: Success")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def _create_file_and_get_hashed_subvol(self, file_name):
+ """ Creates a file and return its hashed subvol
+
+ Args:
+ file_name(str): name of the file to be created
+ Returns:
+ hashed_subvol object: An object of type BrickDir type
+ representing the hashed subvolume
+
+ subvol_count: The subvol index in the subvol list
+
+ source_file: Path to the file created
+
+ """
+ # pylint: disable=unsubscriptable-object
+
+ # Create Source File
+ source_file = "{}/{}".format(self.mount_point, file_name)
+ ret, _, err = g.run(self.mounts[0].client_system,
+ ("touch %s" % source_file))
+ self.assertEqual(ret, 0, ("Failed to create {} : err {}"
+ .format(source_file, err)))
+ g.log.info("Successfully created the source file")
+
+ # Find the hashed subvol for source file
+ source_hashed_subvol, count = find_hashed_subvol(self.subvols,
+ "/",
+ file_name)
+ self.assertIsNotNone(source_hashed_subvol,
+ "Couldn't find hashed subvol for the source file")
+ return source_hashed_subvol, count, source_file
+
+ @staticmethod
+ def _verify_link_file_exists(brickdir, file_name):
+ """ Verifies whether a file link is present in given subvol
+ Args:
+ brickdir(Class Object): BrickDir object containing data about
+ bricks under a specific subvol
+ Returns:
+ (bool): True if link file exists else false
+ """
+ # pylint: disable=protected-access
+ # pylint: disable=unsubscriptable-object
+ file_path = brickdir._fqpath + file_name
+ file_stat = get_file_stat(brickdir._host, file_path)
+ if file_stat is None:
+ g.log.error("Failed to get File stat for %s", file_path)
+ return False
+ if file_stat['access'] != "1000":
+ g.log.error("Access value not 1000 for %s", file_path)
+ return False
+
+ # Check for the file type to be 'sticky empty', have a size of 0 and
+ # have the glusterfs.dht.linkto xattr set.
+ ret = is_linkto_file(brickdir._host, file_path)
+ if not ret:
+ g.log.error("%s is not a linkto file", file_path)
+ return False
+ return True
+
+ @staticmethod
+ def _verify_file_exists(brick_dir, file_name):
+ """ Verifies whether a file is present in given subvol or not
+ Args:
+ brick_dir(Class Object): BrickDir object containing data about
+ bricks under a specific subvol
+ file_name(str): Name of the file to be searched
+ Returns:
+ (bool): True if the file exists, else False
+ """
+ # pylint: disable=protected-access
+
+ cmd = "[ -f {} ]".format(brick_dir._fqpath + (str(file_name)))
+ ret, _, _ = g.run(brick_dir._host, cmd)
+ if ret:
+ return False
+ return True
+
+ @staticmethod
+ def _get_remote_subvolume(vol_file_data, brick_name):
+ """ Verifies whether a file is present in given subvol or not
+ Args:
+ vol_file_data(dict): Dictionary containing data of .vol file
+ brick_name(str): Brick path
+ Returns:
+ (str): Remote subvol name
+ (None): If error occurred
+ """
+ try:
+ brick_name = re.search(r'[a-z0-9\-\_]*', brick_name).group()
+ remote_subvol = (vol_file_data[
+ brick_name]['option']['remote-subvolume'])
+ except KeyError:
+ return None
+ return remote_subvol
+
+ def _verify_file_links_to_specified_destination(self, host, file_path,
+ dest_file):
+ """ Verifies whether a file link points to the specified destination
+ Args:
+ host(str): Host at which commands are to be executed
+ file_path(str): path to the link file
+ dest_file(str): path to the dest file to be pointed at
+ Returns:
+ (bool) : Based on whether the given file points to dest or not
+ """
+ link_to_xattr = get_dht_linkto_xattr(host, file_path)
+ if link_to_xattr is None:
+ g.log.error("Failed to get trusted.glusterfs.dht.linkto")
+ return False
+ # Remove unexpected chars in the value, if any
+ link_to_xattr = re.search(r'[a-z0-9\-\_]*', link_to_xattr).group()
+
+ # Get the remote-subvolume for the corresponding linkto xattr
+ path = ("/var/lib/glusterd/vols/{}/{}.tcp-fuse.vol"
+ .format(self.volname, self.volname))
+ vol_data = parse_vol_file(self.mnode, path)
+ if not vol_data:
+ g.log.error("Failed to parse the file %s", path)
+ return False
+
+ remote_subvol = self._get_remote_subvolume(vol_data, link_to_xattr)
+ if remote_subvol is None:
+ # In case we failed to find the remote subvol, get all the
+ # subvolumes and then check whether the file is present in
+ # any of those subvols
+ subvolumes = vol_data[link_to_xattr]['subvolumes']
+ for subvol in subvolumes:
+ remote_subvol = self._get_remote_subvolume(vol_data,
+ subvol)
+ if remote_subvol:
+ subvol = re.search(r'[a-z0-9\-\_]*', subvol).group()
+ remote_host = (
+ vol_data[subvol]['option']['remote-host'])
+ # Verify the new file is in the remote-subvol identified
+ cmd = "[ -f {}/{} ]".format(remote_subvol, dest_file)
+ ret, _, _ = g.run(remote_host, cmd)
+ if not ret:
+ return True
+ g.log.error("The given link file doesn't point to any of "
+ "the subvolumes")
+ return False
+ else:
+ remote_host = vol_data[link_to_xattr]['option']['remote-host']
+ # Verify the new file is in the remote-subvol identified
+ cmd = "[ -f {}/{} ]".format(remote_subvol, dest_file)
+ ret, _, _ = g.run(remote_host, cmd)
+ if not ret:
+ return True
+ return False
+
+ def test_file_rename_when_dest_doesnt_hash_src_cached_or_hashed(self):
+ """
+ - Destination file should exist
+ - Source file is hashed on sub volume(s1) and cached on
+ another subvolume(s2)
+ - Destination file should be hashed to subvolume(s3) other
+ than above two subvolumes
+ - Destination file hashed on subvolume(s3) but destination file
+ should be cached on same subvolume(s2) where source file is stored
+ mv <source_file> <destination_file>
+ - Destination file is removed.
+ - Source file should be renamed as destination file
+ - Destination file hashed on subvolume and should link
+ to new destination file
+ - source link file should be removed
+ """
+ # pylint: disable=protected-access
+ # pylint: disable=too-many-locals
+
+ # Create source file and Get hashed subvol (s2)
+ src_subvol, src_count, source_file = (
+ self._create_file_and_get_hashed_subvol("test_source_file"))
+
+ # Find a new file name for the source file, which hashes
+ # to another subvol (s1)
+ new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
+ self.assertIsNotNone(new_hashed,
+ "couldn't find new hashed for destination file")
+
+ # Rename the source file to the new file name
+ dest_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ ret = move_file(self.mounts[0].client_system, source_file, dest_file)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(source_file, dest_file)))
+
+ # Verify the Source link file is stored on hashed sub volume(s1)
+ src_link_subvol = new_hashed.hashedbrickobject
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+ self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
+ "expected linkto file: {}"
+ .format(src_link_subvol._fqpath,
+ str(new_hashed.newname))))
+
+ # Identify a file name for dest to get stored in S2
+ dest_cached_subvol = find_specific_hashed(self.subvols,
+ "/",
+ src_subvol)
+ self.assertIsNotNone(dest_cached_subvol,
+ "could not find specific hashed for dstfile")
+ # Create the file with identified name
+ _, _, dst_file = (
+ self._create_file_and_get_hashed_subvol(
+ str(dest_cached_subvol.newname)))
+ # Verify it's in S2 itself
+ self.assertEqual(dest_cached_subvol.subvol_count, src_count,
+ ("The subvol found for destination is not same as "
+ "that of the source file cached subvol"))
+
+ # Find a subvol (s3) for dest file to linkto, other than S1 and S2
+ brickobject = create_brickobjectlist(self.subvols, "/")
+ self.assertIsNotNone(brickobject, "Failed to get brick object list")
+ br_count = -1
+ subvol_new = None
+ for brickdir in brickobject:
+ br_count += 1
+ if br_count not in (src_count, new_hashed.subvol_count):
+ subvol_new = brickdir
+ break
+
+ new_hashed2 = find_specific_hashed(self.subvols,
+ "/",
+ subvol_new)
+ self.assertIsNotNone(new_hashed2,
+ "could not find new hashed for dstfile")
+
+ # Verify the subvol is not same as S1(src_count) and S2(dest_count)
+ self.assertNotEqual(new_hashed2.subvol_count, src_count,
+ ("The subvol found for destination is same as that"
+ " of the source file cached subvol"))
+ self.assertNotEqual(new_hashed2.subvol_count, new_hashed.subvol_count,
+ ("The subvol found for destination is same as that"
+ " of the source file hashed subvol"))
+
+ # Rename the dest file to the new file name
+ dst_file_ln = "{}/{}".format(self.mount_point,
+ str(new_hashed2.newname))
+ ret = move_file(self.mounts[0].client_system, dst_file, dst_file_ln)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(dst_file, dst_file_ln)))
+
+ # Verify the Dest link file is stored on hashed sub volume(s3)
+ dest_link_subvol = new_hashed2.hashedbrickobject
+ ret = self._verify_link_file_exists(dest_link_subvol,
+ str(new_hashed2.newname))
+ self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
+ "expected linkto file: {}"
+ .format(dest_link_subvol._fqpath,
+ str(new_hashed2.newname))))
+
+ # Move/Rename Source File to Dest
+ src_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ ret = move_file(self.mounts[0].client_system, src_file, dst_file)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(src_file, dst_file)))
+
+ # Verify Source file is removed
+ ret = self._verify_file_exists(src_subvol, "test_source_file")
+ self.assertFalse(ret, "The source file is still present in {}"
+ .format(src_subvol._fqpath))
+
+ # Verify Source link is removed
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+ self.assertFalse(ret, "The source link file is still present in {}"
+ .format(src_link_subvol._fqpath))
+
+ # Verify the Destination link is on hashed subvolume
+ ret = self._verify_link_file_exists(dest_link_subvol,
+ str(new_hashed2.newname))
+ self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
+ "expected linkto file: {}"
+ .format(dest_link_subvol._fqpath,
+ str(new_hashed2.newname))))
+
+ # Verify the dest link file points to new destination file
+ file_path = dest_link_subvol._fqpath + str(new_hashed2.newname)
+ ret = (self._verify_file_links_to_specified_destination(
+ dest_link_subvol._host, file_path,
+ str(dest_cached_subvol.newname)))
+ self.assertTrue(ret, "The dest link file not pointing towards "
+ "the desired file")
+ g.log.info("The Destination link file is pointing to new file"
+ " as expected")
+
+ def test_file_rename_when_dest_hash_src_cached(self):
+ """
+ - Destination file should exist
+ - Source file hashed sub volume(s1) and cached on another subvolume(s2)
+ - Destination file should be hashed to subvolume where source file is
+ stored(s2)
+ - Destination file hashed on subvolume(s2) but should be cached on
+ some other subvolume(s3) than these two subvolumes
+ mv <source_file> <destination_file>
+ - Destination file is removed.
+ - Source file should be renamed as destination file
+ - Destination link file should be removed
+ - source link file should be removed
+ """
+ # pylint: disable=protected-access
+ # pylint: disable=too-many-locals
+
+ # Create source file and Get hashed subvol (s2)
+ src_subvol, src_count, source_file = (
+ self._create_file_and_get_hashed_subvol("test_source_file"))
+
+ # Find a new file name for the source file, which hashes
+ # to another subvol (s1)
+ new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
+ self.assertIsNotNone(new_hashed,
+ "couldn't find new hashed for destination file")
+
+ # Rename the source file to the new file name
+ src_hashed = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ ret = move_file(self.mounts[0].client_system, source_file, src_hashed)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(source_file, src_hashed)))
+
+ # Verify the Source link file is stored on hashed sub volume(s1)
+ src_link_subvol = new_hashed.hashedbrickobject
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+ self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
+ "expected linkto file: {}"
+ .format(src_link_subvol._fqpath,
+ str(new_hashed.newname))))
+
+ # Find a subvol (s3) for dest file to linkto, other than S1 and S2
+ brickobject = create_brickobjectlist(self.subvols, "/")
+ self.assertIsNotNone(brickobject, "Failed to get brick object list")
+ br_count = -1
+ subvol_new = None
+ for brickdir in brickobject:
+ br_count += 1
+ if br_count not in (src_count, new_hashed.subvol_count):
+ subvol_new = brickdir
+ break
+
+ new_hashed2 = find_specific_hashed(self.subvols,
+ "/",
+ subvol_new)
+ self.assertIsNotNone(new_hashed2,
+ "could not find new hashed for dstfile")
+
+ # Create a file in the subvol S3
+ dest_subvol, count, dest_file = (
+ self._create_file_and_get_hashed_subvol(str(new_hashed2.newname)))
+
+ # Verify the subvol is not same as S1 and S2
+ self.assertNotEqual(count, src_count,
+ ("The subvol found for destination is same as that"
+ " of the source file cached subvol"))
+ self.assertNotEqual(count, new_hashed.subvol_count,
+ ("The subvol found for destination is same as that"
+ " of the source file hashed subvol"))
+
+ # Find a file name that hashes to S2
+ dest_hashed = find_specific_hashed(self.subvols,
+ "/",
+ src_subvol)
+ self.assertIsNotNone(dest_hashed,
+ "could not find new hashed for dstfile")
+
+ # Rename destination to hash to S2 and verify
+ dest = "{}/{}".format(self.mount_point, str(dest_hashed.newname))
+ ret = move_file(self.mounts[0].client_system, dest_file, dest)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(dest_file, dest)))
+
+ # Rename Source File to Dest
+ ret = move_file(self.mounts[0].client_system, src_hashed, dest)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(src_hashed, dest)))
+
+ # Verify Destination File is removed
+ ret = self._verify_file_exists(new_hashed2.hashedbrickobject,
+ str(new_hashed2.newname))
+ self.assertFalse(ret, "The Destination file is still present in {}"
+ .format(dest_subvol._fqpath))
+
+ # Verify Source link is removed
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+ self.assertFalse(ret, "The source link file is still present in {}"
+ .format(src_link_subvol._fqpath))
+
+ # Verify Destination Link is removed
+ ret = self._verify_link_file_exists(dest_hashed.hashedbrickobject,
+ str(dest_hashed.newname))
+ self.assertFalse(ret, "The Dest link file is still present in {}"
+ .format(dest_hashed.hashedbrickobject._fqpath))
+
+ def test_file_rename_when_src_linked_and_dest_hash_other(self):
+ """
+ - Destination file should exist
+ - Source link file hashed on sub volume(s1) and cached on another
+ subvolume(s2)
+ - Destination file should be hashed to some other
+ subvolume(s3)(neither s1 nor s2)
+ - Destination file hashed on subvolume(s3) but cached on
+ subvolume(s1) where source file is hashed
+ mv <source_file> <destination_file>
+ - Destination file is removed.
+ - Source file should be renamed as destination file
+ - Destination link file should be there on hashed subvolume
+ and should link to new destination file
+ - source link file should be removed
+ """
+ # pylint: disable=protected-access
+ # pylint: disable=too-many-locals
+
+ # Create source file and Get hashed subvol (s2)
+ _, src_count, source_file = (
+ self._create_file_and_get_hashed_subvol("test_source_file"))
+
+ # Find a new file name for the source file, which hashes
+ # to another subvol (s1)
+ new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
+ self.assertIsNotNone(new_hashed,
+ "couldn't find new hashed for destination file")
+
+ # Rename the source file to the new file name
+ src_hashed = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ ret = move_file(self.mounts[0].client_system, source_file, src_hashed)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(source_file, src_hashed)))
+
+ # Verify the Source link file is stored on hashed sub volume(s1)
+ src_link_subvol = new_hashed.hashedbrickobject
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+ self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
+ "expected linkto file: {}"
+ .format(src_link_subvol._fqpath,
+ str(new_hashed.newname))))
+
+ # Find a file name that hashes to S1
+ dest_hashed = find_specific_hashed(self.subvols,
+ "/",
+ new_hashed.hashedbrickobject,
+ new_hashed.newname)
+ self.assertIsNotNone(dest_hashed,
+ "could not find new hashed for dstfile")
+
+ # Create a file in the subvol S1
+ dest_subvol, count, _ = self._create_file_and_get_hashed_subvol(
+ str(dest_hashed.newname))
+
+ # Verify the subvol is S1
+ self.assertEqual(count, new_hashed.subvol_count,
+ ("The subvol found for destination is not same as"
+ " that of the source file hashed subvol"))
+
+ # Find a subvol (s3) for dest file to linkto, other than S1 and S2
+ brickobject = create_brickobjectlist(self.subvols, "/")
+ self.assertIsNotNone(brickobject, "Failed to get brick object list")
+ br_count = -1
+ subvol_new = None
+ for brickdir in brickobject:
+ br_count += 1
+ if br_count not in (src_count, new_hashed.subvol_count):
+ subvol_new = brickdir
+ break
+
+ new_hashed2 = find_specific_hashed(self.subvols,
+ "/",
+ subvol_new)
+ self.assertIsNotNone(new_hashed2,
+ "could not find new hashed for dstfile")
+
+ # Rename destination to hash to S3 and verify
+ dest_src = "{}/{}".format(self.mount_point, str(dest_hashed.newname))
+ dest = "{}/{}".format(self.mount_point, str(new_hashed2.newname))
+ ret = move_file(self.mounts[0].client_system, dest_src, dest)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(dest_src, dest)))
+
+ # Rename Source File to Dest
+ ret = move_file(self.mounts[0].client_system, src_hashed, dest)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(src_hashed, dest)))
+
+ # Verify Destination File is removed
+ ret = self._verify_file_exists(dest_hashed.hashedbrickobject,
+ str(dest_hashed.newname))
+ self.assertFalse(ret, "The Destination file is still present in {}"
+ .format(dest_subvol._fqpath))
+
+ # Verify Source link is removed
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+ self.assertFalse(ret, "The source link file is still present in {}"
+ .format(src_link_subvol._fqpath))
+
+ # Verify Destination Link is present and points to new dest file
+ ret = self._verify_link_file_exists(new_hashed2.hashedbrickobject,
+ str(new_hashed2.newname))
+ self.assertTrue(ret, "The Dest link file is not present in {}"
+ .format(new_hashed2.hashedbrickobject._fqpath))
+
+ file_path = new_hashed2.hashedbrickobject._fqpath + str(
+ new_hashed2.newname)
+ ret = (self._verify_file_links_to_specified_destination(
+ new_hashed2.hashedbrickobject._host, file_path,
+ str(new_hashed2.newname)))
+ self.assertTrue(ret, "The dest link file not pointing towards "
+ "the desired file")
+ g.log.info("The Destination link file is pointing to new file"
+ " as expected")
+
+ def test_file_rename_when_dest_hash_src_cached_but_hash_other(self):
+ """
+ - Destination file should exist
+ - Source file hashed on sub volume(s1) and cached
+ on another subvolume(s2)
+ - Destination file should be hashed to same subvolume(s1)
+ where source file is hashed
+ - Destination hashed on subvolume(s1) but cached on some other
+ subvolume(s3)(neither s1 nor s2)
+ mv <source_file> <destination_file>
+ - Destination file is removed.
+ - Source file should be renamed as destination file
+ - Destination link file should be there on hashed subvolume
+ and should link to new destination file
+ - source link file should be removed
+ """
+ # pylint: disable=protected-access
+ # pylint: disable=too-many-locals
+
+ # Create source file and Get hashed subvol (s2)
+ _, src_count, source_file = (
+ self._create_file_and_get_hashed_subvol("test_source_file"))
+
+ # Find a new file name for the source file, which hashes
+ # to another subvol (s1)
+ new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
+ self.assertIsNotNone(new_hashed,
+ "couldn't find new hashed for destination file")
+
+ # Rename the source file to the new file name
+ src_hashed = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ ret = move_file(self.mounts[0].client_system, source_file, src_hashed)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(source_file, src_hashed)))
+
+ # Verify the Source link file is stored on hashed sub volume(s1)
+ src_link_subvol = new_hashed.hashedbrickobject
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+ self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
+ "expected linkto file: {}"
+ .format(src_link_subvol._fqpath,
+ str(new_hashed.newname))))
+
+ # Destination file cached on S3.
+ # Find a subvol (s3) to cache the dest file on, other than S1 and S2
+ brickobject = create_brickobjectlist(self.subvols, "/")
+ self.assertIsNotNone(brickobject, "Failed to get brick object list")
+ br_count = -1
+ subvol_new = None
+ for brickdir in brickobject:
+ br_count += 1
+ if br_count not in (src_count, new_hashed.subvol_count):
+ subvol_new = brickdir
+ break
+
+ dest_cached = find_specific_hashed(self.subvols,
+ "/",
+ subvol_new)
+ self.assertIsNotNone(dest_cached,
+ "could not find new hashed for dstfile")
+
+ # Create a file in S3
+ _, count, dest_src = self._create_file_and_get_hashed_subvol(
+ str(dest_cached.newname))
+
+ # Verify the subvol is not S2 and S1
+ self.assertNotEqual(count, new_hashed.subvol_count,
+ ("The subvol found for destination is same as "
+ "that of the source file hashed subvol"))
+ self.assertNotEqual(count, src_count,
+ ("The subvol found for destination is same as "
+ "that of the source file cached subvol"))
+
+ # Rename Destination file such that it hashes to S1
+ dest_hashed = find_specific_hashed(self.subvols,
+ "/",
+ new_hashed.hashedbrickobject,
+ new_hashed.newname)
+ self.assertIsNotNone(dest_hashed,
+ "could not find new hashed for dstfile")
+ # Verify it's S1
+ self.assertEqual(dest_hashed.subvol_count, new_hashed.subvol_count,
+ ("The subvol found for destination is not same as "
+ "that of the source file hashed subvol"))
+
+ # Move dest to new name
+ dest = "{}/{}".format(self.mount_point, str(dest_hashed.newname))
+ ret = move_file(self.mounts[0].client_system, dest_src, dest)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(dest_src, dest)))
+
+ # Move Source file to Dest
+ src = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ ret = move_file(self.mounts[0].client_system, src, dest)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(src, dest)))
+
+ # Verify Destination File is removed
+ ret = self._verify_file_exists(dest_cached.hashedbrickobject,
+ str(dest_cached.newname))
+ self.assertFalse(ret, "The Dest file is still present in {}"
+ .format(dest_cached.hashedbrickobject._fqpath))
+
+ # Verify Source link is removed
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+ self.assertFalse(ret, "The source link file is still present in {}"
+ .format(src_link_subvol._fqpath))
+
+ # Verify Destination Link is present and points to new dest file
+ ret = self._verify_link_file_exists(dest_hashed.hashedbrickobject,
+ str(dest_hashed.newname))
+ self.assertTrue(ret, "The Dest link file is not present in {}"
+ .format(dest_hashed.hashedbrickobject._fqpath))
+
+ file_path = dest_hashed.hashedbrickobject._fqpath + str(
+ dest_hashed.newname)
+ ret = (self._verify_file_links_to_specified_destination(
+ dest_hashed.hashedbrickobject._host, file_path,
+ str(dest_hashed.newname)))
+ self.assertTrue(ret, "The dest link file not pointing towards "
+ "the desired file")
+ g.log.info("The Destination link file is pointing to new file"
+ " as expected")
+
+ def test_file_rename_when_dest_neither_hash_cache_to_src_subvols(self):
+ """
+ - Destination file should exist
+ - Source file hashed on sub volume(s1) and cached on
+ another subvolume(s2)
+ - Destination file should be hashed to some other subvolume(s3)
+ (neither s1 nor s2)
+ - Destination file hashed on subvolume(s3) but cached on
+ remaining subvolume(s4)
+ mv <source_file> <destination_file>
+ - Destination file is removed.
+ - Source file should be renamed as destination file
+ - Destination link file should be there on hashed subvolume
+ and should link to new destination file
+ - source link file should be removed
+ """
+ # pylint: disable=protected-access
+ # pylint: disable=too-many-locals
+
+ # Create source file and Get hashed subvol (s2)
+ _, src_count, source_file = (
+ self._create_file_and_get_hashed_subvol("test_source_file"))
+
+ # Find a new file name for the source file, which hashes
+ # to another subvol (s1)
+ new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
+ self.assertIsNotNone(new_hashed,
+ "couldn't find new hashed for destination file")
+
+ # Rename the source file to the new file name
+ src_hashed = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ ret = move_file(self.mounts[0].client_system, source_file, src_hashed)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(source_file, src_hashed)))
+
+ # Verify the Source link file is stored on hashed sub volume(s1)
+ src_link_subvol = new_hashed.hashedbrickobject
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+ self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
+ "expected linkto file: {}"
+ .format(src_link_subvol._fqpath,
+ str(new_hashed.newname))))
+
+ # Destination file cached on S4.
+ # Find a subvol (s4) to cache the dest file on, other than S1 and S2
+ brickobject = create_brickobjectlist(self.subvols, "/")
+ self.assertIsNotNone(brickobject, "Failed to get brick object list")
+ br_count = -1
+ subvol_new = None
+ for brickdir in brickobject:
+ br_count += 1
+ if br_count not in (src_count, new_hashed.subvol_count):
+ subvol_new = brickdir
+ break
+
+ dest_cached = find_specific_hashed(self.subvols,
+ "/",
+ subvol_new)
+ self.assertIsNotNone(dest_cached,
+ "could not find new hashed for dstfile")
+ # Create a file in S4
+ _, _, dest_src = self._create_file_and_get_hashed_subvol(
+ str(dest_cached.newname))
+
+ # Verify the subvol is not S2 and S1
+ self.assertNotEqual(dest_cached.subvol_count, new_hashed.subvol_count,
+ ("The subvol found for destination is same as "
+ "that of the source file hashed subvol"))
+ self.assertNotEqual(dest_cached.subvol_count, src_count,
+ ("The subvol found for destination is same as "
+ "that of the source file cached subvol"))
+
+ # Identify a name for dest that hashes to another subvol S3
+ # Find a subvol (s3) for dest file to linkto, other than S1 and S2 and
+ # S4
+ brickobject = create_brickobjectlist(self.subvols, "/")
+ self.assertIsNotNone(brickobject, "Failed to get brick object list")
+ br_count = -1
+ subvol_new = None
+ for brickdir in brickobject:
+ br_count += 1
+ if br_count not in (src_count, new_hashed.subvol_count,
+ dest_cached.subvol_count):
+ subvol_new = brickdir
+ break
+
+ dest_hashed = find_specific_hashed(self.subvols,
+ "/",
+ subvol_new)
+ self.assertIsNotNone(dest_hashed,
+ "could not find new hashed for dstfile")
+
+ # Move dest to new name
+ dest = "{}/{}".format(self.mount_point, str(dest_hashed.newname))
+ ret = move_file(self.mounts[0].client_system, dest_src, dest)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(dest_src, dest)))
+
+ # Move Source file to Dest
+ src = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ ret = move_file(self.mounts[0].client_system, src, dest)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(src, dest)))
+
+ # Verify Destination File is removed
+ ret = self._verify_file_exists(dest_cached.hashedbrickobject,
+ str(dest_cached.newname))
+ self.assertFalse(ret, "The Source file is still present in {}"
+ .format(dest_cached.hashedbrickobject._fqpath))
+
+ # Verify Source link is removed
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+ self.assertFalse(ret, "The source link file is still present in {}"
+ .format(src_link_subvol._fqpath))
+
+ # Verify Destination Link is present and points to new dest file
+ ret = self._verify_link_file_exists(dest_hashed.hashedbrickobject,
+ str(dest_hashed.newname))
+ self.assertTrue(ret, "The Dest link file is not present in {}"
+ .format(dest_hashed.hashedbrickobject._fqpath))
+
+ file_path = dest_hashed.hashedbrickobject._fqpath + str(
+ dest_hashed.newname)
+ ret = (self._verify_file_links_to_specified_destination(
+ dest_hashed.hashedbrickobject._host, file_path,
+ str(dest_hashed.newname)))
+ self.assertTrue(ret, "The dest link file not pointing towards "
+ "the desired file")
+ g.log.info("The Destination link file is pointing to new file"
+ " as expected")
+
+ def test_file_rename_when_dest_hash_src_hashed_but_cache_diff(self):
+ """
+ - Destination file should exist
+ - Source file is stored on its hashed subvolume itself
+ - Destination file should be hashed to some other subvolume(s2)
+ - Destination file hashed on subvolume(s2) but cached on some other
+ subvolume(s3)(neither s1 nor s2)
+ mv <source_file> <destination_file>
+ - Destination file is removed.
+ - Source file should be renamed as destination file
+ - Destination link file should be there on hashed subvolume and
+ should link to new destination file
+ """
+ # pylint: disable=protected-access
+ # pylint: disable=too-many-locals
+
+ # Create source file and Get hashed subvol (s1)
+ _, src_count, source_file = (
+ self._create_file_and_get_hashed_subvol("test_source_file"))
+
+ # Find a new file name for destination to hash to some subvol S3
+ new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
+ self.assertIsNotNone(new_hashed,
+ "couldn't find new hashed for destination file")
+
+ # Create Dest file in S3
+ dest_cached, dest_count, dest_file = (
+ self._create_file_and_get_hashed_subvol(str(new_hashed.newname)))
+
+ # Verify S1 and S3 are not same
+ self.assertNotEqual(src_count, dest_count,
+ ("The destination file is cached to the source "
+ "cached subvol"))
+
+ # Find new name for dest file, that it hashes to some other subvol S2
+ brickobject = create_brickobjectlist(self.subvols, "/")
+ self.assertIsNotNone(brickobject, "Failed to get brick object list")
+ br_count = -1
+ subvol_new = None
+ for brickdir in brickobject:
+ br_count += 1
+ if br_count not in (src_count, dest_count):
+ subvol_new = brickdir
+ break
+
+ dest_hashed = find_specific_hashed(self.subvols,
+ "/",
+ subvol_new)
+ self.assertIsNotNone(dest_hashed,
+ "could not find new hashed for dstfile")
+ # Move dest to new name
+ dest = "{}/{}".format(self.mount_point, str(dest_hashed.newname))
+ ret = move_file(self.mounts[0].client_system, dest_file, dest)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(dest_file, dest)))
+
+ # Move Source file to Dest
+ ret = move_file(self.mounts[0].client_system, source_file, dest)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(source_file, dest)))
+
+ # Verify Destination File is removed
+ ret = self._verify_file_exists(dest_cached,
+ str(new_hashed.newname))
+ self.assertFalse(ret, "The Source file is still present in {}"
+ .format(dest_cached._fqpath))
+
+ # Verify Destination Link is present and points to new dest file
+ ret = self._verify_link_file_exists(dest_hashed.hashedbrickobject,
+ str(dest_hashed.newname))
+ self.assertTrue(ret, "The Dest link file is not present in {}"
+ .format(dest_hashed.hashedbrickobject._fqpath))
+
+ file_path = dest_hashed.hashedbrickobject._fqpath + str(
+ dest_hashed.newname)
+ ret = (self._verify_file_links_to_specified_destination(
+ dest_hashed.hashedbrickobject._host, file_path,
+ str(dest_hashed.newname)))
+ self.assertTrue(ret, "The dest link file not pointing towards "
+ "the desired file")
+ g.log.info("The Destination link file is pointing to new file"
+ " as expected")
diff --git a/tests/functional/dht/test_dht_file_rename_when_destination_file_exists.py b/tests/functional/dht/test_dht_file_rename_when_destination_file_exists.py
new file mode 100644
index 000000000..9673710e0
--- /dev/null
+++ b/tests/functional/dht/test_dht_file_rename_when_destination_file_exists.py
@@ -0,0 +1,540 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.glusterfile import get_file_stat
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.dht_test_utils import (find_hashed_subvol,
+ create_brickobjectlist,
+ find_new_hashed,
+ find_specific_hashed)
+from glustolibs.gluster.volume_libs import get_subvols
+from glustolibs.gluster.glusterfile import move_file, is_linkto_file
+
+
+@runs_on([['distributed', 'distributed-replicated',
+ 'distributed-dispersed', 'distributed-arbiter'],
+ ['glusterfs']])
+class DhtFileRenameWithDestFile(GlusterBaseClass):
+
+ def setUp(self):
+ """
+ Setup Volume and Mount Volume
+ """
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Change the dist count to 4 in case of 'distributed-replicated',
+ # 'distributed-dispersed' and 'distributed-arbiter'
+ if self.volume_type in ("distributed-replicated",
+ "distributed-dispersed",
+ "distributed-arbiter"):
+ self.volume['voltype']['dist_count'] = 4
+
+ # Setup Volume and Mount Volume
+ ret = self.setup_volume_and_mount_volume(mounts=[self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
+
+ self.mount_point = self.mounts[0].mountpoint
+
+ self.subvols = (get_subvols(
+ self.mnode, self.volname))['volume_subvols']
+ self.assertIsNotNone(self.subvols, "failed to get subvols")
+
+ def tearDown(self):
+ """
+ Unmount Volume and Cleanup Volume
+ """
+ # Unmount Volume and Cleanup Volume
+ ret = self.unmount_volume_and_cleanup_volume(mounts=[self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Unmount Volume and Cleanup Volume: Fail")
+ g.log.info("Unmount Volume and Cleanup Volume: Success")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def _create_file_and_get_hashed_subvol(self, file_name):
+ """ Creates a file and return its hashed subvol
+
+ Args:
+ file_name(str): name of the file to be created
+ Returns:
+ hashed_subvol object: An object of type BrickDir type
+ representing the hashed subvolume
+
+ subvol_count: The subvol index in the subvol list
+
+ source_file: Path to the file created
+
+ """
+ # pylint: disable=unsubscriptable-object
+
+ # Create Source File
+ source_file = "{}/{}".format(self.mount_point, file_name)
+ ret, _, err = g.run(self.mounts[0].client_system,
+ ("touch %s" % source_file))
+ self.assertEqual(ret, 0, ("Failed to create {} : err {}"
+ .format(source_file, err)))
+ g.log.info("Successfully created the source file")
+
+ # Find the hashed subvol for source file
+ source_hashed_subvol, count = find_hashed_subvol(self.subvols,
+ "/",
+ file_name)
+ self.assertIsNotNone(source_hashed_subvol,
+ "Couldn't find hashed subvol for the source file")
+ return source_hashed_subvol, count, source_file
+
+ @staticmethod
+ def _verify_link_file_exists(brickdir, file_name):
+ """ Verifies whether a file link is present in given subvol
+ Args:
+ brickdir(Class Object): BrickDir object containing data about
+ bricks under a specific subvol
+ Returns:
+ True/False(bool): Based on existance of file link
+ """
+ # pylint: disable=protected-access
+ # pylint: disable=unsubscriptable-object
+ file_path = brickdir._fqpath + file_name
+ file_stat = get_file_stat(brickdir._host, file_path)
+ if file_stat is None:
+ g.log.error("Failed to get File stat for %s", file_path)
+ return False
+ if not file_stat['access'] == "1000":
+ g.log.error("Access value not 1000 for %s", file_path)
+ return False
+
+        # Check for the file type to be 'sticky empty', have size of 0 and
+ # have the glusterfs.dht.linkto xattr set.
+ ret = is_linkto_file(brickdir._host, file_path)
+ if not ret:
+ g.log.error("%s is not a linkto file", file_path)
+ return False
+ return True
+
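A note on the helper above: it encodes what a DHT linkto file looks like on disk, namely an empty file whose mode is exactly 1000 (only the sticky bit set) and which carries the trusted.glusterfs.dht.linkto xattr. Below is a minimal local sketch of the same check, assuming direct root access to a brick path on a Linux host; the hypothetical is_dht_linkto function is illustrative only, while the tests perform this remotely via get_file_stat and is_linkto_file.

    import os
    import stat

    def is_dht_linkto(path):
        """Best-effort check that 'path' looks like a DHT linkto file."""
        st = os.lstat(path)
        # Linkto files carry mode ---------T (01000): sticky bit only.
        if stat.S_IMODE(st.st_mode) != 0o1000:
            return False
        # They are zero bytes and carry the dht.linkto xattr.
        if st.st_size != 0:
            return False
        try:
            os.getxattr(path, "trusted.glusterfs.dht.linkto")
        except OSError:
            return False
        return True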
+ @staticmethod
+ def _verify_file_exists(brick_dir, file_name):
+ """ Verifies whether a file is present in given subvol or not
+ Args:
+ brickdir(Class Object): BrickDir object containing data about
+ bricks under a specific subvol
+ file_name(str): Name of the file to be searched
+ Returns:
+            True/False(bool): Based on existence of file
+ """
+ # pylint: disable=protected-access
+
+ cmd = "[ -f {} ]".format(brick_dir._fqpath + (str(file_name)))
+ ret, _, _ = g.run(brick_dir._host, cmd)
+ if ret != 0:
+ return False
+ return True
+
+ def test_dht_file_rename_dest_exists_src_and_dest_hash_diff(self):
+ """
+        Case 6:
+        - Destination file should exist
+        - Source file is stored on hashed subvolume(s1) itself
+ - Destination file should be hashed to some other subvolume(s2)
+ - Destination file is stored on hashed subvolume
+ mv <source_file> <destination_file>
+ - Destination file is removed.
+ - Source file should be renamed as destination file
+ - Destination hashed file should be created on its hashed
+ subvolume(s2)
+ """
+ # pylint: disable=protected-access
+ # pylint: disable=unsubscriptable-object
+
+ # Create source file and Get hashed subvol (s1)
+ _, src_count, source_file = (
+ self._create_file_and_get_hashed_subvol("test_source_file"))
+
+ # Find a new file name for destination file, which hashes
+ # to another subvol (s2)
+ new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
+ self.assertIsNotNone(new_hashed,
+                             "couldn't find new hashed for destination file")
+
+ # create destination_file and get its hashed subvol (s2)
+ dest_hashed_subvol, dest_count, dest_file = (
+ self._create_file_and_get_hashed_subvol(str(new_hashed.newname)))
+
+ # Verify the subvols are not same for source and destination files
+ self.assertNotEqual(src_count,
+ dest_count,
+ "The subvols for src and dest are same.")
+
+ # Rename the source file to the destination file
+ ret = move_file(self.mounts[0].client_system, source_file, dest_file)
+ self.assertTrue(ret, "Failed to move files {} and {}".format(
+ source_file, dest_file))
+
+ # Verify destination file is removed
+ ret = self._verify_file_exists(dest_hashed_subvol,
+ str(new_hashed.newname))
+ self.assertTrue(ret, ("Destination file : {} is not removed in subvol"
+ " : {}".format(str(new_hashed.newname),
+ dest_hashed_subvol._fqpath)))
+ g.log.info("The destination file is removed as expected")
+
+ # Verify the Destination link is found in new subvol (s2)
+ ret = self._verify_link_file_exists(dest_hashed_subvol,
+ str(new_hashed.newname))
+ self.assertTrue(ret, ("The New hashed volume {} doesn't have the "
+ "expected linkto file {}"
+ .format(str(new_hashed.newname),
+ dest_hashed_subvol._fqpath)))
+ g.log.info("New hashed volume has the expected linkto file")
+
+ def test_dht_file_rename_dest_exists_src_and_dest_hash_same(self):
+ """
+ Case 7:
+ - Destination file should exist
+        - Source file is stored on hashed subvolume(s1) itself
+ - Destination file should be hashed to same subvolume(s1)
+ - Destination file is stored on hashed subvolume
+ mv <source_file> <destination_file>
+ - Destination file is removed.
+ - Source file should be renamed to destination file
+ """
+ # pylint: disable=protected-access
+ # pylint: disable=unsubscriptable-object
+
+        # Create source file and Get hashed subvol (s1)
+ source_hashed_subvol, src_count, source_file = (
+ self._create_file_and_get_hashed_subvol("test_source_file"))
+
+ # Find a new file name for destination file that hashes
+ # to same subvol (s1)
+ new_hashed = find_specific_hashed(self.subvols,
+ "/",
+ source_hashed_subvol)
+ self.assertIsNotNone(new_hashed, "Couldn't find a new hashed subvol "
+ "for destination file")
+
+ # Create destination_file and get its hashed subvol (should be s1)
+ dest_hashed_subvol, dest_count, dest_file = (
+ self._create_file_and_get_hashed_subvol(str(new_hashed.newname)))
+
+        # Verify the subvols are the same for source and destination files
+ self.assertEqual(src_count, dest_count,
+ "The subvols for src and dest are not same.")
+
+ # Rename the source file to the destination file
+ ret = move_file(self.mounts[0].client_system, source_file, dest_file)
+ self.assertTrue(ret, ("Failed to move files {} and {}"
+ .format(source_file, dest_file)))
+
+ # Verify the file move and the destination file is hashed to
+ # same subvol or not
+ _, rename_count = find_hashed_subvol(self.subvols,
+ "/",
+ str(new_hashed.newname))
+ self.assertEqual(dest_count,
+ rename_count,
+ ("The subvols for source : {} and dest : {} are "
+ "not same.".format(source_hashed_subvol._fqpath,
+ dest_hashed_subvol._fqpath)))
+
+ # Verify destination file is removed
+ ret = self._verify_file_exists(dest_hashed_subvol,
+ str(new_hashed.newname))
+ self.assertTrue(ret, ("Destination file : {} is not removed in subvol"
+ " : {}".format(str(new_hashed.newname),
+ dest_hashed_subvol._fqpath)))
+ g.log.info("The destination file is removed as expected")
+
+ def test_file_rename_dest_exist_and_not_hash_src_srclink_subvol(self):
+ """
+ Case 8:
+ - Destination file should exist
+        - Source file is hashed to subvolume(s1) and
+ cached on another subvolume(s2)
+ - Destination file should be hashed to some other subvolume(s3)
+ (should not be same subvolumes mentioned in above condition)
+ mv <source_file> <destination_file>
+ - Destination file is removed.
+ - Source file should be renamed as destination file
+        - Source hashed file should be removed
+ - Destination hashed file should be created on its hashed subvolume(s3)
+ """
+ # pylint: disable=protected-access
+ # pylint: disable=too-many-locals
+ # pylint: disable=unsubscriptable-object
+
+        # Create source file and Get hashed subvol (s2)
+        # (after the rename below, s2 becomes the cached subvol)
+ _, count, source_file = (
+ self._create_file_and_get_hashed_subvol("test_source_file"))
+
+ # Rename the file to create link in hashed subvol -(s1)
+ new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
+ self.assertIsNotNone(new_hashed,
+ "could not find new hashed for dstfile")
+ count2 = new_hashed.subvol_count
+ # Rename the source file to the new file name
+ dest_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ ret = move_file(self.mounts[0].client_system, source_file, dest_file)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(source_file, dest_file)))
+
+ # Verify the Source link file is stored on hashed sub volume(s1)
+ src_link_subvol = new_hashed.hashedbrickobject
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+ self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
+ "expected linkto file: {}"
+ .format(src_link_subvol._fqpath,
+ str(new_hashed.newname))))
+
+ # Find a subvol (s3) other than S1 and S2
+ brickobject = create_brickobjectlist(self.subvols, "/")
+ self.assertIsNotNone(brickobject, "Failed to get brick object list")
+ br_count = -1
+ subvol_new = None
+ for brickdir in brickobject:
+ br_count += 1
+ if br_count not in (count, count2):
+ subvol_new = brickdir
+ break
+
+ new_hashed2 = find_specific_hashed(self.subvols,
+ "/",
+ subvol_new)
+ self.assertIsNotNone(new_hashed2,
+ "could not find new hashed for dstfile")
+
+ # Create destination file in a new subvol (s3)
+ dest_hashed_subvol, dest_count, dest_file = (
+ self._create_file_and_get_hashed_subvol(str(new_hashed2.newname)))
+
+        # Verify the destination subvol is not the same as s1
+ self.assertNotEqual(count2, dest_count,
+ ("The subvols for src :{} and dest : {} are same."
+ .format(count2, dest_count)))
+        # Verify the destination subvol is not the same as s2
+ self.assertNotEqual(count, dest_count,
+ ("The subvols for src :{} and dest : {} are same."
+ .format(count, dest_count)))
+
+ # Rename the source file to the destination file
+ source_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ ret = move_file(self.mounts[0].client_system, source_file, dest_file)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(source_file, dest_file)))
+
+ # Verify destination file is removed
+ ret = self._verify_file_exists(dest_hashed_subvol,
+ str(new_hashed2.newname))
+ self.assertTrue(ret, ("Destination file : {} is not removed in subvol"
+ " : {}".format(str(new_hashed.newname),
+ dest_hashed_subvol._fqpath)))
+ g.log.info("The destination file is removed as expected")
+
+ # Check that the source link file is removed.
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+ self.assertFalse(ret, ("The New hashed volume {} still have the "
+ "expected linkto file {}"
+ .format(src_link_subvol._fqpath,
+ str(new_hashed.newname))))
+ g.log.info("The source link file is removed")
+
+ # Check Destination link file is created on its hashed sub-volume(s3)
+ ret = self._verify_link_file_exists(dest_hashed_subvol,
+ str(new_hashed2.newname))
+ self.assertTrue(ret, ("The New hashed volume {} doesn't have the "
+ "expected linkto file {}"
+ .format(dest_hashed_subvol._fqpath,
+ str(new_hashed2.newname))))
+ g.log.info("Destinaion link is created in desired subvol")
+
+ def test_file_rename_dest_exist_and_hash_to_src_subvol(self):
+ """
+ Case 9:
+ - Destination file should exist
+        - Source file is hashed to subvolume(s1) and
+ cached on another subvolume(s2)
+ - Destination file should be hashed to subvolume where source file
+ is cached(s2)
+ mv <source_file> <destination_file>
+ - Destination file is removed.
+ - Source file should be renamed as destination file
+        - Source hashed file should be removed
+ """
+ # pylint: disable=protected-access
+ # pylint: disable=unsubscriptable-object
+
+ # Get hashed subvol (S2)
+ source_hashed_subvol, src_count, source_file = (
+ self._create_file_and_get_hashed_subvol("test_source_file"))
+
+ # Rename the file to create link in hashed subvol -(s1)
+ new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
+ self.assertIsNotNone(new_hashed, ("could not find new hashed for {}"
+ .format(source_file)))
+
+ # Rename the source file to the new file name
+ dest_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ ret = move_file(self.mounts[0].client_system, source_file, dest_file)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(source_file, dest_file)))
+
+ # Verify the Source link file is stored on hashed sub volume(s1)
+ src_link_subvol = new_hashed.hashedbrickobject
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+ self.assertTrue(ret, ("The New hashed volume {} doesn't have the "
+ "expected linkto file {}"
+ .format(src_link_subvol._fqpath,
+ str(new_hashed.newname))))
+
+ # Get a file name for dest file to hash to the subvol s2
+ new_hashed2 = find_specific_hashed(self.subvols,
+ "/",
+ source_hashed_subvol)
+ self.assertIsNotNone(new_hashed2, "Could not find a name hashed"
+                                          " to the given subvol")
+
+ # Create destination file in the subvol (s2)
+ dest_hashed_subvol, dest_count, dest_file = (
+ self._create_file_and_get_hashed_subvol(str(new_hashed2.newname)))
+
+ # Verify the subvol is same as S2
+ self.assertEqual(src_count, dest_count,
+ "The subvols for src and dest are not same.")
+
+ # Move the source file to the new file name
+ source_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ ret = move_file(self.mounts[0].client_system, source_file, dest_file)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(source_file, dest_file)))
+
+ # Verify destination file is removed
+ ret = self._verify_file_exists(dest_hashed_subvol,
+ str(new_hashed2.newname))
+ self.assertTrue(ret, ("Destination file : {} is not removed in subvol"
+ " : {}".format(str(new_hashed.newname),
+ dest_hashed_subvol._fqpath)))
+ g.log.info("The destination file is removed as expected")
+
+ # Check that the source link file is removed.
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+ self.assertFalse(ret, ("The New hashed volume {} still have the "
+ "expected linkto file {}"
+ .format(src_link_subvol._fqpath,
+ str(new_hashed.newname))))
+ g.log.info("The source link file is removed")
+
+ def test_file_rename_dest_exist_and_hash_to_srclink_subvol(self):
+ """
+ Case 10:
+ - Destination file should exist
+        - Source file is hashed to subvolume(s1) and
+ cached on another subvolume(s2)
+ - Destination file should be hashed to same subvolume(s1) where source
+ file is hashed.
+ mv <source_file> <destination_file>
+ - Destination file is removed.
+ - Source file(cached) should be renamed to destination file
+ - Source file(hashed) should be removed.
+        - Destination hashed file should be created on its
+ hashed subvolume(s1)
+ """
+ # pylint: disable=protected-access
+ # pylint: disable=unsubscriptable-object
+
+        # Get hashed subvol (s2)
+ _, src_count, source_file = (
+ self._create_file_and_get_hashed_subvol("test_source_file"))
+
+ # Rename the file to create link in another subvol - (s1)
+ new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
+ self.assertIsNotNone(new_hashed, ("could not find new hashed subvol "
+ "for {}".format(source_file)))
+
+ self.assertNotEqual(src_count,
+ new_hashed.subvol_count,
+ "New file should hash to different sub-volume")
+
+ # Rename the source file to the new file name
+ dest_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ ret = move_file(self.mounts[0].client_system, source_file, dest_file)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(source_file, dest_file)))
+
+ # Verify the Source link file is stored on hashed sub volume(s1)
+ src_link_subvol = new_hashed.hashedbrickobject
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+ self.assertTrue(ret, ("The New hashed volume {} doesn't have the "
+ "expected linkto file {}"
+ .format(src_link_subvol._fqpath,
+ str(new_hashed.newname))))
+
+ # Get a file name for dest to hash to the subvol s1
+ new_hashed2 = find_specific_hashed(self.subvols,
+ "/",
+ src_link_subvol,
+ new_hashed.newname)
+ self.assertIsNotNone(new_hashed2, ("Couldn't find a name hashed to the"
+ " given subvol {}"
+ .format(src_link_subvol)))
+        # Create destination file in the subvol (s1)
+ dest_hashed_subvol, dest_count, dest_file = (
+ self._create_file_and_get_hashed_subvol(str(new_hashed2.newname)))
+
+ # Verify the subvol is same as S1
+ self.assertEqual(new_hashed.subvol_count, dest_count,
+ "The subvols for src and dest are not same.")
+
+ # Move the source file to the new file name
+ source_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ dest_file = "{}/{}".format(self.mount_point, str(new_hashed2.newname))
+ ret = move_file(self.mounts[0].client_system, source_file, dest_file)
+ self.assertTrue(ret, "Failed to move file")
+
+ # Verify destination file is removed
+ ret = self._verify_file_exists(dest_hashed_subvol,
+ str(new_hashed2.newname))
+ self.assertTrue(ret, ("Destination file : {} is not removed in subvol"
+ " : {}".format(str(new_hashed.newname),
+ dest_hashed_subvol._fqpath)))
+ g.log.info("The destination file is removed as expected")
+
+ # Check that the source link file is removed.
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+ self.assertFalse(ret, ("The hashed volume {} still have the "
+ "expected linkto file {}"
+ .format(src_link_subvol._fqpath,
+ str(new_hashed.newname))))
+ g.log.info("The source link file is removed")
+
+ # Check Destination link file is created on its hashed sub-volume(s1)
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed2.newname))
+ self.assertTrue(ret, ("The New hashed volume {} doesn't have the "
+ "expected linkto file {}"
+ .format(src_link_subvol._fqpath,
+ str(new_hashed2.newname))))
+ g.log.info("Destinaion link is created in desired subvol")
diff --git a/tests/functional/dht/test_directory_custom_extended_attributes.py b/tests/functional/dht/test_directory_custom_extended_attributes.py
index b391593b1..fd1493622 100644
--- a/tests/functional/dht/test_directory_custom_extended_attributes.py
+++ b/tests/functional/dht/test_directory_custom_extended_attributes.py
@@ -117,7 +117,8 @@ class TestDirectoryCustomExtendedAttributes(GlusterBaseClass):
mount_point, folder_name)
ret = get_fattr(mount_point.client_system,
mount_point.mountpoint,
- 'trusted.glusterfs.pathinfo')
+ 'trusted.glusterfs.pathinfo',
+ encode="text")
self.assertIsNotNone(ret,
"trusted.glusterfs.pathinfo is not "
"presented on %s:%s" %
@@ -139,7 +140,7 @@ class TestDirectoryCustomExtendedAttributes(GlusterBaseClass):
g.log.debug('Check xarttr user.foo on %s:%s',
mount_point.client_system, folder_name)
ret = get_fattr(mount_point.client_system, folder_name,
- 'user.foo')
+ 'user.foo', encode="text")
self.assertEqual(ret, 'bar2',
"Xattr attribute user.foo is not presented on "
"mount point %s and directory %s" %
@@ -153,7 +154,8 @@ class TestDirectoryCustomExtendedAttributes(GlusterBaseClass):
brick_path = dir_prefix.format(root=brick_dir,
client_index=mount_index)
- ret = get_fattr(brick_server, brick_path, 'user.foo')
+ ret = get_fattr(brick_server, brick_path, 'user.foo',
+ encode="text")
g.log.debug('Check custom xattr for directory on brick %s:%s',
brick_server, brick_path)
@@ -177,7 +179,8 @@ class TestDirectoryCustomExtendedAttributes(GlusterBaseClass):
g.log.debug('Looking if custom extra attribute user.foo is '
'presented on mount or on bricks after deletion')
self.assertIsNone(get_fattr(mount_point.client_system,
- folder_name, 'user.foo'),
+ folder_name, 'user.foo',
+ encode="text"),
"Xattr user.foo is presented on mount point"
" %s:%s after deletion" %
(mount_point.mountpoint, folder_name))
@@ -277,7 +280,7 @@ class TestDirectoryCustomExtendedAttributes(GlusterBaseClass):
g.log.debug('Check mountpoint and bricks for custom xattribute')
self.assertEqual('bar2', get_fattr(mount_point.client_system,
linked_folder_name,
- 'user.foo'),
+ 'user.foo', encode="text"),
'Custom xattribute is not presented on '
'mount point %s:%s' %
(mount_point.client_system, linked_folder_name))
@@ -297,7 +300,8 @@ class TestDirectoryCustomExtendedAttributes(GlusterBaseClass):
continue
self.assertEqual(get_fattr(brick_server, brick_path,
- 'user.foo'), 'bar2',
+ 'user.foo', encode="text"),
+ 'bar2',
"Actual: custom attribute not "
"found on brick %s:%s" % (
brick_server, brick_path))
@@ -319,7 +323,8 @@ class TestDirectoryCustomExtendedAttributes(GlusterBaseClass):
"after deletion", mount_point.client_system,
linked_folder_name)
self.assertIsNone(get_fattr(mount_point.client_system,
- linked_folder_name, 'user.foo'),
+ linked_folder_name, 'user.foo',
+ encode="text"),
"Expected: xattr user.foo to be not presented on"
" %s:%s" % (mount_point.client_system,
linked_folder_name))
@@ -339,7 +344,7 @@ class TestDirectoryCustomExtendedAttributes(GlusterBaseClass):
continue
self.assertIsNone(get_fattr(brick_server, brick_path,
- 'user.foo'),
+ 'user.foo', encode="text"),
"Extended custom attribute is presented on "
"%s:%s after deletion" % (brick_server,
brick_path))
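Every hunk in the change above makes the same adjustment: get_fattr is now asked for a text-encoded value. A short sketch of the call pattern, assuming host and path are placeholders for a reachable node and file; with encode="text" the xattr reads back as the plain string that was set, so it can be compared directly:

    from glustolibs.gluster.glusterfile import get_fattr

    # Hypothetical host/path; text encoding returns the original string
    # (e.g. 'bar2') rather than the default hex form.
    value = get_fattr(host, path, 'user.foo', encode="text")
    assert value == 'bar2'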
diff --git a/tests/functional/dht/test_disable_readdirp_data_loss.py b/tests/functional/dht/test_disable_readdirp_data_loss.py
new file mode 100644
index 000000000..47be667cc
--- /dev/null
+++ b/tests/functional/dht/test_disable_readdirp_data_loss.py
@@ -0,0 +1,103 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.volume_ops import set_volume_options
+from glustolibs.gluster.glusterdir import get_dir_contents
+
+
+@runs_on([['distributed-dispersed'], ['glusterfs']])
+class TestDisableReaddirpDataLoss(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume
+ if not self.setup_volume():
+ raise ExecutionError("Failed to Setup_Volume %s" % self.volname)
+
+ def tearDown(self):
+
+ # Unmount volume if mounted
+ if self.currently_mounted_clients:
+ if not self.unmount_volume(self.currently_mounted_clients):
+ raise ExecutionError("Failed to unmount Volume")
+
+ # Cleanup volume
+ if not self.cleanup_volume():
+ raise ExecutionError("Failed to Cleanup Volume")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def _mount_on_a_client(self, mountobj):
+ """Mount volume on one client and update list"""
+ ret = self.mount_volume([mountobj])
+ self.assertTrue(ret, "Failed to mount volume on client")
+ self.currently_mounted_clients.append(mountobj)
+
+    def _perform_lookups_on_mount_point(self, node, mountpoint):
+ """Perform lookups on a given mount point"""
+ ret = get_dir_contents(node, mountpoint)
+ self.assertEqual(len(ret), 8,
+                         "8 dirs not present on mount point %s on node %s"
+                         % (mountpoint, node))
+ g.log.info("Lookup successful on node %s and mount point %s",
+ node, mountpoint)
+
+ def test_disable_readdirp_data_loss(self):
+ """
+ Test case:
+ 1. Create a 2 x (4+2) disperse volume and start it.
+ 2. Disable performance.force-readdirp and dht.force-readdirp.
+ 3. Mount the volume on one client and create 8 directories.
+ 4. Do a lookup on the mount using the same mount point,
+ number of directories should be 8.
+ 5. Mount the volume again on a different client and check
+ if number of directories is the same or not.
+ """
+ # List to determine if volume is mounted or not
+ self.currently_mounted_clients = []
+
+ # Disable performance.force-readdirp and dht.force-readdirp
+ for option, val in (("performance.force-readdirp", "disable"),
+ ("dht.force-readdirp", "off")):
+ ret = set_volume_options(self.mnode, self.volname, {option: val})
+ self.assertTrue(ret, "Failed to set volume option %s to %s"
+ % (option, val))
+ g.log.info("Successfully disabled performance.force-readdirp and "
+ "dht.force-readdirp")
+
+ # Mount the volume on one client and create 8 directories
+ self._mount_on_a_client(self.mounts[0])
+ ret, _, _ = g.run(self.mounts[0].client_system,
+ "mkdir %s/dir{1..8}" % self.mounts[0].mountpoint)
+ self.assertFalse(ret, "Failed to create 8 directories on mount point")
+ g.log.info("Successfully mounted and create 8 dirs on mount point")
+
+ # Do a lookup on the mount using the same mount point,
+ # number of directories should be 8
+        self._perform_lookups_on_mount_point(self.mounts[0].client_system,
+ self.mounts[0].mountpoint)
+
+ # Mount the volume again on a different client and check
+ # if number of directories is the same or not
+ self._mount_on_a_client(self.mounts[1])
+        self._perform_lookups_on_mount_point(self.mounts[1].client_system,
+ self.mounts[1].mountpoint)
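For reference, the two option changes made in setUp above map to plain gluster CLI calls. A sketch, with mnode and volname as placeholders:

    from glusto.core import Glusto as g

    for option, val in (("performance.force-readdirp", "disable"),
                        ("dht.force-readdirp", "off")):
        ret, _, err = g.run(mnode, "gluster volume set %s %s %s"
                            % (volname, option, val))
        assert ret == 0, err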
diff --git a/tests/functional/dht/test_file_creation.py b/tests/functional/dht/test_file_creation.py
new file mode 100644
index 000000000..5671cb84b
--- /dev/null
+++ b/tests/functional/dht/test_file_creation.py
@@ -0,0 +1,494 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.glusterfile import (get_file_stat, get_pathinfo,
+ file_exists, create_link_file,
+ get_md5sum, get_fattr)
+from glustolibs.gluster.lib_utils import append_string_to_file
+
+
+@runs_on([['distributed', 'distributed-arbiter',
+ 'distributed-replicated', 'distributed-dispersed'],
+ ['glusterfs']])
+class TestFileCreation(GlusterBaseClass):
+ def setUp(self):
+
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume and Mount Volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
+ g.log.info("Successful in Setup Volume and Mount Volume")
+ self.client, self.m_point = (self.mounts[0].client_system,
+ self.mounts[0].mountpoint)
+
+ def tearDown(self):
+
+ # Unmount and cleanup original volume
+ ret = self.unmount_volume_and_cleanup_volume(mounts=[self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Failed to umount the vol & cleanup Volume")
+ g.log.info("Successful in umounting the volume and Cleanup")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def _create_file_using_touch(self, file_name):
+ """Creates a regular empty file"""
+ cmd = "touch {}/{}".format(self.m_point, file_name)
+ ret, _, _ = g.run(self.client, cmd)
+ self.assertEqual(ret, 0, "Failed to create file {}".format(file_name))
+ g.log.info("Successfully created file %s", file_name)
+
+ def _check_file_stat_on_mountpoint(self, file_name, file_type):
+ """Check the file-type on mountpoint"""
+ file_stat = (get_file_stat(self.client, "{}/{}".format(
+ self.m_point, file_name
+ )))['filetype']
+ self.assertEqual(file_stat, file_type,
+ "File is not a {}".format(file_type))
+ g.log.info("File is %s", file_type)
+
+ def _is_file_present_on_brick(self, file_name):
+ """Check if file is created on the backend-bricks as per
+ the value of trusted.glusterfs.pathinfo xattr"""
+ brick_list = get_pathinfo(self.client, "{}/{}".format(
+ self.m_point, file_name))
+ self.assertNotEqual(
+ brick_list, 0, "Failed to get bricklist for {}".format(file_name))
+
+ for brick in brick_list['brickdir_paths']:
+ host, path = brick.split(':')
+ ret = file_exists(host, path)
+ self.assertTrue(ret, "File {} is not present on {}".format(
+ file_name, brick
+ ))
+ g.log.info("File %s is present on %s", file_name, brick)
+
+ def _compare_file_permissions(self, file_name,
+ file_info_mnt=None, file_info_brick=None):
+ """Check if the file's permission are same on mountpoint and
+ backend-bricks"""
+ if (file_info_mnt is None and file_info_brick is None):
+ file_info_mnt = (get_file_stat(self.client, "{}/{}".format(
+ self.m_point, file_name
+ )))['access']
+ self.assertIsNotNone(
+ file_info_mnt, "Failed to get access time for {}".format(
+ file_name))
+ brick_list = get_pathinfo(self.client, "{}/{}".format(
+ self.m_point, file_name))
+ self.assertNotEqual(
+ brick_list, 0, "Failed to get bricklist for {}".format(
+ file_name))
+ file_info_brick = []
+ for brick in brick_list['brickdir_paths']:
+ host, path = brick.split(':')
+ info_brick = (get_file_stat(host, path))['access']
+ file_info_brick.append(info_brick)
+
+ for info in file_info_brick:
+ self.assertEqual(info, file_info_mnt,
+                             "File details for {} are different on"
+                             " backend-brick".format(file_name))
+        g.log.info("Details for file %s are correct"
+                   " on backend-bricks", file_name)
+
+ def _check_change_time_mnt(self, file_name):
+ """Find out the modification time for file on mountpoint"""
+ file_ctime_mnt = (get_file_stat(self.client, "{}/{}".format(
+ self.m_point, file_name
+ )))['epoch_ctime']
+ return file_ctime_mnt
+
+ def _check_change_time_brick(self, file_name):
+ """Find out the modification time for file on backend-bricks"""
+ brick_list = get_pathinfo(self.client, "{}/{}".format(
+ self.m_point, file_name))
+ self.assertNotEqual(brick_list, 0,
+ "Failed to get bricklist for {}".format(file_name))
+
+ brick_mtime = []
+ for brick in brick_list['brickdir_paths']:
+ host, path = brick.split(':')
+ cmd = "ls -lR {}".format(path)
+ ret, _, _ = g.run(host, cmd)
+ self.assertEqual(ret, 0, "Lookup failed on"
+ " brick:{}".format(path))
+ file_ctime_brick = (get_file_stat(host, path))['epoch_ctime']
+ brick_mtime.append(file_ctime_brick)
+ return brick_mtime
+
+ def _compare_file_perm_mnt(self, mtime_before, mtime_after,
+ file_name):
+ """Compare the file permissions before and after appending data"""
+ self.assertNotEqual(mtime_before, mtime_after, "Unexpected:"
+ "The ctime has not been changed")
+ g.log.info("The modification time for %s has been"
+ " changed as expected", file_name)
+
+ def _collect_and_compare_file_info_on_mnt(
+ self, link_file_name, values, expected=True):
+ """Collect the files's permissions on mountpoint and compare"""
+ stat_test_file = get_file_stat(
+ self.client, "{}/test_file".format(self.m_point))
+ self.assertIsNotNone(stat_test_file, "Failed to get stat of test_file")
+ stat_link_file = get_file_stat(
+ self.client, "{}/{}".format(self.m_point, link_file_name))
+ self.assertIsNotNone(stat_link_file, "Failed to get stat of {}".format(
+ link_file_name))
+
+ for key in values:
+ if expected is True:
+ self.assertEqual(stat_test_file[key], stat_link_file[key],
+ "The {} is not same for test_file"
+ " and {}".format(key, link_file_name))
+ g.log.info("The %s for test_file and %s is same on mountpoint",
+ key, link_file_name)
+ else:
+ self.assertNotEqual(stat_test_file[key], stat_link_file[key],
+ "Unexpected : The {} is same for test_file"
+ " and {}".format(key, link_file_name))
+ g.log.info("The %s for test_file and %s is different"
+ " on mountpoint", key, link_file_name)
+
+ def _compare_file_md5sum_on_mnt(self, link_file_name):
+ """Collect and compare the md5sum for file on mountpoint"""
+ md5sum_test_file, _ = (get_md5sum(
+ self.client, "{}/test_file".format(self.m_point))).split()
+ self.assertIsNotNone(
+ md5sum_test_file, "Failed to get md5sum for test_file")
+
+ md5sum_link_file, _ = get_md5sum(
+ self.client, "{}/{}".format(self.m_point, link_file_name)).split()
+ self.assertIsNotNone(md5sum_link_file, "Failed to get"
+ " md5sum for {}".format(link_file_name))
+ self.assertEqual(md5sum_test_file, md5sum_link_file,
+ "The md5sum for test_file and {} is"
+ " not same".format(link_file_name))
+ g.log.info("The md5sum is same for test_file and %s"
+ " on mountpoint", link_file_name)
+
+ def _compare_file_md5sum_on_bricks(self, link_file_name):
+ """Collect and compare md5sum for file on backend-bricks"""
+ brick_list_test_file = get_pathinfo(self.client, "{}/test_file".format(
+ self.m_point))
+ md5sum_list_test_file = []
+ for brick in brick_list_test_file['brickdir_paths']:
+ host, path = brick.split(':')
+ md5sum_test_file, _ = (get_md5sum(host, path)).split()
+ md5sum_list_test_file.append(md5sum_test_file)
+
+        brick_list_link_file = get_pathinfo(self.client, "{}/{}".format(
+            self.m_point, link_file_name))
+        md5sum_list_link_file = []
+        for brick in brick_list_link_file['brickdir_paths']:
+            host, path = brick.split(':')
+            md5sum_link_file, _ = (get_md5sum(host, path)).split()
+            md5sum_list_link_file.append(md5sum_link_file)
+
+        self.assertEqual(md5sum_list_test_file, md5sum_list_link_file,
+                         "The md5sum for test_file and {} is"
+                         " not same on backend bricks".format(link_file_name))
+        g.log.info("The md5sum for test_file and %s is same"
+                   " on backend bricks", link_file_name)
+
+ def _compare_gfid_xattr_on_files(self, link_file_name, expected=True):
+ """Collect and compare the value of trusted.gfid xattr for file
+ on backend-bricks"""
+ brick_list_test_file = get_pathinfo(self.client, "{}/test_file".format(
+ self.m_point))
+ xattr_list_test_file = []
+ for brick in brick_list_test_file['brickdir_paths']:
+ host, path = brick.split(':')
+ xattr_test_file = get_fattr(host, path, "trusted.gfid")
+ xattr_list_test_file.append(xattr_test_file)
+
+ brick_list_link_file = get_pathinfo(self.client, "{}/{}".format(
+ self.m_point, link_file_name))
+ xattr_list_link_file = []
+ for brick in brick_list_link_file['brickdir_paths']:
+ host, path = brick.split(':')
+ xattr_link_file = get_fattr(host, path, "trusted.gfid")
+ xattr_list_link_file.append(xattr_link_file)
+
+ if expected is True:
+ self.assertEqual(xattr_list_test_file, xattr_list_link_file,
+ "Unexpected: The xattr trusted.gfid is not same "
+ "for test_file and {}".format(link_file_name))
+ g.log.info("The xattr trusted.gfid is same for test_file"
+ " and %s", link_file_name)
+ else:
+ self.assertNotEqual(xattr_list_test_file, xattr_list_link_file,
+ "Unexpected: The xattr trusted.gfid is same "
+ "for test_file and {}".format(link_file_name))
+ g.log.info("The xattr trusted.gfid is not same for test_file"
+ " and %s", link_file_name)
+
+ def test_special_file_creation(self):
+ """
+ Description : check creation of different types of files.
+
+ Steps:
+        1) From mount point, create a regular file, e.g.:
+        touch f1
+        - From mount point, create character, block device and pipe files:
+        mknod c
+        mknod b
+        mkfifo
+        2) Stat on the files created in Step-1 from mount point
+        3) Verify that each file is stored only on the brick(s) mentioned
+        in the trusted.glusterfs.pathinfo xattr
+        On mount point -
+        " getfattr -n trusted.glusterfs.pathinfo "
+        On all bricks -
+        " ls "
+        4) Verify that file permissions are same on mount point and
+        sub-volumes
+        " stat "
+        5) Append some data to the file.
+        6) List content of file to verify that data has been appended.
+        " cat "
+        7) Verify that file change time and size have been updated
+        accordingly (from mount point and sub-volume)
+        " stat "
+ """
+ # pylint: disable=too-many-statements
+ # pylint: disable=too-many-locals
+ # Create a regular file
+ self._create_file_using_touch("regfile")
+
+ # Create a character and block file
+ for (file_name, parameter) in [
+ ("blockfile", "b"), ("charfile", "c")]:
+ cmd = "mknod {}/{} {} 1 5".format(self.m_point, file_name,
+ parameter)
+ ret, _, _ = g.run(self.client, cmd)
+ self.assertEqual(
+ ret, 0, "Failed to create {} file".format(file_name))
+ g.log.info("%s file created successfully", file_name)
+
+ # Create a pipe file
+ cmd = "mkfifo {}/pipefile".format(self.m_point)
+ ret, _, _ = g.run(self.client, cmd)
+ self.assertEqual(ret, 0, "Failed to create pipe file")
+ g.log.info("Pipe file is created successfully")
+
+ # Stat all the files created on mount-point
+ for (file_name, check_string) in [
+ ("regfile", "regular empty file"),
+ ("charfile", "character special file"),
+ ("blockfile", "block special file"),
+ ("pipefile", "fifo")]:
+ self._check_file_stat_on_mountpoint(file_name, check_string)
+
+ # Verify files are stored on backend bricks as per
+ # the trusted.glusterfs.pathinfo
+ file_types = ["regfile", "charfile", "blockfile", "pipefile"]
+
+ for file_name in file_types:
+ self._is_file_present_on_brick(file_name)
+
+ # Verify that the file permissions are same on
+ # mount-point and bricks
+ for file_name in file_types:
+ self._compare_file_permissions(file_name)
+
+        # Note the change time (ctime) on mount and bricks for all
+        # files; it should be the same on mnt and bricks
+ reg_mnt_ctime_1 = self._check_change_time_mnt("regfile")
+ char_mnt_ctime_1 = self._check_change_time_mnt("charfile")
+ block_mnt_ctime_1 = self._check_change_time_mnt("blockfile")
+ fifo_mnt_ctime_1 = self._check_change_time_mnt("pipefile")
+
+ reg_brick_ctime_1 = self._check_change_time_brick("regfile")
+ char_brick_ctime_1 = self._check_change_time_brick("charfile")
+ block_brick_ctime_1 = self._check_change_time_brick("blockfile")
+ fifo_brick_ctime_1 = self._check_change_time_brick("pipefile")
+
+ for (file_name, mnt_ctime, brick_ctime) in [
+ ("regfile", reg_mnt_ctime_1, reg_brick_ctime_1),
+ ("charfile", char_mnt_ctime_1, char_brick_ctime_1),
+ ("blockfile", block_mnt_ctime_1, block_brick_ctime_1),
+ ("pipefile", fifo_mnt_ctime_1, fifo_brick_ctime_1)]:
+ self._compare_file_permissions(
+ file_name, mnt_ctime, brick_ctime)
+
+ # Append some data to the files
+ for (file_name, data_str) in [
+ ("regfile", "regular"),
+ ("charfile", "character special"),
+ ("blockfile", "block special")]:
+ ret = append_string_to_file(
+ self.client, "{}/{}".format(self.m_point, file_name),
+ "Welcome! This is a {} file".format(data_str))
+ self.assertTrue(
+ ret, "Failed to append data to {}".format(file_name))
+ g.log.info(
+ "Successfully appended data to %s", file_name)
+
+ # Check if the data has been appended
+ check = "Welcome! This is a regular file"
+ cmd = "cat {}/{}".format(self.m_point, "regfile")
+ ret, out, _ = g.run(self.client, cmd)
+ self.assertEqual(out.strip(), check, "No data present at regfile")
+
+ # Append data to pipefile and check if it has been appended
+ g.run_async(self.client, "echo 'Hello' > {}/{} ".format(
+ self.m_point, "pipefile"))
+ ret, out, _ = g.run(
+ self.client, "cat < {}/{}".format(self.m_point, "pipefile"))
+ self.assertEqual(
+ ret, 0, "Unable to fetch datat on other terimnal")
+ self.assertEqual(
+ "Hello", out.split('\n')[0],
+ "Hello not recieved on the second terimnal")
+
+ # Lookup on mount-point
+ cmd = "ls -lR {}".format(self.m_point)
+ ret, _, _ = g.run(self.client, cmd)
+ self.assertEqual(ret, 0, "Lookup on mountpoint failed")
+
+ # Collect ctime on mount point after appending data
+ reg_mnt_ctime_2 = self._check_change_time_mnt("regfile")
+
+ # After appending data the ctime for file should change
+ self.assertNotEqual(reg_mnt_ctime_1, reg_mnt_ctime_2, "Unexpected:"
+ "The ctime has not been changed")
+ g.log.info("The modification time for regfile has been"
+ " changed as expected")
+
+ # Collect the ctime on bricks
+ reg_brick_ctime_2 = self._check_change_time_brick("regfile")
+
+ # Check if the ctime has changed on bricks as per mount
+ self._compare_file_permissions(
+ "regfile", reg_mnt_ctime_2, reg_brick_ctime_2)
+
+ def test_hard_link_file(self):
+ """
+ Description: link file create, validate and access file
+ using it
+
+ Steps:
+ 1) From mount point, create a regular file
+        2) Verify that file is stored only on the bricks which are
+ mentioned in trusted.glusterfs.pathinfo xattr
+ 3) From mount point create hard-link file for the created file
+ 4) From mount point stat on the hard-link file and original file;
+ file inode, permission, size should be same
+ 5) From mount point, verify that file contents are same
+ "md5sum"
+ 6) Verify "trusted.gfid" extended attribute of the file
+ on sub-vol
+ 7) From sub-volume stat on the hard-link file and original file;
+ file inode, permission, size should be same
+ 8) From sub-volume verify that content of file are same
+ """
+ # Create a regular file
+ self._create_file_using_touch("test_file")
+
+        # Check the file is created on bricks as per trusted.glusterfs.pathinfo
+ self._is_file_present_on_brick("test_file")
+
+ # Create a hard-link file for the test_file
+ ret = create_link_file(
+ self.client, "{}/test_file".format(self.m_point),
+ "{}/hardlink_file".format(self.m_point))
+ self.assertTrue(ret, "Failed to create hard link file for"
+ " test_file")
+ g.log.info("Successfully created hardlink_file")
+
+ # On mountpoint perform stat on original and hard-link file
+ values = ["inode", "access", "size"]
+ self._collect_and_compare_file_info_on_mnt(
+ "hardlink_file", values, expected=True)
+
+ # Check the md5sum on original and hard-link file on mountpoint
+ self._compare_file_md5sum_on_mnt("hardlink_file")
+
+ # Compare the value of trusted.gfid for test_file and hard-link file
+ # on backend-bricks
+ self._compare_gfid_xattr_on_files("hardlink_file")
+
+        # Re-check stat on original and hard-link file from the mount point
+ values = ["inode", "access", "size"]
+ self._collect_and_compare_file_info_on_mnt("hardlink_file", values)
+
+ # On backend bricks check the md5sum
+ self._compare_file_md5sum_on_bricks("hardlink_file")
+
+ def test_symlink_file(self):
+ """
+ Description: Create symbolic link file, validate and access file
+ using it
+
+ Steps:
+ 1) From mount point, create a regular file
+        2) Verify that file is stored only on the bricks which are
+ mentioned in trusted.glusterfs.pathinfo xattr
+ 3) From mount point create symbolic link file for the created file
+ 4) From mount point stat on the symbolic link file and original file;
+ file inode should be different
+ 5) From mount point, verify that file contents are same
+ "md5sum"
+ 6) Verify "trusted.gfid" extended attribute of the file
+ on sub-vol
+ 7) Verify readlink on symbolic link from mount point
+ "readlink "
+ 8) From sub-volume verify that content of file are same
+ """
+ # Create a regular file on mountpoint
+ self._create_file_using_touch("test_file")
+
+        # Check the file is created on bricks as per trusted.glusterfs.pathinfo
+ self._is_file_present_on_brick("test_file")
+
+ # Create a symbolic-link file for the test_file
+ ret = create_link_file(
+ self.client, "{}/test_file".format(self.m_point),
+ "{}/softlink_file".format(self.m_point), soft=True)
+ self.assertTrue(ret, "Failed to create symbolic link file for"
+ " test_file")
+ g.log.info("Successfully created softlink_file")
+
+ # On mountpoint perform stat on original and symbolic-link file
+ # The value of inode should be different
+ values = ["inode"]
+ self._collect_and_compare_file_info_on_mnt(
+ "softlink_file", values, expected=False)
+
+ # Check the md5sum on original and symbolic-link file on mountpoint
+ self._compare_file_md5sum_on_mnt("softlink_file")
+
+ # Compare the value of trusted.gfid for test_file and
+ # symbolic-link file on backend-bricks
+ self._compare_gfid_xattr_on_files("softlink_file")
+
+ # Verify readlink on symbolic-link from mount point
+ cmd = "readlink {}/softlink_file".format(self.m_point)
+ ret, out, _ = g.run(self.client, cmd)
+ self.assertEqual(
+ out.strip(), "{}/test_file".format(self.m_point),
+ "Symbolic link points to incorrect file")
+ g.log.info("Symbolic link points to correct file")
+
+ # Check the md5sum on original and symbolic-link file on backend bricks
+ self._compare_file_md5sum_on_bricks("softlink_file")
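The hard-link and symlink cases above hinge on one backend detail: a hard link shares the trusted.gfid xattr of its target, while a symlink gets a gfid of its own. A minimal local sketch of that check, assuming root access to a hypothetical brick path on a Linux host:

    import os

    brick = "/bricks/brick0"  # hypothetical brick path

    gfid_file = os.getxattr("%s/test_file" % brick, "trusted.gfid")
    gfid_hard = os.getxattr("%s/hardlink_file" % brick, "trusted.gfid")
    gfid_soft = os.getxattr("%s/softlink_file" % brick, "trusted.gfid",
                            follow_symlinks=False)

    assert gfid_file == gfid_hard   # hard link: same gfid
    assert gfid_file != gfid_soft   # symlink: its own gfid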
diff --git a/tests/functional/dht/test_file_rename_when_destination_file_doesnot_exist.py b/tests/functional/dht/test_file_rename_when_destination_file_doesnot_exist.py
new file mode 100644
index 000000000..7f98fbede
--- /dev/null
+++ b/tests/functional/dht/test_file_rename_when_destination_file_doesnot_exist.py
@@ -0,0 +1,450 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.glusterfile import get_file_stat
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.dht_test_utils import (find_hashed_subvol,
+ create_brickobjectlist,
+ find_new_hashed,
+ find_specific_hashed)
+from glustolibs.gluster.volume_libs import get_subvols
+from glustolibs.gluster.glusterfile import move_file, is_linkto_file
+
+
+@runs_on([['distributed', 'distributed-replicated',
+ 'distributed-dispersed', 'distributed-arbiter'],
+ ['glusterfs']])
+class DhtFileRenameVerification(GlusterBaseClass):
+
+ def setUp(self):
+ """
+ Setup Volume and Mount Volume
+ """
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Change the dist count to 4 in case of 'distributed-replicated' ,
+ # 'distributed-dispersed' and 'distributed-arbiter'
+ if self.volume_type in ("distributed-replicated",
+ "distributed-dispersed",
+ "distributed-arbiter"):
+ self.volume['voltype']['dist_count'] = 4
+
+ # Setup Volume and Mount Volume
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
+
+ mount_obj = self.mounts[0]
+ self.mount_point = mount_obj.mountpoint
+
+ self.subvols = (get_subvols(
+ self.mnode, self.volname))['volume_subvols']
+ self.assertIsNotNone(self.subvols, "failed to get subvols")
+
+ def tearDown(self):
+ """
+ Unmount Volume and Cleanup Volume
+ """
+ # Unmount Volume and Cleanup Volume
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Unmount Volume and Cleanup Volume: Fail")
+ g.log.info("Unmount Volume and Cleanup Volume: Success")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def _create_file_and_get_hashed_subvol(self, file_name):
+ """ Creates a file and return its hashed subvol
+
+ Args:
+ file_name(str): name of the file to be created
+ Returns:
+            hashed_subvol object: An object of BrickDir type
+ representing the hashed subvolume
+
+ subvol_count: The subvol index in the subvol list
+
+ source_file: Path to the file created
+
+ """
+ # pylint: disable=unsubscriptable-object
+
+ # Create Source File
+ source_file = "{}/{}".format(self.mount_point, file_name)
+ ret, _, err = g.run(self.clients[0], ("touch %s" % source_file))
+ self.assertEqual(ret, 0, ("Failed to create {} : err {}"
+ .format(source_file, err)))
+ g.log.info("Successfully created the source file")
+
+ # Find the hashed subvol for source file
+ source_hashed_subvol, count = find_hashed_subvol(self.subvols,
+ "/",
+ file_name)
+ self.assertIsNotNone(source_hashed_subvol, ("Couldn't find hashed "
+ "subvol for the {}"
+ .format(source_file)))
+ return source_hashed_subvol, count, source_file
+
+ @staticmethod
+ def _verify_link_file_exists(brickdir, file_name):
+ """ Verifies whether a file link is present in given subvol
+ Args:
+ brickdir(Class Object): BrickDir object containing data about
+ bricks under a specific subvol
+ Returns:
+ True/False(bool): Based on existance of file link
+ """
+ # pylint: disable=protected-access
+ # pylint: disable=unsubscriptable-object
+ file_path = brickdir._fqpath + file_name
+ file_stat = get_file_stat(brickdir._host, file_path)
+ if file_stat is None:
+ g.log.error("Failed to get File stat for %s", file_path)
+ return False
+ if not file_stat['access'] == "1000":
+ g.log.error("Access value not 1000 for %s", file_path)
+ return False
+
+        # Check for the file type to be 'sticky empty', have size of 0 and
+ # have the glusterfs.dht.linkto xattr set.
+ ret = is_linkto_file(brickdir._host, file_path)
+ if not ret:
+ g.log.error("%s is not a linkto file", file_path)
+ return False
+ return True
+
+ def test_file_rename_when_source_and_dest_hash_diff_subvol(self):
+ """
+        Case 1:
+ - Destination file does not exist
+        - Source file is stored on hashed subvolume(s1) itself
+ - Destination file should be hashed to some other subvolume(s2)
+ mv <source_file> <destination_file>
+        - Source file should be renamed to the destination file.
+ - Destination link file should be created on its hashed
+ subvolume(s2)
+ """
+ # pylint: disable=protected-access
+ # pylint: disable=unsubscriptable-object
+
+        # Create source file and Get hashed subvol (s1)
+ _, count, source_file = (
+ self._create_file_and_get_hashed_subvol("test_source_file"))
+
+        # Rename the file such that the new name hashes to a new subvol (s2)
+ new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
+ self.assertIsNotNone(new_hashed, ("could'nt find new hashed for {}"
+ .format(source_file)))
+ src_link_subvol = new_hashed.hashedbrickobject
+
+ # Verify the subvols are not same for source and destination files
+ self.assertNotEqual(count,
+ new_hashed.subvol_count,
+ "The subvols for src and dest are same.")
+
+ # Rename the source file to the destination file
+ dest_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ ret = move_file(self.clients[0], source_file, dest_file)
+ self.assertTrue(ret, ("Failed to move files {} and {}"
+ .format(source_file, dest_file)))
+
+ # Verify the link file is found in new subvol
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+ self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
+ "expected linkto file: {}"
+ .format(src_link_subvol._fqpath,
+ str(new_hashed.newname))))
+ g.log.info("New hashed volume has the expected linkto file")
+
+ def test_file_rename_when_source_and_dest_hash_same_subvol(self):
+ """
+ Case 2:
+ - Destination file does not exist
+        - Source file is stored on hashed subvolume(s1) itself
+ - Destination file should be hashed to same subvolume(s1)
+ mv <source_file> <destination_file>
+ - Source file should be renamed to destination file
+ """
+ # pylint: disable=protected-access
+ # pylint: disable=unsubscriptable-object
+
+        # Create source file and Get hashed subvol (s1)
+ source_hashed_subvol, count, source_file = (
+ self._create_file_and_get_hashed_subvol("test_source_file"))
+
+        # Rename the file such that the new name hashes to the same subvol (s1)
+ new_hashed = find_specific_hashed(self.subvols,
+ "/",
+ source_hashed_subvol)
+ self.assertIsNotNone(new_hashed,
+ "could not find new hashed for destination file")
+
+ # Rename the source file to the destination file
+ dest_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ ret = move_file(self.clients[0], source_file, dest_file)
+ self.assertTrue(ret, "Failed to move files {} and {}".format(
+ source_file, dest_file))
+
+ _, rename_count = find_hashed_subvol(self.subvols,
+ "/",
+ str(new_hashed.newname))
+ self.assertEqual(count, rename_count,
+ "The hashed subvols for src and dest are not same.")
+
+ def test_file_rename_when_dest_not_hash_to_src_or_src_link_subvol(self):
+ """
+ Case 3:
+ - Destination file does not exist
+ - Source link file is stored on hashed sub volume(s1) and Source
+ file is stored on another subvolume(s2)
+ - Destination file should be hashed to some other subvolume(s3)
+ (should not be same subvolumes mentioned in above condition)
+ mv <source_file> <destination_file>
+        - Source file should be renamed to destination file
+ - source link file should be removed.
+ - Destination link file should be created on its hashed
+ subvolume(s3)
+ """
+ # pylint: disable=protected-access
+ # pylint: disable=too-many-locals
+ # pylint: disable=unsubscriptable-object
+
+        # Create source file and Get hashed subvol (s2)
+        # (after the rename below, s2 becomes the cached subvol)
+ _, count, source_file = (
+ self._create_file_and_get_hashed_subvol("test_source_file"))
+
+ # Rename the file to create link in hashed subvol -(s1)
+ new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
+ self.assertIsNotNone(new_hashed,
+ "could not find new hashed for dstfile")
+ count2 = new_hashed.subvol_count
+ # Rename the source file to the new file name
+ dest_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ ret = move_file(self.clients[0], source_file, dest_file)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(source_file, dest_file)))
+
+ # Verify the Source link file is stored on hashed sub volume(s1)
+ src_link_subvol = new_hashed.hashedbrickobject
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+ self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
+ "expected linkto file: {}"
+ .format(src_link_subvol._fqpath,
+ str(new_hashed.newname))))
+
+        # Find a subvol (s3) other than S1 and S2
+ brickobject = create_brickobjectlist(self.subvols, "/")
+ self.assertIsNotNone(brickobject, "Failed to get brick object list")
+ br_count = -1
+ subvol_new = None
+ for brickdir in brickobject:
+ br_count += 1
+ if br_count not in (count, count2):
+ subvol_new = brickdir
+ break
+
+ new_hashed2 = find_specific_hashed(self.subvols,
+ "/",
+ subvol_new)
+ self.assertIsNotNone(new_hashed2,
+ "could not find new hashed for dstfile")
+
+ # Rename the source file to the destination file
+ source_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ dest_file = "{}/{}".format(self.mount_point, str(new_hashed2.newname))
+ ret = move_file(self.clients[0], source_file, dest_file)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(source_file, dest_file)))
+
+ hashed_subvol_after_rename, rename_count = (
+ find_hashed_subvol(self.subvols,
+ "/",
+ str(new_hashed2.newname)))
+ self.assertNotEqual(count2, rename_count,
+ "The subvols for src and dest are same.")
+
+ # check that the source link file is removed.
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+ self.assertFalse(ret, ("The New hashed volume {} still have the "
+ "expected linkto file {}"
+ .format(src_link_subvol._fqpath,
+ str(new_hashed.newname))))
+ g.log.info("The source link file is removed")
+
+ # Check Destination link file is created on its hashed sub-volume(s3)
+ ret = self._verify_link_file_exists(hashed_subvol_after_rename,
+ str(new_hashed2.newname))
+ self.assertTrue(ret, ("The New hashed volume {} doesn't have the "
+ "expected linkto file {}"
+ .format(hashed_subvol_after_rename._fqpath,
+ str(new_hashed2.newname))))
+ g.log.info("Destinaion link is created in desired subvol")
+
+ def test_file_rename_when_src_file_and_dest_file_hash_same_subvol(self):
+ """
+ Case 4:
+ - Destination file does not exist
+ - Source link file is stored on hashed sub volume(s1) and Source
+ file is stored on another subvolume(s2)
+ - Destination file should be hashed to same subvolume(s2)
+ mv <source_file> <destination_file>
+        - Source file should be renamed to destination file
+ - source link file should be removed.
+ """
+ # pylint: disable=protected-access
+ # pylint: disable=unsubscriptable-object
+
+ # Get hashed subvol (S2)
+ source_hashed_subvol, count, source_file = (
+ self._create_file_and_get_hashed_subvol("test_source_file"))
+
+ # Rename the file to create link in hashed subvol -(s1)
+ new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
+ self.assertIsNotNone(new_hashed, ("could not find new hashed for {}"
+ .format(source_file)))
+
+ # Rename the source file to the new file name
+ dest_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ ret = move_file(self.clients[0], source_file, dest_file)
+ self.assertTrue(ret, ("Failed to move file {} and {}"
+ .format(source_file, dest_file)))
+
+ # Verify the Source link file is stored on hashed sub volume(s1)
+ src_link_subvol = new_hashed.hashedbrickobject
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+ self.assertTrue(ret, ("The New hashed volume {} doesn't have the "
+ "expected linkto file {}"
+ .format(src_link_subvol._fqpath,
+ str(new_hashed.newname))))
+
+ # Get a file name to hash to the subvol s2
+ new_hashed2 = find_specific_hashed(self.subvols,
+ "/",
+ source_hashed_subvol)
+        self.assertIsNotNone(new_hashed2, "Could not find a name hashed "
+                             "to the given subvol")
+
+ _, rename_count = (
+ find_hashed_subvol(self.subvols, "/", str(new_hashed2.newname)))
+ self.assertEqual(count, rename_count,
+                         "The subvols for src and dest are not the same.")
+
+ # Move the source file to the new file name
+ source_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ dest_file = "{}/{}".format(self.mount_point, str(new_hashed2.newname))
+ ret = move_file(self.clients[0], source_file, dest_file)
+        self.assertTrue(ret, ("Failed to move file {} to {}"
+ .format(source_file, dest_file)))
+
+ # check that the source link file is removed.
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+        self.assertFalse(ret, ("The new hashed volume {} still has the "
+                               "linkto file {}"
+ .format(src_link_subvol._fqpath,
+ str(new_hashed.newname))))
+ g.log.info("The source link file is removed")
+
+ def test_file_rename_when_src_link_and_dest_file_hash_same_subvol(self):
+ """
+ Case 5:
+ - Destination file does not exist
+ - Source link file is stored on hashed sub volume(s1) and Source
+ file is stored on another subvolume(s2)
+        - Destination file should be hashed to the same subvolume(s1)
+ mv <source_file> <destination_file>
+ - Source file should be renamed to destination file
+ - Source link file should be removed.
+ - Destination link file should be created on its
+ hashed subvolume(s1)
+ """
+ # pylint: disable=protected-access
+ # pylint: disable=unsubscriptable-object
+
+        # Get hashed subvol (s2)
+ _, count, source_file = (
+ self._create_file_and_get_hashed_subvol("test_source_file"))
+
+ # Rename the file to create link in another subvol - (s1)
+ new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
+ self.assertIsNotNone(new_hashed, ("could not find new hashed subvol "
+ "for {}".format(source_file)))
+
+ self.assertNotEqual(count,
+ new_hashed.subvol_count,
+ "New file should hash to different sub-volume")
+
+ # Rename the source file to the new file name
+ dest_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ ret = move_file(self.clients[0], source_file, dest_file)
+        self.assertTrue(ret, ("Failed to move file {} to {}"
+ .format(source_file, dest_file)))
+
+ # Verify the Source link file is stored on hashed sub volume(s1)
+ src_link_subvol = new_hashed.hashedbrickobject
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+        self.assertTrue(ret, ("The new hashed volume {} doesn't have the "
+ "expected linkto file {}"
+ .format(src_link_subvol._fqpath,
+ str(new_hashed.newname))))
+
+ # Get a file name to hash to the subvol s1
+ new_hashed2 = find_specific_hashed(self.subvols,
+ "/",
+ src_link_subvol,
+ new_hashed.newname)
+ self.assertIsNotNone(new_hashed2, ("Couldn't find a name hashed to the"
+ " given subvol {}"
+ .format(src_link_subvol)))
+
+ _, rename_count = (
+ find_hashed_subvol(self.subvols, "/", str(new_hashed2.newname)))
+ self.assertEqual(new_hashed.subvol_count, rename_count,
+                         "The subvols for src and dest are not the same.")
+
+ # Move the source file to the new file name
+ source_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ dest_file = "{}/{}".format(self.mount_point, str(new_hashed2.newname))
+ ret = move_file(self.clients[0], source_file, dest_file)
+ self.assertTrue(ret, "Failed to move file")
+
+ # check that the source link file is removed.
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+        self.assertFalse(ret, ("The hashed volume {} still has the "
+                               "linkto file {}"
+ .format(src_link_subvol._fqpath,
+ str(new_hashed.newname))))
+ g.log.info("The source link file is removed")
+
+ # Check Destination link file is created on its hashed sub-volume(s1)
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed2.newname))
+        self.assertTrue(ret, ("The new hashed volume {} doesn't have the "
+ "expected linkto file {}"
+ .format(src_link_subvol._fqpath,
+ str(new_hashed2.newname))))
+        g.log.info("Destination link is created in desired subvol")
diff --git a/tests/functional/dht/test_file_rename_when_destination_file_stored_on_source_file_hashed_subvol.py b/tests/functional/dht/test_file_rename_when_destination_file_stored_on_source_file_hashed_subvol.py
new file mode 100644
index 000000000..9efe2a891
--- /dev/null
+++ b/tests/functional/dht/test_file_rename_when_destination_file_stored_on_source_file_hashed_subvol.py
@@ -0,0 +1,639 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import re
+from glusto.core import Glusto as g
+from glustolibs.gluster.glusterfile import get_file_stat
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.dht_test_utils import (find_hashed_subvol,
+ find_new_hashed,
+ find_specific_hashed)
+from glustolibs.gluster.volume_libs import get_subvols, parse_vol_file
+from glustolibs.gluster.glusterfile import (move_file,
+ is_linkto_file,
+ get_dht_linkto_xattr)
+
+
+@runs_on([['distributed-replicated', 'distributed',
+ 'distributed-dispersed', 'distributed-arbiter'],
+ ['glusterfs']])
+class DhtFileRenameWithDestFileHashed(GlusterBaseClass):
+
+ def setUp(self):
+ """
+ Setup Volume and Mount Volume
+ """
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Change the dist count to 4 in case of 'distributed-replicated' ,
+ # 'distributed-dispersed' and 'distributed-arbiter'
+ if self.volume_type in ("distributed-replicated",
+ "distributed-dispersed",
+ "distributed-arbiter"):
+ self.volume['voltype']['dist_count'] = 4
+
+ # Setup Volume and Mount Volume
+ ret = self.setup_volume_and_mount_volume(mounts=[self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
+
+ self.mount_point = self.mounts[0].mountpoint
+
+ self.subvols = (get_subvols(
+ self.mnode, self.volname))['volume_subvols']
+ self.assertIsNotNone(self.subvols, "failed to get subvols")
+
+ def tearDown(self):
+ """
+ Unmount Volume and Cleanup Volume
+ """
+ # Unmount Volume and Cleanup Volume
+ ret = self.unmount_volume_and_cleanup_volume(mounts=[self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Unmount Volume and Cleanup Volume: Fail")
+ g.log.info("Unmount Volume and Cleanup Volume: Success")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def _create_file_and_get_hashed_subvol(self, file_name):
+ """ Creates a file and return its hashed subvol
+
+ Args:
+ file_name(str): name of the file to be created
+ Returns:
+ hashed_subvol object: An object of type BrickDir type
+ representing the hashed subvolume
+
+ subvol_count: The subvol index in the subvol list
+
+ source_file: Path to the file created
+
+ """
+ # pylint: disable=unsubscriptable-object
+
+ # Create Source File
+ source_file = "{}/{}".format(self.mount_point, file_name)
+ ret, _, err = g.run(self.mounts[0].client_system,
+ ("touch %s" % source_file))
+ self.assertEqual(ret, 0,
+ ("Failed to create %s : err %s", source_file, err))
+ g.log.info("Successfully created the source file")
+
+ # Find the hashed subvol for source file
+ source_hashed_subvol, count = find_hashed_subvol(self.subvols,
+ "/",
+ file_name)
+ self.assertIsNotNone(source_hashed_subvol,
+ "Couldn't find hashed subvol for the source file")
+ return source_hashed_subvol, count, source_file
+
+ @staticmethod
+ def _verify_link_file_exists(brickdir, file_name):
+        """ Verifies whether a file link is present in given subvol
+        Args:
+            brickdir(Class Object): BrickDir object containing data about
+                                    bricks under a specific subvol
+            file_name(str): Name of the link file to be searched
+        Returns:
+            (bool): Based on existence of the file link
+        """
+ # pylint: disable=protected-access
+ # pylint: disable=unsubscriptable-object
+ file_path = brickdir._fqpath + file_name
+ file_stat = get_file_stat(brickdir._host, file_path)
+ if file_stat is None:
+ g.log.error("Failed to get File stat for %s", file_path)
+ return False
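+        # DHT linkto files are created with mode 1000 (---------T, only
+        # the sticky bit set), which is what the access check below
+        # looks for.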
+        if file_stat['access'] != "1000":
+ g.log.error("Access value not 1000 for %s", file_path)
+ return False
+
+        # Check for file type to be 'sticky empty', have size of 0 and
+ # have the glusterfs.dht.linkto xattr set.
+ ret = is_linkto_file(brickdir._host, file_path)
+ if not ret:
+ g.log.error("%s is not a linkto file", file_path)
+ return False
+ return True
+
+ @staticmethod
+ def _verify_file_exists(brick_dir, file_name):
+ """ Verifies whether a file is present in given subvol or not
+ Args:
+            brick_dir(Class Object): BrickDir object containing data about
+                                     bricks under a specific subvol
+            file_name(str): Name of the file to be searched
+        Returns:
+            (bool): Based on existence of the file
+ """
+ # pylint: disable=protected-access
+
+ cmd = "[ -f {} ]".format(brick_dir._fqpath +
+ (str(file_name)))
+ ret, _, _ = g.run(brick_dir._host, cmd)
+ if ret:
+ return False
+ return True
+
+ @staticmethod
+ def _get_remote_subvolume(vol_file_data, brick_name):
+        """ Gets the remote subvolume for the given brick from .vol data
+ Args:
+ vol_file_data(dict): Dictionary containing data of .vol file
+ brick_name(str): Brick path
+ Returns:
+ (str): Remote subvol name
+ (None): If error occurred
+ """
+ try:
+ brick_name = re.search(r'[a-z0-9\-\_]*', brick_name).group()
+ remote_subvol = (vol_file_data[
+ brick_name]['option']['remote-subvolume'])
+ except KeyError:
+ return None
+ return remote_subvol
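+    # For reference, a client entry in the .vol file typically looks like
+    # (assumed layout, matching the nested dicts parse_vol_file returns):
+    #   volume <volname>-client-0
+    #       type protocol/client
+    #       option remote-host <server>
+    #       option remote-subvolume <brick-path>
+    #   end-volume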
+
+ def _verify_file_links_to_specified_destination(self, host, file_path,
+ dest_file):
+ """ Verifies whether a file link points to the specified destination
+ Args:
+ host(str): Host at which commands are to be executed
+ file_path(str): path to the link file
+ dest_file(str): path to the dest file to be pointed at
+ Returns:
+ (bool) : Based on whether the given file points to dest or not
+ """
+        link_to_xattr = get_dht_linkto_xattr(host, file_path)
+        if link_to_xattr is None:
+            g.log.error("Failed to get trusted.glusterfs.dht.linkto")
+            return False
+
+        # Remove unexpected chars in the value, if any
+        link_to_xattr = re.search(r'[a-z0-9\-\_]*', link_to_xattr).group()
+
+ # Get the remote-subvolume for the corresponding linkto xattr
+ path = ("/var/lib/glusterd/vols/{}/{}.tcp-fuse.vol"
+ .format(self.volname, self.volname))
+ vol_data = parse_vol_file(self.mnode, path)
+ if not vol_data:
+ g.log.error("Failed to parse the file %s", path)
+ return False
+
+ remote_subvol = self._get_remote_subvolume(vol_data, link_to_xattr)
+ if remote_subvol is None:
+            # In case we failed to find the remote subvol, get all the
+            # subvolumes and then check whether the file is present in
+            # any of those subvols
+ subvolumes = vol_data[link_to_xattr]['subvolumes']
+ for subvol in subvolumes:
+ remote_subvol = self._get_remote_subvolume(vol_data,
+ subvol)
+ if remote_subvol:
+ subvol = re.search(r'[a-z0-9\-\_]*', subvol).group()
+ remote_host = (
+ vol_data[subvol]['option']['remote-host'])
+ # Verify the new file is in the remote-subvol identified
+ cmd = "[ -f {}/{} ]".format(remote_subvol, dest_file)
+ ret, _, _ = g.run(remote_host, cmd)
+ if ret == 0:
+ return True
+ g.log.error("The given link file doesn't point to any of "
+ "the subvolumes")
+ return False
+ else:
+ remote_host = vol_data[link_to_xattr]['option']['remote-host']
+ # Verify the new file is in the remote-subvol identified
+ cmd = "[ -f {}/{} ]".format(remote_subvol, dest_file)
+ ret, _, _ = g.run(remote_host, cmd)
+ if ret == 0:
+ return True
+ return False
+
+ def test_file_rename_when_source_and_dest_hash_diff_subvol(self):
+ """
+ - Destination file should exist
+ - Source file is stored on hashed sub volume(s1) and cached on
+ another subvolume(s2)
+        - Destination file should be hashed to the subvolume where the
+          source file is stored(s2)
+        - Destination file is hashed to subvolume(s2) but cached on the
+          same subvolume(s1) where the source file is hashed
+ mv <source_file> <destination_file>
+ - Destination file is removed.
+ - Source file should be renamed as destination file
+ - Destination link file should be removed
+ - source link file should be removed
+ """
+ # pylint: disable=protected-access
+
+        # Create source file and get hashed subvol (s2)
+ source_hashed_subvol, src_count, source_file = (
+ self._create_file_and_get_hashed_subvol("test_source_file"))
+
+ # Rename the file such that the new name hashes to a new subvol (S1)
+ new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
+        self.assertIsNotNone(new_hashed, ("couldn't find new hashed for {}"
+ .format(source_file)))
+
+ # Verify the subvols are not same for source and destination files
+ self.assertNotEqual(src_count,
+ new_hashed.subvol_count,
+                            "The subvols for src and dest are the same.")
+
+ # Rename/Move the file
+ dest_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ ret = move_file(self.mounts[0].client_system, source_file, dest_file)
+        self.assertTrue(ret, "Failed to move file {} to {}".format(
+ source_file, dest_file))
+
+ # Verify the Source link file is stored on hashed sub volume(s1)
+ src_link_subvol = new_hashed.hashedbrickobject
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+ self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
+ "expected linkto file: {}"
+ .format(src_link_subvol._fqpath,
+ str(new_hashed.newname))))
+
+ # Get a file name that stores to S1 for destination
+ new_hashed2 = find_specific_hashed(self.subvols,
+ "/",
+ src_link_subvol,
+ new_hashed.newname)
+ self.assertIsNotNone(new_hashed2,
+ "could not find new hashed for dstfile")
+
+ # Create destination file in subvol S1
+ dest_hashed_subvol, dest_count, dest_file = (
+ self._create_file_and_get_hashed_subvol(str(new_hashed2.newname)))
+
+ # Verify the subvol is S1 itself
+ self.assertEqual(new_hashed.subvol_count, dest_count,
+                         "The destination file is not stored in the desired "
+                         "subvol {}, instead in subvol {}"
+ .format(new_hashed2.subvol_count, dest_count))
+
+ # Create a linkfile to dest by renaming it to hash to S2
+ dest_hashed = find_specific_hashed(self.subvols,
+ "/",
+ source_hashed_subvol)
+ # Verify the subvol is S2
+ self.assertEqual(dest_hashed.subvol_count, src_count,
+                         "The destination file is not stored in the desired "
+                         "subvol {}, instead in subvol {}"
+ .format(dest_hashed.subvol_count, src_count))
+
+ # Rename the source file to the new file name
+ dest_file_2 = "{}/{}".format(self.mount_point,
+ str(dest_hashed.newname))
+ ret = move_file(self.mounts[0].client_system, dest_file, dest_file_2)
+        self.assertTrue(ret, "Failed to move file {} to {}".format(
+            dest_file, dest_file_2))
+
+ # Verify the Dest link file is stored on sub volume(s2)
+ ret = self._verify_link_file_exists(source_hashed_subvol,
+ str(dest_hashed.newname))
+ self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
+ "expected linkto file: {}"
+ .format(source_hashed_subvol._fqpath,
+ str(dest_hashed.newname))))
+
+ # Rename source to destination
+ src = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ dest_file = "{}/{}".format(self.mount_point,
+ str(dest_hashed.newname))
+ ret = move_file(self.mounts[0].client_system, src, dest_file)
+        self.assertTrue(ret, "Failed to move file {} to {}"
+ .format(src, dest_file))
+
+ # Verify destination file is removed
+ ret = self._verify_file_exists(dest_hashed_subvol,
+ str(new_hashed2.newname))
+ self.assertFalse(ret, ("Destination file : {} is not removed in subvol"
+ " : {}".format(str(new_hashed2.newname),
+ dest_hashed_subvol._fqpath)))
+ g.log.info("The destination file is removed as expected")
+
+ # Verify the source link is removed
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+        self.assertFalse(ret, ("The hashed subvol {} still has the "
+                               "linkto file: {}"
+ .format(src_link_subvol._fqpath,
+ str(new_hashed.newname))))
+
+ g.log.info("The source link file is removed as expected")
+
+ def test_file_rename_when_source_and_dest_hash_same_subvol(self):
+ """
+ - Destination file should exist
+        - Source file is hashed to sub volume(s1) and cached on another
+          subvolume(s2)
+        - Destination file should be hashed to the same subvolume(s1)
+          where the source file is hashed
+        - Destination is hashed on subvolume(s1) but should be cached on
+          subvolume(s2) where the source file is stored
+ mv <source_file> <destination_file>
+ - Destination file is removed.
+ - Source file should be renamed as destination file
+ - Destination link file should be there on hashed subvolume and
+ should link to new destination file
+ - source link file should be removed
+ """
+ # pylint: disable=protected-access
+ # pylint: disable=too-many-locals
+
+        # Create source file and get hashed subvol (s2)
+ source_hashed_subvol, src_count, source_file = (
+ self._create_file_and_get_hashed_subvol("test_source_file"))
+
+ # Rename the file such that the new name hashes to a new subvol (S1)
+ new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
+        self.assertIsNotNone(new_hashed, ("couldn't find new hashed for {}"
+ .format(source_file)))
+
+ # Verify the subvols are not same for source and destination files
+ self.assertNotEqual(src_count,
+ new_hashed.subvol_count,
+                            "The subvols for src and dest are the same.")
+
+ # Rename/Move the file
+ dest_file = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ ret = move_file(self.mounts[0].client_system, source_file, dest_file)
+        self.assertTrue(ret, "Failed to move file {} to {}".format(
+ source_file, dest_file))
+
+ # Verify the Source link file is stored on hashed sub volume(s1)
+ src_link_subvol = new_hashed.hashedbrickobject
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+ self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
+ "expected linkto file: {}"
+ .format(src_link_subvol._fqpath,
+ str(new_hashed.newname))))
+
+ # Get a file name that stores to S2 for destination
+ new_hashed2 = find_specific_hashed(self.subvols,
+ "/",
+ source_hashed_subvol)
+ self.assertIsNotNone(new_hashed2,
+ "could not find new hashed for dstfile")
+
+ # Create destination file in subvol S2
+ dest_hashed_subvol, dest_count, dest_file = (
+ self._create_file_and_get_hashed_subvol(str(new_hashed2.newname)))
+
+ # Verify the subvol is S2 itself
+ self.assertEqual(dest_count, src_count,
+                         "The destination file is not stored in the desired "
+                         "subvol {}"
+ .format(dest_count))
+
+ # Create a linkfile to dest by renaming it to hash to S1
+ dest_hashed = find_specific_hashed(self.subvols,
+ "/",
+ src_link_subvol,
+ new_hashed.newname)
+ # Verify the subvol is S1
+ self.assertEqual(dest_hashed.subvol_count, new_hashed.subvol_count,
+                         "The destination file is not stored in the desired "
+                         "subvol {}, instead in subvol {}"
+                         .format(dest_hashed.subvol_count,
+                                 new_hashed.subvol_count))
+
+ # Rename the dest file to the new file name
+ dest_file_2 = "{}/{}".format(self.mount_point,
+ str(dest_hashed.newname))
+ ret = move_file(self.mounts[0].client_system, dest_file, dest_file_2)
+        self.assertTrue(ret, "Failed to move file {} to {}".format(
+            dest_file, dest_file_2))
+
+ # Rename source to destination
+ src = "{}/{}".format(self.mount_point, str(new_hashed.newname))
+ dest_file = "{}/{}".format(self.mount_point,
+ str(dest_hashed.newname))
+ ret = move_file(self.mounts[0].client_system, src, dest_file)
+        self.assertTrue(ret, "Failed to move file {} to {}"
+ .format(src, dest_file))
+
+ # Verify destination file is removed
+ ret = self._verify_file_exists(dest_hashed_subvol,
+ str(new_hashed2.newname))
+ self.assertFalse(ret, ("Destination file : {} is not removed in subvol"
+ " : {}".format(str(new_hashed2.newname),
+ dest_hashed_subvol._fqpath)))
+ g.log.info("The destination file is removed as expected")
+
+ # Verify the source link is removed
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(new_hashed.newname))
+        self.assertFalse(ret, ("The hashed subvol {} still has the "
+                               "linkto file: {}"
+ .format(src_link_subvol._fqpath,
+ str(new_hashed.newname))))
+
+ g.log.info("The source link file is removed as expected")
+
+ # Verify the Destination link is on hashed subvolume
+ ret = self._verify_link_file_exists(src_link_subvol,
+ str(dest_hashed.newname))
+ self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
+ "expected linkto file: {}"
+                              .format(src_link_subvol._fqpath,
+ str(dest_hashed.newname))))
+
+ # Verify the dest link file points to new destination file
+ file_path = src_link_subvol._fqpath + str(dest_hashed.newname)
+ ret = (self._verify_file_links_to_specified_destination(
+ src_link_subvol._host, file_path, str(dest_hashed.newname)))
+        self.assertTrue(ret, "The dest link file is not pointing to "
+                        "the desired file")
+ g.log.info("The Destination link file is pointing to new file"
+ " as expected")
+
+ def test_file_rename_when_dest_hash_to_src_subvol(self):
+ """
+ - Destination file should exist
+        - Source file is stored on its hashed subvolume itself
+        - Destination file should be hashed to the same subvolume(s1)
+          where the source file is
+        - Destination file is hashed to subvolume(s1) but cached on
+          subvolume(s2)
+ mv <source_file> <destination_file>
+ - Destination file is removed.
+ - Source file should be renamed as destination file
+ - Destination link file should be removed
+ """
+ # pylint: disable=protected-access
+
+        # Create source file and get hashed subvol (s1)
+ source_hashed_subvol, src_count, source_file = (
+ self._create_file_and_get_hashed_subvol("test_source_file"))
+
+ # Find a file name that hashes to another subvol (s2)
+ new_hashed = find_new_hashed(self.subvols, "/", "test_source_file")
+        self.assertIsNotNone(new_hashed, ("couldn't find new hashed for {}"
+ .format(source_file)))
+
+ # Create destination file in subvol S2
+ _, dest_count, dest_file = (
+ self._create_file_and_get_hashed_subvol(str(new_hashed.newname)))
+
+ # Rename dest file such that it hashes to S1
+ new_hashed2 = find_specific_hashed(self.subvols,
+ "/",
+ source_hashed_subvol)
+ self.assertIsNotNone(new_hashed2,
+ "could not find new hashed for dstfile")
+
+ # Verify the subvol is S1 itself
+ self.assertEqual(new_hashed2.subvol_count, src_count,
+                         "The destination file is not stored in the desired "
+                         "subvol {}".format(dest_count))
+
+ # Rename/Move the file
+ dest_file2 = "{}/{}".format(self.mount_point, str(new_hashed2.newname))
+ ret = move_file(self.mounts[0].client_system, dest_file, dest_file2)
+        self.assertTrue(ret, "Failed to move file {} to {}"
+                        .format(dest_file, dest_file2))
+
+ # Verify the Dest link file is stored on hashed sub volume(s1)
+ dest_link_subvol = new_hashed2.hashedbrickobject
+ ret = self._verify_link_file_exists(dest_link_subvol,
+ str(new_hashed2.newname))
+ self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
+ "expected linkto file: {}"
+ .format(dest_link_subvol._fqpath,
+ str(new_hashed2.newname))))
+
+ # Rename Source to Dest
+ src = "{}/{}".format(self.mount_point, "test_source_file")
+ dest_file = "{}/{}".format(self.mount_point, str(new_hashed2.newname))
+ ret = move_file(self.mounts[0].client_system, src, dest_file)
+        self.assertTrue(ret, "Failed to move file {} to {}"
+ .format(src, dest_file))
+
+ # Verify destination file is removed
+ ret = self._verify_file_exists(new_hashed.hashedbrickobject,
+ str(new_hashed.newname))
+ self.assertFalse(ret, ("Destination file : {} is not removed in subvol"
+ " : {}".format(str(new_hashed.newname),
+ new_hashed.hashedbrickobject
+ ._fqpath)))
+ g.log.info("The destination file is removed as expected")
+
+ # Verify the Destination link is removed
+ ret = self._verify_link_file_exists(new_hashed2.hashedbrickobject,
+ str(new_hashed2.newname))
+        self.assertFalse(ret, ("The hashed subvol {} still has the "
+                               "linkto file: {}"
+ .format(new_hashed2.hashedbrickobject._fqpath,
+ str(new_hashed2.newname))))
+
+ g.log.info("The Destination link file is removed as expected")
+
+ def test_file_rename_when_dest_cache_to_src_subvol(self):
+ """
+ - Destination file should exist
+        - Source file is stored on its hashed subvolume itself
+        - Destination file should be hashed to some other subvolume(s2)
+        - Destination file is hashed on subvolume(s2) but cached on the
+          subvolume(s1) where the source file is present
+ mv <source_file> <destination_file>
+ - Destination file is removed.
+ - Source file should be renamed as destination file
+ - Destination link file should be there on hashed subvolume and
+ should link to new destination file
+ """
+ # pylint: disable=protected-access
+
+        # Create source file and get hashed subvol (s1)
+ source_hashed_subvol, src_count, _ = (
+ self._create_file_and_get_hashed_subvol("test_source_file"))
+
+ # Find name for dest file to cache to S1
+ dest_subvol = find_specific_hashed(self.subvols,
+ "/",
+ source_hashed_subvol)
+ dest_name = str(dest_subvol.newname)
+
+ # Create destination file in subvol S1
+ _, dest_count, _ = self._create_file_and_get_hashed_subvol(dest_name)
+
+ # Verify its subvol (s1)
+ self.assertEqual(src_count, dest_count,
+ ("The newly created file falls under subvol {} "
+ "rather than {}".format(dest_count, src_count)))
+
+ # Rename dest file such that it hashes to some other subvol S2
+ dest_hashed_subvol = find_new_hashed(self.subvols,
+ "/",
+ dest_name)
+ self.assertIsNotNone(dest_hashed_subvol,
+ "could not find new hashed for dstfile")
+
+ # Rename/Move the file
+ dest_file = "{}/{}".format(self.mount_point,
+ dest_hashed_subvol.newname)
+ src_file = "{}/{}".format(self.mount_point, dest_name)
+ ret = move_file(self.mounts[0].client_system, src_file, dest_file)
+        self.assertTrue(ret, "Failed to move file {} to {}"
+ .format(src_file, dest_file))
+
+ # Verify the Dest link file is stored on hashed sub volume(s2)
+ dest_link_subvol = dest_hashed_subvol.hashedbrickobject
+ ret = self._verify_link_file_exists(dest_link_subvol,
+ str(dest_hashed_subvol.newname))
+ self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
+ "expected linkto file: {}"
+ .format(dest_link_subvol._fqpath,
+ str(dest_hashed_subvol.newname))))
+
+ # Rename Source to Dest
+ src = "{}/{}".format(self.mount_point, "test_source_file")
+ dest_file = "{}/{}".format(self.mount_point,
+ dest_hashed_subvol.newname)
+ ret = move_file(self.mounts[0].client_system, src, dest_file)
+        self.assertTrue(ret, "Failed to move file {} to {}"
+ .format(src, dest_file))
+
+ # Verify destination file is removed
+ ret = self._verify_file_exists(dest_subvol.hashedbrickobject,
+ dest_name)
+        self.assertFalse(ret, ("Destination file : {} is not removed in subvol"
+                               " : {}"
+                               .format(dest_name,
+                                       dest_subvol.hashedbrickobject
+                                       ._fqpath)))
+ g.log.info("The destination file is removed as expected")
+
+ # Verify the Destination link is present
+ ret = self._verify_link_file_exists(dest_link_subvol,
+ str(dest_hashed_subvol.newname))
+        self.assertTrue(ret, ("The hashed subvol {} doesn't have the "
+ "expected linkto file: {}"
+ .format(dest_link_subvol._fqpath,
+ str(dest_hashed_subvol.newname))))
+
+ g.log.info("The Destination link file is present as expected")
+
+ # Verify the dest link file points to new destination file
+ file_path = dest_link_subvol._fqpath + str(dest_hashed_subvol.newname)
+ ret = (self._verify_file_links_to_specified_destination(
+ dest_link_subvol._host, file_path,
+ str(dest_hashed_subvol.newname)))
+        self.assertTrue(ret, "The dest link file is not pointing to "
+                        "the desired file")
+ g.log.info("The Destination link file is pointing to new file"
+ " as expected")
diff --git a/tests/functional/dht/test_invalid_memory_read_after_freed.py b/tests/functional/dht/test_invalid_memory_read_after_freed.py
new file mode 100644
index 000000000..fb4e3719c
--- /dev/null
+++ b/tests/functional/dht/test_invalid_memory_read_after_freed.py
@@ -0,0 +1,102 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.brick_libs import get_all_bricks
+from glustolibs.gluster.glusterdir import get_dir_contents
+from glustolibs.gluster.lib_utils import is_core_file_created
+
+
+@runs_on([['distributed-replicated', 'distributed-arbiter',
+ 'distributed-dispersed', 'distributed'], ['glusterfs']])
+class TestInvalidMemoryReadAfterFreed(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Creating Volume and mounting the volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Volume creation or mount failed: %s"
+ % self.volname)
+
+ # Assign a variable for the first_client
+ self.first_client = self.mounts[0].client_system
+
+ def tearDown(self):
+
+ # Unmounting and cleaning volume
+ ret = self.unmount_volume_and_cleanup_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Unable to delete volume %s" % self.volname)
+
+ self.get_super_method(self, 'tearDown')()
+
+ def test_invalid_memory_read_after_freed(self):
+ """
+ Test case:
+ 1. Create a volume and start it.
+ 2. Mount the volume using FUSE.
+ 3. Create multiple level of dirs and files inside every dir.
+ 4. Rename files such that linkto files are created.
+ 5. From the mount point do an rm -rf * and check if all files
+           are deleted from the mount point as well as backend bricks.
+ """
+ # Fetch timestamp to check for core files
+ ret, test_timestamp, _ = g.run(self.mnode, "date +%s")
+ self.assertEqual(ret, 0, "date command failed")
+ test_timestamp = test_timestamp.strip()
+
+ # Create multiple level of dirs and files inside every dir
+ cmd = ("cd %s; for i in {1..100}; do mkdir dir$i; cd dir$i; "
+               "for j in {1..200}; do dd if=/dev/urandom of=file$j bs=1K"
+ " count=1; done; done" % self.mounts[0].mountpoint)
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertFalse(ret, "Failed to create dirs and files")
+
+ # Rename files such that linkto files are created
+        cmd = ("cd %s; for i in {1..100}; do cd dir$i; for j in {1..200}; do "
+               "mv file$j ntfile$j; done; done" % self.mounts[0].mountpoint)
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertFalse(ret, "Failed to rename files")
+ g.log.info("Files created and renamed successfully")
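+
+        # Each rename whose new name hashes to a different subvolume
+        # leaves a DHT linkto file behind on the new hashed subvolume;
+        # the rm -rf below must remove data files and linkto files alike.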
+
+ # From the mount point do an rm -rf * and check if all files
+        # are deleted from the mount point as well as backend bricks.
+ ret, _, _ = g.run(self.first_client,
+ "rm -rf {}/*".format(self.mounts[0].mountpoint))
+        self.assertFalse(ret, "rm -rf * failed on mount point")
+
+ ret = get_dir_contents(self.first_client,
+ "{}/".format(self.mounts[0].mountpoint))
+ self.assertEqual(ret, [], "Unexpected: Files and directories still "
+ "seen from mount point")
+
+ for brick in get_all_bricks(self.mnode, self.volname):
+ node, brick_path = brick.split(":")
+ ret = get_dir_contents(node, "{}/".format(brick_path))
+ self.assertEqual(ret, [], "Unexpected: Files and dirs still seen "
+ "on brick %s on node %s" % (brick_path, node))
+ g.log.info("rm -rf * on mount point successful")
+
+ # Check for core file on servers and clients
+ servers = self.servers + [self.first_client]
+ ret = is_core_file_created(servers, test_timestamp)
+ self.assertTrue(ret, "Core files found on servers used for test")
+ g.log.info("No cores found on all participating servers")
diff --git a/tests/functional/dht/test_kill_brick_with_remove_brick.py b/tests/functional/dht/test_kill_brick_with_remove_brick.py
new file mode 100644
index 000000000..0257b3d86
--- /dev/null
+++ b/tests/functional/dht/test_kill_brick_with_remove_brick.py
@@ -0,0 +1,128 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from random import choice
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.brick_ops import remove_brick
+from glustolibs.gluster.rebalance_ops import (
+ wait_for_remove_brick_to_complete, get_remove_brick_status)
+from glustolibs.gluster.volume_libs import form_bricks_list_to_remove_brick
+from glustolibs.misc.misc_libs import upload_scripts, kill_process
+from glustolibs.io.utils import collect_mounts_arequal
+
+
+@runs_on([['distributed-replicated', 'distributed-arbiter'], ['glusterfs']])
+class TestKillBrickWithRemoveBrick(GlusterBaseClass):
+
+ @classmethod
+ def setUpClass(cls):
+ # Calling GlusterBaseClass setUpClass
+ cls.get_super_method(cls, 'setUpClass')()
+
+ # Upload io scripts for running IO on mounts
+ cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts to clients %s" %
+ cls.clients)
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Changing dist_count to 3
+ self.volume['voltype']['dist_count'] = 3
+
+ # Creating Volume and mounting the volume
+ ret = self.setup_volume_and_mount_volume(self.mounts)
+ if not ret:
+ raise ExecutionError("Volume creation or mount failed: %s"
+ % self.volname)
+
+ def tearDown(self):
+
+ # Unmounting and cleaning volume
+ ret = self.unmount_volume_and_cleanup_volume(self.mounts)
+ if not ret:
+ raise ExecutionError("Unable to delete volume %s" % self.volname)
+
+ self.get_super_method(self, 'tearDown')()
+
+ def test_kill_brick_with_remove_brick(self):
+ """
+ Test case:
+ 1. Create a volume, start it and mount it.
+ 2. Create some data on the volume.
+ 3. Start remove-brick on the volume.
+ 4. When remove-brick is in progress kill brick process of a brick
+           which is being removed.
+ 5. Remove-brick should complete without any failures.
+ """
+ # Start I/O from clients on the volume
+ counter = 1
+ for mount_obj in self.mounts:
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
+ "--dirname-start-num %d --dir-depth 2 "
+ "--dir-length 10 --max-num-of-dirs 5 "
+ "--num-of-files 5 %s" % (
+ self.script_upload_path,
+ counter, mount_obj.mountpoint))
+ ret, _, _ = g.run(mount_obj.client_system, cmd)
+            self.assertFalse(ret, "Failed to create data on volume")
+ counter += 10
+
+ # Collect arequal checksum before ops
+ arequal_checksum_before = collect_mounts_arequal(self.mounts[0])
+
+ # Start remove-brick on the volume
+ brick_list = form_bricks_list_to_remove_brick(self.mnode, self.volname)
+ self.assertIsNotNone(brick_list, "Brick list is empty")
+
+ ret, _, _ = remove_brick(self.mnode, self.volname, brick_list, 'start')
+ self.assertFalse(ret, "Failed to start remove-brick on volume")
+ g.log.info("Successfully started remove-brick on volume")
+
+        # Check remove-brick rebalance is in progress
+        ret = get_remove_brick_status(self.mnode, self.volname, brick_list)
+        ret = ret['aggregate']['statusStr']
+        self.assertEqual(ret, "in progress", ("Remove-brick is not in "
+                                              "'in progress' state, either "
+                                              "it has already completed or "
+                                              "fetching its status failed"))
+
+        # Kill the brick processes (glusterfsd) on the node hosting a
+        # brick which is being removed
+ brick = choice(brick_list)
+ node, _ = brick.split(":")
+ ret = kill_process(node, process_names="glusterfsd")
+ self.assertTrue(ret, "Failed to kill brick process of brick %s"
+ % brick)
+
+ # Wait for remove-brick to complete on the volume
+ ret = wait_for_remove_brick_to_complete(self.mnode, self.volname,
+ brick_list, timeout=1200)
+ self.assertTrue(ret, "Remove-brick didn't complete")
+ g.log.info("Remove brick completed successfully")
+
+ # Check for data loss by comparing arequal before and after ops
+ arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
+ self.assertEqual(arequal_checksum_before, arequal_checksum_after,
+                         "arequal checksum is NOT MATCHING")
+ g.log.info("arequal checksum is SAME")
diff --git a/tests/functional/dht/test_nuke_happy_path.py b/tests/functional/dht/test_nuke_happy_path.py
new file mode 100644
index 000000000..e2e040e60
--- /dev/null
+++ b/tests/functional/dht/test_nuke_happy_path.py
@@ -0,0 +1,95 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.glusterdir import mkdir, get_dir_contents
+from glustolibs.gluster.glusterfile import set_fattr
+from glustolibs.gluster.brick_libs import get_all_bricks
+
+
+@runs_on([['distributed-replicated', 'distributed-arbiter',
+ 'distributed-dispersed', 'distributed'], ['glusterfs']])
+class TestNukeHappyPath(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Creating Volume and mounting the volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Volume creation or mount failed: %s"
+ % self.volname)
+
+ # Assign a variable for the first_client
+ self.first_client = self.mounts[0].client_system
+
+ def tearDown(self):
+
+ # Unmounting and cleaning volume
+ ret = self.unmount_volume_and_cleanup_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Unable to delete volume %s" % self.volname)
+
+ self.get_super_method(self, 'tearDown')()
+
+ def test_nuke_happy_path(self):
+ """
+ Test case:
+ 1. Create a distributed volume, start and mount it
+ 2. Create 1000 dirs and 1000 files under a directory say 'dir1'
+ 3. Set xattr glusterfs.dht.nuke to "test" for dir1
+ 4. Validate dir-1 is not seen from mount point
+ 5. Validate if the entry is moved to '/brickpath/.glusterfs/landfill'
+ and deleted eventually.
+ """
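+        # The glusterfs.dht.nuke xattr makes the brick processes move the
+        # directory into <brick>/.glusterfs/landfill, from where it is
+        # deleted asynchronously in the background.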
+ # Create 1000 dirs and 1000 files under a directory say 'dir1'
+ self.dir_1_path = "{}/dir1/".format(self.mounts[0].mountpoint)
+ ret = mkdir(self.first_client, self.dir_1_path)
+ self.assertTrue(ret, "Failed to create dir1 on mount point")
+ cmd = ("cd {};for i in `seq 1 1000`;do mkdir dir$i;touch file$i;done"
+ .format(self.dir_1_path))
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertFalse(ret, "I/O failed at dir1 on mount point")
+
+ # Set xattr glusterfs.dht.nuke to "test" for dir1
+ ret = set_fattr(self.first_client, self.dir_1_path,
+ 'glusterfs.dht.nuke', 'test')
+ self.assertTrue(ret, "Failed to set xattr glusterfs.dht.nuke")
+
+ # Validate dir-1 is not seen from mount point
+ ret = get_dir_contents(self.first_client, self.mounts[0].mountpoint)
+ self.assertEqual([], ret,
+                         "UNEXPECTED: Mount point has files; ideally it "
+                         "should be empty.")
+
+ # Validate if the entry is moved to '/brickpath/.glusterfs/landfill'
+ # and deleted eventually
+ for brick_path in get_all_bricks(self.mnode, self.volname):
+ node, path = brick_path.split(":")
+ path = "{}/.glusterfs/landfill/*/".format(path)
+ ret = get_dir_contents(node, path)
+            # In case the landfill is already cleaned before checking,
+            # stop execution of the loop.
+ if ret is None:
+ g.log.info("Bricks have been already cleaned up.")
+ break
+ self.assertIsNotNone(ret,
+ "Files not present in /.glusterfs/landfill"
+ " dir")
+        g.log.info("Successfully nuked dir1.")
diff --git a/tests/functional/dht/test_one_brick_full_add_brick_rebalance.py b/tests/functional/dht/test_one_brick_full_add_brick_rebalance.py
new file mode 100644
index 000000000..1ef5d1e90
--- /dev/null
+++ b/tests/functional/dht/test_one_brick_full_add_brick_rebalance.py
@@ -0,0 +1,139 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import string
+from random import choice
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.brick_libs import get_all_bricks
+from glustolibs.gluster.dht_test_utils import find_hashed_subvol
+from glustolibs.gluster.lib_utils import get_usable_size_per_disk
+from glustolibs.gluster.glusterdir import get_dir_contents, mkdir
+from glustolibs.gluster.glusterfile import get_dht_linkto_xattr
+from glustolibs.gluster.rebalance_ops import (rebalance_start,
+ wait_for_rebalance_to_complete)
+from glustolibs.gluster.volume_libs import (get_subvols, expand_volume)
+from glustolibs.io.utils import collect_mounts_arequal
+
+
+@runs_on([['distributed'], ['glusterfs']])
+class TestOneBrickFullAddBrickRebalance(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Changing dist_count to 3
+ self.volume['voltype']['dist_count'] = 3
+
+ # Creating Volume and mounting the volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Volume creation or mount failed: %s"
+ % self.volname)
+
+ def tearDown(self):
+
+ # Unmounting and cleaning volume
+ ret = self.unmount_volume_and_cleanup_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Unable to delete volume %s" % self.volname)
+
+ self.get_super_method(self, 'tearDown')()
+
+ @staticmethod
+ def _get_random_string():
+ letters = string.ascii_lowercase
+ return ''.join(choice(letters) for _ in range(10))
+
+ def test_one_brick_full_add_brick_rebalance(self):
+ """
+ Test case:
+ 1. Create a pure distribute volume with 3 bricks.
+ 2. Start it and mount it on client.
+ 3. Fill one disk of the volume till it's full
+ 4. Add brick to volume, start rebalance and wait for it to complete.
+        5. Check that arequal checksums before and after add brick match.
+ 6. Check if link files are present on bricks or not.
+ """
+        # Fill one brick till it is full
+ bricks = get_all_bricks(self.mnode, self.volname)
+
+ # Calculate the usable size and fill till it reaches
+ # min free limit
+ usable_size = get_usable_size_per_disk(bricks[0])
+ subvols = get_subvols(self.mnode, self.volname)['volume_subvols']
+ fname = "abc"
+
+ # Create directories in hierarchy
+ dirp = "/dir1/dir2/"
+ path = "{}{}".format(self.mounts[0].mountpoint, dirp)
+ ret = mkdir(self.mounts[0].client_system, path, parents=True)
+ self.assertTrue(ret, "Failed to create dir hierarchy")
+
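+        # Fill only the first subvolume: keep generating random names
+        # until one hashes to subvols[0], then fallocate 1G under that
+        # name, so a single brick reaches its min-free limit.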
+ for _ in range(0, usable_size):
+
+ # Create files inside directories
+ while (subvols[find_hashed_subvol(subvols, dirp, fname)[1]][0] !=
+ subvols[0][0]):
+ fname = self._get_random_string()
+ ret, _, _ = g.run(self.mounts[0].client_system,
+ "fallocate -l 1G {}{}".format(path, fname))
+ self.assertFalse(ret, "Failed to fill disk to min free limit")
+ fname = self._get_random_string()
+ g.log.info("Disk filled up to min free limit")
+
+ # Collect arequal checksum before ops
+ arequal_checksum_before = collect_mounts_arequal(self.mounts[0])
+
+ # Add brick to volume
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, "Failed to add brick on volume %s"
+ % self.volname)
+
+ # Trigger rebalance and wait for it to complete
+ ret, _, _ = rebalance_start(self.mnode, self.volname,
+ force=True)
+ self.assertEqual(ret, 0, "Failed to start rebalance on the volume %s"
+ % self.volname)
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
+ timeout=1800)
+ self.assertTrue(ret, "Rebalance is not yet complete on the volume "
+ "%s" % self.volname)
+ g.log.info("Rebalance successfully completed")
+
+ # Check for data loss by comparing arequal before and after ops
+ arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
+ self.assertEqual(arequal_checksum_before, arequal_checksum_after,
+                         "arequal checksum is NOT MATCHING")
+ g.log.info("arequal checksum is SAME")
+
+ # Check if linkto files exist or not as rebalance is already
+ # completed we shouldn't be seeing any linkto files
+ for brick in bricks:
+ node, path = brick.split(":")
+ path += dirp
+ list_of_files = get_dir_contents(node, path)
+ self.assertIsNotNone(list_of_files, "Unable to get files")
+ for filename in list_of_files:
+ ret = get_dht_linkto_xattr(node, "{}{}".format(path,
+ filename))
+                self.assertIsNone(ret, "Unexpected: dht linkto xattr is "
+                                  "set on file %s" % filename)
diff --git a/tests/functional/dht/test_open_file_migration.py b/tests/functional/dht/test_open_file_migration.py
new file mode 100644
index 000000000..55709cdb7
--- /dev/null
+++ b/tests/functional/dht/test_open_file_migration.py
@@ -0,0 +1,131 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.io.utils import open_file_fd
+from glustolibs.gluster.volume_libs import expand_volume
+from glustolibs.gluster.rebalance_ops import (rebalance_start,
+ wait_for_rebalance_to_complete,
+ get_rebalance_status)
+
+
+@runs_on([['distributed', 'replicated', 'arbiter',
+ 'dispersed'],
+ ['glusterfs']])
+class TestOpenFileMigration(GlusterBaseClass):
+ def setUp(self):
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume and Mount Volume
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
+
+ def tearDown(self):
+ """
+ Cleanup and umount volume
+ """
+ # Calling GlusterBaseClass teardown
+ self.get_super_method(self, 'tearDown')()
+
+ # Cleanup and umount volume
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to umount the vol & cleanup Volume")
+ g.log.info("Successful in umounting the volume and Cleanup")
+
+ def test_open_file_migration(self):
+ """
+ Description: Checks that files with open fd are migrated successfully.
+
+ Steps :
+ 1) Create a volume.
+ 2) Mount the volume using FUSE.
+ 3) Create files on volume mount.
+ 4) Open fd for the files and keep on doing read write operations on
+ these files.
+ 5) While fds are open, add bricks to the volume and trigger rebalance.
+ 6) Wait for rebalance to complete.
+ 7) Wait for write on open fd to complete.
+ 8) Check for any data loss during rebalance.
+ 9) Check if rebalance has any failures.
+ """
+ # Create files and open fd for the files on mount point
+ m_point = self.mounts[0].mountpoint
+ cmd = ('cd {}; for i in `seq 261 1261`;do touch testfile$i;'
+ 'done'.format(m_point))
+ ret, _, _ = g.run(self.clients[0], cmd)
+ self.assertEqual(ret, 0, "Failed to create files")
+ g.log.info("Successfully created files")
+ proc = open_file_fd(m_point, 2, self.clients[0],
+ start_range=301, end_range=400)
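+
+        # open_file_fd() keeps fds on the files open and keeps writing to
+        # them in the background, so the rebalance triggered below has to
+        # migrate files with active open fds.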
+
+ # Calculate file count for the mount-point
+ cmd = ("ls -lR {}/testfile* | wc -l".format(m_point))
+ ret, count_before, _ = g.run(self.clients[0], cmd)
+ self.assertEqual(ret, 0, "Failed to get file count")
+ g.log.info("File count before rebalance is:%s", count_before)
+
+ # Add bricks to the volume
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, ("Failed to expand the volume %s",
+ self.volname))
+ g.log.info("Expanding volume is successful on "
+ "volume %s", self.volname)
+
+ # Trigger rebalance
+ ret, _, _ = rebalance_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to start rebalance")
+ g.log.info("Rebalance is started")
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
+ timeout=300)
+ self.assertTrue(ret, ("Rebalance failed on volume %s",
+ self.volname))
+ g.log.info("Rebalance is successful on "
+ "volume %s", self.volname)
+
+ # Close connection and check if write on open fd has completed
+ ret, _, _ = proc.async_communicate()
+ self.assertEqual(ret, 0, "Write on open fd"
+ " has not completed yet")
+ g.log.info("Write completed on open fd")
+
+ # Calculate file count for the mount-point
+ cmd = ("ls -lR {}/testfile* | wc -l".format(m_point))
+ ret, count_after, _ = g.run(self.clients[0], cmd)
+ self.assertEqual(ret, 0, "Failed to get file count")
+ g.log.info("File count after rebalance is:%s", count_after)
+
+ # Check if there is any data loss
+ self.assertEqual(int(count_before), int(count_after),
+ "The file count before and after"
+ " rebalance is not same."
+ " There is data loss.")
+ g.log.info("The file count before and after rebalance is same."
+ " No data loss occurred.")
+
+ # Check if rebalance has any failures
+ ret = get_rebalance_status(self.mnode, self.volname)
+ no_of_failures = ret['aggregate']['failures']
+ self.assertEqual(int(no_of_failures), 0,
+ "Failures in rebalance")
+ g.log.info("No failures in rebalance")
diff --git a/tests/functional/dht/test_pipe_character_and_block_device_files.py b/tests/functional/dht/test_pipe_character_and_block_device_files.py
new file mode 100644
index 000000000..8a3739b83
--- /dev/null
+++ b/tests/functional/dht/test_pipe_character_and_block_device_files.py
@@ -0,0 +1,328 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from socket import gethostbyname
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.brick_libs import get_all_bricks
+from glustolibs.gluster.glusterdir import get_dir_contents
+from glustolibs.gluster.glusterfile import (
+ get_file_stat, get_fattr, set_fattr, delete_fattr, get_pathinfo,
+ file_exists)
+
+
+@runs_on([['distributed-replicated', 'distributed-arbiter', 'distributed'],
+ ['glusterfs']])
+class TestPipeCharacterAndBlockDeviceFiles(GlusterBaseClass):
+
+ def setUp(self):
+ # calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Changing dist_count to 5
+ self.volume['voltype']['dist_count'] = 5
+
+ # Creating Volume and mounting the volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Volume creation or mount failed: %s"
+ % self.volname)
+
+ def tearDown(self):
+
+ # Unmounting and cleaning volume
+ ret = self.unmount_volume_and_cleanup_volume([self.mounts[0]])
+ if not ret:
+            raise ExecutionError("Unable to delete volume %s" % self.volname)
+
+ self.get_super_method(self, 'tearDown')()
+
+ def _create_character_and_block_device_files(self):
+ """Create character and block device files"""
+ self.list_of_device_files, self.file_names = [], []
+ for ftype, filename in (('b', 'blockfile'), ('c', 'Characterfile')):
+
+ # Create files using mknod
+ cmd = ("cd {}; mknod {} {} 1 5".format(
+ self.mounts[0].mountpoint, filename, ftype))
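+            # (mknod <name> b|c 1 5 creates the device node with major
+            # number 1 and minor number 5; the numbers are arbitrary here)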
+ ret, _, _ = g.run(self.clients[0], cmd)
+ self.assertEqual(
+ ret, 0, 'Failed to create %s file' % filename)
+
+ # Add file names and file path to lists
+ self.file_names.append(filename)
+ self.list_of_device_files.append('{}/{}'.format(
+ self.mounts[0].mountpoint, filename))
+
+ # Create file type list for the I/O
+ self.filetype_list = ["block special file", "character special file"]
+
+ def _create_pipe_file(self):
+ """Create pipe files"""
+
+ # Create pipe files using mkfifo
+ cmd = "cd {}; mkfifo {}".format(self.mounts[0].mountpoint, 'fifo')
+ ret, _, _ = g.run(self.clients[0], cmd)
+ self.assertEqual(ret, 0, 'Failed to create %s file' % 'fifo')
+
+ # Populate variables with fifo file details
+ self.list_of_device_files = [
+ '{}/{}'.format(self.mounts[0].mountpoint, 'fifo')]
+ self.file_names = ['fifo']
+ self.filetype_list = ['fifo']
+
+ def _set_xattr_trusted_foo(self, xattr_val):
+ """Sets xattr trusted.foo on all the files"""
+ for fname in self.list_of_device_files:
+ ret = set_fattr(self.clients[0], fname, 'trusted.foo',
+ xattr_val)
+ self.assertTrue(ret, "Unable to create custom xattr "
+ "for file {}".format(fname))
+
+ def _delete_xattr_trusted_foo(self):
+ """Removes xattr trusted.foo from all the files."""
+ for fname in self.list_of_device_files:
+ ret = delete_fattr(self.clients[0], fname, 'trusted.foo')
+ self.assertTrue(ret, "Unable to remove custom xattr for "
+ "file {}".format(fname))
+
+        """Check custom xattr from mount point and on bricks."""
+ """Check custom xttar from mount point and on bricks."""
+ # Check custom xattr from mount point
+ for fname in self.list_of_device_files:
+ ret = get_fattr(self.clients[0], fname, 'trusted.foo',
+ encode='text')
+ if visible:
+ self.assertEqual(ret, xattr_val,
+ "Custom xattr not found from mount.")
+ else:
+ self.assertIsNone(ret, "Custom attribute visible at mount "
+ "point even after deletion")
+
+ # Check custom xattr on bricks
+ for brick in get_all_bricks(self.mnode, self.volname):
+ node, brick_path = brick.split(':')
+ files_on_bricks = get_dir_contents(node, brick_path)
+ files = [
+ fname for fname in self.file_names
+ if fname in files_on_bricks]
+ for fname in files:
+ ret = get_fattr(node, "{}/{}".format(brick_path, fname),
+ 'trusted.foo', encode='text')
+ if visible:
+ self.assertEqual(ret, xattr_val,
+ "Custom xattr not visible on bricks")
+ else:
+ self.assertIsNone(ret, "Custom attribute visible on "
+ "brick even after deletion")
+
+ def _check_if_files_are_stored_only_on_expected_bricks(self):
+ """Check if files are stored only on expected bricks"""
+ for fname in self.list_of_device_files:
+ # Fetch trusted.glusterfs.pathinfo and check if file is present on
+ # brick or not
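+            # trusted.glusterfs.pathinfo is a virtual xattr generated by
+            # glusterfs on the fly; it lists the brick path(s) that hold
+            # the file, roughly of the form
+            # <POSIX(/brick/path):host:/brick/path/file>.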
+ ret = get_pathinfo(self.clients[0], fname)
+ self.assertIsNotNone(ret, "Unable to get "
+ "trusted.glusterfs.pathinfo of file %s"
+ % fname)
+ present_brick_list = []
+ for brick_path in ret['brickdir_paths']:
+ node, path = brick_path.split(":")
+ ret = file_exists(node, path)
+ self.assertTrue(ret, "Unable to find file {} on brick {}"
+ .format(fname, path))
+ brick_text = brick_path.split('/')[:-1]
+ if brick_text[0][0:2].isdigit():
+ brick_text[0] = gethostbyname(brick_text[0][:-1]) + ":"
+ present_brick_list.append('/'.join(brick_text))
+
+ # Check on other bricks where file doesn't exist
+ brick_list = get_all_bricks(self.mnode, self.volname)
+ other_bricks = [
+ brk for brk in brick_list if brk not in present_brick_list]
+ for brick in other_bricks:
+ node, path = brick.split(':')
+ ret = file_exists(node, "{}/{}".format(path,
+ fname.split('/')[-1]))
+ self.assertFalse(ret, "Unexpected: Able to find file {} on "
+ "brick {}".format(fname, path))
+
+ def _check_filetype_of_files_from_mountpoint(self):
+ """Check filetype of files from mountpoint"""
+        for fname, filetype in zip(self.list_of_device_files,
+                                   self.filetype_list):
+            # Check if filetype is as expected
+            ret = get_file_stat(self.clients[0], fname)
+            self.assertEqual(ret['filetype'], filetype,
+                             "File type not reflecting properly for %s"
+                             % filetype)
+
+    def _compare_stat_output_from_mount_point_and_bricks(self):
+ """Compare stat output from mountpoint and bricks"""
+ for fname in self.list_of_device_files:
+ # Fetch stat output from mount point
+ mountpoint_stat = get_file_stat(self.clients[0], fname)
+ bricks = get_pathinfo(self.clients[0], fname)
+
+ # Fetch stat output from bricks
+ for brick_path in bricks['brickdir_paths']:
+ node, path = brick_path.split(":")
+ brick_stat = get_file_stat(node, path)
+ for key in ("filetype", "access", "size", "username",
+ "groupname", "uid", "gid", "epoch_atime",
+ "epoch_mtime", "epoch_ctime"):
+ self.assertEqual(mountpoint_stat[key], brick_stat[key],
+ "Difference observed between stat output "
+ "of mountpoint and bricks for file %s"
+ % fname)
+
+ def test_character_and_block_device_file_creation(self):
+ """
+ Test case:
+        1. Create distributed volume with 5 sub-volumes, start and mount it.
+ 2. Create character and block device files.
+ 3. Check filetype of files from mount point.
+        4. Verify that the files are stored only on the bricks
+           mentioned in the trusted.glusterfs.pathinfo xattr.
+ 5. Verify stat output from mount point and bricks.
+ """
+ # Create Character and block device files
+ self._create_character_and_block_device_files()
+
+ # Check filetype of files from mount point
+ self._check_filetype_of_files_from_mountpoint()
+
+        # Verify that the files are stored only on the bricks
+        # mentioned in the trusted.glusterfs.pathinfo xattr
+ self._check_if_files_are_stored_only_on_expected_bricks()
+
+ # Verify stat output from mount point and bricks
+        self._compare_stat_output_from_mount_point_and_bricks()
+
+ def test_character_and_block_device_file_removal_using_rm(self):
+ """
+ Test case:
+ 1. Create distributed volume with 5 sub-volumes, start and mount it.
+ 2. Create character and block device files.
+ 3. Check filetype of files from mount point.
+        4. Verify that the files are stored only on the bricks
+           mentioned in the trusted.glusterfs.pathinfo xattr.
+        5. Delete the files.
+        6. Verify that the files are deleted from all the bricks.
+ """
+ # Create Character and block device files
+ self._create_character_and_block_device_files()
+
+ # Check filetype of files from mount point
+ self._check_filetype_of_files_from_mountpoint()
+
+        # Verify that the files are stored only on the bricks
+        # mentioned in the trusted.glusterfs.pathinfo xattr
+ self._check_if_files_are_stored_only_on_expected_bricks()
+
+ # Delete both the character and block device files
+ for fname in self.list_of_device_files:
+ ret, _, _ = g.run(self.clients[0], 'rm -rf {}'.format(fname))
+ self.assertEqual(
+ ret, 0, 'Failed to remove {} file'.format(fname))
+
+ # Verify if the files are deleted from all bricks or not
+ for brick in get_all_bricks(self.mnode, self.volname):
+ node, path = brick.split(':')
+ for fname in self.file_names:
+ ret = file_exists(node, "{}/{}".format(path, fname))
+ self.assertFalse(ret, "Unexpected: Able to find file {} on "
+ " brick {} even after deleting".format(fname,
+ path))
+
+ def test_character_and_block_device_file_with_custom_xattrs(self):
+ """
+ Test case:
+ 1. Create distributed volume with 5 sub-volumes, start and mount it.
+ 2. Create character and block device files.
+ 3. Check filetype of files from mount point.
+ 4. Set a custom xattr for files.
+ 5. Verify that xattr for files is displayed on mount point and bricks.
+ 6. Modify custom xattr value and verify that xattr for files
+ is displayed on mount point and bricks.
+ 7. Remove the xattr and verify that custom xattr is not displayed.
+        8. Verify that mount point and bricks show the pathinfo xattr properly.
+ """
+ # Create Character and block device files
+ self._create_character_and_block_device_files()
+
+ # Check filetype of files from mount point
+ self._check_filetype_of_files_from_mountpoint()
+
+ # Set a custom xattr for files
+ self._set_xattr_trusted_foo("bar1")
+
+ # Verify that xattr for files is displayed on mount point and bricks
+ self._check_custom_xattr_trusted_foo("bar1")
+
+ # Modify custom xattr value
+ self._set_xattr_trusted_foo("bar2")
+
+ # Verify that xattr for files is displayed on mount point and bricks
+ self._check_custom_xattr_trusted_foo("bar2")
+
+ # Remove the xattr
+ self._delete_xattr_trusted_foo()
+
+ # Verify that custom xattr is not displayed
+ self._check_custom_xattr_trusted_foo("bar2", visible=False)
+
+ # Verify that mount point shows pathinfo xattr properly
+ self._check_if_files_are_stored_only_on_expected_bricks()
+
+ def test_pipe_file_create(self):
+ """
+ Test case:
+ 1. Create distributed volume with 5 sub-volumes, start and mount it.
+ 2. Create a pipe file.
+ 3. Check filetype of files from mount point.
+        4. Verify that the files are stored only on the bricks
+           mentioned in the trusted.glusterfs.pathinfo xattr.
+ 5. Verify stat output from mount point and bricks.
+ 6. Write data to fifo file and read data from fifo file
+ from the other instance of the same client.
+ """
+ # Create a pipe file
+ self._create_pipe_file()
+
+ # Check filetype of files from mount point
+ self._check_filetype_of_files_from_mountpoint()
+
+        # Verify that the files are stored only on the bricks
+        # mentioned in the trusted.glusterfs.pathinfo xattr
+ self._check_if_files_are_stored_only_on_expected_bricks()
+
+ # Verify stat output from mount point and bricks
+        self._compare_stat_output_from_mount_point_and_bricks()
+
+ # Write data to fifo file and read data from fifo file
+ # from the other instance of the same client.
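+        # The echo is run asynchronously as it blocks until the fifo is
+        # opened for reading by the cat command that follows.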
+ g.run_async(self.clients[0], "echo 'Hello' > {} ".format(
+ self.list_of_device_files[0]))
+ ret, out, _ = g.run(
+ self.clients[0], "cat < {}".format(self.list_of_device_files[0]))
+ self.assertEqual(
+ ret, 0, "Unable to fetch datat on other terimnal")
+ self.assertEqual(
+ "Hello", out.split('\n')[0],
+ "Hello not recieved on the second terimnal")
diff --git a/tests/functional/dht/test_readdirp_with_rebalance.py b/tests/functional/dht/test_readdirp_with_rebalance.py
new file mode 100644
index 000000000..6845e0fe3
--- /dev/null
+++ b/tests/functional/dht/test_readdirp_with_rebalance.py
@@ -0,0 +1,173 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.glusterdir import mkdir
+from glustolibs.gluster.rebalance_ops import (set_rebalance_throttle,
+ rebalance_start,
+ get_rebalance_status)
+from glustolibs.gluster.volume_libs import form_bricks_list_to_add_brick
+from glustolibs.gluster.brick_ops import add_brick
+from glustolibs.misc.misc_libs import upload_scripts
+from glustolibs.io.utils import validate_io_procs
+
+
+@runs_on([['distributed', 'replicated', 'dispersed',
+ 'arbiter', 'distributed-dispersed',
+ 'distributed-replicated', 'distributed-arbiter'],
+ ['glusterfs']])
+class TestReaddirpWithRebalance(GlusterBaseClass):
+ def setUp(self):
+ """
+ Setup and mount volume or raise ExecutionError
+ """
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+ self.all_mounts_procs, self.io_validation_complete = [], False
+
+ # Setup Volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Failed to Setup and Mount Volume")
+
+ # Upload io scripts for running IO on mounts
+ self.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(self.clients[0], self.script_upload_path)
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts to clients")
+
+ # Form brick list for expanding volume
+ self.add_brick_list = form_bricks_list_to_add_brick(
+ self.mnode, self.volname, self.servers, self.all_servers_info,
+ distribute_count=3)
+ if not self.add_brick_list:
+ raise ExecutionError("Volume %s: Failed to form bricks list for"
+ " expand" % self.volname)
+ g.log.info("Volume %s: Formed bricks list for expand", self.volname)
+
+ def tearDown(self):
+ # Unmount and cleanup original volume
+ ret = self.unmount_volume_and_cleanup_volume(mounts=[self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Failed to umount the vol & cleanup Volume")
+ g.log.info("Successful in umounting the volume and Cleanup")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_readdirp_with_rebalance(self):
+ """
+ Description: Tests to check that all directories are read
+ and listed while rebalance is still in progress.
+
+ Steps :
+ 1) Create a volume.
+ 2) Mount the volume using FUSE.
+ 3) Create a dir "master" on mount-point.
+ 4) Create 8000 empty dirs (dir1 to dir8000) inside dir "master".
+ 5) Now inside a few dirs (e.g. dir1 to dir10), create deep dirs
+ and inside every dir, create 50 files.
+ 6) Collect the number of dirs present on /mnt/<volname>/master
+ 7) Change the rebalance throttle to lazy.
+ 8) Add-brick to the volume (at least 3 replica sets.)
+ 9) Start rebalance using "force" option on the volume.
+ 10) List the directories on dir "master".
+ """
+ # pylint: disable=too-many-statements
+ # Start IO on mounts
+ m_point = self.mounts[0].mountpoint
+ ret = mkdir(self.mounts[0].client_system,
+ "{}/master".format(m_point))
+ self.assertTrue(ret, "mkdir of dir master failed")
+
+ # Create 8000 empty dirs
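+        # 'ulimit -n 64000' raises the shell's open file descriptor limit
+        # so that creating and later listing 8000 directories does not run
+        # out of file descriptors.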
+ cmd = ("ulimit -n 64000; /usr/bin/env python {} create_deep_dir"
+ " --dir-length 8000 --dir-depth 0"
+ " {}/master/".format(self.script_upload_path, m_point))
+ proc = g.run_async(self.mounts[0].client_system, cmd,
+ user=self.mounts[0].user)
+ self.all_mounts_procs.append(proc)
+ g.log.info("IO on %s:%s is started successfully",
+ self.mounts[0].client_system, m_point)
+
+ # Validate 8000 empty dirs are created successfully
+ ret = validate_io_procs(self.all_mounts_procs, self.mounts[0])
+ self.assertTrue(ret, "IO failed on some of the clients")
+ g.log.info("IO is successful on all mounts")
+
+ # Create deep dirs and files
+ self.all_mounts_procs = []
+ cmd = ("/usr/bin/env python {} create_deep_dirs_with_files"
+ " --dir-length 10 --dir-depth 1 --max-num-of-dirs 50 "
+ " --num-of-files 50 --file-type empty-file"
+ " {}/master/".format(self.script_upload_path, m_point))
+ proc = g.run_async(self.mounts[0].client_system, cmd,
+ user=self.mounts[0].user)
+ self.all_mounts_procs.append(proc)
+ g.log.info("IO on %s:%s is started successfully",
+ self.mounts[0].client_system, m_point)
+
+ # Validate deep dirs and files are created successfully
+ ret = validate_io_procs(self.all_mounts_procs, self.mounts[0])
+ self.assertTrue(ret, "IO failed on some of the clients")
+ g.log.info("IO is successful on all mounts")
+
+ # Check the dir count before rebalance
+ cmd = ('cd {}/master; ls -l | wc -l'.format(m_point))
+ ret, dir_count_before, _ = g.run(self.clients[0], cmd)
+ self.assertEqual(ret, 0, "Failed to "
+ "get directory count")
+ g.log.info("Dir count before %s", dir_count_before)
+
+ # Change the rebalance throttle to lazy
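+        # cluster.rebal-throttle accepts 'lazy', 'normal' and 'aggressive'
+        # and controls how many files are migrated in parallel; 'lazy'
+        # keeps rebalance slow so the later directory listing happens
+        # while rebalance is still in progress.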
+ ret, _, _ = set_rebalance_throttle(self.mnode, self.volname,
+ throttle_type='lazy')
+ self.assertEqual(ret, 0, "Failed to set rebal-throttle to lazy")
+ g.log.info("Rebal-throttle set to 'lazy' successfully")
+
+ # Add-bricks to the volume
+ ret, _, _ = add_brick(self.mnode, self.volname, self.add_brick_list)
+ self.assertEqual(ret, 0, "Failed to add-brick to the volume")
+ g.log.info("Added bricks to the volume successfully")
+
+ # Start rebalance using force
+ ret, _, _ = rebalance_start(self.mnode, self.volname, force=True)
+ self.assertEqual(ret, 0, "Failed to start rebalance")
+ g.log.info("Rebalance started successfully")
+
+ # Check if rebalance is in progress
+ rebalance_status = get_rebalance_status(self.mnode, self.volname)
+ status = rebalance_status['aggregate']['statusStr']
+ self.assertEqual(status, "in progress",
+ ("Rebalance is not in 'in progress' state,"
+ " either rebalance is in compeleted state"
+ " or failed to get rebalance status"))
+
+ # Check the dir count after rebalance
+ cmd = ('cd {}/master; ls -l | wc -l'.format(m_point))
+ ret, dir_count_after, _ = g.run(self.clients[0], cmd)
+ self.assertEqual(ret, 0, "Failed to do lookup and"
+ " get directory count")
+ g.log.info("Dir count after %s", dir_count_after)
+
+        # Check if there is any data loss. Compare the output strings
+        # directly; a set() comparison would only compare the characters
+        # of the two counts and could pass even when they differ.
+        self.assertEqual(dir_count_before, dir_count_after,
+                         "There is data loss")
+        g.log.info("The directory count before and after rebalance is"
+                   " the same. There is no data loss.")
diff --git a/tests/functional/dht/test_rebalance_add_brick_and_lookup.py b/tests/functional/dht/test_rebalance_add_brick_and_lookup.py
new file mode 100644
index 000000000..b02fe5eea
--- /dev/null
+++ b/tests/functional/dht/test_rebalance_add_brick_and_lookup.py
@@ -0,0 +1,113 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Description:
+ Rebalance with add brick and log time taken for lookup
+"""
+
+from time import time
+from glusto.core import Glusto as g
+
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.rebalance_ops import (rebalance_start,
+ wait_for_rebalance_to_complete)
+from glustolibs.gluster.volume_libs import expand_volume
+
+
+@runs_on([['distributed', 'distributed-replicated', 'distributed-arbiter',
+ 'distributed-dispersed'], ['glusterfs']])
+class TestRebalanceWithAddBrickAndLookup(GlusterBaseClass):
+ """ Rebalance with add brick and do lookup """
+
+ def setUp(self):
+ """Setup Volume"""
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+ self.all_mounts_procs = []
+
+ # Setup and mount the volume
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Setup Volume and Mount it")
+
+ def test_rebalance_with_add_brick_and_lookup(self):
+ """
+ Rebalance with add brick and then lookup on mount
+ - Create a Distributed-Replicated volume.
+ - Create deep dirs(200) and 100 files on the deepest directory.
+ - Expand volume.
+ - Initiate rebalance
+ - Once rebalance is completed, do a lookup on mount and time it.
+ """
+ # Create Deep dirs.
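+        # The loop below cd's into each directory it creates, producing a
+        # single chain 200 directories deep (dir1/dir2/.../dir200) with
+        # 100 files created at depth 100.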
+ cmd = (
+ "cd %s/; for i in {1..200};do mkdir dir${i}; cd dir${i};"
+ " if [ ${i} -eq 100 ]; then for j in {1..100}; do touch file${j};"
+ " done; fi; done;" % (self.mounts[0].mountpoint))
+ ret, _, _ = g.run(self.clients[0], cmd)
+ self.assertEqual(ret, 0, "Failed to create the deep dirs and files")
+ g.log.info("Deep dirs and files created.")
+
+ # Expand the volume.
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, ("Failed to expand the volume %s", self.volname))
+ g.log.info("Expanding volume is successful on "
+ "volume %s", self.volname)
+
+ # Start Rebalance.
+ ret, _, _ = rebalance_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, ("Failed to start rebalance on the volume "
+ "%s", self.volname))
+ g.log.info("Successfully started rebalance on the volume %s",
+ self.volname)
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
+ timeout=500)
+ self.assertTrue(ret, ("Rebalance is not yet complete on the volume "
+ "%s", self.volname))
+ g.log.info("Rebalance is successfully complete on the volume %s",
+ self.volname)
+
+ # Do a lookup on the mountpoint and note the time taken to run.
+        # The time used for comparison was benchmarked using RHGS 3.5.2
+        # for this TC. For 3.5.2, the time taken came out to be 4 seconds.
+        # The condition for the subtest to pass is that the lookup should
+        # not exceed this value by more than 10%, i.e. 4.4 seconds.
+ cmd = ("ls -R %s/" % (self.mounts[0].mountpoint))
+ start_time = time()
+ ret, _, _ = g.run(self.clients[0], cmd)
+ end_time = time()
+ self.assertEqual(ret, 0, "Failed to do a lookup")
+ time_taken = end_time - start_time
+ self.assertTrue(time_taken <= 4.4, "Lookup takes more time "
+ "than the previously benchmarked value.")
+ g.log.info("Lookup took : %d seconds", time_taken)
+
+ def tearDown(self):
+ """tear Down callback"""
+ # Unmount Volume and cleanup.
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Filed to Unmount Volume and "
+ "Cleanup Volume")
+ g.log.info("Successful in Unmount Volume and cleanup.")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
diff --git a/tests/functional/dht/test_rebalance_dir_file_from_multiple_clients.py b/tests/functional/dht/test_rebalance_dir_file_from_multiple_clients.py
index dd80479bb..add72aec5 100644
--- a/tests/functional/dht/test_rebalance_dir_file_from_multiple_clients.py
+++ b/tests/functional/dht/test_rebalance_dir_file_from_multiple_clients.py
@@ -201,7 +201,7 @@ class RebalanceValidation(GlusterBaseClass):
# Wait for rebalance to complete
g.log.info("Waiting for rebalance to complete")
ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
- timeout=600)
+ timeout=1800)
self.assertTrue(ret, ("Rebalance is not yet complete on the volume "
"%s", self.volname))
g.log.info("Rebalance status on volume %s: Complete",
diff --git a/tests/functional/dht/test_rebalance_files_with_holes.py b/tests/functional/dht/test_rebalance_files_with_holes.py
new file mode 100644
index 000000000..ba01eadcb
--- /dev/null
+++ b/tests/functional/dht/test_rebalance_files_with_holes.py
@@ -0,0 +1,128 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.rebalance_ops import (
+ rebalance_start, wait_for_rebalance_to_complete)
+from glustolibs.gluster.volume_libs import expand_volume, shrink_volume
+
+
+@runs_on([['distributed-replicated', 'distributed-arbiter',
+ 'distributed-dispersed', 'distributed', 'replicated',
+ 'arbiter', 'dispersed'], ['glusterfs']])
+class TestAddBrickRebalanceFilesWithHoles(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Creating Volume and mounting the volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Volume creation or mount failed: %s"
+ % self.volname)
+ self.first_client = self.mounts[0].client_system
+
+ def tearDown(self):
+
+ # Unmounting and cleaning volume
+ ret = self.unmount_volume_and_cleanup_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Unable to delete volume %s" % self.volname)
+
+ self.get_super_method(self, 'tearDown')()
+
+ def test_add_brick_rebalance_files_with_holes(self):
+ """
+ Test case:
+ 1. Create a volume, start it and mount it using fuse.
+ 2. On the volume root, create files with holes.
+ 3. After the file creation is complete, add bricks to the volume.
+ 4. Trigger rebalance on the volume.
+ 5. Wait for rebalance to complete.
+ """
+ # On the volume root, create files with holes
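+        # dd's seek= skips that many output blocks before writing, so each
+        # file starts with a large hole (unallocated space that reads as
+        # zeros) followed by 1M of random data, making it sparse.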
+ cmd = ("cd %s;for i in {1..5000}; do dd if=/dev/urandom"
+ " of=file_with_holes$i bs=1M count=1 seek=100M; done"
+ % self.mounts[0].mountpoint)
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertFalse(ret, "Failed to create files with holes")
+
+ # After the file creation is complete, add bricks to the volume
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, "Failed to add brick on volume %s"
+ % self.volname)
+
+ # Trigger rebalance on the volume
+ ret, _, _ = rebalance_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Failed to start rebalance on the volume %s"
+ % self.volname)
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
+ timeout=9000)
+ self.assertTrue(ret, "Rebalance is not yet complete on the volume "
+ "%s" % self.volname)
+ g.log.info("Rebalance successfully completed")
+
+
+@runs_on([['distributed-replicated', 'distributed-arbiter',
+ 'distributed-dispersed', 'distributed'], ['glusterfs']])
+class TestRemoveBrickRebalanceFilesWithHoles(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Creating Volume and mounting the volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Volume creation or mount failed: %s"
+ % self.volname)
+ self.first_client = self.mounts[0].client_system
+
+ def tearDown(self):
+
+ # Unmounting and cleaning volume
+ ret = self.unmount_volume_and_cleanup_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Unable to delete volume %s" % self.volname)
+
+ self.get_super_method(self, 'tearDown')()
+
+ def test_remove_brick_rebalance_files_with_holes(self):
+ """
+ Test case:
+ 1. Create a volume, start it and mount it using fuse.
+ 2. On the volume root, create files with holes.
+ 3. After the file creation is complete, remove-brick from volume.
+ 4. Wait for remove-brick to complete.
+ """
+ # On the volume root, create files with holes
+ cmd = ("cd %s;for i in {1..2000}; do dd if=/dev/urandom"
+ " of=file_with_holes$i bs=1M count=1 seek=100M; done"
+ % self.mounts[0].mountpoint)
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertFalse(ret, "Failed to create files with holes")
+
+ # After the file creation is complete, remove-brick from volume
+ # Wait for remove-brick to complete
+ ret = shrink_volume(self.mnode, self.volname, rebalance_timeout=16000)
+ self.assertTrue(ret, "Failed to remove-brick from volume")
+ g.log.info("Remove-brick rebalance successful")
diff --git a/tests/functional/dht/test_rebalance_multiple_expansions.py b/tests/functional/dht/test_rebalance_multiple_expansions.py
new file mode 100644
index 000000000..e96d88d56
--- /dev/null
+++ b/tests/functional/dht/test_rebalance_multiple_expansions.py
@@ -0,0 +1,100 @@
+# Copyright (C) 2021 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.rebalance_ops import (
+ rebalance_start, wait_for_rebalance_to_complete)
+from glustolibs.gluster.volume_libs import expand_volume
+from glustolibs.io.utils import collect_mounts_arequal
+
+
+@runs_on([['distributed', 'distributed-replicated'],
+ ['glusterfs']])
+class TestRebalanceMultipleExpansions(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume
+ if not self.setup_volume_and_mount_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Setup and mount volume")
+
+ self.first_client = self.mounts[0].client_system
+
+ def tearDown(self):
+
+ # Unmount and clean volume
+ if not self.unmount_volume_and_cleanup_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Cleanup Volume")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_rebalance_multiple_expansions(self):
+ """
+ Test case:
+ 1. Create a volume, start it and mount it
+ 2. Create some file on mountpoint
+ 3. Collect arequal checksum on mount point pre-rebalance
+ 4. Do the following 3 times:
+ 5. Expand the volume
+ 6. Start rebalance and wait for it to finish
+ 7. Collect arequal checksum on mount point post-rebalance
+ and compare with value from step 3
+ """
+
+ # Create some file on mountpoint
+ cmd = ("cd %s; for i in {1..500} ; do "
+ "dd if=/dev/urandom of=file$i bs=10M count=1; done"
+ % self.mounts[0].mountpoint)
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertEqual(ret, 0, "IO failed on volume %s"
+ % self.volname)
+
+ # Collect arequal checksum before rebalance
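+        # arequal computes a checksum over the file data and metadata of
+        # the whole mount; matching values before and after rebalance
+        # indicate no data was lost.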
+ arequal_checksum_before = collect_mounts_arequal(self.mounts[0])
+
+ for _ in range(3):
+ # Add brick to volume
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, "Failed to add brick on volume %s"
+ % self.volname)
+
+ # Trigger rebalance and wait for it to complete
+ ret, _, _ = rebalance_start(self.mnode, self.volname,
+ force=True)
+ self.assertEqual(ret, 0, "Failed to start rebalance on "
+ "volume %s" % self.volname)
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
+ timeout=1200)
+ self.assertTrue(ret, "Rebalance is not yet complete on the volume "
+ "%s" % self.volname)
+ g.log.info("Rebalance successfully completed")
+
+ # Collect arequal checksum after rebalance
+ arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
+
+ # Check for data loss by comparing arequal before and after
+ # rebalance
+ self.assertEqual(arequal_checksum_before, arequal_checksum_after,
+ "arequal checksum is NOT MATCHNG")
+ g.log.info("arequal checksum is SAME")
diff --git a/tests/functional/dht/test_rebalance_multiple_shrinks.py b/tests/functional/dht/test_rebalance_multiple_shrinks.py
new file mode 100644
index 000000000..a95cdf141
--- /dev/null
+++ b/tests/functional/dht/test_rebalance_multiple_shrinks.py
@@ -0,0 +1,87 @@
+# Copyright (C) 2021 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.volume_libs import shrink_volume
+from glustolibs.io.utils import collect_mounts_arequal
+
+
+@runs_on([['distributed'], ['glusterfs']])
+class TestRebalanceMultipleShrinks(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Changing dist_count to 6
+ self.volume['voltype']['dist_count'] = 6
+
+ # Setup Volume
+ if not self.setup_volume_and_mount_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Setup and mount volume")
+
+ self.first_client = self.mounts[0].client_system
+
+ def tearDown(self):
+
+ # Unmount and clean volume
+ if not self.unmount_volume_and_cleanup_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Cleanup Volume")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_rebalance_multiple_shrinks(self):
+ """
+ Test case:
+ 1. Modify the distribution count of a volume
+ 2. Create a volume, start it and mount it
+ 3. Create some file on mountpoint
+ 4. Collect arequal checksum on mount point pre-rebalance
+ 5. Do the following 3 times:
+ 6. Shrink the volume
+ 7. Collect arequal checksum on mount point post-rebalance
+ and compare with value from step 4
+ """
+
+ # Create some file on mountpoint
+ cmd = ("cd %s; for i in {1..500} ; do "
+ "dd if=/dev/urandom of=file$i bs=10M count=1; done"
+ % self.mounts[0].mountpoint)
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertEqual(ret, 0, "IO failed on volume %s"
+ % self.volname)
+
+ # Collect arequal checksum before rebalance
+ arequal_checksum_before = collect_mounts_arequal(self.mounts[0])
+
+ for _ in range(3):
+ # Shrink volume
+ ret = shrink_volume(self.mnode, self.volname,
+ rebalance_timeout=16000)
+ self.assertTrue(ret, "Failed to remove-brick from volume")
+ g.log.info("Remove-brick rebalance successful")
+
+ # Collect arequal checksum after rebalance
+ arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
+
+ # Check for data loss by comparing arequal before and after
+ # rebalance
+ self.assertEqual(arequal_checksum_before, arequal_checksum_after,
+ "arequal checksum is NOT MATCHNG")
+ g.log.info("arequal checksum is SAME")
diff --git a/tests/functional/dht/test_rebalance_nested_dir.py b/tests/functional/dht/test_rebalance_nested_dir.py
new file mode 100644
index 000000000..77f099ad3
--- /dev/null
+++ b/tests/functional/dht/test_rebalance_nested_dir.py
@@ -0,0 +1,99 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.rebalance_ops import (
+ rebalance_start, wait_for_rebalance_to_complete)
+from glustolibs.gluster.volume_libs import expand_volume
+from glustolibs.io.utils import collect_mounts_arequal
+
+
+@runs_on([['distributed', 'distributed-replicated'],
+ ['glusterfs']])
+class TestRebalanceNestedDir(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume
+ if not self.setup_volume_and_mount_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Setup and mount volume")
+
+ self.first_client = self.mounts[0].client_system
+
+ def tearDown(self):
+
+ # Unmount and clean volume
+ if not self.unmount_volume_and_cleanup_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Cleanup Volume")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_rebalance_nested_dir(self):
+ """
+ Test case:
+ 1. Create a volume, start it and mount it
+ 2. On mount point, create a large nested dir structure with
+ files in the inner-most dir
+ 3. Collect arequal checksum on mount point pre-rebalance
+ 4. Expand the volume
+ 5. Start rebalance and wait for it to finish
+ 6. Collect arequal checksum on mount point post-rebalance
+       and compare with value from step 3
+ """
+
+ # create a large nested dir structure with files in the inner-most dir
+ cmd = ("cd %s; for i in {1..100} ; do mkdir $i; cd $i; done;"
+ "for j in {1..100} ; do "
+ "dd if=/dev/urandom of=file$j bs=10M count=1; done"
+ % self.mounts[0].mountpoint)
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertEqual(ret, 0, "IO failed on volume %s"
+ % self.volname)
+
+ # Collect arequal checksum before rebalance
+ arequal_checksum_before = collect_mounts_arequal(self.mounts[0])
+
+ # Add brick to volume
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, "Failed to add brick on volume %s"
+ % self.volname)
+
+ # Trigger rebalance and wait for it to complete
+ ret, _, _ = rebalance_start(self.mnode, self.volname,
+ force=True)
+ self.assertEqual(ret, 0, "Failed to start rebalance on the volume %s"
+ % self.volname)
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
+ timeout=1200)
+ self.assertTrue(ret, "Rebalance is not yet complete on the volume "
+ "%s" % self.volname)
+ g.log.info("Rebalance successfully completed")
+
+ # Collect arequal checksum after rebalance
+ arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
+
+ # Check for data loss by comparing arequal before and after rebalance
+ self.assertEqual(arequal_checksum_before, arequal_checksum_after,
+ "arequal checksum is NOT MATCHNG")
+ g.log.info("arequal checksum is SAME")
diff --git a/tests/functional/dht/test_rebalance_peer_probe.py b/tests/functional/dht/test_rebalance_peer_probe.py
new file mode 100644
index 000000000..7ffc9ca63
--- /dev/null
+++ b/tests/functional/dht/test_rebalance_peer_probe.py
@@ -0,0 +1,130 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from time import sleep
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.rebalance_ops import (
+ rebalance_start, wait_for_rebalance_to_complete)
+from glustolibs.gluster.volume_libs import expand_volume
+from glustolibs.io.utils import collect_mounts_arequal
+from glustolibs.gluster.peer_ops import (peer_probe_servers, peer_detach)
+
+
+@runs_on([['distributed'], ['glusterfs']])
+class TestRebalancePeerProbe(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume
+ if not self.setup_volume_and_mount_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Setup and mount volume")
+
+ self.first_client = self.mounts[0].client_system
+ self.is_peer_detached = False
+
+ def tearDown(self):
+
+ # Unmount and clean volume
+ if not self.unmount_volume_and_cleanup_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Cleanup Volume")
+
+ # Probe detached node in case it's still detached
+ if self.is_peer_detached:
+ if not peer_probe_servers(self.mnode, self.servers[5]):
+ raise ExecutionError("Failed to probe detached "
+ "servers %s" % self.servers)
+ g.log.info("Peer probe success for detached "
+ "servers %s", self.servers)
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_rebalance_peer_probe(self):
+ """
+ Test case:
+ 1. Detach a peer
+ 2. Create a volume, start it and mount it
+ 3. Start creating a few files on mount point
+ 4. Collect arequal checksum on mount point pre-rebalance
+ 5. Expand the volume
+ 6. Start rebalance
+        7. While rebalance is going on, probe a peer and check if
+           the peer was probed successfully
+        8. Collect arequal checksum on mount point post-rebalance
+           and compare with value from step 4
+ """
+
+ # Detach a peer
+ ret, _, _ = peer_detach(self.mnode, self.servers[5])
+ self.assertEqual(ret, 0, "Failed to detach peer %s"
+ % self.servers[5])
+
+ self.is_peer_detached = True
+
+ # Start I/O from mount point and wait for it to complete
+ cmd = ("cd %s; for i in {1..1000} ; do "
+ "dd if=/dev/urandom of=file$i bs=10M count=1; done"
+ % self.mounts[0].mountpoint)
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertEqual(ret, 0, "IO failed on volume %s"
+ % self.volname)
+
+ # Collect arequal checksum before rebalance
+ arequal_checksum_before = collect_mounts_arequal(self.mounts[0])
+
+ # Add brick to volume
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, "Failed to add brick on volume %s"
+ % self.volname)
+
+ # Trigger rebalance and wait for it to complete
+ ret, _, _ = rebalance_start(self.mnode, self.volname,
+ force=True)
+ self.assertEqual(ret, 0, "Failed to start rebalance on the volume %s"
+ % self.volname)
+
+ # Let rebalance run for a while
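+        # A short sleep makes it likely that the peer probe below happens
+        # while file migration is still ongoing.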
+ sleep(5)
+
+ # Add new node to the cluster
+ ret = peer_probe_servers(self.mnode, self.servers[5])
+ self.assertTrue(ret, "Failed to peer probe server : %s"
+ % self.servers[5])
+ g.log.info("Peer probe success for %s and all peers are in "
+ "connected state", self.servers[5])
+
+ self.is_peer_detached = False
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
+ timeout=1200)
+ self.assertTrue(ret, "Rebalance is not yet complete on the volume "
+ "%s" % self.volname)
+ g.log.info("Rebalance successfully completed")
+
+ # Collect arequal checksum after rebalance
+ arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
+
+ # Check for data loss by comparing arequal before and after rebalance
+ self.assertEqual(arequal_checksum_before, arequal_checksum_after,
+ "arequal checksum is NOT MATCHNG")
+ g.log.info("arequal checksum is SAME")
diff --git a/tests/functional/dht/test_rebalance_preserve_user_permissions.py b/tests/functional/dht/test_rebalance_preserve_user_permissions.py
new file mode 100644
index 000000000..59327f329
--- /dev/null
+++ b/tests/functional/dht/test_rebalance_preserve_user_permissions.py
@@ -0,0 +1,194 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Description:
+ Rebalance: permissions check as non root user
+"""
+
+from glusto.core import Glusto as g
+
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.rebalance_ops import (
+ rebalance_start,
+ wait_for_rebalance_to_complete)
+from glustolibs.gluster.volume_libs import (
+ expand_volume,
+ log_volume_info_and_status)
+from glustolibs.io.utils import (collect_mounts_arequal)
+from glustolibs.gluster.lib_utils import (add_user, del_user)
+from glustolibs.gluster.glusterfile import (
+ get_file_stat,
+ set_file_permissions)
+
+
+@runs_on([['distributed', 'distributed-replicated'],
+ ['glusterfs']])
+class TestRebalancePreserveUserPermissions(GlusterBaseClass):
+ def setUp(self):
+ self.get_super_method(self, 'setUp')()
+
+ # Creating Volume and mounting the volume
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
+ g.log.info("Successful in Setup Volume and Mount Volume")
+
+ self.user = "glusto_user"
+ self.client = self.mounts[0].client_system
+ self.mountpoint = self.mounts[0].mountpoint
+ # Add new user on the client node
+ ret = add_user(self.client, self.user)
+ if not ret:
+ raise ExecutionError("Failed to add user")
+
+ def tearDown(self):
+ ret = del_user(self.client, self.user)
+ if not ret:
+ raise ExecutionError("Failed to delete user")
+ # Unmount Volume and cleanup.
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Unmount Volume and "
+ "Cleanup Volume")
+ g.log.info("Successful in Unmount Volume and cleanup.")
+
+ self.get_super_method(self, 'tearDown')()
+
+ def _start_rebalance_and_wait(self):
+ """Start rebalance and wait"""
+ # Start Rebalance
+ ret, _, _ = rebalance_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, ("Failed to start rebalance on the volume "
+ "%s", self.volname))
+ g.log.info("Successfully started rebalance on the volume %s",
+ self.volname)
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
+ self.assertTrue(ret, ("Rebalance is not yet complete on the volume "
+ "%s", self.volname))
+ g.log.info("Rebalance is successfully complete on the volume %s",
+ self.volname)
+
+ def _get_arequal_and_check_if_equal_to_before(self):
+ """Check if arequal checksum is equal or not"""
+ self.arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
+ self.assertEqual(
+ self.arequal_checksum_before, self.arequal_checksum_after,
+ "arequal checksum is NOT MATCHNG")
+ g.log.info("arequal checksum is SAME")
+
+ def _logged_vol_info(self):
+ """Log volume info and status"""
+ ret = log_volume_info_and_status(self.mnode, self.volname)
+ self.assertTrue(ret, ("Logging volume info and status failed on "
+ "volume %s", self.volname))
+
+ def _check_user_permission(self):
+ """
+ Verify permissions on MP and file
+ """
+ stat_mp_dict = get_file_stat(self.client, self.mountpoint)
+ self.assertIsNotNone(stat_mp_dict, "stat on %s failed"
+ % self.mountpoint)
+ self.assertEqual(stat_mp_dict['access'], '777',
+ "Expected 777 "
+ "but found %s" % stat_mp_dict['access'])
+ g.log.info("File permissions for mountpoint is 777 as expected")
+
+ # check owner and group of random file
+ fpath = self.mountpoint + "/d1/f.1"
+ stat_dict = get_file_stat(self.client, fpath)
+ self.assertIsNotNone(stat_dict, "stat on %s failed" % fpath)
+ self.assertEqual(stat_dict['username'], self.user,
+ "Expected %s but found %s"
+ % (self.user, stat_dict['username']))
+ self.assertEqual(stat_dict['groupname'], self.user,
+ "Expected %s but found %s"
+ % (self.user, stat_dict['groupname']))
+ g.log.info("User and Group are %s as expected", self.user)
+
+ def _testcase(self, number_of_expands=1):
+ """
+ Test case:
+ 1. Create a volume start it and mount on the client.
+ 2. Set full permission on the mount point.
+ 3. Add new user to the client.
+ 4. As the new user create dirs/files.
+ 5. Compute arequal checksum and check permission on / and subdir.
+ 6. expand cluster according to number_of_expands and start rebalance.
+ 7. After rebalance is completed:
+ 7.1 check arequal checksum
+        7.2 verify no change in / and sub dir permissions.
+ 7.3 As the new user create and delete file/dir.
+ """
+ # Set full permissions on the mount point.
+ ret = set_file_permissions(self.clients[0], self.mountpoint, "-R 777")
+ self.assertTrue(ret, "Failed to set permissions on the mount point")
+ g.log.info("Set full permissions on the mount point")
+
+        # Create dirs/files as self.user
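+        # The '\$' escapes defer variable expansion to the su login shell
+        # and '%%' is a literal '%' after Python string formatting, so the
+        # remote shell sees 'let x=$i%10'.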
+ cmd = (r'su -l %s -c "cd %s;'
+ r'for i in {0..9}; do mkdir d\$i; done;'
+ r'for i in {0..99}; do let x=\$i%%10;'
+ r'dd if=/dev/urandom of=d\$x/f.\$i bs=1024 count=1; done"'
+ % (self.user, self.mountpoint))
+ ret, _, _ = g.run(self.client, cmd)
+ self.assertEqual(ret, 0, ("Failed to create files as %s", self.user))
+ g.log.info("IO as %s is successful", self.user)
+
+ # check permission on / and subdir
+ self._check_user_permission()
+
+ # get arequal checksum before expand
+ self.arequal_checksum_before = collect_mounts_arequal(self.mounts[0])
+
+ self._logged_vol_info()
+
+ # expand the volume
+ for i in range(number_of_expands):
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, ("Failed to expand iter %d volume %s",
+ i, self.volname))
+
+ self._logged_vol_info()
+ # Start Rebalance and wait for completion
+ self._start_rebalance_and_wait()
+
+ # compare arequals checksum before and after rebalance
+ self._get_arequal_and_check_if_equal_to_before()
+
+ # permissions check on / and sub dir
+ self._check_user_permission()
+
+        # Create/delete files as self.user
+ cmd = ('su -l %s -c '
+ '"cd %s; touch file.test;'
+ 'find . -mindepth 1 -maxdepth 1 -type d | xargs rm -rf"'
+ % (self.user, self.mountpoint))
+ ret, _, _ = g.run(self.client, cmd)
+
+ self.assertEqual(ret, 0, ("User %s failed to create files", self.user))
+ g.log.info("IO as %s is successful", self.user)
+
+ def test_rebalance_preserve_user_permissions(self):
+ self._testcase()
+
+ def test_rebalance_preserve_user_permissions_multi_expands(self):
+ self._testcase(2)
diff --git a/tests/functional/dht/test_rebalance_remove_brick_with_quota.py b/tests/functional/dht/test_rebalance_remove_brick_with_quota.py
new file mode 100644
index 000000000..b9da72b47
--- /dev/null
+++ b/tests/functional/dht/test_rebalance_remove_brick_with_quota.py
@@ -0,0 +1,160 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Description:
+ Removing brick from volume after enabling quota.
+"""
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.rebalance_ops import (rebalance_start,
+ wait_for_rebalance_to_complete)
+from glustolibs.gluster.volume_libs import (
+ log_volume_info_and_status,
+ shrink_volume)
+from glustolibs.gluster.quota_ops import (
+ quota_enable,
+ quota_set_hard_timeout,
+ quota_set_soft_timeout,
+ quota_limit_usage)
+from glustolibs.gluster.quota_libs import quota_validate
+from glustolibs.io.utils import wait_for_io_to_complete
+from glustolibs.misc.misc_libs import upload_scripts
+
+
+@runs_on([['distributed'], ['glusterfs']])
+class TestRemoveBrickWithQuota(GlusterBaseClass):
+ """ Remove Brick With Quota Enabled"""
+
+ def setUp(self):
+ """Setup Volume"""
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+ self.all_mounts_procs = []
+
+ # Setup and Mount the volume
+ g.log.info("Starting to Setup volume and mount it.")
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Setup Volume and mount it")
+
+ # Upload IO script for running IO on mounts
+ self.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(self.mounts[0].client_system,
+ self.script_upload_path)
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts to client")
+
+ def test_brick_removal_with_quota(self):
+ """
+ Test Brick removal with quota in place
+ 1. Create Volume of type distribute
+ 2. Set Quota limit on the directory
+ 3. Do some IO to reach the Hard limit
+ 4. After IO ends, remove bricks
+ 5. Quota validation should succeed.
+ """
+ # Enable Quota
+ ret, _, _ = quota_enable(self.mnode, self.volname)
+ self.assertEqual(
+ ret, 0, ("Failed to enable quota on the volume 5s", self.volname))
+ g.log.info("Successfully enabled quota on volume %s", self.volname)
+
+ # Set the Quota timeouts to 0 for strict accounting
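+        # The soft and hard timeouts control how long cached quota
+        # accounting is trusted (the hard timeout applies once the soft
+        # limit is crossed); setting both to 0 forces a check on every
+        # operation so the limit is enforced strictly.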
+ ret, _, _ = quota_set_hard_timeout(self.mnode, self.volname, 0)
+ self.assertEqual(
+ ret, 0, ("Failed to set hard-timeout to 0 for %s", self.volname))
+ ret, _, _ = quota_set_soft_timeout(self.mnode, self.volname, 0)
+ self.assertEqual(
+ ret, 0, ("Failed to set soft-timeout to 0 for %s", self.volname))
+ g.log.info(
+ "Quota soft and hard timeout has been set to 0 for %s",
+ self.volname)
+
+ # Set the quota limit of 100 MB on root dir of the volume
+ ret, _, _ = quota_limit_usage(self.mnode, self.volname, "/", "100MB")
+ self.assertEqual(ret, 0, "Failed to set Quota for dir root")
+ g.log.info("Successfully set quota limit for dir root")
+
+ # Do some IO until hard limit is reached.
+ cmd = (
+ "/usr/bin/env python %s create_files "
+ "-f 100 --fixed-file-size 1M --base-file-name file %s"
+ % (self.script_upload_path, self.mounts[0].mountpoint))
+ proc = g.run_async(
+ self.mounts[0].client_system, cmd, user=self.mounts[0].user)
+ self.all_mounts_procs.append(proc)
+
+ # Wait for IO to complete and validate IO
+ self.assertTrue(wait_for_io_to_complete(self.all_mounts_procs,
+ self.mounts[0]),
+ "IO failed on some of the clients")
+ g.log.info("IO completed on the clients")
+
+ # Validate quota
+ ret = quota_validate(self.mnode, self.volname,
+ path='/', hard_limit=104857600,
+ sl_exceeded=True, hl_exceeded=True)
+ self.assertTrue(ret, "Quota validate Failed for '/'")
+ g.log.info("Quota Validated for path '/'")
+
+ # Log Volume info and status before shrinking volume.
+ log_volume_info_and_status(self.mnode, self.volname)
+
+ # Shrink the volume.
+ ret = shrink_volume(self.mnode, self.volname)
+ self.assertTrue(ret, ("Failed to shrink volume on "
+ "volume %s", self.volname))
+ g.log.info("Shrinking volume is successful on "
+ "volume %s", self.volname)
+
+ # Log volume info and status after shrinking volume.
+ log_volume_info_and_status(self.mnode, self.volname)
+
+ # Perform rebalance start operation.
+ ret, _, _ = rebalance_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, ("Failed to start rebalance on the volume "
+ "%s", self.volname))
+ g.log.info("Rebalance started.")
+
+ # Wait till rebalance ends.
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
+ self.assertTrue(ret, ("Rebalance is not yet complete on the volume "
+ "%s", self.volname))
+ g.log.info("Rebalance is successfully complete on the volume %s",
+ self.volname)
+
+ # Validate quota
+ ret = quota_validate(self.mnode, self.volname,
+ path='/', hard_limit=104857600,
+ sl_exceeded=True, hl_exceeded=True)
+ self.assertTrue(ret, "Quota validate Failed for '/'")
+ g.log.info("Quota Validated for path '/'")
+
+ def tearDown(self):
+ "tear Down Callback"""
+ # Unmount volume and do cleanup
+ g.log.info("Starting to Unmount volume and cleanup")
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Faile to Unmount and cleanup volume")
+ g.log.info("Successful in Unmount and cleanup of volumes")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
diff --git a/tests/functional/dht/test_rebalance_rename.py b/tests/functional/dht/test_rebalance_rename.py
new file mode 100644
index 000000000..c449d2945
--- /dev/null
+++ b/tests/functional/dht/test_rebalance_rename.py
@@ -0,0 +1,181 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Description:
+ Renaming of directories and files while rebalance is running
+"""
+
+from unittest import skip
+from glusto.core import Glusto as g
+
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.glusterdir import mkdir
+from glustolibs.gluster.rebalance_ops import (get_rebalance_status,
+ rebalance_start,
+ wait_for_rebalance_to_complete)
+from glustolibs.gluster.volume_libs import (
+ expand_volume,
+ log_volume_info_and_status)
+from glustolibs.io.utils import (
+ collect_mounts_arequal,
+ wait_for_io_to_complete)
+from glustolibs.misc.misc_libs import upload_scripts
+
+
+@runs_on([['arbiter', 'distributed-arbiter', 'dispersed', 'replicated',
+ 'distributed-dispersed', 'distributed-replicated', 'distributed'],
+ ['glusterfs']])
+class TestRenameDuringRebalance(GlusterBaseClass):
+ """Renaming Files during rebalance"""
+
+ def setUp(self):
+ """Setup Volume"""
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+ self.all_mounts_procs = []
+
+ # Setup Volume and Mount Volume
+ g.log.info("Starting to Setup Volume and mount it")
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Setup Volume and Mount it")
+
+ # Upload io script for running IO on mounts
+ self.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(self.mounts[0].client_system,
+ self.script_upload_path)
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts to clients")
+
+ @skip('Skipping due to Bug 1755834')
+ def test_rename_file_rebalance(self):
+ """
+ Test file renames during rebalance
+ - Create a volume
+ - Create directories or files
+ - Calculate the checksum using arequal
+ - Add brick and start rebalance
+ - While rebalance is running, rename files or directories.
+ - After rebalancing calculate checksum.
+ """
+ # Taking the instance of mount point.
+ mount_point = self.mounts[0].mountpoint
+
+ # Creating main directory.
+ ret = mkdir(self.mounts[0].client_system,
+ "{}/main".format(mount_point))
+ self.assertTrue(ret, "mkdir of dir main failed")
+
+ # Creating Files.
+ self.all_mounts_procs = []
+ command = ("/usr/bin/env python {} create_files"
+ " {}/main/ -f 4000"
+ " --fixed-file-size 1k".format(self.script_upload_path,
+ mount_point))
+ proc = g.run_async(self.mounts[0].client_system, command,
+ user=self.mounts[0].user)
+ self.all_mounts_procs.append(proc)
+ g.log.info("IO on %s:%s is started successfully",
+ self.mounts[0].client_system, mount_point)
+
+ # Wait for IO completion.
+ self.assertTrue(wait_for_io_to_complete(self.all_mounts_procs,
+ self.mounts[0]),
+ "IO failed on some of the clients")
+ g.log.info("IO completed on the clients")
+
+ # Getting the arequal checksum.
+ arequal_checksum_before_rebalance = collect_mounts_arequal(self.mounts)
+
+ # Log Volume Info and Status before expanding the volume.
+ log_volume_info_and_status(self.mnode, self.volname)
+
+ # Expanding volume by adding bricks to the volume
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, ("Failed to expand the volume %s", self.volname))
+ g.log.info("Expanding volume is successful on "
+ "volume %s", self.volname)
+
+ # Log Volume Info and Status after expanding the volume.
+ log_volume_info_and_status(self.mnode, self.volname)
+
+ # Start Rebalance
+ ret, _, _ = rebalance_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, ("Failed to start rebalance on the volume "
+ "%s", self.volname))
+ g.log.info("Successfully started rebalance on the volume %s",
+ self.volname)
+
+ # Check that rebalance status is "in progress"
+ rebalance_status = get_rebalance_status(self.mnode, self.volname)
+ ret = rebalance_status['aggregate']['statusStr']
+ self.assertEqual(ret, "in progress", ("Rebalance is not in "
+ "'in progress' state, either "
+ "rebalance is in completed state"
+ " or failed to get rebalance "
+ " status"))
+ g.log.info("Rebalance is in 'in progress' state")
+
+ # Renaming the files during rebalance.
+ self.all_mounts_procs = []
+ command = ("/usr/bin/env python {} mv"
+ " {}/main/ --postfix re ".format(
+ self.script_upload_path,
+ mount_point))
+ proc = g.run_async(self.mounts[0].client_system, command,
+ user=self.mounts[0].user)
+ g.log.info("IO on %s:%s is started successfully",
+ self.mounts[0].client_system, mount_point)
+ self.all_mounts_procs.append(proc)
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
+ self.assertTrue(ret, ("Rebalace is not yet complete on the volume "
+ "%s", self.volname))
+ g.log.info("Rebalance is successfully complete on the volume %s",
+ self.volname)
+
+ # Wait for IO completion.
+ self.assertTrue(wait_for_io_to_complete(self.all_mounts_procs,
+ self.mounts[0]),
+ "IO failed on some of the clients")
+ g.log.info("IO is successful on all mounts")
+
+ # Getting arequal checksum after rebalance
+ arequal_checksum_after_rebalance = collect_mounts_arequal(self.mounts)
+
+ # Comparing arequals checksum before and after rebalance.
+ self.assertEqual(arequal_checksum_before_rebalance,
+ arequal_checksum_after_rebalance,
+ "arequal checksum is NOT MATCHING")
+ g.log.info("arequal checksum is SAME")
+
+ def tearDown(self):
+ """tear Down Callback"""
+ # Unmount Volume and Cleanup volume.
+ g.log.info("Starting to Umount Volume and Cleanup Volume")
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Unmount Volume and Cleanup volume")
+ g.log.info("Successful in Unmount Volume and Cleanup Volume")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
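The file_dir_ops.py "mv" operation used in the test above is assumed to rename every file under the given directory by appending the --postfix value; the script itself is not part of this patch, so the helper below is only an illustrative standalone equivalent:

    import os

    def rename_with_postfix(dirpath, postfix):
        """Rename each regular file in dirpath from <name> to <name><postfix>."""
        for name in os.listdir(dirpath):
            src = os.path.join(dirpath, name)
            if os.path.isfile(src):
                os.rename(src, src + postfix)

    # e.g. rename_with_postfix('/mnt/glusterfs/main', 're')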
diff --git a/tests/functional/dht/test_rebalance_two_volumes.py b/tests/functional/dht/test_rebalance_two_volumes.py
new file mode 100644
index 000000000..c96f75586
--- /dev/null
+++ b/tests/functional/dht/test_rebalance_two_volumes.py
@@ -0,0 +1,163 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.rebalance_ops import (
+ rebalance_start, wait_for_rebalance_to_complete)
+from glustolibs.gluster.volume_libs import expand_volume
+from glustolibs.io.utils import collect_mounts_arequal
+from glustolibs.gluster.mount_ops import mount_volume
+from glustolibs.gluster.volume_ops import (volume_create, volume_start,
+ volume_stop, volume_delete)
+from glustolibs.gluster.lib_utils import form_bricks_list
+
+
+@runs_on([['distributed', 'distributed-replicated'], ['glusterfs']])
+class TestRebalanceTwoVolumes(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume
+ if not self.setup_volume_and_mount_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Setup and mount volume")
+
+ self.first_client = self.mounts[0].client_system
+
+ self.second_vol_name = "second_volume"
+ self.second_mountpoint = "/mnt/{}".format(self.second_vol_name)
+ self.is_second_volume_created = False
+
+ def tearDown(self):
+
+ # Unmount and clean volume
+ if not self.unmount_volume_and_cleanup_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Cleanup Volume")
+
+ if self.is_second_volume_created:
+ # Stop the 2nd volume
+ ret, _, _ = volume_stop(self.mnode, self.second_vol_name)
+ self.assertEqual(ret, 0, ("volume stop failed for %s"
+ % self.second_vol_name))
+ g.log.info("Volume %s stopped", self.second_vol_name)
+
+ # Delete the 2nd volume
+ ret = volume_delete(self.mnode, self.second_vol_name)
+ self.assertTrue(ret, ("Failed to cleanup the Volume "
+ "%s", self.second_vol_name))
+ g.log.info("Volume deleted successfully : %s",
+ self.second_vol_name)
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_rebalance_two_volumes(self):
+ """
+ Test case:
+ 1. Create a volume, start it and mount it
+ 2. Create a 2nd volume, start it and mount it
+ 3. Create files on mount points
+ 4. Collect arequal checksum on mount point pre-rebalance
+ 5. Expand the volumes
+ 6. Start rebalance simultaneously on the 2 volumes
+ 7. Wait for rebalance to complete
+ 8. Collect arequal checksum on mount point post-rebalance
+ and compare with value from step 4
+ """
+
+ # Get brick list
+ bricks_list = form_bricks_list(self.mnode, self.volname, 3,
+ self.servers, self.all_servers_info)
+ self.assertIsNotNone(bricks_list, "Bricks list is None")
+
+ # Create 2nd volume
+ ret, _, _ = volume_create(self.mnode, self.second_vol_name,
+ bricks_list)
+ self.assertEqual(ret, 0, ("Failed to create volume %s") % (
+ self.second_vol_name))
+ g.log.info("Volume %s created successfully", self.second_vol_name)
+
+ # Start 2nd volume
+ ret, _, _ = volume_start(self.mnode, self.second_vol_name)
+ self.assertEqual(ret, 0, ("Failed to start volume %s") % (
+ self.second_vol_name))
+ g.log.info("Started volume %s", self.second_vol_name)
+
+ self.is_second_volume_created = True
+
+ # Mount 2nd volume
+ for mount_obj in self.mounts:
+ ret, _, _ = mount_volume(self.second_vol_name,
+ mtype=self.mount_type,
+ mpoint=self.second_mountpoint,
+ mserver=self.mnode,
+ mclient=mount_obj.client_system)
+ self.assertEqual(ret, 0, ("Failed to mount volume %s") % (
+ self.second_vol_name))
+ g.log.info("Volume mounted successfully : %s",
+ self.second_vol_name)
+
+ # Start I/O from mount point for volume 1 and wait for it to complete
+ cmd = ("cd %s; for i in {1..1000} ; do "
+ "dd if=/dev/urandom of=file$i bs=10M count=1; done"
+ % self.mounts[0].mountpoint)
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertEqual(ret, 0, "IO failed on volume %s"
+ % self.volname)
+
+ # Start I/O from mount point for volume 2 and wait for it to complete
+ cmd = ("cd %s; for i in {1..1000} ; do "
+ "dd if=/dev/urandom of=file$i bs=10M count=1; done"
+ % self.second_mountpoint)
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertEqual(ret, 0, "IO failed on volume %s"
+ % self.second_vol_name)
+
+ # Collect arequal checksum before rebalance
+ arequal_checksum_before = collect_mounts_arequal(self.mounts[0])
+
+ # Add bricks to volumes
+ for volume in (self.volname, self.second_vol_name):
+ ret = expand_volume(self.mnode, volume, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, "Failed to add brick on volume %s"
+ % volume)
+
+ # Trigger rebalance
+ for volume in (self.volname, self.second_vol_name):
+ ret, _, _ = rebalance_start(self.mnode, volume,
+ force=True)
+ self.assertEqual(ret, 0, "Failed to start rebalance on the"
+ " volume %s" % volume)
+
+ # Wait for rebalance to complete
+ for volume in (self.volname, self.second_vol_name):
+ ret = wait_for_rebalance_to_complete(self.mnode, volume,
+ timeout=1200)
+ self.assertTrue(ret, "Rebalance is not yet complete on the volume"
+ " %s" % volume)
+ g.log.info("Rebalance successfully completed")
+
+ # Collect arequal checksum after rebalance
+ arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
+
+ # Check for data loss by comparing arequal before and after rebalance
+ self.assertEqual(arequal_checksum_before, arequal_checksum_after,
+ "arequal checksum is NOT MATCHNG")
+ g.log.info("arequal checksum is SAME")
diff --git a/tests/functional/dht/test_rebalance_with_acl_set_to_files.py b/tests/functional/dht/test_rebalance_with_acl_set_to_files.py
new file mode 100644
index 000000000..d290ae56a
--- /dev/null
+++ b/tests/functional/dht/test_rebalance_with_acl_set_to_files.py
@@ -0,0 +1,129 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.glusterfile import set_acl, get_acl
+from glustolibs.gluster.lib_utils import add_user, del_user
+from glustolibs.gluster.mount_ops import mount_volume
+from glustolibs.gluster.rebalance_ops import (
+ rebalance_start, wait_for_rebalance_to_complete)
+from glustolibs.gluster.volume_libs import expand_volume
+from glustolibs.io.utils import collect_mounts_arequal
+
+
+@runs_on([['distributed-replicated', 'distributed-arbiter', 'distributed',
+ 'replicated', 'arbiter', 'distributed-dispersed',
+ 'dispersed'], ['glusterfs']])
+class TestRebalanceWithAclSetToFiles(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume
+ if not self.setup_volume():
+ raise ExecutionError("Failed to Setup volume")
+
+ self.first_client = self.mounts[0].client_system
+ self.mount_point = self.mounts[0].mountpoint
+
+ # Mount volume with -o acl option
+ ret, _, _ = mount_volume(self.volname, self.mount_type,
+ self.mount_point, self.mnode,
+ self.first_client, options='acl')
+ if ret:
+ raise ExecutionError("Failed to mount volume")
+
+ # Create a non-root user
+ if not add_user(self.first_client, 'joker'):
+ raise ExecutionError("Failed to create user joker")
+
+ def tearDown(self):
+
+ # Remove non-root user created for test
+ if not del_user(self.first_client, 'joker'):
+ raise ExecutionError("Failed to remove user joker")
+
+ if not self.unmount_volume_and_cleanup_volume([self.mounts[0]]):
+ raise ExecutionError("Failed to Cleanup Volume")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def _check_acl_set_to_files(self):
+ """Check acl values set to files"""
+ for number in range(1, 11):
+ ret = get_acl(self.first_client, self.mount_point,
+ 'file{}'.format(str(number)))
+ self.assertIn('user:joker:rwx', ret['rules'],
+ "Rule not present in getfacl output")
+
+ def test_add_brick_rebalance_with_acl_set_to_files(self):
+ """
+ Test case:
+ 1. Create a volume, start it and mount it to a client.
+ 2. Create 10 files on the mount point and set acls on the files.
+ 3. Check the acl value and collect arequal-checksum.
+ 4. Add bricks to the volume and start rebalance.
+ 5. Check the value of acl(it should be same as step 3),
+ collect and compare arequal-checksum with the one collected
+ in step 3
+ """
+ # Create 10 files on the mount point.
+ cmd = ("cd {}; for i in `seq 1 10`;do touch file$i;done"
+ .format(self.mount_point))
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertFalse(ret, "Failed to create files on mount point")
+
+ for number in range(1, 11):
+ ret = set_acl(self.first_client, 'u:joker:rwx', '{}/file{}'
+ .format(self.mount_point, str(number)))
+ self.assertTrue(ret, "Failed to set acl on files")
+
+ # Collect arequal on mount point and check acl value
+ arequal_checksum_before = collect_mounts_arequal(self.mounts[0])
+ self._check_acl_set_to_files()
+ g.log.info("Files created and acl set to files properly")
+
+ # Add brick to volume
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, "Failed to add brick on volume %s"
+ % self.volname)
+
+ # Trigger rebalance and wait for it to complete
+ ret, _, _ = rebalance_start(self.mnode, self.volname,
+ force=True)
+ self.assertEqual(ret, 0, "Failed to start rebalance on the volume %s"
+ % self.volname)
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
+ timeout=1200)
+ self.assertTrue(ret, "Rebalance is not yet complete on the volume "
+ "%s" % self.volname)
+ g.log.info("Rebalance successfully completed")
+
+ # Check acl value if it's same as before rebalance
+ self._check_acl_set_to_files()
+
+ # Check for data loss by comparing arequal before and after ops
+ arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
+ self.assertEqual(arequal_checksum_before, arequal_checksum_after,
+ "arequal checksum is NOT MATCHNG")
+ g.log.info("arequal checksum and acl value are SAME")
diff --git a/tests/functional/dht/test_rebalance_with_brick_down.py b/tests/functional/dht/test_rebalance_with_brick_down.py
new file mode 100644
index 000000000..fd0a0ffe2
--- /dev/null
+++ b/tests/functional/dht/test_rebalance_with_brick_down.py
@@ -0,0 +1,171 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Description:
+ Rebalance with one brick down in replica
+"""
+
+from random import choice
+
+from glusto.core import Glusto as g
+
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.rebalance_ops import (rebalance_start,
+ wait_for_rebalance_to_complete)
+from glustolibs.gluster.volume_libs import (
+ expand_volume,
+ log_volume_info_and_status,
+ volume_start)
+from glustolibs.gluster.brick_libs import (
+ get_all_bricks,
+ bring_bricks_offline)
+from glustolibs.gluster.heal_libs import monitor_heal_completion
+from glustolibs.io.utils import (
+ wait_for_io_to_complete,
+ collect_mounts_arequal)
+from glustolibs.misc.misc_libs import upload_scripts
+
+
+@runs_on([['distributed-arbiter', 'distributed-replicated',
+ 'distributed-dispersed'], ['glusterfs']])
+class TestRebalanceWithBrickDown(GlusterBaseClass):
+ """ Rebalance with brick down in replica"""
+
+ def setUp(self):
+ """Setup Volume"""
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+ self.all_mounts_procs = []
+
+ # Setup and mount the volume
+ g.log.info("Starting to setup and mount the volume")
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Setup Volume and Mount it")
+
+ # Upload IO script for running IO on mounts
+ self.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(self.mounts[0].client_system,
+ self.script_upload_path)
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts to clients")
+
+ def test_rebalance_with_brick_down(self):
+ """
+ Rebalance with brick down in replica
+ - Create a Replica volume.
+ - Bring down one of the brick down in the replica pair
+ - Do some IO and create files on the mount point
+ - Add a pair of bricks to the volume
+ - Initiate rebalance
+ - Bring back the brick which was down.
+ - After self heal happens, all the files should be present.
+ """
+ # Log the volume info and status before brick is down.
+ log_volume_info_and_status(self.mnode, self.volname)
+
+ # Bring one of the bricks offline
+ brick_list = get_all_bricks(self.mnode, self.volname)
+ ret = bring_bricks_offline(self.volname, choice(brick_list))
+ self.assertTrue(ret, "Failed to bring brick offline")
+
+ # Log the volume info and status after brick is down.
+ log_volume_info_and_status(self.mnode, self.volname)
+
+ # Create files at mountpoint.
+ cmd = (
+ "/usr/bin/env python %s create_files "
+ "-f 2000 --fixed-file-size 1k --base-file-name file %s"
+ % (self.script_upload_path, self.mounts[0].mountpoint))
+ proc = g.run_async(
+ self.mounts[0].client_system, cmd, user=self.mounts[0].user)
+ self.all_mounts_procs.append(proc)
+
+ # Wait for IO to complete.
+ self.assertTrue(wait_for_io_to_complete(self.all_mounts_procs,
+ self.mounts[0]),
+ "IO failed on some of the clients")
+ g.log.info("IO completed on the clients")
+
+ # Compute the arequal checksum before bringing all bricks online
+ arequal_before_all_bricks_online = collect_mounts_arequal(self.mounts)
+
+ # Log the volume info and status before expanding volume.
+ log_volume_info_and_status(self.mnode, self.volname)
+
+ # Expand the volume.
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, ("Failed to expand the volume %s", self.volname))
+ g.log.info("Expanding volume is successful on "
+ "volume %s", self.volname)
+
+ # Log the volume info after expanding volume.
+ log_volume_info_and_status(self.mnode, self.volname)
+
+ # Start Rebalance.
+ ret, _, _ = rebalance_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, ("Failed to start rebalance on the volume "
+ "%s", self.volname))
+ g.log.info("Successfully started rebalance on the volume %s",
+ self.volname)
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
+ self.assertTrue(ret, ("Rebalance is not yet complete on the volume "
+ "%s", self.volname))
+ g.log.info("Rebalance is successfully complete on the volume %s",
+ self.volname)
+
+ # Log the volume info and status before bringing all bricks online
+ log_volume_info_and_status(self.mnode, self.volname)
+
+ # Bring all bricks online.
+ ret, _, _ = volume_start(self.mnode, self.volname, force=True)
+ self.assertEqual(ret, 0, "Not able to start volume with force option")
+ g.log.info("Volume start with force option successful.")
+
+ # Log the volume info and status after bringing all bricks online
+ log_volume_info_and_status(self.mnode, self.volname)
+
+ # Monitor heal completion.
+ ret = monitor_heal_completion(self.mnode, self.volname)
+ self.assertTrue(ret, "heal has not yet completed")
+ g.log.info("Self heal completed")
+
+ # Compute the arequal checksum after all bricks online.
+ arequal_after_all_bricks_online = collect_mounts_arequal(self.mounts)
+
+ # Comparing arequal checksum before and after the operations.
+ self.assertEqual(arequal_before_all_bricks_online,
+ arequal_after_all_bricks_online,
+ "arequal checksum is NOT MATCHING")
+ g.log.info("arequal checksum is SAME")
+
+ def tearDown(self):
+ """tear Down callback"""
+ # Unmount Volume and cleanup.
+ g.log.info("Starting to Unmount Volume and Cleanup")
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Filed to Unmount Volume and "
+ "Cleanup Volume")
+ g.log.info("Successful in Unmount Volume and cleanup.")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
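bring_bricks_offline only reports whether the kill was issued; a stricter check (sketch) also confirms the resulting state with are_bricks_offline from the same brick_libs module:

    from random import choice

    from glustolibs.gluster.brick_libs import (are_bricks_offline,
                                               bring_bricks_offline,
                                               get_all_bricks)

    def take_one_brick_offline(mnode, volname):
        """Bring one random brick offline and verify it is really down."""
        brick = choice(get_all_bricks(mnode, volname))
        if not bring_bricks_offline(volname, brick):
            raise RuntimeError("Failed to bring %s offline" % brick)
        if not are_bricks_offline(mnode, volname, [brick]):
            raise RuntimeError("%s is still online" % brick)
        return brick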
diff --git a/tests/functional/dht/test_rebalance_with_hidden_files.py b/tests/functional/dht/test_rebalance_with_hidden_files.py
index b0cb9b340..40fe7b976 100644
--- a/tests/functional/dht/test_rebalance_with_hidden_files.py
+++ b/tests/functional/dht/test_rebalance_with_hidden_files.py
@@ -91,16 +91,19 @@ class RebalanceValidation(GlusterBaseClass):
# Start IO on mounts
g.log.info("Starting IO on all mounts...")
self.all_mounts_procs = []
+ counter = 1
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s", mount_obj.client_system,
mount_obj.mountpoint)
cmd = ("/usr/bin/env python %s create_files "
- "--base-file-name . -f 99 %s" % (
+ "--base-file-name .file%d -f 99 %s" % (
self.script_upload_path,
+ counter,
mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
self.all_mounts_procs.append(proc)
+ counter += 100
# validate IO
self.assertTrue(
@@ -173,7 +176,8 @@ class RebalanceValidation(GlusterBaseClass):
# Wait for rebalance to complete
g.log.info("Waiting for rebalance to complete")
- ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
+ timeout=1800)
self.assertTrue(ret, ("Rebalance is not yet complete on the volume "
"%s", self.volname))
g.log.info("Rebalance is successfully complete on the volume %s",
diff --git a/tests/functional/dht/test_rebalance_with_quota.py b/tests/functional/dht/test_rebalance_with_quota.py
new file mode 100644
index 000000000..5abb2ca1a
--- /dev/null
+++ b/tests/functional/dht/test_rebalance_with_quota.py
@@ -0,0 +1,188 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Description:
+ Rebalance with quota on mountpoint.
+"""
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.rebalance_ops import (rebalance_start,
+ get_rebalance_status,
+ wait_for_rebalance_to_complete)
+from glustolibs.gluster.volume_libs import (
+ log_volume_info_and_status,
+ expand_volume)
+from glustolibs.gluster.quota_ops import (
+ quota_enable,
+ quota_set_hard_timeout,
+ quota_set_soft_timeout,
+ quota_limit_usage)
+from glustolibs.gluster.quota_libs import quota_validate
+from glustolibs.io.utils import (
+ wait_for_io_to_complete,
+ collect_mounts_arequal)
+from glustolibs.misc.misc_libs import upload_scripts
+
+
+@runs_on([['distributed', 'distributed-replicated', 'distributed-dispersed'],
+ ['glusterfs']])
+class TestRebalanceWithQuotaOnRoot(GlusterBaseClass):
+ """ Rebalance with quota enabled on mountpoint """
+
+ def setUp(self):
+ """Setup Volume"""
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+ self.all_mounts_procs = []
+
+ # Setup and Mount the volume
+ g.log.info("Starting to Setup volume and mount it.")
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Setup Volume and mount it")
+
+ # Upload IO script for running IO on mounts
+ self.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(self.mounts[0].client_system,
+ self.script_upload_path)
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts to client")
+
+ def test_rebalance_with_quota_enabled(self):
+ """
+ Test rebalance with quota enabled on root.
+ 1. Create Volume of type distribute
+ 2. Set Quota limit on the root directory
+ 3. Do some IO to reach the Hard limit
+ 4. After IO ends, compute arequal checksum
+ 5. Add bricks to the volume.
+ 6. Start rebalance
+ 7. After rebalance is completed, check arequal checksum
+ """
+ # Enable Quota
+ ret, _, _ = quota_enable(self.mnode, self.volname)
+ self.assertEqual(
+ ret, 0, ("Failed to enable quota on the volume %s", self.volname))
+ g.log.info("Successfully enabled quota on volume %s", self.volname)
+
+ # Set the Quota timeouts to 0 for strict accounting
+ ret, _, _ = quota_set_hard_timeout(self.mnode, self.volname, 0)
+ self.assertEqual(
+ ret, 0, ("Failed to set hard-timeout to 0 for %s", self.volname))
+ ret, _, _ = quota_set_soft_timeout(self.mnode, self.volname, 0)
+ self.assertEqual(
+ ret, 0, ("Failed to set soft-timeout to 0 for %s", self.volname))
+ g.log.info(
+ "Quota soft and hard timeout has been set to 0 for %s",
+ self.volname)
+
+ # Set the quota limit of 1 GB on root dir of the volume
+ ret, _, _ = quota_limit_usage(self.mnode, self.volname, "/", "1GB")
+ self.assertEqual(ret, 0, "Failed to set Quota for dir root")
+ g.log.info("Successfully set quota limit for dir root")
+
+ # Do some IO until hard limit is reached.
+ cmd = (
+ "/usr/bin/env python %s create_files "
+ "-f 1024 --fixed-file-size 1M --base-file-name file %s"
+ % (self.script_upload_path, self.mounts[0].mountpoint))
+ proc = g.run_async(
+ self.mounts[0].client_system, cmd, user=self.mounts[0].user)
+ self.all_mounts_procs.append(proc)
+
+ # Wait for IO to complete and validate IO
+ self.assertTrue(wait_for_io_to_complete(self.all_mounts_procs,
+ self.mounts[0]),
+ "IO failed on some of the clients")
+ g.log.info("IO completed on the clients")
+
+ # Validate quota
+ ret = quota_validate(self.mnode, self.volname,
+ path='/', hard_limit=1073741824,
+ sl_exceeded=True, hl_exceeded=True)
+ self.assertTrue(ret, "Quota validate Failed for '/'")
+ g.log.info("Quota Validated for path '/'")
+
+ # Compute arequal checksum.
+ arequal_checksum_before_rebalance = collect_mounts_arequal(self.mounts)
+
+ # Log Volume info and status before expanding volume.
+ log_volume_info_and_status(self.mnode, self.volname)
+
+ # Expand the volume.
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, ("Failed to expand the volume %s", self.volname))
+ g.log.info("Expanding volume is successful on "
+ "volume %s", self.volname)
+
+ # Log volume info and status after expanding volume.
+ log_volume_info_and_status(self.mnode, self.volname)
+
+ # Perform rebalance start operation.
+ ret, _, _ = rebalance_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, ("Failed to start rebalance on the volume "
+ "%s", self.volname))
+ g.log.info("Rebalance started.")
+
+ # Check rebalance is in progress
+ rebalance_status = get_rebalance_status(self.mnode, self.volname)
+ ret = rebalance_status['aggregate']['statusStr']
+ self.assertEqual(ret, "in progress", ("Rebalance is not in "
+ "'in progress' state, either "
+ "rebalance is in completed state"
+ " or failed to get rebalance "
+ "status"))
+ g.log.info("Rebalance is 'in progress' state")
+
+ # Wait till rebalance ends.
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
+ self.assertTrue(ret, ("Rebalance is not yet complete on the volume "
+ "%s", self.volname))
+ g.log.info("Rebalance is successfully complete on the volume %s",
+ self.volname)
+
+ # Validate quota
+ ret = quota_validate(self.mnode, self.volname,
+ path='/', hard_limit=1073741824,
+ sl_exceeded=True, hl_exceeded=True)
+ self.assertTrue(ret, "Quota validate Failed for '/'")
+ g.log.info("Quota Validated for path '/'")
+
+ # Compute arequal checksum.
+ arequal_checksum_after_rebalance = collect_mounts_arequal(self.mounts)
+
+ # Comparing arequals checksum before and after rebalance.
+ self.assertEqual(arequal_checksum_before_rebalance,
+ arequal_checksum_after_rebalance,
+ "arequal checksum is NOT MATCHING")
+ g.log.info("arequal checksum is SAME")
+
+ def tearDown(self):
+ """tear Down Callback"""
+ # Unmount volume and do cleanup
+ g.log.info("Starting to Unmount volume and cleanup")
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Faile to Unmount and cleanup volume")
+ g.log.info("Successful in Unmount and cleanup of volumes")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
diff --git a/tests/functional/dht/test_rebalance_with_quota_on_subdirectory.py b/tests/functional/dht/test_rebalance_with_quota_on_subdirectory.py
new file mode 100644
index 000000000..bddb9aeb6
--- /dev/null
+++ b/tests/functional/dht/test_rebalance_with_quota_on_subdirectory.py
@@ -0,0 +1,195 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Description:
+ Rebalance with quota on subdirectory.
+"""
+
+from glusto.core import Glusto as g
+
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.glusterdir import mkdir
+from glustolibs.gluster.rebalance_ops import (rebalance_start,
+ get_rebalance_status,
+ wait_for_rebalance_to_complete)
+from glustolibs.gluster.volume_libs import (
+ log_volume_info_and_status,
+ expand_volume)
+from glustolibs.gluster.quota_ops import (
+ quota_enable,
+ quota_set_hard_timeout,
+ quota_set_soft_timeout,
+ quota_limit_usage)
+from glustolibs.gluster.quota_libs import quota_validate
+from glustolibs.io.utils import (
+ wait_for_io_to_complete,
+ collect_mounts_arequal)
+from glustolibs.misc.misc_libs import upload_scripts
+
+
+@runs_on([['distributed', 'distributed-replicated', 'distributed-dispersed'],
+ ['glusterfs']])
+class TestRebalanceWithQuotaOnSubDir(GlusterBaseClass):
+ """ Rebalance with quota enabled on subdirectory """
+
+ def setUp(self):
+ """Setup Volume"""
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+ self.all_mounts_procs = []
+
+ # Setup and Mount the volume
+ g.log.info("Starting to Setup volume and mount it.")
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Setup Volume and mount it")
+
+ # Upload IO script for running IO on mounts
+ self.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(self.mounts[0].client_system,
+ self.script_upload_path)
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts to client")
+
+ def test_rebalance_with_quota_enabled_on_subdirectory(self):
+ """
+ Test rebalance with quota enabled on subdirectory.
+ 1. Create Volume of type distribute
+ 2. Set Quota limit on subdirectory
+ 3. Do some IO to reach the Hard limit
+ 4. After IO ends, compute arequal checksum
+ 5. Add bricks to the volume.
+ 6. Start rebalance
+ 7. After rebalance is completed, check arequal checksum
+ """
+ # Creating main directory.
+ ret = mkdir(self.mounts[0].client_system,
+ "{}/main".format(self.mounts[0].mountpoint))
+ self.assertTrue(ret, "mkdir of dir main failed")
+
+ # Enable Quota
+ ret, _, _ = quota_enable(self.mnode, self.volname)
+ self.assertEqual(
+ ret, 0, ("Failed to enable quota on the volume %s", self.volname))
+ g.log.info("Successfully enabled quota on volume %s", self.volname)
+
+ # Set the Quota timeouts to 0 for strict accounting
+ ret, _, _ = quota_set_hard_timeout(self.mnode, self.volname, 0)
+ self.assertEqual(
+ ret, 0, ("Failed to set hard-timeout to 0 for %s", self.volname))
+ ret, _, _ = quota_set_soft_timeout(self.mnode, self.volname, 0)
+ self.assertEqual(
+ ret, 0, ("Failed to set soft-timeout to 0 for %s", self.volname))
+ g.log.info(
+ "Quota soft and hard timeout has been set to 0 for %s",
+ self.volname)
+
+ # Set the quota limit of 1 GB on /main dir of the volume
+ ret, _, _ = quota_limit_usage(self.mnode, self.volname, "/main",
+ "1GB")
+ self.assertEqual(ret, 0, "Failed to set Quota for dir /main")
+ g.log.info("Successfully set quota limit for dir /main")
+
+ # Do some IO until hard limit is reached.
+ cmd = (
+ "/usr/bin/env python %s create_files "
+ "-f 1024 --fixed-file-size 1M --base-file-name file %s/main/"
+ % (self.script_upload_path, self.mounts[0].mountpoint))
+ proc = g.run_async(
+ self.mounts[0].client_system, cmd, user=self.mounts[0].user)
+ self.all_mounts_procs.append(proc)
+
+ # Wait for IO to complete and validate IO
+ self.assertTrue(wait_for_io_to_complete(self.all_mounts_procs,
+ self.mounts[0]),
+ "IO failed on some of the clients")
+ g.log.info("IO completed on the clients")
+
+ # Validate quota
+ ret = quota_validate(self.mnode, self.volname,
+ path='/main', hard_limit=1073741824,
+ sl_exceeded=True, hl_exceeded=True)
+ self.assertTrue(ret, "Quota validate Failed for '/main'")
+ g.log.info("Quota Validated for path '/main'")
+
+ # Compute arequal checksum.
+ arequal_checksum_before_rebalance = collect_mounts_arequal(self.mounts)
+
+ # Log Volume info and status before expanding volume.
+ log_volume_info_and_status(self.mnode, self.volname)
+
+ # Expand the volume.
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, ("Failed to expand the volume %s", self.volname))
+ g.log.info("Expanding volume is successful on "
+ "volume %s", self.volname)
+
+ # Log volume info and status after expanding volume.
+ log_volume_info_and_status(self.mnode, self.volname)
+
+ # Perform rebalance start operation.
+ ret, _, _ = rebalance_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, ("Failed to start rebalance on the volume "
+ "%s", self.volname))
+ g.log.info("Rebalance started.")
+
+ # Check rebalance is in progress
+ rebalance_status = get_rebalance_status(self.mnode, self.volname)
+ ret = rebalance_status['aggregate']['statusStr']
+ self.assertEqual(ret, "in progress", ("Rebalance is not in "
+ "'in progress' state, either "
+ "rebalance is in completed state"
+ " or failed to get rebalance "
+ "status"))
+
+ # Wait till rebalance ends.
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
+ self.assertTrue(ret, ("Rebalance is not yet complete on the volume "
+ "%s", self.volname))
+ g.log.info("Rebalance is successfully complete on the volume %s",
+ self.volname)
+
+ # Validate quota
+ ret = quota_validate(self.mnode, self.volname,
+ path='/main', hard_limit=1073741824,
+ sl_exceeded=True, hl_exceeded=True)
+ self.assertTrue(ret, "Quota validate Failed for '/main'")
+ g.log.info("Quota Validated for path '/main'")
+
+ # Compute arequal checksum.
+ arequal_checksum_after_rebalance = collect_mounts_arequal(self.mounts)
+
+ # Comparing arequals checksum before and after rebalance.
+ self.assertEqual(arequal_checksum_before_rebalance,
+ arequal_checksum_after_rebalance,
+ "arequal checksum is NOT MATCHING")
+ g.log.info("arequal checksum is SAME")
+
+ def tearDown(self):
+ """tear Down Callback"""
+ # Unmount volume and do cleanup
+ g.log.info("Starting to Unmount volume and cleanup")
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Faile to Unmount and cleanup volume")
+ g.log.info("Successful in Unmount and cleanup of volumes")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
diff --git a/tests/functional/dht/test_rebalance_with_special_files.py b/tests/functional/dht/test_rebalance_with_special_files.py
new file mode 100644
index 000000000..c3cb33cca
--- /dev/null
+++ b/tests/functional/dht/test_rebalance_with_special_files.py
@@ -0,0 +1,158 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Description:
+ Rebalance with special files
+"""
+
+from glusto.core import Glusto as g
+
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.rebalance_ops import (
+ rebalance_start,
+ get_rebalance_status,
+ wait_for_rebalance_to_complete)
+from glustolibs.gluster.volume_libs import (
+ expand_volume,
+ log_volume_info_and_status)
+from glustolibs.io.utils import wait_for_io_to_complete
+from glustolibs.misc.misc_libs import upload_scripts
+
+
+@runs_on([['distributed', 'distributed-arbiter', 'distributed-replicated',
+ 'distributed-dispersed'], ['glusterfs']])
+class TestRebalanceWithSpecialFiles(GlusterBaseClass):
+ """ Rebalance with special files"""
+
+ def setUp(self):
+ """Setup Volume"""
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+ self.all_mounts_procs = []
+
+ # Setup and mount the volume
+ g.log.info("Starting to setup and mount the volume")
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Setup Volume and Mount it")
+
+ # Upload IO script for running IO on mounts
+ self.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(self.mounts[0].client_system,
+ self.script_upload_path)
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts to clients")
+
+ def test_rebalance_with_special_files(self):
+ """
+ Rebalance with special files
+ - Create Volume and start it.
+ - Create some special files on mount point.
+ - Once it is complete, start some IO.
+ - Add brick into the volume and start rebalance
+ - All IO should be successful.
+ """
+ # Create pipe files at mountpoint.
+ cmd = (
+ "for i in {1..500};do mkfifo %s/fifo${i}; done"
+ % (self.mounts[0].mountpoint))
+ ret, _, _ = g.run(self.clients[0], cmd)
+ self.assertEqual(ret, 0, "Failed to create pipe files")
+ g.log.info("Pipe files created successfully")
+
+ # Create block device files at mountpoint.
+ cmd = (
+ "for i in {1..500};do mknod %s/blk${i} blockfile 1 5;done"
+ % (self.mounts[0].mountpoint))
+ ret, _, _ = g.run(self.clients[0], cmd)
+ self.assertEqual(ret, 0, "Failed to create block files")
+ g.log.info("Block files created successfully")
+
+ # Create character device files at mountpoint.
+ cmd = (
+ "for i in {1..500};do mknod %s/charc${i} characterfile 1 5;done"
+ % (self.mounts[0].mountpoint))
+ ret, _, _ = g.run(self.clients[0], cmd)
+ self.assertEqual(ret, 0, "Failed to create character files")
+ g.log.info("Character files created successfully")
+
+ # Create files at mountpoint.
+ cmd = (
+ "/usr/bin/env python %s create_files "
+ "-f 1000 --fixed-file-size 1M --base-file-name file %s"
+ % (self.script_upload_path, self.mounts[0].mountpoint))
+ proc = g.run_async(
+ self.mounts[0].client_system, cmd, user=self.mounts[0].user)
+ self.all_mounts_procs.append(proc)
+
+ # Log the volume info and status before expanding volume.
+ log_volume_info_and_status(self.mnode, self.volname)
+
+ # Expand the volume.
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, ("Failed to expand the volume %s", self.volname))
+ g.log.info("Expanding volume is successful on "
+ "volume %s", self.volname)
+
+ # Log the volume info after expanding volume.
+ log_volume_info_and_status(self.mnode, self.volname)
+
+ # Start Rebalance.
+ ret, _, _ = rebalance_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, ("Failed to start rebalance on the volume "
+ "%s", self.volname))
+ g.log.info("Successfully started rebalance on the volume %s",
+ self.volname)
+
+ # Check rebalance is in progress
+ rebalance_status = get_rebalance_status(self.mnode, self.volname)
+ ret = rebalance_status['aggregate']['statusStr']
+ self.assertEqual(ret, "in progress", ("Rebalance is not in "
+ "'in progress' state, either "
+ "rebalance is in completed state"
+ " or failed to get rebalance "
+ "status"))
+ g.log.info("Rebalance is in 'in progress' state")
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
+ self.assertTrue(ret, ("Rebalance is not yet complete on the volume "
+ "%s", self.volname))
+ g.log.info("Rebalance is successfully complete on the volume %s",
+ self.volname)
+
+ # Wait for IO to complete.
+ self.assertTrue(wait_for_io_to_complete(self.all_mounts_procs,
+ self.mounts[0]),
+ "IO failed on some of the clients")
+ g.log.info("IO completed on the clients")
+
+ def tearDown(self):
+ """tear Down callback"""
+ # Unmount Volume and cleanup.
+ g.log.info("Starting to Unmount Volume and Cleanup")
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Unmount Volume and "
+ "Cleanup Volume")
+ g.log.info("Successful in Unmount Volume and cleanup.")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
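mknod's type argument must be 'b' (block), 'c' (character) or 'p' (FIFO). The same special files created by the shell loops above can also be made with Python's os module, as in this sketch (device numbers as in the test; creating device nodes requires root):

    import os
    import stat

    os.mkfifo('/tmp/demo_fifo')                                        # named pipe
    os.mknod('/tmp/demo_blk', 0o644 | stat.S_IFBLK, os.makedev(1, 5))  # block device
    os.mknod('/tmp/demo_chr', 0o644 | stat.S_IFCHR, os.makedev(1, 5))  # character device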
diff --git a/tests/functional/dht/test_remove_brick_command_options.py b/tests/functional/dht/test_remove_brick_command_options.py
new file mode 100644
index 000000000..2e5b0c81a
--- /dev/null
+++ b/tests/functional/dht/test_remove_brick_command_options.py
@@ -0,0 +1,113 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.brick_libs import get_all_bricks
+from glustolibs.gluster.brick_ops import remove_brick
+from glustolibs.gluster.volume_libs import shrink_volume
+from glustolibs.gluster.volume_libs import form_bricks_list_to_remove_brick
+from glustolibs.io.utils import collect_mounts_arequal
+
+
+@runs_on([['distributed-replicated', 'distributed-dispersed',
+ 'distributed-arbiter', 'distributed'], ['glusterfs']])
+class TestRemoveBrickCommandOptions(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Creating Volume and mounting the volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Volume creation or mount failed: %s"
+ % self.volname)
+
+ def tearDown(self):
+
+ # Unmounting and cleaning volume
+ ret = self.unmount_volume_and_cleanup_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Unable to delete volume %s" % self.volname)
+
+ self.get_super_method(self, 'tearDown')()
+
+ def _run_io_on_mount_point(self, fname="file"):
+ """Create a few files on mount point"""
+ cmd = ("cd {};for i in `seq 1 5`; do mkdir dir$i;"
+ "for j in `seq 1 10`;do touch {}$j;done;done"
+ .format(self.mounts[0].mountpoint, fname))
+ ret, _, _ = g.run(self.mounts[0].client_system, cmd)
+ self.assertFalse(ret, "Failed to do I/O on mount point")
+
+ def test_remove_brick_command_basic(self):
+ """
+ Test case:
+ 1. Create a volume, start it and mount it.
+ 2. Create some data on the volume.
+ 3. Run remove-brick start, status and finally commit.
+ 4. Check if there is any data loss or not.
+ """
+ # Create some data on the volume
+ self._run_io_on_mount_point()
+
+ # Collect arequal checksum before ops
+ arequal_checksum_before = collect_mounts_arequal(self.mounts[0])
+
+ # Run remove-brick start, status and finally commit
+ ret = shrink_volume(self.mnode, self.volname)
+ self.assertTrue(ret, "Failed to remove-brick from volume")
+ g.log.info("Remove-brick rebalance successful")
+
+ # Check for data loss by comparing arequal before and after ops
+ arequal_checksum_after = collect_mounts_arequal(self.mounts[0])
+ self.assertEqual(arequal_checksum_before, arequal_checksum_after,
+ "arequal checksum is NOT MATCHNG")
+ g.log.info("arequal checksum is SAME")
+
+ def test_remove_brick_command_force(self):
+ """
+ Test case:
+ 1. Create a volume, start it and mount it.
+ 2. Create some data on the volume.
+ 3. Run remove-brick with force.
+ 4. Check if bricks are still seen on volume or not
+ """
+ # Create some data on the volume
+ self._run_io_on_mount_point()
+
+ # Remove-brick on the volume with force option
+ brick_list_to_remove = form_bricks_list_to_remove_brick(self.mnode,
+ self.volname)
+ self.assertIsNotNone(brick_list_to_remove, "Brick list is empty")
+
+ ret, _, _ = remove_brick(self.mnode, self.volname,
+ brick_list_to_remove, option="force")
+ self.assertFalse(ret, "Failed to run remove-brick with force")
+ g.log.info("Successfully run remove-brick with force")
+
+ # Get a list of all bricks
+ brick_list = get_all_bricks(self.mnode, self.volname)
+ self.assertIsNotNone(brick_list, "Brick list is empty")
+
+ # Check if bricks removed brick are present or not in brick list
+ for brick in brick_list_to_remove:
+ self.assertNotIn(brick, brick_list,
+ "Brick still present in brick list even "
+ "after removing")
diff --git a/tests/functional/dht/test_remove_brick_no_commit_followed_by_rebalance.py b/tests/functional/dht/test_remove_brick_no_commit_followed_by_rebalance.py
new file mode 100644
index 000000000..dc80a3544
--- /dev/null
+++ b/tests/functional/dht/test_remove_brick_no_commit_followed_by_rebalance.py
@@ -0,0 +1,169 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from time import sleep
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.io.utils import collect_mounts_arequal, validate_io_procs
+from glustolibs.misc.misc_libs import upload_scripts
+from glustolibs.gluster.volume_libs import (form_bricks_list_to_remove_brick,
+ expand_volume)
+from glustolibs.gluster.brick_ops import remove_brick
+from glustolibs.gluster.rebalance_ops import (rebalance_start,
+ wait_for_rebalance_to_complete)
+
+
+@runs_on([['distributed', 'distributed-replicated',
+ 'distributed-dispersed', 'distributed-arbiter'],
+ ['glusterfs']])
+class TestRemoveBrickNoCommitFollowedByRebalance(GlusterBaseClass):
+ @classmethod
+ def setUpClass(cls):
+ cls.get_super_method(cls, 'setUpClass')()
+
+ # Upload io scripts for running IO on mounts
+ cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts "
+ "to clients %s" % cls.clients)
+ g.log.info("Successfully uploaded IO scripts to clients %s",
+ cls.clients)
+
+ def setUp(self):
+ """
+ Setup and mount volume or raise ExecutionError
+ """
+ # Calling GlusterBaseClass setUp
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Failed to Setup and Mount Volume")
+
+ def tearDown(self):
+
+ # Unmount and cleanup original volume
+ ret = self.unmount_volume_and_cleanup_volume(mounts=[self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Failed to umount the vol & cleanup Volume")
+ g.log.info("Successful in umounting the volume and Cleanup")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_remove_brick_no_commit_followed_by_rebalance(self):
+ """
+ Description: Tests to check that there is no data loss when
+ remove-brick operation is stopped and then new bricks
+ are added to the volume.
+ Steps :
+ 1) Create a volume.
+ 2) Mount the volume using FUSE.
+ 3) Create files and dirs on the mount-point.
+ 4) Calculate the arequal-checksum on the mount-point
+ 5) Start remove-brick operation on the volume.
+ 6) While migration is in progress, stop the remove-brick
+ operation.
+ 7) Add-bricks to the volume and trigger rebalance.
+ 8) Wait for rebalance to complete.
+ 9) Calculate the arequal-checksum on the mount-point.
+ """
+ # Start IO on mounts
+ m_point = self.mounts[0].mountpoint
+ cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
+ "--dir-length 10 --dir-depth 2 --max-num-of-dirs 1 "
+ "--num-of-files 50 --file-type empty-file %s" % (
+ self.script_upload_path, m_point))
+ proc = g.run_async(self.mounts[0].client_system,
+ cmd, user=self.mounts[0].user)
+ g.log.info("IO on %s:%s is started successfully",
+ self.mounts[0].client_system, m_point)
+
+ # Validate IO
+ self.assertTrue(
+ validate_io_procs([proc], self.mounts[0]),
+ "IO failed on some of the clients"
+ )
+
+ # Calculate arequal-checksum before starting remove-brick
+ ret, arequal_before = collect_mounts_arequal(self.mounts[0])
+ self.assertTrue(ret, "Collecting arequal-checksum failed")
+
+ # Form bricks list for volume shrink
+ remove_brick_list = form_bricks_list_to_remove_brick(
+ self.mnode, self.volname, subvol_name=1)
+ self.assertIsNotNone(remove_brick_list, ("Volume %s: Failed to "
+ "form bricks list for "
+ "shrink", self.volname))
+ g.log.info("Volume %s: Formed bricks list for shrink", self.volname)
+
+ # Shrink volume by removing bricks
+ ret, _, _ = remove_brick(self.mnode, self.volname,
+ remove_brick_list, "start")
+ self.assertEqual(ret, 0, ("Volume %s shrink failed ",
+ self.volname))
+ g.log.info("Volume %s shrink started ", self.volname)
+
+ # Log remove-brick status
+ ret, out, _ = remove_brick(self.mnode, self.volname,
+ remove_brick_list, "status")
+ self.assertEqual(ret, 0, ("Remove-brick status failed on %s ",
+ self.volname))
+
+ # Check if migration is in progress
+ if r'in progress' in out:
+ # Stop remove-brick process
+ g.log.info("Stop removing bricks from volume")
+ ret, out, _ = remove_brick(self.mnode, self.volname,
+ remove_brick_list, "stop")
+ self.assertEqual(ret, 0, "Failed to stop remove-brick process")
+ g.log.info("Stopped remove-brick process successfully")
+ else:
+ g.log.error("Migration for remove-brick is complete")
+
+ # Sleep for 30 secs so that any running remove-brick process stops
+ sleep(30)
+
+ # Add bricks to the volume
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, ("Volume %s: Add-brick failed", self.volname))
+ g.log.info("Volume %s: Add-brick successful", self.volname)
+
+ # Trigger rebalance
+ ret, _, _ = rebalance_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, ("Volume %s: Failed to start rebalance",
+ self.volname))
+ g.log.info("Volume %s: Rebalance started ", self.volname)
+
+ # Wait for rebalance to complete
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
+ self.assertTrue(ret, "Rebalance has not completed")
+ g.log.info("Rebalance has completed successfully")
+
+ # Calculate arequal-checksum on mount-point
+ ret, arequal_after = collect_mounts_arequal(self.mounts[0])
+ self.assertTrue(ret, "Collecting arequal-checksum failed")
+
+ # Check if there is any data loss
+ self.assertEqual(set(arequal_before), set(arequal_after),
+ "arequal-checksum mismatch: there is data loss")
+ g.log.info("The checksum before and after rebalance is same."
+ " There is no data loss.")
diff --git a/tests/functional/dht/test_remove_brick_with_open_fd.py b/tests/functional/dht/test_remove_brick_with_open_fd.py
new file mode 100644
index 000000000..053114295
--- /dev/null
+++ b/tests/functional/dht/test_remove_brick_with_open_fd.py
@@ -0,0 +1,107 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.glusterfile import get_md5sum
+from glustolibs.gluster.volume_libs import get_subvols, shrink_volume
+from glustolibs.gluster.dht_test_utils import find_hashed_subvol
+from glustolibs.io.utils import validate_io_procs, wait_for_io_to_complete
+
+
+@runs_on([['distributed-replicated', 'distributed-dispersed',
+ 'distributed-arbiter', 'distributed'], ['glusterfs']])
+class TestRemoveBrickWithOpenFD(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Creating Volume and mounting the volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Volume creation or mount failed: %s"
+ % self.volname)
+ self.is_copy_running = False
+
+ def tearDown(self):
+
+ # If I/O processes are running, wait for them to complete
+ if self.is_copy_running:
+ if not wait_for_io_to_complete(self.list_of_io_processes,
+ self.mounts):
+ raise ExecutionError("Failed to wait for I/O to complete")
+
+ # Unmounting and cleaning volume
+ ret = self.unmount_volume_and_cleanup_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Unable to delete volume %s" % self.volname)
+
+ self.get_super_method(self, 'tearDown')()
+
+ def test_remove_brick_with_open_fd(self):
+ """
+ Test case:
+ 1. Create volume, start it and mount it.
+ 2. Open file datafile on mount point and start copying /etc/passwd
+ line by line (make sure that the copy is slow).
+ 3. Start remove-brick on the subvol to which datafile is hashed.
+ 4. Once remove-brick is complete, compare the checksums of
+ /etc/passwd and datafile.
+ """
+ # Open file datafile on mount point and start copying /etc/passwd
+ # line by line
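+ # (`exec 30<> datafile` opens read-write file descriptor 30 on the
+ # file and keeps it open for the life of the shell, so an open fd
+ # exists on the file while remove-brick migrates it)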
+ ret, out, _ = g.run(self.mounts[0].client_system,
+ "cat /etc/passwd | wc -l")
+ self.assertFalse(ret, "Failed to get number of lines of /etc/passwd")
+ cmd = ("cd {}; exec 30<> datafile ;for i in `seq 1 {}`; do "
+ "head -n $i /etc/passwd | tail -n 1 >> datafile; sleep 10; done"
+ .format(self.mounts[0].mountpoint, out.strip()))
+
+ self.list_of_io_processes = [
+ g.run_async(self.mounts[0].client_system, cmd)]
+ self.is_copy_running = True
+
+ # Start remove-brick on the subvol to which datafile is hashed
+ subvols = get_subvols(self.mnode, self.volname)['volume_subvols']
+ number = find_hashed_subvol(subvols, "/", 'datafile')[1]
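+ # (find_hashed_subvol returns a (subvol, index) pair; the index
+ # selects the subvol holding 'datafile' for shrink_volume below)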
+
+ ret = shrink_volume(self.mnode, self.volname, subvol_num=number)
+ self.assertTrue(ret, "Failed to remove-brick from volume")
+ g.log.info("Remove-brick rebalance successful")
+
+ # Validate if I/O was successful or not.
+ ret = validate_io_procs(self.list_of_io_processes, self.mounts)
+ self.assertTrue(ret, "IO failed on some of the clients")
+ self.is_copy_running = False
+
+ # Compare md5 checksums of /etc/passwd and datafile
+ md5_of_original_file = get_md5sum(self.mounts[0].client_system,
+ '/etc/passwd')
+ self.assertIsNotNone(md5_of_original_file,
+ 'Unable to get md5 checksum of original file')
+ md5_of_copied_file = get_md5sum(
+ self.mounts[0].client_system, '{}/datafile'.format(
+ self.mounts[0].mountpoint))
+ self.assertIsNotNone(md5_of_copied_file,
+ 'Unable to get md5 checksum of copied file')
+ self.assertEqual(md5_of_original_file.split(" ")[0],
+ md5_of_copied_file.split(" ")[0],
+ "md5 checksum of original and copied file didn't"
+ " match")
+ g.log.info("md5 checksums of original and copied files are the same")
diff --git a/tests/functional/dht/test_rename_with_brick_min_free_limit_crossed.py b/tests/functional/dht/test_rename_with_brick_min_free_limit_crossed.py
new file mode 100644
index 000000000..0e481fce0
--- /dev/null
+++ b/tests/functional/dht/test_rename_with_brick_min_free_limit_crossed.py
@@ -0,0 +1,82 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.lib_utils import get_usable_size_per_disk
+from glustolibs.gluster.brick_libs import get_all_bricks
+
+
+@runs_on([['distributed'], ['glusterfs']])
+class TestRenameWithBricksMinFreeLimitCrossed(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Changing dist_count to 1
+ self.volume['voltype']['dist_count'] = 1
+
+ # Creating Volume and mounting the volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Volume creation or mount failed: %s"
+ % self.volname)
+
+ self.first_client = self.mounts[0].client_system
+ self.mount_point = self.mounts[0].mountpoint
+
+ def tearDown(self):
+
+ # Unmounting and cleaning volume
+ ret = self.unmount_volume_and_cleanup_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Unable to delete volume %s" % self.volname)
+
+ self.get_super_method(self, 'tearDown')()
+
+ def test_rename_with_brick_min_free_limit_crossed(self):
+ """
+ Test case:
+ 1. Create a volume, start it and mount it.
+ 2. Calculate the usable size and fill till it reaches the min-free limit
+ 3. Rename the file
+ 4. Try to perform I/O from the mount point (this should fail)
+ """
+ bricks = get_all_bricks(self.mnode, self.volname)
+
+ # Calculate the usable size and fill till it reaches the
+ # min free limit
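+ # (assumption: cluster.min-free-disk is at its default of 10%, and
+ # get_usable_size_per_disk reports the size writable before that
+ # limit is crossed)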
+ usable_size = get_usable_size_per_disk(bricks[0])
+ ret, _, _ = g.run(self.first_client, "fallocate -l {}G {}/file"
+ .format(usable_size, self.mount_point))
+ self.assertFalse(ret, "Failed to fill disk to min free limit")
+ g.log.info("Disk filled up to min free limit")
+
+ # Rename the file
+ ret, _, _ = g.run(self.first_client, "mv {}/file {}/Renamedfile"
+ .format(self.mount_point, self.mount_point))
+ self.assertFalse(ret, "Rename failed on file to Renamedfile")
+ g.log.info("File renamed successfully")
+
+ # Try to perform I/O from the mount point (this should fail)
+ ret, _, _ = g.run(self.first_client,
+ "fallocate -l 5G {}/mfile".format(self.mount_point))
+ self.assertTrue(ret,
+ "Unexpected: Able to do I/O even when disks are "
+ "filled to min free limit")
+ g.log.info("Expected: Unable to perfrom I/O as min free disk is hit")
diff --git a/tests/functional/dht/test_restart_glusterd_after_rebalance.py b/tests/functional/dht/test_restart_glusterd_after_rebalance.py
index b4436968d..408e309a1 100644
--- a/tests/functional/dht/test_restart_glusterd_after_rebalance.py
+++ b/tests/functional/dht/test_restart_glusterd_after_rebalance.py
@@ -131,7 +131,8 @@ class RebalanceValidation(GlusterBaseClass):
# Wait for rebalance to complete
g.log.info("Waiting for rebalance to complete")
- ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
+ ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
+ timeout=1800)
self.assertTrue(ret, ("Rebalance is not yet complete on the volume "
"%s", self.volname))
g.log.info("Rebalance is successfully complete on the volume %s",
diff --git a/tests/functional/dht/test_rmdir_subvol_down.py b/tests/functional/dht/test_rmdir_subvol_down.py
index 492158551..d029bfc99 100644..100755
--- a/tests/functional/dht/test_rmdir_subvol_down.py
+++ b/tests/functional/dht/test_rmdir_subvol_down.py
@@ -87,7 +87,8 @@ class TestLookupDir(GlusterBaseClass):
# Find a non hashed subvolume(or brick)
nonhashed_subvol, count = find_nonhashed_subvol(self.subvols,
"parent", "child")
- self.assertIsNotNone("Error in finding nonhashed value")
+ self.assertIsNotNone(nonhashed_subvol,
+ "Error in finding nonhashed value")
g.log.info("nonhashed_subvol %s", nonhashed_subvol._host)
# Bring nonhashed_subbvol offline
diff --git a/tests/functional/dht/test_sparse_file_creation_and_deletion.py b/tests/functional/dht/test_sparse_file_creation_and_deletion.py
new file mode 100644
index 000000000..7404ece90
--- /dev/null
+++ b/tests/functional/dht/test_sparse_file_creation_and_deletion.py
@@ -0,0 +1,156 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+from time import sleep
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.lib_utils import get_size_of_mountpoint
+
+
+@runs_on([['distributed-replicated', 'distributed-arbiter',
+ 'distributed-dispersed', 'distributed'], ['glusterfs']])
+class TestSparseFileCreationAndDeletion(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Changing dist_count to 5
+ self.volume['voltype']['dist_count'] = 5
+
+ # Creating Volume and mounting the volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Volume creation or mount failed: %s"
+ % self.volname)
+
+ # Assign a variable for the first_client
+ self.first_client = self.mounts[0].client_system
+
+ def tearDown(self):
+
+ # Unmounting and cleaning volume
+ ret = self.unmount_volume_and_cleanup_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Unable to delete volume %s" % self.volname)
+
+ self.get_super_method(self, 'tearDown')()
+
+ def _create_two_sparse_files(self):
+ """Create 2 sparse files from /dev/zero and /dev/null"""
+
+ # Create a tuple to hold both the file names
+ self.sparse_file_tuple = (
+ "{}/sparse_file_zero".format(self.mounts[0].mountpoint),
+ "{}/sparse_file_null".format(self.mounts[0].mountpoint)
+ )
+
+ # Create 2 sparse files, one from /dev/zero and
+ # another from /dev/null
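+ # (dd's seek=5120 extends each file to a 5120MiB offset before
+ # writing, leaving a hole that occupies no disk blocks; /dev/null
+ # supplies no data at all, so that file is one large hole)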
+ for filename, input_file in ((self.sparse_file_tuple[0], "/dev/zero"),
+ (self.sparse_file_tuple[1], "/dev/null")):
+ cmd = ("dd if={} of={} bs=1M seek=5120 count=1000"
+ .format(input_file, filename))
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertEqual(ret, 0, 'Failed to create %s ' % filename)
+
+ g.log.info("Successfully created sparse_file_zero and"
+ " sparse_file_null")
+
+ def _check_du_and_ls_of_sparse_file(self):
+ """Check du and ls -lks on spare files"""
+
+ for filename in self.sparse_file_tuple:
+
+ # Fetch output of ls -lks for the sparse file
+ cmd = "ls -lks {}".format(filename)
+ ret, out, _ = g.run(self.first_client, cmd)
+ self.assertEqual(ret, 0, "Failed to get ls -lks for file %s "
+ % filename)
+ ls_value = out.split(" ")[5]
+
+ # Fetch output of du for the sparse file
+ cmd = "du --block-size=1 {}".format(filename)
+ ret, out, _ = g.run(self.first_client, cmd)
+ self.assertEqual(ret, 0, "Failed to get du for file %s "
+ % filename)
+ du_value = out.split("\t")[0]
+
+ # Compare du and ls -lks value
+ self.assertNotEqual(ls_value, du_value,
+ "Unexpected: Sparse file size coming up same "
+ "for du and ls -lks")
+
+ g.log.info("Successfully checked sparse file size using ls and du")
+
+ def _delete_two_sparse_files(self):
+ """Delete sparse files"""
+
+ for filename in self.sparse_file_tuple:
+ cmd = "rm -rf {}".format(filename)
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertEqual(ret, 0, 'Failed to delete %s ' % filename)
+
+ g.log.info("Successfully remove both sparse files")
+
+ def test_sparse_file_creation_and_deletion(self):
+ """
+ Test case:
+ 1. Create volume with 5 sub-volumes, start and mount it.
+ 2. Check df -h for available size.
+ 3. Create 2 sparse file one from /dev/null and one from /dev/zero.
+ 4. Find out size of files and compare them through du and ls.
+ (They shouldn't match.)
+ 5. Check df -h for available size. (It should be less than step 2.)
+ 6. Remove the files using rm -rf.
+ """
+ # Check df -h for available size
+ available_space_at_start = get_size_of_mountpoint(
+ self.first_client, self.mounts[0].mountpoint)
+ self.assertIsNotNone(available_space_at_start,
+ "Failed to get available space on mount point")
+
+ # Create 2 sparse file one from /dev/null and one from /dev/zero
+ self._create_two_sparse_files()
+
+ # Find out size of files and compare them through du and ls
+ # (They shouldn't match)
+ self._check_du_and_ls_of_sparse_file()
+
+ # Check df -h for available size (it should be less than step 2)
+ available_space_now = get_size_of_mountpoint(
+ self.first_client, self.mounts[0].mountpoint)
+ self.assertIsNotNone(available_space_now,
+ "Failed to get avaliable space on mount point")
+ ret = (int(available_space_at_start) > int(available_space_now))
+ self.assertTrue(ret, "Available space at start not less than "
+ "available space now")
+
+ # Remove the files using rm -rf
+ self._delete_two_sparse_files()
+
+ # Sleep for 180 seconds for the metadata in the .glusterfs directory
+ # to be removed
+ sleep(180)
+
+ # Check df -h after removing sparse files
+ available_space_now = get_size_of_mountpoint(
+ self.first_client, self.mounts[0].mountpoint)
+ self.assertIsNotNone(available_space_now,
+ "Failed to get available space on mount point")
+ ret = int(available_space_at_start) - int(available_space_now) < 1500
+ self.assertTrue(ret, "Available space was not restored to within "
+ "1500 blocks of its initial value after deletion")
diff --git a/tests/functional/dht/test_stack_overflow.py b/tests/functional/dht/test_stack_overflow.py
new file mode 100644
index 000000000..c371add63
--- /dev/null
+++ b/tests/functional/dht/test_stack_overflow.py
@@ -0,0 +1,131 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.volume_ops import (set_volume_options,
+ reset_volume_option)
+from glustolibs.gluster.volume_libs import expand_volume
+from glustolibs.gluster.rebalance_ops import (rebalance_start,
+ wait_for_fix_layout_to_complete)
+from glustolibs.gluster.glusterfile import move_file
+
+
+@runs_on([['distributed', 'distributed-replicated',
+ 'dispersed', 'distributed-dispersed', 'replicated',
+ 'arbiter', 'distributed-arbiter'],
+ ['glusterfs']])
+class TestStackOverflow(GlusterBaseClass):
+ def setUp(self):
+ """
+ Setup and mount volume or raise ExecutionError
+ """
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ g.log.error("Failed to Setup and Mount Volume")
+ raise ExecutionError("Failed to Setup and Mount Volume")
+
+ def tearDown(self):
+ # Reset the volume options set inside the test
+ vol_options = ['performance.parallel-readdir',
+ 'performance.readdir-ahead']
+ for opt in vol_options:
+ ret, _, _ = reset_volume_option(self.mnode, self.volname, opt)
+ if ret:
+ raise ExecutionError("Failed to reset the volume option %s"
+ % opt)
+ g.log.info("Successfully reset the volume options")
+
+ # Unmount and cleanup original volume
+ ret = self.unmount_volume_and_cleanup_volume(mounts=[self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Failed to umount the vol & cleanup Volume")
+ g.log.info("Successful in umounting the volume and Cleanup")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def test_stack_overflow(self):
+ """
+ Description: Tests to check that there is no stack overflow
+ in readdirp with parallel-readdir enabled.
+ Steps :
+ 1) Create a volume.
+ 2) Mount the volume using FUSE.
+ 3) Enable performance.parallel-readdir and
+ performance.readdir-ahead on the volume.
+ 4) Create 10000 files on the mount point.
+ 5) Add-brick to the volume.
+ 6) Perform fix-layout on the volume (not rebalance).
+ 7) From the client node, rename all the files; this will result in
+ the creation of linkto files on the newly added brick.
+ 8) Do ls -l (lookup) on the mount-point.
+ """
+ # pylint: disable=too-many-statements
+ # Enable performance.parallel-readdir and
+ # performance.readdir-ahead on the volume
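+ # (with parallel-readdir enabled, a readdir-ahead instance is loaded
+ # per DHT subvolume; the regression verified here caused deep
+ # recursion in readdirp on such a graph)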
+ options = {"performance.parallel-readdir": "enable",
+ "performance.readdir-ahead": "enable"}
+ ret = set_volume_options(self.mnode, self.volname, options)
+ self.assertTrue(ret, "Failed to set volume options")
+ g.log.info("Successfully set volume options")
+
+ # Creating 10000 files on volume root
+ m_point = self.mounts[0].mountpoint
+ command = 'touch ' + m_point + '/file{1..10000}_0'
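+ # (brace expansion, assuming the remote shell is bash, hands
+ # file1_0 .. file10000_0 to a single touch invocation)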
+ ret, _, _ = g.run(self.clients[0], command)
+ self.assertEqual(ret, 0, "File creation failed on %s"
+ % m_point)
+ g.log.info("Files successfully created on the mount point")
+
+ # Add bricks to the volume
+ ret = expand_volume(self.mnode, self.volname, self.servers,
+ self.all_servers_info)
+ self.assertTrue(ret, ("Failed to expand the volume %s",
+ self.volname))
+ g.log.info("Expanding volume is successful on "
+ "volume %s", self.volname)
+
+ # Perform fix-layout on the volume
+ ret, _, _ = rebalance_start(self.mnode, self.volname, fix_layout=True)
+ self.assertEqual(ret, 0, 'Failed to start rebalance')
+ g.log.info('Rebalance is started')
+
+ # Wait for fix-layout to complete
+ ret = wait_for_fix_layout_to_complete(self.mnode, self.volname,
+ timeout=3000)
+ self.assertTrue(ret, ("Fix-layout failed on volume %s",
+ self.volname))
+ g.log.info("Fix-layout is successful on "
+ "volume %s", self.volname)
+
+ # Rename all files from client node
+ for i in range(1, 10000):
+ ret = move_file(self.clients[0],
+ '{}/file{}_0'.format(m_point, i),
+ '{}/file{}_1'.format(m_point, i))
+ self.assertTrue(ret, "Failed to rename files")
+ g.log.info("Files renamed successfully")
+
+ # Perform lookup from the mount-point
+ cmd = "ls -lR " + m_point
+ ret, _, _ = g.run(self.mounts[0].client_system, cmd)
+ self.assertEqual(ret, 0, "Failed to lookup")
+ g.log.info("Lookup successful")
diff --git a/tests/functional/dht/test_stop_glusterd_while_rebalance_in_progress.py b/tests/functional/dht/test_stop_glusterd_while_rebalance_in_progress.py
index 83a0ee991..350cb4e1a 100644
--- a/tests/functional/dht/test_stop_glusterd_while_rebalance_in_progress.py
+++ b/tests/functional/dht/test_stop_glusterd_while_rebalance_in_progress.py
@@ -167,7 +167,7 @@ class RebalanceValidation(GlusterBaseClass):
# Wait for rebalance to complete
g.log.info("Waiting for rebalance to complete")
ret = wait_for_rebalance_to_complete(self.mnode, self.volname,
- timeout=600)
+ timeout=1800)
self.assertTrue(ret, ("Rebalance is either timed out or failed"
"%s", self.volname))
g.log.info("Volume %s: Rebalance completed successfully",
diff --git a/tests/functional/dht/test_time_taken_for_ls.py b/tests/functional/dht/test_time_taken_for_ls.py
new file mode 100644
index 000000000..7c9653999
--- /dev/null
+++ b/tests/functional/dht/test_time_taken_for_ls.py
@@ -0,0 +1,105 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+
+
+@runs_on([['distributed-replicated', 'distributed-arbiter',
+ 'distributed-dispersed'], ['glusterfs']])
+class TestTimeForls(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume
+ if not self.setup_volume_and_mount_volume(self.mounts):
+ raise ExecutionError("Failed to Setup and mount volume")
+
+ self.is_io_running = False
+
+ def tearDown(self):
+
+ if self.is_io_running:
+ self._validate_io()
+
+ if not self.unmount_volume_and_cleanup_volume(self.mounts):
+ raise ExecutionError("Failed to Cleanup Volume")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
+ def _validate_io(self):
+ """Validare I/O threads running on mount point"""
+ io_success = []
+ for proc in self.proc_list:
+ try:
+ ret, _, _ = proc.async_communicate()
+ if ret:
+ io_success.append(False)
+ break
+ io_success.append(True)
+ except ValueError:
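+ # (ValueError is presumably raised when the process result was
+ # already collected; treat it as success)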
+ io_success.append(True)
+ return all(io_success)
+
+ def test_time_taken_for_ls(self):
+ """
+ Test case:
+ 1. Create a volume of type distributed-replicated or
+ distributed-arbiter or distributed-dispersed and start it.
+ 2. Mount the volume to clients and create 2000 directories
+ and 10 files inside each directory.
+ 3. Wait for I/O to complete on mount point and perform ls
+ (ls should complete within 10 seconds).
+ """
+ # Creating 2000 directories on the mount point
+ ret, _, _ = g.run(self.mounts[0].client_system,
+ "cd %s; for i in {1..2000};do mkdir dir$i;done"
+ % self.mounts[0].mountpoint)
+ self.assertFalse(ret, 'Failed to create 2000 dirs on mount point')
+
+ # Create 10 files inside each of the 2000 directories
+ dirs = ('{1..100}', '{101..200}', '{201..300}', '{301..400}',
+ '{401..500}', '{501..600}', '{601..700}', '{701..800}',
+ '{801..900}', '{901..1000}', '{1001..1100}', '{1101..1200}',
+ '{1201..1300}', '{1301..1400}', '{1401..1500}', '{1501..1600}',
+ '{1601..1700}', '{1701..1800}', '{1801..1900}', '{1901..2000}')
+ self.proc_list, counter = [], 0
+ while counter < len(dirs):
+ for mount_obj in self.mounts:
+ if counter == len(dirs):
+ break
+ ret = g.run_async(mount_obj.client_system,
+ "cd %s;for i in %s;do "
+ "touch dir$i/file{1..10};done"
+ % (mount_obj.mountpoint, dirs[counter]))
+ self.proc_list.append(ret)
+ counter += 1
+ self.is_io_running = True
+
+ # Check if I/O is successful or not
+ ret = self._validate_io()
+ self.assertTrue(ret, "Failed to create Files and dirs on mount point")
+ self.is_io_running = False
+ g.log.info("Successfully created files and dirs needed for the test")
+
+ # Run ls on mount point which should get completed within 10 seconds
+ ret, _, _ = g.run(self.mounts[0].client_system,
+ "cd %s; timeout 10 ls"
+ % self.mounts[0].mountpoint)
+ self.assertFalse(ret, 'ls took more than 10 seconds')
+ g.log.info("ls completed in under 10 seconds")
diff --git a/tests/functional/dht/test_verify_create_hash.py b/tests/functional/dht/test_verify_create_hash.py
index 2f9fffd7b..5ed2a97a0 100644
--- a/tests/functional/dht/test_verify_create_hash.py
+++ b/tests/functional/dht/test_verify_create_hash.py
@@ -104,7 +104,7 @@ class TestCreateFile(GlusterBaseClass):
for brickdir in brickobject:
count += 1
ret = brickdir.hashrange_contains_hash(filehash)
- if ret == 1:
+ if ret:
hash_subvol = subvols[count]
ret, _, err = g.run(brickdir._host, ("stat %s/file1" %
brickdir._fqpath))
diff --git a/tests/functional/dht/test_verify_permissions_on_root_dir_when_brick_down.py b/tests/functional/dht/test_verify_permissions_on_root_dir_when_brick_down.py
new file mode 100644
index 000000000..f6228c122
--- /dev/null
+++ b/tests/functional/dht/test_verify_permissions_on_root_dir_when_brick_down.py
@@ -0,0 +1,134 @@
+# Copyright (C) 2021 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.glusterfile import set_file_permissions
+from glustolibs.gluster.brick_libs import (get_all_bricks,
+ bring_bricks_offline,
+ bring_bricks_online)
+
+
+@runs_on([['distributed', 'distributed-replicated', 'distributed-dispersed',
+ 'distributed-arbiter'],
+ ['glusterfs']])
+class TestVerifyPermissionChanges(GlusterBaseClass):
+ def setUp(self):
+ """
+ Setup and mount volume
+ """
+ self.get_super_method(self, 'setUp')()
+
+ # Setup Volume
+ if not self.setup_volume_and_mount_volume(mounts=[self.mounts[0]]):
+ raise ExecutionError("Failed to Setup and Mount Volume")
+
+ def _set_root_dir_permission(self, permission):
+ """ Sets the root dir permission to the given value"""
+ m_point = self.mounts[0].mountpoint
+ ret = set_file_permissions(self.clients[0], m_point, permission)
+ self.assertTrue(ret, "Failed to set root dir permissions")
+
+ def _get_dir_permissions(self, host, directory):
+ """ Returns dir permissions"""
+ cmd = 'stat -c "%a" {}'.format(directory)
+ ret, out, _ = g.run(host, cmd)
+ self.assertEqual(ret, 0, "Failed to get permission on {}".format(host))
+ return out.strip()
+
+ def _get_root_dir_permission(self, expected=None):
+ """ Returns the root dir permission """
+ permission = self._get_dir_permissions(self.mounts[0].client_system,
+ self.mounts[0].mountpoint)
+ if not expected:
+ return permission.strip()
+ self.assertEqual(permission, expected, "The permissions doesn't match")
+ return True
+
+ def _bring_a_brick_offline(self):
+ """ Brings down a brick from the volume"""
+ brick_to_kill = get_all_bricks(self.mnode, self.volname)[-1]
+ ret = bring_bricks_offline(self.volname, brick_to_kill)
+ self.assertTrue(ret, "Failed to bring brick offline")
+ return brick_to_kill
+
+ def _bring_back_brick_online(self, brick):
+ """ Brings back down brick from the volume"""
+ ret = bring_bricks_online(self.mnode, self.volname, brick)
+ self.assertTrue(ret, "Failed to bring brick online")
+
+ def _verify_mount_dir_and_brick_dir_permissions(self, expected,
+ down_brick=None):
+ """ Verifies the mount directory and brick dir permissions are same"""
+ # Get root dir permission and verify
+ self._get_root_dir_permission(expected)
+
+ # Verify brick dir permission
+ brick_list = get_all_bricks(self.mnode, self.volname)
+ for brick in brick_list:
+ brick_node, brick_path = brick.split(":")
+ if not down_brick or down_brick.split(":")[-1] != brick_path:
+ actual_perm = self._get_dir_permissions(brick_node,
+ brick_path)
+ self.assertEqual(actual_perm, expected,
+ "The permissions are not the same")
+
+ def test_verify_root_dir_permission_changes(self):
+ """
+ 1. create pure dist volume
+ 2. mount on client
+ 3. Check the default permission (should be 755)
+ 4. Change the permission to 444 and verify
+ 5. Kill a brick
+ 6. Change root permission to 755
+ 7. Verify permission changes on all bricks, except down brick
+ 8. Bring back the brick and verify the changes are reflected
+ """
+
+ # Verify the default permission on root dir is 755
+ self._verify_mount_dir_and_brick_dir_permissions("755")
+
+ # Change root permission to 444
+ self._set_root_dir_permission("444")
+
+ # Verify the changes were successful
+ self._verify_mount_dir_and_brick_dir_permissions("444")
+
+ # Kill a brick
+ offline_brick = self._bring_a_brick_offline()
+
+ # Change root permission to 755
+ self._set_root_dir_permission("755")
+
+ # Verify the permission changed to 755 on mount and brick dirs
+ self._verify_mount_dir_and_brick_dir_permissions("755", offline_brick)
+
+ # Bring brick online
+ self._bring_back_brick_online(offline_brick)
+
+ # Verify the permission changed to 755 on mount and brick dirs
+ self._verify_mount_dir_and_brick_dir_permissions("755")
+
+ def tearDown(self):
+ # Unmount and cleanup original volume
+ if not self.unmount_volume_and_cleanup_volume(mounts=[self.mounts[0]]):
+ raise ExecutionError("Failed to umount the vol & cleanup Volume")
+ g.log.info("Successful in umounting the volume and Cleanup")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
diff --git a/tests/functional/dht/test_volume_start_stop_while_rebalance_in_progress.py b/tests/functional/dht/test_volume_start_stop_while_rebalance_in_progress.py
index 0bec68b8f..55099c811 100644
--- a/tests/functional/dht/test_volume_start_stop_while_rebalance_in_progress.py
+++ b/tests/functional/dht/test_volume_start_stop_while_rebalance_in_progress.py
@@ -131,8 +131,8 @@ class RebalanceValidation(GlusterBaseClass):
# Log Volume Info and Status before expanding the volume.
g.log.info("Logging volume info and Status before expanding volume")
ret = log_volume_info_and_status(self.mnode, self.volname)
- g.log.error(ret, ("Logging volume info and status failed on "
- "volume %s", self.volname))
+ self.assertTrue(ret, ("Logging volume info and status failed on "
+ "volume %s", self.volname))
g.log.info("Logging volume info and status was successful for volume "
"%s", self.volname)
diff --git a/tests/functional/dht/test_wipe_out_directory_permissions.py b/tests/functional/dht/test_wipe_out_directory_permissions.py
new file mode 100644
index 000000000..485aaf0d5
--- /dev/null
+++ b/tests/functional/dht/test_wipe_out_directory_permissions.py
@@ -0,0 +1,132 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.brick_libs import get_all_bricks
+from glustolibs.gluster.brick_ops import add_brick
+from glustolibs.gluster.lib_utils import form_bricks_list
+from glustolibs.gluster.glusterdir import mkdir
+from glustolibs.gluster.glusterfile import get_file_stat, get_fattr
+
+
+@runs_on([['distributed'], ['glusterfs']])
+class TestDhtWipeOutDirectoryPermissions(GlusterBaseClass):
+
+ def setUp(self):
+
+ self.get_super_method(self, 'setUp')()
+
+ # Changing dist_count to 1
+ self.volume['voltype']['dist_count'] = 1
+
+ # Creating Volume and mounting the volume
+ ret = self.setup_volume_and_mount_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Volume creation or mount failed: %s"
+ % self.volname)
+
+ # Assign a variable for the first_client
+ self.first_client = self.mounts[0].client_system
+
+ def tearDown(self):
+
+ # Unmounting and cleaning volume
+ ret = self.unmount_volume_and_cleanup_volume([self.mounts[0]])
+ if not ret:
+ raise ExecutionError("Unable to delete volume %s" % self.volname)
+
+ self.get_super_method(self, 'tearDown')()
+
+ def _check_permissions_of_dir(self):
+ """Check permissions of dir created."""
+ for brick_path in get_all_bricks(self.mnode, self.volname):
+ node, path = brick_path.split(":")
+ ret = get_file_stat(node, "{}/dir".format(path))
+ self.assertEqual(int(ret["access"]), 755,
+ "Unexpected:Permissions of dir is %s and not %d"
+ % (ret["access"], 755))
+ g.log.info("Permissions of dir directory is proper on all bricks")
+
+ def _check_trusted_glusterfs_dht_on_all_bricks(self):
+ """Check trusted.glusterfs.dht xattr on the backend bricks"""
+ bricks = get_all_bricks(self.mnode, self.volname)
+ possible_values = ["0x000000000000000000000000ffffffff",
+ "0x00000000000000000000000000000000"]
+ for brick_path in bricks:
+ node, path = brick_path.split(":")
+ ret = get_fattr(node, "{}/dir".format(path),
+ "trusted.glusterfs.dht")
+ self.assertEqual(
+ ret, possible_values[bricks.index(brick_path)],
+ "Value of trusted.glusterfs.dht is not as expected")
+ g.log.info("Successfully checked value of trusted.glusterfs.dht.")
+
+ def test_wipe_out_directory_permissions(self):
+ """
+ Test case:
+ 1. Create a 1 brick pure distributed volume.
+ 2. Start the volume and mount it on a client node using FUSE.
+ 3. Create a directory on the mount point.
+ 4. Check trusted.glusterfs.dht xattr on the backend brick.
+ 5. Add brick to the volume using force.
+ 6. Do lookup from the mount point.
+ 7. Check the directory permissions from the backend bricks.
+ 8. Check trusted.glusterfs.dht xattr on the backend bricks.
+ 9. From mount point cd into the directory.
+ 10. Check the directory permissions from backend bricks.
+ 11. Check trusted.glusterfs.dht xattr on the backend bricks.
+ """
+ # Create a directory on the mount point
+ self.dir_path = "{}/dir".format(self.mounts[0].mountpoint)
+ ret = mkdir(self.first_client, self.dir_path)
+ self.assertTrue(ret, "Failed to create directory dir")
+
+ # Check trusted.glusterfs.dht xattr on the backend brick
+ self._check_trusted_glusterfs_dht_on_all_bricks()
+
+ # Add brick to the volume using force
+ brick_list = form_bricks_list(self.mnode, self.volname, 1,
+ self.servers, self.all_servers_info)
+ self.assertIsNotNone(brick_list,
+ "Failed to form brick list to add brick")
+ ret, _, _ = add_brick(self.mnode, self.volname, brick_list, force=True)
+ self.assertEqual(ret, 0, ("Volume {}: Add-brick failed".format
+ (self.volname)))
+
+ # Do a lookup from the mount point
+ cmd = "ls -lR {}".format(self.dir_path)
+ ret, _, _ = g.run(self.first_client, cmd)
+ self.assertEqual(ret, 0, "Failed to lookup")
+ g.log.info("Lookup successful")
+
+ # Check the directory permissions from the backend bricks
+ self._check_permissions_of_dir()
+
+ # Check trusted.glusterfs.dht xattr on the backend bricks
+ self._check_trusted_glusterfs_dht_on_all_bricks()
+
+ # From mount point cd into the directory
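+ # (leaving and re-entering the directory forces a fresh lookup,
+ # which lets DHT self-heal the directory on the new brick)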
+ ret, _, _ = g.run(self.first_client, "cd {};cd ..;cd {}"
+ .format(self.dir_path, self.dir_path))
+ self.assertEqual(ret, 0, "Unable to cd into dir from mount point")
+
+ # Check the directory permissions from backend bricks
+ self._check_permissions_of_dir()
+
+ # Check trusted.glusterfs.dht xattr on the backend bricks
+ self._check_trusted_glusterfs_dht_on_all_bricks()