Diffstat (limited to 'tests/functional/arbiter/test_metadata_self_heal.py')
-rwxr-xr-x tests/functional/arbiter/test_metadata_self_heal.py | 74
1 file changed, 30 insertions(+), 44 deletions(-)
diff --git a/tests/functional/arbiter/test_metadata_self_heal.py b/tests/functional/arbiter/test_metadata_self_heal.py
index 3d0a37447..0b2708438 100755
--- a/tests/functional/arbiter/test_metadata_self_heal.py
+++ b/tests/functional/arbiter/test_metadata_self_heal.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2015-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2015-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -32,10 +32,11 @@ from glustolibs.gluster.heal_libs import (monitor_heal_completion,
is_shd_daemonized)
from glustolibs.gluster.heal_ops import trigger_heal
from glustolibs.misc.misc_libs import upload_scripts
-from glustolibs.io.utils import (collect_mounts_arequal, validate_io_procs)
+from glustolibs.io.utils import (collect_mounts_arequal,
+                                 wait_for_io_to_complete)


-@runs_on([['replicated', 'distributed-replicated'],
+@runs_on([['arbiter', 'distributed-arbiter'],
['glusterfs', 'nfs']])
class TestMetadataSelfHeal(GlusterBaseClass):
"""
@@ -75,26 +76,14 @@ class TestMetadataSelfHeal(GlusterBaseClass):
@classmethod
def setUpClass(cls):
# Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
-
- # Overriding the volume type to specifically test the volume type
- # Change from distributed-replicated to arbiter
- if cls.volume_type == "distributed-replicated":
- cls.volume['voltype'] = {
-                'type': 'distributed-replicated',
-                'dist_count': 2,
-                'replica_count': 3,
-                'arbiter_count': 1,
-                'transport': 'tcp'}
+ cls.get_super_method(cls, 'setUpClass')()
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on mounts",
cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, [script_local_path])
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s"
% cls.clients)
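GlusterBaseClass.setUpClass.im_func only exists on Python 2, where methods are wrapped in unbound-method objects; on Python 3 the attribute is gone and the old call raises AttributeError. A version-agnostic sketch of the pattern behind cls.get_super_method (the helper's real implementation in glustolibs may differ):

    # Sketch of a Python 2/3-safe "call the parent's method" helper; the
    # actual glustolibs get_super_method may be implemented differently.
    class Base(object):
        @classmethod
        def setUpClass(cls):
            print('Base.setUpClass ran for %s' % cls.__name__)


    class Child(Base):
        @classmethod
        def get_super_method(cls, obj, method_name):
            # Two-argument super() works on both Python 2 and 3;
            # .im_func would fail on Python 3.
            return getattr(super(Child, obj), method_name)

        @classmethod
        def setUpClass(cls):
            cls.get_super_method(cls, 'setUpClass')()


    Child.setUpClass()  # prints: Base.setUpClass ran for Child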
@@ -103,7 +92,7 @@ class TestMetadataSelfHeal(GlusterBaseClass):
def setUp(self):
# Calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# Create user qa
for mount_object in self.mounts:
@@ -139,7 +128,7 @@ class TestMetadataSelfHeal(GlusterBaseClass):
g.log.info("Successful in umounting the volume and Cleanup")
# Calling GlusterBaseClass teardown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_metadata_self_heal(self):
"""
@@ -203,10 +192,10 @@ class TestMetadataSelfHeal(GlusterBaseClass):
user=self.mounts[0].user)
all_mounts_procs.append(proc)
- # Validate IO
+        # Wait for IO to complete
self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "IO failed on some of the clients")
+ wait_for_io_to_complete(all_mounts_procs, self.mounts),
+ "Io failed to complete on some of the clients")
# Setting options
g.log.info('Setting options...')
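Both helpers block on the asynchronous IO processes started earlier with g.run_async; the swap changes what the test asserts about them more than the control flow. A rough sketch of the underlying wait pattern (not the real glustolibs implementation; it assumes Popen-like proc objects):

    # Rough sketch of waiting on async IO procs; assumes each proc behaves
    # like subprocess.Popen, unlike the real glustolibs helper.
    def wait_for_io_sketch(all_mounts_procs):
        success = True
        for proc in all_mounts_procs:
            if proc.wait() != 0:  # block until the remote IO command exits
                success = False
        return success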
@@ -218,10 +207,7 @@ class TestMetadataSelfHeal(GlusterBaseClass):
# Select bricks to bring offline
bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
self.mnode, self.volname))
-        bricks_to_bring_offline = filter(None, (
-            bricks_to_bring_offline_dict['hot_tier_bricks'] +
-            bricks_to_bring_offline_dict['cold_tier_bricks'] +
-            bricks_to_bring_offline_dict['volume_bricks']))
+ bricks_to_bring_offline = bricks_to_bring_offline_dict['volume_bricks']
# Bring brick offline
g.log.info('Bringing bricks %s offline...', bricks_to_bring_offline)
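With tiering gone from GlusterFS, select_bricks_to_bring_offline no longer returns meaningful hot/cold tier entries, so only the 'volume_bricks' key matters. The removed expression also relied on filter(), which returns a lazy iterator on Python 3. An illustrative sketch of what the old code computed (the dict contents here are made up):

    # Illustrative only: the old filter() expression reduces to
    # 'volume_bricks' once the tier lists are empty. On Python 3, filter()
    # is lazy, so the old code would also have needed list(filter(...)).
    bricks_dict = {
        'hot_tier_bricks': [],   # empty on non-tiered volumes
        'cold_tier_bricks': [],  # empty on non-tiered volumes
        'volume_bricks': ['server1:/bricks/brick0'],  # made-up brick path
    }
    offline = [b for b in (bricks_dict['hot_tier_bricks'] +
                           bricks_dict['cold_tier_bricks'] +
                           bricks_dict['volume_bricks']) if b]
    assert offline == bricks_dict['volume_bricks']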
@@ -343,8 +329,9 @@ class TestMetadataSelfHeal(GlusterBaseClass):
# Checking arequals before bringing bricks online
# and after bringing bricks online
-        self.assertItemsEqual(result_before_online, result_after_online,
-                              'Checksums are not equal')
+        self.assertEqual(sorted(result_before_online),
+                         sorted(result_after_online),
+                         'Checksums are not equal')
g.log.info('Checksums before bringing bricks online '
'and after bringing bricks online are equal')
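assertItemsEqual is another Python 2 casualty: Python 3 renamed it to assertCountEqual, so the test switches to comparing sorted() copies with plain assertEqual, which performs the same order-insensitive check. A minimal illustration:

    # Minimal illustration: order-insensitive comparison without
    # assertItemsEqual (renamed assertCountEqual on Python 3).
    result_before = ['trans1 checksum-a', 'trans2 checksum-b']
    result_after = ['trans2 checksum-b', 'trans1 checksum-a']
    assert sorted(result_before) == sorted(result_after)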
@@ -367,11 +354,6 @@ class TestMetadataSelfHeal(GlusterBaseClass):
ret, out, err = g.run(node, command)
file_list = out.split()
- g.log.info('Checking for user and group on %s...', node)
- conn = g.rpyc_get_connection(node)
- if conn is None:
- raise Exception("Unable to get connection on node %s" % node)
-
for file_name in file_list:
file_to_check = '%s/%s/%s' % (nodes_to_check[node],
test_meta_data_self_heal_folder,
@@ -379,26 +361,30 @@ class TestMetadataSelfHeal(GlusterBaseClass):
                g.log.info('Checking for permissions, user and group for %s',
                           file_name)
+
                # Check for permissions
-                permissions = oct(
-                    conn.modules.os.stat(file_to_check).st_mode)[-3:]
-                self.assertEqual(permissions, '444',
+                cmd = ("stat -c '%a %n' {} | awk '{{print $1}}'"
+                       .format(file_to_check))
+                ret, permissions, _ = g.run(node, cmd)
+                self.assertEqual(permissions.split('\n')[0], '444',
                                 'Permissions %s is not equal to 444'
                                 % permissions)
                g.log.info("Permissions are '444' for %s", file_name)

                # Check for user
-                uid = conn.modules.os.stat(file_to_check).st_uid
-                username = conn.modules.pwd.getpwuid(uid).pw_name
-                self.assertEqual(username, 'qa', 'User %s is not equal qa'
+                cmd = ("ls -ld {} | awk '{{print $3}}'"
+                       .format(file_to_check))
+                ret, username, _ = g.run(node, cmd)
+                self.assertEqual(username.split('\n')[0],
+                                 'qa', 'User %s is not equal qa'
                                 % username)
                g.log.info("User is 'qa' for %s", file_name)

                # Check for group
-                gid = conn.modules.os.stat(file_to_check).st_gid
-                groupname = conn.modules.grp.getgrgid(gid).gr_name
-                self.assertEqual(groupname, 'qa', 'Group %s is not equal qa'
+                cmd = ("ls -ld {} | awk '{{print $4}}'"
+                       .format(file_to_check))
+                ret, groupname, _ = g.run(node, cmd)
+                self.assertEqual(groupname.split('\n')[0],
+                                 'qa', 'Group %s is not equal qa'
                                 % groupname)
                g.log.info("Group is 'qa' for %s", file_name)
-
-            g.rpyc_close_connection(host=node)
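Replacing the per-node rpyc connection with plain g.run calls drops a dependency that was brittle across Python versions. The three separate stat/ls invocations per file could even be collapsed into one stat call; a hedged sketch of that alternative (g.run is the glusto helper used throughout the diff, while node and the file path stand in for the test's variables):

    # Hedged alternative, not the committed code: one stat call per file
    # fetches permissions, owner and group together. 'node' and
    # 'file_to_check' are placeholders from the surrounding test context.
    from glusto.core import Glusto as g

    def check_file_metadata(node, file_to_check):
        # %a = octal permissions, %U = owner name, %G = group name
        ret, out, _ = g.run(node, "stat -c '%a %U %G' {}".format(file_to_check))
        permissions, username, groupname = out.split()
        assert (permissions, username, groupname) == ('444', 'qa', 'qa')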