author    Vitalii Koriakov <vkoriako@redhat.com>  2018-01-22 15:21:57 +0200
committer Vitalii Koriakov <vkoriako@redhat.com>  2018-05-08 12:31:37 +0300
commit    626079a3e3227a0b00934ce5c30d8110751dca16 (patch)
tree      c9e8946c45c4ae7e1b2769ad4c1bd8ef3593336f /tests/functional/afr
parent    01d3ecbd59a7e4fa8bbf10de1977dbd01d0fbfc6 (diff)
Test MetaData Self-Heal (heal command)
Change-Id: I32fefdab769e5a361e4dcb5f1328b2c8da2e4f1a
Signed-off-by: Vitalii Koriakov <vkoriako@redhat.com>
Diffstat (limited to 'tests/functional/afr')
-rwxr-xr-x tests/functional/afr/heal/test_self_heal.py | 576
1 file changed, 461 insertions(+), 115 deletions(-)
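
The bulk of this patch renames "areequal" to "arequal" in comments and log
messages around the checksum helper these tests use. For orientation, here is
a minimal sketch of that collect-and-compare pattern as it appears inside the
test methods below; the import path is an assumption of this note, the calls
and return shape match what the hunks show:

    # Sketch only: assumes it runs inside a GlusterBaseClass test method.
    from glustolibs.io.utils import collect_mounts_arequal  # assumed path

    # Get arequal before getting bricks offline
    ret, result_before_offline = collect_mounts_arequal(self.mounts)
    self.assertTrue(ret, 'Failed to get arequal')

    # ... bring bricks offline, leave the data untouched ...

    # Get arequal after getting bricks offline and compare: identical
    # checksums mean the mounts still serve the same data.
    ret, result_after_offline = collect_mounts_arequal(self.mounts)
    self.assertTrue(ret, 'Failed to get arequal')
    self.assertEqual(result_before_offline, result_after_offline,
                     'Checksums before and after bringing bricks '
                     'offline are not equal')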
diff --git a/tests/functional/afr/heal/test_self_heal.py b/tests/functional/afr/heal/test_self_heal.py
index c378af8..d7506a6 100755
--- a/tests/functional/afr/heal/test_self_heal.py
+++ b/tests/functional/afr/heal/test_self_heal.py
@@ -116,13 +116,6 @@ class TestSelfHeal(GlusterBaseClass):
raise ExecutionError("IO failed on some of the clients")
g.log.info("IO is successful on all mounts")
- # List all files and dirs created
- g.log.info("List all files and directories:")
- ret = list_all_files_and_dirs_mounts(self.mounts)
- if not ret:
- raise ExecutionError("Failed to list all files and dirs")
- g.log.info("Listing all files and directories is successful")
-
# Cleanup and umount volume
g.log.info("Starting to Unmount Volume and Cleanup Volume")
ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
@@ -143,12 +136,12 @@ class TestSelfHeal(GlusterBaseClass):
"entry-self-heal": "off"
"data-self-heal": "off"
- create IO
- - Get areequal before getting bricks offline
+ - Get arequal before getting bricks offline
- set the volume option
"self-heal-daemon": "off"
- bring down all bricks processes from selected set
- - Get areequal after getting bricks offline and compare with
- areequal before getting bricks offline
+ - Get arequal after getting bricks offline and compare with
+ arequal before getting bricks offline
- modify the data
- bring bricks online
- set the volume option
@@ -196,11 +189,11 @@ class TestSelfHeal(GlusterBaseClass):
self.io_validation_complete = True
g.log.info("IO is successful on all mounts")
- # Get areequal before getting bricks offline
- g.log.info('Getting areequal before getting bricks offline...')
+ # Get arequal before getting bricks offline
+ g.log.info('Getting arequal before getting bricks offline...')
ret, result_before_offline = collect_mounts_arequal(self.mounts)
self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting areequal before getting bricks offline '
+ g.log.info('Getting arequal before getting bricks offline '
'is successful')
# Setting options
@@ -231,14 +224,14 @@ class TestSelfHeal(GlusterBaseClass):
g.log.info('Bringing bricks %s offline is successful',
bricks_to_bring_offline)
- # Get areequal after getting bricks offline
- g.log.info('Getting areequal after getting bricks offline...')
+ # Get arequal after getting bricks offline
+ g.log.info('Getting arequal after getting bricks offline...')
ret, result_after_offline = collect_mounts_arequal(self.mounts)
self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting areequal after getting bricks offline '
+ g.log.info('Getting arequal after getting bricks offline '
'is successful')
- # Checking areequals before bringing bricks offline
+ # Checking arequals before bringing bricks offline
# and after bringing bricks offline
self.assertEqual(result_before_offline, result_after_offline,
'Checksums before and '
@@ -422,21 +415,21 @@ class TestSelfHeal(GlusterBaseClass):
"entry-self-heal": "off"
"data-self-heal": "off"
- create IO
- - get areequal before getting bricks offline
+ - get arequal before getting bricks offline
- set the volume option
"self-heal-daemon": "off"
- bring down all bricks processes from selected set
- - get areequal after getting bricks offline and compare with
+ - get arequal after getting bricks offline and compare with
arequal before bringing bricks offline
- modify the data
- - get areequal before getting bricks online
+ - get arequal before getting bricks online
- bring bricks online
- set the volume option
"self-heal-daemon": "on"
- check daemons and start healing
- check if heal is completed
- check for split-brain
- - get areequal after getting bricks online and compare with
+ - get arequal after getting bricks online and compare with
arequal before bringing bricks online
"""
# pylint: disable=too-many-statements
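
The docstring above is the common skeleton shared by most tests in this file.
Below is a condensed sketch of that flow, built only from helper names that
appear elsewhere in this diff (select_bricks_to_bring_offline,
bring_bricks_offline, bring_bricks_online, set_volume_options, trigger_heal,
monitor_heal_completion, is_volume_in_split_brain); treat it as an outline
under those assumptions, not the full test body:

    # Kill the brick processes for one selected set of bricks.
    bricks_to_bring_offline_dict = select_bricks_to_bring_offline(
        self.mnode, self.volname)
    bricks_to_bring_offline = filter(None, (
        bricks_to_bring_offline_dict['hot_tier_bricks'] +
        bricks_to_bring_offline_dict['cold_tier_bricks'] +
        bricks_to_bring_offline_dict['volume_bricks']))
    ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
    self.assertTrue(ret, 'Failed to bring bricks %s offline'
                    % bricks_to_bring_offline)

    # ... modify data from the mounts while the bricks are down ...

    # Restart the bricks, re-enable the self-heal daemon and heal.
    ret = bring_bricks_online(self.mnode, self.volname,
                              bricks_to_bring_offline)
    self.assertTrue(ret, 'Failed to bring bricks %s online'
                    % bricks_to_bring_offline)
    ret = set_volume_options(self.mnode, self.volname,
                             {"self-heal-daemon": "on"})
    self.assertTrue(ret, 'Failed to set options')
    ret = trigger_heal(self.mnode, self.volname)
    self.assertTrue(ret, 'Heal is not started')
    ret = monitor_heal_completion(self.mnode, self.volname)
    self.assertTrue(ret, 'Heal has not yet completed')
    ret = is_volume_in_split_brain(self.mnode, self.volname)
    self.assertFalse(ret, 'Volume is in split-brain state')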
@@ -491,11 +484,11 @@ class TestSelfHeal(GlusterBaseClass):
"python %s delete %s"]
for cmd in cmd_list:
- # Get areequal before getting bricks offline
- g.log.info('Getting areequal before getting bricks offline...')
+ # Get arequal before getting bricks offline
+ g.log.info('Getting arequal before getting bricks offline...')
ret, result_before_offline = collect_mounts_arequal(self.mounts)
self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting areequal before getting bricks offline '
+ g.log.info('Getting arequal before getting bricks offline '
'is successful')
# Setting options
@@ -528,14 +521,14 @@ class TestSelfHeal(GlusterBaseClass):
g.log.info('Bringing bricks %s offline is successful',
bricks_to_bring_offline)
- # Get areequal after getting bricks offline
- g.log.info('Getting areequal after getting bricks offline...')
+ # Get arequal after getting bricks offline
+ g.log.info('Getting arequal after getting bricks offline...')
ret, result_after_offline = collect_mounts_arequal(self.mounts)
self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting areequal after getting bricks offline '
+ g.log.info('Getting arequal after getting bricks offline '
'is successful')
- # Checking areequals before bringing bricks offline
+ # Checking arequals before bringing bricks offline
# and after bringing bricks offline
self.assertEqual(result_before_offline, result_after_offline,
'Checksums are not equal')
@@ -563,11 +556,11 @@ class TestSelfHeal(GlusterBaseClass):
self.io_validation_complete = True
g.log.info("IO is successful on all mounts")
- # Get areequal before getting bricks online
- g.log.info('Getting areequal before getting bricks online...')
+ # Get arequal before getting bricks online
+ g.log.info('Getting arequal before getting bricks online...')
ret, result_before_online = collect_mounts_arequal(self.mounts)
self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting areequal before getting bricks online '
+ g.log.info('Getting arequal before getting bricks online '
'is successful')
# List all files and dirs created
@@ -636,11 +629,11 @@ class TestSelfHeal(GlusterBaseClass):
self.assertFalse(ret, 'Volume is in split-brain state')
g.log.info('Volume is not in split-brain state')
- # Get areequal after getting bricks online
- g.log.info('Getting areequal after getting bricks online...')
+ # Get arequal after getting bricks online
+ g.log.info('Getting arequal after getting bricks online...')
ret, result_after_online = collect_mounts_arequal(self.mounts)
self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting areequal after getting bricks online '
+ g.log.info('Getting arequal after getting bricks online '
'is successful')
# List all files and dirs created
@@ -650,7 +643,7 @@ class TestSelfHeal(GlusterBaseClass):
raise ExecutionError("Failed to list all files and dirs")
g.log.info("Listing all files and directories is successful")
- # Checking areequals before bringing bricks online
+ # Checking arequals before bringing bricks online
# and after bringing bricks online
self.assertEqual(result_before_online, result_after_online,
'Checksums are not equal')
@@ -669,10 +662,10 @@ class TestSelfHeal(GlusterBaseClass):
"data-self-heal-algorithm": "diff"
"self-heal-daemon": "off"
- create IO
- - calculate areequal
+ - calculate arequal
- bring down all bricks processes from selected set
- modify the data
- - get areequal before getting bricks online
+ - get arequal before getting bricks online
- bring bricks online
- expand volume by adding bricks to the volume
- do rebalance
@@ -680,7 +673,7 @@ class TestSelfHeal(GlusterBaseClass):
- start healing
- check if heal is completed
- check for split-brain
- - calculate areequal and compare with arequal before bringing bricks
+ - calculate arequal and compare with arequal before bringing bricks
offline and after bringing bricks online
"""
# pylint: disable=too-many-branches,too-many-statements
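
This test also expands the volume and rebalances it before healing is
verified. The add-brick and rebalance calls fall outside the hunks shown
here; the sketch below assumes the usual glustolibs helpers expand_volume
(volume_libs) and rebalance_start/wait_for_rebalance_to_complete
(rebalance_ops). Those names are an assumption of this note, not something
this diff confirms:

    # Assumed imports, not visible in this diff:
    #   from glustolibs.gluster.volume_libs import expand_volume
    #   from glustolibs.gluster.rebalance_ops import (
    #       rebalance_start, wait_for_rebalance_to_complete)

    # Expand the volume by adding bricks, then rebalance the layout.
    ret = expand_volume(self.mnode, self.volname, self.servers,
                        self.all_servers_info)
    self.assertTrue(ret, 'Failed to expand volume %s' % self.volname)

    ret, _, _ = rebalance_start(self.mnode, self.volname)
    self.assertEqual(ret, 0, 'Failed to start rebalance')

    ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
    self.assertTrue(ret, 'Rebalance is not completed')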
@@ -767,11 +760,11 @@ class TestSelfHeal(GlusterBaseClass):
self.assertTrue(ret, "IO failed on some of the clients")
g.log.info("IO is successful on all mounts")
- # Get areequal before getting bricks online
- g.log.info('Getting areequal before getting bricks online...')
+ # Get arequal before getting bricks online
+ g.log.info('Getting arequal before getting bricks online...')
ret, result_before_online = collect_mounts_arequal(self.mounts)
self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting areequal before getting bricks online '
+ g.log.info('Getting arequal before getting bricks online '
'is successful')
# Bring brick online
@@ -848,14 +841,14 @@ class TestSelfHeal(GlusterBaseClass):
self.assertFalse(ret, 'Volume is in split-brain state')
g.log.info('Volume is not in split-brain state')
- # Get areequal after getting bricks online
- g.log.info('Getting areequal after getting bricks online...')
+ # Get arequal after getting bricks online
+ g.log.info('Getting arequal after getting bricks online...')
ret, result_after_online = collect_mounts_arequal(self.mounts)
self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting areequal after getting bricks online '
+ g.log.info('Getting arequal after getting bricks online '
'is successful')
- # Checking areequals before bringing bricks offline
+ # Checking arequals before bringing bricks offline
# and after bringing bricks online
self.assertItemsEqual(result_before_online, result_after_online,
'Checksums are not equal')
@@ -870,10 +863,10 @@ class TestSelfHeal(GlusterBaseClass):
- create IO
- bring down all bricks processes from selected set
- modify the data
- - calculate areequal
+ - calculate arequal
- bring bricks online
- start healing
- - calculate areequal and compare with arequal before bringing bricks
+ - calculate arequal and compare with arequal before bringing bricks
offline and after bringing bricks online
"""
# pylint: disable=too-many-locals,too-many-statements
@@ -944,11 +937,11 @@ class TestSelfHeal(GlusterBaseClass):
self.assertTrue(ret, "IO failed on some of the clients")
g.log.info("IO is successful on all mounts")
- # Get areequal before getting bricks online
- g.log.info('Getting areequal before getting bricks online...')
+ # Get arequal before getting bricks online
+ g.log.info('Getting arequal before getting bricks online...')
ret, result_before_online = collect_mounts_arequal(self.mounts)
self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting areequal before getting bricks online '
+ g.log.info('Getting arequal before getting bricks online '
'is successful')
# Bring brick online
@@ -995,14 +988,14 @@ class TestSelfHeal(GlusterBaseClass):
self.assertFalse(ret, 'Volume is in split-brain state')
g.log.info('Volume is not in split-brain state')
- # Get areequal after getting bricks online
- g.log.info('Getting areequal after getting bricks online...')
+ # Get arequal after getting bricks online
+ g.log.info('Getting arequal after getting bricks online...')
ret, result_after_online = collect_mounts_arequal(self.mounts)
self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting areequal after getting bricks online '
+ g.log.info('Getting arequal after getting bricks online '
'is successful')
- # Checking areequals before bringing bricks online
+ # Checking arequals before bringing bricks online
# and after bringing bricks online
self.assertItemsEqual(result_before_online, result_after_online,
'Checksums are not equal')
@@ -1018,10 +1011,10 @@ class TestSelfHeal(GlusterBaseClass):
- create IO
- bring down all bricks processes from selected set
- modify the data
- - calculate areequal
+ - calculate arequal
- bring bricks online
- start healing
- - calculate areequal and compare with arequal before bringing bricks
+ - calculate arequal and compare with arequal before bringing bricks
offline and after bringing bricks online
"""
# pylint: disable=too-many-locals,too-many-statements
@@ -1092,11 +1085,11 @@ class TestSelfHeal(GlusterBaseClass):
self.assertTrue(ret, "IO failed on some of the clients")
g.log.info("IO is successful on all mounts")
- # Get areequal before getting bricks online
- g.log.info('Getting areequal before getting bricks online...')
+ # Get arequal before getting bricks online
+ g.log.info('Getting arequal before getting bricks online...')
ret, result_before_online = collect_mounts_arequal(self.mounts)
self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting areequal before getting bricks online '
+ g.log.info('Getting arequal before getting bricks online '
'is successful')
# Bring brick online
@@ -1143,14 +1136,14 @@ class TestSelfHeal(GlusterBaseClass):
self.assertFalse(ret, 'Volume is in split-brain state')
g.log.info('Volume is not in split-brain state')
- # Get areequal after getting bricks online
- g.log.info('Getting areequal after getting bricks online...')
+ # Get arequal after getting bricks online
+ g.log.info('Getting arequal after getting bricks online...')
ret, result_after_online = collect_mounts_arequal(self.mounts)
self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting areequal after getting bricks online '
+ g.log.info('Getting arequal after getting bricks online '
'is successful')
- # Checking areequals before bringing bricks online
+ # Checking arequals before bringing bricks online
# and after bringing bricks online
self.assertItemsEqual(result_before_online, result_after_online,
'Checksums are not equal')
@@ -1164,16 +1157,16 @@ class TestSelfHeal(GlusterBaseClass):
Description:
- create IO
- - calculate areequal
+ - calculate arequal
- bring down all bricks processes from selected set
- calculate arequal and compare with arequal before
getting bricks offline
- modify the data
- - areequal before getting bricks online
+ - arequal before getting bricks online
- bring bricks online
- check daemons and healing completion
- start healing
- - calculate areequal and compare with arequal before bringing bricks
+ - calculate arequal and compare with arequal before bringing bricks
online and after bringing bricks online
"""
# pylint: disable=too-many-locals,too-many-statements
@@ -1213,11 +1206,11 @@ class TestSelfHeal(GlusterBaseClass):
self.assertTrue(ret, "IO failed on some of the clients")
g.log.info("IO is successful on all mounts")
- # Get areequal before getting bricks offline
- g.log.info('Getting areequal before getting bricks offline...')
+ # Get arequal before getting bricks offline
+ g.log.info('Getting arequal before getting bricks offline...')
ret, result_before_offline = collect_mounts_arequal(self.mounts)
self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting areequal before getting bricks offline '
+ g.log.info('Getting arequal before getting bricks offline '
'is successful')
# Select bricks to bring offline
@@ -1241,14 +1234,14 @@ class TestSelfHeal(GlusterBaseClass):
g.log.info('Bringing bricks %s offline is successful',
bricks_to_bring_offline)
- # Get areequal after getting bricks offline
- g.log.info('Getting areequal after getting bricks offline...')
+ # Get arequal after getting bricks offline
+ g.log.info('Getting arequal after getting bricks offline...')
ret, result_after_offline = collect_mounts_arequal(self.mounts)
self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting areequal after getting bricks offline '
+ g.log.info('Getting arequal after getting bricks offline '
'is successful')
- # Checking areequals before bringing bricks offline
+ # Checking arequals before bringing bricks offline
# and after bringing bricks offline
self.assertItemsEqual(result_before_offline, result_after_offline,
'Checksums before and after '
@@ -1284,11 +1277,11 @@ class TestSelfHeal(GlusterBaseClass):
self.assertTrue(ret, "IO failed on some of the clients")
g.log.info("IO is successful on all mounts")
- # Get areequal before getting bricks online
- g.log.info('Getting areequal before getting bricks online...')
+ # Get arequal before getting bricks online
+ g.log.info('Getting arequal before getting bricks online...')
ret, result_before_online = collect_mounts_arequal(self.mounts)
self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting areequal before getting bricks online '
+ g.log.info('Getting arequal before getting bricks online '
'is successful')
# Bring brick online
@@ -1335,14 +1328,14 @@ class TestSelfHeal(GlusterBaseClass):
self.assertFalse(ret, 'Volume is in split-brain state')
g.log.info('Volume is not in split-brain state')
- # Get areequal after getting bricks online
- g.log.info('Getting areequal after getting bricks online...')
+ # Get arequal after getting bricks online
+ g.log.info('Getting arequal after getting bricks online...')
ret, result_after_online = collect_mounts_arequal(self.mounts)
self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting areequal after getting bricks online '
+ g.log.info('Getting arequal after getting bricks online '
'is successful')
- # Checking areequals before bringing bricks online
+ # Checking arequals before bringing bricks online
# and after bringing bricks online
self.assertItemsEqual(result_before_online, result_after_online,
'Checksums before and '
@@ -1362,20 +1355,20 @@ class TestSelfHeal(GlusterBaseClass):
"data-self-heal-algorithm": "diff"
"self-heal-daemon": "off"
- create IO
- - calculate areequal
+ - calculate arequal
- bring down all bricks processes from selected set
- - calculate areequals and compare with arequal
+ - calculate arequals and compare with arequal
before bringing bricks offline
- modify the data and verify whether the links are properly created
- - calculate areequal before getting bricks online
+ - calculate arequal before getting bricks online
- bring bricks online
- set the volume option
"self-heal-daemon": "on"
- check daemons and start healing
- check if heal is completed
- check for split-brain
- - calculate areequal after getting bricks online and compare with
- areequal before getting bricks online
+ - calculate arequal after getting bricks online and compare with
+ arequal before getting bricks online
"""
# pylint: disable=too-many-locals,too-many-statements
# Setting options
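
The hunks below only show the log message confirming that links were created
('Links for %s are properly created'); the verification itself lies outside
this diff's context. One hedged way to check a hardlink from the client,
reusing the rpyc stat pattern this file already uses (the folder and file
names here are illustrative, not from the patch):

    # A hardlinked regular file reports more than one link in st_nlink.
    conn = g.rpyc_get_connection(mount_object.client_system)
    file_to_check = '%s/%s/test.1' % (mount_object.mountpoint,
                                      'test_hardlink_folder')  # hypothetical
    nlink = conn.modules.os.stat(file_to_check).st_nlink
    self.assertTrue(nlink > 1,
                    'No hardlink found for %s' % file_to_check)
    g.rpyc_close_connection(host=mount_object.client_system)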
@@ -1424,11 +1417,11 @@ class TestSelfHeal(GlusterBaseClass):
self.assertTrue(ret, "IO failed on some of the clients")
g.log.info("IO is successful on all mounts")
- # Get areequal before getting bricks offline
- g.log.info('Getting areequal before getting bricks offline...')
+ # Get arequal before getting bricks offline
+ g.log.info('Getting arequal before getting bricks offline...')
ret, result_before_offline = collect_mounts_arequal(self.mounts)
self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting areequal before getting bricks offline '
+ g.log.info('Getting arequal before getting bricks offline '
'is successful')
# Select bricks to bring offline
@@ -1452,14 +1445,14 @@ class TestSelfHeal(GlusterBaseClass):
g.log.info('Bringing bricks %s offline is successful',
bricks_to_bring_offline)
- # Get areequal after getting bricks offline
- g.log.info('Getting areequal after getting bricks offline...')
+ # Get arequal after getting bricks offline
+ g.log.info('Getting arequal after getting bricks offline...')
ret, result_after_offline = collect_mounts_arequal(self.mounts)
self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting areequal after getting bricks offline '
+ g.log.info('Getting arequal after getting bricks offline '
'is successful')
- # Checking areequals before bringing bricks offline
+ # Checking arequals before bringing bricks offline
# and after bringing bricks offline
self.assertItemsEqual(result_before_offline, result_after_offline,
'Checksums before and '
@@ -1517,11 +1510,11 @@ class TestSelfHeal(GlusterBaseClass):
g.log.info('Links for %s are properly created',
mount_object.mountpoint)
- # Get areequal before getting bricks online
- g.log.info('Getting areequal before getting bricks online...')
+ # Get arequal before getting bricks online
+ g.log.info('Getting arequal before getting bricks online...')
ret, result_before_online = collect_mounts_arequal(self.mounts)
self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting areequal before getting bricks online '
+ g.log.info('Getting arequal before getting bricks online '
'is successful')
# Bring brick online
@@ -1580,14 +1573,14 @@ class TestSelfHeal(GlusterBaseClass):
self.assertFalse(ret, 'Volume is in split-brain state')
g.log.info('Volume is not in split-brain state')
- # Get areequal after getting bricks online
- g.log.info('Getting areequal after getting bricks online...')
+ # Get arequal after getting bricks online
+ g.log.info('Getting arequal after getting bricks online...')
ret, result_after_online = collect_mounts_arequal(self.mounts)
self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting areequal after getting bricks online '
+ g.log.info('Getting arequal after getting bricks online '
'is successful')
- # Checking areequals before bringing bricks online
+ # Checking arequals before bringing bricks online
# and after bringing bricks online
self.assertItemsEqual(result_before_online, result_after_online,
'Checksums before and '
@@ -1606,7 +1599,7 @@ class TestSelfHeal(GlusterBaseClass):
"self-heal-daemon": "off"
- bring down all bricks processes from selected set
- create IO (50k files)
- - Get areequal before getting bricks online
+ - Get arequal before getting bricks online
- bring bricks online
- set the volume option
"self-heal-daemon": "on"
@@ -1614,12 +1607,12 @@ class TestSelfHeal(GlusterBaseClass):
- start healing
- check if heal is completed
- check for split-brain
- - get areequal after getting bricks online and compare with
- areequal before getting bricks online
+ - get arequal after getting bricks online and compare with
+ arequal before getting bricks online
- add bricks
- do rebalance
- - get areequal after adding bricks and compare with
- areequal after getting bricks online
+ - get arequal after adding bricks and compare with
+ arequal after getting bricks online
"""
# pylint: disable=too-many-locals,too-many-statements
# Setting options
@@ -1673,11 +1666,11 @@ class TestSelfHeal(GlusterBaseClass):
self.assertTrue(ret, "IO failed on some of the clients")
g.log.info("IO is successful on all mounts")
- # Get areequal before getting bricks online
- g.log.info('Getting areequal before getting bricks online...')
+ # Get arequal before getting bricks online
+ g.log.info('Getting arequal before getting bricks online...')
ret, result_before_online = collect_mounts_arequal(self.mounts)
self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting areequal before getting bricks online '
+ g.log.info('Getting arequal before getting bricks online '
'is successful')
# Bring brick online
@@ -1737,14 +1730,14 @@ class TestSelfHeal(GlusterBaseClass):
self.assertFalse(ret, 'Volume is in split-brain state')
g.log.info('Volume is not in split-brain state')
- # Get areequal after getting bricks online
- g.log.info('Getting areequal after getting bricks online...')
+ # Get arequal after getting bricks online
+ g.log.info('Getting arequal after getting bricks online...')
ret, result_after_online = collect_mounts_arequal(self.mounts)
self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting areequal after getting bricks online '
+ g.log.info('Getting arequal after getting bricks online '
'is successful')
- # Checking areequals before bringing bricks online
+ # Checking arequals before bringing bricks online
# and after bringing bricks online
self.assertItemsEqual(result_before_online, result_after_online,
'Checksums before and '
@@ -1769,14 +1762,14 @@ class TestSelfHeal(GlusterBaseClass):
self.assertTrue(ret, 'Rebalance is not completed')
g.log.info('Rebalance is completed successfully')
- # Get areequal after adding bricks
- g.log.info('Getting areequal after adding bricks...')
+ # Get arequal after adding bricks
+ g.log.info('Getting arequal after adding bricks...')
ret, result_after_adding_bricks = collect_mounts_arequal(self.mounts)
self.assertTrue(ret, 'Failed to get arequal')
- g.log.info('Getting areequal after getting bricks '
+ g.log.info('Getting arequal after adding bricks '
'is successful')
- # Checking areequals after bringing bricks online
+ # Checking arequals after bringing bricks online
# and after adding bricks
self.assertItemsEqual(result_after_online, result_after_adding_bricks,
'Checksums after bringing bricks online and '
@@ -1891,3 +1884,356 @@ class TestSelfHeal(GlusterBaseClass):
# Comparing the results
g.log.info("comparing both the results")
self.assertEqual(result_before, result_after, "Arequals are not equal")
+
+
+@runs_on([['replicated', 'distributed-replicated'],
+ ['glusterfs', 'cifs', 'nfs']])
+class TestMetadataSelfHeal(GlusterBaseClass):
+ """
+ Description:
+ Test cases related to metadata self-heal
+ in default configuration of the volume
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ # Calling GlusterBaseClass setUpClass
+ GlusterBaseClass.setUpClass.im_func(cls)
+
+ # Upload io scripts for running IO on mounts
+ g.log.info("Upload io scripts to clients %s for running IO on mounts",
+ cls.clients)
+ script_local_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(cls.clients, [script_local_path])
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts to clients %s"
+ % cls.clients)
+ g.log.info("Successfully uploaded IO scripts to clients %s",
+ cls.clients)
+
+ cls.counter = 1
+ # int: Value of counter is used for dirname-start-num argument for
+ # file_dir_ops.py create_deep_dirs_with_files.
+
+ # The --dir-length argument value for file_dir_ops.py
+ # create_deep_dirs_with_files is set to 10 (refer to the cmd in setUp
+ # method). This means every mount will create 10 top-level dirs.
+ # For every mountpoint/testcase to create a new set of dirs, the
+ # counter is incremented by the --dir-length value, i.e. 10, in
+ # this test suite.
+
+ # If we are changing the --dir-length to new value, ensure the counter
+ # is also incremented by same value to create new set of files/dirs.
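+
+ # A hedged illustration (editor's sketch, not part of this patch) of
+ # how cls.counter is typically fed to file_dir_ops.py, following the
+ # flag names the comment above references:
+ #
+ #     command = ("python %s create_deep_dirs_with_files "
+ #                "--dirname-start-num %d --dir-length 10 %s"
+ #                % (cls.script_upload_path, cls.counter,
+ #                   mount_object.mountpoint))
+ #     cls.counter += 10  # advance by --dir-length for a fresh dir set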
+
+ def setUp(self):
+ # Calling GlusterBaseClass setUp
+ GlusterBaseClass.setUp.im_func(self)
+
+ self.all_mounts_procs = []
+ self.io_validation_complete = False
+
+ for mount_object in self.mounts:
+ # Create user qa
+ g.log.info("Creating user 'qa'...")
+ command = "useradd qa"
+ ret, _, err = g.run(mount_object.client_system, command)
+
+ if 'already exists' in err:
+ g.log.warn("User 'qa' is already exists")
+ else:
+ g.log.info("User 'qa' is created successfully")
+
+ # Setup Volume and Mount Volume
+ g.log.info("Starting to Setup Volume and Mount Volume")
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
+ g.log.info("Successful in Setup Volume and Mount Volume")
+
+ def tearDown(self):
+ """
+ If the test method failed before validating IO, tearDown waits for
+ the IO to complete and checks the IO exit status
+
+ Cleanup and umount volume
+ """
+ if not self.io_validation_complete:
+ g.log.info("Wait for IO to complete as IO validation did not "
+ "succeed in test method")
+ ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
+ if not ret:
+ raise ExecutionError("IO failed on some of the clients")
+ g.log.info("IO is successful on all mounts")
+
+ for mount_object in self.mounts:
+ # Delete user
+ g.log.info('Deleting user qa...')
+ command = "userdel -r qa"
+ ret, _, err = g.run(mount_object.client_system, command)
+
+ if 'does not exist' in err:
+ g.log.warn('User qa is already deleted')
+ else:
+ g.log.info('User qa successfully deleted')
+
+ # Cleanup and umount volume
+ g.log.info("Starting to Unmount Volume and Cleanup Volume")
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to umount the vol & cleanup Volume")
+ g.log.info("Successful in umounting the volume and Cleanup")
+
+ # Calling GlusterBaseClass teardown
+ GlusterBaseClass.tearDown.im_func(self)
+
+ def test_metadata_self_heal(self):
+ """
+ Test MetaData Self-Heal (heal command)
+
+ Description:
+ - set the volume option
+ "metadata-self-heal": "off"
+ "entry-self-heal": "off"
+ "data-self-heal": "off"
+ - create IO
+ - set the volume option
+ "self-heal-daemon": "off"
+ - bring down all bricks processes from selected set
+ - Change the permissions, ownership and the group
+ of the files under "test_meta_data_self_heal" folder
+ - get arequal before getting bricks online
+ - bring bricks online
+ - set the volume option
+ "self-heal-daemon": "on"
+ - check daemons and start healing
+ - check if heal is completed
+ - check for split-brain
+ - get arequal after getting bricks online and compare with
+ arequal before getting bricks online
+ - check group and user are 'qa'
+ """
+ # pylint: disable=too-many-locals,too-many-statements
+ # Setting options
+ g.log.info('Setting options...')
+ options = {"metadata-self-heal": "off",
+ "entry-self-heal": "off",
+ "data-self-heal": "off"}
+ ret = set_volume_options(self.mnode, self.volname, options)
+ self.assertTrue(ret, 'Failed to set options')
+ g.log.info("Options "
+ "'metadata-self-heal', "
+ "'entry-self-heal', "
+ "'data-self-heal', "
+ "are set to 'off' successfully")
+
+ # Creating files on client side
+ test_meta_data_self_heal_folder = 'test_meta_data_self_heal'
+ for mount_object in self.mounts:
+ g.log.info("Generating data for %s:%s",
+ mount_object.client_system, mount_object.mountpoint)
+
+ # Create files
+ g.log.info('Creating files...')
+ command = ("cd %s/ ; "
+ "mkdir %s ;"
+ "cd %s/ ;"
+ "for i in `seq 1 50` ; "
+ "do dd if=/dev/urandom of=test.$i bs=10k count=1 ; "
+ "done ;"
+ % (mount_object.mountpoint,
+ test_meta_data_self_heal_folder,
+ test_meta_data_self_heal_folder))
+
+ proc = g.run_async(mount_object.client_system, command,
+ user=mount_object.user)
+ self.all_mounts_procs.append(proc)
+
+ # Validate IO
+ g.log.info("Wait for IO to complete and validate IO ...")
+ ret = validate_io_procs(self.all_mounts_procs, self.mounts)
+ self.io_validation_complete = True
+ self.assertTrue(ret, "IO failed on some of the clients")
+ g.log.info("IO is successful on all mounts")
+
+ # Setting options
+ g.log.info('Setting options...')
+ options = {"self-heal-daemon": "off"}
+ ret = set_volume_options(self.mnode, self.volname, options)
+ self.assertTrue(ret, 'Failed to set options')
+ g.log.info("Option 'self-heal-daemon' is set to 'off' successfully")
+
+ # Select bricks to bring offline
+ bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
+ self.mnode, self.volname))
+ bricks_to_bring_offline = filter(None, (
+ bricks_to_bring_offline_dict['hot_tier_bricks'] +
+ bricks_to_bring_offline_dict['cold_tier_bricks'] +
+ bricks_to_bring_offline_dict['volume_bricks']))
+
+ # Bring brick offline
+ g.log.info('Bringing bricks %s offline...', bricks_to_bring_offline)
+ ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
+ self.assertTrue(ret, 'Failed to bring bricks %s offline' %
+ bricks_to_bring_offline)
+
+ ret = are_bricks_offline(self.mnode, self.volname,
+ bricks_to_bring_offline)
+ self.assertTrue(ret, 'Bricks %s are not offline'
+ % bricks_to_bring_offline)
+ g.log.info('Bringing bricks %s offline is successful',
+ bricks_to_bring_offline)
+
+ # Changing the permissions, ownership and the group
+ # of the files under "test_meta_data_self_heal" folder
+ for mount_object in self.mounts:
+ g.log.info("Modifying data for %s:%s",
+ mount_object.client_system, mount_object.mountpoint)
+
+ # Change permissions to 444
+ g.log.info('Changing permissions...')
+ command = ("cd %s/%s/ ; "
+ "chmod -R 444 *"
+ % (mount_object.mountpoint,
+ test_meta_data_self_heal_folder))
+ ret, out, err = g.run(mount_object.client_system, command)
+ self.assertEqual(ret, 0, err)
+ g.log.info('Permissions are changed successfully')
+
+ # Change the ownership to qa
+ g.log.info('Changing the ownership...')
+ command = ("cd %s/%s/ ; "
+ "chown -R qa *"
+ % (mount_object.mountpoint,
+ test_meta_data_self_heal_folder))
+ ret, out, err = g.run(mount_object.client_system, command)
+ self.assertEqual(ret, 0, err)
+ g.log.info('Ownership is changed successfully')
+
+ # Change the group to qa
+ g.log.info('Changing the group...')
+ command = ("cd %s/%s/ ; "
+ "chgrp -R qa *"
+ % (mount_object.mountpoint,
+ test_meta_data_self_heal_folder))
+ ret, out, err = g.run(mount_object.client_system, command)
+ self.assertEqual(ret, 0, err)
+ g.log.info('Group is changed successfully')
+
+ # Get arequal before getting bricks online
+ g.log.info('Getting arequal before getting bricks online...')
+ ret, result_before_online = collect_mounts_arequal(self.mounts)
+ self.assertTrue(ret, 'Failed to get arequal')
+ g.log.info('Getting arequal before getting bricks online '
+ 'is successful')
+
+ # Bring brick online
+ g.log.info('Bringing bricks %s online...', bricks_to_bring_offline)
+ ret = bring_bricks_online(self.mnode, self.volname,
+ bricks_to_bring_offline)
+ self.assertTrue(ret, 'Failed to bring bricks %s online' %
+ bricks_to_bring_offline)
+ g.log.info('Bringing bricks %s online is successful',
+ bricks_to_bring_offline)
+
+ # Setting options
+ g.log.info('Setting options...')
+ options = {"self-heal-daemon": "on"}
+ ret = set_volume_options(self.mnode, self.volname, options)
+ self.assertTrue(ret, 'Failed to set options')
+ g.log.info("Option 'self-heal-daemon' is set to 'on' successfully")
+
+ # Wait for volume processes to be online
+ g.log.info("Wait for volume processes to be online")
+ ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
+ self.assertTrue(ret, ("Volume process %s not online "
+ "despite waiting for 5 minutes", self.volname))
+ g.log.info("Successful in waiting for volume %s processes to be "
+ "online", self.volname)
+
+ # Verify volume's all process are online
+ g.log.info("Verifying volume's all process are online")
+ ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
+ self.assertTrue(ret, ("Volume %s : All process are not online"
+ % self.volname))
+ g.log.info("Volume %s : All process are online", self.volname)
+
+ # Wait for self-heal-daemons to be online
+ g.log.info("Waiting for self-heal-daemons to be online")
+ ret = is_shd_daemonized(self.all_servers)
+ self.assertTrue(ret, "Either No self heal daemon process found")
+ g.log.info("All self-heal-daemons are online")
+
+ # Start healing
+ ret = trigger_heal(self.mnode, self.volname)
+ self.assertTrue(ret, 'Heal is not started')
+ g.log.info('Healing is started')
+
+ # Monitor heal completion
+ ret = monitor_heal_completion(self.mnode, self.volname)
+ self.assertTrue(ret, 'Heal has not yet completed')
+
+ # Check if heal is completed
+ ret = is_heal_complete(self.mnode, self.volname)
+ self.assertTrue(ret, 'Heal is not complete')
+ g.log.info('Heal is completed successfully')
+
+ # Check for split-brain
+ ret = is_volume_in_split_brain(self.mnode, self.volname)
+ self.assertFalse(ret, 'Volume is in split-brain state')
+ g.log.info('Volume is not in split-brain state')
+
+ # Get arequal after getting bricks online
+ g.log.info('Getting arequal after getting bricks online...')
+ ret, result_after_online = collect_mounts_arequal(self.mounts)
+ self.assertTrue(ret, 'Failed to get arequal')
+ g.log.info('Getting arequal after getting bricks online '
+ 'is successful')
+
+ # Checking arequals before bringing bricks online
+ # and after bringing bricks online
+ self.assertItemsEqual(result_before_online, result_after_online,
+ 'Checksums are not equal')
+ g.log.info('Checksums before bringing bricks online '
+ 'and after bringing bricks online are equal')
+
+ # Check for user and group
+ for mount_object in self.mounts:
+ # Get file list
+ command = ("cd %s/%s/ ; "
+ "ls"
+ % (mount_object.mountpoint,
+ test_meta_data_self_heal_folder))
+ ret, out, err = g.run(mount_object.client_system, command)
+ self.assertEqual(ret, 0, err)
+ file_list = out.split()
+
+ # Checking for user and group
+ g.log.info('Checking for user and group...')
+ conn = g.rpyc_get_connection(mount_object.client_system)
+ if conn is None:
+ raise Exception("Unable to get connection on node %s" %
+ mount_object.client_system)
+
+ for file_name in file_list:
+ file_to_check = '%s/%s/%s' % (mount_object.mountpoint,
+ test_meta_data_self_heal_folder,
+ file_name)
+
+ g.log.info('Checking for user and group for %s...', file_name)
+ # Check for user
+ uid = conn.modules.os.stat(file_to_check).st_uid
+ username = conn.modules.pwd.getpwuid(uid).pw_name
+ self.assertEqual(username, 'qa', "User %s is not 'qa'"
+ % username)
+ g.log.info("User is 'qa' for %s", file_name)
+
+ # Check for group
+ gid = conn.modules.os.stat(file_to_check).st_gid
+ groupname = conn.modules.grp.getgrgid(gid).gr_name
+ self.assertEqual(groupname, 'qa', "Group %s is not 'qa'"
+ % groupname)
+ g.log.info("Group is 'qa' for %s", file_name)
+
+ g.rpyc_close_connection(host=mount_object.client_system)
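
The new test flips permissions to 444 before the heal but asserts only owner
and group afterwards. A small follow-on sketch, reusing the same per-file
rpyc loop, that would also assert the mode bits; this is an illustration
under that assumption, not something the patch adds:

    import stat  # standard library, evaluated locally

    # Inside the per-file loop above, additionally verify that the
    # chmod -R 444 applied while the bricks were down survived the heal.
    mode = conn.modules.os.stat(file_to_check).st_mode
    self.assertEqual(stat.S_IMODE(mode), 0444,
                     'Mode of %s is not 444 after heal' % file_to_check)
    g.log.info("Permissions are 444 for %s", file_name)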