author     Valerii Ponomarov <vponomar@redhat.com>    2019-12-06 22:43:44 +0530
committer  Bala Konda Reddy M <bmekala@redhat.com>    2019-12-12 12:11:48 +0000
commit     195e6e3482a19c2d5f643cbb2c9b60f81d38028c (patch)
tree       5d1d1f0ade1b0c0531ada71a63a323eedc651798
parent     79e82b1f04184d895638a78c7c57ea49d936efe7 (diff)
[py2to3] Add py3 support for tests in 'tests/functional/afr/heal'
Change-Id: Id4df838565ec3f9ad765cf223bb5115e43dac1c5
Signed-off-by: Valerii Ponomarov <kiparis.kh@gmail.com>
-rw-r--r--   tests/functional/afr/heal/test_data_split_brain_resolution.py                      5
-rwxr-xr-x   tests/functional/afr/heal/test_existing_shd_should_take_care_of_self_healing.py    6
-rw-r--r--   tests/functional/afr/heal/test_heal_info_while_accessing_file.py                  18
-rw-r--r--   tests/functional/afr/heal/test_metadata_split_brain_resolution.py                  4
-rw-r--r--   tests/functional/afr/heal/test_no_glustershd_with_distribute.py                    5
-rwxr-xr-x   tests/functional/afr/heal/test_self_heal.py                                       39
-rwxr-xr-x   tests/functional/afr/heal/test_self_heal_daemon_process.py                        25
7 files changed, 60 insertions(+), 42 deletions(-)
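Note on the recurring pattern in the hunks below: on Python 2, calls such as GlusterBaseClass.setUpClass.im_func(cls) reached the parent method through the unbound method's im_func attribute. Python 3 has no im_func (Klass.method is a plain function), so every such call is rewritten as cls.get_super_method(cls, 'setUpClass')(), or self.get_super_method(self, 'setUp')() for instance methods. The following is only a minimal sketch of how such a helper can behave for a two-level class hierarchy; the real helper in glustolibs may be implemented differently.

    # Illustrative approximation only; not the actual glustolibs code.
    class Base(object):
        @classmethod
        def setUpClass(cls):
            print("Base.setUpClass running for %s" % cls.__name__)

        @classmethod
        def get_super_method(cls, obj, method_name):
            # Resolve the parent-class method via the MRO, bound to ``obj``
            # (a test instance, or the class itself for classmethods).
            klass = obj if isinstance(obj, type) else type(obj)
            return getattr(super(klass, obj), method_name)

    class Child(Base):
        @classmethod
        def setUpClass(cls):
            # Python 2 only:   Base.setUpClass.im_func(cls)
            # Python 2 and 3:
            cls.get_super_method(cls, 'setUpClass')()

    Child.setUpClass()   # -> Base.setUpClass running for Child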
diff --git a/tests/functional/afr/heal/test_data_split_brain_resolution.py b/tests/functional/afr/heal/test_data_split_brain_resolution.py
index e1284cad6..b375e09a7 100644
--- a/tests/functional/afr/heal/test_data_split_brain_resolution.py
+++ b/tests/functional/afr/heal/test_data_split_brain_resolution.py
@@ -23,6 +23,7 @@
"""
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.misc.misc_libs import upload_scripts
@@ -45,7 +46,7 @@ class HealDataSplitBrain(GlusterBaseClass):
def setUpClass(cls):
# Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Override Volume
if cls.volume_type == "replicated":
@@ -85,7 +86,7 @@ class HealDataSplitBrain(GlusterBaseClass):
raise ExecutionError("Failed to create volume")
g.log.info("Successful in cleaning up Volume %s", cls.volname)
- GlusterBaseClass.tearDownClass.im_func(cls)
+ cls.get_super_method(cls, 'tearDownClass')()
def verify_brick_arequals(self):
g.log.info("Fetching bricks for the volume: %s", self.volname)
diff --git a/tests/functional/afr/heal/test_existing_shd_should_take_care_of_self_healing.py b/tests/functional/afr/heal/test_existing_shd_should_take_care_of_self_healing.py
index 64f5254a5..3fe682e59 100755
--- a/tests/functional/afr/heal/test_existing_shd_should_take_care_of_self_healing.py
+++ b/tests/functional/afr/heal/test_existing_shd_should_take_care_of_self_healing.py
@@ -46,7 +46,7 @@ class SelfHealDaemonProcessTestsWithHealing(GlusterBaseClass):
which is used in tests
"""
# calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
# Setup Volume and Mount Volume
g.log.info("Starting to Setup Volume and Mount Volume")
@@ -74,8 +74,8 @@ class SelfHealDaemonProcessTestsWithHealing(GlusterBaseClass):
raise ExecutionError("Failed to Unmount Volume and Cleanup Volume")
g.log.info("Successful in Unmount Volume and Cleanup Volume")
- # calling GlusterBaseClass tearDownClass
- GlusterBaseClass.tearDownClass.im_func(self)
+ # calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
def test_existing_glustershd_should_take_care_of_self_healing(self):
"""
diff --git a/tests/functional/afr/heal/test_heal_info_while_accessing_file.py b/tests/functional/afr/heal/test_heal_info_while_accessing_file.py
index 2fa7b194c..e729db679 100644
--- a/tests/functional/afr/heal/test_heal_info_while_accessing_file.py
+++ b/tests/functional/afr/heal/test_heal_info_while_accessing_file.py
@@ -14,7 +14,10 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+import sys
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.gluster_base_class import (GlusterBaseClass, runs_on)
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.volume_libs import get_subvols
@@ -41,7 +44,7 @@ class TestSelfHeal(GlusterBaseClass):
@classmethod
def setUpClass(cls):
# Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on mounts",
@@ -73,7 +76,7 @@ class TestSelfHeal(GlusterBaseClass):
def setUp(self):
# Calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
self.all_mounts_procs = []
self.io_validation_complete = False
@@ -118,7 +121,7 @@ class TestSelfHeal(GlusterBaseClass):
g.log.info("Successful in umounting the volume and Cleanup")
# Calling GlusterBaseClass teardown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_heal_info_shouldnot_list_files_being_accessed(self):
"""
@@ -152,8 +155,9 @@ class TestSelfHeal(GlusterBaseClass):
mount_obj.client_system, mount_obj.mountpoint)
# Creating files
- cmd = ("python %s create_files -f 100 %s"
- % (self.script_upload_path, mount_obj.mountpoint))
+ cmd = "/usr/bin/env python%d %s create_files -f 100 %s" % (
+ sys.version_info.major, self.script_upload_path,
+ mount_obj.mountpoint)
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
@@ -214,9 +218,7 @@ class TestSelfHeal(GlusterBaseClass):
# Compare dicts before accessing and while accessing
g.log.info('Comparing entries before modifying and while modifying...')
- ret = cmp(entries_before_accessing, entries_while_accessing)
- self.assertEqual(ret, 0, 'Entries before modifying and while modifying'
- 'are not equal')
+ self.assertDictEqual(entries_before_accessing, entries_while_accessing)
g.log.info('Comparison entries before modifying and while modifying'
'finished successfully.')
diff --git a/tests/functional/afr/heal/test_metadata_split_brain_resolution.py b/tests/functional/afr/heal/test_metadata_split_brain_resolution.py
index 75c513a5f..087d598f6 100644
--- a/tests/functional/afr/heal/test_metadata_split_brain_resolution.py
+++ b/tests/functional/afr/heal/test_metadata_split_brain_resolution.py
@@ -44,7 +44,7 @@ class HealMetadataSplitBrain(GlusterBaseClass):
def setUpClass(cls):
# Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Override Volume
if cls.volume_type == "replicated":
@@ -84,7 +84,7 @@ class HealMetadataSplitBrain(GlusterBaseClass):
raise ExecutionError("Failed to create volume")
g.log.info("Successful in cleaning up Volume %s", cls.volname)
- GlusterBaseClass.tearDownClass.im_func(cls)
+ cls.get_super_method(cls, 'tearDownClass')()
def verify_brick_arequals(self):
g.log.info("Fetching bricks for the volume: %s", self.volname)
diff --git a/tests/functional/afr/heal/test_no_glustershd_with_distribute.py b/tests/functional/afr/heal/test_no_glustershd_with_distribute.py
index 4cbee5f93..fdf818abd 100644
--- a/tests/functional/afr/heal/test_no_glustershd_with_distribute.py
+++ b/tests/functional/afr/heal/test_no_glustershd_with_distribute.py
@@ -42,7 +42,8 @@ class SelfHealDaemonProcessTestsWithMultipleVolumes(GlusterBaseClass):
which is used in tests
"""
# calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
+
list_of_vol = ['distributed-dispersed', 'replicated',
'dispersed', 'distributed', 'distributed-replicated']
cls.volume_configs = []
@@ -98,7 +99,7 @@ class SelfHealDaemonProcessTestsWithMultipleVolumes(GlusterBaseClass):
g.log.info("Successfully Cleanedup all Volumes")
# calling GlusterBaseClass tearDownClass
- GlusterBaseClass.tearDownClass.im_func(cls)
+ cls.get_super_method(cls, 'tearDownClass')()
def test_no_glustershd_with_distribute(self):
"""
diff --git a/tests/functional/afr/heal/test_self_heal.py b/tests/functional/afr/heal/test_self_heal.py
index fe060e4f5..efbbf92dc 100755
--- a/tests/functional/afr/heal/test_self_heal.py
+++ b/tests/functional/afr/heal/test_self_heal.py
@@ -16,7 +16,10 @@
# pylint: disable=too-many-lines
+import sys
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.gluster_base_class import (GlusterBaseClass, runs_on)
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.volume_ops import set_volume_options
@@ -54,7 +57,7 @@ class TestSelfHeal(GlusterBaseClass):
@classmethod
def setUpClass(cls):
# Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on mounts",
@@ -86,7 +89,7 @@ class TestSelfHeal(GlusterBaseClass):
def setUp(self):
# Calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
self.all_mounts_procs = []
self.io_validation_complete = False
@@ -121,7 +124,7 @@ class TestSelfHeal(GlusterBaseClass):
g.log.info("Successful in umounting the volume and Cleanup")
# Calling GlusterBaseClass teardown
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_data_self_heal_daemon_off(self):
"""
@@ -171,8 +174,10 @@ class TestSelfHeal(GlusterBaseClass):
mount_obj.client_system, mount_obj.mountpoint)
# Create files
g.log.info('Creating files...')
- command = ("python %s create_files -f 100 --fixed-file-size 1k %s"
- % (self.script_upload_path, mount_obj.mountpoint))
+ command = ("/usr/bin/env python%d %s create_files -f 100 "
+ "--fixed-file-size 1k %s" % (
+ sys.version_info.major, self.script_upload_path,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, command,
user=mount_obj.user)
@@ -203,10 +208,10 @@ class TestSelfHeal(GlusterBaseClass):
# Select bricks to bring offline
bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
self.mnode, self.volname))
- bricks_to_bring_offline = filter(None, (
+ bricks_to_bring_offline = list(filter(None, (
bricks_to_bring_offline_dict['hot_tier_bricks'] +
bricks_to_bring_offline_dict['cold_tier_bricks'] +
- bricks_to_bring_offline_dict['volume_bricks']))
+ bricks_to_bring_offline_dict['volume_bricks'])))
# Bring brick offline
g.log.info('Bringing bricks %s offline...', bricks_to_bring_offline)
@@ -243,8 +248,10 @@ class TestSelfHeal(GlusterBaseClass):
mount_obj.mountpoint)
# Create files
g.log.info('Creating files...')
- command = ("python %s create_files -f 100 --fixed-file-size 10k %s"
- % (self.script_upload_path, mount_obj.mountpoint))
+ command = ("/usr/bin/env python%d %s create_files -f 100 "
+ "--fixed-file-size 10k %s" % (
+ sys.version_info.major, self.script_upload_path,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, command,
user=mount_obj.user)
@@ -338,8 +345,9 @@ class TestSelfHeal(GlusterBaseClass):
mount_obj.mountpoint)
# Create files
g.log.info('Creating files...')
- command = ("python %s create_files -f 1000 %s"
- % (self.script_upload_path, mount_obj.mountpoint))
+ command = ("/usr/bin/env python%d %s create_files -f 1000 %s" % (
+ sys.version_info.major, self.script_upload_path,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, command,
user=mount_obj.user)
@@ -442,10 +450,10 @@ class TestSelfHeal(GlusterBaseClass):
# Select bricks to bring offline
bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
self.mnode, self.volname))
- bricks_to_bring_offline = filter(None, (
+ bricks_to_bring_offline = list(filter(None, (
bricks_to_bring_offline_dict['hot_tier_bricks'] +
bricks_to_bring_offline_dict['cold_tier_bricks'] +
- bricks_to_bring_offline_dict['volume_bricks']))
+ bricks_to_bring_offline_dict['volume_bricks'])))
# Bring brick offline
g.log.info('Bringing bricks %s offline...', bricks_to_bring_offline)
@@ -466,8 +474,9 @@ class TestSelfHeal(GlusterBaseClass):
mount_obj.client_system, mount_obj.mountpoint)
# Create 50k files
g.log.info('Creating files...')
- command = ("python %s create_files -f 50000 %s"
- % (self.script_upload_path, mount_obj.mountpoint))
+ command = ("/usr/bin/env python%d %s create_files -f 50000 %s" % (
+ sys.version_info.major, self.script_upload_path,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, command,
user=mount_obj.user)
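The list(filter(...)) changes above address another Python 3 difference: filter() now returns a lazy iterator instead of a list, so a result that is logged, indexed, or iterated more than once has to be materialized explicitly. A small self-contained illustration with made-up brick paths:

    # Hypothetical brick lists; only the filter()/list() behaviour matters.
    hot_tier_bricks = []
    cold_tier_bricks = []
    volume_bricks = ['server1:/bricks/brick0', None, 'server2:/bricks/brick1']

    bricks_to_bring_offline = list(filter(
        None, hot_tier_bricks + cold_tier_bricks + volume_bricks))

    # Same result on Python 2 and 3, and it can be reused and logged:
    print('Bringing bricks %s offline...' % bricks_to_bring_offline)
    # ['server1:/bricks/brick0', 'server2:/bricks/brick1']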
diff --git a/tests/functional/afr/heal/test_self_heal_daemon_process.py b/tests/functional/afr/heal/test_self_heal_daemon_process.py
index edb4575eb..07bdd8eec 100755
--- a/tests/functional/afr/heal/test_self_heal_daemon_process.py
+++ b/tests/functional/afr/heal/test_self_heal_daemon_process.py
@@ -18,9 +18,12 @@
Test Cases in this module tests the self heal daemon process.
"""
-import time
import calendar
+import sys
+import time
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_libs import (
@@ -55,7 +58,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
@classmethod
def setUpClass(cls):
# Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on mounts",
@@ -78,7 +81,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
"""
# calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
self.all_mounts_procs = []
self.io_validation_complete = False
@@ -112,7 +115,7 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
g.log.info("Successful in Unmount Volume and Cleanup Volume")
# calling GlusterBaseClass tearDownClass
- GlusterBaseClass.tearDown.im_func(self)
+ self.get_super_method(self, 'tearDown')()
def test_glustershd_with_add_remove_brick(self):
"""
@@ -445,10 +448,10 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
# select bricks to bring offline
bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
self.mnode, self.volname))
- bricks_to_bring_offline = filter(None, (
+ bricks_to_bring_offline = list(filter(None, (
bricks_to_bring_offline_dict['hot_tier_bricks'] +
bricks_to_bring_offline_dict['cold_tier_bricks'] +
- bricks_to_bring_offline_dict['volume_bricks']))
+ bricks_to_bring_offline_dict['volume_bricks'])))
# bring bricks offline
g.log.info("Going to bring down the brick process "
@@ -529,10 +532,10 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
# Select bricks to bring offline
bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
self.mnode, self.volname))
- bricks_to_bring_offline = filter(None, (
+ bricks_to_bring_offline = list(filter(None, (
bricks_to_bring_offline_dict['hot_tier_bricks'] +
bricks_to_bring_offline_dict['cold_tier_bricks'] +
- bricks_to_bring_offline_dict['volume_bricks']))
+ bricks_to_bring_offline_dict['volume_bricks'])))
# Bring brick offline
g.log.info('Bringing bricks %s offline...', bricks_to_bring_offline)
@@ -551,8 +554,10 @@ class SelfHealDaemonProcessTests(GlusterBaseClass):
for mount_obj in self.mounts:
g.log.info("Starting IO on %s:%s",
mount_obj.client_system, mount_obj.mountpoint)
- cmd = ("python %s create_files -f 100 %s/test_dir"
- % (self.script_upload_path, mount_obj.mountpoint))
+ cmd = ("/usr/bin/env python%d %s create_files -f 100 "
+ "%s/test_dir" % (
+ sys.version_info.major, self.script_upload_path,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, cmd,
user=mount_obj.user)
self.all_mounts_procs.append(proc)
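Finally, every remote IO command in this patch replaces the bare "python" interpreter with "/usr/bin/env python%d" filled in from sys.version_info.major, so the helper script runs under the same major Python version as the test harness (assuming a matching python2/python3 binary exists on the client). A small sketch of how such a command string is assembled; the script path and mount point below are hypothetical placeholders.

    import sys

    # Hypothetical values; in the tests these come from the test class
    # and the mount object.
    script_upload_path = '/usr/share/glustolibs/io/scripts/file_dir_ops.py'
    mountpoint = '/mnt/testvol_glusterfs'

    cmd = "/usr/bin/env python%d %s create_files -f 100 %s" % (
        sys.version_info.major, script_upload_path, mountpoint)

    print(cmd)
    # e.g. "/usr/bin/env python3 /usr/share/.../file_dir_ops.py create_files -f 100 /mnt/testvol_glusterfs"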