author    kshithijiyer <kshithij.ki@gmail.com>       2020-07-03 10:54:51 +0530
committer Arthy Loganathan <aloganat@redhat.com>     2020-07-06 06:15:51 +0000
commit    4214d5b070548ee7fe369d9ab00da695bd61bf50 (patch)
tree      632fd99be168ddccd7e94c8ada8e83a348305761 /tests/functional
parent    e16d0dfb3bff3ecf83a28f9f38ec055b1ca92ffe (diff)
[Testfix] Fix test_mount_point_not_go_to_rofs failure
Problem: Testcase test_mount_point_not_go_to_rofs fails every time in the CI
runs with the below traceback:

>       ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
tests/functional/arbiter/test_mount_point_while_deleting_files.py:137:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
build/bdist.linux-x86_64/egg/glustolibs/io/utils.py:290: in wait_for_io_to_complete
    ???
/usr/lib/python2.7/site-packages/glusto/connectible.py:247: in async_communicate
    stdout, stderr = p.communicate()
/usr/lib64/python2.7/subprocess.py:800: in communicate
    return self._communicate(input)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <subprocess.Popen object at 0x7febb64238d0>, input = None

    def _communicate(self, input):
        if self.stdin:
            # Flush stdio buffer. This might block, if the user has
            # been writing to .stdin in an uncontrolled fashion.
>           self.stdin.flush()
E           ValueError: I/O operation on closed file

/usr/lib64/python2.7/subprocess.py:1396: ValueError

This happens because self.io_validation_complete is never set to True in the
testcase, so teardown waits again on IO processes whose pipes were already
consumed when the test validated the IO.

Fix: Set self.io_validation_complete to True once the IO is validated, move
the cleanup code from tearDownClass to tearDown, and change the logic so that
both clients are not added to self.mounts.

Change-Id: I51ed635e713838ee3054c4d1dd8c6cdc16bbd8bf
Signed-off-by: kshithijiyer <kshithij.ki@gmail.com>
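For context, the guard this fix relies on looks roughly like the sketch below.
It is a minimal illustration, not the patched file itself: the class name is
hypothetical, the IO-starting step and self.mounts setup are abbreviated, and
it assumes the usual glustolibs helpers (validate_io_procs and
wait_for_io_to_complete from glustolibs.io.utils) on a GlusterBaseClass-derived
test.

    from glustolibs.gluster.exceptions import ExecutionError
    from glustolibs.gluster.gluster_base_class import GlusterBaseClass
    from glustolibs.io.utils import validate_io_procs, wait_for_io_to_complete


    class IOGuardSketch(GlusterBaseClass):
        # Hypothetical test class showing the io_validation_complete guard.

        def setUp(self):
            self.get_super_method(self, 'setUp')()
            self.all_mounts_procs = []
            # False until the test itself has read the IO subprocesses'
            # output; tearDown checks this flag before waiting again.
            self.io_validation_complete = False

        def test_example(self):
            # ... start async IO on the mounts and append the returned
            # procs to self.all_mounts_procs ...

            # validate_io_procs() reads stdout/stderr of every proc,
            # which closes their pipes.
            self.assertTrue(
                validate_io_procs(self.all_mounts_procs, self.mounts),
                "IO failed on some of the clients")
            # Record that the procs were already consumed so tearDown
            # does not call communicate() on closed pipes again.
            self.io_validation_complete = True

        def tearDown(self):
            # Only wait here if the test never validated the IO itself;
            # waiting twice is what raised
            # "ValueError: I/O operation on closed file".
            if not self.io_validation_complete:
                if not wait_for_io_to_complete(self.all_mounts_procs,
                                               self.mounts):
                    raise ExecutionError("IO failed on some of the clients")
            self.get_super_method(self, 'tearDown')()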
Diffstat (limited to 'tests/functional')
-rwxr-xr-x  tests/functional/arbiter/test_mount_point_while_deleting_files.py | 61
1 file changed, 27 insertions, 34 deletions
diff --git a/tests/functional/arbiter/test_mount_point_while_deleting_files.py b/tests/functional/arbiter/test_mount_point_while_deleting_files.py
index e1ac94ac5..68f880663 100755
--- a/tests/functional/arbiter/test_mount_point_while_deleting_files.py
+++ b/tests/functional/arbiter/test_mount_point_while_deleting_files.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2016-2020 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2016-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -34,8 +34,7 @@ from glustolibs.gluster.mount_ops import (mount_volume,
from glustolibs.misc.misc_libs import upload_scripts
-@runs_on([['arbiter'],
- ['glusterfs']])
+@runs_on([['arbiter'], ['glusterfs']])
class VolumeSetDataSelfHealTests(GlusterBaseClass):
@classmethod
def setUpClass(cls):
@@ -57,6 +56,7 @@ class VolumeSetDataSelfHealTests(GlusterBaseClass):
# Setup Volumes
cls.volume_configs = []
cls.mounts_dict_list = []
+ cls.client = cls.clients[0]
# Define two replicated volumes
for i in range(1, 3):
@@ -67,24 +67,22 @@ class VolumeSetDataSelfHealTests(GlusterBaseClass):
cls.volume_configs.append(volume_config)
# Redefine mounts
- for client in cls.all_clients_info.keys():
- mount = {
- 'protocol': cls.mount_type,
- 'server': cls.mnode,
- 'volname': volume_config['name'],
- 'client': cls.all_clients_info[client],
- 'mountpoint': (os.path.join(
- "/mnt", '_'.join([volume_config['name'],
- cls.mount_type]))),
- 'options': ''
- }
- cls.mounts_dict_list.append(mount)
-
- cls.mounts = create_mount_objs(cls.mounts_dict_list)
+ mount = {
+ 'protocol': cls.mount_type,
+ 'server': cls.mnode,
+ 'volname': volume_config['name'],
+ 'client': cls.all_clients_info[cls.client],
+ 'mountpoint': (os.path.join(
+ "/mnt", '_'.join([volume_config['name'],
+ cls.mount_type]))),
+ 'options': ''
+ }
+ cls.mounts_dict_list.append(mount)
+
+ cls.mounts = create_mount_objs(cls.mounts_dict_list)
# Create and mount volumes
cls.mount_points = []
- cls.client = cls.clients[0]
for volume_config in cls.volume_configs:
# Setup volume
@@ -146,39 +144,33 @@ class VolumeSetDataSelfHealTests(GlusterBaseClass):
raise ExecutionError("Failed to list all files and dirs")
g.log.info("Listing all files and directories is successful")
- @classmethod
- def tearDownClass(cls):
- """
- Clean up the volume and umount volume from client
- """
# umount all volumes
- for mount_obj in cls.mounts:
+ for mount_point in self.mount_points:
ret, _, _ = umount_volume(
- mount_obj.client_system, mount_obj.mountpoint)
+ self.client, mount_point)
if ret:
raise ExecutionError(
"Failed to umount on volume %s "
- % cls.volname)
+ % self.volname)
g.log.info("Successfully umounted %s on client %s",
- cls.volname, mount_obj.client_system)
- ret = rmdir(mount_obj.client_system, mount_obj.mountpoint)
+ self.volname, self.client)
+ ret = rmdir(self.client, mount_point)
if not ret:
raise ExecutionError(
- ret, "Failed to remove directory mount directory.")
+ "Failed to remove directory mount directory.")
g.log.info("Mount directory is removed successfully")
# stopping all volumes
- g.log.info("Starting to Cleanup all Volumes")
- volume_list = get_volume_list(cls.mnode)
+ volume_list = get_volume_list(self.mnode)
for volume in volume_list:
- ret = cleanup_volume(cls.mnode, volume)
+ ret = cleanup_volume(self.mnode, volume)
if not ret:
raise ExecutionError("Failed to cleanup Volume %s" % volume)
g.log.info("Volume: %s cleanup is done", volume)
g.log.info("Successfully Cleanedup all Volumes")
- # calling GlusterBaseClass tearDownClass
- cls.get_super_method(cls, 'tearDownClass')()
+ # calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
def test_mount_point_not_go_to_rofs(self):
"""
@@ -249,3 +241,4 @@ class VolumeSetDataSelfHealTests(GlusterBaseClass):
self.assertTrue(
validate_io_procs(self.all_mounts_procs, self.mounts),
"IO failed on some of the clients")
+ self.io_validation_complete = True