Diffstat (limited to 'tests')
-rwxr-xr-x  tests/functional/arbiter/test_create_snapshot_and_verify_content.py  46
-rwxr-xr-x  tests/functional/arbiter/test_metadata_self_heal.py                  11
-rwxr-xr-x  tests/functional/arbiter/test_mount_point_while_deleting_files.py    80
-rwxr-xr-x  tests/functional/arbiter/test_self_heal_differing_in_file_type.py    13
-rw-r--r--  tests/functional/arbiter/test_self_heal_symbolic_links.py            12
5 files changed, 84 insertions(+), 78 deletions(-)
diff --git a/tests/functional/arbiter/test_create_snapshot_and_verify_content.py b/tests/functional/arbiter/test_create_snapshot_and_verify_content.py
index dd7b33c99..ca8d761dd 100755
--- a/tests/functional/arbiter/test_create_snapshot_and_verify_content.py
+++ b/tests/functional/arbiter/test_create_snapshot_and_verify_content.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2015-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2015-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -27,7 +27,9 @@ from glustolibs.gluster.volume_libs import (
wait_for_volume_process_to_be_online,
get_subvols)
from glustolibs.misc.misc_libs import upload_scripts
-from glustolibs.io.utils import collect_mounts_arequal
+from glustolibs.io.utils import (
+ collect_mounts_arequal,
+ validate_io_procs)
@runs_on([['distributed-replicated', 'replicated'],
@@ -118,21 +120,20 @@ class TestArbiterSelfHeal(GlusterBaseClass):
g.log.info("Generating data for %s:%s",
self.mounts[0].client_system, self.mounts[0].mountpoint)
# Create dirs with file
+ all_mounts_procs = []
g.log.info('Creating dirs with file...')
command = ("/usr/bin/env python%d %s create_deep_dirs_with_files "
- "-d 2 "
- "-l 2 "
- "-n 2 "
- "-f 20 "
- "%s" % (
+ "-d 2 -l 2 -n 2 -f 20 %s" % (
sys.version_info.major, self.script_upload_path,
self.mounts[0].mountpoint))
-        ret, _, err = g.run(self.mounts[0].client_system, command,
-                            user=self.mounts[0].user)
-
-        self.assertFalse(ret, err)
-        g.log.info("IO is successful")
+        proc = g.run_async(self.mounts[0].client_system, command,
+                           user=self.mounts[0].user)
+        all_mounts_procs.append(proc)
+        # Validate IO
+        self.assertTrue(
+            validate_io_procs(all_mounts_procs, self.mounts),
+            "IO failed on some of the clients")
# Get arequal before snapshot
g.log.info('Getting arequal before snapshot...')
@@ -151,21 +152,20 @@ class TestArbiterSelfHeal(GlusterBaseClass):
g.log.info("Generating data for %s:%s",
self.mounts[0].client_system, self.mounts[0].mountpoint)
# Create dirs with file
+ all_mounts_procs = []
g.log.info('Adding dirs with file...')
command = ("/usr/bin/env python%d %s create_deep_dirs_with_files "
- "-d 2 "
- "-l 2 "
- "-n 2 "
- "-f 20 "
- "%s" % (
+ "-d 2 -l 2 -n 2 -f 20 %s" % (
sys.version_info.major, self.script_upload_path,
self.mounts[0].mountpoint+'/new_files'))
-
- ret, _, err = g.run(self.mounts[0].client_system, command,
- user=self.mounts[0].user)
-
- self.assertFalse(ret, err)
- g.log.info("IO is successful")
+ proc = g.run_async(self.mounts[0].client_system, command,
+ user=self.mounts[0].user)
+ all_mounts_procs.append(proc)
+
+ # Validate IO
+ self.assertTrue(
+ validate_io_procs(all_mounts_procs, self.mounts),
+ "IO failed on some of the clients")
# Stop the volume
g.log.info("Stopping %s ...", self.volname)
diff --git a/tests/functional/arbiter/test_metadata_self_heal.py b/tests/functional/arbiter/test_metadata_self_heal.py
index ba359da7e..56afaf95e 100755
--- a/tests/functional/arbiter/test_metadata_self_heal.py
+++ b/tests/functional/arbiter/test_metadata_self_heal.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2015-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2015-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -32,7 +32,8 @@ from glustolibs.gluster.heal_libs import (monitor_heal_completion,
is_shd_daemonized)
from glustolibs.gluster.heal_ops import trigger_heal
from glustolibs.misc.misc_libs import upload_scripts
-from glustolibs.io.utils import (collect_mounts_arequal, validate_io_procs)
+from glustolibs.io.utils import (collect_mounts_arequal,
+ wait_for_io_to_complete)
@runs_on([['replicated', 'distributed-replicated'],
@@ -201,10 +202,10 @@ class TestMetadataSelfHeal(GlusterBaseClass):
user=self.mounts[0].user)
all_mounts_procs.append(proc)
-        # Validate IO
+        # Wait for IO to complete
         self.assertTrue(
-            validate_io_procs(all_mounts_procs, self.mounts),
-            "IO failed on some of the clients")
+            wait_for_io_to_complete(all_mounts_procs, self.mounts),
+            "IO failed to complete on some of the clients")
# Setting options
g.log.info('Setting options...')
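
This file makes a different swap: validate_io_procs gives way to
wait_for_io_to_complete, presumably because the test only needs the workload
to have finished before it sets volume options and triggers heal, rather
than treating the IO itself as the assertion of interest. A minimal sketch
under that assumption, with all_mounts_procs and mounts as in the pattern
above:

    from glustolibs.io.utils import wait_for_io_to_complete

    # Block until every async IO process has exited; the helper returns
    # False if any process could not complete, which the test surfaces
    # as a failure
    if not wait_for_io_to_complete(all_mounts_procs, mounts):
        raise AssertionError("IO failed to complete on some of the clients")
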
diff --git a/tests/functional/arbiter/test_mount_point_while_deleting_files.py b/tests/functional/arbiter/test_mount_point_while_deleting_files.py
index 6a8e1fe1b..88c83346c 100755
--- a/tests/functional/arbiter/test_mount_point_while_deleting_files.py
+++ b/tests/functional/arbiter/test_mount_point_while_deleting_files.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2016-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2016-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -20,6 +20,7 @@ import sys
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.glusterdir import rmdir
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_libs import setup_volume, cleanup_volume
from glustolibs.gluster.volume_ops import get_volume_list
@@ -90,7 +91,6 @@ class VolumeSetDataSelfHealTests(GlusterBaseClass):
# Create and mount volumes
cls.mount_points = []
- cls.client = cls.clients[0]
for volume_config in cls.volume_configs:
# Setup volume
ret = setup_volume(mnode=cls.mnode,
@@ -102,22 +102,22 @@ class VolumeSetDataSelfHealTests(GlusterBaseClass):
% volume_config['name'])
g.log.info("Successful in setting volume %s",
volume_config['name'])
-
- # Mount volume
- mount_point = (os.path.join("/mnt", '_'.join(
- [volume_config['name'], cls.mount_type])))
- cls.mount_points.append(mount_point)
- ret, _, _ = mount_volume(volume_config['name'],
- cls.mount_type,
- mount_point,
- cls.mnode,
- cls.client)
- if ret:
- raise ExecutionError(
- "Failed to do gluster mount on volume %s "
- % cls.volname)
- g.log.info("Successfully mounted %s on client %s",
- cls.volname, cls.client)
+ for mount_obj in cls.mounts:
+ # Mount volume
+ mount_point = (os.path.join("/mnt", '_'.join(
+ [volume_config['name'], cls.mount_type])))
+ cls.mount_points.append(mount_point)
+ ret, _, _ = mount_volume(volume_config['name'],
+ cls.mount_type,
+ mount_point,
+ cls.mnode,
+ mount_obj.client_system)
+ if ret:
+                    raise ExecutionError(
+                        "Failed to do gluster mount on volume %s "
+                        % volume_config['name'])
+                g.log.info("Successfully mounted %s on client %s",
+                           volume_config['name'], mount_obj.client_system)
def setUp(self):
"""
@@ -157,6 +157,22 @@ class VolumeSetDataSelfHealTests(GlusterBaseClass):
"""
Clean up the volume and umount volume from client
"""
+ # umount all volumes
+ for mount_obj in cls.mounts:
+ ret, _, _ = umount_volume(
+ mount_obj.client_system, mount_obj.mountpoint)
+ if ret:
+ raise ExecutionError(
+                    "Failed to umount %s on client %s"
+                    % (mount_obj.mountpoint, mount_obj.client_system))
+            g.log.info("Successfully umounted %s on client %s",
+                       mount_obj.mountpoint, mount_obj.client_system)
+ ret = rmdir(mount_obj.client_system, mount_obj.mountpoint)
+ if not ret:
+                raise ExecutionError("Failed to remove mount directory %s"
+                                     % mount_obj.mountpoint)
+ g.log.info("Mount directory is removed successfully")
+
# stopping all volumes
g.log.info("Starting to Cleanup all Volumes")
volume_list = get_volume_list(cls.mnode)
@@ -167,16 +183,6 @@ class VolumeSetDataSelfHealTests(GlusterBaseClass):
g.log.info("Volume: %s cleanup is done", volume)
g.log.info("Successfully Cleanedup all Volumes")
- # umount all volumes
- for mount_point in cls.mount_points:
- ret, _, _ = umount_volume(cls.client, mount_point)
- if ret:
- raise ExecutionError(
- "Failed to umount on volume %s "
- % cls.volname)
- g.log.info("Successfully umounted %s on client %s",
- cls.volname, cls.client)
-
# calling GlusterBaseClass tearDownClass
cls.get_super_method(cls, 'tearDownClass')()
@@ -189,9 +195,9 @@ class VolumeSetDataSelfHealTests(GlusterBaseClass):
- Check if all the files are deleted from the mount point
from both the servers
"""
+ # pylint: disable=too-many-locals,too-many-statements
# create files on all mounts
g.log.info("Starting IO on all mounts...")
- all_mounts_procs = []
for mount_obj in self.mounts:
g.log.info("Generating data for %s:%s",
mount_obj.client_system, mount_obj.mountpoint)
@@ -206,13 +212,12 @@ class VolumeSetDataSelfHealTests(GlusterBaseClass):
proc = g.run_async(mount_obj.client_system, command,
user=mount_obj.user)
- all_mounts_procs.append(proc)
+ self.all_mounts_procs.append(proc)
# Validate IO
self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "IO failed on some of the clients"
- )
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients")
# select bricks to bring offline
volume_list = get_volume_list(self.mnode)
@@ -235,7 +240,7 @@ class VolumeSetDataSelfHealTests(GlusterBaseClass):
# delete files on all mounts
g.log.info("Deleting IO on all mounts...")
- all_mounts_procs = []
+ self.all_mounts_procs = []
for mount_obj in self.mounts:
g.log.info("Deleting data for %s:%s",
mount_obj.client_system, mount_obj.mountpoint)
@@ -246,10 +251,9 @@ class VolumeSetDataSelfHealTests(GlusterBaseClass):
mount_obj.mountpoint)
proc = g.run_async(mount_obj.client_system, command,
user=mount_obj.user)
- all_mounts_procs.append(proc)
+ self.all_mounts_procs.append(proc)
# Validate IO
self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "IO failed on some of the clients"
- )
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients")
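
The teardown rework above moves the unmount step ahead of volume cleanup and
also removes the now-empty mount directories, so cleanup_volume never has to
stop a volume that a client still has mounted. A minimal sketch of the
resulting order, assuming umount_volume comes from
glustolibs.gluster.mount_ops as elsewhere in glusto-tests and that rmdir and
cleanup_volume return True on success:

    from glustolibs.gluster.exceptions import ExecutionError
    from glustolibs.gluster.glusterdir import rmdir
    from glustolibs.gluster.mount_ops import umount_volume
    from glustolibs.gluster.volume_libs import cleanup_volume
    from glustolibs.gluster.volume_ops import get_volume_list

    def teardown_mounts_then_volumes(mnode, mounts):
        # Unmount and remove every mount point first, so no volume is
        # still mounted on a client when it gets stopped and deleted
        for mount_obj in mounts:
            ret, _, _ = umount_volume(mount_obj.client_system,
                                      mount_obj.mountpoint)
            if ret:
                raise ExecutionError("Failed to umount %s on client %s"
                                     % (mount_obj.mountpoint,
                                        mount_obj.client_system))
            if not rmdir(mount_obj.client_system, mount_obj.mountpoint):
                raise ExecutionError("Failed to remove mount directory %s"
                                     % mount_obj.mountpoint)
        # Only then stop and delete every volume on the cluster
        for volume in get_volume_list(mnode):
            if not cleanup_volume(mnode, volume):
                raise ExecutionError("Unable to delete volume %s" % volume)
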
diff --git a/tests/functional/arbiter/test_self_heal_differing_in_file_type.py b/tests/functional/arbiter/test_self_heal_differing_in_file_type.py
index 25954a84b..201846fe4 100755
--- a/tests/functional/arbiter/test_self_heal_differing_in_file_type.py
+++ b/tests/functional/arbiter/test_self_heal_differing_in_file_type.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2015-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2015-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -29,7 +29,9 @@ from glustolibs.gluster.heal_libs import (monitor_heal_completion,
is_volume_in_split_brain,
is_shd_daemonized)
from glustolibs.misc.misc_libs import upload_scripts
-from glustolibs.io.utils import (collect_mounts_arequal, validate_io_procs)
+from glustolibs.io.utils import (
+ collect_mounts_arequal, validate_io_procs,
+ wait_for_io_to_complete)
@runs_on([['replicated', 'distributed-replicated'],
@@ -145,11 +147,10 @@ class TestSelfHeal(GlusterBaseClass):
user=self.mounts[0].user)
all_mounts_procs.append(proc)
-        # Validate IO
+        # Wait for IO to complete
         self.assertTrue(
-            validate_io_procs(all_mounts_procs, self.mounts),
-            "IO failed on some of the clients"
-        )
+            wait_for_io_to_complete(all_mounts_procs, self.mounts),
+            "IO failed to complete on some of the clients")
# Get arequal before getting bricks offline
g.log.info('Getting arequal before getting bricks offline...')
diff --git a/tests/functional/arbiter/test_self_heal_symbolic_links.py b/tests/functional/arbiter/test_self_heal_symbolic_links.py
index ce9b10304..f0475a8d2 100644
--- a/tests/functional/arbiter/test_self_heal_symbolic_links.py
+++ b/tests/functional/arbiter/test_self_heal_symbolic_links.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2015-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2015-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -31,7 +31,8 @@ from glustolibs.gluster.heal_libs import (monitor_heal_completion,
is_shd_daemonized)
from glustolibs.gluster.heal_ops import trigger_heal
from glustolibs.misc.misc_libs import upload_scripts
-from glustolibs.io.utils import (collect_mounts_arequal, validate_io_procs)
+from glustolibs.io.utils import (collect_mounts_arequal,
+ wait_for_io_to_complete)
@runs_on([['replicated', 'distributed-replicated'],
@@ -163,11 +164,10 @@ class TestSelfHeal(GlusterBaseClass):
user=self.mounts[0].user)
all_mounts_procs.append(proc)
-        # Validate IO
+        # Wait for IO to complete
         self.assertTrue(
-            validate_io_procs(all_mounts_procs, self.mounts),
-            "IO failed on some of the clients"
-        )
+            wait_for_io_to_complete(all_mounts_procs, self.mounts),
+            "IO failed to complete on some of the clients")
# Get arequal before getting bricks offline
g.log.info('Getting arequal before getting bricks offline...')