Diffstat (limited to 'tests/functional/arbiter/test_mount_point_while_deleting_files.py')
-rwxr-xr-x  tests/functional/arbiter/test_mount_point_while_deleting_files.py  213
1 file changed, 102 insertions(+), 111 deletions(-)
diff --git a/tests/functional/arbiter/test_mount_point_while_deleting_files.py b/tests/functional/arbiter/test_mount_point_while_deleting_files.py
index 1bbdf279f..68f880663 100755
--- a/tests/functional/arbiter/test_mount_point_while_deleting_files.py
+++ b/tests/functional/arbiter/test_mount_point_while_deleting_files.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2016-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2016-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -14,10 +14,12 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
import os
+
from glusto.core import Glusto as g
+
from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.glusterdir import rmdir
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.volume_libs import setup_volume, cleanup_volume
from glustolibs.gluster.volume_ops import get_volume_list
@@ -32,22 +34,19 @@ from glustolibs.gluster.mount_ops import (mount_volume,
from glustolibs.misc.misc_libs import upload_scripts
-@runs_on([['replicated'],
- ['glusterfs']])
+@runs_on([['arbiter'], ['glusterfs']])
class VolumeSetDataSelfHealTests(GlusterBaseClass):
@classmethod
def setUpClass(cls):
# Calling GlusterBaseClass setUpClass
- GlusterBaseClass.setUpClass.im_func(cls)
+ cls.get_super_method(cls, 'setUpClass')()
# Upload io scripts for running IO on mounts
g.log.info("Upload io scripts to clients %s for running IO on mounts",
cls.clients)
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
- ret = upload_scripts(cls.clients, [script_local_path])
+ ret = upload_scripts(cls.clients, cls.script_upload_path)
if not ret:
raise ExecutionError("Failed to upload IO scripts to clients %s"
% cls.clients)
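
Note on the super-call change above: im_func exists only on Python 2 unbound methods, so GlusterBaseClass.setUpClass.im_func(cls) raises AttributeError under Python 3; get_super_method is the base class's portable replacement. A minimal, self-contained sketch of the same idea (the class names here are illustrative, not the glustolibs helper itself):

    # Portable classmethod super-call -- what get_super_method boils down to.
    class Base(object):
        @classmethod
        def set_up_class(cls):
            print("Base.set_up_class for %s" % cls.__name__)

    class Child(Base):
        @classmethod
        def set_up_class(cls):
            # Python 2 only: Base.set_up_class.im_func(cls)
            # Works on both Python 2 and 3:
            getattr(super(Child, cls), 'set_up_class')()
            print("Child.set_up_class for %s" % cls.__name__)

    Child.set_up_class()  # Base.set_up_class runs first, then Child's

The same hunk also drops the duplicated script path: upload_scripts now receives cls.script_upload_path directly instead of a one-element list built from a second copy of the same literal.
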
@@ -55,69 +54,62 @@ class VolumeSetDataSelfHealTests(GlusterBaseClass):
cls.clients)
# Setup Volumes
- if cls.volume_type == "replicated":
- cls.volume_configs = []
- cls.mounts_dict_list = []
- # Define two replicated volumes
- for i in range(1, 3):
- cls.volume['voltype'] = {
- 'type': 'replicated',
- 'replica_count': 3,
- 'arbiter_count': 1,
- 'transport': 'tcp'}
-
- volume_config = {'name': 'testvol_%s_%d'
- % (cls.volume['voltype']['type'], i),
- 'servers': cls.servers,
- 'voltype': cls.volume['voltype']}
- cls.volume_configs.append(volume_config)
-
- # redefine mounts
- for client in cls.all_clients_info.keys():
- mount = {
- 'protocol': cls.mount_type,
- 'server': cls.mnode,
- 'volname': volume_config['name'],
- 'client': cls.all_clients_info[client],
- 'mountpoint': (os.path.join(
- "/mnt", '_'.join([volume_config['name'],
- cls.mount_type]))),
- 'options': ''
- }
- cls.mounts_dict_list.append(mount)
-
- cls.mounts = create_mount_objs(cls.mounts_dict_list)
-
- # Create and mount volumes
- cls.mount_points = []
- cls.client = cls.clients[0]
- for volume_config in cls.volume_configs:
- # Setup volume
- ret = setup_volume(mnode=cls.mnode,
- all_servers_info=cls.all_servers_info,
- volume_config=volume_config,
- force=False)
- if not ret:
- raise ExecutionError("Failed to setup Volume %s"
- % volume_config['name'])
- g.log.info("Successful in setting volume %s",
- volume_config['name'])
-
- # Mount volume
- mount_point = (os.path.join("/mnt", '_'.join(
- [volume_config['name'], cls.mount_type])))
- cls.mount_points.append(mount_point)
- ret, _, _ = mount_volume(volume_config['name'],
- cls.mount_type,
- mount_point,
- cls.mnode,
- cls.client)
- if ret:
- raise ExecutionError(
- "Failed to do gluster mount on volume %s "
- % cls.volname)
- g.log.info("Successfully mounted %s on client %s",
- cls.volname, cls.client)
+ cls.volume_configs = []
+ cls.mounts_dict_list = []
+ cls.client = cls.clients[0]
+
+ # Define two replicated volumes
+ for i in range(1, 3):
+ volume_config = {
+ 'name': 'testvol_%s_%d' % (cls.volume['voltype']['type'], i),
+ 'servers': cls.servers,
+ 'voltype': cls.volume['voltype']}
+ cls.volume_configs.append(volume_config)
+
+ # Redefine mounts
+ mount = {
+ 'protocol': cls.mount_type,
+ 'server': cls.mnode,
+ 'volname': volume_config['name'],
+ 'client': cls.all_clients_info[cls.client],
+ 'mountpoint': (os.path.join(
+ "/mnt", '_'.join([volume_config['name'],
+ cls.mount_type]))),
+ 'options': ''
+ }
+ cls.mounts_dict_list.append(mount)
+
+ cls.mounts = create_mount_objs(cls.mounts_dict_list)
+
+ # Create and mount volumes
+ cls.mount_points = []
+ for volume_config in cls.volume_configs:
+
+ # Setup volume
+ ret = setup_volume(mnode=cls.mnode,
+ all_servers_info=cls.all_servers_info,
+ volume_config=volume_config,
+ force=False)
+            if not ret:
+                raise ExecutionError("Failed to set up volume %s"
+                                     % volume_config['name'])
+            g.log.info("Successfully set up volume %s",
+                       volume_config['name'])
+
+ # Mount volume
+ mount_point = (os.path.join("/mnt", '_'.join(
+ [volume_config['name'], cls.mount_type])))
+ cls.mount_points.append(mount_point)
+ ret, _, _ = mount_volume(volume_config['name'],
+ cls.mount_type,
+ mount_point,
+ cls.mnode, cls.client)
+            if ret:
+                raise ExecutionError(
+                    "Failed to do gluster mount on volume %s"
+                    % volume_config['name'])
+            g.log.info("Successfully mounted %s on client %s",
+                       volume_config['name'], cls.client)
def setUp(self):
"""
@@ -125,7 +117,7 @@ class VolumeSetDataSelfHealTests(GlusterBaseClass):
"""
# calling GlusterBaseClass setUp
- GlusterBaseClass.setUp.im_func(self)
+ self.get_super_method(self, 'setUp')()
self.all_mounts_procs = []
self.io_validation_complete = False
@@ -152,33 +144,33 @@ class VolumeSetDataSelfHealTests(GlusterBaseClass):
raise ExecutionError("Failed to list all files and dirs")
g.log.info("Listing all files and directories is successful")
- @classmethod
- def tearDownClass(cls):
- """
- Clean up the volume and umount volume from client
- """
+ # umount all volumes
+        for mount_point in self.mount_points:
+            ret, _, _ = umount_volume(self.client, mount_point)
+            if ret:
+                raise ExecutionError(
+                    "Failed to umount mount point %s" % mount_point)
+            g.log.info("Successfully umounted %s on client %s",
+                       mount_point, self.client)
+            ret = rmdir(self.client, mount_point)
+            if not ret:
+                raise ExecutionError(
+                    "Failed to remove mount directory %s" % mount_point)
+            g.log.info("Mount directory %s removed successfully",
+                       mount_point)
+
# stopping all volumes
- g.log.info("Starting to Cleanup all Volumes")
- volume_list = get_volume_list(cls.mnode)
+ volume_list = get_volume_list(self.mnode)
for volume in volume_list:
- ret = cleanup_volume(cls.mnode, volume)
+ ret = cleanup_volume(self.mnode, volume)
if not ret:
raise ExecutionError("Failed to cleanup Volume %s" % volume)
g.log.info("Volume: %s cleanup is done", volume)
g.log.info("Successfully Cleanedup all Volumes")
- # umount all volumes
- for mount_point in cls.mount_points:
- ret, _, _ = umount_volume(cls.client, mount_point)
- if ret:
- raise ExecutionError(
- "Failed to umount on volume %s "
- % cls.volname)
- g.log.info("Successfully umounted %s on client %s",
- cls.volname, cls.client)
-
- # calling GlusterBaseClass tearDownClass
- GlusterBaseClass.tearDownClass.im_func(cls)
+ # calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
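
Cleanup moves from tearDownClass into tearDown, so every test unmounts its clients before the volumes are stopped; tearing down a volume that still has live mounts is the failure mode the new ordering avoids. A condensed sketch of that ordering, using the helpers imported at the top of the file (error handling trimmed):

    from glustolibs.gluster.glusterdir import rmdir
    from glustolibs.gluster.mount_ops import umount_volume
    from glustolibs.gluster.volume_libs import cleanup_volume
    from glustolibs.gluster.volume_ops import get_volume_list

    def teardown_order(self):
        # 1. Release every client mount, then remove its mount directory.
        for mount_point in self.mount_points:
            umount_volume(self.client, mount_point)
            rmdir(self.client, mount_point)
        # 2. Only afterwards stop and delete the volumes themselves.
        for volume in get_volume_list(self.mnode):
            cleanup_volume(self.mnode, volume)
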
def test_mount_point_not_go_to_rofs(self):
"""
@@ -189,39 +181,37 @@ class VolumeSetDataSelfHealTests(GlusterBaseClass):
- Check if all the files are deleted from the mount point
from both the servers
"""
+ # pylint: disable=too-many-locals,too-many-statements
# create files on all mounts
g.log.info("Starting IO on all mounts...")
- all_mounts_procs = []
for mount_obj in self.mounts:
g.log.info("Generating data for %s:%s",
mount_obj.client_system, mount_obj.mountpoint)
# Create files
g.log.info('Creating files...')
- command = ("python %s create_files "
+ command = ("/usr/bin/env python %s create_files "
"-f 100 "
"--fixed-file-size 1M "
- "%s"
- % (self.script_upload_path, mount_obj.mountpoint))
+ "%s" % (
+ self.script_upload_path,
+ mount_obj.mountpoint))
proc = g.run_async(mount_obj.client_system, command,
user=mount_obj.user)
- all_mounts_procs.append(proc)
+ self.all_mounts_procs.append(proc)
# Validate IO
self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "IO failed on some of the clients"
- )
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients")
# select bricks to bring offline
volume_list = get_volume_list(self.mnode)
for volname in volume_list:
bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
self.mnode, volname))
- bricks_to_bring_offline = filter(None, (
- bricks_to_bring_offline_dict['hot_tier_bricks'] +
- bricks_to_bring_offline_dict['cold_tier_bricks'] +
- bricks_to_bring_offline_dict['volume_bricks']))
+ bricks_to_bring_offline = (
+ bricks_to_bring_offline_dict['volume_bricks'])
# bring bricks offline
g.log.info("Going to bring down the brick process for %s",
@@ -234,20 +224,21 @@ class VolumeSetDataSelfHealTests(GlusterBaseClass):
# delete files on all mounts
g.log.info("Deleting IO on all mounts...")
- all_mounts_procs = []
+ self.all_mounts_procs = []
for mount_obj in self.mounts:
g.log.info("Deleting data for %s:%s",
mount_obj.client_system, mount_obj.mountpoint)
# Delete files
g.log.info('Deleting files...')
- command = ("python %s delete %s"
- % (self.script_upload_path, mount_obj.mountpoint))
+ command = "/usr/bin/env python %s delete %s" % (
+ self.script_upload_path,
+ mount_obj.mountpoint)
proc = g.run_async(mount_obj.client_system, command,
user=mount_obj.user)
- all_mounts_procs.append(proc)
+ self.all_mounts_procs.append(proc)
# Validate IO
self.assertTrue(
- validate_io_procs(all_mounts_procs, self.mounts),
- "IO failed on some of the clients"
- )
+ validate_io_procs(self.all_mounts_procs, self.mounts),
+ "IO failed on some of the clients")
+ self.io_validation_complete = True
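
Keeping the async procs on self.all_mounts_procs and flipping self.io_validation_complete after the final validation lets tearDown decide whether it still has to reap in-flight IO before unmounting. A hedged sketch of that guard, a pattern used across glusto-tests (wait_for_io_to_complete comes from glustolibs.io.utils; its use here is assumed rather than shown in this diff):

    # Hypothetical tearDown guard -- assumed pattern, not part of this diff.
    from glustolibs.io.utils import wait_for_io_to_complete

    if not self.io_validation_complete:
        ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
        if not ret:
            raise ExecutionError("IO failed on some of the clients")
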