path: root/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
Diffstat (limited to 'glustolibs-gluster/glustolibs/gluster/gluster_base_class.py')
-rw-r--r-- glustolibs-gluster/glustolibs/gluster/gluster_base_class.py | 649
1 file changed, 647 insertions(+), 2 deletions(-)
diff --git a/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py b/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
index 8f9d0b9b0..d5864c0b1 100644
--- a/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
+++ b/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2016 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -24,11 +24,16 @@ import os
import random
import copy
import datetime
+import time
import socket
from glusto.core import Glusto as g
-from glustolibs.gluster.exceptions import ConfigError
+from glustolibs.gluster.exceptions import ConfigError, ExecutionError
from glustolibs.gluster.peer_ops import is_peer_connected, peer_status
from glustolibs.gluster.volume_ops import set_volume_options
+from glustolibs.gluster.block_ops import block_delete
+from glustolibs.gluster.block_libs import (setup_block, if_block_exists,
+ get_block_list,
+ get_block_info)
from glustolibs.gluster.volume_libs import (setup_volume,
cleanup_volume,
log_volume_info_and_status)
@@ -753,3 +758,643 @@ class GlusterBaseClass(unittest.TestCase):
msg = "Teardownclass: %s : %s" % (cls.__name__, cls.glustotest_run_id)
g.log.info(msg)
cls.inject_msg_in_gluster_logs(msg)
+
+
+class GlusterBlockBaseClass(GlusterBaseClass):
+ """GlusterBlockBaseClass sets up the volume and blocks.
+ """
+ @classmethod
+ def setup_blocks(cls, blocknames):
+        """Create the given blocks and then call the methods
+        update_block_info_dict and create_client_block_map.
+
+        Args:
+            blocknames (list): Blocks to be created.
+        Returns:
+            bool: True if all blocks are created, False otherwise.
+        """
+ if not isinstance(blocknames, list):
+ blocknames = [blocknames]
+
+ g.log.info("Creating block devices on volume %s", cls.volname)
+ for blockname in blocknames:
+ each_block = cls.gluster_block_args_info.get(blockname)
+ if each_block:
+                # Form a dict for kwargs
+ block_args_info = {}
+ block_args_info['ha'] = each_block['ha']
+ block_args_info['auth'] = each_block['auth']
+ block_args_info['prealloc'] = each_block['prealloc']
+ block_args_info['storage'] = each_block['storage']
+ block_args_info['ring-buffer'] = each_block['ring-buffer']
+
+ _rc = setup_block(
+ mnode=cls.mnode,
+ volname=each_block['volname'],
+ blockname=each_block['blockname'],
+ servers=each_block['servers'],
+ size=each_block['size'],
+ **block_args_info)
+ if not _rc:
+ g.log.error("Failed to create block on volume "
+ "%s: \n%s", cls.volname, each_block)
+ return False
+ g.log.info("Successfully created block on volume "
+ "%s: \n%s", cls.volname, each_block)
+ else:
+ g.log.error("Unable to get args info for block %s on "
+ "volume %s", blockname, cls.volname)
+ return False
+
+ # Check if all the blocks are listed in block list command
+ for blockname in blocknames:
+ each_block = cls.gluster_block_args_info.get(blockname)
+ _rc = if_block_exists(cls.mnode, each_block['volname'], blockname)
+ if not _rc:
+ return False
+
+ # Update the block info dict
+ cls.update_block_info_dict()
+ # Create client-block map
+ cls.create_client_block_map(cls.blocknames)
+ return True
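+
+    # Usage sketch (illustrative, not called anywhere in this class):
+    # inside a subclass, once gluster_block_args_info is populated, all
+    # configured blocks can be created in one call:
+    #   if not cls.setup_blocks(cls.gluster_block_args_info.keys()):
+    #       raise ExecutionError("Failed to setup blocks")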
+
+ @classmethod
+ def update_block_info_dict(cls):
+        """Update the class variable block_info_dict.
+
+        Calls the gluster-block info command for every block on the
+        volume and stores the result.
+
+        Returns:
+            bool: True if the info of every block is fetched,
+            False otherwise.
+        """
+ # Get Block dict
+ cls.blocknames = get_block_list(cls.mnode, cls.volname)
+
+ if cls.blocknames:
+ for blockname in cls.blocknames:
+ cls.block_info_dict[blockname] = (get_block_info(cls.mnode,
+ cls.volname,
+ blockname))
+ if cls.block_info_dict[blockname] is None:
+ g.log.error("Could not get block info")
+ return False
+ # Update total_number_of_blocks
+ cls.total_num_of_blocks = len(cls.blocknames)
+
+ # Log the block_info_dict
+ g.log.info("Logging Block Info:")
+        for key, value in cls.block_info_dict.iteritems():
+            g.log.info("Glusto block info: %s\n %s", key, value)
+
+ return True
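+
+    # Illustrative shape of the resulting dict (keys follow the
+    # 'gluster-block info' output consumed elsewhere in this class; the
+    # values here are placeholders, not real output):
+    #   cls.block_info_dict = {
+    #       'gluster_block_1': {'GBID': '<uuid>',
+    #                           'EXPORTED ON': ['<server_ip>', ...],
+    #                           'PASSWORD': ''},
+    #   }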
+
+ @classmethod
+ def discover_blocks_on_clients(cls, blocknames):
+ """Discover blocks on all the clients
+ """
+
+ if not isinstance(blocknames, list):
+ blocknames = [blocknames]
+
+ server_list = []
+ for blockname in blocknames:
+ block_info = get_block_info(cls.mnode, cls.volname, blockname)
+ if block_info:
+ servers_to_add = block_info.get("EXPORTED ON")
+ for server_ip in servers_to_add:
+ if server_ip not in server_list:
+ server_list.append(server_ip)
+ else:
+ g.log.error("Failed to get block info for block %s"
+ " on volume %s", blockname, cls.volname)
+ return False
+
+ g.log.info("Server list %s", server_list)
+ # Discover the block devices from clients
+ for client in cls.clients:
+ for server in server_list:
+ cmd = ("iscsiadm -m discovery -t st -p %s" %
+ server)
+ ret, out, err = g.run(client, cmd)
+ if ret != 0:
+ g.log.error("Failed to discover blocks on "
+ "client %s: %s", client, err)
+ return False
+ g.log.info("Discovered blocks on client %s: %s",
+ client, out)
+ return True
+
+ @classmethod
+ def get_iqn_of_blocks_on_clients(cls, blocknames):
+        """Get the iqn of each block on its respective client.
+
+        Args:
+            blocknames (list): Blocks whose iqns are to be fetched.
+        Returns:
+            bool: True if the iqn of every block is obtained,
+            False otherwise.
+        """
+ if not isinstance(blocknames, list):
+ blocknames = [blocknames]
+ for blockname in blocknames:
+
+ try:
+ block_gbid = cls.block_info_dict[blockname]['GBID']
+ except KeyError:
+ g.log.error("Failed to get GBID of block %s on volume %s",
+ blockname, cls.volname)
+ return False
+
+ try:
+ block_mapped_client = (
+ cls.clients_blocks_map[blockname]['client'])
+ except KeyError:
+ g.log.error("Failed to get the client which mounts the block "
+ "%s on the volume %s", blockname, cls.volname)
+ return False
+
+            # Get the first server on which the block is exported
+ server_ip = cls.block_info_dict[blockname]['EXPORTED ON'][0]
+
+ # Get iqn from gbid
+ cmd = ("iscsiadm -m discovery -t st -p %s | grep -F %s | "
+ "tail -1 | cut -d ' ' -f2" %
+ (server_ip, block_gbid))
+
+            # Not using async here: if two processes execute the above
+            # command in the background at the same time, it causes an
+            # 'iscsiadm: Connection to Discovery Address' error.
+ ret, out, err = g.run(block_mapped_client, cmd)
+ if ret != 0:
+ g.log.error("Failed to get iqn of block %s on client %s: %s",
+ block_gbid, block_mapped_client, err)
+ return False
+ g.log.info("Iqn for gbid '%s' on client %s : '%s'",
+ block_gbid, block_mapped_client, out)
+ block_iqn = out.strip()
+ cls.clients_blocks_map[blockname]['iqn'] = block_iqn
+
+ return True
+
+ @classmethod
+ def login_to_iqn_on_clients(cls, blocknames):
+        """Log in to the block iqns on their clients/initiators.
+
+        Returns:
+            bool: True if login succeeds for all blocks, False otherwise.
+
+        Raises:
+            ExecutionError: If the iscsiadm login command fails.
+        """
+ if not isinstance(blocknames, list):
+ blocknames = [blocknames]
+
+ ret = cls.update_block_info_dict()
+ if not ret:
+ return False
+
+ result = cls.get_iqn_of_blocks_on_clients(blocknames)
+ if not result:
+ return False
+
+ # Login to the block from the client
+ for blockname in blocknames:
+ block_gbid = cls.block_info_dict[blockname]['GBID']
+ block_mapped_client = (
+ cls.clients_blocks_map[blockname]['client'])
+
+ if not cls.clients_blocks_map[blockname].get('iqn'):
+ g.log.error("Iqn info for block %s not there. So can't login",
+ blockname)
+ return False
+
+ block_iqn = cls.clients_blocks_map[blockname]['iqn']
+ cls.clients_blocks_map[blockname]['logged_in'] = False
+
+ if cls.block_info_dict[blockname]['PASSWORD']:
+ block_password = cls.block_info_dict[blockname]['PASSWORD']
+ cmd = ("iscsiadm -m node -T %s -o update -n "
+ "node.session.auth.authmethod -v CHAP -n "
+ "node.session.auth.username -v %s -n "
+ "node.session.auth.password -v %s " %
+ (block_iqn, block_gbid, block_password))
+ ret, out, err = g.run(block_mapped_client, cmd)
+ if ret != 0:
+ g.log.error("Unable to update login credentials for "
+ "iqn %s on %s: %s",
+ block_iqn, block_mapped_client, err)
+ return False
+ g.log.info("Credentials for iqn %s updated successfully "
+ "on %s",
+ block_iqn, block_mapped_client)
+
+ # Login to iqn
+ if not cls.clients_blocks_map[blockname].get('logged_in'):
+ cmd = "iscsiadm -m node -T %s -l" % block_iqn
+ ret, out, err = g.run(block_mapped_client, cmd)
+ if ret != 0:
+                    raise ExecutionError("Failed to login to iqn %s on "
+                                         "%s: %s Command o/p: %s"
+                                         % (block_iqn, block_mapped_client,
+                                            err, out))
+
+ g.log.info("Successfully logged in to iqn %s on %s: %s",
+ block_iqn, block_mapped_client, out)
+ cls.clients_blocks_map[blockname]['logged_in'] = True
+
+ return True
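+
+    # After a successful login, the per-block entry in
+    # cls.clients_blocks_map looks like (illustrative values):
+    #   {'client': '<client_ip>', 'iqn': '<iqn from discovery>',
+    #    'logged_in': True, 'mpath': '', 'is_formatted': False,
+    #    'is_mounted': False}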
+
+ @classmethod
+ def logout_iqn_on_clients(cls, blocknames):
+ """Logout each block from the initiator
+ """
+ # Convert string or unicode type to list
+ if not isinstance(blocknames, list):
+ blocknames = [blocknames]
+
+ for blockname in blocknames:
+ block_mapped_client = (
+ cls.clients_blocks_map[blockname]['client'])
+ block_iqn = cls.clients_blocks_map[blockname]['iqn']
+ cmd = "iscsiadm -m node -T %s -u" % block_iqn
+ ret, out, err = g.run(block_mapped_client, cmd)
+ if ret != 0:
+ g.log.error("Failed to logout of iqn %s on %s: %s"
+ " Command o/p: %s",
+ block_iqn, block_mapped_client, err, out)
+ return False
+ g.log.info("Successfully logged out of iqn %s on %s: %s",
+ block_iqn, block_mapped_client, out)
+
+ return True
+
+ @classmethod
+ def get_mpath_of_iqn_on_clients(cls, blocknames):
+        """Get the mpath device of each logged-in block.
+
+        Returns:
+            bool: True if the mpath of every block is fetched.
+
+        Raises:
+            ExecutionError: If the mpath lookup on a client fails.
+        """
+        # Note: sg_inq requires the 'sg3_utils' package on the clients.
+        # Convert string or unicode type to list
+ if not isinstance(blocknames, list):
+ blocknames = [blocknames]
+
+ for blockname in blocknames:
+ block_gbid = cls.block_info_dict[blockname]['GBID']
+ block_mapped_client = cls.clients_blocks_map[blockname]['client']
+ block_iqn = cls.clients_blocks_map[blockname]['iqn']
+ if not cls.clients_blocks_map[blockname].get('mpath'):
+                cmd = ("for i in /dev/mapper/mpath* ; do "
+                       "sg_inq -i $i | grep %s > /dev/null ; "
+                       "if [[ $? -eq 0 ]] ; then echo $i ; fi ; done" %
+                       (block_gbid))
+ ret, out, err = g.run(block_mapped_client, cmd)
+ if ret != 0:
+                    raise ExecutionError("Failed to get mpath for iqn %s on "
+                                         "client %s: %s" % (block_iqn,
+                                         block_mapped_client, err))
+ block_mpath = out.strip()
+ g.log.info("Successfully got mpath '%s' for iqn '%s' on "
+ "client %s", block_mpath, block_iqn,
+ block_mapped_client)
+ cls.clients_blocks_map[blockname]['mpath'] = block_mpath
+ time.sleep(1)
+
+ return True
+
+ @classmethod
+ def create_client_block_map(cls, blocknames):
+        """Map each block to a single client.
+
+        The client is picked randomly from the client list; a client is
+        reused only after every client has been assigned a block.
+        """
+ if not isinstance(blocknames, list):
+ blocknames = [blocknames]
+
+ tmp_client_list = cls.clients[:]
+ for blockname in blocknames:
+ if blockname not in cls.clients_blocks_map:
+ if len(tmp_client_list) == 0:
+ tmp_client_list = cls.clients[:]
+ client_to_map = random.choice(tmp_client_list)
+ tmp_client_list.remove(client_to_map)
+ cls.clients_blocks_map[blockname] = {
+ 'client': client_to_map,
+ 'iqn': '',
+ 'logged_in': False,
+ 'mpath': '',
+ 'is_formatted': False,
+ 'is_mounted': False,
+ }
+ g.log.info("Blocks mapped to clients. Each block is mapped to a "
+ "randomly selected client")
+ for blockname in blocknames:
+ g.log.info("Block %s mapped to %s", blockname,
+ cls.clients_blocks_map[blockname]['client'])
+
+ @classmethod
+ def mount_blocks(cls, blocknames, filesystem='xfs'):
+ """Mount the blocks on their clients
+ """
+ if not isinstance(blocknames, list):
+ blocknames = [blocknames]
+ # Discover the block on client
+ ret = cls.discover_blocks_on_clients(blocknames)
+ if not ret:
+ return False
+
+ for blockname in blocknames:
+ if not cls.clients_blocks_map[blockname]['logged_in']:
+
+ # Login inside the block on client
+ ret = cls.login_to_iqn_on_clients(blockname)
+ if not ret:
+ return False
+
+            # time.sleep added because the path /dev/mapper/mpath*
+            # may be read even before the login has completed.
+ time.sleep(2)
+ # Get mpath of block on it's client
+ ret = cls.get_mpath_of_iqn_on_clients(blockname)
+ if not ret:
+ return False
+
+ # make fs
+ block_mpath = cls.clients_blocks_map[blockname]['mpath']
+ block_mapped_client = cls.clients_blocks_map[blockname]['client']
+ if not cls.clients_blocks_map[blockname].get('is_formatted'):
+ cmd = "mkfs.%s -f %s" % (filesystem, block_mpath)
+ ret, out, err = g.run(block_mapped_client, cmd)
+ if ret != 0:
+                raise ExecutionError("Failed to make fs on %s on client "
+                                     "%s: %s" % (block_mpath,
+                                     block_mapped_client, err))
+ g.log.info("Successfully created fs on %s on client %s: %s",
+ block_mpath, block_mapped_client, out)
+ cls.clients_blocks_map[blockname]['is_formatted'] = True
+
+ # mount the block
+ if not cls.clients_blocks_map[blockname].get('is_mounted'):
+ temp_mount = {
+ 'protocol': 'xfs',
+ 'client': {
+ 'host': cls.clients_blocks_map[blockname]['client'],
+ },
+ 'volname': cls.clients_blocks_map[blockname]['mpath'],
+ 'mountpoint': "/mnt/%s" % blockname
+ }
+ mount_obj = create_mount_objs([temp_mount]).pop()
+
+ g.log.info("Mount Obj %s", mount_obj)
+ g.log.info("Mounting the device %s on %s:%s" %
+ (mount_obj.volname, mount_obj.client_system,
+ mount_obj.mountpoint))
+
+            # The function is_mounted will log the error:
+            # "Missing arguments for mount"
+            # because it is also used for mounting glusterfs volumes,
+            # where a server name is needed. Mounting a block device
+            # does not need a server name, so the argument check for
+            # the server fails and an error is reported in the log
+            # file. This does not affect the block mounting, so we
+            # can live with it for now.
+ ret = mount_obj.mount()
+ if not ret:
+ raise ExecutionError("Unable to mount the "
+ "device %s on %s:%s" %
+ (mount_obj.volname,
+ mount_obj.client_system,
+ mount_obj.mountpoint))
+ g.log.info("Successfully mounted the device %s on %s:%s" %
+ (mount_obj.volname, mount_obj.client_system,
+ mount_obj.mountpoint))
+ cls.mount_blocks_list.append(mount_obj)
+ cls.clients_blocks_map[blockname]['is_mounted'] = True
+
+ return True
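+
+    # Note: with the default filesystem ('xfs') the command run on the
+    # client is of the form (mpath device name is illustrative):
+    #   mkfs.xfs -f /dev/mapper/mpatha
+    # and the block is then mounted on /mnt/<blockname>.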
+
+ @classmethod
+ def setup_block_mount_block(cls, blocknames):
+ """Create and mount the blocks
+ """
+ # Setup block
+ g.log.info("Setting up blocks")
+ ret = cls.setup_blocks(blocknames)
+ if not ret:
+ raise ExecutionError("Failed to setup blocks")
+ g.log.info("Successful in setting up blocks")
+
+ # Mount Blocks
+ g.log.info("Mounting the blocks on initiator nodes")
+ ret = cls.mount_blocks(blocknames)
+ if not ret:
+            raise ExecutionError("Failed to mount the blocks of volume %s"
+                                 % cls.volname)
+ g.log.info("Successful in mounting the blocks of the volume %s",
+ cls.volname)
+
+ return True
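+
+    # Usage sketch (illustrative): a test's setUpClass or test method
+    # can create and mount all configured blocks in one step:
+    #   cls.setup_block_mount_block(cls.gluster_block_args_info.keys())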
+
+ @classmethod
+ def get_block_args_info_from_config(cls):
+        """Create the dict gluster_block_args_info, which provides the
+        block information used during block creation, from the test
+        config file.
+        """
+ # Get gluster block info from config file
+ if g.config.get('gluster_block_args_info'):
+ cls.gluster_block_args_info = {}
+ blocks_count = 0
+ each_block_info = g.config['gluster_block_args_info']
+ # volname
+ block_on_volume = cls.volname
+ if each_block_info.get('volname'):
+ block_on_volume = each_block_info['volname']
+
+ # Block name
+ block_base_name = "gluster_block"
+ if each_block_info.get('blockname'):
+ block_base_name = each_block_info['blockname']
+
+ # servers
+ block_servers = cls.servers
+ if each_block_info.get('servers'):
+ block_servers = each_block_info['servers']
+ if not filter(None, block_servers):
+ block_servers = cls.servers
+
+ # Block size
+ block_size = "1GiB"
+ if each_block_info.get('size'):
+ block_size = each_block_info['size']
+
+ # HA
+ block_ha = 3
+ if each_block_info.get('ha'):
+ block_ha = each_block_info['ha']
+
+ # auth
+ block_auth = None
+ if each_block_info.get('auth'):
+ block_auth = each_block_info['auth']
+
+ # prealloc
+ block_prealloc = None
+ if each_block_info.get('prealloc'):
+ block_prealloc = each_block_info['prealloc']
+
+ # ring-buffer
+ block_ring_buffer = None
+ if each_block_info.get('ring-buffer'):
+ block_ring_buffer = each_block_info['ring-buffer']
+
+ # Number of blocks
+ num_of_blocks = 1
+ if each_block_info.get('num_of_blocks'):
+ num_of_blocks = int(each_block_info['num_of_blocks'])
+
+ for count in range(blocks_count, num_of_blocks):
+
+ if block_ha:
+ selected_block_servers = random.sample(block_servers,
+ block_ha)
+ else:
+ selected_block_servers = random.choice(block_servers)
+
+ block_name = "_".join([block_base_name,
+ str(count + 1)])
+
+ cls.gluster_block_args_info[block_name] = (
+ {'volname': block_on_volume,
+ 'blockname': block_name,
+ 'servers': cls.get_ip_from_hostname(
+ selected_block_servers),
+ 'size': block_size,
+ 'ha': block_ha,
+ 'auth': block_auth,
+ 'prealloc': block_prealloc,
+ 'storage': None,
+ 'ring-buffer': block_ring_buffer}
+ )
+
+ for key in cls.gluster_block_args_info.keys():
+ value = cls.gluster_block_args_info[key]
+ g.log.info("Gluster-Block args info: %s\n %s" % (key, value))
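+
+    # Illustrative config snippet this method consumes (YAML, under the
+    # top-level key 'gluster_block_args_info'; every key is optional
+    # and falls back to the defaults above):
+    #   gluster_block_args_info:
+    #     blockname: 'gluster_block'
+    #     size: '1GiB'
+    #     ha: 3
+    #     num_of_blocks: 2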
+
+ @classmethod
+ def setUpClass(cls, setup_vol=True, setup_blk=True, mount_blk=True):
+ """Setup volume, create blocks, mount the blocks if specified.
+ """
+ GlusterBaseClass.setUpClass.im_func(cls)
+
+ cls.mount_blocks_list = []
+ cls.total_num_of_blocks = 0
+ cls.block_info_dict = {}
+ cls.clients_blocks_map = {}
+ cls.blocknames = []
+
+ # Default gluster block info
+ cls.gluster_block_args_info = {
+ 'gluster_block_%d' % (cls.total_num_of_blocks + 1): {
+ 'volname': cls.volname,
+ 'blockname': 'gluster_block_%d'
+ % (cls.total_num_of_blocks + 1),
+ 'servers': random.sample(cls.servers_ips, 2),
+ 'size': '1GiB',
+ 'ha': 2,
+ 'auth': None,
+ 'prealloc': None,
+ 'storage': None,
+ 'ring-buffer': None
+ }
+ }
+
+ if g.config.get('gluster_block_args_info'):
+ cls.get_block_args_info_from_config()
+
+ @classmethod
+ def tearDownClass(cls, umount_blocks=True, cleanup_blocks=True,
+ cleanup_vol=True, unlink_storage="yes"):
+ """Teardown the mounts, deletes blocks, gluster volume.
+ """
+        # Unmount blocks
+ if umount_blocks:
+ _rc = True
+ g.log.info("Starting to UnMount Blocks")
+ for mount_obj in cls.mount_blocks_list:
+ ret = mount_obj.unmount()
+ if not ret:
+                    g.log.error("Unable to unmount block '%s' on client %s "
+                                "at %s",
+ mount_obj.volname, mount_obj.client_system,
+ mount_obj.mountpoint)
+ _rc = False
+ if not _rc:
+                raise ExecutionError("Unmount of all block mounts was not "
+                                     "successful")
+            else:
+                g.log.info("Successfully unmounted blocks on all clients")
+ else:
+            g.log.info("Not unmounting the blocks as 'umount_blocks' is "
+                       "set to %s", umount_blocks)
+
+ # Logout the blocks
+ for blockname in cls.clients_blocks_map:
+ block_iqn = cls.clients_blocks_map[blockname]['iqn']
+ block_mapped_client = (
+ cls.clients_blocks_map[blockname]['client'])
+ g.log.info("Logging out iqn %s on client %s", block_iqn,
+ block_mapped_client)
+ cmd = "iscsiadm -m node -T %s -u" % block_iqn
+ ret, out, err = g.run(block_mapped_client, cmd)
+ if ret != 0:
+                raise ExecutionError("Failed to logout iqn %s on client %s"
+                                     ": %s" % (block_iqn,
+                                               block_mapped_client, err))
+ g.log.info("Successfully logged out iqn %s on client %s: %s",
+ block_iqn, block_mapped_client, out)
+
+ # Restarting multipathd on all clients
+ g.log.info("Restarting multipathd on all clients")
+ cmd = "service multipathd restart && service multipathd status"
+ results = g.run_parallel(cls.clients, cmd)
+ for client in results:
+ ret, out, err = results[client]
+ if ret != 0:
+                raise ExecutionError("Failed to restart multipathd on "
+                                     "client %s: %s" % (client, err))
+ g.log.info("Successfully restarted multipathd on client %s: %s",
+ client, out)
+
+ # Cleanup blocks
+ if cleanup_blocks:
+ blocknames = get_block_list(cls.mnode, cls.volname)
+ if blocknames:
+ g.log.info("Listing blocks before deleting:\n%s",
+ '\n'.join(blocknames))
+ for blockname in blocknames:
+ ret, out, err = block_delete(cls.mnode, cls.volname,
+ blockname, unlink_storage)
+ if ret != 0:
+                        raise ExecutionError("Failed to delete the block "
+                                             "%s on volume %s"
+                                             % (blockname, cls.volname))
+ g.log.info("Successfully deleted the block %s on "
+ "volume %s", blockname, cls.volname)
+
+ # Cleanup volume
+ if cleanup_vol:
+ g.log.info("Cleanup Volume %s", cls.volname)
+ ret = cleanup_volume(mnode=cls.mnode, volname=cls.volname)
+ if not ret:
+                raise ExecutionError("Cleanup of volume %s failed"
+                                     % cls.volname)
+ else:
+ g.log.info("Successfully cleaned-up volume")
+ else:
+ g.log.info("Not Cleaning-Up volume as 'cleanup_vol' is %s",
+ cleanup_vol)
+
+ GlusterBaseClass.tearDownClass.im_func(cls)
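+
+
+# Usage sketch (illustrative, not part of this change): a block test can
+# derive from GlusterBlockBaseClass and rely on setUpClass/tearDownClass
+# for volume setup and block cleanup:
+#
+#   class TestGlusterBlockSanity(GlusterBlockBaseClass):
+#       def test_create_and_mount_blocks(self):
+#           ret = self.setup_block_mount_block(
+#               self.gluster_block_args_info.keys())
+#           self.assertTrue(ret, "Failed to create and mount blocks")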