From daca62f4b7f876bf675e5b29b643e3d472903597 Mon Sep 17 00:00:00 2001 From: Shwetha Panduranga Date: Wed, 8 Feb 2017 18:52:37 +0530 Subject: Adding a new test to VVT: 1) glusterbaseclass: - Making changes in glusterbaseclass to not necessarily have volume_type and mount_type. 2) volume_libs: - setup_volume doesn't have to export the volume. It just creates, starts and sets up any operation on the volume. - Moved the sharing/exporting the volume to BaseClass 3) Renaming samba_ops to samba_libs to have better naming practice. 4) Adding nfs_ganesha_libs for any nfs related helper functions 5) Adding a new vvt case which creates, deletes, creates the volume. Change-Id: I238c349df7165d669d3bc7234d97845dba2f51a6 Signed-off-by: Shwetha Panduranga --- .../glustolibs/gluster/gluster_base_class.py | 400 +++++++++++---------- glustolibs-gluster/glustolibs/gluster/nfs_libs.py | 63 ++++ .../glustolibs/gluster/samba_libs.py | 301 ++++++++++++++++ glustolibs-gluster/glustolibs/gluster/samba_ops.py | 397 -------------------- .../glustolibs/gluster/volume_libs.py | 182 +++++----- 5 files changed, 668 insertions(+), 675 deletions(-) create mode 100644 glustolibs-gluster/glustolibs/gluster/nfs_libs.py create mode 100644 glustolibs-gluster/glustolibs/gluster/samba_libs.py delete mode 100644 glustolibs-gluster/glustolibs/gluster/samba_ops.py (limited to 'glustolibs-gluster/glustolibs/gluster') diff --git a/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py b/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py index 65e7cc759..4ad6cc732 100644 --- a/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py +++ b/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright (C) 2016 Red Hat, Inc. 
# # This program is free software; you can redistribute it and/or modify @@ -21,17 +20,20 @@ """ import unittest - -from glusto.core import Glusto as g import os import random -from glustolibs.gluster.peer_ops import (is_peer_connected, - peer_status) -from glustolibs.gluster.volume_libs import setup_volume, cleanup_volume -from glustolibs.gluster.volume_ops import volume_info, volume_status -from glustolibs.gluster.exceptions import ExecutionError, ConfigError import time import copy +from glusto.core import Glusto as g +from glustolibs.gluster.exceptions import ExecutionError, ConfigError +from glustolibs.gluster.peer_ops import is_peer_connected, peer_status +from glustolibs.gluster.volume_ops import volume_info +from glustolibs.gluster.volume_libs import (setup_volume, cleanup_volume, + log_volume_info_and_status) +from glustolibs.gluster.samba_libs import share_volume_over_smb +from glustolibs.gluster.nfs_libs import export_volume_through_nfs +from glustolibs.gluster.mount_ops import create_mount_objs +from glustolibs.io.utils import log_mounts_info class runs_on(g.CarteTestClass): @@ -63,38 +65,41 @@ class runs_on(g.CarteTestClass): class GlusterBaseClass(unittest.TestCase): + """GlusterBaseClass to be subclassed by Gluster Tests. + This class reads the config for variable values that will be used in + gluster tests. If variable values are not specified in the config file, + the variable are defaulted to specific values. 
+ """ # these will be populated by either the runs_on decorator or # defaults in setUpClass() volume_type = None mount_type = None - volname = None - servers = None - voltype = None - mnode = None - mounts = None - clients = None @classmethod def setUpClass(cls): """Initialize all the variables necessary for testing Gluster """ + g.log.info("Setting up class: %s", cls.__name__) + # Get all servers cls.all_servers = None - if ('servers' in g.config and g.config['servers']): + if 'servers' in g.config and g.config['servers']: cls.all_servers = g.config['servers'] + cls.servers = cls.all_servers else: raise ConfigError("'servers' not defined in the global config") # Get all clients cls.all_clients = None - if ('clients' in g.config and g.config['clients']): + if 'clients' in g.config and g.config['clients']: cls.all_clients = g.config['clients'] + cls.clients = cls.all_clients else: raise ConfigError("'clients' not defined in the global config") # Get all servers info cls.all_servers_info = None - if ('servers_info' in g.config and g.config['servers_info']): + if 'servers_info' in g.config and g.config['servers_info']: cls.all_servers_info = g.config['servers_info'] else: raise ConfigError("'servers_info' not defined in the global " @@ -102,19 +107,31 @@ class GlusterBaseClass(unittest.TestCase): # All clients_info cls.all_clients_info = None - if ('clients_info' in g.config and g.config['clients_info']): + if 'clients_info' in g.config and g.config['clients_info']: cls.all_clients_info = g.config['clients_info'] else: raise ConfigError("'clients_info' not defined in the global " "config") - if cls.volume_type is None: - cls.volume_type = "distributed" - if cls.mount_type is None: - cls.mount_type = "glusterfs" + # Set mnode : Node on which gluster commands are executed + cls.mnode = cls.all_servers[0] - g.log.info("SETUP GLUSTER VOLUME: %s on %s" % (cls.volume_type, - cls.mount_type)) + # SMB Cluster info + try: + cls.smb_users_info = ( + 
g.config['gluster']['cluster_config']['smb']['users_info']) + except KeyError: + cls.smb_users_info = {} + cls.smb_users_info['root'] = {} + cls.smb_users_info['root']['password'] = 'foobar' + cls.smb_users_info['root']['acl'] = 'rwx' + + # NFS-Ganesha Cluster Info + try: + cls.enable_nfs_ganesha = bool(g.config['gluster']['cluster_config'] + ['nfs_ganesha']['enable']) + except KeyError: + cls.enable_nfs_ganesha = False # Defining default volume_types configuration. default_volume_type_config = { @@ -151,150 +168,136 @@ class GlusterBaseClass(unittest.TestCase): # Get the volume configuration. cls.volume = {} - found_volume = False - if 'gluster' in g.config: - if 'volumes' in g.config['gluster']: - for volume in g.config['gluster']['volumes']: - if volume['voltype']['type'] == cls.volume_type: - cls.volume = copy.deepcopy(volume) - found_volume = True - break - - if found_volume: - if 'name' not in cls.volume: + if cls.volume_type: + found_volume = False + if 'gluster' in g.config: + if 'volumes' in g.config['gluster']: + for volume in g.config['gluster']['volumes']: + if volume['voltype']['type'] == cls.volume_type: + cls.volume = copy.deepcopy(volume) + found_volume = True + break + + if found_volume: + if 'name' not in cls.volume: + cls.volume['name'] = 'testvol_%s' % cls.volume_type + + if 'servers' not in cls.volume: + cls.volume['servers'] = cls.all_servers + + if not found_volume: + try: + if g.config['gluster']['volume_types'][cls.volume_type]: + cls.volume['voltype'] = (g.config['gluster'] + ['volume_types'] + [cls.volume_type]) + except KeyError: + try: + cls.volume['voltype'] = (default_volume_type_config + [cls.volume_type]) + except KeyError: + raise ConfigError("Unable to get configs of volume " + "type: %s", cls.volume_type) cls.volume['name'] = 'testvol_%s' % cls.volume_type - - if 'servers' not in cls.volume: cls.volume['servers'] = cls.all_servers - if not found_volume: - cls.volume = { - 'name': ('testvol_%s' % cls.volume_type), - 'servers': 
cls.all_servers - } - try: - if g.config['gluster']['volume_types'][cls.volume_type]: - cls.volume['voltype'] = (g.config['gluster'] - ['volume_types'][cls.volume_type]) - except KeyError: - try: - cls.volume['voltype'] = (default_volume_type_config - [cls.volume_type]) - except KeyError: - raise ConfigError("Unable to get configs of volume type: " - "%s", cls.volume_type) - - # Set volume options - if 'options' not in cls.volume: - cls.volume['options'] = {} - - # Set nfs.disable to 'off' to start gluster-nfs server on start of the - # volume if the mount type is 'nfs' - if cls.mount_type == 'nfs': - cls.volume['options']['nfs.disable'] = 'off' - - # SMB Info - if cls.mount_type == 'cifs' or cls.mount_type == 'smb': - if 'smb' not in cls.volume: - cls.volume['smb'] = {} - cls.volume['smb']['enable'] = True - users_info_found = False - try: - if cls.volume['smb']['users_info']: - users_info_found = True - except KeyError: - users_info_found = False - - if not users_info_found: - cls.volume['smb']['users_info'] = {} - try: - cls.volume['smb']['users_info'] = ( - g.config['gluster']['cluster_config']['smb'] - ['users_info']) - except KeyError: - pass - - if not cls.volume['smb']['users_info']: - cls.volume['smb']['users_info']['root'] = {} - cls.volume['smb']['users_info']['root']['password'] = ( - 'foobar') - - # Define Volume variables. - cls.volname = cls.volume['name'] - cls.servers = cls.volume['servers'] - cls.voltype = cls.volume['voltype']['type'] - cls.mnode = cls.servers[0] - try: - cls.smb_users_info = cls.volume['smb']['users_info'] - except KeyError: - cls.smb_users_info = {} + # Set volume options + if 'options' not in cls.volume: + cls.volume['options'] = {} + + # Define Volume Useful Variables. + cls.volname = cls.volume['name'] + cls.voltype = cls.volume['voltype']['type'] + cls.servers = cls.volume['servers'] + cls.mnode = cls.servers[0] + cls.vol_options = cls.volume['options'] # Get the mount configuration. 
- cls.mounts_dict_list = [] cls.mounts = [] - found_mount = False - if 'gluster' in g.config: - if 'mounts' in g.config['gluster']: - for mount in g.config['gluster']['mounts']: - if mount['protocol'] == cls.mount_type: - temp_mount = {} - temp_mount['protocol'] = cls.mount_type - if ('volname' in mount and mount['volname']): - if mount['volname'] == cls.volname: - temp_mount = copy.deepcopy(mount) + if cls.mount_type: + cls.mounts_dict_list = [] + found_mount = False + if 'gluster' in g.config: + if 'mounts' in g.config['gluster']: + for mount in g.config['gluster']['mounts']: + if mount['protocol'] == cls.mount_type: + temp_mount = {} + temp_mount['protocol'] = cls.mount_type + if 'volname' in mount and mount['volname']: + if mount['volname'] == cls.volname: + temp_mount = copy.deepcopy(mount) + else: + continue else: - continue - else: - temp_mount['volname'] = cls.volname - if ('server' not in temp_mount or - (not temp_mount['server'])): - temp_mount['server'] = cls.mnode - if ('mountpoint' not in temp_mount or - (not temp_mount['mountpoint'])): - temp_mount['mountpoint'] = (os.path.join( - "/mnt", '_'.join([cls.volname, - cls.mount_type]))) - if ('client' not in temp_mount or - (not temp_mount['client'])): - temp_mount['client'] = ( - cls.all_clients_info[ - random.choice(cls.all_clients_info.keys())] - ) - cls.mounts_dict_list.append(temp_mount) - found_mount = True - if not found_mount: - for client in cls.all_clients_info.keys(): - mount = { - 'protocol': cls.mount_type, - 'server': cls.mnode, - 'volname': cls.volname, - 'client': cls.all_clients_info[client], - 'mountpoint': (os.path.join( - "/mnt", '_'.join([cls.volname, cls.mount_type]))), - 'options': '' - } - cls.mounts_dict_list.append(mount) - - if cls.mount_type == 'cifs' or cls.mount_type == 'smb': + temp_mount['volname'] = cls.volname + if ('server' not in temp_mount or + (not temp_mount['server'])): + temp_mount['server'] = cls.mnode + if ('mountpoint' not in temp_mount or + (not 
temp_mount['mountpoint'])): + temp_mount['mountpoint'] = (os.path.join( + "/mnt", '_'.join([cls.volname, + cls.mount_type]))) + if ('client' not in temp_mount or + (not temp_mount['client'])): + temp_mount['client'] = ( + cls.all_clients_info[ + random.choice( + cls.all_clients_info.keys())] + ) + cls.mounts_dict_list.append(temp_mount) + found_mount = True + if not found_mount: + for client in cls.all_clients_info.keys(): + mount = { + 'protocol': cls.mount_type, + 'server': cls.mnode, + 'volname': cls.volname, + 'client': cls.all_clients_info[client], + 'mountpoint': (os.path.join( + "/mnt", '_'.join([cls.volname, cls.mount_type]))), + 'options': '' + } + cls.mounts_dict_list.append(mount) + + if cls.mount_type == 'cifs' or cls.mount_type == 'smb': + for mount in cls.mounts_dict_list: + if 'smbuser' not in mount: + mount['smbuser'] = random.choice( + cls.smb_users_info.keys()) + mount['smbpasswd'] = ( + cls.smb_users_info[mount['smbuser']]['password']) + + cls.mounts = create_mount_objs(cls.mounts_dict_list) + + # Defining clients from mounts. + cls.clients = [] for mount in cls.mounts_dict_list: - if 'smbuser' not in mount: - mount['smbuser'] = random.choice(cls.smb_users_info.keys()) - mount['smbpasswd'] = ( - cls.smb_users_info[mount['smbuser']]['password']) + cls.clients.append(mount['client']['host']) + cls.clients = list(set(cls.clients)) + + # Log the baseclass variables for debugging purposes + g.log.debug("GlusterBaseClass Variables:\n %s", cls.__dict__) + + def setUp(self): + g.log.info("Starting Test: %s", self.id()) - from glustolibs.gluster.mount_ops import create_mount_objs - cls.mounts = create_mount_objs(cls.mounts_dict_list) + def tearDown(self): + g.log.info("Ending Test: %s", self.id()) - # Defining clients from mounts. 
- cls.clients = [] - for mount_dict in cls.mounts_dict_list: - cls.clients.append(mount_dict['client']['host']) - cls.clients = list(set(cls.clients)) + @classmethod + def tearDownClass(cls): + g.log.info("Teardown class: %s", cls.__name__) class GlusterVolumeBaseClass(GlusterBaseClass): + """GlusterVolumeBaseClass sets up the volume for testing purposes. + """ @classmethod def setUpClass(cls): + """Setup volume, shares/exports volume for cifs/nfs protocols, + mounts the volume. + """ GlusterBaseClass.setUpClass.im_func(cls) # Validate if peer is connected from all the servers @@ -303,72 +306,76 @@ class GlusterVolumeBaseClass(GlusterBaseClass): if not ret: raise ExecutionError("Validating Peers to be in Cluster " "Failed") + g.log.info("All peers are in connected state") - # Print Peer Status from mnode - _, _, _ = peer_status(cls.mnode) + # Peer Status from mnode + peer_status(cls.mnode) # Setup Volume ret = setup_volume(mnode=cls.mnode, all_servers_info=cls.all_servers_info, volume_config=cls.volume, force=True) if not ret: - raise ExecutionError("Setup volume %s failed" % cls.volname) + raise ExecutionError("Setup volume %s failed", cls.volname) time.sleep(10) - # Print Volume Info and Status - _, _, _ = volume_info(cls.mnode, cls.volname) - - _, _, _ = volume_status(cls.mnode, cls.volname) - - # Validate if volume is exported or not - if 'nfs' in cls.mount_type: - cmd = "showmount -e localhost" - _, _, _ = g.run(cls.mnode, cmd) - - cmd = "showmount -e localhost | grep %s" % cls.volname - ret, _, _ = g.run(cls.mnode, cmd) - if not ret: - raise ExecutionError("Volume %s not exported" % cls.volname) + # Export/Share the volume based on mount_type + if cls.mount_type != "glusterfs": + if "nfs" in cls.mount_type: + ret = export_volume_through_nfs( + mnode=cls.mnode, volname=cls.volname, + enable_ganesha=cls.enable_nfs_ganesha) + if not ret: + raise ExecutionError("Failed to export volume %s " + "as NFS export", cls.volname) - if 'cifs' in cls.mount_type: - cmd = 
"smbclient -L localhost" - _, _, _ = g.run(cls.mnode, cmd) + if "smb" in cls.mount_type or "cifs" in cls.mount_type: + ret = share_volume_over_smb(mnode=cls.mnode, + volname=cls.volname, + smb_users_info=cls.smb_users_info) + if not ret: + raise ExecutionError("Failed to export volume %s " + "as SMB Share", cls.volname) - cmd = ("smbclient -L localhost -U | grep -i -Fw gluster-%s " % - cls.volname) - ret, _, _ = g.run(cls.mnode, cmd) - if not ret: - raise ExecutionError("Volume %s not accessable via SMB/CIFS " - "share" % cls.volname) + # Log Volume Info and Status + ret = log_volume_info_and_status(cls.mnode, cls.volname) + if not ret: + raise ExecutionError("Logging volume %s info and status failed", + cls.volname) # Create Mounts - rc = True + _rc = True for mount_obj in cls.mounts: ret = mount_obj.mount() if not ret: - g.log.error("Unable to mount volume '%s:%s' on '%s:%s'" % - (mount_obj.server_system, mount_obj.volname, - mount_obj.client_system, mount_obj.mountpoint)) - rc = False - if not rc: - raise ExecutionError("Mounting volume %s on few clients failed" % + g.log.error("Unable to mount volume '%s:%s' on '%s:%s'", + mount_obj.server_system, mount_obj.volname, + mount_obj.client_system, mount_obj.mountpoint) + _rc = False + if not _rc: + raise ExecutionError("Mounting volume %s on few clients failed", cls.volname) + # Get info of mount before the IO + log_mounts_info(cls.mounts) + @classmethod def tearDownClass(cls, umount_vol=True, cleanup_vol=True): - """unittest tearDownClass override""" + """Teardown the mounts and volume. 
+ """ + GlusterBaseClass.tearDownClass.im_func(cls) + # Unmount volume if umount_vol: - rc = True + _rc = True for mount_obj in cls.mounts: ret = mount_obj.unmount() if not ret: - g.log.error("Unable to unmount volume '%s:%s' on '%s:%s'" % - (mount_obj.server_system, mount_obj.volname, - mount_obj.client_system, mount_obj.mountpoint) - ) - rc = False - if not rc: + g.log.error("Unable to unmount volume '%s:%s' on '%s:%s'", + mount_obj.server_system, mount_obj.volname, + mount_obj.client_system, mount_obj.mountpoint) + _rc = False + if not _rc: raise ExecutionError("Unmount of all mounts are not " "successful") @@ -376,4 +383,7 @@ class GlusterVolumeBaseClass(GlusterBaseClass): if cleanup_vol: ret = cleanup_volume(mnode=cls.mnode, volname=cls.volname) if not ret: - raise ExecutionError("cleanup volume %s failed" % cls.volname) + raise ExecutionError("cleanup volume %s failed", cls.volname) + + # All Volume Info + volume_info(cls.mnode) diff --git a/glustolibs-gluster/glustolibs/gluster/nfs_libs.py b/glustolibs-gluster/glustolibs/gluster/nfs_libs.py new file mode 100644 index 000000000..003ebc2d0 --- /dev/null +++ b/glustolibs-gluster/glustolibs/gluster/nfs_libs.py @@ -0,0 +1,63 @@ +# Copyright (C) 2015-2016 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +""" + Description: Libraries for gluster nfs operations. +""" +import time +from glusto.core import Glusto as g +from glustolibs.gluster.volume_libs import is_volume_exported + + +def export_volume_through_nfs(mnode, volname, enable_ganesha=False, + time_delay=30): + """Export the volume through nfs + + Args: + mnode (str): Node on which cmd has to be executed. + volname (str): volume name + enable_ganesha (bool): Enable ganesha for the volume. + time_delay (int): Time to wait after the volume set operations + to validate whether the volume is exported or not. + + Returns: + bool: If volume is successfully exported through nfs returns True. + False Otherwise. + """ + # Enable nfs on the volume + cmd = ("gluster volume set %s nfs.disable off --mode=script" % volname) + ret, _, _ = g.run(mnode, cmd) + if ret != 0: + g.log.error("Failed to enable nfs for the volume %s", volname) + return False + + # Enable ganesha on the volume if enable_ganesha is True + if enable_ganesha: + cmd = ("gluster volume set %s ganesha.enable on --mode=script" % + volname) + ret, _, _ = g.run(mnode, cmd) + if ret != 0: + g.log.error("Failed to enable nfs ganesha for volume %s", volname) + return False + + time.sleep(time_delay) + # Verify if volume is exported + ret = is_volume_exported(mnode, volname, "nfs") + if not ret: + g.log.info("Volume %s is not exported as 'nfs' export", volname) + return False + + return True diff --git a/glustolibs-gluster/glustolibs/gluster/samba_libs.py b/glustolibs-gluster/glustolibs/gluster/samba_libs.py new file mode 100644 index 000000000..a12e48855 --- /dev/null +++ b/glustolibs-gluster/glustolibs/gluster/samba_libs.py @@ -0,0 +1,301 @@ +# Copyright (C) 2015-2016 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +""" + Description: Library for samba operations. +""" + +from glusto.core import Glusto as g +from glustolibs.gluster.volume_libs import is_volume_exported +from glustolibs.gluster.mount_ops import GlusterMount + + +def start_smb_service(mnode): + """Start smb service on the specified node. + + Args: + mnode (str): Node on which smb service has to be started + + Returns: + bool: True on successfully starting smb service. False otherwise. + """ + g.log.info("Starting SMB Service on %s", mnode) + + # Enable Samba to start on boot + ret, _, _ = g.run(mnode, "chkconfig smb on") + if ret != 0: + g.log.error("Unable to set chkconfig smb on") + return False + g.log.info("chkconfig smb on successful") + + # Start smb service + ret, _, _ = g.run(mnode, "service smb start") + if ret != 0: + g.log.error("Unable to start the smb service") + return False + g.log.info("Successfully started smb service") + + return True + + +def smb_service_status(mnode): + """Status of smb service on the specified node. + + Args: + mnode (str): Node on which smb service has to be started + + Returns: + tuple: Tuple containing three elements (ret, out, err). + The first element 'ret' is of type 'int' and is the return value + of command execution. + + The second element 'out' is of type 'str' and is the stdout value + of the command execution. + + The third element 'err' is of type 'str' and is the stderr value + of the command execution. 
+ """ + g.log.info("Getting SMB Service status on %s", mnode) + return g.run(mnode, "service smb status") + + +def is_smb_service_running(mnode): + """Check if smb service is running on node + + Args: + mnode (str): Node on which smb service status has to be verified. + + Returns: + bool: True if smb service is running. False otherwise. + """ + g.log.info("Check if SMB service is running on %s", mnode) + ret, out, _ = smb_service_status(mnode) + if ret != 0: + return False + if "Active: active (running)" in out: + return True + else: + return False + + +def stop_smb_service(mnode): + """Stop smb service on the specified node. + + Args: + mnode (str): Node on which smb service has to be stopped. + + Returns: + bool: True on successfully stopping smb service. False otherwise. + """ + g.log.info("Stopping SMB Service on %s", mnode) + + # Disable Samba to start on boot + ret, _, _ = g.run(mnode, "chkconfig smb off") + if ret != 0: + g.log.error("Unable to set chkconfig smb off") + return False + g.log.info("chkconfig smb off successful") + + # Stop smb service + ret, _, _ = g.run(mnode, "service smb stop") + if ret != 0: + g.log.error("Unable to stop the smb service") + return False + g.log.info("Successfully stopped smb service") + + return True + + +def list_smb_shares(mnode): + """List all the gluster volumes that are exported as SMB Shares + + Args: + mnode (str): Node on which commands has to be executed. + + Returns: + list: List of all volume names that are exported as SMB Shares. + Empty list if no volumes are exported as SMB Share. 
+ """ + g.log.info("List all SMB Shares") + smb_shares_list = [] + cmd = "smbclient -L localhost" + ret, out, _ = g.run(mnode, cmd) + if ret != 0: + g.log.error("Failed to find the SMB Shares") + return smb_shares_list + else: + out = out.splitlines() + for line in out: + if 'gluster-' in line: + smb_shares_list.append(line.split(" ")[0].strip()) + + return smb_shares_list + + +def enable_mounting_volume_over_smb(mnode, volname, smb_users_info): + """Enable mounting volume over SMB. Set ACL's for non-root users. + + Args: + mnode (str): Node on which commands are executed. + volname (str): Name of the volume on which acl's has to be set. + smb_users_info (dict): Dict containing users info. Example: + smb_users_info = { + 'root': {'password': 'foobar', + 'acl': '' + }, + 'user1': {'password': 'abc', + 'acl': '' + }, + 'user2': {'password': 'xyz', + 'acl': '' + } + } + Returns: + bool: True on successfully enabling to mount volume using SMB. + False otherwise. + """ + g.log.info("Enable mounting volume over SMB") + # Create a temp mount to provide required permissions to the smb user + mount = { + 'protocol': 'glusterfs', + 'server': mnode, + 'volname': volname, + 'client': { + 'host': mnode + }, + 'mountpoint': '/tmp/gluster_smb_set_user_permissions_%s' % volname, + 'options': 'acl' + } + mount_obj = GlusterMount(mount) + ret = mount_obj.mount() + if not ret: + g.log.error("Unable to create temporary mount for providing " + "required permissions to the smb users") + return False + g.log.info("Successfully created temporary mount for providing " + "required permissions to the smb users") + + # Provide required permissions to the smb user + for smb_user in smb_users_info.keys(): + if smb_user != 'root': + if 'acl' in smb_users_info[smb_user]: + acl = smb_users_info[smb_user]['acl'] + if not acl: + acl = "rwx" + else: + acl = "rwx" + + cmd = ("setfacl -m user:%s:%s %s" % (smb_user, acl, + mount_obj.mountpoint)) + ret, _, _ = g.run(mnode, cmd) + if ret != 0: + 
g.log.error("Unable to provide required permissions to the " + "smb user %s ", smb_user) + return False + g.log.info("Successfully provided required permissions to the " + "smb user %s ", smb_user) + + # Verify SMB/CIFS share can be accessed by the user + + # Unmount the temp mount created + ret = mount_obj.unmount() + if not ret: + g.log.error("Unable to unmount the temp mount") + g.log.info("Successfully unmounted the temp mount") + + return True + + +def share_volume_over_smb(mnode, volname, smb_users_info): + """Sharing volumes over SMB + + Args: + mnode (str): Node on which commands has to be executed. + volname (str): Name of the volume to be shared. + smb_users_info (dict): Dict containing users info. Example: + smb_users_info = { + 'root': {'password': 'foobar', + 'acl': '' + }, + 'user1': {'password': 'abc', + 'acl': '' + }, + 'user2': {'password': 'xyz', + 'acl': '' + } + } + + Returns: + bool : True on successfully sharing the volume over SMB. + False otherwise + """ + g.log.info("Start sharing the volume over SMB") + + # Set volume option 'stat-prefetch' to 'off'. + cmd = "gluster volume set %s stat-prefetch off" % volname + ret, _, _ = g.run(mnode, cmd) + if ret != 0: + g.log.error("Failed to set the volume option stat-prefetch off") + return False + g.log.info("Successfully set 'stat-prefetch' to 'off' on %s", volname) + + # Set volume option 'server.allow-insecure' to 'on'. + cmd = "gluster volume set %s server.allow-insecure on" % volname + ret, _, _ = g.run(mnode, cmd) + if ret != 0: + g.log.error("Failed to set the volume option server-allow-insecure") + return False + g.log.info("Successfully set 'server-allow-insecure' to 'on' on %s", + volname) + + # Set 'storage.batch-fsync-delay-usec' to 0. + # This is to ensure ping_pong's lock and I/O coherency tests works on CIFS. 
+ cmd = ("gluster volume set %s storage.batch-fsync-delay-usec 0" % volname) + ret, _, _ = g.run(mnode, cmd) + if ret != 0: + g.log.error("Failed to set the volume option " + "'storage.batch-fsync-delay-usec' to 0 on %s", volname) + return False + g.log.info("Successfully set 'storage.batch-fsync-delay-usec' to 0 on %s", + volname) + + # Verify if the volume can be accessed from the SMB/CIFS share. + cmd = ("smbclient -L localhost -U | grep -i -Fw gluster-%s " % volname) + ret, _, _ = g.run(mnode, cmd) + if ret != 0: + g.log.error("volume '%s' not accessable via SMB/CIFS share", volname) + return False + g.log.info("volume '%s' can be accessed from SMB/CIFS share", volname) + + # To verify if the SMB/CIFS share can be accessed by the root/non-root user + # TBD + + # Enable mounting volumes over SMB + ret = enable_mounting_volume_over_smb(mnode, volname, smb_users_info) + if not ret: + g.log.error("Failed to enable mounting volumes using SMB") + return False + g.log.info("Successfully enabled mounting volumes using SMV for the " + "smbusers: %s", str(smb_users_info.keys())) + + # Verify if volume is shared + ret = is_volume_exported(mnode, volname, "smb") + if not ret: + g.log.info("Volume %s is not exported as 'cifs/smb' share", volname) + return False + g.log.info("Volume %s is exported as 'cifs/smb' share", volname) + + return True diff --git a/glustolibs-gluster/glustolibs/gluster/samba_ops.py b/glustolibs-gluster/glustolibs/gluster/samba_ops.py deleted file mode 100644 index ec158b04b..000000000 --- a/glustolibs-gluster/glustolibs/gluster/samba_ops.py +++ /dev/null @@ -1,397 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2015-2016 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -""" - Description: Library for samba operations. -""" - - -from glusto.core import Glusto as g -import time - - -def start_smb_service(mnode): - """Start smb service on the specified node. - - Args: - mnode (str): Node on which smb service has to be started - - Returns: - bool: True on successfully starting smb service. False otherwise. - """ - # Enable Samba to start on boot - ret, _, _ = g.run(mnode, "chkconfig smb on") - if ret != 0: - g.log.error("Unable to set chkconfig smb on") - return False - g.log.info("chkconfig smb on successful") - - # Start smb service - ret, _, _ = g.run(mnode, "service smb start") - if ret != 0: - g.log.error("Unable to start the smb service") - return False - g.log.info("Successfully started smb service") - - return True - - -def smb_service_status(mnode): - """Status of smb service on the specified node. - - Args: - mnode (str): Node on which smb service has to be started - - Returns: - tuple: Tuple containing three elements (ret, out, err). - The first element 'ret' is of type 'int' and is the return value - of command execution. - - The second element 'out' is of type 'str' and is the stdout value - of the command execution. - - The third element 'err' is of type 'str' and is the stderr value - of the command execution. 
- """ - return g.run(mnode, "service smb status") - - -def is_smb_service_running(mnode): - ret, out, err = smb_service_status(mnode) - if ret != 0: - return False - if "Active: active (running)" in out: - return True - else: - return False - - -def stop_smb_service(mnode): - """Stop smb service on the specified node. - - Args: - mnode (str): Node on which smb service has to be stopped. - - Returns: - bool: True on successfully stopping smb service. False otherwise. - """ - # Disable Samba to start on boot - ret, _, _ = g.run(mnode, "chkconfig smb off") - if ret != 0: - g.log.error("Unable to set chkconfig smb off") - return False - g.log.info("chkconfig smb off successful") - - # Stop smb service - ret, _, _ = g.run(mnode, "service smb stop") - if ret != 0: - g.log.error("Unable to stop the smb service") - return False - g.log.info("Successfully stopped smb service") - - return True - - -def create_smb_users(servers, smb_users_info, start_uid=50000): - """Creates SMB users on specified servers and sets password for SMB users. - - Args: - servers (list): List of server hosts on which smb users has to be - created - smb_users_info (dict): Dict containing users info. Example: - smb_users_info = { - 'root': {'password': 'foobar', - 'acl': '' - }, - 'user1': {'password': 'abc', - 'acl': '' - }, - 'user2': {'password': 'xyz', - 'acl': '' - } - } - start_uid (int): starting uid number for the users - - Returns: - bool: True on successfully creating smb users. False otherwise. 
- """ - uid = start_uid - for smb_user in smb_users_info.keys(): - if smb_user == 'root': - continue - for server in servers: - # Check if user already exist with same uid - cmd = ("getent passwd %d" % uid) - ret, out, err = g.run(server, cmd) - if ret == 0: - if smb_user in out.split(":")[0]: - continue - else: - cmd = ("userdel -f %s" % smb_user) - ret, _, _ = g.run(server, cmd) - if ret != 0: - g.log.error("Unable to delete the smb user '%s' on " - "server %s" % (smb_user, server)) - return False - - else: - cmd = ("useradd -u %d %s" % (uid, smb_user)) - ret, out, err = g.run(server, cmd) - if ret != 0: - g.log.error("Unable to add the smb user '%s' on " - "server %s" % (smb_user, server)) - return False - uid = uid + 1 - - mnode = servers[0] - for smb_user in smb_users_info.keys(): - if 'password' in smb_users_info[smb_user]: - smbpasswd = smb_users_info[smb_user]['password'] - else: - g.log.error("Password not found for the user %s" % smb_user) - return False - - # Set smb password for smb_user - cmd = ("(echo \"%s\"; echo \"%s\") | smbpasswd -a %s" % - (smbpasswd, smbpasswd, smb_user)) - - ret, _, _ = g.run(mnode, cmd) - if ret != 0: - g.log.error("Unable to set the smb password for smb user %s" % - smb_user) - return False - g.log.info("Successfully set password for smb user %s on node %s" % - (smb_user, mnode)) - - return True - - -def delete_smb_users(servers, smb_users_info): - rc = True - for smb_user in smb_users_info.keys(): - if smb_user == 'root': - continue - cmd = ("userdel -r -f %s" % smb_user) - for server in servers: - ret, out, err = g.run(server, cmd) - if ret != 0: - if not ("userdel: user '%s' does not exist" % smb_user) in out: - rc = False - return rc - - -def list_smb_shares(mnode): - """List all the gluster volumes that are exported as SMB Shares - - Args: - mnode (str): Node on which commands has to be executed. - - Returns: - list: List of all volume names that are exported as SMB Shares. 
- Empty list if no volumes are exported as SMB Share. - """ - smb_shares_list = [] - cmd = "smbclient -L localhost" - ret, out, err = g.run(mnode, cmd) - if ret != 0: - g.log.error("Failed to find the SMB Shares") - return smb_shares_list - else: - out = out.splitlines() - for line in out: - if 'gluster-' in line: - smb_shares_list.append(line.split(" ")[0].strip()) - - return smb_shares_list - - -def share_volume_over_smb(mnode, volname, servers, smb_users_info): - """Sharing volumes over SMB - - Args: - mnode (str): Node on which commands has to be executed. - volname (str): Name of the volume to be shared. - servers (list): List of all servers in the storage pool. - smb_users_info (dict): Dict containing users info. Example: - smb_users_info = { - 'root': {'password': 'foobar', - 'acl': '' - }, - 'user1': {'password': 'abc', - 'acl': '' - }, - 'user2': {'password': 'xyz', - 'acl': '' - } - } - - Returns: - bool : True on successfully sharing the volume over SMB. - False otherwise - """ - # Set volume option 'stat-prefetch' to 'off'. - cmd = "gluster volume set %s stat-prefetch off" % volname - ret, _, _ = g.run(mnode, cmd) - if ret != 0: - g.log.error("Failed to set the volume option stat-prefetch off") - return False - g.log.info("Successfully set 'stat-prefetch' to 'off' on %s" % volname) - - # Set volume option 'server.allow-insecure' to 'on'. - cmd = "gluster volume set %s server.allow-insecure on" % volname - ret, _, _ = g.run(mnode, cmd) - if ret != 0: - g.log.error("Failed to set the volume option server-allow-insecure") - return False - g.log.info("Successfully set 'server-allow-insecure' to 'on' on %s" % - volname) - - # Set 'storage.batch-fsync-delay-usec' to 0. - # This is to ensure ping_pong's lock and I/O coherency tests works on CIFS. 
- cmd = ("gluster volume set %s storage.batch-fsync-delay-usec 0" % volname) - ret, _, _ = g.run(mnode, cmd) - if ret != 0: - g.log.error("Failed to set the volume option " - "'storage.batch-fsync-delay-usec' to 0 on %s" % volname) - return False - g.log.info("Successfully set 'storage.batch-fsync-delay-usec' to 0 on %s" % - volname) - - # Edit the /etc/glusterfs/glusterd.vol in each Red Hat Gluster Storage - # node, to add a line 'option rpc-auth-allow-insecure on' - glusterd_volfile = "/etc/glusterfs/glusterd.vol" - glusterd_volfile_edit_cmd = ( - ("grep -F 'option rpc-auth-allow-insecure on' %s > /dev/null || " - "(cp %s %s.orig && " - "sed -i '/^end-volume/d' %s && " - "echo ' option rpc-auth-allow-insecure on' >> %s && " - "echo 'end-volume' >> %s )") % - (glusterd_volfile, glusterd_volfile, glusterd_volfile, - glusterd_volfile, glusterd_volfile, glusterd_volfile)) - results = g.run_parallel(servers, glusterd_volfile_edit_cmd) - rc = True - for server, ret_values in results.iteritems(): - retcode, out, err = ret_values - if retcode != 0: - g.log.error("Unable to edit glusterd volfile on server %s", server) - rc = False - if not rc: - return False - g.log.info("Succefully edited all the servers glusterd volfile to add " - "the setting 'option rpc-auth-allow-insecure on'") - - # Restart glusterd service on each Red Hat Gluster Storage node. - from glustolibs.gluster.gluster_init import restart_glusterd - ret = restart_glusterd(servers) - if not ret: - g.log.error("Unable to restart glusterd on few servers") - return False - g.log.info("Successfully restarted glusterd on all servers") - time.sleep(30) - # Verify if the volume can be accessed from the SMB/CIFS share. 
- cmd = ("smbclient -L localhost -U | grep -i -Fw gluster-%s " % volname) - ret, _, _ = g.run(mnode, cmd) - if ret != 0: - g.log.error("volume '%s' not accessable via SMB/CIFS share" % volname) - return False - g.log.info("volume '%s' can be accessed from SMB/CIFS share" % volname) - - # To verify if the SMB/CIFS share can be accessed by the root/non-root user - # TBD - - # Enable mounting volumes using SMB - ret = enable_mounting_volume_using_smb(mnode, volname, smb_users_info) - if not ret: - g.log.error("Failed to enable mounting volumes using SMB") - return False - g.log.info("Successfully enabled mounting volumes using SMV for the " - "smbusers: %s" % smb_users_info.keys()) - return True - - -def enable_mounting_volume_using_smb(mnode, volname, smb_users_info): - """Enable mounting volume using SMB. Set ACL's for non-root users. - - Args: - mnode (str): Node on which commands are executed. - volname (str): Name of the volume on which acl's has to be set. - smb_users_info (dict): Dict containing users info. Example: - smb_users_info = { - 'root': {'password': 'foobar', - 'acl': '' - }, - 'user1': {'password': 'abc', - 'acl': '' - }, - 'user2': {'password': 'xyz', - 'acl': '' - } - } - Returns: - bool: True on successfully enabling to mount volume using SMB. - False otherwise. 
- """ - # Create a temp mount to provide required permissions to the smb user - from glustolibs.gluster.mount_ops import GlusterMount - mount = { - 'protocol': 'glusterfs', - 'server': mnode, - 'volname': volname, - 'client': { - 'host': mnode - }, - 'mountpoint': '/tmp/gluster_smb_set_user_permissions_%s' % volname, - 'options': 'acl' - } - mount_obj = GlusterMount(mount) - ret = mount_obj.mount() - if not ret: - g.log.error("Unable to create temporary mount for providing " - "required permissions to the smb users") - return False - g.log.info("Successfully created temporary mount for providing " - "required permissions to the smb users") - - # Provide required permissions to the smb user - for smb_user in smb_users_info.keys(): - if smb_user != 'root': - if 'acl' in smb_users_info[smb_user]: - acl = smb_users_info[smb_user]['acl'] - if not acl: - acl = "rwx" - else: - acl = "rwx" - - cmd = ("setfacl -m user:%s:%s %s" % (smb_user, acl, - mount_obj.mountpoint)) - ret, _, _ = g.run(mnode, cmd) - if ret != 0: - g.log.error("Unable to provide required permissions to the " - "smb user %s " % smb_user) - return False - g.log.info("Successfully provided required permissions to the " - "smb user %s " % smb_user) - - # Verify SMB/CIFS share can be accessed by the user - - # Unmount the temp mount created - ret = mount_obj.unmount() - if not ret: - g.log.error("Unable to unmount the temp mount") - g.log.info("Successfully unmounted the temp mount") - - return True diff --git a/glustolibs-gluster/glustolibs/gluster/volume_libs.py b/glustolibs-gluster/glustolibs/gluster/volume_libs.py index 94a817dae..6a3f80ac3 100644 --- a/glustolibs-gluster/glustolibs/gluster/volume_libs.py +++ b/glustolibs-gluster/glustolibs/gluster/volume_libs.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright (C) 2015-2016 Red Hat, Inc. 
# # This program is free software; you can redistribute it and/or modify @@ -26,14 +25,14 @@ from glustolibs.gluster.lib_utils import form_bricks_list from glustolibs.gluster.volume_ops import (volume_create, volume_start, set_volume_options, get_volume_info, volume_stop, volume_delete, - volume_info, volume_status) + volume_info, volume_status, + get_volume_options) from glustolibs.gluster.tiering_ops import (add_extra_servers_to_cluster, tier_attach, is_tier_process_running) from glustolibs.gluster.quota_ops import (enable_quota, set_quota_limit_usage, is_quota_enabled) from glustolibs.gluster.uss_ops import enable_uss, is_uss_enabled -from glustolibs.gluster.samba_ops import share_volume_over_smb from glustolibs.gluster.snap_ops import snap_delete_by_volumename from glustolibs.gluster.brick_libs import are_bricks_online, get_all_bricks from glustolibs.gluster.heal_libs import are_all_self_heal_daemons_are_online @@ -210,14 +209,14 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False): bricks_list=bricks_list, force=force, **kwargs) if ret != 0: - g.log.error("Unable to create volume %s" % volname) + g.log.error("Unable to create volume %s", volname) return False # Start Volume time.sleep(2) ret = volume_start(mnode, volname) if not ret: - g.log.error("volume start %s failed" % volname) + g.log.error("volume start %s failed", volname) return False # Create Tier volume @@ -273,13 +272,13 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False): time.sleep(30) # Check if tier is running - rc = True + _rc = True for server in extra_servers: ret = is_tier_process_running(server, volname) if not ret: g.log.error("Tier process not running on %s", server) - rc = False - if not rc: + _rc = False + if not _rc: return False # Enable Quota @@ -291,13 +290,13 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False): return False # Check if 'limit_usage' is defined - if ('limit_usage' in volume_config['quota']): - if ('path' 
in volume_config['quota']['limit_usage']): + if 'limit_usage' in volume_config['quota']: + if 'path' in volume_config['quota']['limit_usage']: path = volume_config['quota']['limit_usage']['path'] else: path = "/" - if ('size' in volume_config['quota']['limit_usage']): + if 'size' in volume_config['quota']['limit_usage']: size = volume_config['quota']['limit_usage']['size'] else: size = "100GB" @@ -331,34 +330,6 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False): g.log.error("USS is not enabled on the volume %s", volname) return False - # Enable Ganesha -# if ('nfs_ganesha' in volume_config and -# 'enable' in volume_config['nfs_ganesha'] and -# volume_config['nfs_ganesha']['enable']): -# from glustolibs.gluster.ganesha import vol_set_ganesha -# ret = vol_set_ganesha(mnode=mnode, volname=volname, option=True) -# if not ret: -# g.log.error("failed to set the ganesha option for %s" % volname) -# return False - - # Enable Samba - if ('smb' in volume_config and 'enable' in volume_config['smb'] and - volume_config['smb']['enable']): - smb_users_info = {} - if ('users_info' in volume_config['smb'] and - volume_config['smb']['users_info']): - smb_users_info = volume_config['smb']['users_info'] - else: - g.log.error("SMB Users info not available in the volume config." 
- "Unable to export volume %s as SMB Share" % volname) - return False - ret = share_volume_over_smb(mnode=mnode, volname=volname, - servers=servers, - smb_users_info=smb_users_info) - if not ret: - g.log.error("Failed to export volume %s as SMB Share" % volname) - return False - # Set all the volume options: if 'options' in volume_config: volume_options = volume_config['options'] @@ -388,23 +359,22 @@ def cleanup_volume(mnode, volname): """ volinfo = get_volume_info(mnode, volname) if volinfo is None or volname not in volinfo: - g.log.info("Volume %s does not exist in %s" % (volname, mnode)) + g.log.info("Volume %s does not exist in %s", volname, mnode) return True ret, _, _ = snap_delete_by_volumename(mnode, volname) if ret != 0: - g.log.error("Failed to delete the snapshots in " - "volume %s" % volname) + g.log.error("Failed to delete the snapshots in volume %s", volname) return False ret, _, _ = volume_stop(mnode, volname, force=True) if ret != 0: - g.log.error("Failed to stop volume %s" % volname) + g.log.error("Failed to stop volume %s", volname) return False ret = volume_delete(mnode, volname) if not ret: - g.log.error("Unable to cleanup the volume %s" % volname) + g.log.error("Unable to cleanup the volume %s", volname) return False return True @@ -431,7 +401,7 @@ def is_volume_exported(mnode, volname, share_type): else: return True - if 'cifs' in share_type: + if 'cifs' in share_type or 'smb' in share_type: cmd = "smbclient -L localhost" _, _, _ = g.run(mnode, cmd) @@ -525,18 +495,19 @@ def get_subvols(mnode, volname): ['hotBricks']['hotBrickType']) tmp = volinfo[volname]["bricks"]['hotBricks']["brick"] hot_tier_bricks = [x["name"] for x in tmp if "name" in x] - if (hot_tier_type == 'Distribute'): + if hot_tier_type == 'Distribute': for brick in hot_tier_bricks: subvols['hot_tier_subvols'].append([brick]) elif (hot_tier_type == 'Replicate' or hot_tier_type == 'Distributed-Replicate'): - rep_count = int((volinfo[volname]["bricks"]['hotBricks'] - 
['numberOfBricks']).split("=", 1)[0]. - split("x")[1].strip()) - subvol_list = ([hot_tier_bricks[i:i + rep_count] - for i in range(0, len(hot_tier_bricks), - rep_count)]) + rep_count = int( + (volinfo[volname]["bricks"]['hotBricks'] + ['numberOfBricks']).split("=", 1)[0].split("x")[1].strip() + ) + subvol_list = ( + [hot_tier_bricks[i:i + rep_count] + for i in range(0, len(hot_tier_bricks), rep_count)]) subvols['hot_tier_subvols'] = subvol_list # Get cold tier subvols @@ -546,29 +517,31 @@ def get_subvols(mnode, volname): cold_tier_bricks = [x["name"] for x in tmp if "name" in x] # Distribute volume - if (cold_tier_type == 'Distribute'): + if cold_tier_type == 'Distribute': for brick in cold_tier_bricks: subvols['cold_tier_subvols'].append([brick]) # Replicate or Distribute-Replicate volume elif (cold_tier_type == 'Replicate' or cold_tier_type == 'Distributed-Replicate'): - rep_count = int((volinfo[volname]["bricks"]['coldBricks'] - ['numberOfBricks']).split("=", 1)[0]. - split("x")[1].strip()) - subvol_list = ([cold_tier_bricks[i:i + rep_count] - for i in range(0, len(cold_tier_bricks), - rep_count)]) + rep_count = int( + (volinfo[volname]["bricks"]['coldBricks'] + ['numberOfBricks']).split("=", 1)[0].split("x")[1].strip() + ) + subvol_list = ( + [cold_tier_bricks[i:i + rep_count] + for i in range(0, len(cold_tier_bricks), rep_count)]) subvols['cold_tier_subvols'] = subvol_list # Disperse or Distribute-Disperse volume elif (cold_tier_type == 'Disperse' or cold_tier_type == 'Distributed-Disperse'): - disp_count = sum([int(nums) for nums in - ((volinfo[volname]["bricks"]['coldBricks'] - ['numberOfBricks']).split("x", 1)[1]. - strip().split("=")[0].strip().strip("()"). - split()) if nums.isdigit()]) + disp_count = sum( + [int(nums) for nums in ( + (volinfo[volname]["bricks"]['coldBricks'] + ['numberOfBricks']).split("x", 1)[1]. + strip().split("=")[0].strip().strip("()"). 
+ split()) if nums.isdigit()]) subvol_list = [cold_tier_bricks[i:i + disp_count] for i in range(0, len(cold_tier_bricks), disp_count)] @@ -587,11 +560,10 @@ def get_subvols(mnode, volname): for brick in bricks: subvols['volume_subvols'].append([brick]) - elif (voltype == 'Disperse' or voltype == 'Distributed-Disperse'): + elif voltype == 'Disperse' or voltype == 'Distributed-Disperse': disp_count = int(volinfo[volname]['disperseCount']) - subvol_list = [bricks[i:i + disp_count]for i in range(0, - len(bricks), - disp_count)] + subvol_list = ([bricks[i:i + disp_count] + for i in range(0, len(bricks), disp_count)]) subvols['volume_subvols'] = subvol_list return subvols @@ -609,7 +581,7 @@ def is_tiered_volume(mnode, volname): """ volinfo = get_volume_info(mnode, volname) if volinfo is None: - g.log.error("Unable to get the volume info for volume %s" % volname) + g.log.error("Unable to get the volume info for volume %s", volname) return None voltype = volinfo[volname]['typeStr'] @@ -632,7 +604,7 @@ def is_distribute_volume(mnode, volname): """ volume_type_info = get_volume_type_info(mnode, volname) if volume_type_info is None: - g.log.error("Unable to check if the volume %s is distribute" % volname) + g.log.error("Unable to check if the volume %s is distribute", volname) return False if volume_type_info['is_tier']: @@ -640,7 +612,7 @@ def is_distribute_volume(mnode, volname): ['hotBrickType']) cold_tier_type = (volume_type_info['cold_tier_type_info'] ['coldBrickType']) - if (hot_tier_type == 'Distribute' and cold_tier_type == 'Distribute'): + if hot_tier_type == 'Distribute' and cold_tier_type == 'Distribute': return True else: return False @@ -693,7 +665,7 @@ def get_volume_type_info(mnode, volname): """ volinfo = get_volume_info(mnode, volname) if volinfo is None: - g.log.error("Unable to get the volume info for volume %s" % volname) + g.log.error("Unable to get the volume info for volume %s", volname) return None volume_type_info = { @@ -726,7 +698,7 @@ def 
get_volume_type_info(mnode, volname): non_tiered_volume_type_info[key] = volinfo[volname][key] else: g.log.error("Unable to find key '%s' in the volume info for " - "the volume %s" % (key, volname)) + "the volume %s", key, volname) non_tiered_volume_type_info[key] = None volume_type_info['volume_type_info'] = non_tiered_volume_type_info @@ -754,11 +726,11 @@ def get_cold_tier_type_info(mnode, volname): """ volinfo = get_volume_info(mnode, volname) if volinfo is None: - g.log.error("Unable to get the volume info for volume %s" % volname) + g.log.error("Unable to get the volume info for volume %s", volname) return None if not is_tiered_volume(mnode, volname): - g.log.error("Volume %s is not a tiered volume" % volname) + g.log.error("Volume %s is not a tiered volume", volname) return None cold_tier_type_info = { @@ -773,7 +745,7 @@ def get_cold_tier_type_info(mnode, volname): ['coldBricks'][key]) else: g.log.error("Unable to find key '%s' in the volume info for the " - "volume %s" % (key, volname)) + "volume %s", key, volname) return None if 'Disperse' in cold_tier_type_info['coldBrickType']: @@ -804,11 +776,11 @@ def get_hot_tier_type_info(mnode, volname): """ volinfo = get_volume_info(mnode, volname) if volinfo is None: - g.log.error("Unable to get the volume info for volume %s" % volname) + g.log.error("Unable to get the volume info for volume %s", volname) return None if not is_tiered_volume(mnode, volname): - g.log.error("Volume %s is not a tiered volume" % volname) + g.log.error("Volume %s is not a tiered volume", volname) return None hot_tier_type_info = { @@ -821,7 +793,7 @@ def get_hot_tier_type_info(mnode, volname): [key]) else: g.log.error("Unable to find key '%s' in the volume info for the " - "volume %s" % (key, volname)) + "volume %s", key, volname) return None return hot_tier_type_info @@ -889,7 +861,7 @@ def get_cold_tier_num_of_bricks_per_subvol(mnode, volname): NoneType: None if volume doesnot exist or not a tiered volume. 
""" if not is_tiered_volume(mnode, volname): - g.log.error("Volume %s is not a tiered volume" % volname) + g.log.error("Volume %s is not a tiered volume", volname) return None subvols_dict = get_subvols(mnode, volname) if subvols_dict['cold_tier_subvols']: @@ -910,7 +882,7 @@ def get_hot_tier_num_of_bricks_per_subvol(mnode, volname): NoneType: None if volume doesnot exist or not a tiered volume. """ if not is_tiered_volume(mnode, volname): - g.log.error("Volume %s is not a tiered volume" % volname) + g.log.error("Volume %s is not a tiered volume", volname) return None subvols_dict = get_subvols(mnode, volname) if subvols_dict['hot_tier_subvols']: @@ -945,7 +917,7 @@ def get_replica_count(mnode, volname): """ vol_type_info = get_volume_type_info(mnode, volname) if vol_type_info is None: - g.log.error("Unable to get the replica count info for the volume %s" % + g.log.error("Unable to get the replica count info for the volume %s", volname) return None @@ -1036,7 +1008,7 @@ def get_disperse_count(mnode, volname): """ vol_type_info = get_volume_type_info(mnode, volname) if vol_type_info is None: - g.log.error("Unable to get the disperse count info for the volume %s" % + g.log.error("Unable to get the disperse count info for the volume %s", volname) return None @@ -1077,3 +1049,47 @@ def get_cold_tier_disperse_count(mnode, volname): cold_tier_disperse_count = (volinfo[volname]["bricks"]['coldBricks'] ['colddisperseCount']) return cold_tier_disperse_count + + +def enable_and_validate_volume_options(mnode, volname, volume_options_list, + time_delay=5): + """Enable the volume option and validate whether the option has be + successfully enabled or not + + Args: + mnode (str): Node on which commands are executed. + volname (str): Name of the volume. + volume_options_list (list): List of volume options to be enabled + time_delay (int): Time delay between 2 volume set operations + + Returns: + bool: True when enabling and validating all volume options is + successful. 
False otherwise + """ + if not isinstance(volume_options_list, list): + volume_options_list = [volume_options_list] + + for option in volume_options_list: + # Set volume option to 'enable' + g.log.info("Setting the volume option : %s", option) + ret = set_volume_options(mnode, volname, {option: "enable"}) + if not ret: + return False + + # Validate whether the option is set on the volume + g.log.info("Validating the volume option : %s to be set to 'enable'", + option) + option_dict = get_volume_options(mnode, volname, option) + g.log.info("Options Dict: %s", option_dict) + if option_dict is None: + g.log.error("%s is not enabled on the volume %s", option, volname) + return False + + if option not in option_dict or "enable" not in option_dict[option]: + g.log.error("%s is not enabled on the volume %s", option, volname) + return False + + g.log.info("%s is enabled on the volume %s", option, volname) + time.sleep(time_delay) + + return True -- cgit