summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--glustolibs-gluster/glustolibs/gluster/gluster_base_class.py400
-rw-r--r--glustolibs-gluster/glustolibs/gluster/nfs_libs.py63
-rw-r--r--glustolibs-gluster/glustolibs/gluster/samba_libs.py (renamed from glustolibs-gluster/glustolibs/gluster/samba_ops.py)308
-rw-r--r--glustolibs-gluster/glustolibs/gluster/volume_libs.py182
-rw-r--r--tests/functional/bvt/test_bvt_lite_and_plus.py129
-rw-r--r--tests/functional/bvt/test_vvt.py129
6 files changed, 602 insertions, 609 deletions
diff --git a/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py b/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
index 65e7cc759..4ad6cc732 100644
--- a/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
+++ b/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2016 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
@@ -21,17 +20,20 @@
"""
import unittest
-
-from glusto.core import Glusto as g
import os
import random
-from glustolibs.gluster.peer_ops import (is_peer_connected,
- peer_status)
-from glustolibs.gluster.volume_libs import setup_volume, cleanup_volume
-from glustolibs.gluster.volume_ops import volume_info, volume_status
-from glustolibs.gluster.exceptions import ExecutionError, ConfigError
import time
import copy
+from glusto.core import Glusto as g
+from glustolibs.gluster.exceptions import ExecutionError, ConfigError
+from glustolibs.gluster.peer_ops import is_peer_connected, peer_status
+from glustolibs.gluster.volume_ops import volume_info
+from glustolibs.gluster.volume_libs import (setup_volume, cleanup_volume,
+ log_volume_info_and_status)
+from glustolibs.gluster.samba_libs import share_volume_over_smb
+from glustolibs.gluster.nfs_libs import export_volume_through_nfs
+from glustolibs.gluster.mount_ops import create_mount_objs
+from glustolibs.io.utils import log_mounts_info
class runs_on(g.CarteTestClass):
@@ -63,38 +65,41 @@ class runs_on(g.CarteTestClass):
class GlusterBaseClass(unittest.TestCase):
+ """GlusterBaseClass to be subclassed by Gluster Tests.
+ This class reads the config for variable values that will be used in
+ gluster tests. If variable values are not specified in the config file,
+ the variables are defaulted to specific values.
+ """
# these will be populated by either the runs_on decorator or
# defaults in setUpClass()
volume_type = None
mount_type = None
- volname = None
- servers = None
- voltype = None
- mnode = None
- mounts = None
- clients = None
@classmethod
def setUpClass(cls):
"""Initialize all the variables necessary for testing Gluster
"""
+ g.log.info("Setting up class: %s", cls.__name__)
+
# Get all servers
cls.all_servers = None
- if ('servers' in g.config and g.config['servers']):
+ if 'servers' in g.config and g.config['servers']:
cls.all_servers = g.config['servers']
+ cls.servers = cls.all_servers
else:
raise ConfigError("'servers' not defined in the global config")
# Get all clients
cls.all_clients = None
- if ('clients' in g.config and g.config['clients']):
+ if 'clients' in g.config and g.config['clients']:
cls.all_clients = g.config['clients']
+ cls.clients = cls.all_clients
else:
raise ConfigError("'clients' not defined in the global config")
# Get all servers info
cls.all_servers_info = None
- if ('servers_info' in g.config and g.config['servers_info']):
+ if 'servers_info' in g.config and g.config['servers_info']:
cls.all_servers_info = g.config['servers_info']
else:
raise ConfigError("'servers_info' not defined in the global "
@@ -102,19 +107,31 @@ class GlusterBaseClass(unittest.TestCase):
# All clients_info
cls.all_clients_info = None
- if ('clients_info' in g.config and g.config['clients_info']):
+ if 'clients_info' in g.config and g.config['clients_info']:
cls.all_clients_info = g.config['clients_info']
else:
raise ConfigError("'clients_info' not defined in the global "
"config")
- if cls.volume_type is None:
- cls.volume_type = "distributed"
- if cls.mount_type is None:
- cls.mount_type = "glusterfs"
+ # Set mnode : Node on which gluster commands are executed
+ cls.mnode = cls.all_servers[0]
- g.log.info("SETUP GLUSTER VOLUME: %s on %s" % (cls.volume_type,
- cls.mount_type))
+ # SMB Cluster info
+ try:
+ cls.smb_users_info = (
+ g.config['gluster']['cluster_config']['smb']['users_info'])
+ except KeyError:
+ cls.smb_users_info = {}
+ cls.smb_users_info['root'] = {}
+ cls.smb_users_info['root']['password'] = 'foobar'
+ cls.smb_users_info['root']['acl'] = 'rwx'
+
+ # NFS-Ganesha Cluster Info
+ try:
+ cls.enable_nfs_ganesha = bool(g.config['gluster']['cluster_config']
+ ['nfs_ganesha']['enable'])
+ except KeyError:
+ cls.enable_nfs_ganesha = False
# Defining default volume_types configuration.
default_volume_type_config = {
@@ -151,150 +168,136 @@ class GlusterBaseClass(unittest.TestCase):
# Get the volume configuration.
cls.volume = {}
- found_volume = False
- if 'gluster' in g.config:
- if 'volumes' in g.config['gluster']:
- for volume in g.config['gluster']['volumes']:
- if volume['voltype']['type'] == cls.volume_type:
- cls.volume = copy.deepcopy(volume)
- found_volume = True
- break
-
- if found_volume:
- if 'name' not in cls.volume:
+ if cls.volume_type:
+ found_volume = False
+ if 'gluster' in g.config:
+ if 'volumes' in g.config['gluster']:
+ for volume in g.config['gluster']['volumes']:
+ if volume['voltype']['type'] == cls.volume_type:
+ cls.volume = copy.deepcopy(volume)
+ found_volume = True
+ break
+
+ if found_volume:
+ if 'name' not in cls.volume:
+ cls.volume['name'] = 'testvol_%s' % cls.volume_type
+
+ if 'servers' not in cls.volume:
+ cls.volume['servers'] = cls.all_servers
+
+ if not found_volume:
+ try:
+ if g.config['gluster']['volume_types'][cls.volume_type]:
+ cls.volume['voltype'] = (g.config['gluster']
+ ['volume_types']
+ [cls.volume_type])
+ except KeyError:
+ try:
+ cls.volume['voltype'] = (default_volume_type_config
+ [cls.volume_type])
+ except KeyError:
+ raise ConfigError("Unable to get configs of volume "
+ "type: %s", cls.volume_type)
cls.volume['name'] = 'testvol_%s' % cls.volume_type
-
- if 'servers' not in cls.volume:
cls.volume['servers'] = cls.all_servers
- if not found_volume:
- cls.volume = {
- 'name': ('testvol_%s' % cls.volume_type),
- 'servers': cls.all_servers
- }
- try:
- if g.config['gluster']['volume_types'][cls.volume_type]:
- cls.volume['voltype'] = (g.config['gluster']
- ['volume_types'][cls.volume_type])
- except KeyError:
- try:
- cls.volume['voltype'] = (default_volume_type_config
- [cls.volume_type])
- except KeyError:
- raise ConfigError("Unable to get configs of volume type: "
- "%s", cls.volume_type)
-
- # Set volume options
- if 'options' not in cls.volume:
- cls.volume['options'] = {}
-
- # Set nfs.disable to 'off' to start gluster-nfs server on start of the
- # volume if the mount type is 'nfs'
- if cls.mount_type == 'nfs':
- cls.volume['options']['nfs.disable'] = 'off'
-
- # SMB Info
- if cls.mount_type == 'cifs' or cls.mount_type == 'smb':
- if 'smb' not in cls.volume:
- cls.volume['smb'] = {}
- cls.volume['smb']['enable'] = True
- users_info_found = False
- try:
- if cls.volume['smb']['users_info']:
- users_info_found = True
- except KeyError:
- users_info_found = False
-
- if not users_info_found:
- cls.volume['smb']['users_info'] = {}
- try:
- cls.volume['smb']['users_info'] = (
- g.config['gluster']['cluster_config']['smb']
- ['users_info'])
- except KeyError:
- pass
-
- if not cls.volume['smb']['users_info']:
- cls.volume['smb']['users_info']['root'] = {}
- cls.volume['smb']['users_info']['root']['password'] = (
- 'foobar')
-
- # Define Volume variables.
- cls.volname = cls.volume['name']
- cls.servers = cls.volume['servers']
- cls.voltype = cls.volume['voltype']['type']
- cls.mnode = cls.servers[0]
- try:
- cls.smb_users_info = cls.volume['smb']['users_info']
- except KeyError:
- cls.smb_users_info = {}
+ # Set volume options
+ if 'options' not in cls.volume:
+ cls.volume['options'] = {}
+
+ # Define Volume Useful Variables.
+ cls.volname = cls.volume['name']
+ cls.voltype = cls.volume['voltype']['type']
+ cls.servers = cls.volume['servers']
+ cls.mnode = cls.servers[0]
+ cls.vol_options = cls.volume['options']
# Get the mount configuration.
- cls.mounts_dict_list = []
cls.mounts = []
- found_mount = False
- if 'gluster' in g.config:
- if 'mounts' in g.config['gluster']:
- for mount in g.config['gluster']['mounts']:
- if mount['protocol'] == cls.mount_type:
- temp_mount = {}
- temp_mount['protocol'] = cls.mount_type
- if ('volname' in mount and mount['volname']):
- if mount['volname'] == cls.volname:
- temp_mount = copy.deepcopy(mount)
+ if cls.mount_type:
+ cls.mounts_dict_list = []
+ found_mount = False
+ if 'gluster' in g.config:
+ if 'mounts' in g.config['gluster']:
+ for mount in g.config['gluster']['mounts']:
+ if mount['protocol'] == cls.mount_type:
+ temp_mount = {}
+ temp_mount['protocol'] = cls.mount_type
+ if 'volname' in mount and mount['volname']:
+ if mount['volname'] == cls.volname:
+ temp_mount = copy.deepcopy(mount)
+ else:
+ continue
else:
- continue
- else:
- temp_mount['volname'] = cls.volname
- if ('server' not in temp_mount or
- (not temp_mount['server'])):
- temp_mount['server'] = cls.mnode
- if ('mountpoint' not in temp_mount or
- (not temp_mount['mountpoint'])):
- temp_mount['mountpoint'] = (os.path.join(
- "/mnt", '_'.join([cls.volname,
- cls.mount_type])))
- if ('client' not in temp_mount or
- (not temp_mount['client'])):
- temp_mount['client'] = (
- cls.all_clients_info[
- random.choice(cls.all_clients_info.keys())]
- )
- cls.mounts_dict_list.append(temp_mount)
- found_mount = True
- if not found_mount:
- for client in cls.all_clients_info.keys():
- mount = {
- 'protocol': cls.mount_type,
- 'server': cls.mnode,
- 'volname': cls.volname,
- 'client': cls.all_clients_info[client],
- 'mountpoint': (os.path.join(
- "/mnt", '_'.join([cls.volname, cls.mount_type]))),
- 'options': ''
- }
- cls.mounts_dict_list.append(mount)
-
- if cls.mount_type == 'cifs' or cls.mount_type == 'smb':
+ temp_mount['volname'] = cls.volname
+ if ('server' not in temp_mount or
+ (not temp_mount['server'])):
+ temp_mount['server'] = cls.mnode
+ if ('mountpoint' not in temp_mount or
+ (not temp_mount['mountpoint'])):
+ temp_mount['mountpoint'] = (os.path.join(
+ "/mnt", '_'.join([cls.volname,
+ cls.mount_type])))
+ if ('client' not in temp_mount or
+ (not temp_mount['client'])):
+ temp_mount['client'] = (
+ cls.all_clients_info[
+ random.choice(
+ cls.all_clients_info.keys())]
+ )
+ cls.mounts_dict_list.append(temp_mount)
+ found_mount = True
+ if not found_mount:
+ for client in cls.all_clients_info.keys():
+ mount = {
+ 'protocol': cls.mount_type,
+ 'server': cls.mnode,
+ 'volname': cls.volname,
+ 'client': cls.all_clients_info[client],
+ 'mountpoint': (os.path.join(
+ "/mnt", '_'.join([cls.volname, cls.mount_type]))),
+ 'options': ''
+ }
+ cls.mounts_dict_list.append(mount)
+
+ if cls.mount_type == 'cifs' or cls.mount_type == 'smb':
+ for mount in cls.mounts_dict_list:
+ if 'smbuser' not in mount:
+ mount['smbuser'] = random.choice(
+ cls.smb_users_info.keys())
+ mount['smbpasswd'] = (
+ cls.smb_users_info[mount['smbuser']]['password'])
+
+ cls.mounts = create_mount_objs(cls.mounts_dict_list)
+
+ # Defining clients from mounts.
+ cls.clients = []
for mount in cls.mounts_dict_list:
- if 'smbuser' not in mount:
- mount['smbuser'] = random.choice(cls.smb_users_info.keys())
- mount['smbpasswd'] = (
- cls.smb_users_info[mount['smbuser']]['password'])
+ cls.clients.append(mount['client']['host'])
+ cls.clients = list(set(cls.clients))
+
+ # Log the baseclass variables for debugging purposes
+ g.log.debug("GlusterBaseClass Variables:\n %s", cls.__dict__)
+
+ def setUp(self):
+ g.log.info("Starting Test: %s", self.id())
- from glustolibs.gluster.mount_ops import create_mount_objs
- cls.mounts = create_mount_objs(cls.mounts_dict_list)
+ def tearDown(self):
+ g.log.info("Ending Test: %s", self.id())
- # Defining clients from mounts.
- cls.clients = []
- for mount_dict in cls.mounts_dict_list:
- cls.clients.append(mount_dict['client']['host'])
- cls.clients = list(set(cls.clients))
+ @classmethod
+ def tearDownClass(cls):
+ g.log.info("Teardown class: %s", cls.__name__)
class GlusterVolumeBaseClass(GlusterBaseClass):
+ """GlusterVolumeBaseClass sets up the volume for testing purposes.
+ """
@classmethod
def setUpClass(cls):
+ """Setup volume, shares/exports volume for cifs/nfs protocols,
+ mounts the volume.
+ """
GlusterBaseClass.setUpClass.im_func(cls)
# Validate if peer is connected from all the servers
@@ -303,72 +306,76 @@ class GlusterVolumeBaseClass(GlusterBaseClass):
if not ret:
raise ExecutionError("Validating Peers to be in Cluster "
"Failed")
+ g.log.info("All peers are in connected state")
- # Print Peer Status from mnode
- _, _, _ = peer_status(cls.mnode)
+ # Peer Status from mnode
+ peer_status(cls.mnode)
# Setup Volume
ret = setup_volume(mnode=cls.mnode,
all_servers_info=cls.all_servers_info,
volume_config=cls.volume, force=True)
if not ret:
- raise ExecutionError("Setup volume %s failed" % cls.volname)
+ raise ExecutionError("Setup volume %s failed" % cls.volname)
time.sleep(10)
- # Print Volume Info and Status
- _, _, _ = volume_info(cls.mnode, cls.volname)
-
- _, _, _ = volume_status(cls.mnode, cls.volname)
-
- # Validate if volume is exported or not
- if 'nfs' in cls.mount_type:
- cmd = "showmount -e localhost"
- _, _, _ = g.run(cls.mnode, cmd)
-
- cmd = "showmount -e localhost | grep %s" % cls.volname
- ret, _, _ = g.run(cls.mnode, cmd)
- if not ret:
- raise ExecutionError("Volume %s not exported" % cls.volname)
+ # Export/Share the volume based on mount_type
+ if cls.mount_type != "glusterfs":
+ if "nfs" in cls.mount_type:
+ ret = export_volume_through_nfs(
+ mnode=cls.mnode, volname=cls.volname,
+ enable_ganesha=cls.enable_nfs_ganesha)
+ if not ret:
+ raise ExecutionError("Failed to export volume %s "
+ "as NFS export" % cls.volname)
- if 'cifs' in cls.mount_type:
- cmd = "smbclient -L localhost"
- _, _, _ = g.run(cls.mnode, cmd)
+ if "smb" in cls.mount_type or "cifs" in cls.mount_type:
+ ret = share_volume_over_smb(mnode=cls.mnode,
+ volname=cls.volname,
+ smb_users_info=cls.smb_users_info)
+ if not ret:
+ raise ExecutionError("Failed to export volume %s "
+ "as SMB Share" % cls.volname)
- cmd = ("smbclient -L localhost -U | grep -i -Fw gluster-%s " %
- cls.volname)
- ret, _, _ = g.run(cls.mnode, cmd)
- if not ret:
- raise ExecutionError("Volume %s not accessable via SMB/CIFS "
- "share" % cls.volname)
+ # Log Volume Info and Status
+ ret = log_volume_info_and_status(cls.mnode, cls.volname)
+ if not ret:
+ raise ExecutionError("Logging volume %s info and status failed" %
+ cls.volname)
# Create Mounts
- rc = True
+ _rc = True
for mount_obj in cls.mounts:
ret = mount_obj.mount()
if not ret:
- g.log.error("Unable to mount volume '%s:%s' on '%s:%s'" %
- (mount_obj.server_system, mount_obj.volname,
- mount_obj.client_system, mount_obj.mountpoint))
- rc = False
- if not rc:
- raise ExecutionError("Mounting volume %s on few clients failed" %
+ g.log.error("Unable to mount volume '%s:%s' on '%s:%s'",
+ mount_obj.server_system, mount_obj.volname,
+ mount_obj.client_system, mount_obj.mountpoint)
+ _rc = False
+ if not _rc:
+ raise ExecutionError("Mounting volume %s on few clients failed" %
cls.volname)
+ # Get info of mount before the IO
+ log_mounts_info(cls.mounts)
+
@classmethod
def tearDownClass(cls, umount_vol=True, cleanup_vol=True):
- """unittest tearDownClass override"""
+ """Teardown the mounts and volume.
+ """
+ GlusterBaseClass.tearDownClass.im_func(cls)
+
# Unmount volume
if umount_vol:
- rc = True
+ _rc = True
for mount_obj in cls.mounts:
ret = mount_obj.unmount()
if not ret:
- g.log.error("Unable to unmount volume '%s:%s' on '%s:%s'" %
- (mount_obj.server_system, mount_obj.volname,
- mount_obj.client_system, mount_obj.mountpoint)
- )
- rc = False
- if not rc:
+ g.log.error("Unable to unmount volume '%s:%s' on '%s:%s'",
+ mount_obj.server_system, mount_obj.volname,
+ mount_obj.client_system, mount_obj.mountpoint)
+ _rc = False
+ if not _rc:
raise ExecutionError("Unmount of all mounts are not "
"successful")
@@ -376,4 +383,7 @@ class GlusterVolumeBaseClass(GlusterBaseClass):
if cleanup_vol:
ret = cleanup_volume(mnode=cls.mnode, volname=cls.volname)
if not ret:
- raise ExecutionError("cleanup volume %s failed" % cls.volname)
+ raise ExecutionError("cleanup volume %s failed" % cls.volname)
+
+ # All Volume Info
+ volume_info(cls.mnode)
diff --git a/glustolibs-gluster/glustolibs/gluster/nfs_libs.py b/glustolibs-gluster/glustolibs/gluster/nfs_libs.py
new file mode 100644
index 000000000..003ebc2d0
--- /dev/null
+++ b/glustolibs-gluster/glustolibs/gluster/nfs_libs.py
@@ -0,0 +1,63 @@
+# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Description: Libraries for gluster nfs operations.
+"""
+import time
+from glusto.core import Glusto as g
+from glustolibs.gluster.volume_libs import is_volume_exported
+
+
+def export_volume_through_nfs(mnode, volname, enable_ganesha=False,
+ time_delay=30):
+ """Export the volume through nfs
+
+ Args:
+ mnode (str): Node on which cmd has to be executed.
+ volname (str): volume name
+ enable_ganesha (bool): Enable ganesha for the volume.
+ time_delay (int): Time to wait after the volume set operations
+ to validate whether the volume is exported or not.
+
+ Returns:
+ bool: If volume is successfully exported through nfs returns True.
+ False Otherwise.
+ """
+ # Enable nfs on the volume
+ cmd = ("gluster volume set %s nfs.disable off --mode=script" % volname)
+ ret, _, _ = g.run(mnode, cmd)
+ if ret != 0:
+ g.log.error("Failed to enable nfs for the volume %s", volname)
+ return False
+
+ # Enable ganesha on the volume if enable_ganesha is True
+ if enable_ganesha:
+ cmd = ("gluster volume set %s ganesha.enable on --mode=script" %
+ volname)
+ ret, _, _ = g.run(mnode, cmd)
+ if ret != 0:
+ g.log.error("Failed to enable nfs ganesha for volume %s", volname)
+ return False
+
+ time.sleep(time_delay)
+ # Verify if volume is exported
+ ret = is_volume_exported(mnode, volname, "nfs")
+ if not ret:
+ g.log.error("Volume %s is not exported as 'nfs' export", volname)
+ return False
+
+ return True
diff --git a/glustolibs-gluster/glustolibs/gluster/samba_ops.py b/glustolibs-gluster/glustolibs/gluster/samba_libs.py
index ec158b04b..a12e48855 100644
--- a/glustolibs-gluster/glustolibs/gluster/samba_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/samba_libs.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
@@ -19,9 +18,9 @@
Description: Library for samba operations.
"""
-
from glusto.core import Glusto as g
-import time
+from glustolibs.gluster.volume_libs import is_volume_exported
+from glustolibs.gluster.mount_ops import GlusterMount
def start_smb_service(mnode):
@@ -33,6 +32,8 @@ def start_smb_service(mnode):
Returns:
bool: True on successfully starting smb service. False otherwise.
"""
+ g.log.info("Starting SMB Service on %s", mnode)
+
# Enable Samba to start on boot
ret, _, _ = g.run(mnode, "chkconfig smb on")
if ret != 0:
@@ -67,11 +68,21 @@ def smb_service_status(mnode):
The third element 'err' is of type 'str' and is the stderr value
of the command execution.
"""
+ g.log.info("Getting SMB Service status on %s", mnode)
return g.run(mnode, "service smb status")
def is_smb_service_running(mnode):
- ret, out, err = smb_service_status(mnode)
+ """Check if smb service is running on node
+
+ Args:
+ mnode (str): Node on which smb service status has to be verified.
+
+ Returns:
+ bool: True if smb service is running. False otherwise.
+ """
+ g.log.info("Check if SMB service is running on %s", mnode)
+ ret, out, _ = smb_service_status(mnode)
if ret != 0:
return False
if "Active: active (running)" in out:
@@ -89,6 +100,8 @@ def stop_smb_service(mnode):
Returns:
bool: True on successfully stopping smb service. False otherwise.
"""
+ g.log.info("Stopping SMB Service on %s", mnode)
+
# Disable Samba to start on boot
ret, _, _ = g.run(mnode, "chkconfig smb off")
if ret != 0:
@@ -106,94 +119,6 @@ def stop_smb_service(mnode):
return True
-def create_smb_users(servers, smb_users_info, start_uid=50000):
- """Creates SMB users on specified servers and sets password for SMB users.
-
- Args:
- servers (list): List of server hosts on which smb users has to be
- created
- smb_users_info (dict): Dict containing users info. Example:
- smb_users_info = {
- 'root': {'password': 'foobar',
- 'acl': ''
- },
- 'user1': {'password': 'abc',
- 'acl': ''
- },
- 'user2': {'password': 'xyz',
- 'acl': ''
- }
- }
- start_uid (int): starting uid number for the users
-
- Returns:
- bool: True on successfully creating smb users. False otherwise.
- """
- uid = start_uid
- for smb_user in smb_users_info.keys():
- if smb_user == 'root':
- continue
- for server in servers:
- # Check if user already exist with same uid
- cmd = ("getent passwd %d" % uid)
- ret, out, err = g.run(server, cmd)
- if ret == 0:
- if smb_user in out.split(":")[0]:
- continue
- else:
- cmd = ("userdel -f %s" % smb_user)
- ret, _, _ = g.run(server, cmd)
- if ret != 0:
- g.log.error("Unable to delete the smb user '%s' on "
- "server %s" % (smb_user, server))
- return False
-
- else:
- cmd = ("useradd -u %d %s" % (uid, smb_user))
- ret, out, err = g.run(server, cmd)
- if ret != 0:
- g.log.error("Unable to add the smb user '%s' on "
- "server %s" % (smb_user, server))
- return False
- uid = uid + 1
-
- mnode = servers[0]
- for smb_user in smb_users_info.keys():
- if 'password' in smb_users_info[smb_user]:
- smbpasswd = smb_users_info[smb_user]['password']
- else:
- g.log.error("Password not found for the user %s" % smb_user)
- return False
-
- # Set smb password for smb_user
- cmd = ("(echo \"%s\"; echo \"%s\") | smbpasswd -a %s" %
- (smbpasswd, smbpasswd, smb_user))
-
- ret, _, _ = g.run(mnode, cmd)
- if ret != 0:
- g.log.error("Unable to set the smb password for smb user %s" %
- smb_user)
- return False
- g.log.info("Successfully set password for smb user %s on node %s" %
- (smb_user, mnode))
-
- return True
-
-
-def delete_smb_users(servers, smb_users_info):
- rc = True
- for smb_user in smb_users_info.keys():
- if smb_user == 'root':
- continue
- cmd = ("userdel -r -f %s" % smb_user)
- for server in servers:
- ret, out, err = g.run(server, cmd)
- if ret != 0:
- if not ("userdel: user '%s' does not exist" % smb_user) in out:
- rc = False
- return rc
-
-
def list_smb_shares(mnode):
"""List all the gluster volumes that are exported as SMB Shares
@@ -204,9 +129,10 @@ def list_smb_shares(mnode):
list: List of all volume names that are exported as SMB Shares.
Empty list if no volumes are exported as SMB Share.
"""
+ g.log.info("List all SMB Shares")
smb_shares_list = []
cmd = "smbclient -L localhost"
- ret, out, err = g.run(mnode, cmd)
+ ret, out, _ = g.run(mnode, cmd)
if ret != 0:
g.log.error("Failed to find the SMB Shares")
return smb_shares_list
@@ -219,112 +145,8 @@ def list_smb_shares(mnode):
return smb_shares_list
-def share_volume_over_smb(mnode, volname, servers, smb_users_info):
- """Sharing volumes over SMB
-
- Args:
- mnode (str): Node on which commands has to be executed.
- volname (str): Name of the volume to be shared.
- servers (list): List of all servers in the storage pool.
- smb_users_info (dict): Dict containing users info. Example:
- smb_users_info = {
- 'root': {'password': 'foobar',
- 'acl': ''
- },
- 'user1': {'password': 'abc',
- 'acl': ''
- },
- 'user2': {'password': 'xyz',
- 'acl': ''
- }
- }
-
- Returns:
- bool : True on successfully sharing the volume over SMB.
- False otherwise
- """
- # Set volume option 'stat-prefetch' to 'off'.
- cmd = "gluster volume set %s stat-prefetch off" % volname
- ret, _, _ = g.run(mnode, cmd)
- if ret != 0:
- g.log.error("Failed to set the volume option stat-prefetch off")
- return False
- g.log.info("Successfully set 'stat-prefetch' to 'off' on %s" % volname)
-
- # Set volume option 'server.allow-insecure' to 'on'.
- cmd = "gluster volume set %s server.allow-insecure on" % volname
- ret, _, _ = g.run(mnode, cmd)
- if ret != 0:
- g.log.error("Failed to set the volume option server-allow-insecure")
- return False
- g.log.info("Successfully set 'server-allow-insecure' to 'on' on %s" %
- volname)
-
- # Set 'storage.batch-fsync-delay-usec' to 0.
- # This is to ensure ping_pong's lock and I/O coherency tests works on CIFS.
- cmd = ("gluster volume set %s storage.batch-fsync-delay-usec 0" % volname)
- ret, _, _ = g.run(mnode, cmd)
- if ret != 0:
- g.log.error("Failed to set the volume option "
- "'storage.batch-fsync-delay-usec' to 0 on %s" % volname)
- return False
- g.log.info("Successfully set 'storage.batch-fsync-delay-usec' to 0 on %s" %
- volname)
-
- # Edit the /etc/glusterfs/glusterd.vol in each Red Hat Gluster Storage
- # node, to add a line 'option rpc-auth-allow-insecure on'
- glusterd_volfile = "/etc/glusterfs/glusterd.vol"
- glusterd_volfile_edit_cmd = (
- ("grep -F 'option rpc-auth-allow-insecure on' %s > /dev/null || "
- "(cp %s %s.orig && "
- "sed -i '/^end-volume/d' %s && "
- "echo ' option rpc-auth-allow-insecure on' >> %s && "
- "echo 'end-volume' >> %s )") %
- (glusterd_volfile, glusterd_volfile, glusterd_volfile,
- glusterd_volfile, glusterd_volfile, glusterd_volfile))
- results = g.run_parallel(servers, glusterd_volfile_edit_cmd)
- rc = True
- for server, ret_values in results.iteritems():
- retcode, out, err = ret_values
- if retcode != 0:
- g.log.error("Unable to edit glusterd volfile on server %s", server)
- rc = False
- if not rc:
- return False
- g.log.info("Succefully edited all the servers glusterd volfile to add "
- "the setting 'option rpc-auth-allow-insecure on'")
-
- # Restart glusterd service on each Red Hat Gluster Storage node.
- from glustolibs.gluster.gluster_init import restart_glusterd
- ret = restart_glusterd(servers)
- if not ret:
- g.log.error("Unable to restart glusterd on few servers")
- return False
- g.log.info("Successfully restarted glusterd on all servers")
- time.sleep(30)
- # Verify if the volume can be accessed from the SMB/CIFS share.
- cmd = ("smbclient -L localhost -U | grep -i -Fw gluster-%s " % volname)
- ret, _, _ = g.run(mnode, cmd)
- if ret != 0:
- g.log.error("volume '%s' not accessable via SMB/CIFS share" % volname)
- return False
- g.log.info("volume '%s' can be accessed from SMB/CIFS share" % volname)
-
- # To verify if the SMB/CIFS share can be accessed by the root/non-root user
- # TBD
-
- # Enable mounting volumes using SMB
- ret = enable_mounting_volume_using_smb(mnode, volname, smb_users_info)
- if not ret:
- g.log.error("Failed to enable mounting volumes using SMB")
- return False
- g.log.info("Successfully enabled mounting volumes using SMV for the "
- "smbusers: %s" % smb_users_info.keys())
- return True
-
-
-def enable_mounting_volume_using_smb(mnode, volname, smb_users_info):
- """Enable mounting volume using SMB. Set ACL's for non-root users.
+def enable_mounting_volume_over_smb(mnode, volname, smb_users_info):
+ """Enable mounting volume over SMB. Set ACL's for non-root users.
Args:
mnode (str): Node on which commands are executed.
@@ -345,8 +167,8 @@ def enable_mounting_volume_using_smb(mnode, volname, smb_users_info):
bool: True on successfully enabling to mount volume using SMB.
False otherwise.
"""
+ g.log.info("Enable mounting volume over SMB")
# Create a temp mount to provide required permissions to the smb user
- from glustolibs.gluster.mount_ops import GlusterMount
mount = {
'protocol': 'glusterfs',
'server': mnode,
@@ -381,10 +203,10 @@ def enable_mounting_volume_using_smb(mnode, volname, smb_users_info):
ret, _, _ = g.run(mnode, cmd)
if ret != 0:
g.log.error("Unable to provide required permissions to the "
- "smb user %s " % smb_user)
+ "smb user %s ", smb_user)
return False
g.log.info("Successfully provided required permissions to the "
- "smb user %s " % smb_user)
+ "smb user %s ", smb_user)
# Verify SMB/CIFS share can be accessed by the user
@@ -395,3 +217,85 @@ def enable_mounting_volume_using_smb(mnode, volname, smb_users_info):
g.log.info("Successfully unmounted the temp mount")
return True
+
+
+def share_volume_over_smb(mnode, volname, smb_users_info):
+ """Sharing volumes over SMB
+
+ Args:
+ mnode (str): Node on which commands has to be executed.
+ volname (str): Name of the volume to be shared.
+ smb_users_info (dict): Dict containing users info. Example:
+ smb_users_info = {
+ 'root': {'password': 'foobar',
+ 'acl': ''
+ },
+ 'user1': {'password': 'abc',
+ 'acl': ''
+ },
+ 'user2': {'password': 'xyz',
+ 'acl': ''
+ }
+ }
+
+ Returns:
+ bool : True on successfully sharing the volume over SMB.
+ False otherwise
+ """
+ g.log.info("Start sharing the volume over SMB")
+
+ # Set volume option 'stat-prefetch' to 'off'.
+ cmd = "gluster volume set %s stat-prefetch off" % volname
+ ret, _, _ = g.run(mnode, cmd)
+ if ret != 0:
+ g.log.error("Failed to set the volume option stat-prefetch off")
+ return False
+ g.log.info("Successfully set 'stat-prefetch' to 'off' on %s", volname)
+
+ # Set volume option 'server.allow-insecure' to 'on'.
+ cmd = "gluster volume set %s server.allow-insecure on" % volname
+ ret, _, _ = g.run(mnode, cmd)
+ if ret != 0:
+ g.log.error("Failed to set the volume option server-allow-insecure")
+ return False
+ g.log.info("Successfully set 'server-allow-insecure' to 'on' on %s",
+ volname)
+
+ # Set 'storage.batch-fsync-delay-usec' to 0.
+ # This is to ensure ping_pong's lock and I/O coherency tests works on CIFS.
+ cmd = ("gluster volume set %s storage.batch-fsync-delay-usec 0" % volname)
+ ret, _, _ = g.run(mnode, cmd)
+ if ret != 0:
+ g.log.error("Failed to set the volume option "
+ "'storage.batch-fsync-delay-usec' to 0 on %s", volname)
+ return False
+ g.log.info("Successfully set 'storage.batch-fsync-delay-usec' to 0 on %s",
+ volname)
+
+ # Verify if the volume can be accessed from the SMB/CIFS share.
+ cmd = ("smbclient -L localhost -U | grep -i -Fw gluster-%s " % volname)
+ ret, _, _ = g.run(mnode, cmd)
+ if ret != 0:
+ g.log.error("volume '%s' not accessible via SMB/CIFS share", volname)
+ return False
+ g.log.info("volume '%s' can be accessed from SMB/CIFS share", volname)
+
+ # To verify if the SMB/CIFS share can be accessed by the root/non-root user
+ # TBD
+
+ # Enable mounting volumes over SMB
+ ret = enable_mounting_volume_over_smb(mnode, volname, smb_users_info)
+ if not ret:
+ g.log.error("Failed to enable mounting volumes using SMB")
+ return False
+ g.log.info("Successfully enabled mounting volumes using SMB for the "
+ "smbusers: %s", str(smb_users_info.keys()))
+
+ # Verify if volume is shared
+ ret = is_volume_exported(mnode, volname, "smb")
+ if not ret:
+ g.log.error("Volume %s is not exported as 'cifs/smb' share", volname)
+ return False
+ g.log.info("Volume %s is exported as 'cifs/smb' share", volname)
+
+ return True
diff --git a/glustolibs-gluster/glustolibs/gluster/volume_libs.py b/glustolibs-gluster/glustolibs/gluster/volume_libs.py
index 94a817dae..6a3f80ac3 100644
--- a/glustolibs-gluster/glustolibs/gluster/volume_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/volume_libs.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
@@ -26,14 +25,14 @@ from glustolibs.gluster.lib_utils import form_bricks_list
from glustolibs.gluster.volume_ops import (volume_create, volume_start,
set_volume_options, get_volume_info,
volume_stop, volume_delete,
- volume_info, volume_status)
+ volume_info, volume_status,
+ get_volume_options)
from glustolibs.gluster.tiering_ops import (add_extra_servers_to_cluster,
tier_attach,
is_tier_process_running)
from glustolibs.gluster.quota_ops import (enable_quota, set_quota_limit_usage,
is_quota_enabled)
from glustolibs.gluster.uss_ops import enable_uss, is_uss_enabled
-from glustolibs.gluster.samba_ops import share_volume_over_smb
from glustolibs.gluster.snap_ops import snap_delete_by_volumename
from glustolibs.gluster.brick_libs import are_bricks_online, get_all_bricks
from glustolibs.gluster.heal_libs import are_all_self_heal_daemons_are_online
@@ -210,14 +209,14 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
bricks_list=bricks_list, force=force,
**kwargs)
if ret != 0:
- g.log.error("Unable to create volume %s" % volname)
+ g.log.error("Unable to create volume %s", volname)
return False
# Start Volume
time.sleep(2)
ret = volume_start(mnode, volname)
if not ret:
- g.log.error("volume start %s failed" % volname)
+ g.log.error("volume start %s failed", volname)
return False
# Create Tier volume
@@ -273,13 +272,13 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
time.sleep(30)
# Check if tier is running
- rc = True
+ _rc = True
for server in extra_servers:
ret = is_tier_process_running(server, volname)
if not ret:
g.log.error("Tier process not running on %s", server)
- rc = False
- if not rc:
+ _rc = False
+ if not _rc:
return False
# Enable Quota
@@ -291,13 +290,13 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
return False
# Check if 'limit_usage' is defined
- if ('limit_usage' in volume_config['quota']):
- if ('path' in volume_config['quota']['limit_usage']):
+ if 'limit_usage' in volume_config['quota']:
+ if 'path' in volume_config['quota']['limit_usage']:
path = volume_config['quota']['limit_usage']['path']
else:
path = "/"
- if ('size' in volume_config['quota']['limit_usage']):
+ if 'size' in volume_config['quota']['limit_usage']:
size = volume_config['quota']['limit_usage']['size']
else:
size = "100GB"
@@ -331,34 +330,6 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
g.log.error("USS is not enabled on the volume %s", volname)
return False
- # Enable Ganesha
-# if ('nfs_ganesha' in volume_config and
-# 'enable' in volume_config['nfs_ganesha'] and
-# volume_config['nfs_ganesha']['enable']):
-# from glustolibs.gluster.ganesha import vol_set_ganesha
-# ret = vol_set_ganesha(mnode=mnode, volname=volname, option=True)
-# if not ret:
-# g.log.error("failed to set the ganesha option for %s" % volname)
-# return False
-
- # Enable Samba
- if ('smb' in volume_config and 'enable' in volume_config['smb'] and
- volume_config['smb']['enable']):
- smb_users_info = {}
- if ('users_info' in volume_config['smb'] and
- volume_config['smb']['users_info']):
- smb_users_info = volume_config['smb']['users_info']
- else:
- g.log.error("SMB Users info not available in the volume config."
- "Unable to export volume %s as SMB Share" % volname)
- return False
- ret = share_volume_over_smb(mnode=mnode, volname=volname,
- servers=servers,
- smb_users_info=smb_users_info)
- if not ret:
- g.log.error("Failed to export volume %s as SMB Share" % volname)
- return False
-
# Set all the volume options:
if 'options' in volume_config:
volume_options = volume_config['options']
@@ -388,23 +359,22 @@ def cleanup_volume(mnode, volname):
"""
volinfo = get_volume_info(mnode, volname)
if volinfo is None or volname not in volinfo:
- g.log.info("Volume %s does not exist in %s" % (volname, mnode))
+ g.log.info("Volume %s does not exist in %s", volname, mnode)
return True
ret, _, _ = snap_delete_by_volumename(mnode, volname)
if ret != 0:
- g.log.error("Failed to delete the snapshots in "
- "volume %s" % volname)
+ g.log.error("Failed to delete the snapshots in volume %s", volname)
return False
ret, _, _ = volume_stop(mnode, volname, force=True)
if ret != 0:
- g.log.error("Failed to stop volume %s" % volname)
+ g.log.error("Failed to stop volume %s", volname)
return False
ret = volume_delete(mnode, volname)
if not ret:
- g.log.error("Unable to cleanup the volume %s" % volname)
+ g.log.error("Unable to cleanup the volume %s", volname)
return False
return True
@@ -431,7 +401,7 @@ def is_volume_exported(mnode, volname, share_type):
else:
return True
- if 'cifs' in share_type:
+ if 'cifs' in share_type or 'smb' in share_type:
cmd = "smbclient -L localhost"
_, _, _ = g.run(mnode, cmd)
@@ -525,18 +495,19 @@ def get_subvols(mnode, volname):
['hotBricks']['hotBrickType'])
tmp = volinfo[volname]["bricks"]['hotBricks']["brick"]
hot_tier_bricks = [x["name"] for x in tmp if "name" in x]
- if (hot_tier_type == 'Distribute'):
+ if hot_tier_type == 'Distribute':
for brick in hot_tier_bricks:
subvols['hot_tier_subvols'].append([brick])
elif (hot_tier_type == 'Replicate' or
hot_tier_type == 'Distributed-Replicate'):
- rep_count = int((volinfo[volname]["bricks"]['hotBricks']
- ['numberOfBricks']).split("=", 1)[0].
- split("x")[1].strip())
- subvol_list = ([hot_tier_bricks[i:i + rep_count]
- for i in range(0, len(hot_tier_bricks),
- rep_count)])
+ rep_count = int(
+ (volinfo[volname]["bricks"]['hotBricks']
+ ['numberOfBricks']).split("=", 1)[0].split("x")[1].strip()
+ )
+ subvol_list = (
+ [hot_tier_bricks[i:i + rep_count]
+ for i in range(0, len(hot_tier_bricks), rep_count)])
subvols['hot_tier_subvols'] = subvol_list
# Get cold tier subvols
@@ -546,29 +517,31 @@ def get_subvols(mnode, volname):
cold_tier_bricks = [x["name"] for x in tmp if "name" in x]
# Distribute volume
- if (cold_tier_type == 'Distribute'):
+ if cold_tier_type == 'Distribute':
for brick in cold_tier_bricks:
subvols['cold_tier_subvols'].append([brick])
# Replicate or Distribute-Replicate volume
elif (cold_tier_type == 'Replicate' or
cold_tier_type == 'Distributed-Replicate'):
- rep_count = int((volinfo[volname]["bricks"]['coldBricks']
- ['numberOfBricks']).split("=", 1)[0].
- split("x")[1].strip())
- subvol_list = ([cold_tier_bricks[i:i + rep_count]
- for i in range(0, len(cold_tier_bricks),
- rep_count)])
+ rep_count = int(
+ (volinfo[volname]["bricks"]['coldBricks']
+ ['numberOfBricks']).split("=", 1)[0].split("x")[1].strip()
+ )
+ subvol_list = (
+ [cold_tier_bricks[i:i + rep_count]
+ for i in range(0, len(cold_tier_bricks), rep_count)])
subvols['cold_tier_subvols'] = subvol_list
# Disperse or Distribute-Disperse volume
elif (cold_tier_type == 'Disperse' or
cold_tier_type == 'Distributed-Disperse'):
- disp_count = sum([int(nums) for nums in
- ((volinfo[volname]["bricks"]['coldBricks']
- ['numberOfBricks']).split("x", 1)[1].
- strip().split("=")[0].strip().strip("()").
- split()) if nums.isdigit()])
+ disp_count = sum(
+ [int(nums) for nums in (
+ (volinfo[volname]["bricks"]['coldBricks']
+ ['numberOfBricks']).split("x", 1)[1].
+ strip().split("=")[0].strip().strip("()").
+ split()) if nums.isdigit()])
subvol_list = [cold_tier_bricks[i:i + disp_count]
for i in range(0, len(cold_tier_bricks),
disp_count)]
@@ -587,11 +560,10 @@ def get_subvols(mnode, volname):
for brick in bricks:
subvols['volume_subvols'].append([brick])
- elif (voltype == 'Disperse' or voltype == 'Distributed-Disperse'):
+ elif voltype == 'Disperse' or voltype == 'Distributed-Disperse':
disp_count = int(volinfo[volname]['disperseCount'])
- subvol_list = [bricks[i:i + disp_count]for i in range(0,
- len(bricks),
- disp_count)]
+ subvol_list = ([bricks[i:i + disp_count]
+ for i in range(0, len(bricks), disp_count)])
subvols['volume_subvols'] = subvol_list
return subvols
@@ -609,7 +581,7 @@ def is_tiered_volume(mnode, volname):
"""
volinfo = get_volume_info(mnode, volname)
if volinfo is None:
- g.log.error("Unable to get the volume info for volume %s" % volname)
+ g.log.error("Unable to get the volume info for volume %s", volname)
return None
voltype = volinfo[volname]['typeStr']
@@ -632,7 +604,7 @@ def is_distribute_volume(mnode, volname):
"""
volume_type_info = get_volume_type_info(mnode, volname)
if volume_type_info is None:
- g.log.error("Unable to check if the volume %s is distribute" % volname)
+ g.log.error("Unable to check if the volume %s is distribute", volname)
return False
if volume_type_info['is_tier']:
@@ -640,7 +612,7 @@ def is_distribute_volume(mnode, volname):
['hotBrickType'])
cold_tier_type = (volume_type_info['cold_tier_type_info']
['coldBrickType'])
- if (hot_tier_type == 'Distribute' and cold_tier_type == 'Distribute'):
+ if hot_tier_type == 'Distribute' and cold_tier_type == 'Distribute':
return True
else:
return False
@@ -693,7 +665,7 @@ def get_volume_type_info(mnode, volname):
"""
volinfo = get_volume_info(mnode, volname)
if volinfo is None:
- g.log.error("Unable to get the volume info for volume %s" % volname)
+ g.log.error("Unable to get the volume info for volume %s", volname)
return None
volume_type_info = {
@@ -726,7 +698,7 @@ def get_volume_type_info(mnode, volname):
non_tiered_volume_type_info[key] = volinfo[volname][key]
else:
g.log.error("Unable to find key '%s' in the volume info for "
- "the volume %s" % (key, volname))
+ "the volume %s", key, volname)
non_tiered_volume_type_info[key] = None
volume_type_info['volume_type_info'] = non_tiered_volume_type_info
@@ -754,11 +726,11 @@ def get_cold_tier_type_info(mnode, volname):
"""
volinfo = get_volume_info(mnode, volname)
if volinfo is None:
- g.log.error("Unable to get the volume info for volume %s" % volname)
+ g.log.error("Unable to get the volume info for volume %s", volname)
return None
if not is_tiered_volume(mnode, volname):
- g.log.error("Volume %s is not a tiered volume" % volname)
+ g.log.error("Volume %s is not a tiered volume", volname)
return None
cold_tier_type_info = {
@@ -773,7 +745,7 @@ def get_cold_tier_type_info(mnode, volname):
['coldBricks'][key])
else:
g.log.error("Unable to find key '%s' in the volume info for the "
- "volume %s" % (key, volname))
+ "volume %s", key, volname)
return None
if 'Disperse' in cold_tier_type_info['coldBrickType']:
@@ -804,11 +776,11 @@ def get_hot_tier_type_info(mnode, volname):
"""
volinfo = get_volume_info(mnode, volname)
if volinfo is None:
- g.log.error("Unable to get the volume info for volume %s" % volname)
+ g.log.error("Unable to get the volume info for volume %s", volname)
return None
if not is_tiered_volume(mnode, volname):
- g.log.error("Volume %s is not a tiered volume" % volname)
+ g.log.error("Volume %s is not a tiered volume", volname)
return None
hot_tier_type_info = {
@@ -821,7 +793,7 @@ def get_hot_tier_type_info(mnode, volname):
[key])
else:
g.log.error("Unable to find key '%s' in the volume info for the "
- "volume %s" % (key, volname))
+ "volume %s", key, volname)
return None
return hot_tier_type_info
@@ -889,7 +861,7 @@ def get_cold_tier_num_of_bricks_per_subvol(mnode, volname):
NoneType: None if volume doesnot exist or not a tiered volume.
"""
if not is_tiered_volume(mnode, volname):
- g.log.error("Volume %s is not a tiered volume" % volname)
+ g.log.error("Volume %s is not a tiered volume", volname)
return None
subvols_dict = get_subvols(mnode, volname)
if subvols_dict['cold_tier_subvols']:
@@ -910,7 +882,7 @@ def get_hot_tier_num_of_bricks_per_subvol(mnode, volname):
NoneType: None if volume doesnot exist or not a tiered volume.
"""
if not is_tiered_volume(mnode, volname):
- g.log.error("Volume %s is not a tiered volume" % volname)
+ g.log.error("Volume %s is not a tiered volume", volname)
return None
subvols_dict = get_subvols(mnode, volname)
if subvols_dict['hot_tier_subvols']:
@@ -945,7 +917,7 @@ def get_replica_count(mnode, volname):
"""
vol_type_info = get_volume_type_info(mnode, volname)
if vol_type_info is None:
- g.log.error("Unable to get the replica count info for the volume %s" %
+ g.log.error("Unable to get the replica count info for the volume %s",
volname)
return None
@@ -1036,7 +1008,7 @@ def get_disperse_count(mnode, volname):
"""
vol_type_info = get_volume_type_info(mnode, volname)
if vol_type_info is None:
- g.log.error("Unable to get the disperse count info for the volume %s" %
+ g.log.error("Unable to get the disperse count info for the volume %s",
volname)
return None
@@ -1077,3 +1049,47 @@ def get_cold_tier_disperse_count(mnode, volname):
cold_tier_disperse_count = (volinfo[volname]["bricks"]['coldBricks']
['colddisperseCount'])
return cold_tier_disperse_count
+
+
+def enable_and_validate_volume_options(mnode, volname, volume_options_list,
+ time_delay=5):
+    """Enable the volume option and validate whether the option has been
+ successfully enabled or not
+
+ Args:
+ mnode (str): Node on which commands are executed.
+ volname (str): Name of the volume.
+ volume_options_list (list): List of volume options to be enabled
+ time_delay (int): Time delay between 2 volume set operations
+
+ Returns:
+ bool: True when enabling and validating all volume options is
+ successful. False otherwise
+ """
+ if not isinstance(volume_options_list, list):
+ volume_options_list = [volume_options_list]
+
+ for option in volume_options_list:
+ # Set volume option to 'enable'
+ g.log.info("Setting the volume option : %s", option)
+ ret = set_volume_options(mnode, volname, {option: "enable"})
+ if not ret:
+ return False
+
+ # Validate whether the option is set on the volume
+ g.log.info("Validating the volume option : %s to be set to 'enable'",
+ option)
+ option_dict = get_volume_options(mnode, volname, option)
+ g.log.info("Options Dict: %s", option_dict)
+ if option_dict is None:
+ g.log.error("%s is not enabled on the volume %s", option, volname)
+ return False
+
+ if option not in option_dict or "enable" not in option_dict[option]:
+ g.log.error("%s is not enabled on the volume %s", option, volname)
+ return False
+
+ g.log.info("%s is enabled on the volume %s", option, volname)
+ time.sleep(time_delay)
+
+ return True
diff --git a/tests/functional/bvt/test_bvt_lite_and_plus.py b/tests/functional/bvt/test_bvt_lite_and_plus.py
deleted file mode 100644
index 074d57910..000000000
--- a/tests/functional/bvt/test_bvt_lite_and_plus.py
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-import os
-from glusto.core import Glusto as g
-from glustolibs.gluster.gluster_base_class import (GlusterVolumeBaseClass,
- runs_on)
-
-
-@runs_on([['replicated', 'distributed', 'distributed-replicated',
- 'dispersed', 'distributed-dispersed'],
- ['glusterfs', 'nfs', 'cifs']])
-class BvtTestsClass(GlusterVolumeBaseClass):
- """Class containing case for : BVT Lite and BVT Plus.
-
- BVT Lite: Run the case on dis-rep volume with glusterfs, nfs, cifs
- protocols
-
- BVT Plus: Run the case on all volume types and all protocol types
- combinations
- """
- @classmethod
- def setUpClass(cls):
- """Setup Volume and Mounts.
- """
- g.log.info("Starting %s:" % cls.__name__)
- GlusterVolumeBaseClass.setUpClass.im_func(cls)
-
- # Upload io scripts
- cls.script_local_path = ("/usr/share/glustolibs/io/"
- "scripts/file_dir_ops.py")
- cls.script_upload_path = "/tmp/file_dir_ops.py"
- ret = os.path.exists(cls.script_local_path)
- if not ret:
- raise Exception("Unable to find the io scripts")
-
- for client in cls.clients:
- g.upload(client, cls.script_local_path, cls.script_upload_path)
- g.run(client, "ls -l %s" % cls.script_upload_path)
- g.run(client, "chmod +x %s" % cls.script_upload_path)
- g.run(client, "ls -l %s" % cls.script_upload_path)
-
- def setUp(self):
- pass
-
- def test_bvt(self):
- """Test IO from the mounts.
- """
- g.log.info("Starting Test: %s on %s %s" %
- (self.id(), self.volume_type, self.mount_type))
-
- # Get stat of mount before the IO
- for mount_obj in self.mounts:
- cmd = "mount | grep %s" % mount_obj.mountpoint
- ret, out, err = g.run(mount_obj.client_system, cmd)
- cmd = "df -h %s" % mount_obj.mountpoint
- ret, out, err = g.run(mount_obj.client_system, cmd)
- cmd = "ls -ld %s" % mount_obj.mountpoint
- ret, out, err = g.run(mount_obj.client_system, cmd)
- cmd = "stat %s" % mount_obj.mountpoint
- ret, out, err = g.run(mount_obj.client_system, cmd)
-
- # Start IO on all mounts.
- all_mounts_procs = []
- count = 1
- for mount_obj in self.mounts:
- cmd = ("python %s create_deep_dirs_with_files "
- "--dirname-start-num %d "
- "--dir-depth 2 "
- "--dir-length 10 "
- "--max-num-of-dirs 5 "
- "--num-of-files 5 %s" % (self.script_upload_path,
- count, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
- count = count + 10
-
- # Get IO status
- rc = True
- for i, proc in enumerate(all_mounts_procs):
- ret, _, _ = proc.async_communicate()
- if ret != 0:
- g.log.error("IO Failed on %s:%s" %
- (self.mounts[i].client_system,
- self.mounts[i].mountpoint))
- rc = False
- assert (rc is True), "IO failed on some of the clients"
-
- # Get stat of all the files/dirs created.
- all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s stat "
- "-R %s" % (self.script_upload_path, mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- all_mounts_procs.append(proc)
- rc = True
- for i, proc in enumerate(all_mounts_procs):
- ret, _, _ = proc.async_communicate()
- if ret != 0:
- g.log.error("Stat of files and dirs under %s:%s Failed" %
- (self.mounts[i].client_system,
- self.mounts[i].mountpoint))
- rc = False
- assert (rc is True), "Stat failed on some of the clients"
-
- def tearDown(self):
- pass
-
- @classmethod
- def tearDownClass(cls):
- """Cleanup mount and Cleanup the volume
- """
- GlusterVolumeBaseClass.tearDownClass.im_func(cls)
diff --git a/tests/functional/bvt/test_vvt.py b/tests/functional/bvt/test_vvt.py
new file mode 100644
index 000000000..5730996bd
--- /dev/null
+++ b/tests/functional/bvt/test_vvt.py
@@ -0,0 +1,129 @@
+# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Description: BVT-Volume Verification Tests (VVT). Tests the Basic
+ Volume Operations like start, status, stop, delete.
+
+"""
+
+import pytest
+import time
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import (GlusterVolumeBaseClass,
+ runs_on)
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_init import is_glusterd_running
+from glustolibs.gluster.volume_ops import volume_stop, volume_start
+from glustolibs.gluster.volume_libs import (
+ verify_all_process_of_volume_are_online)
+from glustolibs.gluster.volume_libs import log_volume_info_and_status
+from glustolibs.misc.misc_libs import upload_scripts
+from glustolibs.io.utils import validate_io_procs, get_mounts_stat
+
+
+@runs_on([['replicated', 'distributed', 'distributed-replicated',
+ 'dispersed', 'distributed-dispersed'],
+ ['glusterfs', 'nfs', 'cifs']])
+class VolumeAccessibilityTests(GlusterVolumeBaseClass):
+    """ VolumeAccessibilityTests contains tests which verify
+    accessibility of the volume.
+ """
+ @classmethod
+ def setUpClass(cls):
+ """Setup Volume, Create Mounts and upload the necessary scripts to run
+ tests.
+ """
+ # Sets up volume, mounts
+ GlusterVolumeBaseClass.setUpClass.im_func(cls)
+
+ # Upload io scripts for running IO on mounts
+ script_local_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
+ "file_dir_ops.py")
+ ret = upload_scripts(cls.clients, script_local_path)
+ if not ret:
+ raise ExecutionError("Failed to upload IO scripts")
+
+ @pytest.mark.bvt_vvt
+ def test_volume_create_start_stop_start(self):
+ """Tests volume create, start, status, stop, start.
+ Also Validates whether all the brick process are running after the
+ start of the volume.
+ """
+ # Verify volume's all process are online
+ ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
+ self.assertTrue(ret, ("Volume %s : All process are not online" %
+ self.volname))
+
+ # Stop Volume
+ ret, _, _ = volume_stop(self.mnode, self.volname, force=True)
+ self.assertEqual(ret, 0, "Failed to stop volume %s" % self.volname)
+
+ # Start Volume
+ ret, _, _ = volume_start(self.mnode, self.volname)
+ self.assertEqual(ret, 0, "Unable to start volume %s" % self.volname)
+
+ time.sleep(15)
+
+ # Log Volume Info and Status
+ ret = log_volume_info_and_status(self.mnode, self.volname)
+ self.assertTrue(ret, ("Logging volume %s info and status failed" %
+ self.volname))
+
+ # Verify volume's all process are online
+ ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
+ self.assertTrue(ret, ("Volume %s : All process are not online" %
+ self.volname))
+
+ # Log Volume Info and Status
+ ret = log_volume_info_and_status(self.mnode, self.volname)
+ self.assertTrue(ret, ("Logging volume %s info and status failed" %
+ self.volname))
+
+ # Verify all glusterd's are running
+ ret = is_glusterd_running(self.servers)
+ self.assertEqual(ret, 0, ("glusterd not running on all servers: %s" %
+ self.servers))
+
+ @pytest.mark.bvt_vvt
+ def test_file_dir_create_ops_on_volume(self):
+ """Test File Directory Creation on the volume.
+ """
+ # Start IO on all mounts.
+ all_mounts_procs = []
+ count = 1
+ for mount_obj in self.mounts:
+ cmd = ("python %s create_deep_dirs_with_files "
+ "--dirname-start-num %d "
+ "--dir-depth 2 "
+ "--dir-length 10 "
+ "--max-num-of-dirs 5 "
+ "--num-of-files 5 %s" % (self.script_upload_path, count,
+ mount_obj.mountpoint))
+ proc = g.run_async(mount_obj.client_system, cmd,
+ user=mount_obj.user)
+ all_mounts_procs.append(proc)
+ count = count + 10
+
+ # Validate IO
+ ret = validate_io_procs(all_mounts_procs, self.mounts)
+ self.assertTrue(ret, "IO failed on some of the clients")
+
+ # Get stat of all the files/dirs created.
+ ret = get_mounts_stat(self.mounts)
+ self.assertTrue(ret, "Stat failed on some of the clients")