summaryrefslogtreecommitdiffstats
path: root/glustolibs-gluster/glustolibs
diff options
context:
space:
mode:
Diffstat (limited to 'glustolibs-gluster/glustolibs')
-rw-r--r--glustolibs-gluster/glustolibs/gluster/brick_libs.py414
-rw-r--r--glustolibs-gluster/glustolibs/gluster/brick_ops.py119
-rw-r--r--glustolibs-gluster/glustolibs/gluster/gluster_base_class.py274
-rw-r--r--glustolibs-gluster/glustolibs/gluster/gluster_init.py150
-rw-r--r--glustolibs-gluster/glustolibs/gluster/heal_libs.py272
-rw-r--r--glustolibs-gluster/glustolibs/gluster/heal_ops.py432
-rw-r--r--glustolibs-gluster/glustolibs/gluster/mount_ops.py509
-rw-r--r--glustolibs-gluster/glustolibs/gluster/peer_ops.py424
-rw-r--r--glustolibs-gluster/glustolibs/gluster/samba_ops.py400
-rw-r--r--glustolibs-gluster/glustolibs/gluster/uss_ops.py114
-rw-r--r--glustolibs-gluster/glustolibs/gluster/volume_libs.py980
-rw-r--r--glustolibs-gluster/glustolibs/gluster/windows_libs.py152
12 files changed, 4240 insertions, 0 deletions
diff --git a/glustolibs-gluster/glustolibs/gluster/brick_libs.py b/glustolibs-gluster/glustolibs/gluster/brick_libs.py
new file mode 100644
index 000000000..617419d9e
--- /dev/null
+++ b/glustolibs-gluster/glustolibs/gluster/brick_libs.py
@@ -0,0 +1,414 @@
+#!/usr/bin/env python
+# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Description: Module for gluster brick related helper functions.
+"""
+
+import random
+from glusto.core import Glusto as g
+from glustolibs.gluster.volume_ops import (get_volume_info, get_volume_status)
+from glustolibs.gluster.volume_libs import get_subvols
+
+
+def get_all_bricks(mnode, volname):
+ """Get list of all the bricks of the specified volume.
+ If the volume is a 'Tier' volume, the list will contain both
+ 'hot tier' and 'cold tier' bricks.
+
+ Args:
+ mnode (str): Node on which command has to be executed
+ volname (str): Name of the volume
+
+ Returns:
+ list: List of all the bricks of the volume on Success.
+ NoneType: None on failure.
+ """
+ volinfo = get_volume_info(mnode, volname)
+ if volinfo is None:
+ g.log.error("Unable to get the volinfo of %s." % volname)
+ return None
+
+ if 'Tier' in volinfo[volname]['typeStr']:
+ # Get bricks from hot-tier in case of Tier volume
+ hot_tier_bricks = get_hot_tier_bricks(mnode, volname)
+ if hot_tier_bricks is None:
+ return None
+ # Get cold-tier bricks in case of Tier volume
+ cold_tier_bricks = get_cold_tier_bricks(mnode, volname)
+ if cold_tier_bricks is None:
+ return None
+
+ return hot_tier_bricks + cold_tier_bricks
+
+ # Get bricks from a non Tier volume
+ all_bricks = []
+ if 'bricks' in volinfo[volname]:
+ if 'brick' in volinfo[volname]['bricks']:
+ for brick in volinfo[volname]['bricks']['brick']:
+ if 'name' in brick:
+ all_bricks.append(brick['name'])
+ else:
+ g.log.error("brick %s doesn't have the key 'name' "
+ "for the volume: %s" % (brick, volname))
+ return None
+ return all_bricks
+ else:
+ g.log.error("Bricks not found in Bricks section of volume "
+ "info for the volume %s" % volname)
+ return None
+ else:
+ g.log.error("Bricks not found for the volume %s" % volname)
+ return None
+
+
+def get_hot_tier_bricks(mnode, volname):
+ """Get list of hot-tier bricks of the specified volume
+
+ Args:
+ mnode (str): Node on which command has to be executed
+ volname (str): Name of the volume
+
+ Returns:
+ list : List of hot-tier bricks of the volume on Success.
+ NoneType: None on failure.
+ """
+ volinfo = get_volume_info(mnode, volname)
+ if volinfo is None:
+ g.log.error("Unable to get the volinfo of %s." % volname)
+ return None
+
+ if 'Tier' not in volinfo[volname]['typeStr']:
+ g.log.error("Volume %s is not a tiered volume" % volname)
+ return None
+
+ hot_tier_bricks = []
+ if 'bricks' in volinfo[volname]:
+ if 'hotBricks' in volinfo[volname]['bricks']:
+ if 'brick' in volinfo[volname]['bricks']['hotBricks']:
+ for brick in volinfo[volname]['bricks']['hotBricks']['brick']:
+ if 'name' in brick:
+ hot_tier_bricks.append(brick['name'])
+ else:
+ g.log.error("brick %s doesn't have the key 'name' "
+ "for the volume: %s" % (brick, volname))
+ return None
+ else:
+ g.log.error("Bricks not found in hotBricks section of volume "
+ "info for the volume %s" % volname)
+ return None
+ return hot_tier_bricks
+ else:
+ g.log.error("Bricks not found for the volume %s" % volname)
+ return None
+
+
+def get_cold_tier_bricks(mnode, volname):
+ """Get list of cold-tier bricks of the specified volume
+
+ Args:
+ mnode (str): Node on which command has to be executed
+ volname (str): Name of the volume
+
+ Returns:
+ list : List of cold-tier bricks of the volume on Success.
+ NoneType: None on failure.
+ """
+ volinfo = get_volume_info(mnode, volname)
+ if volinfo is None:
+ g.log.error("Unable to get the volinfo of %s." % volname)
+ return None
+
+ if 'Tier' not in volinfo[volname]['typeStr']:
+ g.log.error("Volume %s is not a tiered volume" % volname)
+ return None
+
+ cold_tier_bricks = []
+ if 'bricks' in volinfo[volname]:
+ if 'coldBricks' in volinfo[volname]['bricks']:
+ if 'brick' in volinfo[volname]['bricks']['coldBricks']:
+ for brick in volinfo[volname]['bricks']['coldBricks']['brick']:
+ if 'name' in brick:
+ cold_tier_bricks.append(brick['name'])
+ else:
+ g.log.error("brick %s doesn't have the key 'name' "
+ "for the volume: %s" % (brick, volname))
+ return None
+ else:
+ g.log.error("Bricks not found in coldBricks section of volume "
+ "info for the volume %s" % volname)
+ return None
+ return cold_tier_bricks
+ else:
+ g.log.error("Bricks not found for the volume %s" % volname)
+ return None
+
+
+def bring_bricks_offline(volname, bricks_list,
+ bring_bricks_offline_methods=['service_kill']):
+ """Bring the bricks specified in the bricks_list offline.
+
+ Args:
+ volname (str): Name of the volume
+ bricks_list (list): List of bricks to bring them offline.
+
+ Kwargs:
+ bring_bricks_offline_methods (list): List of methods using which bricks
+ will be brought offline. The method to bring a brick offline is
+ randomly selected from the bring_bricks_offline_methods list.
+ By default all bricks will be brought offline with
+ 'service_kill' method.
+
+ Returns:
+ bool : True on successfully bringing all bricks offline.
+ False otherwise
+ """
+ rc = True
+ failed_to_bring_offline_list = []
+ for brick in bricks_list:
+ bring_brick_offline_method = (random.choice
+ (bring_bricks_offline_methods))
+ if bring_brick_offline_method == 'service_kill':
+ brick_node, brick_path = brick.split(":")
+ brick_path = brick_path.replace("/", "-")
+ kill_cmd = ("pid=`cat /var/lib/glusterd/vols/%s/run/%s%s.pid` &&"
+ "kill -15 $pid || kill -9 $pid" %
+ (volname, brick_node, brick_path))
+ ret, _, _ = g.run(brick_node, kill_cmd)
+ if ret != 0:
+ g.log.error("Unable to kill the brick %s" % brick)
+ failed_to_bring_offline_list.append(brick)
+ rc = False
+ else:
+ g.log.error("Invalid method '%s' to bring brick offline" %
+ bring_brick_offline_method)
+ return False
+
+ if not rc:
+ g.log.error("Unable to bring some of the bricks %s offline" %
+ failed_to_bring_offline_list)
+ return False
+
+ g.log.info("All the bricks : %s are brought offline" % bricks_list)
+ return True
+
+
+def bring_bricks_online(mnode, volname, bricks_list,
+ bring_bricks_online_methods=['glusterd_restart',
+ 'volume_start_force']):
+ """Bring the bricks specified in the bricks_list online.
+
+ Args:
+ mnode (str): Node on which commands will be executed.
+ volname (str): Name of the volume.
+ bricks_list (list): List of bricks to bring them online.
+
+ Kwargs:
+ bring_bricks_online_methods (list): List of methods using which bricks
+ will be brought online. The method to bring a brick online is
+ randomly selected from the bring_bricks_online_methods list.
+ By default all bricks will be brought online with
+ ['glusterd_restart', 'volume_start_force'] methods.
+ If 'volume_start_force' command is randomly selected then all the
+ bricks would be started with the command execution. Hence we break
+ from bringing bricks online individually
+
+ Returns:
+ bool : True on successfully bringing all bricks online.
+ False otherwise
+ """
+ rc = True
+ failed_to_bring_online_list = []
+ for brick in bricks_list:
+ bring_brick_online_method = random.choice(bring_bricks_online_methods)
+ if bring_brick_online_method == 'glusterd_restart':
+ bring_brick_online_command = "service glusterd restart"
+ brick_node, brick_path = brick.split(":")
+ ret, _, _ = g.run(brick_node, bring_brick_online_command)
+ if ret != 0:
+ g.log.error("Unable to restart glusterd on node %s" %
+ (brick_node))
+ rc = False
+ failed_to_bring_online_list.append(brick)
+ elif bring_brick_online_method == 'volume_start_force':
+ bring_brick_online_command = ("gluster volume start %s force" %
+ volname)
+ ret, _, _ = g.run(mnode, bring_brick_online_command)
+ if ret != 0:
+ g.log.error("Unable to start the volume %s with force option" %
+ (volname))
+ rc = False
+ else:
+ break
+ else:
+ g.log.error("Invalid method '%s' to bring brick online" %
+ bring_brick_online_method)
+ return False
+ if not rc:
+ g.log.error("Unable to bring some of the bricks %s online" %
+ failed_to_bring_online_list)
+ return False
+
+ g.log.info("All the bricks : %s are brought online" % bricks_list)
+ return True
+
+
+def are_bricks_offline(mnode, volname, bricks_list):
+ """Verify all the specified list of bricks are offline.
+
+ Args:
+ mnode (str): Node on which commands will be executed.
+ volname (str): Name of the volume.
+ bricks_list (list): List of bricks to verify offline status.
+
+ Returns:
+ bool : True if all bricks offline. False otherwise.
+ NoneType: None on failure in getting volume status
+ """
+ rc = True
+ online_bricks_list = []
+ volume_status = get_volume_status(mnode, volname)
+ if not volume_status:
+ g.log.error("Unable to check if bricks are offline for the volume %s" %
+ volname)
+ return None
+ for brick in bricks_list:
+ brick_node, brick_path = brick.split(":")
+ status = int(volume_status[volname][brick_node][brick_path]['status'])
+ if status != 0:
+ g.log.error("BRICK : %s is not offline" % (brick))
+ online_bricks_list.append(brick)
+ rc = False
+ if not rc:
+ g.log.error("Some of the bricks %s are not offline" %
+ online_bricks_list)
+ return False
+
+ g.log.info("All the bricks in %s are offline" % bricks_list)
+ return True
+
+
+def are_bricks_online(mnode, volname, bricks_list):
+ """Verify all the specified list of bricks are online.
+
+ Args:
+ mnode (str): Node on which commands will be executed.
+ volname (str): Name of the volume.
+ bricks_list (list): List of bricks to verify online status.
+
+ Returns:
+ bool : True if all bricks online. False otherwise.
+ NoneType: None on failure in getting volume status
+ """
+ rc = True
+ offline_bricks_list = []
+ volume_status = get_volume_status(mnode, volname)
+ if not volume_status:
+ g.log.error("Unable to check if bricks are online for the volume %s" %
+ volname)
+ return None
+ for brick in bricks_list:
+ brick_node, brick_path = brick.split(":")
+ status = int(volume_status[volname][brick_node][brick_path]['status'])
+ if status != 1:
+ g.log.error("BRICK : %s is not online" % (brick))
+ offline_bricks_list.append(brick)
+ rc = False
+
+ if not rc:
+ g.log.error("Some of the bricks %s are not online" %
+ offline_bricks_list)
+ return False
+
+ g.log.info("All the bricks %s are online" % bricks_list)
+ return True
+
+def get_offline_bricks_list(mnode, volname):
+ """Get list of bricks which are offline.
+
+ Args:
+ mnode (str): Node on which commands will be executed.
+ volname (str): Name of the volume.
+
+ Returns:
+ list : List of bricks in the volume which are offline.
+ NoneType: None on failure in getting volume status
+ """
+ offline_bricks_list = []
+ volume_status = get_volume_status(mnode, volname)
+ if not volume_status:
+ g.log.error("Unable to get offline bricks_list for the volume %s" %
+ volname)
+ return None
+
+ bricks_list = get_all_bricks(mnode, volname)
+ for brick in bricks_list:
+ brick_node, brick_path = brick.split(":")
+ status = int(volume_status[volname][brick_node][brick_path]['status'])
+ if status != 1:
+ offline_bricks_list.append(brick)
+
+ return offline_bricks_list
+
+
+def get_online_bricks_list(mnode, volname):
+ """Get list of bricks which are online.
+
+ Args:
+ mnode (str): Node on which commands will be executed.
+ volname (str): Name of the volume.
+
+ Returns:
+ list : List of bricks in the volume which are online.
+ NoneType: None on failure in getting volume status
+ """
+ online_bricks_list = []
+ volume_status = get_volume_status(mnode, volname)
+ if not volume_status:
+ g.log.error("Unable to get online bricks_list for the volume %s" %
+ volname)
+ return None
+
+ bricks_list = get_all_bricks(mnode, volname)
+ for brick in bricks_list:
+ brick_node, brick_path = brick.split(":")
+ status = int(volume_status[volname][brick_node][brick_path]['status'])
+ if status == 1:
+ online_bricks_list.append(brick)
+
+ return online_bricks_list
+
+
+def delete_bricks(bricks_list):
+ """Deletes list of bricks specified from the brick nodes.
+
+ Args:
+ bricks_list (list): List of bricks to be deleted.
+
+ Returns:
+ bool : True if all the bricks are deleted. False otherwise.
+ """
+ rc = True
+ for brick in bricks_list:
+ brick_node, brick_path = brick.split(":")
+ _, _, _ = g.run(brick_node, "rm -rf %s" % brick_path)
+ ret, out, err = g.run(brick_node, "ls %s" % brick_path)
+ if ret == 0:
+ g.log.error("Unable to delete brick %s on node %s" %
+ (brick_path, brick_node))
+ rc = False
+ return rc
diff --git a/glustolibs-gluster/glustolibs/gluster/brick_ops.py b/glustolibs-gluster/glustolibs/gluster/brick_ops.py
new file mode 100644
index 000000000..ae55b6851
--- /dev/null
+++ b/glustolibs-gluster/glustolibs/gluster/brick_ops.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Description: Module for gluster brick operations
+"""
+
+from glusto.core import Glusto as g
+
+def add_brick(mnode, volname, bricks_list, replica=None):
+ """Add Bricks specified in the bricks_list to the volume.
+
+ Args:
+ mnode (str): Node on which the commands are executed.
+ volname (str): Name of the volume
+ bricks_list (list): List of bricks to be added
+
+ Kwargs:
+ replica (int): Replica count to increase the replica count of
+ the volume.
+
+ Returns:
+ tuple: Tuple containing three elements (ret, out, err).
+ The first element 'ret' is of type 'int' and is the return value
+ of command execution.
+
+ The second element 'out' is of type 'str' and is the stdout value
+ of the command execution.
+
+ The third element 'err' is of type 'str' and is the stderr value
+ of the command execution.
+ """
+ if replica is None:
+ cmd = ("gluster volume add-brick %s %s" %
+ (volname, ' '.join(bricks_list)))
+ else:
+ cmd = ("gluster volume add-brick %s replica %d %s" %
+ (volname, int(replica), ' '.join(bricks_list)))
+
+ return g.run(mnode, cmd)
+
+
+# remove_brick
+def remove_brick(mnode, volname, bricks_list, option, replica=None):
+ """Remove bricks specified in the bricks_list from the volume.
+
+ Args:
+ mnode (str): Node on which the commands are executed.
+ volname (str): Name of the volume
+ bricks_list (list): List of bricks to be removed
+ option (str): Remove brick options: <start|stop|status|commit|force>
+
+ Kwargs:
+ replica (int): Replica count to increase the replica count of
+ the volume.
+
+ Returns:
+ tuple: Tuple containing three elements (ret, out, err).
+ The first element 'ret' is of type 'int' and is the return value
+ of command execution.
+
+ The second element 'out' is of type 'str' and is the stdout value
+ of the command execution.
+
+ The third element 'err' is of type 'str' and is the stderr value
+ of the command execution.
+ """
+ if option == "commit" or option == "force":
+ option = option + " --mode=script"
+
+ if replica is None:
+ cmd = ("gluster volume remove-brick %s %s %s" %
+ (volname, ' '.join(bricks_list), option))
+ else:
+ cmd = ("gluster volume remove-brick %s replica %d %s force "
+ "--mode=script" % (volname, int(replica),
+ ' '.join(bricks_list)))
+
+ return g.run(mnode, cmd)
+
+
+# replace_brick
+def replace_brick(mnode, volname, src_brick, dst_brick):
+ """Replace src brick with dst brick from the volume.
+
+ Args:
+ mnode (str): Node on which the commands are executed.
+ volname (str): Name of the volume
+ src_brick (str): Source brick name
+ dst_brick (str): Destination brick name
+
+ Returns:
+ tuple: Tuple containing three elements (ret, out, err).
+ The first element 'ret' is of type 'int' and is the return value
+ of command execution.
+
+ The second element 'out' is of type 'str' and is the stdout value
+ of the command execution.
+
+ The third element 'err' is of type 'str' and is the stderr value
+ of the command execution.
+ """
+ cmd = ("gluster volume replace-brick %s %s %s commit force" %
+ (volname, src_brick, dst_brick))
+ return g.run(mnode, cmd)
diff --git a/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py b/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
new file mode 100644
index 000000000..9a2f4ec06
--- /dev/null
+++ b/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
@@ -0,0 +1,274 @@
+#!/usr/bin/env python
+# Copyright (C) 2016 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+"""
+ Description: Module containing GlusterBaseClass which defines all the
+ variables necessary for tests.
+"""
+
+import unittest
+
+from glusto.core import Glusto as g
+import os
+
+
+class runs_on(g.CarteTestClass):
+ """Decorator providing runs_on capability for standard unittest script"""
+
+ def __init__(self, value):
+ # the names of the class attributes set by the runs_on decorator
+ self.axis_names = ['volume_type', 'mount_type']
+
+ # the options to replace 'ALL' in selections
+ self.available_options = [['distributed', 'replicated',
+ 'distributed-replicated',
+ 'dispersed', 'distributed-dispersed'],
+ ['glusterfs', 'nfs', 'cifs', 'smb']]
+
+ # these are the volume and mount options to run and set in config
+ # what do runs_on_volumes and runs_on_mounts need to be named????
+ run_on_volumes = g.config.get('running_on_volumes',
+ self.available_options[0])
+ run_on_mounts = g.config.get('running_on_mounts',
+ self.available_options[1])
+
+ # selections is the above info from the run that is intersected with
+ # the limits from the test script
+ self.selections = [run_on_volumes, run_on_mounts]
+
+ # value is the limits that are passed in by the decorator
+ self.limits = value
+
+
+class GlusterBaseClass(unittest.TestCase):
+ # these will be populated by either the runs_on decorator or
+ # defaults in setUpClass()
+ volume_type = None
+ mount_type = None
+ volname = None
+ servers = None
+ voltype = None
+ mnode = None
+ mounts = None
+ clients = None
+
+ @classmethod
+ def setUpClass(cls):
+ if cls.volume_type is None:
+ cls.volume_type = "distributed"
+ if cls.mount_type is None:
+ cls.mount_type = "glusterfs"
+
+ g.log.info("SETUP GLUSTER VOLUME: %s on %s" % (cls.volume_type,
+ cls.mount_type))
+
+ # Defining default volume_types configuration.
+ default_volume_type_config = {
+ 'replicated': {
+ 'type': 'replicated',
+ 'replica_count': 3,
+ 'transport': 'tcp'
+ },
+ 'dispersed': {
+ 'type': 'dispersed',
+ 'disperse_count': 4,
+ 'redundancy_count': 2,
+ 'transport': 'tcp'
+ },
+ 'distributed': {
+ 'type': 'distributed',
+ 'dist_count': 4,
+ 'transport': 'tcp'
+ },
+ 'distributed-replicated': {
+ 'type': 'distributed-replicated',
+ 'dist_count': 2,
+ 'replica_count': 2,
+ 'transport': 'tcp'
+ },
+ 'distributed-dispersed': {
+ 'type': 'distributed-dispersed',
+ 'dist_count': 2,
+ 'disperse_count': 4,
+ 'redundancy_count': 2,
+ 'transport': 'tcp'
+ }
+ }
+
+ # Get the volume configuration.
+ cls.volume = {}
+ found_volume = False
+ if 'volumes' in g.config['gluster']:
+ for volume in g.config['gluster']['volumes']:
+ if volume['voltype']['type'] == cls.volume_type:
+ cls.volume = volume
+ found_volume = True
+ break
+
+ if found_volume:
+ if not 'name' in cls.volume:
+ cls.volume['name'] = 'testvol_%s' % cls.volume_type
+
+ if 'servers' in cls.volume:
+ cls.volume['servers'] = g.config['servers']
+
+ if not found_volume:
+ cls.volume = {
+ 'name': ('testvol_%s' % cls.volume_type),
+ 'servers': g.config['servers']
+ }
+ try:
+ if g.config['gluster']['volume_types'][cls.volume_type]:
+ cls.volume['voltype'] = (g.config['gluster']
+ ['volume_types'][cls.volume_type])
+ except KeyError as e:
+ try:
+ cls.volume['voltype'] = (default_volume_type_config
+ [cls.volume_type])
+ except KeyError as e:
+ g.log.error("Unable to get configs of volume type: %s",
+ cls.volume_type)
+ return False
+
+ # Define Volume variables.
+ cls.volname = cls.volume['name']
+ cls.servers = cls.volume['servers']
+ cls.voltype = cls.volume['voltype']['type']
+ cls.mnode = cls.servers[0]
+
+ # Get the mount configuration.
+ cls.mounts_dict_list = []
+ cls.mounts = []
+ found_mount = False
+ if 'mounts' in g.config['gluster']:
+ for mount in g.config['gluster']['mounts']:
+ if mount['protocol'] == cls.mount_type:
+ if not 'volname' in mount:
+ mount['volname'] = cls.volname
+ if not 'server' in mount:
+ mount['server'] = cls.mnode
+ if not 'mountpoint' in mount:
+ mount['mountpoint'] = (os.path.join(
+ "/mnt", '_'.join([cls.volname, cls.mount_type])))
+ cls.mounts_dict_list.append(mount)
+ found_mount = True
+ if not found_mount:
+ for client in g.config['clients']:
+ mount = {
+ 'protocol': cls.mount_type,
+ 'server': cls.mnode,
+ 'volname': cls.volname,
+ 'client': {
+ 'host': client
+ },
+ 'mountpoint': (os.path.join(
+ "/mnt", '_'.join([cls.volname, cls.mount_type]))),
+ 'options': ''
+ }
+ cls.mounts_dict_list.append(mount)
+ from glustolibs.gluster.mount_ops import create_mount_objs
+ cls.mounts = create_mount_objs(cls.mounts_dict_list)
+
+ # Get clients
+ cls.clients = []
+ if 'clients' in g.config:
+ cls.clients = g.config['clients']
+ else:
+ for mount_dict in cls.mounts_dict_list:
+ if 'client' in mount_dict:
+ if ('host' in mount_dict['client'] and
+ mount_dict['client']['host']):
+ if mount_dict['client']['host'] not in cls.clients:
+ cls.clients.append(mount_dict['client']['host'])
+
+ # All servers info
+ cls.all_servers_info = None
+ if 'servers_info' in g.config:
+ cls.all_servers_info = g.config['servers_info']
+ else:
+ g.log.error("servers_info not defined in the configuration file")
+
+ # All clients_info
+ cls.all_clients_info = None
+ if 'clients_info' in g.config:
+ cls.all_clients_info = g.config['clients_info']
+ else:
+ g.log.error("clients_info not defined in the configuration file")
+
+
+class GlusterDemoClass(GlusterBaseClass):
+ @classmethod
+ def setUpClass(cls):
+ GlusterBaseClass.setUpClass.im_func(cls)
+
+ # Start Glusterd
+ from glustolibs.gluster.gluster_init import start_glusterd
+ ret = start_glusterd(servers=cls.servers)
+ if not ret:
+ g.log.error("glusterd did not start on at least one server")
+ return False
+
+ # PeerProbe servers
+ from glustolibs.gluster.peer_ops import peer_probe_servers
+ ret = peer_probe_servers(mnode=cls.servers[0], servers=cls.servers[1:])
+ if not ret:
+ g.log.error("Unable to peer probe one or more servers")
+ return False
+
+ from glustolibs.gluster.volume_libs import setup_volume
+ ret = setup_volume(mnode=cls.mnode,
+ all_servers_info=cls.all_servers_info,
+ volume_config=cls.volume)
+ if not ret:
+ g.log.error("Setup volume %s failed" % cls.volname)
+ return False
+
+ # Create Mounts
+ for mount_obj in cls.mounts:
+ ret = mount_obj.mount()
+ if not ret:
+ g.log.error("Unable to mount volume '%s:%s' on '%s:%s'" %
+ (mount_obj.server_system, mount_obj.volname,
+ mount_obj.client_system, mount_obj.mountpoint))
+ return False
+
+ @classmethod
+ def tearDownClass(cls, umount_vol=True, cleanup_vol=True):
+ """unittest tearDownClass override"""
+ # Unmount volume
+ if umount_vol:
+ rc = True
+ for mount_obj in cls.mounts:
+ ret = mount_obj.unmount()
+ if not ret:
+ g.log.error("Unable to unmount volume '%s:%s' on '%s:%s'" %
+ (mount_obj.server_system, mount_obj.volname,
+ mount_obj.client_system, mount_obj.mountpoint))
+ rc = False
+ if not rc:
+ return False
+
+ # Cleanup volume
+ if cleanup_vol:
+ from glustolibs.gluster.volume_libs import cleanup_volume
+ ret = cleanup_volume(mnode=cls.mnode, volname=cls.volname)
+ if not ret:
+ g.log.error("cleanup volume %s failed" % cls.volname)
+ return False
+
+ g.log.info("TEARDOWN GLUSTER VOLUME: %s on %s" % (cls.volume_type,
+ cls.mount_type))
diff --git a/glustolibs-gluster/glustolibs/gluster/gluster_init.py b/glustolibs-gluster/glustolibs/gluster/gluster_init.py
new file mode 100644
index 000000000..182080717
--- /dev/null
+++ b/glustolibs-gluster/glustolibs/gluster/gluster_init.py
@@ -0,0 +1,150 @@
+#!/usr/bin/env python
+# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Description: This file contains the methods for starting/stopping glusterd
+ and other initial gluster environment setup helpers.
+"""
+from glusto.core import Glusto as g
+
+def start_glusterd(servers):
+ """Starts glusterd on specified servers if they are not running.
+
+ Args:
+ servers (list): List of server hosts on which glusterd has to be
+ started.
+
+ Returns:
+ bool : True if starting glusterd is successful on all servers.
+ False otherwise.
+ """
+ cmd = "pgrep glusterd || service glusterd start"
+ results = g.run_parallel(servers, cmd)
+
+ rc = True
+ for server, ret_values in results.iteritems():
+ retcode, out, err = ret_values
+ if retcode != 0:
+ g.log.error("Unable to start glusterd on server %s", server)
+ rc = False
+ if not rc:
+ return False
+
+ return True
+
+def stop_glusterd(servers):
+ """Stops the glusterd on specified servers.
+
+ Args:
+ servers (list): List of server hosts on which glusterd has to be
+ stopped.
+
+ Returns:
+ bool : True if stopping glusterd is successful on all servers.
+ False otherwise.
+ """
+ cmd = "service glusterd stop"
+ results = g.run_parallel(servers, cmd)
+
+ rc = True
+ for server, ret_values in results.iteritems():
+ retcode, out, err = ret_values
+ if retcode != 0:
+ g.log.error("Unable to stop glusterd on server %s", server)
+ rc = False
+ if not rc:
+ return False
+
+ return True
+
+
+def restart_glusterd(servers):
+ """Restart the glusterd on specified servers.
+
+ Args:
+ servers (list): List of server hosts on which glusterd has to be
+ restarted.
+
+ Returns:
+ bool : True if restarting glusterd is successful on all servers.
+ False otherwise.
+ """
+ cmd = "service glusterd restart"
+ results = g.run_parallel(servers, cmd)
+
+ rc = True
+ for server, ret_values in results.iteritems():
+ retcode, out, err = ret_values
+ if retcode != 0:
+ g.log.error("Unable to restart glusterd on server %s", server)
+ rc = False
+ if not rc:
+ return False
+
+ return True
+
+def is_glusterd_running(servers):
+ """Checks the glusterd status on specified servers.
+
+ Args:
+ servers (list): List of server hosts on which glusterd status has to
+ be checked.
+
+ Returns:
+ 0 : if glusterd running
+ 1 : if glusterd not running
+ -1 : if glusterd not running and PID is alive
+
+ """
+ cmd1 = "service glusterd status"
+ cmd2 = "pidof glusterd"
+ cmd1_results = g.run_parallel(servers, cmd1)
+ cmd2_results = g.run_parallel(servers, cmd2)
+
+ rc = 0
+ for server, ret_values in cmd1_results.iteritems():
+ retcode, out, err = ret_values
+ if retcode != 0:
+ g.log.error("glusterd is not running on the server %s", server)
+ rc = 1
+ if cmd2_results[server][0] == 0:
+ g.log.error("PID of glusterd is alive and status is not running")
+ rc = -1
+ return rc
+
+#TODO: THIS IS NOT IMPLEMENTED YET. PLEASE DO THIS MANUALLY
+# TILL WE IMPLEMENT THIS PART
+
+def env_setup_servers(servers):
+ """Set up environment on all the specified servers.
+
+ Args:
+ servers (list): List of server hosts on which environment has to be
+ setup.
+
+ Returns:
+ bool : True if setting up environment is successful on all servers.
+ False otherwise.
+
+ """
+ g.log.info("The function isn't implemented fully")
+ g.log.info("Please setup the bricks manually.")
+
+ if not start_glusterd(servers):
+ return False
+
+ return True
diff --git a/glustolibs-gluster/glustolibs/gluster/heal_libs.py b/glustolibs-gluster/glustolibs/gluster/heal_libs.py
new file mode 100644
index 000000000..2ad408c57
--- /dev/null
+++ b/glustolibs-gluster/glustolibs/gluster/heal_libs.py
@@ -0,0 +1,272 @@
+#!/usr/bin/env python
+# Copyright (C) 2016 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+
+"""
+ Description: Module for gluster heal related helper functions.
+"""
+
+import os
+import time
+from glusto.core import Glusto as g
+from glustolibs.gluster.volume_ops import (get_volume_info, get_volume_status)
+try:
+ import xml.etree.cElementTree as etree
+except ImportError:
+ import xml.etree.ElementTree as etree
+
+
def is_heal_enabled(mnode, volname):
    """Check if heal is enabled for a volume.

    A volume counts as heal-enabled when every node entry in the shd
    volume-status output carries a 'Self-heal Daemon' record.

    Args:
        mnode : Node on which commands are executed
        volname : Name of the volume

    Returns:
        bool : True if heal is enabled on volume. False otherwise.
        NoneType: None if unable to get the volume status.
    """
    shd_status = get_volume_status(mnode, volname, service='shd')
    if shd_status is None:
        g.log.error("Failed to check if heal is enabled on volume %s or not" %
                    volname)
        return None
    for node_data in shd_status[volname].values():
        if 'Self-heal Daemon' not in node_data:
            return False
    return True
+
+
def is_heal_disabled(mnode, volname):
    """Check if heal is disabled for a volume.

    Args:
        mnode : Node on which commands are executed
        volname : Name of the volume

    Returns:
        bool : True if heal is disabled on volume. False otherwise.
        NoneType: None if unable to get the volume status shd or parse error.
    """
    cmd = "gluster volume status %s shd --xml" % volname
    ret, out, _ = g.run(mnode, cmd)
    if ret != 0:
        # Bug fix: the original format string had no %s placeholder, so this
        # logging call raised TypeError instead of logging the error.
        g.log.error("Failed to get the self-heal-daemon status for the "
                    "volume %s" % volname)
        return None

    try:
        root = etree.XML(out)
    except etree.ParseError:
        g.log.error("Failed to parse the volume status shd xml output.")
        return None

    # Bug fix: an Element with no children is falsy, so 'if operr:' could
    # skip an opErrstr element that only carries text; compare against None.
    # Also guard operr.text, which is None for an empty element.
    operr = root.find("opErrstr")
    if operr is not None and operr.text:
        if "Self-heal Daemon is disabled for volume" in operr.text:
            return True
    return False
+
+
def are_all_self_heal_daemons_are_online(mnode, volname):
    """Verifies whether all the self-heal-daemons are online for the specified
    volume.

    Args:
        mnode (str): Node on which cmd has to be executed.
        volname (str): volume name

    Returns:
        bool : True if all the self-heal-daemons are online for the volume.
            False otherwise.
        NoneType: None if unable to get the volume status
    """
    failure_msg = ("Verifying all self-heal-daemons are online failed for "
                   "volume %s" % volname)

    # Volume status restricted to the self-heal daemon service.
    vol_status = get_volume_status(mnode=mnode, volname=volname,
                                   service='shd')
    if vol_status is None:
        g.log.error(failure_msg)
        return None

    # Every node in the pool must report its shd as online ('1').
    from glustolibs.gluster.peer_ops import nodes_from_pool_list
    pool_nodes = nodes_from_pool_list(mnode)
    if not pool_nodes:
        g.log.error(failure_msg)
        return False

    any_offline = False
    for pool_node in pool_nodes:
        shd_state = (vol_status[volname][pool_node]['Self-heal Daemon']
                     ['status'])
        if shd_state != '1':
            any_offline = True
            # Dump the human-readable status for the logs.
            g.run(mnode, ("gluster volume status %s shd" % volname))

    if any_offline:
        g.log.error("Some of the self-heal Daemons are offline")
        return False
    g.log.info("All self-heal Daemons are online")
    return True
+
+
def monitor_heal_completion(mnode, volname, timeout_period=1200):
    """Monitors heal completion by looking into .glusterfs/indices/xattrop
    directory of every brick for certain time. When there are no entries
    in all the brick directories then heal is successful. Otherwise heal is
    pending on the volume.

    Args:
        mnode : Node on which commands are executed
        volname : Name of the volume
        timeout_period : time (in seconds) until which the heal monitoring
            is done. Default: 1200 i.e 20 minutes.

    Return:
        bool: True if heal is complete within timeout_period. False otherwise
    """
    # Bug fix: the original only assigned heal_monitor_timeout when
    # timeout_period was non-zero, raising NameError for timeout_period=0;
    # fall back to the 20-minute default instead.
    if timeout_period > 0:
        heal_monitor_timeout = timeout_period
    else:
        heal_monitor_timeout = 1200
    time_counter = heal_monitor_timeout
    g.log.info("The heal monitoring timeout is : %d minutes" %
               (heal_monitor_timeout / 60))

    # Get all bricks
    from glustolibs.gluster.brick_libs import get_all_bricks
    bricks_list = get_all_bricks(mnode, volname)
    if bricks_list is None:
        g.log.error("Unable to get the bricks list. Hence unable to verify "
                    "whether self-heal-daemon process is running or not "
                    "on the volume %s" % volname)
        return False

    # Poll the xattrop index dir on every brick; any non-"xattrop-" entry
    # left there means heal is still pending on that brick.
    while time_counter > 0:
        heal_complete = True
        for brick in bricks_list:
            brick_node, brick_path = brick.split(":")
            cmd = ("ls -1 %s/.glusterfs/indices/xattrop/ | "
                   "grep -ve \"xattrop-\" | wc -l" % brick_path)
            ret, out, err = g.run(brick_node, cmd)
            if out.strip('\n') != "0":
                heal_complete = False
        if heal_complete:
            break
        else:
            time.sleep(120)
            time_counter = time_counter - 120

    if heal_complete:
        # Cross-check with 'heal info' before declaring success.
        heal_completion_status = is_heal_complete(mnode, volname)
        if heal_completion_status is True:
            g.log.info("Heal has successfully completed on volume %s" %
                       volname)
            return True

    # Timed out (or the cross-check failed): dump the leftover index
    # entries per brick into the logs for debugging.
    g.log.info("Heal has not yet completed on volume %s" % volname)
    for brick in bricks_list:
        brick_node, brick_path = brick.split(":")
        cmd = ("ls -1 %s/.glusterfs/indices/xattrop/ " % brick_path)
        g.run(brick_node, cmd)
    return False
+
+
def is_heal_complete(mnode, volname):
    """Verifies there are no pending heals on the volume.
    The 'number of entries' in the output of heal info
    for all the bricks should be 0 for heal to be completed.

    Args:
        mnode : Node on which commands are executed
        volname : Name of the volume

    Return:
        bool: True if heal is complete. False otherwise
    """
    from glustolibs.gluster.heal_ops import get_heal_info
    per_brick_info = get_heal_info(mnode, volname)
    if per_brick_info is None:
        g.log.error("Unable to verify whether heal is successful or not on "
                    "volume %s" % volname)
        return False

    # Heal is complete only when every brick reports zero pending entries.
    if any(brick_info['numberOfEntries'] != '0'
           for brick_info in per_brick_info):
        g.log.error("Heal is not complete on some of the bricks for the "
                    "volume %s" % volname)
        return False

    g.log.info("Heal is complete on all the bricks for the volume %s" %
               volname)
    return True
+
+
def is_volume_in_split_brain(mnode, volname):
    """Check whether the volume has entries in split-brain.

    The 'number of entries' in the output of heal info split-brain is
    inspected for every brick; entries reported as '-' (brick not
    connected) are skipped.

    Args:
        mnode : Node on which commands are executed
        volname : Name of the volume

    Return:
        bool: True if the volume IS in split-brain state, False if it is
            not. (Note: the original docstring stated the opposite; the
            code returns True when split-brain entries are found.)
            False is also returned when the heal info split-brain data
            cannot be fetched at all.
    """
    from glustolibs.gluster.heal_ops import get_heal_info_split_brain
    heal_info_split_brain_data = get_heal_info_split_brain(mnode, volname)
    if heal_info_split_brain_data is None:
        g.log.error("Unable to verify whether volume %s is not in split-brain "
                    "or not" % volname)
        return False

    split_brain = False
    for brick_heal_info_split_brain_data in heal_info_split_brain_data:
        # '-' means the brick was not connected; no entry count available.
        if brick_heal_info_split_brain_data['numberOfEntries'] == '-':
            continue
        if brick_heal_info_split_brain_data['numberOfEntries'] != '0':
            split_brain = True

    if split_brain:
        g.log.error("Volume %s is in split-brain state." % volname)
        return True

    g.log.info("Volume %s is not in split-brain state." % volname)
    return False
+
+
def get_unhealed_entries_info(volname, mnode=''):
    """Get the information of all gfid's on which heal is pending. The
    information includes - stat of gfid, getfattr output for all the dirs/
    files for a given gfid

    NOTE(review): this is an unimplemented stub - it gathers nothing and
    unconditionally returns True. Callers must not rely on it until it is
    implemented. Also note the (mnode, volname) argument order is reversed
    relative to the other helpers in this module.

    Args:
        volname : Name of the volume
        mnode : Node on which commands are executed (currently unused)

    Return:
        bool: True if getting unhealed entries info is successful.
            False otherwise
    """
    return True
diff --git a/glustolibs-gluster/glustolibs/gluster/heal_ops.py b/glustolibs-gluster/glustolibs/gluster/heal_ops.py
new file mode 100644
index 000000000..1bf8d2f87
--- /dev/null
+++ b/glustolibs-gluster/glustolibs/gluster/heal_ops.py
@@ -0,0 +1,432 @@
+#!/usr/bin/env python
+# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Description: Module for gluster heal operations.
+"""
+
+import os
+import time
+from glusto.core import Glusto as g
+from glustolibs.gluster.volume_ops import (get_volume_info, get_volume_status)
+try:
+ import xml.etree.cElementTree as etree
+except ImportError:
+ import xml.etree.ElementTree as etree
+
+
def trigger_heal(mnode, volname):
    """Triggers heal on the volume.

    Args:
        mnode : Node on which commands are executed
        volname : Name of the volume

    Returns:
        bool : True if heal is triggered successfully. False otherwise.
    """
    ret, _, _ = g.run(mnode, "gluster volume heal %s" % volname)
    return ret == 0
+
+
def trigger_heal_full(mnode, volname):
    """Triggers heal 'full' on the volume.

    Args:
        mnode : Node on which commands are executed
        volname : Name of the volume

    Returns:
        bool : True if heal is triggered successfully. False otherwise.
    """
    ret, _, _ = g.run(mnode, "gluster volume heal %s full" % volname)
    return ret == 0
+
+
def enable_heal(mnode, volname):
    """Enable heal by executing 'gluster volume heal enable'
    for the specified volume.

    Args:
        mnode : Node on which commands are executed
        volname : Name of the volume

    Returns:
        bool : True if heal is enabled on the volume. False otherwise.
    """
    ret, _, _ = g.run(mnode, "gluster volume heal %s enable" % volname)
    return ret == 0
+
+
def disable_heal(mnode, volname):
    """Disable heal by executing 'gluster volume heal disable'
    for the specified volume.

    Args:
        mnode : Node on which commands are executed
        volname : Name of the volume

    Returns:
        bool : True if heal is disabled on the volume. False otherwise.
    """
    ret, _, _ = g.run(mnode, "gluster volume heal %s disable" % volname)
    return ret == 0
+
+
def enable_self_heal_daemon(mnode, volname):
    """Enables self-heal-daemon on a volume by setting volume option
    'self-heal-daemon' to value 'on'

    Args:
        mnode : Node on which commands are executed
        volname : Name of the volume

    Returns:
        bool : True if setting self_heal_daemon option to 'on' is successful.
            False otherwise.
    """
    ret, _, _ = g.run(mnode,
                      "gluster volume set %s self-heal-daemon on" % volname)
    return ret == 0
+
+
def disable_self_heal_daemon(mnode, volname):
    """Disables self-heal-daemon on a volume by setting volume option
    'self-heal-daemon' to value 'off'

    Args:
        mnode : Node on which commands are executed
        volname : Name of the volume

    Returns:
        bool : True if setting self_heal_daemon option to 'off' is successful.
            False otherwise.
    """
    ret, _, _ = g.run(mnode,
                      "gluster volume set %s self-heal-daemon off" % volname)
    return ret == 0
+
+
def heal_info(mnode, volname):
    """Get heal info for the volume by executing:
    'gluster volume heal <volname> info'

    Args:
        mnode : Node on which commands are executed.
        volname : Name of the volume

    Returns:
        tuple: (ret, out, err) of the command execution, where 'ret' (int)
            is the exit status, 'out' (str) the stdout and 'err' (str) the
            stderr.
    """
    return g.run(mnode, "gluster volume heal %s info" % volname)
+
+
def heal_info_summary(mnode, volname):
    """Get heal info summary i.e Bricks and it's corresponding number of
    entries, status.

    The full 'heal info' output is filtered down to the brick, entry-count
    and status lines.

    Args:
        mnode : Node on which commands are executed
        volname : Name of the volume

    Returns:
        tuple: (ret, out, err) of the command execution, where 'ret' (int)
            is the exit status, 'out' (str) the stdout and 'err' (str) the
            stderr.
    """
    cmd = ("gluster volume heal %s info | grep 'entries\|Brick\|Status'" %
           volname)
    return g.run(mnode, cmd)
+
+
def heal_info_healed(mnode, volname):
    """Get healed entries information for the volume by executing:
    'gluster volume heal <volname> info healed'

    Args:
        mnode : Node on which commands are executed.
        volname : Name of the volume

    Returns:
        tuple: (ret, out, err) of the command execution, where 'ret' (int)
            is the exit status, 'out' (str) the stdout and 'err' (str) the
            stderr.
    """
    return g.run(mnode, "gluster volume heal %s info healed" % volname)
+
+
def heal_info_heal_failed(mnode, volname):
    """Get entries on which heal failed for the volume by executing:
    'gluster volume heal <volname> info heal-failed'

    Args:
        mnode : Node on which commands are executed.
        volname : Name of the volume

    Returns:
        tuple: (ret, out, err) of the command execution, where 'ret' (int)
            is the exit status, 'out' (str) the stdout and 'err' (str) the
            stderr.
    """
    return g.run(mnode, "gluster volume heal %s info heal-failed" % volname)
+
+
def heal_info_split_brain(mnode, volname):
    """Get entries that are in split-brain state for the volume by executing:
    'gluster volume heal <volname> info split-brain'

    Args:
        mnode : Node on which commands are executed.
        volname : Name of the volume

    Returns:
        tuple: (ret, out, err) of the command execution, where 'ret' (int)
            is the exit status, 'out' (str) the stdout and 'err' (str) the
            stderr.
    """
    return g.run(mnode, "gluster volume heal %s info split-brain" % volname)
+
+
def get_heal_info(mnode, volname):
    """Parse 'gluster volume heal <volname> info --xml' into per-brick data.

    Args:
        mnode : Node on which commands are executed
        volname : Name of the volume

    Returns:
        NoneType: None if the command or the XML parsing fails.
        list: list of dictionaries, one per brick, mapping each XML child
            tag to its text. Files pending heal (if any) are gathered under
            the 'file' key as a list of {gfid: path} dicts.
    """
    ret, out, _ = g.run(mnode, "gluster volume heal %s info --xml" % volname)
    if ret != 0:
        g.log.error("Failed to get the heal info xml output for the volume %s."
                    "Hence failed to get the heal info summary." % volname)
        return None

    try:
        root = etree.XML(out)
    except etree.ParseError:
        g.log.error("Failed to parse the gluster heal info xml output.")
        return None

    all_bricks_info = []
    for brick in root.findall("healInfo/bricks/brick"):
        brick_info = {}
        pending_files = []
        # Iterating the element directly is equivalent to the deprecated
        # getchildren() call.
        for child in brick:
            if child.tag == "file":
                pending_files.append({child.attrib['gfid']: child.text})
            else:
                brick_info[child.tag] = child.text
        if pending_files:
            brick_info['file'] = pending_files
        all_bricks_info.append(brick_info)
    return all_bricks_info
+
+
def get_heal_info_summary(mnode, volname):
    """From the xml output of heal info command get heal info summary
    i.e Bricks and it's corresponding number of entries, status.

    Args:
        mnode : Node on which commands are executed
        volname : Name of the volume

    Returns:
        NoneType: None if parse errors.
        dict: dict of dictionaries. brick names are the keys of the dict with
            each key having brick's status, numberOfEntries info as dict.
            Example:
                heal_info_summary_data = {
                    'ijk.lab.eng.xyz.com': {
                        'status': 'Connected'
                        'numberOfEntries': '11'
                    },
                    'def.lab.eng.xyz.com': {
                        'status': 'Transport endpoint is not connected',
                        'numberOfEntries': '-'
                    }
                }
    """
    heal_info_data = get_heal_info(mnode, volname)
    if heal_info_data is None:
        g.log.error("Unable to get heal info summary for the volume %s" %
                    volname)
        return None

    # Keep just the status and pending-entry count per brick name.
    return dict((info_data['name'],
                 {'status': info_data['status'],
                  'numberOfEntries': info_data['numberOfEntries']})
                for info_data in heal_info_data)
+
+
def get_heal_info_split_brain(mnode, volname):
    """Parse 'gluster volume heal <volname> info split-brain --xml' into
    per-brick split-brain data.

    Args:
        mnode : Node on which commands are executed
        volname : Name of the volume

    Returns:
        NoneType: None if the command or the XML parsing fails.
        list: list of dictionaries, one per brick, mapping each XML child
            tag to its text. Files in split-brain (if any) are gathered
            under the 'file' key as a list of {gfid: path} dicts.
    """
    cmd = "gluster volume heal %s info split-brain --xml" % volname
    ret, out, _ = g.run(mnode, cmd)
    if ret != 0:
        g.log.error("Failed to get the heal info xml output for the volume %s."
                    "Hence failed to get the heal info summary." % volname)
        return None

    try:
        root = etree.XML(out)
    except etree.ParseError:
        g.log.error("Failed to parse the gluster heal info xml output.")
        return None

    all_bricks_split_brain = []
    for brick in root.findall("healInfo/bricks/brick"):
        brick_data = {}
        files_in_split_brain = []
        # Iterating the element directly is equivalent to the deprecated
        # getchildren() call.
        for child in brick:
            if child.tag == "file":
                files_in_split_brain.append({child.attrib['gfid']: child.text})
            else:
                brick_data[child.tag] = child.text
        if files_in_split_brain:
            brick_data['file'] = files_in_split_brain
        all_bricks_split_brain.append(brick_data)
    return all_bricks_split_brain
+
+
def get_heal_info_split_brain_summary(mnode, volname):
    """Get heal info split_brain summary i.e Bricks and it's
    corresponding number of split-brain entries, status.

    Args:
        mnode : Node on which commands are executed
        volname : Name of the volume

    Returns:
        NoneType: None if parse errors.
        dict: dict of dictionaries. brick names are the keys of the dict with
            each key having brick's status, numberOfEntries info as dict.
            Example:
                heal_info_split_brain_summary_data = {
                    'ijk.lab.eng.xyz.com': {
                        'status': 'Connected'
                        'numberOfEntries': '11'
                    },
                    'def.lab.eng.xyz.com': {
                        'status': 'Connected',
                        'numberOfEntries': '11'
                    }
                }
    """
    split_brain_data = get_heal_info_split_brain(mnode, volname)
    if split_brain_data is None:
        g.log.error("Unable to get heal info summary for the volume %s" %
                    volname)
        return None

    # Keep just the status and split-brain entry count per brick name.
    return dict((info_data['name'],
                 {'status': info_data['status'],
                  'numberOfEntries': info_data['numberOfEntries']})
                for info_data in split_brain_data)
diff --git a/glustolibs-gluster/glustolibs/gluster/mount_ops.py b/glustolibs-gluster/glustolibs/gluster/mount_ops.py
new file mode 100644
index 000000000..1a697866e
--- /dev/null
+++ b/glustolibs-gluster/glustolibs/gluster/mount_ops.py
@@ -0,0 +1,509 @@
+#!/usr/bin/env python
+# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Description: Module for Mount operations.
+"""
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.windows_libs import powershell
+import copy
+
+
class GlusterMount():
    """Gluster Mount class

    Args:
        mount (dict): Mount dict with protocol, mountpoint, server,
            client, volname, options, smbuser, smbpasswd as keys.

    Note: smbuser, smbpasswd are applicable for windows mounts.

        client is a dict with host, super_user, platform as keys.

        platform should be specified in case of windows. By default
        it is assumed to be linux.

        super_user is 'root' by default.
        In case of windows the super_user can be the user who has
        all the admin previliges.

        Example:
            # NOTE: the protocol key read by __init__ is 'protocol'
            # (the original docstring wrongly said 'mount_protocol').
            mount =
                {'protocol': 'glusterfs',
                 'mountpoint': '/mnt/g1',
                 'server': 'abc.lab.eng.xyz.com',
                 'client': {'host': 'def.lab.eng.xyz.com'},
                 'volname': 'testvoi',
                 'options': ''
                }

            mount =
                {'protocol': 'nfs',
                 'mountpoint': '/mnt/n1',
                 'server': 'abc.lab.eng.xyz.com',
                 'client': {'host': 'def.lab.eng.xyz.com'},
                 'volname': 'testvoi',
                 'options': ''}

            mount =
                {'protocol': 'smb',
                 'mountpoint': '',
                 'server': 'abc.lab.eng.xyz.com',
                 'client': {
                     'host': 'def.lab.eng.xyz.com',
                     'super_user': 'Admin'
                 },
                 'volname': 'testvoi',
                 'options': '',
                 'smbuser': 'abc',
                 'smbpasswd': 'def'}
    Returns:
        Instance of GlusterMount class
    """
    def __init__(self, mount):
        # All attributes default to None/'' and are filled from 'mount'.
        self.mounttype = None
        self.mountpoint = None
        self.server_system = None
        self.client_system = None
        self.volname = None
        self.options = ''
        self.smbuser = None
        self.smbpasswd = None
        self.user = None
        self.platform = None

        # Get Protocol. Defaults to 'glusterfs' when not specified.
        if 'protocol' in mount:
            self.mounttype = mount['protocol']
        else:
            self.mounttype = "glusterfs"

        # Get mountpoint. Defaults to '*' for smb (drive letter assigned
        # by 'net use' at mount time) and '/mnt/<protocol>' otherwise.
        mount_point_defined = False
        if 'mountpoint' in mount:
            if mount['mountpoint']:
                mount_point_defined = True

        if mount_point_defined:
            self.mountpoint = mount['mountpoint']
        else:
            if self.mounttype == "smb":
                self.mountpoint = "*"
            else:
                self.mountpoint = "/mnt/%s" % self.mounttype

        # Get server
        self.server_system = mount['server']

        # Get client
        self.client_system = mount['client']['host']

        # Get super_user. Defaults to 'root'.
        user_defined = False
        if 'super_user' in mount['client']:
            if mount['client']['super_user']:
                self.user = mount['client']['super_user']
                user_defined = True

        if not user_defined:
            self.user = "root"

        # Get platform. Defaults to 'linux'.
        platform_defined = False
        if 'platform' in mount['client']:
            if mount['client']['platform']:
                self.platform = mount['client']['platform']
                platform_defined = True

        if not platform_defined:
            self.platform = 'linux'

        # Get Volume name
        self.volname = mount['volname']

        # Get options
        if 'options' in mount:
            self.options = mount['options']

        # If mounttype is 'smb' or 'cifs' get 'smbuser' and 'smbpassword'
        if self.mounttype == 'smb' or self.mounttype == 'cifs':
            if 'smbuser' in mount:
                if mount['smbuser']:
                    self.smbuser = mount['smbuser']

            if 'smbpasswd' in mount:
                if mount['smbpasswd']:
                    self.smbpasswd = mount['smbpasswd']

    def mount(self):
        """Mounts the volume

        Args:
            uses instance args passed at init

        Returns:
            bool: True on success and False on failure.
        """
        ret, out, err = mount_volume(self.volname, mtype=self.mounttype,
                                     mpoint=self.mountpoint,
                                     mserver=self.server_system,
                                     mclient=self.client_system,
                                     options=self.options,
                                     smbuser=self.smbuser,
                                     smbpasswd=self.smbpasswd,
                                     user=self.user)
        if ret != 0:
            return False
        else:
            # For smb, mount_volume returns the assigned drive letter.
            if self.mounttype == "smb":
                self.mountpoint = out.strip()
            return True

    def is_mounted(self):
        """Tests for mount on client

        Args:
            uses instance args passed at init

        Returns:
            bool: True on success and False on failure.
        """
        ret = is_mounted(self.volname,
                         mpoint=self.mountpoint,
                         mserver=self.server_system,
                         mclient=self.client_system,
                         mtype=self.mounttype,
                         user=self.user)

        if ret:
            return True
        else:
            return False

    def unmount(self):
        """Unmounts the volume

        Args:
            uses instance args passed at init

        Returns:
            bool: True on success and False on failure.
        """
        (ret, out, err) = umount_volume(mclient=self.client_system,
                                        mpoint=self.mountpoint,
                                        mtype=self.mounttype,
                                        user=self.user)
        rc = True
        if ret == 0:
            if self.mounttype == "smb":
                # Bug fix: the last disjunct was missing 'in out', making it
                # an always-truthy string literal and so the failure branch
                # unreachable.
                if not (('deleted successfully' in out) or
                        ('command completed successfully' in out) or
                        ('There are no entries in the list' in out) or
                        ('The network connection could not be found' in out)):
                    rc = False
                else:
                    # Drive letter released; reset for a possible re-mount.
                    self.mountpoint = "*"
        else:
            rc = False

        return rc
+
+
def is_mounted(volname, mpoint, mserver, mclient, mtype, user='root'):
    """Check if mount exist.

    Args:
        volname (str): Name of the volume
        mpoint (str): Mountpoint dir
        mserver (str): Server to which it is mounted to
        mclient (str): Client from which it is mounted.
        mtype (str): Mount type (glusterfs|nfs|smb|cifs)

    Kwargs:
        user (str): Super user of the node mclient

    Returns:
        bool: True if mounted and False otherwise.
    """
    # python will error on missing arg, so just checking for empty args here
    if not (volname and mpoint and mserver and mclient and mtype):
        g.log.error("Missing arguments for mount.")
        return False

    if mtype == "smb":
        # '*' means no drive letter was ever assigned, hence not mounted.
        if mpoint == "*":
            return False
        cmd = powershell("net use %s" % mpoint)
        ret, out, err = g.run(mclient, cmd, user)
        if ret != 0:
            return False
        expected_output = ("Remote name \\\%s\gluster-%s" %
                           (mserver, volname))
        return expected_output in out

    # Linux mounts: look for the volume/mountpoint/server triple in 'mount'.
    ret, _, _ = g.run(mclient, "mount | grep %s | grep %s | grep \"%s\""
                      % (volname, mpoint, mserver), user)
    if ret == 0:
        g.log.debug("Volume %s is mounted at %s:%s" % (volname, mclient,
                                                       mpoint))
        return True
    g.log.error("Volume %s is not mounted at %s:%s" % (volname,
                                                       mclient,
                                                       mpoint))
    return False
+
+
def mount_volume(volname, mtype, mpoint, mserver, mclient, options='',
                 smbuser=None, smbpasswd=None, user='root'):
    """Mount the gluster volume with specified options.

    Args:
        volname (str): Name of the volume to mount.
        mtype (str): Protocol to be used to mount.
        mpoint (str): Mountpoint dir.
        mserver (str): Server to mount.
        mclient (str): Client from which it has to be mounted.

    Kwargs:
        options (str): Options for the mount command.
        smbuser (str): SMB USERNAME. Used with mtype = 'smb'/'cifs'
        smbpasswd (str): SMB PASSWD. Used with mtype = 'smb'/'cifs'
        user (str): Super user of the node mclient

    Returns:
        tuple: Tuple containing three elements (ret, out, err).
            (0, '', '') if already mounted.
            (1, '', '') if smbuser/smbpasswd are missing for smb/cifs mounts.
            (ret, out, err) of mount command execution otherwise.
            For a successful smb mount 'out' is the assigned drive letter.
    """
    if is_mounted(volname, mpoint, mserver, mclient, mtype, user):
        g.log.debug("Volume %s is already mounted at %s" %
                    (volname, mpoint))
        return (0, '', '')

    if options != '':
        options = "-o %s" % options

    if mtype == 'smb':
        if smbuser is None or smbpasswd is None:
            g.log.error("smbuser and smbpasswd to be passed as parameters "
                        "for cifs mounts")
            return (1, '', '')

        mcmd = ("net use %s \\\\%s\\gluster-%s " % (mpoint, mserver, volname) +
                " /user:%s " % (smbuser) + '"' + smbpasswd + '"')

        mcmd = powershell(mcmd)

        ret, out, err = g.run(mclient, mcmd, user=user)
        if ret != 0:
            # Typo fix in log message: 'comand' -> 'command'.
            g.log.error("net use command failed on windows client %s "
                        "failed: %s" % (mclient, err))
            return (ret, out, err)

        if out.startswith('Drive'):
            drv_ltr = out.split(' ')[1]
            g.log.info("Samba share mount success on windows client %s. "
                       "Share is : %s" % (mclient, drv_ltr))
            return (ret, drv_ltr, err)

        g.log.error("net use command successful but error in mount of samba "
                    " share for windows client %s for reason %s" %
                    (mclient, err))
        return (1, out, err)

    if mtype == 'nfs':
        # Default to NFSv3 unless the caller already pinned a version.
        if not options:
            options = "-o vers=3"

        elif options and 'vers' not in options:
            options = options + ",vers=3"

    # Bug fix: this generic mount command was indented inside the nfs branch,
    # so plain glusterfs mounts hit NameError on 'mcmd'. Build it for every
    # linux mount type; the cifs branch below overrides it.
    mcmd = ("mount -t %s %s %s:/%s %s" %
            (mtype, options, mserver, volname, mpoint))

    if mtype == 'cifs':
        if smbuser is None or smbpasswd is None:
            g.log.error("smbuser and smbpasswd to be passed as parameters "
                        "for cifs mounts")
            return (1, '', '')

        mcmd = ("mount -t cifs -o username=%s,password=%s "
                "\\\\\\\\%s\\\\gluster-%s %s" % (smbuser, smbpasswd, mserver,
                                                 volname, mpoint))

    # Create mount dir
    _, _, _ = g.run(mclient, "test -d %s || mkdir -p %s" % (mpoint, mpoint),
                    user=user)

    # Create mount
    return g.run(mclient, mcmd, user=user)
+
+
def umount_volume(mclient, mpoint, mtype='', user='root'):
    """Unmounts the mountpoint.

    Args:
        mclient (str): Client from which it has to be mounted.
        mpoint (str): Mountpoint dir.

    Kwargs:
        mtype (str): Mounttype. Defaults to ''.
        user (str): Super user of the node mclient. Defaults to 'root'

    Returns:
        tuple: Tuple containing three elements (ret, out, err) as returned by
            umount command execution.
    """
    if mtype == "smb":
        # Windows: delete the mapped drive without prompting.
        cmd = powershell("net use %s /d /Y" % mpoint)
    else:
        # Linux: escalate from plain to forced to lazy unmount.
        cmd = ("umount %s || umount -f %s || umount -l %s" %
               (mpoint, mpoint, mpoint))
    return g.run(mclient, cmd, user=user)
+
+
def create_mount_objs(mounts):
    """Creates GlusterMount class objects for the given list of mounts

    Args:
        mounts (list): list of mounts with each element being dict having the
            specifics of each mount (keys: 'protocol', 'server', 'client',
            'volname'; optional: 'mountpoint', 'options', 'smbuser',
            'smbpasswd', 'num_of_mounts')

    Returns:
        list : List of GlusterMount class objects.

    Example:
        mount_objs = create_mount_objs(mounts)
    """
    mount_obj_list = []
    for mount in mounts:
        temp_mount = copy.deepcopy(mount)
        if (mount['protocol'] == "glusterfs" or mount['protocol'] == "nfs" or
                mount['protocol'] == "cifs"):
            # Bug fix: the original tested "'mountpoint' in
            # mount['mountpoint']" - a substring check on the mountpoint
            # VALUE (and a KeyError when the key was absent). Test the dict
            # key instead, falling back to a per-volume default dir.
            if 'mountpoint' in mount and mount['mountpoint']:
                temp_mount['mountpoint'] = mount['mountpoint']
            else:
                temp_mount['mountpoint'] = ("/mnt/%s_%s" %
                                            (mount['volname'],
                                             mount['protocol']))
        elif mount['protocol'] == "smb":
            # Same key-vs-value bug fix as above; smb defaults to '*'
            # (drive letter assigned at mount time).
            if 'mountpoint' in mount and mount['mountpoint']:
                temp_mount['mountpoint'] = mount['mountpoint']
            else:
                temp_mount['mountpoint'] = "*"

        num_of_mounts = 1
        if 'num_of_mounts' in mount:
            if mount['num_of_mounts']:
                num_of_mounts = mount['num_of_mounts']
        if num_of_mounts > 1:
            # Create that many objects, suffixing the mount dir with a
            # counter ('*' drive-letter mountpoints are left as-is).
            mount_dir = temp_mount['mountpoint']
            for count in range(1, num_of_mounts + 1):
                if mount_dir != "*":
                    temp_mount['mountpoint'] = '_'.join(
                        [mount_dir, str(count)])

                mount_obj_list.append(GlusterMount(temp_mount))
        else:
            mount_obj_list.append(GlusterMount(temp_mount))

    return mount_obj_list
+
+
def create_mounts(mount_objs):
    """Mounts every mount described by the given mount objects.

    Args:
        mount_objs (list): list of mounts objects with each element being
            the GlusterMount class object

    Returns:
        bool : True if creating the mount for all mount_objs is successful.
            False otherwise.

    Example:
        ret = create_mounts(create_mount_objs(mounts))
    """
    # Attempt every mount even when an earlier one fails, then report the
    # overall outcome.
    results = [mount_obj.mount() for mount_obj in mount_objs]
    return all(results)
+
+
def unmount_mounts(mount_objs):
    """Unmounts every mount described by the given mount objects.

    Args:
        mount_objs (list): list of mounts objects with each element being
            the GlusterMount class object

    Returns:
        bool : True if unmounting the mount for all mount_objs is successful.
            False otherwise.

    Example:
        ret = unmount_mounts(create_mount_objs(mounts))
    """
    # Attempt every unmount even when an earlier one fails, then report the
    # overall outcome.
    results = [mount_obj.unmount() for mount_obj in mount_objs]
    return all(results)
diff --git a/glustolibs-gluster/glustolibs/gluster/peer_ops.py b/glustolibs-gluster/glustolibs/gluster/peer_ops.py
new file mode 100644
index 000000000..9f74bb7c7
--- /dev/null
+++ b/glustolibs-gluster/glustolibs/gluster/peer_ops.py
@@ -0,0 +1,424 @@
+#!/usr/bin/env python
+# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Description: Library for gluster peer operations.
+"""
+
+
+from glusto.core import Glusto as g
+import re
+import time
+import socket
+try:
+ import xml.etree.cElementTree as etree
+except ImportError:
+ import xml.etree.ElementTree as etree
+
+
def peer_probe(mnode, server):
    """Run 'gluster peer probe' for the given server.

    Args:
        mnode (str): Node on which command has to be executed.
        server (str): Server to be peer probed.

    Returns:
        tuple: (ret, out, err) as returned by command execution, where
            'ret' (int) is the exit status, 'out' (str) is the stdout and
            'err' (str) is the stderr of the command.
    """
    return g.run(mnode, "gluster peer probe %s" % server)
+
+
def peer_detach(mnode, server, force=False):
    """Run 'gluster peer detach' for the given server.

    Args:
        mnode (str): Node on which command has to be executed.
        server (str): Server to be peer detached.

    Kwargs:
        force (bool): option to detach peer. Defaults to False.

    Returns:
        tuple: (ret, out, err) as returned by command execution, where
            'ret' (int) is the exit status, 'out' (str) is the stdout and
            'err' (str) is the stderr of the command.
    """
    cmd = "gluster peer detach %s" % server
    if force:
        cmd = cmd + " force"
    return g.run(mnode, cmd)
+
+
def peer_status(mnode):
    """Runs 'gluster peer status' on specified node.

    Args:
        mnode (str): Node on which command has to be executed.

    Returns:
        tuple: (ret, out, err) as returned by command execution, where
            'ret' (int) is the exit status, 'out' (str) is the stdout and
            'err' (str) is the stderr of the command.
    """
    return g.run(mnode, "gluster peer status")
+
+
def pool_list(mnode):
    """Runs 'gluster pool list' command on the specified node.

    Args:
        mnode (str): Node on which command has to be executed.

    Returns:
        tuple: (ret, out, err) as returned by command execution, where
            'ret' (int) is the exit status, 'out' (str) is the stdout and
            'err' (str) is the stderr of the command.
    """
    return g.run(mnode, "gluster pool list")
+
+
def peer_probe_servers(mnode, servers, validate=True, time_delay=10):
    """Probe specified servers and validate whether probed servers
    are in cluster and connected state if validate is set to True.

    Args:
        mnode (str): Node on which command has to be executed.
        servers (list): List of servers to be peer probed.

    Kwargs:
        validate (bool): True to validate if probed peer is in cluster and
            connected state. False otherwise. Defaults to True.
        time_delay (int): time delay before validating peer status.
            Defaults to 10 seconds.

    Returns:
        bool: True on success and False on failure.
    """
    # Work on a private copy and never probe the management node itself.
    if not isinstance(servers, list):
        servers = [servers]
    else:
        servers = list(servers)
    if mnode in servers:
        servers.remove(mnode)

    # Servers already present in the pool need not be probed again.
    nodes_in_pool_list = nodes_from_pool_list(mnode)
    if nodes_in_pool_list is None:
        g.log.error("Unable to get nodes from gluster pool list. "
                    "Failing peer probe.")
        return False

    success_pattern = r'^peer\sprobe\:\ssuccess(.*)'
    for server in servers:
        if server in nodes_in_pool_list:
            continue
        ret, out, _ = peer_probe(mnode, server)
        if ret != 0 or re.search(success_pattern, out) is None:
            g.log.error("Failed to peer probe the node '%s'.", server)
            return False
        g.log.info("Successfully peer probed the node '%s'.", server)

    # Validating whether peer is in connected state after peer probe
    if validate:
        time.sleep(time_delay)
        if not is_peer_connected(mnode, servers):
            g.log.error("Validation after peer probe failed.")
            return False
        g.log.info("Validation after peer probe is successful.")

    return True
+
+
def peer_detach_servers(mnode, servers, force=False, validate=True,
                        time_delay=10):
    """Detach peers and validate status of peer if validate is set to True.

    Args:
        mnode (str): Node on which command has to be executed.
        servers (list): List of servers to be peer detached.

    Kwargs:
        force (bool): option to detach peer.
            Defaults to False.
        validate (bool): True if status of the peer needs to be validated,
            False otherwise. Defaults to True.
        time_delay (int): time delay before executing validating peer.
            status. Defaults to 10 seconds.

    Returns:
        bool: True on success and False on failure (including when
            validation finds a detached peer still in the pool).
    """
    # Work on a private copy and never detach the management node itself.
    if not isinstance(servers, list):
        servers = [servers]
    else:
        servers = servers[:]

    if mnode in servers:
        servers.remove(mnode)

    for server in servers:
        ret, out, _ = peer_detach(mnode, server, force)
        if (ret != 0 or
                re.search(r'^peer\sdetach\:\ssuccess(.*)', out) is None):
            g.log.error("Failed to peer detach the node '%s'.", server)
            return False

    # Validating whether peer detach is successful
    if validate:
        time.sleep(time_delay)
        nodes_in_pool = nodes_from_pool_list(mnode)
        if nodes_in_pool is None:
            # Bug fix: previously a None here raised a TypeError on the
            # membership test below; treat it as a validation failure.
            g.log.error("Unable to get nodes from gluster pool list. "
                        "Failing validation of peer detach.")
            return False
        rc = True
        for server in servers:
            if server in nodes_in_pool:
                g.log.error("Peer '%s' still in pool" % server)
                rc = False
        if not rc:
            g.log.error("Validation after peer detach failed.")
            # Bug fix: the function previously returned True even when
            # detached peers were still present in the pool.
            return False
        g.log.info("Validation after peer detach is successful")

    return True
+
+
def nodes_from_pool_list(mnode):
    """Return list of nodes from the 'gluster pool list'.

    Args:
        mnode (str): Node on which command has to be executed.

    Returns:
        NoneType: None if command execution fails.
        list: List of nodes in pool on Success, Empty list on failure.
    """
    pool_list_data = get_pool_list(mnode)
    if pool_list_data is None:
        g.log.error("Unable to get Nodes from the pool list command.")
        return None
    # Every entry of the parsed pool list carries a 'hostname' key.
    return [item['hostname'] for item in pool_list_data]
+
+
def get_peer_status(mnode):
    """Parse the output of command 'gluster peer status'.

    Args:
        mnode (str): Node on which command has to be executed.

    Returns:
        NoneType: None if command execution fails or parse errors.
        list: list of dicts on success. Each dict maps the xml tags of a
            <peer> element to their text; the 'hostnames' entry is a list
            of hostname strings.

    Examples:
        >>> get_peer_status(mnode = 'abc.lab.eng.xyz.com')
        [{'uuid': '77dc299a-32f7-43d8-9977-7345a344c398',
        'hostname': 'ijk.lab.eng.xyz.com',
        'state': '3',
        'hostnames' : ['ijk.lab.eng.xyz.com'],
        'connected': '1',
        'stateStr': 'Peer in Cluster'},

        {'uuid': 'b15b8337-9f8e-4ec3-8bdb-200d6a67ae12',
        'hostname': 'def.lab.eng.xyz.com',
        'state': '3',
        'hostnames': ['def.lab.eng.xyz.com'],
        'connected': '1',
        'stateStr': 'Peer in Cluster'}
        ]
    """
    ret, out, _ = g.run(mnode, "gluster peer status --xml")
    if ret != 0:
        g.log.error("Failed to execute peer status command on node '%s'. "
                    "Hence failed to parse the peer status.", mnode)
        return None

    try:
        root = etree.XML(out)
    except etree.ParseError:
        g.log.error("Failed to parse the gluster peer status xml output.")
        return None

    peer_status_list = []
    for peer in root.findall("peerStatus/peer"):
        peer_dict = {}
        # Iterate children directly instead of the deprecated
        # Element.getchildren() (removed in Python 3.9).
        for element in peer:
            if element.tag == "hostnames":
                # Collect nested <hostname> entries as a plain list
                # instead of stashing a list into element.text.
                peer_dict[element.tag] = [h.text for h in element]
            else:
                peer_dict[element.tag] = element.text
        peer_status_list.append(peer_dict)
    return peer_status_list
+
+
def get_pool_list(mnode):
    """Parse the output of 'gluster pool list' command.

    Args:
        mnode (str): Node on which command has to be executed.

    Returns:
        NoneType: None if command execution fails, parse errors.
        list: list of dicts on success. Each dict maps the xml tags of a
            <peer> element to their text; the local node's 'localhost'
            entry is replaced by mnode, and 'hostnames' (when present)
            becomes a list of strings.

    Examples:
        >>> get_pool_list(mnode = 'abc.lab.eng.xyz.com')
        [{'uuid': 'a2b88b10-eba2-4f97-add2-8dc37df08b27',
        'hostname': 'abc.lab.eng.xyz.com',
        'state': '3',
        'connected': '1',
        'stateStr': 'Peer in Cluster'},

        {'uuid': 'b15b8337-9f8e-4ec3-8bdb-200d6a67ae12',
        'hostname': 'def.lab.eng.xyz.com',
        'state': '3',
        'hostnames': ['def.lab.eng.xyz.com'],
        'connected': '1',
        'stateStr': 'Peer in Cluster'}
        ]
    """
    ret, out, _ = g.run(mnode, "gluster pool list --xml")
    if ret != 0:
        g.log.error("Failed to execute 'pool list' on node %s. "
                    "Hence failed to parse the pool list.", mnode)
        return None

    try:
        root = etree.XML(out)
    except etree.ParseError:
        g.log.error("Failed to parse the gluster pool list xml output.")
        return None

    pool_list_list = []
    for peer in root.findall("peerStatus/peer"):
        peer_dict = {}
        # Iterate children directly instead of the deprecated
        # Element.getchildren() (removed in Python 3.9).
        for element in peer:
            if element.tag == "hostname" and element.text == 'localhost':
                # 'gluster pool list' reports the local node as
                # 'localhost'; substitute the actual node name so callers
                # can match it against their server lists.
                peer_dict[element.tag] = mnode
            elif element.tag == "hostnames":
                # Collect nested <hostname> entries as a plain list
                # instead of stashing a list into element.text.
                peer_dict[element.tag] = [h.text for h in element]
            else:
                peer_dict[element.tag] = element.text
        pool_list_list.append(peer_dict)
    return pool_list_list
+
+
def is_peer_connected(mnode, servers):
    """Checks whether specified peers are in cluster and 'Connected' state.

    Args:
        mnode (str): Node from which peer status has to be executed.
        servers (list): List of servers to be validated.

    Returns
        bool : True on success (peer in cluster and connected), False on
            failure.
    """
    # Work on a private copy; the management node itself is never checked.
    if not isinstance(servers, list):
        servers = [servers]
    else:
        servers = servers[:]

    if mnode in servers:
        servers.remove(mnode)

    # (Removed an unused 'peer_connected = True' local that was never read.)
    peer_status_list = get_peer_status(mnode)
    if peer_status_list is None:
        g.log.error("Failed to parse the peer status. Hence failed to "
                    "validate the peer connected state.")
        return False
    if peer_status_list == []:
        g.log.error("No peers present in the pool. Servers are not yet "
                    "connected.")
        return False

    # Convert all hostnames to ip's so hostname/IP mixes compare equal.
    server_ips = [socket.gethostbyname(server) for server in servers]

    is_connected = True
    for peer_stat in peer_status_list:
        if socket.gethostbyname(peer_stat['hostname']) in server_ips:
            if (re.match(r'([0-9a-f]{8})(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}',
                         peer_stat['uuid'], re.I) is None):
                g.log.error("Invalid UUID for the node '%s'",
                            peer_stat['hostname'])
                is_connected = False
            if (peer_stat['stateStr'] != "Peer in Cluster" or
                    peer_stat['connected'] != '1'):
                g.log.error("Peer '%s' not in connected state",
                            peer_stat['hostname'])
                is_connected = False

    if not is_connected:
        return False

    # Every requested server must actually appear in the peer status.
    peer_ips = [socket.gethostbyname(peer_stat['hostname']) for
                peer_stat in peer_status_list]
    if not set(server_ips).issubset(peer_ips):
        servers_not_in_pool = list(set(server_ips).difference(peer_ips))
        for index, server in enumerate(servers_not_in_pool):
            if server not in servers:
                # Map the missing IP back to a hostname for readability.
                servers_not_in_pool[index] = socket.gethostbyaddr(server)[0]
        g.log.error("Servers: '%s' not yet added to the pool.",
                    servers_not_in_pool)
        return False

    g.log.info("Servers: '%s' are all 'Peer in Cluster' and 'Connected' "
               "state.", servers)
    return True
diff --git a/glustolibs-gluster/glustolibs/gluster/samba_ops.py b/glustolibs-gluster/glustolibs/gluster/samba_ops.py
new file mode 100644
index 000000000..e4f5b0154
--- /dev/null
+++ b/glustolibs-gluster/glustolibs/gluster/samba_ops.py
@@ -0,0 +1,400 @@
+#!/usr/bin/env python
+# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Description: Library for samba operations.
+"""
+
+
+from glusto.core import Glusto as g
+import time
+
+
def start_smb_service(mnode):
    """Start smb service on the specified node.

    Args:
        mnode (str): Node on which smb service has to be started

    Returns:
        bool: True on successfully starting smb service. False otherwise.
    """
    # First enable Samba on boot, then start the service; bail out on the
    # first command that fails.
    steps = (
        ("chkconfig smb on",
         "Unable to set chkconfig smb on",
         "chkconfig smb on successful"),
        ("service smb start",
         "Unable to start the smb service",
         "Successfully started smb service"),
    )
    for cmd, err_msg, ok_msg in steps:
        ret, _, _ = g.run(mnode, cmd)
        if ret != 0:
            g.log.error(err_msg)
            return False
        g.log.info(ok_msg)
    return True
+
+
def smb_service_status(mnode):
    """Get the status of the smb service on the specified node.

    Args:
        mnode (str): Node on which smb service status is checked.

    Returns:
        tuple: (ret, out, err) as returned by command execution, where
            'ret' (int) is the exit status, 'out' (str) is the stdout and
            'err' (str) is the stderr of the command.
    """
    return g.run(mnode, "service smb status")
+
+
def is_smb_service_running(mnode):
    """Check whether the smb service is running on the specified node.

    (Improvement: added the missing docstring and collapsed the verbose
    if/else into a direct boolean return.)

    Args:
        mnode (str): Node on which the smb service status is checked.

    Returns:
        bool: True if the smb service status reports 'active (running)'.
            False otherwise, including when the status command fails.
    """
    ret, out, _ = smb_service_status(mnode)
    if ret != 0:
        return False
    return "Active: active (running)" in out
+
+
def stop_smb_service(mnode):
    """Stop smb service on the specified node.

    Args:
        mnode (str): Node on which smb service has to be stopped.

    Returns:
        bool: True on successfully stopping smb service. False otherwise.
    """
    # First disable Samba on boot, then stop the service; bail out on the
    # first command that fails.
    steps = (
        ("chkconfig smb off",
         "Unable to set chkconfig smb off",
         "chkconfig smb off successful"),
        ("service smb stop",
         "Unable to stop the smb service",
         "Successfully stopped smb service"),
    )
    for cmd, err_msg, ok_msg in steps:
        ret, _, _ = g.run(mnode, cmd)
        if ret != 0:
            g.log.error(err_msg)
            return False
        g.log.info(ok_msg)
    return True
+
+
def create_smb_users(servers, smb_users_info, start_uid=50000):
    """Creates SMB users on specified servers and sets password for SMB users.

    Args:
        servers (list): List of server hosts on which smb users has to be
            created
        smb_users_info (dict): Dict containing users info. Example:
            smb_users_info = {
                'root': {'password': 'foobar',
                         'acl': ''
                         },
                'user1': {'password': 'abc',
                          'acl': ''
                          },
                'user2': {'password': 'xyz',
                          'acl': ''
                          }
                }
        start_uid (int): starting uid number for the users

    Returns:
        bool: True on successfully creating smb users. False otherwise.
    """
    # Each user (except 'root') is assigned a consecutive uid starting at
    # start_uid so that uids agree across all servers.
    uid = start_uid
    for smb_user in smb_users_info.keys():
        if smb_user == 'root':
            continue
        # NOTE(review): 'user_add_command' is built but never executed;
        # the per-server branches below issue their own commands instead.
        user_add_command = ("getent passwd %d | grep %s &> /dev/null || "
                            "useradd -u %d %s" % (uid, smb_user,
                                                  uid, smb_user))
        for server in servers:
            # Check if user already exist with same uid
            cmd = ("getent passwd %d" % uid)
            ret, out, err = g.run(server, cmd)
            if ret == 0:
                # uid is taken: nothing to do if it already belongs to
                # smb_user, otherwise remove the conflicting account.
                if smb_user in out.split(":")[0]:
                    continue
                else:
                    # NOTE(review): this deletes the account named
                    # 'smb_user' (not the account that holds this uid),
                    # and no 'useradd' follows on this path, so the user
                    # may end up missing on this server — confirm intent.
                    cmd = ("userdel -f %s" % smb_user)
                    ret, _, _ = g.run(server, cmd)
                    if ret != 0:
                        g.log.error("Unable to delete the smb user '%s' on "
                                    "server %s" % (smb_user, server))
                        return False

            else:
                # uid is free: create the user with that uid.
                cmd = ("useradd -u %d %s" % (uid, smb_user))
                ret, out, err = g.run(server, cmd)
                if ret != 0:
                    g.log.error("Unable to add the smb user '%s' on "
                                "server %s" % (smb_user, server))
                    return False
        uid = uid + 1

    # The samba password is set on the first server only — presumably the
    # passdb is shared/replicated across the cluster; TODO confirm.
    mnode = servers[0]
    for smb_user in smb_users_info.keys():
        if 'password' in smb_users_info[smb_user]:
            smbpasswd = smb_users_info[smb_user]['password']
        else:
            g.log.error("Password not found for the user %s" % smb_user)
            return False

        # Set smb password for smb_user
        cmd = ("(echo \"%s\"; echo \"%s\") | smbpasswd -a %s" %
               (smbpasswd, smbpasswd, smb_user))

        ret, _, _ = g.run(mnode, cmd)
        if ret != 0:
            g.log.error("Unable to set the smb password for smb user %s" %
                        smb_user)
            return False
        g.log.info("Successfully set password for smb user %s on node %s" %
                   (smb_user, mnode))

    return True
+
+
def delete_smb_users(servers, smb_users_info):
    """Delete the given SMB users (except 'root') from all servers.

    Args:
        servers (list): List of server hosts from which the smb users
            have to be removed.
        smb_users_info (dict): Dict keyed by user name (same structure as
            used by create_smb_users). The 'root' entry is never deleted.

    Returns:
        bool: True if every user was removed (or already absent) on every
            server. False otherwise.
    """
    rc = True
    non_root_users = [user for user in smb_users_info.keys()
                      if user != 'root']
    for smb_user in non_root_users:
        remove_cmd = ("userdel -r -f %s" % smb_user)
        absent_msg = ("userdel: user '%s' does not exist" % smb_user)
        for server in servers:
            ret, out, err = g.run(server, remove_cmd)
            # An already-absent user is not a failure.
            if ret != 0 and absent_msg not in out:
                rc = False
    return rc
+
+
def list_smb_shares(mnode):
    """List all the gluster volumes that are exported as SMB Shares

    Args:
        mnode (str): Node on which commands has to be executed.

    Returns:
        list: List of all volume names that are exported as SMB Shares.
            Empty list if no volumes are exported as SMB Share.
    """
    ret, out, err = g.run(mnode, "smbclient -L localhost")
    if ret != 0:
        g.log.error("Failed to find the SMB Shares")
        return []
    # Gluster-backed shares show up with a 'gluster-' prefix; the share
    # name is the first whitespace-delimited token of the line.
    return [line.split(" ")[0].strip()
            for line in out.splitlines() if 'gluster-' in line]
+
+
def share_volume_over_smb(mnode, volname, servers, smb_users_info):
    """Sharing volumes over SMB

    Args:
        mnode (str): Node on which commands has to be executed.
        volname (str): Name of the volume to be shared.
        servers (list): List of all servers in the storage pool.
        smb_users_info (dict): Dict containing users info. Example:
            smb_users_info = {
                'root': {'password': 'foobar',
                         'acl': ''
                         },
                'user1': {'password': 'abc',
                          'acl': ''
                          },
                'user2': {'password': 'xyz',
                          'acl': ''
                          }
                }

    Returns:
        bool : True on successfully sharing the volume over SMB.
            False otherwise
    """
    # Set volume option 'stat-prefetch' to 'off'.
    cmd = "gluster volume set %s stat-prefetch off" % volname
    ret, _, _ = g.run(mnode, cmd)
    if ret != 0:
        g.log.error("Failed to set the volume option stat-prefetch off")
        return False
    g.log.info("Successfully set 'stat-prefetch' to 'off' on %s" % volname)

    # Set volume option 'server.allow-insecure' to 'on'.
    cmd = "gluster volume set %s server.allow-insecure on" % volname
    ret, _, _ = g.run(mnode, cmd)
    if ret != 0:
        g.log.error("Failed to set the volume option server-allow-insecure")
        return False
    g.log.info("Successfully set 'server-allow-insecure' to 'on' on %s" %
               volname)

    # Set 'storage.batch-fsync-delay-usec' to 0.
    # This is to ensure ping_pong's lock and I/O coherency tests works on CIFS.
    cmd = ("gluster volume set %s storage.batch-fsync-delay-usec 0" % volname)
    ret, _, _ = g.run(mnode, cmd)
    if ret != 0:
        g.log.error("Failed to set the volume option "
                    "'storage.batch-fsync-delay-usec' to 0 on %s" % volname)
        return False
    g.log.info("Successfully set 'storage.batch-fsync-delay-usec' to 0 on %s" %
               volname)

    # Edit the /etc/glusterfs/glusterd.vol in each Red Hat Gluster Storage
    # node, to add a line 'option rpc-auth-allow-insecure on'. The shell
    # command is idempotent: the leading grep short-circuits the edit when
    # the option is already present.
    glusterd_volfile = "/etc/glusterfs/glusterd.vol"
    glusterd_volfile_edit_cmd = (
        ("grep -F 'option rpc-auth-allow-insecure on' %s > /dev/null || "
         "(cp %s %s.orig && "
         "sed -i '/^end-volume/d' %s && "
         "echo ' option rpc-auth-allow-insecure on' >> %s && "
         "echo 'end-volume' >> %s )") %
        (glusterd_volfile, glusterd_volfile, glusterd_volfile,
         glusterd_volfile, glusterd_volfile, glusterd_volfile))
    results = g.run_parallel(servers, glusterd_volfile_edit_cmd)
    rc = True
    # Bug fix: use items() instead of the Python-2-only iteritems() so
    # this module keeps working under Python 3.
    for server, ret_values in results.items():
        retcode, out, err = ret_values
        if retcode != 0:
            g.log.error("Unable to edit glusterd volfile on server %s", server)
            rc = False
    if not rc:
        return False
    g.log.info("Succefully edited all the servers glusterd volfile to add "
               "the setting 'option rpc-auth-allow-insecure on'")

    # Restart glusterd service on each Red Hat Gluster Storage node.
    # Deferred import (presumably to avoid a circular import at module
    # load time — confirm).
    from glustolibs.gluster.gluster_init import restart_glusterd
    ret = restart_glusterd(servers)
    if not ret:
        g.log.error("Unable to restart glusterd on few servers")
        return False
    g.log.info("Successfully restarted glusterd on all servers")
    # Give glusterd/samba time to settle before probing the share.
    time.sleep(30)

    # Verify if the volume can be accessed from the SMB/CIFS share.
    cmd = ("smbclient -L localhost -U | grep -i -Fw gluster-%s " % volname)
    ret, _, _ = g.run(mnode, cmd)
    if ret != 0:
        g.log.error("volume '%s' not accessable via SMB/CIFS share" % volname)
        return False
    g.log.info("volume '%s' can be accessed from SMB/CIFS share" % volname)

    # To verify if the SMB/CIFS share can be accessed by the root/non-root user
    # TBD

    # Enable mounting volumes using SMB
    ret = enable_mounting_volume_using_smb(mnode, volname, smb_users_info)
    if not ret:
        g.log.error("Failed to enable mounting volumes using SMB")
        return False
    g.log.info("Successfully enabled mounting volumes using SMV for the "
               "smbusers: %s" % smb_users_info.keys())
    return True
+
+
def enable_mounting_volume_using_smb(mnode, volname, smb_users_info):
    """Enable mounting volume using SMB. Set ACL's for non-root users.

    Args:
        mnode (str): Node on which commands are executed.
        volname (str): Name of the volume on which acl's has to be set.
        smb_users_info (dict): Dict containing users info. Example:
            smb_users_info = {
                'root': {'password': 'foobar',
                         'acl': ''
                         },
                'user1': {'password': 'abc',
                          'acl': ''
                          },
                'user2': {'password': 'xyz',
                          'acl': ''
                          }
                }
    Returns:
        bool: True on successfully enabling to mount volume using SMB.
            False otherwise.
    """
    # Create a temp mount to provide required permissions to the smb user.
    # Deferred import (presumably to avoid a circular import at module
    # load time — confirm).
    from glustolibs.gluster.mount_ops import GlusterMount
    mount = {
        'protocol': 'glusterfs',
        'server': mnode,
        'volname': volname,
        'client': {
            'host': mnode
        },
        'mountpoint': '/tmp/gluster_smb_set_user_permissions_%s' % volname,
        'options': 'acl'
    }
    mount_obj = GlusterMount(mount)
    ret = mount_obj.mount()
    if not ret:
        g.log.error("Unable to create temporary mount for providing "
                    "required permissions to the smb users")
        return False
    g.log.info("Successfully created temporary mount for providing "
               "required permissions to the smb users")

    # Provide required permissions to the smb user. A missing or empty
    # 'acl' entry falls back to "rwx".
    for smb_user in smb_users_info.keys():
        if smb_user != 'root':
            if 'acl' in smb_users_info[smb_user]:
                acl = smb_users_info[smb_user]['acl']
                if not acl:
                    acl = "rwx"
            else:
                acl = "rwx"

            cmd = ("setfacl -m user:%s:%s %s" % (smb_user, acl,
                                                 mount_obj.mountpoint))
            ret, _, _ = g.run(mnode, cmd)
            if ret != 0:
                g.log.error("Unable to provide required permissions to the "
                            "smb user %s " % smb_user)
                # Bug fix: clean up the temporary mount before failing;
                # it was previously leaked on this error path.
                mount_obj.unmount()
                return False
            g.log.info("Successfully provided required permissions to the "
                       "smb user %s " % smb_user)

    # Verify SMB/CIFS share can be accessed by the user

    # Unmount the temp mount created
    ret = mount_obj.unmount()
    if not ret:
        # Bug fix: the failure was previously only logged — a success
        # message was still emitted and the function returned True.
        g.log.error("Unable to unmount the temp mount")
        return False
    g.log.info("Successfully unmounted the temp mount")

    return True
diff --git a/glustolibs-gluster/glustolibs/gluster/uss_ops.py b/glustolibs-gluster/glustolibs/gluster/uss_ops.py
new file mode 100644
index 000000000..3275a6e61
--- /dev/null
+++ b/glustolibs-gluster/glustolibs/gluster/uss_ops.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python
+# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Description: Module for gluster uss operations
+"""
+
+from glusto.core import Glusto as g
+
+
def enable_uss(mnode, volname):
    """Enables uss on the specified volume

    Args:
        mnode (str): Node on which cmd has to be executed.
        volname (str): volume name

    Returns:
        tuple: (ret, out, err) as returned by command execution, where
            'ret' (int) is the exit status, 'out' (str) is the stdout and
            'err' (str) is the stderr of the command.
    """
    return g.run(mnode,
                 "gluster volume set %s features.uss enable" % volname)
+
def disable_uss(mnode, volname):
    """Disables uss on the specified volume

    Args:
        mnode (str): Node on which cmd has to be executed.
        volname (str): volume name

    Returns:
        tuple: (ret, out, err) as returned by command execution, where
            'ret' (int) is the exit status, 'out' (str) is the stdout and
            'err' (str) is the stderr of the command.
    """
    return g.run(mnode,
                 "gluster volume set %s features.uss disable" % volname)
+
+
def is_uss_enabled(mnode, volname):
    """Check if uss is Enabled on the specified volume

    Args:
        mnode (str): Node on which cmd has to be executed.
        volname (str): volume name

    Returns:
        bool : True if uss is enabled on the volume. False otherwise
            (including when the option cannot be read).
    """
    # Deferred import, matching the module's existing convention.
    from glustolibs.gluster.volume_ops import get_volume_options
    option_dict = get_volume_options(mnode=mnode, volname=volname,
                                     option="uss")
    if option_dict is None:
        g.log.error("USS is not set on the volume %s" % volname)
        return False

    return option_dict.get('features.uss') == 'enable'
+
+
def is_uss_disabled(mnode, volname):
    """Check if uss is disabled on the specified volume

    Args:
        mnode (str): Node on which cmd has to be executed.
        volname (str): volume name

    Returns:
        bool : True if uss is disabled on the volume. False otherwise
            (including when the option cannot be read).
    """
    # Deferred import, matching the module's existing convention.
    from glustolibs.gluster.volume_ops import get_volume_options
    option_dict = get_volume_options(mnode=mnode, volname=volname,
                                     option="uss")
    if option_dict is None:
        g.log.error("USS is not set on the volume %s" % volname)
        return False

    return option_dict.get('features.uss') == 'disable'
diff --git a/glustolibs-gluster/glustolibs/gluster/volume_libs.py b/glustolibs-gluster/glustolibs/gluster/volume_libs.py
new file mode 100644
index 000000000..37af7fa57
--- /dev/null
+++ b/glustolibs-gluster/glustolibs/gluster/volume_libs.py
@@ -0,0 +1,980 @@
+#!/usr/bin/env python
+# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Description: Module for gluster volume related helper functions.
+"""
+
+
+from glusto.core import Glusto as g
+import time
+from glustolibs.gluster.volume_ops import get_volume_info
+
+
def setup_volume(mnode, all_servers_info, volume_config, force=False):
    """Setup Volume with the configuration defined in volume_config.

    Creates and starts the volume, and optionally attaches a tier, enables
    quota, enables USS and sets volume options, as requested by the config.

    Args:
        mnode (str): Node on which commands has to be executed
        all_servers_info (dict): Information about all servers.
            example :
            all_servers_info = {
                'abc.lab.eng.xyz.com': {
                    'host': 'abc.lab.eng.xyz.com',
                    'brick_root': '/bricks',
                    'devices': ['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde']
                    },
                'def.lab.eng.xyz.com':{
                    'host': 'def.lab.eng.xyz.com',
                    'brick_root': '/bricks',
                    'devices': ['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde']
                    }
                }
        volume_config (dict): Dict containing volume information
            example :
            volume_config = {
                'name': 'testvol',
                'servers': ['server-vm1', 'server-vm2', 'server-vm3',
                            'server-vm4'],
                'voltype': {'type': 'distributed',
                            'dist_count': 4,
                            'transport': 'tcp'},
                'extra_servers': ['server-vm9', 'server-vm10',
                                  'server-vm11', 'server-vm12'],
                'quota': {'limit_usage': {'path': '/', 'percent': None,
                                          'size': '100GB'},
                          'enable': False},
                'uss': {'enable': False},
                'tier': {'create_tier': True,
                         'tier_type': {'type': 'distributed-replicated',
                                       'replica_count': 2,
                                       'dist_count': 2,
                                       'transport': 'tcp'}},
                'options': {'performance.readdir-ahead': True}
                }
        force (bool): If True, volume creation is forced. Defaults to False.

    Returns:
        bool : True on successful setup. False Otherwise
    """
    # Get volume name
    if 'name' in volume_config:
        volname = volume_config['name']
    else:
        g.log.error("Unable to get the volume name from config")
        return False

    # Nothing to do if the volume already exists
    volinfo = get_volume_info(mnode=mnode)
    if volinfo is not None and volname in volinfo.keys():
        g.log.info("volume %s already exists. Returning...", volname)
        return True

    # Get servers
    if 'servers' in volume_config:
        servers = volume_config['servers']
    else:
        g.log.error("Unable to get the volume servers from config")
        return False

    # Get the volume type and values
    if not ('voltype' in volume_config and 'type' in volume_config['voltype']):
        g.log.error("Voltype not defined in config for the volume %s",
                    volname)
        return False

    volume_type = volume_config['voltype']['type']
    kwargs = {}
    number_of_bricks = 1
    if volume_type == 'distributed':
        if 'dist_count' in volume_config['voltype']:
            kwargs['dist_count'] = (volume_config['voltype']['dist_count'])
        else:
            g.log.error("Distibute Count not specified in the volume config")
            return False

        number_of_bricks = kwargs['dist_count']

    elif volume_type == 'replicated':
        if 'replica_count' in volume_config['voltype']:
            kwargs['replica_count'] = (volume_config['voltype']
                                       ['replica_count'])
        else:
            g.log.error("Replica count not specified in the volume config")
            return False

        # arbiter_count is optional for replicated volumes
        if 'arbiter_count' in volume_config['voltype']:
            kwargs['arbiter_count'] = (volume_config['voltype']
                                       ['arbiter_count'])

        number_of_bricks = kwargs['replica_count']

    elif volume_type == 'distributed-replicated':
        if 'dist_count' in volume_config['voltype']:
            kwargs['dist_count'] = (volume_config['voltype']['dist_count'])
        else:
            g.log.error("Distibute Count not specified in the volume config")
            return False

        if 'replica_count' in volume_config['voltype']:
            kwargs['replica_count'] = (volume_config['voltype']
                                       ['replica_count'])
        else:
            g.log.error("Replica count not specified in the volume config")
            return False

        number_of_bricks = (kwargs['dist_count'] * kwargs['replica_count'])

    elif volume_type == 'dispersed':
        if 'disperse_count' in volume_config['voltype']:
            kwargs['disperse_count'] = (volume_config['voltype']
                                        ['disperse_count'])
        else:
            g.log.error("Disperse Count not specified in the volume config")
            return False

        if 'redundancy_count' in volume_config['voltype']:
            kwargs['redundancy_count'] = (volume_config['voltype']
                                          ['redundancy_count'])
        else:
            g.log.error("Redunduncy Count not specified in the volume config")
            return False

        number_of_bricks = kwargs['disperse_count']

    elif volume_type == 'distributed-dispersed':
        if 'dist_count' in volume_config['voltype']:
            kwargs['dist_count'] = (volume_config['voltype']['dist_count'])
        else:
            g.log.error("Distibute Count not specified in the volume config")
            return False

        if 'disperse_count' in volume_config['voltype']:
            kwargs['disperse_count'] = (volume_config['voltype']
                                        ['disperse_count'])
        else:
            g.log.error("Disperse Count not specified in the volume config")
            return False

        if 'redundancy_count' in volume_config['voltype']:
            kwargs['redundancy_count'] = (volume_config['voltype']
                                          ['redundancy_count'])
        else:
            g.log.error("Redunduncy Count not specified in the volume config")
            return False

        number_of_bricks = (kwargs['dist_count'] * kwargs['disperse_count'])
    else:
        g.log.error("Invalid volume type defined in config")
        return False

    # Get transport type.
    # Bug fix: this previously assigned to a misspelled 'transpor_type',
    # leaving 'transport_type' unset on this path.
    # NOTE(review): transport_type is currently not forwarded to
    # volume_create -- TODO: pass it through once volume_create supports it.
    if 'transport' in volume_config['voltype']:
        transport_type = volume_config['voltype']['transport']
    else:
        transport_type = 'tcp'

    # get bricks_list
    # Imports below are deferred -- presumably to avoid circular imports
    # between the gluster library modules; TODO confirm.
    from glustolibs.gluster.lib_utils import form_bricks_list
    bricks_list = form_bricks_list(mnode=mnode, volname=volname,
                                   number_of_bricks=number_of_bricks,
                                   servers=servers,
                                   servers_info=all_servers_info)
    if not bricks_list:
        g.log.error("Number_of_bricks is greater than the unused bricks on "
                    "servers")
        return False

    # Create volume
    from glustolibs.gluster.volume_ops import volume_create
    ret, out, err = volume_create(mnode=mnode, volname=volname,
                                  bricks_list=bricks_list, force=force,
                                  **kwargs)
    if ret != 0:
        g.log.error("Unable to create volume %s" % volname)
        return False

    # Start Volume
    time.sleep(2)
    from glustolibs.gluster.volume_ops import volume_start
    ret = volume_start(mnode, volname)
    if not ret:
        g.log.error("volume start %s failed" % volname)
        return False

    # Create Tier volume
    if ('tier' in volume_config and 'create_tier' in volume_config['tier'] and
            volume_config['tier']['create_tier']):
        # get servers info for tier attach
        from glustolibs.gluster.tiering_ops import add_extra_servers_to_cluster
        if ('extra_servers' in volume_config and
                volume_config['extra_servers']):
            extra_servers = volume_config['extra_servers']
            ret = add_extra_servers_to_cluster(mnode, extra_servers)
            if not ret:
                return False
        else:
            extra_servers = volume_config['servers']

        # get the tier volume type
        if 'tier_type' in volume_config['tier']:
            if 'type' in volume_config['tier']['tier_type']:
                tier_volume_type = volume_config['tier']['tier_type']['type']
                dist = rep = 1
                if tier_volume_type == 'distributed':
                    if 'dist_count' in volume_config['tier']['tier_type']:
                        dist = (volume_config['tier']['tier_type']
                                ['dist_count'])

                elif tier_volume_type == 'replicated':
                    if 'replica_count' in volume_config['tier']['tier_type']:
                        rep = (volume_config['tier']['tier_type']
                               ['replica_count'])

                elif tier_volume_type == 'distributed-replicated':
                    if 'dist_count' in volume_config['tier']['tier_type']:
                        dist = (volume_config['tier']['tier_type']
                                ['dist_count'])
                    if 'replica_count' in volume_config['tier']['tier_type']:
                        rep = (volume_config['tier']['tier_type']
                               ['replica_count'])
        else:
            tier_volume_type = 'distributed'
            dist = 1
            rep = 1
        number_of_bricks = dist * rep

        # Attach Tier
        from glustolibs.gluster.tiering_ops import tier_attach
        ret, out, err = tier_attach(mnode=mnode, volname=volname,
                                    extra_servers=extra_servers,
                                    extra_servers_info=all_servers_info,
                                    num_bricks_to_add=number_of_bricks,
                                    replica=rep)
        if ret != 0:
            g.log.error("Unable to attach tier")
            return False

        # Allow the tier processes time to come up before verifying
        time.sleep(30)
        # Check if tier is running
        from glustolibs.gluster.tiering_ops import is_tier_process_running
        rc = True
        for server in extra_servers:
            ret = is_tier_process_running(server, volname)
            if not ret:
                g.log.error("Tier process not running on %s", server)
                rc = False
        if not rc:
            return False

    # Enable Quota
    if ('quota' in volume_config and 'enable' in volume_config['quota'] and
            volume_config['quota']['enable']):
        from glustolibs.gluster.quota_ops import enable_quota
        ret, _, _ = enable_quota(mnode=mnode, volname=volname)
        if ret != 0:
            g.log.error("Unable to set quota on the volume %s", volname)
            return False

        # Check if 'limit_usage' is defined; fall back to '/' and '100GB'
        if ('limit_usage' in volume_config['quota']):
            if ('path' in volume_config['quota']['limit_usage']):
                path = volume_config['quota']['limit_usage']['path']
            else:
                path = "/"

            if ('size' in volume_config['quota']['limit_usage']):
                size = volume_config['quota']['limit_usage']['size']
            else:
                size = "100GB"
        else:
            path = "/"
            size = "100GB"

        # Set quota_limit_usage
        from glustolibs.gluster.quota_ops import set_quota_limit_usage
        ret, _, _ = set_quota_limit_usage(mnode=mnode, volname=volname,
                                          path=path, limit=size)
        if ret != 0:
            g.log.error("Unable to set quota limit on the volume %s", volname)
            return False

        # Check if quota is enabled
        from glustolibs.gluster.quota_ops import is_quota_enabled
        ret = is_quota_enabled(mnode=mnode, volname=volname)
        if not ret:
            g.log.error("Quota not enabled on the volume %s", volname)
            return False

    # Enable USS
    if ('uss' in volume_config and 'enable' in volume_config['uss'] and
            volume_config['uss']['enable']):
        from glustolibs.gluster.uss_ops import enable_uss
        ret, out, err = enable_uss(mnode=mnode, volname=volname)
        if ret != 0:
            g.log.error("Unable to enable uss on the volume %s", volname)
            return False

        from glustolibs.gluster.uss_ops import is_uss_enabled
        ret = is_uss_enabled(mnode=mnode, volname=volname)
        if not ret:
            g.log.error("USS is not enabled on the volume %s", volname)
            return False

    # Set all the volume options:
    if 'options' in volume_config:
        volume_options = volume_config['options']
        from glustolibs.gluster.volume_ops import set_volume_options
        ret = set_volume_options(mnode=mnode, volname=volname,
                                 options=volume_options)
        if not ret:
            g.log.error("Unable to set few volume options")
            return False
    return True
+
+
def cleanup_volume(mnode, volname):
    """Remove a volume and everything attached to it.

    Deletes the snapshots of the volume, then stops (force) and deletes
    the volume itself. A volume that does not exist is treated as already
    cleaned up.

    Args:
        volname (str): volume name
        mnode (str): Node on which cmd has to be executed.

    Returns:
        bool: True, if volume is deleted successfully
              False, otherwise

    Example:
        cleanup_volume("abc.xyz.com", "testvol")
    """
    # Deferred imports -- presumably avoids circular imports between the
    # gluster library modules; TODO confirm.
    from glustolibs.gluster.snap_ops import snap_delete_by_volumename
    from glustolibs.gluster.volume_ops import volume_stop, volume_delete

    volinfo = get_volume_info(mnode, volname)
    if volinfo is None or volname not in volinfo:
        g.log.info("Volume %s does not exist in %s" % (volname, mnode))
        return True

    ret, _, _ = snap_delete_by_volumename(mnode, volname)
    if ret != 0:
        g.log.error("Failed to delete the snapshots in "
                    "volume %s" % volname)
        return False

    ret, _, _ = volume_stop(mnode, volname, force=True)
    if ret != 0:
        g.log.error("Failed to stop volume %s" % volname)
        return False

    if not volume_delete(mnode, volname):
        g.log.error("Unable to cleanup the volume %s" % volname)
        return False
    return True
+
+
def get_subvols(mnode, volname):
    """Gets the subvolumes in the given volume.

    Args:
        volname (str): volume name
        mnode (str): Node on which cmd has to be executed.

    Returns:
        dict: with empty list values for all keys, if volume doesn't exist
        dict: Dictionary of subvols with keys 'hot_tier_subvols',
            'cold_tier_subvols' and 'volume_subvols'; the value of each key
            is a list of subvols, each subvol being a list of brick paths.

    Example:
        get_subvols("abc.xyz.com", "testvol")
    """

    subvols = {
        'hot_tier_subvols': [],
        'cold_tier_subvols': [],
        'volume_subvols': []
    }
    volinfo = get_volume_info(mnode, volname)
    # Bug fix: previously, when volinfo was None, control fell through to
    # volinfo[volname][...] below and raised TypeError instead of returning
    # the documented empty-list dict.
    if volinfo is None:
        return subvols

    voltype = volinfo[volname]['typeStr']
    if voltype == 'Tier':
        # Get hot tier subvols
        hot_tier_type = (volinfo[volname]["bricks"]
                         ['hotBricks']['hotBrickType'])
        tmp = volinfo[volname]["bricks"]['hotBricks']["brick"]
        hot_tier_bricks = [x["name"] for x in tmp if "name" in x]
        if (hot_tier_type == 'Distribute'):
            for brick in hot_tier_bricks:
                subvols['hot_tier_subvols'].append([brick])

        elif (hot_tier_type == 'Replicate' or
              hot_tier_type == 'Distributed-Replicate'):
            # 'numberOfBricks' is formatted like "N x R = B"; take R
            rep_count = int((volinfo[volname]["bricks"]['hotBricks']
                             ['numberOfBricks']).split("=", 1)[0].
                            split("x")[1].strip())
            subvol_list = ([hot_tier_bricks[i:i + rep_count]
                            for i in range(0, len(hot_tier_bricks),
                                           rep_count)])
            subvols['hot_tier_subvols'] = subvol_list

        # Get cold tier subvols
        cold_tier_type = (volinfo[volname]["bricks"]['coldBricks']
                          ['coldBrickType'])
        tmp = volinfo[volname]["bricks"]['coldBricks']["brick"]
        cold_tier_bricks = [x["name"] for x in tmp if "name" in x]

        # Distribute volume
        if (cold_tier_type == 'Distribute'):
            for brick in cold_tier_bricks:
                subvols['cold_tier_subvols'].append([brick])

        # Replicate or Distribute-Replicate volume
        elif (cold_tier_type == 'Replicate' or
              cold_tier_type == 'Distributed-Replicate'):
            rep_count = int((volinfo[volname]["bricks"]['coldBricks']
                             ['numberOfBricks']).split("=", 1)[0].
                            split("x")[1].strip())
            subvol_list = ([cold_tier_bricks[i:i + rep_count]
                            for i in range(0, len(cold_tier_bricks),
                                           rep_count)])
            subvols['cold_tier_subvols'] = subvol_list

        # Disperse or Distribute-Disperse volume
        elif (cold_tier_type == 'Disperse' or
              cold_tier_type == 'Distributed-Disperse'):
            # 'numberOfBricks' looks like "N x (D + R) = B"; sum the
            # digits inside the parentheses to get the disperse count
            disp_count = sum([int(nums) for nums in
                              ((volinfo[volname]["bricks"]['coldBricks']
                                ['numberOfBricks']).split("x", 1)[1].
                               strip().split("=")[0].strip().strip("()").
                               split()) if nums.isdigit()])
            subvol_list = [cold_tier_bricks[i:i + disp_count]
                           for i in range(0, len(cold_tier_bricks),
                                          disp_count)]
            subvols['cold_tier_subvols'] = subvol_list
        return subvols

    # Non-tiered volume
    tmp = volinfo[volname]["bricks"]["brick"]
    bricks = [x["name"] for x in tmp if "name" in x]
    if voltype == 'Replicate' or voltype == 'Distributed-Replicate':
        rep_count = int(volinfo[volname]['replicaCount'])
        subvol_list = [bricks[i:i + rep_count]for i in range(0,
                                                             len(bricks),
                                                             rep_count)]
        subvols['volume_subvols'] = subvol_list
    elif voltype == 'Distribute':
        for brick in bricks:
            subvols['volume_subvols'].append([brick])

    elif (voltype == 'Disperse' or voltype == 'Distributed-Disperse'):
        disp_count = int(volinfo[volname]['disperseCount'])
        subvol_list = [bricks[i:i + disp_count]for i in range(0,
                                                              len(bricks),
                                                              disp_count)]
        subvols['volume_subvols'] = subvol_list
    return subvols
+
+
def is_tiered_volume(mnode, volname):
    """Check if volume is tiered volume.

    Args:
        mnode (str): Node on which commands are executed.
        volname (str): Name of the volume.

    Returns:
        bool : True if the volume is tiered volume. False otherwise
        NoneType: None if volume doesnot exist.
    """
    volinfo = get_volume_info(mnode, volname)
    if volinfo is None:
        g.log.error("Unable to get the volume info for volume %s" % volname)
        return None
    return volinfo[volname]['typeStr'] == 'Tier'
+
+
def is_distribute_volume(mnode, volname):
    """Check if volume is a plain distributed volume.

    For a tiered volume, both the hot and the cold tier must be plain
    distribute for this to be True.

    Args:
        mnode (str): Node on which commands are executed.
        volname (str): Name of the volume.

    Returns:
        bool : True if the volume is distributed volume. False otherwise,
            including when the volume type cannot be determined.
    """
    volume_type_info = get_volume_type_info(mnode, volname)
    if volume_type_info is None:
        g.log.error("Unable to check if the volume %s is distribute" % volname)
        return False

    if not volume_type_info['is_tier']:
        return (volume_type_info['volume_type_info']['typeStr'] ==
                'Distribute')

    hot_type = volume_type_info['hot_tier_type_info']['hotBrickType']
    cold_type = volume_type_info['cold_tier_type_info']['coldBrickType']
    return hot_type == 'Distribute' and cold_type == 'Distribute'
+
+
def get_volume_type_info(mnode, volname):
    """Returns volume type information for the specified volume.

    Args:
        mnode (str): Node on which commands are executed.
        volname (str): Name of the volume.

    Returns:
        dict : Dict containing the keys, values defining the volume type:
            Example:
                volume_type_info = {
                    'is_tier': False,
                    'hot_tier_type_info': {},
                    'cold_tier_type_info': {},
                    'volume_type_info': {
                        'typeStr': 'Disperse',
                        'replicaCount': 1,
                        'stripeCount': 1,
                        'disperseCount': '3',
                        'redundancyCount': '1'
                        }
                    }

                volume_type_info = {
                    'is_tier': True,
                    'hot_tier_type_info': {
                        'hotBrickType': 'Distribute',
                        'hotreplicaCount': 1
                        },
                    'cold_tier_type_info': {
                        'coldBrickType': 'Disperse',
                        'coldreplicaCount': 1,
                        'colddisperseCount': 3,
                        'numberOfBricks': 1
                        },
                    'volume_type_info': {}
                    }

        NoneType: None if volume does not exist or any other key errors.
    """
    volinfo = get_volume_info(mnode, volname)
    if volinfo is None:
        g.log.error("Unable to get the volume info for volume %s" % volname)
        return None

    volume_type_info = {
        'is_tier': False,
        'hot_tier_type_info': {},
        'cold_tier_type_info': {},
        'volume_type_info': {}
    }

    if volinfo[volname]['typeStr'] == 'Tier':
        volume_type_info['is_tier'] = True
        volume_type_info['hot_tier_type_info'] = (
            get_hot_tier_type_info(mnode, volname))
        volume_type_info['cold_tier_type_info'] = (
            get_cold_tier_type_info(mnode, volname))
        return volume_type_info

    # Non-tiered volume: pull the type fields straight from the volinfo;
    # missing fields are logged and reported as None.
    type_info = {}
    for key in ('typeStr', 'replicaCount', 'stripeCount', 'disperseCount',
                'redundancyCount'):
        if key in volinfo[volname]:
            type_info[key] = volinfo[volname][key]
        else:
            g.log.error("Unable to find key '%s' in the volume info for the "
                        "volume %s" % (key, volname))
            type_info[key] = None
    volume_type_info['volume_type_info'] = type_info

    return volume_type_info
+
+
def get_cold_tier_type_info(mnode, volname):
    """Returns cold tier type information for the specified volume.

    Args:
        mnode (str): Node on which commands are executed.
        volname (str): Name of the volume.

    Returns:
        dict : Dict containing the keys, values defining the cold tier type:
            Example:
                cold_tier_type_info = {
                    'coldBrickType': 'Disperse',
                    'coldreplicaCount': '1',
                    'colddisperseCount': '3',
                    'numberOfBricks': '3'
                    }
        NoneType: None if volume does not exist or is not a tiered volume or
            any other key errors.
    """
    volinfo = get_volume_info(mnode, volname)
    if volinfo is None:
        g.log.error("Unable to get the volume info for volume %s" % volname)
        return None

    if not is_tiered_volume(mnode, volname):
        g.log.error("Volume %s is not a tiered volume" % volname)
        return None

    cold_bricks = volinfo[volname]['bricks']['coldBricks']
    cold_tier_type_info = {}
    for key in ('coldBrickType', 'coldreplicaCount', 'colddisperseCount',
                'numberOfBricks'):
        if key not in cold_bricks:
            g.log.error("Unable to find key '%s' in the volume info for the "
                        "volume %s" % (key, volname))
            return None
        cold_tier_type_info[key] = cold_bricks[key]

    if 'Disperse' in cold_tier_type_info['coldBrickType']:
        # 'numberOfBricks' is presumably formatted "N x (D + R) = B";
        # the redundancy count is the third token inside the parentheses
        # -- TODO confirm against gluster volume info xml output.
        redundancy_count = (cold_tier_type_info['numberOfBricks'].
                            split("x", 1)[1].strip().
                            split("=")[0].strip().strip("()").split()[2])
        cold_tier_type_info['coldredundancyCount'] = redundancy_count

    return cold_tier_type_info
+
+
def get_hot_tier_type_info(mnode, volname):
    """Returns hot tier type information for the specified volume.

    Args:
        mnode (str): Node on which commands are executed.
        volname (str): Name of the volume.

    Returns:
        dict : Dict containing the keys, values defining the hot tier type:
            Example:
                hot_tier_type_info = {
                    'hotBrickType': 'Distribute',
                    'hotreplicaCount': '1'
                    }
        NoneType: None if volume does not exist or is not a tiered volume or
            any other key errors.
    """
    volinfo = get_volume_info(mnode, volname)
    if volinfo is None:
        g.log.error("Unable to get the volume info for volume %s" % volname)
        return None

    if not is_tiered_volume(mnode, volname):
        g.log.error("Volume %s is not a tiered volume" % volname)
        return None

    hot_bricks = volinfo[volname]['bricks']['hotBricks']
    hot_tier_type_info = {}
    for key in ('hotBrickType', 'hotreplicaCount'):
        if key not in hot_bricks:
            g.log.error("Unable to find key '%s' in the volume info for the "
                        "volume %s" % (key, volname))
            return None
        hot_tier_type_info[key] = hot_bricks[key]

    return hot_tier_type_info
+
+
def get_num_of_bricks_per_subvol(mnode, volname):
    """Returns number of bricks per subvol.

    Args:
        mnode (str): Node on which commands are executed.
        volname (str): Name of the volume.

    Returns:
        dict : Dict containing the keys, values defining
            number of bricks per subvol
            Example:
                num_of_bricks_per_subvol = {
                    'is_tier': False,
                    'hot_tier_num_of_bricks_per_subvol': None,
                    'cold_tier_num_of_bricks_per_subvol': None,
                    'volume_num_of_bricks_per_subvol': 2
                    }

                num_of_bricks_per_subvol = {
                    'is_tier': True,
                    'hot_tier_num_of_bricks_per_subvol': 3,
                    'cold_tier_num_of_bricks_per_subvol': 2,
                    'volume_num_of_bricks_per_subvol': None
                    }
    """
    bricks_per_subvol_dict = {
        'is_tier': False,
        'hot_tier_num_of_bricks_per_subvol': None,
        'cold_tier_num_of_bricks_per_subvol': None,
        'volume_num_of_bricks_per_subvol': None
    }

    subvols_dict = get_subvols(mnode, volname)
    if subvols_dict['volume_subvols']:
        # Non-tiered volume: every subvol has the same width
        bricks_per_subvol_dict['volume_num_of_bricks_per_subvol'] = (
            len(subvols_dict['volume_subvols'][0]))
        return bricks_per_subvol_dict

    if (subvols_dict['hot_tier_subvols'] and
            subvols_dict['cold_tier_subvols']):
        bricks_per_subvol_dict['is_tier'] = True
        bricks_per_subvol_dict['hot_tier_num_of_bricks_per_subvol'] = (
            len(subvols_dict['hot_tier_subvols'][0]))
        bricks_per_subvol_dict['cold_tier_num_of_bricks_per_subvol'] = (
            len(subvols_dict['cold_tier_subvols'][0]))

    return bricks_per_subvol_dict
+
+
def get_cold_tier_num_of_bricks_per_subvol(mnode, volname):
    """Returns number of bricks per subvol in cold tier.

    Args:
        mnode (str): Node on which commands are executed.
        volname (str): Name of the volume.

    Returns:
        int : Number of bricks per subvol on cold tier.
        NoneType: None if volume doesnot exist or not a tiered volume.
    """
    if not is_tiered_volume(mnode, volname):
        g.log.error("Volume %s is not a tiered volume" % volname)
        return None
    cold_subvols = get_subvols(mnode, volname)['cold_tier_subvols']
    return len(cold_subvols[0]) if cold_subvols else None
+
+
def get_hot_tier_num_of_bricks_per_subvol(mnode, volname):
    """Returns number of bricks per subvol in hot tier.

    Args:
        mnode (str): Node on which commands are executed.
        volname (str): Name of the volume.

    Returns:
        int : Number of bricks per subvol on hot tier.
        NoneType: None if volume doesnot exist or not a tiered volume.
    """
    if not is_tiered_volume(mnode, volname):
        g.log.error("Volume %s is not a tiered volume" % volname)
        return None
    hot_subvols = get_subvols(mnode, volname)['hot_tier_subvols']
    return len(hot_subvols[0]) if hot_subvols else None
+
+
def get_replica_count(mnode, volname):
    """Get the replica count of the volume.

    Args:
        mnode (str): Node on which commands are executed.
        volname (str): Name of the volume.

    Returns:
        dict : Dict contain keys, values defining Replica count of the volume.
            Example:
                replica_count_info = {
                    'is_tier': False,
                    'hot_tier_replica_count': None,
                    'cold_tier_replica_count': None,
                    'volume_replica_count': 3
                    }
                replica_count_info = {
                    'is_tier': True,
                    'hot_tier_replica_count': 2,
                    'cold_tier_replica_count': 3,
                    'volume_replica_count': None
                    }
        NoneType: None if it is parse failure.
    """
    vol_type_info = get_volume_type_info(mnode, volname)
    if vol_type_info is None:
        g.log.error("Unable to get the replica count info for the volume %s" %
                    volname)
        return None

    replica_count_info = {
        'is_tier': vol_type_info['is_tier'],
        'hot_tier_replica_count': None,
        'cold_tier_replica_count': None,
        'volume_replica_count': None
    }

    if replica_count_info['is_tier']:
        replica_count_info['hot_tier_replica_count'] = (
            vol_type_info['hot_tier_type_info']['hotreplicaCount'])
        replica_count_info['cold_tier_replica_count'] = (
            vol_type_info['cold_tier_type_info']['coldreplicaCount'])
    else:
        replica_count_info['volume_replica_count'] = (
            vol_type_info['volume_type_info']['replicaCount'])

    return replica_count_info
+
+
def get_cold_tier_replica_count(mnode, volname):
    """Get the replica count of cold tier.

    Args:
        mnode (str): Node on which commands are executed.
        volname (str): Name of the volume.

    Returns:
        int : Replica count of the cold tier.
        NoneType: None if volume does not exist or not a tiered volume.
    """
    if not is_tiered_volume(mnode, volname):
        return None
    volinfo = get_volume_info(mnode, volname)
    return volinfo[volname]["bricks"]['coldBricks']['coldreplicaCount']
+
+
def get_hot_tier_replica_count(mnode, volname):
    """Get the replica count of hot tier.

    Args:
        mnode (str): Node on which commands are executed.
        volname (str): Name of the volume.

    Returns:
        int : Replica count of the hot tier.
        NoneType: None if volume does not exist or not a tiered volume.
    """
    if not is_tiered_volume(mnode, volname):
        return None
    volinfo = get_volume_info(mnode, volname)
    return volinfo[volname]["bricks"]['hotBricks']['hotreplicaCount']
+
+
def get_disperse_count(mnode, volname):
    """Get the disperse count of the volume.

    Args:
        mnode (str): Node on which commands are executed.
        volname (str): Name of the volume.

    Returns:
        dict : Dict contain keys, values defining Disperse count of the
            volume.
            Example:
                disperse_count_info = {
                    'is_tier': False,
                    'cold_tier_disperse_count': None,
                    'volume_disperse_count': 3
                    }
                disperse_count_info = {
                    'is_tier': True,
                    'cold_tier_disperse_count': 3,
                    'volume_disperse_count': None
                    }
        None: If it is non dispersed volume.
    """
    vol_type_info = get_volume_type_info(mnode, volname)
    if vol_type_info is None:
        g.log.error("Unable to get the disperse count info for the volume %s" %
                    volname)
        return None

    disperse_count_info = {
        'is_tier': vol_type_info['is_tier'],
        'cold_tier_disperse_count': None,
        'volume_disperse_count': None
    }

    if disperse_count_info['is_tier']:
        # Only the cold tier can be dispersed
        disperse_count_info['cold_tier_disperse_count'] = (
            vol_type_info['cold_tier_type_info']['colddisperseCount'])
    else:
        disperse_count_info['volume_disperse_count'] = (
            vol_type_info['volume_type_info']['disperseCount'])

    return disperse_count_info
+
+
def get_cold_tier_disperse_count(mnode, volname):
    """Get the disperse count of cold tier.

    Args:
        mnode (str): Node on which commands are executed.
        volname (str): Name of the volume.

    Returns:
        int : disperse count of the cold tier.
        NoneType: None if volume does not exist or not a tiered volume.
    """
    if not is_tiered_volume(mnode, volname):
        return None
    volinfo = get_volume_info(mnode, volname)
    return volinfo[volname]["bricks"]['coldBricks']['colddisperseCount']
diff --git a/glustolibs-gluster/glustolibs/gluster/windows_libs.py b/glustolibs-gluster/glustolibs/gluster/windows_libs.py
new file mode 100644
index 000000000..4f6cd9a65
--- /dev/null
+++ b/glustolibs-gluster/glustolibs/gluster/windows_libs.py
@@ -0,0 +1,152 @@
+#!/usr/bin/env python
+# Copyright (C) 2016 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+
+"""
+ Description: Module for windows utility functions
+"""
+from glusto.core import Glusto as g
+
+
+def powershell(command):
+ """wrap a command in powershell call
+
+ Args:
+ command (str): the command to wrap with powershell syntax
+
+ Returns:
+ string with complete powershell command
+ """
+ ps_command = ("powershell -InputFormat Text -OutputFormat Text "
+ "-Command '& {%s}'" % command)
+
+ return ps_command
+
+
+def delete_all_windows_mounts(clients_info):
+ """Deletes all the mounts on the windows clients.
+
+ Args:
+ clients_info (list): List of windows clients info.
+
+ If any item in the clients info doesn't have the 'platform', it is
+ assumed that it is not windows client and would be ignored.
+
+ If any item in the clients info doesn't have the 'super_user' key,
+ by default we assume the 'super_user' for windows client to be 'Admin'.
+
+ For all the windows clients, the 'platform' key should be specified
+ with value 'windows'.
+
+ Example:
+ clients_info = {
+ 'def.lab.eng.xyz.com': {
+ 'host': 'def.lab.eng.xyz.com',
+ 'super_user': 'Admin',
+ 'platform': 'windows'
+ },
+
+ 'ghi.lab.eng.blr.redhat.com': {
+ 'host': 'ghi.lab.eng.xyz.com',
+ }
+ }
+
+ Returns:
+ bool : True if deleting all the mounts on all clients is successful.
+ False otherwise.
+ """
+ rc = True
+ cmd = powershell("net use * /D /Y")
+ windows_clients_info = {}
+ for client in clients_info:
+ if ('platform' in clients_info[client] and
+ clients_info[client]['platform'] == 'windows'):
+ windows_clients_info[client] = clients_info[client]
+
+ for client in windows_clients_info:
+ if 'host' in windows_clients_info[client]:
+ host = windows_clients_info[client]['host']
+ else:
+ host = client
+ if 'super_user' in windows_clients_info[client]:
+ user = windows_clients_info[client]['super_user']
+ else:
+ user = 'Admin'
+ ret, out, err = g.run(host, cmd, user)
+ if ret != 0:
+ rc = False
+
+ elif ret == 0:
+ if not (('deleted successfully' in out) or
+ ('command completed successfully' in out) or
+ ('There are no entries in the list' in out)):
+ rc = False
+ return rc
+
+
+def list_all_windows_mounts(clients_info):
+ """Lists all the mounts on the windows clients.
+
+ Args:
+ clients_info (list): List of windows clients info.
+
+ If any item in the clients info doesn't have the 'platform', it is
+ assumed that it is not windows client and would be ignored.
+
+ If any item in the clients info doesn't have the 'super_user' key,
+ by default we assume the 'super_user' for windows client to be 'Admin'.
+
+ For all the windows clients, the 'platform' key should be specified
+ with value 'windows'.
+
+ Example:
+ clients_info = {
+ 'def.lab.eng.xyz.com': {
+ 'host': 'def.lab.eng.xyz.com',
+ 'super_user': 'Admin',
+ 'platform': 'windows'
+ },
+
+ 'ghi.lab.eng.blr.redhat.com': {
+ 'host': 'ghi.lab.eng.xyz.com',
+ }
+ }
+
+ Returns:
+ bool : True if listing all the mounts on all clients is successful.
+ False otherwise.
+ """
+ rc = True
+ cmd = powershell("net use")
+ windows_clients_info = {}
+ for client in clients_info:
+ if ('platform' in clients_info[client] and
+ clients_info[client]['platform'] == 'windows'):
+ windows_clients_info[client] = clients_info[client]
+ for client in windows_clients_info:
+ if 'host' in windows_clients_info[client]:
+ host = windows_clients_info[client]['host']
+ else:
+ host = client
+ if 'super_user' in windows_clients_info[client]:
+ user = windows_clients_info[client]['super_user']
+ else:
+ user = 'Admin'
+ ret, out, err = g.run(host, cmd, user)
+ if ret != 0:
+ rc = False
+ return rc