Diffstat (limited to 'glustolibs-gluster/glustolibs')
-rw-r--r--              glustolibs-gluster/glustolibs/gluster/brick_libs.py         |  350
-rw-r--r--              glustolibs-gluster/glustolibs/gluster/brickdir.py           |   58
-rw-r--r--              glustolibs-gluster/glustolibs/gluster/brickmux_libs.py      |    5
-rwxr-xr-x              glustolibs-gluster/glustolibs/gluster/brickmux_ops.py       |   12
-rw-r--r--              glustolibs-gluster/glustolibs/gluster/ctdb_libs.py          |  142
-rw-r--r--              glustolibs-gluster/glustolibs/gluster/ctdb_ops.py           |  478
-rw-r--r--              glustolibs-gluster/glustolibs/gluster/dht_test_utils.py     |   46
-rwxr-xr-x              glustolibs-gluster/glustolibs/gluster/geo_rep_ops.py        |    2
-rwxr-xr-x[-rw-r--r--]  glustolibs-gluster/glustolibs/gluster/gluster_base_class.py |  350
-rw-r--r--              glustolibs-gluster/glustolibs/gluster/gluster_init.py       |   57
-rw-r--r--              glustolibs-gluster/glustolibs/gluster/glusterdir.py         |   17
-rwxr-xr-x              glustolibs-gluster/glustolibs/gluster/glusterfile.py        |  203
-rwxr-xr-x              glustolibs-gluster/glustolibs/gluster/heal_libs.py          |   92
-rw-r--r--              glustolibs-gluster/glustolibs/gluster/layout.py             |   31
-rwxr-xr-x              glustolibs-gluster/glustolibs/gluster/lib_utils.py          |   70
-rwxr-xr-x              glustolibs-gluster/glustolibs/gluster/mount_ops.py          |    4
-rwxr-xr-x[-rw-r--r--]  glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py   |  256
-rwxr-xr-x[-rw-r--r--]  glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py    |  177
-rw-r--r--              glustolibs-gluster/glustolibs/gluster/rebalance_ops.py      |   75
-rw-r--r--              glustolibs-gluster/glustolibs/gluster/snap_ops.py           |   28
-rw-r--r--              glustolibs-gluster/glustolibs/gluster/ssl_ops.py            |  226
-rw-r--r--              glustolibs-gluster/glustolibs/gluster/tiering_ops.py        | 1023
-rw-r--r--              glustolibs-gluster/glustolibs/gluster/volume_libs.py        |  954
-rw-r--r--              glustolibs-gluster/glustolibs/gluster/volume_ops.py         |  239
24 files changed, 2157 insertions, 2738 deletions
diff --git a/glustolibs-gluster/glustolibs/gluster/brick_libs.py b/glustolibs-gluster/glustolibs/gluster/brick_libs.py
index c3e5afed8..b92832dd1 100644
--- a/glustolibs-gluster/glustolibs/gluster/brick_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/brick_libs.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2015-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -17,20 +17,20 @@
""" Description: Module for gluster brick related helper functions. """
import random
-from math import ceil
+from math import floor
import time
from glusto.core import Glusto as g
from glustolibs.gluster.brickmux_ops import is_brick_mux_enabled
+from glustolibs.gluster.gluster_init import restart_glusterd
from glustolibs.gluster.volume_ops import (get_volume_info, get_volume_status)
-from glustolibs.gluster.volume_libs import (get_subvols, is_tiered_volume,
+from glustolibs.gluster.volume_libs import (get_subvols,
get_client_quorum_info,
get_volume_type_info)
+from glustolibs.gluster.lib_utils import (get_extended_attributes_info)
def get_all_bricks(mnode, volname):
"""Get list of all the bricks of the specified volume.
- If the volume is 'Tier' volume, the list will contain both
- 'hot tier' and 'cold tier' bricks.
Args:
mnode (str): Node on which command has to be executed
@@ -45,19 +45,7 @@ def get_all_bricks(mnode, volname):
g.log.error("Unable to get the volinfo of %s.", volname)
return None
- if 'Tier' in volinfo[volname]['typeStr']:
- # Get bricks from hot-tier in case of Tier volume
- hot_tier_bricks = get_hot_tier_bricks(mnode, volname)
- if hot_tier_bricks is None:
- return None
- # Get cold-tier bricks in case of Tier volume
- cold_tier_bricks = get_cold_tier_bricks(mnode, volname)
- if cold_tier_bricks is None:
- return None
-
- return hot_tier_bricks + cold_tier_bricks
-
- # Get bricks from a non Tier volume
+ # Get bricks from a volume
all_bricks = []
if 'bricks' in volinfo[volname]:
if 'brick' in volinfo[volname]['bricks']:
@@ -76,88 +64,6 @@ def get_all_bricks(mnode, volname):
return None
-def get_hot_tier_bricks(mnode, volname):
- """Get list of hot-tier bricks of the specified volume
-
- Args:
- mnode (str): Node on which command has to be executed
- volname (str): Name of the volume
-
- Returns:
- list : List of hot-tier bricks of the volume on Success.
- NoneType: None on failure.
- """
- volinfo = get_volume_info(mnode, volname)
- if volinfo is None:
- g.log.error("Unable to get the volinfo of %s.", volname)
- return None
-
- if 'Tier' not in volinfo[volname]['typeStr']:
- g.log.error("Volume %s is not a tiered volume", volname)
- return None
-
- hot_tier_bricks = []
- if 'bricks' in volinfo[volname]:
- if 'hotBricks' in volinfo[volname]['bricks']:
- if 'brick' in volinfo[volname]['bricks']['hotBricks']:
- for brick in volinfo[volname]['bricks']['hotBricks']['brick']:
- if 'name' in brick:
- hot_tier_bricks.append(brick['name'])
- else:
- g.log.error("brick %s doesn't have the key 'name' "
- "for the volume: %s", brick, volname)
- return None
- else:
- g.log.error("Bricks not found in hotBricks section of volume "
- "info for the volume %s", volname)
- return None
- return hot_tier_bricks
- else:
- g.log.error("Bricks not found for the volume %s", volname)
- return None
-
-
-def get_cold_tier_bricks(mnode, volname):
- """Get list of cold-tier bricks of the specified volume
-
- Args:
- mnode (str): Node on which command has to be executed
- volname (str): Name of the volume
-
- Returns:
- list : List of cold-tier bricks of the volume on Success.
- NoneType: None on failure.
- """
- volinfo = get_volume_info(mnode, volname)
- if volinfo is None:
- g.log.error("Unable to get the volinfo of %s.", volname)
- return None
-
- if 'Tier' not in volinfo[volname]['typeStr']:
- g.log.error("Volume %s is not a tiered volume", volname)
- return None
-
- cold_tier_bricks = []
- if 'bricks' in volinfo[volname]:
- if 'coldBricks' in volinfo[volname]['bricks']:
- if 'brick' in volinfo[volname]['bricks']['coldBricks']:
- for brick in volinfo[volname]['bricks']['coldBricks']['brick']:
- if 'name' in brick:
- cold_tier_bricks.append(brick['name'])
- else:
- g.log.error("brick %s doesn't have the key 'name' "
- "for the volume: %s", brick, volname)
- return None
- else:
- g.log.error("Bricks not found in coldBricks section of volume "
- "info for the volume %s", volname)
- return None
- return cold_tier_bricks
- else:
- g.log.error("Bricks not found for the volume %s", volname)
- return None
-
-
def bring_bricks_offline(volname, bricks_list,
bring_bricks_offline_methods=None):
"""Bring the bricks specified in the bricks_list offline.
@@ -304,10 +210,9 @@ def bring_bricks_online(mnode, volname, bricks_list,
"the bricks '%s' online", volname, bricks_list)
elif bring_brick_online_method == 'glusterd_restart':
- bring_brick_online_command = "service glusterd restart"
brick_node, _ = brick.split(":")
- ret, _, _ = g.run(brick_node, bring_brick_online_command)
- if ret != 0:
+ ret = restart_glusterd(brick_node)
+ if not ret:
g.log.error("Unable to restart glusterd on node %s",
brick_node)
_rc = False
@@ -504,41 +409,29 @@ def select_bricks_to_bring_offline(mnode, volname):
being empty list.
Example:
brick_to_bring_offline = {
- 'is_tier': False,
- 'hot_tier_bricks': [],
- 'cold_tier_bricks': [],
'volume_bricks': []
}
"""
# Defaulting the values to empty list
bricks_to_bring_offline = {
- 'is_tier': False,
- 'hot_tier_bricks': [],
- 'cold_tier_bricks': [],
'volume_bricks': []
- }
+ }
volinfo = get_volume_info(mnode, volname)
if volinfo is None:
g.log.error("Unable to get the volume info for volume %s", volname)
return bricks_to_bring_offline
- if is_tiered_volume(mnode, volname):
- bricks_to_bring_offline['is_tier'] = True
- # Select bricks from tiered volume.
- bricks_to_bring_offline = (
- select_tier_volume_bricks_to_bring_offline(mnode, volname))
- else:
- # Select bricks from non-tiered volume.
- volume_bricks = select_volume_bricks_to_bring_offline(mnode, volname)
- bricks_to_bring_offline['volume_bricks'] = volume_bricks
+ # Select bricks from the volume.
+ volume_bricks = select_volume_bricks_to_bring_offline(mnode, volname)
+ bricks_to_bring_offline['volume_bricks'] = volume_bricks
return bricks_to_bring_offline
def select_volume_bricks_to_bring_offline(mnode, volname):
"""Randomly selects bricks to bring offline without affecting the cluster
- from a non-tiered volume.
+ from a volume.
Args:
mnode (str): Node on which commands will be executed.
@@ -546,14 +439,10 @@ def select_volume_bricks_to_bring_offline(mnode, volname):
Returns:
        list: On success returns list of bricks that can be brought offline.
- If volume doesn't exist or is a tiered volume returns empty list
+ If volume doesn't exist returns empty list
"""
volume_bricks_to_bring_offline = []
- # Check if volume is tiered
- if is_tiered_volume(mnode, volname):
- return volume_bricks_to_bring_offline
-
# get volume type
volume_type_info = get_volume_type_info(mnode, volname)
volume_type = volume_type_info['volume_type_info']['typeStr']
@@ -598,162 +487,6 @@ def select_volume_bricks_to_bring_offline(mnode, volname):
return volume_bricks_to_bring_offline
-def select_tier_volume_bricks_to_bring_offline(mnode, volname):
- """Randomly selects bricks to bring offline without affecting the cluster
- from a tiered volume.
-
- Args:
- mnode (str): Node on which commands will be executed.
- volname (str): Name of the volume.
-
- Returns:
- dict: On success returns dict. Value of each key is list of bricks to
- bring offline.
- If volume doesn't exist or is not a tiered volume returns dict
- with value of each item being empty list.
- Example:
- brick_to_bring_offline = {
- 'hot_tier_bricks': [],
- 'cold_tier_bricks': [],
- }
- """
- # Defaulting the values to empty list
- bricks_to_bring_offline = {
- 'hot_tier_bricks': [],
- 'cold_tier_bricks': [],
- }
-
- volinfo = get_volume_info(mnode, volname)
- if volinfo is None:
- g.log.error("Unable to get the volume info for volume %s", volname)
- return bricks_to_bring_offline
-
- if is_tiered_volume(mnode, volname):
- # Select bricks from both hot tier and cold tier.
- hot_tier_bricks = (select_hot_tier_bricks_to_bring_offline
- (mnode, volname))
- cold_tier_bricks = (select_cold_tier_bricks_to_bring_offline
- (mnode, volname))
- bricks_to_bring_offline['hot_tier_bricks'] = hot_tier_bricks
- bricks_to_bring_offline['cold_tier_bricks'] = cold_tier_bricks
- return bricks_to_bring_offline
-
-
-def select_hot_tier_bricks_to_bring_offline(mnode, volname):
- """Randomly selects bricks to bring offline without affecting the cluster
- from a hot tier.
-
- Args:
- mnode (str): Node on which commands will be executed.
- volname (str): Name of the volume.
-
- Returns:
- list: On success returns list of bricks that can be brough offline
- from hot tier. If volume doesn't exist or is a non tiered volume
- returns empty list.
- """
- hot_tier_bricks_to_bring_offline = []
-
- # Check if volume is tiered
- if not is_tiered_volume(mnode, volname):
- return hot_tier_bricks_to_bring_offline
-
- # get volume type
- volume_type_info = get_volume_type_info(mnode, volname)
- hot_tier_type = volume_type_info['hot_tier_type_info']['hotBrickType']
-
- # get subvols
- subvols_dict = get_subvols(mnode, volname)
- hot_tier_subvols = subvols_dict['hot_tier_subvols']
-
- # select bricks from distribute volume
- if hot_tier_type == 'Distribute':
- hot_tier_bricks_to_bring_offline = []
-
- # select bricks from replicated, distributed-replicated volume
- if (hot_tier_type == 'Replicate' or
- hot_tier_type == 'Distributed-Replicate'):
- # Get replica count
- hot_tier_replica_count = (volume_type_info
- ['hot_tier_type_info']['hotreplicaCount'])
-
- # Get quorum info
- quorum_info = get_client_quorum_info(mnode, volname)
- hot_tier_quorum_info = quorum_info['hot_tier_quorum_info']
-
- # Get list of bricks to bring offline
- hot_tier_bricks_to_bring_offline = (
- get_bricks_to_bring_offline_from_replicated_volume(
- hot_tier_subvols, hot_tier_replica_count,
- hot_tier_quorum_info))
-
- return hot_tier_bricks_to_bring_offline
-
-
-def select_cold_tier_bricks_to_bring_offline(mnode, volname):
- """Randomly selects bricks to bring offline without affecting the cluster
- from a cold tier.
-
- Args:
- mnode (str): Node on which commands will be executed.
- volname (str): Name of the volume.
-
- Returns:
- list: On success returns list of bricks that can be brough offline
- from cold tier. If volume doesn't exist or is a non tiered volume
- returns empty list.
- """
- cold_tier_bricks_to_bring_offline = []
-
- # Check if volume is tiered
- if not is_tiered_volume(mnode, volname):
- return cold_tier_bricks_to_bring_offline
-
- # get volume type
- volume_type_info = get_volume_type_info(mnode, volname)
- cold_tier_type = volume_type_info['cold_tier_type_info']['coldBrickType']
-
- # get subvols
- subvols_dict = get_subvols(mnode, volname)
- cold_tier_subvols = subvols_dict['cold_tier_subvols']
-
- # select bricks from distribute volume
- if cold_tier_type == 'Distribute':
- cold_tier_bricks_to_bring_offline = []
-
- # select bricks from replicated, distributed-replicated volume
- elif (cold_tier_type == 'Replicate' or
- cold_tier_type == 'Distributed-Replicate'):
- # Get replica count
- cold_tier_replica_count = (volume_type_info['cold_tier_type_info']
- ['coldreplicaCount'])
-
- # Get quorum info
- quorum_info = get_client_quorum_info(mnode, volname)
- cold_tier_quorum_info = quorum_info['cold_tier_quorum_info']
-
- # Get list of bricks to bring offline
- cold_tier_bricks_to_bring_offline = (
- get_bricks_to_bring_offline_from_replicated_volume(
- cold_tier_subvols, cold_tier_replica_count,
- cold_tier_quorum_info))
-
- # select bricks from Disperse, Distribured-Disperse volume
- elif (cold_tier_type == 'Disperse' or
- cold_tier_type == 'Distributed-Disperse'):
-
- # Get redundancy count
- cold_tier_redundancy_count = (volume_type_info['cold_tier_type_info']
- ['coldredundancyCount'])
-
- # Get list of bricks to bring offline
- cold_tier_bricks_to_bring_offline = (
- get_bricks_to_bring_offline_from_disperse_volume(
- cold_tier_subvols, cold_tier_redundancy_count))
-
- return cold_tier_bricks_to_bring_offline
-
-
def get_bricks_to_bring_offline_from_replicated_volume(subvols_list,
replica_count,
quorum_info):
@@ -761,13 +494,10 @@ def get_bricks_to_bring_offline_from_replicated_volume(subvols_list,
for a replicated volume.
Args:
- subvols_list: list of subvols. It can be volume_subvols,
- hot_tier_subvols or cold_tier_subvols.
+ subvols_list: list of subvols.
For example:
subvols = volume_libs.get_subvols(mnode, volname)
volume_subvols = subvols_dict['volume_subvols']
- hot_tier_subvols = subvols_dict['hot_tier_subvols']
- cold_tier_subvols = subvols_dict['cold_tier_subvols']
replica_count: Replica count of a Replicate or Distributed-Replicate
volume.
quorum_info: dict containing quorum info of the volume. The dict should
@@ -776,8 +506,6 @@ def get_bricks_to_bring_offline_from_replicated_volume(subvols_list,
For example:
quorum_dict = get_client_quorum_info(mnode, volname)
volume_quorum_info = quorum_info['volume_quorum_info']
- hot_tier_quorum_info = quorum_info['hot_tier_quorum_info']
- cold_tier_quorum_info = quorum_info['cold_tier_quorum_info']
Returns:
list: List of bricks that can be brought offline without affecting the
@@ -805,7 +533,7 @@ def get_bricks_to_bring_offline_from_replicated_volume(subvols_list,
offline_bricks_limit = int(replica_count) - int(quorum_count)
elif 'auto' in quorum_type:
- offline_bricks_limit = ceil(int(replica_count) / 2)
+ offline_bricks_limit = floor(int(replica_count) // 2)
elif quorum_type is None:
offline_bricks_limit = int(replica_count) - 1
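
A quick sanity check of the ceil-to-floor change above, assuming 'auto' client quorum requires ceil(replica_count / 2) bricks to stay up:

    # replica 3 with auto quorum: quorum needs 2 bricks up, so at most
    # 3 - 2 = 1 brick may safely go offline. Under Python 3's true
    # division, ceil(3 / 2) == 2 would wrongly allow two bricks offline;
    # floor(3 // 2) == 1 matches the quorum requirement.
    from math import floor
    replica_count = 3
    assert floor(replica_count // 2) == 1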
@@ -835,18 +563,15 @@ def get_bricks_to_bring_offline_from_disperse_volume(subvols_list,
for a disperse volume.
Args:
- subvols_list: list of subvols. It can be volume_subvols,
- hot_tier_subvols or cold_tier_subvols.
+ subvols_list: list of subvols.
For example:
subvols = volume_libs.get_subvols(mnode, volname)
volume_subvols = subvols_dict['volume_subvols']
- hot_tier_subvols = subvols_dict['hot_tier_subvols']
- cold_tier_subvols = subvols_dict['cold_tier_subvols']
redundancy_count: Redundancy count of a Disperse or
Distributed-Disperse volume.
Returns:
- list: List of bricks that can be brought offline without affecting the
+ list: List of bricks that can be brought offline without affecting the
cluster.On any failure return empty list.
"""
list_of_bricks_to_bring_offline = []
@@ -927,3 +652,42 @@ def is_broken_symlinks_present_on_bricks(mnode, volname):
"%s on node %s.", (brick_path, brick_node))
return True
return False
+
+
+def validate_xattr_on_all_bricks(bricks_list, file_path, xattr):
+ """Checks if the xattr of the file/dir is same on all bricks.
+
+ Args:
+ bricks_list (list): List of bricks.
+ file_path (str): The path to the file/dir.
+ xattr (str): The file attribute to get from file.
+
+ Returns:
+        True if the xattr is the same on all the bricks. False otherwise.
+
+ Example:
+ validate_xattr_on_all_bricks("bricks_list",
+ "dir1/file1",
+ "xattr")
+ """
+
+ time_counter = 250
+ g.log.info("The heal monitoring timeout is : %d minutes",
+ (time_counter // 60))
+ while time_counter > 0:
+ attr_vals = {}
+ for brick in bricks_list:
+ brick_node, brick_path = brick.split(":")
+ attr_vals[brick] = (
+ get_extended_attributes_info(brick_node,
+ ["{0}/{1}".format(brick_path,
+ file_path)],
+ attr_name=xattr))
+ ec_version_vals = [list(val.values())[0][xattr] for val in
+ list(attr_vals.values())]
+ if len(set(ec_version_vals)) == 1:
+ return True
+ else:
+ time.sleep(120)
+ time_counter -= 120
+ return False
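
A hypothetical usage sketch of the new helper, polling until an EC version xattr converges on every brick (the node, volume and xattr names below are illustrative):

    bricks = get_all_bricks("server1.example.com", "testvol")
    ret = validate_xattr_on_all_bricks(bricks, "dir1/file1",
                                       "trusted.ec.version")
    if not ret:
        g.log.error("xattr did not converge across bricks in time")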
diff --git a/glustolibs-gluster/glustolibs/gluster/brickdir.py b/glustolibs-gluster/glustolibs/gluster/brickdir.py
index ffc868b93..e864e8247 100644
--- a/glustolibs-gluster/glustolibs/gluster/brickdir.py
+++ b/glustolibs-gluster/glustolibs/gluster/brickdir.py
@@ -20,7 +20,6 @@
import os
from glusto.core import Glusto as g
-from glustolibs.gluster.gluster_init import get_gluster_version
from glustolibs.gluster.volume_libs import get_volume_type
@@ -81,11 +80,12 @@ def get_hashrange(brickdir_path):
"""
(host, _) = brickdir_path.split(':')
- gluster_version = get_gluster_version(host)
- # Check for the Gluster version and then volume type
- """If the GLuster version is lower than 6.0, the hash range
- can be calculated for all volume types"""
- if gluster_version < 6.0:
+ ret = get_volume_type(brickdir_path)
+ if ret in ('Replicate', 'Disperse', 'Arbiter'):
+ g.log.info("Cannot find hash-range for Replicate/Disperse/Arbiter"
+ " volume type on Gluster 6.0 and higher.")
+ return "Skipping for Replicate/Disperse/Arbiter volume type"
+ else:
ret = check_hashrange(brickdir_path)
hash_range_low = ret[0]
hash_range_high = ret[1]
@@ -94,24 +94,6 @@ def get_hashrange(brickdir_path):
else:
g.log.error("Could not get hashrange")
return None
- elif gluster_version >= 6.0:
- ret = get_volume_type(brickdir_path)
- if ret in ('Replicate', 'Disperse', 'Arbiter'):
- g.log.info("Cannot find hash-range for Replicate/Disperse/Arbiter"
- " volume type on Gluster 6.0 and higher.")
- return "Skipping for Replicate/Disperse/Arbiter volume type"
- else:
- ret = check_hashrange(brickdir_path)
- hash_range_low = ret[0]
- hash_range_high = ret[1]
- if ret is not None:
- return (hash_range_low, hash_range_high)
- else:
- g.log.error("Could not get hashrange")
- return None
- else:
- g.log.info("Failed to get hash range")
- return None
def file_exists(host, filename):
@@ -149,22 +131,14 @@ class BrickDir(object):
def _get_hashrange(self):
"""get the hash range for a brick from a remote system"""
- gluster_version = get_gluster_version(self._host)
- if gluster_version < 6.0:
+ ret = get_volume_type(self._path)
+ if ret in ('Replicate', 'Disperse', 'Arbiter'):
+ g.log.info("Cannot find hash-range as the volume type under"
+ " test is Replicate/Disperse/Arbiter")
+ else:
self._hashrange = get_hashrange(self._path)
self._hashrange_low = self._hashrange[0]
self._hashrange_high = self._hashrange[1]
- elif gluster_version >= 6.0:
- ret = get_volume_type(self._path)
- if ret in ('Replicate', 'Disperse', 'Arbiter'):
- g.log.info("Cannot find hash-range as the volume type under"
- " test is Replicate/Disperse/Arbiter")
- else:
- self._hashrange = get_hashrange(self._path)
- self._hashrange_low = self._hashrange[0]
- self._hashrange_high = self._hashrange[1]
- else:
- g.log.info("Failed to get hashrange")
@property
def path(self):
@@ -207,12 +181,10 @@ class BrickDir(object):
if self.hashrange is None or self._hashrange_high is None:
self._get_hashrange()
if self._get_hashrange() is None:
- gluster_version = get_gluster_version(self._host)
- if gluster_version >= 6.0:
- ret = get_volume_type(self._path)
- if ret in ('Replicate', 'Disperse', 'Arbiter'):
- g.log.info("Cannot find hash-range as the volume type"
- " under test is Replicate/Disperse/Arbiter")
+ ret = get_volume_type(self._path)
+ if ret in ('Replicate', 'Disperse', 'Arbiter'):
+ g.log.info("Cannot find hash-range as the volume type"
+ " under test is Replicate/Disperse/Arbiter")
else:
return self._hashrange_high
diff --git a/glustolibs-gluster/glustolibs/gluster/brickmux_libs.py b/glustolibs-gluster/glustolibs/gluster/brickmux_libs.py
index 1206b4682..cb82d8434 100644
--- a/glustolibs-gluster/glustolibs/gluster/brickmux_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/brickmux_libs.py
@@ -66,7 +66,10 @@ def get_all_bricks_from_servers_multivol(servers, servers_info):
for item in list(zip_longest(*list(servers_bricks.values()))):
for brick in item:
- server = server_ip.next()
+ try:
+ server = server_ip.next() # Python 2
+ except AttributeError:
+ server = next(server_ip) # Python 3
if brick:
bricks_list.append(server + ":" + brick)
brickCount += 1
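
The try/except above keeps the round-robin assignment working on both interpreters. A minimal sketch of the equivalent portable idiom, assuming server_ip is an itertools.cycle over the server list as the surrounding code suggests:

    from itertools import cycle
    server_ip = cycle(['10.0.0.1', '10.0.0.2'])  # illustrative IPs
    # next() (Python 2.6+) dispatches to .next() on Python 2 and to
    # .__next__() on Python 3, so a single call covers both:
    server = next(server_ip)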
diff --git a/glustolibs-gluster/glustolibs/gluster/brickmux_ops.py b/glustolibs-gluster/glustolibs/gluster/brickmux_ops.py
index eeb4e2a50..b56434741 100755
--- a/glustolibs-gluster/glustolibs/gluster/brickmux_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/brickmux_ops.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2017-2019 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -119,7 +119,7 @@ def check_brick_pid_matches_glusterfsd_pid(mnode, volname):
"of brick path %s", brick_node, brick_path)
_rc = False
- cmd = "pidof glusterfsd"
+ cmd = "pgrep -x glusterfsd"
ret, pid, _ = g.run(brick_node, cmd)
if ret != 0:
g.log.error("Failed to run the command %s on "
@@ -127,7 +127,7 @@ def check_brick_pid_matches_glusterfsd_pid(mnode, volname):
_rc = False
else:
- glusterfsd_pid = pid.split()
+ glusterfsd_pid = pid.split('\n')[:-1]
if brick_pid not in glusterfsd_pid:
g.log.error("Brick pid %s doesn't match glusterfsd "
@@ -149,8 +149,10 @@ def get_brick_processes_count(mnode):
int: Number of brick processes running on the node.
None: If the command fails to execute.
"""
- ret, out, _ = g.run(mnode, "pidof glusterfsd")
+ ret, out, _ = g.run(mnode, "pgrep -x glusterfsd")
if not ret:
- return len(out.split(" "))
+ list_of_pids = out.split("\n")
+ list_of_pids.pop()
+ return len(list_of_pids)
else:
return None
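
The switch from pidof to pgrep changes the output shape: pidof prints all PIDs space-separated on one line, while pgrep prints one PID per line with a trailing newline, and -x matches the process name exactly. A small sketch of why the trailing element is popped:

    out = "1234\n5678\n"        # typical pgrep output
    pids = out.split("\n")      # ['1234', '5678', '']
    pids.pop()                  # drop the trailing empty string
    assert pids == ['1234', '5678']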
diff --git a/glustolibs-gluster/glustolibs/gluster/ctdb_libs.py b/glustolibs-gluster/glustolibs/gluster/ctdb_libs.py
new file mode 100644
index 000000000..9dfa5f8f6
--- /dev/null
+++ b/glustolibs-gluster/glustolibs/gluster/ctdb_libs.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Description:
+ Samba ctdb base classes.
+ Pre-requisite:
+ Please install samba ctdb packages
+ on all servers
+"""
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.ctdb_ops import (
+ edit_hook_script,
+ enable_ctdb_cluster,
+ create_nodes_file,
+ create_public_address_file,
+ start_ctdb_service,
+ is_ctdb_status_healthy,
+ teardown_samba_ctdb_cluster)
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.volume_libs import (
+ setup_volume,
+ wait_for_volume_process_to_be_online)
+
+
+class SambaCtdbBaseClass(GlusterBaseClass):
+ """
+ Creates samba ctdb cluster
+ """
+ @classmethod
+ def setUpClass(cls):
+ """
+ Setup variable for samba ctdb test.
+ """
+ super(SambaCtdbBaseClass, cls).setUpClass()
+
+        # Read the cluster config first; cls.ctdb_nodes must be set
+        # before the replica count can be derived from it.
+        cls.ctdb_vips = (g.config['gluster']['cluster_config']
+                         ['smb']['ctdb_vips'])
+        cls.ctdb_nodes = (g.config['gluster']['cluster_config']
+                          ['smb']['ctdb_nodes'])
+        cls.ctdb_volname = (g.config['gluster']['cluster_config']
+                            ['smb']['ctdb_volname'])
+        cls.ctdb_volume_config = (g.config['gluster']['cluster_config']['smb']
+                                  ['ctdb_volume_config'])
+
+        cls.ctdb_volume_rep_count = len(cls.ctdb_nodes)
+        cls.primary_node = cls.servers[0]
+        g.log.info("VOLUME REP COUNT %s", cls.ctdb_volume_rep_count)
+
+ @classmethod
+ def setup_samba_ctdb_cluster(cls):
+ """
+        Create the ctdb-samba cluster if it doesn't exist
+
+        Returns:
+            bool: True if the samba ctdb cluster is set up successfully,
+                  else False
+ """
+ # Check if ctdb setup is up and running
+ if is_ctdb_status_healthy(cls.primary_node):
+ g.log.info("ctdb setup already up skipping "
+ "ctdb setup creation")
+ return True
+ g.log.info("Proceeding with ctdb setup creation")
+ for mnode in cls.servers:
+ ret = edit_hook_script(mnode, cls.ctdb_volname)
+ if not ret:
+ return False
+ ret = enable_ctdb_cluster(mnode)
+ if not ret:
+ return False
+ ret = create_nodes_file(mnode, cls.ctdb_nodes)
+ if not ret:
+ return False
+ ret = create_public_address_file(mnode, cls.ctdb_vips)
+ if not ret:
+ return False
+ server_info = cls.all_servers_info
+ ctdb_config = cls.ctdb_volume_config
+ g.log.info("Setting up ctdb volume %s", cls.ctdb_volname)
+ ret = setup_volume(mnode=cls.primary_node,
+ all_servers_info=server_info,
+ volume_config=ctdb_config)
+ if not ret:
+ g.log.error("Failed to setup ctdb volume %s", cls.ctdb_volname)
+ return False
+ g.log.info("Successful in setting up volume %s", cls.ctdb_volname)
+
+ # Wait for volume processes to be online
+ g.log.info("Wait for volume %s processes to be online",
+ cls.ctdb_volname)
+ ret = wait_for_volume_process_to_be_online(cls.mnode, cls.ctdb_volname)
+ if not ret:
+ g.log.error("Failed to wait for volume %s processes to "
+ "be online", cls.ctdb_volname)
+ return False
+ g.log.info("Successful in waiting for volume %s processes to be "
+ "online", cls.ctdb_volname)
+
+ # start ctdb services
+ ret = start_ctdb_service(cls.servers)
+ if not ret:
+ return False
+
+ ret = is_ctdb_status_healthy(cls.primary_node)
+ if not ret:
+ g.log.error("CTDB setup creation failed - exiting")
+ return False
+ g.log.info("CTDB setup creation successfull")
+ return True
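
A hypothetical test class built on the new base class (the class name is illustrative):

    class TestSambaCtdb(SambaCtdbBaseClass):
        @classmethod
        def setUpClass(cls):
            super(TestSambaCtdb, cls).setUpClass()
            # Build the cluster once for the whole test class
            if not cls.setup_samba_ctdb_cluster():
                raise ExecutionError("CTDB cluster setup failed")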
+
+ @classmethod
+ def tearDownClass(cls, delete_samba_ctdb_cluster=False):
+ """
+ Teardown samba ctdb cluster.
+ """
+ super(SambaCtdbBaseClass, cls).tearDownClass()
+
+ if delete_samba_ctdb_cluster:
+ ret = teardown_samba_ctdb_cluster(
+ cls.servers, cls.ctdb_volname)
+ if not ret:
+ raise ExecutionError("Cleanup of samba ctdb "
+ "cluster failed")
+ g.log.info("Teardown samba ctdb cluster succeeded")
+ else:
+ g.log.info("Skipping teardown samba ctdb cluster...")
diff --git a/glustolibs-gluster/glustolibs/gluster/ctdb_ops.py b/glustolibs-gluster/glustolibs/gluster/ctdb_ops.py
new file mode 100644
index 000000000..8bf57ba05
--- /dev/null
+++ b/glustolibs-gluster/glustolibs/gluster/ctdb_ops.py
@@ -0,0 +1,478 @@
+#!/usr/bin/env python
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+CTDB library operations
+pre-requisite : CTDB and Samba packages
+needs to be installed on all the server nodes.
+"""
+
+import re
+from time import sleep
+from glusto.core import Glusto as g
+from glustolibs.gluster.lib_utils import (add_services_to_firewall,
+ is_rhel6, list_files)
+from glustolibs.gluster.mount_ops import umount_volume
+from glustolibs.gluster.volume_libs import cleanup_volume
+
+
+def edit_hook_script(mnode, ctdb_volname):
+ """
+ Edit the hook scripts with ctdb volume name
+
+ Args:
+        mnode (str): Node on which commands have to be executed.
+ ctdb_volname (str): Name of the ctdb volume
+ Returns:
+        bool: True if the hook scripts are edited successfully, else False
+ """
+ # Replace META='all' to META=ctdb_volname setup hook script
+ cmd = ("sed -i -- 's/META=\"all\"/META=\"%s\"/g' "
+ "/var/lib/glusterd/hooks/1"
+ "/start/post/S29CTDBsetup.sh")
+ ret, _, _ = g.run(mnode, cmd % ctdb_volname)
+ if ret:
+ g.log.error("Hook script - S29CTDBsetup edit failed on %s", mnode)
+ return False
+
+ g.log.info("Hook script - S29CTDBsetup edit success on %s", mnode)
+ # Replace META='all' to META=ctdb_volname teardown hook script
+ cmd = ("sed -i -- 's/META=\"all\"/META=\"%s\"/g' "
+ "/var/lib/glusterd/hooks/1"
+ "/stop/pre/S29CTDB-teardown.sh")
+
+ ret, _, _ = g.run(mnode, cmd % ctdb_volname)
+ if ret:
+ g.log.error("Hook script - S29CTDB-teardown edit failed on %s", mnode)
+ return False
+ g.log.info("Hook script - S29CTDBteardown edit success on %s", mnode)
+ return True
+
+
+def enable_ctdb_cluster(mnode):
+ """
+ Edit the smb.conf to add clustering = yes
+
+ Args:
+        mnode (str): Node on which commands have to be executed.
+
+ Returns:
+        bool: True if clustering is enabled successfully, else False
+ """
+ # Add clustering = yes in smb.conf if not already there
+ cmd = (r"grep -q 'clustering = yes' "
+ r"/etc/samba/smb.conf || sed -i.bak '/\[global\]/a "
+ r"clustering = yes' /etc/samba/smb.conf")
+ ret, _, _ = g.run(mnode, cmd)
+ if ret:
+ g.log.error("Failed to add cluster = yes to smb.conf in %s", mnode)
+ return False
+ g.log.info("Successfully added 'clustering = yes' to smb.conf "
+ "in all nodes")
+ return True
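
The grep -q || sed pipeline above is idempotent: grep succeeds when the directive is already present, short-circuiting the sed; otherwise sed appends the line right after [global]. The resulting smb.conf fragment looks like:

    [global]
    clustering = yes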
+
+
+def check_file_availability(mnode, file_path, filename):
+ """
+    Check for a ctdb file and delete it if present
+
+    Args:
+        mnode(str): Node on which command is executed
+        file_path(str): Absolute path of the file to be validated
+        filename(str): File to be deleted if available in /etc/ctdb/
+
+    Returns:
+        bool: True if the file is absent or was deleted successfully,
+              else False
+ """
+ if file_path in list_files(mnode, "/etc/ctdb/", filename):
+ ret, _, _ = g.run(mnode, "rm -rf %s" % file_path)
+ if ret:
+ return False
+ return True
+
+
+def create_nodes_file(mnode, node_ips):
+ """
+ Create nodes file and add node ips
+
+ Args:
+        mnode (str): Node on which commands have to be executed.
+        node_ips (list): List of node IPs to add to the nodes file.
+
+    Returns:
+        bool: True if the nodes file is created successfully, else False
+ """
+ # check if nodes file is available and delete
+ node_file_path = "/etc/ctdb/nodes"
+ ret = check_file_availability(mnode, node_file_path, "nodes")
+ if not ret:
+ g.log.info("Failed to delete pre-existing nodes file in %s", mnode)
+ return False
+ g.log.info("Deleted pre-existing nodes file in %s", mnode)
+ for node_ip in node_ips:
+ ret, _, _ = g.run(mnode, "echo -e %s "
+ ">> %s" % (node_ip, node_file_path))
+ if ret:
+ g.log.error("Failed to add nodes list in %s", mnode)
+ return False
+ g.log.info("Nodes list added succssfully to %s"
+ "file in all servers", node_file_path)
+ return True
+
+
+def create_public_address_file(mnode, vips):
+ """
+ Create public_addresses file and add vips
+
+ Args:
+        mnode (str): Node on which commands have to be executed.
+ vips (list): List of virtual ips
+
+ Returns:
+        bool: True if the public_addresses file is created successfully,
+              else False
+ """
+ publicip_file_path = "/etc/ctdb/public_addresses"
+ ret = check_file_availability(mnode,
+ publicip_file_path,
+ "public_addresses")
+ if not ret:
+ g.log.info("Failed to delete pre-existing public_addresses"
+ "file in %s", mnode)
+ return False
+ g.log.info("Deleted pre-existing public_addresses"
+ "file in %s", mnode)
+ for vip in vips:
+ ret, _, _ = g.run(mnode, "echo -e %s >>"
+ " %s" % (vip, publicip_file_path))
+ if ret:
+ g.log.error("Failed to add vip list in %s", mnode)
+ return False
+ g.log.info("vip list added succssfully to %s"
+ "file in all node", publicip_file_path)
+ return True
+
+
+def ctdb_service_status(servers, mnode):
+ """
+ Status of ctdb service on the specified node.
+
+    Args:
+        servers (str|list): List of servers (used to detect RHEL 6)
+        mnode (str): Node on which ctdb status needs to be checked
+
+ Returns:
+ tuple: Tuple containing three elements (ret, out, err).
+ The first element 'ret' is of type 'int' and is the return value
+ of command execution.
+
+ The second element 'out' is of type 'str' and is the stdout value
+ of the command execution.
+
+ The third element 'err' is of type 'str' and is the stderr value
+ of the command execution.
+ """
+ g.log.info("Getting ctdb service status on %s", mnode)
+ if is_rhel6(servers):
+ return g.run(mnode, "service ctdb status")
+ return g.run(mnode, "systemctl status ctdb")
+
+
+def is_ctdb_service_running(servers, mnode):
+ """
+ Check if ctdb service is running on node
+
+ Args:
+ servers (str|list): list|str of cluster nodes
+ mnode (str): Node on which ctdb service has to be checked
+
+ Returns:
+ bool: True if ctdb service running else False
+ """
+ g.log.info("Check if ctdb service is running on %s", mnode)
+ ret, out, _ = ctdb_service_status(servers, mnode)
+ if ret:
+ g.log.error("Execution error service ctdb status "
+ "on %s", mnode)
+ return False
+ if "Active: active (running)" in out:
+ g.log.info("ctdb service is running on %s", mnode)
+ return True
+ else:
+ g.log.error("ctdb service is not "
+ "running on %s", mnode)
+ return False
+
+
+def start_ctdb_service(servers):
+ """
+    Start ctdb services on all nodes and
+    wait for 40 seconds
+
+ Args:
+ servers (list): IP of samba nodes
+
+ Returns:
+ bool: True if successfully starts ctdb service else false
+ """
+ cmd = "pgrep ctdb || service ctdb start"
+ for mnode in servers:
+ ret, out, _ = g.run(mnode, cmd)
+ if ret:
+ g.log.error("Unable to start ctdb on server %s", str(out))
+ return False
+ if not is_ctdb_service_running(servers, mnode):
+ g.log.error("ctdb services not running %s", str(out))
+ return False
+ g.log.info("Start ctdb on server %s successful", mnode)
+    # sleep for 40 seconds as ctdb status takes time to become healthy
+ sleep(40)
+ return True
+
+
+def stop_ctdb_service(servers):
+ """
+ stop ctdb services on all nodes
+
+ Args:
+ servers (list): IP of samba nodes
+
+ Returns:
+ bool: True if successfully stops ctdb service else false
+ """
+ cmd = "service ctdb stop"
+ for mnode in servers:
+ ret, out, _ = g.run(mnode, cmd)
+ if ret:
+ g.log.error("Unable to stop ctdb on server %s", str(out))
+ return False
+ if is_ctdb_service_running(servers, mnode):
+ g.log.error("ctdb services still running %s", str(out))
+ return False
+ g.log.info("Stop ctdb on server %s successful", mnode)
+ return True
+
+
+def ctdb_server_firewall_settings(servers):
+ """
+ Do firewall settings for ctdb
+
+ Args:
+        servers(list): IP of samba nodes
+
+ Returns:
+ bool: True if successfully added firewall services else false
+ """
+ # List of services to enable
+ services = ['samba', 'rpc-bind']
+ ret = add_services_to_firewall(servers, services, True)
+ if not ret:
+ g.log.error("Failed to set firewall zone "
+ "permanently on ctdb nodes")
+ return False
+
+ # Add ctdb and samba port
+ if not is_rhel6(servers):
+ for mnode in servers:
+ ret, _, _ = g.run(mnode, "firewall-cmd --add-port=4379/tcp "
+ "--add-port=139/tcp")
+ if ret:
+ g.log.error("Failed to add firewall port in %s", mnode)
+ return False
+ g.log.info("samba ctdb port added successfully in %s", mnode)
+ ret, _, _ = g.run(mnode, "firewall-cmd --add-port=4379/tcp "
+ "--add-port=139/tcp --permanent")
+ if ret:
+ g.log.error("Failed to add firewall port permanently in %s",
+ mnode)
+ return False
+ return True
+
+
+def parse_ctdb_status(status):
+ """
+ Parse the ctdb status output
+
+ Number of nodes:4
+ pnn:0 <ip> OK (THIS NODE)
+ pnn:1 <ip> OK
+ pnn:2 <ip> OK
+    pnn:3 <ip> UNHEALTHY
+ Generation:763624485
+ Size:4
+ hash:0 lmaster:0
+ hash:1 lmaster:1
+ hash:2 lmaster:2
+ hash:3 lmaster:3
+ Recovery mode:NORMAL (0)
+ Recovery master:3
+
+ Args:
+ status: output of ctdb status(string)
+
+ Returns:
+ dict: {<ip>: status}
+ """
+ cmd = r'pnn\:\d+\s*(\S+)\s*(\S+)'
+ ip_nodes = re.findall(cmd, status, re.S)
+ if ip_nodes:
+ # Empty dictionary to capture ctdb status output
+ node_status = {}
+ for item in ip_nodes:
+ node_status[item[0]] = item[1]
+ g.log.info("ctdb node status %s", node_status)
+ return node_status
+ else:
+ return {}
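
A minimal sketch of the parser against a sample status output (the IPs are placeholders):

    sample = ("Number of nodes:2\n"
              "pnn:0 192.168.0.10       OK (THIS NODE)\n"
              "pnn:1 192.168.0.11       UNHEALTHY\n")
    print(parse_ctdb_status(sample))
    # -> {'192.168.0.10': 'OK', '192.168.0.11': 'UNHEALTHY'}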
+
+
+def ctdb_status(mnode):
+ """
+ Execute ctdb status
+
+ Args:
+ mnode(str): primary node out of the servers
+
+ Returns:
+ tuple: Tuple containing three elements (ret, out, err).
+ The first element 'ret' is of type 'int' and is the return value
+ of command execution.
+
+ The second element 'out' is of type 'str' and is the stdout value
+ of the command execution.
+
+ The third element 'err' is of type 'str' and is the stderr value
+ of the command execution.
+
+ """
+ cmd = "ctdb status"
+ return g.run(mnode, cmd)
+
+
+def is_ctdb_status_healthy(mnode):
+ """
+ Check if ctdb is up & running
+
+ Args:
+ mnode(str): primary node out of the servers
+
+ Returns:
+ bool: True if ctdb status healthy else false
+ """
+ # Get the ctdb status details
+ status_res = ctdb_status(mnode)
+ if status_res[0]:
+ g.log.info("CTDB is not enabled for the cluster")
+ return False
+ # Get the ctdb status output
+ output = status_res[1]
+ # Parse the ctdb status output
+ node_status = parse_ctdb_status(output)
+ if not node_status:
+ g.log.error("ctdb status return empty list")
+ return False
+    # items() works on both Python 2 and 3; iteritems() is Python 2 only
+    for node_ip, status in node_status.items():
+ # Check if ctdb status is OK or not
+ if node_status[node_ip] != 'OK':
+ g.log.error("CTDB node %s is %s",
+ str(node_ip), status)
+ return False
+ g.log.info("CTDB node %s is %s",
+ str(node_ip), status)
+ return True
+
+
+def edit_hookscript_for_teardown(mnode, ctdb_volname):
+ """
+ Edit the hook scripts with ctdb volume name
+
+ Args:
+        mnode (str): Node on which commands have to be executed.
+        ctdb_volname (str): Name of ctdb volume
+    Returns:
+        bool: True if the hook scripts are edited successfully, else False
+ """
+    # Replace META=ctdb_volname with META="all" in the setup hook script
+ cmd = ("sed -i -- 's/META=\"%s\"/META=\"all\"/g' "
+ "/var/lib/glusterd/hooks/1"
+ "/start/post/S29CTDBsetup.sh" % ctdb_volname)
+ ret, _, _ = g.run(mnode, cmd)
+ if ret:
+ g.log.error("Hook script - S29CTDBsetup edit failed on %s", mnode)
+ return False
+
+ g.log.info("Hook script - S29CTDBsetup edit success on %s", mnode)
+    # Replace META=ctdb_volname with META="all" in the teardown hook script
+ cmd = ("sed -i -- 's/META=\"%s\"/META=\"all\"/g' "
+ "/var/lib/glusterd/hooks/1"
+ "/stop/pre/S29CTDB-teardown.sh" % ctdb_volname)
+ ret, _, _ = g.run(mnode, cmd)
+ if ret:
+ g.log.error("Hook script - S29CTDB-teardown edit failed on %s", mnode)
+ return False
+ g.log.info("Hook script - S29CTDBteardown edit success on %s", mnode)
+ return True
+
+
+def teardown_samba_ctdb_cluster(servers, ctdb_volname):
+ """
+ Tear down samba ctdb setup
+
+ Args:
+ servers (list): Nodes in ctdb cluster to teardown entire
+ cluster
+ ctdb_volname (str): Name of ctdb volume
+
+ Returns:
+ bool: True if successfully tear downs ctdb cluster else false
+ """
+
+ node_file_path = "/etc/ctdb/nodes"
+ publicip_file_path = "/etc/ctdb/public_addresses"
+ g.log.info("Executing force cleanup...")
+ # Stop ctdb service
+ if stop_ctdb_service(servers):
+ for mnode in servers:
+ # check if nodes file is available and delete
+ ret = check_file_availability(mnode, node_file_path, "nodes")
+ if not ret:
+ g.log.info("Failed to delete existing "
+ "nodes file in %s", mnode)
+ return False
+ g.log.info("Deleted existing nodes file in %s", mnode)
+
+ # check if public_addresses file is available and delete
+ ret = check_file_availability(mnode, publicip_file_path,
+ "public_addresses")
+ if not ret:
+ g.log.info("Failed to delete existing public_addresses"
+ " file in %s", mnode)
+ return False
+ g.log.info("Deleted existing public_addresses"
+ "file in %s", mnode)
+
+ ctdb_mount = '/gluster/lock'
+ ret, _, _ = umount_volume(mnode, ctdb_mount, 'glusterfs')
+ if ret:
+ g.log.error("Unable to unmount lock volume in %s", mnode)
+ return False
+ if not edit_hookscript_for_teardown(mnode, ctdb_volname):
+ return False
+ mnode = servers[0]
+ ret = cleanup_volume(mnode, ctdb_volname)
+ if not ret:
+ g.log.error("Failed to delete ctdb volume - %s", ctdb_volname)
+ return False
+ return True
+ return False
diff --git a/glustolibs-gluster/glustolibs/gluster/dht_test_utils.py b/glustolibs-gluster/glustolibs/gluster/dht_test_utils.py
index 55dcce5c7..11f2eda62 100644
--- a/glustolibs-gluster/glustolibs/gluster/dht_test_utils.py
+++ b/glustolibs-gluster/glustolibs/gluster/dht_test_utils.py
@@ -29,7 +29,6 @@ import glustolibs.gluster.constants as k
import glustolibs.gluster.exceptions as gex
from glustolibs.gluster.brickdir import BrickDir
from glustolibs.gluster.volume_libs import get_subvols, get_volume_type
-from glustolibs.gluster.gluster_init import get_gluster_version
from glustolibs.misc.misc_libs import upload_scripts
@@ -39,9 +38,8 @@ def run_layout_tests(mnode, fqpath, layout, test_type):
brick_path_list = ret.get('brickdir_paths')
for brickdir_path in brick_path_list:
(server_ip, _) = brickdir_path.split(':')
- if (get_gluster_version(server_ip) >= 6.0 and
- get_volume_type(brickdir_path) in ('Replicate', 'Disperse',
- 'Arbiter')):
+ if get_volume_type(brickdir_path) in ('Replicate', 'Disperse',
+ 'Arbiter'):
g.log.info("Cannot check for layout completeness as"
" volume under test is Replicate/Disperse/Arbiter")
else:
@@ -342,6 +340,44 @@ def find_new_hashed(subvols, parent_path, oldname):
return None
+def find_specific_hashed(subvols, parent_path, subvol, existing_names=None):
+ """ Finds filename that hashes to a specific subvol.
+
+ Args:
+ subvols(list): list of subvols
+ parent_path(str): parent path (relative to mount) of "oldname"
+ subvol(str): The subvol to which the new name has to be hashed
+ existing_names(int|list): The name(s) already hashed to subvol
+
+ Returns:
+ (Class Object): For success returns an object of type NewHashed
+ holding information pertaining to new name.
+ None, otherwise
+ Note: The new hash will be searched under the same parent
+ """
+ # pylint: disable=protected-access
+ if not isinstance(existing_names, list):
+ existing_names = [existing_names]
+ brickobject = create_brickobjectlist(subvols, parent_path)
+ if brickobject is None:
+ g.log.error("could not form brickobject list")
+ return None
+ count = -1
+ for item in range(1, 5000, 1):
+ newhash = calculate_hash(brickobject[0]._host, str(item))
+ for brickdir in brickobject:
+ count += 1
+ if (subvol._fqpath == brickdir._fqpath and
+ item not in existing_names):
+ ret = brickdir.hashrange_contains_hash(newhash)
+ if ret:
+ g.log.debug("oldhashed %s new %s count %s",
+ subvol, brickdir._host, str(count))
+ return NewHashed(item, brickdir, count)
+ count = -1
+ return None
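
A hypothetical usage sketch of the new helper; create_brickobjectlist builds the same BrickDir objects the function compares against, and the names below are illustrative:

    subvols = get_subvols(mnode, volname)['volume_subvols']
    target = create_brickobjectlist(subvols, "dir1")[0]
    new = find_specific_hashed(subvols, "dir1", target)
    if new is not None:
        g.log.info("found a new name that hashes to the requested subvol")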
+
+
class NewHashed(object):
'''
Helper Class to hold new hashed info
@@ -420,3 +456,5 @@ def is_layout_complete(mnode, volname, dirpath):
return False
elif hash_difference < 1:
g.log.error("Layout has overlaps")
+
+ return True
diff --git a/glustolibs-gluster/glustolibs/gluster/geo_rep_ops.py b/glustolibs-gluster/glustolibs/gluster/geo_rep_ops.py
index 7e12113c9..7d0f5a73e 100755
--- a/glustolibs-gluster/glustolibs/gluster/geo_rep_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/geo_rep_ops.py
@@ -292,7 +292,7 @@ def georep_config_set(mnode, mastervol, slaveip, slavevol, config, value,
"""
if user:
- cmd = ("gluster volume geo-replication %s %s::%s config %s %s" %
+ cmd = ("gluster volume geo-replication %s %s@%s::%s config %s %s" %
(mastervol, user, slaveip, slavevol, config, value))
else:
cmd = ("gluster volume geo-replication %s %s::%s config %s %s" %
diff --git a/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py b/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
index 0d8731994..65061cb13 100644..100755
--- a/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
+++ b/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2021 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -47,6 +47,7 @@ from glustolibs.gluster.peer_ops import (
from glustolibs.gluster.gluster_init import (
restart_glusterd, stop_glusterd, wait_for_glusterd_to_start)
from glustolibs.gluster.samba_libs import share_volume_over_smb
+from glustolibs.gluster.shared_storage_ops import is_shared_volume_mounted
from glustolibs.gluster.volume_libs import (
cleanup_volume,
log_volume_info_and_status,
@@ -59,6 +60,9 @@ from glustolibs.gluster.volume_ops import (
set_volume_options, volume_reset, volume_start)
from glustolibs.io.utils import log_mounts_info
from glustolibs.gluster.geo_rep_libs import setup_master_and_slave_volumes
+from glustolibs.gluster.nfs_ganesha_ops import (
+ teardown_nfs_ganesha_cluster)
+from glustolibs.misc.misc_libs import kill_process
class runs_on(g.CarteTestClass):
@@ -192,6 +196,11 @@ class GlusterBaseClass(TestCase):
Returns (bool): True if all peers are in connected with other peers.
False otherwise.
"""
+
+        # If the setup has a single server node, bypass this validation.
+ if len(cls.servers) == 1:
+ return True
+
# Validate if peer is connected from all the servers
g.log.info("Validating if servers %s are connected from other servers "
"in the cluster", cls.servers)
@@ -256,10 +265,13 @@ class GlusterBaseClass(TestCase):
False otherwise.
"""
if error_or_failure_exists:
+ shared_storage_mounted = False
+ if is_shared_volume_mounted(cls.mnode):
+ shared_storage_mounted = True
ret = stop_glusterd(cls.servers)
if not ret:
g.log.error("Failed to stop glusterd")
- cmd_list = ("pkill pidof glusterd",
+ cmd_list = ("pkill `pidof glusterd`",
"rm /var/run/glusterd.socket")
for server in cls.servers:
for cmd in cmd_list:
@@ -268,11 +280,29 @@ class GlusterBaseClass(TestCase):
g.log.error("Failed to stop glusterd")
return False
for server in cls.servers:
- cmd_list = ("rm -rf /var/lib/glusterd/vols/*",
- "rm -rf /var/lib/glusterd/snaps/*",
- "rm -rf /var/lib/glusterd/peers/*",
- "rm -rf {}/*/*".format(
- cls.all_servers_info[server]['brick_root']))
+ ret, out, _ = g.run(server, "pgrep glusterfsd", "root")
+ if not ret:
+ ret = kill_process(server,
+ process_ids=out.strip().split('\n'))
+ if not ret:
+ g.log.error("Unable to kill process {}".format(
+ out.strip().split('\n')))
+ return False
+ if not shared_storage_mounted:
+ cmd_list = (
+ "rm -rf /var/lib/glusterd/vols/*",
+ "rm -rf /var/lib/glusterd/snaps/*",
+ "rm -rf /var/lib/glusterd/peers/*",
+ "rm -rf {}/*/*".format(
+ cls.all_servers_info[server]['brick_root']))
+ else:
+ cmd_list = (
+ "for vol in `ls /var/lib/glusterd/vols/ | "
+ "grep -v gluster_shared_storage`;do "
+ "rm -rf /var/lib/glusterd/vols/$vol;done",
+ "rm -rf /var/lib/glusterd/snaps/*"
+ "rm -rf {}/*/*".format(
+ cls.all_servers_info[server]['brick_root']))
for cmd in cmd_list:
ret, _, _ = g.run(server, cmd, "root")
if ret:
@@ -288,10 +318,11 @@ class GlusterBaseClass(TestCase):
if not ret:
g.log.error("Failed to bring glusterd up")
return False
- ret = peer_probe_servers(cls.mnode, cls.servers)
- if not ret:
- g.log.error("Failed to peer probe servers")
- return False
+ if not shared_storage_mounted:
+ ret = peer_probe_servers(cls.mnode, cls.servers)
+ if not ret:
+ g.log.error("Failed to peer probe servers")
+ return False
for client in cls.clients:
cmd_list = ("umount /mnt/*", "rm -rf /mnt/*")
for cmd in cmd_list:
@@ -303,10 +334,10 @@ class GlusterBaseClass(TestCase):
return True
@classmethod
- def setup_volume(cls, volume_create_force=False):
+ def setup_volume(cls, volume_create_force=False, only_volume_create=False):
"""Setup the volume:
- Create the volume, Start volume, Set volume
- options, enable snapshot/quota/tier if specified in the config
+ options, enable snapshot/quota if specified in the config
file.
- Wait for volume processes to be online
- Export volume as NFS/SMB share if mount_type is NFS or SMB
@@ -315,6 +346,9 @@ class GlusterBaseClass(TestCase):
Args:
volume_create_force(bool): True if create_volume should be
executed with 'force' option.
+            only_volume_create(bool): True if only volume creation is
+                                      needed; False (default) to also
+                                      start the volume.
Returns (bool): True if all the steps mentioned in the descriptions
passes. False otherwise.
@@ -337,12 +371,19 @@ class GlusterBaseClass(TestCase):
g.log.info("Setting up volume %s", cls.volname)
ret = setup_volume(mnode=cls.mnode,
all_servers_info=cls.all_servers_info,
- volume_config=cls.volume, force=force_volume_create)
+ volume_config=cls.volume, force=force_volume_create,
+ create_only=only_volume_create)
if not ret:
g.log.error("Failed to Setup volume %s", cls.volname)
return False
g.log.info("Successful in setting up volume %s", cls.volname)
+        # Return early without proceeding to the next steps
+        if only_volume_create and ret:
+            g.log.info("Setup of volume %s with only volume creation "
+                       "successful", cls.volname)
+            return True
+
# Wait for volume processes to be online
g.log.info("Wait for volume %s processes to be online", cls.volname)
ret = wait_for_volume_process_to_be_online(cls.mnode, cls.volname)
@@ -433,6 +474,9 @@ class GlusterBaseClass(TestCase):
"""
g.log.info("Starting to mount volume %s", cls.volname)
for mount_obj in mounts:
+ # For nfs-ganesha, mount is done via vip
+ if cls.enable_nfs_ganesha:
+ mount_obj.server_system = cls.vips[0]
g.log.info("Mounting volume '%s:%s' on '%s:%s'",
mount_obj.server_system, mount_obj.volname,
mount_obj.client_system, mount_obj.mountpoint)
@@ -952,8 +996,8 @@ class GlusterBaseClass(TestCase):
mount_dict['volname'] = cls.slave_volume
mount_dict['server'] = cls.mnode_slave
mount_dict['mountpoint'] = path_join(
- "/mnt", '_'.join([cls.slave_volname,
- cls.mount_type]))
+ "/mnt", '_'.join([cls.slave_volname,
+ cls.mount_type]))
cls.slave_mounts = create_mount_objs(slave_mount_dict_list)
# Defining clients from mounts.
@@ -993,6 +1037,31 @@ class GlusterBaseClass(TestCase):
datetime.now().strftime('%H_%M_%d_%m_%Y'))
cls.glustotest_run_id = g.config['glustotest_run_id']
+ if cls.enable_nfs_ganesha:
+ g.log.info("Setup NFS_Ganesha")
+ cls.num_of_nfs_ganesha_nodes = int(cls.num_of_nfs_ganesha_nodes)
+ cls.servers_in_nfs_ganesha_cluster = (
+ cls.servers[:cls.num_of_nfs_ganesha_nodes])
+ cls.vips_in_nfs_ganesha_cluster = (
+ cls.vips[:cls.num_of_nfs_ganesha_nodes])
+
+ # Obtain hostname of servers in ganesha cluster
+ cls.ganesha_servers_hostname = []
+ for ganesha_server in cls.servers_in_nfs_ganesha_cluster:
+ ret, hostname, _ = g.run(ganesha_server, "hostname")
+ if ret:
+ raise ExecutionError("Failed to obtain hostname of %s"
+ % ganesha_server)
+ hostname = hostname.strip()
+ g.log.info("Obtained hostname: IP- %s, hostname- %s",
+ ganesha_server, hostname)
+ cls.ganesha_servers_hostname.append(hostname)
+ from glustolibs.gluster.nfs_ganesha_libs import setup_nfs_ganesha
+ ret = setup_nfs_ganesha(cls)
+ if not ret:
+ raise ExecutionError("Failed to setup nfs ganesha")
+ g.log.info("Successful in setting up NFS Ganesha Cluster")
+
msg = "Setupclass: %s : %s" % (cls.__name__, cls.glustotest_run_id)
g.log.info(msg)
cls.inject_msg_in_gluster_logs(msg)
@@ -1020,7 +1089,7 @@ class GlusterBaseClass(TestCase):
if (self.error_or_failure_exists or
self._is_error_or_failure_exists()):
ret = self.scratch_cleanup(self.error_or_failure_exists)
- g.log.warn(ret)
+ g.log.info(ret)
return self.get_super_method(self, 'doCleanups')()
@classmethod
@@ -1029,5 +1098,250 @@ class GlusterBaseClass(TestCase):
cls._is_error_or_failure_exists()):
ret = cls.scratch_cleanup(
GlusterBaseClass.error_or_failure_exists)
- g.log.warn(ret)
+ g.log.info(ret)
return cls.get_super_method(cls, 'doClassCleanups')()
+
+ @classmethod
+ def delete_nfs_ganesha_cluster(cls):
+ ret = teardown_nfs_ganesha_cluster(
+ cls.servers_in_nfs_ganesha_cluster)
+ if not ret:
+ g.log.error("Teardown got failed. Hence, cleaning up "
+ "nfs-ganesha cluster forcefully")
+ ret = teardown_nfs_ganesha_cluster(
+ cls.servers_in_nfs_ganesha_cluster, force=True)
+ if not ret:
+ raise ExecutionError("Force cleanup of nfs-ganesha "
+ "cluster failed")
+ g.log.info("Teardown nfs ganesha cluster succeeded")
+
+ @classmethod
+ def start_memory_and_cpu_usage_logging(cls, test_id, interval=60,
+ count=100):
+ """Upload logger script and start logging usage on cluster
+
+ Args:
+ test_id(str): ID of the test running fetched from self.id()
+
+        Kwargs:
+ interval(int): Time interval after which logs are to be collected
+ (Default: 60)
+ count(int): Number of samples to be collected(Default: 100)
+
+ Returns:
+ proc_dict(dict):Dictionary of logging processes
+ """
+        # imports are added inside the function to make them optional
+        # and to avoid breakage on installations which don't use the
+        # resource leak library
+ from glustolibs.io.memory_and_cpu_utils import (
+ check_upload_memory_and_cpu_logger_script,
+ log_memory_and_cpu_usage_on_cluster)
+
+        # Check if the script is present on the servers; if not,
+        # upload it to the servers.
+ if not check_upload_memory_and_cpu_logger_script(cls.servers):
+ return None
+
+        # Check if the script is present on the clients; if not,
+        # upload it to the clients.
+ if not check_upload_memory_and_cpu_logger_script(cls.clients):
+ return None
+
+ # Start logging on servers and clients
+ proc_dict = log_memory_and_cpu_usage_on_cluster(
+ cls.servers, cls.clients, test_id, interval, count)
+
+ return proc_dict
+
+ @classmethod
+ def compute_and_print_usage_stats(cls, test_id, proc_dict,
+ kill_proc=False):
+ """Compute and print CPU and memory usage statistics
+
+ Args:
+ proc_dict(dict):Dictionary of logging processes
+ test_id(str): ID of the test running fetched from self.id()
+
+ Kwargs:
+ kill_proc(bool): Kill logging process if true else wait
+ for process to complete execution
+ """
+        # imports are added inside the function to make them optional
+        # and to avoid breakage on installations which don't use the
+        # resource leak library
+ from glustolibs.io.memory_and_cpu_utils import (
+ wait_for_logging_processes_to_stop, kill_all_logging_processes,
+ compute_data_usage_stats_on_servers,
+ compute_data_usage_stats_on_clients)
+
+ # Wait or kill running logging process
+ if kill_proc:
+ nodes = cls.servers + cls.clients
+ ret = kill_all_logging_processes(proc_dict, nodes, cluster=True)
+ if not ret:
+ g.log.error("Unable to stop logging processes.")
+ else:
+ ret = wait_for_logging_processes_to_stop(proc_dict, cluster=True)
+ if not ret:
+ g.log.error("Processes didn't complete still running.")
+
+ # Compute and print stats for servers
+ ret = compute_data_usage_stats_on_servers(cls.servers, test_id)
+ g.log.info('*' * 50)
+ g.log.info(ret) # TODO: Make logged message more structured
+ g.log.info('*' * 50)
+
+ # Compute and print stats for clients
+ ret = compute_data_usage_stats_on_clients(cls.clients, test_id)
+ g.log.info('*' * 50)
+ g.log.info(ret) # TODO: Make logged message more structured
+ g.log.info('*' * 50)
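
A hypothetical flow inside a test method pairing the two helpers above (test_id comes from self.id(), as the docstrings note):

    proc_dict = self.start_memory_and_cpu_usage_logging(
        self.id(), interval=30, count=20)
    # ... run the workload under observation ...
    self.compute_and_print_usage_stats(self.id(), proc_dict,
                                       kill_proc=True)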
+
+ @classmethod
+ def check_for_memory_leaks_and_oom_kills_on_servers(cls, test_id,
+ gain=30.0):
+ """Check for memory leaks and OOM kills on servers
+
+ Args:
+ test_id(str): ID of the test running fetched from self.id()
+
+ Kwargs:
+ gain(float): Accepted amount of leak for a given testcase in MB
+                         (Default: 30)
+
+ Returns:
+            bool: True if memory leaks or OOM kills are observed else False
+ """
+        # Imports are inside the function to keep them optional and to
+        # avoid breaking installations that don't use the resource
+        # leak library.
+ from glustolibs.io.memory_and_cpu_utils import (
+ check_for_memory_leaks_in_glusterd,
+ check_for_memory_leaks_in_glusterfs,
+ check_for_memory_leaks_in_glusterfsd,
+ check_for_oom_killers_on_servers)
+
+ # Check for memory leaks on glusterd
+ if check_for_memory_leaks_in_glusterd(cls.servers, test_id, gain):
+ g.log.error("Memory leak on glusterd.")
+ return True
+
+ if cls.volume_type != "distributed":
+ # Check for memory leaks on shd
+ if check_for_memory_leaks_in_glusterfs(cls.servers, test_id,
+ gain):
+ g.log.error("Memory leak on shd.")
+ return True
+
+ # Check for memory leaks on brick processes
+ if check_for_memory_leaks_in_glusterfsd(cls.servers, test_id, gain):
+ g.log.error("Memory leak on brick process.")
+ return True
+
+ # Check OOM kills on servers for all gluster server processes
+ if check_for_oom_killers_on_servers(cls.servers):
+ g.log.error('OOM kills present on servers.')
+ return True
+ return False
+
+ @classmethod
+ def check_for_memory_leaks_and_oom_kills_on_clients(cls, test_id, gain=30):
+ """Check for memory leaks and OOM kills on clients
+
+ Args:
+ test_id(str): ID of the test running fetched from self.id()
+
+ Kwargs:
+ gain(float): Accepted amount of leak for a given testcase in MB
+                         (Default: 30)
+
+ Returns:
+            bool: True if memory leaks or OOM kills are observed else False
+ """
+        # Imports are inside the function to keep them optional and to
+        # avoid breaking installations that don't use the resource
+        # leak library.
+ from glustolibs.io.memory_and_cpu_utils import (
+ check_for_memory_leaks_in_glusterfs_fuse,
+ check_for_oom_killers_on_clients)
+
+ # Check for memory leak on glusterfs fuse process
+ if check_for_memory_leaks_in_glusterfs_fuse(cls.clients, test_id,
+ gain):
+ g.log.error("Memory leaks observed on FUSE clients.")
+ return True
+
+ # Check for oom kills on clients
+ if check_for_oom_killers_on_clients(cls.clients):
+ g.log.error("OOM kills present on clients.")
+ return True
+ return False
+
+ @classmethod
+ def check_for_cpu_usage_spikes_on_servers(cls, test_id, threshold=3):
+ """Check for CPU usage spikes on servers
+
+ Args:
+ test_id(str): ID of the test running fetched from self.id()
+
+ Kwargs:
+            threshold(int): Accepted number of instances of 100% CPU usage
+                            (Default: 3)
+
+        Returns:
+ bool: True if CPU spikes are more than threshold else False
+ """
+        # Imports are inside the function to keep them optional and to
+        # avoid breaking installations that don't use the resource
+        # leak library.
+ from glustolibs.io.memory_and_cpu_utils import (
+ check_for_cpu_usage_spikes_on_glusterd,
+ check_for_cpu_usage_spikes_on_glusterfs,
+ check_for_cpu_usage_spikes_on_glusterfsd)
+
+ # Check for CPU usage spikes on glusterd
+ if check_for_cpu_usage_spikes_on_glusterd(cls.servers, test_id,
+ threshold):
+ g.log.error("CPU usage spikes observed more than threshold "
+ "on glusterd.")
+ return True
+
+ if cls.volume_type != "distributed":
+ # Check for CPU usage spikes on shd
+ if check_for_cpu_usage_spikes_on_glusterfs(cls.servers, test_id,
+ threshold):
+ g.log.error("CPU usage spikes observed more than threshold "
+ "on shd.")
+ return True
+
+ # Check for CPU usage spikes on brick processes
+ if check_for_cpu_usage_spikes_on_glusterfsd(cls.servers, test_id,
+ threshold):
+ g.log.error("CPU usage spikes observed more than threshold "
+ "on shd.")
+ return True
+ return False
+
+ @classmethod
+ def check_for_cpu_spikes_on_clients(cls, test_id, threshold=3):
+ """Check for CPU usage spikes on clients
+
+ Args:
+ test_id(str): ID of the test running fetched from self.id()
+
+ Kwargs:
+            threshold(int): Accepted number of instances of 100% CPU usage
+                            (Default: 3)
+
+        Returns:
+ bool: True if CPU spikes are more than threshold else False
+ """
+        # Imports are inside the function to keep them optional and to
+        # avoid breaking installations that don't use the resource
+        # leak library.
+ from glustolibs.io.memory_and_cpu_utils import (
+ check_for_cpu_usage_spikes_on_glusterfs_fuse)
+
+ ret = check_for_cpu_usage_spikes_on_glusterfs_fuse(cls.clients,
+ test_id,
+ threshold)
+ return ret
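Usage sketch (illustrative, not part of the patch): how the new
resource-leak classmethods are expected to chain together inside a test
method, assuming a test class that inherits them and the stock unittest
`self.id()` / assertion helpers:

    test_id = self.id()
    proc_dict = self.start_memory_and_cpu_usage_logging(test_id,
                                                        interval=30,
                                                        count=20)
    self.assertIsNotNone(proc_dict, "Failed to start usage logging")

    # ... run the actual I/O or volume workload here ...

    self.compute_and_print_usage_stats(test_id, proc_dict, kill_proc=True)
    leaks = self.check_for_memory_leaks_and_oom_kills_on_servers(test_id)
    self.assertFalse(leaks, "Memory leaks or OOM kills seen on servers")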
diff --git a/glustolibs-gluster/glustolibs/gluster/gluster_init.py b/glustolibs-gluster/glustolibs/gluster/gluster_init.py
index 29059e6a1..6a49ffc8b 100644
--- a/glustolibs-gluster/glustolibs/gluster/gluster_init.py
+++ b/glustolibs-gluster/glustolibs/gluster/gluster_init.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2015-2020 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2015-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -23,13 +23,17 @@ from time import sleep
from glusto.core import Glusto as g
-def start_glusterd(servers):
+def start_glusterd(servers, enable_retry=True):
"""Starts glusterd on specified servers if they are not running.
Args:
servers (str|list): A server|List of server hosts on which glusterd
has to be started.
+ Kwargs:
+        enable_retry(bool): If True, runs 'systemctl reset-failed glusterd'
+            and retries the start once on failure. Defaults to True.
+
Returns:
bool : True if starting glusterd is successful on all servers.
False otherwise.
@@ -46,10 +50,13 @@ def start_glusterd(servers):
if retcode != 0:
g.log.error("Unable to start glusterd on server %s", server)
_rc = False
- if not _rc:
- return False
+ if not _rc and enable_retry:
+ ret = reset_failed_glusterd(servers)
+ if ret:
+            # Retry once with retries disabled to avoid endless recursion
+            ret = start_glusterd(servers, enable_retry=False)
+ return ret
- return True
+ return _rc
def stop_glusterd(servers):
@@ -81,13 +88,17 @@ def stop_glusterd(servers):
return True
-def restart_glusterd(servers):
+def restart_glusterd(servers, enable_retry=True):
"""Restart the glusterd on specified servers.
Args:
servers (str|list): A server|List of server hosts on which glusterd
has to be restarted.
+ Kwargs:
+        enable_retry(bool): If True, runs 'systemctl reset-failed glusterd'
+            and retries the restart once on failure. Defaults to True.
+
Returns:
bool : True if restarting glusterd is successful on all servers.
False otherwise.
@@ -104,9 +115,35 @@ def restart_glusterd(servers):
if retcode != 0:
g.log.error("Unable to restart glusterd on server %s", server)
_rc = False
- if not _rc:
- return False
+ if not _rc and enable_retry:
+ ret = reset_failed_glusterd(servers)
+ if ret:
+            # Retry once with retries disabled to avoid endless recursion
+            ret = restart_glusterd(servers, enable_retry=False)
+ return ret
+ return _rc
+
+
+def reset_failed_glusterd(servers):
+ """Reset-failed glusterd on specified servers.
+
+ Args:
+ servers (str|list): A server|List of server hosts on which glusterd
+ has to be reset-failed.
+
+ Returns:
+ bool : True if reset-failed glusterd is successful on all servers.
+ False otherwise.
+ """
+ if not isinstance(servers, list):
+ servers = [servers]
+
+ cmd = "systemctl reset-failed glusterd"
+ results = g.run_parallel(servers, cmd)
+ for server, (retcode, _, _) in results.items():
+ if retcode:
+ g.log.error("Unable to reset glusterd on server %s", server)
+ return False
return True
@@ -260,10 +297,10 @@ def get_gluster_version(host):
host(str): IP of the host whose gluster version has to be checked.
Returns:
- (float): The gluster version value.
+ str: The gluster version value.
"""
command = 'gluster --version'
_, out, _ = g.run(host, command)
g.log.info("The Gluster verion of the cluster under test is %s",
out)
- return float(out.split(' ')[1])
+ return out.split(' ')[1]
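Usage sketch (illustrative, not part of the patch): the new retry flow and
the string return type, assuming the usual `servers` fixture:

    from glustolibs.gluster.exceptions import ExecutionError
    from glustolibs.gluster.gluster_init import (start_glusterd,
                                                 get_gluster_version)

    # With enable_retry=True (the default) a failed start triggers
    # 'systemctl reset-failed glusterd' followed by one more attempt.
    if not start_glusterd(servers):
        raise ExecutionError("glusterd did not start even after retry")

    # The version is a string now; compare its numeric parts instead of
    # comparing floats.
    version = get_gluster_version(servers[0])
    if int(version.split('.')[0]) >= 7:
        pass  # version-specific test steps go here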
diff --git a/glustolibs-gluster/glustolibs/gluster/glusterdir.py b/glustolibs-gluster/glustolibs/gluster/glusterdir.py
index f2981cb93..5618926c8 100644
--- a/glustolibs-gluster/glustolibs/gluster/glusterdir.py
+++ b/glustolibs-gluster/glustolibs/gluster/glusterdir.py
@@ -82,22 +82,29 @@ def rmdir(host, fqpath, force=False):
return False
-def get_dir_contents(host, path):
+def get_dir_contents(host, path, recursive=False):
"""Get the files and directories present in a given directory.
Args:
host (str): The hostname/ip of the remote system.
path (str): The path to the directory.
+ Kwargs:
+ recursive (bool): lists all entries recursively
+
Returns:
file_dir_list (list): List of files and directories on path.
None: In case of error or failure.
"""
- ret, out, _ = g.run(host, ' ls '+path)
- if ret != 0:
+ if recursive:
+ cmd = "find {}".format(path)
+ else:
+ cmd = "ls " + path
+ ret, out, _ = g.run(host, cmd)
+ if ret:
+ g.log.error("No such file or directory {}".format(path))
return None
- file_dir_list = list(filter(None, out.split("\n")))
- return file_dir_list
+    return list(filter(None, out.split("\n")))
class GlusterDir(GlusterFile):
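Usage sketch (illustrative, not part of the patch), assuming a client host
and a mounted volume at /mnt/testvol:

    from glusto.core import Glusto as g
    from glustolibs.gluster.glusterdir import get_dir_contents

    # Top-level listing (ls) ...
    top_level = get_dir_contents(client, "/mnt/testvol")

    # ... or the full tree via find when recursive=True.
    all_entries = get_dir_contents(client, "/mnt/testvol", recursive=True)
    if all_entries is None:
        g.log.error("Path is missing or not readable")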
diff --git a/glustolibs-gluster/glustolibs/gluster/glusterfile.py b/glustolibs-gluster/glustolibs/gluster/glusterfile.py
index 4d712a5f3..ee9b6040d 100755
--- a/glustolibs-gluster/glustolibs/gluster/glusterfile.py
+++ b/glustolibs-gluster/glustolibs/gluster/glusterfile.py
@@ -97,40 +97,50 @@ def get_mountpoint(host, fqpath):
return None
-def get_fattr(host, fqpath, fattr):
+def get_fattr(host, fqpath, fattr, encode="hex"):
"""getfattr for filepath on remote system
Args:
host (str): The hostname/ip of the remote system.
fqpath (str): The fully-qualified path to the file.
fattr (str): name of the fattr to retrieve
-
+ Kwargs:
+        encode(str): Encoding in which to fetch the value; one of
+            'hex', 'text' or 'base64'. Defaults to 'hex'.
Returns:
getfattr result on success. None on fail.
"""
- command = ("getfattr --absolute-names --only-values -n '%s' %s" %
- (fattr, fqpath))
+ command = ("getfattr --absolute-names -e '%s' "
+ "-n '%s' %s" %
+ (encode, fattr, fqpath))
rcode, rout, rerr = g.run(host, command)
-
- if rcode == 0:
- return rout.strip()
+ if not rcode:
+        return rout.strip().split('=', 1)[1].replace('"', '')
g.log.error('getfattr failed: %s' % rerr)
return None
-def get_fattr_list(host, fqpath):
+def get_fattr_list(host, fqpath, encode_hex=False):
"""List of xattr for filepath on remote system.
Args:
host (str): The hostname/ip of the remote system.
fqpath (str): The fully-qualified path to the file.
+ Kwargs:
+ encode_hex(bool): Fetch xattr in hex if True
+ (Default:False)
+
Returns:
Dictionary of xattrs on success. None on fail.
"""
- command = "getfattr --absolute-names -d -m - %s" % fqpath
- rcode, rout, rerr = g.run(host, command)
+ cmd = "getfattr --absolute-names -d -m - {}".format(fqpath)
+ if encode_hex:
+ cmd = ("getfattr --absolute-names -d -m - -e hex {}"
+ .format(fqpath))
+ rcode, rout, rerr = g.run(host, cmd)
if rcode == 0:
xattr_list = {}
@@ -237,7 +247,7 @@ def get_file_stat(host, fqpath):
Returns:
A dictionary of file stat data. None on fail.
"""
- statformat = '%F:%n:%i:%a:%s:%h:%u:%g:%U:%G'
+ statformat = '%F$%n$%i$%a$%s$%h$%u$%g$%U$%G$%x$%y$%z$%X$%Y$%Z'
command = "stat -c '%s' %s" % (statformat, fqpath)
rcode, rout, rerr = g.run(host, command)
if rcode == 0:
@@ -245,7 +255,9 @@ def get_file_stat(host, fqpath):
stat_string = rout.strip()
(filetype, filename, inode,
access, size, links,
- uid, gid, username, groupname) = stat_string.split(":")
+ uid, gid, username, groupname,
+ atime, mtime, ctime, epoch_atime,
+ epoch_mtime, epoch_ctime) = stat_string.split("$")
stat_data['filetype'] = filetype
stat_data['filename'] = filename
@@ -257,6 +269,12 @@ def get_file_stat(host, fqpath):
stat_data["groupname"] = groupname
stat_data["uid"] = uid
stat_data["gid"] = gid
+ stat_data["atime"] = atime
+ stat_data["mtime"] = mtime
+ stat_data["ctime"] = ctime
+ stat_data["epoch_atime"] = epoch_atime
+ stat_data["epoch_mtime"] = epoch_mtime
+ stat_data["epoch_ctime"] = epoch_ctime
return stat_data
@@ -382,7 +400,8 @@ def get_pathinfo(host, fqpath):
A dictionary of pathinfo data for a remote file. None on fail.
"""
pathinfo = {}
- pathinfo['raw'] = get_fattr(host, fqpath, 'trusted.glusterfs.pathinfo')
+ pathinfo['raw'] = get_fattr(host, fqpath, 'trusted.glusterfs.pathinfo',
+ encode="text")
pathinfo['brickdir_paths'] = re.findall(r".*?POSIX.*?:(\S+)\>",
pathinfo['raw'])
@@ -405,17 +424,14 @@ def is_linkto_file(host, fqpath):
"""
command = 'file %s' % fqpath
rcode, rout, _ = g.run(host, command)
-
if rcode == 0:
- if 'sticky empty' in rout.strip():
+        # An additional ',' appears in the output on newer platforms
+        if ('sticky empty' in rout.strip() or
+                'sticky, empty' in rout.strip()):
stat = get_file_stat(host, fqpath)
if int(stat['size']) == 0:
- # xattr = get_fattr(host, fqpath,
- # 'trusted.glusterfs.dht.linkto')
xattr = get_dht_linkto_xattr(host, fqpath)
if xattr is not None:
return True
-
return False
@@ -429,7 +445,8 @@ def get_dht_linkto_xattr(host, fqpath):
Returns:
Return value of get_fattr trusted.glusterfs.dht.linkto call.
"""
- linkto_xattr = get_fattr(host, fqpath, 'trusted.glusterfs.dht.linkto')
+ linkto_xattr = get_fattr(host, fqpath, 'trusted.glusterfs.dht.linkto',
+ encode="text")
return linkto_xattr
@@ -480,6 +497,154 @@ def check_if_pattern_in_file(host, pattern, fqpath):
return 0
+def occurences_of_pattern_in_file(node, search_pattern, filename):
+ """
+    Get the number of occurrences of a pattern in a file
+
+ Args:
+ node (str): Host on which the command is executed.
+ search_pattern (str): Pattern to be found in the file.
+ filename (str): File in which the pattern is to be validated
+
+    Returns:
+        (int): -1 when the file doesn't exist,
+               0 when the pattern doesn't exist in the file,
+               otherwise the number of occurrences of the
+               pattern in the file.
+
+ Example:
+ occurences_of_pattern_in_file(node, search_pattern, filename)
+ """
+
+ ret = file_exists(node, filename)
+ if not ret:
+ g.log.error("File %s is not present on the node " % filename)
+ return -1
+
+ cmd = ("grep -c '%s' %s" % (search_pattern, filename))
+ ret, out, _ = g.run(node, cmd)
+ if ret:
+ g.log.error("No occurence of the pattern found in the file %s" %
+ filename)
+ return 0
+ return int(out.strip('\n'))
+
+
+def create_link_file(node, file, link, soft=False):
+ """
+    Create hard or soft link for an existing file
+
+ Args:
+ node(str): Host on which the command is executed.
+ file(str): Path to the source file.
+ link(str): Path to the link file.
+
+    Kwargs:
+ soft(bool): Create soft link if True else create
+ hard link.
+
+ Returns:
+ (bool): True if command successful else False.
+
+ Example:
+ >>> create_link_file('10.20.30.40', '/mnt/mp/file.txt',
+ '/mnt/mp/link')
+ True
+ """
+ cmd = "ln {} {}".format(file, link)
+ if soft:
+ cmd = "ln -s {} {}".format(file, link)
+
+ ret, _, err = g.run(node, cmd)
+ if ret:
+ if soft:
+ g.log.error('Failed to create soft link on {} '
+ 'for file {} with error {}'
+ .format(node, file, err))
+ else:
+ g.log.error('Failed to create hard link on {} '
+ 'for file {} with error {}'
+ .format(node, file, err))
+ return False
+ return True
+
+
+def set_acl(client, rule, fqpath):
+ """Set acl rule on a specific file
+
+ Args:
+ client(str): Host on which the command is executed.
+ rule(str): The acl rule to be set on the file.
+ fqpath (str): The fully-qualified path to the file.
+
+ Returns:
+ (bool): True if command successful else False.
+ """
+ cmd = "setfacl -m {} {}".format(rule, fqpath)
+ ret, _, _ = g.run(client, cmd)
+ if ret:
+ g.log.error('Failed to set rule {} on file {}'.format(rule, fqpath))
+ return False
+ return True
+
+
+def get_acl(client, path, filename):
+ """Get all acl rules set to a file
+
+ Args:
+ client(str): Host on which the command is executed.
+ path (str): The fully-qualified path to the dir where file is present.
+ filename(str): Name of the file for which rules have to be fetched.
+
+ Returns:
+ (dict): A dictionary with the formatted output of the command.
+ (None): In case of failures
+
+ Example:
+ >>> get_acl('dhcp35-4.lab.eng.blr.redhat.com', '/root/', 'file')
+ {'owner': 'root', 'rules': ['user::rw-', 'user:root:rwx', 'group::r--',
+ 'mask::rwx', 'other::r--'], 'group': 'root', 'file': 'file'}
+ """
+ cmd = "cd {};getfacl {}".format(path, filename)
+ ret, out, _ = g.run(client, cmd)
+ if ret:
+ return None
+
+ # Generate a dict out of the output
+ output_dict = {}
+ data = out.strip().split('\n')
+ for key, index in (('file', 0), ('owner', 1), ('group', 2)):
+ output_dict[key] = data[index].split(' ')[2]
+ output_dict['rules'] = data[3:]
+
+ return output_dict
+
+
+def delete_acl(client, fqpath, rule=None):
+ """Delete a specific or all acl rules set on a file
+
+ Args:
+ client(str): Host on which the command is executed.
+ fqpath (str): The fully-qualified path to the file.
+
+ Kwargs:
+ rule(str): The acl rule to be removed from the file.
+
+ Returns:
+ (bool): True if command successful else False.
+ """
+ # Remove all acls set on a file
+ cmd = "setfacl -b {}".format(fqpath)
+ # Remove a specific acl of the file
+ if rule:
+ cmd = "setfacl -x {} {}".format(rule, fqpath)
+
+ ret, _, _ = g.run(client, cmd)
+ if ret:
+ return False
+ return True
+
+
class GlusterFile(object):
"""Class to handle files specific to Gluster (client and backend)"""
def __init__(self, host, fqpath):
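Usage sketch for the new ACL helpers (illustrative, not part of the patch;
`client` and the paths below are placeholders):

    from glustolibs.gluster.glusterfile import (set_acl, get_acl,
                                                delete_acl)

    # Grant user 'qa' read/write, verify the rule landed, then remove it.
    assert set_acl(client, "u:qa:rw", "/mnt/testvol/file.txt")
    acl = get_acl(client, "/mnt/testvol", "file.txt")
    assert acl and "user:qa:rw-" in acl['rules']
    assert delete_acl(client, "/mnt/testvol/file.txt", rule="u:qa")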
diff --git a/glustolibs-gluster/glustolibs/gluster/heal_libs.py b/glustolibs-gluster/glustolibs/gluster/heal_libs.py
index 504173ae7..4a551cd48 100755
--- a/glustolibs-gluster/glustolibs/gluster/heal_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/heal_libs.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2016-2020 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2016-2021 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -135,7 +135,8 @@ def are_all_self_heal_daemons_are_online(mnode, volname):
return False
-def monitor_heal_completion(mnode, volname, timeout_period=1200):
+def monitor_heal_completion(mnode, volname, timeout_period=1200,
+ bricks=None, interval_check=120):
"""Monitors heal completion by looking into .glusterfs/indices/xattrop
directory of every brick for certain time. When there are no entries
in all the brick directories then heal is successful. Otherwise heal is
@@ -147,6 +148,12 @@ def monitor_heal_completion(mnode, volname, timeout_period=1200):
heal_monitor_timeout : time until which the heal monitoring to be done.
Default: 1200 i.e 20 minutes.
+ Kwargs:
+        bricks : list of bricks on which to monitor heal; if not
+                 provided, heal is monitored on all bricks of the volume
+ interval_check : Time in seconds, for every given interval checks
+ the heal info, defaults to 120.
+
Return:
bool: True if heal is complete within timeout_period. False otherwise
"""
@@ -158,7 +165,7 @@ def monitor_heal_completion(mnode, volname, timeout_period=1200):
# Get all bricks
from glustolibs.gluster.brick_libs import get_all_bricks
- bricks_list = get_all_bricks(mnode, volname)
+ bricks_list = bricks or get_all_bricks(mnode, volname)
if bricks_list is None:
g.log.error("Unable to get the bricks list. Hence unable to verify "
"whether self-heal-daemon process is running or not "
@@ -177,10 +184,15 @@ def monitor_heal_completion(mnode, volname, timeout_period=1200):
if heal_complete:
break
else:
- time.sleep(120)
- time_counter = time_counter - 120
+ time.sleep(interval_check)
+ time_counter = time_counter - interval_check
+
+    if heal_complete and bricks:
+        # A bricks list was passed (e.g. only the online bricks of an EC
+        # volume); skip 'heal info' since it fails for offline bricks
+        return True
- if heal_complete:
+ if heal_complete and not bricks:
heal_completion_status = is_heal_complete(mnode, volname)
if heal_completion_status is True:
g.log.info("Heal has successfully completed on volume %s" %
@@ -509,3 +521,71 @@ def bring_self_heal_daemon_process_offline(nodes):
_rc = False
return _rc
+
+
+def is_shd_daemon_running(mnode, node, volname):
+ """
+ Verifies whether the shd daemon is up and running on a particular node by
+ checking the existence of shd pid and parsing the get volume status output.
+
+ Args:
+ mnode (str): The first node in servers list
+ node (str): The node to be checked for whether the glustershd
+ process is up or not
+ volname (str): Name of the volume created
+
+ Returns:
+        boolean: True if shd is running on the node, False otherwise
+ """
+
+ # Get glustershd pid from node.
+ ret, glustershd_pids = get_self_heal_daemon_pid(node)
+ if not ret and glustershd_pids[node] != -1:
+ return False
+    # Verify the glustershd entry is present in the volume status output.
+ vol_status = get_volume_status(mnode, volname)
+ if vol_status is None:
+ return False
+ try:
+ _ = vol_status[volname][node]['Self-heal Daemon']
+ return True
+ except KeyError:
+ return False
+
+
+def enable_granular_heal(mnode, volname):
+ """Enable granular heal on a given volume
+
+ Args:
+        mnode(str): Node on which command has to be executed
+ volname(str): Name of the volume on which granular heal is to be enabled
+
+ Returns:
+ bool: True if granular heal is enabled successfully else False
+ """
+ cmd = "gluster volume heal {} granular-entry-heal enable".format(volname)
+ ret, _, _ = g.run(mnode, cmd)
+ if ret:
+ g.log.error('Unable to enable granular-entry-heal on volume %s',
+ volname)
+ return False
+ return True
+
+
+def disable_granular_heal(mnode, volname):
+ """Diable granular heal on a given volume
+
+ Args:
+ mnode(str): Node on which command will be exectued
+ volname(str): Name of the volume on which granular heal is to be disabled
+
+ Returns:
+ bool: True if granular heal is disabled successfully else False
+ """
+ cmd = "gluster volume heal {} granular-entry-heal disable".format(volname)
+ ret, _, _ = g.run(mnode, cmd)
+ if ret:
+ g.log.error('Unable to disable granular-entry-heal on volume %s',
+ volname)
+ return False
+ return True
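Usage sketch (illustrative, not part of the patch), assuming `mnode`,
`volname` and a pre-computed list of online bricks:

    from glusto.core import Glusto as g
    from glustolibs.gluster.exceptions import ExecutionError
    from glustolibs.gluster.heal_libs import (enable_granular_heal,
                                              monitor_heal_completion)

    if not enable_granular_heal(mnode, volname):
        raise ExecutionError("Could not enable granular-entry-heal")

    # For an EC volume with bricks down, pass only the online bricks so
    # monitoring skips 'heal info', which fails against offline bricks.
    if not monitor_heal_completion(mnode, volname, bricks=online_bricks,
                                   interval_check=60):
        g.log.error("Heal did not complete within the timeout")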
diff --git a/glustolibs-gluster/glustolibs/gluster/layout.py b/glustolibs-gluster/glustolibs/gluster/layout.py
index 8d7ae2d6f..ea5a5bc8b 100644
--- a/glustolibs-gluster/glustolibs/gluster/layout.py
+++ b/glustolibs-gluster/glustolibs/gluster/layout.py
@@ -19,7 +19,6 @@
from glusto.core import Glusto as g
from glustolibs.gluster.brickdir import BrickDir
-from glustolibs.gluster.gluster_init import get_gluster_version
class Layout(object):
@@ -35,20 +34,19 @@ class Layout(object):
self._brickdirs = []
for brickdir_path in self._pathinfo['brickdir_paths']:
(host, _) = brickdir_path.split(':')
- if get_gluster_version(host) >= 6.0:
- ret = get_volume_type(brickdir_path)
- if ret in ('Replicate', 'Disperse', 'Arbiter'):
- g.log.info("Cannot get layout as volume under test is"
- " Replicate/Disperse/Arbiter and DHT"
- " pass-through was enabled after Gluster 6.")
+ ret = get_volume_type(brickdir_path)
+ if ret in ('Replicate', 'Disperse', 'Arbiter'):
+ g.log.info("Cannot get layout as volume under test is"
+ " Replicate/Disperse/Arbiter and DHT"
+ " pass-through was enabled after Gluster 6.0")
+ else:
+ brickdir = BrickDir(brickdir_path)
+ if brickdir is None:
+ g.log.error("Failed to get the layout")
else:
- brickdir = BrickDir(brickdir_path)
- if brickdir is None:
- g.log.error("Failed to get the layout")
- else:
- g.log.debug("%s: %s" % (brickdir.path,
- brickdir.hashrange))
- self._brickdirs.append(brickdir)
+ g.log.debug("%s: %s" % (brickdir.path,
+ brickdir.hashrange))
+ self._brickdirs.append(brickdir)
def __init__(self, pathinfo):
"""Init the layout class
@@ -80,9 +78,8 @@ class Layout(object):
for brickdir_path in self._pathinfo['brickdir_paths']:
(host, _) = brickdir_path.split(':')
- if (get_gluster_version(host) >= 6.0 and
- get_volume_type(brickdir_path) in ('Replicate', 'Disperse',
- 'Arbiter')):
+ if get_volume_type(brickdir_path) in ('Replicate', 'Disperse',
+ 'Arbiter'):
g.log.info("Cannot check for layout completeness as volume"
" under test is Replicate/Disperse/Arbiter and DHT"
" pass-though was enabled after Gluster 6.")
diff --git a/glustolibs-gluster/glustolibs/gluster/lib_utils.py b/glustolibs-gluster/glustolibs/gluster/lib_utils.py
index 7299874d0..b04976b1c 100755
--- a/glustolibs-gluster/glustolibs/gluster/lib_utils.py
+++ b/glustolibs-gluster/glustolibs/gluster/lib_utils.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2015-2020 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2015-2021 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -387,7 +387,8 @@ def get_servers_unused_bricks_dict(mnode, servers, servers_info):
return servers_unused_bricks_dict
-def form_bricks_list(mnode, volname, number_of_bricks, servers, servers_info):
+def form_bricks_list(mnode, volname, number_of_bricks, servers, servers_info,
+ dirname=None):
"""Forms bricks list for create-volume/add-brick given the num_of_bricks
servers and servers_info.
@@ -400,6 +401,9 @@ def form_bricks_list(mnode, volname, number_of_bricks, servers, servers_info):
needs to be selected for creating the brick list.
servers_info (dict): dict of server info of each servers.
+    Kwargs:
+ dirname (str): Name of the directory for glusterfs brick
+
Returns:
list - List of bricks to use with volume-create/add-brick
None - if number_of_bricks is greater than unused bricks.
@@ -437,10 +441,18 @@ def form_bricks_list(mnode, volname, number_of_bricks, servers, servers_info):
list(servers_unused_bricks_dict.values())[dict_index])
brick_path = ''
if current_server_unused_bricks_list:
- brick_path = ("%s:%s/%s_brick%s" %
- (current_server,
- current_server_unused_bricks_list[0], volname, num))
- bricks_list.append(brick_path)
+            # Use dirname (when valid) instead of volname as brick prefix
+            if dirname and " " not in dirname:
+                prefix = dirname
+            else:
+                prefix = volname
+            brick_path = ("%s:%s/%s_brick%s" %
+                          (current_server,
+                           current_server_unused_bricks_list[0], prefix,
+                           num))
+            bricks_list.append(brick_path)
# Remove the added brick from the current_server_unused_bricks_list
list(servers_unused_bricks_dict.values())[dict_index].pop(0)
@@ -1004,6 +1016,30 @@ def group_add(servers, groupname):
return True
+def group_del(servers, groupname):
+ """
+ Deletes a group in all the servers.
+
+ Args:
+ servers(list|str): Nodes on which cmd is to be executed.
+ groupname(str): Name of the group to be removed.
+
+    Returns:
+        bool: Always returns True.
+ """
+ if not isinstance(servers, list):
+ servers = [servers]
+
+ cmd = "groupdel %s" % groupname
+ results = g.run_parallel(servers, cmd)
+
+ for server, ret_value in list(results.items()):
+ retcode, _, err = ret_value
+ if retcode != 0 and "does not exist" in err:
+ g.log.error("Group %s on server %s already removed",
+ groupname, server)
+ return True
+
+
def ssh_keygen(mnode):
"""
Creates a pair of ssh private and public key if not present
@@ -1199,3 +1235,25 @@ def collect_bricks_arequal(bricks_list):
arequal_list.append(arequal)
return (return_code, arequal_list)
+
+
+def get_usable_size_per_disk(brickpath, min_free_limit=10):
+ """Get the usable size per disk
+
+ Args:
+ brickpath(str): Brick path to be used to calculate usable size
+
+ Kwargs:
+ min_free_limit(int): Min free disk limit to be used
+
+ Returns:
+ (int): Usable size in GB. None in case of errors.
+ """
+ node, brick_path = brickpath.split(':')
+ size = get_size_of_mountpoint(node, brick_path)
+ if not size:
+ return None
+ size = int(size)
+ min_free_size = size * min_free_limit // 100
+ usable_size = ((size - min_free_size) // 1048576) + 1
+ return usable_size
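Usage sketch (illustrative, not part of the patch), assuming the usual
`mnode`, `volname`, `servers` and `servers_info` fixtures:

    from glustolibs.gluster.lib_utils import (form_bricks_list,
                                              get_usable_size_per_disk)

    # dirname gives bricks a custom prefix instead of the volume name.
    bricks = form_bricks_list(mnode, volname, 3, servers, servers_info,
                              dirname="dht_testdir")

    # Usable size in GB after reserving the default 10% free-space floor.
    usable_gb = get_usable_size_per_disk(bricks[0])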
diff --git a/glustolibs-gluster/glustolibs/gluster/mount_ops.py b/glustolibs-gluster/glustolibs/gluster/mount_ops.py
index 02dc0a253..c8fbddd05 100755
--- a/glustolibs-gluster/glustolibs/gluster/mount_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/mount_ops.py
@@ -336,10 +336,10 @@ def mount_volume(volname, mtype, mpoint, mserver, mclient, options='',
if mtype == 'nfs':
if not options:
- options = "-o vers=3"
+ options = "-o vers=4.1"
elif options and 'vers' not in options:
- options = options + ",vers=3"
+ options = options + ",vers=4.1"
if mserver:
mcmd = ("mount -t %s %s %s:/%s %s" %
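Usage note (illustrative, not part of the patch): NFS mounts now default
to vers=4.1, so tests that still need v3 must pin it explicitly:

    from glustolibs.gluster.mount_ops import mount_volume

    ret, out, err = mount_volume(volname, 'nfs', '/mnt/nfs', mserver,
                                 mclient, options='-o vers=3')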
diff --git a/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py b/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py
index 19e98408e..5f69e68f6 100644..100755
--- a/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py
@@ -32,179 +32,123 @@ from glustolibs.gluster.nfs_ganesha_ops import (
create_nfs_ganesha_cluster,
configure_ports_on_clients,
ganesha_client_firewall_settings)
-from glustolibs.gluster.gluster_base_class import GlusterBaseClass
-from glustolibs.gluster.exceptions import ExecutionError, ConfigError
from glustolibs.gluster.volume_libs import is_volume_exported
+from glustolibs.gluster.lib_utils import is_rhel7
-class NfsGaneshaClusterSetupClass(GlusterBaseClass):
- """Creates nfs ganesha cluster
+def setup_nfs_ganesha(cls):
"""
- @classmethod
- def setUpClass(cls):
- """
- Setup variable for nfs-ganesha tests.
- """
- # pylint: disable=too-many-statements, too-many-branches
- super(NfsGaneshaClusterSetupClass, cls).setUpClass()
-
- # Check if enable_nfs_ganesha is set in config file
- if not cls.enable_nfs_ganesha:
- raise ConfigError("Please enable nfs ganesha in config")
-
- # Read num_of_nfs_ganesha_nodes from config file and create
- # nfs ganesha cluster accordingly
- cls.num_of_nfs_ganesha_nodes = int(cls.num_of_nfs_ganesha_nodes)
- cls.servers_in_nfs_ganesha_cluster = (
- cls.servers[:cls.num_of_nfs_ganesha_nodes])
- cls.vips_in_nfs_ganesha_cluster = (
- cls.vips[:cls.num_of_nfs_ganesha_nodes])
-
- # Obtain hostname of servers in ganesha cluster
- cls.ganesha_servers_hostname = []
- for ganesha_server in cls.servers_in_nfs_ganesha_cluster:
- ret, hostname, _ = g.run(ganesha_server, "hostname")
- if ret:
- raise ExecutionError("Failed to obtain hostname of %s"
- % ganesha_server)
- hostname = hostname.strip()
- g.log.info("Obtained hostname: IP- %s, hostname- %s",
- ganesha_server, hostname)
- cls.ganesha_servers_hostname.append(hostname)
-
- @classmethod
- def setup_nfs_ganesha(cls):
- """
- Create nfs-ganesha cluster if not exists
- Set client configurations for nfs-ganesha
-
- Returns:
- True(bool): If setup is successful
- False(bool): If setup is failure
- """
- # pylint: disable = too-many-statements, too-many-branches
- # pylint: disable = too-many-return-statements
- cluster_exists = is_nfs_ganesha_cluster_exists(
+    Create the nfs-ganesha cluster if it doesn't exist and set
+    client configurations for nfs-ganesha.
+
+    Returns:
+        True(bool): If setup is successful
+        False(bool): If setup fails
+ """
+ # pylint: disable = too-many-statements, too-many-branches
+ # pylint: disable = too-many-return-statements
+ cluster_exists = is_nfs_ganesha_cluster_exists(
+ cls.servers_in_nfs_ganesha_cluster[0])
+ if cluster_exists:
+ is_healthy = is_nfs_ganesha_cluster_in_healthy_state(
cls.servers_in_nfs_ganesha_cluster[0])
- if cluster_exists:
- is_healthy = is_nfs_ganesha_cluster_in_healthy_state(
- cls.servers_in_nfs_ganesha_cluster[0])
-
- if is_healthy:
- g.log.info("Nfs-ganesha Cluster exists and is in healthy "
- "state. Skipping cluster creation...")
- else:
- g.log.info("Nfs-ganesha Cluster exists and is not in "
- "healthy state.")
- g.log.info("Tearing down existing cluster which is not in "
- "healthy state")
- ganesha_ha_file = ("/var/run/gluster/shared_storage/"
- "nfs-ganesha/ganesha-ha.conf")
-
- g.log.info("Collecting server details of existing "
- "nfs ganesha cluster")
- conn = g.rpyc_get_connection(
- cls.servers_in_nfs_ganesha_cluster[0], user="root")
- if not conn:
- tmp_node = cls.servers_in_nfs_ganesha_cluster[0]
- g.log.error("Unable to get connection to 'root' of node"
- " %s", tmp_node)
- return False
-
- if not conn.modules.os.path.exists(ganesha_ha_file):
- g.log.error("Unable to locate %s", ganesha_ha_file)
- return False
- with conn.builtin.open(ganesha_ha_file, "r") as fhand:
- ganesha_ha_contents = fhand.read()
- g.rpyc_close_connection(
- host=cls.servers_in_nfs_ganesha_cluster[0], user="root")
- servers_in_existing_cluster = re.findall(r'VIP_(.*)\=.*',
- ganesha_ha_contents)
-
- ret = teardown_nfs_ganesha_cluster(
- servers_in_existing_cluster, force=True)
- if not ret:
- g.log.error("Failed to teardown unhealthy ganesha "
- "cluster")
- return False
-
- g.log.info("Existing unhealthy cluster got teardown "
- "successfully")
-
- if (not cluster_exists) or (not is_healthy):
- g.log.info("Creating nfs-ganesha cluster of %s nodes"
- % str(cls.num_of_nfs_ganesha_nodes))
- g.log.info("Nfs-ganesha cluster node info: %s"
- % cls.servers_in_nfs_ganesha_cluster)
- g.log.info("Nfs-ganesha cluster vip info: %s"
- % cls.vips_in_nfs_ganesha_cluster)
-
- ret = create_nfs_ganesha_cluster(
- cls.ganesha_servers_hostname,
- cls.vips_in_nfs_ganesha_cluster)
+
+ if is_healthy:
+ g.log.info("Nfs-ganesha Cluster exists and is in healthy "
+ "state. Skipping cluster creation...")
+ else:
+ g.log.info("Nfs-ganesha Cluster exists and is not in "
+ "healthy state.")
+ g.log.info("Tearing down existing cluster which is not in "
+ "healthy state")
+ ganesha_ha_file = ("/var/run/gluster/shared_storage/"
+ "nfs-ganesha/ganesha-ha.conf")
+ g_node = cls.servers_in_nfs_ganesha_cluster[0]
+
+ g.log.info("Collecting server details of existing "
+ "nfs ganesha cluster")
+
+ # Check whether ganesha ha file exists
+ cmd = "[ -f {} ]".format(ganesha_ha_file)
+ ret, _, _ = g.run(g_node, cmd)
+ if ret:
+ g.log.error("Unable to locate %s", ganesha_ha_file)
+ return False
+
+ # Read contents of ganesha_ha_file
+ cmd = "cat {}".format(ganesha_ha_file)
+ ret, ganesha_ha_contents, _ = g.run(g_node, cmd)
+ if ret:
+ g.log.error("Failed to read %s", ganesha_ha_file)
+ return False
+
+ servers_in_existing_cluster = re.findall(r'VIP_(.*)\=.*',
+ ganesha_ha_contents)
+
+ ret = teardown_nfs_ganesha_cluster(
+ servers_in_existing_cluster, force=True)
if not ret:
- g.log.error("Creation of nfs-ganesha cluster failed")
+ g.log.error("Failed to teardown unhealthy ganesha "
+ "cluster")
return False
- if not is_nfs_ganesha_cluster_in_healthy_state(
- cls.servers_in_nfs_ganesha_cluster[0]):
- g.log.error("Nfs-ganesha cluster is not healthy")
- return False
- g.log.info("Nfs-ganesha Cluster exists is in healthy state")
+ g.log.info("Existing unhealthy cluster got teardown "
+ "successfully")
- ret = configure_ports_on_clients(cls.clients)
+ if (not cluster_exists) or (not is_healthy):
+ g.log.info("Creating nfs-ganesha cluster of %s nodes"
+ % str(cls.num_of_nfs_ganesha_nodes))
+ g.log.info("Nfs-ganesha cluster node info: %s"
+ % cls.servers_in_nfs_ganesha_cluster)
+ g.log.info("Nfs-ganesha cluster vip info: %s"
+ % cls.vips_in_nfs_ganesha_cluster)
+
+ ret = create_nfs_ganesha_cluster(
+ cls.ganesha_servers_hostname,
+ cls.vips_in_nfs_ganesha_cluster)
if not ret:
- g.log.error("Failed to configure ports on clients")
+ g.log.error("Creation of nfs-ganesha cluster failed")
return False
- ret = ganesha_client_firewall_settings(cls.clients)
+ if not is_nfs_ganesha_cluster_in_healthy_state(
+ cls.servers_in_nfs_ganesha_cluster[0]):
+ g.log.error("Nfs-ganesha cluster is not healthy")
+ return False
+ g.log.info("Nfs-ganesha Cluster exists is in healthy state")
+
+ if is_rhel7(cls.clients):
+ ret = configure_ports_on_clients(cls.clients)
if not ret:
- g.log.error("Failed to do firewall setting in clients")
+ g.log.error("Failed to configure ports on clients")
return False
- for server in cls.servers:
- for client in cls.clients:
- cmd = ("if [ -z \"$(grep -R \"%s\" /etc/hosts)\" ]; then "
- "echo \"%s %s\" >> /etc/hosts; fi"
- % (client, socket.gethostbyname(client), client))
- ret, _, _ = g.run(server, cmd)
- if ret != 0:
- g.log.error("Failed to add entry of client %s in "
- "/etc/hosts of server %s"
- % (client, server))
+ ret = ganesha_client_firewall_settings(cls.clients)
+ if not ret:
+ g.log.error("Failed to do firewall setting in clients")
+ return False
+ for server in cls.servers:
for client in cls.clients:
- for server in cls.servers:
- cmd = ("if [ -z \"$(grep -R \"%s\" /etc/hosts)\" ]; then "
- "echo \"%s %s\" >> /etc/hosts; fi"
- % (server, socket.gethostbyname(server), server))
- ret, _, _ = g.run(client, cmd)
- if ret != 0:
- g.log.error("Failed to add entry of server %s in "
- "/etc/hosts of client %s"
- % (server, client))
- return True
-
- @classmethod
- def tearDownClass(cls, delete_nfs_ganesha_cluster=True):
- """Teardown nfs ganesha cluster.
- """
- super(NfsGaneshaClusterSetupClass, cls).tearDownClass()
-
- if delete_nfs_ganesha_cluster:
- ret = teardown_nfs_ganesha_cluster(
- cls.servers_in_nfs_ganesha_cluster)
- if not ret:
- g.log.error("Teardown got failed. Hence, cleaning up "
- "nfs-ganesha cluster forcefully")
- ret = teardown_nfs_ganesha_cluster(
- cls.servers_in_nfs_ganesha_cluster, force=True)
- if not ret:
- raise ExecutionError("Force cleanup of nfs-ganesha "
- "cluster failed")
- g.log.info("Teardown nfs ganesha cluster succeeded")
- else:
- g.log.info("Skipping teardown nfs-ganesha cluster...")
+ cmd = ("if [ -z \"$(grep -R \"%s\" /etc/hosts)\" ]; then "
+ "echo \"%s %s\" >> /etc/hosts; fi"
+ % (client, socket.gethostbyname(client), client))
+ ret, _, _ = g.run(server, cmd)
+ if ret != 0:
+ g.log.error("Failed to add entry of client %s in "
+ "/etc/hosts of server %s"
+ % (client, server))
+
+ for client in cls.clients:
+ for server in cls.servers:
+ cmd = ("if [ -z \"$(grep -R \"%s\" /etc/hosts)\" ]; then "
+ "echo \"%s %s\" >> /etc/hosts; fi"
+ % (server, socket.gethostbyname(server), server))
+ ret, _, _ = g.run(client, cmd)
+ if ret != 0:
+ g.log.error("Failed to add entry of server %s in "
+ "/etc/hosts of client %s"
+ % (server, client))
+ return True
def wait_for_nfs_ganesha_volume_to_get_exported(mnode, volname, timeout=120):
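Usage sketch (illustrative, not part of the patch): callers that formerly
inherited NfsGaneshaClusterSetupClass now invoke the helper directly, e.g.
from setUpClass once the base class has populated
`servers_in_nfs_ganesha_cluster` and related attributes:

    from glustolibs.gluster.exceptions import ExecutionError
    from glustolibs.gluster.nfs_ganesha_libs import setup_nfs_ganesha

    if not setup_nfs_ganesha(cls):
        raise ExecutionError("nfs-ganesha cluster setup failed")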
diff --git a/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py b/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py
index 3e53ec29d..d8486c7d2 100644..100755
--- a/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2016-2017 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2016-2021 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -23,10 +23,10 @@
"""
import os
-import random
from glusto.core import Glusto as g
from glustolibs.gluster.glusterdir import mkdir
-from glustolibs.gluster.lib_utils import add_services_to_firewall
+from glustolibs.gluster.lib_utils import (add_services_to_firewall,
+ is_rhel7)
from glustolibs.gluster.shared_storage_ops import enable_shared_storage
from glustolibs.gluster.peer_ops import peer_probe_servers
@@ -50,17 +50,33 @@ def teardown_nfs_ganesha_cluster(servers, force=False):
Example:
teardown_nfs_ganesha_cluster(servers)
"""
+ # Copy ganesha.conf before proceeding to clean up
+ for server in servers:
+ cmd = "cp /etc/ganesha/ganesha.conf ganesha.conf"
+ ret, _, _ = g.run(server, cmd)
+ if ret:
+ g.log.error("Failed to copy ganesha.conf")
+
if force:
g.log.info("Executing force cleanup...")
+ cleanup_ops = ['--teardown', '--cleanup']
for server in servers:
- cmd = ("/usr/libexec/ganesha/ganesha-ha.sh --teardown "
- "/var/run/gluster/shared_storage/nfs-ganesha")
- _, _, _ = g.run(server, cmd)
- cmd = ("/usr/libexec/ganesha/ganesha-ha.sh --cleanup /var/run/"
- "gluster/shared_storage/nfs-ganesha")
- _, _, _ = g.run(server, cmd)
+ # Perform teardown and cleanup
+ for op in cleanup_ops:
+ cmd = ("/usr/libexec/ganesha/ganesha-ha.sh {} /var/run/"
+ "gluster/shared_storage/nfs-ganesha".format(op))
+ _, _, _ = g.run(server, cmd)
+
+ # Stop nfs ganesha service
_, _, _ = stop_nfs_ganesha_service(server)
+
+ # Clean shared storage, ganesha.conf, and replace with backup
+ for cmd in ("rm -rf /var/run/gluster/shared_storage/*",
+ "rm -rf /etc/ganesha/ganesha.conf",
+ "cp ganesha.conf /etc/ganesha/ganesha.conf"):
+ _, _, _ = g.run(server, cmd)
return True
+
ret, _, _ = disable_nfs_ganesha(servers[0])
if ret != 0:
g.log.error("Nfs-ganesha disable failed")
@@ -667,14 +683,17 @@ def create_nfs_ganesha_cluster(servers, vips):
False(bool): If failed to configure ganesha cluster
"""
# pylint: disable=too-many-return-statements
+ # pylint: disable=too-many-branches
+ # pylint: disable=too-many-statements
ganesha_mnode = servers[0]
- # Configure ports in ganesha servers
- g.log.info("Defining statd service ports")
- ret = configure_ports_on_servers(servers)
- if not ret:
- g.log.error("Failed to set statd service ports on nodes.")
- return False
+ # Configure ports in ganesha servers for RHEL7
+ if is_rhel7(servers):
+ g.log.info("Defining statd service ports")
+ ret = configure_ports_on_servers(servers)
+ if not ret:
+ g.log.error("Failed to set statd service ports on nodes.")
+ return False
# Firewall settings for nfs-ganesha
ret = ganesha_server_firewall_settings(servers)
@@ -752,6 +771,22 @@ def create_nfs_ganesha_cluster(servers, vips):
# Create backup of ganesha-ha.conf file in ganesha_mnode
g.upload(ganesha_mnode, tmp_ha_conf, '/etc/ganesha/')
+ # setsebool ganesha_use_fusefs on
+ cmd = "setsebool ganesha_use_fusefs on"
+    for server in servers:
+        ret, _, _ = g.run(server, cmd)
+        if ret:
+            g.log.error("Failed to 'setsebool ganesha_use_fusefs on' "
+                        "on %s", server)
+            return False
+
+        # Verify ganesha_use_fusefs is on, on each node
+        _, out, _ = g.run(server, "getsebool ganesha_use_fusefs")
+        if "ganesha_use_fusefs --> on" not in out:
+            g.log.error("getsebool shows ganesha_use_fusefs is off "
+                        "on %s", server)
+            return False
+
# Enabling ganesha
g.log.info("Enable nfs-ganesha")
ret, _, _ = enable_nfs_ganesha(ganesha_mnode)
@@ -765,6 +800,31 @@ def create_nfs_ganesha_cluster(servers, vips):
# pcs status output
_, _, _ = g.run(ganesha_mnode, "pcs status")
+ # pacemaker status output
+ _, _, _ = g.run(ganesha_mnode, "systemctl status pacemaker")
+
+ return True
+
+
+def enable_firewall(servers):
+ """Enables Firewall if not enabled already
+ Args:
+ servers(list): Hostname of ganesha nodes
+ Returns:
+ Status (bool) : True/False based on the status of firewall enable
+ """
+
+ cmd = "systemctl status firewalld | grep Active"
+ for server in servers:
+ ret, out, _ = g.run(server, cmd)
+ if 'inactive' in out:
+ g.log.info("Firewalld is not running. Enabling Firewalld")
+ for command in ("enable", "start"):
+ ret, out, _ = g.run(server,
+ "systemctl {} firewalld".format(command))
+ if ret:
+ g.log.error("Failed to enable Firewalld on %s", server)
+ return False
return True
@@ -778,9 +838,11 @@ def ganesha_server_firewall_settings(servers):
True(bool): If successfully set the firewall settings
False(bool): If failed to do firewall settings
"""
+ if not enable_firewall(servers):
+ return False
+
services = ['nfs', 'rpc-bind', 'high-availability', 'nlm', 'mountd',
'rquota']
-
ret = add_services_to_firewall(servers, services, True)
if not ret:
g.log.error("Failed to set firewall zone permanently on ganesha nodes")
@@ -852,47 +914,51 @@ def create_nfs_passwordless_ssh(mnode, gnodes, guser='root'):
False(bool): On failure
"""
loc = "/var/lib/glusterd/nfs/"
- mconn_inst = random.randint(20, 100)
- mconn = g.rpyc_get_connection(host=mnode, instance=mconn_inst)
- if not mconn.modules.os.path.isfile('/root/.ssh/id_rsa'):
+ # Check whether key is present
+ cmd = "[ -f /root/.ssh/id_rsa ]"
+ ret, _, _ = g.run(mnode, cmd)
+ if ret:
# Generate key on mnode if not already present
- if not mconn.modules.os.path.isfile('%s/secret.pem' % loc):
+ g.log.info("id_rsa not found")
+ cmd = "[ -f %s/secret.pem ]" % loc
+ ret, _, _ = g.run(mnode, cmd)
+ if ret:
+ g.log.info("Secret.pem file not found. Creating new")
ret, _, _ = g.run(
mnode, "ssh-keygen -f %s/secret.pem -q -N ''" % loc)
- if ret != 0:
+ if ret:
g.log.error("Failed to generate the secret pem file")
return False
g.log.info("Key generated on %s" % mnode)
else:
- mconn.modules.shutil.copyfile("/root/.ssh/id_rsa",
- "%s/secret.pem" % loc)
- g.log.info("Copying the id_rsa.pub to secret.pem.pub")
- mconn.modules.shutil.copyfile("/root/.ssh/id_rsa.pub",
- "%s/secret.pem.pub" % loc)
+ g.log.info("Found existing key")
+    # Copy the .pem and .pub files
+ for file, to_file in (('id_rsa', 'secret.pem'), ('id_rsa.pub',
+ 'secret.pem.pub')):
+ cmd = "cp /root/.ssh/{} {}{}".format(file, loc, to_file)
+ ret, _, err = g.run(mnode, cmd)
+ if ret:
+ g.log.error("Failed to copy {} to {} file {}".format(file,
+ to_file,
+ err))
+ return False
# Create password less ssh from mnode to all ganesha nodes
+ cmd = "cat /root/.ssh/id_rsa.pub"
+ ret, id_rsa, _ = g.run(mnode, cmd, user=guser)
+ if ret:
+ g.log.info("Failed to read key from %s", mnode)
+ return False
for gnode in gnodes:
- gconn_inst = random.randint(20, 100)
- gconn = g.rpyc_get_connection(gnode, user=guser, instance=gconn_inst)
- try:
- glocal = gconn.modules.os.path.expanduser('~')
- gfhand = gconn.builtin.open("%s/.ssh/authorized_keys" % glocal,
- "a")
- with mconn.builtin.open("/root/.ssh/id_rsa.pub", 'r') as fhand:
- for line in fhand:
- gfhand.write(line)
- gfhand.close()
- except Exception as exep:
- g.log.error("Exception occurred while trying to establish "
- "password less ssh from %s@%s to %s@%s. Exception: %s"
- % ('root', mnode, guser, gnode, exep))
+ file = "~/.ssh/authorized_keys"
+ cmd = ("grep -q '{}' {} || echo '{}' >> {}"
+ .format(id_rsa.rstrip(), file, id_rsa.rstrip(), file))
+ ret, _, _ = g.run(gnode, cmd, user=guser)
+ if ret:
+ g.log.info("Failed to add ssh key for %s", gnode)
return False
- finally:
- g.rpyc_close_connection(
- host=gnode, user=guser, instance=gconn_inst)
-
- g.rpyc_close_connection(host=mnode, instance=mconn_inst)
+ g.log.info("Successfully copied ssh key to all Ganesha nodes")
# Copy the ssh key pair from mnode to all the nodes in the Ganesha-HA
# cluster
@@ -906,8 +972,8 @@ def create_nfs_passwordless_ssh(mnode, gnodes, guser='root'):
% (loc, loc, guser, gnode, loc))
ret, _, _ = g.run(mnode, cmd)
if ret != 0:
- g.log.error("Failed to copy the ssh key pair from %s to %s",
- mnode, gnode)
+ g.log.error("Failed to copy the ssh key pair from "
+ "%s to %s", mnode, gnode)
return False
return True
@@ -923,7 +989,7 @@ def create_ganesha_ha_conf(hostnames, vips, temp_ha_file):
"""
hosts = ','.join(hostnames)
- with open(temp_ha_file, 'wb') as fhand:
+ with open(temp_ha_file, 'w') as fhand:
fhand.write('HA_NAME="ganesha-ha-360"\n')
fhand.write('HA_CLUSTER_NODES="%s"\n' % hosts)
for (hostname, vip) in zip(hostnames, vips):
@@ -940,7 +1006,6 @@ def cluster_auth_setup(servers):
True(bool): If configuration of cluster services is success
False(bool): If failed to configure cluster services
"""
- result = True
for node in servers:
# Enable pacemaker.service
ret, _, _ = g.run(node, "systemctl enable pacemaker.service")
@@ -965,13 +1030,15 @@ def cluster_auth_setup(servers):
return False
# Perform cluster authentication between the nodes
+ auth_type = 'cluster' if is_rhel7(servers) else 'host'
for node in servers:
- ret, _, _ = g.run(node, "pcs cluster auth %s -u hacluster -p "
- "hacluster" % ' '.join(servers))
- if ret != 0:
- g.log.error("pcs cluster auth command failed on %s", node)
- result = False
- return result
+ ret, _, _ = g.run(node, "pcs %s auth %s -u hacluster -p hacluster"
+ % (auth_type, ' '.join(servers)))
+ if ret:
+ g.log.error("pcs %s auth command failed on %s",
+ auth_type, node)
+ return False
+ return True
def configure_ports_on_servers(servers):
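Usage sketch (illustrative, not part of the patch): both helpers are
driven from the cluster-creation path, with `mnode` the first ganesha
node and `gnodes` the remaining ones:

    from glusto.core import Glusto as g

    if not create_nfs_passwordless_ssh(mnode, gnodes):
        g.log.error("Passwordless ssh setup across ganesha nodes failed")

    # cluster_auth_setup() now picks 'pcs cluster auth' on RHEL7 and
    # 'pcs host auth' on newer platforms automatically.
    if not cluster_auth_setup(gnodes):
        g.log.error("pcs authentication across the cluster failed")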
diff --git a/glustolibs-gluster/glustolibs/gluster/rebalance_ops.py b/glustolibs-gluster/glustolibs/gluster/rebalance_ops.py
index 1c8c10a4b..1011c89c6 100644
--- a/glustolibs-gluster/glustolibs/gluster/rebalance_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/rebalance_ops.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2015-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -401,3 +401,76 @@ def get_remove_brick_status(mnode, volname, bricks_list):
else:
remove_brick_status[element.tag] = element.text
return remove_brick_status
+
+
+def wait_for_remove_brick_to_complete(mnode, volname, bricks_list,
+ timeout=1200):
+ """Waits for the remove brick to complete
+
+ Args:
+ mnode (str): Node on which command has to be executed.
+ volname (str): volume name
+        bricks_list (list): List of bricks participating in the
+                            remove-brick operation
+
+ Kwargs:
+ timeout (int): timeout value in seconds to wait for remove brick
+ to complete
+
+ Returns:
+ True on success, False otherwise
+
+ Examples:
+ >>> wait_for_remove_brick_to_complete("abc.com", "testvol")
+ """
+
+ count = 0
+ while count < timeout:
+ status_info = get_remove_brick_status(mnode, volname, bricks_list)
+ if status_info is None:
+ return False
+ status = status_info['aggregate']['statusStr']
+ if status == 'completed':
+ g.log.info("Remove brick is successfully completed in %s sec",
+ count)
+ return True
+ elif status == 'failed':
+ g.log.error(" Remove brick failed on one or more nodes. "
+ "Check remove brick status for more details")
+ return False
+ else:
+ time.sleep(10)
+ count += 10
+ g.log.error("Remove brick operation has not completed. "
+ "Wait timeout is %s" % count)
+ return False
+
+
+def set_rebalance_throttle(mnode, volname, throttle_type='normal'):
+ """Sets rebalance throttle
+
+ Args:
+ mnode (str): Node on which cmd has to be executed.
+ volname (str): volume name
+
+ Kwargs:
+ throttle_type (str): throttling type (lazy|normal|aggressive)
+ Defaults to 'normal'
+
+ Returns:
+ tuple: Tuple containing three elements (ret, out, err).
+ The first element 'ret' is of type 'int' and is the return value
+ of command execution.
+
+ The second element 'out' is of type 'str' and is the stdout value
+ of the command execution.
+
+ The third element 'err' is of type 'str' and is the stderr value
+ of the command execution.
+
+ Example:
+ set_rebalance_throttle(mnode, testvol, throttle_type='aggressive')
+ """
+ cmd = ("gluster volume set {} rebal-throttle {}".format
+ (volname, throttle_type))
+ return g.run(mnode, cmd)
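Usage sketch (illustrative, not part of the patch), assuming `mnode`,
`volname` and the bricks selected for removal:

    from glusto.core import Glusto as g
    from glustolibs.gluster.rebalance_ops import (
        set_rebalance_throttle, wait_for_remove_brick_to_complete)

    # Speed up data migration before shrinking the volume ...
    ret, _, _ = set_rebalance_throttle(mnode, volname, 'aggressive')

    # ... then block until remove-brick status reports 'completed'.
    if not wait_for_remove_brick_to_complete(mnode, volname,
                                             bricks_to_remove):
        g.log.error("remove-brick did not finish within the timeout")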
diff --git a/glustolibs-gluster/glustolibs/gluster/snap_ops.py b/glustolibs-gluster/glustolibs/gluster/snap_ops.py
index 1e792ada7..0fba7771b 100644
--- a/glustolibs-gluster/glustolibs/gluster/snap_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/snap_ops.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2015-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -859,3 +859,29 @@ def snap_deactivate(mnode, snapname):
cmd = "gluster snapshot deactivate %s --mode=script" % snapname
return g.run(mnode, cmd)
+
+
+def terminate_snapd_on_node(mnode):
+ """Terminate snapd on the specified node
+
+ Args:
+ mnode(str):node on which commands has to be executed
+
+ Returns:
+ tuple: Tuple containing three elements (ret, out, err).
+ The first element 'ret' is of type 'int' and is the return value
+ of command execution.
+
+ The second element 'out' is of type 'str' and is the stdout value
+ of the command execution.
+
+ The third element 'err' is of type 'str' and is the stderr value
+        of the command execution.
+
+        None: if the snapd PID could not be determined.
+    """
+ cmd = "ps aux| grep -m1 snapd | awk '{print $2}'"
+ _, out, _ = g.run(mnode, cmd)
+ if out is None:
+ g.log.error("Failed to get the snapd PID using command %s", cmd)
+ return None
+ cmd = "kill -9 %s" % out
+ return g.run(mnode, cmd)
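Usage sketch (illustrative, not part of the patch):

    from glustolibs.gluster.snap_ops import terminate_snapd_on_node

    # Kill snapd on one node, then let the test verify it respawns or
    # that USS behaves as expected without it.
    ret, _, _ = terminate_snapd_on_node(mnode)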
diff --git a/glustolibs-gluster/glustolibs/gluster/ssl_ops.py b/glustolibs-gluster/glustolibs/gluster/ssl_ops.py
deleted file mode 100644
index f5d310d01..000000000
--- a/glustolibs-gluster/glustolibs/gluster/ssl_ops.py
+++ /dev/null
@@ -1,226 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-"""
- Description: Module for creating ssl machines for
- validating basic ssl cases
-"""
-
-from io import StringIO
-
-from glusto.core import Glusto as g
-
-
-def create_ssl_machine(servers, clients):
- """Following are the steps to create ssl machines:
- - Stop glusterd on all servers
- - Run: openssl genrsa -out /etc/ssl/glusterfs.key 2048
- - Run: openssl req -new -x509 -key /etc/ssl/glusterfs.key
- -subj "/CN=ip's" -days 365 -out /etc/ssl/glusterfs.pem
- - copy glusterfs.pem files into glusterfs.ca from all
- the nodes(servers+clients) to all the servers
- - touch /var/lib/glusterd/secure-access
- - Start glusterd on all servers
- Args:
- servers: List of servers
- clients: List of clients
-
- Returns:
- bool : True if successfully created ssl machine. False otherwise.
- """
- # pylint: disable=too-many-statements, too-many-branches
- # pylint: disable=too-many-return-statements
- # Variable to collect all servers ca_file for servers
- ca_file_server = StringIO()
-
- # Stop glusterd on all servers
- ret = g.run_parallel(servers, "systemctl stop glusterd")
- if not ret:
- g.log.error("Failed to stop glusterd on all servers")
- return False
-
- # Generate key file on all servers
- cmd = "openssl genrsa -out /etc/ssl/glusterfs.key 2048"
- ret = g.run_parallel(servers, cmd)
- if not ret:
- g.log.error("Failed to create /etc/ssl/glusterfs.key "
- "file on all servers")
- return False
-
- # Generate glusterfs.pem file on all servers
- for server in servers:
- _, hostname, _ = g.run(server, "hostname")
- cmd = ("openssl req -new -x509 -key /etc/ssl/glusterfs.key -subj "
- "/CN=%s -days 365 -out /etc/ssl/glusterfs.pem" % (hostname))
- ret = g.run(server, cmd)
- if not ret:
- g.log.error("Failed to create /etc/ssl/glusterfs.pem "
- "file on server %s", server)
- return False
-
- # Copy glusterfs.pem file of all servers into ca_file_server
- for server in servers:
- conn1 = g.rpyc_get_connection(server)
- if conn1 == "None":
- g.log.error("Failed to get rpyc connection on %s", server)
-
- with conn1.builtin.open('/etc/ssl/glusterfs.pem') as fin:
- ca_file_server.write(fin.read())
-
- # Copy all ca_file_server for clients use
- ca_file_client = ca_file_server.getvalue()
-
- # Generate key file on all clients
- for client in clients:
- _, hostname, _ = g.run(client, "hostname -s")
- cmd = "openssl genrsa -out /etc/ssl/glusterfs.key 2048"
- ret = g.run(client, cmd)
- if not ret:
- g.log.error("Failed to create /etc/ssl/glusterfs.key "
- "file on client %s", client)
- return False
-
- # Generate glusterfs.pem file on all clients
- cmd = ("openssl req -new -x509 -key /etc/ssl/glusterfs.key -subj "
- "/CN=%s -days 365 -out /etc/ssl/glusterfs.pem" % (client))
- ret = g.run(client, cmd)
- if not ret:
- g.log.error("Failed to create /etc/ssl/glusterf.pem "
- "file on client %s", client)
- return False
-
- # Copy glusterfs.pem file of client to a ca_file_server
- conn2 = g.rpyc_get_connection(client)
- if conn2 == "None":
- g.log.error("Failed to get rpyc connection on %s", server)
- with conn2.builtin.open('/etc/ssl/glusterfs.pem') as fin:
- ca_file_server.write(fin.read())
-
- # Copy glusterfs.pem file to glusterfs.ca of client such that
- # clients shouldn't share respectives ca file each other
- cmd = "cp /etc/ssl/glusterfs.pem /etc/ssl/glusterfs.ca"
- ret, _, _ = g.run(client, cmd)
- if ret != 0:
- g.log.error("Failed to copy the glusterfs.pem to "
- "glusterfs.ca of client")
- return False
-
- # Now copy the ca_file of all servers to client ca file
- with conn2.builtin.open('/etc/ssl/glusterfs.ca', 'a') as fout:
- fout.write(ca_file_client)
-
- # Create /var/lib/glusterd directory on clients
- ret = g.run(client, "mkdir -p /var/lib/glusterd/")
- if not ret:
- g.log.error("Failed to create directory /var/lib/glusterd/"
- " on clients")
-
- # Copy ca_file_server to all servers
- for server in servers:
- conn3 = g.rpyc_get_connection(server)
- if conn3 == "None":
- g.log.error("Failed to get rpyc connection on %s", server)
-
- with conn3.builtin.open('/etc/ssl/glusterfs.ca', 'w') as fout:
- fout.write(ca_file_server.getvalue())
-
- # Touch /var/lib/glusterd/secure-access on all servers
- ret = g.run_parallel(servers, "touch /var/lib/glusterd/secure-access")
- if not ret:
- g.log.error("Failed to touch the file on servers")
- return False
-
- # Touch /var/lib/glusterd/secure-access on all clients
- ret = g.run_parallel(clients, "touch /var/lib/glusterd/secure-access")
- if not ret:
- g.log.error("Failed to touch the file on clients")
- return False
-
- # Start glusterd on all servers
- ret = g.run_parallel(servers, "systemctl start glusterd")
- if not ret:
- g.log.error("Failed to stop glusterd on servers")
- return False
-
- return True
-
-
-def cleanup_ssl_setup(servers, clients):
- """
- Following are the steps to cleanup ssl setup:
- - Stop glusterd on all servers
-    - Remove the glusterfs SSL files under /etc/ssl/
-    - Remove /var/lib/glusterd/secure-access
- - Start glusterd on all servers
-
- Args:
- servers: List of servers
- clients: List of clients
-
- Returns:
- bool : True if successfully cleaned ssl machine. False otherwise.
- """
- # pylint: disable=too-many-return-statements
- _rc = True
-
- # Stop glusterd on all servers
- ret = g.run_parallel(servers, "systemctl stop glusterd")
- if not ret:
- _rc = False
- g.log.error("Failed to stop glusterd on all servers")
-
- # Remove glusterfs.key, glusterfs.pem and glusterfs.ca file
- # from all servers
- cmd = "rm -rf /etc/ssl/glusterfs*"
- ret = g.run_parallel(servers, cmd)
- if not ret:
- _rc = False
- g.log.error("Failed to remove folder /etc/ssl/glusterfs* "
- "on all servers")
-
-    # Remove the file /var/lib/glusterd/secure-access from servers
- cmd = "rm -rf /var/lib/glusterd/secure-access"
- ret = g.run_parallel(servers, cmd)
- if not ret:
- _rc = False
- g.log.error("Failed to remove folder /var/lib/glusterd/secure-access "
- "on all servers")
-
- # Remove glusterfs.key, glusterfs.pem and glusterfs.ca file
- # from all clients
- cmd = "rm -rf /etc/ssl/glusterfs*"
- ret = g.run_parallel(clients, cmd)
- if not ret:
- _rc = False
- g.log.error("Failed to remove folder /etc/ssl/glusterfs* "
- "on all clients")
-
-    # Remove the file /var/lib/glusterd/secure-access from clients
- cmd = "rm -rf /var/lib/glusterd/secure-access"
- ret = g.run_parallel(clients, cmd)
- if not ret:
- _rc = False
- g.log.error("Failed to remove folder /var/lib/glusterd/secure-access "
- "on all clients")
-
- # Start glusterd on all servers
- ret = g.run_parallel(servers, "systemctl start glusterd")
- if not ret:
- _rc = False
- g.log.error("Failed to stop glusterd on servers")
-
- return _rc
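For reference, a minimal, hypothetical usage sketch of the helper above (host names are placeholders; assumes glusto's g object is already configured):

    servers = ['server1.example.com', 'server2.example.com']
    clients = ['client1.example.com']
    # Tears down the SSL/TLS artifacts created during setup.
    if not cleanup_ssl_setup(servers, clients):
        g.log.error("SSL cleanup failed on one or more nodes")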
diff --git a/glustolibs-gluster/glustolibs/gluster/tiering_ops.py b/glustolibs-gluster/glustolibs/gluster/tiering_ops.py
deleted file mode 100644
index 357b3d471..000000000
--- a/glustolibs-gluster/glustolibs/gluster/tiering_ops.py
+++ /dev/null
@@ -1,1023 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-"""
- Description: Library for gluster tiering operations.
-"""
-
-import re
-import time
-from glusto.core import Glusto as g
-from glustolibs.gluster.peer_ops import peer_probe_servers
-from glustolibs.gluster.gluster_init import start_glusterd
-from glustolibs.gluster.lib_utils import list_files
-
-try:
- import xml.etree.cElementTree as etree
-except ImportError:
- import xml.etree.ElementTree as etree
-
-
-def add_extra_servers_to_cluster(mnode, extra_servers):
- """Adds the given extra servers to cluster
-
- Args:
- mnode (str): Node on which cmd has to be executed.
- extra_servers (str|list) : A server|list of extra servers to be
- attached to cluster
-
- Returns:
- bool: True, if extra servers are attached to cluster
- False, otherwise
-
- Example:
- add_extra_servers_to_cluster("abc.com", ['peer_node1','peer_node2'])
- """
-
- if not isinstance(extra_servers, list):
- extra_servers = [extra_servers]
-
- ret = start_glusterd(servers=extra_servers)
- if not ret:
- g.log.error("glusterd did not start in peer nodes")
- return False
-
- ret = peer_probe_servers(mnode, servers=extra_servers)
- if not ret:
- g.log.error("Unable to do peer probe on extra server machines")
- return False
-
- return True
-
-
-def tier_attach(mnode, volname, num_bricks_to_add, extra_servers,
- extra_servers_info, replica=1, force=False):
- """Attaches tier to the volume
-
- Args:
- mnode (str): Node on which cmd has to be executed.
- volname (str): volume name
- num_bricks_to_add (str): number of bricks to be added as hot tier
- extra_servers (str|list): from this server|these servers,
- hot tier will be added to volume
- extra_servers_info (dict): dict of server info of each extra servers
-
- Kwargs:
- replica (str): replica count of the hot tier
- force (bool): If this option is set to True, then attach tier
- will get executed with force option. If it is set to False,
- then attach tier will get executed without force option
-
- Returns:
- tuple: Tuple containing three elements (ret, out, err).
- The first element 'ret' is of type 'int' and is the return value
- of command execution.
-
- The second element 'out' is of type 'str' and is the stdout value
- of the command execution.
-
- The third element 'err' is of type 'str' and is the stderr value
- of the command execution.
-
- Example:
- tier_attach("abc.com", testvol, '2', ['extra_server1','extra_server2'],
- extra_server_info)
- """
- if not isinstance(extra_servers, list):
- extra_servers = [extra_servers]
-
- replica = int(replica)
- repc = ''
- if replica != 1:
- repc = "replica %d" % replica
-
- frce = ''
- if force:
- frce = 'force'
-
- num_bricks_to_add = int(num_bricks_to_add)
-
- from glustolibs.gluster.lib_utils import form_bricks_list
- bricks_list = form_bricks_list(mnode, volname, num_bricks_to_add,
- extra_servers[:], extra_servers_info)
- if bricks_list is None:
- g.log.error("number of bricks required are greater than "
- "unused bricks")
- return (-1, '', '')
-
- bricks_path = ' '.join(bricks_list)
- bricks_path = [re.sub(r"(.*\/\S+\_)brick(\d+)", r"\1tier\2", item)
- for item in bricks_path.split() if item]
- tier_bricks_path = " ".join(bricks_path)
- cmd = ("gluster volume tier %s attach %s %s %s --mode=script"
- % (volname, repc, tier_bricks_path, frce))
-
- return g.run(mnode, cmd)
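For illustration, the re.sub above renames each generated brick directory into a tier directory; a standalone sketch with a hypothetical brick path:

    import re
    brick = 'server1:/bricks/brick1/testvol_brick0'
    tier_brick = re.sub(r"(.*\/\S+\_)brick(\d+)", r"\1tier\2", brick)
    # tier_brick == 'server1:/bricks/brick1/testvol_tier0'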
-
-
-def tier_start(mnode, volname, force=False):
- """Starts the tier volume
-
- Args:
- mnode (str): Node on which cmd has to be executed.
- volname (str): volume name
-
- Kwargs:
-        force (bool): If this option is set to True, then tier start
-            will get executed with force option. If it is set to False,
-            then tier start will get executed without force option
- Returns:
- tuple: Tuple containing three elements (ret, out, err).
- The first element 'ret' is of type 'int' and is the return value
- of command execution.
-
- The second element 'out' is of type 'str' and is the stdout value
- of the command execution.
-
- The third element 'err' is of type 'str' and is the stderr value
- of the command execution.
-
- Example:
- tier_start("abc.com", testvol)
- """
-
- frce = ''
- if force:
- frce = 'force'
-
- cmd = ("gluster volume tier %s start %s --mode=script"
- % (volname, frce))
- return g.run(mnode, cmd)
-
-
-def tier_status(mnode, volname):
- """executes tier status command
-
- Args:
- mnode (str): Node on which cmd has to be executed.
- volname (str): volume name
-
- Returns:
- tuple: Tuple containing three elements (ret, out, err).
- The first element 'ret' is of type 'int' and is the return value
- of command execution.
-
- The second element 'out' is of type 'str' and is the stdout value
- of the command execution.
-
- The third element 'err' is of type 'str' and is the stderr value
- of the command execution.
-
- Example:
- tier_status("abc.com", testvol)
- """
-
- cmd = "gluster volume tier %s status" % volname
- ret = g.run(mnode, cmd)
-
- return ret
-
-
-def get_tier_status(mnode, volname):
- """Parse the output of 'gluster tier status' command.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- NoneType: None if command execution fails, parse errors.
- dict: dict on success.
-
- Examples:
- >>> get_tier_status('abc.lab.eng.xyz.com', 'testvol')
- {'node': [{'promotedFiles': '0', 'demotedFiles': '0', 'nodeName':
- 'localhost', 'statusStr': 'in progress'}, {'promotedFiles': '0',
- 'demotedFiles': '0', 'nodeName': '10.70.47.16', 'statusStr':
- 'in progress'}], 'task-id': '2ed28cbd-4246-493a-87b8-1fdcce313b34',
- 'nodeCount': '4', 'op': '7'}
- """
-
- cmd = "gluster volume tier %s status --xml" % volname
- ret, out, _ = g.run(mnode, cmd)
- if ret != 0:
- g.log.error("Failed to execute 'tier status' on node %s. "
- "Hence failed to get tier status.", mnode)
- return None
-
- try:
- root = etree.XML(out)
- except etree.ParseError:
- g.log.error("Failed to parse the gluster tier status xml output.")
- return None
-
- tier_status = {}
- tier_status["node"] = []
- for info in root.findall("volRebalance"):
- for element in info.getchildren():
- if element.tag == "node":
- status_info = {}
- for elmt in element.getchildren():
- status_info[elmt.tag] = elmt.text
- tier_status[element.tag].append(status_info)
- else:
- tier_status[element.tag] = element.text
- return tier_status
-
-
-def tier_detach_start(mnode, volname):
- """starts detaching tier on given volume
-
- Args:
- mnode (str): Node on which cmd has to be executed.
- volname (str): volume name
-
- Returns:
- tuple: Tuple containing three elements (ret, out, err).
- The first element 'ret' is of type 'int' and is the return value
- of command execution.
-
- The second element 'out' is of type 'str' and is the stdout value
- of the command execution.
-
- The third element 'err' is of type 'str' and is the stderr value
- of the command execution.
-
- Example:
- tier_detach_start("abc.com", testvol)
-
- """
-
- cmd = "gluster volume tier %s detach start --mode=script" % volname
- return g.run(mnode, cmd)
-
-
-def tier_detach_status(mnode, volname):
- """executes detach tier status on given volume
-
- Args:
- mnode (str): Node on which cmd has to be executed.
- volname (str): volume name
-
- Returns:
- tuple: Tuple containing three elements (ret, out, err).
- The first element 'ret' is of type 'int' and is the return value
- of command execution.
-
- The second element 'out' is of type 'str' and is the stdout value
- of the command execution.
-
- The third element 'err' is of type 'str' and is the stderr value
- of the command execution.
-
- Example:
- tier_detach_status("abc.com", testvol)
-
- """
-
- cmd = "gluster volume tier %s detach status --mode=script" % volname
- return g.run(mnode, cmd)
-
-
-def tier_detach_stop(mnode, volname):
- """stops detaching tier on given volume
-
- Args:
- mnode (str): Node on which cmd has to be executed.
- volname (str): volume name
-
- Returns:
- tuple: Tuple containing three elements (ret, out, err).
- The first element 'ret' is of type 'int' and is the return value
- of command execution.
-
- The second element 'out' is of type 'str' and is the stdout value
- of the command execution.
-
- The third element 'err' is of type 'str' and is the stderr value
- of the command execution.
-
- Example:
- tier_detach_stop("abc.com", testvol)
-
- """
-
- cmd = "gluster volume tier %s detach stop --mode=script" % volname
- return g.run(mnode, cmd)
-
-
-def tier_detach_commit(mnode, volname):
- """commits detach tier on given volume
-
- Args:
- mnode (str): Node on which cmd has to be executed.
- volname (str): volume name
-
- Returns:
- tuple: Tuple containing three elements (ret, out, err).
- The first element 'ret' is of type 'int' and is the return value
- of command execution.
-
- The second element 'out' is of type 'str' and is the stdout value
- of the command execution.
-
- The third element 'err' is of type 'str' and is the stderr value
- of the command execution.
-
- Example:
- tier_detach_commit("abc.com", testvol)
-
- """
-
- cmd = "gluster volume tier %s detach commit --mode=script" % volname
- return g.run(mnode, cmd)
-
-
-def tier_detach_force(mnode, volname):
- """detaches tier forcefully on given volume
-
- Args:
- mnode (str): Node on which cmd has to be executed.
- volname (str): volume name
-
- Returns:
- tuple: Tuple containing three elements (ret, out, err).
- The first element 'ret' is of type 'int' and is the return value
- of command execution.
-
- The second element 'out' is of type 'str' and is the stdout value
- of the command execution.
-
- The third element 'err' is of type 'str' and is the stderr value
- of the command execution.
-
- Example:
- tier_detach_force("abc.com", testvol)
-
- """
-
- cmd = "gluster volume tier %s detach force --mode=script" % volname
- return g.run(mnode, cmd)
-
-
-def get_detach_tier_status(mnode, volname):
- """Parse the output of 'gluster volume tier detach status' command.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- NoneType: None if command execution fails, parse errors.
- dict: dict on success.
-
- Examples:
- >>> get_detach_tier_status('abc.lab.eng.xyz.com', "testvol")
- {'node': [{'files': '0', 'status': '3', 'lookups': '1', 'skipped': '0',
- 'nodeName': 'localhost', 'failures': '0', 'runtime': '0.00', 'id':
- '11336017-9561-4e88-9ac3-a94d4b403340', 'statusStr': 'completed',
- 'size': '0'}, {'files': '0', 'status': '3', 'lookups': '0', 'skipped':
- '0', 'nodeName': '10.70.47.16', 'failures': '0', 'runtime': '0.00',
- 'id': 'a2b88b10-eba2-4f97-add2-8dc37df08b27', 'statusStr': 'completed',
- 'size': '0'}], 'nodeCount': '4', 'aggregate': {'files': '0', 'status':
- '3', 'lookups': '1', 'skipped': '0', 'failures': '0', 'runtime': '0.0',
- 'statusStr': 'completed', 'size': '0'}}
- """
-
- cmd = "gluster volume tier %s detach status --xml" % volname
- ret, out, _ = g.run(mnode, cmd)
- if ret != 0:
- g.log.error("Failed to execute 'detach tier status' on node %s. "
- "Hence failed to get detach tier status.", mnode)
- return None
-
- try:
- root = etree.XML(out)
- except etree.ParseError:
- g.log.error("Failed to parse the detach tier status xml output.")
- return None
-
- tier_status = {}
- tier_status["node"] = []
- for info in root.findall("volDetachTier"):
- for element in info.getchildren():
- if element.tag == "node":
- status_info = {}
- for elmt in element.getchildren():
- status_info[elmt.tag] = elmt.text
- tier_status[element.tag].append(status_info)
- elif element.tag == "aggregate":
- status_info = {}
- for elmt in element.getchildren():
- status_info[elmt.tag] = elmt.text
- tier_status[element.tag] = status_info
- else:
- tier_status[element.tag] = element.text
- return tier_status
-
-
-def tier_detach_start_and_get_taskid(mnode, volname):
- """Parse the output of 'gluster volume tier detach start' command.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- NoneType: None if command execution fails, parse errors.
- dict: dict on success.
-
- Examples:
- >>> tier_detach_start_and_get_taskid('abc.lab.eng.xyz.com',
- "testvol")
- {'task-id': '8020835c-ff0d-4ea1-9f07-62dd067e92d4'}
- """
-
- cmd = "gluster volume tier %s detach start --xml" % volname
- ret, out, _ = g.run(mnode, cmd)
- if ret != 0:
- g.log.error("Failed to execute 'detach tier start' on node %s. "
- "Hence failed to parse the detach tier start.", mnode)
- return None
-
- try:
- root = etree.XML(out)
- except etree.ParseError:
- g.log.error("Failed to parse the gluster detach tier "
- "start xml output.")
- return None
-
- tier_status = {}
- for info in root.findall("volDetachTier"):
- for element in info.getchildren():
- tier_status[element.tag] = element.text
- return tier_status
-
-
-def tier_detach_stop_and_get_status(mnode, volname):
- """Parse the output of 'gluster volume tier detach stop' command.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- NoneType: None if command execution fails, parse errors.
- dict: dict on success.
-
- Examples:
- >>> tier_detach_stop_and_get_status('abc.lab.eng.xyz.com',
- "testvol")
- {'node': [{'files': '0', 'status': '3', 'lookups': '1', 'skipped': '0',
- 'nodeName': 'localhost', 'failures': '0', 'runtime': '0.00', 'id':
- '11336017-9561-4e88-9ac3-a94d4b403340', 'statusStr': 'completed',
- 'size': '0'}, {'files': '0', 'status': '3', 'lookups': '0', 'skipped':
- '0', 'nodeName': '10.70.47.16', 'failures': '0', 'runtime': '0.00',
- 'id': 'a2b88b12-eba2-4f97-add2-8dc37df08b27', 'statusStr': 'completed',
- 'size': '0'}], 'nodeCount': '4', 'aggregate': {'files': '0', 'status':
- '3', 'lookups': '1', 'skipped': '0', 'failures': '0', 'runtime': '0.0',
- 'statusStr': 'completed', 'size': '0'}}
- """
-
- cmd = "gluster volume tier %s detach stop --xml" % volname
- ret, out, _ = g.run(mnode, cmd)
- if ret != 0:
- g.log.error("Failed to execute 'tier start' on node %s. "
- "Hence failed to parse the tier start.", mnode)
- return None
-
- try:
- root = etree.XML(out)
- except etree.ParseError:
- g.log.error("Failed to parse the gluster detach tier stop"
- " xml output.")
- return None
-
- tier_status = {}
- tier_status["node"] = []
- for info in root.findall("volDetachTier"):
- for element in info.getchildren():
- if element.tag == "node":
- status_info = {}
- for elmt in element.getchildren():
- status_info[elmt.tag] = elmt.text
- tier_status[element.tag].append(status_info)
- elif element.tag == "aggregate":
- status_info = {}
- for elmt in element.getchildren():
- status_info[elmt.tag] = elmt.text
- tier_status[element.tag] = status_info
- else:
- tier_status[element.tag] = element.text
- return tier_status
-
-
-def wait_for_detach_tier_to_complete(mnode, volname, timeout=300):
- """Waits for the detach tier to complete
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Kwargs:
- timeout (int): timeout value to wait for detach tier to complete
-
- Returns:
- True on success, False otherwise
-
- Examples:
- >>> wait_for_detach_tier_to_complete(mnode, "testvol")
- """
-
- count = 0
- flag = 0
- while (count < timeout):
- status_info = get_detach_tier_status(mnode, volname)
- if status_info is None:
- return False
-
- status = status_info['aggregate']['statusStr']
- if status == 'completed':
- flag = 1
- break
-
- time.sleep(10)
- count = count + 10
- if not flag:
- g.log.error("detach tier is not completed")
- return False
- else:
- g.log.info("detach tier is successfully completed")
- return True
-
-
-def get_files_from_hot_tier(mnode, volname):
- """Lists files from hot tier for the given volume
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
-        Empty list: if there are no files in hot tier.
- list: list of files in hot tier on success.
-
- Examples:
- >>>get_files_from_hot_tier(mnode, "testvol")
- """
-
- files = []
- from glustolibs.gluster.volume_libs import get_subvols
- subvols = get_subvols(mnode, volname)
- for subvol in subvols['hot_tier_subvols']:
- info = subvol[0].split(':')
- file_list = list_files(info[0], info[1])
- for file in file_list:
- if ".glusterfs" not in file:
- files.append(file)
-
- return files
-
-
-def get_files_from_cold_tier(mnode, volname):
- """Lists files from cold tier for the given volume
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
-        Empty list: if there are no files in cold tier.
- list: list of files in cold tier on success.
-
- Examples:
- >>>get_files_from_hot_tier("testvol")
- """
-
- files = []
- from glustolibs.gluster.volume_libs import get_subvols
- subvols = get_subvols(mnode, volname)
- for subvol in subvols['cold_tier_subvols']:
- info = subvol[0].split(':')
- file_list = list_files(info[0], info[1])
- for file in file_list:
- if ".glusterfs" not in file:
- files.append(file)
-
- return files
-
-
-def get_tier_promote_frequency(mnode, volname):
- """Gets tier promote frequency value for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- NoneType: None if command execution fails, parse errors.
- str: promote frequency value on success.
-
- Examples:
- >>>get_tier_promote_frequency("abc.com", "testvol")
- """
-
- from glustolibs.gluster.volume_ops import get_volume_options
- vol_options = get_volume_options(mnode, volname)
- if vol_options is None:
- g.log.error("Failed to get volume options")
- return None
-
- return vol_options['cluster.tier-promote-frequency']
-
-
-def get_tier_demote_frequency(mnode, volname):
- """Gets tier demote frequency value for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- NoneType: None if command execution fails, parse errors.
- str: demote frequency value on success.
-
- Examples:
- >>>get_tier_demote_frequency("abc.com", "testvol")
- """
-
- from glustolibs.gluster.volume_ops import get_volume_options
- vol_options = get_volume_options(mnode, volname)
- if vol_options is None:
- g.log.error("Failed to get volume options")
- return None
-
- return vol_options['cluster.tier-demote-frequency']
-
-
-def get_tier_mode(mnode, volname):
- """Gets tier mode for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- NoneType: None if command execution fails, parse errors.
- str: tier mode on success.
-
- Examples:
- >>>get_tier_mode("testvol")
- """
-
- from glustolibs.gluster.volume_ops import get_volume_options
- vol_options = get_volume_options(mnode, volname)
- if vol_options is None:
- g.log.error("Failed to get volume options")
- return None
-
- return vol_options['cluster.tier-mode']
-
-
-def get_tier_max_mb(mnode, volname):
- """Gets tier max mb for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- NoneType: None if command execution fails, parse errors.
- str: tier max mb on success.
-
- Examples:
- >>>get_tier_max_mb("abc.com", "testvol")
- """
-
- from glustolibs.gluster.volume_ops import get_volume_options
- vol_options = get_volume_options(mnode, volname)
- if vol_options is None:
- g.log.error("Failed to get volume options")
- return None
-
- return vol_options['cluster.tier-max-mb']
-
-
-def get_tier_max_files(mnode, volname):
- """Gets tier max files for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- NoneType: None if command execution fails, parse errors.
- str: tier max files on success.
-
- Examples:
- >>>get_tier_max_files("abc.com", "testvol")
- """
-
- from glustolibs.gluster.volume_ops import get_volume_options
- vol_options = get_volume_options(mnode, volname)
- if vol_options is None:
- g.log.error("Failed to get volume options")
- return None
-
- return vol_options['cluster.tier-max-files']
-
-
-def get_tier_watermark_high_limit(mnode, volname):
- """Gets tier watermark high limit for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- NoneType: None if command execution fails, parse errors.
- str: tier watermark high limit on success.
-
- Examples:
- >>>get_tier_watermark_high_limit(mnode, "testvol")
- """
-
- from glustolibs.gluster.volume_ops import get_volume_options
- vol_options = get_volume_options(mnode, volname)
- if vol_options is None:
- g.log.error("Failed to get volume options")
- return None
-
- return vol_options['cluster.watermark-hi']
-
-
-def get_tier_watermark_low_limit(mnode, volname):
- """Gets tier watermark low limit for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- NoneType: None if command execution fails, parse errors.
- str: tier watermark low limit on success.
-
- Examples:
- >>>get_tier_watermark_low_limit("abc.com", "testvol")
- """
-
- from glustolibs.gluster.volume_ops import get_volume_options
- vol_options = get_volume_options(mnode, volname)
- if vol_options is None:
- g.log.error("Failed to get volume options")
- return None
-
- return vol_options['cluster.watermark-low']
-
-
-def set_tier_promote_frequency(mnode, volname, value):
- """Sets tier promote frequency value for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
- value (str): promote frequency value
-
- Returns:
- bool: True on success, False Otherwise
-
- Examples:
- >>>set_tier_promote_frequency("abc.com", "testvol", '1000')
- """
-
- option = {'cluster.tier-promote-frequency': value}
-
- from glustolibs.gluster.volume_ops import set_volume_options
- if not set_volume_options(mnode, volname,
- options=option):
- g.log.error("Failed to set promote frequency to %s"
- % value)
- return False
-
- return True
-
-
-def set_tier_demote_frequency(mnode, volname, value):
- """Sets tier demote frequency value for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
- value (str): demote frequency value
-
- Returns:
- bool: True on success, False Otherwise
-
- Examples:
- >>>set_tier_demote_frequency("abc.com", "testvol", "500")
- """
-
- option = {'cluster.tier-demote-frequency': value}
-
- from glustolibs.gluster.volume_ops import set_volume_options
- if not set_volume_options(mnode, volname,
- options=option):
- g.log.error("Failed to set demote frequency to %s"
- % value)
- return False
-
- return True
-
-
-def set_tier_mode(mnode, volname, value):
- """Sets tier mode for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
- value (str): tier mode value
-
- Returns:
- bool: True on success, False Otherwise
-
- Examples:
- >>>set_tier_mode("abc.com", "testvol", "cache")
- """
-
- option = {'cluster.tier-mode': value}
-
- from glustolibs.gluster.volume_ops import set_volume_options
- if not set_volume_options(mnode, volname,
- options=option):
- g.log.error("Failed to set tier mode to %s"
- % value)
- return False
-
- return True
-
-
-def set_tier_max_mb(mnode, volname, value):
- """Sets tier max mb for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-        value (str): tier max mb value
-
- Returns:
- bool: True on success, False Otherwise
-
- Examples:
- >>>set_tier_max_mb("abc.com", "testvol", "50")
- """
-
- option = {'cluster.tier-max-mb': value}
-
- from glustolibs.gluster.volume_ops import set_volume_options
- if not set_volume_options(mnode, volname,
- options=option):
- g.log.error("Failed to set tier max mb to %s"
- % value)
- return False
-
- return True
-
-
-def set_tier_max_files(mnode, volname, value):
- """Sets tier max files for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-        value (str): tier max files value
-
- Returns:
- bool: True on success, False Otherwise
-
- Examples:
- >>>set_tier_max_files("abc.com", "testvol", "10")
- """
-
- option = {'cluster.tier-max-files': value}
-
- from glustolibs.gluster.volume_ops import set_volume_options
- if not set_volume_options(mnode, volname,
- options=option):
- g.log.error("Failed to set tier max files to %s"
- % value)
- return False
-
- return True
-
-
-def set_tier_watermark_high_limit(mnode, volname, value):
- """Sets tier watermark high limit for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-        value (str): tier watermark high limit value
-
- Returns:
- bool: True on success, False Otherwise
-
- Examples:
- >>>set_tier_watermark_high_limit("abc.com", "testvol", "95")
- """
-
- option = {'cluster.watermark-hi': value}
-
- from glustolibs.gluster.volume_ops import set_volume_options
- if not set_volume_options(mnode, volname,
- options=option):
- g.log.error("Failed to set tier watermark high limit to %s"
- % value)
- return False
-
- return True
-
-
-def set_tier_watermark_low_limit(mnode, volname, value):
- """Sets tier watermark low limit for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-        value (str): tier watermark low limit value
-
- Returns:
- bool: True on success, False Otherwise
-
- Examples:
- >>>set_tier_watermark_low_limit("abc.com", "testvol", "40")
- """
-
- option = {'cluster.watermark-low': value}
-
- from glustolibs.gluster.volume_ops import set_volume_options
- if not set_volume_options(mnode, volname,
- options=option):
- g.log.error("Failed to set tier watermark low limit to %s"
- % value)
- return False
-
- return True
-
-
-def get_tier_pid(mnode, volname):
- """Gets tier pid for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- NoneType: None if command execution fails, parse errors.
- str: pid of tier process on success.
-
- Examples:
- >>>get_tier_pid("abc.xyz.com", "testvol")
- """
-
- cmd = ("ps -ef | grep -v grep | grep '/var/log/glusterfs/%s-tier.log' |"
- "awk '{print $2}'" % volname)
- ret, out, err = g.run(mnode, cmd)
- if ret != 0:
- g.log.error("Failed to execute 'ps' cmd")
- return None
-
- return out.strip("\n")
-
-
-def is_tier_process_running(mnode, volname):
- """Checks whether tier process is running
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- True on success, False otherwise
-
- Examples:
- >>>is_tier_process_running("abc.xyz.com", "testvol")
- """
-
- pid = get_tier_pid(mnode, volname)
- if pid == '':
- return False
- return True
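A short, hypothetical usage sketch combining the two helpers above (host and volume names are placeholders):

    if is_tier_process_running("abc.xyz.com", "testvol"):
        pid = get_tier_pid("abc.xyz.com", "testvol")
        g.log.info("Tier daemon running with pid %s", pid)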
diff --git a/glustolibs-gluster/glustolibs/gluster/volume_libs.py b/glustolibs-gluster/glustolibs/gluster/volume_libs.py
index a5e54101e..87e70ca8c 100644
--- a/glustolibs-gluster/glustolibs/gluster/volume_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/volume_libs.py
@@ -31,9 +31,6 @@ from glustolibs.gluster.volume_ops import (volume_create, volume_start,
volume_info, volume_status,
get_volume_options,
get_volume_list)
-from glustolibs.gluster.tiering_ops import (add_extra_servers_to_cluster,
- tier_attach,
- is_tier_process_running)
from glustolibs.gluster.quota_ops import (quota_enable, quota_limit_usage,
is_quota_enabled)
from glustolibs.gluster.uss_ops import enable_uss, is_uss_enabled
@@ -67,7 +64,7 @@ def volume_exists(mnode, volname):
def setup_volume(mnode, all_servers_info, volume_config, multi_vol=False,
- force=False):
+ force=False, create_only=False):
"""Setup Volume with the configuration defined in volume_config
Args:
@@ -101,11 +98,6 @@ def setup_volume(mnode, all_servers_info, volume_config, multi_vol=False,
'size': '100GB'},
'enable': False},
'uss': {'enable': False},
- 'tier': {'create_tier': True,
- 'tier_type': {'type': 'distributed-replicated',
- 'replica_count': 2,
- 'dist_count': 2,
- 'transport': 'tcp'}},
'options': {'performance.readdir-ahead': True}
}
Kwargs:
@@ -115,7 +107,11 @@ def setup_volume(mnode, all_servers_info, volume_config, multi_vol=False,
force (bool): If this option is set to True, then volume creation
command is executed with force option.
False, without force option.
- By default, value is set to False
+ By default, value is set to False.
+        create_only (bool): True, if only volume creation is needed.
+                    False, will do volume create, start and set operations,
+                    if any are provided in the volume_config.
+                    By default, value is set to False.
Returns:
bool : True on successful setup. False Otherwise
@@ -128,8 +124,8 @@ def setup_volume(mnode, all_servers_info, volume_config, multi_vol=False,
return False
# Check if the volume already exists
- volinfo = get_volume_info(mnode=mnode)
- if volinfo is not None and volname in volinfo.keys():
+ vollist = get_volume_list(mnode=mnode)
+ if vollist is not None and volname in vollist:
g.log.info("volume %s already exists. Returning...", volname)
return True
@@ -293,6 +289,25 @@ def setup_volume(mnode, all_servers_info, volume_config, multi_vol=False,
g.log.error("Unable to create volume %s", volname)
return False
+ if create_only and (ret == 0):
+ g.log.info("Volume creation of {} is done successfully".format(
+ volname))
+ return True
+
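A hedged usage sketch of the new create_only flow; the volume_config below is abbreviated and its keys/values are hypothetical:

    config = {'name': 'testvol',
              'servers': ['server1', 'server2'],
              'voltype': {'type': 'replicated', 'replica_count': 2}}
    # Creates the volume but skips the start and option-set phases.
    ret = setup_volume('server1', all_servers_info, config, create_only=True)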
+ is_ganesha = False
+ if 'nfs_ganesha' in volume_config:
+ is_ganesha = bool(volume_config['nfs_ganesha']['enable'])
+
+ if not is_ganesha:
+ # Set all the volume options:
+ if 'options' in volume_config:
+ volume_options = volume_config['options']
+ ret = set_volume_options(mnode=mnode, volname=volname,
+ options=volume_options)
+ if not ret:
+ g.log.error("Unable to set few volume options")
+ return False
+
# Start Volume
time.sleep(2)
ret = volume_start(mnode, volname)
@@ -300,68 +315,6 @@ def setup_volume(mnode, all_servers_info, volume_config, multi_vol=False,
g.log.error("volume start %s failed", volname)
return False
- # Create Tier volume
- if ('tier' in volume_config and 'create_tier' in volume_config['tier'] and
- volume_config['tier']['create_tier']):
- # get servers info for tier attach
- if ('extra_servers' in volume_config and
- volume_config['extra_servers']):
- extra_servers = volume_config['extra_servers']
- ret = add_extra_servers_to_cluster(mnode, extra_servers)
- if not ret:
- return False
- else:
- extra_servers = volume_config['servers']
-
- # get the tier volume type
- if 'tier_type' in volume_config['tier']:
- if 'type' in volume_config['tier']['tier_type']:
- tier_volume_type = volume_config['tier']['tier_type']['type']
- dist = rep = 1
- if tier_volume_type == 'distributed':
- if 'dist_count' in volume_config['tier']['tier_type']:
- dist = (volume_config['tier']['tier_type']
- ['dist_count'])
-
- elif tier_volume_type == 'replicated':
- if 'replica_count' in volume_config['tier']['tier_type']:
- rep = (volume_config['tier']['tier_type']
- ['replica_count'])
-
- elif tier_volume_type == 'distributed-replicated':
- if 'dist_count' in volume_config['tier']['tier_type']:
- dist = (volume_config['tier']['tier_type']
- ['dist_count'])
- if 'replica_count' in volume_config['tier']['tier_type']:
- rep = (volume_config['tier']['tier_type']
- ['replica_count'])
- else:
- tier_volume_type = 'distributed'
- dist = 1
- rep = 1
- number_of_bricks = dist * rep
-
- # Attach Tier
- ret, _, _ = tier_attach(mnode=mnode, volname=volname,
- extra_servers=extra_servers,
- extra_servers_info=all_servers_info,
- num_bricks_to_add=number_of_bricks,
- replica=rep)
- if ret != 0:
- g.log.error("Unable to attach tier")
- return False
-
- time.sleep(30)
- # Check if tier is running
- _rc = True
- for server in extra_servers:
- ret = is_tier_process_running(server, volname)
- if not ret:
- g.log.error("Tier process not running on %s", server)
- _rc = False
- if not _rc:
- return False
-
# Enable Quota
if ('quota' in volume_config and 'enable' in volume_config['quota'] and
volume_config['quota']['enable']):
@@ -411,20 +364,22 @@ def setup_volume(mnode, all_servers_info, volume_config, multi_vol=False,
g.log.error("USS is not enabled on the volume %s", volname)
return False
- # Set all the volume options:
- if 'options' in volume_config:
- volume_options = volume_config['options']
- ret = set_volume_options(mnode=mnode, volname=volname,
- options=volume_options)
- if not ret:
- g.log.error("Unable to set few volume options")
- return False
+ if is_ganesha:
+ # Set all the volume options for NFS Ganesha
+ if 'options' in volume_config:
+ volume_options = volume_config['options']
+ ret = set_volume_options(mnode=mnode, volname=volname,
+ options=volume_options)
+ if not ret:
+ g.log.error("Unable to set few volume options")
+ return False
+
return True
def bulk_volume_creation(mnode, number_of_volumes, servers_info,
volume_config, vol_prefix="mult_vol_",
- is_force=False):
+ is_force=False, is_create_only=False):
"""
Creates the number of volumes user has specified
@@ -438,7 +393,11 @@ def bulk_volume_creation(mnode, number_of_volumes, servers_info,
Kwargs:
vol_prefix (str): Prefix to be added to the volume name.
is_force (bool): True, If volume create command need to be executed
- with force, False Otherwise. Defaults to False
+ with force, False Otherwise. Defaults to False.
+        is_create_only (bool): True, if only volume creation is needed.
+                    False, will do volume create, start and set operations,
+                    if any are provided in the volume_config.
+                    By default, value is set to False.
Returns:
bool: True on successful bulk volume creation, False Otherwise.
@@ -468,7 +427,7 @@ def bulk_volume_creation(mnode, number_of_volumes, servers_info,
for volume in range(number_of_volumes):
volume_config['name'] = vol_prefix + volume_name + str(volume)
ret = setup_volume(mnode, servers_info, volume_config, multi_vol=True,
- force=is_force)
+ force=is_force, create_only=is_create_only)
if not ret:
g.log.error("Volume creation failed for the volume %s"
% volume_config['name'])
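For reference, a hedged usage sketch of bulk creation with the new flag (host name and configs are placeholders):

    ret = bulk_volume_creation('server1', 5, all_servers_info, volume_config,
                               vol_prefix='mult_vol_', is_create_only=True)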
@@ -617,77 +576,11 @@ def get_subvols(mnode, volname):
get_subvols("abc.xyz.com", "testvol")
"""
- subvols = {
- 'is_tier': False,
- 'hot_tier_subvols': [],
- 'cold_tier_subvols': [],
- 'volume_subvols': []
- }
+ subvols = {'volume_subvols': []}
+
volinfo = get_volume_info(mnode, volname)
if volinfo is not None:
voltype = volinfo[volname]['typeStr']
- if voltype == 'Tier':
- # Set is_tier to True
- subvols['is_tier'] = True
-
- # Get hot tier subvols
- hot_tier_type = (volinfo[volname]["bricks"]
- ['hotBricks']['hotBrickType'])
- tmp = volinfo[volname]["bricks"]['hotBricks']["brick"]
- hot_tier_bricks = [x["name"] for x in tmp if "name" in x]
- if hot_tier_type == 'Distribute':
- for brick in hot_tier_bricks:
- subvols['hot_tier_subvols'].append([brick])
-
- elif (hot_tier_type == 'Replicate' or
- hot_tier_type == 'Distributed-Replicate'):
- rep_count = int(
- (volinfo[volname]["bricks"]['hotBricks']
- ['numberOfBricks']).split("=", 1)[0].split("x")[1].strip()
- )
- subvol_list = (
- [hot_tier_bricks[i:i + rep_count]
- for i in range(0, len(hot_tier_bricks), rep_count)])
- subvols['hot_tier_subvols'] = subvol_list
-
- # Get cold tier subvols
- cold_tier_type = (volinfo[volname]["bricks"]['coldBricks']
- ['coldBrickType'])
- tmp = volinfo[volname]["bricks"]['coldBricks']["brick"]
- cold_tier_bricks = [x["name"] for x in tmp if "name" in x]
-
- # Distribute volume
- if cold_tier_type == 'Distribute':
- for brick in cold_tier_bricks:
- subvols['cold_tier_subvols'].append([brick])
-
- # Replicate or Distribute-Replicate volume
- elif (cold_tier_type == 'Replicate' or
- cold_tier_type == 'Distributed-Replicate'):
- rep_count = int(
- (volinfo[volname]["bricks"]['coldBricks']
- ['numberOfBricks']).split("=", 1)[0].split("x")[1].strip()
- )
- subvol_list = (
- [cold_tier_bricks[i:i + rep_count]
- for i in range(0, len(cold_tier_bricks), rep_count)])
- subvols['cold_tier_subvols'] = subvol_list
-
- # Disperse or Distribute-Disperse volume
- elif (cold_tier_type == 'Disperse' or
- cold_tier_type == 'Distributed-Disperse'):
- disp_count = sum(
- [int(nums) for nums in (
- (volinfo[volname]["bricks"]['coldBricks']
- ['numberOfBricks']).split("x", 1)[1].
- strip().split("=")[0].strip().strip("()").
- split()) if nums.isdigit()])
- subvol_list = [cold_tier_bricks[i:i + disp_count]
- for i in range(0, len(cold_tier_bricks),
- disp_count)]
- subvols['cold_tier_subvols'] = subvol_list
- return subvols
-
tmp = volinfo[volname]["bricks"]["brick"]
bricks = [x["name"] for x in tmp if "name" in x]
if voltype == 'Replicate' or voltype == 'Distributed-Replicate':
@@ -708,29 +601,6 @@ def get_subvols(mnode, volname):
return subvols
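The replicate branch groups the flat brick list into subvols by replica count, the same chunking the deleted tier code used above; a standalone sketch with sample data:

    bricks = ['s1:/b0', 's2:/b1', 's1:/b2', 's2:/b3']
    rep_count = 2
    # Split the flat brick list into subvolumes of rep_count bricks each.
    subvols = [bricks[i:i + rep_count]
               for i in range(0, len(bricks), rep_count)]
    # subvols == [['s1:/b0', 's2:/b1'], ['s1:/b2', 's2:/b3']]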
-def is_tiered_volume(mnode, volname):
- """Check if volume is tiered volume.
-
- Args:
- mnode (str): Node on which commands are executed.
- volname (str): Name of the volume.
-
- Returns:
- bool : True if the volume is tiered volume. False otherwise
- NoneType: None if volume does not exist.
- """
- volinfo = get_volume_info(mnode, volname)
- if volinfo is None:
- g.log.error("Unable to get the volume info for volume %s", volname)
- return None
-
- voltype = volinfo[volname]['typeStr']
- if voltype == 'Tier':
- return True
- else:
- return False
-
-
def is_distribute_volume(mnode, volname):
"""Check if volume is a plain distributed volume
@@ -747,20 +617,10 @@ def is_distribute_volume(mnode, volname):
g.log.error("Unable to check if the volume %s is distribute", volname)
return False
- if volume_type_info['is_tier']:
- hot_tier_type = (volume_type_info['hot_tier_type_info']
- ['hotBrickType'])
- cold_tier_type = (volume_type_info['cold_tier_type_info']
- ['coldBrickType'])
- if hot_tier_type == 'Distribute' and cold_tier_type == 'Distribute':
- return True
- else:
- return False
+ if volume_type_info['volume_type_info']['typeStr'] == 'Distribute':
+ return True
else:
- if volume_type_info['volume_type_info']['typeStr'] == 'Distribute':
- return True
- else:
- return False
+ return False
def get_volume_type_info(mnode, volname):
@@ -774,9 +634,6 @@ def get_volume_type_info(mnode, volname):
dict : Dict containing the keys, values defining the volume type:
Example:
volume_type_info = {
- 'is_tier': False,
- 'hot_tier_type_info': {},
- 'cold_tier_type_info': {},
'volume_type_info': {
'typeStr': 'Disperse',
'replicaCount': '1',
@@ -788,18 +645,6 @@ def get_volume_type_info(mnode, volname):
}
volume_type_info = {
- 'is_tier': True,
- 'hot_tier_type_info': {
- 'hotBrickType': 'Distribute',
- 'hotreplicaCount': '1'
- },
- 'cold_tier_type_info': {
- 'coldBrickType': 'Disperse',
- 'coldreplicaCount': '1',
- 'coldarbiterCount': '0',
- 'colddisperseCount': '3',
- 'numberOfBricks':1
- },
'volume_type_info': {}
@@ -810,138 +655,26 @@ def get_volume_type_info(mnode, volname):
g.log.error("Unable to get the volume info for volume %s", volname)
return None
- volume_type_info = {
- 'is_tier': False,
- 'hot_tier_type_info': {},
- 'cold_tier_type_info': {},
- 'volume_type_info': {}
- }
-
- voltype = volinfo[volname]['typeStr']
- if voltype == 'Tier':
- volume_type_info['is_tier'] = True
-
- hot_tier_type_info = get_hot_tier_type_info(mnode, volname)
- volume_type_info['hot_tier_type_info'] = hot_tier_type_info
-
- cold_tier_type_info = get_cold_tier_type_info(mnode, volname)
- volume_type_info['cold_tier_type_info'] = cold_tier_type_info
-
- else:
- non_tiered_volume_type_info = {
- 'typeStr': '',
- 'replicaCount': '',
- 'arbiterCount': '',
- 'stripeCount': '',
- 'disperseCount': '',
- 'redundancyCount': ''
- }
- for key in non_tiered_volume_type_info.keys():
- if key in volinfo[volname]:
- non_tiered_volume_type_info[key] = volinfo[volname][key]
- else:
- g.log.error("Unable to find key '%s' in the volume info for "
- "the volume %s", key, volname)
- non_tiered_volume_type_info[key] = None
- volume_type_info['volume_type_info'] = non_tiered_volume_type_info
-
- return volume_type_info
-
-
-def get_cold_tier_type_info(mnode, volname):
- """Returns cold tier type information for the specified volume.
-
- Args:
- mnode (str): Node on which commands are executed.
- volname (str): Name of the volume.
-
- Returns:
- dict : Dict containing the keys, values defining the cold tier type:
- Example:
- cold_tier_type_info = {
- 'coldBrickType': 'Disperse',
- 'coldreplicaCount': '1',
- 'coldarbiterCount': '0',
- 'colddisperseCount': '3',
- 'numberOfBricks': '3'
- }
- NoneType: None if volume does not exist or is not a tiered volume or
- any other key errors.
- """
- volinfo = get_volume_info(mnode, volname)
- if volinfo is None:
- g.log.error("Unable to get the volume info for volume %s", volname)
- return None
-
- if not is_tiered_volume(mnode, volname):
- g.log.error("Volume %s is not a tiered volume", volname)
- return None
-
- cold_tier_type_info = {
- 'coldBrickType': '',
- 'coldreplicaCount': '',
- 'coldarbiterCount': '',
- 'colddisperseCount': '',
- 'numberOfBricks': ''
- }
- for key in cold_tier_type_info.keys():
- if key in volinfo[volname]['bricks']['coldBricks']:
- cold_tier_type_info[key] = (volinfo[volname]['bricks']
- ['coldBricks'][key])
- else:
- g.log.error("Unable to find key '%s' in the volume info for the "
- "volume %s", key, volname)
- return None
-
- if 'Disperse' in cold_tier_type_info['coldBrickType']:
- redundancy_count = (cold_tier_type_info['numberOfBricks'].
- split("x", 1)[1].strip().
- split("=")[0].strip().strip("()").split()[2])
- cold_tier_type_info['coldredundancyCount'] = redundancy_count
-
- return cold_tier_type_info
-
-
-def get_hot_tier_type_info(mnode, volname):
- """Returns hot tier type information for the specified volume.
-
- Args:
- mnode (str): Node on which commands are executed.
- volname (str): Name of the volume.
-
- Returns:
- dict : Dict containing the keys, values defining the hot tier type:
- Example:
- hot_tier_type_info = {
- 'hotBrickType': 'Distribute',
- 'hotreplicaCount': '1'
- }
- NoneType: None if volume does not exist or is not a tiered volume or
- any other key errors.
- """
- volinfo = get_volume_info(mnode, volname)
- if volinfo is None:
- g.log.error("Unable to get the volume info for volume %s", volname)
- return None
-
- if not is_tiered_volume(mnode, volname):
- g.log.error("Volume %s is not a tiered volume", volname)
- return None
-
- hot_tier_type_info = {
- 'hotBrickType': '',
- 'hotreplicaCount': ''
- }
- for key in hot_tier_type_info.keys():
- if key in volinfo[volname]['bricks']['hotBricks']:
- hot_tier_type_info[key] = (volinfo[volname]['bricks']['hotBricks']
- [key])
+ volume_type_info = {'volume_type_info': {}}
+
+ all_volume_type_info = {
+ 'typeStr': '',
+ 'replicaCount': '',
+ 'arbiterCount': '',
+ 'stripeCount': '',
+ 'disperseCount': '',
+ 'redundancyCount': ''
+ }
+ for key in all_volume_type_info.keys():
+ if key in volinfo[volname]:
+ all_volume_type_info[key] = volinfo[volname][key]
else:
- g.log.error("Unable to find key '%s' in the volume info for the "
- "volume %s", key, volname)
- return None
+ g.log.error("Unable to find key '%s' in the volume info for "
+ "the volume %s", key, volname)
+ all_volume_type_info[key] = None
+ volume_type_info['volume_type_info'] = all_volume_type_info
- return hot_tier_type_info
+ return volume_type_info
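A short, hypothetical usage sketch of the simplified helper (host and volume names are placeholders):

    info = get_volume_type_info('abc.xyz.com', 'testvol')
    if info is not None:
        g.log.info("Volume type: %s",
                   info['volume_type_info']['typeStr'])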
def get_num_of_bricks_per_subvol(mnode, volname):
@@ -956,86 +689,21 @@ def get_num_of_bricks_per_subvol(mnode, volname):
number of bricks per subvol
Example:
num_of_bricks_per_subvol = {
- 'is_tier': False,
- 'hot_tier_num_of_bricks_per_subvol': None,
- 'cold_tier_num_of_bricks_per_subvol': None,
'volume_num_of_bricks_per_subvol': 2
}
- num_of_bricks_per_subvol = {
- 'is_tier': True,
- 'hot_tier_num_of_bricks_per_subvol': 3,
- 'cold_tier_num_of_bricks_per_subvol': 2,
- 'volume_num_of_bricks_per_subvol': None
- }
-
- NoneType: None if volume does not exist or is a tiered volume.
+ NoneType: None if volume does not exist.
"""
- bricks_per_subvol_dict = {
- 'is_tier': False,
- 'hot_tier_num_of_bricks_per_subvol': None,
- 'cold_tier_num_of_bricks_per_subvol': None,
- 'volume_num_of_bricks_per_subvol': None
- }
+ bricks_per_subvol_dict = {'volume_num_of_bricks_per_subvol': None}
subvols_dict = get_subvols(mnode, volname)
if subvols_dict['volume_subvols']:
bricks_per_subvol_dict['volume_num_of_bricks_per_subvol'] = (
len(subvols_dict['volume_subvols'][0]))
- else:
- if (subvols_dict['hot_tier_subvols'] and
- subvols_dict['cold_tier_subvols']):
- bricks_per_subvol_dict['is_tier'] = True
- bricks_per_subvol_dict['hot_tier_num_of_bricks_per_subvol'] = (
- len(subvols_dict['hot_tier_subvols'][0]))
- bricks_per_subvol_dict['cold_tier_num_of_bricks_per_subvol'] = (
- len(subvols_dict['cold_tier_subvols'][0]))
return bricks_per_subvol_dict
-def get_cold_tier_num_of_bricks_per_subvol(mnode, volname):
- """Returns number of bricks per subvol in cold tier
-
- Args:
- mnode (str): Node on which commands are executed.
- volname (str): Name of the volume.
-
- Returns:
- int : Number of bricks per subvol on cold tier.
- NoneType: None if volume does not exist or not a tiered volume.
- """
- if not is_tiered_volume(mnode, volname):
- g.log.error("Volume %s is not a tiered volume", volname)
- return None
- subvols_dict = get_subvols(mnode, volname)
- if subvols_dict['cold_tier_subvols']:
- return len(subvols_dict['cold_tier_subvols'][0])
- else:
- return None
-
-
-def get_hot_tier_num_of_bricks_per_subvol(mnode, volname):
- """Returns number of bricks per subvol in hot tier
-
- Args:
- mnode (str): Node on which commands are executed.
- volname (str): Name of the volume.
-
- Returns:
- int : Number of bricks per subvol on hot tier.
- NoneType: None if volume does not exist or not a tiered volume.
- """
- if not is_tiered_volume(mnode, volname):
- g.log.error("Volume %s is not a tiered volume", volname)
- return None
- subvols_dict = get_subvols(mnode, volname)
- if subvols_dict['hot_tier_subvols']:
- return len(subvols_dict['hot_tier_subvols'][0])
- else:
- return None
-
-
def get_replica_count(mnode, volname):
"""Get the replica count of the volume
@@ -1047,17 +715,8 @@ def get_replica_count(mnode, volname):
dict : Dict contain keys, values defining Replica count of the volume.
Example:
replica_count_info = {
- 'is_tier': False,
- 'hot_tier_replica_count': None,
- 'cold_tier_replica_count': None,
'volume_replica_count': 3
}
- replica_count_info = {
- 'is_tier': True,
- 'hot_tier_replica_count': 2,
- 'cold_tier_replica_count': 3,
- 'volume_replica_count': None
- }
NoneType: None if it is parse failure.
"""
vol_type_info = get_volume_type_info(mnode, volname)
@@ -1066,69 +725,14 @@ def get_replica_count(mnode, volname):
volname)
return None
- replica_count_info = {
- 'is_tier': False,
- 'hot_tier_replica_count': None,
- 'cold_tier_replica_count': None,
- 'volume_replica_count': None
- }
+ replica_count_info = {'volume_replica_count': None}
- replica_count_info['is_tier'] = vol_type_info['is_tier']
- if replica_count_info['is_tier']:
- replica_count_info['hot_tier_replica_count'] = (
- vol_type_info['hot_tier_type_info']['hotreplicaCount'])
- replica_count_info['cold_tier_replica_count'] = (
- vol_type_info['cold_tier_type_info']['coldreplicaCount'])
-
- else:
- replica_count_info['volume_replica_count'] = (
- vol_type_info['volume_type_info']['replicaCount'])
+ replica_count_info['volume_replica_count'] = (
+ vol_type_info['volume_type_info']['replicaCount'])
return replica_count_info
-def get_cold_tier_replica_count(mnode, volname):
- """Get the replica count of cold tier.
-
- Args:
- mnode (str): Node on which commands are executed.
- volname (str): Name of the volume.
-
- Returns:
- int : Replica count of the cold tier.
- NoneType: None if volume does not exist or not a tiered volume.
- """
- is_tier = is_tiered_volume(mnode, volname)
- if not is_tier:
- return None
- else:
- volinfo = get_volume_info(mnode, volname)
- cold_tier_replica_count = (volinfo[volname]["bricks"]['coldBricks']
- ['coldreplicaCount'])
- return cold_tier_replica_count
-
-
-def get_hot_tier_replica_count(mnode, volname):
- """Get the replica count of hot tier.
-
- Args:
- mnode (str): Node on which commands are executed.
- volname (str): Name of the volume.
-
- Returns:
- int : Replica count of the hot tier.
- NoneType: None if volume does not exist or not a tiered volume.
- """
- is_tier = is_tiered_volume(mnode, volname)
- if not is_tier:
- return None
- else:
- volinfo = get_volume_info(mnode, volname)
- hot_tier_replica_count = (volinfo[volname]["bricks"]['hotBricks']
- ['hotreplicaCount'])
- return hot_tier_replica_count
-
-
def get_disperse_count(mnode, volname):
"""Get the disperse count of the volume
@@ -1140,15 +744,8 @@ def get_disperse_count(mnode, volname):
dict : Dict contain keys, values defining Disperse count of the volume.
Example:
disperse_count_info = {
- 'is_tier': False,
- 'cold_tier_disperse_count': None,
'volume_disperse_count': 3
}
- disperse_count_info = {
- 'is_tier': True,
- 'cold_tier_disperse_count': 3,
- 'volume_disperse_count': None
- }
None: If it is non dispersed volume.
"""
vol_type_info = get_volume_type_info(mnode, volname)
@@ -1157,45 +754,14 @@ def get_disperse_count(mnode, volname):
volname)
return None
- disperse_count_info = {
- 'is_tier': False,
- 'cold_tier_disperse_count': None,
- 'volume_disperse_count': None
- }
-
- disperse_count_info['is_tier'] = vol_type_info['is_tier']
- if disperse_count_info['is_tier']:
- disperse_count_info['cold_tier_disperse_count'] = (
- vol_type_info['cold_tier_type_info']['colddisperseCount'])
+ disperse_count_info = {'volume_disperse_count': None}
- else:
- disperse_count_info['volume_disperse_count'] = (
+ disperse_count_info['volume_disperse_count'] = (
vol_type_info['volume_type_info']['disperseCount'])
return disperse_count_info
-def get_cold_tier_disperse_count(mnode, volname):
- """Get the disperse count of cold tier.
-
- Args:
- mnode (str): Node on which commands are executed.
- volname (str): Name of the volume.
-
- Returns:
- int : disperse count of the cold tier.
- NoneType: None if volume does not exist or not a tiered volume.
- """
- is_tier = is_tiered_volume(mnode, volname)
- if not is_tier:
- return None
- else:
- volinfo = get_volume_info(mnode, volname)
- cold_tier_disperse_count = (volinfo[volname]["bricks"]['coldBricks']
- ['colddisperseCount'])
- return cold_tier_disperse_count
-
-
def enable_and_validate_volume_options(mnode, volname, volume_options_list,
time_delay=5):
"""Enable the volume option and validate whether the option has be
@@ -1242,7 +808,6 @@ def enable_and_validate_volume_options(mnode, volname, volume_options_list,
def form_bricks_list_to_add_brick(mnode, volname, servers, all_servers_info,
- add_to_hot_tier=False,
**kwargs):
"""Forms list of bricks to add-bricks to the volume.
@@ -1265,9 +830,6 @@ def form_bricks_list_to_add_brick(mnode, volname, servers, all_servers_info,
}
}
Kwargs:
- add_to_hot_tier (bool): True If bricks are to be added to hot_tier.
- False otherwise. Defaults to False.
-
The keys, values in kwargs are:
- replica_count : (int)|None.
Increase the current_replica_count by replica_count
@@ -1306,19 +868,8 @@ def form_bricks_list_to_add_brick(mnode, volname, servers, all_servers_info,
bricks_per_subvol_dict = get_num_of_bricks_per_subvol(mnode, volname)
# Get number of bricks to add.
- if bricks_per_subvol_dict['is_tier']:
- if add_to_hot_tier:
- num_of_bricks_per_subvol = (
- bricks_per_subvol_dict['hot_tier_num_of_bricks_per_subvol']
- )
- else:
- num_of_bricks_per_subvol = (
- bricks_per_subvol_dict
- ['cold_tier_num_of_bricks_per_subvol']
- )
- else:
- num_of_bricks_per_subvol = (
- bricks_per_subvol_dict['volume_num_of_bricks_per_subvol'])
+ num_of_bricks_per_subvol = (
+ bricks_per_subvol_dict['volume_num_of_bricks_per_subvol'])
if num_of_bricks_per_subvol is None:
g.log.error("Number of bricks per subvol is None. "
@@ -1334,15 +885,7 @@ def form_bricks_list_to_add_brick(mnode, volname, servers, all_servers_info,
if replica_count:
# Get Subvols
subvols_info = get_subvols(mnode, volname)
-
- # Calculate number of bricks to add
- if subvols_info['is_tier']:
- if add_to_hot_tier:
- num_of_subvols = len(subvols_info['hot_tier_subvols'])
- else:
- num_of_subvols = len(subvols_info['cold_tier_subvols'])
- else:
- num_of_subvols = len(subvols_info['volume_subvols'])
+ num_of_subvols = len(subvols_info['volume_subvols'])
if num_of_subvols == 0:
g.log.error("No Sub-Volumes available for the volume %s."
@@ -1380,7 +923,7 @@ def form_bricks_list_to_add_brick(mnode, volname, servers, all_servers_info,
def expand_volume(mnode, volname, servers, all_servers_info, force=False,
- add_to_hot_tier=False, **kwargs):
+ **kwargs):
"""Forms list of bricks to add and adds those bricks to the volume.
Args:
@@ -1406,9 +949,6 @@ def expand_volume(mnode, volname, servers, all_servers_info, force=False,
will get executed with force option. If it is set to False,
then add-brick command will get executed without force option
- add_to_hot_tier (bool): True If bricks are to be added to hot_tier.
- False otherwise. Defaults to False.
-
**kwargs
The keys, values in kwargs are:
- replica_count : (int)|None.
@@ -1420,11 +960,9 @@ def expand_volume(mnode, volname, servers, all_servers_info, force=False,
bool: True of expanding volumes is successful.
False otherwise.
- NOTE: adding bricks to hot tier is yet to be added in this function.
"""
bricks_list = form_bricks_list_to_add_brick(mnode, volname, servers,
- all_servers_info,
- add_to_hot_tier, **kwargs)
+ all_servers_info, **kwargs)
if not bricks_list:
g.log.info("Unable to get bricks list to add-bricks. "
@@ -1436,17 +974,8 @@ def expand_volume(mnode, volname, servers, all_servers_info, force=False,
# Get replica count info.
replica_count_info = get_replica_count(mnode, volname)
-
- if is_tiered_volume(mnode, volname):
- if add_to_hot_tier:
- current_replica_count = (
- int(replica_count_info['hot_tier_replica_count']))
- else:
- current_replica_count = (
- int(replica_count_info['cold_tier_replica_count']))
- else:
- current_replica_count = (
- int(replica_count_info['volume_replica_count']))
+ current_replica_count = (
+ int(replica_count_info['volume_replica_count']))
kwargs['replica_count'] = current_replica_count + replica_count
@@ -1462,8 +991,7 @@ def expand_volume(mnode, volname, servers, all_servers_info, force=False,
def form_bricks_list_to_remove_brick(mnode, volname, subvol_num=None,
- replica_num=None,
- remove_from_hot_tier=False, **kwargs):
+ replica_num=None, **kwargs):
"""Form bricks list for removing the bricks.
Args:
@@ -1480,9 +1008,6 @@ def form_bricks_list_to_remove_brick(mnode, volname, subvol_num=None,
If replica_num = 0, then 1st brick from each subvolume is removed.
The replica_num starts from 0.
- remove_from_hot_tier (bool): True If bricks are to be removed from
- hot_tier. False otherwise. Defaults to False.
-
**kwargs
The keys, values in kwargs are:
- replica_count : (int)|None. Specify the number of replicas
@@ -1525,27 +1050,13 @@ def form_bricks_list_to_remove_brick(mnode, volname, subvol_num=None,
is_arbiter = False
# Calculate bricks to remove
- if subvols_info['is_tier']:
- if remove_from_hot_tier:
- current_replica_count = (
- int(replica_count_info['hot_tier_replica_count']))
- subvols_list = subvols_info['hot_tier_subvols']
- else:
- current_replica_count = (
- int(replica_count_info['cold_tier_replica_count']))
- subvols_list = subvols_info['cold_tier_subvols']
- arbiter_count = int(volume_type_info['cold_tier_type_info']
- ['coldarbiterCount'])
- if arbiter_count == 1:
- is_arbiter = True
- else:
- current_replica_count = (
- int(replica_count_info['volume_replica_count']))
- subvols_list = subvols_info['volume_subvols']
- arbiter_count = int(volume_type_info['volume_type_info']
- ['arbiterCount'])
- if arbiter_count == 1:
- is_arbiter = True
+ current_replica_count = (
+ int(replica_count_info['volume_replica_count']))
+ subvols_list = subvols_info['volume_subvols']
+ arbiter_count = int(volume_type_info['volume_type_info']
+ ['arbiterCount'])
+ if arbiter_count == 1:
+ is_arbiter = True
# If replica_num is specified select the bricks of that replica number
# from all the subvolumes.
@@ -1591,14 +1102,7 @@ def form_bricks_list_to_remove_brick(mnode, volname, subvol_num=None,
# remove bricks from sub-volumes
if subvol_num is not None or 'distribute_count' in kwargs:
- if subvols_info['is_tier']:
- if remove_from_hot_tier:
- subvols_list = subvols_info['hot_tier_subvols']
- else:
- subvols_list = subvols_info['cold_tier_subvols']
- else:
- subvols_list = subvols_info['volume_subvols']
-
+ subvols_list = subvols_info['volume_subvols']
if not subvols_list:
g.log.error("No Sub-Volumes available for the volume %s", volname)
return None
@@ -1634,7 +1138,7 @@ def form_bricks_list_to_remove_brick(mnode, volname, subvol_num=None,
def shrink_volume(mnode, volname, subvol_num=None, replica_num=None,
force=False, rebalance_timeout=300, delete_bricks=True,
- remove_from_hot_tier=False, **kwargs):
+ **kwargs):
"""Remove bricks from the volume.
Args:
@@ -1661,9 +1165,6 @@ def shrink_volume(mnode, volname, subvol_num=None, replica_num=None,
delete_bricks (bool): If True, delete the removed bricks after the remove-brick operation.
- remove_from_hot_tier (bool): True If bricks are to be removed from
- hot_tier. False otherwise. Defaults to False.
-
**kwargs
The keys, values in kwargs are:
- replica_count : (int)|None. Specify the replica count to
@@ -1674,12 +1175,10 @@ def shrink_volume(mnode, volname, subvol_num=None, replica_num=None,
bool: True if removing bricks from the volume is successful.
False otherwise.
- NOTE: remove-bricks from hot-tier is yet to be added in this function.
"""
# Form bricks list to remove-bricks
bricks_list_to_remove = form_bricks_list_to_remove_brick(
- mnode, volname, subvol_num, replica_num, remove_from_hot_tier,
- **kwargs)
+ mnode, volname, subvol_num, replica_num, **kwargs)
if not bricks_list_to_remove:
g.log.error("Failed to form bricks list to remove-brick. "
@@ -1698,16 +1197,8 @@ def shrink_volume(mnode, volname, subvol_num=None, replica_num=None,
# Get replica count info.
replica_count_info = get_replica_count(mnode, volname)
- if is_tiered_volume(mnode, volname):
- if remove_from_hot_tier:
- current_replica_count = (
- int(replica_count_info['hot_tier_replica_count']))
- else:
- current_replica_count = (
- int(replica_count_info['cold_tier_replica_count']))
- else:
- current_replica_count = (
- int(replica_count_info['volume_replica_count']))
+ current_replica_count = (
+ int(replica_count_info['volume_replica_count']))
kwargs['replica_count'] = current_replica_count - replica_count
@@ -1825,8 +1316,7 @@ def shrink_volume(mnode, volname, subvol_num=None, replica_num=None,
def form_bricks_to_replace_brick(mnode, volname, servers, all_servers_info,
- src_brick=None, dst_brick=None,
- replace_brick_from_hot_tier=False):
+ src_brick=None, dst_brick=None):
"""Get src_brick, dst_brick to replace brick
Args:
@@ -1853,9 +1343,6 @@ def form_bricks_to_replace_brick(mnode, volname, servers, all_servers_info,
dst_brick (str): New brick to replace the faulty brick
- replace_brick_from_hot_tier (bool): True If brick are to be
- replaced from hot_tier. False otherwise. Defaults to False.
-
Returns:
Tuple: (src_brick, dst_brick)
NoneType: if the volume doesn't exist or on any other failure.
@@ -1881,13 +1368,7 @@ def form_bricks_to_replace_brick(mnode, volname, servers, all_servers_info,
if not src_brick:
# Randomly pick up a brick to bring the brick down and replace.
- if subvols_info['is_tier']:
- if replace_brick_from_hot_tier:
- subvols_list = subvols_info['hot_tier_subvols']
- else:
- subvols_list = subvols_info['cold_tier_subvols']
- else:
- subvols_list = subvols_info['volume_subvols']
+ subvols_list = subvols_info['volume_subvols']
src_brick = (random.choice(random.choice(subvols_list)))
@@ -1896,8 +1377,7 @@ def form_bricks_to_replace_brick(mnode, volname, servers, all_servers_info,
def replace_brick_from_volume(mnode, volname, servers, all_servers_info,
src_brick=None, dst_brick=None,
- delete_brick=True,
- replace_brick_from_hot_tier=False):
+ delete_brick=True, multi_vol=False):
"""Replace faulty brick from the volume.
Args:
@@ -1926,8 +1406,9 @@ def replace_brick_from_volume(mnode, volname, servers, all_servers_info,
delete_brick (bool): If True, delete the replaced faulty brick.
- replace_brick_from_hot_tier (bool): True If brick are to be
- replaced from hot_tier. False otherwise. Defaults to False.
+ multi_vol (bool): True if bricks need to be created for multiple
+ volumes (more than 5). False otherwise. Defaults to False.
Returns:
bool: True if replacing brick from the volume is successful.
@@ -1945,10 +1426,17 @@ def replace_brick_from_volume(mnode, volname, servers, all_servers_info,
subvols_info = get_subvols(mnode, volname)
if not dst_brick:
- dst_brick = form_bricks_list(mnode=mnode, volname=volname,
- number_of_bricks=1,
- servers=servers,
- servers_info=all_servers_info)
+ if multi_vol:
+ dst_brick = form_bricks_for_multivol(mnode=mnode,
+ volname=volname,
+ number_of_bricks=1,
+ servers=servers,
+ servers_info=all_servers_info)
+ else:
+ dst_brick = form_bricks_list(mnode=mnode, volname=volname,
+ number_of_bricks=1,
+ servers=servers,
+ servers_info=all_servers_info)
if not dst_brick:
g.log.error("Failed to get a new brick to replace the faulty "
"brick")
@@ -1957,13 +1445,7 @@ def replace_brick_from_volume(mnode, volname, servers, all_servers_info,
if not src_brick:
# Randomly pick up a brick to bring the brick down and replace.
- if subvols_info['is_tier']:
- if replace_brick_from_hot_tier:
- subvols_list = subvols_info['hot_tier_subvols']
- else:
- subvols_list = subvols_info['cold_tier_subvols']
- else:
- subvols_list = subvols_info['volume_subvols']
+ subvols_list = subvols_info['volume_subvols']
src_brick = (random.choice(random.choice(subvols_list)))
@@ -2028,17 +1510,6 @@ def get_client_quorum_info(mnode, volname):
Returns:
dict: client quorum information for the volume.
client_quorum_dict = {
- 'is_tier': False,
- 'hot_tier_quorum_info':{
- 'is_quorum_applicable': False,
- 'quorum_type': None,
- 'quorum_count': None
- },
- 'cold_tier_quorum_info':{
- 'is_quorum_applicable': False,
- 'quorum_type': None,
- 'quorum_count': None
- },
'volume_quorum_info':{
'is_quorum_applicable': False,
'quorum_type': None,
@@ -2048,17 +1519,6 @@ def get_client_quorum_info(mnode, volname):
NoneType: None if volume does not exist.
"""
client_quorum_dict = {
- 'is_tier': False,
- 'hot_tier_quorum_info': {
- 'is_quorum_applicable': False,
- 'quorum_type': None,
- 'quorum_count': None
- },
- 'cold_tier_quorum_info': {
- 'is_quorum_applicable': False,
- 'quorum_type': None,
- 'quorum_count': None
- },
'volume_quorum_info': {
'is_quorum_applicable': False,
'quorum_type': None,
@@ -2084,111 +1544,37 @@ def get_client_quorum_info(mnode, volname):
# Set the quorum info
volume_type_info = get_volume_type_info(mnode, volname)
- if volume_type_info['is_tier'] is True:
- client_quorum_dict['is_tier'] = True
-
- # Hot Tier quorum info
- hot_tier_type = volume_type_info['hot_tier_type_info']['hotBrickType']
- if (hot_tier_type == 'Replicate' or
- hot_tier_type == 'Distributed-Replicate'):
-
- (client_quorum_dict['hot_tier_quorum_info']
- ['is_quorum_applicable']) = True
- replica_count = (volume_type_info['hot_tier_type_info']
- ['hotreplicaCount'])
-
- # Case1: Replica 2
- if int(replica_count) == 2:
- if 'none' not in quorum_type:
- (client_quorum_dict['hot_tier_quorum_info']
- ['quorum_type']) = quorum_type
-
- if quorum_type == 'fixed':
- if not quorum_count == '(null)':
- (client_quorum_dict['hot_tier_quorum_info']
- ['quorum_count']) = quorum_count
-
- # Case2: Replica > 2
- if int(replica_count) > 2:
- if quorum_type == 'none':
- (client_quorum_dict['hot_tier_quorum_info']
- ['quorum_type']) = 'auto'
- quorum_type == 'auto'
- else:
- (client_quorum_dict['hot_tier_quorum_info']
- ['quorum_type']) = quorum_type
- if quorum_type == 'fixed':
- if not quorum_count == '(null)':
- (client_quorum_dict['hot_tier_quorum_info']
- ['quorum_count']) = quorum_count
-
- # Cold Tier quorum info
- cold_tier_type = (volume_type_info['cold_tier_type_info']
- ['coldBrickType'])
- if (cold_tier_type == 'Replicate' or
- cold_tier_type == 'Distributed-Replicate'):
- (client_quorum_dict['cold_tier_quorum_info']
- ['is_quorum_applicable']) = True
- replica_count = (volume_type_info['cold_tier_type_info']
- ['coldreplicaCount'])
-
- # Case1: Replica 2
- if int(replica_count) == 2:
- if 'none' not in quorum_type:
- (client_quorum_dict['cold_tier_quorum_info']
- ['quorum_type']) = quorum_type
-
- if quorum_type == 'fixed':
- if not quorum_count == '(null)':
- (client_quorum_dict['cold_tier_quorum_info']
- ['quorum_count']) = quorum_count
-
- # Case2: Replica > 2
- if int(replica_count) > 2:
- if quorum_type == 'none':
- (client_quorum_dict['cold_tier_quorum_info']
- ['quorum_type']) = 'auto'
- quorum_type == 'auto'
- else:
- (client_quorum_dict['cold_tier_quorum_info']
- ['quorum_type']) = quorum_type
- if quorum_type == 'fixed':
- if not quorum_count == '(null)':
- (client_quorum_dict['cold_tier_quorum_info']
- ['quorum_count']) = quorum_count
- else:
- volume_type = (volume_type_info['volume_type_info']['typeStr'])
- if (volume_type == 'Replicate' or
- volume_type == 'Distributed-Replicate'):
- (client_quorum_dict['volume_quorum_info']
- ['is_quorum_applicable']) = True
- replica_count = (volume_type_info['volume_type_info']
- ['replicaCount'])
-
- # Case1: Replica 2
- if int(replica_count) == 2:
- if 'none' not in quorum_type:
- (client_quorum_dict['volume_quorum_info']
- ['quorum_type']) = quorum_type
+ volume_type = (volume_type_info['volume_type_info']['typeStr'])
+ if (volume_type == 'Replicate' or
+ volume_type == 'Distributed-Replicate'):
+ (client_quorum_dict['volume_quorum_info']
+ ['is_quorum_applicable']) = True
+ replica_count = (volume_type_info['volume_type_info']['replicaCount'])
+
+ # Case1: Replica 2
+ if int(replica_count) == 2:
+ if 'none' not in quorum_type:
+ (client_quorum_dict['volume_quorum_info']
+ ['quorum_type']) = quorum_type
- if quorum_type == 'fixed':
- if not quorum_count == '(null)':
- (client_quorum_dict['volume_quorum_info']
- ['quorum_count']) = quorum_count
-
- # Case2: Replica > 2
- if int(replica_count) > 2:
- if quorum_type == 'none':
- (client_quorum_dict['volume_quorum_info']
- ['quorum_type']) = 'auto'
- quorum_type == 'auto'
- else:
- (client_quorum_dict['volume_quorum_info']
- ['quorum_type']) = quorum_type
if quorum_type == 'fixed':
if not quorum_count == '(null)':
(client_quorum_dict['volume_quorum_info']
- ['quorum_count']) = quorum_count
+ ['quorum_count']) = quorum_count
+
+ # Case2: Replica > 2
+ if int(replica_count) > 2:
+ if quorum_type == 'none':
+ (client_quorum_dict['volume_quorum_info']
+ ['quorum_type']) = 'auto'
+ quorum_type = 'auto'
+ else:
+ (client_quorum_dict['volume_quorum_info']
+ ['quorum_type']) = quorum_type
+ if quorum_type == 'fixed':
+ if not quorum_count == '(null)':
+ (client_quorum_dict['volume_quorum_info']
+ ['quorum_count']) = quorum_count
return client_quorum_dict
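
For readers of this hunk, a minimal usage sketch of the simplified get_client_quorum_info (the node and volume names are placeholders, not taken from the patch):

    # Hypothetical caller; 'mgmt-node' and 'testvol' are illustrative.
    quorum = get_client_quorum_info('mgmt-node', 'testvol')
    if quorum:
        info = quorum['volume_quorum_info']
        if info['is_quorum_applicable']:
            # With 'auto' quorum, writes need a majority of bricks in
            # each replica set; 'fixed' uses the configured count.
            print(info['quorum_type'], info['quorum_count'])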
@@ -2300,7 +1686,8 @@ def get_volume_type(brickdir_path):
# Adding import here to avoid cyclic imports
from glustolibs.gluster.brick_libs import get_all_bricks
(host, brick_path_info) = brickdir_path.split(':')
- path_info = brick_path_info[:-1]
+ path_info = (brick_path_info[:-2] if brick_path_info.endswith("//")
+ else brick_path_info[:-1])
for volume in get_volume_list(host):
brick_paths = [brick.split(':')[1] for brick in get_all_bricks(host,
volume)]
@@ -2321,3 +1708,62 @@ def get_volume_type(brickdir_path):
else:
g.log.info("Failed to find brick-path %s for volume %s",
brickdir_path, volume)
+
+
+def parse_vol_file(mnode, vol_file):
+ """ Parses the .vol file and returns the content as a dict
+ Args:
+ mnode (str): Node on which commands will be executed.
+ vol_file(str) : Path to the .vol file
+ Returns:
+ (dict): Content of the .vol file
+ None: on failure
+ Example:
+ >>> ret = parse_vol_file("abc@xyz.com",
+ "/var/lib/glusterd/vols/testvol_distributed/
+ trusted-testvol_distributed.tcp-fuse.vol")
+ {'testvol_distributed-client-0': {'type': 'protocol/client',
+ 'option': {'send-gids': 'true','transport.socket.keepalive-count': '9',
+ 'transport.socket.keepalive-interval': '2',
+ 'transport.socket.keepalive-time': '20',
+ 'transport.tcp-user-timeout': '0',
+ 'transport.socket.ssl-enabled': 'off', 'password':
+ 'bcc934b3-9e76-47fd-930c-c31ad9f6e2f0', 'username':
+ '23bb8f1c-b373-4f85-8bab-aaa77b4918ce', 'transport.address-family':
+ 'inet', 'transport-type': 'tcp', 'remote-subvolume':
+ '/gluster/bricks/brick1/testvol_distributed_brick0',
+ 'remote-host': 'xx.xx.xx.xx', 'ping-timeout': '42'}}}
+ """
+ vol_dict, data, key = {}, {}, None
+
+ def _create_dict_from_list(cur_dict, keys, value):
+ """Creates dynamic dictionary from a given list of keys and values"""
+ if len(keys) == 1:
+ cur_dict[keys[0]] = value
+ return
+ if keys[0] not in cur_dict:
+ cur_dict[keys[0]] = {}
+ _create_dict_from_list(cur_dict[keys[0]], keys[1:], value)
+
+ ret, file_contents, err = g.run(mnode, "cat {}".format(vol_file))
+ if ret:
+ g.log.error("Failed to read the .vol file : %s", err)
+ return None
+ if not file_contents:
+ g.log.error("The given .vol file is empty")
+ return None
+ for line in file_contents.split("\n"):
+ if line:
+ line = line.strip()
+ if line.startswith('end-volume'):
+ vol_dict[key] = data
+ data = {}
+ elif line.startswith("volume "):
+ key = line.split(" ")[-1]
+ elif line.startswith("subvolumes "):
+ key_list = line.split(" ")[0]
+ _create_dict_from_list(data, [key_list], line.split(" ")[1:])
+ else:
+ key_list = line.split(" ")[:-1]
+ _create_dict_from_list(data, key_list, line.split(" ")[-1])
+ return vol_dict
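
As a usage sketch for the new parse_vol_file helper (the node name and .vol path below are illustrative only and depend on the deployment):

    # Hypothetical example; the path varies per volume and transport.
    vol_file = ('/var/lib/glusterd/vols/testvol/'
                'trusted-testvol.tcp-fuse.vol')
    graph = parse_vol_file('server1.example.com', vol_file)
    if graph:
        for xlator, conf in graph.items():
            # Each entry maps an xlator name to its 'type', its
            # 'option' sub-dict and, for cluster xlators, the
            # 'subvolumes' list.
            print(xlator, conf.get('type'))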
diff --git a/glustolibs-gluster/glustolibs/gluster/volume_ops.py b/glustolibs-gluster/glustolibs/gluster/volume_ops.py
index 8445efa11..d25a9349b 100644
--- a/glustolibs-gluster/glustolibs/gluster/volume_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/volume_ops.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2015-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -20,6 +20,11 @@ import re
import copy
from glusto.core import Glusto as g
from pprint import pformat
+import io
+try:
+ import ConfigParser as configparser # Python 2
+except ImportError:
+ import configparser  # Python 3
try:
import xml.etree.cElementTree as etree
except ImportError:
@@ -233,15 +238,8 @@ def volume_delete(mnode, volname, xfail=False):
)
return False
- if volinfo[volname]['typeStr'] == 'Tier':
- tmp_hot_brick = volinfo[volname]["bricks"]["hotBricks"]["brick"]
- hot_bricks = [x["name"] for x in tmp_hot_brick if "name" in x]
- tmp_cold_brick = volinfo[volname]["bricks"]["coldBricks"]["brick"]
- cold_bricks = [x["name"] for x in tmp_cold_brick if "name" in x]
- bricks = hot_bricks + cold_bricks
- else:
- bricks = [x["name"] for x in volinfo[volname]["bricks"]["brick"]
- if "name" in x]
+ bricks = [x["name"] for x in volinfo[volname]["bricks"]["brick"] if
+ "name" in x]
ret, out, err = g.run(mnode, "gluster volume delete {} --mode=script"
.format(volname))
if ret != 0:
@@ -387,27 +385,34 @@ def get_volume_status(mnode, volname='all', service='', options=''):
NoneType: on failure
Example:
- get_volume_status("10.70.47.89", volname="testvol")
- >>>{'testvol': {'10.70.47.89': {'/bricks/brick1/a11': {'status': '1',
- 'pid': '28963', 'bricktype': 'cold', 'port': '49163', 'peerid':
- '7fc9015e-8134-4753-b837-54cbc6030c98', 'ports': {'rdma': 'N/A',
- 'tcp': '49163'}}, '/bricks/brick2/a31': {'status': '1', 'pid':
- '28982', 'bricktype': 'cold', 'port': '49164', 'peerid':
- '7fc9015e-8134-4753-b837-54cbc6030c98', 'ports': {'rdma': 'N/A',
- 'tcp': '49164'}}, 'NFS Server': {'status': '1', 'pid': '30525',
- 'port': '2049', 'peerid': '7fc9015e-8134-4753-b837-54cbc6030c98',
- 'ports': {'rdma': 'N/A', 'tcp': '2049'}}, '/bricks/brick1/a12':
- {'status': '1', 'pid': '30505', 'bricktype': 'hot', 'port': '49165',
- 'peerid': '7fc9015e-8134-4753-b837-54cbc6030c98', 'ports': {'rdma':
- 'N/A', 'tcp': '49165'}}}, '10.70.47.118': {'/bricks/brick1/a21':
- {'status': '1', 'pid': '5427', 'bricktype': 'cold', 'port': '49162',
- 'peerid': '5397d8f5-2986-453a-b0b5-5c40a9bb87ff', 'ports': {'rdma':
- 'N/A', 'tcp': '49162'}}, '/bricks/brick2/a41': {'status': '1', 'pid':
- '5446', 'bricktype': 'cold', 'port': '49163', 'peerid':
- '5397d8f5-2986-453a-b0b5-5c40a9bb87ff', 'ports': {'rdma': 'N/A',
- 'tcp': '49163'}}, 'NFS Server': {'status': '1', 'pid': '6397', 'port':
- '2049', 'peerid': '5397d8f5-2986-453a-b0b5-5c40a9bb87ff', 'ports':
- {'rdma': 'N/A', 'tcp': '2049'}}}}}
+ get_volume_status(host1, volname="testvol_replicated")
+ >>>{'testvol_replicated': {'host1': {'Self-heal Daemon': {'status':
+ '1', 'pid': '2479', 'port': 'N/A', 'peerid':
+ 'b7a02af9-eea4-4657-8b86-3b21ec302f48', 'ports': {'rdma': 'N/A',
+ 'tcp': 'N/A'}}, '/bricks/brick4/testvol_replicated_brick2': {'status':
+ '1', 'pid': '2468', 'bricktype': 'None', 'port': '49160', 'peerid':
+ 'b7a02af9-eea4-4657-8b86-3b21ec302f48', 'ports': {'rdma': 'N/A',
+ 'tcp': '49160'}}}, 'host2': {'Self-heal Daemon': {'status': '1',
+ 'pid': '2513', 'port': 'N/A', 'peerid':
+ '7f6fb9ed-3e0b-4f27-89b3-9e4f836c2332', 'ports': {'rdma': 'N/A',
+ 'tcp': 'N/A'}}, '/bricks/brick4/testvol_replicated_brick1': {'status':
+ '1', 'pid': '2456', 'bricktype': 'None', 'port': '49160', 'peerid':
+ '7f6fb9ed-3e0b-4f27-89b3-9e4f836c2332', 'ports': {'rdma': 'N/A',
+ 'tcp': '49160'}}}, 'host3': {'Self-heal Daemon': {'status': '1', 'pid'
+ : '2515', 'port': 'N/A', 'peerid':
+ '6172cfab-9d72-43b5-ba6f-612e5cfc020c', 'ports': {'rdma': 'N/A',
+ 'tcp': 'N/A'}}}, 'host4': {'Self-heal Daemon': {'status': '1', 'pid':
+ '2445', 'port': 'N/A', 'peerid': 'c16a1660-ee73-4e0f-b9c7-d2e830e39539
+ ', 'ports': {'rdma': 'N/A', 'tcp': 'N/A'}}}, 'host5':
+ {'Self-heal Daemon': {'status': '1', 'pid': '2536', 'port': 'N/A',
+ 'peerid': '79ea9f52-88f0-4293-ae21-8ea13f44b58d', 'ports':
+ {'rdma': 'N/A', 'tcp': 'N/A'}}}, 'host6': {'Self-heal Daemon':
+ {'status': '1', 'pid': '2526', 'port': 'N/A', 'peerid':
+ 'c00a3c5e-668f-440b-860c-da43e999737b', 'ports': {'rdma': 'N/A',
+ 'tcp': 'N/A'}}, '/bricks/brick4/testvol_replicated_brick0': {'status':
+ '1', 'pid': '2503', 'bricktype': 'None', 'port': '49160', 'peerid':
+ 'c00a3c5e-668f-440b-860c-da43e999737b', 'ports': {'rdma': 'N/A',
+ 'tcp': '49160'}}}}}
"""
cmd = "gluster vol status %s %s %s --xml" % (volname, service, options)
@@ -428,8 +433,6 @@ def get_volume_status(mnode, volname='all', service='', options=''):
for volume in volume_list:
tmp_dict1 = {}
tmp_dict2 = {}
- hot_bricks = []
- cold_bricks = []
vol_name = [vol.text for vol in volume if vol.tag == "volName"]
# parsing volume status xml output
@@ -449,24 +452,7 @@ def get_volume_status(mnode, volname='all', service='', options=''):
elem_tag = []
for elem in volume.getchildren():
elem_tag.append(elem.tag)
- if ('hotBricks' in elem_tag) or ('coldBricks' in elem_tag):
- for elem in volume.getchildren():
- if (elem.tag == 'hotBricks'):
- nodes = elem.findall("node")
- hot_bricks = [node.find('path').text
- for node in nodes
- if (
- node.find('path').text.startswith('/'))]
- if (elem.tag == 'coldBricks'):
- for n in elem.findall("node"):
- nodes.append(n)
- cold_bricks = [node.find('path').text
- for node in nodes
- if (
- (node.find('path').
- text.startswith('/')))]
- else:
- nodes = volume.findall("node")
+ nodes = volume.findall("node")
for each_node in nodes:
if each_node.find('path').text.startswith('/'):
@@ -479,12 +465,7 @@ def get_volume_status(mnode, volname='all', service='', options=''):
tmp_dict3 = {}
if "hostname" in node_dict.keys():
if node_dict['path'].startswith('/'):
- if node_dict['path'] in hot_bricks:
- node_dict["bricktype"] = 'hot'
- elif node_dict['path'] in cold_bricks:
- node_dict["bricktype"] = 'cold'
- else:
- node_dict["bricktype"] = 'None'
+ node_dict["bricktype"] = 'None'
tmp = node_dict["path"]
tmp_dict3[node_dict["path"]] = node_dict
else:
@@ -673,29 +654,42 @@ def get_volume_info(mnode, volname='all', xfail=False):
dict: volume info in dict of dicts
Example:
- get_volume_info("abc.com", volname="testvol")
- >>>{'testvol': {'status': '1', 'xlators': None, 'disperseCount': '0',
- 'bricks': {'coldBricks': {'colddisperseCount': '0',
- 'coldarbiterCount': '0', 'coldBrickType': 'Distribute',
- 'coldbrickCount': '4', 'numberOfBricks': '4', 'brick':
- [{'isArbiter': '0', 'name': '10.70.47.89:/bricks/brick1/a11',
- 'hostUuid': '7fc9015e-8134-4753-b837-54cbc6030c98'}, {'isArbiter':
- '0', 'name': '10.70.47.118:/bricks/brick1/a21', 'hostUuid':
- '7fc9015e-8134-4753-b837-54cbc6030c98'}, {'isArbiter': '0', 'name':
- '10.70.47.89:/bricks/brick2/a31', 'hostUuid':
- '7fc9015e-8134-4753-b837-54cbc6030c98'}, {'isArbiter': '0',
- 'name': '10.70.47.118:/bricks/brick2/a41', 'hostUuid':
- '7fc9015e-8134-4753-b837-54cbc6030c98'}], 'coldreplicaCount': '1'},
- 'hotBricks': {'hotBrickType': 'Distribute', 'numberOfBricks': '1',
- 'brick': [{'name': '10.70.47.89:/bricks/brick1/a12', 'hostUuid':
- '7fc9015e-8134-4753-b837-54cbc6030c98'}], 'hotbrickCount': '1',
- 'hotreplicaCount': '1'}}, 'type': '5', 'distCount': '1',
- 'replicaCount': '1', 'brickCount': '5', 'options':
- {'cluster.tier-mode': 'cache', 'performance.readdir-ahead': 'on',
- 'features.ctr-enabled': 'on'}, 'redundancyCount': '0', 'transport':
- '0', 'typeStr': 'Tier', 'stripeCount': '1', 'arbiterCount': '0',
- 'id': 'ffa8a8d1-546f-4ebf-8e82-fcc96c7e4e05', 'statusStr': 'Started',
- 'optCount': '3'}}
+ get_volume_info("host1", volname="testvol")
+ >>>{'testvol': {'status': '1', 'disperseCount': '6',
+ 'bricks': {'brick': [{'isArbiter': '0', 'name':
+ 'host1:/bricks/brick6/testvol_brick0', 'hostUuid':
+ 'c00a3c5e-668f-440b-860c-da43e999737b'}, {'isArbiter': '0', 'name':
+ 'host2:/bricks/brick6/testvol_brick1', 'hostUuid':
+ '7f6fb9ed-3e0b-4f27-89b3-9e4f836c2332'}, {'isArbiter': '0', 'name':
+ 'host3:/bricks/brick6/testvol_brick2', 'hostUuid':
+ 'b7a02af9-eea4-4657-8b86-3b21ec302f48'}, {'isArbiter': '0', 'name':
+ 'host4:/bricks/brick4/testvol_brick3', 'hostUuid':
+ '79ea9f52-88f0-4293-ae21-8ea13f44b58d'}, {'isArbiter': '0', 'name':
+ 'host5:/bricks/brick2/testvol_brick4', 'hostUuid':
+ 'c16a1660-ee73-4e0f-b9c7-d2e830e39539'}, {'isArbiter': '0', 'name':
+ 'host6:/bricks/brick2/testvol_brick5', 'hostUuid':
+ '6172cfab-9d72-43b5-ba6f-612e5cfc020c'}, {'isArbiter': '0', 'name':
+ 'host1:/bricks/brick7/testvol_brick6', 'hostUuid':
+ 'c00a3c5e-668f-440b-860c-da43e999737b'}, {'isArbiter': '0', 'name':
+ 'host2:/bricks/brick7/testvol_brick7', 'hostUuid':
+ '7f6fb9ed-3e0b-4f27-89b3-9e4f836c2332'}, {'isArbiter': '0', 'name':
+ 'host3:/bricks/brick7/testvol_brick8', 'hostUuid':
+ 'b7a02af9-eea4-4657-8b86-3b21ec302f48'}, {'isArbiter': '0', 'name':
+ 'host4:/bricks/brick5/testvol_brick9', 'hostUuid':
+ '79ea9f52-88f0-4293-ae21-8ea13f44b58d'}, {'isArbiter': '0', 'name':
+ 'host5:/bricks/brick4/testvol_brick10', 'hostUuid':
+ 'c16a1660-ee73-4e0f-b9c7-d2e830e39539'}, {'isArbiter': '0', 'name':
+ 'host6:/bricks/brick4/testvol_brick11', 'hostUuid':
+ '6172cfab-9d72-43b5-ba6f-612e5cfc020c'}]},
+ 'type': '9', 'distCount': '2', 'replicaCount': '1', 'brickCount':
+ '12', 'options': {'nfs.disable': 'on', 'cluster.server-quorum-ratio':
+ '90%', 'storage.fips-mode-rchecksum': 'on',
+ 'transport.address-family': 'inet', 'cluster.brick-multiplex':
+ 'disable'}, 'redundancyCount': '2', 'snapshotCount': '0',
+ 'transport': '0', 'typeStr': 'Distributed-Disperse', 'stripeCount':
+ '1', 'arbiterCount': '0',
+ 'id': '8d217fa3-094b-4293-89b5-41d447c06d22', 'statusStr': 'Started',
+ 'optCount': '5'}}
"""
cmd = "gluster volume info %s --xml" % volname
@@ -727,18 +721,6 @@ def get_volume_info(mnode, volname='all', xfail=False):
(volinfo[volname]["bricks"]["brick"].
append(brick_info_dict))
- if el.tag == "hotBricks" or el.tag == "coldBricks":
- volinfo[volname]["bricks"][el.tag] = {}
- volinfo[volname]["bricks"][el.tag]["brick"] = []
- for elmt in el.getchildren():
- if elmt.tag == 'brick':
- brick_info_dict = {}
- for el_brk in elmt.getchildren():
- brick_info_dict[el_brk.tag] = el_brk.text
- (volinfo[volname]["bricks"][el.tag]["brick"].
- append(brick_info_dict))
- else:
- volinfo[volname]["bricks"][el.tag][elmt.tag] = elmt.text # noqa: E501
elif elem.tag == "options":
volinfo[volname]["options"] = {}
for option in elem.findall("option"):
@@ -840,3 +822,76 @@ def get_volume_list(mnode):
vol_list.append(elem.text)
return vol_list
+
+
+def get_gluster_state(mnode):
+ """Executes the 'gluster get-state' command on the specified node, checks
+ for the data dump, reads the glusterd state dump and returns it.
+
+ Args:
+ mnode (str): Node on which command has to be executed
+
+ Returns:
+ dict: The output of gluster get-state command in dict format
+
+ Example:
+ >>>get_gluster_state(self.mnode)
+ {'Global': {'myuuid': 'e92964c8-a7d2-4e59-81ac-feb0687df55e',
+ 'op-version': '70000'}, 'Global options': {}, 'Peers':
+ {'peer1.primary_hostname': 'dhcp43-167.lab.eng.blr.redhat.com',
+ 'peer1.uuid': 'd3a85b6a-134f-4df2-ba93-4bd0321b6d6a', 'peer1.state':
+ 'Peer in Cluster', 'peer1.connected': 'Connected',
+ 'peer1.othernames': '', 'peer2.primary_hostname':
+ 'dhcp43-68.lab.eng.blr.redhat.com', 'peer2.uuid':
+ 'f488aa35-bc56-4aea-9581-8db54e137937', 'peer2.state':
+ 'Peer in Cluster', 'peer2.connected': 'Connected',
+ 'peer2.othernames': '', 'peer3.primary_hostname':
+ 'dhcp43-64.lab.eng.blr.redhat.com', 'peer3.uuid':
+ 'dfe75b01-2988-4eac-879a-cf3d701e1382', 'peer3.state':
+ 'Peer in Cluster', 'peer3.connected': 'Connected',
+ 'peer3.othernames': '', 'peer4.primary_hostname':
+ 'dhcp42-147.lab.eng.blr.redhat.com', 'peer4.uuid':
+ '05e3858b-33bf-449a-b170-2d3dac9adc45', 'peer4.state':
+ 'Peer in Cluster', 'peer4.connected': 'Connected',
+ 'peer4.othernames': '', 'peer5.primary_hostname':
+ 'dhcp41-246.lab.eng.blr.redhat.com', 'peer5.uuid':
+ 'c2e3f833-98fa-42d9-ae63-2bc471515810', 'peer5.state':
+ 'Peer in Cluster', 'peer5.connected': 'Connected',
+ 'peer5.othernames': ''}, 'Volumes': {}, 'Services': {'svc1.name':
+ 'glustershd', 'svc1.online_status': 'Offline', 'svc2.name': 'nfs',
+ 'svc2.online_status': 'Offline', 'svc3.name': 'bitd',
+ 'svc3.online_status': 'Offline', 'svc4.name': 'scrub',
+ 'svc4.online_status': 'Offline', 'svc5.name': 'quotad',
+ 'svc5.online_status': 'Offline'}, 'Misc': {'base port': '49152',
+ 'last allocated port': '49154'}}
+ """
+
+ ret, out, _ = g.run(mnode, "gluster get-state")
+ if ret:
+ g.log.error("Failed to execute gluster get-state command!")
+ return None
+ # gluster get-state prints the path of the state dump file; extract
+ # that path from the command output before reading the dump.
+
+ match = re.search(r"/.*?/.\S*", out)
+ if not match:
+ g.log.error("Failed to get the gluster state dump file path.")
+ return None
+ path = match.group()
+ ret, out, _ = g.run(mnode, "cat {}".format(path))
+ if ret:
+ g.log.error("Failed to read the gluster state dump.")
+ return None
+ g.log.info("Command Executed successfully and the data dump verified")
+
+ # Converting the string to unicode for py2/3 compatibility
+ out = u"".join(out)
+ data_buf = io.StringIO(out)
+ config = configparser.ConfigParser()
+ try:
+ config.read_file(data_buf) # Python3
+ except AttributeError:
+ config.readfp(data_buf) # Python2
+ # Converts the config parser object to a dictionary and returns it
+ return {section: dict(config.items(section)) for section in
+ config.sections()}
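
To show how the parsed get-state dump might be consumed, a small hedged sketch (section and key names follow the docstring example above; the node is a placeholder):

    # Hypothetical consumer; assumes the 'Peers' section layout shown
    # in the get_gluster_state docstring example.
    state = get_gluster_state('server1.example.com')
    if state:
        peers = state.get('Peers', {})
        statuses = [v for k, v in peers.items()
                    if k.endswith('.connected')]
        print('Connected peers:', statuses.count('Connected'))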