Diffstat (limited to 'glustolibs-gluster')
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/block_ops.py  2
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/brick_libs.py  381
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/brickdir.py  77
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/brickmux_libs.py  149
-rwxr-xr-x  glustolibs-gluster/glustolibs/gluster/brickmux_ops.py  35
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/ctdb_libs.py  142
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/ctdb_ops.py  478
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/dht_test_utils.py  138
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/geo_rep_libs.py  474
-rwxr-xr-x  glustolibs-gluster/glustolibs/gluster/geo_rep_ops.py  252
-rwxr-xr-x [-rw-r--r--]  glustolibs-gluster/glustolibs/gluster/gluster_base_class.py  1491
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/gluster_init.py  120
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/glusterdir.py  17
-rwxr-xr-x  glustolibs-gluster/glustolibs/gluster/glusterfile.py  252
-rwxr-xr-x  glustolibs-gluster/glustolibs/gluster/heal_libs.py  142
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/layout.py  116
-rwxr-xr-x  glustolibs-gluster/glustolibs/gluster/lib_utils.py  466
-rwxr-xr-x  glustolibs-gluster/glustolibs/gluster/mount_ops.py  29
-rwxr-xr-x [-rw-r--r--]  glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py  500
-rwxr-xr-x [-rw-r--r--]  glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py  228
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/peer_ops.py  43
-rwxr-xr-x  glustolibs-gluster/glustolibs/gluster/quota_libs.py  4
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/rebalance_ops.py  75
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/samba_libs.py  417
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/shared_storage_ops.py  75
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/snap_ops.py  105
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/snap_scheduler.py  2
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/ssl_ops.py  225
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/tiering_ops.py  1023
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/uss_ops.py  27
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/volume_libs.py  1097
-rw-r--r--  glustolibs-gluster/glustolibs/gluster/volume_ops.py  278
-rw-r--r--  glustolibs-gluster/scripts/compute_hash.py  32
-rw-r--r--  glustolibs-gluster/scripts/walk_dir.py  26
-rw-r--r--  glustolibs-gluster/setup.py  12
35 files changed, 4470 insertions, 4460 deletions
diff --git a/glustolibs-gluster/glustolibs/gluster/block_ops.py b/glustolibs-gluster/glustolibs/gluster/block_ops.py
index 8e6ff25fe..f51c3e4b2 100644
--- a/glustolibs-gluster/glustolibs/gluster/block_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/block_ops.py
@@ -56,7 +56,7 @@ def block_create(mnode, volname, blockname, servers, size=None,
block_create(mnode, volname, blockname, servers, size,
**block_args_info)
"""
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
ha = auth = prealloc = storage = ring_buffer = ''
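The relaxed guard is likely motivated by Python 2, where a unicode hostname fails an isinstance(servers, str) test even though it should be wrapped the same way. A minimal sketch of the normalization (hostname hypothetical):

    servers = u'server1.example.com'   # a unicode hostname under Python 2
    if not isinstance(servers, list):  # also catches unicode, unlike a str check
        servers = [servers]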
diff --git a/glustolibs-gluster/glustolibs/gluster/brick_libs.py b/glustolibs-gluster/glustolibs/gluster/brick_libs.py
index f84085969..b92832dd1 100644
--- a/glustolibs-gluster/glustolibs/gluster/brick_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/brick_libs.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2015-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -17,20 +17,20 @@
""" Description: Module for gluster brick related helper functions. """
import random
-from math import ceil
+from math import floor
import time
from glusto.core import Glusto as g
from glustolibs.gluster.brickmux_ops import is_brick_mux_enabled
+from glustolibs.gluster.gluster_init import restart_glusterd
from glustolibs.gluster.volume_ops import (get_volume_info, get_volume_status)
-from glustolibs.gluster.volume_libs import (get_subvols, is_tiered_volume,
+from glustolibs.gluster.volume_libs import (get_subvols,
get_client_quorum_info,
get_volume_type_info)
+from glustolibs.gluster.lib_utils import (get_extended_attributes_info)
def get_all_bricks(mnode, volname):
"""Get list of all the bricks of the specified volume.
- If the volume is 'Tier' volume, the list will contain both
- 'hot tier' and 'cold tier' bricks.
Args:
mnode (str): Node on which command has to be executed
@@ -45,19 +45,7 @@ def get_all_bricks(mnode, volname):
g.log.error("Unable to get the volinfo of %s.", volname)
return None
- if 'Tier' in volinfo[volname]['typeStr']:
- # Get bricks from hot-tier in case of Tier volume
- hot_tier_bricks = get_hot_tier_bricks(mnode, volname)
- if hot_tier_bricks is None:
- return None
- # Get cold-tier bricks in case of Tier volume
- cold_tier_bricks = get_cold_tier_bricks(mnode, volname)
- if cold_tier_bricks is None:
- return None
-
- return hot_tier_bricks + cold_tier_bricks
-
- # Get bricks from a non Tier volume
+ # Get bricks from a volume
all_bricks = []
if 'bricks' in volinfo[volname]:
if 'brick' in volinfo[volname]['bricks']:
@@ -76,88 +64,6 @@ def get_all_bricks(mnode, volname):
return None
-def get_hot_tier_bricks(mnode, volname):
- """Get list of hot-tier bricks of the specified volume
-
- Args:
- mnode (str): Node on which command has to be executed
- volname (str): Name of the volume
-
- Returns:
- list : List of hot-tier bricks of the volume on Success.
- NoneType: None on failure.
- """
- volinfo = get_volume_info(mnode, volname)
- if volinfo is None:
- g.log.error("Unable to get the volinfo of %s.", volname)
- return None
-
- if 'Tier' not in volinfo[volname]['typeStr']:
- g.log.error("Volume %s is not a tiered volume", volname)
- return None
-
- hot_tier_bricks = []
- if 'bricks' in volinfo[volname]:
- if 'hotBricks' in volinfo[volname]['bricks']:
- if 'brick' in volinfo[volname]['bricks']['hotBricks']:
- for brick in volinfo[volname]['bricks']['hotBricks']['brick']:
- if 'name' in brick:
- hot_tier_bricks.append(brick['name'])
- else:
- g.log.error("brick %s doesn't have the key 'name' "
- "for the volume: %s", brick, volname)
- return None
- else:
- g.log.error("Bricks not found in hotBricks section of volume "
- "info for the volume %s", volname)
- return None
- return hot_tier_bricks
- else:
- g.log.error("Bricks not found for the volume %s", volname)
- return None
-
-
-def get_cold_tier_bricks(mnode, volname):
- """Get list of cold-tier bricks of the specified volume
-
- Args:
- mnode (str): Node on which command has to be executed
- volname (str): Name of the volume
-
- Returns:
- list : List of cold-tier bricks of the volume on Success.
- NoneType: None on failure.
- """
- volinfo = get_volume_info(mnode, volname)
- if volinfo is None:
- g.log.error("Unable to get the volinfo of %s.", volname)
- return None
-
- if 'Tier' not in volinfo[volname]['typeStr']:
- g.log.error("Volume %s is not a tiered volume", volname)
- return None
-
- cold_tier_bricks = []
- if 'bricks' in volinfo[volname]:
- if 'coldBricks' in volinfo[volname]['bricks']:
- if 'brick' in volinfo[volname]['bricks']['coldBricks']:
- for brick in volinfo[volname]['bricks']['coldBricks']['brick']:
- if 'name' in brick:
- cold_tier_bricks.append(brick['name'])
- else:
- g.log.error("brick %s doesn't have the key 'name' "
- "for the volume: %s", brick, volname)
- return None
- else:
- g.log.error("Bricks not found in coldBricks section of volume "
- "info for the volume %s", volname)
- return None
- return cold_tier_bricks
- else:
- g.log.error("Bricks not found for the volume %s", volname)
- return None
-
-
def bring_bricks_offline(volname, bricks_list,
bring_bricks_offline_methods=None):
"""Bring the bricks specified in the bricks_list offline.
@@ -179,10 +85,10 @@ def bring_bricks_offline(volname, bricks_list,
"""
if bring_bricks_offline_methods is None:
bring_bricks_offline_methods = ['service_kill']
- elif isinstance(bring_bricks_offline_methods, str):
+ elif not isinstance(bring_bricks_offline_methods, list):
bring_bricks_offline_methods = [bring_bricks_offline_methods]
- if isinstance(bricks_list, str):
+ if not isinstance(bricks_list, list):
bricks_list = [bricks_list]
node_list = []
@@ -281,7 +187,7 @@ def bring_bricks_online(mnode, volname, bricks_list,
if bring_bricks_online_methods is None:
bring_bricks_online_methods = ['glusterd_restart',
'volume_start_force']
- elif isinstance(bring_bricks_online_methods, str):
+ elif not isinstance(bring_bricks_online_methods, list):
bring_bricks_online_methods = [bring_bricks_online_methods]
g.log.info("Bringing bricks '%s' online with '%s'",
@@ -304,10 +210,9 @@ def bring_bricks_online(mnode, volname, bricks_list,
"the bricks '%s' online", volname, bricks_list)
elif bring_brick_online_method == 'glusterd_restart':
- bring_brick_online_command = "service glusterd restart"
brick_node, _ = brick.split(":")
- ret, _, _ = g.run(brick_node, bring_brick_online_command)
- if ret != 0:
+ ret = restart_glusterd(brick_node)
+ if not ret:
g.log.error("Unable to restart glusterd on node %s",
brick_node)
_rc = False
@@ -504,41 +409,29 @@ def select_bricks_to_bring_offline(mnode, volname):
being empty list.
Example:
brick_to_bring_offline = {
- 'is_tier': False,
- 'hot_tier_bricks': [],
- 'cold_tier_bricks': [],
'volume_bricks': []
}
"""
# Defaulting the values to empty list
bricks_to_bring_offline = {
- 'is_tier': False,
- 'hot_tier_bricks': [],
- 'cold_tier_bricks': [],
'volume_bricks': []
- }
+ }
volinfo = get_volume_info(mnode, volname)
if volinfo is None:
g.log.error("Unable to get the volume info for volume %s", volname)
return bricks_to_bring_offline
- if is_tiered_volume(mnode, volname):
- bricks_to_bring_offline['is_tier'] = True
- # Select bricks from tiered volume.
- bricks_to_bring_offline = (
- select_tier_volume_bricks_to_bring_offline(mnode, volname))
- else:
- # Select bricks from non-tiered volume.
- volume_bricks = select_volume_bricks_to_bring_offline(mnode, volname)
- bricks_to_bring_offline['volume_bricks'] = volume_bricks
+ # Select bricks from the volume.
+ volume_bricks = select_volume_bricks_to_bring_offline(mnode, volname)
+ bricks_to_bring_offline['volume_bricks'] = volume_bricks
return bricks_to_bring_offline
def select_volume_bricks_to_bring_offline(mnode, volname):
"""Randomly selects bricks to bring offline without affecting the cluster
- from a non-tiered volume.
+ from a volume.
Args:
mnode (str): Node on which commands will be executed.
@@ -546,14 +439,10 @@ def select_volume_bricks_to_bring_offline(mnode, volname):
Returns:
list: On success returns list of bricks that can be brought offline.
- If volume doesn't exist or is a tiered volume returns empty list
+ If volume doesn't exist returns empty list
"""
volume_bricks_to_bring_offline = []
- # Check if volume is tiered
- if is_tiered_volume(mnode, volname):
- return volume_bricks_to_bring_offline
-
# get volume type
volume_type_info = get_volume_type_info(mnode, volname)
volume_type = volume_type_info['volume_type_info']['typeStr']
@@ -598,162 +487,6 @@ def select_volume_bricks_to_bring_offline(mnode, volname):
return volume_bricks_to_bring_offline
-def select_tier_volume_bricks_to_bring_offline(mnode, volname):
- """Randomly selects bricks to bring offline without affecting the cluster
- from a tiered volume.
-
- Args:
- mnode (str): Node on which commands will be executed.
- volname (str): Name of the volume.
-
- Returns:
- dict: On success returns dict. Value of each key is list of bricks to
- bring offline.
- If volume doesn't exist or is not a tiered volume returns dict
- with value of each item being empty list.
- Example:
- brick_to_bring_offline = {
- 'hot_tier_bricks': [],
- 'cold_tier_bricks': [],
- }
- """
- # Defaulting the values to empty list
- bricks_to_bring_offline = {
- 'hot_tier_bricks': [],
- 'cold_tier_bricks': [],
- }
-
- volinfo = get_volume_info(mnode, volname)
- if volinfo is None:
- g.log.error("Unable to get the volume info for volume %s", volname)
- return bricks_to_bring_offline
-
- if is_tiered_volume(mnode, volname):
- # Select bricks from both hot tier and cold tier.
- hot_tier_bricks = (select_hot_tier_bricks_to_bring_offline
- (mnode, volname))
- cold_tier_bricks = (select_cold_tier_bricks_to_bring_offline
- (mnode, volname))
- bricks_to_bring_offline['hot_tier_bricks'] = hot_tier_bricks
- bricks_to_bring_offline['cold_tier_bricks'] = cold_tier_bricks
- return bricks_to_bring_offline
-
-
-def select_hot_tier_bricks_to_bring_offline(mnode, volname):
- """Randomly selects bricks to bring offline without affecting the cluster
- from a hot tier.
-
- Args:
- mnode (str): Node on which commands will be executed.
- volname (str): Name of the volume.
-
- Returns:
- list: On success returns list of bricks that can be brough offline
- from hot tier. If volume doesn't exist or is a non tiered volume
- returns empty list.
- """
- hot_tier_bricks_to_bring_offline = []
-
- # Check if volume is tiered
- if not is_tiered_volume(mnode, volname):
- return hot_tier_bricks_to_bring_offline
-
- # get volume type
- volume_type_info = get_volume_type_info(mnode, volname)
- hot_tier_type = volume_type_info['hot_tier_type_info']['hotBrickType']
-
- # get subvols
- subvols_dict = get_subvols(mnode, volname)
- hot_tier_subvols = subvols_dict['hot_tier_subvols']
-
- # select bricks from distribute volume
- if hot_tier_type == 'Distribute':
- hot_tier_bricks_to_bring_offline = []
-
- # select bricks from replicated, distributed-replicated volume
- if (hot_tier_type == 'Replicate' or
- hot_tier_type == 'Distributed-Replicate'):
- # Get replica count
- hot_tier_replica_count = (volume_type_info
- ['hot_tier_type_info']['hotreplicaCount'])
-
- # Get quorum info
- quorum_info = get_client_quorum_info(mnode, volname)
- hot_tier_quorum_info = quorum_info['hot_tier_quorum_info']
-
- # Get list of bricks to bring offline
- hot_tier_bricks_to_bring_offline = (
- get_bricks_to_bring_offline_from_replicated_volume(
- hot_tier_subvols, hot_tier_replica_count,
- hot_tier_quorum_info))
-
- return hot_tier_bricks_to_bring_offline
-
-
-def select_cold_tier_bricks_to_bring_offline(mnode, volname):
- """Randomly selects bricks to bring offline without affecting the cluster
- from a cold tier.
-
- Args:
- mnode (str): Node on which commands will be executed.
- volname (str): Name of the volume.
-
- Returns:
- list: On success returns list of bricks that can be brough offline
- from cold tier. If volume doesn't exist or is a non tiered volume
- returns empty list.
- """
- cold_tier_bricks_to_bring_offline = []
-
- # Check if volume is tiered
- if not is_tiered_volume(mnode, volname):
- return cold_tier_bricks_to_bring_offline
-
- # get volume type
- volume_type_info = get_volume_type_info(mnode, volname)
- cold_tier_type = volume_type_info['cold_tier_type_info']['coldBrickType']
-
- # get subvols
- subvols_dict = get_subvols(mnode, volname)
- cold_tier_subvols = subvols_dict['cold_tier_subvols']
-
- # select bricks from distribute volume
- if cold_tier_type == 'Distribute':
- cold_tier_bricks_to_bring_offline = []
-
- # select bricks from replicated, distributed-replicated volume
- elif (cold_tier_type == 'Replicate' or
- cold_tier_type == 'Distributed-Replicate'):
- # Get replica count
- cold_tier_replica_count = (volume_type_info['cold_tier_type_info']
- ['coldreplicaCount'])
-
- # Get quorum info
- quorum_info = get_client_quorum_info(mnode, volname)
- cold_tier_quorum_info = quorum_info['cold_tier_quorum_info']
-
- # Get list of bricks to bring offline
- cold_tier_bricks_to_bring_offline = (
- get_bricks_to_bring_offline_from_replicated_volume(
- cold_tier_subvols, cold_tier_replica_count,
- cold_tier_quorum_info))
-
- # select bricks from Disperse, Distribured-Disperse volume
- elif (cold_tier_type == 'Disperse' or
- cold_tier_type == 'Distributed-Disperse'):
-
- # Get redundancy count
- cold_tier_redundancy_count = (volume_type_info['cold_tier_type_info']
- ['coldredundancyCount'])
-
- # Get list of bricks to bring offline
- cold_tier_bricks_to_bring_offline = (
- get_bricks_to_bring_offline_from_disperse_volume(
- cold_tier_subvols, cold_tier_redundancy_count))
-
- return cold_tier_bricks_to_bring_offline
-
-
def get_bricks_to_bring_offline_from_replicated_volume(subvols_list,
replica_count,
quorum_info):
@@ -761,13 +494,10 @@ def get_bricks_to_bring_offline_from_replicated_volume(subvols_list,
for a replicated volume.
Args:
- subvols_list: list of subvols. It can be volume_subvols,
- hot_tier_subvols or cold_tier_subvols.
+ subvols_list: list of subvols.
For example:
subvols = volume_libs.get_subvols(mnode, volname)
volume_subvols = subvols_dict['volume_subvols']
- hot_tier_subvols = subvols_dict['hot_tier_subvols']
- cold_tier_subvols = subvols_dict['cold_tier_subvols']
replica_count: Replica count of a Replicate or Distributed-Replicate
volume.
quorum_info: dict containing quorum info of the volume. The dict should
@@ -776,8 +506,6 @@ def get_bricks_to_bring_offline_from_replicated_volume(subvols_list,
For example:
quorum_dict = get_client_quorum_info(mnode, volname)
volume_quorum_info = quorum_info['volume_quorum_info']
- hot_tier_quorum_info = quorum_info['hot_tier_quorum_info']
- cold_tier_quorum_info = quorum_info['cold_tier_quorum_info']
Returns:
list: List of bricks that can be brought offline without affecting the
@@ -805,7 +533,7 @@ def get_bricks_to_bring_offline_from_replicated_volume(subvols_list,
offline_bricks_limit = int(replica_count) - int(quorum_count)
elif 'auto' in quorum_type:
- offline_bricks_limit = ceil(int(replica_count) / 2)
+ offline_bricks_limit = floor(int(replica_count) // 2)
elif quorum_type is None:
offline_bricks_limit = int(replica_count) - 1
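A worked example of the corrected quorum arithmetic under Python 3, where true division made the old ceil() round up: with quorum_type 'auto' and replica_count 3,

    >>> from math import ceil, floor
    >>> ceil(3 / 2.0)    # old limit: allowed 2 of 3 bricks down, losing majority
    2
    >>> floor(3 // 2)    # new limit: 1 of 3 bricks down keeps client quorum
    1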
@@ -835,18 +563,15 @@ def get_bricks_to_bring_offline_from_disperse_volume(subvols_list,
for a disperse volume.
Args:
- subvols_list: list of subvols. It can be volume_subvols,
- hot_tier_subvols or cold_tier_subvols.
+ subvols_list: list of subvols.
For example:
subvols = volume_libs.get_subvols(mnode, volname)
volume_subvols = subvols_dict['volume_subvols']
- hot_tier_subvols = subvols_dict['hot_tier_subvols']
- cold_tier_subvols = subvols_dict['cold_tier_subvols']
redundancy_count: Redundancy count of a Disperse or
Distributed-Disperse volume.
Returns:
- list: List of bricks that can be brought offline without affecting the
+    list: List of bricks that can be brought offline without affecting the
 cluster. On any failure return empty list.
"""
list_of_bricks_to_bring_offline = []
@@ -902,3 +627,67 @@ def wait_for_bricks_to_be_online(mnode, volname, timeout=300):
else:
g.log.info("All Bricks of the volume '%s' are online ", volname)
return True
+
+
+def is_broken_symlinks_present_on_bricks(mnode, volname):
+ """ Checks if the backend bricks have broken symlinks.
+
+ Args:
+ mnode(str): Node on which command has to be executed
+ volname(str): Name of the volume
+
+    Returns:
+        bool: True if broken symlinks are present, else False.
+ """
+ brick_list = get_all_bricks(mnode, volname)
+ for brick in brick_list:
+ brick_node, brick_path = brick.split(":")
+ cmd = ("find %s -xtype l | wc -l" % brick_path)
+ ret, out, _ = g.run(brick_node, cmd)
+ if ret:
+ g.log.error("Failed to run command on node %s", brick_node)
+ return True
+        if int(out):
+            g.log.error("Error: Broken symlink found on brick path: "
+                        "%s on node %s.", brick_path, brick_node)
+ return True
+ return False
+
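find -xtype l matches symlinks whose targets no longer resolve, so a non-zero count flags dangling links on the brick filesystem. A hedged usage sketch (host and volume names hypothetical):

    if is_broken_symlinks_present_on_bricks('server1.example.com', 'testvol'):
        g.log.error("Dangling symlinks found on the backend bricks")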
+
+def validate_xattr_on_all_bricks(bricks_list, file_path, xattr):
+ """Checks if the xattr of the file/dir is same on all bricks.
+
+ Args:
+ bricks_list (list): List of bricks.
+ file_path (str): The path to the file/dir.
+ xattr (str): The file attribute to get from file.
+
+ Returns:
+ True if the xattr is same on all the fqpath. False otherwise
+
+ Example:
+ validate_xattr_on_all_bricks("bricks_list",
+ "dir1/file1",
+ "xattr")
+ """
+
+ time_counter = 250
+ g.log.info("The heal monitoring timeout is : %d minutes",
+ (time_counter // 60))
+ while time_counter > 0:
+ attr_vals = {}
+ for brick in bricks_list:
+ brick_node, brick_path = brick.split(":")
+ attr_vals[brick] = (
+ get_extended_attributes_info(brick_node,
+ ["{0}/{1}".format(brick_path,
+ file_path)],
+ attr_name=xattr))
+ ec_version_vals = [list(val.values())[0][xattr] for val in
+ list(attr_vals.values())]
+ if len(set(ec_version_vals)) == 1:
+ return True
+ else:
+ time.sleep(120)
+ time_counter -= 120
+ return False
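A hedged usage sketch of the new poller, assuming an EC volume where trusted.ec.version must converge across bricks (host, volume and file names hypothetical):

    bricks = get_all_bricks('server1.example.com', 'testvol')
    if not validate_xattr_on_all_bricks(bricks, 'dir1/file1',
                                        'trusted.ec.version'):
        g.log.error("xattr did not converge on all bricks within the timeout")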
diff --git a/glustolibs-gluster/glustolibs/gluster/brickdir.py b/glustolibs-gluster/glustolibs/gluster/brickdir.py
index 564f1421b..e864e8247 100644
--- a/glustolibs-gluster/glustolibs/gluster/brickdir.py
+++ b/glustolibs-gluster/glustolibs/gluster/brickdir.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -20,20 +20,20 @@
import os
from glusto.core import Glusto as g
+from glustolibs.gluster.volume_libs import get_volume_type
-def get_hashrange(brickdir_path):
- """Get the int hash range for a brick
+def check_hashrange(brickdir_path):
+ """Check the hash range for a brick
Args:
- brickdir_url (str): path of the directory as returned from pathinfo
+ brickdir_path (str): path of the directory as returned from pathinfo
(e.g., server1.example.com:/bricks/brick1/testdir1)
Returns:
list containing the low and high hash for the brickdir. None on fail.
"""
(host, fqpath) = brickdir_path.split(':')
-
command = ("getfattr -n trusted.glusterfs.dht -e hex %s "
"2> /dev/null | grep -i trusted.glusterfs.dht | "
"cut -d= -f2" % fqpath)
@@ -53,6 +53,49 @@ def get_hashrange(brickdir_path):
return None
+def get_hashrange(brickdir_path):
+ """Check the gluster version and then the volume type.
+ And accordingly, get the int hash range for a brick.
+
+ Note:
+ If the Gluster version is equal to or greater than 6, the hash range
+ can be calculated only for distributed, distributed-dispersed,
+ distributed-arbiter and distributed-replicated volume types because of
+ DHT pass-through option which was introduced in Gluster 6.
+
+ About DHT pass-through option:
+ There are no user controllable changes with this feature.
+ The distribute xlator now skips unnecessary checks and operations when
+ the distribute count is one for a volume, resulting in improved
+    performance. It comes into play when there is only 1 brick or it is a
+ pure-replicate or pure-disperse or pure-arbiter volume.
+
+ Args:
+ brickdir_path (str): path of the directory as returned from pathinfo
+ (e.g., server1.example.com:/bricks/brick1/testdir1)
+
+    Returns:
+        tuple: The low and high hash for the brickdir. None on failure.
+        str: A skip message for pure Replicate/Disperse/Arbiter volume
+             types, for which no hash range exists.
+
+ """
+
+ (host, _) = brickdir_path.split(':')
+ ret = get_volume_type(brickdir_path)
+ if ret in ('Replicate', 'Disperse', 'Arbiter'):
+ g.log.info("Cannot find hash-range for Replicate/Disperse/Arbiter"
+ " volume type on Gluster 6.0 and higher.")
+ return "Skipping for Replicate/Disperse/Arbiter volume type"
+ else:
+        ret = check_hashrange(brickdir_path)
+        if ret is not None:
+            hash_range_low = ret[0]
+            hash_range_high = ret[1]
+            return (hash_range_low, hash_range_high)
+        else:
+            g.log.error("Could not get hashrange")
+            return None
+
+
def file_exists(host, filename):
"""Check if file exists at path on host
@@ -80,12 +123,23 @@ class BrickDir(object):
self._hashrange_low = None
self._hashrange_high = None
- def _get_hashrange(self):
+ def _check_hashrange(self):
"""get the hash range for a brick from a remote system"""
- self._hashrange = get_hashrange(self._path)
+ self._hashrange = check_hashrange(self._path)
self._hashrange_low = self._hashrange[0]
self._hashrange_high = self._hashrange[1]
+ def _get_hashrange(self):
+ """get the hash range for a brick from a remote system"""
+ ret = get_volume_type(self._path)
+ if ret in ('Replicate', 'Disperse', 'Arbiter'):
+ g.log.info("Cannot find hash-range as the volume type under"
+ " test is Replicate/Disperse/Arbiter")
+ else:
+            self._hashrange = get_hashrange(self._path)
+            if self._hashrange is not None:
+                self._hashrange_low = self._hashrange[0]
+                self._hashrange_high = self._hashrange[1]
+
@property
def path(self):
"""The brick url
@@ -126,8 +180,13 @@ class BrickDir(object):
"""The high hash of the brick hashrange"""
if self.hashrange is None or self._hashrange_high is None:
self._get_hashrange()
-
- return self._hashrange_high
+        if self._hashrange_high is None:
+            ret = get_volume_type(self._path)
+            if ret in ('Replicate', 'Disperse', 'Arbiter'):
+                g.log.info("Cannot find hash-range as the volume type"
+                           " under test is Replicate/Disperse/Arbiter")
+            return None
+        return self._hashrange_high
def hashrange_contains_hash(self, filehash):
"""Check if a hash number falls between the brick hashrange
diff --git a/glustolibs-gluster/glustolibs/gluster/brickmux_libs.py b/glustolibs-gluster/glustolibs/gluster/brickmux_libs.py
new file mode 100644
index 000000000..cb82d8434
--- /dev/null
+++ b/glustolibs-gluster/glustolibs/gluster/brickmux_libs.py
@@ -0,0 +1,149 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+    Description: Module for brick multiplexing related helper functions.
+"""
+
+from itertools import cycle
+try:
+ from itertools import zip_longest
+except ImportError:
+ from itertools import izip_longest as zip_longest
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.volume_ops import get_volume_list
+from glustolibs.gluster.lib_utils import get_servers_bricks_dict
+
+
+def get_all_bricks_from_servers_multivol(servers, servers_info):
+ """
+ Form list of all the bricks to create/add-brick from the given
+ servers and servers_info
+
+ Args:
+ servers (list): List of servers in the storage pool.
+ servers_info (dict): Information about all servers.
+
+ Returns:
+ brickCount (int): Number of bricks available from the servers.
+ bricks_list (list): List of all bricks from the servers provided.
+
+    Example:
+ servers_info = {
+ 'abc.lab.eng.xyz.com': {
+ 'host': 'abc.lab.eng.xyz.com',
+ 'brick_root': '/bricks',
+ 'devices': ['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde']
+ },
+ 'def.lab.eng.xyz.com':{
+ 'host': 'def.lab.eng.xyz.com',
+ 'brick_root': '/bricks',
+ 'devices': ['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde']
+ }
+ }
+ """
+ if not isinstance(servers, list):
+ servers = [servers]
+
+ brickCount, bricks_list = 0, []
+
+ servers_bricks = get_servers_bricks_dict(servers, servers_info)
+ server_ip = cycle(servers_bricks.keys())
+
+ for item in list(zip_longest(*list(servers_bricks.values()))):
+ for brick in item:
+ try:
+ server = server_ip.next() # Python 2
+ except AttributeError:
+ server = next(server_ip) # Python 3
+ if brick:
+ bricks_list.append(server + ":" + brick)
+ brickCount += 1
+ return brickCount, bricks_list
+
+
+def get_current_brick_index(mnode):
+ """
+ Get the brick current index from the node of the cluster.
+
+ Args:
+ mnode (str): Node on which commands has to be executed.
+
+ Returns:
+ NoneType: If there are any errors
+ int: Count of the bricks in the cluster.
+ """
+ ret, brick_index, err = g.run(mnode, "gluster volume info | egrep "
+ "\"^Brick[0-9]+\" | grep -v \"ss_brick\"")
+ if ret:
+ g.log.error("Error in getting bricklist using gluster v info %s" % err)
+ return None
+
+ g.log.info("brick_index is ", brick_index)
+ return len(brick_index.splitlines())
+
+
+def form_bricks_for_multivol(mnode, volname, number_of_bricks, servers,
+ servers_info):
+ """
+    Forms a brick list for volume create/add-brick, given number_of_bricks,
+    servers and servers_info, for a multi-volume cluster and for a brick
+    multiplex enabled cluster.
+
+ Args:
+ mnode (str): Node on which commands has to be executed.
+ volname (str): Volume name for which we require brick-list
+ number_of_bricks (int): The number of bricks for which brick list
+ has to be created.
+ servers (str|list): A server|List of servers from which the bricks
+ needs to be selected for creating the brick list.
+ servers_info (dict): Dict of server info of each servers.
+
+ Returns:
+ list: List of bricks to use with volume create.
+ Nonetype: If unable to fetch the brick list
+
+ """
+ if not isinstance(servers, list):
+ servers = [servers]
+
+ brick_index, brick_list_for_volume = 0, []
+
+    # Importing get_all_bricks() from brick_libs to avoid cyclic imports
+ from glustolibs.gluster.brick_libs import get_all_bricks
+
+ # Get all volume list present in the cluster from mnode
+ current_vol_list = get_volume_list(mnode)
+ for volume in current_vol_list:
+ brick_index = brick_index + len(get_all_bricks(mnode, volume))
+ g.log.info("current brick_index %s" % brick_index)
+
+ # Get all bricks_count and bricks_list
+ all_brick_count, bricks_list = get_all_bricks_from_servers_multivol(
+ servers, servers_info)
+ if not (all_brick_count > 1):
+ g.log.error("Unable to get the bricks present in the specified"
+ "servers")
+ return None
+
+ for num in range(number_of_bricks):
+ brick = brick_index % all_brick_count
+ brick_list_for_volume.append("%s/%s_brick%d" % (bricks_list[brick],
+ volname, brick_index))
+ brick_index += 1
+
+ return brick_list_for_volume
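A hedged usage sketch of the multi-volume brick former, assuming servers and all_servers_info come from the test config as in the docstring example:

    bricks = form_bricks_for_multivol(mnode='server1.example.com',
                                      volname='testvol', number_of_bricks=6,
                                      servers=servers,
                                      servers_info=all_servers_info)
    if bricks is None:
        g.log.error("Could not form a brick list for testvol")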
diff --git a/glustolibs-gluster/glustolibs/gluster/brickmux_ops.py b/glustolibs-gluster/glustolibs/gluster/brickmux_ops.py
index 2fff05806..b56434741 100755
--- a/glustolibs-gluster/glustolibs/gluster/brickmux_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/brickmux_ops.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -56,7 +56,7 @@ def is_brick_mux_enabled(mnode):
elif get_brick_mux_status(mnode) in negative_states:
return False
else:
- raise ValueError('Brick mux status % is incorrect',
+ raise ValueError('Brick mux status %s is incorrect',
get_brick_mux_status(mnode))
@@ -119,21 +119,40 @@ def check_brick_pid_matches_glusterfsd_pid(mnode, volname):
"of brick path %s", brick_node, brick_path)
_rc = False
- cmd = ("ps -eaf | grep glusterfsd | "
- "grep %s.%s | grep -v 'grep %s.%s'"
- % (volname, brick_node,
- volname, brick_node))
+ cmd = "pgrep -x glusterfsd"
ret, pid, _ = g.run(brick_node, cmd)
if ret != 0:
g.log.error("Failed to run the command %s on "
"node %s", cmd, brick_node)
_rc = False
- glusterfsd_pid = pid.split()[1]
- if glusterfsd_pid != brick_pid:
+ else:
+ glusterfsd_pid = pid.split('\n')[:-1]
+
+ if brick_pid not in glusterfsd_pid:
g.log.error("Brick pid %s doesn't match glusterfsd "
"pid %s of the node %s", brick_pid,
glusterfsd_pid, brick_node)
_rc = False
return _rc
+
+
+def get_brick_processes_count(mnode):
+ """
+ Get the brick process count for a given node.
+
+ Args:
+ mnode (str): Node on which brick process has to be counted.
+
+ Returns:
+ int: Number of brick processes running on the node.
+ None: If the command fails to execute.
+ """
+ ret, out, _ = g.run(mnode, "pgrep -x glusterfsd")
+ if not ret:
+ list_of_pids = out.split("\n")
+ list_of_pids.pop()
+ return len(list_of_pids)
+ else:
+ return None
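With brick multiplexing on, many bricks attach to one glusterfsd, so the per-node process count is the usual assertion target; a hedged sketch (host hypothetical):

    count = get_brick_processes_count('server1.example.com')
    if count == 1:
        g.log.info("Single glusterfsd serving all bricks: mux is in effect")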
diff --git a/glustolibs-gluster/glustolibs/gluster/ctdb_libs.py b/glustolibs-gluster/glustolibs/gluster/ctdb_libs.py
new file mode 100644
index 000000000..9dfa5f8f6
--- /dev/null
+++ b/glustolibs-gluster/glustolibs/gluster/ctdb_libs.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ Description:
+ Samba ctdb base classes.
+ Pre-requisite:
+ Please install samba ctdb packages
+ on all servers
+"""
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.ctdb_ops import (
+ edit_hook_script,
+ enable_ctdb_cluster,
+ create_nodes_file,
+ create_public_address_file,
+ start_ctdb_service,
+ is_ctdb_status_healthy,
+ teardown_samba_ctdb_cluster)
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.volume_libs import (
+ setup_volume,
+ wait_for_volume_process_to_be_online)
+
+
+class SambaCtdbBaseClass(GlusterBaseClass):
+ """
+ Creates samba ctdb cluster
+ """
+ @classmethod
+ def setUpClass(cls):
+ """
+ Setup variable for samba ctdb test.
+ """
+ super(SambaCtdbBaseClass, cls).setUpClass()
+
+        cls.ctdb_vips = (g.config['gluster']['cluster_config']
+                         ['smb']['ctdb_vips'])
+        cls.ctdb_nodes = (g.config['gluster']['cluster_config']
+                          ['smb']['ctdb_nodes'])
+        cls.ctdb_volname = (g.config['gluster']['cluster_config']
+                            ['smb']['ctdb_volname'])
+        cls.ctdb_volume_config = (g.config['gluster']['cluster_config']['smb']
+                                  ['ctdb_volume_config'])
+
+        # ctdb_nodes must be read from the config before it is used here
+        cls.ctdb_volume_rep_count = len(cls.ctdb_nodes)
+        cls.primary_node = cls.servers[0]
+        g.log.info("VOLUME REP COUNT %s", cls.ctdb_volume_rep_count)
+
+ @classmethod
+ def setup_samba_ctdb_cluster(cls):
+ """
+        Create the ctdb-samba cluster if it doesn't exist
+
+        Returns:
+            bool: True if the samba ctdb setup succeeds, else False
+ """
+ # Check if ctdb setup is up and running
+ if is_ctdb_status_healthy(cls.primary_node):
+ g.log.info("ctdb setup already up skipping "
+ "ctdb setup creation")
+ return True
+ g.log.info("Proceeding with ctdb setup creation")
+ for mnode in cls.servers:
+ ret = edit_hook_script(mnode, cls.ctdb_volname)
+ if not ret:
+ return False
+ ret = enable_ctdb_cluster(mnode)
+ if not ret:
+ return False
+ ret = create_nodes_file(mnode, cls.ctdb_nodes)
+ if not ret:
+ return False
+ ret = create_public_address_file(mnode, cls.ctdb_vips)
+ if not ret:
+ return False
+ server_info = cls.all_servers_info
+ ctdb_config = cls.ctdb_volume_config
+ g.log.info("Setting up ctdb volume %s", cls.ctdb_volname)
+ ret = setup_volume(mnode=cls.primary_node,
+ all_servers_info=server_info,
+ volume_config=ctdb_config)
+ if not ret:
+ g.log.error("Failed to setup ctdb volume %s", cls.ctdb_volname)
+ return False
+ g.log.info("Successful in setting up volume %s", cls.ctdb_volname)
+
+ # Wait for volume processes to be online
+ g.log.info("Wait for volume %s processes to be online",
+ cls.ctdb_volname)
+ ret = wait_for_volume_process_to_be_online(cls.mnode, cls.ctdb_volname)
+ if not ret:
+ g.log.error("Failed to wait for volume %s processes to "
+ "be online", cls.ctdb_volname)
+ return False
+ g.log.info("Successful in waiting for volume %s processes to be "
+ "online", cls.ctdb_volname)
+
+ # start ctdb services
+ ret = start_ctdb_service(cls.servers)
+ if not ret:
+ return False
+
+ ret = is_ctdb_status_healthy(cls.primary_node)
+ if not ret:
+ g.log.error("CTDB setup creation failed - exiting")
+ return False
+ g.log.info("CTDB setup creation successfull")
+ return True
+
+ @classmethod
+ def tearDownClass(cls, delete_samba_ctdb_cluster=False):
+ """
+ Teardown samba ctdb cluster.
+ """
+ super(SambaCtdbBaseClass, cls).tearDownClass()
+
+ if delete_samba_ctdb_cluster:
+ ret = teardown_samba_ctdb_cluster(
+ cls.servers, cls.ctdb_volname)
+ if not ret:
+ raise ExecutionError("Cleanup of samba ctdb "
+ "cluster failed")
+ g.log.info("Teardown samba ctdb cluster succeeded")
+ else:
+ g.log.info("Skipping teardown samba ctdb cluster...")
diff --git a/glustolibs-gluster/glustolibs/gluster/ctdb_ops.py b/glustolibs-gluster/glustolibs/gluster/ctdb_ops.py
new file mode 100644
index 000000000..8bf57ba05
--- /dev/null
+++ b/glustolibs-gluster/glustolibs/gluster/ctdb_ops.py
@@ -0,0 +1,478 @@
+#!/usr/bin/env python
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+CTDB library operations
+Pre-requisite: CTDB and Samba packages
+need to be installed on all the server nodes.
+"""
+
+import re
+from time import sleep
+from glusto.core import Glusto as g
+from glustolibs.gluster.lib_utils import (add_services_to_firewall,
+ is_rhel6, list_files)
+from glustolibs.gluster.mount_ops import umount_volume
+from glustolibs.gluster.volume_libs import cleanup_volume
+
+
+def edit_hook_script(mnode, ctdb_volname):
+ """
+ Edit the hook scripts with ctdb volume name
+
+ Args:
+ mnode (str): Node on which commands has to be executed.
+ ctdb_volname (str): Name of the ctdb volume
+ Returns:
+ bool: True if successfully edits the hook-scripts else false
+ """
+    # Replace META="all" with META=ctdb_volname in the setup hook script
+ cmd = ("sed -i -- 's/META=\"all\"/META=\"%s\"/g' "
+ "/var/lib/glusterd/hooks/1"
+ "/start/post/S29CTDBsetup.sh")
+ ret, _, _ = g.run(mnode, cmd % ctdb_volname)
+ if ret:
+ g.log.error("Hook script - S29CTDBsetup edit failed on %s", mnode)
+ return False
+
+ g.log.info("Hook script - S29CTDBsetup edit success on %s", mnode)
+    # Replace META="all" with META=ctdb_volname in the teardown hook script
+ cmd = ("sed -i -- 's/META=\"all\"/META=\"%s\"/g' "
+ "/var/lib/glusterd/hooks/1"
+ "/stop/pre/S29CTDB-teardown.sh")
+
+ ret, _, _ = g.run(mnode, cmd % ctdb_volname)
+ if ret:
+ g.log.error("Hook script - S29CTDB-teardown edit failed on %s", mnode)
+ return False
+ g.log.info("Hook script - S29CTDBteardown edit success on %s", mnode)
+ return True
+
+
+def enable_ctdb_cluster(mnode):
+ """
+ Edit the smb.conf to add clustering = yes
+
+ Args:
+ mnode (str): Node on which commands has to be executed.
+
+ Returns:
+ bool: True if successfully enable ctdb cluster else false
+ """
+ # Add clustering = yes in smb.conf if not already there
+ cmd = (r"grep -q 'clustering = yes' "
+ r"/etc/samba/smb.conf || sed -i.bak '/\[global\]/a "
+ r"clustering = yes' /etc/samba/smb.conf")
+ ret, _, _ = g.run(mnode, cmd)
+ if ret:
+ g.log.error("Failed to add cluster = yes to smb.conf in %s", mnode)
+ return False
+ g.log.info("Successfully added 'clustering = yes' to smb.conf "
+ "in all nodes")
+ return True
+
+
+def check_file_availability(mnode, file_path, filename):
+ """
+ Check for ctdb files and delete
+
+ Args:
+ mnode(str): Node on which command is executed
+        file_path(str): Absolute path of the file to be validated
+        filename(str): File to be deleted if available in /etc/ctdb/
+
+    Returns:
+        bool: True if the file is absent or was deleted successfully,
+              else False
+ """
+ if file_path in list_files(mnode, "/etc/ctdb/", filename):
+ ret, _, _ = g.run(mnode, "rm -rf %s" % file_path)
+ if ret:
+ return False
+ return True
+
+
+def create_nodes_file(mnode, node_ips):
+ """
+ Create nodes file and add node ips
+
+ Args:
+        mnode (str): Node on which commands have to be executed.
+        node_ips (list): List of node IPs to be added to the nodes file.
+
+ Returns:
+ bool: True if successfully create nodes file else false
+ """
+ # check if nodes file is available and delete
+ node_file_path = "/etc/ctdb/nodes"
+ ret = check_file_availability(mnode, node_file_path, "nodes")
+ if not ret:
+ g.log.info("Failed to delete pre-existing nodes file in %s", mnode)
+ return False
+ g.log.info("Deleted pre-existing nodes file in %s", mnode)
+ for node_ip in node_ips:
+ ret, _, _ = g.run(mnode, "echo -e %s "
+ ">> %s" % (node_ip, node_file_path))
+ if ret:
+ g.log.error("Failed to add nodes list in %s", mnode)
+ return False
+ g.log.info("Nodes list added succssfully to %s"
+ "file in all servers", node_file_path)
+ return True
+
+
+def create_public_address_file(mnode, vips):
+ """
+ Create public_addresses file and add vips
+
+ Args:
+ mnode (str): Node on which commands has to be executed.
+ vips (list): List of virtual ips
+
+ Returns:
+ bool: True if successfully creates public_address file else false
+ """
+ publicip_file_path = "/etc/ctdb/public_addresses"
+ ret = check_file_availability(mnode,
+ publicip_file_path,
+ "public_addresses")
+ if not ret:
+ g.log.info("Failed to delete pre-existing public_addresses"
+ "file in %s", mnode)
+ return False
+ g.log.info("Deleted pre-existing public_addresses"
+ "file in %s", mnode)
+ for vip in vips:
+ ret, _, _ = g.run(mnode, "echo -e %s >>"
+ " %s" % (vip, publicip_file_path))
+ if ret:
+ g.log.error("Failed to add vip list in %s", mnode)
+ return False
+ g.log.info("vip list added succssfully to %s"
+ "file in all node", publicip_file_path)
+ return True
+
+
+def ctdb_service_status(servers, mnode):
+ """
+ Status of ctdb service on the specified node.
+
+ Args:
+        servers (str|list): List of servers, used to check for RHEL 6
+        mnode (str): Node on which ctdb status needs to be checked
+
+ Returns:
+ tuple: Tuple containing three elements (ret, out, err).
+ The first element 'ret' is of type 'int' and is the return value
+ of command execution.
+
+ The second element 'out' is of type 'str' and is the stdout value
+ of the command execution.
+
+ The third element 'err' is of type 'str' and is the stderr value
+ of the command execution.
+ """
+ g.log.info("Getting ctdb service status on %s", mnode)
+ if is_rhel6(servers):
+ return g.run(mnode, "service ctdb status")
+ return g.run(mnode, "systemctl status ctdb")
+
+
+def is_ctdb_service_running(servers, mnode):
+ """
+ Check if ctdb service is running on node
+
+ Args:
+ servers (str|list): list|str of cluster nodes
+ mnode (str): Node on which ctdb service has to be checked
+
+ Returns:
+ bool: True if ctdb service running else False
+ """
+ g.log.info("Check if ctdb service is running on %s", mnode)
+ ret, out, _ = ctdb_service_status(servers, mnode)
+ if ret:
+ g.log.error("Execution error service ctdb status "
+ "on %s", mnode)
+ return False
+ if "Active: active (running)" in out:
+ g.log.info("ctdb service is running on %s", mnode)
+ return True
+ else:
+ g.log.error("ctdb service is not "
+ "running on %s", mnode)
+ return False
+
+
+def start_ctdb_service(servers):
+ """
+ start ctdb services on all nodes &
+ wait for 40 seconds
+
+ Args:
+ servers (list): IP of samba nodes
+
+ Returns:
+ bool: True if successfully starts ctdb service else false
+ """
+ cmd = "pgrep ctdb || service ctdb start"
+ for mnode in servers:
+ ret, out, _ = g.run(mnode, cmd)
+ if ret:
+ g.log.error("Unable to start ctdb on server %s", str(out))
+ return False
+ if not is_ctdb_service_running(servers, mnode):
+ g.log.error("ctdb services not running %s", str(out))
+ return False
+ g.log.info("Start ctdb on server %s successful", mnode)
+ # sleep for 40sec as ctdb status takes time to enable
+ sleep(40)
+ return True
+
+
+def stop_ctdb_service(servers):
+ """
+ stop ctdb services on all nodes
+
+ Args:
+ servers (list): IP of samba nodes
+
+ Returns:
+ bool: True if successfully stops ctdb service else false
+ """
+ cmd = "service ctdb stop"
+ for mnode in servers:
+ ret, out, _ = g.run(mnode, cmd)
+ if ret:
+ g.log.error("Unable to stop ctdb on server %s", str(out))
+ return False
+ if is_ctdb_service_running(servers, mnode):
+ g.log.error("ctdb services still running %s", str(out))
+ return False
+ g.log.info("Stop ctdb on server %s successful", mnode)
+ return True
+
+
+def ctdb_server_firewall_settings(servers):
+ """
+ Do firewall settings for ctdb
+
+ Args:
+        servers(list): IP of samba nodes
+
+ Returns:
+ bool: True if successfully added firewall services else false
+ """
+ # List of services to enable
+ services = ['samba', 'rpc-bind']
+ ret = add_services_to_firewall(servers, services, True)
+ if not ret:
+ g.log.error("Failed to set firewall zone "
+ "permanently on ctdb nodes")
+ return False
+
+ # Add ctdb and samba port
+ if not is_rhel6(servers):
+ for mnode in servers:
+ ret, _, _ = g.run(mnode, "firewall-cmd --add-port=4379/tcp "
+ "--add-port=139/tcp")
+ if ret:
+ g.log.error("Failed to add firewall port in %s", mnode)
+ return False
+ g.log.info("samba ctdb port added successfully in %s", mnode)
+ ret, _, _ = g.run(mnode, "firewall-cmd --add-port=4379/tcp "
+ "--add-port=139/tcp --permanent")
+ if ret:
+ g.log.error("Failed to add firewall port permanently in %s",
+ mnode)
+ return False
+ return True
+
+
+def parse_ctdb_status(status):
+ """
+ Parse the ctdb status output
+
+ Number of nodes:4
+ pnn:0 <ip> OK (THIS NODE)
+ pnn:1 <ip> OK
+ pnn:2 <ip> OK
+    pnn:3 <ip> UNHEALTHY
+ Generation:763624485
+ Size:4
+ hash:0 lmaster:0
+ hash:1 lmaster:1
+ hash:2 lmaster:2
+ hash:3 lmaster:3
+ Recovery mode:NORMAL (0)
+ Recovery master:3
+
+ Args:
+        status (str): Output of the ctdb status command
+
+ Returns:
+ dict: {<ip>: status}
+ """
+    pattern = r'pnn\:\d+\s*(\S+)\s*(\S+)'
+    ip_nodes = re.findall(pattern, status, re.S)
+ if ip_nodes:
+ # Empty dictionary to capture ctdb status output
+ node_status = {}
+ for item in ip_nodes:
+ node_status[item[0]] = item[1]
+ g.log.info("ctdb node status %s", node_status)
+ return node_status
+ else:
+ return {}
+
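Fed the sample output from the docstring, the parser reduces the status to an ip-to-state map; a quick sketch with placeholder addresses:

    sample = ("Number of nodes:2\n"
              "pnn:0 10.70.0.1      OK (THIS NODE)\n"
              "pnn:1 10.70.0.2      OK\n")
    parse_ctdb_status(sample)  # -> {'10.70.0.1': 'OK', '10.70.0.2': 'OK'}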
+
+def ctdb_status(mnode):
+ """
+ Execute ctdb status
+
+ Args:
+ mnode(str): primary node out of the servers
+
+ Returns:
+ tuple: Tuple containing three elements (ret, out, err).
+ The first element 'ret' is of type 'int' and is the return value
+ of command execution.
+
+ The second element 'out' is of type 'str' and is the stdout value
+ of the command execution.
+
+ The third element 'err' is of type 'str' and is the stderr value
+ of the command execution.
+
+ """
+ cmd = "ctdb status"
+ return g.run(mnode, cmd)
+
+
+def is_ctdb_status_healthy(mnode):
+ """
+ Check if ctdb is up & running
+
+ Args:
+ mnode(str): primary node out of the servers
+
+ Returns:
+ bool: True if ctdb status healthy else false
+ """
+ # Get the ctdb status details
+ status_res = ctdb_status(mnode)
+ if status_res[0]:
+ g.log.info("CTDB is not enabled for the cluster")
+ return False
+ # Get the ctdb status output
+ output = status_res[1]
+ # Parse the ctdb status output
+ node_status = parse_ctdb_status(output)
+ if not node_status:
+ g.log.error("ctdb status return empty list")
+ return False
+    for node_ip, status in node_status.items():
+ # Check if ctdb status is OK or not
+ if node_status[node_ip] != 'OK':
+ g.log.error("CTDB node %s is %s",
+ str(node_ip), status)
+ return False
+ g.log.info("CTDB node %s is %s",
+ str(node_ip), status)
+ return True
+
+
+def edit_hookscript_for_teardown(mnode, ctdb_volname):
+ """
+ Edit the hook scripts with ctdb volume name
+
+ Args:
+ mnode (str): Node on which commands has to be executed.
+ ctdb_volname (str): Name of ctdb volume
+ Returns:
+ bool: True if successfully edits hook-scripts else false
+ """
+    # Replace META="ctdb_volname" with META="all" in the setup hook script
+ cmd = ("sed -i -- 's/META=\"%s\"/META=\"all\"/g' "
+ "/var/lib/glusterd/hooks/1"
+ "/start/post/S29CTDBsetup.sh" % ctdb_volname)
+ ret, _, _ = g.run(mnode, cmd)
+ if ret:
+ g.log.error("Hook script - S29CTDBsetup edit failed on %s", mnode)
+ return False
+
+ g.log.info("Hook script - S29CTDBsetup edit success on %s", mnode)
+    # Replace META="ctdb_volname" with META="all" in the teardown hook script
+ cmd = ("sed -i -- 's/META=\"%s\"/META=\"all\"/g' "
+ "/var/lib/glusterd/hooks/1"
+ "/stop/pre/S29CTDB-teardown.sh" % ctdb_volname)
+ ret, _, _ = g.run(mnode, cmd)
+ if ret:
+ g.log.error("Hook script - S29CTDB-teardown edit failed on %s", mnode)
+ return False
+ g.log.info("Hook script - S29CTDBteardown edit success on %s", mnode)
+ return True
+
+
+def teardown_samba_ctdb_cluster(servers, ctdb_volname):
+ """
+ Tear down samba ctdb setup
+
+ Args:
+ servers (list): Nodes in ctdb cluster to teardown entire
+ cluster
+ ctdb_volname (str): Name of ctdb volume
+
+ Returns:
+ bool: True if successfully tear downs ctdb cluster else false
+ """
+
+ node_file_path = "/etc/ctdb/nodes"
+ publicip_file_path = "/etc/ctdb/public_addresses"
+ g.log.info("Executing force cleanup...")
+ # Stop ctdb service
+ if stop_ctdb_service(servers):
+ for mnode in servers:
+ # check if nodes file is available and delete
+ ret = check_file_availability(mnode, node_file_path, "nodes")
+ if not ret:
+ g.log.info("Failed to delete existing "
+ "nodes file in %s", mnode)
+ return False
+ g.log.info("Deleted existing nodes file in %s", mnode)
+
+ # check if public_addresses file is available and delete
+ ret = check_file_availability(mnode, publicip_file_path,
+ "public_addresses")
+ if not ret:
+ g.log.info("Failed to delete existing public_addresses"
+ " file in %s", mnode)
+ return False
+ g.log.info("Deleted existing public_addresses"
+ "file in %s", mnode)
+
+ ctdb_mount = '/gluster/lock'
+ ret, _, _ = umount_volume(mnode, ctdb_mount, 'glusterfs')
+ if ret:
+ g.log.error("Unable to unmount lock volume in %s", mnode)
+ return False
+ if not edit_hookscript_for_teardown(mnode, ctdb_volname):
+ return False
+ mnode = servers[0]
+ ret = cleanup_volume(mnode, ctdb_volname)
+ if not ret:
+ g.log.error("Failed to delete ctdb volume - %s", ctdb_volname)
+ return False
+ return True
+ return False
diff --git a/glustolibs-gluster/glustolibs/gluster/dht_test_utils.py b/glustolibs-gluster/glustolibs/gluster/dht_test_utils.py
index 692f09baf..11f2eda62 100644
--- a/glustolibs-gluster/glustolibs/gluster/dht_test_utils.py
+++ b/glustolibs-gluster/glustolibs/gluster/dht_test_utils.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -21,32 +21,43 @@ import os
from glusto.core import Glusto as g
-from glustolibs.gluster.glusterfile import GlusterFile, calculate_hash
+from glustolibs.gluster.glusterfile import (GlusterFile, calculate_hash,
+ get_pathinfo, file_exists)
from glustolibs.gluster.glusterdir import GlusterDir
from glustolibs.gluster.layout import Layout
import glustolibs.gluster.constants as k
import glustolibs.gluster.exceptions as gex
from glustolibs.gluster.brickdir import BrickDir
-from glustolibs.gluster.volume_libs import get_subvols
+from glustolibs.gluster.volume_libs import get_subvols, get_volume_type
+from glustolibs.misc.misc_libs import upload_scripts
-def run_layout_tests(fqpath, layout, test_type):
+def run_layout_tests(mnode, fqpath, layout, test_type):
"""run the is_complete and/or is_balanced tests"""
- if test_type & k.TEST_LAYOUT_IS_COMPLETE:
- g.log.info("Testing layout complete for %s" % fqpath)
- if not layout.is_complete:
- msg = ("Layout for %s IS NOT COMPLETE" % fqpath)
- g.log.error(msg)
- raise gex.LayoutIsNotCompleteError(msg)
- if test_type & k.TEST_LAYOUT_IS_BALANCED:
- g.log.info("Testing layout balance for %s" % fqpath)
- if not layout.is_balanced:
- msg = ("Layout for %s IS NOT BALANCED" % fqpath)
- g.log.error(msg)
- raise gex.LayoutIsNotBalancedError(msg)
-
- # returning True until logic requires non-exception error check(s)
- return True
+ ret = get_pathinfo(mnode, fqpath)
+ brick_path_list = ret.get('brickdir_paths')
+ for brickdir_path in brick_path_list:
+ (server_ip, _) = brickdir_path.split(':')
+ if get_volume_type(brickdir_path) in ('Replicate', 'Disperse',
+ 'Arbiter'):
+ g.log.info("Cannot check for layout completeness as"
+ " volume under test is Replicate/Disperse/Arbiter")
+ else:
+ if test_type & k.TEST_LAYOUT_IS_COMPLETE:
+ g.log.info("Testing layout complete for %s" % fqpath)
+ if not layout.is_complete:
+ msg = ("Layout for %s IS NOT COMPLETE" % fqpath)
+ g.log.error(msg)
+ raise gex.LayoutIsNotCompleteError(msg)
+ if test_type & k.TEST_LAYOUT_IS_BALANCED:
+ g.log.info("Testing layout balance for %s" % fqpath)
+ if not layout.is_balanced:
+ msg = ("Layout for %s IS NOT BALANCED" % fqpath)
+ g.log.error(msg)
+ raise gex.LayoutIsNotBalancedError(msg)
+
+ # returning True until logic requires non-exception error check(s)
+ return True
def run_hashed_bricks_test(gfile):
@@ -62,13 +73,13 @@ def run_hashed_bricks_test(gfile):
return True
-def validate_files_in_dir(host, rootdir,
+def validate_files_in_dir(mnode, rootdir,
file_type=k.FILETYPE_ALL,
test_type=k.TEST_ALL):
"""walk a directory tree and check if layout is_complete.
Args:
- host (str): The host of the directory being traversed.
+ mnode (str): The host of the directory being traversed.
rootdir (str): The fully qualified path of the dir being traversed.
file_type (int): An or'd set of constants defining the file types
to test.
@@ -108,16 +119,32 @@ def validate_files_in_dir(host, rootdir,
"""
layout_cache = {}
- conn = g.rpyc_get_connection(host)
-
- for walkies in conn.modules.os.walk(rootdir):
+ script_path = ("/usr/share/glustolibs/scripts/walk_dir.py")
+ if not file_exists(mnode, script_path):
+ if upload_scripts(mnode, script_path,
+ "/usr/share/glustolibs/scripts/"):
+ g.log.info("Successfully uploaded script "
+ "walk_dir.py!")
+ else:
+ g.log.error("Faild to upload walk_dir.py!")
+ return False
+ else:
+ g.log.info("compute_hash.py already present!")
+
+ cmd = ("/usr/bin/env python {0} {1}".format(script_path, rootdir))
+ ret, out, _ = g.run(mnode, cmd)
+ if ret:
+ g.log.error('Unable to run the script on node {0}'
+ .format(mnode))
+ return False
+ for walkies in eval(out):
g.log.info("TESTING DIRECTORY %s..." % walkies[0])
# check directories
if file_type & k.FILETYPE_DIR:
for testdir in walkies[1]:
fqpath = os.path.join(walkies[0], testdir)
- gdir = GlusterDir(host, fqpath)
+ gdir = GlusterDir(mnode, fqpath)
if gdir.parent_dir in layout_cache:
layout = layout_cache[gdir.parent_dir]
@@ -125,7 +152,7 @@ def validate_files_in_dir(host, rootdir,
layout = Layout(gdir.parent_dir_pathinfo)
layout_cache[gdir.parent_dir] = layout
- run_layout_tests(gdir.parent_dir, layout, test_type)
+ run_layout_tests(mnode, gdir.parent_dir, layout, test_type)
if test_type & k.TEST_FILE_EXISTS_ON_HASHED_BRICKS:
run_hashed_bricks_test(gdir)
@@ -134,7 +161,7 @@ def validate_files_in_dir(host, rootdir,
if file_type & k.FILETYPE_FILE:
for file in walkies[2]:
fqpath = os.path.join(walkies[0], file)
- gfile = GlusterFile(host, fqpath)
+ gfile = GlusterFile(mnode, fqpath)
if gfile.parent_dir in layout_cache:
layout = layout_cache[gfile.parent_dir]
@@ -142,11 +169,11 @@ def validate_files_in_dir(host, rootdir,
layout = Layout(gfile.parent_dir_pathinfo)
layout_cache[gfile.parent_dir] = layout
- run_layout_tests(gfile.parent_dir, layout, test_type)
+ run_layout_tests(mnode, gfile.parent_dir, layout,
+ test_type)
if test_type & k.TEST_FILE_EXISTS_ON_HASHED_BRICKS:
run_hashed_bricks_test(gfile)
-
return True
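A hedged usage sketch of the reworked entry point, assuming a mounted volume, a glusto clients list, and the constants module imported as k (mount path hypothetical):

    ret = validate_files_in_dir(clients[0], '/mnt/testvol',
                                file_type=k.FILETYPE_DIR,
                                test_type=k.TEST_LAYOUT_IS_COMPLETE)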
@@ -281,11 +308,22 @@ def find_new_hashed(subvols, parent_path, oldname):
g.log.error("could not form brickobject list")
return None
+ for bro in brickobject:
+ bro._get_hashrange()
+ low = bro._hashrange_low
+ high = bro._hashrange_high
+ g.log.debug("low hashrange %s high hashrange %s", str(low), str(high))
+ g.log.debug("absoulte path %s", bro._fqpath)
+
+ hash_num = calculate_hash(brickobject[0]._host, oldname)
oldhashed, _ = find_hashed_subvol(subvols, parent_path, oldname)
if oldhashed is None:
g.log.error("could not find old hashed subvol")
return None
+ g.log.debug("oldhashed: %s oldname: %s oldhash %s", oldhashed._host,
+ oldname, hash_num)
+
count = -1
for item in range(1, 5000, 1):
newhash = calculate_hash(brickobject[0]._host, str(item))
@@ -293,7 +331,7 @@ def find_new_hashed(subvols, parent_path, oldname):
count += 1
ret = brickdir.hashrange_contains_hash(newhash)
if ret == 1:
- if oldhashed._host != brickdir._host:
+ if oldhashed._fqpath != brickdir._fqpath:
g.log.debug("oldhashed %s new %s count %s",
oldhashed, brickdir._host, str(count))
return NewHashed(item, brickdir, count)
@@ -302,6 +340,44 @@ def find_new_hashed(subvols, parent_path, oldname):
return None
+def find_specific_hashed(subvols, parent_path, subvol, existing_names=None):
+ """ Finds filename that hashes to a specific subvol.
+
+ Args:
+ subvols(list): list of subvols
+ parent_path(str): parent path (relative to mount) of "oldname"
+ subvol(str): The subvol to which the new name has to be hashed
+ existing_names(int|list): The name(s) already hashed to subvol
+
+ Returns:
+ (Class Object): For success returns an object of type NewHashed
+ holding information pertaining to new name.
+ None, otherwise
+ Note: The new hash will be searched under the same parent
+ """
+ # pylint: disable=protected-access
+ if not isinstance(existing_names, list):
+ existing_names = [existing_names]
+ brickobject = create_brickobjectlist(subvols, parent_path)
+ if brickobject is None:
+ g.log.error("could not form brickobject list")
+ return None
+ count = -1
+ for item in range(1, 5000, 1):
+ newhash = calculate_hash(brickobject[0]._host, str(item))
+ for brickdir in brickobject:
+ count += 1
+ if (subvol._fqpath == brickdir._fqpath and
+ item not in existing_names):
+ ret = brickdir.hashrange_contains_hash(newhash)
+ if ret:
+ g.log.debug("oldhashed %s new %s count %s",
+ subvol, brickdir._host, str(count))
+ return NewHashed(item, brickdir, count)
+ count = -1
+ return None
+
+
class NewHashed(object):
'''
Helper Class to hold new hashed info
@@ -380,3 +456,5 @@ def is_layout_complete(mnode, volname, dirpath):
return False
elif hash_difference < 1:
g.log.error("Layout has overlaps")
+
+ return True
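
[Editor's note] A hedged usage sketch for the new find_specific_hashed()
helper. The hostnames, volume name and the NewHashed attribute names
(newname, count) are assumptions based on the surrounding code, not taken
from this patch:

    # Hypothetical test snippet: pick a filename that hashes to a chosen
    # subvolume, so that a rename to it stays on the same brick.
    from glusto.core import Glusto as g
    from glustolibs.gluster.volume_libs import get_subvols
    from glustolibs.gluster.dht_test_utils import (
        find_hashed_subvol, find_specific_hashed)

    mnode, volname = "server1.example.com", "testvol"   # assumed values
    subvols = get_subvols(mnode, volname)['volume_subvols']

    # Subvol (brickdir object) that "srcfile" currently hashes to.
    hashed_subvol, _ = find_hashed_subvol(subvols, "parent", "srcfile")

    # A new name guaranteed to hash to that same subvolume.
    new = find_specific_hashed(subvols, "parent", hashed_subvol)
    if new:
        g.log.info("name %s hashes to subvol %s", new.newname, new.count)
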
diff --git a/glustolibs-gluster/glustolibs/gluster/geo_rep_libs.py b/glustolibs-gluster/glustolibs/gluster/geo_rep_libs.py
index 0c15e64ac..20531b946 100644
--- a/glustolibs-gluster/glustolibs/gluster/geo_rep_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/geo_rep_libs.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -15,243 +15,347 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
- Description: Library for gluster geo-replication operations
+ Description: Library for gluster geo-replication operations.
"""
+from time import sleep
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_init import restart_glusterd
-from glustolibs.gluster.geo_rep_ops import (create_shared_storage,
- georep_groupadd,
- georep_geoaccount,
- georep_mountbroker_setup,
- georep_mountbroker_adduser,
- georep_mountbroker_status,
- georep_geoaccount_setpasswd,
- georep_ssh_keygen,
- georep_ssh_copyid,
- georep_createpem, georep_create,
- georep_set_pemkeys,
+from glustolibs.gluster.peer_ops import is_peer_connected
+from glustolibs.gluster.geo_rep_ops import (georep_mountbroker_setup,
+ georep_mountbroker_add_user,
+ georep_create_pem,
+ georep_create,
+ georep_set_pem_keys,
georep_config_set)
+from glustolibs.gluster.shared_storage_ops import (enable_shared_storage,
+ is_shared_volume_mounted,
+ check_gluster_shared_volume)
+from glustolibs.gluster.lib_utils import (group_add, ssh_copy_id,
+ ssh_keygen, add_user, set_passwd,
+ is_group_exists, is_user_exists,
+ is_passwordless_ssh_configured)
+from glustolibs.gluster.glusterdir import get_dir_contents
+from glustolibs.gluster.volume_ops import set_volume_options
+from glustolibs.gluster.volume_libs import setup_volume
+
+
+def georep_prerequisites(mnode, snode, passwd, user="root", group=None,
+ mntbroker_dir="/var/mountbroker-root",
+ nonrootpass=None):
+ """
+ Sets up all the prerequisites for geo-rep.
-
-def georep_root_prerequisites(mnode, snode, user, passwd):
- """Takes care of the pre-requisites required for
- setting up a successful geo-rep
- session which includes:
- 1. Check if shared_storage has been successfully created
- 2. Check if shared_storage has been mounted
- 3. Generating the common pem pub file on all the nodes
- on the master
- 4. Establishing passwordless ssh connection to the slave
Args:
- mnode (str) : The primary master node where the commands are executed
- snode (str) : The primary slave node where the commande are executed
- mastervol (str) : The name of the master volume
- slavevol (str): The name of the slave volume
+ mnode(str): The primary master node where the commands are executed
+ snode(str|list): slave nodes on which setup has to be completed.
+ passwd(str): Password of the root user.
+
+ Kwargs:
+ user(str): User to be used to setup the geo-rep session.
+ (Default: root)
+ mntbroker_dir(str): Mountbroker mount directory.
+ (Default: /var/mountbroker-root)
+ group(str): Group under which geo-rep useraccount is setup.
+ (Default: None)
+ nonrootpass(str): Password of the non-root user.(Default: None)
+
Returns:
bool : True if all the steps are successful, false if there are
any failures in the middle
"""
- g.log.debug("Enable shared-storage")
- ret, _, _ = create_shared_storage(mnode)
- if not ret:
- g.log.error("Failed to create gluster shared storage on "
- "the master cluster")
+ # Converting snode to list if string.
+ if not isinstance(snode, list):
+ snode = [snode]
+
+ # Checking for blank username.
+ if not user.strip():
+ g.log.error("Blank username isn't possible.")
return False
- g.log.debug("Creating a common pem pub file on all the nodes in "
- "the master to establish passwordless ssh connection "
- "to the slave ")
- ret = georep_ssh_keygen(mnode)
- if not ret:
- g.log.error("Failed to create a common pem pub file")
+ # Checking if non-root user is given without userpassword.
+ if user != "root" and nonrootpass is None:
+ g.log.error("Non-root user specified without password.")
return False
- g.log.debug("Establishing passwordless ssh between master and slave")
- ret = georep_ssh_copyid(mnode, snode, user, passwd)
+ # Checking and enabling shared storage on master cluster.
+ ret = is_shared_volume_mounted(mnode)
if not ret:
- g.log.error("Failed to establish ssh connection")
- return False
- g.log.info("Shared storage has been created. "
- "Passwordless ssh between the master and the "
- "slave has been successful!")
- return True
+ ret = enable_shared_storage(mnode)
+ if not ret:
+ g.log.error("Failed to set cluster"
+ ".enable-shared-storage to enable.")
+ return False
+ # Check volume list to confirm gluster_shared_storage is created
+ ret = check_gluster_shared_volume(mnode)
+ if not ret:
+ g.log.error("gluster_shared_storage volume not"
+ " created even after enabling it.")
+ return False
-def georep_create_root_session(mnode, snode, mastervol, slavevol,
- user=None, force=False):
- """ Create a geo-replication session between the master and
- the slave
- Args:
- mnode (str) : The primary master node where the commands are executed
- snode (str) : The primary slave node where the commande are executed
- mastervol (str) : The name of the master volume
- slavevol (str): The name of the slave volume
- user (str): Since it's a root session, user is root or None
- force (bool) : Set to true if session needs to be created with force
- else it remains false as the default option
+ # Running prerequisites for non-root user.
+ if user != "root" and group is not None:
- Returns:
- bool : True if all the steps are successful, false if there are
- any failures in the middle
- """
- g.log.debug("Creating a common pem file on %s", mnode)
- ret, out, err = georep_createpem(mnode)
- if not ret:
- g.log.error("Failed to create a common pem file on all the nodes "
- "belonging to the cluster %s ", mnode)
- g.log.error("Error: out: %s \nerr: %s", out, err)
- return False
+ if len(snode) < 2:
+ g.log.error("A list of all slave nodes is needed for non-root"
+ " setup as every slave node will have a non-root"
+ " user.")
+ return False
- g.log.debug("Create geo-rep session from %s to %s", mnode, snode)
- ret, out, err = georep_create(mnode, mastervol, snode,
- slavevol, user, force)
- if not ret:
- g.log.error("Failed to create geo-rep session")
- g.log.error("Error: out: %s \nerr: %s", out, err)
- return False
+ # Checking and creating a group on all slave nodes.
+ if not is_group_exists(snode, group):
+ ret = group_add(snode, group)
+ if not ret:
+ g.log.error("Creating group: %s on all slave nodes failed.",
+ group)
+ return False
+
+ # Checking and creating a non-root user on all the nodes.
+ if not is_user_exists(snode, user):
+ ret = add_user(snode, user, group)
+ if not ret:
+ g.log.error("Creating user: %s in group: %s on all slave nodes"
+ " failed,", user, group)
+ return False
+
+ # Setting password for user on all the nodes.
+ ret = set_passwd(snode, user, nonrootpass)
+ if not ret:
+ g.log.error("Setting password failed on slaves")
+ return False
+
+ # Setting up mount broker on first slave node.
+ ret, _, _ = georep_mountbroker_setup(snode[0], group,
+ mntbroker_dir)
+ if ret:
+ g.log.error("Setting up of mount broker directory"
+ " failed on node: %s", snode[0])
+ return False
+
+ # Checking if ssh keys are present.
+ ret = get_dir_contents(mnode, "~/.ssh/")
+ if "id_rsa" not in ret or "id_rsa.pub" not in ret:
+ ret = ssh_keygen(mnode)
+ if not ret:
+ g.log.error("Failed to create a common pem pub file.")
+ return False
- g.log.debug("Setting up meta-volume on %s", mnode)
- ret, out, err = georep_config_set(mnode, mastervol, snode, slavevol,
- "use_meta_volume", "True")
- if not ret:
- g.log.error("Failed to set up meta-volume")
- g.log.error("Error: out: %s \nerr: %s", out, err)
- return False
+ # Setting up passwordless ssh to primary slave node.
+ if not is_passwordless_ssh_configured(mnode, snode[0], user):
+ if user != "root":
+ ret = ssh_copy_id(mnode, snode[0], nonrootpass, user)
+ else:
+ ret = ssh_copy_id(mnode, snode[0], passwd, user)
+ if not ret:
+ g.log.error("Failed to setup passwordless ssh.")
+ return False
- g.log.info("Pem file has been created and the keys have "
- "been pushed to all the slave nodes. The meta-volume "
- "has been successfully configured as well ")
+ # Checking if pem files else running gsec_create.
+ ret = get_dir_contents(mnode, "/var/lib/glusterd/geo-replication/")
+ list_of_pem_files = [
+ "common_secret.pem.pub", "secret.pem",
+ "tar_ssh.pem", "gsyncd_template.conf",
+ "secret.pem.pub", "tar_ssh.pem.pub"
+ ]
+    if not ret or set(ret) != set(list_of_pem_files):
+ ret, _, _ = georep_create_pem(mnode)
+ if ret:
+ g.log.error("Failed exeucte gluster system:: execute gsec_create.")
+ return False
return True
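
[Editor's note] A minimal sketch of how the reworked georep_prerequisites()
is expected to be driven for a non-root session. Hostnames, passwords and
the user/group names below are placeholders, not from the patch:

    from glustolibs.gluster.geo_rep_libs import georep_prerequisites

    master_node = "master1.example.com"
    # A list of all slave nodes is mandatory for non-root setup, since a
    # geo-rep user account is created on every one of them.
    slave_nodes = ["slave1.example.com", "slave2.example.com",
                   "slave3.example.com"]

    ret = georep_prerequisites(master_node, slave_nodes,
                               passwd="root-password",
                               user="geoaccount",
                               group="geogroup",
                               nonrootpass="geo-password")
    if not ret:
        raise Exception("geo-rep prerequisites failed")
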
-def georep_nonroot_prerequisites(mnode, snodes, group, user, mntbroker_dir,
- slavevol):
- """ Setup pre-requisites for mountbroker setup
+def georep_create_session(mnode, snode, mastervol, slavevol,
+ user="root", force=False, sync="rsync"):
+ """ Create a geo-replication session between the master and
+ the slave.
Args:
- mnode (str) : Master node on which cmd is to be executed
- snodes (list): List of slave nodes
- group (str): Specifies a group name
- user (str): Specifies a user name
- mntbroker_dir: Mountbroker mount directory
- slavevol (str) The name of the slave volume
- Returns:
- bool: True if all pre-requisite are successful else False
+ mnode(str): The primary master node where the commands are executed
+        snode(str|list): Slave node(s) where the commands are executed
+ mastervol(str): The name of the master volume
+ slavevol(str): The name of the slave volume
+ Kwargs:
+ user (str): User to be used to create geo-rep session.(Default: root)
+ force (bool) : Set to true if session needs to be created with force
+ else it remains false as the default option.
+ (Default: False)
+ sync (str): Sync method to be used for geo-rep session.(Default:rsync)
+ Returns:
+ bool : True if all the steps are successful, false if there are
+ any failures in the middle
"""
- g.log.debug("Enable shared-storage")
- ret, _, err = create_shared_storage(mnode)
- if ret:
- if "already exists" not in err:
- g.log.error("Failed to enable shared storage on %s", mnode)
- return False
-
- g.log.debug("Create new group: %s on all slave nodes", group)
- if not georep_groupadd(snodes, group):
- g.log.error("Creating group: %s on all slave nodes failed", group)
- return False
+ # Converting snode to list if string.
+ if not isinstance(snode, list):
+ snode = [snode]
- g.log.debug("Create user: %s in group: %s on all slave nodes", user, group)
- if not georep_geoaccount(snodes, group, user):
- g.log.error("Creating user: %s in group: %s on all slave nodes "
- "failed", user, group)
+ # Checking for blank username.
+ if not user.strip():
+ g.log.error("Blank username isn't possible.")
return False
- g.log.debug("Setting up mount broker root directory: %s node: %s",
- mntbroker_dir, snodes[0])
- ret, _, _ = georep_mountbroker_setup(snodes[0], group, mntbroker_dir)
- if ret:
- g.log.error("Setting up of mount broker directory failed: %s node: %s",
- mntbroker_dir, snodes[0])
+ if sync not in ["rsync", "tarssh"]:
+ g.log.error("Invalid sync method used. "
+ "%s is not a valid sync method.", sync)
return False
- g.log.debug("Add volume: %s and user: %s to mountbroker service",
- slavevol, user)
- ret, _, _ = georep_mountbroker_adduser(snodes[0], slavevol, user)
- if ret:
- g.log.error("Add volume: %s and user: %s to mountbroker "
- "service failed", slavevol, user)
- return False
+ # Setting up root geo-rep session.
+ if user == "root":
+ g.log.debug("Creating root geo-rep session.")
+ ret, _, _ = georep_create(mnode, mastervol, snode[0],
+ slavevol, user, force)
+ if ret:
+ g.log.error("Failed to create geo-rep session")
+ return False
- g.log.debug("Checking mountbroker status")
- ret, out, _ = georep_mountbroker_status(snodes[0])
- if not ret:
- if "not ok" in out:
- g.log.error("Mountbroker status not ok")
+ g.log.debug("Enabling meta-volume for master volume.")
+ ret, _, _ = georep_config_set(mnode, mastervol, snode[0],
+ slavevol, "use_meta_volume", "True")
+ if ret:
+ g.log.error("Failed to set up meta-volume for root "
+ "geo-rep session from %s to %s",
+                        mastervol, slavevol)
return False
+
+ # Setting up sync method if not rsync.
+ g.log.debug("Enabling tarssh for master volume.")
+ if sync == "tarssh":
+ ret, _, _ = georep_config_set(mnode, mastervol, snode[0],
+ slavevol, "sync_method", "tarssh")
+ if ret:
+ g.log.error("Failed to set sync method to tarssh for root "
+ "geo-rep session from %s to %s",
+                            mastervol, slavevol)
+ return False
+ return True
+
+ # Setting up non-root geo-rep session.
else:
- g.log.error("Mountbroker status command failed")
- return False
+ # Glusterd has to be restarted on all the slave nodes.
+ if len(snode) < 2:
+ g.log.error("A list of all slave nodes is needed for non-root"
+ " session to restart glusterd on all slaves after"
+ " adding it to mountbroker.")
+ return False
- g.log.debug("Restart glusterd on all slave nodes")
- if not restart_glusterd(snodes):
- g.log.error("Restarting glusterd failed")
- return False
+ # Adding volume to mountbroker.
+ g.log.debug("Creating a non-root geo-rep session.")
+ ret, _, _ = georep_mountbroker_add_user(snode[0], slavevol, user)
+ if ret:
+ g.log.error("Failed to setup mountbroker.")
+ return False
- g.log.debug("Set passwd for user account on slave")
- if not georep_geoaccount_setpasswd(snodes, group, user, "geopasswd"):
- g.log.error("Setting password failed on slaves")
- return False
+ # Restarting glusterd on all nodes.
+ ret = restart_glusterd(snode)
+ if not ret:
+ g.log.error("Restarting glusterd failed.")
+ return False
- g.log.debug("Setup passwordless SSH between %s and %s", mnode, snodes[0])
- if not georep_ssh_keygen(mnode):
- g.log.error("ssh keygen is failed on %s", mnode)
- return False
+ # Checking if peers are in connected state or not.
+ ret = is_peer_connected(snode[0], snode)
+ if not ret:
- if not georep_ssh_copyid(mnode, snodes[0], user, "geopasswd"):
- g.log.error("ssh copy-id is failed from %s to %s", mnode, snodes[0])
- return False
+ counter = 20
+ while counter > 0:
- return True
+ ret = is_peer_connected(snode[0], snode)
+ if ret:
+ break
+ sleep(3)
+                counter -= 1
+ # Creating a geo-rep session.
+ ret, _, _ = georep_create(mnode, mastervol, snode[0], slavevol,
+ user, force)
+ if ret:
+ g.log.error("Failed to create geo-rep session.")
+ return False
-def georep_create_nonroot_session(mnode, mastervol, snode, slavevol, user,
- force=False):
- """ Create mountbroker/non-root geo-rep session
+ # Setting up pem keys between master and slave node.
+ g.log.debug("Copy geo-rep pem keys onto all slave nodes.")
+ ret, _, _ = georep_set_pem_keys(snode[0], user, mastervol, slavevol)
+ if ret:
+ g.log.error("Failed to copy geo-rep pem keys onto all slave nodes")
+ return False
- Args:
- mnode (str) : Master node for session creation
- mastervol (str) The name of the master volume
- snode (str): Slave node for session creation
- slavevol (str) The name of the slave volume
- user (str): Specifies a user name
- Returns:
- bool: True if geo-rep session is created successfully
- Else False
+ # Setting use_meta_volume to true.
+ g.log.debug("Setting use_meta_volume to true.")
+ ret, _, _ = georep_config_set(mnode, mastervol, snode[0], slavevol,
+ "use_meta_volume", "true", user)
+ if ret:
+ g.log.error("Failed to set up meta-volume for %s "
+ "geo-rep session from %s to %s.",
+                        user, mastervol, slavevol)
+ return False
- """
+ # Setting up sync method if not rsync.
+ g.log.debug("Setting sync method to tarssh.")
+ if sync == "tarssh":
+ ret, _, _ = georep_config_set(mnode, mastervol, snode[0],
+ slavevol, "sync_method", "tarssh",
+ user)
+ if ret:
+ g.log.error("Failed to set sync method to tarssh for %s "
+ "geo-rep session from %s to %s",
+                            user, mastervol, slavevol)
+ return False
+ return True
+
+
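
[Editor's note] The matching session-creation calls, sketched with the
same placeholder names. For user="root" a single slave node suffices; for
a non-root user the full slave list is required because glusterd is
restarted on all of the slaves:

    from glustolibs.gluster.geo_rep_libs import georep_create_session

    # Root session over rsync (the defaults).
    ret = georep_create_session("master1.example.com",
                                "slave1.example.com",
                                "mastervol", "slavevol")

    # Non-root session over tarssh; needs every slave node.
    ret = georep_create_session(
        "master1.example.com",
        ["slave1.example.com", "slave2.example.com",
         "slave3.example.com"],
        "mastervol", "slavevol", user="geoaccount", sync="tarssh")
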
+def setup_master_and_slave_volumes(mnode, all_servers_info,
+ master_volume_config,
+ snode, all_slaves_info,
+ slave_volume_config,
+ force=False):
+ """Create master and slave volumes for geo-replication.
- g.log.debug("Create geo-rep pem keys")
- ret, out, err = georep_createpem(mnode)
- if ret:
- g.log.error("Failed to create pem keys")
- g.log.error("Error: out: %s \nerr: %s", out, err)
- return False
+ Args:
+ mnode(str): The primary master node where the commands are executed.
+ all_servers_info(dict): Information about all master servers.
+ master_volume_config(dict): Dict containing volume information
+ of master.
+        snode(str): Slave node where the commands are executed.
+ all_slaves_info(dict): Information about all slave servers.
+ slave_volume_config(dict): Dict containing volume information
+ of slave.
+ kwargs:
+ force(bool): If set to true then will create volumes
+ with force option.
- g.log.debug("Create geo-rep session")
- ret, out, err = georep_create(mnode, mastervol, snode, slavevol,
- user, force)
- if ret:
- g.log.error("Failed to create geo-rep session")
- g.log.error("Error: out: %s \nerr: %s", out, err)
+ Returns:
+ bool : True if volumes created successfully, false if there are
+ any failures in the middle.
+
+ Example:
+ setup_master_and_slave_volumes(
+            cls.mnode, cls.all_servers_info, cls.master_volume,
+ cls.snode, cls.all_slaves_info, cls.slave_volume)
+ >>> True
+ """
+ # Setting up the master and the slave volume.
+ ret = setup_volume(mnode, all_servers_info, master_volume_config,
+ force)
+ if not ret:
+ g.log.error("Failed to Setup master volume %s",
+ master_volume_config['name'])
return False
- g.log.debug("Copy geo-rep pem keys onto all slave nodes")
- ret, out, err = georep_set_pemkeys(snode, user, mastervol, slavevol)
- if ret:
- g.log.error("Failed to copy geo-rep pem keys onto all slave nodes")
- g.log.error("Error: out:%s \nerr:%s", out, err)
+ ret = setup_volume(snode, all_slaves_info, slave_volume_config,
+ force)
+ if not ret:
+ g.log.error("Failed to Setup slave volume %s",
+ slave_volume_config['name'])
return False
- g.log.debug("Enable meta-volume")
- ret, out, err = georep_config_set(mnode, mastervol, snode, slavevol,
- "use_meta_volume", "true")
- if ret:
- g.log.error("Failed to set meta-volume")
- g.log.error("Error: out: %s \nerr: %s", out, err)
+ # Setting performance.quick-read to off.
+ ret = set_volume_options(snode, slave_volume_config['name'],
+ {"performance.quick-read": "off"})
+ if not ret:
+ g.log.error("Failed to performance.quick-read to off on "
+ "slave volume %s", slave_volume_config['name'])
return False
-
return True
diff --git a/glustolibs-gluster/glustolibs/gluster/geo_rep_ops.py b/glustolibs-gluster/glustolibs/gluster/geo_rep_ops.py
index 8acec0d0e..7d0f5a73e 100755
--- a/glustolibs-gluster/glustolibs/gluster/geo_rep_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/geo_rep_ops.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2019 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -21,13 +21,11 @@
from glusto.core import Glusto as g
-def create_shared_storage(mnode):
- """Create shared volume which is necessary for the setup of
- a geo-rep session
-
+def georep_create_pem(mnode):
+ """ Creates a common pem pub file on all the nodes in the master and
+ is used to implement the passwordless SSH connection
Args:
- mnode(str): Node on which command is to be executed
-
+ mnode (str): Node on which cmd is to be executed
Returns:
tuple: Tuple containing three elements (ret, out, err).
The first element 'ret' is of type 'int' and is the return value
@@ -40,15 +38,19 @@ def create_shared_storage(mnode):
of the command execution.
"""
- cmd = "gluster volume set all cluster.enable-shared-storage enable"
+ cmd = "gluster system:: execute gsec_create"
return g.run(mnode, cmd)
-def georep_createpem(mnode):
- """ Creates a common pem pub file on all the nodes in the master and
- is used to implement the passwordless SSH connection
+def georep_set_pem_keys(mnode, useraccount, mastervol, slavevol):
+ """ Sets geo-rep pem keys
+
Args:
- mnode (str): Node on which cmd is to be executed
+ mnode (str): Node on which command is to be executed
+ useraccount (str) : User with which geo-rep is to be set up
+ mastervol (str) : The master volume
+ slavevol (str): The slave volume
+
Returns:
tuple: Tuple containing three elements (ret, out, err).
The first element 'ret' is of type 'int' and is the return value
@@ -61,140 +63,11 @@ def georep_createpem(mnode):
of the command execution.
"""
- cmd = "gluster system:: execute gsec_create"
+ cmd = ("/usr/libexec/glusterfs/set_geo_rep_pem_keys.sh %s %s %s" %
+ (useraccount, mastervol, slavevol))
return g.run(mnode, cmd)
-def georep_ssh_keygen(mnode):
- """ Creates a pair of ssh private and public key if not present
-
- Args:
- mnode (str): Node on which cmd is to be executed
- Returns:
- bool : True if ssh-keygen is successful on all servers.
- False otherwise. It also returns True if ssh key
- is already present
-
- """
- cmd = 'echo -e "n" | ssh-keygen -f ~/.ssh/id_rsa -q -N ""'
- ret, out, _ = g.run(mnode, cmd)
- if ret and "already exists" not in out:
- return False
- return True
-
-
-def georep_ssh_copyid(mnode, tonode, user, passwd):
- """ Copies the default ssh public key onto tonode's
- authorized_keys file
-
- Args:
- mnode (str): Node on which cmd is to be executed
- tonode (str): Node to which ssh key is to be copied
- user (str): user of tonode
- passwd (str): passwd of the user of tonode
- Returns:
- bool : True if ssh-copy-id is successful to tonode.
- False otherwise. It also returns True if ssh key
- is already present
-
- """
- cmd = ('sshpass -p "%s" ssh-copy-id -o StrictHostKeyChecking=no %s@%s' %
- (passwd, user, tonode))
- ret, _, _ = g.run(mnode, cmd)
- if ret:
- return False
- return True
-
-
-def georep_groupadd(servers, groupname):
- """ Creates a group in all the slave nodes where a user will be added
- to set up a non-root session
-
- Args:
- servers (list): list of nodes on which cmd is to be executed
- groupname (str): Specifies a groupname
-
- Returns:
- bool : True if add group is successful on all servers.
- False otherwise.
-
- """
- cmd = "groupadd %s" % groupname
- results = g.run_parallel(servers, cmd)
-
- _rc = True
- for server, ret_value in list(results.items()):
- retcode, _, err = ret_value
- if retcode != 0 and "already exists" not in err:
- g.log.error("Unable to add group %s on server %s",
- groupname, server)
- _rc = False
- if not _rc:
- return False
-
- return True
-
-
-def georep_geoaccount(servers, groupname, groupaccount):
- """ Creates a user account with which the geo-rep session can be securely
- set up
-
- Args:
- servers (list): list of nodes on which cmd is to be executed
- groupname (str): Specifies a groupname
- groupaccount (str): Specifies the user account to set up geo-rep
-
- Returns:
- bool : True if user add is successful on all servers.
- False otherwise.
-
- """
- cmd = "useradd -G %s %s" % (groupname, groupaccount)
- results = g.run_parallel(servers, cmd)
-
- _rc = True
- for server, ret_value in list(results.items()):
- retcode, _, err = ret_value
- if retcode != 0 and "already exists" not in err:
- g.log.error("Unable to add user on %s", server)
- _rc = False
- if not _rc:
- return False
-
- return True
-
-
-def georep_geoaccount_setpasswd(servers, groupname, groupaccount, passwd):
- """ Creates a user account with which the geo-rep session can be securely
- set up
-
- Args:
- servers (list): list of nodes on which cmd is to be executed
- groupname (str): Specifies a groupname
- groupaccount (str): Specifies the user account to set up geo-rep
- passwd (str): Specifies password for they groupaccount
-
- Returns:
- bool : True if password set is successful on all servers.
- False otherwise.
-
- """
- cmd = "echo %s:%s | chpasswd" % (groupaccount, passwd)
- results = g.run_parallel(servers, cmd)
-
- _rc = True
- for server, ret_value in list(results.items()):
- retcode, _, err = ret_value
- if retcode != 0:
- g.log.error("Unable to set passwd for user %s on %s",
- groupaccount, server)
- _rc = False
- if not _rc:
- return False
-
- return True
-
-
def georep_mountbroker_setup(mnode, groupname, directory):
""" Sets up mountbroker root directory and group
@@ -219,13 +92,13 @@ def georep_mountbroker_setup(mnode, groupname, directory):
return g.run(mnode, cmd)
-def georep_mountbroker_adduser(mnode, slavevol, useraccount):
+def georep_mountbroker_add_user(mnode, slavevol, useraccount):
""" Adds the volume and user to the mountbroker
Args:
mnode (str): Node on which command is to be executed
slavevol (str) : The slave volume name
- useraccount (str): The user with which geo-rep is to be set up
+ useraccount (str): The user with which geo-rep is to be setup
Returns:
tuple: Tuple containing three elements (ret, out, err).
@@ -266,14 +139,13 @@ def georep_mountbroker_status(mnode):
return g.run(mnode, cmd)
-def georep_set_pemkeys(mnode, useraccount, mastervol, slavevol):
- """ Sets geo-rep pem keys
+def georep_mountbroker_remove_user(mnode, slavevol, useraccount):
+ """ Remove the volume and user from the mountbroker
Args:
mnode (str): Node on which command is to be executed
- useraccount (str) : User with which geo-rep is to be set up
- mastervol (str) : The master volume
- slavevol (str): The slave volume
+ slavevol (str) : The slave volume name
+ useraccount (str): The user with which geo-rep is to be setup
Returns:
tuple: Tuple containing three elements (ret, out, err).
@@ -287,8 +159,8 @@ def georep_set_pemkeys(mnode, useraccount, mastervol, slavevol):
of the command execution.
"""
- cmd = ("/usr/libexec/glusterfs/set_geo_rep_pem_keys.sh %s %s %s" %
- (useraccount, mastervol, slavevol))
+ cmd = ("gluster-mountbroker remove --volume %s --user %s"
+ % (slavevol, useraccount))
return g.run(mnode, cmd)
@@ -351,23 +223,18 @@ def georep_create(mnode, mastervol, slaveip, slavevol, user=None, force=False):
"""
if user:
- if force:
- cmd = ("gluster volume geo-replication %s %s@%s::%s create "
- "push-pem force" % (mastervol, user, slaveip, slavevol))
- else:
- cmd = ("gluster volume geo-replication %s %s@%s::%s create "
- "push-pem" % (mastervol, user, slaveip, slavevol))
+ cmd = ("gluster volume geo-replication %s %s@%s::%s create "
+ "push-pem " % (mastervol, user, slaveip, slavevol))
else:
- if force:
- cmd = ("gluster volume geo-replication %s %s::%s create "
- "push-pem force" % (mastervol, slaveip, slavevol))
- else:
- cmd = ("gluster volume geo-replication %s %s::%s create push-pem" %
- (mastervol, slaveip, slavevol))
+ cmd = ("gluster volume geo-replication %s %s::%s create push-pem" %
+ (mastervol, slaveip, slavevol))
+ if force:
+ cmd = (cmd + " force")
return g.run(mnode, cmd)
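
[Editor's note] The refactor above collapses four near-identical branches
into a single force-suffix append. A worked example of the strings it now
composes (volume and host names are placeholders):

    # Mirror of the refactored builder, for illustration only.
    def compose_create_cmd(mastervol, slaveip, slavevol,
                           user=None, force=False):
        if user:
            cmd = ("gluster volume geo-replication %s %s@%s::%s create "
                   "push-pem " % (mastervol, user, slaveip, slavevol))
        else:
            cmd = ("gluster volume geo-replication %s %s::%s create "
                   "push-pem" % (mastervol, slaveip, slavevol))
        if force:
            cmd = cmd + " force"
        return cmd

    # -> "gluster volume geo-replication mvol geo@s1::svol create
    #     push-pem  force" (the doubled space before "force" is left by
    #     the trailing blank in "push-pem "; the gluster CLI splits on
    #     whitespace, so it is harmless).
    print(compose_create_cmd("mvol", "s1", "svol", user="geo", force=True))
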
-def georep_config_get(mnode, mastervol, slaveip, slavevol, config_key):
+def georep_config_get(mnode, mastervol, slaveip, slavevol, config_key,
+ user=None):
""" All the available configurable geo-rep options can be got
using the config_key and seeing what it has been set to
@@ -390,12 +257,17 @@ def georep_config_get(mnode, mastervol, slaveip, slavevol, config_key):
of the command execution.
"""
- cmd = ("gluster volume geo-replication %s %s::%s config %s" %
- (mastervol, slaveip, slavevol, config_key))
+ if user:
+ cmd = ("gluster volume geo-replication %s %s@%s::%s config %s" %
+ (mastervol, user, slaveip, slavevol, config_key))
+ else:
+ cmd = ("gluster volume geo-replication %s %s::%s config %s" %
+ (mastervol, slaveip, slavevol, config_key))
return g.run(mnode, cmd)
-def georep_config_set(mnode, mastervol, slaveip, slavevol, config, value):
+def georep_config_set(mnode, mastervol, slaveip, slavevol, config, value,
+ user=None):
""" All the available configurable geo-rep options can be set with a
specific command if required or
just with the config parameter
@@ -419,8 +291,12 @@ def georep_config_set(mnode, mastervol, slaveip, slavevol, config, value):
of the command execution.
"""
- cmd = ("gluster volume geo-replication %s %s::%s config %s %s" %
- (mastervol, slaveip, slavevol, config, value))
+ if user:
+ cmd = ("gluster volume geo-replication %s %s@%s::%s config %s %s" %
+ (mastervol, user, slaveip, slavevol, config, value))
+ else:
+ cmd = ("gluster volume geo-replication %s %s::%s config %s %s" %
+ (mastervol, slaveip, slavevol, config, value))
return g.run(mnode, cmd)
@@ -454,19 +330,13 @@ def georep_start(mnode, mastervol, slaveip, slavevol, user=None, force=False):
"""
if user:
- if force:
- cmd = ("gluster volume geo-replication %s %s@%s::%s start force" %
- (mastervol, user, slaveip, slavevol))
- else:
- cmd = ("gluster volume geo-replication %s %s@%s::%s start" %
- (mastervol, user, slaveip, slavevol))
+ cmd = ("gluster volume geo-replication %s %s@%s::%s start " %
+ (mastervol, user, slaveip, slavevol))
else:
- if force:
- cmd = ("gluster volume geo-replication %s %s::%s start force" %
- (mastervol, slaveip, slavevol))
- else:
- cmd = ("gluster volume geo-replication %s %s::%s start" %
- (mastervol, slaveip, slavevol))
+ cmd = ("gluster volume geo-replication %s %s::%s start " %
+ (mastervol, slaveip, slavevol))
+ if force:
+ cmd = (cmd + "force")
return g.run(mnode, cmd)
@@ -500,19 +370,13 @@ def georep_stop(mnode, mastervol, slaveip, slavevol, user=None, force=False):
"""
if user:
- if force:
- cmd = ("gluster volume geo-replication %s %s@%s::%s stop force" %
- (mastervol, user, slaveip, slavevol))
- else:
- cmd = ("gluster volume geo-replication %s %s@%s::%s stop" %
- (mastervol, user, slaveip, slavevol))
+ cmd = ("gluster volume geo-replication %s %s@%s::%s stop " %
+ (mastervol, user, slaveip, slavevol))
else:
- if force:
- cmd = ("gluster volume geo-replication %s %s::%s stop force" %
- (mastervol, slaveip, slavevol))
- else:
- cmd = ("gluster volume geo-replication %s %s::%s stop" %
- (mastervol, slaveip, slavevol))
+ cmd = ("gluster volume geo-replication %s %s::%s stop " %
+ (mastervol, slaveip, slavevol))
+ if force:
+ cmd = (cmd + "force")
return g.run(mnode, cmd)
diff --git a/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py b/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
index 97a9fe365..65061cb13 100644..100755
--- a/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
+++ b/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2021 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -19,35 +19,54 @@
variables necessary for tests.
"""
-import unittest
-import os
-import random
-import copy
-import datetime
-import time
-import socket
+from copy import deepcopy
+from datetime import datetime
+from inspect import isclass
+from os.path import join as path_join
+from random import choice as random_choice
+from socket import (
+ gethostbyname,
+ gaierror,
+)
+from unittest import TestCase
+from time import sleep
+
from glusto.core import Glusto as g
-from glustolibs.gluster.exceptions import ConfigError, ExecutionError
-from glustolibs.gluster.peer_ops import is_peer_connected, peer_status
-from glustolibs.gluster.volume_ops import set_volume_options
-from glustolibs.gluster.block_ops import block_delete
-from glustolibs.gluster.block_libs import (setup_block, if_block_exists,
- get_block_list,
- get_block_info)
-from glustolibs.gluster.volume_libs import (setup_volume,
- cleanup_volume,
- log_volume_info_and_status)
-from glustolibs.gluster.volume_libs import (
- wait_for_volume_process_to_be_online)
-from glustolibs.gluster.samba_libs import share_volume_over_smb
-from glustolibs.gluster.nfs_libs import export_volume_through_nfs
+
+from glustolibs.gluster.exceptions import (
+ ConfigError,
+ ExecutionError,
+)
+from glustolibs.gluster.lib_utils import inject_msg_in_logs
from glustolibs.gluster.mount_ops import create_mount_objs
+from glustolibs.gluster.nfs_libs import export_volume_through_nfs
+from glustolibs.gluster.peer_ops import (
+ is_peer_connected,
+ peer_probe_servers, peer_status
+)
+from glustolibs.gluster.gluster_init import (
+ restart_glusterd, stop_glusterd, wait_for_glusterd_to_start)
+from glustolibs.gluster.samba_libs import share_volume_over_smb
+from glustolibs.gluster.shared_storage_ops import is_shared_volume_mounted
+from glustolibs.gluster.volume_libs import (
+ cleanup_volume,
+ log_volume_info_and_status,
+ setup_volume,
+ wait_for_volume_process_to_be_online,
+)
+from glustolibs.gluster.brick_libs import (
+ wait_for_bricks_to_be_online, get_offline_bricks_list)
+from glustolibs.gluster.volume_ops import (
+ set_volume_options, volume_reset, volume_start)
from glustolibs.io.utils import log_mounts_info
-from glustolibs.gluster.lib_utils import inject_msg_in_logs
+from glustolibs.gluster.geo_rep_libs import setup_master_and_slave_volumes
+from glustolibs.gluster.nfs_ganesha_ops import (
+ teardown_nfs_ganesha_cluster)
+from glustolibs.misc.misc_libs import kill_process
class runs_on(g.CarteTestClass):
- """Decorator providing runs_on capability for standard unittest script"""
+ """Decorator providing runs_on capability for standard unittest script."""
def __init__(self, value):
# the names of the class attributes set by the runs_on decorator
@@ -56,21 +75,19 @@ class runs_on(g.CarteTestClass):
# the options to replace 'ALL' in selections
self.available_options = [['distributed', 'replicated',
'distributed-replicated',
- 'dispersed', 'distributed-dispersed'],
+ 'dispersed', 'distributed-dispersed',
+ 'arbiter', 'distributed-arbiter'],
['glusterfs', 'nfs', 'cifs', 'smb']]
# these are the volume and mount options to run and set in config
# what do runs_on_volumes and runs_on_mounts need to be named????
- run_on_volumes = self.available_options[0]
- run_on_mounts = self.available_options[1]
- if 'gluster' in g.config and g.config['gluster']:
- if ('running_on_volumes' in g.config['gluster'] and
- g.config['gluster']['running_on_volumes']):
- run_on_volumes = g.config['gluster']['running_on_volumes']
-
- if ('running_on_mounts' in g.config['gluster'] and
- g.config['gluster']['running_on_mounts']):
- run_on_mounts = g.config['gluster']['running_on_mounts']
+ run_on_volumes, run_on_mounts = self.available_options[0:2]
+ if g.config.get('gluster', {}).get('running_on_volumes'):
+ run_on_volumes = g.config['gluster']['running_on_volumes']
+
+ if g.config.get('gluster', {}).get('running_on_mounts'):
+ run_on_mounts = g.config['gluster']['running_on_mounts']
+
# selections is the above info from the run that is intersected with
# the limits from the test script
self.selections = [run_on_volumes, run_on_mounts]
@@ -79,7 +96,7 @@ class runs_on(g.CarteTestClass):
self.limits = value
-class GlusterBaseClass(unittest.TestCase):
+class GlusterBaseClass(TestCase):
"""GlusterBaseClass to be subclassed by Gluster Tests.
This class reads the config for variable values that will be used in
gluster tests. If variable values are not specified in the config file,
@@ -89,6 +106,36 @@ class GlusterBaseClass(unittest.TestCase):
# defaults in setUpClass()
volume_type = None
mount_type = None
+ error_or_failure_exists = False
+
+ @staticmethod
+ def get_super_method(obj, method_name):
+ """PY2/3 compatible method for getting proper parent's (super) methods.
+
+ Useful for test classes wrapped by 'runs_on' decorator which has
+ duplicated original test class [py3] as parent instead of the
+ base class as it is expected.
+
+ Example for calling 'setUp()' method of the base class from the
+ 'setUp' method of a test class which was decorated with 'runs_on':
+
+ @runs_on([['distributed'], ['glusterfs']])
+ class TestDecoratedClass(GlusterBaseClass):
+ ...
+ @classmethod
+ def setUpClass(cls):
+ cls.get_super_method(cls, 'setUpClass')()
+ ...
+ def setUp(self):
+ self.get_super_method(self, 'setUp')()
+ ...
+
+ """
+ current_type = obj if isclass(obj) else obj.__class__
+ while getattr(super(current_type, obj), method_name) == getattr(
+ obj, method_name):
+ current_type = current_type.__base__
+ return getattr(super(current_type, obj), method_name)
@classmethod
def inject_msg_in_gluster_logs(cls, msg):
@@ -129,12 +176,12 @@ class GlusterBaseClass(unittest.TestCase):
list: List of IP's corresponding to the hostnames of nodes.
"""
nodes_ips = []
- if isinstance(nodes, str):
+ if not isinstance(nodes, list):
nodes = [nodes]
for node in nodes:
try:
- ip = socket.gethostbyname(node)
- except socket.gaierror as e:
+ ip = gethostbyname(node)
+ except gaierror as e:
g.log.error("Failed to get the IP of Host: %s : %s", node,
e.strerror)
ip = None
@@ -149,6 +196,11 @@ class GlusterBaseClass(unittest.TestCase):
Returns (bool): True if all peers are in connected with other peers.
False otherwise.
"""
+
+        # If the setup has a single-node server, bypass this validation.
+ if len(cls.servers) == 1:
+ return True
+
# Validate if peer is connected from all the servers
g.log.info("Validating if servers %s are connected from other servers "
"in the cluster", cls.servers)
@@ -171,11 +223,121 @@ class GlusterBaseClass(unittest.TestCase):
return True
+ def _is_error_or_failure_exists(self):
+ """Function to get execution error in case of
+ failures in testcases
+ """
+ if hasattr(self, '_outcome'):
+ # Python 3.4+
+ result = self.defaultTestResult()
+ self._feedErrorsToResult(result, self._outcome.errors)
+ else:
+ # Python 2.7-3.3
+ result = getattr(
+ self, '_outcomeForDoCleanups', self._resultForDoCleanups)
+ ok_result = True
+ for attr in ('errors', 'failures'):
+ if not hasattr(result, attr):
+ continue
+ exc_list = getattr(result, attr)
+ if exc_list and exc_list[-1][0] is self:
+ ok_result = ok_result and not exc_list[-1][1]
+ if hasattr(result, '_excinfo'):
+ ok_result = ok_result and not result._excinfo
+ if ok_result:
+ return False
+ self.error_or_failure_exists = True
+ GlusterBaseClass.error_or_failure_exists = True
+ return True
+
+ @classmethod
+ def scratch_cleanup(cls, error_or_failure_exists):
+ """
+        This scratch_cleanup routine runs only when the currently
+        running code hits an execution or assertion error.
+
+        Args:
+            error_or_failure_exists (bool): If set to True, cleans up the
+            setup at the end of the testcase, but only when an execution
+            or assertion error occurred in the test steps. False skips
+            this scratch cleanup step.
+
+ Returns (bool): True if setup cleanup is successful.
+ False otherwise.
+ """
+ if error_or_failure_exists:
+ shared_storage_mounted = False
+ if is_shared_volume_mounted(cls.mnode):
+ shared_storage_mounted = True
+ ret = stop_glusterd(cls.servers)
+ if not ret:
+ g.log.error("Failed to stop glusterd")
+ cmd_list = ("pkill `pidof glusterd`",
+ "rm /var/run/glusterd.socket")
+ for server in cls.servers:
+ for cmd in cmd_list:
+ ret, _, _ = g.run(server, cmd, "root")
+ if ret:
+ g.log.error("Failed to stop glusterd")
+ return False
+ for server in cls.servers:
+ ret, out, _ = g.run(server, "pgrep glusterfsd", "root")
+ if not ret:
+ ret = kill_process(server,
+ process_ids=out.strip().split('\n'))
+ if not ret:
+ g.log.error("Unable to kill process {}".format(
+ out.strip().split('\n')))
+ return False
+ if not shared_storage_mounted:
+ cmd_list = (
+ "rm -rf /var/lib/glusterd/vols/*",
+ "rm -rf /var/lib/glusterd/snaps/*",
+ "rm -rf /var/lib/glusterd/peers/*",
+ "rm -rf {}/*/*".format(
+ cls.all_servers_info[server]['brick_root']))
+ else:
+ cmd_list = (
+ "for vol in `ls /var/lib/glusterd/vols/ | "
+ "grep -v gluster_shared_storage`;do "
+ "rm -rf /var/lib/glusterd/vols/$vol;done",
+ "rm -rf /var/lib/glusterd/snaps/*"
+ "rm -rf {}/*/*".format(
+ cls.all_servers_info[server]['brick_root']))
+ for cmd in cmd_list:
+ ret, _, _ = g.run(server, cmd, "root")
+ if ret:
+ g.log.error(
+ "failed to cleanup server {}".format(server))
+ return False
+ ret = restart_glusterd(cls.servers)
+ if not ret:
+ g.log.error("Failed to start glusterd")
+ return False
+ sleep(2)
+ ret = wait_for_glusterd_to_start(cls.servers)
+ if not ret:
+ g.log.error("Failed to bring glusterd up")
+ return False
+ if not shared_storage_mounted:
+ ret = peer_probe_servers(cls.mnode, cls.servers)
+ if not ret:
+ g.log.error("Failed to peer probe servers")
+ return False
+ for client in cls.clients:
+ cmd_list = ("umount /mnt/*", "rm -rf /mnt/*")
+ for cmd in cmd_list:
+ ret = g.run(client, cmd, "root")
+ if ret:
+ g.log.error(
+ "failed to unmount/already unmounted {}"
+ .format(client))
+ return True
+
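
[Editor's note] The patch adds the two hooks but their caller sits outside
this hunk. A hedged sketch of how a test's tearDown could wire them
together (an assumption, not shown in this diff):

    def tearDown(self):
        # Record any execution/assertion error from the test steps and,
        # if one exists, tear down and rebuild the cluster state.
        if self._is_error_or_failure_exists():
            ret = self.scratch_cleanup(self.error_or_failure_exists)
            g.log.info("Scratch cleanup %s", "done" if ret else "failed")
        self.get_super_method(self, 'tearDown')()
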
@classmethod
- def setup_volume(cls, volume_create_force=False):
+ def setup_volume(cls, volume_create_force=False, only_volume_create=False):
"""Setup the volume:
- Create the volume, Start volume, Set volume
- options, enable snapshot/quota/tier if specified in the config
+ options, enable snapshot/quota if specified in the config
file.
- Wait for volume processes to be online
- Export volume as NFS/SMB share if mount_type is NFS or SMB
@@ -184,6 +346,9 @@ class GlusterBaseClass(unittest.TestCase):
Args:
volume_create_force(bool): True if create_volume should be
executed with 'force' option.
+            only_volume_create(bool): True, if only volume creation is
+                                      needed. False (default), for both
+                                      volume creation and start.
Returns (bool): True if all the steps mentioned in the descriptions
passes. False otherwise.
@@ -206,12 +371,19 @@ class GlusterBaseClass(unittest.TestCase):
g.log.info("Setting up volume %s", cls.volname)
ret = setup_volume(mnode=cls.mnode,
all_servers_info=cls.all_servers_info,
- volume_config=cls.volume, force=force_volume_create)
+ volume_config=cls.volume, force=force_volume_create,
+ create_only=only_volume_create)
if not ret:
g.log.error("Failed to Setup volume %s", cls.volname)
return False
g.log.info("Successful in setting up volume %s", cls.volname)
+        # Return without proceeding to the next steps.
+        if only_volume_create and ret:
+            g.log.info("Setup of volume {} with only volume creation "
+                       "is successful".format(cls.volname))
+ return True
+
# Wait for volume processes to be online
g.log.info("Wait for volume %s processes to be online", cls.volname)
ret = wait_for_volume_process_to_be_online(cls.mnode, cls.volname)
@@ -302,6 +474,9 @@ class GlusterBaseClass(unittest.TestCase):
"""
g.log.info("Starting to mount volume %s", cls.volname)
for mount_obj in mounts:
+ # For nfs-ganesha, mount is done via vip
+ if cls.enable_nfs_ganesha:
+ mount_obj.server_system = cls.vips[0]
g.log.info("Mounting volume '%s:%s' on '%s:%s'",
mount_obj.server_system, mount_obj.volname,
mount_obj.client_system, mount_obj.mountpoint)
@@ -350,6 +525,53 @@ class GlusterBaseClass(unittest.TestCase):
return True
@classmethod
+ def bricks_online_and_volume_reset(cls):
+ """
+ reset the volume if any bricks are offline.
+ waits for all bricks to be online and resets
+ volume options set
+ """
+ bricks_offline = get_offline_bricks_list(cls.mnode, cls.volname)
+ if bricks_offline is not None:
+ ret = volume_start(cls.mnode, cls.volname, force=True)
+ if not ret:
+ raise ExecutionError("Failed to force start volume"
+ "%s" % cls.volname)
+ ret = wait_for_bricks_to_be_online(cls.mnode, cls.volname)
+ if not ret:
+ raise ExecutionError("Failed to bring bricks online"
+ "for volume %s" % cls.volname)
+
+ ret, _, _ = volume_reset(cls.mnode, cls.volname, force=True)
+ if ret:
+ raise ExecutionError("Failed to reset volume %s" % cls.volname)
+ g.log.info("Successful in volume reset %s", cls.volname)
+
+ @classmethod
+ def setup_and_mount_geo_rep_master_and_slave_volumes(cls, force=False):
+ """Setup geo-rep master and slave volumes.
+
+        Returns (bool): True if setup and mount are successful. False otherwise.
+ """
+ # Creating and starting master and slave volume.
+ ret = setup_master_and_slave_volumes(
+            cls.mnode, cls.all_servers_info, cls.master_volume,
+ cls.snode, cls.all_slaves_info, cls.slave_volume,
+ force)
+ if not ret:
+ g.log.error('Failed to create master and slave volumes.')
+ return False
+
+ # Mounting master and slave volumes
+ for mount in [cls.master_mounts, cls.slave_mounts]:
+ ret = cls.mount_volume(cls, mount)
+ if not ret:
+                g.log.error('Failed to mount volume %s.',
+                            mount[0].volname)
+ return False
+ return True
+
+ @classmethod
def unmount_volume(cls, mounts):
"""Unmount all mounts for the volume
@@ -376,13 +598,24 @@ class GlusterBaseClass(unittest.TestCase):
log_mounts_info(cls.mounts)
return False
- else:
- g.log.info("Successful in unmounting volume '%s:%s' on "
- "'%s:%s'", mount_obj.server_system,
- mount_obj.volname, mount_obj.client_system,
- mount_obj.mountpoint)
- g.log.info("Successful in unmounting all mount objs for the volume %s",
- cls.volname)
+
+ g.log.info("Starting to delete the directory path used for "
+ "mounting")
+ cmd = ('rm -rf %s' % mount_obj.mountpoint)
+ ret, _, err = g.run(
+ mount_obj.client_system, cmd, user=mount_obj.user)
+ if ret:
+ g.log.error(
+ "failed to delete the directory path used for "
+ "mounting %s: %s" % (mount_obj.mountpoint, err))
+ return False
+
+ g.log.info(
+ "Successful in deleting the directory path used for "
+ "mounting '%s:%s' on '%s:%s'" % (
+ mount_obj.server_system,
+ mount_obj.volname, mount_obj.client_system,
+ mount_obj.mountpoint))
# Get mounts info
g.log.info("Get mounts Info:")
@@ -391,11 +624,30 @@ class GlusterBaseClass(unittest.TestCase):
return True
@classmethod
+ def get_unique_lv_list_from_all_servers(cls):
+ """Get all unique lv path from all servers
+
+ Returns: List of all unique lv path in all servers. None otherwise.
+ """
+ cmd = "lvs --noheadings -o lv_path | awk '{if ($1) print $1}'"
+ lv_list = []
+ for server in cls.servers:
+ ret, out, _ = g.run(server, cmd, "root")
+ current_lv_list = out.splitlines()
+ if current_lv_list:
+ lv_list.extend(current_lv_list)
+ if ret:
+ g.log.error("failed to execute command %s" % cmd)
+ raise ExecutionError("Failed to execute %s cmd" % cmd)
+ return list(set(lv_list))
+
+ @classmethod
def cleanup_volume(cls):
"""Cleanup the volume
Returns (bool): True if cleanup volume is successful. False otherwise.
"""
+ cls.bricks_online_and_volume_reset()
g.log.info("Cleanup Volume %s", cls.volname)
ret = cleanup_volume(mnode=cls.mnode, volname=cls.volname)
if not ret:
@@ -407,7 +659,28 @@ class GlusterBaseClass(unittest.TestCase):
g.log.info("Log Volume %s Info and Status", cls.volname)
log_volume_info_and_status(cls.mnode, cls.volname)
- return ret
+ # compare and remove additional lv created, skip otherwise
+ new_lv_list = cls.get_unique_lv_list_from_all_servers()
+ if cls.lv_list != new_lv_list:
+ cmd = ("for mnt in `mount | grep 'run/gluster/snaps' |"
+ "awk '{print $3}'`; do umount $mnt; done")
+ for server in cls.servers:
+ ret, _, err = g.run(server, cmd, "root")
+ if ret:
+ g.log.error("Failed to remove snap "
+ "bricks from mountpoint %s" % err)
+ return False
+ new_lv_list = cls.get_unique_lv_list_from_all_servers()
+ lv_remove_list = list(set(new_lv_list) - set(cls.lv_list))
+ for server in cls.servers:
+ for lv in lv_remove_list:
+ cmd = ("lvremove %s --force" % lv)
+ ret, _, err = g.run(server, cmd, "root")
+ if ret:
+ g.log.error("failed to remove lv: %s" % err)
+ g.log.info("Expected error msg '%s'" % err)
+ g.log.info("Successfully cleaned-up volumes")
+ return True
@classmethod
def unmount_volume_and_cleanup_volume(cls, mounts):
@@ -433,8 +706,7 @@ class GlusterBaseClass(unittest.TestCase):
@classmethod
def setUpClass(cls):
- """Initialize all the variables necessary for testing Gluster
- """
+ """Initialize all the variables necessary for testing Gluster."""
# Get all servers
cls.all_servers = None
if 'servers' in g.config and g.config['servers']:
@@ -445,17 +717,16 @@ class GlusterBaseClass(unittest.TestCase):
# Get all slaves
cls.slaves = None
- if 'slaves' in g.config and g.config['slaves']:
+ if g.config.get('slaves'):
cls.slaves = g.config['slaves']
# Set mnode_slave : Node on which slave commands are executed
cls.mnode_slave = cls.slaves[0]
# Slave IP's
- cls.slaves_ip = []
cls.slaves_ip = cls.get_ip_from_hostname(cls.slaves)
# Get all clients
cls.all_clients = None
- if 'clients' in g.config and g.config['clients']:
+ if g.config.get('clients'):
cls.all_clients = g.config['clients']
cls.clients = cls.all_clients
else:
@@ -463,29 +734,31 @@ class GlusterBaseClass(unittest.TestCase):
# Get all servers info
cls.all_servers_info = None
- if 'servers_info' in g.config and g.config['servers_info']:
+ if g.config.get('servers_info'):
cls.all_servers_info = g.config['servers_info']
else:
raise ConfigError("'servers_info' not defined in the global "
"config")
# Get all slaves info
cls.all_slaves_info = None
- if 'slaves_info' in g.config and g.config['slaves_info']:
+ if g.config.get('slaves_info'):
cls.all_slaves_info = g.config['slaves_info']
# All clients_info
cls.all_clients_info = None
- if 'clients_info' in g.config and g.config['clients_info']:
+ if g.config.get('clients_info'):
cls.all_clients_info = g.config['clients_info']
else:
raise ConfigError("'clients_info' not defined in the global "
"config")
+ # get lv list
+ cls.lv_list = cls.get_unique_lv_list_from_all_servers()
+
# Set mnode : Node on which gluster commands are executed
cls.mnode = cls.all_servers[0]
# Server IP's
- cls.servers_ips = []
cls.servers_ips = cls.get_ip_from_hostname(cls.servers)
# SMB Cluster info
@@ -500,58 +773,82 @@ class GlusterBaseClass(unittest.TestCase):
# NFS-Ganesha Cluster info
try:
- cls.enable_nfs_ganesha = bool(g.config['gluster']['cluster_config']
- ['nfs_ganesha']['enable'])
- cls.num_of_nfs_ganesha_nodes = (g.config['gluster']
- ['cluster_config']['nfs_ganesha']
- ['num_of_nfs_ganesha_nodes'])
- cls.vips = (g.config['gluster']['cluster_config']['nfs_ganesha']
- ['vips'])
+ cls.enable_nfs_ganesha = (
+ g.config['gluster']['cluster_config']['nfs_ganesha']['enable']
+ in ('TRUE', 'True', 'true', 'YES', 'Yes', 'yes', '1', 1)
+ )
+ cls.num_of_nfs_ganesha_nodes = g.config['gluster'][
+ 'cluster_config']['nfs_ganesha']['num_of_nfs_ganesha_nodes']
+ cls.vips = (
+ g.config['gluster']['cluster_config']['nfs_ganesha']['vips'])
except KeyError:
cls.enable_nfs_ganesha = False
cls.num_of_nfs_ganesha_nodes = None
cls.vips = []
+ # Geo-rep Cluster information
+ try:
+ cls.geo_rep_info = (g.config['gluster']['geo_rep']
+ ['cluster_config'])
+ except KeyError:
+ cls.geo_rep_info = {}
+ cls.geo_rep_info['root'] = {}
+ cls.geo_rep_info['user'] = {}
+ cls.geo_rep_info['root']['password'] = ''
+ cls.geo_rep_info['user']['name'] = ''
+ cls.geo_rep_info['user']['password'] = ''
+ cls.geo_rep_info['user']['group'] = ''
+
# Defining default volume_types configuration.
cls.default_volume_type_config = {
'replicated': {
'type': 'replicated',
'replica_count': 3,
- 'transport': 'tcp'
- },
+ 'transport': 'tcp',
+ },
'dispersed': {
'type': 'dispersed',
'disperse_count': 6,
'redundancy_count': 2,
- 'transport': 'tcp'
- },
+ 'transport': 'tcp',
+ },
'distributed': {
'type': 'distributed',
'dist_count': 4,
- 'transport': 'tcp'
- },
+ 'transport': 'tcp',
+ },
'distributed-replicated': {
'type': 'distributed-replicated',
'dist_count': 2,
'replica_count': 3,
- 'transport': 'tcp'
- },
+ 'transport': 'tcp',
+ },
'distributed-dispersed': {
'type': 'distributed-dispersed',
'dist_count': 2,
'disperse_count': 6,
'redundancy_count': 2,
- 'transport': 'tcp'
- }
+ 'transport': 'tcp',
+ },
+ 'arbiter': {
+ 'type': 'arbiter',
+ 'replica_count': 3,
+ 'arbiter_count': 1,
+ 'transport': 'tcp',
+ },
+ 'distributed-arbiter': {
+ 'type': 'distributed-arbiter',
+ 'dist_count': 2,
+ 'replica_count': 3,
+ 'arbiter_count': 1,
+                'transport': 'tcp',
}
+ }
- # Check if default volume_type configuration is provided in
- # config yml
- if (g.config.get('gluster') and
- g.config['gluster'].get('volume_types')):
+ # Check if default volume_type configuration is provided in config yml
+ if g.config.get('gluster', {}).get('volume_types'):
default_volume_type_from_config = (
g.config['gluster']['volume_types'])
-
for volume_type in default_volume_type_from_config.keys():
if default_volume_type_from_config[volume_type]:
if volume_type in cls.default_volume_type_config:
@@ -560,53 +857,40 @@ class GlusterBaseClass(unittest.TestCase):
# Create Volume with force option
cls.volume_create_force = False
- if (g.config.get('gluster') and
- g.config['gluster'].get('volume_create_force')):
+ if g.config.get('gluster', {}).get('volume_create_force'):
cls.volume_create_force = (
g.config['gluster']['volume_create_force'])
# Default volume options which is applicable for all the volumes
cls.volume_options = {}
- if (g.config.get('gluster') and
- g.config['gluster'].get('volume_options')):
+ if g.config.get('gluster', {}).get('volume_options'):
cls.volume_options = g.config['gluster']['volume_options']
# If the volume is exported as SMB Share, then set the following
# volume options on the share.
cls.smb_share_options = {}
- if (g.config.get('gluster') and
- g.config['gluster'].get('smb_share_options')):
- cls.smb_share_options = (
- g.config['gluster']['smb_share_options'])
+ if g.config.get('gluster', {}).get('smb_share_options'):
+ cls.smb_share_options = g.config['gluster']['smb_share_options']
# If the volume is exported as NFS-Ganesha export,
# then set the following volume options on the export.
cls.nfs_ganesha_export_options = {}
- if (g.config.get('gluster') and
- g.config['gluster'].get('nfs_ganesha_export_options')):
+ if g.config.get('gluster', {}).get('nfs_ganesha_export_options'):
cls.nfs_ganesha_export_options = (
g.config['gluster']['nfs_ganesha_export_options'])
# Get the volume configuration.
cls.volume = {}
if cls.volume_type:
- found_volume = False
- if 'gluster' in g.config:
- if 'volumes' in g.config['gluster']:
- for volume in g.config['gluster']['volumes']:
- if volume['voltype']['type'] == cls.volume_type:
- cls.volume = copy.deepcopy(volume)
- found_volume = True
- break
-
- if found_volume:
- if 'name' not in cls.volume:
- cls.volume['name'] = 'testvol_%s' % cls.volume_type
-
- if 'servers' not in cls.volume:
- cls.volume['servers'] = cls.all_servers
-
- if not found_volume:
+ for volume in g.config.get('gluster', {}).get('volumes', []):
+ if volume['voltype']['type'] == cls.volume_type:
+ cls.volume = deepcopy(volume)
+ if 'name' not in cls.volume:
+ cls.volume['name'] = 'testvol_%s' % cls.volume_type
+ if 'servers' not in cls.volume:
+ cls.volume['servers'] = cls.all_servers
+ break
+ else:
try:
if g.config['gluster']['volume_types'][cls.volume_type]:
cls.volume['voltype'] = (g.config['gluster']
@@ -614,8 +898,8 @@ class GlusterBaseClass(unittest.TestCase):
[cls.volume_type])
except KeyError:
try:
- cls.volume['voltype'] = (cls.default_volume_type_config
- [cls.volume_type])
+ cls.volume['voltype'] = (
+ cls.default_volume_type_config[cls.volume_type])
except KeyError:
raise ConfigError("Unable to get configs of volume "
"type: %s", cls.volume_type)
@@ -633,75 +917,89 @@ class GlusterBaseClass(unittest.TestCase):
cls.mnode = cls.servers[0]
cls.vol_options = cls.volume['options']
+ # Define useful variables for geo-rep volumes.
+ if cls.slaves:
+ # For master volume
+ cls.master_volume = cls.volume
+ cls.master_volume['name'] = ('master_testvol_%s'
+ % cls.volume_type)
+ cls.master_volname = cls.master_volume['name']
+ cls.master_voltype = (cls.master_volume['voltype']
+ ['type'])
+
+ # For slave volume
+ cls.slave_volume = deepcopy(cls.volume)
+ cls.slave_volume['name'] = ('slave_testvol_%s'
+ % cls.volume_type)
+ cls.slave_volume['servers'] = cls.slaves
+ cls.slave_volname = cls.slave_volume['name']
+ cls.slave_voltype = (cls.slave_volume['voltype']
+ ['type'])
+
# Get the mount configuration.
cls.mounts = []
if cls.mount_type:
cls.mounts_dict_list = []
- found_mount = False
- if 'gluster' in g.config:
- if 'mounts' in g.config['gluster']:
- for mount in g.config['gluster']['mounts']:
- if mount['protocol'] == cls.mount_type:
- temp_mount = {}
- temp_mount['protocol'] = cls.mount_type
- if 'volname' in mount and mount['volname']:
- if mount['volname'] == cls.volname:
- temp_mount = copy.deepcopy(mount)
- else:
- continue
- else:
- temp_mount['volname'] = cls.volname
- if ('server' not in mount or
- (not mount['server'])):
- temp_mount['server'] = cls.mnode
- else:
- temp_mount['server'] = mount['server']
- if ('mountpoint' not in mount or
- (not mount['mountpoint'])):
- temp_mount['mountpoint'] = (os.path.join(
- "/mnt", '_'.join([cls.volname,
- cls.mount_type])))
- else:
- temp_mount['mountpoint'] = mount['mountpoint']
- if ('client' not in mount or
- (not mount['client'])):
- temp_mount['client'] = (
- cls.all_clients_info[
- random.choice(
- cls.all_clients_info.keys())]
- )
- else:
- temp_mount['client'] = mount['client']
- if 'options' in mount and mount['options']:
- temp_mount['options'] = mount['options']
- else:
- temp_mount['options'] = ''
- cls.mounts_dict_list.append(temp_mount)
- found_mount = True
-
- if not found_mount:
+ for mount in g.config.get('gluster', {}).get('mounts', []):
+ if mount['protocol'] != cls.mount_type:
+ continue
+ temp_mount = {
+ 'protocol': cls.mount_type,
+ 'volname': cls.volname,
+ }
+ if mount.get('volname'):
+ if mount['volname'] == cls.volname:
+ temp_mount = deepcopy(mount)
+ else:
+ continue
+ temp_mount.update({
+ 'server': mount.get('server', cls.mnode),
+ 'mountpoint': mount.get('mountpoint', path_join(
+ "/mnt", '_'.join([cls.volname, cls.mount_type]))),
+ 'client': mount.get('client', cls.all_clients_info[
+ random_choice(list(cls.all_clients_info.keys()))]),
+ 'options': mount.get('options', ''),
+ })
+ cls.mounts_dict_list.append(temp_mount)
+
+ if not cls.mounts_dict_list:
for client in cls.all_clients_info.keys():
- mount = {
+ cls.mounts_dict_list.append({
'protocol': cls.mount_type,
'server': cls.mnode,
'volname': cls.volname,
'client': cls.all_clients_info[client],
- 'mountpoint': (os.path.join(
- "/mnt", '_'.join([cls.volname, cls.mount_type]))),
- 'options': ''
- }
- cls.mounts_dict_list.append(mount)
+ 'mountpoint': path_join(
+ "/mnt", '_'.join([cls.volname, cls.mount_type])),
+ 'options': '',
+ })
if cls.mount_type == 'cifs' or cls.mount_type == 'smb':
for mount in cls.mounts_dict_list:
if 'smbuser' not in mount:
- mount['smbuser'] = random.choice(
- cls.smb_users_info.keys())
+ mount['smbuser'] = random_choice(
+ list(cls.smb_users_info.keys()))
mount['smbpasswd'] = (
cls.smb_users_info[mount['smbuser']]['password'])
cls.mounts = create_mount_objs(cls.mounts_dict_list)
+ # Setting mounts for geo-rep volumes.
+ if cls.slaves:
+
+ # For master volume mount
+ cls.master_mounts = cls.mounts
+
+ # For slave volume mount
+ slave_mount_dict_list = deepcopy(cls.mounts_dict_list)
+ for mount_dict in slave_mount_dict_list:
+ mount_dict['volname'] = cls.slave_volname
+ mount_dict['server'] = cls.mnode_slave
+ mount_dict['mountpoint'] = path_join(
+ "/mnt", '_'.join([cls.slave_volname,
+ cls.mount_type]))
+ cls.slave_mounts = create_mount_objs(slave_mount_dict_list)
+
# Defining clients from mounts.
cls.clients = []
for mount in cls.mounts_dict_list:
@@ -709,37 +1007,26 @@ class GlusterBaseClass(unittest.TestCase):
cls.clients = list(set(cls.clients))
# Gluster Logs info
- cls.server_gluster_logs_dirs = ["/var/log/glusterfs",
- "/var/log/samba"]
+ cls.server_gluster_logs_dirs = ["/var/log/glusterfs", "/var/log/samba"]
cls.server_gluster_logs_files = ["/var/log/ganesha.log",
"/var/log/ganesha-gfapi.log"]
- if ('gluster' in g.config and
- 'server_gluster_logs_info' in g.config['gluster']):
+ if g.config.get('gluster', {}).get('server_gluster_logs_info'):
server_gluster_logs_info = (
g.config['gluster']['server_gluster_logs_info'])
- if ('dirs' in server_gluster_logs_info and
- server_gluster_logs_info['dirs']):
- cls.server_gluster_logs_dirs = (
- server_gluster_logs_info['dirs'])
-
- if ('files' in server_gluster_logs_info and
- server_gluster_logs_info['files']):
+ if server_gluster_logs_info.get('dirs'):
+ cls.server_gluster_logs_dirs = server_gluster_logs_info['dirs']
+ if server_gluster_logs_info.get('files'):
cls.server_gluster_logs_files = (
server_gluster_logs_info['files'])
cls.client_gluster_logs_dirs = ["/var/log/glusterfs"]
cls.client_gluster_logs_files = []
- if ('gluster' in g.config and
- 'client_gluster_logs_info' in g.config['gluster']):
+ if g.config.get('gluster', {}).get('client_gluster_logs_info'):
client_gluster_logs_info = (
g.config['gluster']['client_gluster_logs_info'])
- if ('dirs' in client_gluster_logs_info and
- client_gluster_logs_info['dirs']):
- cls.client_gluster_logs_dirs = (
- client_gluster_logs_info['dirs'])
-
- if ('files' in client_gluster_logs_info and
- client_gluster_logs_info['files']):
+ if client_gluster_logs_info.get('dirs'):
+ cls.client_gluster_logs_dirs = client_gluster_logs_info['dirs']
+ if client_gluster_logs_info.get('files'):
cls.client_gluster_logs_files = (
client_gluster_logs_info['files'])
@@ -747,9 +1034,34 @@ class GlusterBaseClass(unittest.TestCase):
# gluster logs
if 'glustotest_run_id' not in g.config:
g.config['glustotest_run_id'] = (
- datetime.datetime.now().strftime('%H_%M_%d_%m_%Y'))
+ datetime.now().strftime('%H_%M_%d_%m_%Y'))
cls.glustotest_run_id = g.config['glustotest_run_id']
+ if cls.enable_nfs_ganesha:
+ g.log.info("Setup NFS_Ganesha")
+ cls.num_of_nfs_ganesha_nodes = int(cls.num_of_nfs_ganesha_nodes)
+ cls.servers_in_nfs_ganesha_cluster = (
+ cls.servers[:cls.num_of_nfs_ganesha_nodes])
+ cls.vips_in_nfs_ganesha_cluster = (
+ cls.vips[:cls.num_of_nfs_ganesha_nodes])
+
+ # Obtain hostname of servers in ganesha cluster
+ cls.ganesha_servers_hostname = []
+ for ganesha_server in cls.servers_in_nfs_ganesha_cluster:
+ ret, hostname, _ = g.run(ganesha_server, "hostname")
+ if ret:
+ raise ExecutionError("Failed to obtain hostname of %s"
+ % ganesha_server)
+ hostname = hostname.strip()
+ g.log.info("Obtained hostname: IP- %s, hostname- %s",
+ ganesha_server, hostname)
+ cls.ganesha_servers_hostname.append(hostname)
+ from glustolibs.gluster.nfs_ganesha_libs import setup_nfs_ganesha
+ ret = setup_nfs_ganesha(cls)
+ if not ret:
+ raise ExecutionError("Failed to setup nfs ganesha")
+ g.log.info("Successful in setting up NFS Ganesha Cluster")
+
msg = "Setupclass: %s : %s" % (cls.__name__, cls.glustotest_run_id)
g.log.info(msg)
cls.inject_msg_in_gluster_logs(msg)
@@ -773,642 +1085,263 @@ class GlusterBaseClass(unittest.TestCase):
g.log.info(msg)
cls.inject_msg_in_gluster_logs(msg)
+ def doCleanups(self):
+ if (self.error_or_failure_exists or
+ self._is_error_or_failure_exists()):
+ ret = self.scratch_cleanup(self.error_or_failure_exists)
+ g.log.info(ret)
+ return self.get_super_method(self, 'doCleanups')()
-class GlusterBlockBaseClass(GlusterBaseClass):
- """GlusterBlockBaseClass sets up the volume and blocks.
- """
@classmethod
- def setup_blocks(cls, blocknames):
- """Create blocks and calls the methods:
- update_block_info_dict and create_client_block_map
-
- Args:
- blocknames(list): Blocks to be create
- Returns:
- bool: False if block creation is unsuccessful and
- true if all blocks created.
-
- """
- if not isinstance(blocknames, list):
- blocknames = [blocknames]
-
- g.log.info("Creating block devices on volume %s", cls.volname)
- for blockname in blocknames:
- each_block = cls.gluster_block_args_info.get(blockname)
- if each_block:
- # Form a dict for keyargs
- block_args_info = {}
- block_args_info['ha'] = each_block['ha']
- block_args_info['auth'] = each_block['auth']
- block_args_info['prealloc'] = each_block['prealloc']
- block_args_info['storage'] = each_block['storage']
- block_args_info['ring-buffer'] = each_block['ring-buffer']
-
- _rc = setup_block(
- mnode=cls.mnode,
- volname=each_block['volname'],
- blockname=each_block['blockname'],
- servers=each_block['servers'],
- size=each_block['size'],
- **block_args_info)
- if not _rc:
- g.log.error("Failed to create block on volume "
- "%s: \n%s", cls.volname, each_block)
- return False
- g.log.info("Successfully created block on volume "
- "%s: \n%s", cls.volname, each_block)
- else:
- g.log.error("Unable to get args info for block %s on "
- "volume %s", blockname, cls.volname)
- return False
-
- # Check if all the blocks are listed in block list command
- for blockname in blocknames:
- each_block = cls.gluster_block_args_info.get(blockname)
- _rc = if_block_exists(cls.mnode, each_block['volname'], blockname)
- if not _rc:
- return False
-
- # Update the block info dict
- cls.update_block_info_dict()
- # Create client-block map
- cls.create_client_block_map(cls.blocknames)
- return True
+ def doClassCleanups(cls):
+ if (GlusterBaseClass.error_or_failure_exists or
+ cls._is_error_or_failure_exists()):
+ ret = cls.scratch_cleanup(
+ GlusterBaseClass.error_or_failure_exists)
+ g.log.info(ret)
+ return cls.get_super_method(cls, 'doClassCleanups')()
@classmethod
- def update_block_info_dict(cls):
- """Updates the class's block_info_dict variable
- Calls the gluster-block info command and updates the block info.
- """
- # Get Block dict
- cls.blocknames = get_block_list(cls.mnode, cls.volname)
-
- if cls.blocknames:
- for blockname in cls.blocknames:
- cls.block_info_dict[blockname] = (get_block_info(cls.mnode,
- cls.volname,
- blockname))
- if cls.block_info_dict[blockname] is None:
- g.log.error("Could not get block info")
- return False
- # Update total_number_of_blocks
- cls.total_num_of_blocks = len(cls.blocknames)
-
- # Log the block_info_dict
- g.log.info("Logging Block Info:")
- for key, value in cls.block_info_dict.iteritems():
- g.log.info("Glusto block info: %s\n %s" % (key, value))
-
- return True
+ def delete_nfs_ganesha_cluster(cls):
+ ret = teardown_nfs_ganesha_cluster(
+ cls.servers_in_nfs_ganesha_cluster)
+ if not ret:
+ g.log.error("Teardown got failed. Hence, cleaning up "
+ "nfs-ganesha cluster forcefully")
+ ret = teardown_nfs_ganesha_cluster(
+ cls.servers_in_nfs_ganesha_cluster, force=True)
+ if not ret:
+ raise ExecutionError("Force cleanup of nfs-ganesha "
+ "cluster failed")
+ g.log.info("Teardown nfs ganesha cluster succeeded")
@classmethod
- def discover_blocks_on_clients(cls, blocknames):
- """Discover blocks on all the clients
- """
- # List all the block devices on clients (Logging)
-
- if not isinstance(blocknames, list):
- blocknames = [blocknames]
-
- # results = g.run_parallel(cls.clients, "lsblk -S")
- server_list = []
- for blockname in blocknames:
- block_info = get_block_info(cls.mnode, cls.volname, blockname)
- if block_info:
- servers_to_add = block_info.get("EXPORTED ON")
- for server_ip in servers_to_add:
- if server_ip not in server_list:
- server_list.append(server_ip)
- else:
- g.log.error("Failed to get block info for block %s"
- " on volume %s", blockname, cls.volname)
- return False
+ def start_memory_and_cpu_usage_logging(cls, test_id, interval=60,
+ count=100):
+ """Upload logger script and start logging usage on cluster
- g.log.info("Server list %s", server_list)
- # Discover the block devices from clients
- for client in cls.clients:
- for server in server_list:
- cmd = ("iscsiadm -m discovery -t st -p %s" %
- server)
- ret, out, err = g.run(client, cmd)
- if ret != 0:
- g.log.error("Failed to discover blocks on "
- "client %s: %s", client, err)
- return False
- g.log.info("Discovered blocks on client %s: %s",
- client, out)
- return True
+ Args:
+ test_id(str): ID of the test running fetched from self.id()
- @classmethod
- def get_iqn_of_blocks_on_clients(cls, blocknames):
- """Get iqn number of each block on it's respective client.
+ Kwargs:
+ interval(int): Time interval after which logs are to be collected
+ (Default: 60)
+ count(int): Number of samples to be collected (Default: 100)
- Args:
- blocknames: list
Returns:
- bool: True if iqn of all blocks is obtained. False otherwise
+ proc_dict(dict): Dictionary of logging processes
"""
- if not isinstance(blocknames, list):
- blocknames = [blocknames]
- for blockname in blocknames:
+ # imports are added inside the function to keep them
+ # optional and avoid breakage on installations
+ # which don't use the resource leak library
+ from glustolibs.io.memory_and_cpu_utils import (
+ check_upload_memory_and_cpu_logger_script,
+ log_memory_and_cpu_usage_on_cluster)
- try:
- block_gbid = cls.block_info_dict[blockname]['GBID']
- except KeyError:
- g.log.error("Failed to get GBID of block %s on volume %s",
- blockname, cls.volname)
- return False
+ # Checking if script is present on servers or not if not then
+ # upload it to servers.
+ if not check_upload_memory_and_cpu_logger_script(cls.servers):
+ return None
- try:
- block_mapped_client = (
- cls.clients_blocks_map[blockname]['client'])
- except KeyError:
- g.log.error("Failed to get the client which mounts the block "
- "%s on the volume %s", blockname, cls.volname)
- return False
+ # Checking if script is present on clients or not if not then
+ # upload it to clients.
+ if not check_upload_memory_and_cpu_logger_script(cls.clients):
+ return None
- # Get the servers where the blocks are exported
- server_ip = cls.block_info_dict[blockname]['EXPORTED ON'][0]
-
- # Get iqn from gbid
- cmd = ("iscsiadm -m discovery -t st -p %s | grep -F %s | "
- "tail -1 | cut -d ' ' -f2" %
- (server_ip, block_gbid))
-
- # Not using async here as if two processes execute the above
- # command at the same time in background, it will cause:
- # 'iscsiadm: Connection to Discovery Address' error
- ret, out, err = g.run(block_mapped_client, cmd)
- if ret != 0:
- g.log.error("Failed to get iqn of block %s on client %s: %s",
- block_gbid, block_mapped_client, err)
- return False
- g.log.info("Iqn for gbid '%s' on client %s : '%s'",
- block_gbid, block_mapped_client, out)
- block_iqn = out.strip()
- cls.clients_blocks_map[blockname]['iqn'] = block_iqn
+ # Start logging on servers and clients
+ proc_dict = log_memory_and_cpu_usage_on_cluster(
+ cls.servers, cls.clients, test_id, interval, count)
- return True
+ return proc_dict
@classmethod
- def login_to_iqn_on_clients(cls, blocknames):
- """Login the blocks on their clients/initiator.
+ def compute_and_print_usage_stats(cls, test_id, proc_dict,
+ kill_proc=False):
+ """Compute and print CPU and memory usage statistics
- Return:
- Either bool or Execution error.
- """
- if not isinstance(blocknames, list):
- blocknames = [blocknames]
-
- ret = cls.update_block_info_dict()
- if not ret:
- return False
-
- result = cls.get_iqn_of_blocks_on_clients(blocknames)
- if not result:
- return False
-
- # ret_value = True
- # Login to the block from the client
- for blockname in blocknames:
- block_gbid = cls.block_info_dict[blockname]['GBID']
- block_mapped_client = (
- cls.clients_blocks_map[blockname]['client'])
-
- if not cls.clients_blocks_map[blockname].get('iqn'):
- g.log.error("Iqn info for block %s not there. So can't login",
- blockname)
- return False
-
- block_iqn = cls.clients_blocks_map[blockname]['iqn']
- cls.clients_blocks_map[blockname]['logged_in'] = False
-
- if cls.block_info_dict[blockname]['PASSWORD']:
- block_password = cls.block_info_dict[blockname]['PASSWORD']
- cmd = ("iscsiadm -m node -T %s -o update -n "
- "node.session.auth.authmethod -v CHAP -n "
- "node.session.auth.username -v %s -n "
- "node.session.auth.password -v %s " %
- (block_iqn, block_gbid, block_password))
- ret, out, err = g.run(block_mapped_client, cmd)
- if ret != 0:
- g.log.error("Unable to update login credentials for "
- "iqn %s on %s: %s",
- block_iqn, block_mapped_client, err)
- return False
- g.log.info("Credentials for iqn %s updated successfully "
- "on %s",
- block_iqn, block_mapped_client)
-
- # Login to iqn
- if not cls.clients_blocks_map[blockname].get('logged_in'):
- cmd = "iscsiadm -m node -T %s -l" % block_iqn
- ret, out, err = g.run(block_mapped_client, cmd)
- if ret != 0:
- raise ExecutionError("Failed to login to iqn %s on "
- "%s: %s Command o/p: %s ",
- block_iqn, block_mapped_client,
- err, out)
-
- g.log.info("Successfully logged in to iqn %s on %s: %s",
- block_iqn, block_mapped_client, out)
- cls.clients_blocks_map[blockname]['logged_in'] = True
-
- return True
+ Args:
+ proc_dict(dict): Dictionary of logging processes
+ test_id(str): ID of the test running fetched from self.id()
- @classmethod
- def logout_iqn_on_clients(cls, blocknames):
- """Logout each block from the initiator
+ Kwargs:
+ kill_proc(bool): Kill logging process if true else wait
+ for process to complete execution
"""
- # Convert string or unicode type to list
- if not isinstance(blocknames, list):
- blocknames = [blocknames]
-
- for blockname in blocknames:
- block_mapped_client = (
- cls.clients_blocks_map[blockname]['client'])
- block_iqn = cls.clients_blocks_map[blockname]['iqn']
- cmd = "iscsiadm -m node -T %s -u" % block_iqn
- ret, out, err = g.run(block_mapped_client, cmd)
- if ret != 0:
- g.log.error("Failed to logout of iqn %s on %s: %s"
- " Command o/p: %s",
- block_iqn, block_mapped_client, err, out)
- return False
- g.log.info("Successfully logged out of iqn %s on %s: %s",
- block_iqn, block_mapped_client, out)
+ # imports are added inside the function to keep them
+ # optional and avoid breakage on installations
+ # which don't use the resource leak library
+ from glustolibs.io.memory_and_cpu_utils import (
+ wait_for_logging_processes_to_stop, kill_all_logging_processes,
+ compute_data_usage_stats_on_servers,
+ compute_data_usage_stats_on_clients)
+
+ # Wait or kill running logging process
+ if kill_proc:
+ nodes = cls.servers + cls.clients
+ ret = kill_all_logging_processes(proc_dict, nodes, cluster=True)
+ if not ret:
+ g.log.error("Unable to stop logging processes.")
+ else:
+ ret = wait_for_logging_processes_to_stop(proc_dict, cluster=True)
+ if not ret:
+ g.log.error("Processes didn't complete still running.")
- return True
+ # Compute and print stats for servers
+ ret = compute_data_usage_stats_on_servers(cls.servers, test_id)
+ g.log.info('*' * 50)
+ g.log.info(ret) # TODO: Make logged message more structured
+ g.log.info('*' * 50)
+
+ # Compute and print stats for clients
+ ret = compute_data_usage_stats_on_clients(cls.clients, test_id)
+ g.log.info('*' * 50)
+ g.log.info(ret) # TODO: Make logged message more structured
+ g.log.info('*' * 50)
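
Taken together, the two helpers above are meant to bracket a workload. A minimal sketch of a testcase driving them, assuming the resource leak library is installed and the class derives from GlusterBaseClass (the workload step is hypothetical):

    def test_usage_logging(self):
        test_id = self.id()
        # Start sampling memory and CPU usage: one sample per minute,
        # at most 100 samples per node
        proc_dict = self.start_memory_and_cpu_usage_logging(test_id)
        self.assertIsNotNone(proc_dict, "Failed to start usage logging")

        # ... run the I/O workload under test here ...

        # Stop the loggers early and print per-process usage statistics
        self.compute_and_print_usage_stats(test_id, proc_dict,
                                           kill_proc=True)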
@classmethod
- def get_mpath_of_iqn_on_clients(cls, blocknames):
- """Get mpath of the logged in blocks
+ def check_for_memory_leaks_and_oom_kills_on_servers(cls, test_id,
+ gain=30.0):
+ """Check for memory leaks and OOM kills on servers
- Return:
- True if successful and execution error if getting mpath fails.
- """
- # Get the mpath for iqn
- # Donot forget to install 'sg3_utils'
- # Convert string or unicode type to list
- if not isinstance(blocknames, list):
- blocknames = [blocknames]
-
- for blockname in blocknames:
- block_gbid = cls.block_info_dict[blockname]['GBID']
- block_mapped_client = cls.clients_blocks_map[blockname]['client']
- block_iqn = cls.clients_blocks_map[blockname]['iqn']
- if not cls.clients_blocks_map[blockname].get('mpath'):
- cmd = ("for i in `/dev/mapper/mpath*` ; do "
- "sg_inq -i $i | grep %s > /dev/null ; "
- "if [[ $? -eq 0 ]] ; then echo $i ; fi ; done" %
- (block_gbid))
- ret, out, err = g.run(block_mapped_client, cmd)
- if ret != 0:
- raise ExecutionError("Failed to get mpath for iqn %s on "
- "client %s: %s", block_iqn,
- block_mapped_client, err)
- block_mpath = out.strip()
- g.log.info("Successfully got mpath '%s' for iqn '%s' on "
- "client %s", block_mpath, block_iqn,
- block_mapped_client)
- cls.clients_blocks_map[blockname]['mpath'] = block_mpath
- time.sleep(1)
+ Args:
+ test_id(str): ID of the test running fetched from self.id()
- return True
+ Kwargs:
+ gain(float): Accepted amount of leak for a given testcase in MB
+ (Default:30)
- @classmethod
- def create_client_block_map(cls, blocknames):
- """
- Mapping a single block to a client.
- Select a client randomly from the list
+ Returns:
+ bool: True if memory leaks or OOM kills are observed else False
"""
- if not isinstance(blocknames, list):
- blocknames = [blocknames]
-
- tmp_client_list = cls.clients[:]
- for blockname in blocknames:
- if blockname not in cls.clients_blocks_map:
- if len(tmp_client_list) == 0:
- tmp_client_list = cls.clients[:]
- client_to_map = random.choice(tmp_client_list)
- tmp_client_list.remove(client_to_map)
- cls.clients_blocks_map[blockname] = {
- 'client': client_to_map,
- 'iqn': '',
- 'logged_in': False,
- 'mpath': '',
- 'is_formatted': False,
- 'is_mounted': False,
- }
- g.log.info("Blocks mapped to clients. Each block is mapped to a "
- "randomly selected client")
- for blockname in blocknames:
- g.log.info("Block %s mapped to %s", blockname,
- cls.clients_blocks_map[blockname]['client'])
+ # imports are added inside the function to keep them
+ # optional and avoid breakage on installations
+ # which don't use the resource leak library
+ from glustolibs.io.memory_and_cpu_utils import (
+ check_for_memory_leaks_in_glusterd,
+ check_for_memory_leaks_in_glusterfs,
+ check_for_memory_leaks_in_glusterfsd,
+ check_for_oom_killers_on_servers)
+
+ # Check for memory leaks on glusterd
+ if check_for_memory_leaks_in_glusterd(cls.servers, test_id, gain):
+ g.log.error("Memory leak on glusterd.")
+ return True
+
+ if cls.volume_type != "distributed":
+ # Check for memory leaks on shd
+ if check_for_memory_leaks_in_glusterfs(cls.servers, test_id,
+ gain):
+ g.log.error("Memory leak on shd.")
+ return True
+
+ # Check for memory leaks on brick processes
+ if check_for_memory_leaks_in_glusterfsd(cls.servers, test_id, gain):
+ g.log.error("Memory leak on brick process.")
+ return True
+
+ # Check OOM kills on servers for all gluster server processes
+ if check_for_oom_killers_on_servers(cls.servers):
+ g.log.error('OOM kills present on servers.')
+ return True
+ return False
@classmethod
- def mount_blocks(cls, blocknames, filesystem='xfs'):
- """Mount the blocks on their clients
- """
- if not isinstance(blocknames, list):
- blocknames = [blocknames]
- # Discover the block on client
- ret = cls.discover_blocks_on_clients(blocknames)
- if not ret:
- return False
-
- for blockname in blocknames:
- if not cls.clients_blocks_map[blockname]['logged_in']:
+ def check_for_memory_leaks_and_oom_kills_on_clients(cls, test_id, gain=30):
+ """Check for memory leaks and OOM kills on clients
- # Login inside the block on client
- ret = cls.login_to_iqn_on_clients(blockname)
- if not ret:
- return False
-
- # time.sleep added because the path /dev/mapper/mapth*
- # is getting read even before the logging is completed.
- time.sleep(2)
- # Get mpath of block on it's client
- ret = cls.get_mpath_of_iqn_on_clients(blockname)
- if not ret:
- return False
-
- # make fs
- block_mpath = cls.clients_blocks_map[blockname]['mpath']
- block_mapped_client = cls.clients_blocks_map[blockname]['client']
- if not cls.clients_blocks_map[blockname].get('is_formatted'):
- cmd = "mkfs.%s -f %s" % (filesystem, block_mpath)
- ret, out, err = g.run(block_mapped_client, cmd)
- if ret != 0:
- raise ExecutionError("Failed to make fs on %s on client "
- "%s: %s", block_mpath,
- block_mapped_client, err)
- g.log.info("Successfully created fs on %s on client %s: %s",
- block_mpath, block_mapped_client, out)
- cls.clients_blocks_map[blockname]['is_formatted'] = True
-
- # mount the block
- if not cls.clients_blocks_map[blockname].get('is_mounted'):
- temp_mount = {
- 'protocol': 'xfs',
- 'client': {
- 'host': cls.clients_blocks_map[blockname]['client'],
- },
- 'volname': cls.clients_blocks_map[blockname]['mpath'],
- 'mountpoint': "/mnt/%s" % blockname
- }
- mount_obj = create_mount_objs([temp_mount]).pop()
-
- g.log.info("Mount Obj %s", mount_obj)
- g.log.info("Mounting the device %s on %s:%s" %
- (mount_obj.volname, mount_obj.client_system,
- mount_obj.mountpoint))
-
- # The function is_mounted will give an error in log file:
- # "Missing arguments for mount"
- # Because this is also used for mounting glusterfs volumes and
- # a server name is needed But here mounting does not
- # require a server name and therefore the argument check
- # for server fails and an error is reported in the log file.
- # But that will not affect the block mounting.
- # So, we can live with it for now.
- ret = mount_obj.mount()
- if not ret:
- raise ExecutionError("Unable to mount the "
- "device %s on %s:%s" %
- (mount_obj.volname,
- mount_obj.client_system,
- mount_obj.mountpoint))
- g.log.info("Successfully mounted the device %s on %s:%s" %
- (mount_obj.volname, mount_obj.client_system,
- mount_obj.mountpoint))
- cls.mount_blocks_list.append(mount_obj)
- cls.clients_blocks_map[blockname]['is_mounted'] = True
+ Args:
+ test_id(str): ID of the test running fetched from self.id()
- return True
+ Kwargs:
+ gain(float): Accepted amount of leak for a given testcase in MB
+ (Default:30)
- @classmethod
- def setup_block_mount_block(cls, blocknames):
- """Create and mount the blocks
+ Returns:
+ bool: True if memory leaks or OOM kills are observed else False
"""
- # Setup block
- g.log.info("Setting up blocks")
- ret = cls.setup_blocks(blocknames)
- if not ret:
- raise ExecutionError("Failed to setup blocks")
- g.log.info("Successful in setting up blocks")
+ # imports are added inside the function to keep them
+ # optional and avoid breakage on installations
+ # which don't use the resource leak library
+ from glustolibs.io.memory_and_cpu_utils import (
+ check_for_memory_leaks_in_glusterfs_fuse,
+ check_for_oom_killers_on_clients)
+
+ # Check for memory leak on glusterfs fuse process
+ if check_for_memory_leaks_in_glusterfs_fuse(cls.clients, test_id,
+ gain):
+ g.log.error("Memory leaks observed on FUSE clients.")
+ return True
+
+ # Check for oom kills on clients
+ if check_for_oom_killers_on_clients(cls.clients):
+ g.log.error("OOM kills present on clients.")
+ return True
+ return False
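
As a sketch of the intended call pattern, a teardown could fail the run when either check reports a problem; the default gain of 30 MB is kept here:

    def tearDown(self):
        test_id = self.id()
        # Both helpers return True when a leak or OOM kill is observed
        if (self.check_for_memory_leaks_and_oom_kills_on_servers(test_id) or
                self.check_for_memory_leaks_and_oom_kills_on_clients(test_id)):
            self.fail("Memory leak or OOM kill observed during the test")
        self.get_super_method(self, 'tearDown')()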
- # Mount Blocks
- g.log.info("Mounting the blocks on initiator nodes")
- ret = cls.mount_blocks(blocknames)
- if not ret:
- raise ExecutionError("Failed to mount the blocks of volume %s",
- cls.volname)
- g.log.info("Successful in mounting the blocks of the volume %s",
- cls.volname)
+ @classmethod
+ def check_for_cpu_usage_spikes_on_servers(cls, test_id, threshold=3):
+ """Check for CPU usage spikes on servers
- return True
+ Args:
+ test_id(str): ID of the test running fetched from self.id()
- @classmethod
- def get_block_args_info_from_config(cls):
- """Created the dict gluster_block_args_info which helps in
- providing block information during block creation
+ Kwargs:
+ threshold(int): Accepted number of instances of 100% CPU usage
+ (Default:3)
+ Returns:
+ bool: True if CPU spikes are more than threshold else False
"""
- # Get gluster block info from config file
- if g.config.get('gluster_block_args_info'):
- cls.gluster_block_args_info = {}
- blocks_count = 0
- each_block_info = g.config['gluster_block_args_info']
- # for i, each_block_info in enumerate(
- # g.config['gluster_block_args_info']):
- # volname
- block_on_volume = cls.volname
- if each_block_info.get('volname'):
- block_on_volume = each_block_info['volname']
-
- # Block name
- block_base_name = "gluster_block"
- if each_block_info.get('blockname'):
- block_base_name = each_block_info['blockname']
-
- # servers
- block_servers = cls.servers
- if each_block_info.get('servers'):
- block_servers = each_block_info['servers']
- if not filter(None, block_servers):
- block_servers = cls.servers
-
- # Block size
- block_size = "1GiB"
- if each_block_info.get('size'):
- block_size = each_block_info['size']
-
- # HA
- block_ha = 3
- if each_block_info.get('ha'):
- block_ha = each_block_info['ha']
-
- # auth
- block_auth = None
- if each_block_info.get('auth'):
- block_auth = each_block_info['auth']
-
- # prealloc
- block_prealloc = None
- if each_block_info.get('prealloc'):
- block_prealloc = each_block_info['prealloc']
-
- # ring-buffer
- block_ring_buffer = None
- if each_block_info.get('ring-buffer'):
- block_ring_buffer = each_block_info['ring-buffer']
-
- # Number of blocks
- num_of_blocks = 1
- if each_block_info.get('num_of_blocks'):
- num_of_blocks = int(each_block_info['num_of_blocks'])
-
- # for count in range(blocks_count,num_of_blocks +blocks_count):
- for count in range(blocks_count, num_of_blocks):
- # blocks_count = int(count) + i
-
- if block_ha:
- selected_block_servers = random.sample(block_servers,
- block_ha)
- else:
- selected_block_servers = random.choice(block_servers)
-
- block_name = "_".join([block_base_name,
- str(count + 1)])
-
- cls.gluster_block_args_info[block_name] = (
- {'volname': block_on_volume,
- 'blockname': block_name,
- 'servers': cls.get_ip_from_hostname(
- selected_block_servers),
- 'size': block_size,
- 'ha': block_ha,
- 'auth': block_auth,
- 'prealloc': block_prealloc,
- 'storage': None,
- 'ring-buffer': block_ring_buffer}
- )
-
- for key in cls.gluster_block_args_info.keys():
- value = cls.gluster_block_args_info[key]
- g.log.info("Gluster-Block args info: %s\n %s" % (key, value))
+ # imports are added inside the function to keep them
+ # optional and avoid breakage on installations
+ # which don't use the resource leak library
+ from glustolibs.io.memory_and_cpu_utils import (
+ check_for_cpu_usage_spikes_on_glusterd,
+ check_for_cpu_usage_spikes_on_glusterfs,
+ check_for_cpu_usage_spikes_on_glusterfsd)
+
+ # Check for CPU usage spikes on glusterd
+ if check_for_cpu_usage_spikes_on_glusterd(cls.servers, test_id,
+ threshold):
+ g.log.error("CPU usage spikes observed more than threshold "
+ "on glusterd.")
+ return True
+
+ if cls.volume_type != "distributed":
+ # Check for CPU usage spikes on shd
+ if check_for_cpu_usage_spikes_on_glusterfs(cls.servers, test_id,
+ threshold):
+ g.log.error("CPU usage spikes observed more than threshold "
+ "on shd.")
+ return True
+
+ # Check for CPU usage spikes on brick processes
+ if check_for_cpu_usage_spikes_on_glusterfsd(cls.servers, test_id,
+ threshold):
+ g.log.error("CPU usage spikes observed more than threshold "
+ "on shd.")
+ return True
+ return False
@classmethod
- def setUpClass(cls, setup_vol=True, setup_blk=True, mount_blk=True):
- """Setup volume, create blocks, mount the blocks if specified.
- """
- GlusterBaseClass.setUpClass.im_func(cls)
-
- cls.mount_blocks_list = []
- cls.total_num_of_blocks = 0
- cls.block_info_dict = {}
- cls.clients_blocks_map = {}
- cls.blocknames = []
-
- # Default gluster block info
- cls.gluster_block_args_info = {
- 'gluster_block_%d' % (cls.total_num_of_blocks + 1): {
- 'volname': cls.volname,
- 'blockname': 'gluster_block_%d'
- % (cls.total_num_of_blocks + 1),
- 'servers': random.sample(cls.servers_ips, 2),
- 'size': '1GiB',
- 'ha': 2,
- 'auth': None,
- 'prealloc': None,
- 'storage': None,
- 'ring-buffer': None
- }
- }
+ def check_for_cpu_spikes_on_clients(cls, test_id, threshold=3):
+ """Check for CPU usage spikes on clients
- if g.config.get('gluster_block_args_info'):
- cls.get_block_args_info_from_config()
+ Args:
+ test_id(str): ID of the test running fetched from self.id()
- @classmethod
- def tearDownClass(cls, umount_blocks=True, cleanup_blocks=True,
- cleanup_vol=True, unlink_storage="yes"):
- """Teardown the mounts, deletes blocks, gluster volume.
+ Kwargs:
+ threshold(int): Accepted number of instances of 100% CPU usage
+ (Default:3)
+ Returns:
+ bool: True if CPU spikes are more than threshold else False
"""
- # Unmount volume
- if umount_blocks:
- _rc = True
- g.log.info("Starting to UnMount Blocks")
- for mount_obj in cls.mount_blocks_list:
- ret = mount_obj.unmount()
- if not ret:
- g.log.error("Unable to unmount block '%s on cleint %s "
- "at %s'",
- mount_obj.volname, mount_obj.client_system,
- mount_obj.mountpoint)
- _rc = False
- if not _rc:
- raise ExecutionError("Unmount of all mounts are not "
- "successful")
- else:
- g.log.info("Successful in unmounting volume on all clients")
- else:
- g.log.info("Not Unmounting the Volume as 'umount_vol' is set "
- "to %s", umount_blocks)
-
- # Logout the blocks
- for blockname in cls.clients_blocks_map:
- block_iqn = cls.clients_blocks_map[blockname]['iqn']
- block_mapped_client = (
- cls.clients_blocks_map[blockname]['client'])
- g.log.info("Logging out iqn %s on client %s", block_iqn,
- block_mapped_client)
- cmd = "iscsiadm -m node -T %s -u" % block_iqn
- ret, out, err = g.run(block_mapped_client, cmd)
- if ret != 0:
- raise ExecutionError("Failed to logout iqn %s on client %s "
- ":%s", block_iqn, block_mapped_client,
- err)
- g.log.info("Successfully logged out iqn %s on client %s: %s",
- block_iqn, block_mapped_client, out)
-
- # Restarting multipathd on all clients
- g.log.info("Restarting multipathd on all clients")
- cmd = "service multipathd restart && service multipathd status"
- results = g.run_parallel(cls.clients, cmd)
- for client in results:
- ret, out, err = results[client]
- if ret != 0:
- raise ExecutionError("Failed to restart multipathd on "
- "client %s: %s", client, err)
- g.log.info("Successfully restarted multipathd on client %s: %s",
- client, out)
-
- # Cleanup blocks
- if cleanup_blocks:
- blocknames = get_block_list(cls.mnode, cls.volname)
- if blocknames:
- g.log.info("Listing blocks before deleting:\n%s",
- '\n'.join(blocknames))
- for blockname in blocknames:
- ret, out, err = block_delete(cls.mnode, cls.volname,
- blockname, unlink_storage)
- if ret != 0:
- raise ExecutionError("Failed to delete the block "
- "%s on volume %s", blockname,
- cls.volname)
- g.log.info("Successfully deleted the block %s on "
- "volume %s", blockname, cls.volname)
-
- # Cleanup volume
- if cleanup_vol:
- g.log.info("Cleanup Volume %s", cls.volname)
- ret = cleanup_volume(mnode=cls.mnode, volname=cls.volname)
- if not ret:
- raise ExecutionError("cleanup volume %s failed", cls.volname)
- else:
- g.log.info("Successfully cleaned-up volume")
- else:
- g.log.info("Not Cleaning-Up volume as 'cleanup_vol' is %s",
- cleanup_vol)
-
- GlusterBaseClass.tearDownClass.im_func(cls)
+ # imports are added inside the function to keep them
+ # optional and avoid breakage on installations
+ # which don't use the resource leak library
+ from glustolibs.io.memory_and_cpu_utils import (
+ check_for_cpu_usage_spikes_on_glusterfs_fuse)
+
+ ret = check_for_cpu_usage_spikes_on_glusterfs_fuse(cls.clients,
+ test_id,
+ threshold)
+ return ret
diff --git a/glustolibs-gluster/glustolibs/gluster/gluster_init.py b/glustolibs-gluster/glustolibs/gluster/gluster_init.py
index e59d36fa1..6a49ffc8b 100644
--- a/glustolibs-gluster/glustolibs/gluster/gluster_init.py
+++ b/glustolibs-gluster/glustolibs/gluster/gluster_init.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2015-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -19,36 +19,44 @@
Description: This file contains the methods for starting/stopping glusterd
and other initial gluster environment setup helpers.
"""
+from time import sleep
from glusto.core import Glusto as g
-def start_glusterd(servers):
+def start_glusterd(servers, enable_retry=True):
"""Starts glusterd on specified servers if they are not running.
Args:
servers (str|list): A server|List of server hosts on which glusterd
has to be started.
+ Kwargs:
+ enable_retry(bool): If set to True, runs reset-failed and
+ retries the start on failure, else does nothing.
+
Returns:
bool : True if starting glusterd is successful on all servers.
False otherwise.
"""
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
cmd = "pgrep glusterd || service glusterd start"
results = g.run_parallel(servers, cmd)
_rc = True
- for server, ret_values in results.iteritems():
+ for server, ret_values in results.items():
retcode, _, _ = ret_values
if retcode != 0:
g.log.error("Unable to start glusterd on server %s", server)
_rc = False
- if not _rc:
- return False
+ if not _rc and enable_retry:
+ ret = reset_failed_glusterd(servers)
+ if ret:
+ ret = start_glusterd(servers)
+ return ret
- return True
+ return _rc
def stop_glusterd(servers):
@@ -62,14 +70,14 @@ def stop_glusterd(servers):
bool : True if stopping glusterd is successful on all servers.
False otherwise.
"""
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
cmd = "service glusterd stop"
results = g.run_parallel(servers, cmd)
_rc = True
- for server, ret_values in results.iteritems():
+ for server, ret_values in results.items():
retcode, _, _ = ret_values
if retcode != 0:
g.log.error("Unable to stop glusterd on server %s", server)
@@ -80,32 +88,62 @@ def stop_glusterd(servers):
return True
-def restart_glusterd(servers):
+def restart_glusterd(servers, enable_retry=True):
"""Restart the glusterd on specified servers.
Args:
servers (str|list): A server|List of server hosts on which glusterd
has to be restarted.
+ Kwargs:
+ enable_retry(bool): If set to True, runs reset-failed and
+ retries the restart on failure, else does nothing.
+
Returns:
bool : True if restarting glusterd is successful on all servers.
False otherwise.
"""
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
cmd = "service glusterd restart"
results = g.run_parallel(servers, cmd)
_rc = True
- for server, ret_values in results.iteritems():
+ for server, ret_values in results.items():
retcode, _, _ = ret_values
if retcode != 0:
g.log.error("Unable to restart glusterd on server %s", server)
_rc = False
- if not _rc:
- return False
+ if not _rc and enable_retry:
+ ret = reset_failed_glusterd(servers)
+ if ret:
+ ret = restart_glusterd(servers)
+ return ret
+ return _rc
+
+
+def reset_failed_glusterd(servers):
+ """Reset-failed glusterd on specified servers.
+
+ Args:
+ servers (str|list): A server|List of server hosts on which glusterd
+ has to be reset-failed.
+
+ Returns:
+ bool : True if reset-failed glusterd is successful on all servers.
+ False otherwise.
+ """
+ if not isinstance(servers, list):
+ servers = [servers]
+
+ cmd = "systemctl reset-failed glusterd"
+ results = g.run_parallel(servers, cmd)
+ for server, (retcode, _, _) in results.items():
+ if retcode:
+ g.log.error("Unable to reset glusterd on server %s", server)
+ return False
return True
@@ -122,7 +160,7 @@ def is_glusterd_running(servers):
-1 : if glusterd not running and PID is alive
"""
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
cmd1 = "service glusterd status"
@@ -131,7 +169,7 @@ def is_glusterd_running(servers):
cmd2_results = g.run_parallel(servers, cmd2)
_rc = 0
- for server, ret_values in cmd1_results.iteritems():
+ for server, ret_values in cmd1_results.items():
retcode, _, _ = ret_values
if retcode != 0:
g.log.error("glusterd is not running on the server %s", server)
@@ -157,7 +195,7 @@ def env_setup_servers(servers):
False otherwise.
"""
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
g.log.info("The function isn't implemented fully")
@@ -175,7 +213,7 @@ def get_glusterd_pids(nodes):
return the process id's in dictionary format
Args:
- nodes ( str|list ) : Node/Nodes of the cluster
+ nodes (str|list) : Node(s) of the cluster
Returns:
tuple : Tuple containing two elements (ret, gluster_pids).
@@ -190,7 +228,7 @@ def get_glusterd_pids(nodes):
"""
glusterd_pids = {}
_rc = True
- if isinstance(nodes, str):
+ if not isinstance(nodes, list):
nodes = [nodes]
cmd = "pidof glusterd"
@@ -222,3 +260,47 @@ def get_glusterd_pids(nodes):
glusterd_pids[node] = ['-1']
return _rc, glusterd_pids
+
+
+def wait_for_glusterd_to_start(servers, glusterd_start_wait_timeout=80):
+ """Checks glusterd is running on nodes with timeout.
+
+ Args:
+ servers (str|list): A server|List of server hosts on which glusterd
+ status has to be checked.
+ glusterd_start_wait_timeout: timeout in seconds to keep retrying
+ the glusterd running check on the nodes.
+
+ Returns:
+ bool : True if glusterd is running on servers.
+ False otherwise.
+
+ """
+ if not isinstance(servers, list):
+ servers = [servers]
+ count = 0
+ while count <= glusterd_start_wait_timeout:
+ ret = is_glusterd_running(servers)
+ if not ret:
+ g.log.info("glusterd is running on %s", servers)
+ return True
+ sleep(1)
+ count += 1
+ g.log.error("glusterd is not running on %s", servers)
+ return False
+
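
A short usage sketch tying the retry logic and the wait helper together (hostnames are hypothetical):

    from glustolibs.gluster.gluster_init import (
        restart_glusterd, wait_for_glusterd_to_start)

    servers = ['server1.example.com', 'server2.example.com']
    # On failure, restart_glusterd() runs 'systemctl reset-failed
    # glusterd' and retries the restart once before giving up
    if restart_glusterd(servers):
        # Poll roughly once a second until glusterd is up or 60s elapse
        assert wait_for_glusterd_to_start(servers, 60)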
+
+def get_gluster_version(host):
+ """Checks the gluster version on the nodes
+
+ Args:
+ host(str): IP of the host whose gluster version has to be checked.
+
+ Returns:
+ str: The gluster version value.
+ """
+ command = 'gluster --version'
+ _, out, _ = g.run(host, command)
+ g.log.info("The Gluster verion of the cluster under test is %s",
+ out)
+ return out.split(' ')[1]
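
A brief sketch of the helper (hypothetical host), assuming 'gluster --version' keeps its usual 'glusterfs <version> ...' first line:

    version = get_gluster_version('server1.example.com')
    # e.g. the second token of 'glusterfs 7.9 ...' is returned
    g.log.info("Cluster under test runs glusterfs %s", version)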
diff --git a/glustolibs-gluster/glustolibs/gluster/glusterdir.py b/glustolibs-gluster/glustolibs/gluster/glusterdir.py
index f1d882607..5618926c8 100644
--- a/glustolibs-gluster/glustolibs/gluster/glusterdir.py
+++ b/glustolibs-gluster/glustolibs/gluster/glusterdir.py
@@ -82,22 +82,29 @@ def rmdir(host, fqpath, force=False):
return False
-def get_dir_contents(host, path):
+def get_dir_contents(host, path, recursive=False):
"""Get the files and directories present in a given directory.
Args:
host (str): The hostname/ip of the remote system.
path (str): The path to the directory.
+ Kwargs:
+ recursive (bool): lists all entries recursively
+
Returns:
file_dir_list (list): List of files and directories on path.
None: In case of error or failure.
"""
- ret, out, _ = g.run(host, ' ls '+path)
- if ret != 0:
+ if recursive:
+ cmd = "find {}".format(path)
+ else:
+ cmd = "ls " + path
+ ret, out, _ = g.run(host, cmd)
+ if ret:
+ g.log.error("No such file or directory {}".format(path))
return None
- file_dir_list = filter(None, out.split("\n"))
- return file_dir_list
+ return list(filter(None, out.split("\n")))
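
A quick sketch of both modes (hypothetical client and mount path):

    # Top-level entries only (runs 'ls')
    top_level = get_dir_contents('client1.example.com', '/mnt/testvol')

    # Every file and directory under the path (runs 'find')
    all_entries = get_dir_contents('client1.example.com', '/mnt/testvol',
                                   recursive=True)
    if all_entries is None:
        g.log.error("Listing failed or path does not exist")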
class GlusterDir(GlusterFile):
diff --git a/glustolibs-gluster/glustolibs/gluster/glusterfile.py b/glustolibs-gluster/glustolibs/gluster/glusterfile.py
index 413a4f9a7..ee9b6040d 100755
--- a/glustolibs-gluster/glustolibs/gluster/glusterfile.py
+++ b/glustolibs-gluster/glustolibs/gluster/glusterfile.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -27,8 +27,8 @@ import os
import re
from glusto.core import Glusto as g
-
from glustolibs.gluster.layout import Layout
+from glustolibs.misc.misc_libs import upload_scripts
def calculate_hash(host, filename):
@@ -39,26 +39,43 @@ def calculate_hash(host, filename):
Returns:
An integer representation of the hash
+
+ TODO: For testcases specifically testing hashing routine
+ consider using a baseline external Davies-Meyer hash_value.c
+ Creating comparison hash from same library we are testing
+ may not be best practice here. (Holloway)
"""
- # TODO: For testcases specifically testing hashing routine
- # consider using a baseline external Davies-Meyer hash_value.c
- # Creating comparison hash from same library we are testing
- # may not be best practice here. (Holloway)
try:
# Check if libglusterfs.so.0 is available locally
glusterfs = ctypes.cdll.LoadLibrary("libglusterfs.so.0")
g.log.debug("Library libglusterfs.so.0 loaded locally")
+ computed_hash = (
+ ctypes.c_uint32(glusterfs.gf_dm_hashfn(filename,
+ len(filename))))
+ hash_value = int(computed_hash.value)
except OSError:
- conn = g.rpyc_get_connection(host)
- glusterfs = \
- conn.modules.ctypes.cdll.LoadLibrary("libglusterfs.so.0")
- g.log.debug("Library libglusterfs.so.0 loaded via rpyc")
-
- computed_hash = \
- ctypes.c_uint32(glusterfs.gf_dm_hashfn(filename, len(filename)))
- # conn.close()
-
- return int(computed_hash.value)
+ script_path = ("/usr/share/glustolibs/scripts/"
+ "compute_hash.py")
+ if not file_exists(host, script_path):
+ if upload_scripts(host, script_path,
+ '/usr/share/glustolibs/scripts/'):
+ g.log.info("Successfully uploaded script "
+ "compute_hash.py!")
+ else:
+ g.log.error('Unable to upload the script to node {0}'
+ .format(host))
+ return 0
+ else:
+ g.log.info("compute_hash.py already present!")
+ cmd = ("/usr/bin/env python {0} {1}".format(script_path,
+ filename))
+ ret, out, _ = g.run(host, cmd)
+ if ret:
+ g.log.error('Unable to run the script on node {0}'
+ .format(host))
+ return 0
+ hash_value = int(out.split('\n')[0])
+ return hash_value
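
Callers are unchanged by the fallback: the hash is computed locally through libglusterfs.so.0 when available, otherwise via the uploaded compute_hash.py on the remote node. A sketch (hypothetical host; note the argument is the file's name, as hashed by DHT, not its path):

    hash_value = calculate_hash('server1.example.com', 'file1.txt')
    # 0 signals a failure to upload or run the fallback script
    assert hash_value != 0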
def get_mountpoint(host, fqpath):
@@ -80,40 +97,50 @@ def get_mountpoint(host, fqpath):
return None
-def get_fattr(host, fqpath, fattr):
+def get_fattr(host, fqpath, fattr, encode="hex"):
"""getfattr for filepath on remote system
Args:
host (str): The hostname/ip of the remote system.
fqpath (str): The fully-qualified path to the file.
fattr (str): name of the fattr to retrieve
-
+ Kwargs:
+ encode(str): The supported types of encoding are
+ [hex|text|base64]
+ Defaults to hex type of encoding
Returns:
getfattr result on success. None on fail.
"""
- command = ("getfattr --absolute-names --only-values -n '%s' %s" %
- (fattr, fqpath))
+ command = ("getfattr --absolute-names -e '%s' "
+ "-n '%s' %s" %
+ (encode, fattr, fqpath))
rcode, rout, rerr = g.run(host, command)
-
- if rcode == 0:
- return rout.strip()
+ if not rcode:
+ return rout.strip().split('=')[1].replace('"', '')
g.log.error('getfattr failed: %s' % rerr)
return None
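
A sketch of the new encode kwarg (hypothetical brick paths); note the helper now also strips the 'name=' prefix and surrounding quotes from the getfattr output:

    # Hex-encoded value (the default), e.g. '0x0000000100000000...'
    layout = get_fattr('server1.example.com', '/bricks/brick0/dir1',
                       'trusted.glusterfs.dht')

    # Text encoding suits string-valued xattrs such as linkto/pathinfo
    linkto = get_fattr('server1.example.com', '/bricks/brick0/file1',
                       'trusted.glusterfs.dht.linkto', encode='text')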
-def get_fattr_list(host, fqpath):
+def get_fattr_list(host, fqpath, encode_hex=False):
"""List of xattr for filepath on remote system.
Args:
host (str): The hostname/ip of the remote system.
fqpath (str): The fully-qualified path to the file.
+ Kwargs:
+ encode_hex(bool): Fetch xattr in hex if True
+ (Default:False)
+
Returns:
Dictionary of xattrs on success. None on fail.
"""
- command = "getfattr --absolute-names -d -m - %s" % fqpath
- rcode, rout, rerr = g.run(host, command)
+ cmd = "getfattr --absolute-names -d -m - {}".format(fqpath)
+ if encode_hex:
+ cmd = ("getfattr --absolute-names -d -m - -e hex {}"
+ .format(fqpath))
+ rcode, rout, rerr = g.run(host, cmd)
if rcode == 0:
xattr_list = {}
@@ -220,7 +247,7 @@ def get_file_stat(host, fqpath):
Returns:
A dictionary of file stat data. None on fail.
"""
- statformat = '%F:%n:%i:%a:%s:%h:%u:%g:%U:%G'
+ statformat = '%F$%n$%i$%a$%s$%h$%u$%g$%U$%G$%x$%y$%z$%X$%Y$%Z'
command = "stat -c '%s' %s" % (statformat, fqpath)
rcode, rout, rerr = g.run(host, command)
if rcode == 0:
@@ -228,7 +255,9 @@ def get_file_stat(host, fqpath):
stat_string = rout.strip()
(filetype, filename, inode,
access, size, links,
- uid, gid, username, groupname) = stat_string.split(":")
+ uid, gid, username, groupname,
+ atime, mtime, ctime, epoch_atime,
+ epoch_mtime, epoch_ctime) = stat_string.split("$")
stat_data['filetype'] = filetype
stat_data['filename'] = filename
@@ -240,6 +269,12 @@ def get_file_stat(host, fqpath):
stat_data["groupname"] = groupname
stat_data["uid"] = uid
stat_data["gid"] = gid
+ stat_data["atime"] = atime
+ stat_data["mtime"] = mtime
+ stat_data["ctime"] = ctime
+ stat_data["epoch_atime"] = epoch_atime
+ stat_data["epoch_mtime"] = epoch_mtime
+ stat_data["epoch_ctime"] = epoch_ctime
return stat_data
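
With the extended '$'-separated stat format, the returned dictionary now also carries human-readable and epoch timestamps. A sketch (hypothetical client and path):

    stat_data = get_file_stat('client1.example.com', '/mnt/testvol/file1')
    # The epoch fields are strings; convert before comparing
    if int(stat_data['epoch_mtime']) > int(stat_data['epoch_ctime']):
        g.log.info("mtime is newer than ctime for %s",
                   stat_data['filename'])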
@@ -365,7 +400,8 @@ def get_pathinfo(host, fqpath):
A dictionary of pathinfo data for a remote file. None on fail.
"""
pathinfo = {}
- pathinfo['raw'] = get_fattr(host, fqpath, 'trusted.glusterfs.pathinfo')
+ pathinfo['raw'] = get_fattr(host, fqpath, 'trusted.glusterfs.pathinfo',
+ encode="text")
pathinfo['brickdir_paths'] = re.findall(r".*?POSIX.*?:(\S+)\>",
pathinfo['raw'])
@@ -388,17 +424,14 @@ def is_linkto_file(host, fqpath):
"""
command = 'file %s' % fqpath
rcode, rout, _ = g.run(host, command)
-
if rcode == 0:
- if 'sticky empty' in rout.strip():
+ # An additional ',' is there for newer platforms
+ if ('sticky empty' in rout.strip() or
+ 'sticky, empty' in rout.strip()):
stat = get_file_stat(host, fqpath)
if int(stat['size']) == 0:
- # xattr = get_fattr(host, fqpath,
- # 'trusted.glusterfs.dht.linkto')
xattr = get_dht_linkto_xattr(host, fqpath)
if xattr is not None:
return True
-
return False
@@ -412,7 +445,8 @@ def get_dht_linkto_xattr(host, fqpath):
Returns:
Return value of get_fattr trusted.glusterfs.dht.linkto call.
"""
- linkto_xattr = get_fattr(host, fqpath, 'trusted.glusterfs.dht.linkto')
+ linkto_xattr = get_fattr(host, fqpath, 'trusted.glusterfs.dht.linkto',
+ encode="text")
return linkto_xattr
@@ -463,6 +497,154 @@ def check_if_pattern_in_file(host, pattern, fqpath):
return 0
+def occurences_of_pattern_in_file(node, search_pattern, filename):
+ """
+ Get the number of occurrences of a pattern in the file
+
+ Args:
+ node (str): Host on which the command is executed.
+ search_pattern (str): Pattern to be found in the file.
+ filename (str): File in which the pattern is to be validated
+
+ Returns:
+ (int): (-1), when the file doesn't exist.
+ (0), when the pattern doesn't exist in the file.
+ (number), the number of occurrences of the pattern
+ when it is found in the file.
+
+ Example:
+ occurences_of_pattern_in_file(node, search_pattern, filename)
+ """
+
+ ret = file_exists(node, filename)
+ if not ret:
+ g.log.error("File %s is not present on the node " % filename)
+ return -1
+
+ cmd = ("grep -c '%s' %s" % (search_pattern, filename))
+ ret, out, _ = g.run(node, cmd)
+ if ret:
+ g.log.error("No occurence of the pattern found in the file %s" %
+ filename)
+ return 0
+ return int(out.strip('\n'))
+
+
+def create_link_file(node, file, link, soft=False):
+ """
+ Create a hard or soft link for an existing file
+
+ Args:
+ node(str): Host on which the command is executed.
+ file(str): Path to the source file.
+ link(str): Path to the link file.
+
+ Kwargs:
+ soft(bool): Create soft link if True else create
+ hard link.
+
+ Returns:
+ (bool): True if command successful else False.
+
+ Example:
+ >>> create_link_file('10.20.30.40', '/mnt/mp/file.txt',
+ '/mnt/mp/link')
+ True
+ """
+ cmd = "ln {} {}".format(file, link)
+ if soft:
+ cmd = "ln -s {} {}".format(file, link)
+
+ ret, _, err = g.run(node, cmd)
+ if ret:
+ if soft:
+ g.log.error('Failed to create soft link on {} '
+ 'for file {} with error {}'
+ .format(node, file, err))
+ else:
+ g.log.error('Failed to create hard link on {} '
+ 'for file {} with error {}'
+ .format(node, file, err))
+ return False
+ return True
+
+
+def set_acl(client, rule, fqpath):
+ """Set acl rule on a specific file
+
+ Args:
+ client(str): Host on which the command is executed.
+ rule(str): The acl rule to be set on the file.
+ fqpath (str): The fully-qualified path to the file.
+
+ Returns:
+ (bool): True if command successful else False.
+ """
+ cmd = "setfacl -m {} {}".format(rule, fqpath)
+ ret, _, _ = g.run(client, cmd)
+ if ret:
+ g.log.error('Failed to set rule {} on file {}'.format(rule, fqpath))
+ return False
+ return True
+
+
+def get_acl(client, path, filename):
+ """Get all acl rules set to a file
+
+ Args:
+ client(str): Host on which the command is executed.
+ path (str): The fully-qualified path to the dir where file is present.
+ filename(str): Name of the file for which rules have to be fetched.
+
+ Returns:
+ (dict): A dictionary with the formatted output of the command.
+ (None): In case of failures
+
+ Example:
+ >>> get_acl('dhcp35-4.lab.eng.blr.redhat.com', '/root/', 'file')
+ {'owner': 'root', 'rules': ['user::rw-', 'user:root:rwx', 'group::r--',
+ 'mask::rwx', 'other::r--'], 'group': 'root', 'file': 'file'}
+ """
+ cmd = "cd {};getfacl {}".format(path, filename)
+ ret, out, _ = g.run(client, cmd)
+ if ret:
+ return None
+
+ # Generate a dict out of the output
+ output_dict = {}
+ data = out.strip().split('\n')
+ for key, index in (('file', 0), ('owner', 1), ('group', 2)):
+ output_dict[key] = data[index].split(' ')[2]
+ output_dict['rules'] = data[3:]
+
+ return output_dict
+
+
+def delete_acl(client, fqpath, rule=None):
+ """Delete a specific or all acl rules set on a file
+
+ Args:
+ client(str): Host on which the command is executed.
+ fqpath (str): The fully-qualified path to the file.
+
+ Kwargs:
+ rule(str): The acl rule to be removed from the file.
+
+ Returns:
+ (bool): True if command successful else False.
+ """
+ # Remove all acls set on a file
+ cmd = "setfacl -b {}".format(fqpath)
+ # Remove a specific acl of the file
+ if rule:
+ cmd = "setfacl -x {} {}".format(rule, fqpath)
+
+ ret, _, _ = g.run(client, cmd)
+ if ret:
+ return False
+ return True
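
A combined sketch of the three ACL helpers (hypothetical client and file):

    client, fqpath = 'client1.example.com', '/mnt/testvol/file1'

    # Grant user 'qa' read/write, then verify it via get_acl()
    assert set_acl(client, 'u:qa:rw-', fqpath)
    acl = get_acl(client, '/mnt/testvol', 'file1')
    assert 'user:qa:rw-' in acl['rules']

    # Remove just that rule; omit rule= to strip all ACL entries
    assert delete_acl(client, fqpath, 'u:qa')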
+
+
class GlusterFile(object):
"""Class to handle files specific to Gluster (client and backend)"""
def __init__(self, host, fqpath):
diff --git a/glustolibs-gluster/glustolibs/gluster/heal_libs.py b/glustolibs-gluster/glustolibs/gluster/heal_libs.py
index e4f2b2fdf..4a551cd48 100755
--- a/glustolibs-gluster/glustolibs/gluster/heal_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/heal_libs.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2016 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2016-2021 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -135,7 +135,8 @@ def are_all_self_heal_daemons_are_online(mnode, volname):
return False
-def monitor_heal_completion(mnode, volname, timeout_period=1200):
+def monitor_heal_completion(mnode, volname, timeout_period=1200,
+ bricks=None, interval_check=120):
"""Monitors heal completion by looking into .glusterfs/indices/xattrop
directory of every brick for certain time. When there are no entries
in all the brick directories then heal is successful. Otherwise heal is
@@ -147,6 +148,12 @@ def monitor_heal_completion(mnode, volname, timeout_period=1200):
heal_monitor_timeout : time until which the heal monitoring to be done.
Default: 1200 i.e 20 minutes.
+ Kwargs:
+ bricks : list of bricks on which to monitor heal; if not
+ provided, heal is monitored on all bricks of the volume
+ interval_check : time in seconds after which the heal info is
+ re-checked, defaults to 120.
+
Return:
bool: True if heal is complete within timeout_period. False otherwise
"""
@@ -158,7 +165,7 @@ def monitor_heal_completion(mnode, volname, timeout_period=1200):
# Get all bricks
from glustolibs.gluster.brick_libs import get_all_bricks
- bricks_list = get_all_bricks(mnode, volname)
+ bricks_list = bricks or get_all_bricks(mnode, volname)
if bricks_list is None:
g.log.error("Unable to get the bricks list. Hence unable to verify "
"whether self-heal-daemon process is running or not "
@@ -177,10 +184,15 @@ def monitor_heal_completion(mnode, volname, timeout_period=1200):
if heal_complete:
break
else:
- time.sleep(120)
- time_counter = time_counter - 120
+ time.sleep(interval_check)
+ time_counter = time_counter - interval_check
+
+ if heal_complete and bricks:
+ # In EC volumes, check heal completion only on online bricks
+ # and `gluster volume heal info` fails for an offline brick
+ return True
- if heal_complete:
+ if heal_complete and not bricks:
heal_completion_status = is_heal_complete(mnode, volname)
if heal_completion_status is True:
g.log.info("Heal has successfully completed on volume %s" %
@@ -313,7 +325,7 @@ def wait_for_self_heal_daemons_to_be_online(mnode, volname, timeout=300):
if not flag:
g.log.error("All self-heal-daemons of the volume '%s' are not online "
- "even after %d minutes", (volname, timeout/60.0))
+ "even after %d minutes" % (volname, timeout/60.0))
return False
else:
g.log.info("All self-heal-daemons of the volume '%s' are online ",
@@ -341,7 +353,7 @@ def get_self_heal_daemon_pid(nodes):
"""
glustershd_pids = {}
_rc = True
- if isinstance(nodes, str):
+ if not isinstance(nodes, list):
nodes = [nodes]
cmd = r"pgrep -f glustershd | grep -v ^$$\$"
g.log.info("Executing cmd: %s on node %s" % (cmd, nodes))
@@ -395,30 +407,26 @@ def do_bricks_exist_in_shd_volfile(mnode, volname, brick_list):
host = brick = None
parse = False
- # Establish connection to mnode
- conn = g.rpyc_get_connection(mnode)
- if conn is None:
- g.log.info("Not able to establish connection to node %s" % mnode)
- return False
- try:
- fd = conn.builtins.open(GLUSTERSHD)
- for each_line in fd:
- each_line = each_line.strip()
- if volume_clients in each_line:
- parse = True
- elif "end-volume" in each_line:
- if parse:
- brick_list_server_vol.append("%s:%s" % (host, brick))
- parse = False
- elif parse:
- if "option remote-subvolume" in each_line:
- brick = each_line.split(" ")[2]
- if "option remote-host" in each_line:
- host = each_line.split(" ")[2]
-
- except IOError as e:
- g.log.info("I/O error ({0}): {1}".format(e.errno, e.strerror))
+ cmd = "cat {0}".format(GLUSTERSHD)
+ ret, out, _ = g.run(mnode, cmd)
+ if ret:
+ g.log.error("Unable to cat the GLUSTERSHD file.")
return False
+ fd = out.split('\n')
+
+ for each_line in fd:
+ each_line = each_line.strip()
+ if volume_clients in each_line:
+ parse = True
+ elif "end-volume" in each_line:
+ if parse:
+ brick_list_server_vol.append("%s:%s" % (host, brick))
+ parse = False
+ elif parse:
+ if "option remote-subvolume" in each_line:
+ brick = each_line.split(" ")[2]
+ if "option remote-host" in each_line:
+ host = each_line.split(" ")[2]
g.log.info("Brick List from volume info : %s" % brick_list)
g.log.info("Brick List from glustershd server volume "
@@ -447,7 +455,7 @@ def is_shd_daemonized(nodes, timeout=120):
"""
counter = 0
flag = 0
- if isinstance(nodes, str):
+ if not isinstance(nodes, list):
nodes = [nodes]
while counter < timeout:
ret, pids = get_self_heal_daemon_pid(nodes)
@@ -483,7 +491,7 @@ def bring_self_heal_daemon_process_offline(nodes):
bool : True on successfully bringing self-heal daemon process offline.
False otherwise
"""
- if isinstance(nodes, str):
+ if not isinstance(nodes, list):
nodes = [nodes]
failed_nodes = []
@@ -513,3 +521,71 @@ def bring_self_heal_daemon_process_offline(nodes):
_rc = False
return _rc
+
+
+def is_shd_daemon_running(mnode, node, volname):
+ """
+ Verifies whether the shd daemon is up and running on a particular node by
+    checking the existence of the shd pid and parsing the volume status output.
+
+ Args:
+ mnode (str): The first node in servers list
+ node (str): The node to be checked for whether the glustershd
+ process is up or not
+ volname (str): Name of the volume created
+
+ Returns:
+        boolean: True if shd is running on the node, False otherwise
+ """
+
+ # Get glustershd pid from node.
+ ret, glustershd_pids = get_self_heal_daemon_pid(node)
+ if not ret and glustershd_pids[node] != -1:
+ return False
+    # Verify glustershd is listed for the node in the volume status output.
+ vol_status = get_volume_status(mnode, volname)
+ if vol_status is None:
+ return False
+ try:
+ _ = vol_status[volname][node]['Self-heal Daemon']
+ return True
+ except KeyError:
+ return False
+
+
+def enable_granular_heal(mnode, volname):
+ """Enable granular heal on a given volume
+
+ Args:
+        mnode(str): Node on which command has to be executed
+ volname(str): Name of the volume on which granular heal is to be enabled
+
+ Returns:
+ bool: True if granular heal is enabled successfully else False
+ """
+ cmd = "gluster volume heal {} granular-entry-heal enable".format(volname)
+ ret, _, _ = g.run(mnode, cmd)
+ if ret:
+ g.log.error('Unable to enable granular-entry-heal on volume %s',
+ volname)
+ return False
+ return True
+
+
+def disable_granular_heal(mnode, volname):
+ """Diable granular heal on a given volume
+
+ Args:
+        mnode(str): Node on which command will be executed
+ volname(str): Name of the volume on which granular heal is to be disabled
+
+ Returns:
+ bool: True if granular heal is disabled successfully else False
+ """
+ cmd = "gluster volume heal {} granular-entry-heal disable".format(volname)
+ ret, _, _ = g.run(mnode, cmd)
+ if ret:
+ g.log.error('Unable to disable granular-entry-heal on volume %s',
+ volname)
+ return False
+ return True
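Editor's note: taken together, the two new kwargs let a caller restrict heal
monitoring to a subset of bricks and tighten the polling loop, while the new
granular-heal helpers toggle the volume option around it. A minimal usage
sketch (node and volume names hypothetical), assuming get_online_bricks_list
from brick_libs:

    from glustolibs.gluster.brick_libs import get_online_bricks_list
    from glustolibs.gluster.heal_libs import (enable_granular_heal,
                                              monitor_heal_completion)

    mnode, volname = 'server1.example.com', 'testvol'

    # Turn on granular entry heal before monitoring
    if not enable_granular_heal(mnode, volname):
        raise Exception("Failed to enable granular heal")

    # Monitor heal only on the online bricks, polling every 30 seconds
    online_bricks = get_online_bricks_list(mnode, volname)
    if not monitor_heal_completion(mnode, volname, bricks=online_bricks,
                                   interval_check=30):
        raise Exception("Heal did not complete within the timeout")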
diff --git a/glustolibs-gluster/glustolibs/gluster/layout.py b/glustolibs-gluster/glustolibs/gluster/layout.py
index c1ddb40f8..ea5a5bc8b 100644
--- a/glustolibs-gluster/glustolibs/gluster/layout.py
+++ b/glustolibs-gluster/glustolibs/gluster/layout.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2018-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -28,11 +28,25 @@ class Layout(object):
"""
def _get_layout(self):
"""Discover brickdir data and cache in instance for further use"""
+        # Imported here to avoid a cyclic import
+ from glustolibs.gluster.volume_libs import get_volume_type
+
self._brickdirs = []
for brickdir_path in self._pathinfo['brickdir_paths']:
- brickdir = BrickDir(brickdir_path)
- g.log.debug("%s: %s" % (brickdir.path, brickdir.hashrange))
- self._brickdirs.append(brickdir)
+ (host, _) = brickdir_path.split(':')
+ ret = get_volume_type(brickdir_path)
+ if ret in ('Replicate', 'Disperse', 'Arbiter'):
+ g.log.info("Cannot get layout as volume under test is"
+ " Replicate/Disperse/Arbiter and DHT"
+ " pass-through was enabled after Gluster 6.0")
+ else:
+ brickdir = BrickDir(brickdir_path)
+ if brickdir is None:
+ g.log.error("Failed to get the layout")
+ else:
+ g.log.debug("%s: %s" % (brickdir.path,
+ brickdir.hashrange))
+ self._brickdirs.append(brickdir)
def __init__(self, pathinfo):
"""Init the layout class
@@ -59,48 +73,60 @@ class Layout(object):
ends at 32-bits high,
and has no holes or overlaps
"""
- joined_hashranges = []
- for brickdir in self.brickdirs:
- # join all of the hashranges into a single list
- joined_hashranges += brickdir.hashrange
- g.log.debug("joined range list: %s" % joined_hashranges)
- # remove duplicate hashes
- collapsed_ranges = list(set(joined_hashranges))
- # sort the range list for good measure
- collapsed_ranges.sort()
-
- # first hash in the list is 0?
- if collapsed_ranges[0] != 0:
- g.log.error('First hash in range (%d) is not zero' %
- collapsed_ranges[0])
- return False
-
- # last hash in the list is 32-bits high?
- if collapsed_ranges[-1] != int(0xffffffff):
- g.log.error('Last hash in ranges (%s) is not 0xffffffff' %
- hex(collapsed_ranges[-1]))
- return False
-
- # remove the first and last hashes
- clipped_ranges = collapsed_ranges[1:-1]
- g.log.debug('clipped: %s' % clipped_ranges)
-
- # walk through the list in pairs and look for diff == 1
- iter_ranges = iter(clipped_ranges)
- for first in iter_ranges:
- second = next(iter_ranges)
- hash_difference = second - first
- g.log.debug('%d - %d = %d' % (second, first, hash_difference))
- if hash_difference > 1:
- g.log.error("Layout has holes")
-
- return False
- elif hash_difference < 1:
- g.log.error("Layout has overlaps")
-
- return False
+        # Imported here to avoid a cyclic import
+ from glustolibs.gluster.volume_libs import get_volume_type
- return True
+ for brickdir_path in self._pathinfo['brickdir_paths']:
+ (host, _) = brickdir_path.split(':')
+ if get_volume_type(brickdir_path) in ('Replicate', 'Disperse',
+ 'Arbiter'):
+ g.log.info("Cannot check for layout completeness as volume"
+ " under test is Replicate/Disperse/Arbiter and DHT"
+ " pass-though was enabled after Gluster 6.")
+ else:
+ joined_hashranges = []
+ for brickdir in self.brickdirs:
+ # join all of the hashranges into a single list
+ joined_hashranges += brickdir.hashrange
+ g.log.debug("joined range list: %s" % joined_hashranges)
+ # remove duplicate hashes
+ collapsed_ranges = list(set(joined_hashranges))
+ # sort the range list for good measure
+ collapsed_ranges.sort()
+
+ # first hash in the list is 0?
+ if collapsed_ranges[0] != 0:
+ g.log.error('First hash in range (%d) is not zero' %
+ collapsed_ranges[0])
+ return False
+
+ # last hash in the list is 32-bits high?
+ if collapsed_ranges[-1] != int(0xffffffff):
+ g.log.error('Last hash in ranges (%s) is not 0xffffffff' %
+ hex(collapsed_ranges[-1]))
+ return False
+
+ # remove the first and last hashes
+ clipped_ranges = collapsed_ranges[1:-1]
+ g.log.debug('clipped: %s' % clipped_ranges)
+
+ # walk through the list in pairs and look for diff == 1
+ iter_ranges = iter(clipped_ranges)
+ for first in iter_ranges:
+ second = next(iter_ranges)
+ hash_difference = second - first
+ g.log.debug('%d - %d = %d' % (second, first,
+ hash_difference))
+ if hash_difference > 1:
+ g.log.error("Layout has holes")
+
+ return False
+ elif hash_difference < 1:
+ g.log.error("Layout has overlaps")
+
+ return False
+
+ return True
@property
def has_zero_hashranges(self):
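Editor's note: the reworked completeness walk is the same interval check as
before, now gated per brickdir path: the sorted hash boundaries must start at
0, end at 0xffffffff, and each interior (end, next start) pair must differ by
exactly 1. A standalone sketch of that invariant over plain (start, end)
tuples:

    def hashranges_are_complete(hashranges):
        # hashranges: list of (start, end) tuples, one per brickdir
        bounds = sorted(set(b for pair in hashranges for b in pair))
        if bounds[0] != 0 or bounds[-1] != 0xffffffff:
            return False
        # Walk interior bounds in pairs: each gap must be exactly 1,
        # otherwise the layout has holes (>1) or overlaps (<1)
        pairs = iter(bounds[1:-1])
        return all(start - end == 1 for end, start in zip(pairs, pairs))

    # Two bricks tiling the 32-bit space: complete
    assert hashranges_are_complete([(0, 0x7fffffff),
                                    (0x80000000, 0xffffffff)])
    # Hole between 0x7ffffffe and 0x80000000: incomplete
    assert not hashranges_are_complete([(0, 0x7ffffffe),
                                        (0x80000000, 0xffffffff)])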
diff --git a/glustolibs-gluster/glustolibs/gluster/lib_utils.py b/glustolibs-gluster/glustolibs/gluster/lib_utils.py
index 29172b2ea..b04976b1c 100755
--- a/glustolibs-gluster/glustolibs/gluster/lib_utils.py
+++ b/glustolibs-gluster/glustolibs/gluster/lib_utils.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2015-2021 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -26,8 +26,6 @@ import re
import time
from collections import OrderedDict
import tempfile
-import subprocess
-import random
ONE_GB_BYTES = 1073741824.0
@@ -53,23 +51,16 @@ def append_string_to_file(mnode, filename, str_to_add_in_file,
Returns:
True, on success, False otherwise
"""
- try:
- conn = g.rpyc_get_connection(mnode, user=user)
- if conn is None:
- g.log.error("Unable to get connection to 'root' of node %s"
- " in append_string_to_file()" % mnode)
- return False
-
- with conn.builtin.open(filename, 'a') as _filehandle:
- _filehandle.write(str_to_add_in_file)
-
- return True
- except IOError:
- g.log.error("Exception occurred while adding string to "
- "file %s in append_string_to_file()", filename)
+ cmd = "echo '{0}' >> {1}".format(str_to_add_in_file,
+ filename)
+ ret, out, err = g.run(mnode, cmd, user)
+ if ret or out or err:
+ g.log.error("Unable to append string '{0}' to file "
+ "'{1}' on node {2} using user {3}"
+ .format(str_to_add_in_file, filename,
+ mnode, user))
return False
- finally:
- g.rpyc_close_connection(host=mnode, user=user)
+ return True
def search_pattern_in_file(mnode, search_pattern, filename, start_str_to_parse,
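Editor's note: one caveat of the echo-based rewrite above is that the payload
is wrapped in single quotes, so a string containing a literal single quote
would break the shell command. A quoting-safe variant is easy to sketch (not
part of this patch; shlex.quote is Python 3, pipes.quote on Python 2):

    from shlex import quote  # pipes.quote on Python 2

    from glusto.core import Glusto as g

    def append_string_to_file_safe(mnode, filename, text, user="root"):
        # quote() escapes the payload so arbitrary text survives the shell
        cmd = "printf '%s\\n' {0} >> {1}".format(quote(text), quote(filename))
        ret, _, err = g.run(mnode, cmd, user)
        return ret == 0 and not err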
@@ -268,31 +259,19 @@ def list_files(mnode, dir_path, parse_str="", user="root"):
NoneType: None if command execution fails, parse errors.
list: files with absolute name
"""
-
- try:
- conn = g.rpyc_get_connection(mnode, user=user)
- if conn is None:
- g.log.error("Unable to get connection to 'root' of node %s"
- % mnode)
- return None
-
- filepaths = []
- for root, directories, files in conn.modules.os.walk(dir_path):
- for filename in files:
- if parse_str != "":
- if parse_str in filename:
- filepath = conn.modules.os.path.join(root, filename)
- filepaths.append(filepath)
- else:
- filepath = conn.modules.os.path.join(root, filename)
- filepaths.append(filepath)
- return filepaths
- except StopIteration:
- g.log.error("Exception occurred in list_files()")
+ if parse_str == "":
+ cmd = "find {0} -type f".format(dir_path)
+ else:
+ cmd = "find {0} -type f | grep {1}".format(dir_path,
+ parse_str)
+ ret, out, err = g.run(mnode, cmd, user)
+ if ret or err:
+ g.log.error("Unable to get the list of files on path "
+ "{0} on node {1} using user {2} due to error {3}"
+ .format(dir_path, mnode, user, err))
return None
-
- finally:
- g.rpyc_close_connection(host=mnode, user=user)
+ file_list = out.split('\n')
+ return file_list[0:len(file_list)-1]
def get_servers_bricks_dict(servers, servers_info):
@@ -308,7 +287,7 @@ def get_servers_bricks_dict(servers, servers_info):
get_servers_bricks_dict(g.config['servers'], g.config['servers_info'])
"""
servers_bricks_dict = OrderedDict()
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
for server in servers:
server_info = servers_info[server]
@@ -342,7 +321,7 @@ def get_servers_used_bricks_dict(mnode, servers):
get_servers_used_bricks_dict(g.config['servers'][0]['host'],
g.config['servers'])
"""
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
servers_used_bricks_dict = OrderedDict()
@@ -389,7 +368,7 @@ def get_servers_unused_bricks_dict(mnode, servers, servers_info):
g.config['servers'],
g.config['servers_info'])
"""
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
dict1 = get_servers_bricks_dict(servers, servers_info)
dict2 = get_servers_used_bricks_dict(mnode, servers)
@@ -408,7 +387,8 @@ def get_servers_unused_bricks_dict(mnode, servers, servers_info):
return servers_unused_bricks_dict
-def form_bricks_list(mnode, volname, number_of_bricks, servers, servers_info):
+def form_bricks_list(mnode, volname, number_of_bricks, servers, servers_info,
+ dirname=None):
"""Forms bricks list for create-volume/add-brick given the num_of_bricks
servers and servers_info.
@@ -421,6 +401,9 @@ def form_bricks_list(mnode, volname, number_of_bricks, servers, servers_info):
needs to be selected for creating the brick list.
servers_info (dict): dict of server info of each servers.
+    Kwargs:
+ dirname (str): Name of the directory for glusterfs brick
+
Returns:
list - List of bricks to use with volume-create/add-brick
None - if number_of_bricks is greater than unused bricks.
@@ -429,7 +412,7 @@ def form_bricks_list(mnode, volname, number_of_bricks, servers, servers_info):
form_bricks_path(g.config['servers'](0), "testvol", 6,
g.config['servers'], g.config['servers_info'])
"""
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
dict_index = 0
bricks_list = []
@@ -458,10 +441,18 @@ def form_bricks_list(mnode, volname, number_of_bricks, servers, servers_info):
list(servers_unused_bricks_dict.values())[dict_index])
brick_path = ''
if current_server_unused_bricks_list:
- brick_path = ("%s:%s/%s_brick%s" %
- (current_server,
- current_server_unused_bricks_list[0], volname, num))
- bricks_list.append(brick_path)
+ if dirname and (" " not in dirname):
+ brick_path = ("%s:%s/%s_brick%s" %
+ (current_server,
+ current_server_unused_bricks_list[0], dirname,
+ num))
+ bricks_list.append(brick_path)
+ else:
+ brick_path = ("%s:%s/%s_brick%s" %
+ (current_server,
+ current_server_unused_bricks_list[0], volname,
+ num))
+ bricks_list.append(brick_path)
# Remove the added brick from the current_server_unused_bricks_list
list(servers_unused_bricks_dict.values())[dict_index].pop(0)
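Editor's note: with the new dirname kwarg, callers can pick the brick
directory prefix instead of the default volume name (whitespace in dirname
falls back to the old naming). A usage sketch against the standard g.config
layout:

    from glusto.core import Glusto as g
    from glustolibs.gluster.lib_utils import form_bricks_list

    # Bricks come out as <server>:<path>/custom_dir_brick<N> instead of
    # <server>:<path>/testvol_brick<N>
    bricks = form_bricks_list(g.config['servers'][0], 'testvol', 6,
                              g.config['servers'], g.config['servers_info'],
                              dirname='custom_dir')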
@@ -483,7 +474,7 @@ def is_rhel6(servers):
Returns:
bool:Returns True, if its RHEL-6 else returns false
"""
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
results = g.run_parallel(servers, "cat /etc/redhat-release")
@@ -509,7 +500,7 @@ def is_rhel7(servers):
Returns:
bool:Returns True, if its RHEL-7 else returns false
"""
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
results = g.run_parallel(servers, "cat /etc/redhat-release")
@@ -544,22 +535,13 @@ def get_disk_usage(mnode, path, user="root"):
Example:
get_disk_usage("abc.com", "/mnt/glusterfs")
"""
-
- inst = random.randint(10, 100)
- conn = g.rpyc_get_connection(mnode, user=user, instance=inst)
- if conn is None:
- g.log.error("Failed to get rpyc connection")
- return None
- cmd = 'stat -f ' + path
- p = conn.modules.subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- out, err = p.communicate()
- ret = p.returncode
- if ret != 0:
- g.log.error("Failed to execute stat command")
+ cmd = 'stat -f {0}'.format(path)
+ ret, out, err = g.run(mnode, cmd, user)
+ if ret:
+ g.log.error("Unable to get stat of path {0} on node {1} "
+ "using user {2} due to error {3}".format(path, mnode,
+ user, err))
return None
-
- g.rpyc_close_connection(host=mnode, user=user, instance=inst)
res = ''.join(out)
match = re.match(r'.*Block size:\s(\d+).*Blocks:\sTotal:\s(\d+)\s+?'
r'Free:\s(\d+)\s+?Available:\s(\d+).*Inodes:\s'
@@ -680,7 +662,7 @@ def install_epel(servers):
Example:
install_epel(["abc.com", "def.com"])
"""
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
rt = True
@@ -734,7 +716,7 @@ def inject_msg_in_logs(nodes, log_msg, list_of_dirs=None, list_of_files=None):
Returns:
bool: True if successfully injected msg on all log files.
"""
- if isinstance(nodes, str):
+ if not isinstance(nodes, list):
nodes = [nodes]
if list_of_dirs is None:
@@ -779,10 +761,11 @@ def inject_msg_in_logs(nodes, log_msg, list_of_dirs=None, list_of_files=None):
def is_core_file_created(nodes, testrun_timestamp,
- paths=['/', '/var/log/core', '/tmp']):
+ paths=['/', '/var/log/core',
+ '/tmp', '/var/crash', '~/']):
'''
- Listing directories and files in "/", /var/log/core, /tmp
- directory for checking if the core file created or not
+    Lists directories and files in "/", /var/log/core, /tmp,
+    "/var/crash" and "~/" to check whether a core file was created or not
Args:
@@ -795,7 +778,7 @@ def is_core_file_created(nodes, testrun_timestamp,
of test case 'date +%s'
paths(list):
By default core file will be verified in "/","/tmp",
- "/var/log/core"
+ "/var/log/core", "/var/crash", "~/"
If test case need to verify core file in specific path,
need to pass path from test method
'''
@@ -805,8 +788,16 @@ def is_core_file_created(nodes, testrun_timestamp,
cmd = ' '.join(['cd', path, '&&', 'ls', 'core*'])
cmd_list.append(cmd)
- # Checks for core file in "/", "/var/log/core", "/tmp" directory
+ # Checks for core file in "/", "/var/log/core", "/tmp" "/var/crash",
+ # "~/" directory
for node in nodes:
+ ret, logfiles, err = g.run(node, 'grep -r "time of crash" '
+ '/var/log/glusterfs/')
+ if ret == 0:
+ g.log.error(" Seems like there was a crash, kindly check "
+ "the logfiles, even if you don't see a core file")
+ for logfile in logfiles.strip('\n').split('\n'):
+ g.log.error("Core was found in %s " % logfile.split(':')[0])
for cmd in cmd_list:
ret, out, _ = g.run(node, cmd)
g.log.info("storing all files and directory names into list")
@@ -824,7 +815,8 @@ def is_core_file_created(nodes, testrun_timestamp,
file_timestamp = file_timestamp.strip()
if(file_timestamp > testrun_timestamp):
count += 1
- g.log.error("New core file created %s " % file1)
+ g.log.error("New core file was created and found "
+ "at %s " % file1)
else:
g.log.info("Old core file Found")
# return the status of core file
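Editor's note: a usage sketch for the extended core-file search, assuming the
helper's usual contract of returning True when no new core file is found, and
with mnode/servers coming from the test context:

    from glusto.core import Glusto as g
    from glustolibs.gluster.lib_utils import is_core_file_created

    # Capture the test start time before running anything
    _, test_timestamp, _ = g.run(mnode, "date +%s")
    test_timestamp = test_timestamp.strip()

    # ... run the test ...

    # The default paths now also cover /var/crash and ~/
    if not is_core_file_created(servers, test_timestamp):
        raise Exception("New core file(s) generated during the test run")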
@@ -848,10 +840,10 @@ def remove_service_from_firewall(nodes, firewall_service, permanent=False):
bool: True|False(Firewall removed or Failed)
"""
- if isinstance(nodes, str):
+ if not isinstance(nodes, list):
nodes = [nodes]
- if isinstance(firewall_service, str):
+ if not isinstance(firewall_service, list):
firewall_service = [firewall_service]
_rc = True
@@ -892,10 +884,10 @@ def add_services_to_firewall(nodes, firewall_service, permanent=False):
bool: True|False(Firewall Enabled or Failed)
"""
- if isinstance(nodes, str):
+ if not isinstance(nodes, list):
nodes = [nodes]
- if isinstance(firewall_service, str):
+ if not isinstance(firewall_service, list):
firewall_service = [firewall_service]
_rc = True
@@ -944,30 +936,47 @@ def get_size_of_mountpoint(node, mount_point):
return out
-def add_user(host, uname):
+def add_user(servers, username, group=None):
"""
- Add user with default home directory
+ Add user with default home directory
+
Args:
- host (str): hostname/ip of the system
- uname (str): username
- Returns always True
- """
+ servers(list|str): hostname/ip of the system
+ username(str): username of the user to be created.
+ Kwargs:
+ group(str): Group name to which user is to be
+ added.(Default:None)
- command = "useradd -m %s -d /home/%s" % (uname, uname)
- ret, _, err = g.run(host, command)
- if 'already exists' in err:
- g.log.warn("User %s is already exists", uname)
+ Returns:
+ bool : True if user add is successful on all servers.
+ False otherwise.
+ """
+ # Checking if group is given or not.
+ if not group:
+ cmd = "useradd -m %s -d /home/%s" % (username, username)
else:
- g.log.info("User %s is created successfully", uname)
+ cmd = "useradd -G %s %s" % (group, username)
+
+ if not isinstance(servers, list):
+ servers = [servers]
+
+ results = g.run_parallel(servers, cmd)
+ for server, ret_value in list(results.items()):
+ retcode, _, err = ret_value
+ if retcode != 0 and "already exists" not in err:
+ g.log.error("Unable to add user on %s", server)
+ return False
return True
def del_user(host, uname):
"""
- Delete user with home directory
+ Delete user with home directory
+
Args:
host (str): hostname/ip of the system
uname (str): username
+
Return always True
"""
command = "userdel -r %s" % (uname)
@@ -977,3 +986,274 @@ def del_user(host, uname):
else:
g.log.info("User %s successfully deleted", uname)
return True
+
+
+def group_add(servers, groupname):
+ """
+ Creates a group in all the servers.
+
+ Args:
+ servers(list|str): Nodes on which cmd is to be executed.
+ groupname(str): Name of the group to be created.
+
+ Returns:
+ bool: True if add group is successful on all servers.
+ False otherwise.
+
+ """
+ if not isinstance(servers, list):
+ servers = [servers]
+
+ cmd = "groupadd %s" % groupname
+ results = g.run_parallel(servers, cmd)
+
+ for server, ret_value in list(results.items()):
+ retcode, _, err = ret_value
+ if retcode != 0 and "already exists" not in err:
+ g.log.error("Unable to add group %s on server %s",
+ groupname, server)
+ return False
+ return True
+
+
+def group_del(servers, groupname):
+ """
+ Deletes a group in all the servers.
+
+ Args:
+ servers(list|str): Nodes on which cmd is to be executed.
+ groupname(str): Name of the group to be removed.
+
+ Return always True
+ """
+ if not isinstance(servers, list):
+ servers = [servers]
+
+ cmd = "groupdel %s" % groupname
+ results = g.run_parallel(servers, cmd)
+
+ for server, ret_value in list(results.items()):
+ retcode, _, err = ret_value
+ if retcode != 0 and "does not exist" in err:
+ g.log.error("Group %s on server %s already removed",
+ groupname, server)
+ return True
+
+
+def ssh_keygen(mnode):
+ """
+ Creates a pair of ssh private and public key if not present
+
+ Args:
+ mnode (str): Node on which cmd is to be executed
+ Returns:
+ bool : True if ssh-keygen is successful on all servers.
+ False otherwise. It also returns True if ssh key
+ is already present
+
+ """
+ cmd = 'echo -e "n" | ssh-keygen -f ~/.ssh/id_rsa -q -N ""'
+ ret, out, _ = g.run(mnode, cmd)
+ if ret and "already exists" not in out:
+ return False
+ return True
+
+
+def ssh_copy_id(mnode, tonode, passwd, username="root"):
+ """
+ Copies the default ssh public key onto tonode's
+ authorized_keys file.
+
+ Args:
+ mnode (str): Node on which cmd is to be executed
+ tonode (str): Node to which ssh key is to be copied
+ passwd (str): passwd of the user of tonode
+ Kwargs:
+ username (str): username of tonode(Default:root)
+
+ Returns:
+ bool: True if ssh-copy-id is successful to tonode.
+ False otherwise. It also returns True if ssh key
+ is already present
+
+ """
+ cmd = ('sshpass -p "%s" ssh-copy-id -o StrictHostKeyChecking=no %s@%s' %
+ (passwd, username, tonode))
+ ret, _, _ = g.run(mnode, cmd)
+ if ret:
+ return False
+ return True
+
+
+def set_passwd(servers, username, passwd):
+ """
+ Sets password for a given username.
+
+ Args:
+ servers(list|str): list of nodes on which cmd is to be executed.
+ username(str): username of user for which password is to be set.
+ passwd(str): Password to be set.
+
+ Returns:
+ bool : True if password set is successful on all servers.
+ False otherwise.
+
+ """
+ if not isinstance(servers, list):
+ servers = [servers]
+ cmd = "echo %s:%s | chpasswd" % (username, passwd)
+ results = g.run_parallel(servers, cmd)
+
+ for server, ret_value in list(results.items()):
+ retcode, _, _ = ret_value
+ if retcode != 0:
+ g.log.error("Unable to set passwd for user %s on %s",
+ username, server)
+ return False
+ return True
+
+
+def is_user_exists(servers, username):
+ """
+ Checks if user is present on the given servers or not.
+
+ Args:
+ servers(str|list): list of nodes on which you need to
+ check if the user is present or not.
+ username(str): username of user whose presence has to be checked.
+
+ Returns:
+ bool: True if user is present on all nodes else False.
+ """
+ if not isinstance(servers, list):
+ servers = [servers]
+
+ cmd = "id %s" % username
+ results = g.run_parallel(servers, cmd)
+
+ for server, (ret_value, _, _) in results.items():
+        if ret_value:
+            g.log.error("User %s doesn't exist on server %s.",
+                        username, server)
+ return False
+ return True
+
+
+def is_group_exists(servers, group):
+ """
+ Checks if group is present on the given servers.
+
+ Args:
+ servers(str|list): list of nodes on which you need to
+ check if group is present or not.
+ group(str): groupname of group whose presence has
+ to be checked.
+
+ Returns:
+ bool: True if group is present on all nodes else False.
+ """
+ if not isinstance(servers, list):
+ servers = [servers]
+
+ cmd = "grep -q %s /etc/group" % group
+ results = g.run_parallel(servers, cmd)
+
+ for server, (ret_value, _, _) in results.items():
+        if ret_value:
+            g.log.error("Group %s doesn't exist on server %s.",
+                        group, server)
+ return False
+ return True
+
+
+def is_passwordless_ssh_configured(fromnode, tonode, username):
+ """
+ Checks if passwordless ssh is configured between nodes or not.
+
+ Args:
+ fromnode: Server from which passwordless ssh has to be
+ configured.
+ tonode: Server to which passwordless ssh has to be
+ configured.
+ username: username of user to be used for checking
+ passwordless ssh.
+ Returns:
+ bool: True if configured else false.
+ """
+ cmd = ("ssh %s@%s hostname" % (username, tonode))
+ ret, out, _ = g.run(fromnode, cmd)
+ _, hostname, _ = g.run(tonode, "hostname")
+ if ret or hostname not in out:
+ g.log.error("Passwordless ssh not configured "
+ "from server %s to server %s using user %s.",
+                    fromnode, tonode, username)
+ return False
+ return True
+
+
+def collect_bricks_arequal(bricks_list):
+ """Collects arequal for all bricks in list
+
+ Args:
+ bricks_list (list): List of bricks.
+ Example:
+            bricks_list = ['gluster.blr.cluster.com:/bricks/brick1/vol']
+
+ Returns:
+ tuple(bool, list):
+ On success returns (True, list of arequal-checksums of each brick)
+ On failure returns (False, list of arequal-checksums of each brick)
+ arequal-checksum for a brick would be 'None' when failed to
+ collect arequal for that brick.
+
+ Example:
+ >>> all_bricks = get_all_bricks(self.mnode, self.volname)
+ >>> ret, arequal = collect_bricks_arequal(all_bricks)
+ >>> ret
+ True
+ """
+ # Converting a bricks_list to list if not.
+ if not isinstance(bricks_list, list):
+ bricks_list = [bricks_list]
+
+ return_code, arequal_list = True, []
+ for brick in bricks_list:
+
+ # Running arequal-checksum on the brick.
+ node, brick_path = brick.split(':')
+ cmd = ('arequal-checksum -p {} -i .glusterfs -i .landfill -i .trashcan'
+ .format(brick_path))
+ ret, arequal, _ = g.run(node, cmd)
+
+ # Generating list accordingly
+ if ret:
+ g.log.error('Failed to get arequal on brick %s', brick)
+ return_code = False
+ arequal_list.append(None)
+ else:
+ g.log.info('Successfully calculated arequal for brick %s', brick)
+ arequal_list.append(arequal)
+
+ return (return_code, arequal_list)
+
+
+def get_usable_size_per_disk(brickpath, min_free_limit=10):
+ """Get the usable size per disk
+
+ Args:
+ brickpath(str): Brick path to be used to calculate usable size
+
+ Kwargs:
+ min_free_limit(int): Min free disk limit to be used
+
+ Returns:
+ (int): Usable size in GB. None in case of errors.
+ """
+ node, brick_path = brickpath.split(':')
+ size = get_size_of_mountpoint(node, brick_path)
+ if not size:
+ return None
+ size = int(size)
+ min_free_size = size * min_free_limit // 100
+ usable_size = ((size - min_free_size) // 1048576) + 1
+ return usable_size
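Editor's note: a short sketch wiring the two new helpers together. On a
healthy pure-replica volume every brick should report the same arequal
checksum, and the usable size can bound how much data a test writes
(mnode/volname assumed from the test context):

    from glustolibs.gluster.brick_libs import get_all_bricks
    from glustolibs.gluster.lib_utils import (collect_bricks_arequal,
                                              get_usable_size_per_disk)

    bricks = get_all_bricks(mnode, volname)

    # One distinct checksum across all replica bricks means they agree
    ret, arequals = collect_bricks_arequal(bricks)
    if not ret or len(set(arequals)) != 1:
        raise Exception("arequal mismatch across bricks")

    # Usable size in GB of the first brick, keeping 10% free by default
    usable_gb = get_usable_size_per_disk(bricks[0])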
diff --git a/glustolibs-gluster/glustolibs/gluster/mount_ops.py b/glustolibs-gluster/glustolibs/gluster/mount_ops.py
index cc63d0588..c8fbddd05 100755
--- a/glustolibs-gluster/glustolibs/gluster/mount_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/mount_ops.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2015-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -261,7 +261,7 @@ def is_mounted(volname, mpoint, mserver, mclient, mtype, user='root'):
return True
return False
else:
- ret, _, _ = g.run(mclient, "mount | grep %s | grep %s | grep \"%s\""
+ ret, _, _ = g.run(mclient, "mount | egrep '%s | %s' | grep \"%s\""
% (volname, mpoint, mserver), user)
if ret == 0:
g.log.debug("Volume %s is mounted at %s:%s" % (volname, mclient,
@@ -336,10 +336,10 @@ def mount_volume(volname, mtype, mpoint, mserver, mclient, options='',
if mtype == 'nfs':
if not options:
- options = "-o vers=3"
+ options = "-o vers=4.1"
elif options and 'vers' not in options:
- options = options + ",vers=3"
+ options = options + ",vers=4.1"
if mserver:
mcmd = ("mount -t %s %s %s:/%s %s" %
@@ -356,23 +356,10 @@ def mount_volume(volname, mtype, mpoint, mserver, mclient, options='',
# Check if client is running rhel. If so add specific options
cifs_options = ""
- try:
- conn = g.rpyc_get_connection(mclient, user=user)
- if conn is None:
- g.log.error("Unable to get connection to %s on node %s"
- " in mount_volume()", user, mclient)
- return (1, '', '')
-
- os, version, name = conn.modules.platform.linux_distribution()
- if "Santiago" in name:
- cifs_options = "sec=ntlmssp"
- except Exception as e:
- g.log.error("Exception occurred while getting the platform "
- "of node %s: %s", mclient, str(e))
- return (1, '', '')
- finally:
- g.rpyc_close_connection(host=mclient, user=user)
-
+ cmd = "cat /etc/redhat-release | grep Santiago"
+ ret, _, _ = g.run(mclient, cmd, user=user)
+ if not ret:
+ cifs_options = "sec=ntlmssp"
mcmd = ("mount -t cifs -o username=%s,password=%s,%s "
"\\\\\\\\%s\\\\gluster-%s %s" % (smbuser, smbpasswd,
cifs_options, mserver,
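Editor's note on the mount_ops change above: the NFS default protocol is
bumped, so mounts now get "-o vers=4.1" unless the caller passes an explicit
vers. A sketch (hostnames hypothetical):

    from glustolibs.gluster.mount_ops import mount_volume

    # Implicitly mounts with "-o vers=4.1"
    mount_volume('testvol', 'nfs', '/mnt/testvol',
                 'server1.example.com', 'client1.example.com')

    # Passing 'vers' suppresses the default and keeps the old behaviour
    mount_volume('testvol', 'nfs', '/mnt/testvol_v3',
                 'server1.example.com', 'client1.example.com',
                 options='-o vers=3')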
diff --git a/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py b/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py
index 20dbe430d..5f69e68f6 100644..100755
--- a/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_libs.py
@@ -30,427 +30,125 @@ from glustolibs.gluster.nfs_ganesha_ops import (
is_nfs_ganesha_cluster_in_healthy_state,
teardown_nfs_ganesha_cluster,
create_nfs_ganesha_cluster,
- export_nfs_ganesha_volume,
- unexport_nfs_ganesha_volume,
configure_ports_on_clients,
ganesha_client_firewall_settings)
-from glustolibs.gluster.gluster_base_class import GlusterBaseClass
-from glustolibs.gluster.exceptions import ExecutionError, ConfigError
-from glustolibs.gluster.peer_ops import peer_probe_servers, peer_status
-from glustolibs.gluster.volume_ops import volume_info, get_volume_info
-from glustolibs.gluster.volume_libs import (setup_volume, cleanup_volume,
- log_volume_info_and_status,
- get_volume_options,
- is_volume_exported)
-from glustolibs.gluster.mount_ops import create_mount_objs
-from glustolibs.io.utils import log_mounts_info, wait_for_io_to_complete
-from glustolibs.misc.misc_libs import upload_scripts
-
-
-class NfsGaneshaClusterSetupClass(GlusterBaseClass):
- """Creates nfs ganesha cluster
+from glustolibs.gluster.volume_libs import is_volume_exported
+from glustolibs.gluster.lib_utils import is_rhel7
+
+
+def setup_nfs_ganesha(cls):
"""
- @classmethod
- def setUpClass(cls):
- """
- Setup variable for nfs-ganesha tests.
- """
- # pylint: disable=too-many-statements, too-many-branches
- GlusterBaseClass.setUpClass.im_func(cls)
-
- # Check if enable_nfs_ganesha is set in config file
- if not cls.enable_nfs_ganesha:
- raise ConfigError("Please enable nfs ganesha in config")
-
- # Read num_of_nfs_ganesha_nodes from config file and create
- # nfs ganesha cluster accordingly
- cls.num_of_nfs_ganesha_nodes = int(cls.num_of_nfs_ganesha_nodes)
- cls.servers_in_nfs_ganesha_cluster = (
- cls.servers[:cls.num_of_nfs_ganesha_nodes])
- cls.vips_in_nfs_ganesha_cluster = (
- cls.vips[:cls.num_of_nfs_ganesha_nodes])
-
- # Obtain hostname of servers in ganesha cluster
- cls.ganesha_servers_hostname = []
- for ganesha_server in cls.servers_in_nfs_ganesha_cluster:
- ret, hostname, _ = g.run(ganesha_server, "hostname")
- if ret:
- raise ExecutionError("Failed to obtain hostname of %s"
- % ganesha_server)
- hostname = hostname.strip()
- g.log.info("Obtained hostname: IP- %s, hostname- %s",
- ganesha_server, hostname)
- cls.ganesha_servers_hostname.append(hostname)
-
- @classmethod
- def setup_nfs_ganesha(cls):
- """
- Create nfs-ganesha cluster if not exists
- Set client configurations for nfs-ganesha
-
- Returns:
- True(bool): If setup is successful
- False(bool): If setup is failure
- """
- # pylint: disable = too-many-statements, too-many-branches
- # pylint: disable = too-many-return-statements
- cluster_exists = is_nfs_ganesha_cluster_exists(
- cls.servers_in_nfs_ganesha_cluster[0])
- if cluster_exists:
- is_healthy = is_nfs_ganesha_cluster_in_healthy_state(
- cls.servers_in_nfs_ganesha_cluster[0])
-
- if is_healthy:
- g.log.info("Nfs-ganesha Cluster exists and is in healthy "
- "state. Skipping cluster creation...")
- else:
- g.log.info("Nfs-ganesha Cluster exists and is not in "
- "healthy state.")
- g.log.info("Tearing down existing cluster which is not in "
- "healthy state")
- ganesha_ha_file = ("/var/run/gluster/shared_storage/"
- "nfs-ganesha/ganesha-ha.conf")
-
- g.log.info("Collecting server details of existing "
- "nfs ganesha cluster")
- conn = g.rpyc_get_connection(
- cls.servers_in_nfs_ganesha_cluster[0], user="root")
- if not conn:
- tmp_node = cls.servers_in_nfs_ganesha_cluster[0]
- g.log.error("Unable to get connection to 'root' of node"
- " %s", tmp_node)
- return False
-
- if not conn.modules.os.path.exists(ganesha_ha_file):
- g.log.error("Unable to locate %s", ganesha_ha_file)
- return False
- with conn.builtin.open(ganesha_ha_file, "r") as fhand:
- ganesha_ha_contents = fhand.read()
- g.rpyc_close_connection(
- host=cls.servers_in_nfs_ganesha_cluster[0], user="root")
- servers_in_existing_cluster = re.findall(r'VIP_(.*)\=.*',
- ganesha_ha_contents)
-
- ret = teardown_nfs_ganesha_cluster(
- servers_in_existing_cluster, force=True)
- if not ret:
- g.log.error("Failed to teardown unhealthy ganesha "
- "cluster")
- return False
-
- g.log.info("Existing unhealthy cluster got teardown "
- "successfully")
-
- if (not cluster_exists) or (not is_healthy):
- g.log.info("Creating nfs-ganesha cluster of %s nodes"
- % str(cls.num_of_nfs_ganesha_nodes))
- g.log.info("Nfs-ganesha cluster node info: %s"
- % cls.servers_in_nfs_ganesha_cluster)
- g.log.info("Nfs-ganesha cluster vip info: %s"
- % cls.vips_in_nfs_ganesha_cluster)
-
- ret = create_nfs_ganesha_cluster(
- cls.ganesha_servers_hostname,
- cls.vips_in_nfs_ganesha_cluster)
- if not ret:
- g.log.error("Creation of nfs-ganesha cluster failed")
- return False
+ Create nfs-ganesha cluster if not exists
+ Set client configurations for nfs-ganesha
- if not is_nfs_ganesha_cluster_in_healthy_state(
- cls.servers_in_nfs_ganesha_cluster[0]):
- g.log.error("Nfs-ganesha cluster is not healthy")
- return False
- g.log.info("Nfs-ganesha Cluster exists is in healthy state")
+ Returns:
+ True(bool): If setup is successful
+ False(bool): If setup is failure
+ """
+ # pylint: disable = too-many-statements, too-many-branches
+ # pylint: disable = too-many-return-statements
+ cluster_exists = is_nfs_ganesha_cluster_exists(
+ cls.servers_in_nfs_ganesha_cluster[0])
+ if cluster_exists:
+ is_healthy = is_nfs_ganesha_cluster_in_healthy_state(
+ cls.servers_in_nfs_ganesha_cluster[0])
- ret = configure_ports_on_clients(cls.clients)
- if not ret:
- g.log.error("Failed to configure ports on clients")
- return False
+ if is_healthy:
+ g.log.info("Nfs-ganesha Cluster exists and is in healthy "
+ "state. Skipping cluster creation...")
+ else:
+ g.log.info("Nfs-ganesha Cluster exists and is not in "
+ "healthy state.")
+ g.log.info("Tearing down existing cluster which is not in "
+ "healthy state")
+ ganesha_ha_file = ("/var/run/gluster/shared_storage/"
+ "nfs-ganesha/ganesha-ha.conf")
+ g_node = cls.servers_in_nfs_ganesha_cluster[0]
+
+ g.log.info("Collecting server details of existing "
+ "nfs ganesha cluster")
+
+ # Check whether ganesha ha file exists
+ cmd = "[ -f {} ]".format(ganesha_ha_file)
+ ret, _, _ = g.run(g_node, cmd)
+ if ret:
+ g.log.error("Unable to locate %s", ganesha_ha_file)
+ return False
- ret = ganesha_client_firewall_settings(cls.clients)
- if not ret:
- g.log.error("Failed to do firewall setting in clients")
- return False
+ # Read contents of ganesha_ha_file
+ cmd = "cat {}".format(ganesha_ha_file)
+ ret, ganesha_ha_contents, _ = g.run(g_node, cmd)
+ if ret:
+ g.log.error("Failed to read %s", ganesha_ha_file)
+ return False
- for server in cls.servers:
- for client in cls.clients:
- cmd = ("if [ -z \"$(grep -R \"%s\" /etc/hosts)\" ]; then "
- "echo \"%s %s\" >> /etc/hosts; fi"
- % (client, socket.gethostbyname(client), client))
- ret, _, _ = g.run(server, cmd)
- if ret != 0:
- g.log.error("Failed to add entry of client %s in "
- "/etc/hosts of server %s"
- % (client, server))
+ servers_in_existing_cluster = re.findall(r'VIP_(.*)\=.*',
+ ganesha_ha_contents)
- for client in cls.clients:
- for server in cls.servers:
- cmd = ("if [ -z \"$(grep -R \"%s\" /etc/hosts)\" ]; then "
- "echo \"%s %s\" >> /etc/hosts; fi"
- % (server, socket.gethostbyname(server), server))
- ret, _, _ = g.run(client, cmd)
- if ret != 0:
- g.log.error("Failed to add entry of server %s in "
- "/etc/hosts of client %s"
- % (server, client))
- return True
-
- @classmethod
- def tearDownClass(cls, delete_nfs_ganesha_cluster=True):
- """Teardown nfs ganesha cluster.
- """
- GlusterBaseClass.tearDownClass.im_func(cls)
-
- if delete_nfs_ganesha_cluster:
ret = teardown_nfs_ganesha_cluster(
- cls.servers_in_nfs_ganesha_cluster)
+ servers_in_existing_cluster, force=True)
if not ret:
- g.log.error("Teardown got failed. Hence, cleaning up "
- "nfs-ganesha cluster forcefully")
- ret = teardown_nfs_ganesha_cluster(
- cls.servers_in_nfs_ganesha_cluster, force=True)
- if not ret:
- raise ExecutionError("Force cleanup of nfs-ganesha "
- "cluster failed")
- g.log.info("Teardown nfs ganesha cluster succeeded")
- else:
- g.log.info("Skipping teardown nfs-ganesha cluster...")
-
-
-class NfsGaneshaVolumeBaseClass(NfsGaneshaClusterSetupClass):
- """Sets up the nfs ganesha cluster, volume for testing purposes.
- """
- @classmethod
- def setUpClass(cls):
- """Setup volume exports volume with nfs-ganesha,
- mounts the volume.
- """
- # pylint: disable=too-many-branches
- NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)
-
- # Peer probe servers
- ret = peer_probe_servers(cls.mnode, cls.servers)
- if not ret:
- raise ExecutionError("Failed to peer probe servers")
+ g.log.error("Failed to teardown unhealthy ganesha "
+ "cluster")
+ return False
- g.log.info("All peers are in connected state")
+ g.log.info("Existing unhealthy cluster got teardown "
+ "successfully")
- # Peer Status from mnode
- peer_status(cls.mnode)
+ if (not cluster_exists) or (not is_healthy):
+ g.log.info("Creating nfs-ganesha cluster of %s nodes"
+ % str(cls.num_of_nfs_ganesha_nodes))
+ g.log.info("Nfs-ganesha cluster node info: %s"
+ % cls.servers_in_nfs_ganesha_cluster)
+ g.log.info("Nfs-ganesha cluster vip info: %s"
+ % cls.vips_in_nfs_ganesha_cluster)
- for server in cls.servers:
- mount_info = [
- {'protocol': 'glusterfs',
- 'mountpoint': '/run/gluster/shared_storage',
- 'server': server,
- 'client': {'host': server},
- 'volname': 'gluster_shared_storage',
- 'options': ''}]
-
- mount_obj = create_mount_objs(mount_info)
- if not mount_obj[0].is_mounted():
- ret = mount_obj[0].mount()
- if not ret:
- raise ExecutionError("Unable to mount volume '%s:%s' "
- "on '%s:%s'"
- % (mount_obj.server_system,
- mount_obj.volname,
- mount_obj.client_system,
- mount_obj.mountpoint))
-
- # Setup Volume
- ret = setup_volume(mnode=cls.mnode,
- all_servers_info=cls.all_servers_info,
- volume_config=cls.volume)
+ ret = create_nfs_ganesha_cluster(
+ cls.ganesha_servers_hostname,
+ cls.vips_in_nfs_ganesha_cluster)
if not ret:
- raise ExecutionError("Setup volume %s failed", cls.volume)
- time.sleep(10)
-
- # Export volume with nfs ganesha, if it is not exported already
- vol_option = get_volume_options(cls.mnode, cls.volname,
- option='ganesha.enable')
- if vol_option is None:
- raise ExecutionError("Failed to get ganesha.enable volume option "
- "for %s " % cls.volume)
- if vol_option['ganesha.enable'] != 'on':
- ret, _, _ = export_nfs_ganesha_volume(
- mnode=cls.mnode, volname=cls.volname)
- if ret != 0:
- raise ExecutionError("Failed to export volume %s "
- "as NFS export", cls.volname)
- time.sleep(5)
+ g.log.error("Creation of nfs-ganesha cluster failed")
+ return False
- ret = wait_for_nfs_ganesha_volume_to_get_exported(cls.mnode,
- cls.volname)
- if not ret:
- raise ExecutionError("Failed to export volume %s. volume is "
- "not listed in showmount" % cls.volname)
- else:
- g.log.info("Volume %s is exported successfully"
- % cls.volname)
+ if not is_nfs_ganesha_cluster_in_healthy_state(
+ cls.servers_in_nfs_ganesha_cluster[0]):
+ g.log.error("Nfs-ganesha cluster is not healthy")
+ return False
+ g.log.info("Nfs-ganesha Cluster exists is in healthy state")
- # Log Volume Info and Status
- ret = log_volume_info_and_status(cls.mnode, cls.volname)
+ if is_rhel7(cls.clients):
+ ret = configure_ports_on_clients(cls.clients)
if not ret:
- raise ExecutionError("Logging volume %s info and status failed",
- cls.volname)
-
- # Create Mounts
- _rc = True
- for mount_obj in cls.mounts:
- ret = mount_obj.mount()
- if not ret:
- g.log.error("Unable to mount volume '%s:%s' on '%s:%s'",
- mount_obj.server_system, mount_obj.volname,
- mount_obj.client_system, mount_obj.mountpoint)
- _rc = False
- if not _rc:
- raise ExecutionError("Mounting volume %s on few clients failed",
- cls.volname)
-
- # Get info of mount before the IO
- log_mounts_info(cls.mounts)
-
- @classmethod
- def tearDownClass(cls, umount_vol=True, cleanup_vol=True,
- teardown_nfs_ganesha_cluster=True):
- """Teardown the export, mounts and volume.
- """
- # pylint: disable=too-many-branches
- # Unmount volume
- if umount_vol:
- _rc = True
- for mount_obj in cls.mounts:
- ret = mount_obj.unmount()
- if not ret:
- g.log.error("Unable to unmount volume '%s:%s' on '%s:%s'",
- mount_obj.server_system, mount_obj.volname,
- mount_obj.client_system, mount_obj.mountpoint)
- _rc = False
- if not _rc:
- raise ExecutionError("Unmount of all mounts are not "
- "successful")
-
- # Cleanup volume
- if cleanup_vol:
-
- volinfo = get_volume_info(cls.mnode, cls.volname)
- if volinfo is None or cls.volname not in volinfo:
- g.log.info("Volume %s does not exist in %s"
- % (cls.volname, cls.mnode))
- else:
- # Unexport volume, if it is not unexported already
- vol_option = get_volume_options(cls.mnode, cls.volname,
- option='ganesha.enable')
- if vol_option is None:
- raise ExecutionError("Failed to get ganesha.enable volume "
- " option for %s " % cls.volume)
- if vol_option['ganesha.enable'] != 'off':
- if is_volume_exported(cls.mnode, cls.volname, "nfs"):
- ret, _, _ = unexport_nfs_ganesha_volume(
- mnode=cls.mnode, volname=cls.volname)
- if ret != 0:
- raise ExecutionError("Failed to unexport volume %s"
- % cls.volname)
- time.sleep(5)
- else:
- g.log.info("Volume %s is unexported already"
- % cls.volname)
-
- _, _, _ = g.run(cls.mnode, "showmount -e")
-
- ret = cleanup_volume(mnode=cls.mnode, volname=cls.volname)
- if not ret:
- raise ExecutionError("cleanup volume %s failed", cls.volname)
-
- # All Volume Info
- volume_info(cls.mnode)
-
- (NfsGaneshaClusterSetupClass.
- tearDownClass.
- im_func(cls,
- delete_nfs_ganesha_cluster=teardown_nfs_ganesha_cluster))
-
-
-class NfsGaneshaIOBaseClass(NfsGaneshaVolumeBaseClass):
- """ Nfs Ganesha IO base class to run the tests when IO is in progress """
+ g.log.error("Failed to configure ports on clients")
+ return False
- @classmethod
- def setUpClass(cls):
+ ret = ganesha_client_firewall_settings(cls.clients)
+ if not ret:
+ g.log.error("Failed to do firewall setting in clients")
+ return False
- NfsGaneshaVolumeBaseClass.setUpClass.im_func(cls)
+ for server in cls.servers:
+ for client in cls.clients:
+ cmd = ("if [ -z \"$(grep -R \"%s\" /etc/hosts)\" ]; then "
+ "echo \"%s %s\" >> /etc/hosts; fi"
+ % (client, socket.gethostbyname(client), client))
+ ret, _, _ = g.run(server, cmd)
+ if ret != 0:
+ g.log.error("Failed to add entry of client %s in "
+ "/etc/hosts of server %s"
+ % (client, server))
- # Upload io scripts for running IO on mounts
- script_local_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
- cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
- "file_dir_ops.py")
- ret = upload_scripts(cls.clients, script_local_path)
- if not ret:
- raise ExecutionError("Failed to upload IO scripts")
-
- cls.counter = 1
-
- def setUp(self):
- """setUp starts the io from all the mounts.
- IO creates deep dirs and files.
- """
-
- NfsGaneshaVolumeBaseClass.setUp.im_func(self)
-
- # Start IO on mounts
- g.log.info("Starting IO on all mounts...")
- self.all_mounts_procs = []
- for mount_obj in self.mounts:
- cmd = ("python %s create_deep_dirs_with_files "
- "--dirname-start-num %d "
- "--dir-depth 2 "
- "--dir-length 15 "
- "--max-num-of-dirs 5 "
- "--num-of-files 10 %s" % (self.script_upload_path,
- self.counter,
- mount_obj.mountpoint))
- proc = g.run_async(mount_obj.client_system, cmd,
- user=mount_obj.user)
- self.all_mounts_procs.append(proc)
- self.counter = self.counter + 10
- self.io_validation_complete = False
-
- # Adding a delay of 15 seconds before test method starts. This
- # is to ensure IO's are in progress and giving some time to fill data
- time.sleep(15)
-
- def tearDown(self):
- """If test method failed before validating IO, tearDown waits for the
- IO's to complete and checks for the IO exit status
- """
-
- # Wait for IO to complete if io validation is not executed in the
- # test method
- if not self.io_validation_complete:
- g.log.info("Wait for IO to complete as IO validation did not "
- "succeed in test method")
- ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
- if not ret:
- raise ExecutionError("IO failed on some of the clients")
- g.log.info("IO is successful on all mounts")
- NfsGaneshaVolumeBaseClass.tearDown.im_func(self)
-
- @classmethod
- def tearDownClass(cls, umount_volume=True, cleanup_volume=True,
- teardown_nfsganesha_cluster=True):
- """Cleanup data from mount, cleanup volume and delete nfs ganesha
- cluster.
- """
- # Log Mounts info
- g.log.info("Log mounts info")
- log_mounts_info(cls.mounts)
-
- (NfsGaneshaVolumeBaseClass.
- tearDownClass.
- im_func(cls,
- umount_vol=umount_volume, cleanup_vol=cleanup_volume,
- teardown_nfs_ganesha_cluster=teardown_nfsganesha_cluster))
+ for client in cls.clients:
+ for server in cls.servers:
+ cmd = ("if [ -z \"$(grep -R \"%s\" /etc/hosts)\" ]; then "
+ "echo \"%s %s\" >> /etc/hosts; fi"
+ % (server, socket.gethostbyname(server), server))
+ ret, _, _ = g.run(client, cmd)
+ if ret != 0:
+ g.log.error("Failed to add entry of server %s in "
+ "/etc/hosts of client %s"
+ % (server, client))
+ return True
def wait_for_nfs_ganesha_volume_to_get_exported(mnode, volname, timeout=120):
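Editor's note: with the base classes removed, the bootstrap is now a plain
function that a test class calls once its ganesha attributes
(servers_in_nfs_ganesha_cluster, vips_in_nfs_ganesha_cluster,
ganesha_servers_hostname, clients) are populated. A minimal sketch, assuming
the post-refactor GlusterBaseClass helper get_super_method:

    from glustolibs.gluster.exceptions import ExecutionError
    from glustolibs.gluster.gluster_base_class import GlusterBaseClass
    from glustolibs.gluster.nfs_ganesha_libs import setup_nfs_ganesha

    class TestGaneshaExample(GlusterBaseClass):
        @classmethod
        def setUpClass(cls):
            cls.get_super_method(cls, 'setUpClass')()
            # Reuses a healthy cluster; tears down and rebuilds an
            # unhealthy one
            if not setup_nfs_ganesha(cls):
                raise ExecutionError("nfs-ganesha setup failed")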
diff --git a/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py b/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py
index 476db2d09..d8486c7d2 100644..100755
--- a/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/nfs_ganesha_ops.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2016-2017 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2016-2021 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -23,10 +23,10 @@
"""
import os
-import random
from glusto.core import Glusto as g
from glustolibs.gluster.glusterdir import mkdir
-from glustolibs.gluster.lib_utils import add_services_to_firewall
+from glustolibs.gluster.lib_utils import (add_services_to_firewall,
+ is_rhel7)
from glustolibs.gluster.shared_storage_ops import enable_shared_storage
from glustolibs.gluster.peer_ops import peer_probe_servers
@@ -50,17 +50,33 @@ def teardown_nfs_ganesha_cluster(servers, force=False):
Example:
teardown_nfs_ganesha_cluster(servers)
"""
+ # Copy ganesha.conf before proceeding to clean up
+ for server in servers:
+ cmd = "cp /etc/ganesha/ganesha.conf ganesha.conf"
+ ret, _, _ = g.run(server, cmd)
+ if ret:
+ g.log.error("Failed to copy ganesha.conf")
+
if force:
g.log.info("Executing force cleanup...")
+ cleanup_ops = ['--teardown', '--cleanup']
for server in servers:
- cmd = ("/usr/libexec/ganesha/ganesha-ha.sh --teardown "
- "/var/run/gluster/shared_storage/nfs-ganesha")
- _, _, _ = g.run(server, cmd)
- cmd = ("/usr/libexec/ganesha/ganesha-ha.sh --cleanup /var/run/"
- "gluster/shared_storage/nfs-ganesha")
- _, _, _ = g.run(server, cmd)
+ # Perform teardown and cleanup
+ for op in cleanup_ops:
+ cmd = ("/usr/libexec/ganesha/ganesha-ha.sh {} /var/run/"
+ "gluster/shared_storage/nfs-ganesha".format(op))
+ _, _, _ = g.run(server, cmd)
+
+ # Stop nfs ganesha service
_, _, _ = stop_nfs_ganesha_service(server)
+
+ # Clean shared storage, ganesha.conf, and replace with backup
+ for cmd in ("rm -rf /var/run/gluster/shared_storage/*",
+ "rm -rf /etc/ganesha/ganesha.conf",
+ "cp ganesha.conf /etc/ganesha/ganesha.conf"):
+ _, _, _ = g.run(server, cmd)
return True
+
ret, _, _ = disable_nfs_ganesha(servers[0])
if ret != 0:
g.log.error("Nfs-ganesha disable failed")
@@ -282,57 +298,6 @@ def unexport_nfs_ganesha_volume(mnode, volname):
return g.run(mnode, cmd)
-def run_refresh_config(mnode, volname):
- """Runs refresh config on nfs ganesha volume.
-
- Args:
- mnode (str): Node in which refresh config command will
- be executed.
- volname (str): volume name
-
- Returns:
- bool : True on successfully running refresh config on
- nfs-ganesha volume. False otherwise
-
- Example:
- run_refresh_config("abc.com", volname)
- """
-
- conf_file = "nfs_ganesha_refresh_config.jinja"
- gdeploy_config_file = GDEPLOY_CONF_DIR + conf_file
-
- tmp_gdeploy_config_file = ("/tmp/" + os.path.splitext(conf_file)[0] +
- ".conf")
-
- values_to_substitute_in_template = {'server': mnode,
- 'volname': volname}
-
- ret = g.render_template(gdeploy_config_file,
- values_to_substitute_in_template,
- tmp_gdeploy_config_file)
- if not ret:
- g.log.error("Failed to substitute values in %s file"
- % tmp_gdeploy_config_file)
- return False
-
- cmd = "gdeploy -c " + tmp_gdeploy_config_file
- retcode, stdout, stderr = g.run_local(cmd)
- if retcode != 0:
- g.log.error("Failed to execute gdeploy cmd %s for running "
- "refresh config on nfs ganesha volume" % cmd)
- g.log.error("gdeploy console output for running refresh config "
- "on nfs ganesha volume: %s" % stderr)
-
- return False
-
- g.log.info("gdeploy output for running refresh config "
- "on nfs ganesha volume: %s" % stdout)
-
- # Removing the gdeploy conf file from /tmp
- os.remove(tmp_gdeploy_config_file)
- return True
-
-
def update_volume_export_configuration(mnode, volname, config_to_update):
"""Updates volume export configuration and runs
refresh config for the volume.
@@ -718,14 +683,17 @@ def create_nfs_ganesha_cluster(servers, vips):
False(bool): If failed to configure ganesha cluster
"""
# pylint: disable=too-many-return-statements
+ # pylint: disable=too-many-branches
+ # pylint: disable=too-many-statements
ganesha_mnode = servers[0]
- # Configure ports in ganesha servers
- g.log.info("Defining statd service ports")
- ret = configure_ports_on_servers(servers)
- if not ret:
- g.log.error("Failed to set statd service ports on nodes.")
- return False
+ # Configure ports in ganesha servers for RHEL7
+ if is_rhel7(servers):
+ g.log.info("Defining statd service ports")
+ ret = configure_ports_on_servers(servers)
+ if not ret:
+ g.log.error("Failed to set statd service ports on nodes.")
+ return False
# Firewall settings for nfs-ganesha
ret = ganesha_server_firewall_settings(servers)
@@ -803,6 +771,22 @@ def create_nfs_ganesha_cluster(servers, vips):
# Create backup of ganesha-ha.conf file in ganesha_mnode
g.upload(ganesha_mnode, tmp_ha_conf, '/etc/ganesha/')
+ # setsebool ganesha_use_fusefs on
+ cmd = "setsebool ganesha_use_fusefs on"
+ for server in servers:
+ ret, _, _ = g.run(server, cmd)
+ if ret:
+ g.log.error("Failed to 'setsebool ganesha_use_fusefs on' on %",
+ server)
+ return False
+
+ # Verify ganesha_use_fusefs is on
+ _, out, _ = g.run(server, "getsebool ganesha_use_fusefs")
+ if "ganesha_use_fusefs --> on" not in out:
+ g.log.error("Failed to 'setsebool ganesha_use_fusefs on' on %",
+ server)
+ return False
+
# Enabling ganesha
g.log.info("Enable nfs-ganesha")
ret, _, _ = enable_nfs_ganesha(ganesha_mnode)
@@ -816,6 +800,31 @@ def create_nfs_ganesha_cluster(servers, vips):
# pcs status output
_, _, _ = g.run(ganesha_mnode, "pcs status")
+ # pacemaker status output
+ _, _, _ = g.run(ganesha_mnode, "systemctl status pacemaker")
+
+ return True
+
+
+def enable_firewall(servers):
+ """Enables Firewall if not enabled already
+ Args:
+ servers(list): Hostname of ganesha nodes
+ Returns:
+        Status (bool) : True if firewalld is running or was enabled
+                        successfully, False otherwise
+ """
+
+ cmd = "systemctl status firewalld | grep Active"
+ for server in servers:
+ ret, out, _ = g.run(server, cmd)
+ if 'inactive' in out:
+ g.log.info("Firewalld is not running. Enabling Firewalld")
+ for command in ("enable", "start"):
+ ret, out, _ = g.run(server,
+ "systemctl {} firewalld".format(command))
+ if ret:
+ g.log.error("Failed to enable Firewalld on %s", server)
+ return False
return True
@@ -829,9 +838,11 @@ def ganesha_server_firewall_settings(servers):
True(bool): If successfully set the firewall settings
False(bool): If failed to do firewall settings
"""
+ if not enable_firewall(servers):
+ return False
+
services = ['nfs', 'rpc-bind', 'high-availability', 'nlm', 'mountd',
'rquota']
-
ret = add_services_to_firewall(servers, services, True)
if not ret:
g.log.error("Failed to set firewall zone permanently on ganesha nodes")
@@ -903,47 +914,51 @@ def create_nfs_passwordless_ssh(mnode, gnodes, guser='root'):
False(bool): On failure
"""
loc = "/var/lib/glusterd/nfs/"
- mconn_inst = random.randint(20, 100)
- mconn = g.rpyc_get_connection(host=mnode, instance=mconn_inst)
- if not mconn.modules.os.path.isfile('/root/.ssh/id_rsa'):
+ # Check whether key is present
+ cmd = "[ -f /root/.ssh/id_rsa ]"
+ ret, _, _ = g.run(mnode, cmd)
+ if ret:
# Generate key on mnode if not already present
- if not mconn.modules.os.path.isfile('%s/secret.pem' % loc):
+ g.log.info("id_rsa not found")
+ cmd = "[ -f %s/secret.pem ]" % loc
+ ret, _, _ = g.run(mnode, cmd)
+ if ret:
+ g.log.info("Secret.pem file not found. Creating new")
ret, _, _ = g.run(
mnode, "ssh-keygen -f %s/secret.pem -q -N ''" % loc)
- if ret != 0:
+ if ret:
g.log.error("Failed to generate the secret pem file")
return False
g.log.info("Key generated on %s" % mnode)
else:
- mconn.modules.shutil.copyfile("/root/.ssh/id_rsa",
- "%s/secret.pem" % loc)
- g.log.info("Copying the id_rsa.pub to secret.pem.pub")
- mconn.modules.shutil.copyfile("/root/.ssh/id_rsa.pub",
- "%s/secret.pem.pub" % loc)
+ g.log.info("Found existing key")
+    # Copy the .pem and .pub files
+ for file, to_file in (('id_rsa', 'secret.pem'), ('id_rsa.pub',
+ 'secret.pem.pub')):
+ cmd = "cp /root/.ssh/{} {}{}".format(file, loc, to_file)
+ ret, _, err = g.run(mnode, cmd)
+ if ret:
+ g.log.error("Failed to copy {} to {} file {}".format(file,
+ to_file,
+ err))
+ return False
# Create password less ssh from mnode to all ganesha nodes
+ cmd = "cat /root/.ssh/id_rsa.pub"
+ ret, id_rsa, _ = g.run(mnode, cmd, user=guser)
+ if ret:
+ g.log.info("Failed to read key from %s", mnode)
+ return False
for gnode in gnodes:
- gconn_inst = random.randint(20, 100)
- gconn = g.rpyc_get_connection(gnode, user=guser, instance=gconn_inst)
- try:
- glocal = gconn.modules.os.path.expanduser('~')
- gfhand = gconn.builtin.open("%s/.ssh/authorized_keys" % glocal,
- "a")
- with mconn.builtin.open("/root/.ssh/id_rsa.pub", 'r') as fhand:
- for line in fhand:
- gfhand.write(line)
- gfhand.close()
- except Exception as exep:
- g.log.error("Exception occurred while trying to establish "
- "password less ssh from %s@%s to %s@%s. Exception: %s"
- % ('root', mnode, guser, gnode, exep))
+ file = "~/.ssh/authorized_keys"
+ cmd = ("grep -q '{}' {} || echo '{}' >> {}"
+ .format(id_rsa.rstrip(), file, id_rsa.rstrip(), file))
+ ret, _, _ = g.run(gnode, cmd, user=guser)
+ if ret:
+ g.log.info("Failed to add ssh key for %s", gnode)
return False
- finally:
- g.rpyc_close_connection(
- host=gnode, user=guser, instance=gconn_inst)
-
- g.rpyc_close_connection(host=mnode, instance=mconn_inst)
+ g.log.info("Successfully copied ssh key to all Ganesha nodes")
# Copy the ssh key pair from mnode to all the nodes in the Ganesha-HA
# cluster
@@ -957,8 +972,8 @@ def create_nfs_passwordless_ssh(mnode, gnodes, guser='root'):
% (loc, loc, guser, gnode, loc))
ret, _, _ = g.run(mnode, cmd)
if ret != 0:
- g.log.error("Failed to copy the ssh key pair from %s to %s",
- mnode, gnode)
+ g.log.error("Failed to copy the ssh key pair from "
+ "%s to %s", mnode, gnode)
return False
return True
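A usage sketch for the reworked passwordless-ssh helper above; node names are placeholders and guser defaults to 'root':

    mnode = 'master.example.com'                              # placeholder
    gnodes = ['ganesha1.example.com', 'ganesha2.example.com']  # placeholders
    # Generates /var/lib/glusterd/nfs/secret.pem if needed, appends the
    # public key to each gnode's authorized_keys, then copies the key pair.
    assert create_nfs_passwordless_ssh(mnode, gnodes, guser='root')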
@@ -974,7 +989,7 @@ def create_ganesha_ha_conf(hostnames, vips, temp_ha_file):
"""
hosts = ','.join(hostnames)
- with open(temp_ha_file, 'wb') as fhand:
+ with open(temp_ha_file, 'w') as fhand:
fhand.write('HA_NAME="ganesha-ha-360"\n')
fhand.write('HA_CLUSTER_NODES="%s"\n' % hosts)
for (hostname, vip) in zip(hostnames, vips):
@@ -991,7 +1006,6 @@ def cluster_auth_setup(servers):
True(bool): If configuration of cluster services is success
False(bool): If failed to configure cluster services
"""
- result = True
for node in servers:
# Enable pacemaker.service
ret, _, _ = g.run(node, "systemctl enable pacemaker.service")
@@ -1016,13 +1030,15 @@ def cluster_auth_setup(servers):
return False
# Perform cluster authentication between the nodes
+ auth_type = 'cluster' if is_rhel7(servers) else 'host'
for node in servers:
- ret, _, _ = g.run(node, "pcs cluster auth %s -u hacluster -p "
- "hacluster" % ' '.join(servers))
- if ret != 0:
- g.log.error("pcs cluster auth command failed on %s", node)
- result = False
- return result
+ ret, _, _ = g.run(node, "pcs %s auth %s -u hacluster -p hacluster"
+ % (auth_type, ' '.join(servers)))
+ if ret:
+ g.log.error("pcs %s auth command failed on %s",
+ auth_type, node)
+ return False
+ return True
def configure_ports_on_servers(servers):
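The version-dependent pcs call above collapses to one of two CLI forms; a sketch of the command construction, assuming the usual mapping of pcs 0.9 on RHEL 7 and pcs 0.10 on RHEL 8 (node names are placeholders):

    # RHEL 7 (pcs 0.9):  pcs cluster auth <nodes> -u hacluster -p hacluster
    # RHEL 8 (pcs 0.10): pcs host auth <nodes> -u hacluster -p hacluster
    servers = ['node1.example.com', 'node2.example.com']   # placeholders
    auth_type = 'cluster' if is_rhel7(servers) else 'host'
    cmd = ("pcs %s auth %s -u hacluster -p hacluster"
           % (auth_type, ' '.join(servers)))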
diff --git a/glustolibs-gluster/glustolibs/gluster/peer_ops.py b/glustolibs-gluster/glustolibs/gluster/peer_ops.py
index 55385e8a7..778953c33 100644
--- a/glustolibs-gluster/glustolibs/gluster/peer_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/peer_ops.py
@@ -20,10 +20,10 @@
"""
-from glusto.core import Glusto as g
import re
-import time
import socket
+from time import sleep
+from glusto.core import Glusto as g
try:
import xml.etree.cElementTree as etree
except ImportError:
@@ -139,7 +139,7 @@ def peer_probe_servers(mnode, servers, validate=True, time_delay=10):
Returns:
bool: True on success and False on failure.
"""
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
else:
servers = servers[:]
@@ -166,7 +166,7 @@ def peer_probe_servers(mnode, servers, validate=True, time_delay=10):
# Validating whether peer is in connected state after peer probe
if validate:
- time.sleep(time_delay)
+ sleep(time_delay)
if not is_peer_connected(mnode, servers):
g.log.error("Validation after peer probe failed.")
return False
@@ -195,7 +195,7 @@ def peer_detach_servers(mnode, servers, force=False, validate=True,
Returns:
bool: True on success and False on failure.
"""
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
else:
servers = servers[:]
@@ -212,7 +212,7 @@ def peer_detach_servers(mnode, servers, force=False, validate=True,
# Validating whether peer detach is successful
if validate:
- time.sleep(time_delay)
+ sleep(time_delay)
nodes_in_pool = nodes_from_pool_list(mnode)
rc = True
for server in servers:
@@ -367,7 +367,7 @@ def is_peer_connected(mnode, servers):
bool : True on success (peer in cluster and connected), False on
failure.
"""
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
else:
servers = servers[:]
@@ -421,3 +421,32 @@ def is_peer_connected(mnode, servers):
g.log.info("Servers: '%s' are all 'Peer in Cluster' and 'Connected' "
"state.", servers)
return True
+
+
+def wait_for_peers_to_connect(mnode, servers, wait_timeout=30):
+ """Checks nodes are peer connected with timeout.
+
+ Args:
+ mnode: node on which cmd has to be executed.
+ servers (str|list): A server|List of server hosts on which peer
+ status has to be checked.
+        wait_timeout (int): timeout in seconds to retry the
+            connected-state check on each node.
+
+ Returns:
+ bool : True if all the peers are connected.
+ False otherwise.
+
+ """
+ if not isinstance(servers, list):
+ servers = [servers]
+
+ count = 0
+ while count <= wait_timeout:
+ ret = is_peer_connected(mnode, servers)
+ if ret:
+ g.log.info("peers in connected state: %s", servers)
+ return True
+ sleep(1)
+ count += 1
+ g.log.error("Peers are not in connected state: %s", servers)
+ return False
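A usage sketch for the new wait helper, assuming the pool has already been probed (hostnames are placeholders):

    from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
    mnode = 'server0.example.com'
    servers = ['server1.example.com', 'server2.example.com']
    # Re-checks is_peer_connected() once per second, up to wait_timeout.
    if not wait_for_peers_to_connect(mnode, servers, wait_timeout=60):
        raise RuntimeError("peers did not reach 'Peer in Cluster' state")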
diff --git a/glustolibs-gluster/glustolibs/gluster/quota_libs.py b/glustolibs-gluster/glustolibs/gluster/quota_libs.py
index 5da4d8698..7462b69c3 100755
--- a/glustolibs-gluster/glustolibs/gluster/quota_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/quota_libs.py
@@ -54,7 +54,7 @@ def quota_validate(mnode, volname, path, **kwargs):
listinfo = quotalist[path]
ret = True
- for key, value in kwargs.iteritems():
+ for key, value in kwargs.items():
if key and listinfo[key] != value:
g.log.error("%s = %s does not match with expected value %s",
key, str(listinfo[key]), str(value))
@@ -97,7 +97,7 @@ def quota_fetch_daemon_pid(nodes):
"""
quotad_pids = {}
_rc = True
- if isinstance(nodes, str):
+ if not isinstance(nodes, list):
nodes = [nodes]
cmd = r"pgrep -f quotad | grep -v ^$$\$"
g.log.info("Executing cmd: %s on node %s" % (cmd, nodes))
diff --git a/glustolibs-gluster/glustolibs/gluster/rebalance_ops.py b/glustolibs-gluster/glustolibs/gluster/rebalance_ops.py
index 1c8c10a4b..1011c89c6 100644
--- a/glustolibs-gluster/glustolibs/gluster/rebalance_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/rebalance_ops.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2015-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -401,3 +401,76 @@ def get_remove_brick_status(mnode, volname, bricks_list):
else:
remove_brick_status[element.tag] = element.text
return remove_brick_status
+
+
+def wait_for_remove_brick_to_complete(mnode, volname, bricks_list,
+ timeout=1200):
+ """Waits for the remove brick to complete
+
+ Args:
+ mnode (str): Node on which command has to be executed.
+ volname (str): volume name
+        bricks_list (list): List of bricks participating in
+ remove-brick operation
+
+ Kwargs:
+ timeout (int): timeout value in seconds to wait for remove brick
+ to complete
+
+ Returns:
+ True on success, False otherwise
+
+ Examples:
+ >>> wait_for_remove_brick_to_complete("abc.com", "testvol")
+ """
+
+ count = 0
+ while count < timeout:
+ status_info = get_remove_brick_status(mnode, volname, bricks_list)
+ if status_info is None:
+ return False
+ status = status_info['aggregate']['statusStr']
+ if status == 'completed':
+ g.log.info("Remove brick is successfully completed in %s sec",
+ count)
+ return True
+ elif status == 'failed':
+ g.log.error(" Remove brick failed on one or more nodes. "
+ "Check remove brick status for more details")
+ return False
+ else:
+ time.sleep(10)
+ count += 10
+ g.log.error("Remove brick operation has not completed. "
+ "Wait timeout is %s" % count)
+ return False
+
+
+def set_rebalance_throttle(mnode, volname, throttle_type='normal'):
+ """Sets rebalance throttle
+
+ Args:
+ mnode (str): Node on which cmd has to be executed.
+ volname (str): volume name
+
+ Kwargs:
+ throttle_type (str): throttling type (lazy|normal|aggressive)
+ Defaults to 'normal'
+
+ Returns:
+ tuple: Tuple containing three elements (ret, out, err).
+ The first element 'ret' is of type 'int' and is the return value
+ of command execution.
+
+ The second element 'out' is of type 'str' and is the stdout value
+ of the command execution.
+
+ The third element 'err' is of type 'str' and is the stderr value
+ of the command execution.
+
+ Example:
+ set_rebalance_throttle(mnode, testvol, throttle_type='aggressive')
+ """
+ cmd = ("gluster volume set {} rebal-throttle {}".format
+ (volname, throttle_type))
+ return g.run(mnode, cmd)
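A sketch combining the two helpers added above; volume, host and brick names are placeholders:

    from glustolibs.gluster.rebalance_ops import (
        set_rebalance_throttle, wait_for_remove_brick_to_complete)
    mnode, volname = 'server0.example.com', 'testvol'
    ret, _, err = set_rebalance_throttle(mnode, volname,
                                         throttle_type='aggressive')
    # bricks_list must match the bricks given to 'remove-brick ... start'
    bricks_list = ['server1.example.com:/bricks/brick1/testvol_brick0']
    if not wait_for_remove_brick_to_complete(mnode, volname, bricks_list):
        raise RuntimeError("remove-brick did not complete in time")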
diff --git a/glustolibs-gluster/glustolibs/gluster/samba_libs.py b/glustolibs-gluster/glustolibs/gluster/samba_libs.py
index 3051579ab..5c0f20a52 100644
--- a/glustolibs-gluster/glustolibs/gluster/samba_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/samba_libs.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -17,7 +17,8 @@
"""
Description: Library for samba operations.
"""
-
+import re
+import time
from glusto.core import Glusto as g
from glustolibs.gluster.volume_libs import is_volume_exported
from glustolibs.gluster.mount_ops import GlusterMount
@@ -47,7 +48,6 @@ def start_smb_service(mnode):
g.log.error("Unable to start the smb service")
return False
g.log.info("Successfully started smb service")
-
return True
@@ -115,7 +115,6 @@ def stop_smb_service(mnode):
g.log.error("Unable to stop the smb service")
return False
g.log.info("Successfully stopped smb service")
-
return True
@@ -141,7 +140,6 @@ def list_smb_shares(mnode):
for line in out:
if 'gluster-' in line:
smb_shares_list.append(line.split(" ")[0].strip())
-
return smb_shares_list
@@ -297,7 +295,7 @@ def share_volume_over_smb(mnode, volname, smb_users_info):
g.log.error("Failed to enable mounting volumes using SMB")
return False
g.log.info("Successfully enabled mounting volumes using SMV for the "
- "smbusers: %s", str(smb_users_info.keys()))
+ "smbusers: %s", str(list(smb_users_info.keys())))
# Verify if volume is shared
ret = is_volume_exported(mnode, volname, "smb")
@@ -305,5 +303,412 @@ def share_volume_over_smb(mnode, volname, smb_users_info):
g.log.info("Volume %s is not exported as 'cifs/smb' share", volname)
return False
g.log.info("Volume %s is exported as 'cifs/smb' share", volname)
+ return True
+
+
+def is_ctdb_service_running(mnode):
+ """Check if ctdb services is running on node
+
+ Args:
+ mnode (str): Node on which ctdb service status has to be verified.
+
+ Returns:
+ bool: True if ctdb service is running. False otherwise.
+ """
+ g.log.info("Check if CTDB service is running on %s", mnode)
+ ret, out, _ = g.run(mnode, "service ctdb status")
+ if "Active: active (running)" in out:
+ return True
+ return False
+
+
+def stop_ctdb_service(mnode):
+ """Stop ctdb service on the specified node.
+
+ Args:
+ mnode (str): Node on which ctdb service has to be stopped.
+
+ Returns:
+ bool: True on successfully stopping ctdb service. False otherwise.
+ """
+ g.log.info("Stopping CTDB Service on %s", mnode)
+ # Stop ctdb service
+ ret, _, _ = g.run(mnode, "service ctdb stop")
+ if ret != 0:
+ g.log.error("Unable to stop the ctdb service")
+ return False
+ g.log.info("Successfully stopped ctdb service")
+ return True
+
+
+def start_ctdb_service(mnode):
+ """Start ctdb service on the specified node.
+
+ Args:
+ mnode (str): Node on which ctdb service has to be started
+
+ Returns:
+ bool: True on successfully starting ctdb service. False otherwise.
+ """
+ g.log.info("Starting CTDB Service on %s", mnode)
+
+ # Start ctdb service
+ ret, _, _ = g.run(mnode, "service ctdb start")
+ if ret != 0:
+ g.log.error("Unable to start the ctdb service")
+ return False
+ g.log.info("Successfully started ctdb service")
return True
+
+
+def start_nmb_service(mnode):
+ """Start nmb service on the specified node.
+
+ Args:
+ mnode (str): Node on which nmb service has to be started
+
+ Returns:
+ bool: True on successfully starting nmb service. False otherwise.
+ """
+ g.log.info("Starting nmb Service on %s", mnode)
+
+ # Start nmb service
+ ret, _, _ = g.run(mnode, "service nmb start")
+ if ret != 0:
+ g.log.error("Unable to start the nmb service")
+ return False
+ g.log.info("Successfully started nmb service")
+
+ return True
+
+
+def is_nmb_service_running(mnode):
+ """Check if nmb service is running on node
+
+ Args:
+ mnode (str): Node on which nmb service status has to be verified.
+
+ Returns:
+ bool: True if nmb service is running. False otherwise.
+ """
+ g.log.info("Check if nmb service is running on %s", mnode)
+ ret, out, _ = g.run(mnode, "service nmb status")
+ if "Active: active (running)" in out:
+ return True
+ return False
+
+
+def start_winbind_service(mnode):
+ """Start winbind service on the specified node.
+
+ Args:
+ mnode (str): Node on which winbind service has to be started
+
+ Returns:
+ bool: True on successfully starting winbind service. False otherwise.
+ """
+ g.log.info("Starting winbind Service on %s", mnode)
+
+ # Start winbind service
+ ret, _, _ = g.run(mnode, "service winbind start")
+ if ret != 0:
+ g.log.error("Unable to start the winbind service")
+ return False
+ g.log.info("Successfully started winbind service")
+ return True
+
+
+def is_winbind_service_running(mnode):
+ """Check if winbind service is running on node
+
+ Args:
+ mnode (str): Node on which winbind service status has to be verified.
+
+ Returns:
+ bool: True if winbind service is running. False otherwise.
+ """
+ g.log.info("Check if winbind service is running on %s", mnode)
+ ret, out, _ = g.run(mnode, "service winbind status")
+ if "Active: active (running)" in out:
+ return True
+ return False
+
+
+def samba_ad(all_servers, netbios_name, domain_name, ad_admin_user,
+ ad_admin_passwd, idmap_range=None):
+ """Active Directory Integration
+
+ Args:
+ all_servers [list]: List of all servers where AD needs to be setup.
+ netbios_name (str): Provide netbios name
+ domain_name (str): Provide domain name
+ ad_admin_user (str): Provide admin user
+ ad_admin_passwd (str): Provide admin password
+ idmap_range (str): Provide idmap range
+
+ Returns:
+ bool: True on successfully setting up AD. False otherwise.
+ """
+ g.log.info("Setting up AD Integration on %s", all_servers)
+ mnode = all_servers[0]
+ if netbios_name == '':
+ g.log.error("netbios name is missing")
+ return False
+ # Validate netbios name
+ if len(netbios_name) < 1 or len(netbios_name) > 15:
+ g.log.error("The NetBIOS name must be 1 to 15 characters in length.")
+ return False
+ validate_netbios_name = re.compile(r"(^[A-Za-z\d_!@#$%^()\-'"
+ r"{}\.~]{1,15}$)")
+ isnetbiosname = validate_netbios_name.match(netbios_name)
+ if isnetbiosname is None:
+ g.log.error("The NetBIOS name entered is invalid.")
+ return False
+
+ if domain_name == '':
+ g.log.error("domain name is missing")
+ return False
+ validate_domain_name = re.compile(r"^(?=.{1,253}$)(?!.*\.\..*)(?!\..*)"
+ r"([a-zA-Z0-9-]{,63}\.)"
+ r"{,127}[a-zA-Z0-9-]{1,63}$")
+ isdomain = validate_domain_name.match(domain_name)
+ if isdomain is None:
+ g.log.error("The AD domain name string is invalid")
+ return False
+ # ad_workgroup should be in capital letters
+ ad_workgroup = domain_name.split(".")[0].upper()
+
+ if idmap_range is None:
+ idmap_range = '1000000-1999999'
+ else:
+ try:
+ idmap_range_start = int(idmap_range.split("-")[0])
+ idmap_range_end = int(idmap_range.split("-")[1])
+ except Exception as e:
+ g.log.error("Invalid format.Use \'m-n\' for the range %s", str(e))
+ return False
+ if int(idmap_range_start) < 10000:
+ g.log.error("Please select a starting value 10000 or above")
+ return False
+ # Maximum UIDs is 2^32
+ elif int(idmap_range_end) > 4294967296:
+ g.log.error("Please select an ending value 4294967296 or below")
+ return False
+
+ # Run the below in all servers
+ for node in all_servers:
+ smb_conf_file = "/etc/samba/smb.conf"
+ add_netbios_name = r"sed -i '/^\[global\]/a netbios name = %s' %s"
+ ret, _, err = g.run(node, add_netbios_name
+ % (netbios_name, smb_conf_file))
+ if ret != 0:
+ g.log.error("Failed to set netbios name parameters in smb.conf "
+ "file due to %s", str(err))
+ return False
+ add_realm = r"sed -i '/^\[global\]/a realm = %s' %s"
+ ret, _, err = g.run(node, add_realm % (domain_name, smb_conf_file))
+ if ret != 0:
+ g.log.error("Failed to set realm parameters in smb.conf file "
+ "due to %s", str(err))
+ return False
+ add_idmap_range = (r"sed -i '/^\[global\]/a idmap config \* : "
+ "range = %s' %s")
+ ret, _, err = g.run(node, add_idmap_range
+ % (idmap_range, smb_conf_file))
+ if ret != 0:
+ g.log.error("Failed to set idmap range parameters in smb.conf "
+ "file due to %s", str(err))
+ return False
+ add_idmap_bcknd = (r"sed -i '/^\[global\]/a idmap config \* : "
+ "backend = tdb' %s")
+ ret, _, err = g.run(node, add_idmap_bcknd % smb_conf_file)
+ if ret != 0:
+ g.log.error("Failed to set idmap bcknd parameters in smb.conf "
+ "file due to %s", str(err))
+ return False
+ add_workgroup = ("sed -i '/^\\tworkgroup = "
+ "MYGROUP/c\\\tworkgroup = %s' %s")
+ ret, _, err = g.run(node, add_workgroup
+ % (ad_workgroup, smb_conf_file))
+ if ret != 0:
+ g.log.error("Failed to set workgroup parameters in smb.conf file "
+ " due to %s", str(add_workgroup))
+ return False
+ add_security = "sed -i '/^\\tsecurity = user/c\\\tsecurity = ads' %s"
+ ret, _, err = g.run(node, add_security % smb_conf_file)
+ if ret != 0:
+ g.log.error("Failed to set security parameters in smb.conf "
+ "file due to %s", str(err))
+ return False
+
+ # Verifying the Samba AD Configuration running testparm
+ smb_ad_list = ["netbios name = "+netbios_name,
+ "workgroup = "+ad_workgroup,
+ "realm = " + domain_name.upper(), "security = ADS",
+ "idmap config * : backend = tdb", "idmap config * "
+ ": range = "+str(idmap_range)]
+ testparm_cmd = "echo -e "+'"'+"\n"+'"'+" | testparm -v"
+ ret, out, _ = g.run(node, testparm_cmd)
+        if ret != 0:
+            g.log.error("Testparm command failed to execute")
+            return False
+        g.log.info("Testparm command executed successfully")
+ for smb_options in smb_ad_list:
+ smb_options = smb_options.strip()
+ if smb_options not in str(out).strip():
+ g.log.info("Option %s entry present not in testparm" % smb_options)
+ return False
+ g.log.info("All required samba ad options set in smb.conf")
+
+ if ad_admin_user == '':
+ ad_admin_user = 'Administrator'
+
+ # nsswitch Configuration
+ # Run these in all servers
+ for node in all_servers:
+ winbind_passwd = ("sed -i '/^passwd: files sss/cpasswd: "
+ "files winbind' /etc/nsswitch.conf")
+ ret, _, err = g.run(node, winbind_passwd)
+ g.log.info("MASTER %s" % str(ret))
+ if ret != 0:
+ g.log.error("Failed to set winbind passwd parameters in "
+ "nsswitch.conf file due to %s", str(err))
+ return False
+ winbind_group = ("sed -i '/^group: files sss/cgroup: "
+ "files winbind' /etc/nsswitch.conf")
+ ret, _, err = g.run(node, winbind_group)
+ if ret != 0:
+ g.log.error("Failed to set winbind group parameters "
+ "in nsswitch.conf file due to %s", str(err))
+ return False
+
+ # Disable samba & winbind scripts
+ samba_script = "/etc/ctdb/events.d/50.samba"
+ winbind_script = "/etc/ctdb/events.d/49.winbind"
+ ret, _, err = g.run(node, "chmod -x " + samba_script)
+ if ret != 0:
+ g.log.error("Failed to disable samba script as %s", str(err))
+ return False
+ ret, _, err = g.run(node, "chmod -x " + winbind_script)
+ if ret != 0:
+ g.log.error("Failed to disable winbind script as %s", str(err))
+ return False
+ # stop ctdb if already running
+ ret = is_ctdb_service_running(node)
+ if ret:
+ ret = stop_ctdb_service(node)
+ if not ret:
+ return ret
+ ret = start_ctdb_service(node)
+ ret = is_ctdb_service_running(node)
+ if ret:
+ ret = is_smb_service_running(node)
+ if ret:
+ g.log.error("Samba services still running even after "
+ "samba script is disable")
+ return False
+ ret = is_winbind_service_running(node)
+ if ret:
+ g.log.error("Winbind services still running even after "
+ "winbind script is disable")
+ return False
+
+ # Join Active Directory Domain
+ # One node only
+ net_join_cmd = "net ads join -U "
+ success_out = ("Joined '" + netbios_name +
+ "' to dns domain '" + domain_name + "'")
+ ret, out, err = g.run(mnode, net_join_cmd + ad_admin_user +
+ "%" + ad_admin_passwd)
+ if success_out not in str(out).strip():
+ g.log.error("net ads join failed %s", str(err))
+ return False
+ g.log.info("Net ads join success")
+
+ # RUN THESE IN ALL NODES
+ for node in all_servers:
+ ret = start_nmb_service(node)
+ ret = is_nmb_service_running(node)
+ if not ret:
+ g.log.error("Failed to start nmb service")
+ return False
+ ret, _, err = g.run(node, "chmod +x " + samba_script)
+ if ret != 0:
+ g.log.error("Failed to enable samba script as %s", str(err))
+ return False
+ ret, _, err = g.run(node, "chmod +x " + winbind_script)
+ if ret != 0:
+ g.log.error("Failed to enable winbind script as %s", str(err))
+ return False
+ ret = stop_ctdb_service(node)
+ if not ret:
+ return False
+ ret = start_ctdb_service(node)
+ ret = is_ctdb_service_running(node)
+ if ret:
+ count = 0
+ while count < 95:
+ ret = is_smb_service_running(node)
+ if ret:
+ break
+ time.sleep(2)
+ count += 1
+ if not ret:
+ g.log.error("Samba services not started running even "
+ "after samba "
+ "script is enabled")
+ return False
+ ret = start_winbind_service(node)
+ ret = is_winbind_service_running(node)
+ if not ret:
+ g.log.error("Winbind services not running even after winbind "
+ "script is enabled")
+ return False
+
+ # Verify/Test Active Directory and Services
+ ret, out, err = g.run(mnode, "net ads testjoin")
+ if "Join is OK" not in str(out).strip():
+ g.log.error("net ads join validation failed %s", str(err))
+ return False
+ # Verify if winbind is operating correctly by executing the following steps
+ ret, out, err = g.run(mnode, "wbinfo -t")
+ if "succeeded" not in str(out):
+ g.log.error("wbinfo -t command failed, ad setup is not correct %s",
+ str(err))
+ return False
+
+ # Execute the following command to resolve the given name to a Windows SID
+ sid_cmd = ("wbinfo --name-to-sid '" + ad_workgroup +
+ "\\" + ad_admin_user + "'")
+ ret, out, err = g.run(mnode, sid_cmd)
+ if "-500 SID_USER" not in str(out):
+ g.log.error("Failed to execute wbinfo --name-to-sid command %s",
+ str(err))
+ return False
+ sid = str(out).split('SID')[0].strip()
+
+ # Execute the following command to verify authentication:
+ wbinfo_auth_cmd = ("wbinfo -a '" + ad_workgroup + "\\" + ad_admin_user +
+ "%" + ad_admin_passwd + "'")
+ ret, out, err = g.run(mnode, wbinfo_auth_cmd)
+ if "password authentication succeeded" not in str(out).strip():
+ g.log.error("winbind does nothave authentication to acess "
+ "ad server %s", str(err))
+ return False
+
+ # Execute the following command to verify if the id-mapping is
+ # working properly
+ idmap_range_start = str(idmap_range.split("-")[0])
+ id_map_cmd = "wbinfo --sid-to-uid " + sid
+ ret, out, err = g.run(mnode, id_map_cmd)
+ if str(out).strip() != str(idmap_range_start):
+ g.log.error("id mapping is not correct %s", str(err))
+ return False
+ # getent password validation
+ getent_cmd = "getent passwd '" + ad_workgroup + "\\" + ad_admin_user + "'"
+ getent_expected = "/home/" + ad_workgroup + "/" + ad_admin_user.lower()
+ ret, out, err = g.run(mnode, getent_cmd)
+ if getent_expected not in str(out).strip():
+ g.log.error("winbind Name Service Switch failed %s", str(err))
+ return False
+ return True
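A usage sketch for the AD join helper above; every value is a placeholder, and in practice the admin credentials would come from the test configuration:

    from glusto.core import Glusto as g
    all_servers = ['smb1.example.com', 'smb2.example.com']   # placeholders
    ok = samba_ad(all_servers,
                  netbios_name='GLUSTERSMB',
                  domain_name='addom.example.com',
                  ad_admin_user='Administrator',
                  ad_admin_passwd='********',                # placeholder
                  idmap_range='1000000-1999999')             # the default
    if not ok:
        g.log.error("AD integration failed on %s", all_servers)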
diff --git a/glustolibs-gluster/glustolibs/gluster/shared_storage_ops.py b/glustolibs-gluster/glustolibs/gluster/shared_storage_ops.py
index baf7de77a..b1cf7030a 100644
--- a/glustolibs-gluster/glustolibs/gluster/shared_storage_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/shared_storage_ops.py
@@ -19,9 +19,10 @@ Description : Modules for enabling and disabling
              shared storage
"""
-import time
+from time import sleep
from glusto.core import Glusto as g
-from glustolibs.gluster.volume_ops import set_volume_options
+from glustolibs.gluster.volume_ops import (set_volume_options,
+ get_volume_list)
def enable_shared_storage(mnode):
@@ -46,7 +47,7 @@ def enable_shared_storage(mnode):
def disable_shared_storage(mnode):
"""
- Enables the shared storage
+ Disables the shared storage
Args:
mnode (str) : Node on which command is to be executed
@@ -60,19 +61,19 @@ def disable_shared_storage(mnode):
if not ret:
g.log.error("Failed to disable shared storage")
return False
- g.log.info("Successfully disabled shared storage option")
+ g.log.info("Successfully disabled shared storage")
return True
def is_shared_volume_mounted(mnode):
"""
- Checks shared volume mounted after enabling it
+ Checks if shared storage volume is mounted
Args:
mnode (str) : Node on which command is to be executed
Returns:
- bool : True if successfully mounted shared volume.
+ bool : True if shared storage volume is mounted.
False otherwise.
"""
halt = 20
@@ -81,36 +82,62 @@ def is_shared_volume_mounted(mnode):
while counter < halt:
_, out, _ = g.run(mnode, "df -h")
if path in out:
- g.log.info("Shared volume mounted successfully")
+ g.log.info("Shared storage volume is mounted")
return True
else:
- time.sleep(2)
+ sleep(2)
counter = counter + 2
- g.log.error("Shared volume not mounted")
+ g.log.info("Shared storage volume not mounted")
return False
-def is_shared_volume_unmounted(mnode):
+def check_gluster_shared_volume(mnode, present=True):
"""
- Checks shared volume unmounted after disabling it
+    Check whether the gluster shared volume is present or absent.
Args:
mnode (str) : Node on which command is to be executed
+ present (bool) : True if you want to check presence
+ False if you want to check absence.
Returns:
- bool : True if successfully unmounted shared volume.
+        bool : True if the shared volume is in the expected state.
False otherwise.
"""
- halt = 20
- counter = 0
- path = "/run/gluster/shared_storage"
- while counter < halt:
- _, out, _ = g.run(mnode, "df -h")
- if path not in out:
- g.log.info("Shared volume unmounted successfully")
- return True
- else:
- time.sleep(2)
+ if present:
+ halt = 20
+ counter = 0
+ g.log.info("Wait for some seconds to create "
+ "gluster_shared_storage volume.")
+
+ while counter < halt:
+ vol_list = get_volume_list(mnode)
+ if "gluster_shared_storage" in vol_list:
+ return True
+ else:
+ g.log.info("Wait for some seconds, since it takes "
+ "time to create gluster_shared_storage "
+ "volume.")
+ sleep(2)
+ counter = counter + 2
+
+ return False
+
+ else:
+ halt = 20
+ counter = 0
+ g.log.info("Wait for some seconds to delete "
+ "gluster_shared_storage volume.")
+
+ while counter < halt:
+ vol_list = get_volume_list(mnode)
+ if "gluster_shared_storage" not in vol_list:
+ return True
+ else:
+ g.log.info("Wait for some seconds, since it takes "
+ "time to delete gluster_shared_storage "
+ "volume.")
+ sleep(2)
counter = counter + 2
- g.log.error("Shared volume not unmounted")
- return False
+
+ return False
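A sketch of the intended call pattern around enabling shared storage (node name is a placeholder):

    from glustolibs.gluster.shared_storage_ops import (
        enable_shared_storage, check_gluster_shared_volume,
        is_shared_volume_mounted)
    mnode = 'server0.example.com'
    if enable_shared_storage(mnode):
        # Both checks poll for up to ~20 seconds.
        assert check_gluster_shared_volume(mnode, present=True)
        assert is_shared_volume_mounted(mnode)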
diff --git a/glustolibs-gluster/glustolibs/gluster/snap_ops.py b/glustolibs-gluster/glustolibs/gluster/snap_ops.py
index 2ca5688b5..0fba7771b 100644
--- a/glustolibs-gluster/glustolibs/gluster/snap_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/snap_ops.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2015-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -304,7 +304,7 @@ def get_snap_status_by_snapname(mnode, snapname):
return None
-def get_snap_status_by_volname(mnode, volname):
+def snap_status_by_volname(mnode, volname):
"""Parse the output of 'gluster snapshot status' command
for the given volume.
@@ -313,59 +313,18 @@ def get_snap_status_by_volname(mnode, volname):
         volname (str): volume name
Returns:
- NoneType: None if command execution fails, parse errors.
- list: list of dicts on success.
-
- Examples:
- >>> get_snap_status_by_volname('abc.lab.eng.xyz.com',
- 'testvol')
- [{'volCount': '1', 'volume': {'brick': [{'path': '10.70.47.11:
- testvol_brick0', 'pid': '26747', 'lvUsage': '3.52', 'volumeGroup':
- 'RHS_vg0', 'lvSize': '9.95g'}, {'path': '10.70.47.16:/testvol_brick1',
- 'pid': '25497', 'lvUsage': '3.52', 'volumeGroup': 'RHS_vg0',
- 'lvSize': '9.95g'}], 'brickCount': '2'}, 'name': 'snap2', 'uuid':
- '56a39a92-c339-47cc-a8b2-9e54bb2a6324'}, {'volCount': '1', 'volume':
- {'brick': [{'path': '10.70.47.11:testvol_next_brick0', 'pid': '26719',
- 'lvUsage': '4.93', 'volumeGroup': 'RHS_vg1', 'lvSize': '9.95g'}],
- 'brickCount': '1'}, 'name': 'next_snap1',
- 'uuid': 'dcf0cd31-c0db-47ad-92ec-f72af2d7b385'}]
- """
-
- cmd = "gluster snapshot status volume %s --xml" % volname
- ret, out, _ = g.run(mnode, cmd)
- if ret != 0:
- g.log.error("Failed to execute 'snapshot status' on node %s. "
- "Hence failed to get the snapshot status.", mnode)
- return None
-
- try:
- root = etree.XML(out)
- except etree.ParseError:
- g.log.error("Failed to parse the gluster snapshot "
- "status xml output.")
- return None
+ tuple: Tuple containing three elements (ret, out, err).
+ The first element 'ret' is of type 'int' and is the return value
+ of command execution.
- snap_status_list = []
- for snap in root.findall("snapStatus/snapshots/snapshot"):
- snap_status = {}
- for element in snap.getchildren():
- if element.tag == "volume":
- status = {}
- status["brick"] = []
- for elmt in element.getchildren():
- if elmt.tag == "brick":
- brick_info = {}
- for el in elmt.getchildren():
- brick_info[el.tag] = el.text
- status["brick"].append(brick_info)
- else:
- status[elmt.tag] = elmt.text
+ The second element 'out' is of type 'str' and is the stdout value
+ of the command execution.
- snap_status[element.tag] = status
- else:
- snap_status[element.tag] = element.text
- snap_status_list.append(snap_status)
- return snap_status_list
+ The third element 'err' is of type 'str' and is the stderr value
+ of the command execution.
+ """
+ cmd = "gluster snapshot status volume %s" % volname
+ return g.run(mnode, cmd)
def snap_info(mnode, snapname="", volname=""):
@@ -502,7 +461,7 @@ def get_snap_info_by_volname(mnode, volname):
Args:
mnode (str): Node on which command has to be executed.
- volname (str): snapshot name
+ volname (str): volume name
Returns:
NoneType: None if command execution fails, parse errors.
@@ -593,12 +552,17 @@ def snap_list(mnode):
return g.run(mnode, cmd)
-def get_snap_list(mnode):
+def get_snap_list(mnode, volname=""):
"""Parse the output of 'gluster snapshot list' command.
+ If a volname is provided then the output will be specific
+ to that volume.
Args:
mnode (str): Node on which command has to be executed.
+ Kwargs:
+ volname (str): volume name
+
Returns:
NoneType: None if command execution fails, parse errors.
list: list of snapshots on success.
@@ -608,7 +572,8 @@ def get_snap_list(mnode):
['snap1', 'snap2']
"""
- ret, out, _ = g.run(mnode, "gluster snapshot list --xml")
+ cmd = "gluster snapshot list %s --xml" % volname
+ ret, out, _ = g.run(mnode, cmd)
if ret != 0:
g.log.error("Failed to execute 'snapshot list' on node %s. "
"Hence failed to get the snapshot list.", mnode)
@@ -750,7 +715,7 @@ def set_snap_config(mnode, option, volname=None):
volname = ""
cmd = ("gluster snapshot config %s %s %s --mode=script"
- % (volname, option.keys()[0], option.values()[0]))
+ % (volname, list(option.keys())[0], list(option.values())[0]))
return g.run(mnode, cmd)
@@ -894,3 +859,29 @@ def snap_deactivate(mnode, snapname):
cmd = "gluster snapshot deactivate %s --mode=script" % snapname
return g.run(mnode, cmd)
+
+
+def terminate_snapd_on_node(mnode):
+ """Terminate snapd on the specified node
+
+ Args:
+ mnode(str):node on which commands has to be executed
+
+ Returns:
+ tuple: Tuple containing three elements (ret, out, err).
+ The first element 'ret' is of type 'int' and is the return value
+ of command execution.
+
+ The second element 'out' is of type 'str' and is the stdout value
+ of the command execution.
+
+ The third element 'err' is of type 'str' and is the stderr value
+ of the command execution.
+ """
+ cmd = "ps aux| grep -m1 snapd | awk '{print $2}'"
+ _, out, _ = g.run(mnode, cmd)
+ if out is None:
+ g.log.error("Failed to get the snapd PID using command %s", cmd)
+ return None
+ cmd = "kill -9 %s" % out
+ return g.run(mnode, cmd)
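A sketch exercising the volume-scoped snapshot helpers added above (names are placeholders):

    from glustolibs.gluster.snap_ops import (
        get_snap_list, snap_status_by_volname)
    mnode, volname = 'server0.example.com', 'testvol'
    snaps = get_snap_list(mnode, volname=volname)   # parsed snapshot names
    ret, out, _ = snap_status_by_volname(mnode, volname)  # raw CLI output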
diff --git a/glustolibs-gluster/glustolibs/gluster/snap_scheduler.py b/glustolibs-gluster/glustolibs/gluster/snap_scheduler.py
index 4493cf40f..f0ccbf547 100644
--- a/glustolibs-gluster/glustolibs/gluster/snap_scheduler.py
+++ b/glustolibs-gluster/glustolibs/gluster/snap_scheduler.py
@@ -36,7 +36,7 @@ def scheduler_init(servers):
scheduler_init("abc.com")
"""
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
cmd = "snap_scheduler.py init"
diff --git a/glustolibs-gluster/glustolibs/gluster/ssl_ops.py b/glustolibs-gluster/glustolibs/gluster/ssl_ops.py
deleted file mode 100644
index 9ce7c08a5..000000000
--- a/glustolibs-gluster/glustolibs/gluster/ssl_ops.py
+++ /dev/null
@@ -1,225 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-"""
- Description: Module for creating ssl machines for
- validating basic ssl cases
-"""
-
-from StringIO import StringIO
-from glusto.core import Glusto as g
-
-
-def create_ssl_machine(servers, clients):
- """Following are the steps to create ssl machines:
- - Stop glusterd on all servers
- - Run: openssl genrsa -out /etc/ssl/glusterfs.key 2048
- - Run: openssl req -new -x509 -key /etc/ssl/glusterfs.key
- -subj "/CN=ip's" -days 365 -out /etc/ssl/glusterfs.pem
- - copy glusterfs.pem files into glusterfs.ca from all
- the nodes(servers+clients) to all the servers
- - touch /var/lib/glusterd/secure-access
- - Start glusterd on all servers
- Args:
- servers: List of servers
- clients: List of clients
-
- Returns:
- bool : True if successfully created ssl machine. False otherwise.
- """
- # pylint: disable=too-many-statements, too-many-branches
- # pylint: disable=too-many-return-statements
- # Variable to collect all servers ca_file for servers
- ca_file_server = StringIO()
-
- # Stop glusterd on all servers
- ret = g.run_parallel(servers, "systemctl stop glusterd")
- if not ret:
- g.log.error("Failed to stop glusterd on all servers")
- return False
-
- # Generate key file on all servers
- cmd = "openssl genrsa -out /etc/ssl/glusterfs.key 2048"
- ret = g.run_parallel(servers, cmd)
- if not ret:
- g.log.error("Failed to create /etc/ssl/glusterfs.key "
- "file on all servers")
- return False
-
- # Generate glusterfs.pem file on all servers
- for server in servers:
- _, hostname, _ = g.run(server, "hostname")
- cmd = ("openssl req -new -x509 -key /etc/ssl/glusterfs.key -subj "
- "/CN=%s -days 365 -out /etc/ssl/glusterfs.pem" % (hostname))
- ret = g.run(server, cmd)
- if not ret:
- g.log.error("Failed to create /etc/ssl/glusterfs.pem "
- "file on server %s", server)
- return False
-
- # Copy glusterfs.pem file of all servers into ca_file_server
- for server in servers:
- conn1 = g.rpyc_get_connection(server)
- if conn1 == "None":
- g.log.error("Failed to get rpyc connection on %s", server)
-
- with conn1.builtin.open('/etc/ssl/glusterfs.pem') as fin:
- ca_file_server.write(fin.read())
-
- # Copy all ca_file_server for clients use
- ca_file_client = ca_file_server.getvalue()
-
- # Generate key file on all clients
- for client in clients:
- _, hostname, _ = g.run(client, "hostname -s")
- cmd = "openssl genrsa -out /etc/ssl/glusterfs.key 2048"
- ret = g.run(client, cmd)
- if not ret:
- g.log.error("Failed to create /etc/ssl/glusterfs.key "
- "file on client %s", client)
- return False
-
- # Generate glusterfs.pem file on all clients
- cmd = ("openssl req -new -x509 -key /etc/ssl/glusterfs.key -subj "
- "/CN=%s -days 365 -out /etc/ssl/glusterfs.pem" % (client))
- ret = g.run(client, cmd)
- if not ret:
- g.log.error("Failed to create /etc/ssl/glusterf.pem "
- "file on client %s", client)
- return False
-
- # Copy glusterfs.pem file of client to a ca_file_server
- conn2 = g.rpyc_get_connection(client)
- if conn2 == "None":
- g.log.error("Failed to get rpyc connection on %s", server)
- with conn2.builtin.open('/etc/ssl/glusterfs.pem') as fin:
- ca_file_server.write(fin.read())
-
- # Copy glusterfs.pem file to glusterfs.ca of client such that
- # clients shouldn't share respectives ca file each other
- cmd = "cp /etc/ssl/glusterfs.pem /etc/ssl/glusterfs.ca"
- ret, _, _ = g.run(client, cmd)
- if ret != 0:
- g.log.error("Failed to copy the glusterfs.pem to "
- "glusterfs.ca of client")
- return False
-
- # Now copy the ca_file of all servers to client ca file
- with conn2.builtin.open('/etc/ssl/glusterfs.ca', 'a') as fout:
- fout.write(ca_file_client)
-
- # Create /var/lib/glusterd directory on clients
- ret = g.run(client, "mkdir -p /var/lib/glusterd/")
- if not ret:
- g.log.error("Failed to create directory /var/lib/glusterd/"
- " on clients")
-
- # Copy ca_file_server to all servers
- for server in servers:
- conn3 = g.rpyc_get_connection(server)
- if conn3 == "None":
- g.log.error("Failed to get rpyc connection on %s", server)
-
- with conn3.builtin.open('/etc/ssl/glusterfs.ca', 'w') as fout:
- fout.write(ca_file_server.getvalue())
-
- # Touch /var/lib/glusterd/secure-access on all servers
- ret = g.run_parallel(servers, "touch /var/lib/glusterd/secure-access")
- if not ret:
- g.log.error("Failed to touch the file on servers")
- return False
-
- # Touch /var/lib/glusterd/secure-access on all clients
- ret = g.run_parallel(clients, "touch /var/lib/glusterd/secure-access")
- if not ret:
- g.log.error("Failed to touch the file on clients")
- return False
-
- # Start glusterd on all servers
- ret = g.run_parallel(servers, "systemctl start glusterd")
- if not ret:
- g.log.error("Failed to stop glusterd on servers")
- return False
-
- return True
-
-
-def cleanup_ssl_setup(servers, clients):
- """
- Following are the steps to cleanup ssl setup:
- - Stop glusterd on all servers
- - Remove folder /etc/ssl/*
- - Remove /var/lib/glusterd/*
- - Start glusterd on all servers
-
- Args:
- servers: List of servers
- clients: List of clients
-
- Returns:
- bool : True if successfully cleaned ssl machine. False otherwise.
- """
- # pylint: disable=too-many-return-statements
- _rc = True
-
- # Stop glusterd on all servers
- ret = g.run_parallel(servers, "systemctl stop glusterd")
- if not ret:
- _rc = False
- g.log.error("Failed to stop glusterd on all servers")
-
- # Remove glusterfs.key, glusterfs.pem and glusterfs.ca file
- # from all servers
- cmd = "rm -rf /etc/ssl/glusterfs*"
- ret = g.run_parallel(servers, cmd)
- if not ret:
- _rc = False
- g.log.error("Failed to remove folder /etc/ssl/glusterfs* "
- "on all servers")
-
- # Remove folder /var/lib/glusterd/secure-access from servers
- cmd = "rm -rf /var/lib/glusterd/secure-access"
- ret = g.run_parallel(servers, cmd)
- if not ret:
- _rc = False
- g.log.error("Failed to remove folder /var/lib/glusterd/secure-access "
- "on all servers")
-
- # Remove glusterfs.key, glusterfs.pem and glusterfs.ca file
- # from all clients
- cmd = "rm -rf /etc/ssl/glusterfs*"
- ret = g.run_parallel(clients, cmd)
- if not ret:
- _rc = False
- g.log.error("Failed to remove folder /etc/ssl/glusterfs* "
- "on all clients")
-
- # Remove folder /var/lib/glusterd/secure-access from clients
- cmd = "rm -rf /var/lib/glusterd/secure-access"
- ret = g.run_parallel(clients, cmd)
- if not ret:
- _rc = False
- g.log.error("Failed to remove folder /var/lib/glusterd/secure-access "
- "on all clients")
-
- # Start glusterd on all servers
- ret = g.run_parallel(servers, "systemctl start glusterd")
- if not ret:
- _rc = False
- g.log.error("Failed to stop glusterd on servers")
-
- return _rc
diff --git a/glustolibs-gluster/glustolibs/gluster/tiering_ops.py b/glustolibs-gluster/glustolibs/gluster/tiering_ops.py
deleted file mode 100644
index 7e48a48c7..000000000
--- a/glustolibs-gluster/glustolibs/gluster/tiering_ops.py
+++ /dev/null
@@ -1,1023 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-"""
- Description: Library for gluster tiering operations.
-"""
-
-import re
-import time
-from glusto.core import Glusto as g
-from glustolibs.gluster.peer_ops import peer_probe_servers
-from glustolibs.gluster.gluster_init import start_glusterd
-from glustolibs.gluster.lib_utils import list_files
-
-try:
- import xml.etree.cElementTree as etree
-except ImportError:
- import xml.etree.ElementTree as etree
-
-
-def add_extra_servers_to_cluster(mnode, extra_servers):
- """Adds the given extra servers to cluster
-
- Args:
- mnode (str): Node on which cmd has to be executed.
- extra_servers (str|list) : A server|list of extra servers to be
- attached to cluster
-
- Returns:
- bool: True, if extra servers are attached to cluster
- False, otherwise
-
- Example:
- add_extra_servers_to_cluster("abc.com", ['peer_node1','peer_node2'])
- """
-
- if isinstance(extra_servers, str):
- extra_servers = [extra_servers]
-
- ret = start_glusterd(servers=extra_servers)
- if not ret:
- g.log.error("glusterd did not start in peer nodes")
- return False
-
- ret = peer_probe_servers(mnode, servers=extra_servers)
- if not ret:
- g.log.error("Unable to do peer probe on extra server machines")
- return False
-
- return True
-
-
-def tier_attach(mnode, volname, num_bricks_to_add, extra_servers,
- extra_servers_info, replica=1, force=False):
- """Attaches tier to the volume
-
- Args:
- mnode (str): Node on which cmd has to be executed.
- volname (str): volume name
- num_bricks_to_add (str): number of bricks to be added as hot tier
- extra_servers (str|list): from this server|these servers,
- hot tier will be added to volume
- extra_servers_info (dict): dict of server info of each extra servers
-
- Kwargs:
- replica (str): replica count of the hot tier
- force (bool): If this option is set to True, then attach tier
- will get executed with force option. If it is set to False,
- then attach tier will get executed without force option
-
- Returns:
- tuple: Tuple containing three elements (ret, out, err).
- The first element 'ret' is of type 'int' and is the return value
- of command execution.
-
- The second element 'out' is of type 'str' and is the stdout value
- of the command execution.
-
- The third element 'err' is of type 'str' and is the stderr value
- of the command execution.
-
- Example:
- tier_attach("abc.com", testvol, '2', ['extra_server1','extra_server2'],
- extra_server_info)
- """
- if isinstance(extra_servers, str):
- extra_servers = [extra_servers]
-
- replica = int(replica)
- repc = ''
- if replica != 1:
- repc = "replica %d" % replica
-
- frce = ''
- if force:
- frce = 'force'
-
- num_bricks_to_add = int(num_bricks_to_add)
-
- from glustolibs.gluster.lib_utils import form_bricks_list
- bricks_list = form_bricks_list(mnode, volname, num_bricks_to_add,
- extra_servers[:], extra_servers_info)
- if bricks_list is None:
- g.log.error("number of bricks required are greater than "
- "unused bricks")
- return (-1, '', '')
-
- bricks_path = ' '.join(bricks_list)
- bricks_path = [re.sub(r"(.*\/\S+\_)brick(\d+)", r"\1tier\2", item)
- for item in bricks_path.split() if item]
- tier_bricks_path = " ".join(bricks_path)
- cmd = ("gluster volume tier %s attach %s %s %s --mode=script"
- % (volname, repc, tier_bricks_path, frce))
-
- return g.run(mnode, cmd)
-
-
-def tier_start(mnode, volname, force=False):
- """Starts the tier volume
-
- Args:
- mnode (str): Node on which cmd has to be executed.
- volname (str): volume name
-
- Kwargs:
- force (bool): If this option is set to True, then attach tier
- will get executed with force option. If it is set to False,
- then attach tier will get executed without force option
- Returns:
- tuple: Tuple containing three elements (ret, out, err).
- The first element 'ret' is of type 'int' and is the return value
- of command execution.
-
- The second element 'out' is of type 'str' and is the stdout value
- of the command execution.
-
- The third element 'err' is of type 'str' and is the stderr value
- of the command execution.
-
- Example:
- tier_start("abc.com", testvol)
- """
-
- frce = ''
- if force:
- frce = 'force'
-
- cmd = ("gluster volume tier %s start %s --mode=script"
- % (volname, frce))
- return g.run(mnode, cmd)
-
-
-def tier_status(mnode, volname):
- """executes tier status command
-
- Args:
- mnode (str): Node on which cmd has to be executed.
- volname (str): volume name
-
- Returns:
- tuple: Tuple containing three elements (ret, out, err).
- The first element 'ret' is of type 'int' and is the return value
- of command execution.
-
- The second element 'out' is of type 'str' and is the stdout value
- of the command execution.
-
- The third element 'err' is of type 'str' and is the stderr value
- of the command execution.
-
- Example:
- tier_status("abc.com", testvol)
- """
-
- cmd = "gluster volume tier %s status" % volname
- ret = g.run(mnode, cmd)
-
- return ret
-
-
-def get_tier_status(mnode, volname):
- """Parse the output of 'gluster tier status' command.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- NoneType: None if command execution fails, parse errors.
- dict: dict on success.
-
- Examples:
- >>> get_tier_status('abc.lab.eng.xyz.com', 'testvol')
- {'node': [{'promotedFiles': '0', 'demotedFiles': '0', 'nodeName':
- 'localhost', 'statusStr': 'in progress'}, {'promotedFiles': '0',
- 'demotedFiles': '0', 'nodeName': '10.70.47.16', 'statusStr':
- 'in progress'}], 'task-id': '2ed28cbd-4246-493a-87b8-1fdcce313b34',
- 'nodeCount': '4', 'op': '7'}
- """
-
- cmd = "gluster volume tier %s status --xml" % volname
- ret, out, _ = g.run(mnode, cmd)
- if ret != 0:
- g.log.error("Failed to execute 'tier status' on node %s. "
- "Hence failed to get tier status.", mnode)
- return None
-
- try:
- root = etree.XML(out)
- except etree.ParseError:
- g.log.error("Failed to parse the gluster tier status xml output.")
- return None
-
- tier_status = {}
- tier_status["node"] = []
- for info in root.findall("volRebalance"):
- for element in info.getchildren():
- if element.tag == "node":
- status_info = {}
- for elmt in element.getchildren():
- status_info[elmt.tag] = elmt.text
- tier_status[element.tag].append(status_info)
- else:
- tier_status[element.tag] = element.text
- return tier_status
-
-
-def tier_detach_start(mnode, volname):
- """starts detaching tier on given volume
-
- Args:
- mnode (str): Node on which cmd has to be executed.
- volname (str): volume name
-
- Returns:
- tuple: Tuple containing three elements (ret, out, err).
- The first element 'ret' is of type 'int' and is the return value
- of command execution.
-
- The second element 'out' is of type 'str' and is the stdout value
- of the command execution.
-
- The third element 'err' is of type 'str' and is the stderr value
- of the command execution.
-
- Example:
- tier_detach_start("abc.com", testvol)
-
- """
-
- cmd = "gluster volume tier %s detach start --mode=script" % volname
- return g.run(mnode, cmd)
-
-
-def tier_detach_status(mnode, volname):
- """executes detach tier status on given volume
-
- Args:
- mnode (str): Node on which cmd has to be executed.
- volname (str): volume name
-
- Returns:
- tuple: Tuple containing three elements (ret, out, err).
- The first element 'ret' is of type 'int' and is the return value
- of command execution.
-
- The second element 'out' is of type 'str' and is the stdout value
- of the command execution.
-
- The third element 'err' is of type 'str' and is the stderr value
- of the command execution.
-
- Example:
- tier_detach_status("abc.com", testvol)
-
- """
-
- cmd = "gluster volume tier %s detach status --mode=script" % volname
- return g.run(mnode, cmd)
-
-
-def tier_detach_stop(mnode, volname):
- """stops detaching tier on given volume
-
- Args:
- mnode (str): Node on which cmd has to be executed.
- volname (str): volume name
-
- Returns:
- tuple: Tuple containing three elements (ret, out, err).
- The first element 'ret' is of type 'int' and is the return value
- of command execution.
-
- The second element 'out' is of type 'str' and is the stdout value
- of the command execution.
-
- The third element 'err' is of type 'str' and is the stderr value
- of the command execution.
-
- Example:
- tier_detach_stop("abc.com", testvol)
-
- """
-
- cmd = "gluster volume tier %s detach stop --mode=script" % volname
- return g.run(mnode, cmd)
-
-
-def tier_detach_commit(mnode, volname):
- """commits detach tier on given volume
-
- Args:
- mnode (str): Node on which cmd has to be executed.
- volname (str): volume name
-
- Returns:
- tuple: Tuple containing three elements (ret, out, err).
- The first element 'ret' is of type 'int' and is the return value
- of command execution.
-
- The second element 'out' is of type 'str' and is the stdout value
- of the command execution.
-
- The third element 'err' is of type 'str' and is the stderr value
- of the command execution.
-
- Example:
- tier_detach_commit("abc.com", testvol)
-
- """
-
- cmd = "gluster volume tier %s detach commit --mode=script" % volname
- return g.run(mnode, cmd)
-
-
-def tier_detach_force(mnode, volname):
- """detaches tier forcefully on given volume
-
- Args:
- mnode (str): Node on which cmd has to be executed.
- volname (str): volume name
-
- Returns:
- tuple: Tuple containing three elements (ret, out, err).
- The first element 'ret' is of type 'int' and is the return value
- of command execution.
-
- The second element 'out' is of type 'str' and is the stdout value
- of the command execution.
-
- The third element 'err' is of type 'str' and is the stderr value
- of the command execution.
-
- Example:
- tier_detach_force("abc.com", testvol)
-
- """
-
- cmd = "gluster volume tier %s detach force --mode=script" % volname
- return g.run(mnode, cmd)
-
-
-def get_detach_tier_status(mnode, volname):
- """Parse the output of 'gluster volume tier detach status' command.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- NoneType: None if command execution fails, parse errors.
- dict: dict on success.
-
- Examples:
- >>> get_detach_tier_status('abc.lab.eng.xyz.com', "testvol")
- {'node': [{'files': '0', 'status': '3', 'lookups': '1', 'skipped': '0',
- 'nodeName': 'localhost', 'failures': '0', 'runtime': '0.00', 'id':
- '11336017-9561-4e88-9ac3-a94d4b403340', 'statusStr': 'completed',
- 'size': '0'}, {'files': '0', 'status': '3', 'lookups': '0', 'skipped':
- '0', 'nodeName': '10.70.47.16', 'failures': '0', 'runtime': '0.00',
- 'id': 'a2b88b10-eba2-4f97-add2-8dc37df08b27', 'statusStr': 'completed',
- 'size': '0'}], 'nodeCount': '4', 'aggregate': {'files': '0', 'status':
- '3', 'lookups': '1', 'skipped': '0', 'failures': '0', 'runtime': '0.0',
- 'statusStr': 'completed', 'size': '0'}}
- """
-
- cmd = "gluster volume tier %s detach status --xml" % volname
- ret, out, _ = g.run(mnode, cmd)
- if ret != 0:
- g.log.error("Failed to execute 'detach tier status' on node %s. "
- "Hence failed to get detach tier status.", mnode)
- return None
-
- try:
- root = etree.XML(out)
- except etree.ParseError:
- g.log.error("Failed to parse the detach tier status xml output.")
- return None
-
- tier_status = {}
- tier_status["node"] = []
- for info in root.findall("volDetachTier"):
- for element in info.getchildren():
- if element.tag == "node":
- status_info = {}
- for elmt in element.getchildren():
- status_info[elmt.tag] = elmt.text
- tier_status[element.tag].append(status_info)
- elif element.tag == "aggregate":
- status_info = {}
- for elmt in element.getchildren():
- status_info[elmt.tag] = elmt.text
- tier_status[element.tag] = status_info
- else:
- tier_status[element.tag] = element.text
- return tier_status
-
-
-def tier_detach_start_and_get_taskid(mnode, volname):
- """Parse the output of 'gluster volume tier detach start' command.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- NoneType: None if command execution fails, parse errors.
- dict: dict on success.
-
- Examples:
- >>> tier_detach_start_and_get_taskid('abc.lab.eng.xyz.com',
- "testvol")
- {'task-id': '8020835c-ff0d-4ea1-9f07-62dd067e92d4'}
- """
-
- cmd = "gluster volume tier %s detach start --xml" % volname
- ret, out, _ = g.run(mnode, cmd)
- if ret != 0:
- g.log.error("Failed to execute 'detach tier start' on node %s. "
- "Hence failed to parse the detach tier start.", mnode)
- return None
-
- try:
- root = etree.XML(out)
- except etree.ParseError:
- g.log.error("Failed to parse the gluster detach tier "
- "start xml output.")
- return None
-
- tier_status = {}
- for info in root.findall("volDetachTier"):
- for element in info.getchildren():
- tier_status[element.tag] = element.text
- return tier_status
-
-
-def tier_detach_stop_and_get_status(mnode, volname):
- """Parse the output of 'gluster volume tier detach stop' command.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- NoneType: None if command execution fails, parse errors.
- dict: dict on success.
-
- Examples:
- >>> tier_detach_stop_and_get_status('abc.lab.eng.xyz.com',
- "testvol")
- {'node': [{'files': '0', 'status': '3', 'lookups': '1', 'skipped': '0',
- 'nodeName': 'localhost', 'failures': '0', 'runtime': '0.00', 'id':
- '11336017-9561-4e88-9ac3-a94d4b403340', 'statusStr': 'completed',
- 'size': '0'}, {'files': '0', 'status': '3', 'lookups': '0', 'skipped':
- '0', 'nodeName': '10.70.47.16', 'failures': '0', 'runtime': '0.00',
- 'id': 'a2b88b12-eba2-4f97-add2-8dc37df08b27', 'statusStr': 'completed',
- 'size': '0'}], 'nodeCount': '4', 'aggregate': {'files': '0', 'status':
- '3', 'lookups': '1', 'skipped': '0', 'failures': '0', 'runtime': '0.0',
- 'statusStr': 'completed', 'size': '0'}}
- """
-
- cmd = "gluster volume tier %s detach stop --xml" % volname
- ret, out, _ = g.run(mnode, cmd)
- if ret != 0:
- g.log.error("Failed to execute 'tier start' on node %s. "
- "Hence failed to parse the tier start.", mnode)
- return None
-
- try:
- root = etree.XML(out)
- except etree.ParseError:
- g.log.error("Failed to parse the gluster detach tier stop"
- " xml output.")
- return None
-
- tier_status = {}
- tier_status["node"] = []
- for info in root.findall("volDetachTier"):
- for element in info.getchildren():
- if element.tag == "node":
- status_info = {}
- for elmt in element.getchildren():
- status_info[elmt.tag] = elmt.text
- tier_status[element.tag].append(status_info)
- elif element.tag == "aggregate":
- status_info = {}
- for elmt in element.getchildren():
- status_info[elmt.tag] = elmt.text
- tier_status[element.tag] = status_info
- else:
- tier_status[element.tag] = element.text
- return tier_status
-
-
-def wait_for_detach_tier_to_complete(mnode, volname, timeout=300):
- """Waits for the detach tier to complete
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Kwargs:
- timeout (int): timeout value to wait for detach tier to complete
-
- Returns:
- True on success, False otherwise
-
- Examples:
- >>> wait_for_detach_tier_to_complete(mnode, "testvol")
- """
-
- count = 0
- flag = 0
- while (count < timeout):
- status_info = get_detach_tier_status(mnode, volname)
- if status_info is None:
- return False
-
- status = status_info['aggregate']['statusStr']
- if status == 'completed':
- flag = 1
- break
-
- time.sleep(10)
- count = count + 10
- if not flag:
- g.log.error("detach tier is not completed")
- return False
- else:
- g.log.info("detach tier is successfully completed")
- return True
-
-
-def get_files_from_hot_tier(mnode, volname):
- """Lists files from hot tier for the given volume
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
-        Empty list: if there are no files in the hot tier.
- list: list of files in hot tier on success.
-
- Examples:
- >>>get_files_from_hot_tier(mnode, "testvol")
- """
-
- files = []
- from glustolibs.gluster.volume_libs import get_subvols
- subvols = get_subvols(mnode, volname)
- for subvol in subvols['hot_tier_subvols']:
- info = subvol[0].split(':')
- file_list = list_files(info[0], info[1])
- for file in file_list:
- if ".glusterfs" not in file:
- files.append(file)
-
- return files
-
-
-def get_files_from_cold_tier(mnode, volname):
- """Lists files from cold tier for the given volume
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
-        Empty list: if there are no files in the cold tier.
- list: list of files in cold tier on success.
-
- Examples:
- >>>get_files_from_hot_tier("testvol")
- """
-
- files = []
- from glustolibs.gluster.volume_libs import get_subvols
- subvols = get_subvols(mnode, volname)
- for subvol in subvols['cold_tier_subvols']:
- info = subvol[0].split(':')
- file_list = list_files(info[0], info[1])
- for file in file_list:
- if ".glusterfs" not in file:
- files.append(file)
-
- return files
-
-
-def get_tier_promote_frequency(mnode, volname):
- """Gets tier promote frequency value for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- NoneType: None if command execution fails, parse errors.
- str: promote frequency value on success.
-
- Examples:
- >>>get_tier_promote_frequency("abc.com", "testvol")
- """
-
- from glustolibs.gluster.volume_ops import get_volume_options
- vol_options = get_volume_options(mnode, volname)
- if vol_options is None:
- g.log.error("Failed to get volume options")
- return None
-
- return vol_options['cluster.tier-promote-frequency']
-
-
-def get_tier_demote_frequency(mnode, volname):
- """Gets tier demote frequency value for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- NoneType: None if command execution fails, parse errors.
- str: demote frequency value on success.
-
- Examples:
- >>>get_tier_demote_frequency("abc.com", "testvol")
- """
-
- from glustolibs.gluster.volume_ops import get_volume_options
- vol_options = get_volume_options(mnode, volname)
- if vol_options is None:
- g.log.error("Failed to get volume options")
- return None
-
- return vol_options['cluster.tier-demote-frequency']
-
-
-def get_tier_mode(mnode, volname):
- """Gets tier mode for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- NoneType: None if command execution fails, parse errors.
- str: tier mode on success.
-
- Examples:
- >>>get_tier_mode("testvol")
- """
-
- from glustolibs.gluster.volume_ops import get_volume_options
- vol_options = get_volume_options(mnode, volname)
- if vol_options is None:
- g.log.error("Failed to get volume options")
- return None
-
- return vol_options['cluster.tier-mode']
-
-
-def get_tier_max_mb(mnode, volname):
- """Gets tier max mb for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- NoneType: None if command execution fails, parse errors.
- str: tier max mb on success.
-
- Examples:
- >>>get_tier_max_mb("abc.com", "testvol")
- """
-
- from glustolibs.gluster.volume_ops import get_volume_options
- vol_options = get_volume_options(mnode, volname)
- if vol_options is None:
- g.log.error("Failed to get volume options")
- return None
-
- return vol_options['cluster.tier-max-mb']
-
-
-def get_tier_max_files(mnode, volname):
- """Gets tier max files for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- NoneType: None if command execution fails, parse errors.
- str: tier max files on success.
-
- Examples:
- >>>get_tier_max_files("abc.com", "testvol")
- """
-
- from glustolibs.gluster.volume_ops import get_volume_options
- vol_options = get_volume_options(mnode, volname)
- if vol_options is None:
- g.log.error("Failed to get volume options")
- return None
-
- return vol_options['cluster.tier-max-files']
-
-
-def get_tier_watermark_high_limit(mnode, volname):
- """Gets tier watermark high limit for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- NoneType: None if command execution fails, parse errors.
- str: tier watermark high limit on success.
-
- Examples:
- >>>get_tier_watermark_high_limit(mnode, "testvol")
- """
-
- from glustolibs.gluster.volume_ops import get_volume_options
- vol_options = get_volume_options(mnode, volname)
- if vol_options is None:
- g.log.error("Failed to get volume options")
- return None
-
- return vol_options['cluster.watermark-hi']
-
-
-def get_tier_watermark_low_limit(mnode, volname):
- """Gets tier watermark low limit for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- NoneType: None if command execution fails, parse errors.
- str: tier watermark low limit on success.
-
- Examples:
- >>>get_tier_watermark_low_limit("abc.com", "testvol")
- """
-
- from glustolibs.gluster.volume_ops import get_volume_options
- vol_options = get_volume_options(mnode, volname)
- if vol_options is None:
- g.log.error("Failed to get volume options")
- return None
-
- return vol_options['cluster.watermark-low']
-
-
-def set_tier_promote_frequency(mnode, volname, value):
- """Sets tier promote frequency value for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
- value (str): promote frequency value
-
- Returns:
- bool: True on success, False Otherwise
-
- Examples:
- >>>set_tier_promote_frequency("abc.com", "testvol", '1000')
- """
-
- option = {'cluster.tier-promote-frequency': value}
-
- from glustolibs.gluster.volume_ops import set_volume_options
- if not set_volume_options(mnode, volname,
- options=option):
- g.log.error("Failed to set promote frequency to %s"
- % value)
- return False
-
- return True
-
-
-def set_tier_demote_frequency(mnode, volname, value):
- """Sets tier demote frequency value for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
- value (str): demote frequency value
-
- Returns:
- bool: True on success, False Otherwise
-
- Examples:
- >>>set_tier_demote_frequency("abc.com", "testvol", "500")
- """
-
- option = {'cluster.tier-demote-frequency': value}
-
- from glustolibs.gluster.volume_ops import set_volume_options
- if not set_volume_options(mnode, volname,
- options=option):
- g.log.error("Failed to set demote frequency to %s"
- % value)
- return False
-
- return True
-
-
-def set_tier_mode(mnode, volname, value):
- """Sets tier mode for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
- value (str): tier mode value
-
- Returns:
- bool: True on success, False Otherwise
-
- Examples:
- >>>set_tier_mode("abc.com", "testvol", "cache")
- """
-
- option = {'cluster.tier-mode': value}
-
- from glustolibs.gluster.volume_ops import set_volume_options
- if not set_volume_options(mnode, volname,
- options=option):
- g.log.error("Failed to set tier mode to %s"
- % value)
- return False
-
- return True
-
-
-def set_tier_max_mb(mnode, volname, value):
- """Sets tier max mb for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-        value (str): tier max mb value
-
- Returns:
- bool: True on success, False Otherwise
-
- Examples:
- >>>set_tier_max_mb("abc.com", "testvol", "50")
- """
-
- option = {'cluster.tier-max-mb': value}
-
- from glustolibs.gluster.volume_ops import set_volume_options
- if not set_volume_options(mnode, volname,
- options=option):
- g.log.error("Failed to set tier max mb to %s"
- % value)
- return False
-
- return True
-
-
-def set_tier_max_files(mnode, volname, value):
- """Sets tier max files for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-        value (str): tier max files value
-
- Returns:
- bool: True on success, False Otherwise
-
- Examples:
- >>>set_tier_max_files("abc.com", "testvol", "10")
- """
-
- option = {'cluster.tier-max-files': value}
-
- from glustolibs.gluster.volume_ops import set_volume_options
- if not set_volume_options(mnode, volname,
- options=option):
- g.log.error("Failed to set tier max files to %s"
- % value)
- return False
-
- return True
-
-
-def set_tier_watermark_high_limit(mnode, volname, value):
- """Sets tier watermark high limit for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-        value (str): tier watermark high limit value
-
- Returns:
- bool: True on success, False Otherwise
-
- Examples:
- >>>set_tier_watermark_high_limit("abc.com", "testvol", "95")
- """
-
- option = {'cluster.watermark-hi': value}
-
- from glustolibs.gluster.volume_ops import set_volume_options
- if not set_volume_options(mnode, volname,
- options=option):
- g.log.error("Failed to set tier watermark high limit to %s"
- % value)
- return False
-
- return True
-
-
-def set_tier_watermark_low_limit(mnode, volname, value):
- """Sets tier watermark low limit for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-        value (str): tier watermark low limit value
-
- Returns:
- bool: True on success, False Otherwise
-
- Examples:
- >>>set_tier_watermark_low_limit("abc.com", "testvol", "40")
- """
-
- option = {'cluster.watermark-low': value}
-
- from glustolibs.gluster.volume_ops import set_volume_options
- if not set_volume_options(mnode, volname,
- options=option):
- g.log.error("Failed to set tier watermark low limit to %s"
- % value)
- return False
-
- return True
-
-
-def get_tier_pid(mnode, volname):
- """Gets tier pid for given volume.
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- NoneType: None if command execution fails, parse errors.
- str: pid of tier process on success.
-
- Examples:
- >>>get_tier_pid("abc.xyz.com", "testvol")
- """
-
- cmd = ("ps -ef | grep -v grep | grep '/var/log/glusterfs/%s-tier.log' |"
- "awk '{print $2}'" % volname)
- ret, out, err = g.run(mnode, cmd)
- if ret != 0:
- g.log.error("Failed to execute 'ps' cmd")
- return None
-
- return out.strip("\n")
-
-
-def is_tier_process_running(mnode, volname):
- """Checks whether tier process is running
-
- Args:
- mnode (str): Node on which command has to be executed.
- volname (str): volume name
-
- Returns:
- True on success, False otherwise
-
- Examples:
- >>>is_tier_process_running("abc.xyz.com", "testvol")
- """
-
- pid = get_tier_pid(mnode, volname)
- if pid == '':
- return False
- return True
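All of the tier option getters and setters removed above were thin wrappers around the generic option helpers in glustolibs.gluster.volume_ops, which remain available. A minimal sketch of the equivalent generic lookup, assuming get_volume_options() returns a flat dict of option names to string values (as the removed code did); the helper name here is hypothetical:

    from glustolibs.gluster.volume_ops import (get_volume_options,
                                               set_volume_options)

    def get_volume_option(mnode, volname, option):
        # Hypothetical helper: fetch one option value, or None on failure.
        vol_options = get_volume_options(mnode, volname)
        if vol_options is None:
            return None
        return vol_options.get(option)

    # e.g. get_volume_option("abc.com", "testvol", "cluster.watermark-hi")
    # e.g. set_volume_options("abc.com", "testvol",
    #                         {'cluster.watermark-hi': '90'})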
diff --git a/glustolibs-gluster/glustolibs/gluster/uss_ops.py b/glustolibs-gluster/glustolibs/gluster/uss_ops.py
index a6f9b8f98..2df112d78 100644
--- a/glustolibs-gluster/glustolibs/gluster/uss_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/uss_ops.py
@@ -154,6 +154,7 @@ def uss_list_snaps(client, mount):
Args:
        client (str): Client on which commands have to be executed.
        mount (str): Mount point at which the volume is mounted.
+
Returns:
tuple: Tuple containing three elements (ret, out, err).
The first element 'ret' is of type 'int' and is the return value
@@ -167,3 +168,29 @@ def uss_list_snaps(client, mount):
"""
cmd = "ls -R %s/.snaps" % (mount)
return g.run(client, cmd)
+
+
+def get_uss_list_snaps(client, mount):
+ """Fetches the list of snapshots under the .snaps directory
+
+ Args:
+        client (str): Client on which commands have to be executed.
+        mount (str): Mount point at which the volume is mounted.
+
+ Returns:
+        NoneType: None if there are errors.
+        list: List of snapshot names present under the .snaps directory.
+
+ Examples:
+ >>> get_uss_list_snaps('abc.lab.eng.xyz.com', 'mountpoint')
+ ['snap1', 'snap2', 'snap3']
+
+ """
+ cmd = "ls %s/.snaps" % (mount)
+ ret, out, _ = g.run(client, cmd)
+ if ret:
+ g.log.error(".snaps list returned error")
+ return None
+
+ snap_dir_list = out.splitlines()
+ return snap_dir_list
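A minimal usage sketch for the new helper; the client name and mount path are placeholders, and unlike uss_list_snaps() above it returns a parsed list rather than the raw (ret, out, err) tuple:

    from glustolibs.gluster.uss_ops import get_uss_list_snaps

    snaps = get_uss_list_snaps('abc.lab.eng.xyz.com', '/mnt/glusterfs')
    if snaps is None:
        raise RuntimeError("Unable to list snapshots under .snaps")
    if 'snap1' in snaps:
        # The snapshot is visible to clients via USS.
        pass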
diff --git a/glustolibs-gluster/glustolibs/gluster/volume_libs.py b/glustolibs-gluster/glustolibs/gluster/volume_libs.py
index 784e61697..87e70ca8c 100644
--- a/glustolibs-gluster/glustolibs/gluster/volume_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/volume_libs.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2015-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -24,15 +24,13 @@ except ImportError:
import xml.etree.ElementTree as etree
from glusto.core import Glusto as g
from glustolibs.gluster.lib_utils import form_bricks_list
+from glustolibs.gluster.brickmux_libs import form_bricks_for_multivol
from glustolibs.gluster.volume_ops import (volume_create, volume_start,
set_volume_options, get_volume_info,
volume_stop, volume_delete,
volume_info, volume_status,
get_volume_options,
get_volume_list)
-from glustolibs.gluster.tiering_ops import (add_extra_servers_to_cluster,
- tier_attach,
- is_tier_process_running)
from glustolibs.gluster.quota_ops import (quota_enable, quota_limit_usage,
is_quota_enabled)
from glustolibs.gluster.uss_ops import enable_uss, is_uss_enabled
@@ -65,7 +63,8 @@ def volume_exists(mnode, volname):
return False
-def setup_volume(mnode, all_servers_info, volume_config, force=False):
+def setup_volume(mnode, all_servers_info, volume_config, multi_vol=False,
+ force=False, create_only=False):
"""Setup Volume with the configuration defined in volume_config
Args:
@@ -99,13 +98,20 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
'size': '100GB'},
'enable': False},
'uss': {'enable': False},
- 'tier': {'create_tier': True,
- 'tier_type': {'type': 'distributed-replicated',
- 'replica_count': 2,
- 'dist_count': 2,
- 'transport': 'tcp'}},
'options': {'performance.readdir-ahead': True}
}
+ Kwargs:
+        multi_vol (bool): True, if bricks need to be created for multiple
+                          volumes (more than 5).
+                          False, otherwise. By default, value is set to False.
+        force (bool): If this option is set to True, then the volume creation
+                      command is executed with the force option; if set to
+                      False, without the force option.
+                      By default, value is set to False.
+        create_only (bool): True, if only volume creation is needed.
+                            False, will do volume create, start and set
+                            operations if any are provided in volume_config.
+                            By default, value is set to False.
Returns:
bool : True on successful setup. False Otherwise
@@ -118,8 +124,8 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
return False
# Check if the volume already exists
- volinfo = get_volume_info(mnode=mnode)
- if volinfo is not None and volname in volinfo.keys():
+ vollist = get_volume_list(mnode=mnode)
+ if vollist is not None and volname in vollist:
g.log.info("volume %s already exists. Returning...", volname)
return True
@@ -221,15 +227,55 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
return False
number_of_bricks = (kwargs['dist_count'] * kwargs['disperse_count'])
+
+ elif volume_type == 'arbiter':
+ if 'replica_count' in volume_config.get('voltype'):
+ kwargs['replica_count'] = (volume_config['voltype']
+ ['replica_count'])
+ else:
+ g.log.error("Replica count not specified in the volume config")
+ return False
+ if 'arbiter_count' in volume_config.get('voltype'):
+ kwargs['arbiter_count'] = (volume_config['voltype']
+ ['arbiter_count'])
+ else:
+ g.log.error("Arbiter count not specified in the volume config")
+ return False
+ number_of_bricks = kwargs['replica_count']
+ elif volume_type == 'distributed-arbiter':
+ if 'dist_count' in volume_config.get('voltype'):
+ kwargs['dist_count'] = (volume_config['voltype']['dist_count'])
+ else:
+ g.log.error("Distribute Count not specified in the volume config")
+ return False
+ if 'replica_count' in volume_config.get('voltype'):
+ kwargs['replica_count'] = (volume_config['voltype']
+ ['replica_count'])
+ else:
+ g.log.error("Replica count not specified in the volume config")
+ return False
+ if 'arbiter_count' in volume_config.get('voltype'):
+ kwargs['arbiter_count'] = (volume_config['voltype']
+ ['arbiter_count'])
+ else:
+ g.log.error("Arbiter count not specified in the volume config")
+ return False
+ number_of_bricks = (kwargs['dist_count'] * kwargs['replica_count'])
+
else:
g.log.error("Invalid volume type defined in config")
return False
# get bricks_list
- bricks_list = form_bricks_list(mnode=mnode, volname=volname,
- number_of_bricks=number_of_bricks,
- servers=servers,
- servers_info=all_servers_info)
+ if multi_vol:
+ bricks_list = form_bricks_for_multivol(
+ mnode=mnode, volname=volname, number_of_bricks=number_of_bricks,
+ servers=servers, servers_info=all_servers_info)
+ else:
+ bricks_list = form_bricks_list(mnode=mnode, volname=volname,
+ number_of_bricks=number_of_bricks,
+ servers=servers,
+ servers_info=all_servers_info)
if not bricks_list:
g.log.error("Number_of_bricks is greater than the unused bricks on "
"servers")
@@ -243,6 +289,25 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
g.log.error("Unable to create volume %s", volname)
return False
+ if create_only and (ret == 0):
+ g.log.info("Volume creation of {} is done successfully".format(
+ volname))
+ return True
+
+ is_ganesha = False
+ if 'nfs_ganesha' in volume_config:
+ is_ganesha = bool(volume_config['nfs_ganesha']['enable'])
+
+ if not is_ganesha:
+ # Set all the volume options:
+ if 'options' in volume_config:
+ volume_options = volume_config['options']
+ ret = set_volume_options(mnode=mnode, volname=volname,
+ options=volume_options)
+ if not ret:
+ g.log.error("Unable to set few volume options")
+ return False
+
# Start Volume
time.sleep(2)
ret = volume_start(mnode, volname)
@@ -250,68 +315,6 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
g.log.error("volume start %s failed", volname)
return False
- # Create Tier volume
- if ('tier' in volume_config and 'create_tier' in volume_config['tier'] and
- volume_config['tier']['create_tier']):
- # get servers info for tier attach
- if ('extra_servers' in volume_config and
- volume_config['extra_servers']):
- extra_servers = volume_config['extra_servers']
- ret = add_extra_servers_to_cluster(mnode, extra_servers)
- if not ret:
- return False
- else:
- extra_servers = volume_config['servers']
-
- # get the tier volume type
- if 'tier_type' in volume_config['tier']:
- if 'type' in volume_config['tier']['tier_type']:
- tier_volume_type = volume_config['tier']['tier_type']['type']
- dist = rep = 1
- if tier_volume_type == 'distributed':
- if 'dist_count' in volume_config['tier']['tier_type']:
- dist = (volume_config['tier']['tier_type']
- ['dist_count'])
-
- elif tier_volume_type == 'replicated':
- if 'replica_count' in volume_config['tier']['tier_type']:
- rep = (volume_config['tier']['tier_type']
- ['replica_count'])
-
- elif tier_volume_type == 'distributed-replicated':
- if 'dist_count' in volume_config['tier']['tier_type']:
- dist = (volume_config['tier']['tier_type']
- ['dist_count'])
- if 'replica_count' in volume_config['tier']['tier_type']:
- rep = (volume_config['tier']['tier_type']
- ['replica_count'])
- else:
- tier_volume_type = 'distributed'
- dist = 1
- rep = 1
- number_of_bricks = dist * rep
-
- # Attach Tier
- ret, _, _ = tier_attach(mnode=mnode, volname=volname,
- extra_servers=extra_servers,
- extra_servers_info=all_servers_info,
- num_bricks_to_add=number_of_bricks,
- replica=rep)
- if ret != 0:
- g.log.error("Unable to attach tier")
- return False
-
- time.sleep(30)
- # Check if tier is running
- _rc = True
- for server in extra_servers:
- ret = is_tier_process_running(server, volname)
- if not ret:
- g.log.error("Tier process not running on %s", server)
- _rc = False
- if not _rc:
- return False
-
# Enable Quota
if ('quota' in volume_config and 'enable' in volume_config['quota'] and
volume_config['quota']['enable']):
@@ -361,13 +364,73 @@ def setup_volume(mnode, all_servers_info, volume_config, force=False):
g.log.error("USS is not enabled on the volume %s", volname)
return False
- # Set all the volume options:
- if 'options' in volume_config:
- volume_options = volume_config['options']
- ret = set_volume_options(mnode=mnode, volname=volname,
- options=volume_options)
+ if is_ganesha:
+ # Set all the volume options for NFS Ganesha
+ if 'options' in volume_config:
+ volume_options = volume_config['options']
+ ret = set_volume_options(mnode=mnode, volname=volname,
+ options=volume_options)
+ if not ret:
+ g.log.error("Unable to set few volume options")
+ return False
+
+ return True
+
+
+def bulk_volume_creation(mnode, number_of_volumes, servers_info,
+ volume_config, vol_prefix="mult_vol_",
+ is_force=False, is_create_only=False):
+ """
+ Creates the number of volumes user has specified
+
+ Args:
+ mnode (str): Node on which commands has to be executed.
+ number_of_volumes (int): Specify the number of volumes
+ to be created.
+ servers_info (dict): Information about all servers.
+ volume_config (dict): Dict containing the volume information
+
+ Kwargs:
+ vol_prefix (str): Prefix to be added to the volume name.
+        is_force (bool): True, if the volume create command needs to be
+                         executed with force, False otherwise.
+                         Defaults to False.
+        is_create_only (bool): True, if only volume creation is needed.
+                               False, will do volume create, start and set
+                               operations if any. Defaults to False.
+ Returns:
+ bool: True on successful bulk volume creation, False Otherwise.
+
+    Example:
+ volume_config = {
+ 'name': 'testvol',
+ 'servers': ['server-vm1', 'server-vm2', 'server-vm3',
+ 'server-vm4'],
+ 'voltype': {'type': 'distributed',
+ 'dist_count': 4,
+ 'transport': 'tcp'},
+ 'extra_servers': ['server-vm9', 'server-vm10',
+ 'server-vm11', 'server-vm12'],
+ 'quota': {'limit_usage': {'path': '/', 'percent': None,
+ 'size': '100GB'},
+ 'enable': False},
+ 'uss': {'enable': False},
+ 'options': {'performance.readdir-ahead': True}
+ }
+ """
+
+ if not (number_of_volumes > 1):
+ g.log.error("Provide number of volume greater than 1")
+ return False
+
+ volume_name = volume_config['name']
+ for volume in range(number_of_volumes):
+ volume_config['name'] = vol_prefix + volume_name + str(volume)
+ ret = setup_volume(mnode, servers_info, volume_config, multi_vol=True,
+ force=is_force, create_only=is_create_only)
if not ret:
- g.log.error("Unable to set few volume options")
+ g.log.error("Volume creation failed for the volume %s"
+ % volume_config['name'])
return False
return True
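A hedged usage sketch for bulk_volume_creation(); the hostnames are examples, and all_servers_info is assumed to carry the per-server brick details in the same shape setup_volume() already expects:

    from glusto.core import Glusto as g
    from glustolibs.gluster.volume_libs import bulk_volume_creation

    volume_config = {
        'name': 'testvol',
        'servers': ['server-vm1', 'server-vm2', 'server-vm3', 'server-vm4'],
        'voltype': {'type': 'distributed', 'dist_count': 4,
                    'transport': 'tcp'},
    }
    # Creates mult_vol_testvol0 .. mult_vol_testvol9 without starting them.
    if not bulk_volume_creation('server-vm1', 10, all_servers_info,
                                volume_config, is_create_only=True):
        g.log.error("Bulk volume creation failed")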
@@ -513,77 +576,11 @@ def get_subvols(mnode, volname):
get_subvols("abc.xyz.com", "testvol")
"""
- subvols = {
- 'is_tier': False,
- 'hot_tier_subvols': [],
- 'cold_tier_subvols': [],
- 'volume_subvols': []
- }
+ subvols = {'volume_subvols': []}
+
volinfo = get_volume_info(mnode, volname)
if volinfo is not None:
voltype = volinfo[volname]['typeStr']
- if voltype == 'Tier':
- # Set is_tier to True
- subvols['is_tier'] = True
-
- # Get hot tier subvols
- hot_tier_type = (volinfo[volname]["bricks"]
- ['hotBricks']['hotBrickType'])
- tmp = volinfo[volname]["bricks"]['hotBricks']["brick"]
- hot_tier_bricks = [x["name"] for x in tmp if "name" in x]
- if hot_tier_type == 'Distribute':
- for brick in hot_tier_bricks:
- subvols['hot_tier_subvols'].append([brick])
-
- elif (hot_tier_type == 'Replicate' or
- hot_tier_type == 'Distributed-Replicate'):
- rep_count = int(
- (volinfo[volname]["bricks"]['hotBricks']
- ['numberOfBricks']).split("=", 1)[0].split("x")[1].strip()
- )
- subvol_list = (
- [hot_tier_bricks[i:i + rep_count]
- for i in range(0, len(hot_tier_bricks), rep_count)])
- subvols['hot_tier_subvols'] = subvol_list
-
- # Get cold tier subvols
- cold_tier_type = (volinfo[volname]["bricks"]['coldBricks']
- ['coldBrickType'])
- tmp = volinfo[volname]["bricks"]['coldBricks']["brick"]
- cold_tier_bricks = [x["name"] for x in tmp if "name" in x]
-
- # Distribute volume
- if cold_tier_type == 'Distribute':
- for brick in cold_tier_bricks:
- subvols['cold_tier_subvols'].append([brick])
-
- # Replicate or Distribute-Replicate volume
- elif (cold_tier_type == 'Replicate' or
- cold_tier_type == 'Distributed-Replicate'):
- rep_count = int(
- (volinfo[volname]["bricks"]['coldBricks']
- ['numberOfBricks']).split("=", 1)[0].split("x")[1].strip()
- )
- subvol_list = (
- [cold_tier_bricks[i:i + rep_count]
- for i in range(0, len(cold_tier_bricks), rep_count)])
- subvols['cold_tier_subvols'] = subvol_list
-
- # Disperse or Distribute-Disperse volume
- elif (cold_tier_type == 'Disperse' or
- cold_tier_type == 'Distributed-Disperse'):
- disp_count = sum(
- [int(nums) for nums in (
- (volinfo[volname]["bricks"]['coldBricks']
- ['numberOfBricks']).split("x", 1)[1].
- strip().split("=")[0].strip().strip("()").
- split()) if nums.isdigit()])
- subvol_list = [cold_tier_bricks[i:i + disp_count]
- for i in range(0, len(cold_tier_bricks),
- disp_count)]
- subvols['cold_tier_subvols'] = subvol_list
- return subvols
-
tmp = volinfo[volname]["bricks"]["brick"]
bricks = [x["name"] for x in tmp if "name" in x]
if voltype == 'Replicate' or voltype == 'Distributed-Replicate':
@@ -604,29 +601,6 @@ def get_subvols(mnode, volname):
return subvols
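With the tier branches removed, callers of get_subvols() only need the 'volume_subvols' key; a short sketch of the simplified consumption pattern:

    from glustolibs.gluster.volume_libs import get_subvols

    subvols = get_subvols("abc.xyz.com", "testvol")['volume_subvols']
    for subvol in subvols:
        # Each subvol is a list of "host:/brick/path" strings, e.g. one
        # replica set of a Distributed-Replicate volume.
        print(subvol)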
-def is_tiered_volume(mnode, volname):
- """Check if volume is tiered volume.
-
- Args:
- mnode (str): Node on which commands are executed.
- volname (str): Name of the volume.
-
- Returns:
- bool : True if the volume is tiered volume. False otherwise
- NoneType: None if volume does not exist.
- """
- volinfo = get_volume_info(mnode, volname)
- if volinfo is None:
- g.log.error("Unable to get the volume info for volume %s", volname)
- return None
-
- voltype = volinfo[volname]['typeStr']
- if voltype == 'Tier':
- return True
- else:
- return False
-
-
def is_distribute_volume(mnode, volname):
"""Check if volume is a plain distributed volume
@@ -643,20 +617,10 @@ def is_distribute_volume(mnode, volname):
g.log.error("Unable to check if the volume %s is distribute", volname)
return False
- if volume_type_info['is_tier']:
- hot_tier_type = (volume_type_info['hot_tier_type_info']
- ['hotBrickType'])
- cold_tier_type = (volume_type_info['cold_tier_type_info']
- ['coldBrickType'])
- if hot_tier_type == 'Distribute' and cold_tier_type == 'Distribute':
- return True
- else:
- return False
+ if volume_type_info['volume_type_info']['typeStr'] == 'Distribute':
+ return True
else:
- if volume_type_info['volume_type_info']['typeStr'] == 'Distribute':
- return True
- else:
- return False
+ return False
def get_volume_type_info(mnode, volname):
@@ -670,9 +634,6 @@ def get_volume_type_info(mnode, volname):
dict : Dict containing the keys, values defining the volume type:
Example:
volume_type_info = {
- 'is_tier': False,
- 'hot_tier_type_info': {},
- 'cold_tier_type_info': {},
'volume_type_info': {
'typeStr': 'Disperse',
'replicaCount': '1',
@@ -684,18 +645,6 @@ def get_volume_type_info(mnode, volname):
}
volume_type_info = {
- 'is_tier': True,
- 'hot_tier_type_info': {
- 'hotBrickType': 'Distribute',
- 'hotreplicaCount': '1'
- },
- 'cold_tier_type_info': {
- 'coldBrickType': 'Disperse',
- 'coldreplicaCount': '1',
- 'coldarbiterCount': '0',
- 'colddisperseCount': '3',
- 'numberOfBricks':1
- },
'volume_type_info': {}
@@ -706,138 +655,26 @@ def get_volume_type_info(mnode, volname):
g.log.error("Unable to get the volume info for volume %s", volname)
return None
- volume_type_info = {
- 'is_tier': False,
- 'hot_tier_type_info': {},
- 'cold_tier_type_info': {},
- 'volume_type_info': {}
- }
-
- voltype = volinfo[volname]['typeStr']
- if voltype == 'Tier':
- volume_type_info['is_tier'] = True
-
- hot_tier_type_info = get_hot_tier_type_info(mnode, volname)
- volume_type_info['hot_tier_type_info'] = hot_tier_type_info
-
- cold_tier_type_info = get_cold_tier_type_info(mnode, volname)
- volume_type_info['cold_tier_type_info'] = cold_tier_type_info
-
- else:
- non_tiered_volume_type_info = {
- 'typeStr': '',
- 'replicaCount': '',
- 'arbiterCount': '',
- 'stripeCount': '',
- 'disperseCount': '',
- 'redundancyCount': ''
- }
- for key in non_tiered_volume_type_info.keys():
- if key in volinfo[volname]:
- non_tiered_volume_type_info[key] = volinfo[volname][key]
- else:
- g.log.error("Unable to find key '%s' in the volume info for "
- "the volume %s", key, volname)
- non_tiered_volume_type_info[key] = None
- volume_type_info['volume_type_info'] = non_tiered_volume_type_info
-
- return volume_type_info
-
-
-def get_cold_tier_type_info(mnode, volname):
- """Returns cold tier type information for the specified volume.
-
- Args:
- mnode (str): Node on which commands are executed.
- volname (str): Name of the volume.
-
- Returns:
- dict : Dict containing the keys, values defining the cold tier type:
- Example:
- cold_tier_type_info = {
- 'coldBrickType': 'Disperse',
- 'coldreplicaCount': '1',
- 'coldarbiterCount': '0',
- 'colddisperseCount': '3',
- 'numberOfBricks': '3'
- }
- NoneType: None if volume does not exist or is not a tiered volume or
- any other key errors.
- """
- volinfo = get_volume_info(mnode, volname)
- if volinfo is None:
- g.log.error("Unable to get the volume info for volume %s", volname)
- return None
-
- if not is_tiered_volume(mnode, volname):
- g.log.error("Volume %s is not a tiered volume", volname)
- return None
-
- cold_tier_type_info = {
- 'coldBrickType': '',
- 'coldreplicaCount': '',
- 'coldarbiterCount': '',
- 'colddisperseCount': '',
- 'numberOfBricks': ''
- }
- for key in cold_tier_type_info.keys():
- if key in volinfo[volname]['bricks']['coldBricks']:
- cold_tier_type_info[key] = (volinfo[volname]['bricks']
- ['coldBricks'][key])
+ volume_type_info = {'volume_type_info': {}}
+
+ all_volume_type_info = {
+ 'typeStr': '',
+ 'replicaCount': '',
+ 'arbiterCount': '',
+ 'stripeCount': '',
+ 'disperseCount': '',
+ 'redundancyCount': ''
+ }
+ for key in all_volume_type_info.keys():
+ if key in volinfo[volname]:
+ all_volume_type_info[key] = volinfo[volname][key]
else:
- g.log.error("Unable to find key '%s' in the volume info for the "
- "volume %s", key, volname)
- return None
-
- if 'Disperse' in cold_tier_type_info['coldBrickType']:
- redundancy_count = (cold_tier_type_info['numberOfBricks'].
- split("x", 1)[1].strip().
- split("=")[0].strip().strip("()").split()[2])
- cold_tier_type_info['coldredundancyCount'] = redundancy_count
+ g.log.error("Unable to find key '%s' in the volume info for "
+ "the volume %s", key, volname)
+ all_volume_type_info[key] = None
+ volume_type_info['volume_type_info'] = all_volume_type_info
- return cold_tier_type_info
-
-
-def get_hot_tier_type_info(mnode, volname):
- """Returns hot tier type information for the specified volume.
-
- Args:
- mnode (str): Node on which commands are executed.
- volname (str): Name of the volume.
-
- Returns:
- dict : Dict containing the keys, values defining the hot tier type:
- Example:
- hot_tier_type_info = {
- 'hotBrickType': 'Distribute',
- 'hotreplicaCount': '1'
- }
- NoneType: None if volume does not exist or is not a tiered volume or
- any other key errors.
- """
- volinfo = get_volume_info(mnode, volname)
- if volinfo is None:
- g.log.error("Unable to get the volume info for volume %s", volname)
- return None
-
- if not is_tiered_volume(mnode, volname):
- g.log.error("Volume %s is not a tiered volume", volname)
- return None
-
- hot_tier_type_info = {
- 'hotBrickType': '',
- 'hotreplicaCount': ''
- }
- for key in hot_tier_type_info.keys():
- if key in volinfo[volname]['bricks']['hotBricks']:
- hot_tier_type_info[key] = (volinfo[volname]['bricks']['hotBricks']
- [key])
- else:
- g.log.error("Unable to find key '%s' in the volume info for the "
- "volume %s", key, volname)
- return None
-
- return hot_tier_type_info
+ return volume_type_info
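A sketch of reading the flattened dict returned by get_volume_type_info(); values are strings taken straight from the volume info, or None where a key was absent:

    from glustolibs.gluster.volume_libs import get_volume_type_info

    info = get_volume_type_info("abc.xyz.com", "testvol")
    if info is not None:
        vol_type = info['volume_type_info']['typeStr']
        replica_count = info['volume_type_info']['replicaCount']
        print("%s volume with replica count %s" % (vol_type, replica_count))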
def get_num_of_bricks_per_subvol(mnode, volname):
@@ -852,86 +689,21 @@ def get_num_of_bricks_per_subvol(mnode, volname):
number of bricks per subvol
Example:
num_of_bricks_per_subvol = {
- 'is_tier': False,
- 'hot_tier_num_of_bricks_per_subvol': None,
- 'cold_tier_num_of_bricks_per_subvol': None,
'volume_num_of_bricks_per_subvol': 2
}
- num_of_bricks_per_subvol = {
- 'is_tier': True,
- 'hot_tier_num_of_bricks_per_subvol': 3,
- 'cold_tier_num_of_bricks_per_subvol': 2,
- 'volume_num_of_bricks_per_subvol': None
- }
-
- NoneType: None if volume does not exist or is a tiered volume.
+ NoneType: None if volume does not exist.
"""
- bricks_per_subvol_dict = {
- 'is_tier': False,
- 'hot_tier_num_of_bricks_per_subvol': None,
- 'cold_tier_num_of_bricks_per_subvol': None,
- 'volume_num_of_bricks_per_subvol': None
- }
+ bricks_per_subvol_dict = {'volume_num_of_bricks_per_subvol': None}
subvols_dict = get_subvols(mnode, volname)
if subvols_dict['volume_subvols']:
bricks_per_subvol_dict['volume_num_of_bricks_per_subvol'] = (
len(subvols_dict['volume_subvols'][0]))
- else:
- if (subvols_dict['hot_tier_subvols'] and
- subvols_dict['cold_tier_subvols']):
- bricks_per_subvol_dict['is_tier'] = True
- bricks_per_subvol_dict['hot_tier_num_of_bricks_per_subvol'] = (
- len(subvols_dict['hot_tier_subvols'][0]))
- bricks_per_subvol_dict['cold_tier_num_of_bricks_per_subvol'] = (
- len(subvols_dict['cold_tier_subvols'][0]))
return bricks_per_subvol_dict
-def get_cold_tier_num_of_bricks_per_subvol(mnode, volname):
- """Returns number of bricks per subvol in cold tier
-
- Args:
- mnode (str): Node on which commands are executed.
- volname (str): Name of the volume.
-
- Returns:
- int : Number of bricks per subvol on cold tier.
- NoneType: None if volume does not exist or not a tiered volume.
- """
- if not is_tiered_volume(mnode, volname):
- g.log.error("Volume %s is not a tiered volume", volname)
- return None
- subvols_dict = get_subvols(mnode, volname)
- if subvols_dict['cold_tier_subvols']:
- return len(subvols_dict['cold_tier_subvols'][0])
- else:
- return None
-
-
-def get_hot_tier_num_of_bricks_per_subvol(mnode, volname):
- """Returns number of bricks per subvol in hot tier
-
- Args:
- mnode (str): Node on which commands are executed.
- volname (str): Name of the volume.
-
- Returns:
- int : Number of bricks per subvol on hot tier.
- NoneType: None if volume does not exist or not a tiered volume.
- """
- if not is_tiered_volume(mnode, volname):
- g.log.error("Volume %s is not a tiered volume", volname)
- return None
- subvols_dict = get_subvols(mnode, volname)
- if subvols_dict['hot_tier_subvols']:
- return len(subvols_dict['hot_tier_subvols'][0])
- else:
- return None
-
-
def get_replica_count(mnode, volname):
"""Get the replica count of the volume
@@ -943,17 +715,8 @@ def get_replica_count(mnode, volname):
dict : Dict contain keys, values defining Replica count of the volume.
Example:
replica_count_info = {
- 'is_tier': False,
- 'hot_tier_replica_count': None,
- 'cold_tier_replica_count': None,
'volume_replica_count': 3
}
- replica_count_info = {
- 'is_tier': True,
- 'hot_tier_replica_count': 2,
- 'cold_tier_replica_count': 3,
- 'volume_replica_count': None
- }
NoneType: None if it is parse failure.
"""
vol_type_info = get_volume_type_info(mnode, volname)
@@ -962,69 +725,14 @@ def get_replica_count(mnode, volname):
volname)
return None
- replica_count_info = {
- 'is_tier': False,
- 'hot_tier_replica_count': None,
- 'cold_tier_replica_count': None,
- 'volume_replica_count': None
- }
+ replica_count_info = {'volume_replica_count': None}
- replica_count_info['is_tier'] = vol_type_info['is_tier']
- if replica_count_info['is_tier']:
- replica_count_info['hot_tier_replica_count'] = (
- vol_type_info['hot_tier_type_info']['hotreplicaCount'])
- replica_count_info['cold_tier_replica_count'] = (
- vol_type_info['cold_tier_type_info']['coldreplicaCount'])
-
- else:
- replica_count_info['volume_replica_count'] = (
- vol_type_info['volume_type_info']['replicaCount'])
+ replica_count_info['volume_replica_count'] = (
+ vol_type_info['volume_type_info']['replicaCount'])
return replica_count_info
-def get_cold_tier_replica_count(mnode, volname):
- """Get the replica count of cold tier.
-
- Args:
- mnode (str): Node on which commands are executed.
- volname (str): Name of the volume.
-
- Returns:
- int : Replica count of the cold tier.
- NoneType: None if volume does not exist or not a tiered volume.
- """
- is_tier = is_tiered_volume(mnode, volname)
- if not is_tier:
- return None
- else:
- volinfo = get_volume_info(mnode, volname)
- cold_tier_replica_count = (volinfo[volname]["bricks"]['coldBricks']
- ['coldreplicaCount'])
- return cold_tier_replica_count
-
-
-def get_hot_tier_replica_count(mnode, volname):
- """Get the replica count of hot tier.
-
- Args:
- mnode (str): Node on which commands are executed.
- volname (str): Name of the volume.
-
- Returns:
- int : Replica count of the hot tier.
- NoneType: None if volume does not exist or not a tiered volume.
- """
- is_tier = is_tiered_volume(mnode, volname)
- if not is_tier:
- return None
- else:
- volinfo = get_volume_info(mnode, volname)
- hot_tier_replica_count = (volinfo[volname]["bricks"]['hotBricks']
- ['hotreplicaCount'])
- return hot_tier_replica_count
-
-
def get_disperse_count(mnode, volname):
"""Get the disperse count of the volume
@@ -1036,15 +744,8 @@ def get_disperse_count(mnode, volname):
dict : Dict contain keys, values defining Disperse count of the volume.
Example:
disperse_count_info = {
- 'is_tier': False,
- 'cold_tier_disperse_count': None,
'volume_disperse_count': 3
}
- disperse_count_info = {
- 'is_tier': True,
- 'cold_tier_disperse_count': 3,
- 'volume_disperse_count': None
- }
None: If it is non dispersed volume.
"""
vol_type_info = get_volume_type_info(mnode, volname)
@@ -1053,45 +754,14 @@ def get_disperse_count(mnode, volname):
volname)
return None
- disperse_count_info = {
- 'is_tier': False,
- 'cold_tier_disperse_count': None,
- 'volume_disperse_count': None
- }
-
- disperse_count_info['is_tier'] = vol_type_info['is_tier']
- if disperse_count_info['is_tier']:
- disperse_count_info['cold_tier_disperse_count'] = (
- vol_type_info['cold_tier_type_info']['colddisperseCount'])
+ disperse_count_info = {'volume_disperse_count': None}
- else:
- disperse_count_info['volume_disperse_count'] = (
+ disperse_count_info['volume_disperse_count'] = (
vol_type_info['volume_type_info']['disperseCount'])
return disperse_count_info
-def get_cold_tier_disperse_count(mnode, volname):
- """Get the disperse count of cold tier.
-
- Args:
- mnode (str): Node on which commands are executed.
- volname (str): Name of the volume.
-
- Returns:
- int : disperse count of the cold tier.
- NoneType: None if volume does not exist or not a tiered volume.
- """
- is_tier = is_tiered_volume(mnode, volname)
- if not is_tier:
- return None
- else:
- volinfo = get_volume_info(mnode, volname)
- cold_tier_disperse_count = (volinfo[volname]["bricks"]['coldBricks']
- ['colddisperseCount'])
- return cold_tier_disperse_count
-
-
def enable_and_validate_volume_options(mnode, volname, volume_options_list,
time_delay=5):
"""Enable the volume option and validate whether the option has be
@@ -1108,7 +778,7 @@ def enable_and_validate_volume_options(mnode, volname, volume_options_list,
bool: True when enabling and validating all volume options is
successful. False otherwise
"""
- if isinstance(volume_options_list, str):
+ if not isinstance(volume_options_list, list):
volume_options_list = [volume_options_list]
for option in volume_options_list:
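The isinstance checks swapped in throughout this change follow one idiom: wrap any non-list argument (a plain string, or a unicode string on Python 2, which `isinstance(x, str)` missed) in a single-element list so the following loop can always iterate. A standalone sketch of the pattern, with a hypothetical helper name:

    def _as_list(value):
        # Wrap anything that is not already a list in a one-element list.
        return value if isinstance(value, list) else [value]

    # _as_list('performance.readdir-ahead') -> ['performance.readdir-ahead']
    # _as_list(['a', 'b']) -> ['a', 'b']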
@@ -1138,7 +808,6 @@ def enable_and_validate_volume_options(mnode, volname, volume_options_list,
def form_bricks_list_to_add_brick(mnode, volname, servers, all_servers_info,
- add_to_hot_tier=False,
**kwargs):
"""Forms list of bricks to add-bricks to the volume.
@@ -1161,9 +830,6 @@ def form_bricks_list_to_add_brick(mnode, volname, servers, all_servers_info,
}
}
Kwargs:
- add_to_hot_tier (bool): True If bricks are to be added to hot_tier.
- False otherwise. Defaults to False.
-
The keys, values in kwargs are:
- replica_count : (int)|None.
Increase the current_replica_count by replica_count
@@ -1202,19 +868,8 @@ def form_bricks_list_to_add_brick(mnode, volname, servers, all_servers_info,
bricks_per_subvol_dict = get_num_of_bricks_per_subvol(mnode, volname)
# Get number of bricks to add.
- if bricks_per_subvol_dict['is_tier']:
- if add_to_hot_tier:
- num_of_bricks_per_subvol = (
- bricks_per_subvol_dict['hot_tier_num_of_bricks_per_subvol']
- )
- else:
- num_of_bricks_per_subvol = (
- bricks_per_subvol_dict
- ['cold_tier_num_of_bricks_per_subvol']
- )
- else:
- num_of_bricks_per_subvol = (
- bricks_per_subvol_dict['volume_num_of_bricks_per_subvol'])
+ num_of_bricks_per_subvol = (
+ bricks_per_subvol_dict['volume_num_of_bricks_per_subvol'])
if num_of_bricks_per_subvol is None:
g.log.error("Number of bricks per subvol is None. "
@@ -1230,15 +885,7 @@ def form_bricks_list_to_add_brick(mnode, volname, servers, all_servers_info,
if replica_count:
# Get Subvols
subvols_info = get_subvols(mnode, volname)
-
- # Calculate number of bricks to add
- if subvols_info['is_tier']:
- if add_to_hot_tier:
- num_of_subvols = len(subvols_info['hot_tier_subvols'])
- else:
- num_of_subvols = len(subvols_info['cold_tier_subvols'])
- else:
- num_of_subvols = len(subvols_info['volume_subvols'])
+ num_of_subvols = len(subvols_info['volume_subvols'])
if num_of_subvols == 0:
g.log.error("No Sub-Volumes available for the volume %s."
@@ -1276,7 +923,7 @@ def form_bricks_list_to_add_brick(mnode, volname, servers, all_servers_info,
def expand_volume(mnode, volname, servers, all_servers_info, force=False,
- add_to_hot_tier=False, **kwargs):
+ **kwargs):
"""Forms list of bricks to add and adds those bricks to the volume.
Args:
@@ -1302,9 +949,6 @@ def expand_volume(mnode, volname, servers, all_servers_info, force=False,
will get executed with force option. If it is set to False,
then add-brick command will get executed without force option
- add_to_hot_tier (bool): True If bricks are to be added to hot_tier.
- False otherwise. Defaults to False.
-
**kwargs
The keys, values in kwargs are:
- replica_count : (int)|None.
@@ -1316,11 +960,9 @@ def expand_volume(mnode, volname, servers, all_servers_info, force=False,
bool: True of expanding volumes is successful.
False otherwise.
- NOTE: adding bricks to hot tier is yet to be added in this function.
"""
bricks_list = form_bricks_list_to_add_brick(mnode, volname, servers,
- all_servers_info,
- add_to_hot_tier, **kwargs)
+ all_servers_info, **kwargs)
if not bricks_list:
g.log.info("Unable to get bricks list to add-bricks. "
@@ -1332,17 +974,8 @@ def expand_volume(mnode, volname, servers, all_servers_info, force=False,
# Get replica count info.
replica_count_info = get_replica_count(mnode, volname)
-
- if is_tiered_volume(mnode, volname):
- if add_to_hot_tier:
- current_replica_count = (
- int(replica_count_info['hot_tier_replica_count']))
- else:
- current_replica_count = (
- int(replica_count_info['cold_tier_replica_count']))
- else:
- current_replica_count = (
- int(replica_count_info['volume_replica_count']))
+ current_replica_count = (
+ int(replica_count_info['volume_replica_count']))
kwargs['replica_count'] = current_replica_count + replica_count
@@ -1358,8 +991,7 @@ def expand_volume(mnode, volname, servers, all_servers_info, force=False,
def form_bricks_list_to_remove_brick(mnode, volname, subvol_num=None,
- replica_num=None,
- remove_from_hot_tier=False, **kwargs):
+ replica_num=None, **kwargs):
"""Form bricks list for removing the bricks.
Args:
@@ -1376,9 +1008,6 @@ def form_bricks_list_to_remove_brick(mnode, volname, subvol_num=None,
If replica_num = 0, then 1st brick from each subvolume is removed.
the replica_num starts from 0.
- remove_from_hot_tier (bool): True If bricks are to be removed from
- hot_tier. False otherwise. Defaults to False.
-
**kwargs
The keys, values in kwargs are:
- replica_count : (int)|None. Specify the number of replicas
@@ -1421,27 +1050,13 @@ def form_bricks_list_to_remove_brick(mnode, volname, subvol_num=None,
is_arbiter = False
# Calculate bricks to remove
- if subvols_info['is_tier']:
- if remove_from_hot_tier:
- current_replica_count = (
- int(replica_count_info['hot_tier_replica_count']))
- subvols_list = subvols_info['hot_tier_subvols']
- else:
- current_replica_count = (
- int(replica_count_info['cold_tier_replica_count']))
- subvols_list = subvols_info['cold_tier_subvols']
- arbiter_count = int(volume_type_info['cold_tier_type_info']
- ['coldarbiterCount'])
- if arbiter_count == 1:
- is_arbiter = True
- else:
- current_replica_count = (
- int(replica_count_info['volume_replica_count']))
- subvols_list = subvols_info['volume_subvols']
- arbiter_count = int(volume_type_info['volume_type_info']
- ['arbiterCount'])
- if arbiter_count == 1:
- is_arbiter = True
+ current_replica_count = (
+ int(replica_count_info['volume_replica_count']))
+ subvols_list = subvols_info['volume_subvols']
+ arbiter_count = int(volume_type_info['volume_type_info']
+ ['arbiterCount'])
+ if arbiter_count == 1:
+ is_arbiter = True
# If replica_num is specified select the bricks of that replica number
# from all the subvolumes.
@@ -1487,14 +1102,7 @@ def form_bricks_list_to_remove_brick(mnode, volname, subvol_num=None,
# remove bricks from sub-volumes
if subvol_num is not None or 'distribute_count' in kwargs:
- if subvols_info['is_tier']:
- if remove_from_hot_tier:
- subvols_list = subvols_info['hot_tier_subvols']
- else:
- subvols_list = subvols_info['cold_tier_subvols']
- else:
- subvols_list = subvols_info['volume_subvols']
-
+ subvols_list = subvols_info['volume_subvols']
if not subvols_list:
g.log.error("No Sub-Volumes available for the volume %s", volname)
return None
@@ -1530,7 +1138,7 @@ def form_bricks_list_to_remove_brick(mnode, volname, subvol_num=None,
def shrink_volume(mnode, volname, subvol_num=None, replica_num=None,
force=False, rebalance_timeout=300, delete_bricks=True,
- remove_from_hot_tier=False, **kwargs):
+ **kwargs):
"""Remove bricks from the volume.
Args:
@@ -1557,9 +1165,6 @@ def shrink_volume(mnode, volname, subvol_num=None, replica_num=None,
delete_bricks (bool): After remove-brick delete the removed bricks.
- remove_from_hot_tier (bool): True If bricks are to be removed from
- hot_tier. False otherwise. Defaults to False.
-
**kwargs
The keys, values in kwargs are:
- replica_count : (int)|None. Specify the replica count to
@@ -1570,12 +1175,10 @@ def shrink_volume(mnode, volname, subvol_num=None, replica_num=None,
bool: True if removing bricks from the volume is successful.
False otherwise.
- NOTE: remove-bricks from hot-tier is yet to be added in this function.
"""
# Form bricks list to remove-bricks
bricks_list_to_remove = form_bricks_list_to_remove_brick(
- mnode, volname, subvol_num, replica_num, remove_from_hot_tier,
- **kwargs)
+ mnode, volname, subvol_num, replica_num, **kwargs)
if not bricks_list_to_remove:
g.log.error("Failed to form bricks list to remove-brick. "
@@ -1594,16 +1197,8 @@ def shrink_volume(mnode, volname, subvol_num=None, replica_num=None,
# Get replica count info.
replica_count_info = get_replica_count(mnode, volname)
- if is_tiered_volume(mnode, volname):
- if remove_from_hot_tier:
- current_replica_count = (
- int(replica_count_info['hot_tier_replica_count']))
- else:
- current_replica_count = (
- int(replica_count_info['cold_tier_replica_count']))
- else:
- current_replica_count = (
- int(replica_count_info['volume_replica_count']))
+ current_replica_count = (
+ int(replica_count_info['volume_replica_count']))
kwargs['replica_count'] = current_replica_count - replica_count
@@ -1721,8 +1316,7 @@ def shrink_volume(mnode, volname, subvol_num=None, replica_num=None,
def form_bricks_to_replace_brick(mnode, volname, servers, all_servers_info,
- src_brick=None, dst_brick=None,
- replace_brick_from_hot_tier=False):
+ src_brick=None, dst_brick=None):
"""Get src_brick, dst_brick to replace brick
Args:
@@ -1749,9 +1343,6 @@ def form_bricks_to_replace_brick(mnode, volname, servers, all_servers_info,
dst_brick (str): New brick to replace the faulty brick
- replace_brick_from_hot_tier (bool): True If brick are to be
- replaced from hot_tier. False otherwise. Defaults to False.
-
Returns:
Tuple: (src_brick, dst_brick)
Nonetype: if volume doesn't exists or any other failure.
@@ -1777,13 +1368,7 @@ def form_bricks_to_replace_brick(mnode, volname, servers, all_servers_info,
if not src_brick:
# Randomly pick up a brick to bring the brick down and replace.
- if subvols_info['is_tier']:
- if replace_brick_from_hot_tier:
- subvols_list = subvols_info['hot_tier_subvols']
- else:
- subvols_list = subvols_info['cold_tier_subvols']
- else:
- subvols_list = subvols_info['volume_subvols']
+ subvols_list = subvols_info['volume_subvols']
src_brick = (random.choice(random.choice(subvols_list)))
@@ -1792,8 +1377,7 @@ def form_bricks_to_replace_brick(mnode, volname, servers, all_servers_info,
def replace_brick_from_volume(mnode, volname, servers, all_servers_info,
src_brick=None, dst_brick=None,
- delete_brick=True,
- replace_brick_from_hot_tier=False):
+ delete_brick=True, multi_vol=False):
"""Replace faulty brick from the volume.
Args:
@@ -1822,14 +1406,15 @@ def replace_brick_from_volume(mnode, volname, servers, all_servers_info,
        delete_brick (bool): After replace-brick, delete the replaced brick.
- replace_brick_from_hot_tier (bool): True If brick are to be
- replaced from hot_tier. False otherwise. Defaults to False.
+        multi_vol (bool): True, if bricks need to be created for multiple
+                          volumes (more than 5).
+                          False, otherwise. By default, value is set to False.
Returns:
bool: True if replacing brick from the volume is successful.
False otherwise.
"""
- if isinstance(servers, str):
+ if not isinstance(servers, list):
servers = [servers]
# Check if volume exists
@@ -1841,10 +1426,17 @@ def replace_brick_from_volume(mnode, volname, servers, all_servers_info,
subvols_info = get_subvols(mnode, volname)
if not dst_brick:
- dst_brick = form_bricks_list(mnode=mnode, volname=volname,
- number_of_bricks=1,
- servers=servers,
- servers_info=all_servers_info)
+ if multi_vol:
+ dst_brick = form_bricks_for_multivol(mnode=mnode,
+ volname=volname,
+ number_of_bricks=1,
+ servers=servers,
+ servers_info=all_servers_info)
+ else:
+ dst_brick = form_bricks_list(mnode=mnode, volname=volname,
+ number_of_bricks=1,
+ servers=servers,
+ servers_info=all_servers_info)
if not dst_brick:
g.log.error("Failed to get a new brick to replace the faulty "
"brick")
@@ -1853,13 +1445,7 @@ def replace_brick_from_volume(mnode, volname, servers, all_servers_info,
if not src_brick:
# Randomly pick up a brick to bring the brick down and replace.
- if subvols_info['is_tier']:
- if replace_brick_from_hot_tier:
- subvols_list = subvols_info['hot_tier_subvols']
- else:
- subvols_list = subvols_info['cold_tier_subvols']
- else:
- subvols_list = subvols_info['volume_subvols']
+ subvols_list = subvols_info['volume_subvols']
src_brick = (random.choice(random.choice(subvols_list)))
@@ -1924,17 +1510,6 @@ def get_client_quorum_info(mnode, volname):
Returns:
dict: client quorum information for the volume.
client_quorum_dict = {
- 'is_tier': False,
- 'hot_tier_quorum_info':{
- 'is_quorum_applicable': False,
- 'quorum_type': None,
- 'quorum_count': None
- },
- 'cold_tier_quorum_info':{
- 'is_quorum_applicable': False,
- 'quorum_type': None,
- 'quorum_count': None
- },
'volume_quorum_info':{
'is_quorum_applicable': False,
'quorum_type': None,
@@ -1944,17 +1519,6 @@ def get_client_quorum_info(mnode, volname):
NoneType: None if volume does not exist.
"""
client_quorum_dict = {
- 'is_tier': False,
- 'hot_tier_quorum_info': {
- 'is_quorum_applicable': False,
- 'quorum_type': None,
- 'quorum_count': None
- },
- 'cold_tier_quorum_info': {
- 'is_quorum_applicable': False,
- 'quorum_type': None,
- 'quorum_count': None
- },
'volume_quorum_info': {
'is_quorum_applicable': False,
'quorum_type': None,
@@ -1980,111 +1544,37 @@ def get_client_quorum_info(mnode, volname):
# Set the quorum info
volume_type_info = get_volume_type_info(mnode, volname)
- if volume_type_info['is_tier'] is True:
- client_quorum_dict['is_tier'] = True
-
- # Hot Tier quorum info
- hot_tier_type = volume_type_info['hot_tier_type_info']['hotBrickType']
- if (hot_tier_type == 'Replicate' or
- hot_tier_type == 'Distributed-Replicate'):
-
- (client_quorum_dict['hot_tier_quorum_info']
- ['is_quorum_applicable']) = True
- replica_count = (volume_type_info['hot_tier_type_info']
- ['hotreplicaCount'])
-
- # Case1: Replica 2
- if int(replica_count) == 2:
- if 'none' not in quorum_type:
- (client_quorum_dict['hot_tier_quorum_info']
- ['quorum_type']) = quorum_type
-
- if quorum_type == 'fixed':
- if not quorum_count == '(null)':
- (client_quorum_dict['hot_tier_quorum_info']
- ['quorum_count']) = quorum_count
-
- # Case2: Replica > 2
- if int(replica_count) > 2:
- if quorum_type == 'none':
- (client_quorum_dict['hot_tier_quorum_info']
- ['quorum_type']) = 'auto'
- quorum_type == 'auto'
- else:
- (client_quorum_dict['hot_tier_quorum_info']
- ['quorum_type']) = quorum_type
- if quorum_type == 'fixed':
- if not quorum_count == '(null)':
- (client_quorum_dict['hot_tier_quorum_info']
- ['quorum_count']) = quorum_count
-
- # Cold Tier quorum info
- cold_tier_type = (volume_type_info['cold_tier_type_info']
- ['coldBrickType'])
- if (cold_tier_type == 'Replicate' or
- cold_tier_type == 'Distributed-Replicate'):
- (client_quorum_dict['cold_tier_quorum_info']
- ['is_quorum_applicable']) = True
- replica_count = (volume_type_info['cold_tier_type_info']
- ['coldreplicaCount'])
-
- # Case1: Replica 2
- if int(replica_count) == 2:
- if 'none' not in quorum_type:
- (client_quorum_dict['cold_tier_quorum_info']
- ['quorum_type']) = quorum_type
-
- if quorum_type == 'fixed':
- if not quorum_count == '(null)':
- (client_quorum_dict['cold_tier_quorum_info']
- ['quorum_count']) = quorum_count
-
- # Case2: Replica > 2
- if int(replica_count) > 2:
- if quorum_type == 'none':
- (client_quorum_dict['cold_tier_quorum_info']
- ['quorum_type']) = 'auto'
- quorum_type == 'auto'
- else:
- (client_quorum_dict['cold_tier_quorum_info']
- ['quorum_type']) = quorum_type
- if quorum_type == 'fixed':
- if not quorum_count == '(null)':
- (client_quorum_dict['cold_tier_quorum_info']
- ['quorum_count']) = quorum_count
- else:
- volume_type = (volume_type_info['volume_type_info']['typeStr'])
- if (volume_type == 'Replicate' or
- volume_type == 'Distributed-Replicate'):
- (client_quorum_dict['volume_quorum_info']
- ['is_quorum_applicable']) = True
- replica_count = (volume_type_info['volume_type_info']
- ['replicaCount'])
-
- # Case1: Replica 2
- if int(replica_count) == 2:
- if 'none' not in quorum_type:
- (client_quorum_dict['volume_quorum_info']
- ['quorum_type']) = quorum_type
-
- if quorum_type == 'fixed':
- if not quorum_count == '(null)':
- (client_quorum_dict['volume_quorum_info']
- ['quorum_count']) = quorum_count
+ volume_type = (volume_type_info['volume_type_info']['typeStr'])
+ if (volume_type == 'Replicate' or
+ volume_type == 'Distributed-Replicate'):
+ (client_quorum_dict['volume_quorum_info']
+ ['is_quorum_applicable']) = True
+ replica_count = (volume_type_info['volume_type_info']['replicaCount'])
+
+ # Case1: Replica 2
+ if int(replica_count) == 2:
+ if 'none' not in quorum_type:
+ (client_quorum_dict['volume_quorum_info']
+ ['quorum_type']) = quorum_type
- # Case2: Replica > 2
- if int(replica_count) > 2:
- if quorum_type == 'none':
- (client_quorum_dict['volume_quorum_info']
- ['quorum_type']) = 'auto'
- quorum_type == 'auto'
- else:
- (client_quorum_dict['volume_quorum_info']
- ['quorum_type']) = quorum_type
if quorum_type == 'fixed':
if not quorum_count == '(null)':
(client_quorum_dict['volume_quorum_info']
- ['quorum_count']) = quorum_count
+ ['quorum_count']) = quorum_count
+
+ # Case2: Replica > 2
+ if int(replica_count) > 2:
+ if quorum_type == 'none':
+ (client_quorum_dict['volume_quorum_info']
+ ['quorum_type']) = 'auto'
+                quorum_type = 'auto'
+ else:
+ (client_quorum_dict['volume_quorum_info']
+ ['quorum_type']) = quorum_type
+ if quorum_type == 'fixed':
+ if not quorum_count == '(null)':
+ (client_quorum_dict['volume_quorum_info']
+ ['quorum_count']) = quorum_count
return client_quorum_dict
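A sketch of consuming the simplified quorum dict defined above:

    from glustolibs.gluster.volume_libs import get_client_quorum_info

    quorum = get_client_quorum_info("abc.xyz.com", "testvol")
    if quorum and quorum['volume_quorum_info']['is_quorum_applicable']:
        print("quorum-type: %s, quorum-count: %s"
              % (quorum['volume_quorum_info']['quorum_type'],
                 quorum['volume_quorum_info']['quorum_count']))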
@@ -2155,7 +1645,7 @@ def get_files_and_dirs_from_brick(brick_node, brick_path,
raise RuntimeError("Not specified object type to find dir/files")
skip_items = ["'.glusterfs'", "'.trashcan'"]
- if isinstance(skip, str):
+ if not isinstance(skip, list):
skip_items.append("'%s'" % skip)
exclude_pattern = ' '.join([' | grep -ve {}'.format(item)
@@ -2180,3 +1670,100 @@ def get_files_and_dirs_from_brick(brick_node, brick_path,
brick_node, brick_path)
result.extend(out.splitlines())
return result
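
For illustration, the exclude chain assembled from the default skip
items above is just a string of piped grep filters; a self-contained
sketch:

    >>> skip_items = ["'.glusterfs'", "'.trashcan'"]
    >>> ' '.join([' | grep -ve {}'.format(item) for item in skip_items])
    " | grep -ve '.glusterfs'  | grep -ve '.trashcan'"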
+
+
+def get_volume_type(brickdir_path):
+ """Checks for the type of volume under test.
+
+ Args:
+ brickdir_path(str): The complete brick path.
+ (e.g., server1.example.com:/bricks/brick1/testvol_brick0/)
+
+ Returns:
+        str: The volume type on success.
+        NoneType: None on failure.
+ """
+ # Adding import here to avoid cyclic imports
+ from glustolibs.gluster.brick_libs import get_all_bricks
+ (host, brick_path_info) = brickdir_path.split(':')
+    # Strip trailing slashes so the path matches brick entries exactly
+    path_info = brick_path_info.rstrip('/')
+ for volume in get_volume_list(host):
+ brick_paths = [brick.split(':')[1] for brick in get_all_bricks(host,
+ volume)]
+ if path_info in brick_paths:
+ ret = get_volume_info(host, volume)
+ if ret is None:
+ g.log.error("Failed to get volume type for %s", volume)
+ return None
+ list_of_replica = ('Replicate', 'Distributed-Replicate')
+ if (ret[volume].get('typeStr') in list_of_replica and
+ int(ret[volume]['arbiterCount']) == 1):
+ if int(ret[volume]['distCount']) >= 2:
+ return 'Distributed-Arbiter'
+ else:
+ return 'Arbiter'
+ else:
+ return ret[volume].get('typeStr')
+ else:
+ g.log.info("Failed to find brick-path %s for volume %s",
+ brickdir_path, volume)
+
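A usage sketch for get_volume_type; the host and brick path are
hypothetical, and the return value shows what a distributed volume with
one arbiter brick per subvolume would yield:

    >>> get_volume_type('server1.example.com:/bricks/brick1/testvol_brick0/')
    'Distributed-Arbiter'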
+
+def parse_vol_file(mnode, vol_file):
+ """ Parses the .vol file and returns the content as a dict
+ Args:
+ mnode (str): Node on which commands will be executed.
+ vol_file(str) : Path to the .vol file
+ Returns:
+        dict: Content of the .vol file.
+        NoneType: None on failure.
+ Example:
+ >>> ret = parse_vol_file("abc@xyz.com",
+ "/var/lib/glusterd/vols/testvol_distributed/
+ trusted-testvol_distributed.tcp-fuse.vol")
+ {'testvol_distributed-client-0': {'type': 'protocol/client',
+ 'option': {'send-gids': 'true','transport.socket.keepalive-count': '9',
+ 'transport.socket.keepalive-interval': '2',
+ 'transport.socket.keepalive-time': '20',
+ 'transport.tcp-user-timeout': '0',
+ 'transport.socket.ssl-enabled': 'off', 'password':
+ 'bcc934b3-9e76-47fd-930c-c31ad9f6e2f0', 'username':
+ '23bb8f1c-b373-4f85-8bab-aaa77b4918ce', 'transport.address-family':
+ 'inet', 'transport-type': 'tcp', 'remote-subvolume':
+ '/gluster/bricks/brick1/testvol_distributed_brick0',
+ 'remote-host': 'xx.xx.xx.xx', 'ping-timeout': '42'}}}
+ """
+ vol_dict, data, key = {}, {}, None
+
+ def _create_dict_from_list(cur_dict, keys, value):
+ """Creates dynamic dictionary from a given list of keys and values"""
+ if len(keys) == 1:
+ cur_dict[keys[0]] = value
+ return
+ if keys[0] not in cur_dict:
+ cur_dict[keys[0]] = {}
+ _create_dict_from_list(cur_dict[keys[0]], keys[1:], value)
+
+ ret, file_contents, err = g.run(mnode, "cat {}".format(vol_file))
+ if ret:
+ g.log.error("Failed to read the .vol file : %s", err)
+ return None
+ if not file_contents:
+ g.log.error("The given .vol file is empty")
+ return None
+ for line in file_contents.split("\n"):
+        line = line.strip()
+        if line:
+ if line.startswith('end-volume'):
+ vol_dict[key] = data
+ data = {}
+ elif line.startswith("volume "):
+ key = line.split(" ")[-1]
+ elif line.startswith("subvolumes "):
+ key_list = line.split(" ")[0]
+ _create_dict_from_list(data, [key_list], line.split(" ")[1:])
+ else:
+ key_list = line.split(" ")[:-1]
+ _create_dict_from_list(data, key_list, line.split(" ")[-1])
+ return vol_dict
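
The recursive _create_dict_from_list helper above turns the
space-separated fields of an option line into nested dict keys; a small
standalone sketch of the idea:

    >>> d = {}
    >>> _create_dict_from_list(d, ['option', 'transport-type'], 'tcp')
    >>> d
    {'option': {'transport-type': 'tcp'}}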
diff --git a/glustolibs-gluster/glustolibs/gluster/volume_ops.py b/glustolibs-gluster/glustolibs/gluster/volume_ops.py
index e478ed979..d25a9349b 100644
--- a/glustolibs-gluster/glustolibs/gluster/volume_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/volume_ops.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2015-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -20,6 +20,11 @@ import re
import copy
from glusto.core import Glusto as g
from pprint import pformat
+import io
+try:
+    import ConfigParser as configparser  # Python 2
+except ImportError:
+    import configparser  # Python 3
try:
import xml.etree.cElementTree as etree
except ImportError:
@@ -233,15 +238,8 @@ def volume_delete(mnode, volname, xfail=False):
)
return False
- if volinfo[volname]['typeStr'] == 'Tier':
- tmp_hot_brick = volinfo[volname]["bricks"]["hotBricks"]["brick"]
- hot_bricks = [x["name"] for x in tmp_hot_brick if "name" in x]
- tmp_cold_brick = volinfo[volname]["bricks"]["coldBricks"]["brick"]
- cold_bricks = [x["name"] for x in tmp_cold_brick if "name" in x]
- bricks = hot_bricks + cold_bricks
- else:
- bricks = [x["name"] for x in volinfo[volname]["bricks"]["brick"]
- if "name" in x]
+ bricks = [x["name"] for x in volinfo[volname]["bricks"]["brick"] if
+ "name" in x]
ret, out, err = g.run(mnode, "gluster volume delete {} --mode=script"
.format(volname))
if ret != 0:
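
A standalone sketch of the simplified brick collection above, fed with
made-up volinfo content:

    >>> volinfo = {'testvol': {'bricks': {'brick': [
    ...     {'name': 'host1:/bricks/brick1/b0'},
    ...     {'name': 'host2:/bricks/brick1/b1'}]}}}
    >>> [x['name'] for x in volinfo['testvol']['bricks']['brick']
    ...  if 'name' in x]
    ['host1:/bricks/brick1/b0', 'host2:/bricks/brick1/b1']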
@@ -263,7 +261,7 @@ def volume_delete(mnode, volname, xfail=False):
ret, out, err = g.run(node, "rm -rf %s" % vol_dir)
if ret != 0:
if not xfail:
- g.log.err(
+ g.log.error(
"Unexpected: rm -rf {} failed ({}: {})"
.format(vol_dir, out, err)
)
@@ -387,27 +385,34 @@ def get_volume_status(mnode, volname='all', service='', options=''):
NoneType: on failure
Example:
- get_volume_status("10.70.47.89", volname="testvol")
- >>>{'testvol': {'10.70.47.89': {'/bricks/brick1/a11': {'status': '1',
- 'pid': '28963', 'bricktype': 'cold', 'port': '49163', 'peerid':
- '7fc9015e-8134-4753-b837-54cbc6030c98', 'ports': {'rdma': 'N/A',
- 'tcp': '49163'}}, '/bricks/brick2/a31': {'status': '1', 'pid':
- '28982', 'bricktype': 'cold', 'port': '49164', 'peerid':
- '7fc9015e-8134-4753-b837-54cbc6030c98', 'ports': {'rdma': 'N/A',
- 'tcp': '49164'}}, 'NFS Server': {'status': '1', 'pid': '30525',
- 'port': '2049', 'peerid': '7fc9015e-8134-4753-b837-54cbc6030c98',
- 'ports': {'rdma': 'N/A', 'tcp': '2049'}}, '/bricks/brick1/a12':
- {'status': '1', 'pid': '30505', 'bricktype': 'hot', 'port': '49165',
- 'peerid': '7fc9015e-8134-4753-b837-54cbc6030c98', 'ports': {'rdma':
- 'N/A', 'tcp': '49165'}}}, '10.70.47.118': {'/bricks/brick1/a21':
- {'status': '1', 'pid': '5427', 'bricktype': 'cold', 'port': '49162',
- 'peerid': '5397d8f5-2986-453a-b0b5-5c40a9bb87ff', 'ports': {'rdma':
- 'N/A', 'tcp': '49162'}}, '/bricks/brick2/a41': {'status': '1', 'pid':
- '5446', 'bricktype': 'cold', 'port': '49163', 'peerid':
- '5397d8f5-2986-453a-b0b5-5c40a9bb87ff', 'ports': {'rdma': 'N/A',
- 'tcp': '49163'}}, 'NFS Server': {'status': '1', 'pid': '6397', 'port':
- '2049', 'peerid': '5397d8f5-2986-453a-b0b5-5c40a9bb87ff', 'ports':
- {'rdma': 'N/A', 'tcp': '2049'}}}}}
+ get_volume_status(host1, volname="testvol_replicated")
+ >>>{'testvol_replicated': {'host1': {'Self-heal Daemon': {'status':
+ '1', 'pid': '2479', 'port': 'N/A', 'peerid':
+ 'b7a02af9-eea4-4657-8b86-3b21ec302f48', 'ports': {'rdma': 'N/A',
+ 'tcp': 'N/A'}}, '/bricks/brick4/testvol_replicated_brick2': {'status':
+ '1', 'pid': '2468', 'bricktype': 'None', 'port': '49160', 'peerid':
+ 'b7a02af9-eea4-4657-8b86-3b21ec302f48', 'ports': {'rdma': 'N/A',
+ 'tcp': '49160'}}}, 'host2': {'Self-heal Daemon': {'status': '1',
+ 'pid': '2513', 'port': 'N/A', 'peerid':
+ '7f6fb9ed-3e0b-4f27-89b3-9e4f836c2332', 'ports': {'rdma': 'N/A',
+ 'tcp': 'N/A'}}, '/bricks/brick4/testvol_replicated_brick1': {'status':
+ '1', 'pid': '2456', 'bricktype': 'None', 'port': '49160', 'peerid':
+ '7f6fb9ed-3e0b-4f27-89b3-9e4f836c2332', 'ports': {'rdma': 'N/A',
+ 'tcp': '49160'}}}, 'host3': {'Self-heal Daemon': {'status': '1', 'pid'
+ : '2515', 'port': 'N/A', 'peerid':
+ '6172cfab-9d72-43b5-ba6f-612e5cfc020c', 'ports': {'rdma': 'N/A',
+ 'tcp': 'N/A'}}}, 'host4': {'Self-heal Daemon': {'status': '1', 'pid':
+ '2445', 'port': 'N/A', 'peerid': 'c16a1660-ee73-4e0f-b9c7-d2e830e39539
+ ', 'ports': {'rdma': 'N/A', 'tcp': 'N/A'}}}, 'host5':
+ {'Self-heal Daemon': {'status': '1', 'pid': '2536', 'port': 'N/A',
+ 'peerid': '79ea9f52-88f0-4293-ae21-8ea13f44b58d', 'ports':
+ {'rdma': 'N/A', 'tcp': 'N/A'}}}, 'host6': {'Self-heal Daemon':
+ {'status': '1', 'pid': '2526', 'port': 'N/A', 'peerid':
+ 'c00a3c5e-668f-440b-860c-da43e999737b', 'ports': {'rdma': 'N/A',
+ 'tcp': 'N/A'}}, '/bricks/brick4/testvol_replicated_brick0': {'status':
+ '1', 'pid': '2503', 'bricktype': 'None', 'port': '49160', 'peerid':
+ 'c00a3c5e-668f-440b-860c-da43e999737b', 'ports': {'rdma': 'N/A',
+ 'tcp': '49160'}}}}}
"""
cmd = "gluster vol status %s %s %s --xml" % (volname, service, options)
@@ -428,8 +433,6 @@ def get_volume_status(mnode, volname='all', service='', options=''):
for volume in volume_list:
tmp_dict1 = {}
tmp_dict2 = {}
- hot_bricks = []
- cold_bricks = []
vol_name = [vol.text for vol in volume if vol.tag == "volName"]
# parsing volume status xml output
@@ -449,24 +452,7 @@ def get_volume_status(mnode, volname='all', service='', options=''):
elem_tag = []
for elem in volume.getchildren():
elem_tag.append(elem.tag)
- if ('hotBricks' in elem_tag) or ('coldBricks' in elem_tag):
- for elem in volume.getchildren():
- if (elem.tag == 'hotBricks'):
- nodes = elem.findall("node")
- hot_bricks = [node.find('path').text
- for node in nodes
- if (
- node.find('path').text.startswith('/'))]
- if (elem.tag == 'coldBricks'):
- for n in elem.findall("node"):
- nodes.append(n)
- cold_bricks = [node.find('path').text
- for node in nodes
- if (
- (node.find('path').
- text.startswith('/')))]
- else:
- nodes = volume.findall("node")
+ nodes = volume.findall("node")
for each_node in nodes:
if each_node.find('path').text.startswith('/'):
@@ -479,12 +465,7 @@ def get_volume_status(mnode, volname='all', service='', options=''):
tmp_dict3 = {}
if "hostname" in node_dict.keys():
if node_dict['path'].startswith('/'):
- if node_dict['path'] in hot_bricks:
- node_dict["bricktype"] = 'hot'
- elif node_dict['path'] in cold_bricks:
- node_dict["bricktype"] = 'cold'
- else:
- node_dict["bricktype"] = 'None'
+ node_dict["bricktype"] = 'None'
tmp = node_dict["path"]
tmp_dict3[node_dict["path"]] = node_dict
else:
@@ -573,7 +554,7 @@ def set_volume_options(mnode, volname, options):
# Check if group options are specified.
if 'group' in volume_options:
group_options = volume_options.pop('group')
- if isinstance(group_options, str):
+ if not isinstance(group_options, list):
group_options = [group_options]
for group_option in group_options:
cmd = ("gluster volume set %s group %s --mode=script" %
@@ -594,6 +575,41 @@ def set_volume_options(mnode, volname, options):
return _rc
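
With the relaxed type check above, a single settings group can be passed
either as a plain string or as a list; the two calls below should be
equivalent (host, volume and group names are hypothetical):

    >>> set_volume_options('host1', 'testvol', {'group': 'virt'})
    >>> set_volume_options('host1', 'testvol', {'group': ['virt']})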
+def reset_volume_option(mnode, volname, option, force=False):
+ """Resets the volume option
+
+ Args:
+ mnode (str): Node on which cmd has to be executed
+ volname (str): volume name
+ option (str): volume option
+
+ Kwargs:
+        force (bool): If True, the volume option is reset with the
+            force flag; otherwise the reset runs without it.
+
+ Returns:
+ tuple: Tuple containing three elements (ret, out, err).
+ The first element 'ret' is of type 'int' and is the return value
+ of command execution.
+
+ The second element 'out' is of type 'str' and is the stdout value
+ of the command execution.
+
+ The third element 'err' is of type 'str' and is the stderr value
+ of the command execution.
+
+ Example:
+ reset_volume_option("abc.xyz.com", "testvol", "option")
+ """
+ if force:
+ cmd = ("gluster volume reset %s %s force --mode=script"
+ % (volname, option))
+ else:
+ cmd = "gluster volume reset %s %s --mode=script" % (volname, option)
+ return g.run(mnode, cmd)
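
The helper simply shells out to the gluster CLI; with force=True the
docstring example would run a command of this shape (volume and option
names are hypothetical):

    gluster volume reset testvol performance.cache-size force --mode=script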
+
+
def volume_info(mnode, volname='all'):
"""Executes gluster volume info cli command
@@ -638,29 +654,42 @@ def get_volume_info(mnode, volname='all', xfail=False):
dict: volume info in dict of dicts
Example:
- get_volume_info("abc.com", volname="testvol")
- >>>{'testvol': {'status': '1', 'xlators': None, 'disperseCount': '0',
- 'bricks': {'coldBricks': {'colddisperseCount': '0',
- 'coldarbiterCount': '0', 'coldBrickType': 'Distribute',
- 'coldbrickCount': '4', 'numberOfBricks': '4', 'brick':
- [{'isArbiter': '0', 'name': '10.70.47.89:/bricks/brick1/a11',
- 'hostUuid': '7fc9015e-8134-4753-b837-54cbc6030c98'}, {'isArbiter':
- '0', 'name': '10.70.47.118:/bricks/brick1/a21', 'hostUuid':
- '7fc9015e-8134-4753-b837-54cbc6030c98'}, {'isArbiter': '0', 'name':
- '10.70.47.89:/bricks/brick2/a31', 'hostUuid':
- '7fc9015e-8134-4753-b837-54cbc6030c98'}, {'isArbiter': '0',
- 'name': '10.70.47.118:/bricks/brick2/a41', 'hostUuid':
- '7fc9015e-8134-4753-b837-54cbc6030c98'}], 'coldreplicaCount': '1'},
- 'hotBricks': {'hotBrickType': 'Distribute', 'numberOfBricks': '1',
- 'brick': [{'name': '10.70.47.89:/bricks/brick1/a12', 'hostUuid':
- '7fc9015e-8134-4753-b837-54cbc6030c98'}], 'hotbrickCount': '1',
- 'hotreplicaCount': '1'}}, 'type': '5', 'distCount': '1',
- 'replicaCount': '1', 'brickCount': '5', 'options':
- {'cluster.tier-mode': 'cache', 'performance.readdir-ahead': 'on',
- 'features.ctr-enabled': 'on'}, 'redundancyCount': '0', 'transport':
- '0', 'typeStr': 'Tier', 'stripeCount': '1', 'arbiterCount': '0',
- 'id': 'ffa8a8d1-546f-4ebf-8e82-fcc96c7e4e05', 'statusStr': 'Started',
- 'optCount': '3'}}
+ get_volume_info("host1", volname="testvol")
+ >>>{'testvol': {'status': '1', 'disperseCount': '6',
+ 'bricks': {'brick': [{'isArbiter': '0', 'name':
+ 'host1:/bricks/brick6/testvol_brick0', 'hostUuid':
+ 'c00a3c5e-668f-440b-860c-da43e999737b'}, {'isArbiter': '0', 'name':
+ 'host2:/bricks/brick6/testvol_brick1', 'hostUuid':
+ '7f6fb9ed-3e0b-4f27-89b3-9e4f836c2332'}, {'isArbiter': '0', 'name':
+ 'host3:/bricks/brick6/testvol_brick2', 'hostUuid':
+ 'b7a02af9-eea4-4657-8b86-3b21ec302f48'}, {'isArbiter': '0', 'name':
+ 'host4:/bricks/brick4/testvol_brick3', 'hostUuid':
+ '79ea9f52-88f0-4293-ae21-8ea13f44b58d'}, {'isArbiter': '0', 'name':
+ 'host5:/bricks/brick2/testvol_brick4', 'hostUuid':
+ 'c16a1660-ee73-4e0f-b9c7-d2e830e39539'}, {'isArbiter': '0', 'name':
+ 'host6:/bricks/brick2/testvol_brick5', 'hostUuid':
+ '6172cfab-9d72-43b5-ba6f-612e5cfc020c'}, {'isArbiter': '0', 'name':
+ 'host1:/bricks/brick7/testvol_brick6', 'hostUuid':
+ 'c00a3c5e-668f-440b-860c-da43e999737b'}, {'isArbiter': '0', 'name':
+ 'host2:/bricks/brick7/testvol_brick7', 'hostUuid':
+ '7f6fb9ed-3e0b-4f27-89b3-9e4f836c2332'}, {'isArbiter': '0', 'name':
+ 'host3:/bricks/brick7/testvol_brick8', 'hostUuid':
+ 'b7a02af9-eea4-4657-8b86-3b21ec302f48'}, {'isArbiter': '0', 'name':
+ 'host4:/bricks/brick5/testvol_brick9', 'hostUuid':
+ '79ea9f52-88f0-4293-ae21-8ea13f44b58d'}, {'isArbiter': '0', 'name':
+ 'host5:/bricks/brick4/testvol_brick10', 'hostUuid':
+ 'c16a1660-ee73-4e0f-b9c7-d2e830e39539'}, {'isArbiter': '0', 'name':
+ 'host6:/bricks/brick4/testvol_brick11', 'hostUuid':
+ '6172cfab-9d72-43b5-ba6f-612e5cfc020c'}]},
+ 'type': '9', 'distCount': '2', 'replicaCount': '1', 'brickCount':
+ '12', 'options': {'nfs.disable': 'on', 'cluster.server-quorum-ratio':
+ '90%', 'storage.fips-mode-rchecksum': 'on',
+ 'transport.address-family': 'inet', 'cluster.brick-multiplex':
+ 'disable'}, 'redundancyCount': '2', 'snapshotCount': '0',
+ 'transport': '0', 'typeStr': 'Distributed-Disperse', 'stripeCount':
+ '1', 'arbiterCount': '0',
+ 'id': '8d217fa3-094b-4293-89b5-41d447c06d22', 'statusStr': 'Started',
+ 'optCount': '5'}}
"""
cmd = "gluster volume info %s --xml" % volname
@@ -692,18 +721,6 @@ def get_volume_info(mnode, volname='all', xfail=False):
(volinfo[volname]["bricks"]["brick"].
append(brick_info_dict))
- if el.tag == "hotBricks" or el.tag == "coldBricks":
- volinfo[volname]["bricks"][el.tag] = {}
- volinfo[volname]["bricks"][el.tag]["brick"] = []
- for elmt in el.getchildren():
- if elmt.tag == 'brick':
- brick_info_dict = {}
- for el_brk in elmt.getchildren():
- brick_info_dict[el_brk.tag] = el_brk.text
- (volinfo[volname]["bricks"][el.tag]["brick"].
- append(brick_info_dict))
- else:
- volinfo[volname]["bricks"][el.tag][elmt.tag] = elmt.text # noqa: E501
elif elem.tag == "options":
volinfo[volname]["options"] = {}
for option in elem.findall("option"):
@@ -805,3 +822,76 @@ def get_volume_list(mnode):
vol_list.append(elem.text)
return vol_list
+
+
+def get_gluster_state(mnode):
+ """Executes the 'gluster get-state' command on the specified node, checks
+ for the data dump, reads the glusterd state dump and returns it.
+
+ Args:
+ mnode (str): Node on which command has to be executed
+
+ Returns:
+        dict: The output of the gluster get-state command as a dict.
+        NoneType: None on failure.
+
+ Example:
+ >>>get_gluster_state(self.mnode)
+ {'Global': {'myuuid': 'e92964c8-a7d2-4e59-81ac-feb0687df55e',
+ 'op-version': '70000'}, 'Global options': {}, 'Peers':
+ {'peer1.primary_hostname': 'dhcp43-167.lab.eng.blr.redhat.com',
+ 'peer1.uuid': 'd3a85b6a-134f-4df2-ba93-4bd0321b6d6a', 'peer1.state':
+ 'Peer in Cluster', 'peer1.connected': 'Connected',
+ 'peer1.othernames': '', 'peer2.primary_hostname':
+ 'dhcp43-68.lab.eng.blr.redhat.com', 'peer2.uuid':
+ 'f488aa35-bc56-4aea-9581-8db54e137937', 'peer2.state':
+ 'Peer in Cluster', 'peer2.connected': 'Connected',
+ 'peer2.othernames': '', 'peer3.primary_hostname':
+ 'dhcp43-64.lab.eng.blr.redhat.com', 'peer3.uuid':
+ 'dfe75b01-2988-4eac-879a-cf3d701e1382', 'peer3.state':
+ 'Peer in Cluster', 'peer3.connected': 'Connected',
+ 'peer3.othernames': '', 'peer4.primary_hostname':
+ 'dhcp42-147.lab.eng.blr.redhat.com', 'peer4.uuid':
+ '05e3858b-33bf-449a-b170-2d3dac9adc45', 'peer4.state':
+ 'Peer in Cluster', 'peer4.connected': 'Connected',
+ 'peer4.othernames': '', 'peer5.primary_hostname':
+ 'dhcp41-246.lab.eng.blr.redhat.com', 'peer5.uuid':
+ 'c2e3f833-98fa-42d9-ae63-2bc471515810', 'peer5.state':
+ 'Peer in Cluster', 'peer5.connected': 'Connected',
+ 'peer5.othernames': ''}, 'Volumes': {}, 'Services': {'svc1.name':
+ 'glustershd', 'svc1.online_status': 'Offline', 'svc2.name': 'nfs',
+ 'svc2.online_status': 'Offline', 'svc3.name': 'bitd',
+ 'svc3.online_status': 'Offline', 'svc4.name': 'scrub',
+ 'svc4.online_status': 'Offline', 'svc5.name': 'quotad',
+ 'svc5.online_status': 'Offline'}, 'Misc': {'base port': '49152',
+ 'last allocated port': '49154'}}
+ """
+
+ ret, out, _ = g.run(mnode, "gluster get-state")
+ if ret:
+ g.log.error("Failed to execute gluster get-state command!")
+ return None
+    # get-state writes the state dump to a file and prints that file's
+    # path; extract the path from stdout and verify one was returned.
+
+    match = re.search(r"/.*?/.\S*", out)
+    if not match:
+        g.log.error("Failed to get the gluster state dump file path.")
+        return None
+    path = match.group()
+ ret, out, _ = g.run(mnode, "cat {}".format(path))
+ if ret:
+ g.log.error("Failed to read the gluster state dump.")
+ return None
+    g.log.info("Command executed successfully and the state dump was read")
+
+ # Converting the string to unicode for py2/3 compatibility
+ out = u"".join(out)
+ data_buf = io.StringIO(out)
+ config = configparser.ConfigParser()
+ try:
+ config.read_file(data_buf) # Python3
+ except AttributeError:
+ config.readfp(data_buf) # Python2
+ # Converts the config parser object to a dictionary and returns it
+ return {section: dict(config.items(section)) for section in
+ config.sections()}
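
A minimal, self-contained sketch of the INI-to-dict conversion performed
above, fed with made-up state dump content:

    >>> import io
    >>> import configparser
    >>> buf = io.StringIO(u"[Global]\nmyuuid: e92964c8\nop-version: 70000\n")
    >>> config = configparser.ConfigParser()
    >>> config.read_file(buf)
    >>> {s: dict(config.items(s)) for s in config.sections()}
    {'Global': {'myuuid': 'e92964c8', 'op-version': '70000'}}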
diff --git a/glustolibs-gluster/scripts/compute_hash.py b/glustolibs-gluster/scripts/compute_hash.py
new file mode 100644
index 000000000..7cab7c494
--- /dev/null
+++ b/glustolibs-gluster/scripts/compute_hash.py
@@ -0,0 +1,32 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from __future__ import print_function
+import ctypes
+import sys
+
+filename = sys.argv[1]
+glusterfs = ctypes.cdll.LoadLibrary("libglusterfs.so.0")
+
+# On Python 3, encode the file name to ASCII before passing it to the C lib
+if sys.version_info.major == 3:
+ computed_hash = ctypes.c_uint32(glusterfs.gf_dm_hashfn(
+ filename.encode('ascii'), len(filename)))
+else:
+ computed_hash = ctypes.c_uint32(glusterfs.gf_dm_hashfn(
+ filename, len(filename)))
+
+print(computed_hash.value)
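
The script is meant to be run on a node where libglusterfs is installed;
a hypothetical invocation (the printed value is illustrative, being
whatever 32-bit hash gf_dm_hashfn returns for the given name):

    # python compute_hash.py file1.txt
    2936160903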
diff --git a/glustolibs-gluster/scripts/walk_dir.py b/glustolibs-gluster/scripts/walk_dir.py
new file mode 100644
index 000000000..02d115b0b
--- /dev/null
+++ b/glustolibs-gluster/scripts/walk_dir.py
@@ -0,0 +1,26 @@
+# Copyright (C) 2020 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from __future__ import print_function
+import os
+import sys
+
+rootdir = sys.argv[1]
+
+list_of_levels = list(os.walk(rootdir))
+print(list_of_levels)
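
A hypothetical invocation; the script prints the repr of os.walk's
(dirpath, dirnames, filenames) tuples, which a caller can convert back
into Python objects with ast.literal_eval:

    # python walk_dir.py /bricks/brick1/testvol_brick0
    [('/bricks/brick1/testvol_brick0', ['dir1'], ['file1.txt'])]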
diff --git a/glustolibs-gluster/setup.py b/glustolibs-gluster/setup.py
index b4b1cf170..05e59fde6 100644
--- a/glustolibs-gluster/setup.py
+++ b/glustolibs-gluster/setup.py
@@ -1,5 +1,5 @@
-#!/usr/bin/python
-# Copyright (c) 2016 Red Hat, Inc.
+#!/usr/bin/env python
+# Copyright (c) 2016-2020 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -26,7 +26,7 @@ setup(
name=name,
version=version,
description='Glusto - Red Hat Gluster Libraries',
- license='GPLv2+',
+ license='GPLv3+',
author='Red Hat, Inc.',
author_email='gluster-devel@gluster.org',
url='http://www.gluster.org',
@@ -35,7 +35,7 @@ setup(
'Development Status :: 4 - Beta'
'Environment :: Console'
'Intended Audience :: Developers'
- 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)'
+ 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)'
'Operating System :: POSIX :: Linux'
'Programming Language :: Python'
'Programming Language :: Python :: 2'
@@ -49,6 +49,8 @@ setup(
)
try:
- dir_util.copy_tree('./gdeploy_configs', '/usr/share/glustolibs/gdeploy_configs')
+ for srcdir, destdir in (('./gdeploy_configs', '/usr/share/glustolibs/gdeploy_configs'),
+ ('./scripts', '/usr/share/glustolibs/scripts/')):
+ dir_util.copy_tree(srcdir, destdir)
except:
pass