From a3c024ac1525783323061e1565c3a1f760074217 Mon Sep 17 00:00:00 2001 From: Nitin Goyal Date: Fri, 13 Sep 2019 14:40:18 +0530 Subject: Add new TCs to remove or delete heketi node Add new TC to remove or delete node from heketi, and verify gluster peer status, bricks on the new node, and heal info. Change-Id: If4b6006a4a58fd581ac9851b377a82f69ce0f30f --- .../openshiftstoragelibs/gluster_ops.py | 7 ++-- .../openshiftstoragelibs/heketi_ops.py | 38 ++++++++++++++++++++++ .../openshiftstoragelibs/podcmd.py | 12 +++++-- 3 files changed, 52 insertions(+), 5 deletions(-) (limited to 'openshift-storage-libs/openshiftstoragelibs') diff --git a/openshift-storage-libs/openshiftstoragelibs/gluster_ops.py b/openshift-storage-libs/openshiftstoragelibs/gluster_ops.py index 785bde58..33ffa18d 100644 --- a/openshift-storage-libs/openshiftstoragelibs/gluster_ops.py +++ b/openshift-storage-libs/openshiftstoragelibs/gluster_ops.py @@ -26,16 +26,17 @@ from openshiftstoragelibs import waiter @podcmd.GlustoPod() -def wait_to_heal_complete(timeout=300, wait_step=5): +def wait_to_heal_complete( + timeout=300, wait_step=5, g_node="auto_get_gluster_endpoint"): """Monitors heal for volumes on gluster""" - gluster_vol_list = get_volume_list("auto_get_gluster_endpoint") + gluster_vol_list = get_volume_list(g_node) if not gluster_vol_list: raise AssertionError("failed to get gluster volume list") _waiter = waiter.Waiter(timeout=timeout, interval=wait_step) for gluster_vol in gluster_vol_list: for w in _waiter: - if is_heal_complete("auto_get_gluster_endpoint", gluster_vol): + if is_heal_complete(g_node, gluster_vol): # NOTE(vponomar): Reset attempts for waiter to avoid redundant # sleep equal to 'interval' on the next usage. 
_waiter._attempt = 0 diff --git a/openshift-storage-libs/openshiftstoragelibs/heketi_ops.py b/openshift-storage-libs/openshiftstoragelibs/heketi_ops.py index f1e535fa..df00dbf3 100644 --- a/openshift-storage-libs/openshiftstoragelibs/heketi_ops.py +++ b/openshift-storage-libs/openshiftstoragelibs/heketi_ops.py @@ -1845,3 +1845,41 @@ def get_vol_file_servers_and_hosts( glusterfs['device'].split(":")[:1] + glusterfs['options']['backup-volfile-servers'].split(",")) return {'vol_servers': vol_servers, 'vol_hosts': glusterfs['hosts']} + + +def get_bricks_on_heketi_node( + heketi_client_node, heketi_server_url, node_id, **kwargs): + """Get bricks on heketi node. + + Args: + heketi_client_node (str): Node on which cmd has to be executed. + heketi_server_url (str): Heketi server url + node_id (str): Node ID + + Kwargs: + The keys, values in kwargs are: + - secret : (str)|None + - user : (str)|None + + Returns: + list: list of bricks. + + Raises: + AssertionError: if command fails. + """ + + if 'json' in kwargs: + raise AssertionError("json is not expected parameter") + + kwargs['json'] = True + + node_info = heketi_node_info( + heketi_client_node, heketi_server_url, node_id, **kwargs) + + if len(node_info['devices']) < 1: + raise AssertionError("No device found on node %s" % node_info) + + bricks = [] + for device in node_info['devices']: + bricks += device['bricks'] + return bricks diff --git a/openshift-storage-libs/openshiftstoragelibs/podcmd.py b/openshift-storage-libs/openshiftstoragelibs/podcmd.py index 62fff01a..33e88a26 100644 --- a/openshift-storage-libs/openshiftstoragelibs/podcmd.py +++ b/openshift-storage-libs/openshiftstoragelibs/podcmd.py @@ -49,6 +49,7 @@ from collections import namedtuple from functools import partial, wraps from glusto.core import Glusto as g +import mock import six from openshiftstoragelibs import openshift_ops @@ -85,14 +86,21 @@ def run(target, command, user=None, log_level=None, orig_run=g.run): # definition time in order to capture 
the method before # any additional monkeypatching by other code - if target == 'auto_get_gluster_endpoint': - ocp_client_node = list(g.config['ocp_servers']['client'].keys())[0] + ocp_client_node = list(g.config['ocp_servers']['client'].keys())[0] + with mock.patch.object(g, 'run', new=orig_run): gluster_pods = openshift_ops.get_ocp_gluster_pod_details( ocp_client_node) + + if target == 'auto_get_gluster_endpoint': if gluster_pods: target = Pod(ocp_client_node, gluster_pods[0]["pod_name"]) else: target = list(g.config.get("gluster_servers", {}).keys())[0] + elif not isinstance(target, Pod) and gluster_pods: + for g_pod in gluster_pods: + if target in (g_pod['pod_host_ip'], g_pod['pod_hostname']): + target = Pod(ocp_client_node, g_pod['pod_name']) + break if isinstance(target, Pod): prefix = ['oc', 'rsh', target.podname] -- cgit