summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--openshift-storage-libs/openshiftstoragelibs/baseclass.py13
-rw-r--r--openshift-storage-libs/openshiftstoragelibs/gluster_ops.py15
-rw-r--r--tests/functional/metrics/test_metrics_validation.py27
3 files changed, 49 insertions, 6 deletions
diff --git a/openshift-storage-libs/openshiftstoragelibs/baseclass.py b/openshift-storage-libs/openshiftstoragelibs/baseclass.py
index b8080d23..f30c2bad 100644
--- a/openshift-storage-libs/openshiftstoragelibs/baseclass.py
+++ b/openshift-storage-libs/openshiftstoragelibs/baseclass.py
@@ -738,13 +738,21 @@ class GlusterBlockBaseClass(BaseClass):
msg = "All paths are not up in mpath %s on Node %s" % (out, node)
self.assertNotIn(state, out, msg)
- def get_block_hosting_volume_by_pvc_name(self, pvc_name):
+ def get_block_hosting_volume_by_pvc_name(
+ self, pvc_name, heketi_server_url=None, gluster_node=None,
+ ocp_client_node=None):
"""Get block hosting volume of pvc name given
Args:
pvc_name (str): pvc name for which the BHV name needs
to be returned
+ Kwargs:
+ heketi_server_url (str): heketi server url to run heketi commands
+ gluster_node (str): gluster node where to run gluster commands
ocp_client_node (str): ocp client node where to run oc commands
"""
+ if not heketi_server_url:
+ heketi_server_url = self.heketi_server_url
pv_name = get_pv_name_from_pvc(self.ocp_client[0], pvc_name)
block_volume = oc_get_custom_resource(
self.ocp_client[0], 'pv',
@@ -754,6 +762,7 @@ class GlusterBlockBaseClass(BaseClass):
# get block hosting volume from block volume
block_hosting_vol = get_block_hosting_volume_name(
- self.heketi_client_node, self.heketi_server_url, block_volume)
+ self.heketi_client_node, heketi_server_url, block_volume,
+ gluster_node=gluster_node, ocp_client_node=ocp_client_node)
return block_hosting_vol
diff --git a/openshift-storage-libs/openshiftstoragelibs/gluster_ops.py b/openshift-storage-libs/openshiftstoragelibs/gluster_ops.py
index 422c8a01..d7df73ac 100644
--- a/openshift-storage-libs/openshiftstoragelibs/gluster_ops.py
+++ b/openshift-storage-libs/openshiftstoragelibs/gluster_ops.py
@@ -240,7 +240,8 @@ def match_heketi_and_gluster_block_volumes_by_prefix(
@podcmd.GlustoPod()
def get_block_hosting_volume_name(heketi_client_node, heketi_server_url,
- block_volume):
+ block_volume, gluster_node=None,
+ ocp_client_node=None):
"""Returns block hosting volume name of given block volume
Args:
@@ -248,6 +249,10 @@ def get_block_hosting_volume_name(heketi_client_node, heketi_server_url,
heketi_server_url (str): Heketi server url
block_volume (str): Block volume of which block hosting volume
returned
+ Kwargs:
+ gluster_node (str): gluster node/pod ip where gluster command can be
+ run
+ ocp_client_node (str): OCP client node where oc commands can be run
Returns:
str : Name of the block hosting volume for given block volume
"""
@@ -263,7 +268,13 @@ def get_block_hosting_volume_name(heketi_client_node, heketi_server_url,
if not block_hosting_vol_match:
continue
- gluster_vol_list = get_volume_list("auto_get_gluster_endpoint")
+ if gluster_node and ocp_client_node:
+ cmd = 'gluster volume list'
+ gluster_vol_list = cmd_run_on_gluster_pod_or_node(
+ ocp_client_node, cmd, gluster_node).split('\n')
+ else:
+ gluster_vol_list = get_volume_list('auto_get_gluster_endpoint')
+
for vol in gluster_vol_list:
if block_hosting_vol_match.group(1).strip() in vol:
return vol
diff --git a/tests/functional/metrics/test_metrics_validation.py b/tests/functional/metrics/test_metrics_validation.py
index 2e584829..7efb7227 100644
--- a/tests/functional/metrics/test_metrics_validation.py
+++ b/tests/functional/metrics/test_metrics_validation.py
@@ -6,6 +6,9 @@ from glusto.core import Glusto as g
from openshiftstoragelibs.baseclass import GlusterBlockBaseClass
from openshiftstoragelibs import command
from openshiftstoragelibs import exceptions
+from openshiftstoragelibs.gluster_ops import (
+ restart_gluster_vol_brick_processes,
+)
from openshiftstoragelibs.openshift_ops import (
get_ocp_gluster_pod_details,
get_pod_name_from_rc,
@@ -156,20 +159,23 @@ class TestMetricsAndGlusterRegistryValidation(GlusterBlockBaseClass):
rtype='rc', heketi_server_url=self.registry_heketi_server_url,
is_registry_gluster=True)
- def cassandra_pod_delete_cleanup(self):
+ def cassandra_pod_delete_cleanup(self, raise_on_error=False):
"""Cleanup for deletion of cassandra pod using force delete"""
+ switch_oc_project(self.master, self.metrics_project_name)
try:
# Check if pod is up or ready
pod_name = get_pod_name_from_rc(
self.master, self.metrics_rc_hawkular_cassandra)
wait_for_pod_be_ready(self.master, pod_name, timeout=1)
- except exceptions.ExecutionError:
+ except exceptions.ExecutionError as err:
# Force delete and wait for new pod to come up
oc_delete(self.master, 'pod', pod_name, is_force=True)
wait_for_resource_absence(self.master, 'pod', pod_name)
new_pod_name = get_pod_name_from_rc(
self.master, self.metrics_rc_hawkular_cassandra)
wait_for_pod_be_ready(self.master, new_pod_name)
+ if raise_on_error:
+ raise err
@ddt.data('delete', 'drain')
def test_metrics_during_cassandra_pod_respin(self, motive='delete'):
@@ -245,3 +251,20 @@ class TestMetricsAndGlusterRegistryValidation(GlusterBlockBaseClass):
cmd_remove_file = 'rm {}'.format(file_name)
self.addCleanup(
oc_rsh, self.master, hawkular_cassandra, cmd_remove_file)
+
+ def test_metrics_cassandra_pod_with_bhv_brick_process_down(self):
+ """Validate metrics during restart of brick process of bhv"""
+
+ # Validate iscsi and multipath
+ gluster_node = list(self.registry_servers_info.keys())[0]
+ hawkular_cassandra, pvc_name, _, _, _ = (
+ self.verify_cassandra_pod_multipath_and_iscsi())
+ switch_oc_project(self.master, self.registry_project_name)
+
+ # Kill the brick process and force restart the volume
+ bhv_name = self.get_block_hosting_volume_by_pvc_name(
+ pvc_name, heketi_server_url=self.registry_heketi_server_url,
+ gluster_node=gluster_node, ocp_client_node=self.master)
+ restart_gluster_vol_brick_processes(
+ self.master, bhv_name, list(self.registry_servers_info.keys()))
+ self.addCleanup(self.cassandra_pod_delete_cleanup, raise_on_error=True)