Diffstat (limited to 'tests/functional/heketi/test_heketi_zones.py')
-rw-r--r--  tests/functional/heketi/test_heketi_zones.py  326
1 file changed, 322 insertions(+), 4 deletions(-)
diff --git a/tests/functional/heketi/test_heketi_zones.py b/tests/functional/heketi/test_heketi_zones.py
index 489092c7..eb62b6b3 100644
--- a/tests/functional/heketi/test_heketi_zones.py
+++ b/tests/functional/heketi/test_heketi_zones.py
@@ -5,15 +5,21 @@ try:
except ImportError:
# py2
import json
+from unittest import skip
import ddt
from glusto.core import Glusto as g
+import six
import pytest
from openshiftstoragelibs import baseclass
+from openshiftstoragelibs import command
+from openshiftstoragelibs import exceptions
from openshiftstoragelibs import heketi_ops
from openshiftstoragelibs import openshift_ops
from openshiftstoragelibs import openshift_storage_libs
+from openshiftstoragelibs import utils
+from openshiftstoragelibs.waiter import Waiter
@ddt.ddt
@@ -31,6 +37,8 @@ class TestHeketiZones(baseclass.BaseClass):
def setUp(self):
super(TestHeketiZones, self).setUp()
self.node = self.ocp_master_node[0]
+ self.h_client = self.heketi_client_node
+ self.h_server = self.heketi_server_url
def _set_heketi_zones(self, unique_zones_amount=1):
h = heketi_ops.cmd_run_on_heketi_pod
@@ -107,13 +115,11 @@ class TestHeketiZones(baseclass.BaseClass):
return heketi_db_data
def _get_online_nodes(self):
- node_ids = heketi_ops.heketi_node_list(
- self.heketi_client_node, self.heketi_server_url)
+ node_ids = heketi_ops.heketi_node_list(self.h_client, self.h_server)
online_nodes = []
for node_id in node_ids:
node_info = heketi_ops.heketi_node_info(
- self.heketi_client_node, self.heketi_server_url,
- node_id, json=True)
+ self.h_client, self.h_server, node_id, json=True)
if (node_info["state"] == "online"
and node_info['cluster'] == self.cluster_id):
online_nodes.append(
@@ -246,3 +252,315 @@ class TestHeketiZones(baseclass.BaseClass):
# Create app DC with the above PVC
self.create_dc_with_pvc(pvc_name, timeout=120, wait_step=3)
+
+ def _get_online_devices_and_nodes_with_zone(self):
+ """
+        Return the online nodes and devices associated with each heketi zone
+
+ Returns:
+            dict: zones mapped to their online nodes and devices
+ e.g.,
+ { zone_1: {
+ "nodes": [
+ node1, node2, node3],
+ "devices": [
+ device1, device2]
+ },
+ zone_2: {
+ "nodes": [
+ node1, node2, node3],
+ "devices": [
+ device1, device2]
+ }
+ }
+ """
+ zone_devices_nodes = dict()
+ topology_info = heketi_ops.heketi_topology_info(
+ self.h_client, self.h_server, json=True)
+ for cluster in topology_info['clusters']:
+ for node in cluster['nodes']:
+ if node['state'] == 'online':
+ if node['zone'] not in zone_devices_nodes:
+ zone_devices_nodes[node['zone']] = dict()
+
+ if 'nodes' not in zone_devices_nodes[node['zone']]:
+ zone_devices_nodes[node['zone']]['nodes'] = []
+
+ (zone_devices_nodes[node['zone']][
+ 'nodes'].append(node['id']))
+
+ for device in node['devices']:
+ if device['state'] == 'online':
+ if node['zone'] not in zone_devices_nodes:
+ zone_devices_nodes[node['zone']] = dict()
+
+ if 'devices' not in zone_devices_nodes[node['zone']]:
+ zone_devices_nodes[node['zone']]['devices'] = []
+
+ (zone_devices_nodes[
+ node['zone']]['devices'].append(device['id']))
+
+ return zone_devices_nodes
+
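For reference, the mapping returned by this helper is consumed by the tests below roughly as in this minimal sketch (illustrative only, not part of the patch; the zone keys and IDs are made up):

    zone_devices_nodes = {
        1: {"nodes": ["node-a", "node-b"], "devices": ["dev-1", "dev-2"]},
        2: {"nodes": ["node-c"], "devices": ["dev-3"]},
    }
    zone_count = len(zone_devices_nodes)
    online_device_count = sum(
        len(data["devices"]) for data in zone_devices_nodes.values())
    # zone_count == 2, online_device_count == 3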
+ def _check_heketi_pod_to_come_up_after_changing_env(self):
+ err_str = 'Err: Error from server (NotFound): pods "{}" not found'
+ for w in Waiter(120, 5):
+ heketi_pod = openshift_ops.get_pod_names_from_dc(
+ self.node, self.heketi_dc_name)[0]
+ try:
+ openshift_ops.wait_for_pod_be_ready(
+ self.node, heketi_pod, 1, 1)
+ except (exceptions.ExecutionError, AssertionError) as e:
+ if err_str.format(heketi_pod) not in six.text_type(e):
+ raise
+ continue
+
+ break
+
+ if w.expired:
+ raise AssertionError(
+                "Heketi pod failed to come up after changing the env "
+                "value in the heketi dc")
+
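The helper above relies on the poll-until-timeout idiom of the openshiftstoragelibs Waiter; a minimal sketch of that idiom (illustrative only, not part of the patch; it assumes Waiter(timeout, interval) yields every `interval` seconds until `timeout` elapses, after which `expired` is True, as used in this file):

    from openshiftstoragelibs.waiter import Waiter

    def wait_for(condition, timeout=120, interval=5):
        # `condition` is any zero-argument callable that returns a truthy
        # value once the awaited state is reached.
        for w in Waiter(timeout, interval):
            if condition():
                return
        raise AssertionError(
            "Condition not met within {} seconds".format(timeout))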
+ def _set_zone_checking_option_in_heketi_dc_or_create_sc(
+ self, is_set_env, prefix):
+ sc_name = None
+ if is_set_env:
+ # Set env option for strict zone checking in heketi dc
+ set_env = ('HEKETI_POST_REQUEST_VOLUME_OPTIONS='
+ '"user.heketi.zone-checking strict"')
+ unset_env, e_list = "HEKETI_POST_REQUEST_VOLUME_OPTIONS-", "--list"
+ cmd_set_env = (
+ "oc set env dc/{} {}".format(self.heketi_dc_name, set_env))
+ cmd_unset_env = (
+ "oc set env dc/{} {}".format(self.heketi_dc_name, unset_env))
+ command.cmd_run(cmd_set_env, hostname=self.node)
+ self._check_heketi_pod_to_come_up_after_changing_env()
+ self.addCleanup(
+ self._check_heketi_pod_to_come_up_after_changing_env)
+ self.addCleanup(command.cmd_run, cmd_unset_env, hostname=self.node)
+
+ # List all envs and validate if env is set successfully
+ env = set_env.replace('"', '')
+ cmd_list_env = (
+ "oc set env dc/{} {}".format(self.heketi_dc_name, e_list))
+ env_list = command.cmd_run(cmd_list_env, hostname=self.node)
+ self.assertIn(env, env_list, "Failed to set env {}".format(env))
+
+ else:
+            # Create a storage class with "user.heketi.zone-checking" set
+ heketi_zone_checking = "strict"
+ sc_name = self.create_storage_class(
+ sc_name_prefix=prefix, vol_name_prefix=prefix,
+ heketi_zone_checking=heketi_zone_checking)
+
+ return sc_name
+
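For clarity, when is_set_env is True the helper above builds and runs oc commands equivalent to the ones sketched below (illustrative only, not part of the patch; the dc name "heketi-storage" is an assumed example, the code uses self.heketi_dc_name). When is_set_env is False it instead creates a storage class that passes "user.heketi.zone-checking strict" per volume and returns its name.

    heketi_dc_name = "heketi-storage"  # assumed example value
    cmd_set_env = (
        'oc set env dc/{} HEKETI_POST_REQUEST_VOLUME_OPTIONS='
        '"user.heketi.zone-checking strict"'.format(heketi_dc_name))
    cmd_list_env = "oc set env dc/{} --list".format(heketi_dc_name)
    # A trailing dash removes the variable again on cleanup:
    cmd_unset_env = (
        "oc set env dc/{} HEKETI_POST_REQUEST_VOLUME_OPTIONS-".format(
            heketi_dc_name))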
+ def _create_dcs_and_check_brick_placement(
+ self, prefix, sc_name, heketi_zone_checking, zone_count):
+ app_pods = []
+
+ # Create multiple PVCs using storage class
+ pvc_names = self.create_and_wait_for_pvcs(
+ pvc_name_prefix=prefix, pvc_amount=5, sc_name=sc_name)
+
+ # Create app dcs with I/O
+ for pvc_name in pvc_names:
+ app_dc = openshift_ops.oc_create_app_dc_with_io(
+ self.node, pvc_name=pvc_name, dc_name_prefix=prefix)
+ self.addCleanup(openshift_ops.oc_delete, self.node, 'dc', app_dc)
+
+ # Get pod names
+ pod_name = openshift_ops.get_pod_name_from_dc(self.node, app_dc)
+ app_pods.append(pod_name)
+
+ # Validate brick placement in heketi zones
+ self._validate_brick_placement_in_correct_zone_or_with_expand_pvc(
+ heketi_zone_checking, pvc_name, zone_count)
+
+ return app_pods
+
+ @skip("Blocked by BZ-1828249")
+ @pytest.mark.tier1
+ @ddt.data(
+ (3, False),
+ (3, True),
+ (4, True),
+ (3, False, True),
+ (3, True, True),
+ (4, True, True),
+ )
+ @ddt.unpack
+ def test_check_node_disable_based_on_heketi_zone(
+ self, zone_count, is_disable_on_different_zone, is_set_env=False):
+ """Validate node disable in different heketi zones"""
+ expected_node_count, heketi_zone_checking = 4, "strict"
+ prefix = "hzone-{}".format(utils.get_random_str())
+
+ # Check amount of available online nodes
+ online_node_count = len(self._get_online_nodes())
+ if online_node_count < expected_node_count:
+ self.skipTest(
+ 'Available node count {} is less than expected node '
+ 'count {}'.format(online_node_count, expected_node_count))
+
+ # Check amount of available online heketi zones
+ self._check_for_available_zones(zone_count)
+
+        # Get the online devices and nodes w.r.t. zone
+ zone_devices_nodes = self._get_online_devices_and_nodes_with_zone()
+
+ # Set heketi zone checking option to "strict"
+ sc_name = self._set_zone_checking_option_in_heketi_dc_or_create_sc(
+ is_set_env, prefix)
+
+        # Choose a zone and the node_id of the node to disable
+ for zone, nodes_and_devices in zone_devices_nodes.items():
+ if zone_count == 3:
+                # Select a node from a zone having multiple nodes to cover
+                # the "disable node in same zone" test case
+ if len(nodes_and_devices['nodes']) > 1:
+ zone_with_disabled_node = zone
+ disabled_node = nodes_and_devices['nodes'][0]
+ break
+
+ else:
+ # Select node from any of the zones
+ zone_with_disabled_node = zone
+ disabled_node = nodes_and_devices['nodes'][0]
+ break
+
+ # Disable the selected node
+ heketi_ops.heketi_node_disable(
+ self.h_client, self.h_server, disabled_node)
+ self.addCleanup(heketi_ops.heketi_node_enable, self.h_client,
+ self.h_server, disabled_node)
+
+ # Create some DCs with PVCs and check brick placement in heketi zones
+ pod_names = self._create_dcs_and_check_brick_placement(
+ prefix, sc_name, heketi_zone_checking, zone_count)
+
+ # Enable disabled node
+ heketi_ops.heketi_node_enable(
+ self.h_client, self.h_server, disabled_node)
+
+ if is_disable_on_different_zone:
+ # Select the new node in a different zone
+ for zone, nodes_and_devices in zone_devices_nodes.items():
+ if zone != zone_with_disabled_node:
+ new_node_to_disable = nodes_and_devices['nodes'][0]
+ break
+
+ else:
+ # Select the new node in the same zone
+ new_node_to_disable = zone_devices_nodes[
+ zone_with_disabled_node]['nodes'][1]
+
+ # Disable the newly selected node
+ heketi_ops.heketi_node_disable(
+ self.h_client, self.h_server, new_node_to_disable)
+ self.addCleanup(heketi_ops.heketi_node_enable, self.h_client,
+ self.h_server, new_node_to_disable)
+
+        # Verify that the pods are in ready state
+ for pod_name in pod_names:
+ openshift_ops.wait_for_pod_be_ready(
+ self.node, pod_name, timeout=5, wait_step=2)
+
+ @skip("Blocked by BZ-1828249")
+ @pytest.mark.tier1
+ @ddt.data(
+ (3, False),
+ (3, True),
+ (4, True),
+ (3, False, True),
+ (3, True, True),
+ (4, True, True),
+ )
+ @ddt.unpack
+ def test_check_device_disable_based_on_heketi_zone(
+ self, zone_count, is_disable_on_different_zone, is_set_env=False):
+ """Validate device disable in different heketi zones"""
+ online_device_count, expected_device_count = 0, 4
+ expected_node_count, heketi_zone_checking = 4, "strict"
+ prefix = "hzone-{}".format(utils.get_random_str())
+
+ # Check amount of available online nodes
+ online_node_count = len(self._get_online_nodes())
+ if online_node_count < expected_node_count:
+ self.skipTest(
+ 'Available node count {} is less than expected node '
+ 'count {}'.format(online_node_count, expected_node_count))
+
+ # Check amount of available online heketi zones
+ self._check_for_available_zones(zone_count)
+
+        # Get the online devices and nodes w.r.t. zone
+ zone_devices_nodes = self._get_online_devices_and_nodes_with_zone()
+
+ # Check amount of available online heketi devices
+ for zone in zone_devices_nodes:
+ online_device_count += len(
+ zone_devices_nodes[zone]['devices'])
+ if online_device_count < expected_device_count:
+ self.skipTest(
+                "Available device count {} is less than the expected "
+                "device count {}".format(
+                    online_device_count, expected_device_count))
+
+ # Set heketi zone checking option to "strict"
+ sc_name = self._set_zone_checking_option_in_heketi_dc_or_create_sc(
+ is_set_env, prefix)
+
+ # Choose a zone and device_id to disable the device
+ for zone, nodes_and_devices in zone_devices_nodes.items():
+ if zone_count == 3:
+                # Select a device from a zone having multiple devices to
+                # cover the "disable device in same zone" test case
+ if len(nodes_and_devices['devices']) > 1:
+ zone_with_disabled_device = zone
+ disabled_device = nodes_and_devices['devices'][0]
+ break
+
+ else:
+ # Select device from any of the zones
+ zone_with_disabled_device = zone
+ disabled_device = nodes_and_devices['devices'][0]
+ break
+
+ # Disable the selected device
+ heketi_ops.heketi_device_disable(
+ self.h_client, self.h_server, disabled_device)
+ self.addCleanup(heketi_ops.heketi_device_enable, self.h_client,
+ self.h_server, disabled_device)
+
+ # Create some DCs with PVCs and check brick placement in heketi zones
+ pod_names = self._create_dcs_and_check_brick_placement(
+ prefix, sc_name, heketi_zone_checking, zone_count)
+
+ # Enable disabled device
+ heketi_ops.heketi_device_enable(
+ self.h_client, self.h_server, disabled_device)
+
+ if is_disable_on_different_zone:
+ # Select the new device in a different zone
+ for zone, nodes_and_devices in zone_devices_nodes.items():
+ if zone != zone_with_disabled_device:
+ new_device_to_disable = nodes_and_devices['devices'][0]
+ break
+
+ else:
+ # Select the new device in the same zone
+ new_device_to_disable = zone_devices_nodes[
+ zone_with_disabled_device]['devices'][1]
+
+ # Disable the newly selected device
+ heketi_ops.heketi_device_disable(
+ self.h_client, self.h_server, new_device_to_disable)
+ self.addCleanup(heketi_ops.heketi_device_enable, self.h_client,
+ self.h_server, new_device_to_disable)
+
+        # Verify that the pods are in ready state
+ for pod_name in pod_names:
+ openshift_ops.wait_for_pod_be_ready(
+ self.node, pod_name, timeout=5, wait_step=2)