author      Valerii Ponomarov <vponomar@redhat.com>    2019-08-15 22:19:35 +0530
committer   vponomar <vponomar@redhat.com>             2019-09-06 13:13:29 +0000
commit      21f46f578e2712137b578766e15e6788b78ec73a (patch)
tree        cbeaf792f762007da9e4bfc929ef4c67d08fda5d /tests/functional/heketi
parent      b1c89257407d0acfcfd12a59e4fcab728e268bac (diff)
Add test case checking volume size and brick amount config limits in Heketi

Add a test case that sets Heketi config options limiting the lower/upper
allowed volume size and the amount of bricks per volume, then verifies
those limits are enforced.

Change-Id: Ifb5cf64bba34dbf4e89f2fe9364263385a04cfa7
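For context, the limits exercised here live in the "glusterfs" section of
Heketi's heketi.json. A minimal Python sketch of the fragment the test
patches in (the three keys and their values are taken from the test below;
any surrounding configuration keys are assumed):

    import json

    # Build the heketi.json fragment with the limits this test sets.
    heketi_config = {"glusterfs": {}}
    heketi_config["glusterfs"].update({
        "brick_min_size_gb": 2,      # volumes smaller than this are rejected
        "brick_max_size_gb": 4,      # volumes larger than this are rejected
        "max_bricks_per_volume": 3,  # brick cap; can also block expansion
    })
    print(json.dumps(heketi_config, indent=2))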
Diffstat (limited to 'tests/functional/heketi')
-rw-r--r--  tests/functional/heketi/test_restart_heketi_pod.py  |  100
1 file changed, 100 insertions(+), 0 deletions(-)
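The test below shells out to base64 to decode and re-encode the secret
payload before applying it with 'oc patch'. A minimal in-process sketch of
the same round-trip using Python's stdlib base64 module (the encoded value
is a hypothetical placeholder, not real secret data):

    import base64
    import json

    # Hypothetical secret payload: base64 of '{"glusterfs": {}}'
    encoded = "eyJnbHVzdGVyZnMiOiB7fX0="

    # Decode heketi.json, apply the limits, re-encode for 'oc patch'
    config = json.loads(base64.b64decode(encoded))
    config["glusterfs"].update({
        "brick_min_size_gb": 2,
        "brick_max_size_gb": 4,
        "max_bricks_per_volume": 3,
    })
    patched = base64.b64encode(json.dumps(config).encode()).decode()
    print(patched)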
diff --git a/tests/functional/heketi/test_restart_heketi_pod.py b/tests/functional/heketi/test_restart_heketi_pod.py
index 0c32fea2..5e778890 100644
--- a/tests/functional/heketi/test_restart_heketi_pod.py
+++ b/tests/functional/heketi/test_restart_heketi_pod.py
@@ -1,15 +1,24 @@
from jsondiff import diff
+try:
+    # prefer the third-party simplejson module when it is installed
+    import simplejson as json
+except ImportError:
+    # fall back to the standard library json module
+    import json
from openshiftstoragelibs.baseclass import BaseClass
from openshiftstoragelibs.heketi_ops import (
heketi_topology_info,
heketi_volume_create,
heketi_volume_delete,
+ heketi_volume_expand,
hello_heketi,
)
from openshiftstoragelibs.openshift_ops import (
    get_pod_name_from_dc,
    oc_delete,
+    oc_get_custom_resource,
+    scale_dc_pod_amount_and_wait,
    wait_for_pod_be_ready,
    wait_for_resource_absence,
)
@@ -67,3 +76,94 @@ class TestRestartHeketi(BaseClass):
self.assertTrue(vol_info, "Failed to create heketi volume of size 20")
heketi_volume_delete(
self.heketi_client_node, self.heketi_server_url, vol_info['id'])
+
+ def test_set_heketi_vol_size_and_brick_amount_limits(self):
+ # Get Heketi secret name
+ cmd_get_heketi_secret_name = (
+ "oc get dc -n %s %s -o jsonpath='{.spec.template.spec.volumes"
+ "[?(@.name==\"config\")].secret.secretName}'" % (
+ self.storage_project_name, self.heketi_dc_name))
+ heketi_secret_name = self.cmd_run(cmd_get_heketi_secret_name)
+
+ # Read Heketi secret data
+ self.node = self.ocp_master_node[0]
+ heketi_secret_data_str_base64 = oc_get_custom_resource(
+ self.node, "secret", ":.data.'heketi\.json'", # noqa
+ name=heketi_secret_name)[0]
+ heketi_secret_data_str = self.cmd_run(
+ "echo %s | base64 -d" % heketi_secret_data_str_base64)
+ heketi_secret_data = json.loads(heketi_secret_data_str)
+
+ # Update Heketi secret data
+ brick_min_size_gb, brick_max_size_gb = 2, 4
+ heketi_secret_data["glusterfs"].update({
+ "brick_min_size_gb": brick_min_size_gb,
+ "brick_max_size_gb": brick_max_size_gb,
+ "max_bricks_per_volume": 3,
+ })
+ heketi_secret_data_patched = json.dumps(heketi_secret_data)
+ heketi_secret_data_str_encoded = self.cmd_run(
+ "echo '%s' |base64" % heketi_secret_data_patched).replace('\n', '')
+ h_client, h_server = self.heketi_client_node, self.heketi_server_url
+ try:
+ # Patch Heketi secret
+ cmd_patch_heketi_secret = (
+ 'oc patch secret -n %s %s -p '
+ '"{\\"data\\": {\\"heketi.json\\": \\"%s\\"}}"'
+ ) % (self.storage_project_name, heketi_secret_name, "%s")
+ self.cmd_run(
+ cmd_patch_heketi_secret % heketi_secret_data_str_encoded)
+
+            # Recreate the Heketi pod so it reads the updated configuration
+ scale_dc_pod_amount_and_wait(self.node, self.heketi_dc_name, 0)
+ scale_dc_pod_amount_and_wait(self.node, self.heketi_dc_name, 1)
+
+            # Try to create volumes that are too small and too big;
+            # both must fail because they violate the allowed size range
+ for gb in (brick_min_size_gb - 1, brick_max_size_gb + 1):
+ try:
+ vol_1 = heketi_volume_create(
+ h_client, h_server, size=gb, json=True)
+ except AssertionError:
+ pass
+ else:
+ self.addCleanup(
+ heketi_volume_delete, h_client, h_server, vol_1['id'])
+                self.assertFalse(
+                    vol_1,
+                    "Volume '%s' was unexpectedly created; the Heketi "
+                    "server configuration has not taken effect." % (
+                        vol_1.get('id', 'failed_to_get_heketi_vol_id')))
+
+ # Create the smallest allowed volume
+ vol_2 = heketi_volume_create(
+ h_client, h_server, size=brick_min_size_gb, json=True)
+ self.addCleanup(
+ heketi_volume_delete, h_client, h_server, vol_2['id'])
+
+            # Expanding the volume must fail due to the brick amount limit
+ self.assertRaises(
+ AssertionError, heketi_volume_expand, h_client,
+ h_server, vol_2['id'], 2)
+
+ # Create the largest allowed volume
+ vol_3 = heketi_volume_create(
+ h_client, h_server, size=brick_max_size_gb, json=True)
+ heketi_volume_delete(h_client, h_server, vol_3['id'])
+ finally:
+            # Revert the Heketi configuration
+ self.cmd_run(
+ cmd_patch_heketi_secret % heketi_secret_data_str_base64)
+ scale_dc_pod_amount_and_wait(self.node, self.heketi_dc_name, 0)
+ scale_dc_pod_amount_and_wait(self.node, self.heketi_dc_name, 1)
+
+        # Create a volume smaller than the previous minimum limit
+ vol_4 = heketi_volume_create(
+ h_client, h_server, size=(brick_min_size_gb - 1), json=True)
+ self.addCleanup(heketi_volume_delete, h_client, h_server, vol_4['id'])
+
+        # Create a volume bigger than the previous maximum limit and expand it
+ vol_5 = heketi_volume_create(
+ h_client, h_server, size=(brick_max_size_gb + 1), json=True)
+ self.addCleanup(heketi_volume_delete, h_client, h_server, vol_5['id'])
+ heketi_volume_expand(h_client, h_server, vol_5['id'], 2)