Diffstat (limited to 'tests')
-rw-r--r--  tests/cns_tests_sample_config.yml                                                   1
-rw-r--r--  tests/functional/common/gluster_stability/__init__.py                               0
-rw-r--r--  tests/functional/common/gluster_stability/test_gluster_services_restart.py        233
-rw-r--r--  tests/functional/common/heketi/test_heketi_metrics.py                             272
-rw-r--r--  tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py  166
-rw-r--r--  tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py        146
6 files changed, 791 insertions(+), 27 deletions(-)
diff --git a/tests/cns_tests_sample_config.yml b/tests/cns_tests_sample_config.yml
index ce22ac59..00f304db 100644
--- a/tests/cns_tests_sample_config.yml
+++ b/tests/cns_tests_sample_config.yml
@@ -110,6 +110,7 @@ cns:
restsecretname:
hacount: "3"
chapauthenabled: "true"
+ volumenameprefix: "cns-vol"
secrets:
secret1:
secret_name: secret1
diff --git a/tests/functional/common/gluster_stability/__init__.py b/tests/functional/common/gluster_stability/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/functional/common/gluster_stability/__init__.py
diff --git a/tests/functional/common/gluster_stability/test_gluster_services_restart.py b/tests/functional/common/gluster_stability/test_gluster_services_restart.py
new file mode 100644
index 00000000..2cc09099
--- /dev/null
+++ b/tests/functional/common/gluster_stability/test_gluster_services_restart.py
@@ -0,0 +1,233 @@
+
+import ddt
+import re
+
+from cnslibs.common.heketi_ops import (
+ heketi_blockvolume_list,
+ match_heketi_and_gluster_block_volumes
+)
+from cnslibs.common.openshift_ops import (
+ check_service_status,
+ get_ocp_gluster_pod_names,
+ get_pod_name_from_dc,
+ match_pv_and_heketi_block_volumes,
+ match_pvc_and_pv,
+ oc_create_app_dc_with_io,
+ oc_create_pvc,
+ oc_create_sc,
+ oc_create_secret,
+ oc_delete,
+ oc_get_yaml,
+ restart_service_on_pod,
+ scale_dc_pod_amount_and_wait,
+ verify_pvc_status_is_bound,
+ wait_for_pod_be_ready,
+ wait_for_resource_absence
+)
+from cnslibs.cns.cns_baseclass import CnsBaseClass
+from cnslibs.common import podcmd
+
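+# Matches lines from the heketi block volume listing; the groups capture the
+# block volume id, the cluster id and the name suffix after the given prefix.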
+HEKETI_BLOCK_VOLUME_REGEX = "^Id:(.*).Cluster:(.*).Name:%s_(.*)$"
+
+SERVICE_TARGET = "gluster-block-target"
+SERVICE_BLOCKD = "gluster-blockd"
+SERVICE_TCMU = "tcmu-runner"
+
+
+@ddt.ddt
+class GlusterStabilityTestSetup(CnsBaseClass):
+ """class for gluster stability (restarts different servces) testcases
+ TC No's: CNS-1393, CNS-1394, CNS-1395
+ """
+
+ def setUp(self):
+ """Deploys, Verifies and adds resources required for testcases
+ in cleanup method
+ """
+ self.oc_node = self.ocp_master_node[0]
+ self.gluster_pod = get_ocp_gluster_pod_names(self.oc_node)[0]
+
+        # prefix used for created resources, derived from glustotest_run_id,
+        # which encodes the date and time of the test run
+ self.prefix = "autotest-%s" % (self.glustotest_run_id.replace("_", ""))
+
+ _cns_storage_class = self.cns_storage_class['storage_class2']
+ self.provisioner = _cns_storage_class["provisioner"]
+ self.restsecretname = _cns_storage_class["restsecretname"]
+ self.restsecretnamespace = _cns_storage_class["restsecretnamespace"]
+ self.restuser = _cns_storage_class["restuser"]
+ self.resturl = _cns_storage_class["resturl"]
+
+ _cns_secret = self.cns_secret['secret2']
+ self.secretnamespace = _cns_secret['namespace']
+ self.secrettype = _cns_secret['type']
+
+        # default size of the PVCs created by the test
+ self.pvcsize = 1
+
+        # number of PVCs created for validation
+ self.pvccount = 10
+
+ # create gluster block storage class, PVC and user app pod
+ self.sc_name, self.pvc_name, self.dc_name, self.secret_name = (
+            self.deploy_resources()
+ )
+
+ # verify storage class
+ oc_get_yaml(self.oc_node, "sc", self.sc_name)
+
+        # verify pod creation and its state, and get the pod name
+ self.pod_name = get_pod_name_from_dc(
+ self.oc_node, self.dc_name, timeout=180, wait_step=3
+ )
+ wait_for_pod_be_ready(
+ self.oc_node, self.pod_name, timeout=180, wait_step=3
+ )
+ verify_pvc_status_is_bound(self.oc_node, self.pvc_name)
+
+        # create PVCs for the test
+ self.pvc_list = []
+ for pvc in range(self.pvccount):
+ test_pvc_name = oc_create_pvc(
+ self.oc_node, self.sc_name,
+ pvc_name_prefix=self.prefix, pvc_size=self.pvcsize
+ )
+ self.pvc_list.append(test_pvc_name)
+ self.addCleanup(
+ wait_for_resource_absence, self.oc_node, "pvc", test_pvc_name,
+ timeout=600, interval=10
+ )
+
+ for pvc_name in self.pvc_list:
+ self.addCleanup(oc_delete, self.oc_node, "pvc", pvc_name)
+
+    def deploy_resources(self):
+ """Deploys required resources storage class, pvc and user app
+ with continous I/O runnig
+
+ Returns:
+ sc_name (str): deployed storage class name
+ pvc_name (str): deployed persistent volume claim name
+ dc_name (str): deployed deployment config name
+            secretname (str): created secret object name
+ """
+ secretname = oc_create_secret(
+ self.oc_node, namespace=self.secretnamespace,
+ data_key=self.heketi_cli_key, secret_type=self.secrettype)
+ self.addCleanup(oc_delete, self.oc_node, 'secret', secretname)
+
+ sc_name = oc_create_sc(
+ self.oc_node,
+ sc_name_prefix=self.prefix, provisioner=self.provisioner,
+ resturl=self.resturl, restuser=self.restuser,
+ restsecretnamespace=self.restsecretnamespace,
+ restsecretname=secretname, volumenameprefix=self.prefix
+ )
+ self.addCleanup(oc_delete, self.oc_node, "sc", sc_name)
+
+ pvc_name = oc_create_pvc(
+ self.oc_node, sc_name,
+ pvc_name_prefix=self.prefix, pvc_size=self.pvcsize
+ )
+ self.addCleanup(
+ wait_for_resource_absence, self.oc_node, "pvc", pvc_name,
+ timeout=120, interval=5
+ )
+ self.addCleanup(oc_delete, self.oc_node, "pvc", pvc_name)
+
+ dc_name = oc_create_app_dc_with_io(
+ self.oc_node, pvc_name, dc_name_prefix=self.prefix
+ )
+ self.addCleanup(oc_delete, self.oc_node, "dc", dc_name)
+ self.addCleanup(scale_dc_pod_amount_and_wait, self.oc_node, dc_name, 0)
+
+ return sc_name, pvc_name, dc_name, secretname
+
+ def get_heketi_block_volumes(self):
+ """lists heketi block volumes
+
+ Returns:
+ list : list of ids of heketi block volumes
+ """
+ heketi_cmd_out = heketi_blockvolume_list(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ secret=self.heketi_cli_key,
+ user=self.heketi_cli_user
+ )
+
+ self.assertTrue(heketi_cmd_out, "failed to get block volume list")
+
+ heketi_block_volume_ids = []
+ heketi_block_volume_names = []
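+        # collect ids and names of the block volumes that carry this
+        # test run's prefix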
+ for block_vol in heketi_cmd_out.split("\n"):
+ heketi_vol_match = re.search(
+ HEKETI_BLOCK_VOLUME_REGEX % self.prefix, block_vol.strip()
+ )
+ if heketi_vol_match:
+ heketi_block_volume_ids.append(
+ (heketi_vol_match.group(1)).strip()
+ )
+ heketi_block_volume_names.append(
+ (heketi_vol_match.group(3)).strip()
+ )
+
+        return (
+            sorted(heketi_block_volume_ids),
+            sorted(heketi_block_volume_names))
+
+ def validate_volumes_and_blocks(self):
+ """Validates PVC and block volumes generated through heketi and OCS
+ """
+
+        # verify that all the PVCs are in 'Bound' state
+ for pvc in self.pvc_list:
+ verify_pvc_status_is_bound(
+ self.oc_node, pvc, timeout=300, wait_step=10
+ )
+
+ # validate pvcs and pvs created on OCS
+ match_pvc_and_pv(self.oc_node, self.prefix)
+
+ # get list of block volumes using heketi
+ heketi_block_volume_ids, heketi_block_volume_names = (
+ self.get_heketi_block_volumes()
+ )
+
+ # validate block volumes listed by heketi and pvs
+ match_pv_and_heketi_block_volumes(
+ self.oc_node, heketi_block_volume_ids, self.prefix
+ )
+
+ # validate block volumes listed by heketi and gluster
+ gluster_pod_obj = podcmd.Pod(self.heketi_client_node, self.gluster_pod)
+ match_heketi_and_gluster_block_volumes(
+ gluster_pod_obj, heketi_block_volume_names, "%s_" % self.prefix
+ )
+
+ @ddt.data(SERVICE_BLOCKD, SERVICE_TCMU, SERVICE_TARGET)
+ def test_restart_services_provision_volume_and_run_io(self, service):
+ """[CNS-1393-1395] Restart gluster service then validate volumes
+ """
+        # restart the given service on the gluster pod
+ restart_service_on_pod(self.oc_node, self.gluster_pod, service)
+
+ # wait for deployed user pod to be in Running state after restarting
+ # service
+ wait_for_pod_be_ready(
+ self.oc_node, self.pod_name, timeout=60, wait_step=5
+ )
+
+        # verify that all gluster-block services are in the expected state
+ for service in (SERVICE_BLOCKD, SERVICE_TCMU, SERVICE_TARGET):
+ status = "exited" if service == SERVICE_TARGET else "running"
+ self.assertTrue(
+ check_service_status(
+ self.oc_node, self.gluster_pod, service, status
+ ),
+ "service %s is not in %s state" % (service, status)
+ )
+
+ # validates pvc, pv, heketi block and gluster block count after
+ # service restarts
+ self.validate_volumes_and_blocks()
diff --git a/tests/functional/common/heketi/test_heketi_metrics.py b/tests/functional/common/heketi/test_heketi_metrics.py
new file mode 100644
index 00000000..04147e37
--- /dev/null
+++ b/tests/functional/common/heketi/test_heketi_metrics.py
@@ -0,0 +1,272 @@
+from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common.heketi_ops import (
+ get_heketi_metrics,
+ heketi_cluster_info,
+ heketi_topology_info,
+ heketi_volume_create,
+ heketi_volume_delete,
+ heketi_volume_list
+ )
+from cnslibs.common.openshift_ops import (
+ get_pod_name_from_dc,
+ scale_dc_pod_amount_and_wait,
+ wait_for_pod_be_ready
+ )
+
+
+class TestHeketiMetrics(HeketiClientSetupBaseClass):
+
+ def verify_heketi_metrics_with_topology_info(self):
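+        # Cross-check cluster, node, device and brick counts (and device
+        # sizes) reported by heketi metrics against the topology info output.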
+ topology = heketi_topology_info(
+ self.heketi_client_node, self.heketi_server_url, json=True)
+
+ metrics = get_heketi_metrics(
+ self.heketi_client_node, self.heketi_server_url)
+
+ self.assertTrue(topology)
+ self.assertIn('clusters', list(topology.keys()))
+ self.assertGreater(len(topology['clusters']), 0)
+
+ self.assertTrue(metrics)
+ self.assertGreater(len(metrics.keys()), 0)
+
+ self.assertEqual(
+ len(topology['clusters']), metrics['heketi_cluster_count'])
+
+ for cluster in topology['clusters']:
+ self.assertIn('nodes', list(cluster.keys()))
+ self.assertGreater(len(cluster['nodes']), 0)
+
+ cluster_id = cluster['id']
+
+ cluster_ids = ([obj['cluster']
+ for obj in metrics['heketi_nodes_count']])
+ self.assertIn(cluster_id, cluster_ids)
+ for node_count in metrics['heketi_nodes_count']:
+ if node_count['cluster'] == cluster_id:
+ self.assertEqual(
+ len(cluster['nodes']), node_count['value'])
+
+ cluster_ids = ([obj['cluster']
+ for obj in metrics['heketi_volumes_count']])
+ self.assertIn(cluster_id, cluster_ids)
+ for vol_count in metrics['heketi_volumes_count']:
+ if vol_count['cluster'] == cluster_id:
+ self.assertEqual(
+ len(cluster['volumes']), vol_count['value'])
+
+ for node in cluster['nodes']:
+ self.assertIn('devices', list(node.keys()))
+ self.assertGreater(len(node['devices']), 0)
+
+ hostname = node['hostnames']['manage'][0]
+
+ cluster_ids = ([obj['cluster']
+ for obj in metrics['heketi_device_count']])
+ self.assertIn(cluster_id, cluster_ids)
+ hostnames = ([obj['hostname']
+ for obj in metrics['heketi_device_count']])
+ self.assertIn(hostname, hostnames)
+ for device_count in metrics['heketi_device_count']:
+ if (device_count['cluster'] == cluster_id and
+ device_count['hostname'] == hostname):
+ self.assertEqual(
+ len(node['devices']), device_count['value'])
+
+ for device in node['devices']:
+ device_name = device['name']
+ device_size_t = device['storage']['total']
+ device_free_t = device['storage']['free']
+ device_used_t = device['storage']['used']
+
+ cluster_ids = ([obj['cluster']
+ for obj in
+ metrics['heketi_device_brick_count']])
+ self.assertIn(cluster_id, cluster_ids)
+ hostnames = ([obj['hostname']
+ for obj in
+ metrics['heketi_device_brick_count']])
+ self.assertIn(hostname, hostnames)
+ devices = ([obj['device']
+ for obj in
+ metrics['heketi_device_brick_count']])
+ self.assertIn(device_name, devices)
+ for brick_count in metrics['heketi_device_brick_count']:
+ if (brick_count['cluster'] == cluster_id and
+ brick_count['hostname'] == hostname and
+ brick_count['device'] == device_name):
+ self.assertEqual(
+ len(device['bricks']), brick_count['value'])
+
+ cluster_ids = ([obj['cluster']
+ for obj in metrics['heketi_device_size']])
+ self.assertIn(cluster_id, cluster_ids)
+ hostnames = ([obj['hostname']
+ for obj in metrics['heketi_device_size']])
+ self.assertIn(hostname, hostnames)
+ devices = ([obj['device']
+ for obj in metrics['heketi_device_size']])
+ self.assertIn(device_name, devices)
+ for device_size in metrics['heketi_device_size']:
+ if (device_size['cluster'] == cluster_id and
+ device_size['hostname'] == hostname and
+ device_size['device'] == device_name):
+ self.assertEqual(
+ device_size_t, device_size['value'])
+
+ cluster_ids = ([obj['cluster']
+ for obj in metrics['heketi_device_free']])
+ self.assertIn(cluster_id, cluster_ids)
+ hostnames = ([obj['hostname']
+ for obj in metrics['heketi_device_free']])
+ self.assertIn(hostname, hostnames)
+ devices = ([obj['device']
+ for obj in metrics['heketi_device_free']])
+ self.assertIn(device_name, devices)
+ for device_free in metrics['heketi_device_free']:
+ if (device_free['cluster'] == cluster_id and
+ device_free['hostname'] == hostname and
+ device_free['device'] == device_name):
+ self.assertEqual(
+ device_free_t, device_free['value'])
+
+ cluster_ids = ([obj['cluster']
+ for obj in metrics['heketi_device_used']])
+ self.assertIn(cluster_id, cluster_ids)
+ hostnames = ([obj['hostname']
+ for obj in metrics['heketi_device_used']])
+ self.assertIn(hostname, hostnames)
+ devices = ([obj['device']
+ for obj in metrics['heketi_device_used']])
+ self.assertIn(device_name, devices)
+ for device_used in metrics['heketi_device_used']:
+ if (device_used['cluster'] == cluster_id and
+ device_used['hostname'] == hostname and
+ device_used['device'] == device_name):
+ self.assertEqual(
+ device_used_t, device_used['value'])
+
+ def verify_volume_count(self):
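+        # Compare the per-cluster volume count from heketi metrics with the
+        # volume list returned by cluster info.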
+ metrics = get_heketi_metrics(
+ self.heketi_client_node,
+ self.heketi_server_url)
+ self.assertTrue(metrics['heketi_volumes_count'])
+
+ for vol_count in metrics['heketi_volumes_count']:
+ self.assertTrue(vol_count['cluster'])
+ cluster_info = heketi_cluster_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ vol_count['cluster'], json=True)
+ self.assertEqual(vol_count['value'], len(cluster_info['volumes']))
+
+ def test_heketi_metrics_with_topology_info(self):
+ # CNS-1243 - Heketi_metrics_generate
+ self.verify_heketi_metrics_with_topology_info()
+
+ def test_heketi_metrics_heketipod_failure(self):
+ # CNS-1262 - Heketi-metrics_validating_heketi_pod failure
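+        # scale the heketi deployment down to zero pods so that the metrics
+        # endpoint becomes unreachable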
+ scale_dc_pod_amount_and_wait(
+ self.ocp_master_node, self.heketi_dc_name, pod_amount=0)
+ self.addCleanup(
+ scale_dc_pod_amount_and_wait, self.ocp_master_node,
+ self.heketi_dc_name, pod_amount=1)
+
+        # verify that metrics are not accessible when the heketi pod is down
+ with self.assertRaises(AssertionError):
+ get_heketi_metrics(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ prometheus_format=True)
+
+ scale_dc_pod_amount_and_wait(
+ self.ocp_master_node, self.heketi_dc_name, pod_amount=1)
+
+        pod_name = get_pod_name_from_dc(
+            self.ocp_master_node, self.heketi_dc_name)
+ wait_for_pod_be_ready(self.ocp_master_node, pod_name, wait_step=5)
+
+ for i in range(3):
+ vol = heketi_volume_create(
+ self.heketi_client_node,
+ self.heketi_server_url, 1, json=True)
+
+ self.assertTrue(vol)
+
+ self.addCleanup(
+ heketi_volume_delete,
+ self.heketi_client_node,
+ self.heketi_server_url,
+ vol['id'],
+ raise_on_error=False)
+
+ vol_list = heketi_volume_list(
+ self.heketi_client_node,
+ self.heketi_server_url)
+
+ self.assertIn(vol['id'], vol_list)
+
+ self.verify_heketi_metrics_with_topology_info()
+
+ def test_heketi_metrics_validating_vol_count_on_vol_creation(self):
+ # CNS-1244 - Heketi_metrics_validating_VolumeCount_on_creation
+
+ for i in range(3):
+ # Create volume
+ vol = heketi_volume_create(
+ self.heketi_client_node,
+ self.heketi_server_url, 1, json=True)
+ self.assertTrue(vol)
+ self.addCleanup(
+ heketi_volume_delete,
+ self.heketi_client_node,
+ self.heketi_server_url,
+ vol['id'],
+ raise_on_error=False)
+
+ vol_list = heketi_volume_list(
+ self.heketi_client_node,
+ self.heketi_server_url)
+
+ self.assertIn(vol['id'], vol_list)
+
+ self.verify_volume_count()
+
+ def test_heketi_metrics_validating_vol_count_on_vol_deletion(self):
+ # CNS-1245 - Heketi_metrics_validating_VolumeCount_on_deletion
+
+ vol_list = []
+
+ for i in range(3):
+ # Create volume
+ vol = heketi_volume_create(
+ self.heketi_client_node,
+ self.heketi_server_url, 1, json=True)
+
+ self.assertTrue(vol)
+
+ self.addCleanup(
+ heketi_volume_delete,
+ self.heketi_client_node,
+ self.heketi_server_url,
+ vol['id'],
+ raise_on_error=False)
+
+ volume_list = heketi_volume_list(
+ self.heketi_client_node,
+ self.heketi_server_url)
+
+ self.assertIn(vol['id'], volume_list)
+ vol_list.append(vol)
+
+ for vol in vol_list:
+ # delete volume
+ heketi_volume_delete(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ vol['id'])
+ volume_list = heketi_volume_list(
+ self.heketi_client_node,
+ self.heketi_server_url)
+ self.assertNotIn(vol['id'], volume_list)
+ self.verify_volume_count()
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
index 65a01c61..c717e44e 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
@@ -5,18 +5,25 @@ from cnslibs.cns.cns_baseclass import CnsGlusterBlockBaseClass
from cnslibs.common.exceptions import ExecutionError
from cnslibs.common.openshift_ops import (
get_gluster_pod_names_by_pvc_name,
- get_pvc_status,
get_pod_name_from_dc,
+ get_pv_name_from_pvc,
+ get_pvc_status,
oc_create_app_dc_with_io,
oc_create_secret,
oc_create_sc,
oc_create_pvc,
oc_delete,
+ oc_get_custom_resource,
oc_rsh,
scale_dc_pod_amount_and_wait,
verify_pvc_status_is_bound,
wait_for_pod_be_ready,
- wait_for_resource_absence)
+ wait_for_resource_absence
+ )
+from cnslibs.common.heketi_ops import (
+ heketi_blockvolume_delete,
+ heketi_blockvolume_list
+ )
from cnslibs.common.waiter import Waiter
from glusto.core import Glusto as g
@@ -30,9 +37,10 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
def setUp(self):
super(TestDynamicProvisioningBlockP0, self).setUp()
self.node = self.ocp_master_node[0]
+ self.sc = self.cns_storage_class['storage_class2']
- def _create_storage_class(self, hacount=True):
- sc = self.cns_storage_class['storage_class2']
+ def _create_storage_class(self, hacount=True, create_name_prefix=False,
+ reclaim_policy="Delete"):
secret = self.cns_secret['secret2']
# Create secret file
@@ -41,15 +49,22 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
data_key=self.heketi_cli_key, secret_type=secret['type'])
self.addCleanup(oc_delete, self.node, 'secret', self.secret_name)
- # Create storage class
+ # create storage class
+ kwargs = {
+ "provisioner": "gluster.org/glusterblock",
+ "resturl": self.sc['resturl'],
+ "restuser": self.sc['restuser'],
+ "restsecretnamespace": self.sc['restsecretnamespace'],
+ "restsecretname": self.secret_name
+ }
+ if hacount:
+ kwargs["hacount"] = self.sc['hacount']
+ if create_name_prefix:
+ kwargs["volumenameprefix"] = self.sc.get(
+ 'volumenameprefix', 'autotest-blk')
+
self.sc_name = oc_create_sc(
- self.ocp_master_node[0], provisioner="gluster.org/glusterblock",
- resturl=sc['resturl'], restuser=sc['restuser'],
- restsecretnamespace=sc['restsecretnamespace'],
- restsecretname=self.secret_name,
- **({"hacount": sc['hacount']}
- if hacount else {})
- )
+ self.node, reclaim_policy=reclaim_policy, **kwargs)
self.addCleanup(oc_delete, self.node, 'sc', self.sc_name)
return self.sc_name
@@ -66,13 +81,29 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
pvc_names.append(pvc_name)
self.addCleanup(
wait_for_resource_absence, self.node, 'pvc', pvc_name)
- for pvc_name in pvc_names:
- self.addCleanup(oc_delete, self.node, 'pvc', pvc_name,
- raise_on_absence=False)
# Wait for PVCs to be in bound state
- for pvc_name in pvc_names:
- verify_pvc_status_is_bound(self.node, pvc_name)
+ try:
+ for pvc_name in pvc_names:
+ verify_pvc_status_is_bound(self.node, pvc_name)
+ finally:
+ reclaim_policy = oc_get_custom_resource(
+ self.node, 'sc', ':.reclaimPolicy', self.sc_name)[0]
+
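+            # with the 'Retain' reclaim policy the PV and the backing heketi
+            # block volume are not removed automatically, so register their
+            # cleanup explicitly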
+ for pvc_name in pvc_names:
+ if reclaim_policy == 'Retain':
+ pv_name = get_pv_name_from_pvc(self.node, pvc_name)
+ self.addCleanup(oc_delete, self.node, 'pv', pv_name,
+ raise_on_absence=False)
+ custom = (':.metadata.annotations."gluster\.kubernetes'
+ '\.io\/heketi\-volume\-id"')
+ vol_id = oc_get_custom_resource(
+ self.node, 'pv', custom, pv_name)[0]
+ self.addCleanup(heketi_blockvolume_delete,
+ self.heketi_client_node,
+ self.heketi_server_url, vol_id)
+ self.addCleanup(oc_delete, self.node, 'pvc', pvc_name,
+ raise_on_absence=False)
return pvc_names
@@ -82,9 +113,10 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
pvc_size=pvc_size, pvc_name_prefix=pvc_name_prefix)[0]
return self.pvc_name
- def _create_dc_with_pvc(self, hacount=True):
+ def _create_dc_with_pvc(self, hacount=True, create_name_prefix=False):
# Create storage class and secret objects
- self._create_storage_class(hacount)
+ self._create_storage_class(
+ hacount, create_name_prefix=create_name_prefix)
# Create PVC
pvc_name = self._create_and_wait_for_pvc()
@@ -98,11 +130,13 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
return dc_name, pod_name, pvc_name
- def dynamic_provisioning_glusterblock(self, hacount=True):
+ def dynamic_provisioning_glusterblock(
+ self, hacount=True, create_name_prefix=False):
datafile_path = '/mnt/fake_file_for_%s' % self.id()
# Create DC with attached PVC
- dc_name, pod_name, pvc_name = self._create_dc_with_pvc(hacount)
+ dc_name, pod_name, pvc_name = self._create_dc_with_pvc(
+ hacount, create_name_prefix=create_name_prefix)
# Check that we can write data
for cmd in ("dd if=/dev/urandom of=%s bs=1K count=100",
@@ -312,3 +346,93 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
# create a new PVC
self._create_and_wait_for_pvc()
+
+ def test_recreate_app_pod_with_attached_block_pv(self):
+ """Test Case CNS-1392"""
+ datafile_path = '/mnt/temporary_test_file'
+
+ # Create DC with POD and attached PVC to it
+ dc_name, pod_name, pvc_name = self._create_dc_with_pvc()
+
+ # Write data
+ write_cmd = "oc exec %s -- dd if=/dev/urandom of=%s bs=4k count=10000"
+ self.cmd_run(write_cmd % (pod_name, datafile_path))
+
+ # Recreate app POD
+ scale_dc_pod_amount_and_wait(self.node, dc_name, 0)
+ scale_dc_pod_amount_and_wait(self.node, dc_name, 1)
+ new_pod_name = get_pod_name_from_dc(self.node, dc_name)
+
+ # Check presence of already written file
+ check_existing_file_cmd = (
+ "oc exec %s -- ls %s" % (new_pod_name, datafile_path))
+ out = self.cmd_run(check_existing_file_cmd)
+ self.assertIn(datafile_path, out)
+
+ # Perform I/O on the new POD
+ self.cmd_run(write_cmd % (new_pod_name, datafile_path))
+
+ def test_volname_prefix_glusterblock(self):
+ # CNS-926 - custom_volname_prefix_blockvol
+
+ self.dynamic_provisioning_glusterblock(create_name_prefix=True)
+
+ pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
+ vol_name = oc_get_custom_resource(
+ self.node, 'pv',
+ ':.metadata.annotations.glusterBlockShare', pv_name)[0]
+
+ block_vol_list = heketi_blockvolume_list(
+ self.heketi_client_node, self.heketi_server_url)
+
+ self.assertIn(vol_name, block_vol_list)
+
+ self.assertTrue(vol_name.startswith(
+ self.sc.get('volumenameprefix', 'autotest-blk')))
+
+ def test_dynamic_provisioning_glusterblock_reclaim_policy_retain(self):
+ # CNS-1391 - Retain policy - gluster-block - delete pvc
+
+ self._create_storage_class(reclaim_policy='Retain')
+ self._create_and_wait_for_pvc()
+
+ dc_name = oc_create_app_dc_with_io(self.node, self.pvc_name)
+
+ try:
+ pod_name = get_pod_name_from_dc(self.node, dc_name)
+ wait_for_pod_be_ready(self.node, pod_name)
+ finally:
+ scale_dc_pod_amount_and_wait(self.node, dc_name, pod_amount=0)
+ oc_delete(self.node, 'dc', dc_name)
+
+        # get the name of the volume
+ pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
+
+ custom = [':.metadata.annotations."gluster\.org\/volume\-id"',
+ ':.spec.persistentVolumeReclaimPolicy']
+ vol_id, reclaim_policy = oc_get_custom_resource(
+ self.node, 'pv', custom, pv_name)
+
+        # check the reclaim policy of the pv
+ self.assertEqual(reclaim_policy, 'Retain')
+
+ # delete the pvc
+ oc_delete(self.node, 'pvc', self.pvc_name)
+
+ # check if pv is also deleted or not
+ with self.assertRaises(ExecutionError):
+ wait_for_resource_absence(
+ self.node, 'pvc', self.pvc_name, interval=3, timeout=30)
+
+        # get the block volume list
+ blocklist = heketi_blockvolume_list(self.heketi_client_node,
+ self.heketi_server_url)
+ self.assertIn(vol_id, blocklist)
+
+ heketi_blockvolume_delete(self.heketi_client_node,
+ self.heketi_server_url, vol_id)
+ blocklist = heketi_blockvolume_list(self.heketi_client_node,
+ self.heketi_server_url)
+ self.assertNotIn(vol_id, blocklist)
+ oc_delete(self.node, 'pv', pv_name)
+ wait_for_resource_absence(self.node, 'pv', pv_name)
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
index 54eaea07..9875e6dd 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
@@ -7,19 +7,27 @@ from cnslibs.common.heketi_ops import (
verify_volume_name_prefix)
from cnslibs.common.openshift_ops import (
get_gluster_pod_names_by_pvc_name,
+ get_pv_name_from_pvc,
get_pvc_status,
get_pod_name_from_dc,
+ get_pod_names_from_dc,
oc_create_secret,
oc_create_sc,
oc_create_pvc,
oc_create_app_dc_with_io,
oc_create_tiny_pod_with_volume,
oc_delete,
+ oc_get_custom_resource,
oc_rsh,
+ oc_version,
scale_dc_pod_amount_and_wait,
verify_pvc_status_is_bound,
wait_for_pod_be_ready,
wait_for_resource_absence)
+from cnslibs.common.heketi_ops import (
+ heketi_volume_delete,
+ heketi_volume_list
+ )
from cnslibs.common.waiter import Waiter
from glusto.core import Glusto as g
@@ -35,7 +43,8 @@ class TestDynamicProvisioningP0(CnsBaseClass):
self.node = self.ocp_master_node[0]
self.sc = self.cns_storage_class['storage_class1']
- def _create_storage_class(self, create_name_prefix=False):
+ def _create_storage_class(
+ self, create_name_prefix=False, reclaim_policy='Delete'):
sc = self.cns_storage_class['storage_class1']
secret = self.cns_secret['secret1']
@@ -49,6 +58,7 @@ class TestDynamicProvisioningP0(CnsBaseClass):
# Create storage class
self.sc_name = oc_create_sc(
self.node,
+ reclaim_policy=reclaim_policy,
resturl=sc['resturl'],
restuser=sc['restuser'], secretnamespace=sc['secretnamespace'],
secretname=self.secret_name,
@@ -69,13 +79,30 @@ class TestDynamicProvisioningP0(CnsBaseClass):
pvc_names.append(pvc_name)
self.addCleanup(
wait_for_resource_absence, self.node, 'pvc', pvc_name)
- for pvc_name in pvc_names:
- self.addCleanup(oc_delete, self.node, 'pvc', pvc_name,
- raise_on_absence=False)
# Wait for PVCs to be in bound state
- for pvc_name in pvc_names:
- verify_pvc_status_is_bound(self.node, pvc_name)
+ try:
+ for pvc_name in pvc_names:
+ verify_pvc_status_is_bound(self.node, pvc_name)
+ finally:
+ reclaim_policy = oc_get_custom_resource(
+ self.node, 'sc', ':.reclaimPolicy', self.sc_name)[0]
+
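+            # with the 'Retain' reclaim policy the PV and the backing heketi
+            # file volume must be cleaned up explicitly as well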
+ for pvc_name in pvc_names:
+ if reclaim_policy == 'Retain':
+ pv_name = get_pv_name_from_pvc(self.node, pvc_name)
+ self.addCleanup(oc_delete, self.node, 'pv', pv_name,
+ raise_on_absence=False)
+ custom = (':.metadata.annotations."gluster\.kubernetes'
+ '\.io\/heketi\-volume\-id"')
+ vol_id = oc_get_custom_resource(
+ self.node, 'pv', custom, pv_name)[0]
+ self.addCleanup(heketi_volume_delete,
+ self.heketi_client_node,
+ self.heketi_server_url, vol_id,
+ raise_on_error=False)
+ self.addCleanup(oc_delete, self.node, 'pvc', pvc_name,
+ raise_on_absence=False)
return pvc_names
@@ -360,3 +387,110 @@ class TestDynamicProvisioningP0(CnsBaseClass):
# create a new PVC
self._create_and_wait_for_pvc()
+
+ def test_validate_pvc_in_multiple_app_pods(self):
+ """Test case CNS-574"""
+ replicas = 5
+
+ # Create secret and storage class
+ self._create_storage_class()
+
+ # Create PVC
+ pvc_name = self._create_and_wait_for_pvc()
+
+ # Create DC with application PODs
+ dc_name = oc_create_app_dc_with_io(
+ self.node, pvc_name, replicas=replicas)
+ self.addCleanup(oc_delete, self.node, 'dc', dc_name)
+ self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
+
+ # Wait for all the PODs to be ready
+ pod_names = get_pod_names_from_dc(self.node, dc_name)
+ self.assertEqual(replicas, len(pod_names))
+ for pod_name in pod_names:
+ wait_for_pod_be_ready(self.node, pod_name)
+
+ # Create files in each of the PODs
+ for pod_name in pod_names:
+ self.cmd_run("oc exec {0} -- touch /mnt/temp_{0}".format(pod_name))
+
+ # Check that all the created files are available at once
+ ls_out = self.cmd_run("oc exec %s -- ls /mnt" % pod_names[0]).split()
+ for pod_name in pod_names:
+ self.assertIn("temp_%s" % pod_name, ls_out)
+
+ def test_pvc_deletion_while_pod_is_running(self):
+ # CNS-584 Verify PVC deletion while pod is running
+
+ if "v3.11" in oc_version(self.node):
+ self.skipTest("Blocked by BZ-1644696")
+
+ self._create_storage_class()
+ self._create_and_wait_for_pvc()
+
+ # Create DC with POD and attached PVC to it.
+ dc_name = oc_create_app_dc_with_io(self.node, self.pvc_name)
+ self.addCleanup(oc_delete, self.node, 'dc', dc_name)
+ self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
+
+ pod_name = get_pod_name_from_dc(self.node, dc_name)
+ wait_for_pod_be_ready(self.node, pod_name, timeout=300, wait_step=10)
+
+ # delete PVC
+ oc_delete(self.node, 'pvc', self.pvc_name)
+
+ with self.assertRaises(ExecutionError):
+ wait_for_resource_absence(
+ self.node, 'pvc', self.pvc_name, interval=3, timeout=30)
+
+ # Make sure we are able to work with files on the mounted volume
+ # after deleting pvc.
+ filepath = "/mnt/file_for_testing_volume.log"
+ cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % filepath
+ ret, out, err = oc_rsh(self.node, pod_name, cmd)
+ self.assertEqual(
+ ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
+
+ def test_dynamic_provisioning_glusterfile_reclaim_policy_retain(self):
+ # CNS-1390 - Retain policy - glusterfs - delete pvc
+
+ self._create_storage_class(reclaim_policy='Retain')
+ self._create_and_wait_for_pvc()
+
+ # get the name of the volume
+ pv_name = get_pv_name_from_pvc(self.node, self.pvc_name)
+ custom = [':.metadata.annotations.'
+ '"gluster\.kubernetes\.io\/heketi\-volume\-id"',
+ ':.spec.persistentVolumeReclaimPolicy']
+
+ vol_id, reclaim_policy = oc_get_custom_resource(
+ self.node, 'pv', custom, pv_name)
+
+ self.assertEqual(reclaim_policy, 'Retain')
+
+ # Create DC with POD and attached PVC to it.
+ try:
+ dc_name = oc_create_app_dc_with_io(self.node, self.pvc_name)
+ pod_name = get_pod_name_from_dc(self.node, dc_name)
+ wait_for_pod_be_ready(self.node, pod_name)
+ finally:
+ scale_dc_pod_amount_and_wait(self.node, dc_name, 0)
+ oc_delete(self.node, 'dc', dc_name)
+ wait_for_resource_absence(self.node, 'pod', pod_name)
+
+ oc_delete(self.node, 'pvc', self.pvc_name)
+
+ with self.assertRaises(ExecutionError):
+ wait_for_resource_absence(
+ self.node, 'pvc', self.pvc_name, interval=3, timeout=30)
+
+ heketi_volume_delete(self.heketi_client_node,
+ self.heketi_server_url, vol_id)
+
+ vol_list = heketi_volume_list(self.heketi_client_node,
+ self.heketi_server_url)
+
+ self.assertNotIn(vol_id, vol_list)
+
+ oc_delete(self.node, 'pv', pv_name)
+ wait_for_resource_absence(self.node, 'pv', pv_name)