Diffstat (limited to 'tests'): 9 files changed, 277 insertions(+), 304 deletions(-)
diff --git a/tests/cns_tests_sample_config.yml b/tests/cns_tests_sample_config.yml
index 00f304db..4e1c7919 100644
--- a/tests/cns_tests_sample_config.yml
+++ b/tests/cns_tests_sample_config.yml
@@ -1,17 +1,11 @@
-log_file: /var/log/tests/cns_tests.log
-log_level: DEBUG
-
 # 'ocp_servers' is info about ocp master, client and worker nodes.
-# 'region' can be <primary|infra>.
 # This section has to be defined.
 ocp_servers:
     master:
         master_node1:
             hostname: master_node1
-            region:
         master_node2:
             hostname: master_node2
-            region:
     client:
         client_node1:
             hostname: client_node1
@@ -20,10 +14,8 @@ ocp_servers:
     nodes:
         ocp_node1:
             hostname: ocp_node1
-            region:
         ocp_node2:
             hostname: ocp_node2
-            region:
 
 # 'gluster_servers' section covers the details of the nodes where gluster
 # servers are run. In the case of CNS, these are the nodes where gluster
@@ -44,16 +36,6 @@ gluster_servers:
         devices: [device1, device2]
         additional_devices: [device3, device4]
 
-# 'additional_gluster_servers' section covers the details of the
-# additional gluster nodes to add to the gluster cluster.
-additional_gluster_servers:
-    gluster_server3:
-        manage: gluster_server3
-        storage: gluster_server3
-        zone : 3
-        devices: [device1, device2]
-        additional_devices: [device3, device4]
-
 cns:
     setup:
         routing_config: "cloudapps.mystorage.com"
@@ -81,54 +63,25 @@ cns:
         heketi_ssh_key: "/etc/heketi/heketi_key"
         heketi_config_file: "/etc/heketi/heketi.json"
         heketi_volume:
-            size:
-            name:
-            expand_size:
+            size: 1
+            name: "autotests-heketi-vol-name"
+            expand_size: 2
     dynamic_provisioning:
-        pods_info:
-            nginx:
-                size: 5
-                number_of_pods: 3
-            mongo:
-                size: 6
-                number_of_pods: 7
         storage_classes:
-            storage_class1:
-                name: storage_class1
-                provisioner:
+            file_storage_class:
+                provisioner: "kubernetes.io/glusterfs"
                 resturl:
                 restuser:
-                secretnamespace:
-                secretname:
+                secretnamespace: "<fake-namespace-name>"
                 volumenameprefix: "cns-vol"
-            storage_class2:
-                name: storage_class2
-                provisioner:
+            block_storage_class:
+                provisioner: "gluster.org/glusterblock"
                 resturl:
                 restuser:
-                restsecretnamespace:
-                restsecretname:
+                restsecretnamespace: "<fake-namespace-name>"
                 hacount: "3"
                 chapauthenabled: "true"
                 volumenameprefix: "cns-vol"
-        secrets:
-            secret1:
-                secret_name: secret1
-                namespace:
-                data_key:
-                type:
-            secret2:
-                secret_name: secret2
-                namespace:
-                data_key:
-                type:
-        start_count_for_pvc: 1
-        pvc_size_number:
-            10: 2
-            20: 1
-        app_pvc_count_dict:
-            nginx: 2
-    scale:
-        type: jenkins
         instances: 1
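
The sample config drops the standalone 'secrets' section and renames the storage class entries to 'file_storage_class' and 'block_storage_class'. The test changes below stay compatible with older configs by looking each class up under both the legacy and the new key, and by deriving the secret's namespace and type from the storage class entry itself. A minimal sketch of that lookup pattern; the 'config' dict stands in for the parsed YAML above and is purely illustrative:

    # Backward-compatible storage class lookup, as used in the tests below.
    # 'config' mimics the parsed sample YAML; values are illustrative.
    config = {
        "file_storage_class": {
            "provisioner": "kubernetes.io/glusterfs",
            "secretnamespace": "<fake-namespace-name>",
        },
    }

    # Prefer the legacy key name, fall back to the new one.
    sc = config.get("storage_class1", config.get("file_storage_class"))

    # With the 'secrets' section gone, the secret namespace and type are
    # derived from the storage class entry, with defaults as fallbacks.
    secret_namespace = sc.get("secretnamespace", "default")
    secret_type = sc.get("provisioner", "kubernetes.io/glusterfs")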
diff --git a/tests/functional/common/arbiter/test_arbiter.py b/tests/functional/common/arbiter/test_arbiter.py
index 2567483c..1cd7d134 100644
--- a/tests/functional/common/arbiter/test_arbiter.py
+++ b/tests/functional/common/arbiter/test_arbiter.py
@@ -26,19 +26,24 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
         if self.deployment_type != "cns":
             raise self.skipTest("This test can run only on CNS deployment.")
         self.node = self.ocp_master_node[0]
+        self.sc = self.cns_storage_class.get(
+            'storage_class1', self.cns_storage_class.get('file_storage_class'))
 
         # Mark one of the Heketi nodes as arbiter-supported if none of
         # existent nodes or devices already enabled to support it.
-        heketi_server_url = self.cns_storage_class['storage_class1']['resturl']
+        self.heketi_server_url = self.cns_storage_class.get(
+            'storage_class1',
+            self.cns_storage_class.get('file_storage_class'))['resturl']
         arbiter_tags = ('required', 'supported')
         arbiter_already_supported = False
 
         self.node_id_list = heketi_ops.heketi_node_list(
-            self.heketi_client_node, heketi_server_url)
+            self.heketi_client_node, self.heketi_server_url)
 
         for node_id in self.node_id_list[::-1]:
             node_info = heketi_ops.heketi_node_info(
-                self.heketi_client_node, heketi_server_url, node_id, json=True)
+                self.heketi_client_node, self.heketi_server_url,
+                node_id, json=True)
             if node_info.get('tags', {}).get('arbiter') in arbiter_tags:
                 arbiter_already_supported = True
                 break
@@ -51,7 +56,7 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
                     break
         if not arbiter_already_supported:
             self._set_arbiter_tag_with_further_revert(
-                self.heketi_client_node, heketi_server_url,
+                self.heketi_client_node, self.heketi_server_url,
                 'node', self.node_id_list[0], 'supported')
 
     def _set_arbiter_tag_with_further_revert(self, node, server_url,
@@ -75,13 +80,11 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
                 node, server_url, source, source_id)
 
     def _create_storage_class(self, avg_file_size=None):
-        sc = self.cns_storage_class['storage_class1']
-        secret = self.cns_secret['secret1']
-
         # Create secret file for usage in storage class
         self.secret_name = oc_create_secret(
-            self.node, namespace=secret['namespace'],
-            data_key=self.heketi_cli_key, secret_type=secret['type'])
+            self.node, namespace=self.sc.get('secretnamespace', 'default'),
+            data_key=self.heketi_cli_key,
+            secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs'))
         self.addCleanup(
             oc_delete, self.node, 'secret', self.secret_name)
 
@@ -91,8 +94,9 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
 
         # Create storage class
         self.sc_name = oc_create_sc(
-            self.node, resturl=sc['resturl'],
-            restuser=sc['restuser'], secretnamespace=sc['secretnamespace'],
+            self.node, resturl=self.sc['resturl'],
+            restuser=self.sc['restuser'],
+            secretnamespace=self.sc['secretnamespace'],
             secretname=self.secret_name,
             volumeoptions=vol_options,
         )
@@ -213,11 +217,11 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
         """Test case CNS-942"""
 
         # Set arbiter:disabled tag to the data devices and get their info
-        heketi_server_url = self.cns_storage_class['storage_class1']['resturl']
         data_nodes = []
         for node_id in self.node_id_list[0:2]:
             node_info = heketi_ops.heketi_node_info(
-                self.heketi_client_node, heketi_server_url, node_id, json=True)
+                self.heketi_client_node, self.heketi_server_url,
+                node_id, json=True)
 
             if len(node_info['devices']) < 2:
                 self.skipTest(
@@ -228,11 +232,11 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
                     "Devices are expected to have more than 3Gb of free space")
             for device in node_info['devices']:
                 self._set_arbiter_tag_with_further_revert(
-                    self.heketi_client_node, heketi_server_url,
+                    self.heketi_client_node, self.heketi_server_url,
                     'device', device['id'], 'disabled',
                     device.get('tags', {}).get('arbiter'))
             self._set_arbiter_tag_with_further_revert(
-                self.heketi_client_node, heketi_server_url,
+                self.heketi_client_node, self.heketi_server_url,
                 'node', node_id, 'disabled',
                 node_info.get('tags', {}).get('arbiter'))
 
@@ -241,14 +245,15 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
 
         # Set arbiter:required tag to all other nodes and their devices
         for node_id in self.node_id_list[2:]:
             node_info = heketi_ops.heketi_node_info(
-                self.heketi_client_node, heketi_server_url, node_id, json=True)
+                self.heketi_client_node, self.heketi_server_url,
+                node_id, json=True)
             self._set_arbiter_tag_with_further_revert(
-                self.heketi_client_node, heketi_server_url,
+                self.heketi_client_node, self.heketi_server_url,
                 'node', node_id, 'required',
                 node_info.get('tags', {}).get('arbiter'))
             for device in node_info['devices']:
                 self._set_arbiter_tag_with_further_revert(
-                    self.heketi_client_node, heketi_server_url,
+                    self.heketi_client_node, self.heketi_server_url,
                     'device', device['id'], 'required',
                     device.get('tags', {}).get('arbiter'))
 
@@ -300,14 +305,14 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
         # to reduce its size, then enable smaller device back.
         try:
             out = heketi_ops.heketi_device_disable(
-                self.heketi_client_node, heketi_server_url,
+                self.heketi_client_node, self.heketi_server_url,
                 smaller_device_id)
             self.assertTrue(out)
             self._create_and_wait_for_pvc(
                 int(helper_vol_size_kb / 1024.0**2) + 1)
         finally:
             out = heketi_ops.heketi_device_enable(
-                self.heketi_client_node, heketi_server_url,
+                self.heketi_client_node, self.heketi_server_url,
                 smaller_device_id)
             self.assertTrue(out)
 
@@ -432,22 +437,21 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
         pvc_amount = 3
 
         # Get Heketi nodes info
-        heketi_server_url = self.cns_storage_class['storage_class1']['resturl']
         node_id_list = heketi_ops.heketi_node_list(
-            self.heketi_client_node, heketi_server_url)
+            self.heketi_client_node, self.heketi_server_url)
 
         # Set arbiter:required tags
         arbiter_node = heketi_ops.heketi_node_info(
-            self.heketi_client_node, heketi_server_url, node_id_list[0],
+            self.heketi_client_node, self.heketi_server_url, node_id_list[0],
             json=True)
         arbiter_nodes_ip_addresses = arbiter_node['hostnames']['storage']
         self._set_arbiter_tag_with_further_revert(
-            self.heketi_client_node, heketi_server_url, 'node',
+            self.heketi_client_node, self.heketi_server_url, 'node',
             node_id_list[0], ('required' if node_with_tag else None),
             revert_to=arbiter_node.get('tags', {}).get('arbiter'))
         for device in arbiter_node['devices']:
             self._set_arbiter_tag_with_further_revert(
-                self.heketi_client_node, heketi_server_url, 'device',
+                self.heketi_client_node, self.heketi_server_url, 'device',
                 device['id'], (None if node_with_tag else 'required'),
                 revert_to=device.get('tags', {}).get('arbiter'))
 
@@ -455,7 +459,8 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
         data_nodes, data_nodes_ip_addresses = [], []
         for node_id in node_id_list[1:]:
             node_info = heketi_ops.heketi_node_info(
-                self.heketi_client_node, heketi_server_url, node_id, json=True)
+                self.heketi_client_node, self.heketi_server_url,
+                node_id, json=True)
             if not any([int(d['storage']['free']) > (pvc_amount * 1024**2)
                         for d in node_info['devices']]):
                 self.skipTest(
@@ -464,11 +469,11 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
             data_nodes_ip_addresses.extend(node_info['hostnames']['storage'])
             for device in node_info['devices']:
                 self._set_arbiter_tag_with_further_revert(
-                    self.heketi_client_node, heketi_server_url, 'device',
+                    self.heketi_client_node, self.heketi_server_url, 'device',
                     device['id'], (None if node_with_tag else 'disabled'),
                     revert_to=device.get('tags', {}).get('arbiter'))
             self._set_arbiter_tag_with_further_revert(
-                self.heketi_client_node, heketi_server_url, 'node',
+                self.heketi_client_node, self.heketi_server_url, 'node',
                 node_id, ('disabled' if node_with_tag else None),
                 revert_to=node_info.get('tags', {}).get('arbiter'))
             data_nodes.append(node_info)
@@ -504,11 +509,11 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
         # Set arbiter:disabled tags to the first 2 nodes
         data_nodes = []
         biggest_disks = []
-        heketi_server_url = self.cns_storage_class['storage_class1']['resturl']
         self.assertGreater(len(self.node_id_list), 2)
         for node_id in self.node_id_list[0:2]:
             node_info = heketi_ops.heketi_node_info(
-                self.heketi_client_node, heketi_server_url, node_id, json=True)
+                self.heketi_client_node, self.heketi_server_url,
+                node_id, json=True)
             biggest_disk_free_space = 0
             for device in node_info['devices']:
                 disk_free_space = int(device['storage']['free'])
@@ -519,12 +524,12 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
                 if disk_free_space > biggest_disk_free_space:
                     biggest_disk_free_space = disk_free_space
                 self._set_arbiter_tag_with_further_revert(
-                    self.heketi_client_node, heketi_server_url, 'device',
+                    self.heketi_client_node, self.heketi_server_url, 'device',
                     device['id'], 'disabled',
                     revert_to=device.get('tags', {}).get('arbiter'))
             biggest_disks.append(biggest_disk_free_space)
             self._set_arbiter_tag_with_further_revert(
-                self.heketi_client_node, heketi_server_url, 'node',
+                self.heketi_client_node, self.heketi_server_url, 'node',
                 node_id, 'disabled',
                 revert_to=node_info.get('tags', {}).get('arbiter'))
             data_nodes.append(node_info)
@@ -533,14 +538,15 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.CnsBaseClass):
         arbiter_nodes = []
         for node_id in self.node_id_list[2:]:
             node_info = heketi_ops.heketi_node_info(
-                self.heketi_client_node, heketi_server_url, node_id, json=True)
+                self.heketi_client_node, self.heketi_server_url,
+                node_id, json=True)
             for device in node_info['devices']:
                 self._set_arbiter_tag_with_further_revert(
-                    self.heketi_client_node, heketi_server_url, 'device',
+                    self.heketi_client_node, self.heketi_server_url, 'device',
                     device['id'], 'required',
                     revert_to=device.get('tags', {}).get('arbiter'))
             self._set_arbiter_tag_with_further_revert(
-                self.heketi_client_node, heketi_server_url, 'node',
+                self.heketi_client_node, self.heketi_server_url, 'node',
                 node_id, 'required',
                 revert_to=node_info.get('tags', {}).get('arbiter'))
             arbiter_nodes.append(node_info)
diff --git a/tests/functional/common/gluster_stability/test_gluster_services_restart.py b/tests/functional/common/gluster_stability/test_gluster_services_restart.py
index 2cc09099..0a5d4e5e 100644
--- a/tests/functional/common/gluster_stability/test_gluster_services_restart.py
+++ b/tests/functional/common/gluster_stability/test_gluster_services_restart.py
@@ -51,17 +51,14 @@ class GlusterStabilityTestSetup(CnsBaseClass):
         # which uses time and date of test case
         self.prefix = "autotest-%s" % (self.glustotest_run_id.replace("_", ""))
 
-        _cns_storage_class = self.cns_storage_class['storage_class2']
+        _cns_storage_class = self.cns_storage_class.get(
+            'storage_class2',
+            self.cns_storage_class.get('block_storage_class'))
         self.provisioner = _cns_storage_class["provisioner"]
-        self.restsecretname = _cns_storage_class["restsecretname"]
         self.restsecretnamespace = _cns_storage_class["restsecretnamespace"]
         self.restuser = _cns_storage_class["restuser"]
         self.resturl = _cns_storage_class["resturl"]
 
-        _cns_secret = self.cns_secret['secret2']
-        self.secretnamespace = _cns_secret['namespace']
-        self.secrettype = _cns_secret['type']
-
         # using pvc size count as 1 by default
         self.pvcsize = 1
 
@@ -112,8 +109,8 @@ class GlusterStabilityTestSetup(CnsBaseClass):
             secretname (str): created secret file name
         """
         secretname = oc_create_secret(
-            self.oc_node, namespace=self.secretnamespace,
-            data_key=self.heketi_cli_key, secret_type=self.secrettype)
+            self.oc_node, namespace=self.restsecretnamespace,
+            data_key=self.heketi_cli_key, secret_type=self.provisioner)
         self.addCleanup(oc_delete, self.oc_node, 'secret', secretname)
 
         sc_name = oc_create_sc(
diff --git a/tests/functional/common/heketi/heketi_tests/test_check_entry.py b/tests/functional/common/heketi/heketi_tests/test_check_entry.py
deleted file mode 100644
index 47a0b3f2..00000000
--- a/tests/functional/common/heketi/heketi_tests/test_check_entry.py
+++ /dev/null
@@ -1,132 +0,0 @@
-from glusto.core import Glusto as g
-from glustolibs.gluster.exceptions import ConfigError
-
-from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
-from cnslibs.common.heketi_ops import (heketi_volume_create,
-                                       heketi_volume_list,
-                                       heketi_volume_delete)
-from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names
-from cnslibs.common import podcmd
-
-
-class TestHeketiVolume(HeketiClientSetupBaseClass):
-    """
-    Check /etc/fstab entry
-    """
-    @classmethod
-    def setUpClass(cls):
-        super(TestHeketiVolume, cls).setUpClass()
-        cls.volume_size = cls.heketi_volume['size']
-
-    @podcmd.GlustoPod()
-    def test_to_check_entry_in_fstab_file(self):
-        """
-        Create a heketi volume and check entry
-        in /etc/fstab and delete heketi volume
-        and check corresponding brick entry must
-        be removed
-        """
-
-        # Create heketi volume
-        g.log.info("Creating a heketi volume")
-        out = heketi_volume_create(self.heketi_client_node,
-                                   self.heketi_server_url,
-                                   self.volume_size, json=True)
-        self.assertTrue(out, ("Failed to create heketi volume "
-                        "of size %s" % str(self.volume_size)))
-        g.log.info("Heketi volume successfully created" % out)
-        self.volume_id = out["bricks"][0]["volume"]
-        path = []
-        for i in out["bricks"]:
-            path.append(i["path"].rstrip("/brick"))
-
-        # Listing heketi volumes
-        g.log.info("List heketi volumes")
-        out = heketi_volume_list(self.heketi_client_node,
-                                 self.heketi_server_url)
-        self.assertTrue(out, ("Failed to list heketi volumes"))
-        g.log.info("Heketi volume successfully listed")
-
-        gluster_pod = get_ocp_gluster_pod_names(
-            self.heketi_client_node)[1]
-
-        cmd = "oc rsync " + gluster_pod + ":/var/lib/heketi/fstab /tmp"
-        out = g.run(self.heketi_client_node, cmd)
-        self.assertTrue(out, ("Failed to copy the file"))
-        g.log.info("Copied the file")
-        out = g.run_local(
-            "scp -r root@%s:/tmp/fstab "
-            "/tmp/file.txt" % self.heketi_client_node)
-        self.assertTrue(out, ("Failed to copy a file to /tmp/file.txt"))
-        g.log.info("Successfully copied to /tmp/file.txt")
-        out = g.run_local("ls /tmp")
-        self.assertTrue(out, ("Failed to list"))
-        g.log.info("Successfully listed")
-
-        # open /tmp/fstab file
-        datafile = open("/tmp/file.txt")
-        # Check if the brick is mounted
-        for i in path:
-            string_to_search = i
-            rcode, rout, rerr = g.run_local(
-                'grep %s %s' % (string_to_search, "/tmp/file.txt"))
-            if rcode == 0:
-                g.log.info("Brick %s is mounted" % i)
-        datafile.close()
-
-        out = g.run(self.heketi_client_node, "rm -rf /tmp/fstab")
-        self.assertTrue(out, ("Failed to delete a file /tmp/fstab"))
-        g.log.info("Successfully removed /tmp/fstab")
-        out = g.run_local("rm -rf /tmp/file.txt")
-        self.assertTrue(out, ("Failed to delete a file /tmp/file.txt"))
-        g.log.info("Successfully removed /tmp/file.txt")
-
-        # Delete heketi volume
-        g.log.info("Deleting heketi volumes")
-        out = heketi_volume_delete(self.heketi_client_node,
-                                   self.heketi_server_url,
-                                   self.volume_id)
-        self.assertTrue(out, ("Failed to delete "
-                        "heketi volume %s" % self.volume_id))
-        g.log.info("Heketi volume successfully deleted %s" % self.volume_id)
-
-        # Listing heketi volumes
-        g.log.info("List heketi volumes")
-        out = heketi_volume_list(self.heketi_client_node,
-                                 self.heketi_server_url)
-        self.assertTrue(out, ("Failed to list or No volumes to list"))
-        g.log.info("Heketi volume successfully listed")
-
-        # Check entry /etc/fstab
-        gluster_pod = get_ocp_gluster_pod_names(
-            self.heketi_client_node)[0]
-
-        cmd = "oc rsync " + gluster_pod + ":/var/lib/heketi/fstab /"
-        out = g.run(self.heketi_client_node, cmd)
-        self.assertTrue(out, ("Failed to copy the file"))
-        g.log.info("Copied the file")
-        out = g.run_local(
-            "scp -r root@%s:/fstab /tmp/newfile.txt" % self.heketi_client_node)
-        self.assertTrue(out, ("Failed to copy to the file newfile.txt"))
-        g.log.info("Successfully copied to the file newfile.txt")
-        out = g.run_local("ls /tmp")
-        self.assertTrue(out, ("Failed to list"))
-        g.log.info("Successfully listed")
-
-        # open /tmp/newfile.txt file
-        datafile = open("/tmp/newfile.txt")
-        # Check if the brick is mounted
-        for i in path:
-            string_to_search = i
-            rcode, rout, rerr = g.run_local(
-                'grep %s %s' % (string_to_search, "/tmp/newfile.txt"))
-            if rcode == 0:
-                raise ConfigError("Particular %s brick entry is found" % i)
-        datafile.close()
-
-        out = g.run(self.heketi_client_node, "rm -rf /fstab")
-        self.assertTrue(out, ("Failed to delete a file /fstab"))
-        g.log.info("Successfully removed /fstab")
-        out = g.run_local("rm -rf /tmp/newfile.txt")
-        self.assertTrue(out, ("Failed to delete a file /tmp/newfile.txt"))
-        g.log.info("Successfully removed /tmp/file.txt")
diff --git a/tests/functional/common/heketi/test_check_entries.py b/tests/functional/common/heketi/test_check_entries.py
new file mode 100644
index 00000000..be7add9e
--- /dev/null
+++ b/tests/functional/common/heketi/test_check_entries.py
@@ -0,0 +1,54 @@
+from glusto.core import Glusto as g
+
+from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common.heketi_ops import (heketi_volume_create,
+                                       heketi_volume_delete)
+from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names
+
+
+class TestHeketiVolume(HeketiClientSetupBaseClass):
+    """Check volume bricks presence in fstab files on Gluster PODs."""
+
+    def _find_bricks_in_fstab_files(self, brick_paths, present):
+        """Make sure that vol brick paths either exist or not in fstab file."""
+        oc_node = self.ocp_master_nodes[0]
+        gluster_pods = get_ocp_gluster_pod_names(oc_node)
+        get_fstab_entries_cmd = "oc exec %s -- cat /var/lib/heketi/fstab"
+        fstab_files_data = ''
+        for gluster_pod in gluster_pods:
+            ret, out, err = g.run(oc_node, get_fstab_entries_cmd % gluster_pod)
+            self.assertEqual(
+                ret, 0,
+                "Failed to read fstab file on '%s' gluster POD. "
+                "\nOut: %s \nError: %s" % (gluster_pod, out, err))
+            fstab_files_data += '%s\n' % out
+        assertion_method = self.assertIn if present else self.assertNotIn
+        for brick_path in brick_paths:
+            assertion_method(brick_path, fstab_files_data)
+
+    def test_to_check_entry_in_fstab_file(self):
+        """Test case CNS-778"""
+
+        # Create heketi volume
+        vol = heketi_volume_create(
+            self.heketi_client_node, self.heketi_server_url, size=1, json=True)
+        self.assertTrue(vol, "Failed to create 1Gb heketi volume")
+        vol_id = vol["bricks"][0]["volume"]
+        self.addCleanup(
+            heketi_volume_delete,
+            self.heketi_client_node, self.heketi_server_url, vol_id,
+            raise_on_error=False)
+
+        # Gather brick paths
+        brick_paths = [p['path'].rstrip("/brick") for p in vol["bricks"]]
+
+        # Make sure that volume's brick paths exist in the fstab files
+        self._find_bricks_in_fstab_files(brick_paths, present=True)
+
+        # Delete heketi volume
+        out = heketi_volume_delete(
+            self.heketi_client_node, self.heketi_server_url, vol_id)
+        self.assertTrue(out, "Failed to delete heketi volume %s" % vol_id)
+
+        # Make sure that volume's brick paths are absent in the fstab file
+        self._find_bricks_in_fstab_files(brick_paths, present=False)
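
The rewritten fstab check drops the old 'oc rsync' plus 'scp' file shuffling in favor of a single 'oc exec ... cat' per Gluster pod, so no temporary files are created on either host. A standalone sketch of that probe, assuming the same glusto 'g.run' helper the test uses; the host and pod names are illustrative:

    # Read each Gluster POD's heketi fstab over 'oc exec' and check brick
    # paths as substrings. Names below are illustrative placeholders.
    from glusto.core import Glusto as g

    oc_node = "ocp-master.example.com"
    gluster_pods = ["glusterfs-storage-abc12", "glusterfs-storage-def34"]

    fstab_data = ""
    for pod in gluster_pods:
        ret, out, err = g.run(
            oc_node, "oc exec %s -- cat /var/lib/heketi/fstab" % pod)
        assert ret == 0, "failed to read fstab on %s: %s" % (pod, err)
        fstab_data += "%s\n" % out

    # Presence or absence of a brick then becomes a substring check:
    brick_path = "/var/lib/heketi/mounts/vg_x/brick_y"  # illustrative
    is_mounted = brick_path in fstab_data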
" + "\nOut: %s \nError: %s" % (gluster_pod, out, err)) + fstab_files_data += '%s\n' % out + assertion_method = self.assertIn if present else self.assertNotIn + for brick_path in brick_paths: + assertion_method(brick_path, fstab_files_data) + + def test_to_check_entry_in_fstab_file(self): + """Test case CNS-778""" + + # Create heketi volume + vol = heketi_volume_create( + self.heketi_client_node, self.heketi_server_url, size=1, json=True) + self.assertTrue(vol, "Failed to create 1Gb heketi volume") + vol_id = vol["bricks"][0]["volume"] + self.addCleanup( + heketi_volume_delete, + self.heketi_client_node, self.heketi_server_url, vol_id, + raise_on_error=False) + + # Gather brick paths + brick_paths = [p['path'].rstrip("/brick") for p in vol["bricks"]] + + # Make sure that volume's brick paths exist in the fstab files + self._find_bricks_in_fstab_files(brick_paths, present=True) + + # Delete heketi volume + out = heketi_volume_delete( + self.heketi_client_node, self.heketi_server_url, vol_id) + self.assertTrue(out, "Failed to delete heketi volume %s" % vol_id) + + # Make sure that volume's brick paths are absent in the fstab file + self._find_bricks_in_fstab_files(brick_paths, present=False) diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py index ecd47176..81fec14e 100644 --- a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py +++ b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py @@ -37,16 +37,18 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass): def setUp(self): super(TestDynamicProvisioningBlockP0, self).setUp() self.node = self.ocp_master_node[0] - self.sc = self.cns_storage_class['storage_class2'] + self.sc = self.cns_storage_class.get( + 'storage_class2', + self.cns_storage_class.get('block_storage_class')) def _create_storage_class(self, hacount=True, create_name_prefix=False, reclaim_policy="Delete"): - secret = self.cns_secret['secret2'] - # Create secret file self.secret_name = oc_create_secret( - self.node, namespace=secret['namespace'], - data_key=self.heketi_cli_key, secret_type=secret['type']) + self.node, + namespace=self.sc.get('restsecretnamespace', 'default'), + data_key=self.heketi_cli_key, + secret_type=self.sc.get('provisioner', 'gluster.org/glusterblock')) self.addCleanup(oc_delete, self.node, 'secret', self.secret_name) # create storage class diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py index 2f2a0aa3..6d789aa3 100644 --- a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py +++ b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py @@ -40,17 +40,18 @@ class TestDynamicProvisioningP0(CnsBaseClass): def setUp(self): super(TestDynamicProvisioningP0, self).setUp() self.node = self.ocp_master_node[0] - self.sc = self.cns_storage_class['storage_class1'] + self.sc = self.cns_storage_class.get( + 'storage_class1', self.cns_storage_class.get('file_storage_class')) def _create_storage_class( self, create_name_prefix=False, reclaim_policy='Delete'): - sc = self.cns_storage_class['storage_class1'] - secret = self.cns_secret['secret1'] # Create secret file for usage in storage class self.secret_name = oc_create_secret( - self.node, namespace=secret['namespace'], - data_key=self.heketi_cli_key, 
secret_type=secret['type']) + self.node, + namespace=self.sc.get('secretnamespace', 'default'), + data_key=self.heketi_cli_key, + secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs')) self.addCleanup( oc_delete, self.node, 'secret', self.secret_name) @@ -58,10 +59,11 @@ class TestDynamicProvisioningP0(CnsBaseClass): self.sc_name = oc_create_sc( self.node, reclaim_policy=reclaim_policy, - resturl=sc['resturl'], - restuser=sc['restuser'], secretnamespace=sc['secretnamespace'], + resturl=self.sc['resturl'], + restuser=self.sc['restuser'], + secretnamespace=self.sc['secretnamespace'], secretname=self.secret_name, - **({"volumenameprefix": sc['volumenameprefix']} + **({"volumenameprefix": self.sc['volumenameprefix']} if create_name_prefix else {}) ) self.addCleanup(oc_delete, self.node, 'sc', self.sc_name) @@ -295,58 +297,56 @@ class TestDynamicProvisioningP0(CnsBaseClass): self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd, self.node)) def test_storage_class_mandatory_params_glusterfile(self): - # CNS-442 storage-class mandatory parameters - sc = self.cns_storage_class['storage_class1'] - secret = self.cns_secret['secret1'] - node = self.ocp_master_node[0] + """Test case CNS-442 - storage-class mandatory parameters""" + # create secret self.secret_name = oc_create_secret( - node, - namespace=secret['namespace'], + self.node, + namespace=self.sc.get('secretnamespace', 'default'), data_key=self.heketi_cli_key, - secret_type=secret['type']) + secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs')) self.addCleanup( - oc_delete, node, 'secret', self.secret_name) + oc_delete, self.node, 'secret', self.secret_name) # create storage class with mandatory parameters only self.sc_name = oc_create_sc( - node, provisioner='kubernetes.io/glusterfs', - resturl=sc['resturl'], restuser=sc['restuser'], - secretnamespace=sc['secretnamespace'], + self.node, provisioner='kubernetes.io/glusterfs', + resturl=self.sc['resturl'], restuser=self.sc['restuser'], + secretnamespace=self.sc['secretnamespace'], secretname=self.secret_name ) - self.addCleanup(oc_delete, node, 'sc', self.sc_name) + self.addCleanup(oc_delete, self.node, 'sc', self.sc_name) # Create PVC - pvc_name = oc_create_pvc(node, self.sc_name) - self.addCleanup(wait_for_resource_absence, node, 'pvc', pvc_name) - self.addCleanup(oc_delete, node, 'pvc', pvc_name) - verify_pvc_status_is_bound(node, pvc_name) + pvc_name = oc_create_pvc(self.node, self.sc_name) + self.addCleanup(wait_for_resource_absence, self.node, 'pvc', pvc_name) + self.addCleanup(oc_delete, self.node, 'pvc', pvc_name) + verify_pvc_status_is_bound(self.node, pvc_name) # Create DC with POD and attached PVC to it. 
- dc_name = oc_create_app_dc_with_io(node, pvc_name) - self.addCleanup(oc_delete, node, 'dc', dc_name) - self.addCleanup(scale_dc_pod_amount_and_wait, node, dc_name, 0) + dc_name = oc_create_app_dc_with_io(self.node, pvc_name) + self.addCleanup(oc_delete, self.node, 'dc', dc_name) + self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0) - pod_name = get_pod_name_from_dc(node, dc_name) - wait_for_pod_be_ready(node, pod_name) + pod_name = get_pod_name_from_dc(self.node, dc_name) + wait_for_pod_be_ready(self.node, pod_name) # Make sure we are able to work with files on the mounted volume filepath = "/mnt/file_for_testing_sc.log" cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % filepath - ret, out, err = oc_rsh(node, pod_name, cmd) + ret, out, err = oc_rsh(self.node, pod_name, cmd) self.assertEqual( - ret, 0, "Failed to execute command %s on %s" % (cmd, node)) + ret, 0, "Failed to execute command %s on %s" % (cmd, self.node)) cmd = "ls -lrt %s" % filepath - ret, out, err = oc_rsh(node, pod_name, cmd) + ret, out, err = oc_rsh(self.node, pod_name, cmd) self.assertEqual( - ret, 0, "Failed to execute command %s on %s" % (cmd, node)) + ret, 0, "Failed to execute command %s on %s" % (cmd, self.node)) cmd = "rm -rf %s" % filepath - ret, out, err = oc_rsh(node, pod_name, cmd) + ret, out, err = oc_rsh(self.node, pod_name, cmd) self.assertEqual( - ret, 0, "Failed to execute command %s on %s" % (cmd, node)) + ret, 0, "Failed to execute command %s on %s" % (cmd, self.node)) def test_dynamic_provisioning_glusterfile_heketidown_pvc_delete(self): """ Delete PVC's when heketi is down CNS-438 """ @@ -490,3 +490,58 @@ class TestDynamicProvisioningP0(CnsBaseClass): oc_delete(self.node, 'pv', pv_name) wait_for_resource_absence(self.node, 'pv', pv_name) + + def test_usage_of_default_storage_class(self): + """Test case CNS-928""" + + # Unset 'default' option from all the existing Storage Classes + unset_sc_annotation_cmd = ( + r"""oc annotate sc %s """ + r""""storageclass%s.kubernetes.io/is-default-class"-""") + set_sc_annotation_cmd = ( + r"""oc patch storageclass %s -p'{"metadata": {"annotations": """ + r"""{"storageclass%s.kubernetes.io/is-default-class": "%s"}}}'""") + get_sc_cmd = ( + r'oc get sc --no-headers ' + r'-o=custom-columns=:.metadata.name,' + r':".metadata.annotations.storageclass\.' + r'kubernetes\.io\/is-default-class",:".metadata.annotations.' + r'storageclass\.beta\.kubernetes\.io\/is-default-class"') + sc_list = self.cmd_run(get_sc_cmd) + for sc in sc_list.split("\n"): + sc = sc.split() + if len(sc) != 3: + self.skipTest( + "Unexpected output for list of storage classes. 
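
The new CNS-928 test toggles the cluster's default StorageClass through the 'is-default-class' annotation, touching both the GA and the beta annotation keys because different OpenShift versions honor different ones. A sketch of the two command templates the test builds; 'my-sc' is an illustrative class name:

    # Build the same 'oc' commands the test uses for setting and unsetting
    # the default StorageClass mark. 'my-sc' is a placeholder class name.
    set_default = (
        'oc patch storageclass %s -p'
        '\'{"metadata": {"annotations": '
        '{"storageclass%s.kubernetes.io/is-default-class": "%s"}}}\'')
    unset_default = (
        'oc annotate sc %s '
        '"storageclass%s.kubernetes.io/is-default-class"-')

    for api_type in ('', '.beta'):  # GA and beta annotation variants
        print(set_default % ('my-sc', api_type, 'true'))
        print(unset_default % ('my-sc', api_type))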
" + "Following is expected to contain 3 keys:: %s" % sc) + for value, api_type in ((sc[1], ''), (sc[2], '.beta')): + if value == '<none>': + continue + self.cmd_run(unset_sc_annotation_cmd % (sc[0], api_type)) + self.addCleanup( + self.cmd_run, + set_sc_annotation_cmd % (sc[0], api_type, value)) + + # Create new SC + prefix = "autotests-default-sc" + self._create_storage_class(prefix) + + # Make new SC be the default one and sleep for 1 sec to avoid races + self.cmd_run(set_sc_annotation_cmd % (self.sc_name, '', 'true')) + self.cmd_run(set_sc_annotation_cmd % (self.sc_name, '.beta', 'true')) + time.sleep(1) + + # Create PVC without specification of SC + pvc_name = oc_create_pvc( + self.node, sc_name=None, pvc_name_prefix=prefix) + self.addCleanup( + wait_for_resource_absence, self.node, 'pvc', pvc_name) + self.addCleanup(oc_delete, self.node, 'pvc', pvc_name) + + # Wait for successful creation of PVC and check its SC + verify_pvc_status_is_bound(self.node, pvc_name) + get_sc_of_pvc_cmd = ( + "oc get pvc %s --no-headers " + "-o=custom-columns=:.spec.storageClassName" % pvc_name) + out = self.cmd_run(get_sc_of_pvc_cmd) + self.assertEqual(out, self.sc_name) diff --git a/tests/functional/common/provisioning/test_pv_resize.py b/tests/functional/common/provisioning/test_pv_resize.py index 2552bf56..5412b5fd 100644 --- a/tests/functional/common/provisioning/test_pv_resize.py +++ b/tests/functional/common/provisioning/test_pv_resize.py @@ -46,27 +46,26 @@ class TestPvResizeClass(CnsBaseClass): "version %s " % self.version) g.log.error(msg) raise self.skipTest(msg) + self.sc = self.cns_storage_class.get( + 'storage_class1', self.cns_storage_class.get('file_storage_class')) def _create_storage_class(self, volname_prefix=False): - sc = self.cns_storage_class['storage_class1'] - secret = self.cns_secret['secret1'] - # create secret self.secret_name = oc_create_secret( self.node, - namespace=secret['namespace'], + namespace=self.sc.get('secretnamespace', 'default'), data_key=self.heketi_cli_key, - secret_type=secret['type']) + secret_type=self.sc.get('provisioner', 'kubernetes.io/glusterfs')) self.addCleanup(oc_delete, self.node, 'secret', self.secret_name) # create storageclass self.sc_name = oc_create_sc( self.node, provisioner='kubernetes.io/glusterfs', - resturl=sc['resturl'], restuser=sc['restuser'], - secretnamespace=sc['secretnamespace'], + resturl=self.sc['resturl'], restuser=self.sc['restuser'], + secretnamespace=self.sc['secretnamespace'], secretname=self.secret_name, allow_volume_expansion=True, - **({"volumenameprefix": sc['volumenameprefix']} + **({"volumenameprefix": self.sc['volumenameprefix']} if volname_prefix else {}) ) self.addCleanup(oc_delete, self.node, 'sc', self.sc_name) @@ -96,10 +95,9 @@ class TestPvResizeClass(CnsBaseClass): pod_name = get_pod_name_from_dc(node, dc_name) wait_for_pod_be_ready(node, pod_name) if volname_prefix: - storage_class = self.cns_storage_class['storage_class1'] ret = heketi_ops.verify_volume_name_prefix( - node, storage_class['volumenameprefix'], - storage_class['secretnamespace'], + node, self.sc['volumenameprefix'], + self.sc['secretnamespace'], pvc_name, self.heketi_server_url) self.assertTrue(ret, "verify volnameprefix failed") cmd = ("dd if=/dev/urandom of=%sfile " @@ -127,11 +125,9 @@ class TestPvResizeClass(CnsBaseClass): self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( cmd, node)) - def test_pv_resize_no_free_space(self): - """Test case CNS-1040""" + def _pv_resize(self, exceed_free_space): dir_path = "/mnt" - pvc_size_gb = 1 - 
min_free_space_gb = 3 + pvc_size_gb, min_free_space_gb = 1, 3 # Get available free space disabling redundant devices and nodes heketi_url = self.heketi_server_url @@ -189,17 +185,51 @@ class TestPvResizeClass(CnsBaseClass): pod_name = get_pod_name_from_dc(self.node, dc_name) wait_for_pod_be_ready(self.node, pod_name) - # Try to expand existing PVC exceeding free space - resize_pvc(self.node, pvc_name, available_size_gb) - wait_for_events( - self.node, obj_name=pvc_name, event_reason='VolumeResizeFailed') + if exceed_free_space: + # Try to expand existing PVC exceeding free space + resize_pvc(self.node, pvc_name, available_size_gb) + wait_for_events(self.node, obj_name=pvc_name, + event_reason='VolumeResizeFailed') - # Check that app POD is up and runnig then try to write data - wait_for_pod_be_ready(self.node, pod_name) - cmd = "dd if=/dev/urandom of=%s/autotest bs=100K count=1" % dir_path - ret, out, err = oc_rsh(self.node, pod_name, cmd) - self.assertEqual( - ret, 0, "Failed to write data after failed attempt to expand PVC.") + # Check that app POD is up and runnig then try to write data + wait_for_pod_be_ready(self.node, pod_name) + cmd = ( + "dd if=/dev/urandom of=%s/autotest bs=100K count=1" % dir_path) + ret, out, err = oc_rsh(self.node, pod_name, cmd) + self.assertEqual( + ret, 0, + "Failed to write data after failed attempt to expand PVC.") + else: + # Expand existing PVC using all the available free space + expand_size_gb = available_size_gb - pvc_size_gb + resize_pvc(self.node, pvc_name, expand_size_gb) + verify_pvc_size(self.node, pvc_name, expand_size_gb) + pv_name = get_pv_name_from_pvc(self.node, pvc_name) + verify_pv_size(self.node, pv_name, expand_size_gb) + wait_for_events( + self.node, obj_name=pvc_name, + event_reason='VolumeResizeSuccessful') + + # Recreate app POD + oc_delete(self.node, 'pod', pod_name) + wait_for_resource_absence(self.node, 'pod', pod_name) + pod_name = get_pod_name_from_dc(self.node, dc_name) + wait_for_pod_be_ready(self.node, pod_name) + + # Write data on the expanded PVC + cmd = ("dd if=/dev/urandom of=%s/autotest " + "bs=1M count=1025" % dir_path) + ret, out, err = oc_rsh(self.node, pod_name, cmd) + self.assertEqual( + ret, 0, "Failed to write data on the expanded PVC") + + def test_pv_resize_no_free_space(self): + """Test case CNS-1040""" + self._pv_resize(exceed_free_space=True) + + def test_pv_resize_by_exact_free_space(self): + """Test case CNS-1041""" + self._pv_resize(exceed_free_space=False) def test_pv_resize_try_shrink_pv_size(self): """testcase CNS-1039 """ diff --git a/tests/functional/common/provisioning/test_storage_class_cases.py b/tests/functional/common/provisioning/test_storage_class_cases.py index 7e318eb0..52ac761a 100644 --- a/tests/functional/common/provisioning/test_storage_class_cases.py +++ b/tests/functional/common/provisioning/test_storage_class_cases.py @@ -34,12 +34,16 @@ class TestStorageClassCases(cns_baseclass.CnsBaseClass): parameter (dict): dictionary with storage class parameters """ if vol_type == "glusterfile": - sc = self.cns_storage_class['storage_class1'] - secret = self.cns_secret['secret1'] + sc = self.cns_storage_class.get( + 'storage_class1', + self.cns_storage_class.get('file_storage_class')) + # Create secret file for usage in storage class self.secret_name = oc_create_secret( - self.ocp_master_node[0], namespace=secret['namespace'], - data_key=self.heketi_cli_key, secret_type=secret['type']) + self.ocp_master_node[0], + namespace=sc.get('secretnamespace', 'default'), + data_key=self.heketi_cli_key, + 
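
Both resize scenarios now run through one parametrized helper, _pv_resize(exceed_free_space): exceeding the free space must produce a 'VolumeResizeFailed' event while the pod stays writable, and expanding by exactly the free space must end with 'VolumeResizeSuccessful' and matching PVC/PV sizes. A condensed sketch of the success path, assuming the helpers shown in this diff are importable from cnslibs.common.openshift_ops:

    # Success-path sketch of _pv_resize() above; 'node' and 'pvc' are
    # illustrative arguments, helpers are assumed to come from cnslibs.
    from cnslibs.common.openshift_ops import (
        get_pv_name_from_pvc, resize_pvc, verify_pv_size, verify_pvc_size,
        wait_for_events)

    def expand_pvc_by_free_space(node, pvc, available_size_gb, pvc_size_gb=1):
        # Grow the claim by exactly the remaining free space ...
        expand_size_gb = available_size_gb - pvc_size_gb
        resize_pvc(node, pvc, expand_size_gb)
        # ... verify both the PVC and its bound PV report the new size ...
        verify_pvc_size(node, pvc, expand_size_gb)
        verify_pv_size(node, get_pv_name_from_pvc(node, pvc), expand_size_gb)
        # ... and wait for the resize event instead of polling statuses.
        wait_for_events(
            node, obj_name=pvc, event_reason='VolumeResizeSuccessful')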
diff --git a/tests/functional/common/provisioning/test_storage_class_cases.py b/tests/functional/common/provisioning/test_storage_class_cases.py
index 7e318eb0..52ac761a 100644
--- a/tests/functional/common/provisioning/test_storage_class_cases.py
+++ b/tests/functional/common/provisioning/test_storage_class_cases.py
@@ -34,12 +34,16 @@ class TestStorageClassCases(cns_baseclass.CnsBaseClass):
             parameter (dict): dictionary with storage class parameters
         """
         if vol_type == "glusterfile":
-            sc = self.cns_storage_class['storage_class1']
-            secret = self.cns_secret['secret1']
+            sc = self.cns_storage_class.get(
+                'storage_class1',
+                self.cns_storage_class.get('file_storage_class'))
+
             # Create secret file for usage in storage class
             self.secret_name = oc_create_secret(
-                self.ocp_master_node[0], namespace=secret['namespace'],
-                data_key=self.heketi_cli_key, secret_type=secret['type'])
+                self.ocp_master_node[0],
+                namespace=sc.get('secretnamespace', 'default'),
+                data_key=self.heketi_cli_key,
+                secret_type=sc.get('provisioner', 'kubernetes.io/glusterfs'))
             self.addCleanup(
                 oc_delete, self.ocp_master_node[0], 'secret', self.secret_name)
             sc_parameter = {
@@ -48,12 +52,16 @@ class TestStorageClassCases(cns_baseclass.CnsBaseClass):
                 "volumetype": "replicate:3"
             }
         elif vol_type == "glusterblock":
-            sc = self.cns_storage_class['storage_class2']
-            secret = self.cns_secret['secret2']
+            sc = self.cns_storage_class.get(
+                'storage_class2',
+                self.cns_storage_class.get('block_storage_class'))
+
            # Create secret file for usage in storage class
             self.secret_name = oc_create_secret(
-                self.ocp_master_node[0], namespace=secret['namespace'],
-                data_key=self.heketi_cli_key, secret_type=secret['type'])
+                self.ocp_master_node[0],
+                namespace=sc.get('restsecretnamespace', 'default'),
+                data_key=self.heketi_cli_key,
+                secret_type=sc.get('provisioner', 'gluster.org/glusterblock'))
             self.addCleanup(
                 oc_delete, self.ocp_master_node[0], 'secret', self.secret_name)
             sc_parameter = {
