Diffstat (limited to 'tests')
-rw-r--r--  tests/__init__.py                                                                    0
-rw-r--r--  tests/functional/__init__.py                                                         0
-rw-r--r--  tests/functional/common/heketi/test_volume_expansion_and_devices.py               726
-rw-r--r--  tests/functional/common/heketi/test_volume_multi_req.py                           371
-rw-r--r--  tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py  379
-rw-r--r--  tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py        267
-rw-r--r--  tests/functional/common/test_dynamic_provisioning.py                               86
7 files changed, 1829 insertions, 0 deletions
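For orientation before the full patch, the short sketch below condenses the storage-class and PVC provisioning flow that the new test_volume_multi_req.py cases exercise: build a StorageClass and PVC definition, push each to the cluster through a temporary YAML file, wait for the claim to bind, then clean up. It only reuses helpers introduced by this change (build_storage_class, build_pvc, temp_config, wait_for_claim, delete_storageclass) together with oc_create/oc_delete from cnslibs.common.openshift_ops; the node name, heketi URL, and the import path for the test module are illustrative assumptions, not values taken from the commit.

# Illustrative sketch only -- assumes the helpers defined in the new
# test_volume_multi_req.py are importable from this path and that the
# node/URL values below are replaced with real ones from the test config.
from cnslibs.common.openshift_ops import oc_create, oc_delete
from tests.functional.common.heketi.test_volume_multi_req import (
    build_storage_class, build_pvc, temp_config, wait_for_claim,
    delete_storageclass)

ocp_node = 'ocp-master.example.com'          # hypothetical OCP master node
resturl = 'http://heketi.example.com:8080'   # hypothetical heketi endpoint

# 1. Define a storage class backed by heketi and create it from a temp file.
sc = build_storage_class(name='demo-sc', resturl=resturl)
with temp_config(ocp_node, sc) as path:
    oc_create(ocp_node, path)

# 2. Request a 2Gi claim against that storage class and wait for it to bind.
pvc = build_pvc(name='demo-pvc', storageclass='demo-sc', size=2)
with temp_config(ocp_node, pvc) as path:
    oc_create(ocp_node, path)
wait_for_claim(ocp_node, 'demo-pvc', timeout=120)

# 3. Tear down in reverse order; delete_storageclass waits until no
#    persistent volume references the storage class before removing it.
oc_delete(ocp_node, 'pvc', 'demo-pvc')
delete_storageclass(ocp_node, 'demo-sc')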
diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/tests/__init__.py diff --git a/tests/functional/__init__.py b/tests/functional/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/tests/functional/__init__.py diff --git a/tests/functional/common/heketi/test_volume_expansion_and_devices.py b/tests/functional/common/heketi/test_volume_expansion_and_devices.py new file mode 100644 index 00000000..767680eb --- /dev/null +++ b/tests/functional/common/heketi/test_volume_expansion_and_devices.py @@ -0,0 +1,726 @@ +from __future__ import division +import json +import math +import unittest + +from glusto.core import Glusto as g +from glustolibs.gluster import volume_ops, rebalance_ops + +from cnslibs.common.exceptions import ExecutionError, ConfigError +from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass +from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names +from cnslibs.common import heketi_ops, podcmd + + +class TestVolumeExpansionAndDevicesTestCases(HeketiClientSetupBaseClass): + """ + Class for volume expansion and devices addition related test cases + """ + + @podcmd.GlustoPod() + def get_num_of_bricks(self, volume_name): + """ + Method to determine number of + bricks at present in the volume + """ + brick_info = [] + + if self.deployment_type == "cns": + + gluster_pod = get_ocp_gluster_pod_names( + self.heketi_client_node)[1] + + p = podcmd.Pod(self.heketi_client_node, gluster_pod) + + volume_info_before_expansion = volume_ops.get_volume_info( + p, volume_name) + + elif self.deployment_type == "crs": + volume_info_before_expansion = volume_ops.get_volume_info( + self.heketi_client_node, volume_name) + + self.assertIsNotNone( + volume_info_before_expansion, + "Volume info is None") + + for brick_details in (volume_info_before_expansion + [volume_name]["bricks"]["brick"]): + + brick_info.append(brick_details["name"]) + + num_of_bricks = len(brick_info) + + return num_of_bricks + + @podcmd.GlustoPod() + def get_rebalance_status(self, volume_name): + """ + Rebalance status after expansion + """ + if self.deployment_type == "cns": + gluster_pod = get_ocp_gluster_pod_names( + self.heketi_client_node)[1] + + p = podcmd.Pod(self.heketi_client_node, gluster_pod) + + wait_reb = rebalance_ops.wait_for_rebalance_to_complete( + p, volume_name) + self.assertTrue(wait_reb, "Rebalance not complete") + + reb_status = rebalance_ops.get_rebalance_status( + p, volume_name) + + elif self.deployment_type == "crs": + wait_reb = rebalance_ops.wait_for_rebalance_to_complete( + self.heketi_client_node, volume_name) + self.assertTrue(wait_reb, "Rebalance not complete") + + reb_status = rebalance_ops.get_rebalance_status( + self.heketi_client_node, volume_name) + + self.assertEqual(reb_status["aggregate"]["statusStr"], + "completed", "Rebalance not yet completed") + + @podcmd.GlustoPod() + def get_brick_and_volume_status(self, volume_name): + """ + Status of each brick in a volume + for background validation + """ + brick_info = [] + + if self.deployment_type == "cns": + gluster_pod = get_ocp_gluster_pod_names( + self.heketi_client_node)[1] + + p = podcmd.Pod(self.heketi_client_node, gluster_pod) + + volume_info = volume_ops.get_volume_info(p, volume_name) + volume_status = volume_ops.get_volume_status(p, volume_name) + + elif self.deployment_type == "crs": + volume_info = volume_ops.get_volume_info( + self.heketi_client_node, volume_name) + volume_status = volume_ops.get_volume_status( + 
self.heketi_client_node, volume_name) + + self.assertIsNotNone(volume_info, "Volume info is empty") + self.assertIsNotNone(volume_status, "Volume status is empty") + + self.assertEqual(int(volume_info[volume_name]["status"]), 1, + "Volume not up") + for brick_details in volume_info[volume_name]["bricks"]["brick"]: + brick_info.append(brick_details["name"]) + + if brick_info == []: + raise ExecutionError("Brick details empty for %s" % volume_name) + + for brick in brick_info: + brick_data = brick.strip().split(":") + brick_ip = brick_data[0] + brick_name = brick_data[1] + self.assertEqual(int(volume_status[volume_name][brick_ip] + [brick_name]["status"]), 1, + "Brick %s not up" % brick_name) + + def enable_disable_devices(self, additional_devices_attached, enable=True): + """ + Method to enable and disable devices + """ + op = 'enable' if enable else 'disable' + for node_id in additional_devices_attached.keys(): + node_info = heketi_ops.heketi_node_info( + self.heketi_client_node, self.heketi_server_url, + node_id, json=True) + + if not enable: + self.assertNotEqual(node_info, False, + "Node info for node %s failed" % node_id) + + for device in node_info["devices"]: + if device["name"] == additional_devices_attached[node_id]: + out = getattr(heketi_ops, 'heketi_device_%s' % op)( + self.heketi_client_node, + self.heketi_server_url, + device["id"], + json=True) + if out is False: + g.log.info("Device %s could not be %sd" + % (device["id"], op)) + else: + g.log.info("Device %s %sd" % (device["id"], op)) + + def enable_devices(self, additional_devices_attached): + """ + Method to call enable_disable_devices to enable devices + """ + return self.enable_disable_devices(additional_devices_attached, True) + + def disable_devices(self, additional_devices_attached): + """ + Method to call enable_disable_devices to disable devices + """ + return self.enable_disable_devices(additional_devices_attached, False) + + def get_devices_summary_free_space(self): + """ + Calculates minimum free space per device and + returns total free space across all devices + """ + + heketi_node_id_list = [] + free_spaces = [] + + heketi_node_list_string = heketi_ops.heketi_node_list( + self.heketi_client_node, + self.heketi_server_url, mode="cli", json=True) + + self.assertNotEqual( + heketi_node_list_string, False, + "Heketi node list empty") + + for line in heketi_node_list_string.strip().split("\n"): + heketi_node_id_list.append(line.strip().split( + "Cluster")[0].strip().split(":")[1]) + + for node_id in heketi_node_id_list: + node_info_dict = heketi_ops.heketi_node_info( + self.heketi_client_node, self.heketi_server_url, + node_id, json=True) + total_free_space = 0 + for device in node_info_dict["devices"]: + total_free_space += device["storage"]["free"] + free_spaces.append(total_free_space) + + total_free_space = sum(free_spaces)/(1024 ** 2) + total_free_space = int(math.floor(total_free_space)) + + return total_free_space + + def detach_devices_attached(self, device_id_list): + """ + All the devices attached are gracefully + detached in this function + """ + for device_id in device_id_list: + device_disable = heketi_ops.heketi_device_disable( + self.heketi_client_node, self.heketi_server_url, device_id) + self.assertNotEqual( + device_disable, False, + "Device %s could not be disabled" % device_id) + device_remove = heketi_ops.heketi_device_remove( + self.heketi_client_node, self.heketi_server_url, device_id) + self.assertNotEqual( + device_remove, False, + "Device %s could not be removed" % device_id) + 
device_delete = heketi_ops.heketi_device_delete( + self.heketi_client_node, self.heketi_server_url, device_id) + self.assertNotEqual( + device_delete, False, + "Device %s could not be deleted" % device_id) + + @podcmd.GlustoPod() + def test_add_device_heketi_cli(self): + """ + Method to test heketi device addition with background + gluster validation + """ + node_id_list = [] + device_id_list = [] + hosts = [] + gluster_servers = [] + + node_list_info = heketi_ops.heketi_node_list( + self.heketi_client_node, self.heketi_server_url) + + self.assertNotEqual(node_list_info, False, + "heketi node list command failed") + + lines = node_list_info.strip().split("\n") + + for line in lines: + node_id_list.append(line.strip().split("Cluster") + [0].strip().split(":")[1]) + + creation_info = heketi_ops.heketi_volume_create( + self.heketi_client_node, self.heketi_server_url, 100, json=True) + + self.assertNotEqual(creation_info, False, + "Volume creation failed") + + self.addCleanup(self.delete_volumes, creation_info["id"]) + + ret, out, err = heketi_ops.heketi_volume_create( + self.heketi_client_node, self.heketi_server_url, 620, json=True, + raw_cli_output=True) + + self.assertEqual("Error: No space", err.strip()) + + if ret == 0: + out_json = json.loads(out) + self.addCleanup(self.delete_volumes, out_json["id"]) + + for node_id in node_id_list: + device_present = False + node_info = heketi_ops.heketi_node_info( + self.heketi_client_node, self.heketi_server_url, + node_id, json=True) + + self.assertNotEqual( + node_info, False, + "Heketi node info on node %s failed" % node_id) + + node_ip = node_info["hostnames"]["storage"][0] + + for gluster_server in g.config["gluster_servers"].keys(): + gluster_server_ip = (g.config["gluster_servers"] + [gluster_server]["storage"]) + if gluster_server_ip == node_ip: + device_name = (g.config["gluster_servers"][gluster_server] + ["additional_devices"][0]) + break + device_addition_info = heketi_ops.heketi_device_add( + self.heketi_client_node, self.heketi_server_url, + device_name, node_id, json=True) + + self.assertNotEqual(device_addition_info, False, + "Device %s addition failed" % device_name) + + node_info_after_addition = heketi_ops.heketi_node_info( + self.heketi_client_node, self.heketi_server_url, + node_id, json=True) + for device in node_info_after_addition["devices"]: + if device["name"] == device_name: + device_present = True + device_id_list.append(device["id"]) + + self.assertEqual(device_present, True, + "device %s not present" % device["id"]) + + self.addCleanup(self.detach_devices_attached, device_id_list) + + output_dict = heketi_ops.heketi_volume_create( + self.heketi_client_node, self.heketi_server_url, + 620, json=True) + + self.assertNotEqual(output_dict, False, "Volume creation failed") + self.addCleanup(self.delete_volumes, output_dict["id"]) + + self.assertEqual(output_dict["durability"]["replicate"]["replica"], 3) + self.assertEqual(output_dict["size"], 620) + mount_node = (output_dict["mount"]["glusterfs"] + ["device"].strip().split(":")[0]) + + hosts.append(mount_node) + backup_volfile_server_list = ( + output_dict["mount"]["glusterfs"]["options"] + ["backup-volfile-servers"].strip().split(",")) + + for backup_volfile_server in backup_volfile_server_list: + hosts.append(backup_volfile_server) + for gluster_server in g.config["gluster_servers"].keys(): + gluster_servers.append(g.config["gluster_servers"] + [gluster_server]["storage"]) + self.assertEqual( + set(hosts), set(gluster_servers), + "Hosts do not match gluster servers for %s" 
% output_dict["id"]) + + volume_name = output_dict["name"] + + self.get_brick_and_volume_status(volume_name) + + def test_volume_expansion_expanded_volume(self): + """ + To test volume expansion with brick and rebalance + validation + """ + creation_info = heketi_ops.heketi_volume_create( + self.heketi_client_node, self.heketi_server_url, 10, json=True) + + self.assertNotEqual(creation_info, False, "Volume creation failed") + + volume_name = creation_info["name"] + volume_id = creation_info["id"] + + free_space_after_creation = self.get_devices_summary_free_space() + + volume_info_before_expansion = heketi_ops.heketi_volume_info( + self.heketi_client_node, + self.heketi_server_url, + volume_id, json=True) + + self.assertNotEqual( + volume_info_before_expansion, False, + "Heketi volume info for %s failed" % volume_id) + + heketi_vol_info_size_before_expansion = ( + volume_info_before_expansion["size"]) + + num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name) + + self.get_brick_and_volume_status(volume_name) + + expansion_info = heketi_ops.heketi_volume_expand( + self.heketi_client_node, + self.heketi_server_url, + volume_id, 3) + + self.assertNotEqual(expansion_info, False, + "Volume %s expansion failed" % volume_id) + + free_space_after_expansion = self.get_devices_summary_free_space() + + self.assertTrue( + free_space_after_creation > free_space_after_expansion, + "Expansion of %s did not consume free space" % volume_id) + + num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name) + + self.get_brick_and_volume_status(volume_name) + self.get_rebalance_status(volume_name) + + volume_info_after_expansion = heketi_ops.heketi_volume_info( + self.heketi_client_node, + self.heketi_server_url, + volume_id, json=True) + + self.assertNotEqual( + volume_info_after_expansion, False, + "Heketi volume info for %s command failed" % volume_id) + + heketi_vol_info_size_after_expansion = ( + volume_info_after_expansion["size"]) + + difference_size_after_expansion = ( + heketi_vol_info_size_after_expansion - + heketi_vol_info_size_before_expansion) + + self.assertTrue( + difference_size_after_expansion > 0, + "Volume expansion for %s did not consume free space" % volume_id) + + num_of_bricks_added_after_expansion = (num_of_bricks_after_expansion - + num_of_bricks_before_expansion) + + self.assertEqual( + num_of_bricks_added_after_expansion, 3, + "Number of bricks added in %s after expansion is not 3" + % volume_name) + + further_expansion_info = heketi_ops.heketi_volume_expand( + self.heketi_client_node, + self.heketi_server_url, + volume_id, 3) + + self.assertNotEqual(further_expansion_info, False, + "Volume expansion failed for %s" % volume_id) + + free_space_after_further_expansion = ( + self.get_devices_summary_free_space()) + self.assertTrue( + free_space_after_expansion > free_space_after_further_expansion, + "Further expansion of %s did not consume free space" % volume_id) + + num_of_bricks_after_further_expansion = ( + self.get_num_of_bricks(volume_name)) + + self.get_brick_and_volume_status(volume_name) + + self.get_rebalance_status(volume_name) + + volume_info_after_further_expansion = heketi_ops.heketi_volume_info( + self.heketi_client_node, + self.heketi_server_url, + volume_id, json=True) + + self.assertNotEqual( + volume_info_after_further_expansion, False, + "Heketi volume info for %s failed" % volume_id) + + heketi_vol_info_size_after_further_expansion = ( + volume_info_after_further_expansion["size"]) + + difference_size_after_further_expansion = ( + 
heketi_vol_info_size_after_further_expansion - + heketi_vol_info_size_after_expansion) + + self.assertTrue( + difference_size_after_further_expansion > 0, + "Size of volume %s did not increase" % volume_id) + + num_of_bricks_added_after_further_expansion = ( + num_of_bricks_after_further_expansion - + num_of_bricks_after_expansion) + + self.assertEqual( + num_of_bricks_added_after_further_expansion, 3, + "Number of bricks added is not 3 for %s" % volume_id) + + free_space_before_deletion = self.get_devices_summary_free_space() + + volume_delete = heketi_ops.heketi_volume_delete( + self.heketi_client_node, self.heketi_server_url, + volume_id, json=True) + + self.assertNotEqual(volume_delete, False, "Deletion of %s failed" + % volume_id) + + free_space_after_deletion = self.get_devices_summary_free_space() + + self.assertTrue(free_space_after_deletion > free_space_before_deletion, + "Free space not reclaimed after deletion of %s" + % volume_id) + + def test_volume_expansion_no_free_space(self): + """ + To test volume expansion when there is no free + space + """ + + heketi_node_id_list = [] + additional_devices_attached = {} + heketi_node_list_string = heketi_ops.heketi_node_list( + self.heketi_client_node, + self.heketi_server_url, mode="cli", json=True) + + self.assertNotEqual(heketi_node_list_string, False, + "Heketi node list command failed") + + for line in heketi_node_list_string.strip().split("\n"): + heketi_node_id_list.append(line.strip().split( + "Cluster")[0].strip().split(":")[1]) + + for node_id in heketi_node_id_list: + node_info_dict = heketi_ops.heketi_node_info( + self.heketi_client_node, self.heketi_server_url, + node_id, json=True) + self.assertNotEqual(node_info_dict, False, + "Heketi node info for %s failed" % node_id) + for gluster_server in self.gluster_servers: + gluster_server_ip = ( + self.gluster_servers_info[gluster_server]["storage"]) + node_ip = node_info_dict["hostnames"]["storage"][0] + + if gluster_server_ip == node_ip: + addition_status = ( + heketi_ops.heketi_device_add( + self.heketi_client_node, + self.heketi_server_url, + self.gluster_servers_info[gluster_server] + ["additional_devices"][0], node_id)) + + self.assertNotEqual(addition_status, False, + "Addition of device %s failed" + % self.gluster_servers_info + [gluster_server] + ["additional_devices"][0]) + + additional_devices_attached.update({node_id: + self.gluster_servers_info + [gluster_server] + ["additional_devices"][0]}) + + additional_devices_ids = [] + for node_id in additional_devices_attached.keys(): + node_info = heketi_ops.heketi_node_info( + self.heketi_client_node, self.heketi_server_url, + node_id, json=True) + + for device in node_info["devices"]: + if device["name"] == additional_devices_attached[node_id]: + additional_devices_ids.append(device["id"]) + + self.addCleanup(self.detach_devices_attached, + additional_devices_ids) + + for node_id in additional_devices_attached.keys(): + flag_device_added = False + node_info = heketi_ops.heketi_node_info( + self.heketi_client_node, self.heketi_server_url, + node_id, json=True) + for device in node_info["devices"]: + if device["name"] == additional_devices_attached[node_id]: + flag_device_added = True + + self.assertTrue(flag_device_added) + + self.disable_devices(additional_devices_attached) + + creation_info = heketi_ops.heketi_volume_create( + self.heketi_client_node, self.heketi_server_url, 675, json=True) + + self.assertNotEqual(creation_info, False, "Volume creation failed") + + volume_name = creation_info["name"] + volume_id = 
creation_info["id"] + + volume_info_before_expansion = heketi_ops.heketi_volume_info( + self.heketi_client_node, + self.heketi_server_url, + volume_id, json=True) + + heketi_vol_info_size_before_expansion = ( + volume_info_before_expansion["size"]) + + num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name) + + self.get_brick_and_volume_status(volume_name) + + free_space_after_creation = self.get_devices_summary_free_space() + + ret, out, err = heketi_ops.heketi_volume_expand( + self.heketi_client_node, self.heketi_server_url, + volume_id, 50, raw_cli_output=True) + + emsg = "Error: Maximum number of bricks reached." + + self.assertEqual(emsg, err.strip(), + "Expansion failed with invalid reason") + + if ret == 0: + out_json = json.loads(out) + self.addCleanup(self.delete_volumes, out_json["id"]) + + self.enable_devices(additional_devices_attached) + + expansion_info = heketi_ops.heketi_volume_expand( + self.heketi_client_node, self.heketi_server_url, + volume_id, 50, json=True) + + self.assertNotEqual(expansion_info, False, + "Volume %s could not be expanded" % volume_id) + + free_space_after_expansion = self.get_devices_summary_free_space() + + self.assertTrue( + free_space_after_creation > free_space_after_expansion, + "Free space not consumed after expansion of %s" % volume_id) + + num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name) + + self.get_brick_and_volume_status(volume_name) + + volume_info_after_expansion = heketi_ops.heketi_volume_info( + self.heketi_client_node, + self.heketi_server_url, + volume_id, json=True) + + self.assertNotEqual( + volume_info_after_expansion, False, + "Heketi volume info for %s failed" % volume_id) + + heketi_vol_info_size_after_expansion = ( + volume_info_after_expansion["size"]) + + difference_size_after_expansion = ( + heketi_vol_info_size_after_expansion - + heketi_vol_info_size_before_expansion) + + self.assertTrue(difference_size_after_expansion > 0, + "Size of %s not increased" % volume_id) + + num_of_bricks_added_after_expansion = (num_of_bricks_after_expansion - + num_of_bricks_before_expansion) + + self.assertEqual(num_of_bricks_added_after_expansion, 3) + + deletion_info = heketi_ops.heketi_volume_delete( + self.heketi_client_node, self.heketi_server_url, volume_id, + json=True) + + self.assertNotEqual(deletion_info, False, + "Deletion of %s not successful" % volume_id) + + free_space_after_deletion = self.get_devices_summary_free_space() + + self.assertTrue( + free_space_after_deletion > free_space_after_expansion, + "Free space not reclaimed after deletion of volume %s" % volume_id) + + @podcmd.GlustoPod() + def test_volume_expansion_rebalance_brick(self): + """ + To test volume expansion with brick and rebalance + validation + """ + creation_info = heketi_ops.heketi_volume_create( + self.heketi_client_node, self.heketi_server_url, 10, json=True) + + self.assertNotEqual(creation_info, False, "Volume creation failed") + + volume_name = creation_info["name"] + volume_id = creation_info["id"] + + free_space_after_creation = self.get_devices_summary_free_space() + + volume_info_before_expansion = heketi_ops.heketi_volume_info( + self.heketi_client_node, + self.heketi_server_url, + volume_id, json=True) + + self.assertNotEqual(volume_info_before_expansion, False, + "Volume info for %s failed" % volume_id) + + heketi_vol_info_size_before_expansion = ( + volume_info_before_expansion["size"]) + + self.get_brick_and_volume_status(volume_name) + num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name) + + 
expansion_info = heketi_ops.heketi_volume_expand( + self.heketi_client_node, + self.heketi_server_url, + volume_id, 5) + + self.assertNotEqual(expansion_info, False, + "Volume expansion of %s failed" % volume_id) + + free_space_after_expansion = self.get_devices_summary_free_space() + self.assertTrue( + free_space_after_creation > free_space_after_expansion, + "Free space not consumed after expansion of %s" % volume_id) + + volume_info_after_expansion = heketi_ops.heketi_volume_info( + self.heketi_client_node, + self.heketi_server_url, + volume_id, json=True) + + self.assertNotEqual(volume_info_after_expansion, False, + "Volume info failed for %s" % volume_id) + + heketi_vol_info_size_after_expansion = ( + volume_info_after_expansion["size"]) + + difference_size = (heketi_vol_info_size_after_expansion - + heketi_vol_info_size_before_expansion) + + self.assertTrue( + difference_size > 0, + "Size not increased after expansion of %s" % volume_id) + + self.get_brick_and_volume_status(volume_name) + num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name) + + num_of_bricks_added = (num_of_bricks_after_expansion - + num_of_bricks_before_expansion) + + self.assertEqual( + num_of_bricks_added, 3, + "Number of bricks added is not 3 for %s" % volume_id) + + self.get_rebalance_status(volume_name) + + deletion_info = heketi_ops.heketi_volume_delete( + self.heketi_client_node, self.heketi_server_url, + volume_id, json=True) + + self.assertNotEqual(deletion_info, False, + "Deletion of volume %s failed" % volume_id) + + free_space_after_deletion = self.get_devices_summary_free_space() + + self.assertTrue( + free_space_after_deletion > free_space_after_expansion, + "Free space is not reclaimed after volume deletion of %s" + % volume_id) + diff --git a/tests/functional/common/heketi/test_volume_multi_req.py b/tests/functional/common/heketi/test_volume_multi_req.py new file mode 100644 index 00000000..fbf95086 --- /dev/null +++ b/tests/functional/common/heketi/test_volume_multi_req.py @@ -0,0 +1,371 @@ +"""Test cases that create and delete multiple volumes. +""" + +import contextlib +import threading +import time + +import ddt +import yaml + +from glusto.core import Glusto as g + +from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass +from cnslibs.common.heketi_ops import ( + heketi_volume_list) +from cnslibs.common.naming import ( + make_unique_label, extract_method_name) +from cnslibs.common.openshift_ops import ( + oc_create, oc_delete, oc_get_pvc, oc_get_pv, oc_get_all_pvs) +from cnslibs.common.waiter import Waiter + + +def build_storage_class(name, resturl, restuser='foo', restuserkey='foo'): + """Build s simple structure for a storage class. + """ + return { + 'apiVersion': 'storage.k8s.io/v1beta1', + 'kind': 'StorageClass', + 'provisioner': 'kubernetes.io/glusterfs', + 'metadata': { + 'name': name, + }, + 'parameters': { + 'resturl': resturl, + 'restuser': restuser, + 'restuserkey': restuserkey, + } + } + + +def build_pvc(name, storageclass, size, accessmodes=None): + """Build a simple structture for a PVC defintion. 
+ """ + annotations = { + 'volume.beta.kubernetes.io/storage-class': storageclass, + } + accessmodes = accessmodes if accessmodes else ['ReadWriteOnce'] + if not isinstance(size, str): + size = '%dGi' % size + return { + 'apiVersion': 'v1', + 'kind': 'PersistentVolumeClaim', + 'metadata': { + 'name': name, + 'annotations': annotations, + }, + 'spec': { + 'accessModes': accessmodes, + 'resources': { + 'requests': {'storage': size}, + } + } + } + + +@contextlib.contextmanager +def temp_config(ocp_node, cfg): + """Context manager to help define YAML files on the remote node + that can be in turn fed to 'oc create'. Must be used as a context + manager (with-statement). + + Example: + >>> d = {'foo': True, 'bar': 22, 'baz': [1, 5, 9]} + >>> with temp_config(node, d) as fpath: + ... func_that_takes_a_path(fpath) + + Here, the data dictionary `d` is serialized to YAML and written + to a temporary file at `fpath`. Then, `fpath` can be used by + a function that takes a file path. When the context manager exits + the temporary file is automatically cleaned up. + + Args: + ocp_node (str): The node to create the temp file on. + cfg (dict): A data structure to be converted to YAML and + saved in a tempfile on the node. + Returns: + str: A path to a temporary file. + """ + conn = g.rpyc_get_connection(ocp_node, user="root") + tmp = conn.modules.tempfile.NamedTemporaryFile() + try: + tmp.write(yaml.safe_dump(cfg)) + tmp.flush() + filename = tmp.name + yield filename + finally: + tmp.close() + + +def wait_for_claim(ocp_node, pvc_name, timeout=60, interval=2): + """Wait for a claim to be created & bound up to the given timeout. + """ + for w in Waiter(timeout, interval): + sts = oc_get_pvc(ocp_node, pvc_name) + if sts and sts.get('status', {}).get('phase') == 'Bound': + return sts + raise AssertionError('wait_for_claim on pvc %s timed out' + % (pvc_name,)) + + +def wait_for_sc_unused(ocp_node, sc_name, timeout=60, interval=1): + for w in Waiter(timeout, interval): + sts = oc_get_all_pvs(ocp_node) + items = (sts and sts.get('items')) or [] + if not any(i.get('spec', {}).get('storageClassName') == sc_name + for i in items): + return + raise AssertionError('wait_for_sc_unused on %s timed out' + % (sc_name,)) + + +def delete_storageclass(ocp_node, sc_name, timeout=60): + wait_for_sc_unused(ocp_node, sc_name, timeout) + oc_delete(ocp_node, 'storageclass', sc_name) + + +class ClaimInfo(object): + """Helper class to organize data as we go from PVC to PV to + volume w/in heketi. 
+ """ + pvc_name = None + vol_name = None + vol_uuid = None + sc_name = None + req = None + info = None + pv_info = None + + def __init__(self, name, storageclass, size): + self.pvc_name = name + self.req = build_pvc( + name=self.pvc_name, + storageclass=storageclass, + size=size) + + def create_pvc(self, ocp_node): + assert self.req + with temp_config(ocp_node, self.req) as tmpfn: + oc_create(ocp_node, tmpfn) + + def update_pvc_info(self, ocp_node, timeout=60): + self.info = wait_for_claim(ocp_node, self.pvc_name, timeout) + + def delete_pvc(self, ocp_node): + oc_delete(ocp_node, 'pvc', self.pvc_name) + + def update_pv_info(self, ocp_node): + self.pv_info = oc_get_pv(ocp_node, self.volumeName) + + @property + def volumeName(self): + return self.info.get('spec', {}).get('volumeName') + + @property + def heketiVolumeName(self): + return self.pv_info.get('spec', {}).get('glusterfs', {}).get('path') + + +def _heketi_vols(ocp_node, url): + # Unfortunately, getting json from heketi-cli only gets the ids + # To get a mapping of ids & volume names without a lot of + # back and forth between the test and the ocp_node we end up having + # to scrape the output of 'volume list' + # TODO: This probably should be made into a utility function + out = heketi_volume_list(ocp_node, url, json=False) + res = [] + for line in out.splitlines(): + if not line.startswith('Id:'): + continue + row = {} + for section in line.split(): + if ':' in section: + key, value = section.split(':', 1) + row[key.lower()] = value.strip() + res.append(row) + return res + + +def _heketi_name_id_map(vols): + return {vol['name']: vol['id'] for vol in vols} + + +@ddt.ddt +class TestVolumeMultiReq(HeketiClientSetupBaseClass): + def setUp(self): + super(TestVolumeMultiReq, self).setUp() + self.volcount = self._count_vols() + + def wait_to_settle(self, timeout=120, interval=1): + # This was originally going to be a tearDown, but oddly enough + # tearDown is called *before* the cleanup functions, so it + # could never succeed. This needs to be added as a cleanup + # function first so that we run after our test's other cleanup + # functions but before we go on to the next test in order + # to prevent the async cleanups in kubernetes from steping + # on the next test's "toes". + for w in Waiter(timeout): + nvols = self._count_vols() + if nvols == self.volcount: + return + raise AssertionError( + 'wait for volume count to settle timed out') + + def _count_vols(self): + ocp_node = g.config['ocp_servers']['master'].keys()[0] + return len(_heketi_vols(ocp_node, self.heketi_server_url)) + + def test_simple_serial_vol_create(self): + """Test that serially creating PVCs causes heketi to add volumes. + """ + self.addCleanup(self.wait_to_settle) + # TODO A nice thing to add to this test would be to also verify + # the gluster volumes also exist. 
+ tname = make_unique_label(extract_method_name(self.id())) + ocp_node = g.config['ocp_servers']['master'].keys()[0] + # deploy a temporary storage class + sc = build_storage_class( + name=tname, + resturl=self.heketi_server_url) + with temp_config(ocp_node, sc) as tmpfn: + oc_create(ocp_node, tmpfn) + self.addCleanup(delete_storageclass, ocp_node, tname) + orig_vols = _heketi_name_id_map( + _heketi_vols(ocp_node, self.heketi_server_url)) + + # deploy a persistent volume claim + c1 = ClaimInfo( + name='-'.join((tname, 'pvc1')), + storageclass=tname, + size=2) + c1.create_pvc(ocp_node) + self.addCleanup(c1.delete_pvc, ocp_node) + c1.update_pvc_info(ocp_node) + # verify volume exists + self.assertTrue(c1.volumeName) + c1.update_pv_info(ocp_node) + self.assertTrue(c1.heketiVolumeName) + + # verify this is a new volume to heketi + now_vols = _heketi_name_id_map( + _heketi_vols(ocp_node, self.heketi_server_url)) + self.assertEqual(len(orig_vols) + 1, len(now_vols)) + self.assertIn(c1.heketiVolumeName, now_vols) + self.assertNotIn(c1.heketiVolumeName, orig_vols) + + # deploy a 2nd pvc + c2 = ClaimInfo( + name='-'.join((tname, 'pvc2')), + storageclass=tname, + size=2) + c2.create_pvc(ocp_node) + self.addCleanup(c2.delete_pvc, ocp_node) + c2.update_pvc_info(ocp_node) + # verify volume exists + self.assertTrue(c2.volumeName) + c2.update_pv_info(ocp_node) + self.assertTrue(c2.heketiVolumeName) + + # verify this is a new volume to heketi + now_vols = _heketi_name_id_map( + _heketi_vols(ocp_node, self.heketi_server_url)) + self.assertEqual(len(orig_vols) + 2, len(now_vols)) + self.assertIn(c2.heketiVolumeName, now_vols) + self.assertNotIn(c2.heketiVolumeName, orig_vols) + + def test_multiple_vol_create(self): + """Test creating two volumes via PVCs with no waiting between + the PVC requests. + + We do wait after all the PVCs are submitted to get statuses. + """ + self.addCleanup(self.wait_to_settle) + tname = make_unique_label(extract_method_name(self.id())) + ocp_node = g.config['ocp_servers']['master'].keys()[0] + # deploy a temporary storage class + sc = build_storage_class( + name=tname, + resturl=self.heketi_server_url) + with temp_config(ocp_node, sc) as tmpfn: + oc_create(ocp_node, tmpfn) + self.addCleanup(delete_storageclass, ocp_node, tname) + + # deploy two persistent volume claims + c1 = ClaimInfo( + name='-'.join((tname, 'pvc1')), + storageclass=tname, + size=2) + c1.create_pvc(ocp_node) + self.addCleanup(c1.delete_pvc, ocp_node) + c2 = ClaimInfo( + name='-'.join((tname, 'pvc2')), + storageclass=tname, + size=2) + c2.create_pvc(ocp_node) + self.addCleanup(c2.delete_pvc, ocp_node) + + # wait for pvcs/volumes to complete + c1.update_pvc_info(ocp_node) + c2.update_pvc_info(ocp_node) + now_vols = _heketi_name_id_map( + _heketi_vols(ocp_node, self.heketi_server_url)) + + # verify first volume exists + self.assertTrue(c1.volumeName) + c1.update_pv_info(ocp_node) + self.assertTrue(c1.heketiVolumeName) + # verify this volume in heketi + self.assertIn(c1.heketiVolumeName, now_vols) + + # verify second volume exists + self.assertTrue(c2.volumeName) + c2.update_pv_info(ocp_node) + self.assertTrue(c2.heketiVolumeName) + # verify this volume in heketi + self.assertIn(c2.heketiVolumeName, now_vols) + + # NOTE(jjm): I've noticed that on the system I'm using (RHEL7). + # with count=8 things start to back up a bit. + # I needed to increase some timeouts to get this to pass. 
+ @ddt.data(2, 4, 8) + def test_threaded_multi_request(self, count): + """Test creating volumes via PVCs where the pvc create + commands are launched in parallell via threads. + """ + self.addCleanup(self.wait_to_settle) + tname = make_unique_label(extract_method_name(self.id())) + ocp_node = g.config['ocp_servers']['master'].keys()[0] + # deploy a temporary storage class + sc = build_storage_class( + name=tname, + resturl=self.heketi_server_url) + with temp_config(ocp_node, sc) as tmpfn: + oc_create(ocp_node, tmpfn) + self.addCleanup(delete_storageclass, ocp_node, tname) + + # prepare the persistent volume claims + claims = [ + ClaimInfo(name='-'.join((tname, ('pvc%d' % n))), + storageclass=tname, + size=2) + for n in range(count)] + + # create a "bunch" of pvc all at once + def create(ci): + ci.create_pvc(ocp_node) + self.addCleanup(ci.delete_pvc, ocp_node) + threads = [ + threading.Thread(target=create, args=[c]) + for c in claims] + for t in threads: + t.start() + for t in threads: + t.join() + + for c in claims: + c.update_pvc_info(ocp_node, timeout=120) + now_vols = _heketi_name_id_map( + _heketi_vols(ocp_node, self.heketi_server_url)) + for c in claims: + c.update_pv_info(ocp_node) + self.assertIn(c.heketiVolumeName, now_vols) diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py new file mode 100644 index 00000000..3c01427e --- /dev/null +++ b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py @@ -0,0 +1,379 @@ +from cnslibs.common.dynamic_provisioning import ( + create_mongodb_pod, + create_secret_file, + create_storage_class_file, + get_pvc_status, + verify_pod_status_running) +from cnslibs.cns.cns_baseclass import CnsGlusterBlockBaseClass +from cnslibs.common.openshift_ops import ( + get_ocp_gluster_pod_names, + oc_create, + oc_delete, + oc_rsh) +from cnslibs.common.waiter import Waiter +from glusto.core import Glusto as g +import time + + +class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass): + ''' + Class that contain P0 dynamic provisioning test cases + for block volume + ''' + def test_dynamic_provisioning_glusterblock(self): + g.log.info("test_dynamic_provisioning_glusterblock") + storage_class = self.cns_storage_class['storage_class2'] + cmd = "export HEKETI_CLI_SERVER=%s" % storage_class['resturl'] + ret, out, err = g.run(self.ocp_client[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_client[0])) + cmd = ("export HEKETI_CLI_SERVER=%s && heketi-cli cluster list " + "| grep Id | cut -d ':' -f 2 | cut -d '[' -f 1" % ( + storage_class['resturl'])) + ret, out, err = g.run(self.ocp_client[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_client[0])) + cluster_id = out.strip().split("\n")[0] + sc_name = storage_class['name'] + pvc_name1 = "mongodb1-block" + cmd = ("oc get svc | grep heketi | grep -v endpoints " + "| awk '{print $2}'") + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + heketi_cluster_ip = out.strip().split("\n")[0] + resturl_block = "http://%s:8080" % heketi_cluster_ip + ret = create_storage_class_file( + self.ocp_master_node[0], + sc_name, + resturl_block, + storage_class['provisioner'], + restuser=storage_class['restuser'], + restsecretnamespace=storage_class['restsecretnamespace'], + 
restsecretname=storage_class['restsecretname'], + hacount=storage_class['hacount'], + clusterids=cluster_id) + self.assertTrue(ret, "creation of storage-class file failed") + provisioner_name = storage_class['provisioner'].split("/") + file_path = "/%s-%s-storage-class.yaml" % ( + sc_name, provisioner_name[1]) + oc_create(self.ocp_master_node[0], file_path) + self.addCleanup(oc_delete, self.ocp_master_node[0], + 'sc', sc_name) + secret = self.cns_secret['secret2'] + ret = create_secret_file(self.ocp_master_node[0], + secret['secret_name'], + secret['namespace'], + secret['data_key'], + secret['type']) + self.assertTrue(ret, "creation of heketi-secret file failed") + oc_create(self.ocp_master_node[0], + "/%s.yaml" % secret['secret_name']) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret', + secret['secret_name']) + ret = create_mongodb_pod(self.ocp_master_node[0], + pvc_name1, 10, sc_name) + self.assertTrue(ret, "creation of mongodb pod failed") + self.addCleanup(oc_delete, self.ocp_master_node[0], 'service', + pvc_name1) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc', + pvc_name1) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc', + pvc_name1) + ret = verify_pod_status_running(self.ocp_master_node[0], + pvc_name1) + self.assertTrue(ret, "verify mongodb pod status as running failed") + cmd = ("oc get pods | grep %s | grep -v deploy " + "| awk {'print $1'}") % pvc_name1 + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + pod_name = out.strip().split("\n")[0] + cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file " + "bs=1K count=100") + ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd) + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + oc_delete(self.ocp_master_node[0], 'pod', pod_name) + ret = verify_pod_status_running(self.ocp_master_node[0], + pvc_name1) + self.assertTrue(ret, "verify mongodb pod status as running failed") + cmd = ("oc get pods | grep %s | grep -v deploy " + "| awk {'print $1'}") % pvc_name1 + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + pod_name = out.strip().split("\n")[0] + cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file " + "bs=1K count=100") + ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd) + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + oc_delete(self.ocp_master_node[0], 'pod', pod_name) + ret = verify_pod_status_running(self.ocp_master_node[0], + pvc_name1) + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertTrue(ret, "verify mongodb pod status as running failed") + cmd = ("oc get pods | grep %s | grep -v deploy " + "| awk {'print $1'}") % pvc_name1 + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + pod_name = out.strip().split("\n")[0] + cmd = "ls -lrt /var/lib/mongodb/data/file" + ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd) + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + cmd = "rm -rf /var/lib/mongodb/data/file" + ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd) + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, 
self.ocp_master_node[0])) + + def test_dynamic_provisioning_glusterblock_heketipod_failure(self): + g.log.info("test_dynamic_provisioning_glusterblock_Heketipod_Failure") + storage_class = self.cns_storage_class['storage_class2'] + cmd = "export HEKETI_CLI_SERVER=%s" % storage_class['resturl'] + ret, out, err = g.run(self.ocp_client[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_client[0])) + cmd = ("export HEKETI_CLI_SERVER=%s && heketi-cli cluster list " + "| grep Id | cut -d ':' -f 2 | cut -d '[' -f 1") % ( + storage_class['resturl']) + ret, out, err = g.run(self.ocp_client[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_client[0])) + cluster_id = out.strip().split("\n")[0] + sc_name = storage_class['name'] + pvc_name2 = "mongodb2-block" + cmd = ("oc get svc | grep heketi | grep -v endpoints " + "| awk '{print $2}'") + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + heketi_cluster_ip = out.strip().split("\n")[0] + resturl_block = "http://%s:8080" % heketi_cluster_ip + ret = create_storage_class_file( + self.ocp_master_node[0], + sc_name, + resturl_block, + storage_class['provisioner'], + restuser=storage_class['restuser'], + restsecretnamespace=storage_class['restsecretnamespace'], + restsecretname=storage_class['restsecretname'], + hacount=storage_class['hacount'], + clusterids=cluster_id) + self.assertTrue(ret, "creation of storage-class file failed") + provisioner_name = storage_class['provisioner'].split("/") + file_path = "/%s-%s-storage-class.yaml" % ( + sc_name, provisioner_name[1]) + oc_create(self.ocp_master_node[0], file_path) + self.addCleanup(oc_delete, self.ocp_master_node[0], + 'sc', sc_name) + secret = self.cns_secret['secret2'] + ret = create_secret_file(self.ocp_master_node[0], + secret['secret_name'], + secret['namespace'], + secret['data_key'], + secret['type']) + self.assertTrue(ret, "creation of heketi-secret file failed") + oc_create(self.ocp_master_node[0], + "/%s.yaml" % secret['secret_name']) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret', + secret['secret_name']) + ret = create_mongodb_pod(self.ocp_master_node[0], + pvc_name2, 10, sc_name) + self.assertTrue(ret, "creation of mongodb pod failed") + self.addCleanup(oc_delete, self.ocp_master_node[0], 'service', + pvc_name2) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc', + pvc_name2) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc', + pvc_name2) + ret = verify_pod_status_running(self.ocp_master_node[0], + pvc_name2) + self.assertTrue(ret, "verify mongodb pod status as running failed") + cmd = ("oc get pods | grep %s | grep -v deploy " + "| awk {'print $1'}") % pvc_name2 + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + pod_name = out.strip().split("\n")[0] + cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file " + "bs=1K count=100") + ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd) + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + oc_delete(self.ocp_master_node[0], 'dc', "heketi") + oc_delete(self.ocp_master_node[0], 'service', "heketi") + oc_delete(self.ocp_master_node[0], 'route', "heketi") + pvc_name3 = "mongodb3-block" + ret = create_mongodb_pod(self.ocp_master_node[0], 
+ pvc_name3, 10, sc_name) + self.assertTrue(ret, "creation of mongodb pod failed") + self.addCleanup(oc_delete, self.ocp_master_node[0], 'service', + pvc_name3) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc', + pvc_name3) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc', + pvc_name3) + ret, status = get_pvc_status(self.ocp_master_node[0], + pvc_name3) + self.assertTrue(ret, "failed to get pvc status of %s" % pvc_name3) + self.assertEqual(status, "Pending", "pvc status of " + "%s is not in Pending state" % pvc_name3) + cmd = "oc process heketi | oc create -f -" + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + ret = verify_pod_status_running(self.ocp_master_node[0], "heketi") + self.assertTrue(ret, "verify heketi pod status as running failed") + oc_delete(self.ocp_master_node[0], 'sc', sc_name) + cmd = ("oc get svc | grep heketi | grep -v endpoints " + "| awk '{print $2}'") + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + heketi_cluster_ip = out.strip().split("\n")[0] + resturl_block = "http://%s:8080" % heketi_cluster_ip + ret = create_storage_class_file( + self.ocp_master_node[0], + sc_name, + resturl_block, + storage_class['provisioner'], + restuser=storage_class['restuser'], + restsecretnamespace=storage_class['restsecretnamespace'], + restsecretname=storage_class['restsecretname'], + hacount=storage_class['hacount'], + clusterids=cluster_id) + self.assertTrue(ret, "creation of storage-class file failed") + provisioner_name = storage_class['provisioner'].split("/") + file_path = "/%s-%s-storage-class.yaml" % ( + sc_name, provisioner_name[1]) + oc_create(self.ocp_master_node[0], file_path) + for w in Waiter(300, 30): + ret, status = get_pvc_status(self.ocp_master_node[0], + pvc_name3) + self.assertTrue(ret, "failed to get pvc status of %s" % ( + pvc_name3)) + if status != "Bound": + g.log.info("pvc status of %s is not in Bound state," + " sleeping for 30 sec" % pvc_name3) + continue + else: + break + if w.expired: + error_msg = ("exceeded timeout 300 sec, pvc %s not in" + " Bound state" % pvc_name3) + g.log.error(error_msg) + raise ExecutionError(error_msg) + self.assertEqual(status, "Bound", "pvc status of %s " + "is not in Bound state, its state is %s" % ( + pvc_name3, status)) + ret = verify_pod_status_running(self.ocp_master_node[0], + pvc_name3) + self.assertTrue(ret, "verify %s pod status as " + "running failed" % pvc_name3) + cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file " + "bs=1K count=100") + ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd) + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + + def test_dynamic_provisioning_glusterblock_glusterpod_failure(self): + g.log.info("test_dynamic_provisioning_glusterblock_Glusterpod_Failure") + storage_class = self.cns_storage_class['storage_class2'] + cmd = "export HEKETI_CLI_SERVER=%s" % storage_class['resturl'] + ret, out, err = g.run(self.ocp_client[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_client[0])) + cmd = ("export HEKETI_CLI_SERVER=%s && heketi-cli cluster list " + "| grep Id | cut -d ':' -f 2 | cut -d '[' -f 1") % ( + storage_class['resturl']) + ret, out, err = g.run(self.ocp_client[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute 
command %s on %s" % ( + cmd, self.ocp_client[0])) + cluster_id = out.strip().split("\n")[0] + sc_name = storage_class['name'] + pvc_name4 = "mongodb-4-block" + cmd = ("oc get svc | grep heketi | grep -v endpoints " + "| awk '{print $2}'") + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + heketi_cluster_ip = out.strip().split("\n")[0] + resturl_block = "http://%s:8080" % heketi_cluster_ip + ret = create_storage_class_file( + self.ocp_master_node[0], + sc_name, + resturl_block, + storage_class['provisioner'], + restuser=storage_class['restuser'], + restsecretnamespace=storage_class['restsecretnamespace'], + restsecretname=storage_class['restsecretname'], + hacount=storage_class['hacount'], + clusterids=cluster_id) + self.assertTrue(ret, "creation of storage-class file failed") + provisioner_name = storage_class['provisioner'].split("/") + file_path = "/%s-%s-storage-class.yaml" % ( + sc_name, provisioner_name[1]) + oc_create(self.ocp_master_node[0], file_path) + self.addCleanup(oc_delete, self.ocp_master_node[0], + 'sc', sc_name) + secret = self.cns_secret['secret2'] + ret = create_secret_file(self.ocp_master_node[0], + secret['secret_name'], + secret['namespace'], + secret['data_key'], + secret['type']) + self.assertTrue(ret, "creation of heketi-secret file failed") + oc_create(self.ocp_master_node[0], + "/%s.yaml" % secret['secret_name']) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret', + secret['secret_name']) + ret = create_mongodb_pod(self.ocp_master_node[0], + pvc_name4, 30, sc_name) + self.assertTrue(ret, "creation of mongodb pod failed") + self.addCleanup(oc_delete, self.ocp_master_node[0], 'service', + pvc_name4) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc', + pvc_name4) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc', + pvc_name4) + ret = verify_pod_status_running(self.ocp_master_node[0], + pvc_name4) + self.assertTrue(ret, "verify mongodb pod status as running failed") + cmd = ("oc get pods | grep %s | grep -v deploy " + "| awk {'print $1'}") % pvc_name4 + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + pod_name = out.strip().split("\n")[0] + io_cmd = ("oc rsh %s dd if=/dev/urandom of=/var/lib/mongodb/data/file " + "bs=1000K count=1000") % pod_name + proc = g.run_async(self.ocp_master_node[0], io_cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + gluster_pod_list = get_ocp_gluster_pod_names(self.ocp_master_node[0]) + g.log.info("gluster_pod_list - %s" % gluster_pod_list) + gluster_pod_name = gluster_pod_list[0] + cmd = ("oc get pods -o wide | grep %s | grep -v deploy " + "| awk '{print $7}'") % gluster_pod_name + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + gluster_pod_node_name = out.strip().split("\n")[0].strip() + oc_delete(self.ocp_master_node[0], 'pod', gluster_pod_name) + cmd = ("oc get pods -o wide | grep glusterfs | grep %s | " + "grep -v Terminating | awk '{print $1}'") % ( + gluster_pod_node_name) + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + new_gluster_pod_name = out.strip().split("\n")[0].strip() + ret = 
verify_pod_status_running(self.ocp_master_node[0], + new_gluster_pod_name) + self.assertTrue(ret, "verify %s pod status as running " + "failed" % new_gluster_pod_name) + ret, out, err = proc.async_communicate() + self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd, + self.ocp_master_node[0])) diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py new file mode 100644 index 00000000..9ae0e987 --- /dev/null +++ b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py @@ -0,0 +1,267 @@ +from cnslibs.common.dynamic_provisioning import ( + create_mongodb_pod, + create_secret_file, + create_storage_class_file, + get_pvc_status, + verify_pod_status_running) +from cnslibs.common.openshift_ops import ( + get_ocp_gluster_pod_names, + oc_rsh) +from cnslibs.cns.cns_baseclass import CnsBaseClass +from cnslibs.common.openshift_ops import ( + oc_create, + oc_delete) +from glusto.core import Glusto as g + + +class TestDynamicProvisioningP0(CnsBaseClass): + ''' + Class that contain P0 dynamic provisioning test cases for + glusterfile volume + ''' + + def test_dynamic_provisioning_glusterfile(self): + g.log.info("test_dynamic_provisioning_glusterfile") + storage_class = self.cns_storage_class['storage_class1'] + sc_name = storage_class['name'] + pvc_name1 = "mongodb1" + ret = create_storage_class_file( + self.ocp_master_node[0], + sc_name, + storage_class['resturl'], + storage_class['provisioner'], + restuser=storage_class['restuser'], + secretnamespace=storage_class['secretnamespace'], + secretname=storage_class['secretname']) + self.assertTrue(ret, "creation of storage-class file failed") + provisioner_name = storage_class['provisioner'].split("/") + file_path = "/%s-%s-storage-class.yaml" % ( + sc_name, provisioner_name[1]) + oc_create(self.ocp_master_node[0], file_path) + self.addCleanup(oc_delete, self.ocp_master_node[0], + 'sc', sc_name) + secret = self.cns_secret['secret1'] + ret = create_secret_file(self.ocp_master_node[0], + secret['secret_name'], + secret['namespace'], + secret['data_key'], + secret['type']) + self.assertTrue(ret, "creation of heketi-secret file failed") + oc_create(self.ocp_master_node[0], + "/%s.yaml" % secret['secret_name']) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret', + secret['secret_name']) + ret = create_mongodb_pod(self.ocp_master_node[0], + pvc_name1, 10, sc_name) + self.assertTrue(ret, "creation of mongodb pod failed") + self.addCleanup(oc_delete, self.ocp_master_node[0], 'service', + pvc_name1) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc', + pvc_name1) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc', + pvc_name1) + ret = verify_pod_status_running(self.ocp_master_node[0], + pvc_name1) + self.assertTrue(ret, "verify mongodb pod status as running failed") + cmd = ("oc get pods | grep %s | grep -v deploy " + "| awk {'print $1'}") % pvc_name1 + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + pod_name = out.strip().split("\n")[0] + cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file " + "bs=1K count=100") + ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd) + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + oc_delete(self.ocp_master_node[0], 'pod', pod_name) + ret = verify_pod_status_running(self.ocp_master_node[0], + 
pvc_name1) + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertTrue(ret, "verify mongodb pod status as running failed") + cmd = ("oc get pods | grep %s | grep -v deploy " + "| awk {'print $1'}") % pvc_name1 + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + pod_name = out.strip().split("\n")[0] + cmd = "ls -lrt /var/lib/mongodb/data/file" + ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd) + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + cmd = "rm -rf /var/lib/mongodb/data/file" + ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd) + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + + def test_dynamic_provisioning_glusterfile_heketipod_failure(self): + g.log.info("test_dynamic_provisioning_glusterfile_Heketipod_Failure") + storage_class = self.cns_storage_class['storage_class1'] + sc_name = storage_class['name'] + pvc_name2 = "mongodb2" + ret = create_storage_class_file( + self.ocp_master_node[0], + sc_name, + storage_class['resturl'], + storage_class['provisioner'], + restuser=storage_class['restuser'], + secretnamespace=storage_class['secretnamespace'], + secretname=storage_class['secretname']) + self.assertTrue(ret, "creation of storage-class file failed") + provisioner_name = storage_class['provisioner'].split("/") + file_path = "/%s-%s-storage-class.yaml" % ( + sc_name, provisioner_name[1]) + oc_create(self.ocp_master_node[0], file_path) + self.addCleanup(oc_delete, self.ocp_master_node[0], + 'sc', sc_name) + secret = self.cns_secret['secret1'] + ret = create_secret_file(self.ocp_master_node[0], + secret['secret_name'], + secret['namespace'], + secret['data_key'], + secret['type']) + self.assertTrue(ret, "creation of heketi-secret file failed") + oc_create(self.ocp_master_node[0], + "/%s.yaml" % secret['secret_name']) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret', + secret['secret_name']) + ret = create_mongodb_pod(self.ocp_master_node[0], pvc_name2, + 10, sc_name) + self.assertTrue(ret, "creation of mongodb pod failed") + self.addCleanup(oc_delete, self.ocp_master_node[0], 'service', + pvc_name2) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc', + pvc_name2) + self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc', + pvc_name2) + ret = verify_pod_status_running(self.ocp_master_node[0], + pvc_name2) + self.assertTrue(ret, "verify mongodb pod status as running failed") + cmd = ("oc get pods | grep %s | grep -v deploy " + "|awk {'print $1'}") % pvc_name2 + ret, out, err = g.run(self.ocp_master_node[0], cmd, "root") + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + pod_name = out.strip().split("\n")[0] + cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file " + "bs=1K count=100") + ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd) + self.assertEqual(ret, 0, "failed to execute command %s on %s" % ( + cmd, self.ocp_master_node[0])) + oc_delete(self.ocp_master_node[0], 'dc', "heketi") + oc_delete(self.ocp_master_node[0], 'service', "heketi") + oc_delete(self.ocp_master_node[0], 'route', "heketi") + pvc_name3 = "mongodb3" + ret = create_mongodb_pod(self.ocp_master_node[0], + pvc_name3, 10, sc_name) + self.assertTrue(ret, "creation of mongodb pod failed") + self.addCleanup(oc_delete, self.ocp_master_node[0], 'service', + pvc_name3) + 
+        oc_delete(self.ocp_master_node[0], 'dc', "heketi")
+        oc_delete(self.ocp_master_node[0], 'service', "heketi")
+        oc_delete(self.ocp_master_node[0], 'route', "heketi")
+        pvc_name3 = "mongodb3"
+        ret = create_mongodb_pod(self.ocp_master_node[0],
+                                 pvc_name3, 10, sc_name)
+        self.assertTrue(ret, "creation of mongodb pod failed")
+        self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
+                        pvc_name3)
+        self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
+                        pvc_name3)
+        self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
+                        pvc_name3)
+        ret, status = get_pvc_status(self.ocp_master_node[0],
+                                     pvc_name3)
+        self.assertTrue(ret, "failed to get pvc status of %s" % pvc_name3)
+        self.assertEqual(status, "Pending", "pvc status of "
+                         "%s is not in Pending state" % pvc_name3)
+        cmd = "oc process heketi | oc create -f -"
+        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_master_node[0]))
+        ret = verify_pod_status_running(self.ocp_master_node[0], "heketi")
+        self.assertTrue(ret, "verify heketi pod status as running failed")
+        ret, status = get_pvc_status(self.ocp_master_node[0],
+                                     pvc_name3)
+        self.assertTrue(ret, "failed to get pvc status of %s" % pvc_name3)
+        self.assertEqual(status, "Bound", "pvc status of %s "
+                         "is not in Bound state" % pvc_name3)
+        ret = verify_pod_status_running(self.ocp_master_node[0],
+                                        pvc_name3)
+        self.assertTrue(ret, "verify %s pod status "
+                        "as running failed" % pvc_name3)
+        cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
+               "bs=1K count=100")
+        ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_master_node[0]))
+
+    def test_dynamic_provisioning_glusterfile_glusterpod_failure(self):
+        g.log.info("test_dynamic_provisioning_glusterfile_Glusterpod_Failure")
+        storage_class = self.cns_storage_class['storage_class1']
+        sc_name = storage_class['name']
+        pvc_name4 = "mongodb4"
+        ret = create_storage_class_file(
+            self.ocp_master_node[0],
+            sc_name,
+            storage_class['resturl'],
+            storage_class['provisioner'],
+            restuser=storage_class['restuser'],
+            secretnamespace=storage_class['secretnamespace'],
+            secretname=storage_class['secretname'])
+        self.assertTrue(ret, "creation of storage-class file failed")
+        provisioner_name = storage_class['provisioner'].split("/")
+        file_path = "/%s-%s-storage-class.yaml" % (
+            sc_name, provisioner_name[1])
+        oc_create(self.ocp_master_node[0], file_path)
+        self.addCleanup(oc_delete, self.ocp_master_node[0],
+                        'sc', sc_name)
+        secret = self.cns_secret['secret1']
+        ret = create_secret_file(self.ocp_master_node[0],
+                                 secret['secret_name'],
+                                 secret['namespace'],
+                                 secret['data_key'],
+                                 secret['type'])
+        self.assertTrue(ret, "creation of heketi-secret file failed")
+        oc_create(self.ocp_master_node[0],
+                  "/%s.yaml" % secret['secret_name'])
+        self.addCleanup(oc_delete, self.ocp_master_node[0], 'secret',
+                        secret['secret_name'])
+        ret = create_mongodb_pod(self.ocp_master_node[0],
+                                 pvc_name4, 30, sc_name)
+        self.assertTrue(ret, "creation of mongodb pod failed")
+        self.addCleanup(oc_delete, self.ocp_master_node[0], 'service',
+                        pvc_name4)
+        self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc',
+                        pvc_name4)
+        self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
+                        pvc_name4)
+        ret = verify_pod_status_running(self.ocp_master_node[0],
+                                        pvc_name4)
+        self.assertTrue(ret, "verify mongodb pod status as running failed")
+        cmd = ("oc get pods | grep %s | grep -v deploy "
+               "|awk {'print $1'}") % pvc_name4
+        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_master_node[0]))
+        pod_name = out.strip().split("\n")[0]
+        io_cmd = ("oc rsh %s dd if=/dev/urandom of=/var/lib/mongodb/data/file "
+                  "bs=1000K count=1000") % pod_name
+        proc = g.run_async(self.ocp_master_node[0], io_cmd, "root")
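+        # NOTE: the dd I/O above runs asynchronously; the steps below delete
+        # one glusterfs pod, wait for its replacement on the same node to be
+        # Running, and then verify that the I/O completed successfully.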
+        gluster_pod_list = get_ocp_gluster_pod_names(self.ocp_master_node[0])
+        g.log.info("gluster_pod_list - %s" % gluster_pod_list)
+        gluster_pod_name = gluster_pod_list[0]
+        cmd = ("oc get pods -o wide | grep %s | grep -v deploy "
+               "|awk '{print $7}'") % gluster_pod_name
+        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_master_node[0]))
+        gluster_pod_node_name = out.strip().split("\n")[0].strip()
+        oc_delete(self.ocp_master_node[0], 'pod', gluster_pod_name)
+        cmd = ("oc get pods -o wide | grep glusterfs | grep %s | "
+               "grep -v Terminating | awk '{print $1}'") % (
+            gluster_pod_node_name)
+        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+        self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+            cmd, self.ocp_master_node[0]))
+        new_gluster_pod_name = out.strip().split("\n")[0].strip()
+        ret = verify_pod_status_running(self.ocp_master_node[0],
+                                        new_gluster_pod_name)
+        self.assertTrue(ret, "verify %s pod status as running "
+                        "failed" % new_gluster_pod_name)
+        ret, out, err = proc.async_communicate()
+        self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd,
+                         self.ocp_master_node[0]))
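The tests above repeat the same three-step sequence many times: run an oc command on the first master node with g.run, assert a zero exit status, and take the first line of stdout. A shared helper could cut that duplication. The sketch below is illustrative only; the name _run_on_master is not part of this patch, and it assumes the method lives on one of these test classes, which already provide self.ocp_master_node and the unittest assertions, with g imported from glusto.core as in the files above.

    def _run_on_master(self, cmd):
        # Run a shell command as root on the first OCP master node; fail the
        # test if it exits non-zero and return stdout with surrounding
        # whitespace stripped.
        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
        self.assertEqual(
            ret, 0, "failed to execute command %s on %s: %s" % (
                cmd, self.ocp_master_node[0], err))
        return out.strip()

With that in place, the pod-name lookups reduce to a single line such as
pod_name = self._run_on_master(cmd).split("\n")[0].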
diff --git a/tests/functional/common/test_dynamic_provisioning.py b/tests/functional/common/test_dynamic_provisioning.py
new file mode 100644
index 00000000..8428f2e6
--- /dev/null
+++ b/tests/functional/common/test_dynamic_provisioning.py
@@ -0,0 +1,86 @@
+from cnslibs.cns.cns_baseclass import CnsSetupBaseClass
+from cnslibs.common.dynamic_provisioning import (
+    create_secret_file,
+    create_storage_class_file,
+    create_pvc_file,
+    create_app_pod_file)
+from cnslibs.common.openshift_ops import oc_create
+from glusto.core import Glusto as g
+
+
+class TestDynamicProvisioning(CnsSetupBaseClass):
+    '''
+    Class for basic dynamic provisioning
+    '''
+    @classmethod
+    def setUpClass(cls):
+        super(TestDynamicProvisioning, cls).setUpClass()
+        super(TestDynamicProvisioning, cls).cns_deploy()
+
+    def test_dynamic_provisioning(self):
+        g.log.info("testcase to test basic dynamic provisioning")
+        storage_class = self.cns_storage_class['storage_class1']
+        sc_name = storage_class['name']
+        ret = create_storage_class_file(
+            self.ocp_master_node[0],
+            sc_name,
+            storage_class['resturl'],
+            storage_class['provisioner'],
+            restuser=storage_class['restuser'],
+            secretnamespace=storage_class['secretnamespace'],
+            secretname=storage_class['secretname'])
+        self.assertTrue(ret, "creation of storage-class file failed")
+        provisioner_name = storage_class['provisioner'].split("/")
+        file_path = ("/%s-%s-storage-class.yaml" % (
+            sc_name, provisioner_name[1]))
+        oc_create(self.ocp_master_node[0], file_path)
+        secret = self.cns_secret['secret1']
+        ret = create_secret_file(self.ocp_master_node[0],
+                                 secret['secret_name'],
+                                 secret['namespace'],
+                                 secret['data_key'],
+                                 secret['type'])
+        self.assertTrue(ret, "creation of heketi-secret file failed")
+        oc_create(self.ocp_master_node[0],
+                  "/%s.yaml" % secret['secret_name'])
+        count = self.start_count_for_pvc
+        for size, pvc in self.cns_pvc_size_number_dict.items():
+            for i in range(1, pvc + 1):
+                pvc_name = "pvc-claim%d" % count
+                g.log.info("starting creation of claim file "
+                           "for %s", pvc_name)
+                ret = create_pvc_file(self.ocp_master_node[0],
+                                      pvc_name, sc_name, size)
+                self.assertTrue(ret, "create pvc file - %s failed"
+                                % pvc_name)
+                file_path = "/pvc-claim%d.json" % count
+                g.log.info("starting to create claim %s", pvc_name)
+                oc_create(self.ocp_master_node[0], file_path)
+                count = count + 1
+        cmd = 'oc get pvc | grep pvc-claim | awk \'{print $1}\''
+        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+        self.assertEqual(ret, 0, "failed to execute cmd %s on %s err %s" % (
+            cmd, self.ocp_master_node[0], err))
+        complete_pvc_list = out.strip().split("\n")
+        complete_pvc_list = map(str.strip, complete_pvc_list)
+        count = self.start_count_for_pvc
+        existing_pvc_list = []
+        for i in range(1, count):
+            existing_pvc_list.append("pvc-claim%d" % i)
+        pvc_list = list(set(complete_pvc_list) - set(existing_pvc_list))
+        index = 0
+        for key, value in self.app_pvc_count_dict.items():
+            for i in range(1, value + 1):
+                claim_name = pvc_list[index]
+                app_name = key + str(count)
+                sample_app_name = key
+                g.log.info("starting to create app_pod_file for %s", app_name)
+                ret = create_app_pod_file(
+                    self.ocp_master_node[0], claim_name,
+                    app_name, sample_app_name)
+                self.assertTrue(
+                    ret, "creating app-pod file - %s failed" % app_name)
+                file_path = "/%s.yaml" % app_name
+                g.log.info("starting to create app_pod_%s", app_name)
+                oc_create(self.ocp_master_node[0], file_path)
+                index = index + 1
+                count = count + 1
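A follow-up the basic provisioning test above could use: instead of inspecting the output of oc get pvc once, poll each claim until it reaches the Bound state. The sketch below is a minimal illustration, not part of this patch; it reuses the get_pvc_status helper already called in the heketi-pod-failure test, assumes the method lives on the same test class, and the 5-second poll interval and 120-second default timeout are arbitrary choices.

    def _wait_for_pvc_state(self, pvc_name, expected_state="Bound",
                            timeout=120):
        # Poll get_pvc_status() every 5 seconds until the claim reaches the
        # expected state or the timeout expires (requires "import time" at
        # the top of the module).
        for _ in range(timeout // 5):
            ret, status = get_pvc_status(self.ocp_master_node[0], pvc_name)
            self.assertTrue(ret, "failed to get pvc status of %s" % pvc_name)
            if status == expected_state:
                return
            time.sleep(5)
        self.fail("pvc %s did not reach %s state within %s seconds"
                  % (pvc_name, expected_state, timeout))

Calling self._wait_for_pvc_state(pvc_name) after each oc_create of a claim would make the later app-pod creation loop start from claims that are known to be usable.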