Diffstat (limited to 'tests/functional/common/heketi')
-rw-r--r--  tests/functional/common/heketi/test_volume_expansion_and_devices.py  726
-rw-r--r--  tests/functional/common/heketi/test_volume_multi_req.py  371
2 files changed, 1097 insertions, 0 deletions
diff --git a/tests/functional/common/heketi/test_volume_expansion_and_devices.py b/tests/functional/common/heketi/test_volume_expansion_and_devices.py
new file mode 100644
index 00000000..767680eb
--- /dev/null
+++ b/tests/functional/common/heketi/test_volume_expansion_and_devices.py
@@ -0,0 +1,726 @@
+from __future__ import division
+import json
+import math
+import unittest
+
+from glusto.core import Glusto as g
+from glustolibs.gluster import volume_ops, rebalance_ops
+
+from cnslibs.common.exceptions import ExecutionError, ConfigError
+from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names
+from cnslibs.common import heketi_ops, podcmd
+
+
+class TestVolumeExpansionAndDevicesTestCases(HeketiClientSetupBaseClass):
+ """
+ Test cases for volume expansion and device addition
+ """
+
+ @podcmd.GlustoPod()
+ def get_num_of_bricks(self, volume_name):
+ """
+ Return the current number of bricks in the given volume
+ """
+ brick_info = []
+
+ if self.deployment_type == "cns":
+
+ gluster_pod = get_ocp_gluster_pod_names(
+ self.heketi_client_node)[1]
+
+ p = podcmd.Pod(self.heketi_client_node, gluster_pod)
+
+ volume_info = volume_ops.get_volume_info(
+ p, volume_name)
+
+ elif self.deployment_type == "crs":
+ volume_info = volume_ops.get_volume_info(
+ self.heketi_client_node, volume_name)
+
+ self.assertIsNotNone(
+ volume_info, "Volume info is None")
+
+ for brick_details in volume_info[volume_name]["bricks"]["brick"]:
+
+ brick_info.append(brick_details["name"])
+
+ num_of_bricks = len(brick_info)
+
+ return num_of_bricks
+
+ @podcmd.GlustoPod()
+ def get_rebalance_status(self, volume_name):
+ """
+ Wait for rebalance to complete and verify that its status is 'completed'
+ """
+ if self.deployment_type == "cns":
+ gluster_pod = get_ocp_gluster_pod_names(
+ self.heketi_client_node)[1]
+
+ p = podcmd.Pod(self.heketi_client_node, gluster_pod)
+
+ wait_reb = rebalance_ops.wait_for_rebalance_to_complete(
+ p, volume_name)
+ self.assertTrue(wait_reb, "Rebalance not complete")
+
+ reb_status = rebalance_ops.get_rebalance_status(
+ p, volume_name)
+
+ elif self.deployment_type == "crs":
+ wait_reb = rebalance_ops.wait_for_rebalance_to_complete(
+ self.heketi_client_node, volume_name)
+ self.assertTrue(wait_reb, "Rebalance not complete")
+
+ reb_status = rebalance_ops.get_rebalance_status(
+ self.heketi_client_node, volume_name)
+
+ self.assertEqual(reb_status["aggregate"]["statusStr"],
+ "completed", "Rebalance not yet completed")
+
+ @podcmd.GlustoPod()
+ def get_brick_and_volume_status(self, volume_name):
+ """
+ Verify that the volume and each of its bricks are up
+ """
+ brick_info = []
+
+ if self.deployment_type == "cns":
+ gluster_pod = get_ocp_gluster_pod_names(
+ self.heketi_client_node)[1]
+
+ p = podcmd.Pod(self.heketi_client_node, gluster_pod)
+
+ volume_info = volume_ops.get_volume_info(p, volume_name)
+ volume_status = volume_ops.get_volume_status(p, volume_name)
+
+ elif self.deployment_type == "crs":
+ volume_info = volume_ops.get_volume_info(
+ self.heketi_client_node, volume_name)
+ volume_status = volume_ops.get_volume_status(
+ self.heketi_client_node, volume_name)
+
+ self.assertIsNotNone(volume_info, "Volume info is empty")
+ self.assertIsNotNone(volume_status, "Volume status is empty")
+
+ self.assertEqual(int(volume_info[volume_name]["status"]), 1,
+ "Volume not up")
+ for brick_details in volume_info[volume_name]["bricks"]["brick"]:
+ brick_info.append(brick_details["name"])
+
+ if brick_info == []:
+ raise ExecutionError("Brick details empty for %s" % volume_name)
+
+ for brick in brick_info:
+ brick_data = brick.strip().split(":")
+ brick_ip = brick_data[0]
+ brick_name = brick_data[1]
+ self.assertEqual(int(volume_status[volume_name][brick_ip]
+ [brick_name]["status"]), 1,
+ "Brick %s not up" % brick_name)
+
+ def enable_disable_devices(self, additional_devices_attached, enable=True):
+ """
+ Enable or disable the given heketi devices
+ """
+ op = 'enable' if enable else 'disable'
+ for node_id in additional_devices_attached.keys():
+ node_info = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+
+ if not enable:
+ self.assertNotEqual(node_info, False,
+ "Node info for node %s failed" % node_id)
+
+ for device in node_info["devices"]:
+ if device["name"] == additional_devices_attached[node_id]:
+ out = getattr(heketi_ops, 'heketi_device_%s' % op)(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ device["id"],
+ json=True)
+ if out is False:
+ g.log.info("Device %s could not be %sd"
+ % (device["id"], op))
+ else:
+ g.log.info("Device %s %sd" % (device["id"], op))
+
+ def enable_devices(self, additional_devices_attached):
+ """
+ Method to call enable_disable_devices to enable devices
+ """
+ return self.enable_disable_devices(additional_devices_attached, True)
+
+ def disable_devices(self, additional_devices_attached):
+ """
+ Method to call enable_disable_devices to disable devices
+ """
+ return self.enable_disable_devices(additional_devices_attached, False)
+
+ def get_devices_summary_free_space(self):
+ """
+ Sum the free space of all devices on every node and
+ return the total free space across the cluster in GiB
+ """
+
+ heketi_node_id_list = []
+ free_spaces = []
+
+ heketi_node_list_string = heketi_ops.heketi_node_list(
+ self.heketi_client_node,
+ self.heketi_server_url, mode="cli", json=True)
+
+ self.assertNotEqual(
+ heketi_node_list_string, False,
+ "Heketi node list empty")
+
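+ # Each line of 'heketi node list' output is assumed to look like
+ # 'Id:<node-id> Cluster:<cluster-id>'; the node id is the text
+ # between 'Id:' and 'Cluster'.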
+ for line in heketi_node_list_string.strip().split("\n"):
+ heketi_node_id_list.append(line.strip().split(
+ "Cluster")[0].strip().split(":")[1])
+
+ for node_id in heketi_node_id_list:
+ node_info_dict = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+ total_free_space = 0
+ for device in node_info_dict["devices"]:
+ total_free_space += device["storage"]["free"]
+ free_spaces.append(total_free_space)
+
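+ # Device free space is reported by heketi in KiB (assumed unit);
+ # dividing by 1024 ** 2 converts the aggregate to GiB.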
+ total_free_space = sum(free_spaces)/(1024 ** 2)
+ total_free_space = int(math.floor(total_free_space))
+
+ return total_free_space
+
+ def detach_devices_attached(self, device_id_list):
+ """
+ Disable, remove and delete each of the given devices
+ """
+ for device_id in device_id_list:
+ device_disable = heketi_ops.heketi_device_disable(
+ self.heketi_client_node, self.heketi_server_url, device_id)
+ self.assertNotEqual(
+ device_disable, False,
+ "Device %s could not be disabled" % device_id)
+ device_remove = heketi_ops.heketi_device_remove(
+ self.heketi_client_node, self.heketi_server_url, device_id)
+ self.assertNotEqual(
+ device_remove, False,
+ "Device %s could not be removed" % device_id)
+ device_delete = heketi_ops.heketi_device_delete(
+ self.heketi_client_node, self.heketi_server_url, device_id)
+ self.assertNotEqual(
+ device_delete, False,
+ "Device %s could not be deleted" % device_id)
+
+ @podcmd.GlustoPod()
+ def test_add_device_heketi_cli(self):
+ """
+ Test heketi device addition with background gluster validation
+ """
+ node_id_list = []
+ device_id_list = []
+ hosts = []
+ gluster_servers = []
+
+ node_list_info = heketi_ops.heketi_node_list(
+ self.heketi_client_node, self.heketi_server_url)
+
+ self.assertNotEqual(node_list_info, False,
+ "heketi node list command failed")
+
+ lines = node_list_info.strip().split("\n")
+
+ for line in lines:
+ node_id_list.append(line.strip().split("Cluster")
+ [0].strip().split(":")[1])
+
+ creation_info = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 100, json=True)
+
+ self.assertNotEqual(creation_info, False,
+ "Volume creation failed")
+
+ self.addCleanup(self.delete_volumes, creation_info["id"])
+
+ ret, out, err = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 620, json=True,
+ raw_cli_output=True)
+
+ self.assertEqual("Error: No space", err.strip())
+
+ if ret == 0:
+ out_json = json.loads(out)
+ self.addCleanup(self.delete_volumes, out_json["id"])
+
+ for node_id in node_id_list:
+ device_present = False
+ node_info = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+
+ self.assertNotEqual(
+ node_info, False,
+ "Heketi node info on node %s failed" % node_id)
+
+ node_ip = node_info["hostnames"]["storage"][0]
+
+ for gluster_server in g.config["gluster_servers"].keys():
+ gluster_server_ip = (g.config["gluster_servers"]
+ [gluster_server]["storage"])
+ if gluster_server_ip == node_ip:
+ device_name = (g.config["gluster_servers"][gluster_server]
+ ["additional_devices"][0])
+ break
+ device_addition_info = heketi_ops.heketi_device_add(
+ self.heketi_client_node, self.heketi_server_url,
+ device_name, node_id, json=True)
+
+ self.assertNotEqual(device_addition_info, False,
+ "Device %s addition failed" % device_name)
+
+ node_info_after_addition = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+ for device in node_info_after_addition["devices"]:
+ if device["name"] == device_name:
+ device_present = True
+ device_id_list.append(device["id"])
+
+ self.assertEqual(device_present, True,
+ "device %s not present" % device_name)
+
+ self.addCleanup(self.detach_devices_attached, device_id_list)
+
+ output_dict = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url,
+ 620, json=True)
+
+ self.assertNotEqual(output_dict, False, "Volume creation failed")
+ self.addCleanup(self.delete_volumes, output_dict["id"])
+
+ self.assertEqual(output_dict["durability"]["replicate"]["replica"], 3)
+ self.assertEqual(output_dict["size"], 620)
+ mount_node = (output_dict["mount"]["glusterfs"]
+ ["device"].strip().split(":")[0])
+
+ hosts.append(mount_node)
+ backup_volfile_server_list = (
+ output_dict["mount"]["glusterfs"]["options"]
+ ["backup-volfile-servers"].strip().split(","))
+
+ for backup_volfile_server in backup_volfile_server_list:
+ hosts.append(backup_volfile_server)
+ for gluster_server in g.config["gluster_servers"].keys():
+ gluster_servers.append(g.config["gluster_servers"]
+ [gluster_server]["storage"])
+ self.assertEqual(
+ set(hosts), set(gluster_servers),
+ "Hosts do not match gluster servers for %s" % output_dict["id"])
+
+ volume_name = output_dict["name"]
+
+ self.get_brick_and_volume_status(volume_name)
+
+ def test_volume_expansion_expanded_volume(self):
+ """
+ Test expansion of an already expanded volume, with brick
+ and rebalance validation
+ """
+ creation_info = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 10, json=True)
+
+ self.assertNotEqual(creation_info, False, "Volume creation failed")
+
+ volume_name = creation_info["name"]
+ volume_id = creation_info["id"]
+
+ free_space_after_creation = self.get_devices_summary_free_space()
+
+ volume_info_before_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(
+ volume_info_before_expansion, False,
+ "Heketi volume info for %s failed" % volume_id)
+
+ heketi_vol_info_size_before_expansion = (
+ volume_info_before_expansion["size"])
+
+ num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)
+
+ self.get_brick_and_volume_status(volume_name)
+
+ expansion_info = heketi_ops.heketi_volume_expand(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, 3)
+
+ self.assertNotEqual(expansion_info, False,
+ "Volume %s expansion failed" % volume_id)
+
+ free_space_after_expansion = self.get_devices_summary_free_space()
+
+ self.assertTrue(
+ free_space_after_creation > free_space_after_expansion,
+ "Expansion of %s did not consume free space" % volume_id)
+
+ num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)
+
+ self.get_brick_and_volume_status(volume_name)
+ self.get_rebalance_status(volume_name)
+
+ volume_info_after_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(
+ volume_info_after_expansion, False,
+ "Heketi volume info for %s command failed" % volume_id)
+
+ heketi_vol_info_size_after_expansion = (
+ volume_info_after_expansion["size"])
+
+ difference_size_after_expansion = (
+ heketi_vol_info_size_after_expansion -
+ heketi_vol_info_size_before_expansion)
+
+ self.assertTrue(
+ difference_size_after_expansion > 0,
+ "Volume expansion for %s did not consume free space" % volume_id)
+
+ num_of_bricks_added_after_expansion = (num_of_bricks_after_expansion -
+ num_of_bricks_before_expansion)
+
+ self.assertEqual(
+ num_of_bricks_added_after_expansion, 3,
+ "Number of bricks added in %s after expansion is not 3"
+ % volume_name)
+
+ further_expansion_info = heketi_ops.heketi_volume_expand(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, 3)
+
+ self.assertNotEqual(further_expansion_info, False,
+ "Volume expansion failed for %s" % volume_id)
+
+ free_space_after_further_expansion = (
+ self.get_devices_summary_free_space())
+ self.assertTrue(
+ free_space_after_expansion > free_space_after_further_expansion,
+ "Further expansion of %s did not consume free space" % volume_id)
+
+ num_of_bricks_after_further_expansion = (
+ self.get_num_of_bricks(volume_name))
+
+ self.get_brick_and_volume_status(volume_name)
+
+ self.get_rebalance_status(volume_name)
+
+ volume_info_after_further_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(
+ volume_info_after_further_expansion, False,
+ "Heketi volume info for %s failed" % volume_id)
+
+ heketi_vol_info_size_after_further_expansion = (
+ volume_info_after_further_expansion["size"])
+
+ difference_size_after_further_expansion = (
+ heketi_vol_info_size_after_further_expansion -
+ heketi_vol_info_size_after_expansion)
+
+ self.assertTrue(
+ difference_size_after_further_expansion > 0,
+ "Size of volume %s did not increase" % volume_id)
+
+ num_of_bricks_added_after_further_expansion = (
+ num_of_bricks_after_further_expansion -
+ num_of_bricks_after_expansion)
+
+ self.assertEqual(
+ num_of_bricks_added_after_further_expansion, 3,
+ "Number of bricks added is not 3 for %s" % volume_id)
+
+ free_space_before_deletion = self.get_devices_summary_free_space()
+
+ volume_delete = heketi_ops.heketi_volume_delete(
+ self.heketi_client_node, self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(volume_delete, False, "Deletion of %s failed"
+ % volume_id)
+
+ free_space_after_deletion = self.get_devices_summary_free_space()
+
+ self.assertTrue(free_space_after_deletion > free_space_before_deletion,
+ "Free space not reclaimed after deletion of %s"
+ % volume_id)
+
+ def test_volume_expansion_no_free_space(self):
+ """
+ Test that volume expansion fails when there is no free space
+ and succeeds after additional devices are enabled
+ """
+
+ heketi_node_id_list = []
+ additional_devices_attached = {}
+ heketi_node_list_string = heketi_ops.heketi_node_list(
+ self.heketi_client_node,
+ self.heketi_server_url, mode="cli", json=True)
+
+ self.assertNotEqual(heketi_node_list_string, False,
+ "Heketi node list command failed")
+
+ for line in heketi_node_list_string.strip().split("\n"):
+ heketi_node_id_list.append(line.strip().split(
+ "Cluster")[0].strip().split(":")[1])
+
+ for node_id in heketi_node_id_list:
+ node_info_dict = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+ self.assertNotEqual(node_info_dict, False,
+ "Heketi node info for %s failed" % node_id)
+ for gluster_server in self.gluster_servers:
+ gluster_server_ip = (
+ self.gluster_servers_info[gluster_server]["storage"])
+ node_ip = node_info_dict["hostnames"]["storage"][0]
+
+ if gluster_server_ip == node_ip:
+ addition_status = (
+ heketi_ops.heketi_device_add(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ self.gluster_servers_info[gluster_server]
+ ["additional_devices"][0], node_id))
+
+ self.assertNotEqual(addition_status, False,
+ "Addition of device %s failed"
+ % self.gluster_servers_info
+ [gluster_server]
+ ["additional_devices"][0])
+
+ additional_devices_attached.update({node_id:
+ self.gluster_servers_info
+ [gluster_server]
+ ["additional_devices"][0]})
+
+ additional_devices_ids = []
+ for node_id in additional_devices_attached.keys():
+ node_info = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+
+ for device in node_info["devices"]:
+ if device["name"] == additional_devices_attached[node_id]:
+ additional_devices_ids.append(device["id"])
+
+ self.addCleanup(self.detach_devices_attached,
+ additional_devices_ids)
+
+ for node_id in additional_devices_attached.keys():
+ flag_device_added = False
+ node_info = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+ for device in node_info["devices"]:
+ if device["name"] == additional_devices_attached[node_id]:
+ flag_device_added = True
+
+ self.assertTrue(flag_device_added)
+
+ self.disable_devices(additional_devices_attached)
+
+ creation_info = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 675, json=True)
+
+ self.assertNotEqual(creation_info, False, "Volume creation failed")
+
+ volume_name = creation_info["name"]
+ volume_id = creation_info["id"]
+
+ volume_info_before_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ heketi_vol_info_size_before_expansion = (
+ volume_info_before_expansion["size"])
+
+ num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)
+
+ self.get_brick_and_volume_status(volume_name)
+
+ free_space_after_creation = self.get_devices_summary_free_space()
+
+ ret, out, err = heketi_ops.heketi_volume_expand(
+ self.heketi_client_node, self.heketi_server_url,
+ volume_id, 50, raw_cli_output=True)
+
+ emsg = "Error: Maximum number of bricks reached."
+
+ self.assertEqual(emsg, err.strip(),
+ "Expansion failed with invalid reason")
+
+ if ret == 0:
+ out_json = json.loads(out)
+ self.addCleanup(self.delete_volumes, out_json["id"])
+
+ self.enable_devices(additional_devices_attached)
+
+ expansion_info = heketi_ops.heketi_volume_expand(
+ self.heketi_client_node, self.heketi_server_url,
+ volume_id, 50, json=True)
+
+ self.assertNotEqual(expansion_info, False,
+ "Volume %s could not be expanded" % volume_id)
+
+ free_space_after_expansion = self.get_devices_summary_free_space()
+
+ self.assertTrue(
+ free_space_after_creation > free_space_after_expansion,
+ "Free space not consumed after expansion of %s" % volume_id)
+
+ num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)
+
+ self.get_brick_and_volume_status(volume_name)
+
+ volume_info_after_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(
+ volume_info_after_expansion, False,
+ "Heketi volume info for %s failed" % volume_id)
+
+ heketi_vol_info_size_after_expansion = (
+ volume_info_after_expansion["size"])
+
+ difference_size_after_expansion = (
+ heketi_vol_info_size_after_expansion -
+ heketi_vol_info_size_before_expansion)
+
+ self.assertTrue(difference_size_after_expansion > 0,
+ "Size of %s not increased" % volume_id)
+
+ num_of_bricks_added_after_expansion = (num_of_bricks_after_expansion -
+ num_of_bricks_before_expansion)
+
+ self.assertEqual(num_of_bricks_added_after_expansion, 3)
+
+ deletion_info = heketi_ops.heketi_volume_delete(
+ self.heketi_client_node, self.heketi_server_url, volume_id,
+ json=True)
+
+ self.assertNotEqual(deletion_info, False,
+ "Deletion of %s not successful" % volume_id)
+
+ free_space_after_deletion = self.get_devices_summary_free_space()
+
+ self.assertTrue(
+ free_space_after_deletion > free_space_after_expansion,
+ "Free space not reclaimed after deletion of volume %s" % volume_id)
+
+ @podcmd.GlustoPod()
+ def test_volume_expansion_rebalance_brick(self):
+ """
+ Test volume expansion with brick and rebalance validation
+ """
+ creation_info = heketi_ops.heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, 10, json=True)
+
+ self.assertNotEqual(creation_info, False, "Volume creation failed")
+
+ volume_name = creation_info["name"]
+ volume_id = creation_info["id"]
+
+ free_space_after_creation = self.get_devices_summary_free_space()
+
+ volume_info_before_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(volume_info_before_expansion, False,
+ "Volume info for %s failed" % volume_id)
+
+ heketi_vol_info_size_before_expansion = (
+ volume_info_before_expansion["size"])
+
+ self.get_brick_and_volume_status(volume_name)
+ num_of_bricks_before_expansion = self.get_num_of_bricks(volume_name)
+
+ expansion_info = heketi_ops.heketi_volume_expand(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, 5)
+
+ self.assertNotEqual(expansion_info, False,
+ "Volume expansion of %s failed" % volume_id)
+
+ free_space_after_expansion = self.get_devices_summary_free_space()
+ self.assertTrue(
+ free_space_after_creation > free_space_after_expansion,
+ "Free space not consumed after expansion of %s" % volume_id)
+
+ volume_info_after_expansion = heketi_ops.heketi_volume_info(
+ self.heketi_client_node,
+ self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(volume_info_after_expansion, False,
+ "Volume info failed for %s" % volume_id)
+
+ heketi_vol_info_size_after_expansion = (
+ volume_info_after_expansion["size"])
+
+ difference_size = (heketi_vol_info_size_after_expansion -
+ heketi_vol_info_size_before_expansion)
+
+ self.assertTrue(
+ difference_size > 0,
+ "Size not increased after expansion of %s" % volume_id)
+
+ self.get_brick_and_volume_status(volume_name)
+ num_of_bricks_after_expansion = self.get_num_of_bricks(volume_name)
+
+ num_of_bricks_added = (num_of_bricks_after_expansion -
+ num_of_bricks_before_expansion)
+
+ self.assertEqual(
+ num_of_bricks_added, 3,
+ "Number of bricks added is not 3 for %s" % volume_id)
+
+ self.get_rebalance_status(volume_name)
+
+ deletion_info = heketi_ops.heketi_volume_delete(
+ self.heketi_client_node, self.heketi_server_url,
+ volume_id, json=True)
+
+ self.assertNotEqual(deletion_info, False,
+ "Deletion of volume %s failed" % volume_id)
+
+ free_space_after_deletion = self.get_devices_summary_free_space()
+
+ self.assertTrue(
+ free_space_after_deletion > free_space_after_expansion,
+ "Free space is not reclaimed after volume deletion of %s"
+ % volume_id)
+
diff --git a/tests/functional/common/heketi/test_volume_multi_req.py b/tests/functional/common/heketi/test_volume_multi_req.py
new file mode 100644
index 00000000..fbf95086
--- /dev/null
+++ b/tests/functional/common/heketi/test_volume_multi_req.py
@@ -0,0 +1,371 @@
+"""Test cases that create and delete multiple volumes.
+"""
+
+import contextlib
+import threading
+import time
+
+import ddt
+import yaml
+
+from glusto.core import Glusto as g
+
+from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common.heketi_ops import (
+ heketi_volume_list)
+from cnslibs.common.naming import (
+ make_unique_label, extract_method_name)
+from cnslibs.common.openshift_ops import (
+ oc_create, oc_delete, oc_get_pvc, oc_get_pv, oc_get_all_pvs)
+from cnslibs.common.waiter import Waiter
+
+
+def build_storage_class(name, resturl, restuser='foo', restuserkey='foo'):
+ """Build s simple structure for a storage class.
+ """
+ return {
+ 'apiVersion': 'storage.k8s.io/v1beta1',
+ 'kind': 'StorageClass',
+ 'provisioner': 'kubernetes.io/glusterfs',
+ 'metadata': {
+ 'name': name,
+ },
+ 'parameters': {
+ 'resturl': resturl,
+ 'restuser': restuser,
+ 'restuserkey': restuserkey,
+ }
+ }
+
+
+def build_pvc(name, storageclass, size, accessmodes=None):
+ """Build a simple structture for a PVC defintion.
+ """
+ annotations = {
+ 'volume.beta.kubernetes.io/storage-class': storageclass,
+ }
+ accessmodes = accessmodes if accessmodes else ['ReadWriteOnce']
+ if not isinstance(size, str):
+ size = '%dGi' % size
+ return {
+ 'apiVersion': 'v1',
+ 'kind': 'PersistentVolumeClaim',
+ 'metadata': {
+ 'name': name,
+ 'annotations': annotations,
+ },
+ 'spec': {
+ 'accessModes': accessmodes,
+ 'resources': {
+ 'requests': {'storage': size},
+ }
+ }
+ }
+
+
+@contextlib.contextmanager
+def temp_config(ocp_node, cfg):
+ """Context manager to help define YAML files on the remote node
+ that can in turn be fed to 'oc create'. Must be used as a context
+ manager (with-statement).
+
+ Example:
+ >>> d = {'foo': True, 'bar': 22, 'baz': [1, 5, 9]}
+ >>> with temp_config(node, d) as fpath:
+ ... func_that_takes_a_path(fpath)
+
+ Here, the data dictionary `d` is serialized to YAML and written
+ to a temporary file at `fpath`. Then, `fpath` can be used by
+ a function that takes a file path. When the context manager exits
+ the temporary file is automatically cleaned up.
+
+ Args:
+ ocp_node (str): The node to create the temp file on.
+ cfg (dict): A data structure to be converted to YAML and
+ saved in a tempfile on the node.
+ Returns:
+ str: A path to a temporary file.
+ """
+ conn = g.rpyc_get_connection(ocp_node, user="root")
+ tmp = conn.modules.tempfile.NamedTemporaryFile()
+ try:
+ tmp.write(yaml.safe_dump(cfg))
+ tmp.flush()
+ filename = tmp.name
+ yield filename
+ finally:
+ tmp.close()
+
+
+def wait_for_claim(ocp_node, pvc_name, timeout=60, interval=2):
+ """Wait for a claim to be created & bound up to the given timeout.
+ """
+ for w in Waiter(timeout, interval):
+ sts = oc_get_pvc(ocp_node, pvc_name)
+ if sts and sts.get('status', {}).get('phase') == 'Bound':
+ return sts
+ raise AssertionError('wait_for_claim on pvc %s timed out'
+ % (pvc_name,))
+
+
+def wait_for_sc_unused(ocp_node, sc_name, timeout=60, interval=1):
+ for w in Waiter(timeout, interval):
+ sts = oc_get_all_pvs(ocp_node)
+ items = (sts and sts.get('items')) or []
+ if not any(i.get('spec', {}).get('storageClassName') == sc_name
+ for i in items):
+ return
+ raise AssertionError('wait_for_sc_unused on %s timed out'
+ % (sc_name,))
+
+
+def delete_storageclass(ocp_node, sc_name, timeout=60):
+ wait_for_sc_unused(ocp_node, sc_name, timeout)
+ oc_delete(ocp_node, 'storageclass', sc_name)
+
+
+class ClaimInfo(object):
+ """Helper class to organize data as we go from PVC to PV to
+ volume within heketi.
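+
+ Typical flow (illustrative sketch; 'ocp_node' stands for an OCP
+ master node and 'fast' for an existing storage class):
+ ci = ClaimInfo('pvc1', storageclass='fast', size=2)
+ ci.create_pvc(ocp_node)
+ ci.update_pvc_info(ocp_node)  # waits until the claim is Bound
+ ci.update_pv_info(ocp_node)   # resolves the backing PV and heketi volume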
+ """
+ pvc_name = None
+ vol_name = None
+ vol_uuid = None
+ sc_name = None
+ req = None
+ info = None
+ pv_info = None
+
+ def __init__(self, name, storageclass, size):
+ self.pvc_name = name
+ self.req = build_pvc(
+ name=self.pvc_name,
+ storageclass=storageclass,
+ size=size)
+
+ def create_pvc(self, ocp_node):
+ assert self.req
+ with temp_config(ocp_node, self.req) as tmpfn:
+ oc_create(ocp_node, tmpfn)
+
+ def update_pvc_info(self, ocp_node, timeout=60):
+ self.info = wait_for_claim(ocp_node, self.pvc_name, timeout)
+
+ def delete_pvc(self, ocp_node):
+ oc_delete(ocp_node, 'pvc', self.pvc_name)
+
+ def update_pv_info(self, ocp_node):
+ self.pv_info = oc_get_pv(ocp_node, self.volumeName)
+
+ @property
+ def volumeName(self):
+ return self.info.get('spec', {}).get('volumeName')
+
+ @property
+ def heketiVolumeName(self):
+ return self.pv_info.get('spec', {}).get('glusterfs', {}).get('path')
+
+
+def _heketi_vols(ocp_node, url):
+ # Unfortunately, getting json from heketi-cli only gets the ids
+ # To get a mapping of ids & volume names without a lot of
+ # back and forth between the test and the ocp_node we end up having
+ # to scrape the output of 'volume list'
+ # TODO: This probably should be made into a utility function
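+ # A line such as 'Id:<vol-id> Cluster:<cluster-id> Name:<vol-name>'
+ # (format assumed from heketi-cli output) is parsed below into
+ # {'id': <vol-id>, 'cluster': <cluster-id>, 'name': <vol-name>}.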
+ out = heketi_volume_list(ocp_node, url, json=False)
+ res = []
+ for line in out.splitlines():
+ if not line.startswith('Id:'):
+ continue
+ row = {}
+ for section in line.split():
+ if ':' in section:
+ key, value = section.split(':', 1)
+ row[key.lower()] = value.strip()
+ res.append(row)
+ return res
+
+
+def _heketi_name_id_map(vols):
+ return {vol['name']: vol['id'] for vol in vols}
+
+
+@ddt.ddt
+class TestVolumeMultiReq(HeketiClientSetupBaseClass):
+ def setUp(self):
+ super(TestVolumeMultiReq, self).setUp()
+ self.volcount = self._count_vols()
+
+ def wait_to_settle(self, timeout=120, interval=1):
+ # This was originally going to be a tearDown, but oddly enough
+ # tearDown is called *before* the cleanup functions, so it
+ # could never succeed. This needs to be added as a cleanup
+ # function first so that we run after our test's other cleanup
+ # functions but before we go on to the next test in order
+ # to prevent the async cleanups in kubernetes from stepping
+ # on the next test's "toes".
+ for w in Waiter(timeout):
+ nvols = self._count_vols()
+ if nvols == self.volcount:
+ return
+ raise AssertionError(
+ 'wait for volume count to settle timed out')
+
+ def _count_vols(self):
+ ocp_node = g.config['ocp_servers']['master'].keys()[0]
+ return len(_heketi_vols(ocp_node, self.heketi_server_url))
+
+ def test_simple_serial_vol_create(self):
+ """Test that serially creating PVCs causes heketi to add volumes.
+ """
+ self.addCleanup(self.wait_to_settle)
+ # TODO A nice thing to add to this test would be to also verify
+ # the gluster volumes also exist.
+ tname = make_unique_label(extract_method_name(self.id()))
+ ocp_node = g.config['ocp_servers']['master'].keys()[0]
+ # deploy a temporary storage class
+ sc = build_storage_class(
+ name=tname,
+ resturl=self.heketi_server_url)
+ with temp_config(ocp_node, sc) as tmpfn:
+ oc_create(ocp_node, tmpfn)
+ self.addCleanup(delete_storageclass, ocp_node, tname)
+ orig_vols = _heketi_name_id_map(
+ _heketi_vols(ocp_node, self.heketi_server_url))
+
+ # deploy a persistent volume claim
+ c1 = ClaimInfo(
+ name='-'.join((tname, 'pvc1')),
+ storageclass=tname,
+ size=2)
+ c1.create_pvc(ocp_node)
+ self.addCleanup(c1.delete_pvc, ocp_node)
+ c1.update_pvc_info(ocp_node)
+ # verify volume exists
+ self.assertTrue(c1.volumeName)
+ c1.update_pv_info(ocp_node)
+ self.assertTrue(c1.heketiVolumeName)
+
+ # verify this is a new volume to heketi
+ now_vols = _heketi_name_id_map(
+ _heketi_vols(ocp_node, self.heketi_server_url))
+ self.assertEqual(len(orig_vols) + 1, len(now_vols))
+ self.assertIn(c1.heketiVolumeName, now_vols)
+ self.assertNotIn(c1.heketiVolumeName, orig_vols)
+
+ # deploy a 2nd pvc
+ c2 = ClaimInfo(
+ name='-'.join((tname, 'pvc2')),
+ storageclass=tname,
+ size=2)
+ c2.create_pvc(ocp_node)
+ self.addCleanup(c2.delete_pvc, ocp_node)
+ c2.update_pvc_info(ocp_node)
+ # verify volume exists
+ self.assertTrue(c2.volumeName)
+ c2.update_pv_info(ocp_node)
+ self.assertTrue(c2.heketiVolumeName)
+
+ # verify this is a new volume to heketi
+ now_vols = _heketi_name_id_map(
+ _heketi_vols(ocp_node, self.heketi_server_url))
+ self.assertEqual(len(orig_vols) + 2, len(now_vols))
+ self.assertIn(c2.heketiVolumeName, now_vols)
+ self.assertNotIn(c2.heketiVolumeName, orig_vols)
+
+ def test_multiple_vol_create(self):
+ """Test creating two volumes via PVCs with no waiting between
+ the PVC requests.
+
+ We do wait after all the PVCs are submitted to get statuses.
+ """
+ self.addCleanup(self.wait_to_settle)
+ tname = make_unique_label(extract_method_name(self.id()))
+ ocp_node = g.config['ocp_servers']['master'].keys()[0]
+ # deploy a temporary storage class
+ sc = build_storage_class(
+ name=tname,
+ resturl=self.heketi_server_url)
+ with temp_config(ocp_node, sc) as tmpfn:
+ oc_create(ocp_node, tmpfn)
+ self.addCleanup(delete_storageclass, ocp_node, tname)
+
+ # deploy two persistent volume claims
+ c1 = ClaimInfo(
+ name='-'.join((tname, 'pvc1')),
+ storageclass=tname,
+ size=2)
+ c1.create_pvc(ocp_node)
+ self.addCleanup(c1.delete_pvc, ocp_node)
+ c2 = ClaimInfo(
+ name='-'.join((tname, 'pvc2')),
+ storageclass=tname,
+ size=2)
+ c2.create_pvc(ocp_node)
+ self.addCleanup(c2.delete_pvc, ocp_node)
+
+ # wait for pvcs/volumes to complete
+ c1.update_pvc_info(ocp_node)
+ c2.update_pvc_info(ocp_node)
+ now_vols = _heketi_name_id_map(
+ _heketi_vols(ocp_node, self.heketi_server_url))
+
+ # verify first volume exists
+ self.assertTrue(c1.volumeName)
+ c1.update_pv_info(ocp_node)
+ self.assertTrue(c1.heketiVolumeName)
+ # verify this volume in heketi
+ self.assertIn(c1.heketiVolumeName, now_vols)
+
+ # verify second volume exists
+ self.assertTrue(c2.volumeName)
+ c2.update_pv_info(ocp_node)
+ self.assertTrue(c2.heketiVolumeName)
+ # verify this volume in heketi
+ self.assertIn(c2.heketiVolumeName, now_vols)
+
+ # NOTE(jjm): I've noticed that on the system I'm using (RHEL7),
+ # with count=8 things start to back up a bit.
+ # I needed to increase some timeouts to get this to pass.
+ @ddt.data(2, 4, 8)
+ def test_threaded_multi_request(self, count):
+ """Test creating volumes via PVCs where the pvc create
+ commands are launched in parallel via threads.
+ """
+ self.addCleanup(self.wait_to_settle)
+ tname = make_unique_label(extract_method_name(self.id()))
+ ocp_node = g.config['ocp_servers']['master'].keys()[0]
+ # deploy a temporary storage class
+ sc = build_storage_class(
+ name=tname,
+ resturl=self.heketi_server_url)
+ with temp_config(ocp_node, sc) as tmpfn:
+ oc_create(ocp_node, tmpfn)
+ self.addCleanup(delete_storageclass, ocp_node, tname)
+
+ # prepare the persistent volume claims
+ claims = [
+ ClaimInfo(name='-'.join((tname, ('pvc%d' % n))),
+ storageclass=tname,
+ size=2)
+ for n in range(count)]
+
+ # create a "bunch" of pvc all at once
+ def create(ci):
+ ci.create_pvc(ocp_node)
+ self.addCleanup(ci.delete_pvc, ocp_node)
+ threads = [
+ threading.Thread(target=create, args=[c])
+ for c in claims]
+ for t in threads:
+ t.start()
+ for t in threads:
+ t.join()
+
+ for c in claims:
+ c.update_pvc_info(ocp_node, timeout=120)
+ now_vols = _heketi_name_id_map(
+ _heketi_vols(ocp_node, self.heketi_server_url))
+ for c in claims:
+ c.update_pv_info(ocp_node)
+ self.assertIn(c.heketiVolumeName, now_vols)