Diffstat (limited to 'tests')
-rw-r--r--  tests/functional/common/heketi/heketi_tests/test_check_entry.py                  |  24
-rw-r--r--  tests/functional/common/heketi/heketi_tests/test_create_heketi_volume_size_60.py |  34
-rw-r--r--  tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py         |  59
-rw-r--r--  tests/functional/common/heketi/heketi_tests/test_node_enable_disable.py          | 140
-rw-r--r--  tests/functional/common/heketi/heketi_tests/test_node_info.py                    |  18
-rw-r--r--  tests/functional/common/heketi/test_volume_deletion.py                           |   7
-rw-r--r--  tests/functional/common/heketi/test_volume_expansion_and_devices.py              |   3
-rw-r--r--  tests/functional/common/heketi/test_volume_multi_req.py                          |   2
-rw-r--r--  tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py |  42
-rw-r--r--  tests/functional/common/provisioning/test_pv_resize.py                           | 129
10 files changed, 400 insertions, 58 deletions
diff --git a/tests/functional/common/heketi/heketi_tests/test_check_entry.py b/tests/functional/common/heketi/heketi_tests/test_check_entry.py
index 16fbe085..47a0b3f2 100644
--- a/tests/functional/common/heketi/heketi_tests/test_check_entry.py
+++ b/tests/functional/common/heketi/heketi_tests/test_check_entry.py
@@ -1,13 +1,12 @@
-#!/usr/bin/python
-
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ConfigError
+
from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
from cnslibs.common.heketi_ops import (heketi_volume_create,
heketi_volume_list,
heketi_volume_delete)
-from cnslibs.common import heketi_ops, podcmd
-from cnslibs.common.openshift_ops import oc_rsh, get_ocp_gluster_pod_names
+from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names
+from cnslibs.common import podcmd


class TestHeketiVolume(HeketiClientSetupBaseClass):
@@ -51,11 +50,13 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
gluster_pod = get_ocp_gluster_pod_names(
self.heketi_client_node)[1]
- cmd = "oc rsync "+ gluster_pod +":/var/lib/heketi/fstab /tmp"
+ cmd = "oc rsync " + gluster_pod + ":/var/lib/heketi/fstab /tmp"
out = g.run(self.heketi_client_node, cmd)
self.assertTrue(out, ("Failed to copy the file"))
g.log.info("Copied the file")
- out = g.run_local("scp -r root@" +self.heketi_client_node+":/tmp/fstab /tmp/file.txt")
+ out = g.run_local(
+ "scp -r root@%s:/tmp/fstab "
+ "/tmp/file.txt" % self.heketi_client_node)
self.assertTrue(out, ("Failed to copy a file to /tmp/file.txt"))
g.log.info("Successfully copied to /tmp/file.txt")
out = g.run_local("ls /tmp")
@@ -67,7 +68,8 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
# Check if the brick is mounted
for i in path:
string_to_search = i
- rcode, rout, rerr = g.run_local('grep %s %s' % (string_to_search, "/tmp/file.txt"))
+ rcode, rout, rerr = g.run_local(
+ 'grep %s %s' % (string_to_search, "/tmp/file.txt"))
if rcode == 0:
g.log.info("Brick %s is mounted" % i)
datafile.close()
@@ -99,11 +101,12 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
gluster_pod = get_ocp_gluster_pod_names(
self.heketi_client_node)[0]
- cmd = "oc rsync "+ gluster_pod +":/var/lib/heketi/fstab /"
+ cmd = "oc rsync " + gluster_pod + ":/var/lib/heketi/fstab /"
out = g.run(self.heketi_client_node, cmd)
self.assertTrue(out, ("Failed to copy the file"))
g.log.info("Copied the file")
- out = g.run_local("scp -r root@" +self.heketi_client_node+":/fstab /tmp/newfile.txt")
+ out = g.run_local(
+ "scp -r root@%s:/fstab /tmp/newfile.txt" % self.heketi_client_node)
self.assertTrue(out, ("Failed to copy to the file newfile.txt"))
g.log.info("Successfully copied to the file newfile.txt")
out = g.run_local("ls /tmp")
@@ -115,7 +118,8 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
# Check if the brick is mounted
for i in path:
string_to_search = i
- rcode, rout, rerr = g.run_local('grep %s %s' % (string_to_search, "/tmp/newfile.txt"))
+ rcode, rout, rerr = g.run_local(
+ 'grep %s %s' % (string_to_search, "/tmp/newfile.txt"))
if rcode == 0:
raise ConfigError("Particular %s brick entry is found" % i)
datafile.close()
diff --git a/tests/functional/common/heketi/heketi_tests/test_create_heketi_volume_size_60.py b/tests/functional/common/heketi/heketi_tests/test_create_heketi_volume_size_60.py
index d871be30..29b39513 100644
--- a/tests/functional/common/heketi/heketi_tests/test_create_heketi_volume_size_60.py
+++ b/tests/functional/common/heketi/heketi_tests/test_create_heketi_volume_size_60.py
@@ -1,21 +1,19 @@
-#!/usr/bin/python
-
from __future__ import division
import math
from glusto.core import Glusto as g
-from glustolibs.gluster.exceptions import ConfigError
from glustolibs.gluster.volume_ops import get_volume_list, get_volume_info
+
+from cnslibs.common.exceptions import ExecutionError
from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
from cnslibs.common.heketi_ops import (heketi_node_list,
heketi_node_info,
heketi_volume_create,
heketi_volume_list,
heketi_volume_info,
- heketi_volume_delete,
- heketi_topology_info)
-from cnslibs.common import heketi_ops, podcmd
-from cnslibs.common.openshift_ops import oc_rsh, get_ocp_gluster_pod_names
+ heketi_volume_delete)
+from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names
+from cnslibs.common import podcmd


class TestHeketiVolume(HeketiClientSetupBaseClass):
@@ -25,7 +23,6 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
Get free space in each devices
"""
free_spaces = []
- device_list = []
heketi_node_id_list = heketi_node_list(
self.heketi_client_node, self.heketi_server_url)
for node_id in heketi_node_id_list:
@@ -36,10 +33,7 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
for device in node_info_dict["devices"]:
total_free_space += device["storage"]["free"]
free_spaces.append(total_free_space)
- min_free_space = min(free_spaces)
total_free_space = sum(free_spaces)/(1024**2)
- optimum_space = min_free_space / (1024 * 1024 * 10)
- free_space = int(math.floor(optimum_space))
total_free_space = int(math.floor(total_free_space))
return total_free_space, free_spaces
@@ -70,7 +64,9 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
mount_node = (out["mount"]["glusterfs"]
["device"].strip().split(":")[0])
hosts.append(mount_node)
- backup_volfile_server_list = (out["mount"]["glusterfs"]["options"] ["backup-volfile-servers"].strip().split(","))
+ backup_volfile_server_list = (
+ out["mount"]["glusterfs"]["options"][
+ "backup-volfile-servers"].strip().split(","))
for backup_volfile_server in backup_volfile_server_list:
hosts.append(backup_volfile_server)
for gluster_server in g.config["gluster_servers"].keys():
@@ -81,9 +77,9 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
# Retrieve heketi volume info
g.log.info("Retrieving heketi volume info")
- out = heketi_ops.heketi_volume_info(self.heketi_client_node,
- self.heketi_server_url,
- volume_id, json=True)
+ out = heketi_volume_info(
+ self.heketi_client_node, self.heketi_server_url, volume_id,
+ json=True)
self.assertTrue(out, ("Failed to get heketi volume info"))
g.log.info("Successfully got the heketi volume info")
name = out["name"]
@@ -187,9 +183,11 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
# Compare the free size before and after deleting volume
g.log.info("Comparing the free space before and after"
" deleting volume")
- self.assertTrue(free_space_after_creating_vol < free_space_after_deleting_vol)
+ self.assertTrue(
+ free_space_after_creating_vol < free_space_after_deleting_vol)
g.log.info("Volume successfully deleted and space is"
" reallocated. Free space after creating"
" volume %s, Free space after deleting"
- " volume %s" % ((free_space_after_creating_vol),
- (free_space_after_deleting_vol)))
+ " volume %s" % (
+ free_space_after_creating_vol,
+ free_space_after_deleting_vol))
diff --git a/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py b/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py
index 38f6c4e9..c28f455b 100644
--- a/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py
+++ b/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py
@@ -1,19 +1,21 @@
-#!/usr/bin/python
-
from glustolibs.gluster.exceptions import ExecutionError, ConfigError
from glusto.core import Glusto as g
from glustolibs.gluster.volume_ops import get_volume_list, get_volume_info
+
from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
from cnslibs.common.heketi_ops import (heketi_volume_create,
heketi_volume_list,
heketi_volume_info,
- heketi_volume_delete,
+ heketi_blockvolume_create,
+ heketi_blockvolume_delete,
heketi_cluster_list,
heketi_cluster_delete,
+ heketi_node_info,
heketi_node_list,
heketi_node_delete)
-from cnslibs.common import heketi_ops, podcmd
-from cnslibs.common.openshift_ops import oc_rsh, get_ocp_gluster_pod_names
+from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names
+from cnslibs.common import podcmd
+
class TestHeketiVolume(HeketiClientSetupBaseClass):
"""
@@ -39,7 +41,6 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
g.log.info("Heketi volume successfully created" % out)
volume_id = out["bricks"][0]["volume"]
self.addCleanup(self.delete_volumes, volume_id)
- name = out["name"]
g.log.info("List heketi volumes")
volumes = heketi_volume_list(self.heketi_client_node,
@@ -85,9 +86,9 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
self.addCleanup(self.delete_volumes, volume_id)
g.log.info("Retrieving heketi volume info")
- out = heketi_ops.heketi_volume_info(self.heketi_client_node,
- self.heketi_server_url,
- volume_id, json=True)
+ out = heketi_volume_info(
+ self.heketi_client_node, self.heketi_server_url, volume_id,
+ json=True)
self.assertTrue(out, ("Failed to get heketi volume info"))
g.log.info("Successfully got the heketi volume info")
name = out["name"]
@@ -113,7 +114,7 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
volumes = heketi_volume_list(self.heketi_client_node,
self.heketi_server_url,
json=True)
- if (len(volumes["volumes"])== 0):
+ if (len(volumes["volumes"]) == 0):
g.log.info("Creating heketi volume")
out = heketi_volume_create(self.heketi_client_node,
self.heketi_server_url,
@@ -168,7 +169,7 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
g.log.info("Successfully got the list of nodes")
for node_id in heketi_node_id_list:
g.log.info("Retrieve the node info")
- node_info_dict = heketi_ops.heketi_node_info(
+ node_info_dict = heketi_node_info(
self.heketi_client_node, self.heketi_server_url,
node_id, json=True)
if not(node_info_dict["devices"][1]["storage"]["used"]):
@@ -196,3 +197,39 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
self.heketi_server_url)
self.assertTrue(node_list, ("Failed to list heketi nodes"))
g.log.info("Successfully got the list of nodes")
+
+ def test_blockvolume_create_no_free_space(self):
+ """Test case CNS-550"""
+
+ # Create first small blockvolume
+ blockvol1 = heketi_blockvolume_create(
+ self.heketi_client_node, self.heketi_server_url, 1, json=True)
+ self.assertTrue(blockvol1, "Failed to create block volume.")
+ self.addCleanup(
+ heketi_blockvolume_delete, self.heketi_client_node,
+ self.heketi_server_url, blockvol1['id'])
+
+ # Get info about block hosting volume available space
+ file_volumes = heketi_volume_list(
+ self.heketi_client_node, self.heketi_server_url, json=True)
+ self.assertTrue(file_volumes)
+ max_freesize = 0
+ for vol_id in file_volumes["volumes"]:
+ vol = heketi_volume_info(
+ self.heketi_client_node, self.heketi_server_url,
+ vol_id, json=True)
+ current_freesize = vol.get("blockinfo", {}).get("freesize", 0)
+ if current_freesize > max_freesize:
+ max_freesize = current_freesize
+ self.assertGreater(max_freesize, 0)
+
+ # Try to create blockvolume with size bigger than available
+ too_big_vol_size = max_freesize + 1
+ blockvol2 = heketi_blockvolume_create(
+ self.heketi_client_node, self.heketi_server_url,
+ too_big_vol_size, json=True)
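+        # Clean up the too-big volume if it was unexpectedly created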
+ if blockvol2 and blockvol2.get('id'):
+ self.addCleanup(
+ heketi_blockvolume_delete, self.heketi_client_node,
+ self.heketi_server_url, blockvol2['id'])
+ self.assertFalse(blockvol2, 'Volume unexpectedly was created')
diff --git a/tests/functional/common/heketi/heketi_tests/test_node_enable_disable.py b/tests/functional/common/heketi/heketi_tests/test_node_enable_disable.py
new file mode 100644
index 00000000..bce565c4
--- /dev/null
+++ b/tests/functional/common/heketi/heketi_tests/test_node_enable_disable.py
@@ -0,0 +1,140 @@
+"""Test cases to disable and enable node in heketi."""
+import json
+
+from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common.heketi_ops import (heketi_node_enable,
+ heketi_node_info,
+ heketi_node_disable,
+ heketi_node_list,
+ heketi_volume_create)
+from glusto.core import Glusto as g
+
+
+class TestHeketiNodeState(HeketiClientSetupBaseClass):
+ """Test node enable and disable functionality."""
+
+ def enable_node(self, node_id):
+ """
+ Enable node through heketi-cli.
+
+ :param node_id: str node ID
+ """
+ out = heketi_node_enable(self.heketi_client_node,
+ self.heketi_server_url,
+ node_id)
+
+ self.assertNotEqual(out, False,
+ "Failed to enable node of"
+ " id %s" % node_id)
+
+ def disable_node(self, node_id):
+ """
+ Disable node through heketi-cli.
+
+ :param node_id: str node ID
+ """
+ out = heketi_node_disable(self.heketi_client_node,
+ self.heketi_server_url,
+ node_id)
+
+ self.assertNotEqual(out, False,
+ "Failed to disable node of"
+ " id %s" % node_id)
+
+ def get_node_info(self, node_id):
+ """
+ Get node information from node_id.
+
+ :param node_id: str node ID
+        :return node_info: dict node information
+ """
+ node_info = heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+ self.assertNotEqual(node_info, False,
+ "Node info on %s failed" % node_id)
+ return node_info
+
+ def get_online_nodes(self, node_list):
+ """
+ Get online nodes information from node_list.
+
+ :param node_list: list of node ID's
+ :return: list node information of online nodes
+ """
+ online_hosts_info = []
+
+ for node in node_list:
+ node_info = self.get_node_info(node)
+ if node_info["state"] == "online":
+ online_hosts_info.append(node_info)
+
+ return online_hosts_info
+
+ def test_node_state(self):
+ """
+ Test node enable and disable functionality.
+
+        If we have 4 gluster servers and disable 1 of the 4 nodes in
+        heketi, volume creation should still succeed.
+
+        If we disable 2 of the 4 nodes via heketi-cli, volume creation
+        should fail.
+
+        If we then enable one gluster server back, volume creation
+        should succeed again.
+ """
+ g.log.info("Disable node in heketi")
+ node_list = heketi_node_list(self.heketi_client_node,
+ self.heketi_server_url)
+ self.assertTrue(node_list, "Failed to list heketi nodes")
+ g.log.info("Successfully got the list of nodes")
+ online_hosts = self.get_online_nodes(node_list)
+
+ if len(online_hosts) < 3:
+ raise self.skipTest(
+ "This test can run only if online hosts are more "
+ "than 2")
+ # if we have n nodes, disable n-3 nodes
+ for node_info in online_hosts[3:]:
+ node_id = node_info["id"]
+ g.log.info("going to disable node id %s", node_id)
+ self.disable_node(node_id)
+ self.addCleanup(self.enable_node, node_id)
+
+ vol_size = 1
+ # create volume when 3 nodes are online
+ vol_info = heketi_volume_create(self.heketi_client_node,
+ self.heketi_server_url, vol_size,
+ json=True)
+ self.assertTrue(vol_info, (
+ "Failed to create heketi volume of size %d" % vol_size))
+ self.addCleanup(self.delete_volumes, vol_info['id'])
+
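+        # Disable one more node, leaving only 2 online; volume creation
+        # should then fail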
+ node_id = online_hosts[0]['id']
+ g.log.info("going to disable node id %s", node_id)
+ self.disable_node(node_id)
+ self.addCleanup(self.enable_node, node_id)
+
+ # try to create a volume, volume creation should fail
+ ret, out, err = heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url,
+ vol_size, raw_cli_output=True)
+ if ret == 0:
+ out_json = json.loads(out)
+ self.addCleanup(self.delete_volumes, out_json["id"])
+ self.assertNotEqual(ret, 0,
+ ("Volume creation did not fail ret- %s "
+ "out- %s err- %s" % (ret, out, err)))
+
+ g.log.info("Volume creation failed as expected, err- %s", err)
+ # enable node
+ self.enable_node(node_id)
+
+ # create volume when node is enabled
+ vol_info = heketi_volume_create(self.heketi_client_node,
+ self.heketi_server_url, vol_size,
+ json=True)
+ self.assertTrue(vol_info, (
+ "Failed to create heketi volume of size %d" % vol_size))
+ self.addCleanup(self.delete_volumes, vol_info['id'])
diff --git a/tests/functional/common/heketi/heketi_tests/test_node_info.py b/tests/functional/common/heketi/heketi_tests/test_node_info.py
index 81462906..26ac56f7 100644
--- a/tests/functional/common/heketi/heketi_tests/test_node_info.py
+++ b/tests/functional/common/heketi/heketi_tests/test_node_info.py
@@ -1,13 +1,10 @@
-#!/usr/bin/python
-
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
-from glustolibs.gluster.peer_ops import (get_pool_list)
+from glustolibs.gluster.peer_ops import get_pool_list
+
from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
-from cnslibs.common.heketi_ops import (heketi_node_info,
- heketi_node_list)
from cnslibs.common import heketi_ops, podcmd
-from cnslibs.common.openshift_ops import oc_rsh, get_ocp_gluster_pod_names
+from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names


class TestHeketiVolume(HeketiClientSetupBaseClass):
@@ -25,7 +22,7 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
# List all list
ip = []
g.log.info("Listing the node id")
- heketi_node_id_list = heketi_node_list(
+ heketi_node_id_list = heketi_ops.heketi_node_list(
self.heketi_client_node, self.heketi_server_url)
g.log.info("Successfully listed the node")
@@ -61,8 +58,9 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
hostname.append(pool["hostname"])
if (len(heketi_node_id_list) != len(list_of_pools)):
- raise ExecutionError("Heketi volume list %s is not equal"
- " to gluster volume list %s" % ((ip), (hostname)))
+ raise ExecutionError(
+ "Heketi volume list %s is not equal "
+ "to gluster volume list %s" % ((ip), (hostname)))
g.log.info("The node IP's from node info and list"
" is : %s/n and pool list from gluster"
" pods/nodes is %s" % ((ip), (hostname)))
@@ -74,7 +72,7 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
# List all list
g.log.info("Listing the node id")
- heketi_node_id_list = heketi_node_list(
+ heketi_node_id_list = heketi_ops.heketi_node_list(
self.heketi_client_node, self.heketi_server_url)
self.assertTrue(heketi_node_id_list, ("Node Id list is empty."))
g.log.info("Successfully listed the node")
diff --git a/tests/functional/common/heketi/test_volume_deletion.py b/tests/functional/common/heketi/test_volume_deletion.py
index be7b2e61..8b0adf98 100644
--- a/tests/functional/common/heketi/test_volume_deletion.py
+++ b/tests/functional/common/heketi/test_volume_deletion.py
@@ -1,10 +1,6 @@
from __future__ import division
-import math
-import unittest
-from glusto.core import Glusto as g
-
-from cnslibs.common.exceptions import ExecutionError, ConfigError
+from cnslibs.common.exceptions import ExecutionError
from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
from cnslibs.common import heketi_ops
@@ -110,4 +106,3 @@ class TestVolumeDeleteTestCases(HeketiClientSetupBaseClass):
if not heketidbexists:
raise ExecutionError(
"Warning: heketidbstorage doesn't exist in list of volumes")
-
diff --git a/tests/functional/common/heketi/test_volume_expansion_and_devices.py b/tests/functional/common/heketi/test_volume_expansion_and_devices.py
index e1d912c3..17ed5d9d 100644
--- a/tests/functional/common/heketi/test_volume_expansion_and_devices.py
+++ b/tests/functional/common/heketi/test_volume_expansion_and_devices.py
@@ -6,7 +6,7 @@ import unittest
from glusto.core import Glusto as g
from glustolibs.gluster import volume_ops, rebalance_ops
-from cnslibs.common.exceptions import ExecutionError, ConfigError
+from cnslibs.common.exceptions import ExecutionError
from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names
from cnslibs.common import heketi_ops, podcmd
@@ -696,4 +696,3 @@ class TestVolumeExpansionAndDevicesTestCases(HeketiClientSetupBaseClass):
free_space_after_deletion > free_space_after_expansion,
"Free space is not reclaimed after volume deletion of %s"
% volume_id)
-
diff --git a/tests/functional/common/heketi/test_volume_multi_req.py b/tests/functional/common/heketi/test_volume_multi_req.py
index 957476eb..244131e9 100644
--- a/tests/functional/common/heketi/test_volume_multi_req.py
+++ b/tests/functional/common/heketi/test_volume_multi_req.py
@@ -397,6 +397,7 @@ class TestVolumeMultiReq(HeketiClientSetupBaseClass):
# make this a condition
done = threading.Event()
short_tc_name = "volumes-concurrently"
+
def background_ops():
subname = make_unique_label(short_tc_name)
for i, w in enumerate(Waiter(60 * 60)):
@@ -414,6 +415,7 @@ class TestVolumeMultiReq(HeketiClientSetupBaseClass):
if done.is_set():
break
failures = []
+
def checked_background_ops():
try:
background_ops()
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
index 2e0268cd..76e1d317 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
@@ -65,7 +65,8 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
self.addCleanup(
wait_for_resource_absence, self.node, 'pvc', pvc_name)
for pvc_name in pvc_names:
- self.addCleanup(oc_delete, self.node, 'pvc', pvc_name)
+ self.addCleanup(oc_delete, self.node, 'pvc', pvc_name,
+ raise_on_absence=False)
# Wait for PVCs to be in bound state
for pvc_name in pvc_names:
@@ -262,3 +263,42 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
self.assertFalse(err, "Error output is not empty: \n%s" % err)
self.assertEqual(ret, 0, "Failed to exec '%s' command." % cmd)
self.assertTrue(out, "Command '%s' output is empty." % cmd)
+
+ def test_dynamic_provisioning_glusterblock_heketidown_pvc_delete(self):
+ """ Delete PVC's when heketi is down CNS-439 """
+
+ # Create storage class and secret objects
+ self._create_storage_class()
+
+ self.pvc_name_list = self._create_and_wait_for_pvcs(
+ 1, 'pvc-heketi-down', 3)
+
+ # remove heketi-pod
+ scale_dc_pod_amount_and_wait(self.ocp_client[0],
+ self.heketi_dc_name,
+ 0,
+ self.cns_project_name)
+ try:
+ # delete pvc
+ for pvc in self.pvc_name_list:
+ oc_delete(self.ocp_client[0], 'pvc', pvc)
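+            # With heketi down the PVCs cannot actually be removed, so
+            # waiting for their absence is expected to time out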
+ for pvc in self.pvc_name_list:
+ with self.assertRaises(ExecutionError):
+ wait_for_resource_absence(
+ self.ocp_client[0], 'pvc', pvc,
+ interval=3, timeout=30)
+ finally:
+ # bring back heketi-pod
+ scale_dc_pod_amount_and_wait(self.ocp_client[0],
+ self.heketi_dc_name,
+ 1,
+ self.cns_project_name)
+
+ # verify PVC's are deleted
+ for pvc in self.pvc_name_list:
+ wait_for_resource_absence(self.ocp_client[0], 'pvc',
+ pvc,
+ interval=1, timeout=120)
+
+ # create a new PVC
+ self._create_and_wait_for_pvc()
diff --git a/tests/functional/common/provisioning/test_pv_resize.py b/tests/functional/common/provisioning/test_pv_resize.py
new file mode 100644
index 00000000..1e92efe9
--- /dev/null
+++ b/tests/functional/common/provisioning/test_pv_resize.py
@@ -0,0 +1,129 @@
+import ddt
+from cnslibs.common.cns_libs import (
+ enable_pvc_resize)
+from cnslibs.common.heketi_ops import (
+ verify_volume_name_prefix)
+from cnslibs.common.openshift_ops import (
+ resize_pvc,
+ get_pod_name_from_dc,
+ get_pv_name_from_pvc,
+ oc_create_app_dc_with_io,
+ oc_create_pvc,
+ oc_create_secret,
+ oc_create_sc,
+ oc_delete,
+ oc_rsh,
+ oc_version,
+ scale_dc_pod_amount_and_wait,
+ verify_pv_size,
+ verify_pvc_size,
+ verify_pvc_status_is_bound,
+ wait_for_pod_be_ready,
+ wait_for_resource_absence)
+from cnslibs.cns.cns_baseclass import CnsBaseClass
+from glusto.core import Glusto as g
+
+
+@ddt.ddt
+class TestPvResizeClass(CnsBaseClass):
+    '''
+    Class that contains test cases for
+    PV resize.
+    '''
+ @classmethod
+ def setUpClass(cls):
+ super(TestPvResizeClass, cls).setUpClass()
+ version = oc_version(cls.ocp_master_node[0])
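+        # PV resize is not available before OCP 3.9, so only enable it there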
+ if any(v in version for v in ("3.6", "3.7", "3.8")):
+ return
+ enable_pvc_resize(cls.ocp_master_node[0])
+
+ def setUp(self):
+ super(TestPvResizeClass, self).setUp()
+ version = oc_version(self.ocp_master_node[0])
+ if any(v in version for v in ("3.6", "3.7", "3.8")):
+ msg = ("pv resize is not available in openshift "
+ "version %s " % version)
+ g.log.error(msg)
+ raise self.skipTest(msg)
+
+ def _create_storage_class(self, volname_prefix=False):
+ sc = self.cns_storage_class['storage_class1']
+ secret = self.cns_secret['secret1']
+
+ # create secret
+ self.secret_name = oc_create_secret(
+ self.ocp_master_node[0],
+ namespace=secret['namespace'],
+ data_key=self.heketi_cli_key,
+ secret_type=secret['type'])
+ self.addCleanup(
+ oc_delete, self.ocp_master_node[0], 'secret', self.secret_name)
+
+ # create storageclass
+ self.sc_name = oc_create_sc(
+ self.ocp_master_node[0], provisioner='kubernetes.io/glusterfs',
+ resturl=sc['resturl'], restuser=sc['restuser'],
+ secretnamespace=sc['secretnamespace'],
+ secretname=self.secret_name,
+ allow_volume_expansion=True,
+ **({"volumenameprefix": sc['volumenameprefix']}
+ if volname_prefix else {})
+ )
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'sc', self.sc_name)
+
+ return self.sc_name
+
+ @ddt.data(False, True)
+ def test_pv_resize_with_prefix_for_name(self, volname_prefix=False):
+ """testcases CNS-1037 and CNS-1038 """
+ dir_path = "/mnt/"
+ self._create_storage_class(volname_prefix)
+ node = self.ocp_master_node[0]
+
+ # Create PVC
+ pvc_name = oc_create_pvc(node, self.sc_name, pvc_size=1)
+ self.addCleanup(wait_for_resource_absence,
+ node, 'pvc', pvc_name)
+ self.addCleanup(oc_delete, node, 'pvc', pvc_name)
+ verify_pvc_status_is_bound(node, pvc_name)
+
+ # Create DC with POD and attached PVC to it.
+ dc_name = oc_create_app_dc_with_io(node, pvc_name)
+ self.addCleanup(oc_delete, node, 'dc', dc_name)
+ self.addCleanup(scale_dc_pod_amount_and_wait,
+ node, dc_name, 0)
+
+ pod_name = get_pod_name_from_dc(node, dc_name)
+ wait_for_pod_be_ready(node, pod_name)
+ if volname_prefix:
+ storage_class = self.cns_storage_class['storage_class1']
+ ret = verify_volume_name_prefix(node,
+ storage_class['volumenameprefix'],
+ storage_class['secretnamespace'],
+ pvc_name, self.heketi_server_url)
+ self.assertTrue(ret, "verify volnameprefix failed")
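+        # Write ~100MB; this should fit in the 1Gi volume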
+ cmd = ("dd if=/dev/urandom of=%sfile "
+ "bs=100K count=1000") % dir_path
+ ret, out, err = oc_rsh(node, pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, node))
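+        # Write ~1GB more; this should exceed the 1Gi volume and fail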
+ cmd = ("dd if=/dev/urandom of=%sfile2 "
+ "bs=100K count=10000") % dir_path
+ ret, out, err = oc_rsh(node, pod_name, cmd)
+ self.assertNotEqual(ret, 0, " This IO did not fail as expected "
+ "command %s on %s" % (cmd, node))
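+        # Expand the PVC to 2Gi and verify both PVC and PV report the new size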
+ pvc_size = 2
+ resize_pvc(node, pvc_name, pvc_size)
+ verify_pvc_size(node, pvc_name, pvc_size)
+ pv_name = get_pv_name_from_pvc(node, pvc_name)
+ verify_pv_size(node, pv_name, pvc_size)
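+        # Recreate the pod so the expanded volume is remounted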
+ oc_delete(node, 'pod', pod_name)
+ wait_for_resource_absence(node, 'pod', pod_name)
+ pod_name = get_pod_name_from_dc(node, dc_name)
+ wait_for_pod_be_ready(node, pod_name)
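+        # Write ~500MB; this should now fit in the expanded volume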
+ cmd = ("dd if=/dev/urandom of=%sfile_new "
+ "bs=50K count=10000") % dir_path
+ ret, out, err = oc_rsh(node, pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, node))