Diffstat (limited to 'tests')
-rw-r--r--  tests/cns_tests_sample_config.yml | 11
-rw-r--r--  tests/functional/common/gluster_block/test_restart_gluster_block.py | 4
-rw-r--r--  tests/functional/common/heketi/heketi_tests/test_disabling_device.py | 2
-rw-r--r--  tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py | 41
-rw-r--r--  tests/functional/common/heketi/heketi_tests/test_heketi_enable_device.py | 245
-rw-r--r--  tests/functional/common/heketi/heketi_tests/test_node_enable_disable.py | 4
-rw-r--r--  tests/functional/common/heketi/heketi_tests/test_node_info.py | 4
-rw-r--r--  tests/functional/common/heketi/test_block_volumes_heketi.py | 7
-rw-r--r--  tests/functional/common/heketi/test_check_entries.py | 4
-rw-r--r--  tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py | 4
-rw-r--r--  tests/functional/common/heketi/test_device_info.py | 2
-rw-r--r--  tests/functional/common/heketi/test_heketi_device_operations.py | 287
-rw-r--r--  tests/functional/common/heketi/test_heketi_metrics.py | 7
-rw-r--r--  tests/functional/common/heketi/test_heketi_volume_operations.py | 24
-rw-r--r--  tests/functional/common/heketi/test_volume_creation.py | 2
-rw-r--r--  tests/functional/common/heketi/test_volume_deletion.py | 4
-rw-r--r--  tests/functional/common/heketi/test_volume_expansion_and_devices.py | 20
-rw-r--r--  tests/functional/common/heketi/test_volume_multi_req.py | 4
-rw-r--r--  tests/functional/common/provisioning/test_pv_resize.py | 5
-rw-r--r--  tests/functional/common/provisioning/test_storage_class_cases.py | 6
-rw-r--r--  tests/functional/common/test_heketi_restart.py | 4
-rw-r--r--  tests/functional/common/test_node_restart.py | 150
22 files changed, 506 insertions(+), 335 deletions(-)
diff --git a/tests/cns_tests_sample_config.yml b/tests/cns_tests_sample_config.yml
index 4e1c7919..8d4a73b1 100644
--- a/tests/cns_tests_sample_config.yml
+++ b/tests/cns_tests_sample_config.yml
@@ -38,19 +38,12 @@ gluster_servers:
cns:
setup:
- routing_config: "cloudapps.mystorage.com"
- insecure_registry: "INSECURE_REGISTRY='--insecure-registry registry.access.redhat.com"
- add_registry: "ADD_REGISTRY='--add-registry registry.access.redhat.com"
cns_project_name: "storage-project"
cns_username: "test-admin"
cns_password:
- # 'deployment_type' can be crs_heketi_outside_openshift|crs_heketi_inside_openshift|cns
- deployment_type:
- # 'executor' can be ssh|kube|mock
- executor: ssh
- executor_user: root
- executor_port: 22
+ # 'deployment_type' can be cns|crs
+ deployment_type: 'cns'
trusted_storage_pool_list:
- [gluster_server1, gluster_server2]
heketi_config:
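
The sample config above now accepts only 'cns' or 'crs' for deployment_type and drops the executor_* keys. A minimal sketch of a loader-side check for the narrowed option, assuming the key nests under the 'cns' section as in the sample (the loader function itself is illustrative, not part of cnslibs):

    import yaml

    VALID_DEPLOYMENT_TYPES = ("cns", "crs")

    def load_cns_config(path):
        # Parse the sample config and reject deployment types that the
        # simplified schema no longer supports.
        with open(path) as f:
            config = yaml.safe_load(f)
        deployment_type = config["cns"]["deployment_type"]
        if deployment_type not in VALID_DEPLOYMENT_TYPES:
            raise ValueError("deployment_type must be one of %s, got %r"
                             % (VALID_DEPLOYMENT_TYPES, deployment_type))
        return config
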
diff --git a/tests/functional/common/gluster_block/test_restart_gluster_block.py b/tests/functional/common/gluster_block/test_restart_gluster_block.py
index c46460a0..aa2b1e62 100644
--- a/tests/functional/common/gluster_block/test_restart_gluster_block.py
+++ b/tests/functional/common/gluster_block/test_restart_gluster_block.py
@@ -1,4 +1,4 @@
-from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common.heketi_libs import HeketiBaseClass
from cnslibs.common.heketi_ops import (
heketi_blockvolume_create,
heketi_blockvolume_delete)
@@ -9,7 +9,7 @@ from cnslibs.common.openshift_ops import (
wait_for_resource_absence)
-class TestRestartGlusterBlockPod(HeketiClientSetupBaseClass):
+class TestRestartGlusterBlockPod(HeketiBaseClass):
def test_restart_gluster_block_provisioner_pod(self):
# CNS-542 - Restart gluster-block provisioner pod
diff --git a/tests/functional/common/heketi/heketi_tests/test_disabling_device.py b/tests/functional/common/heketi/heketi_tests/test_disabling_device.py
index 41cd560c..43d222a2 100644
--- a/tests/functional/common/heketi/heketi_tests/test_disabling_device.py
+++ b/tests/functional/common/heketi/heketi_tests/test_disabling_device.py
@@ -8,7 +8,7 @@ from cnslibs.common import openshift_ops
from cnslibs.common import podcmd
-class TestDisableHeketiDevice(heketi_libs.HeketiClientSetupBaseClass):
+class TestDisableHeketiDevice(heketi_libs.HeketiBaseClass):
@podcmd.GlustoPod()
def test_create_volumes_enabling_and_disabling_heketi_devices(self):
"""Test case CNS-763"""
diff --git a/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py b/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py
index fcc00535..261f5cc2 100644
--- a/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py
+++ b/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py
@@ -1,11 +1,11 @@
import time
-from glustolibs.gluster.exceptions import ExecutionError
from glusto.core import Glusto as g
from glustolibs.gluster.volume_ops import get_volume_list, get_volume_info
import six
-from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common.exceptions import ExecutionError
+from cnslibs.common.heketi_libs import HeketiBaseClass
from cnslibs.common.heketi_ops import (heketi_volume_create,
heketi_volume_list,
heketi_volume_info,
@@ -20,7 +20,7 @@ from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names
from cnslibs.common import podcmd
-class TestHeketiVolume(HeketiClientSetupBaseClass):
+class TestHeketiVolume(HeketiBaseClass):
"""
Class to test heketi volume create
"""
@@ -64,10 +64,11 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
g.log.info("Successfully got the volumes list")
# Check that the volume counts are equal
- if (len(volumes["volumes"]) != len(out)):
- raise ExecutionError("Heketi volume list %s is"
- " not equal to gluster"
- " volume list %s" % ((volumes), (out)))
+ self.assertEqual(
+ len(volumes["volumes"]), len(out),
+ "Lengths of gluster '%s' and heketi '%s' volume lists are "
+ "not equal." % (out, volumes)
+ )
g.log.info("Heketi volumes list %s and"
" gluster volumes list %s" % ((volumes), (out)))
@@ -141,11 +142,11 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
g.log.info("Trying to delete a heketi cluster"
" which contains volumes and/or nodes:"
" Expected to fail")
- out = heketi_cluster_delete(self.heketi_client_node,
- self.heketi_server_url,
- cluster_id)
- self.assertFalse(out, ("Successfully deleted a "
- "cluster %s" % cluster_id))
+ self.assertRaises(
+ ExecutionError,
+ heketi_cluster_delete,
+ self.heketi_client_node, self.heketi_server_url, cluster_id,
+ )
g.log.info("Expected result: Unable to delete cluster %s"
" because it contains volumes "
" and/or nodes" % cluster_id)
@@ -194,8 +195,10 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
# Try to delete the node by its ID
g.log.info("Trying to delete the node which contains devices in it. "
"Expecting failure.")
- out = heketi_node_delete(self.heketi_client_node, heketi_url, node_id)
- self.assertFalse(out, "Node '%s' got unexpectedly deleted." % node_id)
+ self.assertRaises(
+ ExecutionError,
+ heketi_node_delete,
+ self.heketi_client_node, heketi_url, node_id)
# Make sure our node hasn't been deleted
g.log.info("Listing heketi node list")
@@ -244,9 +247,13 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
# Try to create blockvolume with size bigger than available
too_big_vol_size = max_freesize + 1
- blockvol2 = heketi_blockvolume_create(
- self.heketi_client_node, self.heketi_server_url,
- too_big_vol_size, json=True)
+ try:
+ blockvol2 = heketi_blockvolume_create(
+ self.heketi_client_node, self.heketi_server_url,
+ too_big_vol_size, json=True)
+ except ExecutionError:
+ return
+
if blockvol2 and blockvol2.get('id'):
self.addCleanup(
heketi_blockvolume_delete, self.heketi_client_node,
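
The rewritten assertions above assume that the heketi_ops helpers now raise cnslibs' ExecutionError on CLI failure instead of returning False, which is why the negative cases switch to assertRaises or try/except. A sketch of that convention, with run_command standing in for the real executor:

    from cnslibs.common.exceptions import ExecutionError

    def heketi_cluster_delete(client_node, server_url, cluster_id):
        # Raise instead of returning False, so callers can write
        # self.assertRaises(ExecutionError, heketi_cluster_delete, ...).
        cmd = "heketi-cli --server %s cluster delete %s" % (
            server_url, cluster_id)
        ret, out, err = run_command(client_node, cmd)  # assumed transport
        if ret != 0:
            raise ExecutionError("'%s' failed on %s: %s"
                                 % (cmd, client_node, err))
        return out
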
diff --git a/tests/functional/common/heketi/heketi_tests/test_heketi_enable_device.py b/tests/functional/common/heketi/heketi_tests/test_heketi_enable_device.py
deleted file mode 100644
index b5a8ef32..00000000
--- a/tests/functional/common/heketi/heketi_tests/test_heketi_enable_device.py
+++ /dev/null
@@ -1,245 +0,0 @@
-"""Test cases to enable device in heketi."""
-import json
-
-from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
-from cnslibs.common.heketi_ops import (heketi_node_enable,
- heketi_node_info,
- heketi_node_disable,
- heketi_node_list,
- heketi_volume_create,
- heketi_device_disable,
- heketi_device_info,
- heketi_device_enable)
-from glusto.core import Glusto as g
-
-
-class TestHeketiDeviceEnable(HeketiClientSetupBaseClass):
- """Test device enable functionality from heketi-cli."""
-
- def enable_node(self, node_id):
- """
- Enable node through heketi-cli.
-
- :param node_id: str node ID
- """
- if node_id is None:
- return
- out = heketi_node_enable(self.heketi_client_node,
- self.heketi_server_url,
- node_id)
- self.assertNotEqual(out, False,
- "Failed to enable node of"
- " id %s" % node_id)
-
- def disable_node(self, node_id):
- """
- Disable node through heketi-cli.
-
- :param node_id: str node ID
- """
- if node_id is None:
- return
- out = heketi_node_disable(self.heketi_client_node,
- self.heketi_server_url,
- node_id)
- self.assertNotEqual(out, False,
- "Failed to disable node of"
- " id %s" % node_id)
-
- def get_node_info(self, node_id):
- """
- Get node information from node_id.
-
- :param node_id: str node ID
- :return node_info: list node information
- """
- if node_id is None:
- return
- node_info = heketi_node_info(
- self.heketi_client_node, self.heketi_server_url,
- node_id, json=True)
- self.assertNotEqual(node_info, False,
- "Node info on %s failed" % node_id)
- return node_info
-
- def get_online_nodes(self, node_list):
- """
- Get online nodes information from node_list.
-
- :param node_list: list of node ID's
- :return: list node information of online nodes
- """
- online_hosts_info = []
-
- for node in node_list:
- node_info = self.get_node_info(node)
- if node_info["state"] == "online":
- online_hosts_info.append(node_info)
-
- return online_hosts_info
-
- def disable_device(self, device_id):
- """
- Disable device from heketi.
-
- :param device_id: str device ID to be disabled
- """
- if device_id is None:
- return
- out = heketi_device_disable(
- self.heketi_client_node, self.heketi_server_url,
- device_id)
-
- self.assertNotEqual(out, False, "Failed to disable device of"
- " id %s" % device_id)
-
- def enable_device(self, device_id):
- """
- Enable device from heketi.
-
- :param device_id: str device ID to be enabled
- """
- if device_id is None:
- return
- out = heketi_device_enable(
- self.heketi_client_node, self.heketi_server_url,
- device_id)
- self.assertNotEqual(out, False, "Failed to enable device of"
- " id %s" % device_id)
-
- def get_device_info(self, device_id):
- """
- Get device information from heketi.
-
- :param device_id: str device ID to fetch information
- :return device_info: dict device information
- """
- if device_id is None:
- return
- device_info = heketi_device_info(self.heketi_client_node,
- self.heketi_server_url,
- device_id,
- json=True)
- self.assertNotEqual(device_info, False,
- "Device info on %s failed" % device_id)
-
- return device_info
-
- def check_any_of_bricks_present_in_device(self, bricks, device_id):
- """
- Check any of the bricks present in the device.
-
- :param bricks: list bricks of volume
- :param device_id: device ID
- :return True: bool if bricks are present on device
- :return False: bool if bricks are not present on device
- """
- if device_id is None:
- return False
- device_info = self.get_device_info(device_id)
- for brick in bricks:
- if brick['device'] != device_id:
- continue
- for brick_info in device_info['bricks']:
- if brick_info['path'] == brick['path']:
- return True
- return False
-
- def test_device_enable(self):
- """Test case CNS-764: Test device enable functionality."""
- g.log.info("Disable and Enable device in heketi")
- node_list = heketi_node_list(self.heketi_client_node,
- self.heketi_server_url)
- self.assertTrue(node_list, "Failed to list heketi nodes")
- g.log.info("Successfully got the list of nodes")
-
- # Fetch online nodes from node list
- online_hosts = self.get_online_nodes(node_list)
-
- # skip test if online node count is less than 3, to create replicate
- # volume we need at least 3 nodes to be online
- if len(online_hosts) < 3:
- raise self.skipTest(
- "This test can run only if online hosts are more than 2")
-
- # if we have n nodes, disable n-3 nodes
- for node_info in online_hosts[3:]:
- node_id = node_info["id"]
- g.log.info("going to disable node id %s", node_id)
- self.disable_node(node_id)
- self.addCleanup(self.enable_node, node_id)
-
- for host in online_hosts[1:3]:
- found_online = False
- for device in host["devices"]:
- if device["state"].strip().lower() == "online":
- found_online = True
- break
- if not found_online:
- self.skipTest(("no device online on node %s" % host["id"]))
-
- # on the first node, disable all but one device:
- online_device_id = ""
- for device in online_hosts[0]["devices"]:
- if device["state"].strip().lower() != "online":
- continue
- device_id = device["id"]
- if online_device_id == "":
- online_device_id = device_id
- else:
- g.log.info("going to disable device %s", device_id)
- self.disable_device(device_id)
- self.addCleanup(self.enable_device, device_id)
- if online_device_id == "":
- self.skipTest(
- ("no device online on node %s" % online_hosts[0]["id"]))
-
- # create volume when 1 device is online
- vol_size = 1
- vol_info = heketi_volume_create(self.heketi_client_node,
- self.heketi_server_url, vol_size,
- json=True)
- self.assertTrue(vol_info, (
- "Failed to create heketi volume of size %d" % vol_size))
- self.addCleanup(self.delete_volumes, vol_info['id'])
-
- # check created volume brick is present on the device
- present = self.check_any_of_bricks_present_in_device(
- vol_info['bricks'],
- online_device_id)
- self.assertTrue(present, "bricks is present on this device")
-
- g.log.info("going to disable device id %s", online_device_id)
-
- self.disable_device(online_device_id)
- self.addCleanup(self.enable_device, online_device_id)
-
- ret, out, err = heketi_volume_create(
- self.heketi_client_node, self.heketi_server_url,
- vol_size, json=True, raw_cli_output=True)
- if ret == 0:
- out_json = json.loads(out)
- self.addCleanup(self.delete_volumes, out_json["id"])
- self.assertNotEqual(ret, 0,
- ("Volume creation did not fail ret- %s "
- "out- %s err- %s" % (ret, out, err)))
-
- g.log.info("Volume creation failed as expected, err- %s", err)
-
- # enable back the device which was previously disabled
- g.log.info("going to enable device id %s", online_device_id)
- self.enable_device(online_device_id)
-
- # create volume when device is enabled
- vol_info = heketi_volume_create(self.heketi_client_node,
- self.heketi_server_url, vol_size,
- json=True)
- self.assertTrue(vol_info, (
- "Failed to create heketi volume of size %d" % vol_size))
- self.addCleanup(self.delete_volumes, vol_info['id'])
-
- # check created volume brick is present on the device
- present = self.check_any_of_bricks_present_in_device(
- vol_info['bricks'],
- online_device_id)
- self.assertTrue(present, "brick is present on this device")
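
(The device enable/disable coverage deleted here is not lost: it reappears below in the new tests/functional/common/heketi/test_heketi_device_operations.py, which keeps test case CNS-764 but calls the heketi_ops helpers directly instead of wrapping each one in a thin assert-helper.)
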
diff --git a/tests/functional/common/heketi/heketi_tests/test_node_enable_disable.py b/tests/functional/common/heketi/heketi_tests/test_node_enable_disable.py
index bce565c4..9fac9e01 100644
--- a/tests/functional/common/heketi/heketi_tests/test_node_enable_disable.py
+++ b/tests/functional/common/heketi/heketi_tests/test_node_enable_disable.py
@@ -1,7 +1,7 @@
"""Test cases to disable and enable node in heketi."""
import json
-from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common.heketi_libs import HeketiBaseClass
from cnslibs.common.heketi_ops import (heketi_node_enable,
heketi_node_info,
heketi_node_disable,
@@ -10,7 +10,7 @@ from cnslibs.common.heketi_ops import (heketi_node_enable,
from glusto.core import Glusto as g
-class TestHeketiNodeState(HeketiClientSetupBaseClass):
+class TestHeketiNodeState(HeketiBaseClass):
"""Test node enable and disable functionality."""
def enable_node(self, node_id):
diff --git a/tests/functional/common/heketi/heketi_tests/test_node_info.py b/tests/functional/common/heketi/heketi_tests/test_node_info.py
index 26ac56f7..016b3ec4 100644
--- a/tests/functional/common/heketi/heketi_tests/test_node_info.py
+++ b/tests/functional/common/heketi/heketi_tests/test_node_info.py
@@ -2,12 +2,12 @@ from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.peer_ops import get_pool_list
-from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common.heketi_libs import HeketiBaseClass
from cnslibs.common import heketi_ops, podcmd
from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names
-class TestHeketiVolume(HeketiClientSetupBaseClass):
+class TestHeketiVolume(HeketiBaseClass):
"""
Class to test heketi volume create
"""
diff --git a/tests/functional/common/heketi/test_block_volumes_heketi.py b/tests/functional/common/heketi/test_block_volumes_heketi.py
index 4e405312..344ef9f7 100644
--- a/tests/functional/common/heketi/test_block_volumes_heketi.py
+++ b/tests/functional/common/heketi/test_block_volumes_heketi.py
@@ -5,10 +5,10 @@ from cnslibs.common.heketi_ops import (heketi_blockvolume_create,
heketi_volume_create,
heketi_volume_delete
)
-from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common.heketi_libs import HeketiBaseClass
-class TestBlockVolumeOps(HeketiClientSetupBaseClass):
+class TestBlockVolumeOps(HeketiBaseClass):
"""
Class to test heketi block volume deletion with and without block
volumes existing, heketi block volume list, heketi block volume info
@@ -57,7 +57,8 @@ class TestBlockVolumeOps(HeketiClientSetupBaseClass):
self.assertNotEqual(block_vol, False,
"Block volume creation has failed")
self.addCleanup(heketi_blockvolume_delete, self.heketi_client_node,
- self.heketi_server_url, block_vol["id"])
+ self.heketi_server_url, block_vol["id"],
+ raise_on_error=False)
block_delete_output = heketi_blockvolume_delete(
self.heketi_client_node, self.heketi_server_url,
block_vol["id"], json=True)
diff --git a/tests/functional/common/heketi/test_check_entries.py b/tests/functional/common/heketi/test_check_entries.py
index be7add9e..92e682d9 100644
--- a/tests/functional/common/heketi/test_check_entries.py
+++ b/tests/functional/common/heketi/test_check_entries.py
@@ -1,12 +1,12 @@
from glusto.core import Glusto as g
-from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common.heketi_libs import HeketiBaseClass
from cnslibs.common.heketi_ops import (heketi_volume_create,
heketi_volume_delete)
from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names
-class TestHeketiVolume(HeketiClientSetupBaseClass):
+class TestHeketiVolume(HeketiBaseClass):
"""Check volume bricks presence in fstab files on Gluster PODs."""
def _find_bricks_in_fstab_files(self, brick_paths, present):
diff --git a/tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py b/tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py
index cbee7550..3dd4230b 100644
--- a/tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py
+++ b/tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py
@@ -4,7 +4,7 @@ import math
from glusto.core import Glusto as g
from glustolibs.gluster.volume_ops import get_volume_list, get_volume_info
-from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common.heketi_libs import HeketiBaseClass
from cnslibs.common.heketi_ops import (heketi_node_list,
heketi_node_enable,
heketi_node_disable,
@@ -18,7 +18,7 @@ from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names
from cnslibs.common import podcmd
-class TestHeketiVolume(HeketiClientSetupBaseClass):
+class TestHeketiVolume(HeketiBaseClass):
def setUp(self):
super(TestHeketiVolume, self).setUp()
diff --git a/tests/functional/common/heketi/test_device_info.py b/tests/functional/common/heketi/test_device_info.py
index 2086245d..b24390ad 100644
--- a/tests/functional/common/heketi/test_device_info.py
+++ b/tests/functional/common/heketi/test_device_info.py
@@ -2,7 +2,7 @@ from cnslibs.common import heketi_libs
from cnslibs.common import heketi_ops
-class TestHeketiDeviceInfo(heketi_libs.HeketiClientSetupBaseClass):
+class TestHeketiDeviceInfo(heketi_libs.HeketiBaseClass):
def test_heketi_devices_info_verification(self):
"""Test case CNS-765"""
diff --git a/tests/functional/common/heketi/test_heketi_device_operations.py b/tests/functional/common/heketi/test_heketi_device_operations.py
new file mode 100644
index 00000000..202da90f
--- /dev/null
+++ b/tests/functional/common/heketi/test_heketi_device_operations.py
@@ -0,0 +1,287 @@
+import json
+
+from glusto.core import Glusto as g
+
+from cnslibs.common.heketi_libs import HeketiBaseClass
+from cnslibs.common.heketi_ops import (heketi_node_enable,
+ heketi_node_info,
+ heketi_node_disable,
+ heketi_node_list,
+ heketi_volume_create,
+ heketi_device_add,
+ heketi_device_delete,
+ heketi_device_disable,
+ heketi_device_remove,
+ heketi_device_info,
+ heketi_device_enable,
+ heketi_topology_info)
+
+
+class TestHeketiDeviceOperations(HeketiBaseClass):
+ """Test Heketi device enable/disable and remove functionality."""
+
+ def check_any_of_bricks_present_in_device(self, bricks, device_id):
+ """
+ Check any of the bricks present in the device.
+
+ :param bricks: list bricks of volume
+ :param device_id: device ID
+ :return True: bool if bricks are present on device
+ :return False: bool if bricks are not present on device
+ """
+ if device_id is None:
+ return False
+ device_info = heketi_device_info(self.heketi_client_node,
+ self.heketi_server_url,
+ device_id,
+ json=True)
+ self.assertNotEqual(device_info, False,
+ "Device info on %s failed" % device_id)
+ for brick in bricks:
+ if brick['device'] != device_id:
+ continue
+ for brick_info in device_info['bricks']:
+ if brick_info['path'] == brick['path']:
+ return True
+ return False
+
+ def get_online_nodes_disable_redundant(self):
+ """
+ Find online nodes and disable n-3 nodes and return
+ list of online nodes
+ """
+ node_list = heketi_node_list(self.heketi_client_node,
+ self.heketi_server_url)
+ self.assertTrue(node_list, "Failed to list heketi nodes")
+ g.log.info("Successfully got the list of nodes")
+ # Fetch online nodes from node list
+ online_hosts = []
+
+ for node in node_list:
+ node_info = heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node, json=True)
+ if node_info["state"] == "online":
+ online_hosts.append(node_info)
+
+        # Skip test if online node count is less than 3
+ if len(online_hosts) < 3:
+ raise self.skipTest(
+ "This test can run only if online hosts are more than 2")
+ # if we have n nodes, disable n-3 nodes
+ for node_info in online_hosts[3:]:
+ node_id = node_info["id"]
+ g.log.info("going to disable node id %s", node_id)
+ heketi_node_disable(self.heketi_client_node,
+ self.heketi_server_url,
+ node_id)
+ self.addCleanup(heketi_node_enable,
+ self.heketi_client_node,
+ self.heketi_server_url,
+ node_id)
+
+ for host in online_hosts[1:3]:
+ found_online = False
+ for device in host["devices"]:
+ if device["state"].strip().lower() == "online":
+ found_online = True
+ break
+ if not found_online:
+ self.skipTest(("no device online on node %s" % host["id"]))
+
+ return online_hosts
+
+ def test_device_enable_disable(self):
+ """Test case CNS-764. Test device enable and disable functionality."""
+
+ # Disable all but one device on the first online node
+ online_hosts = self.get_online_nodes_disable_redundant()
+ online_device_id = ""
+ for device in online_hosts[0]["devices"]:
+ if device["state"].strip().lower() != "online":
+ continue
+ device_id = device["id"]
+ if online_device_id == "":
+ online_device_id = device_id
+ else:
+ g.log.info("going to disable device %s", device_id)
+ heketi_device_disable(
+ self.heketi_client_node, self.heketi_server_url, device_id)
+ self.addCleanup(
+ heketi_device_enable,
+ self.heketi_client_node, self.heketi_server_url, device_id)
+ if online_device_id == "":
+ self.skipTest(
+ "No device online on node %s" % online_hosts[0]["id"])
+
+ # Create volume when only 1 device is online
+ vol_size = 1
+ vol_info = heketi_volume_create(self.heketi_client_node,
+ self.heketi_server_url, vol_size,
+ json=True)
+ self.assertTrue(vol_info, (
+ "Failed to create heketi volume of size %d" % vol_size))
+ self.addCleanup(self.delete_volumes, vol_info['id'])
+
+ # Check that one of volume's bricks is present on the device
+ present = self.check_any_of_bricks_present_in_device(
+ vol_info['bricks'], online_device_id)
+ self.assertTrue(
+ present,
+ "None of '%s' volume bricks is present on the '%s' device." % (
+ vol_info['id'], online_device_id))
+
+ g.log.info("Going to disable device id %s", online_device_id)
+ heketi_device_disable(
+ self.heketi_client_node, self.heketi_server_url, online_device_id)
+ self.addCleanup(heketi_device_enable, self.heketi_client_node,
+ self.heketi_server_url, online_device_id)
+
+ ret, out, err = heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url,
+ vol_size, json=True, raw_cli_output=True)
+ if ret == 0:
+ self.addCleanup(self.delete_volumes, json.loads(out)["id"])
+ self.assertNotEqual(ret, 0,
+ ("Volume creation did not fail. ret- %s "
+ "out- %s err- %s" % (ret, out, err)))
+ g.log.info("Volume creation failed as expected, err- %s", err)
+
+ # Enable back the device which was previously disabled
+ g.log.info("Going to enable device id %s", online_device_id)
+ heketi_device_enable(
+ self.heketi_client_node, self.heketi_server_url, online_device_id)
+
+ # Create volume when device is enabled
+ vol_info = heketi_volume_create(self.heketi_client_node,
+ self.heketi_server_url, vol_size,
+ json=True)
+ self.assertTrue(vol_info, (
+ "Failed to create heketi volume of size %d" % vol_size))
+ self.addCleanup(self.delete_volumes, vol_info['id'])
+
+ # Check that one of volume's bricks is present on the device
+ present = self.check_any_of_bricks_present_in_device(
+ vol_info['bricks'], online_device_id)
+ self.assertTrue(
+ present,
+ "None of '%s' volume bricks is present on the '%s' device." % (
+ vol_info['id'], online_device_id))
+
+ def test_device_remove_operation(self):
+ """Test case CNS-766. Test device remove functionality."""
+ gluster_server_0 = g.config["gluster_servers"].values()[0]
+ try:
+ device_name = gluster_server_0["additional_devices"][0]
+ except IndexError:
+ self.skipTest("Additional disk is not specified for node.")
+ manage_hostname = gluster_server_0["manage"]
+
+ # Get node ID of the Gluster hostname
+ topo_info = heketi_topology_info(self.heketi_client_node,
+ self.heketi_server_url, json=True)
+ self.assertTrue(
+ topo_info["clusters"][0]["nodes"],
+ "Cluster info command returned empty list of nodes.")
+
+ node_id = None
+ for node in topo_info["clusters"][0]["nodes"]:
+ if manage_hostname == node['hostnames']["manage"][0]:
+ node_id = node["id"]
+ break
+ self.assertNotEqual(
+ node_id, None,
+ "No information about node_id for %s" % manage_hostname)
+
+ # Iterate chosen node devices and pick the smallest online one.
+ lowest_device_size = lowest_device_id = None
+ online_hosts = self.get_online_nodes_disable_redundant()
+ for host in online_hosts[0:3]:
+ if node_id != host["id"]:
+ continue
+ for device in host["devices"]:
+ if device["state"].strip().lower() != "online":
+ continue
+ if (lowest_device_size is None or
+ device["storage"]["total"] < lowest_device_size):
+ lowest_device_size = device["storage"]["total"]
+ lowest_device_id = device["id"]
+ if lowest_device_id is None:
+ self.skipTest(
+ "Didn't find suitable device for disablement on '%s' node." % (
+ node_id))
+
+ # Create volume
+ vol_size = 1
+ vol_info = heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url, vol_size,
+ json=True)
+ self.assertTrue(vol_info, (
+ "Failed to create heketi volume of size %d" % vol_size))
+ self.addCleanup(self.delete_volumes, vol_info['id'])
+
+ # Add extra device, then remember it's ID and size
+ heketi_device_add(self.heketi_client_node, self.heketi_server_url,
+ device_name, node_id)
+ node_info_after_addition = heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url, node_id,
+ json=True)
+ for device in node_info_after_addition["devices"]:
+ if device["name"] != device_name:
+ continue
+ device_id_new = device["id"]
+ device_size_new = device["storage"]["total"]
+ self.addCleanup(heketi_device_delete, self.heketi_client_node,
+ self.heketi_server_url, device_id_new)
+ self.addCleanup(heketi_device_remove, self.heketi_client_node,
+ self.heketi_server_url, device_id_new)
+ self.addCleanup(heketi_device_disable, self.heketi_client_node,
+ self.heketi_server_url, device_id_new)
+
+ if lowest_device_size > device_size_new:
+ skip_msg = ("Skip test case, because newly added disk %s is "
+ "smaller than device which we want to remove %s." % (
+ device_size_new, lowest_device_size))
+ self.skipTest(skip_msg)
+
+ g.log.info("Removing device id %s" % lowest_device_id)
+ ret, out, err = heketi_device_remove(
+ self.heketi_client_node, self.heketi_server_url,
+ lowest_device_id, raw_cli_output=True)
+ if ret == 0:
+ self.addCleanup(heketi_device_enable, self.heketi_client_node,
+ self.heketi_server_url, lowest_device_id)
+ self.addCleanup(heketi_device_disable, self.heketi_client_node,
+ self.heketi_server_url, lowest_device_id)
+ self.assertNotEqual(ret, 0, (
+ "Device removal did not fail. ret: %s, out: %s, err: %s." % (
+ ret, out, err)))
+ g.log.info("Device removal failed as expected, err- %s", err)
+
+ # Need to disable device before removing
+ heketi_device_disable(
+ self.heketi_client_node, self.heketi_server_url, lowest_device_id)
+ self.addCleanup(heketi_device_enable, self.heketi_client_node,
+ self.heketi_server_url, lowest_device_id)
+
+ # Remove device from Heketi
+ heketi_device_remove(
+ self.heketi_client_node, self.heketi_server_url, lowest_device_id)
+ self.addCleanup(heketi_device_disable, self.heketi_client_node,
+ self.heketi_server_url, lowest_device_id)
+
+ # Create volume
+ vol_info = heketi_volume_create(self.heketi_client_node,
+ self.heketi_server_url, vol_size,
+ json=True)
+ self.assertTrue(vol_info, (
+ "Failed to create heketi volume of size %d" % vol_size))
+ self.addCleanup(self.delete_volumes, vol_info['id'])
+
+ # Check that none of volume's bricks is present on the device
+ present = self.check_any_of_bricks_present_in_device(
+ vol_info['bricks'], lowest_device_id)
+ self.assertFalse(
+ present,
+            "Some of the '%s' volume bricks are present on the removed "
+ "'%s' device." % (vol_info['id'], lowest_device_id))
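
The teardown above leans on unittest's LIFO cleanup ordering: delete, remove, and disable are registered in that order so they execute as disable -> remove -> delete, mirroring the "need to disable device before removing" constraint noted in the test. A self-contained demonstration:

    import unittest

    class CleanupOrder(unittest.TestCase):
        def test_lifo(self):
            calls = []
            # Registered in the same order as the device cleanups above.
            self.addCleanup(calls.append, "delete")
            self.addCleanup(calls.append, "remove")
            self.addCleanup(calls.append, "disable")
            # doCleanups() runs the registered stack immediately, newest first.
            self.doCleanups()
            self.assertEqual(calls, ["disable", "remove", "delete"])

    if __name__ == "__main__":
        unittest.main()
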
diff --git a/tests/functional/common/heketi/test_heketi_metrics.py b/tests/functional/common/heketi/test_heketi_metrics.py
index cf7e2d40..0e29b738 100644
--- a/tests/functional/common/heketi/test_heketi_metrics.py
+++ b/tests/functional/common/heketi/test_heketi_metrics.py
@@ -1,4 +1,5 @@
-from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common import exceptions
+from cnslibs.common.heketi_libs import HeketiBaseClass
from cnslibs.common.heketi_ops import (
get_heketi_metrics,
heketi_cluster_info,
@@ -15,7 +16,7 @@ from cnslibs.common.openshift_ops import (
)
-class TestHeketiMetrics(HeketiClientSetupBaseClass):
+class TestHeketiMetrics(HeketiBaseClass):
def verify_heketi_metrics_with_topology_info(self):
topology = heketi_topology_info(
@@ -174,7 +175,7 @@ class TestHeketiMetrics(HeketiClientSetupBaseClass):
self.heketi_dc_name, pod_amount=1)
# verify that metrics are not accessible when heketi pod is down
- with self.assertRaises(AssertionError):
+ with self.assertRaises(exceptions.ExecutionError):
get_heketi_metrics(
self.heketi_client_node,
self.heketi_server_url,
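
The metrics test now expects the ops layer's ExecutionError rather than a bare AssertionError when the heketi pod is down. A minimal sketch of the pattern, with fetch_metrics as an illustrative stand-in for get_heketi_metrics:

    import unittest
    from cnslibs.common import exceptions

    def fetch_metrics(pod_is_up):
        # Stand-in: surface an unreachable endpoint as ExecutionError,
        # the same exception type the real helper is expected to raise.
        if not pod_is_up:
            raise exceptions.ExecutionError("metrics endpoint unreachable")
        return {"heketi_up": 1}

    class TestMetricsDown(unittest.TestCase):
        def test_metrics_unreachable(self):
            with self.assertRaises(exceptions.ExecutionError):
                fetch_metrics(pod_is_up=False)
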
diff --git a/tests/functional/common/heketi/test_heketi_volume_operations.py b/tests/functional/common/heketi/test_heketi_volume_operations.py
index 69d4e056..b0deb31e 100644
--- a/tests/functional/common/heketi/test_heketi_volume_operations.py
+++ b/tests/functional/common/heketi/test_heketi_volume_operations.py
@@ -1,9 +1,7 @@
from unittest import skip
from glusto.core import Glusto as g
-from cnslibs.common.heketi_ops import (heketi_create_topology,
- heketi_topology_load,
- heketi_volume_delete,
+from cnslibs.common.heketi_ops import (heketi_volume_delete,
heketi_volume_create,
heketi_volume_expand,
heketi_volume_info,
@@ -15,11 +13,11 @@ from cnslibs.common.heketi_ops import (heketi_create_topology,
heketi_device_delete,
heketi_node_info,
heketi_node_list)
-from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
-from cnslibs.common.exceptions import ExecutionError, ConfigError
+from cnslibs.common.heketi_libs import HeketiBaseClass
+from cnslibs.common.exceptions import ExecutionError
-class TestHeketiVolumeOperations(HeketiClientSetupBaseClass):
+class TestHeketiVolumeOperations(HeketiBaseClass):
"""
Class to test heketi volume operations - create, expand
"""
@@ -27,20 +25,6 @@ class TestHeketiVolumeOperations(HeketiClientSetupBaseClass):
@classmethod
def setUpClass(cls):
super(TestHeketiVolumeOperations, cls).setUpClass()
-
- if cls.deployment_type == "crs_heketi_outside_openshift":
- ret = heketi_create_topology(cls.heketi_client_node,
- cls.topology_info)
- if not ret:
- raise ConfigError("Failed to create heketi topology file on %s"
- % cls.heketi_client_node)
-
- ret = heketi_topology_load(cls.heketi_client_node,
- cls.heketi_server_url)
- if not ret:
- raise ConfigError("Failed to load heketi topology on %s"
- % cls.heketi_client_node)
-
cls.volume_id = None
def volume_cleanup(self, volume_id):
diff --git a/tests/functional/common/heketi/test_volume_creation.py b/tests/functional/common/heketi/test_volume_creation.py
index 55699136..b3db8446 100644
--- a/tests/functional/common/heketi/test_volume_creation.py
+++ b/tests/functional/common/heketi/test_volume_creation.py
@@ -8,7 +8,7 @@ from cnslibs.common import openshift_ops
from cnslibs.common import podcmd
-class TestVolumeCreationTestCases(heketi_libs.HeketiClientSetupBaseClass):
+class TestVolumeCreationTestCases(heketi_libs.HeketiBaseClass):
"""
Class for volume creation related test cases
"""
diff --git a/tests/functional/common/heketi/test_volume_deletion.py b/tests/functional/common/heketi/test_volume_deletion.py
index 8b0adf98..b1be795b 100644
--- a/tests/functional/common/heketi/test_volume_deletion.py
+++ b/tests/functional/common/heketi/test_volume_deletion.py
@@ -1,11 +1,11 @@
from __future__ import division
from cnslibs.common.exceptions import ExecutionError
-from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common.heketi_libs import HeketiBaseClass
from cnslibs.common import heketi_ops
-class TestVolumeDeleteTestCases(HeketiClientSetupBaseClass):
+class TestVolumeDeleteTestCases(HeketiBaseClass):
"""
Class for volume deletion related test cases
diff --git a/tests/functional/common/heketi/test_volume_expansion_and_devices.py b/tests/functional/common/heketi/test_volume_expansion_and_devices.py
index 17ed5d9d..a003ceb7 100644
--- a/tests/functional/common/heketi/test_volume_expansion_and_devices.py
+++ b/tests/functional/common/heketi/test_volume_expansion_and_devices.py
@@ -7,12 +7,12 @@ from glusto.core import Glusto as g
from glustolibs.gluster import volume_ops, rebalance_ops
from cnslibs.common.exceptions import ExecutionError
-from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common.heketi_libs import HeketiBaseClass
from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names
from cnslibs.common import heketi_ops, podcmd
-class TestVolumeExpansionAndDevicesTestCases(HeketiClientSetupBaseClass):
+class TestVolumeExpansionAndDevicesTestCases(HeketiBaseClass):
"""
Class for volume expansion and devices addition related test cases
"""
@@ -542,17 +542,11 @@ class TestVolumeExpansionAndDevicesTestCases(HeketiClientSetupBaseClass):
free_space_after_creation = self.get_devices_summary_free_space()
- ret, out, err = heketi_ops.heketi_volume_expand(
- self.heketi_client_node, self.heketi_server_url,
- volume_id, 50, raw_cli_output=True)
-
- self.assertEqual(ret, 255, "volume expansion did not fail ret- %s "
- "out- %s err= %s" % (ret, out, err))
- g.log.info("Volume expansion failed as expected, err- %s" % err)
-
- if ret == 0:
- out_json = json.loads(out)
- self.addCleanup(self.delete_volumes, out_json["id"])
+ self.assertRaises(
+ ExecutionError,
+ heketi_ops.heketi_volume_expand,
+ self.heketi_client_node, self.heketi_server_url, volume_id,
+ 50, raw_cli_output=True)
self.enable_devices(additional_devices_attached)
diff --git a/tests/functional/common/heketi/test_volume_multi_req.py b/tests/functional/common/heketi/test_volume_multi_req.py
index 244131e9..5b72cc91 100644
--- a/tests/functional/common/heketi/test_volume_multi_req.py
+++ b/tests/functional/common/heketi/test_volume_multi_req.py
@@ -11,7 +11,7 @@ import yaml
from glusto.core import Glusto as g
-from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common.heketi_libs import HeketiBaseClass
from cnslibs.common.heketi_ops import (
heketi_volume_list)
from cnslibs.common.naming import (
@@ -192,7 +192,7 @@ def _heketi_name_id_map(vols):
@ddt.ddt
-class TestVolumeMultiReq(HeketiClientSetupBaseClass):
+class TestVolumeMultiReq(HeketiBaseClass):
def setUp(self):
super(TestVolumeMultiReq, self).setUp()
self.volcount = self._count_vols()
diff --git a/tests/functional/common/provisioning/test_pv_resize.py b/tests/functional/common/provisioning/test_pv_resize.py
index 353aa220..41f5e113 100644
--- a/tests/functional/common/provisioning/test_pv_resize.py
+++ b/tests/functional/common/provisioning/test_pv_resize.py
@@ -9,13 +9,13 @@ from cnslibs.common.openshift_ops import (
oc_create_app_dc_with_io,
oc_delete,
oc_rsh,
- oc_version,
scale_dc_pod_amount_and_wait,
verify_pv_size,
verify_pvc_size,
wait_for_events,
wait_for_pod_be_ready,
wait_for_resource_absence)
+from cnslibs.common.openshift_version import get_openshift_version
from cnslibs.cns.cns_baseclass import CnsBaseClass
from cnslibs.common.exceptions import ExecutionError
from glusto.core import Glusto as g
@@ -29,8 +29,7 @@ class TestPvResizeClass(CnsBaseClass):
def setUpClass(cls):
super(TestPvResizeClass, cls).setUpClass()
cls.node = cls.ocp_master_node[0]
- cls.version = oc_version(cls.node)
- if any(v in cls.version for v in ("3.6", "3.7", "3.8")):
+ if get_openshift_version() < "3.9":
cls.skip_me = True
return
enable_pvc_resize(cls.node)
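
The version gate above replaces substring matching against oc_version output, which silently breaks once "3.10" appears ("3.10" < "3.9" as plain strings). The sketch below shows the kind of tuple-based comparison get_openshift_version presumably relies on; the class is illustrative, not cnslibs' actual implementation:

    class OpenshiftVersion(object):
        # Compare versions numerically, part by part, instead of
        # lexicographically as raw strings.
        def __init__(self, version_str):
            self.parts = tuple(int(p) for p in version_str.split("."))

        def __lt__(self, other):
            if isinstance(other, str):
                other = OpenshiftVersion(other)
            return self.parts < other.parts

    # "3.10" is newer than "3.9", even though the raw strings sort otherwise.
    assert not (OpenshiftVersion("3.10") < "3.9")
    assert OpenshiftVersion("3.6") < "3.9"
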
diff --git a/tests/functional/common/provisioning/test_storage_class_cases.py b/tests/functional/common/provisioning/test_storage_class_cases.py
index 52ac761a..027bd0f2 100644
--- a/tests/functional/common/provisioning/test_storage_class_cases.py
+++ b/tests/functional/common/provisioning/test_storage_class_cases.py
@@ -6,8 +6,8 @@ from glusto.core import Glusto as g
from cnslibs.cns import cns_baseclass
from cnslibs.common.cns_libs import validate_multipath_pod
from cnslibs.common.openshift_ops import (
+ get_amount_of_gluster_nodes,
get_gluster_blockvol_info_by_pvc_name,
- get_ocp_gluster_pod_names,
get_pod_name_from_dc,
oc_create_app_dc_with_io,
oc_create_pvc,
@@ -190,7 +190,7 @@ class TestStorageClassCases(cns_baseclass.CnsBaseClass):
gluster pods count
'''
# get hacount as the number of gluster pods for the pvc creation
- hacount = len(get_ocp_gluster_pod_names(self.ocp_master_node[0]))
+ hacount = get_amount_of_gluster_nodes(self.ocp_master_node[0])
# create storage class and pvc with given parameters
self.create_sc_with_parameter(
@@ -209,7 +209,7 @@ class TestStorageClassCases(cns_baseclass.CnsBaseClass):
value
'''
# get hacount as no of gluster pods + 1 to fail the pvc creation
- hacount = len(get_ocp_gluster_pod_names(self.ocp_master_node[0])) + 1
+ hacount = get_amount_of_gluster_nodes(self.ocp_master_node[0]) + 1
# create storage class and pvc with given parameters
self.create_sc_with_parameter(
diff --git a/tests/functional/common/test_heketi_restart.py b/tests/functional/common/test_heketi_restart.py
index 2d7da8c3..6fd0e10f 100644
--- a/tests/functional/common/test_heketi_restart.py
+++ b/tests/functional/common/test_heketi_restart.py
@@ -1,6 +1,6 @@
from jsondiff import diff
-from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common.heketi_libs import HeketiBaseClass
from cnslibs.common.heketi_ops import (
hello_heketi,
heketi_volume_create,
@@ -12,7 +12,7 @@ from cnslibs.common.openshift_ops import (
wait_for_resource_absence)
-class TestRestartHeketi(HeketiClientSetupBaseClass):
+class TestRestartHeketi(HeketiBaseClass):
def test_restart_heketi_pod(self):
""" CNS-450 Restarting heketi pod """
diff --git a/tests/functional/common/test_node_restart.py b/tests/functional/common/test_node_restart.py
new file mode 100644
index 00000000..99f3ee67
--- /dev/null
+++ b/tests/functional/common/test_node_restart.py
@@ -0,0 +1,150 @@
+
+import time
+
+from unittest import skip
+from cnslibs.cns.cns_baseclass import CnsBaseClass
+from cnslibs.common.openshift_ops import (
+ check_service_status,
+ get_ocp_gluster_pod_names,
+ oc_rsh,
+ wait_for_pod_be_ready)
+from cnslibs.common.waiter import Waiter
+from cnslibs.common.exceptions import ExecutionError
+from glusto.core import Glusto as g
+
+
+class TestNodeRestart(CnsBaseClass):
+
+ def setUp(self):
+ super(TestNodeRestart, self).setUp()
+ self.oc_node = self.ocp_master_node[0]
+
+ self.gluster_pod_list = get_ocp_gluster_pod_names(self.oc_node)
+ self.gluster_pod_name = self.gluster_pod_list[0]
+
+ self.sc_name = self.create_storage_class()
+
+ self.pvc_names = self._create_volumes_with_io(3)
+
+ def _create_volumes_with_io(self, pvc_cnt, timeout=120, wait_step=3):
+ pvc_names = self.create_and_wait_for_pvcs(
+ pvc_amount=pvc_cnt, sc_name=self.sc_name,
+ timeout=timeout, wait_step=wait_step
+ )
+ err_msg = "failed to execute command %s on pod %s with error: %s"
+ for pvc_name in pvc_names:
+ dc_name, pod_name = self.create_dc_with_pvc(pvc_name)
+
+ # Make sure we are able to work with files
+ # on the mounted volume
+ filepath = "/mnt/file_for_testing_io.log"
+ cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % filepath
+ ret, out, err = oc_rsh(self.oc_node, pod_name, cmd)
+ self.assertEqual(ret, 0, err_msg % (cmd, pod_name, err))
+
+ cmd = "ls -lrt %s" % filepath
+ ret, out, err = oc_rsh(self.oc_node, pod_name, cmd)
+ self.assertEqual(ret, 0, err_msg % (cmd, pod_name, err))
+
+ return pvc_names
+
+ def _check_fstab_and_df_entries(self, first_cmd, second_cmd):
+ # matches output of "df --out=target" and entries in fstab
+ # and vice-versa as per commands given in first_cmd and
+ # second_cmd
+ err_msg = "failed to execute command: %s with error: %s"
+
+ ret, out, err = oc_rsh(self.oc_node, self.gluster_pod_name, first_cmd)
+ self.assertEqual(ret, 0, err_msg % (first_cmd, err))
+
+ for mnt_path in (out.strip()).split("\n"):
+ ret, out, err = oc_rsh(
+ self.oc_node, self.gluster_pod_name, second_cmd % mnt_path
+ )
+ self.assertEqual(ret, 0, err_msg % (second_cmd, err))
+
+ def _wait_for_gluster_pod_to_be_ready(self):
+ for gluster_pod in self.gluster_pod_list:
+ for w in Waiter(timeout=600, interval=10):
+ try:
+ success = wait_for_pod_be_ready(
+ self.oc_node, gluster_pod, timeout=1, wait_step=1
+ )
+ if success:
+ break
+ except ExecutionError as e:
+ g.log.info("exception %s while validating gluster "
+ "pod %s" % (e, gluster_pod))
+
+ if w.expired:
+ error_msg = ("exceeded timeout 600 sec, pod '%s' is "
+ "not in 'running' state" % gluster_pod)
+ g.log.error(error_msg)
+ raise ExecutionError(error_msg)
+
+ def _node_reboot(self):
+ storage_hostname = (g.config["gluster_servers"]
+ [self.gluster_servers[0]]["storage"])
+
+ cmd = "sleep 3; /sbin/shutdown -r now 'Reboot triggered by Glusto'"
+ ret, out, err = g.run(storage_hostname, cmd)
+
+ self.addCleanup(self._wait_for_gluster_pod_to_be_ready)
+
+ if ret != 255:
+ err_msg = "failed to reboot host %s error: %s" % (
+ storage_hostname, err)
+ g.log.error(err_msg)
+ raise AssertionError(err_msg)
+
+ try:
+ g.ssh_close_connection(storage_hostname)
+ except Exception as e:
+ g.log.error("failed to close connection with host %s"
+ " with error: %s" % (storage_hostname, e))
+ raise
+
+ # added sleep as node will restart after 3 sec
+ time.sleep(3)
+
+ for w in Waiter(timeout=600, interval=10):
+ try:
+ if g.rpyc_get_connection(storage_hostname, user="root"):
+ g.rpyc_close_connection(storage_hostname, user="root")
+ break
+ except Exception as err:
+ g.log.info("exception while getting connection: '%s'" % err)
+
+ if w.expired:
+ error_msg = ("exceeded timeout 600 sec, node '%s' is "
+ "not reachable" % storage_hostname)
+ g.log.error(error_msg)
+ raise ExecutionError(error_msg)
+
+ # wait for the gluster pod to be in 'Running' state
+ self._wait_for_gluster_pod_to_be_ready()
+
+ # glusterd and gluster-blockd service should be up and running
+ service_names = ("glusterd", "gluster-blockd", "tcmu-runner")
+ for gluster_pod in self.gluster_pod_list:
+ for service in service_names:
+ g.log.info("gluster_pod - '%s' : gluster_service '%s'" % (
+ gluster_pod, service))
+ check_service_status(
+ self.oc_node, gluster_pod, service, "running"
+ )
+
+ @skip("Blocked by BZ-1652913")
+ def test_node_restart_check_volume(self):
+ df_cmd = "df --out=target | sed 1d | grep /var/lib/heketi"
+ fstab_cmd = "grep '%s' /var/lib/heketi/fstab"
+ self._check_fstab_and_df_entries(df_cmd, fstab_cmd)
+
+ self._node_reboot()
+
+ fstab_cmd = ("grep '/var/lib/heketi' /var/lib/heketi/fstab "
+ "| cut -f2 -d ' '")
+ df_cmd = "df --out=target | sed 1d | grep '%s'"
+ self._check_fstab_and_df_entries(fstab_cmd, df_cmd)
+
+ self._create_volumes_with_io(pvc_cnt=1, timeout=300, wait_step=10)
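
The reboot flow above polls with cnslibs' Waiter helper: iterate until a condition is met or the timeout elapses, then check w.expired. A minimal sketch of such a helper, assuming the real cnslibs.common.waiter.Waiter behaves equivalently:

    import time

    class Waiter(object):
        # Iterator that yields until 'timeout' seconds pass, sleeping
        # 'interval' seconds between attempts; 'expired' records whether
        # the loop ended by timeout rather than by 'break'.
        def __init__(self, timeout=60, interval=1):
            self.timeout = timeout
            self.interval = interval
            self.expired = False
            self._start = time.time()
            self._first = True

        def __iter__(self):
            return self

        def __next__(self):
            if not self._first:
                time.sleep(self.interval)
            self._first = False
            if time.time() - self._start >= self.timeout:
                self.expired = True
                raise StopIteration()
            return self

        next = __next__  # Python 2 spelling, matching the era of these tests

Usage mirrors _node_reboot above: "for w in Waiter(600, 10): ... break", then "if w.expired: raise ExecutionError(...)".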