authorvamahaja <vamahaja@redhat.com>2019-01-08 13:10:59 +0530
committervamahaja <vamahaja@redhat.com>2019-01-11 16:00:49 +0530
commitefd7f6ffac516076a1201d2caeff3dc9edba3ab1 (patch)
tree55e9ad8d5cf4bc18baa01697a0f034b9f944d9bb
parente94f3f8aa30b62f4044db6aa6be6826d4e4420d5 (diff)
[RHGSQE-108] Removed Polarion test case IDs and updated description
Change-Id: I9ab3c6bea56a02f76ce818ee095ffe4afc0e64c0
Signed-off-by: vamahaja <vamahaja@redhat.com>
-rw-r--r--tests/functional/common/arbiter/test_arbiter.py26
-rw-r--r--tests/functional/common/gluster_block/test_restart_gluster_block.py3
-rw-r--r--tests/functional/common/gluster_stability/test_gluster_services_restart.py14
-rw-r--r--tests/functional/common/heketi/heketi_tests/test_disabling_device.py2
-rw-r--r--tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py17
-rw-r--r--tests/functional/common/heketi/test_block_volumes_heketi.py22
-rw-r--r--tests/functional/common/heketi/test_check_entries.py2
-rw-r--r--tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py9
-rw-r--r--tests/functional/common/heketi/test_device_info.py2
-rw-r--r--tests/functional/common/heketi/test_heketi_device_operations.py6
-rw-r--r--tests/functional/common/heketi/test_heketi_metrics.py12
-rw-r--r--tests/functional/common/heketi/test_volume_creation.py7
-rw-r--r--tests/functional/common/heketi/test_volume_expansion_and_devices.py12
-rw-r--r--tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py18
-rw-r--r--tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py19
-rw-r--r--tests/functional/common/provisioning/test_pv_resize.py10
-rw-r--r--tests/functional/common/provisioning/test_storage_class_cases.py26
-rw-r--r--tests/functional/common/test_heketi_restart.py2
18 files changed, 104 insertions, 105 deletions
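
The change applies one pattern across all 18 files: a docstring that carried only a Polarion/CNS test case ID is replaced with a one-line summary of what the test validates. A minimal sketch of the before/after convention (the test name, ID and wording below are illustrative, not taken from the patch):

# Before: the docstring carries only a tracker ID
def test_example_volume_create(self):
    """Test case CNS-000"""
    ...

# After: the docstring describes what is being validated
def test_example_volume_create(self):
    """Validate example volume creation using heketi"""
    ...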
diff --git a/tests/functional/common/arbiter/test_arbiter.py b/tests/functional/common/arbiter/test_arbiter.py
index f8dbda81..63325bc2 100644
--- a/tests/functional/common/arbiter/test_arbiter.py
+++ b/tests/functional/common/arbiter/test_arbiter.py
@@ -113,7 +113,7 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.BaseClass):
return bricks
def test_arbiter_pvc_create(self):
- """Test case CNS-944"""
+ """Validate dynamic provision of an arbiter volume"""
# Create sc with gluster arbiter info
self.create_storage_class(is_arbiter_vol=True)
@@ -127,8 +127,7 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.BaseClass):
self.verify_amount_and_proportion_of_arbiter_and_data_bricks(vol_info)
def test_arbiter_pvc_mount_on_pod(self):
- """Test case CNS-945"""
-
+ """Validate new volume creation using app pod"""
# Create sc with gluster arbiter info
self.create_storage_class(is_arbiter_vol=True)
@@ -194,7 +193,7 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.BaseClass):
self.cmd_run(write_data_cmd)
def test_create_arbiter_vol_with_more_than_one_brick_set(self):
- """Test case CNS-942"""
+ """Validate volume creation using heketi for more than six brick set"""
# Set arbiter:disabled tag to the data devices and get their info
data_nodes = []
@@ -322,7 +321,7 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.BaseClass):
@ddt.unpack
def test_verify_arbiter_brick_able_to_contain_expected_amount_of_files(
self, pvc_size_gb, avg_file_size):
- """Test cases CNS-1182-1190"""
+ """Validate arbiter brick creation with different avg file size"""
# Create sc with gluster arbiter info
self.create_storage_class(
@@ -374,7 +373,7 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.BaseClass):
@ddt.data(True, False)
def test_aribiter_required_tag_on_node_or_devices_other_disabled(
self, node_with_tag):
- """Test cases CNS-989 and CNS-997"""
+ """Validate arbiter vol creation with required node or device tag"""
pvc_amount = 3
@@ -445,7 +444,7 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.BaseClass):
data_brick.split(':')[0], data_nodes_ip_addresses)
def test_create_delete_pvcs_to_make_gluster_reuse_released_space(self):
- """Test case CNS-1265"""
+ """Validate reuse of volume space after deletion of PVCs"""
min_storage_gb = 10
# Set arbiter:disabled tags to the first 2 nodes
@@ -541,7 +540,7 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.BaseClass):
wait_for_resource_absence(self.node, 'pvc', pvc_name)
def test_arbiter_volume_expand_using_pvc(self):
- """Test case CNS-954"""
+ """Validate arbiter volume expansion by PVC creation"""
# Create sc with gluster arbiter info
self.create_storage_class(
is_arbiter_vol=True, allow_volume_expansion=True)
@@ -566,10 +565,13 @@ class TestArbiterVolumeCreateExpandDelete(cns_baseclass.BaseClass):
@ddt.data(True, False)
def test_expand_arbiter_volume_setting_tags_on_nodes_or_devices(
self, node_tags):
- """Test case CNS-1523, CNS-1524
- This test case is going to run two tests.
- If value is True it is going to set tags on nodes and run test
- If value is False it is going to set tags on devices and run test
+ """Validate expansion of arbiter volume with different tags
+
+ This test case runs two variants:
+ 1. If node_tags is True, tags are set
+ on nodes and the test is run
+ 2. If node_tags is False, tags are set
+ on devices and the test is run
"""
data_nodes = []
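
The expanded docstring above describes a ddt-parametrized test: @ddt.data supplies one value per run and the same method body executes once per value. A minimal, self-contained sketch of that mechanism, assuming the ddt and unittest libraries already used in these tests (class and method names here are illustrative):

import unittest

import ddt


@ddt.ddt
class ExampleTagTests(unittest.TestCase):

    @ddt.data(True, False)
    def test_expand_with_tags(self, node_tags):
        # ddt generates two test methods from this single definition:
        # one invocation with node_tags=True (tags would go on nodes)
        # and one with node_tags=False (tags would go on devices).
        target = "nodes" if node_tags else "devices"
        self.assertIn(target, ("nodes", "devices"))


if __name__ == '__main__':
    unittest.main()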
diff --git a/tests/functional/common/gluster_block/test_restart_gluster_block.py b/tests/functional/common/gluster_block/test_restart_gluster_block.py
index b2d74d92..be5c4e9b 100644
--- a/tests/functional/common/gluster_block/test_restart_gluster_block.py
+++ b/tests/functional/common/gluster_block/test_restart_gluster_block.py
@@ -12,7 +12,8 @@ from cnslibs.common.openshift_ops import (
class TestRestartGlusterBlockPod(HeketiBaseClass):
def test_restart_gluster_block_provisioner_pod(self):
- # CNS-542 - Restart gluster-block provisioner pod
+ """Restart gluster-block provisioner pod
+ """
# create heketi block volume
vol_info = heketi_blockvolume_create(self.heketi_client_node,
diff --git a/tests/functional/common/gluster_stability/test_gluster_services_restart.py b/tests/functional/common/gluster_stability/test_gluster_services_restart.py
index db80b6a4..168ff466 100644
--- a/tests/functional/common/gluster_stability/test_gluster_services_restart.py
+++ b/tests/functional/common/gluster_stability/test_gluster_services_restart.py
@@ -47,7 +47,6 @@ SERVICE_TCMU = "tcmu-runner"
@ddt.ddt
class GlusterStabilityTestSetup(BaseClass):
"""class for gluster stability (restarts different servces) testcases
- TC No's: CNS-1393, CNS-1394, CNS-1395
"""
def setUp(self):
@@ -281,8 +280,7 @@ class GlusterStabilityTestSetup(BaseClass):
@ddt.data(SERVICE_BLOCKD, SERVICE_TCMU, SERVICE_TARGET)
def test_restart_services_provision_volume_and_run_io(self, service):
- """[CNS-1393-1395] Restart gluster service then validate volumes
- """
+ """Restart gluster service then validate volumes"""
# restarts glusterfs service
restart_service_on_pod(self.oc_node, self.gluster_pod, service)
@@ -308,9 +306,7 @@ class GlusterStabilityTestSetup(BaseClass):
@skip("Blocked by BZ-1634745, BZ-1635736, BZ-1636477")
def test_target_side_failures_brick_failure_on_block_hosting_volume(self):
- """[CNS-1285] Target side failures - Brick failure on block
- hosting volume
- """
+ """Target side failures - Brick failure on block hosting volume"""
# get block hosting volume from pvc name
block_hosting_vol = self.get_block_hosting_volume_by_pvc_name(
self.pvc_name
@@ -337,8 +333,10 @@ class GlusterStabilityTestSetup(BaseClass):
@skip("Blocked by BZ-1634745, BZ-1635736, BZ-1636477")
def test_start_stop_block_volume_service(self):
- """[CNS-1314] Block hosting volume - stop/start block hosting
- volume when IO's and provisioning are going on
+ """Validate block hosting volume by start/stop operation
+
+ Perform stop/start operation on block hosting volume when
+ IO's and provisioning are going on
"""
# get block hosting volume from pvc name
block_hosting_vol = self.get_block_hosting_volume_by_pvc_name(
diff --git a/tests/functional/common/heketi/heketi_tests/test_disabling_device.py b/tests/functional/common/heketi/heketi_tests/test_disabling_device.py
index 5d5e867c..6139f64d 100644
--- a/tests/functional/common/heketi/heketi_tests/test_disabling_device.py
+++ b/tests/functional/common/heketi/heketi_tests/test_disabling_device.py
@@ -10,7 +10,7 @@ from cnslibs.common import podcmd
class TestDisableHeketiDevice(heketi_libs.HeketiBaseClass):
@podcmd.GlustoPod()
def test_create_volumes_enabling_and_disabling_heketi_devices(self):
- """Test case CNS-763"""
+ """Validate enable/disable of heketi device"""
# Get nodes info
node_id_list = heketi_ops.heketi_node_list(
diff --git a/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py b/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py
index e826de4d..b03e5e30 100644
--- a/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py
+++ b/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py
@@ -28,8 +28,7 @@ class TestHeketiVolume(HeketiBaseClass):
@podcmd.GlustoPod()
def test_volume_create_and_list_volume(self):
- """Make sure that Heketi vol creation creates just one Gluster vol."""
-
+ """Validate heketi and gluster volume list"""
g.log.info("List gluster volumes before Heketi volume creation")
existing_g_vol_list = get_volume_list('auto_get_gluster_endpoint')
self.assertTrue(existing_g_vol_list, ("Unable to get volumes list"))
@@ -73,10 +72,7 @@ class TestHeketiVolume(HeketiBaseClass):
@podcmd.GlustoPod()
def test_create_vol_and_retrieve_vol_info(self):
- """
- Create a heketi volume and retrieve the volume info
- and get gluster volume info
- """
+ """Validate heketi and gluster volume info"""
g.log.info("Create a heketi volume")
out = heketi_volume_create(self.heketi_client_node,
@@ -101,10 +97,7 @@ class TestHeketiVolume(HeketiBaseClass):
g.log.info("Successfully got the volume info %s" % name)
def test_to_check_deletion_of_cluster(self):
- """
- Deletion of a cluster with volumes
- and/ or nodes should fail
- """
+ """Validate deletion of cluster with volumes"""
# List heketi volumes
g.log.info("List heketi volumes")
volumes = heketi_volume_list(self.heketi_client_node,
@@ -152,7 +145,7 @@ class TestHeketiVolume(HeketiBaseClass):
g.log.info("All heketi cluster successfully listed")
def test_to_check_deletion_of_node(self):
- """Deletion of a node which contains devices"""
+ """Validate deletion of a node which contains devices"""
# Create Heketi volume to make sure we have devices with usages
heketi_url = self.heketi_server_url
@@ -202,7 +195,7 @@ class TestHeketiVolume(HeketiBaseClass):
self.assertEqual(node_info['state'].lower(), 'online')
def test_blockvolume_create_no_free_space(self):
- """Test case CNS-550"""
+ """Validate error is returned when free capacity is exhausted"""
# Create first small blockvolume
blockvol1 = heketi_blockvolume_create(
diff --git a/tests/functional/common/heketi/test_block_volumes_heketi.py b/tests/functional/common/heketi/test_block_volumes_heketi.py
index 344ef9f7..1a460a96 100644
--- a/tests/functional/common/heketi/test_block_volumes_heketi.py
+++ b/tests/functional/common/heketi/test_block_volumes_heketi.py
@@ -9,17 +9,16 @@ from cnslibs.common.heketi_libs import HeketiBaseClass
class TestBlockVolumeOps(HeketiBaseClass):
- """
- Class to test heketi block volume deletion with and without block
- volumes existing, heketi block volume list, heketi block volume info
- and heketi block volume creation with name and block volumes creation
- after manually creating a Block Hosting volume.
- Test cases : CNS-[530,535,532,807]
-
+ """Class to test heketi block volume deletion with and without block
+ volumes existing, heketi block volume list, heketi block volume info
+ and heketi block volume creation with name and block volumes creation
+ after manually creating a Block Hosting volume.
"""
def test_create_block_vol_after_host_vol_creation(self):
- """Test Case CNS-530 """
+ """Validate block-device after manual block hosting volume creation
+ using heketi
+ """
block_host_create_info = heketi_volume_create(
self.heketi_client_node, self.heketi_server_url, 5,
json=True, block=True)
@@ -33,7 +32,7 @@ class TestBlockVolumeOps(HeketiBaseClass):
self.addCleanup(self.delete_block_volumes, block_vol["id"])
def test_block_host_volume_delete_without_block_volumes(self):
- """Test Case CNS-535 """
+ """Validate deletion of empty block hosting volume"""
block_host_create_info = heketi_volume_create(
self.heketi_client_node, self.heketi_server_url, 1, json=True,
block=True)
@@ -51,7 +50,8 @@ class TestBlockVolumeOps(HeketiBaseClass):
"Block host volume delete failed, ID: %s" % block_hosting_vol_id)
def test_block_volume_delete(self):
- """Test Case CNS-532 """
+ """Validate deletion of gluster-block volume and capacity of used pool
+ """
block_vol = heketi_blockvolume_create(
self.heketi_client_node, self.heketi_server_url, 1, json=True)
self.assertNotEqual(block_vol, False,
@@ -72,7 +72,7 @@ class TestBlockVolumeOps(HeketiBaseClass):
" ID is %s" % block_vol["id"])
def test_block_volume_list(self):
- """Test Case CNS-807 """
+ """Validate heketi blockvolume list command works as expected"""
created_vol_ids = []
for count in range(3):
block_vol = heketi_blockvolume_create(
diff --git a/tests/functional/common/heketi/test_check_entries.py b/tests/functional/common/heketi/test_check_entries.py
index 92e682d9..e8479226 100644
--- a/tests/functional/common/heketi/test_check_entries.py
+++ b/tests/functional/common/heketi/test_check_entries.py
@@ -27,7 +27,7 @@ class TestHeketiVolume(HeketiBaseClass):
assertion_method(brick_path, fstab_files_data)
def test_to_check_entry_in_fstab_file(self):
- """Test case CNS-778"""
+ """Validate /etc/fstab entries after creation/deletion of volume"""
# Create heketi volume
vol = heketi_volume_create(
diff --git a/tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py b/tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py
index 98a136d6..c79ae5ee 100644
--- a/tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py
+++ b/tests/functional/common/heketi/test_create_distributed_replica_heketi_volume.py
@@ -143,7 +143,8 @@ class TestHeketiVolume(HeketiBaseClass):
"Brick amount is expected to be bigger than 3. "
"Actual amount is '%s'." % brick_amount)
- # Run unique actions for CNS-798 test case else return
+ # Run unique actions to validate whether deleting a dist-rep
+ # volume is handled by heketi, else return
if not validate_cleanup:
return
@@ -193,10 +194,12 @@ class TestHeketiVolume(HeketiBaseClass):
@podcmd.GlustoPod()
def test_to_create_distribute_replicated_vol(self):
- """Test case CNS-797"""
+ """Validate 2x3 vol type creation when the volume cannot be
+ carved out of a single device
+ """
self._create_distributed_replica_vol(validate_cleanup=False)
@podcmd.GlustoPod()
def test_to_create_and_delete_dist_rep_vol(self):
- """Test case CNS-798"""
+ """Validate whether deleting a dist-rep volume is handled by heketi"""
self._create_distributed_replica_vol(validate_cleanup=True)
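
Both docstrings above describe tests that delegate to a single private helper, with a flag selecting whether the deletion/cleanup path is also exercised (the comment updated in the previous hunk refers to the same flag). A rough sketch of that shared-helper structure, with illustrative bodies:

class ExampleDistRepTests(object):

    def _create_distributed_replica_vol(self, validate_cleanup):
        # Create a distribute-replicate (2x3) volume big enough that it
        # cannot be carved out of a single device (details omitted).
        if not validate_cleanup:
            return
        # Otherwise also delete the volume and verify that heketi cleans
        # up the bricks and restores the reported free space.

    def test_to_create_distribute_replicated_vol(self):
        self._create_distributed_replica_vol(validate_cleanup=False)

    def test_to_create_and_delete_dist_rep_vol(self):
        self._create_distributed_replica_vol(validate_cleanup=True)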
diff --git a/tests/functional/common/heketi/test_device_info.py b/tests/functional/common/heketi/test_device_info.py
index b24390ad..d1214537 100644
--- a/tests/functional/common/heketi/test_device_info.py
+++ b/tests/functional/common/heketi/test_device_info.py
@@ -5,7 +5,7 @@ from cnslibs.common import heketi_ops
class TestHeketiDeviceInfo(heketi_libs.HeketiBaseClass):
def test_heketi_devices_info_verification(self):
- """Test case CNS-765"""
+ """Validate whether device related information is displayed"""
# Get devices from topology info
devices_from_topology = {}
diff --git a/tests/functional/common/heketi/test_heketi_device_operations.py b/tests/functional/common/heketi/test_heketi_device_operations.py
index 1c4b5457..0ad81f48 100644
--- a/tests/functional/common/heketi/test_heketi_device_operations.py
+++ b/tests/functional/common/heketi/test_heketi_device_operations.py
@@ -95,7 +95,7 @@ class TestHeketiDeviceOperations(HeketiBaseClass):
return online_hosts
def test_device_enable_disable(self):
- """Test case CNS-764. Test device enable and disable functionality."""
+ """Validate device enable and disable functionality"""
# Disable all but one device on the first online node
online_hosts = self.get_online_nodes_disable_redundant()
@@ -173,7 +173,7 @@ class TestHeketiDeviceOperations(HeketiBaseClass):
@ddt.data(True, False)
def test_device_remove_operation(self, delete_device):
- """Test cases CNS-623,766."""
+ """Validate remove/delete device using heketi-cli"""
gluster_server_0 = g.config["gluster_servers"].values()[0]
try:
@@ -326,7 +326,7 @@ class TestHeketiDeviceOperations(HeketiBaseClass):
"'%s' device." % (vol_info['id'], lowest_device_id))
def test_heketi_with_device_removal_insuff_space(self):
- """Test case CNS-624"""
+ """Validate heketi device removal when there is insufficient space"""
# Disable 4+ nodes and 3+ devices on the first 3 nodes
min_free_space_gb = 5
diff --git a/tests/functional/common/heketi/test_heketi_metrics.py b/tests/functional/common/heketi/test_heketi_metrics.py
index 0e29b738..0b8ea53f 100644
--- a/tests/functional/common/heketi/test_heketi_metrics.py
+++ b/tests/functional/common/heketi/test_heketi_metrics.py
@@ -163,11 +163,11 @@ class TestHeketiMetrics(HeketiBaseClass):
self.assertEqual(vol_count['value'], len(cluster_info['volumes']))
def test_heketi_metrics_with_topology_info(self):
- # CNS-1243 - Heketi_metrics_generate
+ """Validate heketi metrics generation"""
self.verify_heketi_metrics_with_topology_info()
def test_heketi_metrics_heketipod_failure(self):
- # CNS-1262 - Heketi-metrics_validating_heketi_pod failure
+ """Validate heketi metrics after heketi pod failure"""
scale_dc_pod_amount_and_wait(
self.ocp_master_node, self.heketi_dc_name, pod_amount=0)
self.addCleanup(
@@ -211,7 +211,7 @@ class TestHeketiMetrics(HeketiBaseClass):
self.verify_heketi_metrics_with_topology_info()
def test_heketi_metrics_validating_vol_count_on_vol_creation(self):
- # CNS-1244 - Heketi_metrics_validating_VolumeCount_on_creation
+ """Validate heketi metrics VolumeCount after volume creation"""
for i in range(3):
# Create volume
@@ -235,7 +235,7 @@ class TestHeketiMetrics(HeketiBaseClass):
self.verify_volume_count()
def test_heketi_metrics_validating_vol_count_on_vol_deletion(self):
- # CNS-1245 - Heketi_metrics_validating_VolumeCount_on_deletion
+ """Validate heketi metrics VolumeCount after volume deletion"""
vol_list = []
@@ -274,7 +274,7 @@ class TestHeketiMetrics(HeketiBaseClass):
self.verify_volume_count()
def test_heketi_metrics_validating_cluster_count(self):
- # CNS-1246 - Heketi_metrics_validating_cluster_count
+ """Validate 'cluster count' in heketi metrics"""
cluster_list = heketi_cluster_list(
self.heketi_client_node, self.heketi_server_url, json=True)
@@ -291,7 +291,7 @@ class TestHeketiMetrics(HeketiBaseClass):
len(cluster_list['clusters']), metrics['heketi_cluster_count'])
def test_heketi_metrics_validating_existing_node_count(self):
- # CNS-1247 - Heketi_metrics_validating_existing_node_count
+ """Validate existing 'node count' in heketi metrics"""
metrics = get_heketi_metrics(
self.heketi_client_node, self.heketi_server_url)
diff --git a/tests/functional/common/heketi/test_volume_creation.py b/tests/functional/common/heketi/test_volume_creation.py
index b9f2a680..5820b789 100644
--- a/tests/functional/common/heketi/test_volume_creation.py
+++ b/tests/functional/common/heketi/test_volume_creation.py
@@ -14,10 +14,7 @@ class TestVolumeCreationTestCases(heketi_libs.HeketiBaseClass):
@podcmd.GlustoPod()
def test_create_heketi_volume(self):
- """
- Method to test heketi volume creation and
- background gluster validation
- """
+ """Test heketi volume creation and background gluster validation"""
hosts = []
gluster_servers = []
@@ -86,7 +83,7 @@ class TestVolumeCreationTestCases(heketi_libs.HeketiBaseClass):
"Brick %s is not up" % brick_name)
def test_volume_creation_no_free_devices(self):
- """Test case CNS-804"""
+ """Validate heketi error is returned when no free devices available"""
node, server_url = self.heketi_client_node, self.heketi_server_url
# Get nodes info
diff --git a/tests/functional/common/heketi/test_volume_expansion_and_devices.py b/tests/functional/common/heketi/test_volume_expansion_and_devices.py
index 90574f61..279be053 100644
--- a/tests/functional/common/heketi/test_volume_expansion_and_devices.py
+++ b/tests/functional/common/heketi/test_volume_expansion_and_devices.py
@@ -160,10 +160,7 @@ class TestVolumeExpansionAndDevicesTestCases(HeketiBaseClass):
"Device %s could not be deleted" % device_id)
def test_volume_expansion_expanded_volume(self):
- """
- To test volume expansion with brick and rebalance
- validation
- """
+ """Validate volume expansion with brick and check rebalance"""
creation_info = heketi_ops.heketi_volume_create(
self.heketi_client_node, self.heketi_server_url, 10, json=True)
@@ -302,7 +299,7 @@ class TestVolumeExpansionAndDevicesTestCases(HeketiBaseClass):
% volume_id)
def test_volume_expansion_no_free_space(self):
- """Test case CNS-467: volume expansion when there is no free space."""
+ """Validate volume expansion when there is no free space"""
vol_size, expand_size, additional_devices_attached = None, 10, {}
h_node, h_server_url = self.heketi_client_node, self.heketi_server_url
@@ -439,10 +436,7 @@ class TestVolumeExpansionAndDevicesTestCases(HeketiBaseClass):
@podcmd.GlustoPod()
def test_volume_expansion_rebalance_brick(self):
- """
- To test volume expansion with brick and rebalance
- validation
- """
+ """Validate volume expansion with brick and check rebalance"""
creation_info = heketi_ops.heketi_volume_create(
self.heketi_client_node, self.heketi_server_url, 10, json=True)
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
index 43bbec84..bca0bf66 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
@@ -57,14 +57,17 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
"Failed to execute '%s' command on '%s'." % (cmd, self.node))
def test_dynamic_provisioning_glusterblock_hacount_true(self):
- """ CNS-435 dynamic provisioning glusterblock """
+ """Validate dynamic provisioning for glusterblock
+ """
self.dynamic_provisioning_glusterblock(set_hacount=True)
def test_dynamic_provisioning_glusterblock_hacount_false(self):
- """ CNS-716 storage-class mandatory parameters for block """
+ """Validate storage-class mandatory parameters for block
+ """
self.dynamic_provisioning_glusterblock(set_hacount=False)
def test_dynamic_provisioning_glusterblock_heketipod_failure(self):
+ """Validate PVC with glusterblock creation when heketi pod is down"""
datafile_path = '/mnt/fake_file_for_%s' % self.id()
# Create DC with attached PVC
@@ -131,6 +134,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
@skip("Blocked by BZ-1632873")
def test_dynamic_provisioning_glusterblock_glusterpod_failure(self):
+ """Create glusterblock PVC when gluster pod is down"""
datafile_path = '/mnt/fake_file_for_%s' % self.id()
# Create DC with attached PVC
@@ -173,7 +177,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd, self.node))
def test_glusterblock_logs_presence_verification(self):
- # Verify presence of glusterblock provisioner POD and its status
+ """Validate presence of glusterblock provisioner POD and its status"""
gb_prov_cmd = ("oc get pods --all-namespaces "
"-l glusterfs=block-%s-provisioner-pod "
"-o=custom-columns=:.metadata.name,:.status.phase" % (
@@ -215,7 +219,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
self.assertTrue(out, "Command '%s' output is empty." % cmd)
def test_dynamic_provisioning_glusterblock_heketidown_pvc_delete(self):
- """ Delete PVC's when heketi is down CNS-439 """
+ """Validate PVC deletion when heketi is down"""
# Create Secret, SC and PVCs
self.create_storage_class()
@@ -253,7 +257,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
self.create_and_wait_for_pvc()
def test_recreate_app_pod_with_attached_block_pv(self):
- """Test Case CNS-1392"""
+ """Validate app pod attached block device I/O after restart"""
datafile_path = '/mnt/temporary_test_file'
# Create DC with POD and attached PVC to it
@@ -281,7 +285,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
self.cmd_run(write_cmd % (new_pod_name, datafile_path))
def test_volname_prefix_glusterblock(self):
- # CNS-926 - custom_volname_prefix_blockvol
+ """Validate custom volname prefix blockvol"""
self.dynamic_provisioning_glusterblock(
set_hacount=False, create_vol_name_prefix=True)
@@ -300,7 +304,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
self.sc.get('volumenameprefix', 'autotest')))
def test_dynamic_provisioning_glusterblock_reclaim_policy_retain(self):
- # CNS-1391 - Retain policy - gluster-block - delete pvc
+ """Validate retain policy for gluster-block after PVC deletion"""
self.create_storage_class(reclaim_policy='Retain')
self.create_and_wait_for_pvc()
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
index d550e51c..81329e08 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
@@ -76,14 +76,19 @@ class TestDynamicProvisioningP0(BaseClass):
"Failed to execute '%s' command on %s" % (cmd, self.node))
def test_dynamic_provisioning_glusterfile(self):
+ """Validate dynamic provisioning for gluster file"""
g.log.info("test_dynamic_provisioning_glusterfile")
self.dynamic_provisioning_glusterfile(False)
def test_dynamic_provisioning_glusterfile_volname_prefix(self):
+ """Validate dynamic provisioning for gluster file with vol name prefix
+ """
g.log.info("test_dynamic_provisioning_glusterfile volname prefix")
self.dynamic_provisioning_glusterfile(True)
def test_dynamic_provisioning_glusterfile_heketipod_failure(self):
+ """Validate dynamic provisioning for gluster file when heketi pod down
+ """
mount_path = "/mnt"
datafile_path = '%s/fake_file_for_%s' % (mount_path, self.id())
@@ -165,6 +170,8 @@ class TestDynamicProvisioningP0(BaseClass):
@skip("Blocked by BZ-1632873")
def test_dynamic_provisioning_glusterfile_glusterpod_failure(self):
+ """Validate dynamic provisioning for gluster file when gluster pod down
+ """
mount_path = "/mnt"
datafile_path = '%s/fake_file_for_%s' % (mount_path, self.id())
@@ -220,7 +227,7 @@ class TestDynamicProvisioningP0(BaseClass):
self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd, self.node))
def test_storage_class_mandatory_params_glusterfile(self):
- """Test case CNS-442 - storage-class mandatory parameters"""
+ """Validate storage-class creation with mandatory parameters"""
# create secret
self.secret_name = oc_create_secret(
@@ -269,7 +276,7 @@ class TestDynamicProvisioningP0(BaseClass):
ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
def test_dynamic_provisioning_glusterfile_heketidown_pvc_delete(self):
- """ Delete PVC's when heketi is down CNS-438 """
+ """Validate deletion of PVC's when heketi is down"""
# Create storage class, secret and PVCs
self.create_storage_class()
@@ -307,7 +314,7 @@ class TestDynamicProvisioningP0(BaseClass):
self.create_and_wait_for_pvc()
def test_validate_pvc_in_multiple_app_pods(self):
- """Test case CNS-574"""
+ """Validate the use of a same claim in multiple app pods"""
replicas = 5
# Create PVC
@@ -336,7 +343,7 @@ class TestDynamicProvisioningP0(BaseClass):
self.assertIn("temp_%s" % pod_name, ls_out)
def test_pvc_deletion_while_pod_is_running(self):
- """Test case CNS-584 - Verify PVC deletion while pod is running"""
+ """Validate PVC deletion while pod is running"""
# Create DC with POD and attached PVC to it
sc_name = self.create_storage_class()
@@ -359,7 +366,7 @@ class TestDynamicProvisioningP0(BaseClass):
ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
def test_dynamic_provisioning_glusterfile_reclaim_policy_retain(self):
- # CNS-1390 - Retain policy - glusterfs - delete pvc
+ """Validate retain policy for glusterfs after deletion of pvc"""
self.create_storage_class(reclaim_policy='Retain')
self.create_and_wait_for_pvc()
@@ -403,7 +410,7 @@ class TestDynamicProvisioningP0(BaseClass):
wait_for_resource_absence(self.node, 'pv', pv_name)
def test_usage_of_default_storage_class(self):
- """Test case CNS-928"""
+ """Validate PVs creation for SC with default custom volname prefix"""
# Unset 'default' option from all the existing Storage Classes
unset_sc_annotation_cmd = (
diff --git a/tests/functional/common/provisioning/test_pv_resize.py b/tests/functional/common/provisioning/test_pv_resize.py
index 52a5915f..99c1e451 100644
--- a/tests/functional/common/provisioning/test_pv_resize.py
+++ b/tests/functional/common/provisioning/test_pv_resize.py
@@ -23,7 +23,7 @@ from glusto.core import Glusto as g
@ddt.ddt
class TestPvResizeClass(BaseClass):
- """Test cases for PV resize."""
+ """Test cases for PV resize"""
@classmethod
def setUpClass(cls):
@@ -45,7 +45,7 @@ class TestPvResizeClass(BaseClass):
@ddt.data(False, True)
def test_pv_resize_with_prefix_for_name(self,
create_vol_name_prefix=False):
- """testcases CNS-1037 and CNS-1038 """
+ """Validate PV resize with and without name prefix"""
dir_path = "/mnt/"
node = self.ocp_client[0]
@@ -190,15 +190,15 @@ class TestPvResizeClass(BaseClass):
ret, 0, "Failed to write data on the expanded PVC")
def test_pv_resize_no_free_space(self):
- """Test case CNS-1040"""
+ """Validate PVC resize fails if there is no free space available"""
self._pv_resize(exceed_free_space=True)
def test_pv_resize_by_exact_free_space(self):
- """Test case CNS-1041"""
+ """Validate PVC resize when resized by exact available free space"""
self._pv_resize(exceed_free_space=False)
def test_pv_resize_try_shrink_pv_size(self):
- """testcase CNS-1039 """
+ """Validate whether reducing the PV size is allowed"""
dir_path = "/mnt/"
node = self.ocp_master_node[0]
diff --git a/tests/functional/common/provisioning/test_storage_class_cases.py b/tests/functional/common/provisioning/test_storage_class_cases.py
index 8fd001dd..e9dc8dbe 100644
--- a/tests/functional/common/provisioning/test_storage_class_cases.py
+++ b/tests/functional/common/provisioning/test_storage_class_cases.py
@@ -151,7 +151,7 @@ class TestStorageClassCases(cns_baseclass.BaseClass):
{"volumenameprefix": "dept_qe"},
)
def test_sc_glusterfile_incorrect_parameter(self, parameter={}):
- """Polarion testcase id- CNS-708,709,713,714,715,921"""
+ """Validate glusterfile storage with different incorrect parameters"""
self.create_sc_with_parameter("glusterfile", parameter=parameter)
@ddt.data(
@@ -161,15 +161,15 @@ class TestStorageClassCases(cns_baseclass.BaseClass):
{"restuser": "fakeuser"},
)
def test_sc_glusterblock_incorrect_parameter(self, parameter={}):
- """ Polarion testcase id- CNS-727,725,728"""
+ """Validate glusterblock storage with different incorrect parameters"""
self.create_sc_with_parameter("glusterblock", parameter=parameter)
@skip("Blocked by BZ-1609703")
@ddt.data(1, 2)
def test_gluster_block_provisioning_with_valid_ha_count(self, hacount):
- '''[CNS-544][CNS-1453] gluster-block provisioning with different valid
- 'hacount' values
- '''
+ """Validate gluster-block provisioning with different valid 'hacount'
+ values
+ """
# create storage class and pvc with given parameters
self.create_sc_with_parameter(
'glusterblock', success=True, parameter={'hacount': str(hacount)}
@@ -186,9 +186,9 @@ class TestStorageClassCases(cns_baseclass.BaseClass):
self.validate_multipath_info(hacount)
def test_gluster_block_provisioning_with_ha_count_as_glusterpod(self):
- '''[CNS-1443] gluster-block provisioning with "hacount" value equal to
- gluster pods count
- '''
+ """Validate gluster-block provisioning with "hacount" value equal
+ to gluster pods count
+ """
# get hacount as no of gluster pods for the pvc creation
hacount = get_amount_of_gluster_nodes(self.ocp_master_node[0])
@@ -205,9 +205,9 @@ class TestStorageClassCases(cns_baseclass.BaseClass):
@skip("Blocked by BZ-1644685")
def test_gluster_block_provisioning_with_invalid_ha_count(self):
- '''[CNS-1444] gluster-block provisioning with any invalid 'hacount'
+ """Validate gluster-block provisioning with any invalid 'hacount'
value
- '''
+ """
# get hacount as no of gluster pods + 1 to fail the pvc creation
hacount = get_amount_of_gluster_nodes(self.ocp_master_node[0]) + 1
@@ -218,9 +218,9 @@ class TestStorageClassCases(cns_baseclass.BaseClass):
@ddt.data('true', 'false', '')
def test_gluster_block_chapauthenabled_parameter(self, chapauthenabled):
- '''[CNS-545][CNS-1445][CNS-1446] gluster-block provisioning with
- different 'chapauthenabled' values
- '''
+ """Validate gluster-block provisioning with different
+ 'chapauthenabled' values
+ """
parameter = {}
if chapauthenabled:
parameter = {"chapauthenabled": chapauthenabled}
diff --git a/tests/functional/common/test_heketi_restart.py b/tests/functional/common/test_heketi_restart.py
index 6fd0e10f..6e9f2115 100644
--- a/tests/functional/common/test_heketi_restart.py
+++ b/tests/functional/common/test_heketi_restart.py
@@ -15,7 +15,7 @@ from cnslibs.common.openshift_ops import (
class TestRestartHeketi(HeketiBaseClass):
def test_restart_heketi_pod(self):
- """ CNS-450 Restarting heketi pod """
+ """Validate restarting heketi pod"""
# create heketi volume
vol_info = heketi_volume_create(self.heketi_client_node,