-rw-r--r--  .github/ISSUE_TEMPLATE | 38
-rw-r--r--  README.rst | 2
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/baseclass.py | 94
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/cloundproviders/vmware.py | 178
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/gluster_ops.py | 91
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/heketi_ops.py | 86
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/node_ops.py | 69
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/openshift_ops.py | 183
-rw-r--r--  openshift-storage-libs/openshiftstoragelibs/openshift_storage_libs.py | 23
-rw-r--r--  pytest.ini | 4
-rwxr-xr-x  tests/functional/arbiter/test_arbiter.py | 38
-rw-r--r--  tests/functional/gluster_stability/test_brickmux_stability.py | 2
-rw-r--r--  tests/functional/gluster_stability/test_gluster_block_stability.py | 52
-rw-r--r--  tests/functional/gluster_stability/test_restart_gluster_block_prov_pod.py | 2
-rw-r--r--  tests/functional/gluster_stability/test_restart_gluster_services.py | 6
-rw-r--r--  tests/functional/heketi/test_block_volumes_heketi.py | 226
-rw-r--r--  tests/functional/heketi/test_check_brick_paths.py | 2
-rw-r--r--  tests/functional/heketi/test_create_distributed_replica_heketi_volume.py | 6
-rw-r--r--  tests/functional/heketi/test_disabling_device.py | 2
-rw-r--r--  tests/functional/heketi/test_heketi_authentication.py | 33
-rw-r--r--  tests/functional/heketi/test_heketi_brick_evict.py | 180
-rw-r--r--  tests/functional/heketi/test_heketi_cluster_operations.py | 12
-rw-r--r--  tests/functional/heketi/test_heketi_create_volume.py | 643
-rwxr-xr-x  tests/functional/heketi/test_heketi_device_operations.py | 110
-rw-r--r--  tests/functional/heketi/test_heketi_lvm_wrapper.py | 277
-rw-r--r--  tests/functional/heketi/test_heketi_metrics.py | 12
-rw-r--r--  tests/functional/heketi/test_heketi_node_operations.py | 16
-rw-r--r--  tests/functional/heketi/test_heketi_volume_operations.py | 12
-rw-r--r--  tests/functional/heketi/test_heketi_zones.py | 23
-rw-r--r--  tests/functional/heketi/test_restart_heketi_pod.py | 61
-rw-r--r--  tests/functional/heketi/test_server_state_examine_gluster.py | 12
-rw-r--r--  tests/functional/heketi/test_volume_creation.py | 14
-rw-r--r--  tests/functional/heketi/test_volume_deletion.py | 117
-rw-r--r--  tests/functional/heketi/test_volume_expansion_and_devices.py | 50
-rw-r--r--  tests/functional/heketi/test_volume_multi_req.py | 8
-rw-r--r--  tests/functional/logging/__init__.py | 0
-rw-r--r--  tests/functional/logging/test_logging_validations.py | 396
-rw-r--r--  tests/functional/metrics/test_metrics_validation.py | 45
-rw-r--r--  tests/functional/prometheous/__init__.py | 0
-rw-r--r--  tests/functional/prometheous/test_prometheus_validations.py | 976
-rw-r--r--  tests/functional/prometheous/test_prometheus_validations_file.py | 335
-rw-r--r--  tests/functional/provisioning/test_dev_path_mapping_block.py | 303
-rw-r--r--  tests/functional/provisioning/test_dev_path_mapping_file.py | 794
-rwxr-xr-x  tests/functional/provisioning/test_dynamic_provisioning_block.py | 35
-rw-r--r--  tests/functional/provisioning/test_dynamic_provisioning_file.py | 80
-rw-r--r--  tests/functional/provisioning/test_pv_resize.py | 25
-rw-r--r--  tests/functional/provisioning/test_storage_class_cases.py | 86
-rw-r--r--  tests/functional/test_gluster_ops_check.py | 4
-rw-r--r--  tests/functional/test_node_restart.py | 2
-rw-r--r--  tests/glusterfs-containers-tests-config.yaml | 20
50 files changed, 5256 insertions(+), 529 deletions(-)
diff --git a/.github/ISSUE_TEMPLATE b/.github/ISSUE_TEMPLATE
new file mode 100644
index 00000000..9a28a968
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE
@@ -0,0 +1,38 @@
+<!-- Please use this template while reporting an issue, providing as much information as possible to make debugging quicker. Thank you! -->
+
+**Description of problem:**
+
+
+**Operating system:**
+
+**Python Version:**
+
+**Terminal dump of the issue observed:**
+<details>
+
+</details>
+
+**Exact glusto-tests log where the issue was observed:**
+<details>
+
+
+
+</details>
+
+**Setup info:**
+<details>
+Number of nodes:
+
+Number of clients:
+
+Number of servers:
+
+Testcase or library:
+
+How reproducible:
+
+</details>
+
+**Additional info:**
+
+
diff --git a/README.rst b/README.rst
index dc7c32e1..65d3c137 100644
--- a/README.rst
+++ b/README.rst
@@ -173,7 +173,7 @@ For example:
$ tox -e functional -- glusto -c 'config.yml' \
-l /tmp/glustotests-ocp.log --log-level DEBUG \
- --pytest='-v -x tests -m ocp'
+ '--pytest=-v -x tests -m ocp'
One can configure log files, log levels in the test cases as well. For details
on how to use `glusto` framework for configuring logs in tests, refer `docs
diff --git a/openshift-storage-libs/openshiftstoragelibs/baseclass.py b/openshift-storage-libs/openshiftstoragelibs/baseclass.py
index 52cbfcce..b5969764 100644
--- a/openshift-storage-libs/openshiftstoragelibs/baseclass.py
+++ b/openshift-storage-libs/openshiftstoragelibs/baseclass.py
@@ -4,6 +4,8 @@ import re
import unittest
from glusto.core import Glusto as g
+from glustolibs.gluster.block_libs import get_block_list
+from glustolibs.gluster.volume_ops import get_volume_list
import six
from openshiftstoragelibs import command
@@ -17,16 +19,21 @@ from openshiftstoragelibs.gluster_ops import (
get_gluster_vol_status,
)
from openshiftstoragelibs.heketi_ops import (
+ get_block_hosting_volume_list,
hello_heketi,
heketi_blockvolume_delete,
heketi_blockvolume_info,
+ heketi_blockvolume_list,
heketi_db_check,
+ heketi_topology_info,
heketi_volume_create,
heketi_volume_delete,
heketi_volume_info,
heketi_volume_list,
)
from openshiftstoragelibs.node_ops import (
+ attach_existing_vmdk_from_vmstore,
+ detach_disk_from_vm,
node_add_iptables_rules,
node_delete_iptables_rules,
power_off_vm_by_name,
@@ -38,7 +45,6 @@ from openshiftstoragelibs.openshift_ops import (
get_pod_name_from_rc,
get_pv_name_from_pvc,
oc_create_app_dc_with_io,
- oc_create_busybox_app_dc_with_io,
oc_create_pvc,
oc_create_sc,
oc_create_secret,
@@ -120,6 +126,11 @@ class BaseClass(unittest.TestCase):
cls.heketi_logs_before_delete = bool(
g.config.get("common", {}).get("heketi_logs_before_delete", False))
+ cls.io_container_image_cirros = cls.openshift_config.get(
+ "io_container_images", {}).get("cirros", "cirros")
+ cls.io_container_image_busybox = cls.openshift_config.get(
+ "io_container_images", {}).get("busybox", "busybox")
+
cmd = "echo -n %s | base64" % cls.heketi_cli_key
ret, out, err = g.run(cls.ocp_master_node[0], cmd, "root")
if ret != 0:
@@ -426,8 +437,8 @@ class BaseClass(unittest.TestCase):
def create_dcs_with_pvc(
self, pvc_names, timeout=600, wait_step=5,
- dc_name_prefix='autotests-dc', label=None,
- skip_cleanup=False, is_busybox=False):
+ dc_name_prefix='autotests-dc', space_to_use=1048576, label=None,
+ skip_cleanup=False, image=None):
"""Create bunch of DCs with app PODs which use unique PVCs.
Args:
@@ -436,8 +447,9 @@ class BaseClass(unittest.TestCase):
timeout (int): timeout value, default value is 600 seconds.
wait_step( int): wait step, default value is 5 seconds.
dc_name_prefix(str): name prefix for deployment config.
+ space_to_use(int): space to use for io's in KB.
label (dict): keys and value for adding label into DC.
- is_busybox (bool): True for busybox app pod else default is False
+ image (str): container image used for I/O.
Returns: dictionary with following structure:
{
"pvc_name_1": ("dc_name_1", "pod_name_1"),
@@ -446,15 +458,17 @@ class BaseClass(unittest.TestCase):
"pvc_name_n": ("dc_name_n", "pod_name_n"),
}
"""
+ if not image:
+ image = self.io_container_image_cirros
+
pvc_names = (
pvc_names
if isinstance(pvc_names, (list, set, tuple)) else [pvc_names])
dc_and_pod_names, dc_names = {}, {}
- function = (oc_create_busybox_app_dc_with_io if is_busybox else
- oc_create_app_dc_with_io)
for pvc_name in pvc_names:
- dc_name = function(self.ocp_client[0], pvc_name,
- dc_name_prefix=dc_name_prefix, label=label)
+ dc_name = oc_create_app_dc_with_io(
+ self.ocp_client[0], pvc_name, space_to_use=space_to_use,
+ dc_name_prefix=dc_name_prefix, label=label, image=image)
dc_names[pvc_name] = dc_name
if not skip_cleanup:
self.addCleanup(oc_delete, self.ocp_client[0], 'dc', dc_name)
@@ -475,11 +489,11 @@ class BaseClass(unittest.TestCase):
def create_dc_with_pvc(
self, pvc_name, timeout=300, wait_step=10,
dc_name_prefix='autotests-dc', label=None,
- skip_cleanup=False, is_busybox=False):
+ skip_cleanup=False, image=None):
return self.create_dcs_with_pvc(
pvc_name, timeout, wait_step,
dc_name_prefix=dc_name_prefix, label=label,
- skip_cleanup=skip_cleanup, is_busybox=is_busybox)[pvc_name]
+ skip_cleanup=skip_cleanup, image=image)[pvc_name]
def create_heketi_volume_with_name_and_wait(
self, name, size, raise_on_cleanup_error=True,
@@ -684,6 +698,46 @@ class BaseClass(unittest.TestCase):
self.addCleanup(self.power_on_vm, vm_name)
self.power_off_vm(vm_name)
+ def detach_and_attach_vmdk(self, vm_name, node_hostname, devices_list):
+
+ # Detach devices list and attach existing vmdk present
+ vmdk_list, modified_device_list = [], []
+ devices_list.reverse()
+ self.addCleanup(self.power_on_gluster_node_vm, vm_name, node_hostname)
+ for device in devices_list:
+ # Detach disks from vm
+ vmdk = detach_disk_from_vm(vm_name, device)
+ self.addCleanup(
+ attach_existing_vmdk_from_vmstore, vm_name, device, vmdk)
+ vmdk_list.append(vmdk)
+ vmdk_list.reverse()
+ devices_list.reverse()
+ modified_vmdk_list = vmdk_list[-1:] + vmdk_list[:-1]
+ for device, vmdk in zip(devices_list, modified_vmdk_list):
+ modified_device_list.append((device, vmdk))
+
+ # Power off gluster node
+ power_off_vm_by_name(vm_name)
+ self.addCleanup(power_off_vm_by_name, vm_name)
+ for device, vdisk in modified_device_list:
+ attach_existing_vmdk_from_vmstore(vm_name, device, vdisk)
+ self.addCleanup(detach_disk_from_vm, vm_name, device)
+ self.power_on_gluster_node_vm(vm_name, node_hostname)
+ devices_list.sort()
+
+ def validate_file_volumes_count(self, h_node, h_server, node_ip):
+
+ # check volume count from heketi and gluster are same
+ heketi_topology_info(h_node, h_server, json=True)
+ h_volume_list = heketi_volume_list(h_node, h_server, json=True)
+ vol_list = get_volume_list(node_ip)
+ self.assertIsNotNone(
+ vol_list, "Failed to get volumes list")
+ self.assertEqual(
+ len(h_volume_list['volumes']), len(vol_list),
+ "Failed to verify volume count Expected:'{}', Actual:'{}'".format(
+ len(h_volume_list['volumes']), len(vol_list)))
+
class GlusterBlockBaseClass(BaseClass):
"""Base class for gluster-block test cases."""
@@ -818,6 +872,26 @@ class GlusterBlockBaseClass(BaseClass):
return block_hosting_vol
+ def validate_block_volumes_count(self, h_node, h_server, node_ip):
+
+ # get list of block volumes using heketi
+ h_blockvol_list = heketi_blockvolume_list(
+ h_node, h_server, json=True)
+ # Get existing BHV list
+ bhv_list = list(
+ get_block_hosting_volume_list(h_node, h_server).keys())
+ for vol in bhv_list:
+ bhv_info = heketi_volume_info(h_node, h_server, vol, json=True)
+ bhv_name = bhv_info['name']
+ gluster_block_list = get_block_list(node_ip, volname=bhv_name)
+ self.assertIsNotNone(
+ gluster_block_list, "Failed to get gluster block list")
+ self.assertEqual(
+ len(h_blockvol_list['blockvolumes']), len(gluster_block_list),
+ "Failed to verify blockvolume count Expected:'{}', "
+ "Actual:'{}'".format(
+ len(h_blockvol_list['blockvolumes']), len(gluster_block_list)))
+
class ScaleUpBaseClass(GlusterBlockBaseClass):
"""Base class for ScaleUp test cases."""
diff --git a/openshift-storage-libs/openshiftstoragelibs/cloundproviders/vmware.py b/openshift-storage-libs/openshiftstoragelibs/cloundproviders/vmware.py
index 1d4d4c38..959fc7da 100644
--- a/openshift-storage-libs/openshiftstoragelibs/cloundproviders/vmware.py
+++ b/openshift-storage-libs/openshiftstoragelibs/cloundproviders/vmware.py
@@ -3,6 +3,7 @@ Note: Do not use this module directly in the Test Cases. This module can be
used with the help of 'node_ops'
"""
import re
+import string
from glusto.core import Glusto as g
from pyVim import connect
@@ -239,3 +240,180 @@ class VmWare(object):
tasks = [vm[0].PowerOff()]
self._wait_for_tasks(tasks, self.vsphere_client)
+
+ def get_obj(self, name, vimtype):
+ """
+ Retrieves the managed object for the name and type specified
+ Args:
+ name (str): Name of the VM.
+ vimtype (list): Type of managed object, e.g. [vim.VirtualMachine].
+ Returns:
+ obj: Managed object matching the given name and vimtype.
+ Example:
+ 'vim.VirtualMachine:vm-1268'
+ Raises:
+ CloudProviderError: In case of any failures.
+ """
+ obj = None
+ content = self.vsphere_client.content.viewManager.CreateContainerView(
+ self.vsphere_client.content.rootFolder, vimtype, True)
+ for c in content.view:
+ if c.name == name:
+ obj = c
+ break
+ if not obj:
+ msg = "Virtual machine with {} name not found.".format(name)
+ g.log.error(msg)
+ raise exceptions.CloudProviderError(msg)
+ return obj
+
+ def get_disk_labels(self, vm_name):
+ """Retrieve disk labels which are attached to vm.
+
+ Args:
+ vm_name (str): Name of the VM.
+ Returns:
+ disk_labels (list): list of disks labels which are attached to vm.
+ Example:
+ ['Hard disk 1', 'Hard disk 2', 'Hard disk 3']
+ Raises:
+ CloudProviderError: In case of any failures.
+ """
+
+ # Find vm
+ vm = self.get_obj(vm_name, vimtype=[vim.VirtualMachine])
+
+ disk_labels = []
+ for dev in vm.config.hardware.device:
+ disk_labels.append(dev.deviceInfo.label)
+ return disk_labels
+
+ def detach_disk(self, vm_name, disk_path):
+ """Detach disk for given vmname by diskPath.
+
+ Args:
+ vm_name (str): Name of the VM.
+ disk_path (str): Disk path which needs to be unplugged.
+ Example:
+ '/dev/sdd'
+ '/dev/sde'
+ Returns:
+ vdisk_path (str): Path of vmdk file to be detached from vm.
+ Raises:
+ CloudProviderError: In case of any failures.
+ """
+
+ # Translate given disk to a disk label of vmware.
+ letter = disk_path[-1]
+ ucase = string.ascii_uppercase
+ pos = ucase.find(letter.upper()) + 1
+ if pos:
+ disk_label = 'Hard disk {}'.format(str(pos))
+ else:
+ raise exceptions.CloudProviderError(
+ "Hard disk '{}' missing from vm '{}'".format(pos, vm_name))
+
+ # Find vm
+ vm = self.get_obj(vm_name, vimtype=[vim.VirtualMachine])
+
+ # Find if the given hard disk is attached to the system.
+ virtual_hdd_device = None
+ for dev in vm.config.hardware.device:
+ if dev.deviceInfo.label == disk_label:
+ virtual_hdd_device = dev
+ vdisk_path = virtual_hdd_device.backing.fileName
+ break
+
+ if not virtual_hdd_device:
+ raise exceptions.CloudProviderError(
+ 'Virtual disk label {} could not be found'.format(disk_label))
+ disk_labels = self.get_disk_labels(vm_name)
+ if disk_label in disk_labels:
+
+ # Remove disk from the vm
+ virtual_hdd_spec = vim.vm.device.VirtualDeviceSpec()
+ virtual_hdd_spec.operation = (
+ vim.vm.device.VirtualDeviceSpec.Operation.remove)
+ virtual_hdd_spec.device = virtual_hdd_device
+
+ # Wait for the task to be completed.
+ spec = vim.vm.ConfigSpec()
+ spec.deviceChange = [virtual_hdd_spec]
+ task = vm.ReconfigVM_Task(spec=spec)
+ self._wait_for_tasks([task], self.vsphere_client)
+ else:
+ msg = ("Could not find provided disk {} in list of disks {}"
+ " in vm {}".format(disk_label, disk_labels, vm_name))
+ g.log.error(msg)
+ raise exceptions.CloudProviderError(msg)
+ return vdisk_path
+
+ def attach_existing_vmdk(self, vm_name, disk_path, vmdk_name):
+ """
+ Attach already existing disk to vm
+ Args:
+ vm_name (str): Name of the VM.
+ disk_path (str): Disk path at which the vmdk should be attached.
+ Example:
+ '/dev/sdd'
+ '/dev/sde'
+ vmdk_name (str): Path of vmdk file to attach in vm.
+ Returns:
+ None
+ Raises:
+ CloudProviderError: In case of any failures.
+ """
+
+ # Find vm
+ vm = self.get_obj(vm_name, vimtype=[vim.VirtualMachine])
+
+ # Translate given disk to a disk label of vmware.
+ letter = disk_path[-1]
+ ucase = string.ascii_uppercase
+ pos = ucase.find(letter.upper()) + 1
+ if pos:
+ disk_label = 'Hard disk {}'.format(str(pos))
+ else:
+ raise exceptions.CloudProviderError(
+ "Hard disk '{}' missing from vm '{}'".format(pos, vm_name))
+
+ # Find if the given hard disk is not attached to the vm
+ for dev in vm.config.hardware.device:
+ if dev.deviceInfo.label == disk_label:
+ raise exceptions.CloudProviderError(
+ 'Virtual disk label {} already exists'.format(disk_label))
+
+ # Find unit number for attaching vmdk
+ unit_number = 0
+ for dev in vm.config.hardware.device:
+ if hasattr(dev.backing, 'fileName'):
+ unit_number = int(dev.unitNumber) + 1
+
+ # unit_number 7 reserved for scsi controller, max(16)
+ if unit_number == 7:
+ unit_number += 1
+ if unit_number >= 16:
+ raise Exception(
+ "SCSI controller is full. Cannot attach vmdk file")
+ if isinstance(dev, vim.vm.device.VirtualSCSIController):
+ controller = dev
+
+ # Attach vmdk file to the disk and setting backings
+ spec = vim.vm.ConfigSpec()
+ disk_spec = vim.vm.device.VirtualDeviceSpec()
+ disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+ disk_spec.device = vim.vm.device.VirtualDisk()
+ disk_spec.device.backing = (
+ vim.vm.device.VirtualDisk.FlatVer2BackingInfo())
+ disk_spec.device.backing.diskMode = 'persistent'
+ disk_spec.device.backing.fileName = vmdk_name
+ disk_spec.device.backing.thinProvisioned = True
+ disk_spec.device.unitNumber = unit_number
+ disk_spec.device.controllerKey = controller.key
+
+ # creating the list
+ dev_changes = []
+ dev_changes.append(disk_spec)
+ spec.deviceChange = dev_changes
+ task = vm.ReconfigVM_Task(spec=spec)
+ self._wait_for_tasks([task], self.vsphere_client)
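Both detach_disk() and attach_existing_vmdk() rely on the same device-path-to-label translation; below is a standalone sketch of that mapping, runnable outside of vSphere, with the function name chosen only for illustration:

    import string


    def disk_path_to_label(disk_path):
        """Map '/dev/sda' -> 'Hard disk 1', '/dev/sdd' -> 'Hard disk 4'."""
        letter = disk_path[-1]
        pos = string.ascii_uppercase.find(letter.upper()) + 1
        if not pos:
            raise ValueError(
                "Cannot map disk path '{}' to a disk label".format(disk_path))
        return 'Hard disk {}'.format(pos)


    assert disk_path_to_label('/dev/sdd') == 'Hard disk 4'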
diff --git a/openshift-storage-libs/openshiftstoragelibs/gluster_ops.py b/openshift-storage-libs/openshiftstoragelibs/gluster_ops.py
index f621a860..7f5f5535 100644
--- a/openshift-storage-libs/openshiftstoragelibs/gluster_ops.py
+++ b/openshift-storage-libs/openshiftstoragelibs/gluster_ops.py
@@ -4,7 +4,12 @@ try:
except ImportError:
# py2
import json
+try:
+ import xml.etree.cElementTree as etree
+except ImportError:
+ import xml.etree.ElementTree as etree
import re
+import six
import time
from glusto.core import Glusto as g
@@ -20,7 +25,10 @@ from glustolibs.gluster.volume_ops import (
from openshiftstoragelibs import exceptions
from openshiftstoragelibs.heketi_ops import heketi_blockvolume_info
-from openshiftstoragelibs.openshift_ops import cmd_run_on_gluster_pod_or_node
+from openshiftstoragelibs.openshift_ops import (
+ cmd_run_on_gluster_pod_or_node,
+ get_ocp_gluster_pod_details,
+)
from openshiftstoragelibs import podcmd
from openshiftstoragelibs import waiter
@@ -352,3 +360,84 @@ def get_gluster_vol_free_inodes_with_hosts_of_bricks(vol_name):
inodes_info = {brick_process: process_data["inodesFree"]}
hosts_with_inodes_info[g_node].update(inodes_info)
return hosts_with_inodes_info
+
+
+def _get_gluster_cmd(target, command):
+
+ if isinstance(command, six.string_types):
+ command = [command]
+ ocp_client_node = list(g.config['ocp_servers']['client'].keys())[0]
+ gluster_pods = get_ocp_gluster_pod_details(ocp_client_node)
+
+ if target == 'auto_get_gluster_endpoint':
+ if gluster_pods:
+ target = podcmd.Pod(ocp_client_node, gluster_pods[0]["pod_name"])
+ else:
+ target = list(g.config.get("gluster_servers", {}).keys())[0]
+ elif not isinstance(target, podcmd.Pod) and gluster_pods:
+ for g_pod in gluster_pods:
+ if target in (g_pod['pod_host_ip'], g_pod['pod_hostname']):
+ target = podcmd.Pod(ocp_client_node, g_pod['pod_name'])
+ break
+
+ if isinstance(target, podcmd.Pod):
+ return target.node, ' '.join(['oc', 'rsh', target.podname] + command)
+
+ return target, ' '.join(command)
+
+
+def get_peer_status(mnode):
+ """Parse the output of command 'gluster peer status' using run_async.
+
+ Args:
+ mnode (str): Node on which command has to be executed.
+
+ Returns:
+ NoneType: None if command execution or XML parsing fails.
+ list: list of dicts on success.
+
+ Examples:
+ >>> get_peer_status(mnode = 'abc.lab.eng.xyz.com')
+ [{'uuid': '77dc299a-32f7-43d8-9977-7345a344c398',
+ 'hostname': 'ijk.lab.eng.xyz.com',
+ 'state': '3',
+ 'hostnames' : ['ijk.lab.eng.xyz.com'],
+ 'connected': '1',
+ 'stateStr': 'Peer in Cluster'},
+
+ {'uuid': 'b15b8337-9f8e-4ec3-8bdb-200d6a67ae12',
+ 'hostname': 'def.lab.eng.xyz.com',
+ 'state': '3',
+ 'hostnames': ['def.lab.eng.xyz.com'],
+ 'connected': '1',
+ 'stateStr': 'Peer in Cluster'}
+ ]
+ """
+ mnode, cmd = _get_gluster_cmd(mnode, "gluster peer status --xml")
+ obj = g.run_async(mnode, cmd, log_level='DEBUG')
+ ret, out, err = obj.async_communicate()
+
+ if ret:
+ g.log.error(
+ "Failed to execute peer status command on node {} with error "
+ "{}".format(mnode, err))
+ return None
+
+ try:
+ root = etree.XML(out)
+ except etree.ParseError:
+ g.log.error("Failed to parse the gluster peer status xml output.")
+ return None
+
+ peer_status_list = []
+ for peer in root.findall("peerStatus/peer"):
+ peer_dict = {}
+ for element in peer.getchildren():
+ if element.tag == "hostnames":
+ hostnames_list = []
+ for hostname in element.getchildren():
+ hostnames_list.append(hostname.text)
+ element.text = hostnames_list
+ peer_dict[element.tag] = element.text
+ peer_status_list.append(peer_dict)
+ return peer_status_list
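A short sketch of consuming get_peer_status(); 'auto_get_gluster_endpoint' is resolved by _get_gluster_cmd() to a gluster pod or standalone node, and the 'connected' field follows the dict structure shown in the docstring above:

    from openshiftstoragelibs.gluster_ops import get_peer_status

    peers = get_peer_status('auto_get_gluster_endpoint')
    assert peers is not None, "Failed to fetch or parse 'gluster peer status'"
    disconnected = [p['hostname'] for p in peers if p['connected'] != '1']
    assert not disconnected, "Peers not connected: {}".format(disconnected)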
diff --git a/openshift-storage-libs/openshiftstoragelibs/heketi_ops.py b/openshift-storage-libs/openshiftstoragelibs/heketi_ops.py
index 77be7883..1f6a7705 100644
--- a/openshift-storage-libs/openshiftstoragelibs/heketi_ops.py
+++ b/openshift-storage-libs/openshiftstoragelibs/heketi_ops.py
@@ -293,6 +293,55 @@ def heketi_volume_expand(heketi_client_node, heketi_server_url, volume_id,
return out
+def heketi_blockvolume_expand(heketi_client_node, heketi_server_url,
+ blockvolume_id, new_size, raise_on_error=True,
+ **kwargs):
+ """Executes heketi blockvolume expand command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+ blockvolume_id (str): blockvolume ID
+ new_size (int): new size to expand the block volume to.
+ raise_on_error (bool): whether or not to raise exception
+ in case of an error.
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - json : (bool)
+ - secret : (str)|None
+ - user : (str)|None
+
+ Returns:
+ dict: parsed JSON output if json is True.
+ string: raw string output if json is False.
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+ """
+
+ version = heketi_version.get_heketi_version(heketi_client_node)
+ if version < '9.0.0-14':
+ msg = ("heketi-client package {} does not support blockvolume "
+ "expand".format(version.v_str))
+ g.log.error(msg)
+ raise NotImplementedError(msg)
+
+ heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ cmd = ("heketi-cli -s {} blockvolume expand {} "
+ "--new-size={} {} {} {}".format(
+ heketi_server_url, blockvolume_id, new_size, json_arg,
+ admin_key, user))
+ cmd = TIMEOUT_PREFIX + cmd
+ out = heketi_cmd_run(
+ heketi_client_node, cmd, raise_on_error=raise_on_error)
+ if json_arg and out:
+ return json.loads(out)
+ return out
+
+
def heketi_volume_delete(heketi_client_node, heketi_server_url, volume_id,
raise_on_error=True, **kwargs):
"""Executes heketi volume delete command.
@@ -2067,3 +2116,40 @@ def validate_dev_path_vg_and_uuid(
# Compare the uuid from node and heketi
return n_uuid == h_uuid
+
+
+def heketi_brick_evict(heketi_client_node, heketi_server_url, brick_id,
+ raise_on_error=True, **kwargs):
+ """Executes heketi brick evict command.
+
+ Args:
+ heketi_client_node (str): Node on which cmd has to be executed.
+ heketi_server_url (str): Heketi server url
+ brick_id (str): Brick ID
+ raise_on_error (bool): whether or not to raise exception
+ in case of an error.
+
+ Kwargs:
+ The keys, values in kwargs are:
+ - secret : (str)|None
+ - user : (str)|None
+
+ Raises:
+ exceptions.ExecutionError: if command fails.
+ """
+
+ version = heketi_version.get_heketi_version(heketi_client_node)
+ if version < '9.0.0-14':
+ msg = (
+ "heketi-client package {} does not support brick evict".format(
+ version.v_str))
+ raise NotImplementedError(msg)
+
+ heketi_server_url, _, admin_key, user = _set_heketi_global_flags(
+ heketi_server_url, **kwargs)
+
+ cmd = "heketi-cli -s {} brick evict {} {} {}".format(
+ heketi_server_url, brick_id, admin_key, user)
+ cmd = TIMEOUT_PREFIX + cmd
+ heketi_cmd_run(
+ heketi_client_node, cmd, raise_on_error=raise_on_error)
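A hedged usage sketch of the two new wrappers; the node, server URL and IDs below are placeholders, and both calls raise NotImplementedError on heketi-client versions older than 9.0.0-14:

    from openshiftstoragelibs.heketi_ops import (
        heketi_blockvolume_expand,
        heketi_brick_evict,
    )

    h_node = 'heketi-client.example.com'      # placeholder client node
    h_url = 'http://heketi.example.com:8080'  # placeholder server URL

    # Expand an existing block volume to the new size accepted by heketi-cli.
    expand_info = heketi_blockvolume_expand(
        h_node, h_url, '<blockvolume-id>', 2, json=True)

    # Evict a brick so heketi replaces it elsewhere in the cluster.
    heketi_brick_evict(h_node, h_url, '<brick-id>')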
diff --git a/openshift-storage-libs/openshiftstoragelibs/node_ops.py b/openshift-storage-libs/openshiftstoragelibs/node_ops.py
index 943ad194..5ae8cf2a 100644
--- a/openshift-storage-libs/openshiftstoragelibs/node_ops.py
+++ b/openshift-storage-libs/openshiftstoragelibs/node_ops.py
@@ -215,3 +215,72 @@ def node_delete_iptables_rules(node, chain, rules, raise_on_error=True):
command.cmd_run(
delete_iptables_rule_cmd % (chain, rule), node,
raise_on_error=raise_on_error)
+
+
+def attach_disk_to_vm(name, disk_size, disk_type='thin'):
+ """Add the disk specified to virtual machine.
+
+ Args:
+ name (str): name of the VM for which disk needs to be added.
+ disk_size (int) : Specify disk size in KB
+ disk_type (str) : Type of the disk, could be thick or thin.
+ Default value is "thin".
+ Returns:
+ None
+ """
+ cloudProvider = _get_cloud_provider()
+
+ vm_name = find_vm_name_by_ip_or_hostname(name)
+ cloudProvider.attach_disk(vm_name, disk_size, disk_type)
+
+
+def attach_existing_vmdk_from_vmstore(name, disk_path, vmdk_name):
+ """Attach existing disk vmdk specified to virtual machine.
+
+ Args:
+ name (str): name of the VM for which disk needs to be added.
+ disk_path (str) : device path (e.g. '/dev/sdd') at which the disk
+ should be attached.
+ vmdk_name (str) : name of the vmdk file which needs to be added.
+
+ Returns:
+ None
+ """
+ cloudProvider = _get_cloud_provider()
+
+ vm_name = find_vm_name_by_ip_or_hostname(name)
+ cloudProvider.attach_existing_vmdk(vm_name, disk_path, vmdk_name)
+
+
+def detach_disk_from_vm(name, disk_name):
+ """Remove the disk specified from virtual machine.
+
+ Args:
+ name (str): name of the VM from where the disk needs to be removed.
+ disk_name (str) : name of the disk which needs to be removed.
+ Example:
+ '/dev/sdd'
+ '/dev/sde'
+ Returns:
+ vdisk (str): vmdk filepath of removed disk
+ """
+ cloudProvider = _get_cloud_provider()
+
+ vm_name = find_vm_name_by_ip_or_hostname(name)
+ vdisk = cloudProvider.detach_disk(vm_name, disk_name)
+ return vdisk
+
+
+def get_disk_labels(name):
+ """Remove the disk specified from virtual machine.
+
+ Args:
+ name (str) : name of the disk which needs to be removed.
+ Example:
+ '/dev/sdd'
+ '/dev/sde'
+ Returns:
+ None
+ """
+ cloudProvider = _get_cloud_provider()
+ vm_name = find_vm_name_by_ip_or_hostname(name)
+ disk_labels = cloudProvider.get_all_disks(vm_name)
+ return disk_labels
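A sketch of the detach/re-attach round trip these wrappers enable; the node address is a placeholder and '/dev/sdd' is assumed to be a disk already attached to the VM:

    from openshiftstoragelibs.node_ops import (
        attach_existing_vmdk_from_vmstore,
        detach_disk_from_vm,
        get_disk_labels,
    )

    node = '10.0.0.1'                     # placeholder gluster node address

    # Labels currently attached, e.g. ['Hard disk 1', 'Hard disk 2', ...]
    labels = get_disk_labels(node)

    # Detach /dev/sdd, remember its vmdk path, then plug the same vmdk back.
    vmdk = detach_disk_from_vm(node, '/dev/sdd')
    attach_existing_vmdk_from_vmstore(node, '/dev/sdd', vmdk)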
diff --git a/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py b/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
index cf1e342b..a228e190 100644
--- a/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
+++ b/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
@@ -39,21 +39,24 @@ KILL_SERVICE = "kill -9 %s"
IS_ACTIVE_SERVICE = "systemctl is-active %s"
-def oc_get_pods(ocp_node, selector=None):
+def oc_get_pods(ocp_node, selector=None, name=None):
"""Gets the pods info with 'wide' option in the current project.
Args:
ocp_node (str): Node in which ocp command will be executed.
selector (str): optional option. Selector for OCP pods.
example: "glusterfs-node=pod" for filtering out only Gluster PODs.
+ name (str): name of the pod to get details.
Returns:
dict : dict of pods info in the current project.
"""
- cmd = "oc get -o wide --no-headers=true pods"
+ cmd = "oc get -o wide --no-headers=true pods "
if selector:
cmd += " --selector %s" % selector
+ if name:
+ cmd += " %s" % name
out = command.cmd_run(cmd, hostname=ocp_node)
return _parse_wide_pods_output(out)
@@ -408,7 +411,7 @@ def _oc_create_app_dc_with_io_image(hostname, pvc_name, dc_name_prefix,
def oc_create_app_dc_with_io(
hostname, pvc_name, dc_name_prefix="autotests-dc-with-app-io",
- replicas=1, space_to_use=1048576, label=None):
+ replicas=1, space_to_use=1048576, label=None, image="cirros"):
"""Create DC with app PODs and attached PVC, constantly running I/O.
Args:
@@ -420,34 +423,15 @@ def oc_create_app_dc_with_io(
replicas (int): amount of application POD replicas.
space_to_use (int): value in bytes which will be used for I/O.
label (dict): dict of keys and values to add labels in DC.
+ image (str): Container image for I/O.
"""
return _oc_create_app_dc_with_io_image(
hostname, pvc_name, dc_name_prefix, replicas, space_to_use,
- label, "cirros")
-
-
-def oc_create_busybox_app_dc_with_io(
- hostname, pvc_name, dc_name_prefix="autotests-dc-with-app-io",
- replicas=1, space_to_use=1048576, label=None):
- """Create DC with app PODs and attached PVC, constantly running I/O.
-
- Args:
- hostname (str): Node on which 'oc create' command will be executed.
- pvc_name (str): name of the Persistent Volume Claim to attach to
- the application PODs where constant I/O will run.
- dc_name_prefix (str): DC name will consist of this prefix and
- random str.
- replicas (int): amount of application POD replicas.
- space_to_use (int): value in bytes which will be used for I/O.
- label (dict): dict of keys and values to add labels in DC.
- """
- return _oc_create_app_dc_with_io_image(
- hostname, pvc_name, dc_name_prefix, replicas, space_to_use,
- label, "busybox")
+ label, image=image)
def oc_create_tiny_pod_with_volume(hostname, pvc_name, pod_name_prefix='',
- mount_path='/mnt'):
+ mount_path='/mnt', image='cirros'):
"""Create tiny POD from image in 10Mb with attached volume at /mnt"""
pod_name = "%s-%s" % (pod_name_prefix, utils.get_random_str())
pod_data = json.dumps({
@@ -460,7 +444,7 @@ def oc_create_tiny_pod_with_volume(hostname, pvc_name, pod_name_prefix='',
"terminationGracePeriodSeconds": 20,
"containers": [{
"name": pod_name,
- "image": "cirros", # noqa: 10 Mb! linux image
+ "image": image, # noqa: 10 Mb! linux image
"volumeMounts": [{"mountPath": mount_path, "name": "vol"}],
"command": [
"/bin/sh", "-ec",
@@ -480,7 +464,8 @@ def oc_create_tiny_pod_with_volume(hostname, pvc_name, pod_name_prefix='',
def oc_delete(
- ocp_node, rtype, name, raise_on_absence=True, collect_logs=False):
+ ocp_node, rtype, name, raise_on_absence=True, collect_logs=False,
+ skip_res_validation=True, is_force=False):
"""Delete an OCP resource by name
Args:
@@ -492,8 +477,11 @@ def oc_delete(
else return
default value: True
collect_logs (bool): Collect logs before deleting resource
+ skip_res_validation (bool): if True, verify that the resource
+ exists before deleting it. Default is True.
+ is_force (bool): True for deleting forcefully, default is False
"""
- if not oc_get_yaml(ocp_node, rtype, name, raise_on_error=raise_on_absence):
+ if skip_res_validation and not oc_get_yaml(
+ ocp_node, rtype, name, raise_on_error=raise_on_absence):
return
if rtype == "pod" and collect_logs:
@@ -506,6 +494,10 @@ def oc_delete(
if openshift_version.get_openshift_version() >= '3.11':
cmd.append('--wait=false')
+ # Forcefully delete
+ if is_force:
+ cmd.append("--grace-period 0 --force")
+
command.cmd_run(cmd, hostname=ocp_node)
@@ -1081,7 +1073,7 @@ def wait_for_pod_be_ready(hostname, pod_name,
g.log.info("pod %s is in ready state and is "
"Running" % pod_name)
return True
- elif output[1] == "Error":
+ elif output[1] in ["Error", "CrashBackOffLoop"]:
msg = ("pod %s status error" % pod_name)
g.log.error(msg)
raise exceptions.ExecutionError(msg)
@@ -1100,13 +1092,15 @@ def wait_for_pod_be_ready(hostname, pod_name,
def wait_for_pods_be_ready(
- hostname, pod_count, selector, timeout=600, wait_step=10):
+ hostname, pod_count, selector=None, field_selector=None,
+ timeout=600, wait_step=10):
"""Wait to 'pod_count' gluster pods be in Ready state.
Args:
hostname (str): Node where we want to run our commands.
pod_count (int): No of pods to be waited for.
selector (str): Selector to select pods of given label.
+ field_selector (str): Selector to select pods.
timeout (int): Seconds to wait for Node to be Ready.
wait_step (int): Interval in seconds to wait before checking
status again.
@@ -1115,20 +1109,30 @@ def wait_for_pods_be_ready(
AssertionError: In case it fails to get pods.
ExecutionError: In case pods won't get in ready state for given time.
"""
- if not selector:
+ if not selector and not field_selector:
raise exceptions.ExecutionError(
- "selector parameter should be provided")
+ "Either selector or field-selector parameter should be provided")
custom = (
r':.metadata.name,":.status.conditions[?(@.type==\"Ready\")]".status')
pod_status = None
for w in waiter.Waiter(timeout, wait_step):
pod_status = oc_get_custom_resource(
- hostname, "pod", custom, selector=selector)
-
- if not pod_status:
- raise exceptions.ExecutionError(
- "Unable to find pod with selector %s" % selector)
+ hostname, "pod", custom, selector=selector,
+ field_selector=field_selector)
+
+ if not pod_status and pod_count != 0:
+ selection_text = ''
+ if selector and field_selector:
+ selection_text += 'selector {} and field-selector {}'.format(
+ selector, field_selector)
+ elif selector:
+ selection_text += 'selector {}'.format(selector)
+ else:
+ selection_text += 'field-selector {}'.format(field_selector)
+ raise exceptions.ExecutionError(
+ "Unable to find pods with mentioned {}".format(
+ selection_text))
status = [status for _, status in pod_status]
if len(status) == pod_count == status.count("True"):
return
@@ -2040,3 +2044,108 @@ def match_pv_and_heketi_volumes(hostname, heketi_volumes, pvc_prefix):
"PV: {}, Heketi volumes {}, "
"Difference: {}".format(pv_volumes, heketi_volumes, vol_diff))
assert not vol_diff, err_msg
+
+
+def oc_create_offline_block_volume_expand_job(
+ hostname, pvc_name, job_name_prefix='block-expand-job',
+ mount_path='/mnt'):
+ """Create Block Volume Expand Job with block PVC mounted at /mnt
+
+ Args:
+ hostname (str): Hostname on which we want to run oc commands
+ pvc_name (str): Name of a block PVC to attach to block expand job
+ job_name_prefix (str): Job name prefix given by user at the time
+ of job creation
+ mount_path (str): Where PVC should be mounted
+
+ Returns:
+ string: Name of the created job
+ """
+
+ # Find MOUNTPOINT on host node wrt to the mount_path on pod and run
+ # xfs_growfs on host MOUNTPOINT
+ command = [
+ 'sh', '-c', 'echo -e "# df -Th {0}" && df -Th {0} && '
+ 'DEVICE=$(df --output=source {0} | sed -e /^Filesystem/d) && '
+ 'MOUNTPOINT=$($EXEC_ON_HOST lsblk $DEVICE -n -o MOUNTPOINT) && '
+ '$EXEC_ON_HOST xfs_growfs $MOUNTPOINT > /dev/null && '
+ 'echo -e "\n# df -Th {0}" && df -Th {0}'.format(mount_path)
+ ]
+
+ # This will be a privileged container; be careful while playing with it
+ job_name = "%s-%s" % (job_name_prefix, utils.get_random_str())
+ job_data = json.dumps({
+ "apiVersion": "batch/v1",
+ "kind": "Job",
+ "metadata": {"name": job_name},
+ "spec": {
+ "completions": 1,
+ "template": {
+ "spec": {
+ "containers": [{
+ "image": "rhel7",
+ "env": [
+ {
+ "name": "HOST_ROOTFS",
+ "value": "/rootfs"
+ },
+ {
+ "name": "EXEC_ON_HOST",
+ "value": "nsenter --root=$(HOST_ROOTFS) "
+ "nsenter -t 1 -m"
+ }
+ ],
+ "command": command,
+ "name": "rhel7",
+ "volumeMounts": [
+ {"mountPath": mount_path, "name": "block-pvc"},
+ {"mountPath": "/dev", "name": "host-dev"},
+ {"mountPath": "/rootfs", "name": "host-rootfs"}
+ ],
+ "securityContext": {"privileged": True}
+ }],
+ "volumes": [
+ {
+ "name": "block-pvc", "persistentVolumeClaim": {
+ "claimName": pvc_name
+ }
+ },
+ {
+ "name": "host-dev", "hostPath": {
+ "path": "/dev"
+ }
+ },
+ {
+ "name": "host-rootfs", "hostPath": {
+ "path": "/"
+ }
+ }
+ ],
+ "restartPolicy": "Never"
+ }
+ }
+ }
+ })
+
+ oc_create(hostname, job_data, 'stdin')
+ return job_name
+
+
+def is_job_complete(hostname, job_name, namespace=""):
+ """Check job completion status
+
+ Args:
+ hostname (str): Hostname on which we want to run command
+ job_name (str): k8s job name
+ namespace (str): k8s namespace name
+ Return:
+ bool
+ """
+
+ cmd = ['oc', 'get', 'jobs', '-o=custom-columns=:.status.succeeded',
+ '--no-headers', job_name]
+
+ cmd += ['-n', namespace] if namespace else []
+
+ out = command.cmd_run(cmd, hostname=hostname)
+ return out == "1"
diff --git a/openshift-storage-libs/openshiftstoragelibs/openshift_storage_libs.py b/openshift-storage-libs/openshiftstoragelibs/openshift_storage_libs.py
index d17edb5b..bbac8d29 100644
--- a/openshift-storage-libs/openshiftstoragelibs/openshift_storage_libs.py
+++ b/openshift-storage-libs/openshiftstoragelibs/openshift_storage_libs.py
@@ -11,6 +11,7 @@ from openshiftstoragelibs.exceptions import (
NotSupportedException,
)
from openshiftstoragelibs.openshift_ops import (
+ cmd_run_on_gluster_pod_or_node,
oc_get_custom_resource)
from openshiftstoragelibs.openshift_version import get_openshift_version
from openshiftstoragelibs import waiter
@@ -236,3 +237,25 @@ def get_active_and_enabled_devices_from_mpath(node, mpath):
'active': active,
'enabled': enabled}
return out_dic
+
+
+def get_pvs_info(node, gluster_node_ip, devices_list, raise_on_error=True):
+ """Get pv_name, pv_uuid and vg_name from given node.
+
+ Args:
+ node (str): ocp client node ip.
+ gluster_node_ip (str): where we want to run the command.
+ devices_list (list): list of devices to get pvs info for.
+ raise_on_error (bool): whether to raise an exception on failure.
+ Returns:
+ pvs_info (list): pvs info for devices_list
+ Raises:
+ ExecutionError: In case of any failure if raise_on_error=True.
+ """
+
+ pvs_info = []
+ for device in devices_list:
+ cmd = ("pvs -o pv_name,pv_uuid,vg_name | grep {}".format(device))
+ out = cmd_run_on_gluster_pod_or_node(
+ node, cmd, gluster_node_ip, raise_on_error=raise_on_error)
+ pvs_info.append(out.split())
+ return pvs_info
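A short sketch of get_pvs_info(); the two addresses are placeholders and each entry of the result is the whitespace-split 'pvs' output for one device:

    from openshiftstoragelibs.openshift_storage_libs import get_pvs_info

    ocp_client, gluster_node = '10.70.0.10', '10.70.0.20'  # placeholders
    pvs_info = get_pvs_info(ocp_client, gluster_node, ['/dev/sdc', '/dev/sdd'])
    for pv_name, pv_uuid, vg_name in pvs_info:
        print("{} {} {}".format(pv_name, pv_uuid, vg_name))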
diff --git a/pytest.ini b/pytest.ini
index 20ec3833..af8fd83d 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -1,8 +1,8 @@
[pytest]
junit_family=legacy
markers =
- tier0: Tier0 functional test cases
tier1: Tier1 functional test cases
tier2: Tier2 functional test cases
- tier3: Tier3 non functional test cases
+ tier3: Tier3 functional test cases
+ tier4: Tier4 functional test cases
scaleUp: ScaleUp non functional test case
diff --git a/tests/functional/arbiter/test_arbiter.py b/tests/functional/arbiter/test_arbiter.py
index 06beef67..8a0a8f9e 100755
--- a/tests/functional/arbiter/test_arbiter.py
+++ b/tests/functional/arbiter/test_arbiter.py
@@ -126,7 +126,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
return bricks
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_arbiter_pvc_create(self):
"""Validate dynamic provision of an arbiter volume"""
@@ -142,7 +142,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
self.verify_amount_and_proportion_of_arbiter_and_data_bricks(vol_info)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_arbiter_pvc_mount_on_pod(self):
"""Validate new volume creation using app pod"""
# Create sc with gluster arbiter info
@@ -155,7 +155,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
mount_path = "/mnt"
pod_name = openshift_ops.oc_create_tiny_pod_with_volume(
self.node, self.pvc_name, "test-arbiter-pvc-mount-on-app-pod",
- mount_path=mount_path)
+ mount_path=mount_path, image=self.io_container_image_cirros)
self.addCleanup(openshift_ops.oc_delete, self.node, 'pod', pod_name)
# Wait for POD be up and running
@@ -211,7 +211,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
mount_path, available_size))
self.cmd_run(write_data_cmd)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_create_arbiter_vol_with_more_than_one_brick_set(self):
"""Validate volume creation using heketi for more than six brick set"""
@@ -325,7 +325,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
vol_info, arbiter_bricks=2, data_bricks=4)
- @pytest.mark.tier1
+ @pytest.mark.tier2
# NOTE(vponomar): do not create big volumes setting value less than 64
# for 'avg_file_size'. It will cause creation of very huge amount of files
# making one test run very loooooooong.
@@ -395,7 +395,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
"Arbiter brick '%s' was not verified. Looks like it was "
"not found on any of gluster PODs/nodes." % brick["name"])
- @pytest.mark.tier1
+ @pytest.mark.tier3
@ddt.data(
(False, False, True, True),
(True, True, False, False),
@@ -483,7 +483,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
self.assertIn(
data_brick.split(':')[0], data_nodes_ip_addresses)
- @pytest.mark.tier2
+ @pytest.mark.tier3
def test_create_delete_pvcs_to_make_gluster_reuse_released_space(self):
"""Validate reuse of volume space after deletion of PVCs"""
min_storage_gb = 10
@@ -568,7 +568,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
openshift_ops.oc_delete(self.node, 'pvc', pvc_name)
openshift_ops.wait_for_resource_absence(self.node, 'pvc', pvc_name)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_arbiter_volume_expand_using_pvc(self):
"""Validate arbiter volume expansion by PVC creation"""
# Create sc with gluster arbiter info
@@ -594,7 +594,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
self.verify_amount_and_proportion_of_arbiter_and_data_bricks(
vol_info, arbiter_bricks=2, data_bricks=4)
- @pytest.mark.tier1
+ @pytest.mark.tier2
@ddt.data(True, False)
def test_expand_arbiter_volume_setting_tags_on_nodes_or_devices(
self, node_tags):
@@ -682,7 +682,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
for brick in bricks['data_list']:
self.assertIn(brick['name'].split(':')[0], data_hosts)
- @pytest.mark.tier1
+ @pytest.mark.tier2
@ddt.data(
(4, '250M', True),
(8, '122M', True),
@@ -758,7 +758,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
openshift_ops.verify_pvc_size(self.node, self.pvc_name, pvc_size)
vol_expanded = True
- @pytest.mark.tier0
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_arbiter_volume_delete_using_pvc(self):
"""Test Arbiter volume delete using pvc when volume is not mounted
@@ -845,7 +845,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
gluster_ops.match_heketi_and_gluster_volumes_by_prefix(
heketi_volume_names, "{}_".format(prefix))
- @pytest.mark.tier1
+ @pytest.mark.tier2
@podcmd.GlustoPod()
def test_arbiter_volume_node_tag_removal(self):
"""Test remove tags from nodes and check if arbiter volume is
@@ -909,7 +909,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
"{}".format(arbiter_brick_ip))
self.assertGreaterEqual(len(arbiter_brick_ip), 1, err_msg)
- @pytest.mark.tier1
+ @pytest.mark.tier3
@podcmd.GlustoPod()
def test_arbiter_volume_delete_using_pvc_mounted_on_app_pod(self):
"""Test Arbiter volume delete using a pvc when volume is mounted
@@ -1003,7 +1003,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
self.assertEqual(
out, err_msg, "LV {} still present".format(lv))
- @pytest.mark.tier0
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_arbiter_volume_create_device_size_greater_than_volume_size(self):
"""Validate creation of arbiter volume through heketi"""
@@ -1219,7 +1219,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
"is created {} and and number of nodes having the arbiter"
" tag {}".format(arbiter_brick_ip, arbiter_tag_node_ip))
- @pytest.mark.tier1
+ @pytest.mark.tier2
@ddt.data(
((None, None, None, True),
('required', 'disabled', 'disabled', True)),
@@ -1241,7 +1241,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
else:
self._arbiter_volume_node_tag_operations(node_tags)
- @pytest.mark.tier1
+ @pytest.mark.tier2
@podcmd.GlustoPod()
def test_create_arbiter_volume_with_avg_file_size_and_expand(self):
"""
@@ -1492,7 +1492,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
"expansion".format(
arbiter_brick_size_after, arbiter_brick_size_before))
- @pytest.mark.tier2
+ @pytest.mark.tier4
def test_poweroff_gluster_nodes_after_filling_inodes_arbiter_brick(self):
"""Validate io after filling up the arbiter brick and node poweroff"""
@@ -1513,7 +1513,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
# Create PVC and corresponding App pod
self.create_and_wait_for_pvc(sc_name=sc_name)
dc_name, pod_name = self.create_dc_with_pvc(
- self.pvc_name, is_busybox=True)
+ self.pvc_name, image=self.io_container_image_busybox)
# Get vol info
vol_info = openshift_ops.get_gluster_vol_info_by_pvc_name(
@@ -1838,7 +1838,7 @@ class TestArbiterVolumeCreateExpandDelete(baseclass.BaseClass):
"Failed to match vg {} from gluster side with vg {} "
"from heketi side".format(device, h_devices_with_tag))
- @pytest.mark.tier1
+ @pytest.mark.tier2
@ddt.data(
(1, False),
(2, True),
diff --git a/tests/functional/gluster_stability/test_brickmux_stability.py b/tests/functional/gluster_stability/test_brickmux_stability.py
index a2134fc2..21e3daf9 100644
--- a/tests/functional/gluster_stability/test_brickmux_stability.py
+++ b/tests/functional/gluster_stability/test_brickmux_stability.py
@@ -23,7 +23,7 @@ class TestBrickMux(BaseClass):
super(TestBrickMux, self).setUp()
self.node = self.ocp_master_node[0]
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_brick_multiplex_pids_with_diff_vol_option_values(self):
"""Test Brick Pid's should be same when values of vol options are diff
"""
diff --git a/tests/functional/gluster_stability/test_gluster_block_stability.py b/tests/functional/gluster_stability/test_gluster_block_stability.py
index 4ac68ca8..3cf62e48 100644
--- a/tests/functional/gluster_stability/test_gluster_block_stability.py
+++ b/tests/functional/gluster_stability/test_gluster_block_stability.py
@@ -201,7 +201,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
oc_adm_manage_node, self.node, '--schedulable=true',
nodes=g_nodes)
- @pytest.mark.tier1
+ @pytest.mark.tier4
def test_initiator_side_failures_initiator_and_target_on_different_node(
self):
@@ -211,7 +211,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
# Perform validation of intiator side failures
self.initiator_side_failures()
- @pytest.mark.tier1
+ @pytest.mark.tier4
def test_initiator_side_failures_initiator_and_target_on_same_node(self):
# Note: This test case is supported for containerized gluster only.
@@ -226,12 +226,6 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
# Get the list of nodes other than gluster
o_nodes = list((set(nodes) - set(g_nodes)))
- # Skip the test case if it is crs setup
- if not g_nodes:
- self.skipTest("skipping test case because it is not a "
- "containerized gluster setup. "
- "This test case is for containerized gluster only.")
-
# Make other nodes unschedulable
oc_adm_manage_node(
self.node, '--schedulable=false', nodes=o_nodes)
@@ -242,7 +236,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
self.initiator_side_failures()
- @pytest.mark.tier1
+ @pytest.mark.tier4
def test_target_side_failures_gluster_blockd_kill_when_ios_going_on(self):
"""Run I/Os on block volume while gluster-blockd is stoped"""
self.create_and_wait_for_pvc()
@@ -297,7 +291,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
mpath_dev_new = get_active_and_enabled_devices_from_mpath(node, mpath)
self.assertEqual(mpath_dev['active'][0], mpath_dev_new['active'][0])
- @pytest.mark.tier1
+ @pytest.mark.tier4
def test_target_side_failures_tcmu_runner_kill_when_ios_going_on(self):
"""Run I/Os on block volume while tcmu-runner is stoped"""
self.create_and_wait_for_pvc()
@@ -388,7 +382,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
# Verify that all the paths are up
self.verify_all_paths_are_up_in_multipath(mpath, hacount, node)
- @pytest.mark.tier2
+ @pytest.mark.tier4
def test_initiator_side_failure_restart_pod_when_target_node_is_down(self):
"""Restart app pod when one gluster node is down"""
# Skip test if does not meets requirements
@@ -580,7 +574,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
return initiator_nodes[0]
- @pytest.mark.tier1
+ @pytest.mark.tier4
def test_initiator_and_target_on_same_node_app_pod_deletion(self):
"""Test iscsi login and logout functionality on deletion of an app
pod when initiator and target are on the same node.
@@ -603,7 +597,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
# Perform app pod creation and deletion along with block validations
self._validate_app_creation_and_deletion_along_block_validations()
- @pytest.mark.tier1
+ @pytest.mark.tier4
def test_initiator_and_target_on_different_node_app_pod_deletion(self):
"""Perform block validation during app pod deletion and when initiator
and target nodes are different"""
@@ -687,7 +681,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
self.verify_iscsi_sessions_and_multipath(pvc, dc_name)
- @pytest.mark.tier2
+ @pytest.mark.tier4
def test_initiator_and_target_on_diff_node_abrupt_reboot_of_initiator_node(
self):
"""Abrupt reboot initiator node to make sure paths rediscovery is
@@ -702,7 +696,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
# Validate iscsi and multipath of app pods after initiator node reboot
self._perform_initiator_node_reboot_and_block_validations(ini_node)
- @pytest.mark.tier2
+ @pytest.mark.tier4
def test_initiator_and_target_on_same_node_abrupt_reboot_of_initiator_node(
self):
"""Abrupt reboot initiator node to make sure paths rediscovery is
@@ -719,7 +713,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
self._perform_initiator_node_reboot_and_block_validations(
ini_node, is_ini_taget_same=True)
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_validate_gluster_ip_utilized_by_blockvolumes(self):
""" Validate if all gluster nodes IP are
utilized by blockvolume when using HA=2
@@ -785,7 +779,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
"Could not match glusterips in pv describe, difference is %s "
% unmatched_tpips)
- @pytest.mark.tier1
+ @pytest.mark.tier4
@ddt.data('tcmu-runner', 'gluster-blockd')
def test_volume_create_delete_when_block_services_are_down(self, service):
"""Create and Delete PVC's when block related services gluster-blockd,
@@ -952,7 +946,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
self.heketi_client_node, self.heketi_server_url)
self.assertNotIn(vol_name_prefix, h_vol_list)
- @pytest.mark.tier1
+ @pytest.mark.tier3
def test_path_failures_on_initiator_node_migration_and_pod_restart(self):
"""Verify path failures on initiator node migration
and app pod restart. Also, make sure that existing
@@ -1043,7 +1037,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
"in out '%s'" % (file_size, _file, out))
self.assertIn(six.text_type(file_size), out, msg)
- @pytest.mark.tier1
+ @pytest.mark.tier4
def test_tcmu_runner_failure_while_creating_and_deleting_pvc(self):
"""Kill the tcmu-runner service while creating and deleting PVC's"""
@@ -1111,7 +1105,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
'volume count is 9 ' % volume_count)
self.assertEqual(9, volume_count, msg)
- @pytest.mark.tier2
+ @pytest.mark.tier4
def test_delete_block_volume_with_one_node_down(self):
"""Validate deletion of block volume when one node is down"""
@@ -1152,7 +1146,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
self.heketi_client_node, self.heketi_server_url,
block_volume["id"])
- @pytest.mark.tier2
+ @pytest.mark.tier4
def test_create_block_pvcs_with_network_failure(self):
"""Block port 24010 while creating PVC's, run I/O's and verify
multipath"""
@@ -1187,7 +1181,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
self.verify_iscsi_sessions_and_multipath(pvc_name, dc_with_pod[0])
oc_rsh(self.node, dc_with_pod[1], cmd_run_io % 'file3')
- @pytest.mark.tier2
+ @pytest.mark.tier4
@ddt.data('active', 'passive', 'all_passive')
def test_run_io_and_block_port_on_active_path_network_failure(
self, path='active'):
@@ -1357,7 +1351,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
self.verify_all_paths_are_up_in_multipath(
list(mpaths)[0], hacount, ini_node, timeout=1)
- @pytest.mark.tier2
+ @pytest.mark.tier4
def test_initiator_failures_reboot_initiator_node_when_target_node_is_down(
self):
"""Restart initiator node when gluster node is down, to make sure paths
@@ -1366,7 +1360,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
self._perform_block_validations_when_target_node_is_down(
is_reboot_initiator=True)
- @pytest.mark.tier2
+ @pytest.mark.tier4
def test_block_behaviour_when_target_node_is_down(self):
"""Test block behaviour of 4 block PVC's accross 2 BHV's when target
node is down and make sure paths rediscovery is happening.
@@ -1449,7 +1443,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
# Restore targetcli workability
loop_for_killing_targetcli_process._proc.terminate()
- @pytest.mark.tier2
+ @pytest.mark.tier4
@ddt.data(True, False)
def test_delete_block_pvcs_with_network_failure(self, is_close_port=True):
"""Validate heketi pod logs while producing network faliure and
@@ -1517,7 +1511,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
.format(final_free_storage, initial_free_storage))
raise AssertionError(err_msg)
- @pytest.mark.tier1
+ @pytest.mark.tier3
@podcmd.GlustoPod()
def test_delete_block_device_pvc_while_io_in_progress(self):
"""Delete block device or pvc while io is in progress"""
@@ -1649,7 +1643,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
for blockvol in gluster_vol_info:
self.assertNotIn("blockvol_", blockvol)
- @pytest.mark.tier2
+ @pytest.mark.tier4
def test_create_and_delete_block_pvcs_with_network_failure(self):
"""Create and delete volumes after blocking the port 24010 on 51% of
the nodes"""
@@ -1700,7 +1694,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
self.assertNotIn(
vol_id, blockvolume_list, msg % (vol_id, blockvolume_list))
- @pytest.mark.tier2
+ @pytest.mark.tier3
@podcmd.GlustoPod()
def test_pvc_state_when_node_is_power_on_and_off(self):
"""Verify PVC gets bound after gluster node is powered off and on
@@ -1871,7 +1865,7 @@ class TestGlusterBlockStability(GlusterBlockBaseClass):
self.node, delete_log_level.format(TCMU_CONF), gluster_node=g_node)
@skip("Blocked by BZ-1755903")
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_tcmu_log_levels(self):
"""Check tcmu log levels and verify log levels"""
g_node, get_system_time = self.gluster_servers[0], "date '+%F %T'"
diff --git a/tests/functional/gluster_stability/test_restart_gluster_block_prov_pod.py b/tests/functional/gluster_stability/test_restart_gluster_block_prov_pod.py
index 7551011a..199c7552 100644
--- a/tests/functional/gluster_stability/test_restart_gluster_block_prov_pod.py
+++ b/tests/functional/gluster_stability/test_restart_gluster_block_prov_pod.py
@@ -15,7 +15,7 @@ from openshiftstoragelibs.openshift_ops import (
class TestRestartGlusterBlockPod(BaseClass):
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_restart_gluster_block_provisioner_pod(self):
"""Restart gluster-block provisioner pod."""
diff --git a/tests/functional/gluster_stability/test_restart_gluster_services.py b/tests/functional/gluster_stability/test_restart_gluster_services.py
index d292ba5c..ea188f36 100644
--- a/tests/functional/gluster_stability/test_restart_gluster_services.py
+++ b/tests/functional/gluster_stability/test_restart_gluster_services.py
@@ -169,7 +169,7 @@ class GlusterStabilityTestSetup(GlusterBlockBaseClass):
wait_to_heal_complete()
@skip("Blocked by BZ-1634745, BZ-1635736, BZ-1636477")
- @pytest.mark.tier1
+ @pytest.mark.tier4
@ddt.data(SERVICE_BLOCKD, SERVICE_TCMU, SERVICE_TARGET)
def test_restart_services_provision_volume_and_run_io(self, service):
"""Restart gluster service then validate volumes"""
@@ -212,7 +212,7 @@ class GlusterStabilityTestSetup(GlusterBlockBaseClass):
self.validate_volumes_and_blocks()
@skip("Blocked by BZ-1634745, BZ-1635736, BZ-1636477")
- @pytest.mark.tier1
+ @pytest.mark.tier4
def test_target_side_failures_brick_failure_on_block_hosting_volume(self):
"""Target side failures - Brick failure on block hosting volume"""
skip_msg = (
@@ -250,7 +250,7 @@ class GlusterStabilityTestSetup(GlusterBlockBaseClass):
self.validate_volumes_and_blocks()
@skip("Blocked by BZ-1634745, BZ-1635736, BZ-1636477")
- @pytest.mark.tier1
+ @pytest.mark.tier4
def test_start_stop_block_volume_service(self):
"""Validate block hosting volume by start/stop operation
diff --git a/tests/functional/heketi/test_block_volumes_heketi.py b/tests/functional/heketi/test_block_volumes_heketi.py
index c5418eb0..cee48242 100644
--- a/tests/functional/heketi/test_block_volumes_heketi.py
+++ b/tests/functional/heketi/test_block_volumes_heketi.py
@@ -11,7 +11,7 @@ from glustolibs.gluster.volume_ops import (
)
import pytest
-from openshiftstoragelibs.baseclass import BaseClass
+from openshiftstoragelibs.baseclass import GlusterBlockBaseClass
from openshiftstoragelibs import exceptions
from openshiftstoragelibs.gluster_ops import (
get_block_hosting_volume_name,
@@ -21,8 +21,10 @@ from openshiftstoragelibs.heketi_ops import (
get_total_free_space,
heketi_blockvolume_create,
heketi_blockvolume_delete,
+ heketi_blockvolume_expand,
heketi_blockvolume_info,
heketi_blockvolume_list,
+ heketi_blockvolume_list_by_name_prefix,
heketi_node_info,
heketi_node_list,
heketi_volume_create,
@@ -30,25 +32,39 @@ from openshiftstoragelibs.heketi_ops import (
heketi_volume_info,
hello_heketi,
)
+from openshiftstoragelibs import heketi_version
from openshiftstoragelibs.openshift_ops import (
cmd_run_on_gluster_pod_or_node,
get_default_block_hosting_volume_size,
+ get_pod_name_from_dc,
+ get_pv_name_from_pvc,
+ is_job_complete,
+ oc_create_offline_block_volume_expand_job,
+ oc_delete,
+ oc_get_custom_resource,
+ oc_rsh,
restart_service_on_gluster_pod_or_node,
+ scale_dc_pod_amount_and_wait,
wait_for_service_status_on_gluster_pod_or_node,
)
+from openshiftstoragelibs.openshift_storage_libs import (
+ get_iscsi_block_devices_by_path,
+ get_mpath_name_from_device_name,
+)
from openshiftstoragelibs import podcmd
+from openshiftstoragelibs import waiter
from openshiftstoragelibs import utils
@ddt.ddt
-class TestBlockVolumeOps(BaseClass):
+class TestBlockVolumeOps(GlusterBlockBaseClass):
"""Class to test heketi block volume deletion with and without block
volumes existing, heketi block volume list, heketi block volume info
and heketi block volume creation with name and block volumes creation
after manually creating a Block Hosting volume.
"""
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_create_block_vol_after_host_vol_creation(self):
"""Validate block-device after manual block hosting volume creation
using heketi
@@ -66,7 +82,7 @@ class TestBlockVolumeOps(BaseClass):
heketi_blockvolume_delete, self.heketi_client_node,
self.heketi_server_url, block_vol["id"])
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_block_host_volume_delete_without_block_volumes(self):
"""Validate deletion of empty block hosting volume"""
block_host_create_info = heketi_volume_create(
@@ -82,7 +98,7 @@ class TestBlockVolumeOps(BaseClass):
self.heketi_client_node, self.heketi_server_url,
block_hosting_vol_id, json=True)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_block_volume_delete(self):
"""Validate deletion of gluster-block volume and capacity of used pool
"""
@@ -102,7 +118,7 @@ class TestBlockVolumeOps(BaseClass):
"The block volume has not been successfully deleted,"
" ID is %s" % block_vol["id"])
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_block_volume_list(self):
"""Validate heketi blockvolume list command works as expected"""
created_vol_ids = []
@@ -124,7 +140,7 @@ class TestBlockVolumeOps(BaseClass):
"Block vol with '%s' ID is absent in the "
"list of block volumes." % vol_id)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_block_host_volume_delete_block_volume_delete(self):
"""Validate block volume and BHV removal using heketi"""
free_space, nodenum = get_total_free_space(
@@ -166,7 +182,7 @@ class TestBlockVolumeOps(BaseClass):
self.assertIn(
block_vol_info["id"], bhv_info["blockinfo"]["blockvolume"])
- @pytest.mark.tier0
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_validate_gluster_voloptions_blockhostvolume(self):
"""Validate gluster volume options which are set for
@@ -222,7 +238,7 @@ class TestBlockVolumeOps(BaseClass):
self.assertEqual(v, vol_info[bhv_name]
["options"][k])
- @pytest.mark.tier1
+ @pytest.mark.tier2
@ddt.data(True, False)
def test_create_blockvolume_with_different_auth_values(self, auth_value):
"""To validate block volume creation with different auth values"""
@@ -250,7 +266,7 @@ class TestBlockVolumeOps(BaseClass):
("Password is %spresent in %s", (assertion_msg_part,
block_vol["id"])))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_block_volume_create_with_name(self):
"""Validate creation of block volume with name"""
vol_name = "autotests-heketi-volume-%s" % utils.get_random_str()
@@ -270,7 +286,7 @@ class TestBlockVolumeOps(BaseClass):
("Block volume Names are not same %s as %s",
(block_vol_info["name"], vol_name)))
- @pytest.mark.tier1
+ @pytest.mark.tier2
@podcmd.GlustoPod()
def test_create_max_num_blockhostingvolumes(self):
num_of_bv = 10
@@ -304,8 +320,12 @@ class TestBlockVolumeOps(BaseClass):
heketi_volume_delete, self.heketi_client_node,
self.heketi_server_url, block_host_create_info["id"],
raise_on_error=False)
- block_vol_size = int(
- block_host_create_info["blockinfo"]["freesize"] / num_of_bv)
+
+ free_size = block_host_create_info["blockinfo"]["freesize"]
+ if free_size > num_of_bv:
+ block_vol_size = int(free_size / num_of_bv)
+ else:
+ block_vol_size, num_of_bv = 1, free_size
# Create specified number of BV's in BHV's created
for i in range(0, num_of_bv):
@@ -367,7 +387,7 @@ class TestBlockVolumeOps(BaseClass):
# Check if all blockhosting volumes are deleted from heketi
self.assertFalse(new_bhv_list)
- @pytest.mark.tier2
+ @pytest.mark.tier3
@podcmd.GlustoPod()
def test_targetcli_when_block_hosting_volume_down(self):
"""Validate no inconsistencies occur in targetcli when block volumes
@@ -442,7 +462,7 @@ class TestBlockVolumeOps(BaseClass):
bhv_name, h_node, err)
raise exceptions.ExecutionError(err)
- @pytest.mark.tier0
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_heket_block_volume_info_with_gluster_block_volume_info(self):
"""Verify heketi block volume info with the backend gluster
@@ -522,3 +542,179 @@ class TestBlockVolumeOps(BaseClass):
h_block_vol_ha, g_block_vol_ha,
err_msg.format(
"ha", h_block_vol_ha, g_block_vol_ha, err_msg))
+
+ @pytest.mark.tier1
+ def test_dynamic_provisioning_block_vol_with_custom_prefix(self):
+ """Verify creation of block volume with custom prefix
+ """
+ node = self.ocp_master_node[0]
+ prefix = "autotest-{}".format(utils.get_random_str())
+
+ # cmd to get available space
+ cmd_get_free_space = "df -h | grep '/mnt'| awk '{{print $4}}'"
+
+ # cmd to create a 100M file
+ cmd_run_io = 'dd if=/dev/zero of=/mnt/testfile bs=1024 count=102400'
+
+ # Create sc with prefix
+ sc_name = self.create_storage_class(
+ sc_name_prefix=prefix,
+ create_vol_name_prefix=True, vol_name_prefix=prefix)
+
+ # Create pvc and wait for it to be in bound state
+ pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name, pvc_size=1)
+
+ # Verify blockvolume list with prefix
+ h_block_vol = heketi_blockvolume_list_by_name_prefix(
+ self.heketi_client_node, self.heketi_server_url, prefix)
+ self.assertIsNotNone(
+ h_block_vol,
+ "Failed to find blockvolume with prefix {}".format(prefix))
+ self.assertTrue(
+ h_block_vol[0][2].startswith(prefix),
+ "Failed to create blockvolume with the prefix {}".format(prefix))
+
+ # Create app pod
+ dc_name, pod_name = self.create_dc_with_pvc(pvc_name)
+
+ err_msg = ("Failed to get the free space for the mount point of the "
+ "app pod {} with error {}")
+ # Get free space of app pod before IO run
+ _, free_space_before, err = oc_rsh(node, pod_name, cmd_get_free_space)
+ self.assertTrue(free_space_before, err_msg.format(pod_name, err))
+
+ # Running IO on the app pod
+ ret, _, err = oc_rsh(node, pod_name, cmd_run_io)
+ self.assertFalse(
+ ret, "Failed to run the Io with the error msg {}".format(err))
+
+ # Get free space of app pod after IO run
+ _, free_space_after, err = oc_rsh(node, pod_name, cmd_get_free_space)
+ self.assertTrue(free_space_after, err_msg.format(pod_name, err))
+ self.assertGreaterEqual(
+ free_space_before, free_space_after,
+ "Expecting free space in app pod before {} should be greater than"
+ " {} as 100M file is created".format(
+ free_space_before, free_space_after))
+
+ def _block_vol_expand_common_offline_vs_online(self, is_online_expand):
+ node = self.ocp_master_node[0]
+ h_node, h_server = self.heketi_client_node, self.heketi_server_url
+
+ version = heketi_version.get_heketi_version(h_node)
+ if version < '9.0.0-14':
+ self.skipTest("heketi-client package {} does not support "
+ "blockvolume expand".format(version.v_str))
+
+ pvc_name = self.create_and_wait_for_pvc()
+ dc_name = self.create_dc_with_pvc(pvc_name)
+ pv_name = get_pv_name_from_pvc(node, pvc_name)
+
+ # get block volume id
+ custom = r":.metadata.annotations.'gluster\.org\/volume-id'"
+ bvol_id = oc_get_custom_resource(node, 'pv', custom, pv_name)
+ self.assertNotEqual(
+ bvol_id[0], "<none>",
+ "volume name not found from pv {}".format(pv_name))
+ bvol_info = heketi_blockvolume_info(
+ h_node, h_server, bvol_id[0], json=True)
+
+ # verify required blockhostingvolume free size
+ bhv_id = bvol_info["blockhostingvolume"]
+ bhv_info = heketi_volume_info(h_node, h_server, bhv_id, json=True)
+ if bhv_info["blockinfo"]["freesize"] < 1:
+ self.skipTest("blockhostingvolume doesn't have required freespace")
+
+ if not is_online_expand:
+ scale_dc_pod_amount_and_wait(node, dc_name[0], pod_amount=0)
+
+ # expand block volume and verify usable size
+ bvol_info = heketi_blockvolume_expand(
+ h_node, h_server, bvol_id[0], 2, json=True)
+ self.assertEqual(
+ bvol_info["size"], 2, "Block volume expand does not works")
+ self.assertEqual(
+ bvol_info["size"], bvol_info["usablesize"],
+ "block volume size is not equal to the usablesize: {}".format(
+ bvol_info))
+
+ return pvc_name, dc_name, bvol_info
+
+ @pytest.mark.tier1
+ def test_block_vol_offline_expand(self):
+ """Test blockvol expansion while PVC is not in use"""
+ node = self.ocp_master_node[0]
+
+ pvc_name, dc_name, bvol_info = (
+ self._block_vol_expand_common_offline_vs_online(False))
+
+ # create and wait for job to be completed
+ jobname = oc_create_offline_block_volume_expand_job(node, pvc_name)
+ self.addCleanup(oc_delete, node, 'job', jobname)
+ for w in waiter.Waiter(300, 5):
+ if is_job_complete(node, jobname):
+ break
+ if w.expired:
+ raise AssertionError(
+ "block expand job {} is not completed".format(jobname))
+
+ # verify expand size
+ scale_dc_pod_amount_and_wait(node, dc_name[0], pod_amount=1)
+ pod_name = get_pod_name_from_dc(node, dc_name[0])
+ ret, size, _ = oc_rsh(
+ node, pod_name,
+ 'df -kh /mnt | sed "/Filesystem/d" | awk \'{print $2}\' '
+ '| sed "s/G//"')
+ self.assertFalse(ret, "Failed to get size from client side")
+ self.assertEqual(
+ int(float(size)), bvol_info["size"], "new size is not "
+ "reflected at mount point after block volume expand")
+
+ @pytest.mark.tier1
+ def test_block_vol_online_expand(self):
+ """Test blockvol expansion while PVC is in use"""
+ node = self.ocp_master_node[0]
+
+ pvc_name, dc_name, bvol_info = (
+ self._block_vol_expand_common_offline_vs_online(True))
+
+ # get pod hostname
+ iqn, _, pod_hostname = self.verify_iscsi_sessions_and_multipath(
+ pvc_name, dc_name[0])
+
+ # Get the paths info from the node
+ device = list(
+ get_iscsi_block_devices_by_path(pod_hostname, iqn).keys())[0]
+
+ # Get mpath name
+ mpath = get_mpath_name_from_device_name(pod_hostname, device)
+
+ # rescan the devices on pod_hostname
+ cmd = "iscsiadm -m node -R -T {}".format(iqn)
+ self.cmd_run(cmd, pod_hostname)
+
+ # refresh multipath device size
+ cmd = "multipathd -k'resize map {}'".format(mpath)
+ self.cmd_run(cmd, pod_hostname)
+
+ # get mount point
+ cmd = "lsblk /dev/{} --output MOUNTPOINT --noheadings".format(device)
+ mount_point = self.cmd_run(cmd, pod_hostname)
+
+ cmd = "xfs_growfs {}".format(mount_point)
+ self.cmd_run(cmd, pod_hostname)
+
+ cmd = ("df -h {} | sed '/Filesystem/d' | awk '{{print $2}}' |"
+ " sed 's/G//'")
+ size = self.cmd_run(cmd.format(mount_point), pod_hostname)
+ self.assertEqual(
+ int(float(size)), bvol_info["size"], "new size is not "
+ "reflected at host mount point after block volume expand")
+
+ # verify expand size
+ pod_name = get_pod_name_from_dc(node, dc_name[0])
+ ret, size, _ = oc_rsh(node, pod_name, cmd.format("/mnt"))
+ self.assertFalse(ret, "Failed to get size from client side")
+ self.assertEqual(
+ int(float(size)), bvol_info["size"], "new size is not "
+ "reflected at mount point after block volume expand")
diff --git a/tests/functional/heketi/test_check_brick_paths.py b/tests/functional/heketi/test_check_brick_paths.py
index de453de8..67c3bc59 100644
--- a/tests/functional/heketi/test_check_brick_paths.py
+++ b/tests/functional/heketi/test_check_brick_paths.py
@@ -28,7 +28,7 @@ class TestHeketiVolume(BaseClass):
results.append(out)
assertion_method('present', results)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_validate_brick_paths_on_gluster_pods_or_nodes(self):
"""Validate brick paths after creation and deletion of a volume."""
diff --git a/tests/functional/heketi/test_create_distributed_replica_heketi_volume.py b/tests/functional/heketi/test_create_distributed_replica_heketi_volume.py
index 64ba4d90..ba8f7f61 100644
--- a/tests/functional/heketi/test_create_distributed_replica_heketi_volume.py
+++ b/tests/functional/heketi/test_create_distributed_replica_heketi_volume.py
@@ -195,21 +195,21 @@ class TestHeketiVolume(BaseClass):
free_space_after_creating_vol,
free_space_after_deleting_vol))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_to_create_and_delete_dist_rep_vol(self):
"""Validate 2x3 vol type creation when the volume cannot be
carved out of a single device and the delete the volume
"""
self._create_distributed_replica_vol(validate_cleanup=True)
- @pytest.mark.tier0
+ @pytest.mark.tier1
@ddt.data(True, False)
def test_create_and_delete_dist_replicated_bhv(self, validate_cleanup):
"""Validate distributed replicated bhv using heketi-cli"""
self._create_distributed_replica_vol(
validate_cleanup, block=True)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_to_create_dist_rep_vol(self):
"""Validate 2x3 vol type creation when the volume cannot be
carved out of a single device
diff --git a/tests/functional/heketi/test_disabling_device.py b/tests/functional/heketi/test_disabling_device.py
index 5c287d43..74a6c3f7 100644
--- a/tests/functional/heketi/test_disabling_device.py
+++ b/tests/functional/heketi/test_disabling_device.py
@@ -8,7 +8,7 @@ from openshiftstoragelibs import podcmd
class TestDisableHeketiDevice(baseclass.BaseClass):
- @pytest.mark.tier2
+ @pytest.mark.tier3
@podcmd.GlustoPod()
def test_create_volumes_enabling_and_disabling_heketi_devices(self):
"""Validate enable/disable of heketi device"""
diff --git a/tests/functional/heketi/test_heketi_authentication.py b/tests/functional/heketi/test_heketi_authentication.py
new file mode 100644
index 00000000..5979c808
--- /dev/null
+++ b/tests/functional/heketi/test_heketi_authentication.py
@@ -0,0 +1,33 @@
+from glusto.core import Glusto as g
+import pytest
+
+from openshiftstoragelibs import baseclass
+from openshiftstoragelibs import heketi_ops
+
+
+class TestHeketiAuthenticationFromOCPClient(baseclass.BaseClass):
+ """Class to test heketi-client authentication"""
+
+ @pytest.mark.tier1
+ def test_heketi_authentication_with_user_credentials(self):
+ """Heketi command authentication with invalid and valid credentials"""
+
+ h_client, h_server = self.heketi_client_node, self.heketi_server_url
+ err_msg = "Error: Invalid JWT token: Token missing iss claim"
+
+ # Run heketi commands with invalid credentials
+ for each_cmd in ("volume list", "topology info"):
+ cmd = "timeout 120 heketi-cli -s {} {}".format(
+ self.heketi_server_url, each_cmd)
+ ret, _, err = g.run(h_client, cmd)
+ self.assertTrue(ret, "Command execution with invalid credentials"
+ " should not succeed")
+ self.assertEqual(
+ err_msg, err.strip(), "Error is different from the command"
+ " execution {}".format(err.strip()))
+
+ # Run heketi commands with valid credentials
+ kwar = {'json_arg': True, 'secret': self.heketi_cli_key,
+ 'user': self.heketi_cli_user}
+ heketi_ops.heketi_volume_list(h_client, h_server, **kwar)
+ heketi_ops.heketi_topology_info(h_client, h_server, **kwar)
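The invalid-credentials branch above only asserts that the CLI exits non-zero and prints the expected JWT error on stderr. A minimal sketch of that assertion pattern outside the test framework (not part of the patch), using subprocess with a hypothetical command and expected message:

    import subprocess

    def assert_cmd_fails_with(cmd, expected_err, timeout=120):
        """Run cmd and assert it fails with the expected stderr (sketch)."""
        proc = subprocess.run(
            cmd, shell=True, capture_output=True, text=True, timeout=timeout)
        assert proc.returncode != 0, (
            "command unexpectedly succeeded: {}".format(cmd))
        assert expected_err == proc.stderr.strip(), (
            "unexpected error output: {}".format(proc.stderr.strip()))

    # Hypothetical example mirroring the unauthenticated heketi-cli call:
    # assert_cmd_fails_with(
    #     "heketi-cli -s http://heketi-storage:8080 volume list",
    #     "Error: Invalid JWT token: Token missing iss claim")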
diff --git a/tests/functional/heketi/test_heketi_brick_evict.py b/tests/functional/heketi/test_heketi_brick_evict.py
new file mode 100644
index 00000000..1cba24c4
--- /dev/null
+++ b/tests/functional/heketi/test_heketi_brick_evict.py
@@ -0,0 +1,180 @@
+import pytest
+
+from glustolibs.gluster import volume_ops
+import six
+
+from openshiftstoragelibs.baseclass import BaseClass
+from openshiftstoragelibs import exceptions
+from openshiftstoragelibs import heketi_ops
+from openshiftstoragelibs import heketi_version
+from openshiftstoragelibs import node_ops
+from openshiftstoragelibs import openshift_ops
+from openshiftstoragelibs import podcmd
+from openshiftstoragelibs import waiter
+
+
+class TestHeketiBrickEvict(BaseClass):
+ """Test Heketi brick evict functionality."""
+
+ def setUp(self):
+ super(TestHeketiBrickEvict, self).setUp()
+
+ version = heketi_version.get_heketi_version(self.heketi_client_node)
+ if version < '9.0.0-14':
+ self.skipTest(
+ "heketi-client package {} does not support brick evict".format(
+ version.v_str))
+
+ self.ocp_client = self.ocp_master_node[0]
+
+ node_list = heketi_ops.heketi_node_list(
+ self.heketi_client_node, self.heketi_server_url)
+
+ if len(node_list) > 3:
+ return
+
+ for node_id in node_list:
+ node_info = heketi_ops.heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url, node_id,
+ json=True)
+ if len(node_info["devices"]) < 2:
+ self.skipTest("does not have extra device/node to evict brick")
+
+ @podcmd.GlustoPod()
+ def _get_gluster_vol_info(self, file_vol):
+ """Get Gluster vol info.
+
+        Args:
+            file_vol (str): file volume name.
+
+ Returns:
+ dict: Info of the given gluster vol.
+ """
+ g_vol_info = volume_ops.get_volume_info(
+ "auto_get_gluster_endpoint", file_vol)
+
+ if not g_vol_info:
+ raise AssertionError("Failed to get volume info for gluster "
+ "volume {}".format(file_vol))
+ if file_vol in g_vol_info:
+ g_vol_info = g_vol_info.get(file_vol)
+ return g_vol_info
+
+ @pytest.mark.tier1
+ def test_heketi_brick_evict(self):
+ """Test brick evict basic functionality and verify it replace a brick
+ properly
+ """
+ h_node, h_server = self.heketi_client_node, self.heketi_server_url
+
+ size = 1
+ vol_info_old = heketi_ops.heketi_volume_create(
+ h_node, h_server, size, json=True)
+ self.addCleanup(
+ heketi_ops.heketi_volume_delete, h_node, h_server,
+ vol_info_old['id'])
+ heketi_ops.heketi_brick_evict(
+ h_node, h_server, vol_info_old["bricks"][0]['id'])
+
+ vol_info_new = heketi_ops.heketi_volume_info(
+ h_node, h_server, vol_info_old['id'], json=True)
+
+ bricks_old = set({brick['path'] for brick in vol_info_old["bricks"]})
+ bricks_new = set({brick['path'] for brick in vol_info_new["bricks"]})
+ self.assertEqual(
+ len(bricks_new - bricks_old), 1,
+ "Brick was not replaced with brick evict for vol \n {}".format(
+ vol_info_new))
+
+ gvol_info = self._get_gluster_vol_info(vol_info_new['name'])
+ gbricks = set(
+ {brick['name'].split(":")[1]
+ for brick in gvol_info["bricks"]["brick"]})
+ self.assertEqual(
+ bricks_new, gbricks, "gluster vol info and heketi vol info "
+ "mismatched after brick evict {} \n {}".format(
+ gvol_info, vol_info_new))
+
+ def _wait_for_gluster_pod_after_node_reboot(self, node_hostname):
+ """Wait for glusterfs pod to be ready after node reboot"""
+ openshift_ops.wait_for_ocp_node_be_ready(
+ self.ocp_client, node_hostname)
+ gluster_pod = openshift_ops.get_gluster_pod_name_for_specific_node(
+ self.ocp_client, node_hostname)
+ openshift_ops.wait_for_pod_be_ready(self.ocp_client, gluster_pod)
+ services = (
+ ("glusterd", "running"), ("gluster-blockd", "running"),
+ ("tcmu-runner", "running"), ("gluster-block-target", "exited"))
+ for service, state in services:
+ openshift_ops.check_service_status_on_pod(
+ self.ocp_client, gluster_pod, service, "active", state)
+
+ @pytest.mark.tier4
+ def test_brick_evict_with_node_down(self):
+ """Test brick evict basic functionality and verify brick evict
+ after node down"""
+
+ h_node, h_server = self.heketi_client_node, self.heketi_server_url
+
+ # Disable node if more than 3
+ node_list = heketi_ops.heketi_node_list(h_node, h_server)
+ if len(node_list) > 3:
+ for node_id in node_list[3:]:
+ heketi_ops.heketi_node_disable(h_node, h_server, node_id)
+ self.addCleanup(
+ heketi_ops.heketi_node_enable, h_node, h_server, node_id)
+
+ # Create heketi volume
+ vol_info = heketi_ops.heketi_volume_create(
+ h_node, h_server, 1, json=True)
+ self.addCleanup(
+ heketi_ops.heketi_volume_delete,
+ h_node, h_server, vol_info.get('id'))
+
+ # Get node on which heketi pod is scheduled
+ heketi_pod = openshift_ops.get_pod_name_from_dc(
+ self.ocp_client, self.heketi_dc_name)
+ heketi_node = openshift_ops.oc_get_custom_resource(
+ self.ocp_client, 'pod', '.:spec.nodeName', heketi_pod)[0]
+
+ # Get list of hostname from node id
+ host_list = []
+ for node_id in node_list[3:]:
+ node_info = heketi_ops.heketi_node_info(
+ h_node, h_server, node_id, json=True)
+ host_list.append(node_info.get('hostnames').get('manage')[0])
+
+ # Get brick id and glusterfs node which is not heketi node
+ for node in vol_info.get('bricks', {}):
+ node_info = heketi_ops.heketi_node_info(
+ h_node, h_server, node.get('node'), json=True)
+ hostname = node_info.get('hostnames').get('manage')[0]
+ if (hostname != heketi_node) and (hostname not in host_list):
+ brick_id = node.get('id')
+ break
+
+ # Bring down the glusterfs node
+ vm_name = node_ops.find_vm_name_by_ip_or_hostname(hostname)
+ self.addCleanup(
+ self._wait_for_gluster_pod_after_node_reboot, hostname)
+ self.addCleanup(node_ops.power_on_vm_by_name, vm_name)
+ node_ops.power_off_vm_by_name(vm_name)
+
+        # Wait for the glusterfs node to become NotReady
+ custom = r'":.status.conditions[?(@.type==\"Ready\")]".status'
+ for w in waiter.Waiter(300, 20):
+ status = openshift_ops.oc_get_custom_resource(
+ self.ocp_client, 'node', custom, hostname)
+ if status[0] in ['False', 'Unknown']:
+ break
+ if w.expired:
+ raise exceptions.ExecutionError(
+ "Failed to bring down node {}".format(hostname))
+
+ # Perform brick evict operation
+ try:
+ heketi_ops.heketi_brick_evict(h_node, h_server, brick_id)
+ except AssertionError as e:
+ if ('No Replacement was found' not in six.text_type(e)):
+ raise
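The node-down wait above follows the library's waiter.Waiter poll-until-timeout idiom. A rough standard-library equivalent of that pattern (a sketch, not the library's implementation), with a hypothetical check callable standing in for the oc_get_custom_resource query:

    import time

    def wait_until(check, timeout=300, interval=20):
        """Poll check() until it is truthy or the timeout expires (sketch)."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            if check():
                return True
            time.sleep(interval)
        raise AssertionError("condition not met within {}s".format(timeout))

    # Hypothetical usage mirroring the NotReady wait in the test:
    # wait_until(
    #     lambda: get_node_ready_status(hostname) in ("False", "Unknown"))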
diff --git a/tests/functional/heketi/test_heketi_cluster_operations.py b/tests/functional/heketi/test_heketi_cluster_operations.py
index 68239965..c9289ff5 100644
--- a/tests/functional/heketi/test_heketi_cluster_operations.py
+++ b/tests/functional/heketi/test_heketi_cluster_operations.py
@@ -12,7 +12,7 @@ from openshiftstoragelibs.waiter import Waiter
class TestClusterOperationsTestCases(baseclass.BaseClass):
"""Class for heketi cluster creation related test cases"""
- @pytest.mark.tier1
+ @pytest.mark.tier2
@ddt.data("", "block", "file")
def test_heketi_cluster_create(self, disable_volume_type):
"""Test heketi cluster creation"""
@@ -38,7 +38,7 @@ class TestClusterOperationsTestCases(baseclass.BaseClass):
cluster_info["block"], err_msg % ("block", "False"))
self.assertTrue(cluster_info["file"], err_msg % ("file", "False"))
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_heketi_cluster_list(self):
"""Test and validateheketi cluster list operation"""
# Create heketi cluster
@@ -57,7 +57,7 @@ class TestClusterOperationsTestCases(baseclass.BaseClass):
% (cluster_info["id"], cluster_list["clusters"]))
self.assertIn(cluster_info["id"], cluster_list["clusters"], err_msg)
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_heketi_cluster_info(self):
"""Test and validateheketi cluster info operation"""
# Create heketi cluster
@@ -83,7 +83,7 @@ class TestClusterOperationsTestCases(baseclass.BaseClass):
for param, value in params:
self.assertEqual(get_cluster_info[param], value)
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_heketi_cluster_delete(self):
"""Test and validateheketi cluster delete operation"""
# Create heketi cluster
@@ -103,7 +103,7 @@ class TestClusterOperationsTestCases(baseclass.BaseClass):
% (cluster_info["id"], cluster_list["clusters"]))
self.assertNotIn(cluster_info["id"], cluster_list["clusters"], err_msg)
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_create_heketi_cluster_and_add_node(self):
"""Test heketi node add to a newly created cluster"""
storage_host_info = g.config.get("additional_gluster_servers")
@@ -212,7 +212,7 @@ class TestClusterOperationsTestCases(baseclass.BaseClass):
self.assertEqual(
zone, storage_zone, err_msg % ("zone", zone, storage_zone))
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_heketi_server_operations_cleanup_on_idle_setup(self):
"""Run heketi db clean up on an idle setup"""
h_node, h_url = self.heketi_client_node, self.heketi_server_url
diff --git a/tests/functional/heketi/test_heketi_create_volume.py b/tests/functional/heketi/test_heketi_create_volume.py
index a964206c..938a568e 100644
--- a/tests/functional/heketi/test_heketi_create_volume.py
+++ b/tests/functional/heketi/test_heketi_create_volume.py
@@ -22,26 +22,40 @@ from openshiftstoragelibs import command
from openshiftstoragelibs.heketi_ops import (
get_block_hosting_volume_list,
get_heketi_volume_and_brick_count_list,
+ get_total_free_space,
heketi_blockvolume_create,
heketi_blockvolume_delete,
+ heketi_blockvolume_info,
heketi_cluster_delete,
heketi_cluster_list,
heketi_db_check,
heketi_node_delete,
+ heketi_node_enable,
heketi_node_info,
heketi_node_list,
+ heketi_node_disable,
+ heketi_server_operation_cleanup,
heketi_volume_create,
heketi_volume_delete,
heketi_volume_expand,
heketi_volume_info,
heketi_volume_list,
+ hello_heketi,
)
from openshiftstoragelibs.openshift_ops import (
cmd_run_on_gluster_pod_or_node,
+ get_default_block_hosting_volume_size,
+ get_pod_name_from_dc,
+ kill_service_on_gluster_pod_or_node,
+ oc_delete,
+ restart_service_on_gluster_pod_or_node,
+ wait_for_pod_be_ready,
+ wait_for_resource_absence,
wait_for_service_status_on_gluster_pod_or_node,
)
from openshiftstoragelibs import exceptions
from openshiftstoragelibs import podcmd
+from openshiftstoragelibs import utils
from openshiftstoragelibs import waiter
@@ -55,7 +69,7 @@ class TestHeketiVolume(BaseClass):
super(TestHeketiVolume, cls).setUpClass()
cls.volume_size = 1
- @pytest.mark.tier0
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_volume_create_and_list_volume(self):
"""Validate heketi and gluster volume list"""
@@ -102,7 +116,7 @@ class TestHeketiVolume(BaseClass):
"of Heketi volumes before and after volume creation: %s\n%s" % (
existing_h_vol_list, h_vol_list))
- @pytest.mark.tier1
+ @pytest.mark.tier2
@podcmd.GlustoPod()
def test_create_vol_and_retrieve_vol_info(self):
"""Validate heketi and gluster volume info"""
@@ -131,7 +145,7 @@ class TestHeketiVolume(BaseClass):
self.assertTrue(vol_info, "Failed to get volume info %s" % name)
g.log.info("Successfully got the volume info %s" % name)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_create_vol_and_retrieve_topology_info(self):
volume_names = []
volume_ids = []
@@ -186,7 +200,7 @@ class TestHeketiVolume(BaseClass):
"\n%s" % (volume_ids[2], existing_volumes))
g.log.info("Sucessfully verified the topology info")
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_to_check_deletion_of_cluster(self):
"""Validate deletion of cluster with volumes"""
# List heketi volumes
@@ -237,7 +251,7 @@ class TestHeketiVolume(BaseClass):
self.assertTrue(out, ("Failed to list heketi cluster"))
g.log.info("All heketi cluster successfully listed")
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_to_check_deletion_of_node(self):
"""Validate deletion of a node which contains devices"""
@@ -290,7 +304,7 @@ class TestHeketiVolume(BaseClass):
self.heketi_client_node, heketi_url, node_id, json=True)
self.assertEqual(node_info['state'].lower(), 'online')
- @pytest.mark.tier1
+ @pytest.mark.tier3
def test_blockvolume_create_no_free_space(self):
"""Validate error is returned when free capacity is exhausted"""
@@ -350,7 +364,7 @@ class TestHeketiVolume(BaseClass):
max_block_hosting_vol_size, blockvol2, block_hosting_vol,
'\n'.join(file_volumes_debug_info))))
- @pytest.mark.tier2
+ @pytest.mark.tier4
@podcmd.GlustoPod()
def test_heketi_volume_create_with_cluster_node_down(self):
if len(self.gluster_servers) < 5:
@@ -394,85 +408,114 @@ class TestHeketiVolume(BaseClass):
volume_name, g_vol_list)
self.assertIn(volume_name, g_vol_list, msg)
- @pytest.mark.tier1
- @ddt.data('', 'block')
+ def _respin_heketi_pod(self):
+ h_node, h_url = self.heketi_client_node, self.heketi_server_url
+ ocp_node = self.ocp_master_node[0]
+
+ # get heketi-pod name
+ heketi_pod_name = get_pod_name_from_dc(ocp_node, self.heketi_dc_name)
+ # delete heketi-pod (it restarts the pod)
+ oc_delete(
+ ocp_node, "pod", heketi_pod_name,
+ collect_logs=self.heketi_logs_before_delete)
+ wait_for_resource_absence(ocp_node, "pod", heketi_pod_name)
+
+ # get new heketi-pod name
+ heketi_pod_name = get_pod_name_from_dc(ocp_node, self.heketi_dc_name)
+ wait_for_pod_be_ready(ocp_node, heketi_pod_name)
+
+ # check heketi server is running
+ err_msg = "Heketi server %s is not alive" % h_url
+ self.assertTrue(hello_heketi(h_node, h_url), err_msg)
+
+ def _cleanup_heketi_volumes(self, existing_volumes):
+ h_node, h_url = self.heketi_client_node, self.heketi_server_url
+
+ volumes = heketi_volume_list(h_node, h_url, json=True).get("volumes")
+ new_volumes = list(set(volumes) - set(existing_volumes))
+ for volume in new_volumes:
+ h_vol_info = heketi_volume_info(h_node, h_url, volume, json=True)
+ if h_vol_info.get("block"):
+ for block_vol in (
+ h_vol_info.get("blockinfo").get("blockvolume")):
+ heketi_blockvolume_delete(h_node, h_url, block_vol)
+ heketi_volume_delete(h_node, h_url, volume, raise_on_error=False)
+
+ @pytest.mark.tier2
+ @ddt.data("", "block")
def test_verify_delete_heketi_volumes_pending_entries_in_db(
self, vol_type):
- """Verify pending entries of blockvolumes/volumes and bricks in db
- during heketi blockvolume/volume delete operation.
+ """Verify pending entries of blockvolumes/volumes and bricks in heketi
+ db during blockvolume/volume delete operation.
"""
-
# Create a large volumes to observe the pending operation
- h_volume_size, vol_count, volume_ids, async_obj = 50, 5, [], []
+ vol_count, volume_ids, async_obj = 10, [], []
h_node, h_url = self.heketi_client_node, self.heketi_server_url
+        # Verify file/block volume pending operations before creation
h_db_check_before = heketi_db_check(h_node, h_url)
- h_db_check_bricks_before = h_db_check_before["bricks"]
- h_db_check_vol_before = h_db_check_before["{}volumes".format(vol_type)]
+ h_db_check_bricks_before = h_db_check_before.get("bricks")
+ h_db_check_vol_before = (
+ h_db_check_before.get("{}volumes".format(vol_type)))
- # Verify file/block volumes pending operation before creation
- # Wait for few min's if found and pending operation or skip tc
- for w in waiter.Waiter(timeout=300, interval=10):
- h_db_check_before = heketi_db_check(h_node, h_url)
- h_db_check_bricks_before = h_db_check_before["bricks"]
- h_db_check_vol_before = h_db_check_before["{}volumes".format(
- vol_type)]
+ # Get existing heketi volume list
+ existing_volumes = heketi_volume_list(h_node, h_url, json=True)
- if(not(h_db_check_vol_before["pending"]
- and h_db_check_bricks_before["pending"])):
- break
-
- if w.expired:
- self.skipTest(
- "Skip TC due to unexpected {} volumes or {} bricks pending"
- " operations for {}volume".format(
- h_db_check_vol_before["pending"],
- h_db_check_bricks_before["pending"],
- vol_type))
-
- # Fetch BHV list
- if vol_type == 'block':
- h_bhv_list_before = {
- bhv for bhv in (
- get_block_hosting_volume_list(h_node, h_url).keys())}
-
- # Create file/block volumes to find out pending operations
+ # Add cleanup function to clean stale volumes created during test
+ self.addCleanup(
+ self._cleanup_heketi_volumes, existing_volumes.get("volumes"))
+
+ # Delete heketi pod to clean db operations
+ if(h_db_check_bricks_before.get("pending")
+ or h_db_check_vol_before.get("pending")):
+ self._respin_heketi_pod()
+
+ # Calculate heketi volume size
+ free_space, nodenum = get_total_free_space(h_node, h_url)
+ free_space_available = int(free_space / nodenum)
+ if free_space_available > vol_count:
+ h_volume_size = int(free_space_available / vol_count)
+ if h_volume_size > 50:
+ h_volume_size = 50
+ else:
+ h_volume_size, vol_count = 1, free_space_available
+
+ # Create BHV in case blockvolume size is greater than default BHV size
+ if vol_type:
+ default_bhv_size = get_default_block_hosting_volume_size(
+ h_node, self.heketi_dc_name)
+ if default_bhv_size < h_volume_size:
+ h_volume_name = "autotest-{}".format(utils.get_random_str())
+ bhv_info = self.create_heketi_volume_with_name_and_wait(
+ h_volume_name, free_space_available,
+ raise_on_cleanup_error=False, block=True, json=True)
+ free_space_available -= (
+ int(bhv_info.get("blockinfo").get("reservedsize")) + 1)
+ h_volume_size = int(free_space_available / vol_count)
+
+ # Create file/block volumes
for _ in range(vol_count):
- vol_info = eval("heketi_{}volume_create".format(vol_type))(
- h_node, h_url, h_volume_size, json=True)
- volume_ids.append(vol_info["id"])
+ vol_id = eval("heketi_{}volume_create".format(vol_type))(
+ h_node, h_url, h_volume_size, json=True).get("id")
+ volume_ids.append(vol_id)
self.addCleanup(
eval("heketi_{}volume_delete".format(vol_type)),
- h_node, h_url, vol_info["id"], raise_on_error=False)
-
- h_db_check_after = heketi_db_check(h_node, h_url)
- h_db_check_bricks_after = h_db_check_after["bricks"]
- h_db_check_vol_after = h_db_check_after["{}volumes".format(
- vol_type)]
-
- # Verify file/block volumes pending operation after creation
- err_msg = (
- "Expecting heketi db {}volume pending operation to be "
- "0 but found {}")
- self.assertFalse(
- h_db_check_vol_after["pending"],
- err_msg.format(vol_type, h_db_check_vol_after["pending"]))
-
- # Verify bricks pending operation after volume creation
- err_msg = (
- "Expecting heketi db bricks pending operation to be "
- "0 but found {} after {}volume creation")
- self.assertFalse(
- h_db_check_bricks_after["pending"],
- err_msg.format(
- h_db_check_bricks_after["pending"], vol_type))
+ h_node, h_url, vol_id, raise_on_error=False)
def run_async(cmd, hostname, raise_on_error=True):
async_op = g.run_async(host=hostname, command=cmd)
async_obj.append(async_op)
return async_op
+ bhv_list = []
for vol_id in volume_ids:
+ # Get BHV ids to delete in case of block volumes
+ if vol_type:
+ vol_info = (
+ heketi_blockvolume_info(h_node, h_url, vol_id, json=True))
+ if not vol_info.get("blockhostingvolume") in bhv_list:
+ bhv_list.append(vol_info.get("blockhostingvolume"))
+
# Temporary replace g.run with g.async_run in heketi_volume_delete
# and heketi_blockvolume_delete func to be able to run it in
# background.
@@ -481,151 +524,137 @@ class TestHeketiVolume(BaseClass):
eval("heketi_{}volume_delete".format(vol_type))(
h_node, h_url, vol_id)
- for w in waiter.Waiter(timeout=10, interval=1):
+        # Wait for pending operations to get generated
+ for w in waiter.Waiter(timeout=30, interval=3):
h_db_check = heketi_db_check(h_node, h_url)
- h_db_check_bricks = h_db_check["bricks"]
- h_db_check_vol = h_db_check["{}volumes".format(vol_type)]
-
- if h_db_check_vol["pending"]:
+ h_db_check_vol = h_db_check.get("{}volumes".format(vol_type))
+ if h_db_check_vol.get("pending"):
+ h_db_check_bricks = h_db_check.get("bricks")
break
-
if w.expired:
- err_msg = ("Expected some pending operations found {} operation"
- " for {}volume in Heketi db")
- g.log.error(err_msg.format(h_db_check_vol["pending"], vol_type))
raise exceptions.ExecutionError(
- err_msg.format(h_db_check_vol["pending"], vol_type))
-
- # Verify pending operation during file/block volumes delete
- err_msg = ("Expecting pending operations for {}volume during"
- " deletion")
- self.assertTrue(h_db_check_vol["pending"], err_msg.format(vol_type))
+ "No any pending operations found during {}volumes deletion "
+ "{}".format(vol_type, h_db_check_vol.get("pending")))
- # Verify brick pending operation during delete
- err_msg = ("Expecting bricks pending in multiple of 3 but found {}")
- if vol_type == '':
+        # Verify bricks pending operations during deletion
+        if not vol_type:
+            self.assertTrue(
+                h_db_check_bricks.get("pending"),
+                "Expecting at least one pending brick operation")
self.assertFalse(
- h_db_check_bricks["pending"] % 3,
- err_msg.format(h_db_check_bricks["pending"]))
-
- # Verify volume/blockvolume pending operation during delete
- for w in waiter.Waiter(timeout=100, interval=5):
- h_db_check_vol = heketi_db_check(h_node, h_url)
- h_db_check_bricks = h_db_check_vol["bricks"]
- h_db_check_vol = h_db_check_vol["{}volumes".format(vol_type)]
-
- # verify if file/block volumes and bricks are properly deleted
- if (((not vol_type) and (not h_db_check_bricks["pending"]))
- or (not h_db_check_vol["pending"])):
- break
+ h_db_check_bricks.get("pending") % 3,
+ "Expecting bricks pending count to be multiple of 3 but "
+ "found {}".format(h_db_check_bricks.get("pending")))
+ # Verify file/block volume pending operation during delete
+ for w in waiter.Waiter(timeout=120, interval=10):
+ h_db_check = heketi_db_check(h_node, h_url)
+ h_db_check_vol = h_db_check.get("{}volumes".format(vol_type))
+ h_db_check_bricks = h_db_check.get("bricks")
+ if ((not h_db_check_bricks.get("pending"))
+ and (not h_db_check_vol.get("pending"))):
+ break
if w.expired:
- err_msg = ("Failed to delete {}volumes after waiting for 100 secs")
- raise exceptions.AssertionError(err_msg.format(vol_type))
+ raise exceptions.AssertionError(
+ "Failed to delete {}volumes after 120 secs".format(vol_type))
# Check that all background processes got exited
for obj in async_obj:
ret, out, err = obj.async_communicate()
- self.assertFalse(ret, err)
-
- if vol_type == 'block':
- h_bhv_list_after = {
- bhv for bhv in (
- get_block_hosting_volume_list(h_node, h_url).keys())}
- self.assertTrue(
- h_bhv_list_after,
- "Failed to get the BHV list"
- "{}".format(get_block_hosting_volume_list(h_node, h_url)))
-
- # Get to total number of BHV created
- total_bhvs = h_bhv_list_after - h_bhv_list_before
+ self.assertFalse(
+ ret, "Failed to delete {}volume due to error: {}".format(
+ vol_type, err))
- for bhv_id in total_bhvs:
+ # Delete BHV created during block volume creation
+ if vol_type:
+ for bhv_id in bhv_list:
heketi_volume_delete(h_node, h_url, bhv_id)
- # Verify if BHV is delete and no pending operations left
- for w in waiter.Waiter(timeout=20, interval=1):
- h_db_check = heketi_db_check(h_node, h_url)
- if ((not h_db_check["volumes"]["pending"])
- and (not h_db_check["bricks"]["pending"])):
- break
-
- if w.expired:
- err_msg = ("Failed to delete BHV after waiting for 20 secs")
- raise exceptions.AssertionError(err_msg.format(vol_type))
-
+ # Verify bricks and volume pending operations
h_db_check_after = heketi_db_check(h_node, h_url)
- h_db_check_bricks_after = h_db_check_after["bricks"]
- h_db_check_vol_after = h_db_check_after["{}volumes".format(vol_type)]
+ h_db_check_bricks_after = h_db_check_after.get("bricks")
+ h_db_check_vol_after = (
+ h_db_check_after.get("{}volumes".format(vol_type)))
+ act_brick_count = h_db_check_bricks_after.get("pending")
+ act_vol_count = h_db_check_vol_after.get("pending")
# Verify bricks pending operation after delete
- if vol_type == "":
- err_msg = ("Expecting 0 bricks pending operations after deletion"
- " but found {} after {}volume deletion")
+ err_msg = "{} operations are pending for {} after {}volume deletion"
+ if not vol_type:
self.assertFalse(
- h_db_check_bricks_after["pending"],
- err_msg.format(h_db_check_bricks_after["pending"], vol_type))
+ act_brick_count, err_msg.format(
+ act_brick_count, "brick", vol_type))
- # Verify volumes/bockvolumes pending operation after delete
- err_msg = ("Expecting 0 {}volume pending operations after deletion"
- " but found {}")
+ # Verify file/bock volumes pending operation after delete
self.assertFalse(
- h_db_check_vol_after["pending"],
- err_msg.format(vol_type, h_db_check_vol_after["pending"]))
+ act_vol_count, err_msg.format(act_vol_count, "volume", vol_type))
- # Verify if initial and final volumes/blockvolumes are same
- err_msg = ("Total volume before {} and after {} creation not matched"
- .format(h_db_check_vol_after["total"],
- h_db_check_vol_before["total"]))
+ act_brick_count = h_db_check_bricks_after.get("total")
+ act_vol_count = h_db_check_vol_after.get("total")
+ exp_brick_count = h_db_check_bricks_before.get("total")
+ exp_vol_count = h_db_check_vol_before.get("total")
+ err_msg = "Actual {} and expected {} {} counts are not matched"
+
+ # Verify if initial and final file/block volumes are same
self.assertEqual(
- h_db_check_vol_after["total"], h_db_check_vol_before["total"],
- err_msg)
+ act_vol_count, exp_vol_count,
+ err_msg.format(act_vol_count, exp_vol_count, "volume"))
# Verify if initial and final bricks are same
- err_msg = ("Total bricks before {} and after {} creation not matched"
- .format(h_db_check_bricks_after["total"],
- h_db_check_bricks_before["total"]))
self.assertEqual(
- h_db_check_bricks_after["total"],
- h_db_check_bricks_before["total"],
- err_msg)
+ act_brick_count, exp_brick_count,
+ err_msg.format(act_brick_count, exp_brick_count, "brick"))
- @pytest.mark.tier1
+ @pytest.mark.tier2
@ddt.data('', 'block')
def test_verify_create_heketi_volumes_pending_entries_in_db(
self, vol_type):
"""Verify pending entries of file/block volumes in db during
- volumes creation from heketi side
+ volumes creation from heketi side
"""
-
# Create large volumes to observe the pending operations
- h_volume_size, vol_count, h_vol_creation_async_op = 50, 3, []
+ vol_count, h_vol_creation_async_op = 3, []
h_node, h_url = self.heketi_client_node, self.heketi_server_url
- # Verify file/block volumes pending operation before creation
- # Wait for few min's if found and pending operation or skip tc
- for w in waiter.Waiter(timeout=300, interval=10):
- h_db_check_before = heketi_db_check(h_node, h_url)
- h_db_check_bricks_before = h_db_check_before["bricks"]
- h_db_check_vol_before = h_db_check_before["{}volumes".format(
- vol_type)]
+        # Verify file/block volume pending operations before creation
+ h_db_check_before = heketi_db_check(h_node, h_url)
+ h_db_check_vol_before = (
+ h_db_check_before.get("{}volumes".format(vol_type)))
+
+ # Delete heketi pod to clean db operations
+ if(h_db_check_vol_before.get("pending")
+ or h_db_check_before.get("bricks").get("pending")):
+ self._respin_heketi_pod()
+
+ # Calculate heketi volume size
+ free_space, nodenum = get_total_free_space(h_node, h_url)
+ free_space_available = int(free_space / nodenum)
+ if free_space_available > vol_count:
+ h_volume_size = int(free_space_available / vol_count)
+ if h_volume_size > 30:
+ h_volume_size = 30
+ else:
+ h_volume_size, vol_count = 1, free_space_available
- if(not(h_db_check_vol_before["pending"]
- and h_db_check_bricks_before["pending"])):
- break
+ # Get existing heketi volume list
+ existing_volumes = heketi_volume_list(h_node, h_url, json=True)
- if w.expired:
- self.skipTest(
- "Skip TC due to unexpected {} volumes or {} bricks pending"
- " operations for {}volume".format(
- h_db_check_vol_before["pending"],
- h_db_check_bricks_before["pending"],
- vol_type))
-
- if vol_type == 'block':
- h_bhv_list_before = {
- bhv for bhv in (
- get_block_hosting_volume_list(h_node, h_url).keys())}
+ # Add cleanup function to clean stale volumes created during test
+ self.addCleanup(
+ self._cleanup_heketi_volumes, existing_volumes.get("volumes"))
+
+ # Create BHV in case blockvolume size is greater than default BHV size
+ if vol_type:
+ default_bhv_size = get_default_block_hosting_volume_size(
+ h_node, self.heketi_dc_name)
+ if default_bhv_size < h_volume_size:
+ h_volume_name = "autotest-{}".format(utils.get_random_str())
+ bhv_info = self.create_heketi_volume_with_name_and_wait(
+ h_volume_name, free_space_available,
+ raise_on_cleanup_error=False, block=True, json=True)
+ free_space_available -= (
+ int(bhv_info.get("blockinfo").get("reservedsize")) + 1)
+ h_volume_size = int(free_space_available / vol_count)
# Temporary replace g.run with g.async_run in heketi_blockvolume_create
# func to be able to run it in background.Also, avoid parsing the
@@ -644,116 +673,66 @@ class TestHeketiVolume(BaseClass):
# Check for pending operations
for w in waiter.Waiter(timeout=120, interval=10):
- h_db_chk_during = heketi_db_check(h_node, h_url)
-
- h_db_check_bricks_during = h_db_chk_during["bricks"]
- h_db_check_vol_during = h_db_chk_during["{}volumes".format(
- vol_type)]
- if vol_type == 'block':
- if h_db_check_vol_during["total"] != vol_count:
- continue
- if h_db_check_vol_during["pending"]:
+ h_db_check = heketi_db_check(h_node, h_url)
+ h_db_check_vol = h_db_check.get("{}volumes".format(vol_type))
+ if h_db_check_vol.get("pending"):
+ h_db_check_bricks = h_db_check.get("bricks")
break
-
if w.expired:
- err_msg = ("Expected some pending operations found {} operation"
- " for {}volume in Heketi db")
raise exceptions.ExecutionError(
- err_msg.format(h_db_check_vol_during["pending"], vol_type))
-
- # Verify file/block volumes pending operation during creation
- self.assertTrue(
- h_db_check_vol_during["pending"],
- "Expecting some pending operations during {}volumes creation but "
- "found {}".format(vol_type, h_db_check_vol_during["pending"]))
+ "No any pending operations found during {}volumes creation "
+ "{}".format(vol_type, h_db_check_vol.get("pending")))
# Verify bricks pending operation during creation
- err_msg = "Expecting bricks pending in multiple of 3 but found {}"
- if vol_type == '':
+ if not vol_type:
+ self.assertTrue(
+ h_db_check_bricks.get("pending"),
+ "Expecting at least one bricks pending count")
self.assertFalse(
- h_db_check_bricks_during["pending"] % 3,
- err_msg.format(h_db_check_bricks_during["pending"]))
+ h_db_check_bricks.get("pending") % 3,
+ "Expecting bricks pending count to be multiple of 3 but "
+ "found {}".format(h_db_check_bricks.get("pending")))
# Wait for all counts of pending operations to be zero
- for w in waiter.Waiter(timeout=120, interval=10):
- h_db_chk_during = heketi_db_check(h_node, h_url)
- h_db_check_vol_during = h_db_chk_during["{}volumes".format(
- vol_type)]
- if h_db_check_vol_during["pending"] == 0:
+ for w in waiter.Waiter(timeout=300, interval=10):
+ h_db_check = heketi_db_check(h_node, h_url)
+ h_db_check_vol = h_db_check.get("{}volumes".format(vol_type))
+ if not h_db_check_vol.get("pending"):
break
-
if w.expired:
- err_msg = ("Expected no pending operations found {} operation"
- " for {}volume in Heketi db")
raise exceptions.ExecutionError(
- err_msg.format(h_db_check_vol_during["pending"], vol_type))
-
- if vol_type == 'block':
- h_bhv_list_after = {
- bhv for bhv in (
- get_block_hosting_volume_list(h_node, h_url).keys())}
- self.assertTrue(
- h_bhv_list_after,
- "Failed to get the BHV list "
- "{}".format(get_block_hosting_volume_list(h_node, h_url)))
-
- # Get to total number of BHV created
- total_bhvs = h_bhv_list_after - h_bhv_list_before
- for bhv_id in total_bhvs:
- self.addCleanup(heketi_volume_delete, h_node, h_url, bhv_id)
-
- # Fetch volume id to perform cleanup of volumes
- for count in range(vol_count):
- _, stdout, _ = h_vol_creation_async_op[count].async_communicate()
- heketi_vol = json.loads(stdout)
- self.addCleanup(
- eval("heketi_{}volume_delete".format(vol_type)),
- h_node, h_url, heketi_vol["id"], raise_on_error=False)
+ "Expecting no pending operations after 300 sec but "
+ "found {} operation".format(h_db_check_vol.get("pending")))
- h_db_check_after = heketi_db_check(
- self.heketi_client_node, self.heketi_server_url)
- h_db_check_vol_after = h_db_check_after["{}volumes".format(vol_type)]
- h_db_check_bricks_after = h_db_check_after["bricks"]
+ # Get heketi server DB details
+ h_db_check_after = heketi_db_check(h_node, h_url)
+ h_db_check_vol_after = (
+ h_db_check_after.get("{}volumes".format(vol_type)))
+ h_db_check_bricks_after = h_db_check_after.get("bricks")
# Verify if initial and final file/block volumes are same
- err_msg = ("Total {}volume before {} and after {} creation not matched"
- .format(vol_type,
- h_db_check_vol_after["total"],
- h_db_check_vol_before["total"]))
- self.assertEqual(
- h_db_check_vol_after["total"],
- h_db_check_vol_before["total"] + vol_count,
- err_msg)
+ act_vol_count = h_db_check_vol_after.get("total")
+ exp_vol_count = h_db_check_vol_before.get("total") + vol_count
+ err_msg = (
+ "Actual {} and expected {} {}volume counts are not matched".format(
+ act_vol_count, exp_vol_count, vol_type))
+ self.assertEqual(act_vol_count, exp_vol_count, err_msg)
# Verify if initial and final bricks are same for file volume
- err_msg = "Total bricks after {} and before {} creation not matched"
- if vol_type == '':
- self.assertEqual(
- h_db_check_bricks_after["total"],
- h_db_check_bricks_before["total"] + (vol_count * 3),
- err_msg.format(
- h_db_check_bricks_after["total"],
- h_db_check_bricks_before["total"] + (vol_count * 3)))
-
- # Verify if initial and final volumes/bricks are same for block volumes
- elif vol_type == 'block':
- self.assertEqual(
- h_db_check_after["volumes"]["total"],
- h_db_check_before["volumes"]["total"] + len(total_bhvs),
- "Total volume after {} and before {} creation not"
- " matched".format(
- h_db_check_after["volumes"]["total"],
- h_db_check_before["volumes"]["total"]))
- self.assertEqual(
- h_db_check_bricks_after["total"],
- h_db_check_bricks_before["total"] + (
- len(total_bhvs) * 3),
- err_msg.format(
- h_db_check_bricks_after["total"],
- h_db_check_bricks_before["total"] + (
- len(total_bhvs) * 3)))
+ volumes = heketi_volume_list(h_node, h_url, json=True).get("volumes")
+ new_volumes = list(set(volumes) - set(existing_volumes))
+ exp_brick_count = 0
+ for volume in new_volumes:
+ vol_info = heketi_volume_info(h_node, h_url, volume, json=True)
+ exp_brick_count += len(vol_info.get("bricks"))
+
+ err_msg = "Actual {} and expected {} bricks counts are not matched"
+ act_brick_count = h_db_check_bricks_after.get("total")
+ self.assertEqual(
+ act_brick_count, exp_brick_count, err_msg.format(
+ act_brick_count, exp_brick_count))
- @pytest.mark.tier1
+ @pytest.mark.tier4
@podcmd.GlustoPod()
def test_volume_creation_after_stopping_heketidb_volume(self):
"""Validate volume creation after stopping heketidb volume"""
@@ -781,7 +760,7 @@ class TestHeketiVolume(BaseClass):
self.assertIn(
"transport endpoint is not connected", six.text_type(e.exception))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_heketi_volume_create_with_clusterid(self):
"""Validate creation of heketi volume with clusters argument"""
h_node, h_url = self.heketi_client_node, self.heketi_server_url
@@ -807,3 +786,129 @@ class TestHeketiVolume(BaseClass):
info_cluster_id, creation_cluster_id,
"Volume creation cluster id {} not matching the info cluster id "
"{}".format(creation_cluster_id, info_cluster_id))
+
+ def _check_for_pending_operations(self, h_node, h_url):
+ # Check for pending operations
+ for w in waiter.Waiter(timeout=120, interval=10):
+ h_db_check = heketi_db_check(h_node, h_url)
+ h_db_check_vol = h_db_check.get("blockvolumes")
+ if h_db_check_vol.get("pending"):
+ break
+ if w.expired:
+ raise exceptions.ExecutionError(
+ "No pending operations found during blockvolumes creation "
+ "{}".format(h_db_check_vol.get("pending")))
+
+ @pytest.mark.tier2
+ def test_heketi_manual_cleanup_operation_in_bhv(self):
+ """Validate heketi db cleanup will resolve the mismatch
+ in the free size of the block hosting volume with failed
+ block device create operations.
+ """
+ bhv_size_before, bhv_size_after, vol_count = [], [], 5
+ ocp_node, g_node = self.ocp_master_node[0], self.gluster_servers[0]
+ h_node, h_url = self.heketi_client_node, self.heketi_server_url
+
+ # Get existing heketi volume list
+ existing_volumes = heketi_volume_list(h_node, h_url, json=True)
+
+ # Add function to clean stale volumes created during test
+ self.addCleanup(
+ self._cleanup_heketi_volumes, existing_volumes.get("volumes"))
+
+ # Get nodes id list
+ node_id_list = heketi_node_list(h_node, h_url)
+
+        # Disable the 4th and any additional nodes
+ for node_id in node_id_list[3:]:
+ heketi_node_disable(h_node, h_url, node_id)
+ self.addCleanup(heketi_node_enable, h_node, h_url, node_id)
+
+ # Calculate heketi volume size
+ free_space, nodenum = get_total_free_space(h_node, h_url)
+ free_space_available = int(free_space / nodenum)
+ if free_space_available > vol_count:
+ h_volume_size = int(free_space_available / vol_count)
+ if h_volume_size > 50:
+ h_volume_size = 50
+ else:
+ h_volume_size, vol_count = 1, free_space_available
+
+ # Create BHV in case blockvolume size is greater than default BHV size
+ default_bhv_size = get_default_block_hosting_volume_size(
+ h_node, self.heketi_dc_name)
+ if default_bhv_size < h_volume_size:
+ h_volume_name = "autotest-{}".format(utils.get_random_str())
+ bhv_info = self.create_heketi_volume_with_name_and_wait(
+ h_volume_name, free_space_available,
+ raise_on_cleanup_error=False, block=True, json=True)
+ free_space_available -= (
+ int(bhv_info.get("blockinfo").get("reservedsize")) + 1)
+ h_volume_size = int(free_space_available / vol_count)
+
+ # Get BHV list
+ h_bhv_list = get_block_hosting_volume_list(h_node, h_url).keys()
+ self.assertTrue(h_bhv_list, "Failed to get the BHV list")
+
+ # Get BHV size
+ for bhv in h_bhv_list:
+ vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)
+ bhv_vol_size_before = vol_info.get("freesize")
+ bhv_size_before.append(bhv_vol_size_before)
+
+        # Kill the tcmu-runner service
+ services = ("tcmu-runner", "gluster-block-target", "gluster-blockd")
+ kill_service_on_gluster_pod_or_node(ocp_node, "tcmu-runner", g_node)
+
+        # Restart the services during cleanup
+ for service in services:
+ state = (
+ 'exited' if service == 'gluster-block-target' else 'running')
+ self.addCleanup(
+ wait_for_service_status_on_gluster_pod_or_node,
+ ocp_node, service, 'active', state, g_node)
+ self.addCleanup(
+ restart_service_on_gluster_pod_or_node,
+ ocp_node, service, g_node)
+
+ def run_async(cmd, hostname, raise_on_error=True):
+ return g.run_async(host=hostname, command=cmd)
+
+        # Create stale block volumes asynchronously
+ for count in range(vol_count):
+ with mock.patch.object(json, 'loads', side_effect=(lambda j: j)):
+ with mock.patch.object(
+ command, 'cmd_run', side_effect=run_async):
+ heketi_blockvolume_create(
+ h_node, h_url, h_volume_size, json=True)
+
+ # Wait for pending operation to get generated
+ self._check_for_pending_operations(h_node, h_url)
+
+ # Restart the services
+ for service in services:
+ state = (
+ 'exited' if service == 'gluster-block-target' else 'running')
+ restart_service_on_gluster_pod_or_node(
+ ocp_node, service, g_node)
+ wait_for_service_status_on_gluster_pod_or_node(
+ ocp_node, service, 'active', state, g_node)
+
+        # Clean up pending operations
+        heketi_server_operation_cleanup(h_node, h_url)
+
+        # Wait for pending operations to get cleaned up
+ for w in waiter.Waiter(timeout=120, interval=10):
+ # Get BHV size
+ for bhv in h_bhv_list:
+ vol_info = heketi_volume_info(h_node, h_url, bhv, json=True)
+ bhv_vol_size_after = vol_info.get("freesize")
+ bhv_size_after.append(bhv_vol_size_after)
+
+ if(set(bhv_size_before) == set(bhv_size_after)):
+ break
+ if w.expired:
+ raise exceptions.ExecutionError(
+ "Failed to Validate volume size Actual:{},"
+ " Expected:{}".format(
+ set(bhv_size_before), set(bhv_size_after)))
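The stale-volume creation above relies on temporarily patching json.loads and the library's command.cmd_run so that heketi_blockvolume_create dispatches the CLI call asynchronously and returns before the operation completes, leaving pending entries in the heketi db. A self-contained sketch of that patching trick with hypothetical stand-in functions (not the library code):

    import json
    import subprocess
    from unittest import mock

    class command(object):
        """Stand-in for the library's command module (hypothetical)."""

        @staticmethod
        def cmd_run(cmd, hostname=None):
            return subprocess.check_output(cmd, shell=True).decode()

    def create_volume(size):
        """Stand-in CLI wrapper: run a command and parse its JSON output."""
        out = command.cmd_run(
            'echo \'{{"id": "fake", "size": {}}}\''.format(size))
        return json.loads(out)

    def run_async(cmd, hostname=None):
        # Fire-and-forget replacement; the real test uses glusto's
        # g.run_async() instead of a bare Popen.
        return subprocess.Popen(cmd, shell=True)

    # With both patches active, create_volume() no longer blocks on the
    # command output and json.loads() passes whatever it receives straight
    # through, so the call returns immediately and the operation stays
    # "pending" until it finishes in the background.
    with mock.patch.object(json, "loads", side_effect=lambda j: j):
        with mock.patch.object(command, "cmd_run", side_effect=run_async):
            print(create_volume(1))

Once the with blocks exit, the real functions are restored, which is why the later heketi_db_check and heketi_server_operation_cleanup calls in the test behave normally.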
diff --git a/tests/functional/heketi/test_heketi_device_operations.py b/tests/functional/heketi/test_heketi_device_operations.py
index 1d97a7f5..05f16ef9 100755
--- a/tests/functional/heketi/test_heketi_device_operations.py
+++ b/tests/functional/heketi/test_heketi_device_operations.py
@@ -1,6 +1,7 @@
import ddt
from glusto.core import Glusto as g
import pytest
+import six
from openshiftstoragelibs.baseclass import BaseClass
from openshiftstoragelibs.heketi_ops import (
@@ -17,6 +18,8 @@ from openshiftstoragelibs.heketi_ops import (
heketi_topology_info,
heketi_volume_create,
heketi_volume_delete,
+ rm_tags,
+ set_tags,
validate_dev_path_vg_and_uuid,
)
from openshiftstoragelibs import utils
@@ -97,7 +100,7 @@ class TestHeketiDeviceOperations(BaseClass):
return online_hosts
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_heketi_device_enable_disable(self):
"""Validate device enable and disable functionality"""
@@ -298,7 +301,7 @@ class TestHeketiDeviceOperations(BaseClass):
"Some of the '%s' volume bricks is present of the removed "
"'%s' device." % (vol_info['id'], lowest_device_id))
- @pytest.mark.tier1
+ @pytest.mark.tier2
@ddt.data((True, False), (False, False), (True, True))
@ddt.unpack
def test_heketi_device_remove_delete(
@@ -339,7 +342,7 @@ class TestHeketiDeviceOperations(BaseClass):
is_delete_device, deleted_device, node_id, add_back_again,
skip_cleanup_addition=True)
- @pytest.mark.tier2
+ @pytest.mark.tier3
def test_heketi_device_removal_with_insuff_space(self):
"""Validate heketi with device removal insufficient space"""
@@ -428,7 +431,7 @@ class TestHeketiDeviceOperations(BaseClass):
heketi_device_disable, heketi_node, heketi_url, device_id)
raise
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_heketi_device_delete(self):
"""Test Heketi device delete operation"""
@@ -488,7 +491,7 @@ class TestHeketiDeviceOperations(BaseClass):
"after the device deletion" % (device_id, node_id))
self.assertNotIn(device_id, node_info_after_deletion, msg)
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_heketi_device_info(self):
"""Validate whether device related information is displayed"""
@@ -554,7 +557,7 @@ class TestHeketiDeviceOperations(BaseClass):
self.assertEqual(brick[key], brick_from_t[key])
self.assertEqual(brick_match_count, len(device_info['bricks']))
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_device_delete_with_bricks(self):
"""Validate device deletion with existing bricks on the device"""
h_node, h_url = self.heketi_client_node, self.heketi_server_url
@@ -580,7 +583,7 @@ class TestHeketiDeviceOperations(BaseClass):
self.addCleanup(
heketi_device_add, h_node, h_url, device_name, node_id)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_dev_path_mapping_basic_validation(self):
"""Validate dev_path of all the devices"""
node_with_devices = dict()
@@ -600,3 +603,96 @@ class TestHeketiDeviceOperations(BaseClass):
h_node, h_url, node, dev)
self.assertTrue(is_true, "Failed to verify dv_path for the "
"device {}".format(dev))
+
+ @pytest.mark.tier3
+ def test_volume_create_as_tag_matching_rule(self):
+ """Validate volume creation fails when the tag-matching rule
+ matches only one device in the cluster"""
+
+ h_node, h_server = self.heketi_client_node, self.heketi_server_url
+
+ # Set a tag on any one device in the cluster
+ node_list = heketi_node_list(h_node, h_server, json=True)
+ node_info = heketi_node_info(h_node, h_server, node_list[0], json=True)
+ device_id = node_info.get('devices', {})[0].get('id')
+ set_tags(h_node, h_server, 'device', device_id, "tier:it")
+ self.addCleanup(rm_tags, h_node, h_server, 'device', device_id, 'tier')
+
+ # Volume creation should fail since only one device carries the tag
+ try:
+ heketi_volume_create(
+ h_node, h_server, 2,
+ gluster_volume_options="user.heketi.device-tag-match tier=it")
+ except AssertionError as e:
+ if ("Failed to allocate new volume" not in six.text_type(e)):
+ raise
+
+ @pytest.mark.tier4
+ def test_device_settags_tier_option(self):
+ """Validate volume creation with a tag-matching rule"""
+
+ h_node, h_server = self.heketi_client_node, self.heketi_server_url
+ initial_brick_count, before_brick_count, after_brick_count = [], [], []
+
+ # Set a tag on one device on each of 3 different nodes
+ node_list = heketi_node_list(h_node, h_server, json=True)
+ device_list = []
+ for node_id in node_list[:3]:
+ node_info = heketi_node_info(h_node, h_server, node_id, json=True)
+ device_id = node_info.get('devices', {})[0].get('id')
+ device_list.append(device_id)
+ set_tags(h_node, h_server, 'device', device_id, "tier:test")
+ self.addCleanup(
+ rm_tags, h_node, h_server, 'device', device_id, "tier",
+ raise_on_error=False)
+
+ # Get initial number of bricks present on device
+ for device_id in device_list:
+ device_info = heketi_device_info(
+ h_node, h_server, device_id, json=True)
+ initial_brick_count.append(len(device_info.get("bricks")))
+
+ # Create volume with device tag option
+ volume_info = heketi_volume_create(
+ h_node, h_server, 2,
+ gluster_volume_options="user.heketi.device-tag-match tier=test",
+ json=True)
+ self.addCleanup(
+ heketi_volume_delete, h_node, h_server, volume_info.get("id"))
+
+ # Get number of bricks present on device after volume create
+ for device_id in device_list:
+ device_info = heketi_device_info(
+ h_node, h_server, device_id, json=True)
+ before_brick_count.append(len(device_info.get("bricks")))
+
+ # Validate volume has been created on the tagged devices
+ self.assertGreater(
+ before_brick_count, initial_brick_count,
+ "Volume {} has not been created on the tagged devices".format(
+ volume_info.get("id")))
+
+ # Create volume with the not-equal-to tag option
+ volume_info = heketi_volume_create(
+ h_node, h_server, 2,
+ gluster_volume_options="user.heketi.device-tag-match tier!=test",
+ json=True)
+ self.addCleanup(
+ heketi_volume_delete, h_node, h_server, volume_info.get("id"))
+
+ # Get number of bricks present on device after volume create
+ for device_id in device_list:
+ device_info = heketi_device_info(
+ h_node, h_server, device_id, json=True)
+ after_brick_count.append(len(device_info.get("bricks")))
+
+ # Validate volume has not been created on the tagged devices
+ self.assertEqual(
+ before_brick_count, after_brick_count,
+ "Volume {} has been created on the tagged devices".format(
+ volume_info.get("id")))
+
+ # Update the tag on the devices
+ for device_id in device_list:
+ set_tags(h_node, h_server, 'device', device_id, "tier:test_update")
+ self.addCleanup(
+ rm_tags, h_node, h_server, 'device', device_id, "tier")
diff --git a/tests/functional/heketi/test_heketi_lvm_wrapper.py b/tests/functional/heketi/test_heketi_lvm_wrapper.py
new file mode 100644
index 00000000..5817f57b
--- /dev/null
+++ b/tests/functional/heketi/test_heketi_lvm_wrapper.py
@@ -0,0 +1,277 @@
+import re
+
+import ddt
+import pytest
+
+from openshiftstoragelibs import command
+from openshiftstoragelibs import baseclass
+from openshiftstoragelibs import heketi_ops
+from openshiftstoragelibs import heketi_version
+from openshiftstoragelibs import openshift_ops
+from openshiftstoragelibs import openshift_version
+from openshiftstoragelibs import waiter
+
+# The exec-on-host script prevents LVM commands from being executed
+# inside the pod and runs them on the host instead.
+# It has been introduced as an LVM wrapper in heketi v9.0.0-9.
+ENV_NAME = "HEKETI_LVM_WRAPPER"
+ENV_VALUE = "/usr/sbin/exec-on-host"
+ENV_FALSE_VALUE = "/usr/bin/false"
+DOCKER_SERVICE = "systemctl {} docker"
+SERVICE_STATUS_REGEX = r"Active: (.*) \((.*)\)"
+
+
+@ddt.ddt
+class TestHeketiLvmWrapper(baseclass.BaseClass):
+ """Class to validate heketi LVM wrapper functionality"""
+
+ def setUp(self):
+ super(TestHeketiLvmWrapper, self).setUp()
+
+ self.oc_node = self.ocp_master_node[0]
+ self.pod_name = openshift_ops.get_ocp_gluster_pod_details(self.oc_node)
+ self.h_pod_name = openshift_ops.get_pod_name_from_dc(
+ self.oc_node, self.heketi_dc_name)
+ self.volume_size = 2
+
+ ocp_version = openshift_version.get_openshift_version()
+ if ocp_version < "3.11.170":
+ self.skipTest("Heketi LVM Wrapper functionality does not "
+ "support on OCP {}".format(ocp_version.v_str))
+ h_version = heketi_version.get_heketi_version(self.heketi_client_node)
+ if h_version < '9.0.0-9':
+ self.skipTest("heketi-client package {} does not support Heketi "
+ "LVM Wrapper functionality".format(h_version.v_str))
+
+ def _check_heketi_pod_to_come_up_after_changing_env(self):
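+ # Wait for the old heketi pod to be terminated and for the
+ # re-spawned pod to reach ready state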
+ heketi_pod = openshift_ops.get_pod_names_from_dc(
+ self.oc_node, self.heketi_dc_name)[0]
+ openshift_ops.wait_for_resource_absence(
+ self.oc_node, "pod", heketi_pod)
+ new_heketi_pod = openshift_ops.get_pod_names_from_dc(
+ self.oc_node, self.heketi_dc_name)[0]
+ openshift_ops.wait_for_pod_be_ready(
+ self.oc_node, new_heketi_pod, wait_step=20)
+
+ def _wait_for_docker_service_status(self, pod_host_ip, status, state):
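+ # Poll 'systemctl status docker' until the 'Active:' line reports
+ # the expected status and state, e.g. "active (running)"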
+ for w in waiter.Waiter(30, 3):
+ out = command.cmd_run(DOCKER_SERVICE.format("status"), pod_host_ip)
+ for line in out.splitlines():
+ status_match = re.search(SERVICE_STATUS_REGEX, line)
+ if (status_match and status_match.group(1) == status
+ and status_match.group(2) == state):
+ return True
+
+ def _check_docker_status_is_active(self, pod_host_ip):
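+ # Cleanup helper which starts docker again if 'systemctl is-active
+ # docker' reports it inactive, then waits for it to be running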
+ try:
+ command.cmd_run(DOCKER_SERVICE.format("is-active"), pod_host_ip)
+ except Exception as err:
+ if "inactive" in err:
+ command.cmd_run(DOCKER_SERVICE.format("start"), pod_host_ip)
+ self._wait_for_docker_service_status(
+ pod_host_ip, "active", "running")
+
+ @pytest.mark.tier1
+ def test_lvm_script_and_wrapper_environments(self):
+ """Validate lvm script present on glusterfs pods
+ lvm wrapper environment is present on heketi pod"""
+
+ # Check script /usr/sbin/exec-on-host is present in pod
+ if self.is_containerized_gluster():
+ cmd = "ls -lrt {}".format(ENV_VALUE)
+ ret, out, err = openshift_ops.oc_rsh(
+ self.oc_node, self.pod_name[0]['pod_name'], cmd)
+ self.assertFalse(
+ ret, "failed to execute command {} on pod {} with error:"
+ " {}".format(cmd, self.pod_name[0]['pod_name'], err))
+ self.assertIn(ENV_VALUE, out)
+
+ # Get a value associated with HEKETI_LVM_WRAPPER
+ custom = (r'":spec.containers[*].env[?(@.name==\"{}\")]'
+ r'.value"'.format(ENV_NAME))
+ env_var_value = openshift_ops.oc_get_custom_resource(
+ self.oc_node, "pod", custom, self.h_pod_name)
+
+ # Check value /usr/sbin/exec-on-host is present in converged mode
+ # and absent in independent mode deployment
+ err_msg = "Heketi LVM environment {} match failed".format(ENV_VALUE)
+ if self.is_containerized_gluster():
+ self.assertEqual(env_var_value[0], ENV_VALUE, err_msg)
+ else:
+ self.assertIsNotNone(env_var_value[0], err_msg)
+
+ @pytest.mark.tier1
+ def test_lvm_script_executable_on_host(self):
+ """Validate lvm script is executable on host instead
+ of container"""
+
+ # Skip the TC if independent mode deployment
+ if not self.is_containerized_gluster():
+ self.skipTest(
+ "Skipping this test as LVM script is not available in "
+ "independent mode deployment")
+
+ pod_name = self.pod_name[0]['pod_name']
+ gluster_pod_label = "glusterfs=storage-pod"
+
+ # Remove LVM binaries to validate that the /usr/sbin/exec-on-host
+ # script executes LVM commands on the host instead of in the pod
+ cmd = "rm /usr/sbin/lvm"
+ ret, _, err = openshift_ops.oc_rsh(self.oc_node, pod_name, cmd)
+ self.addCleanup(
+ openshift_ops.wait_for_pods_be_ready, self.oc_node,
+ len(self.gluster_servers), gluster_pod_label)
+ self.addCleanup(
+ openshift_ops.wait_for_resource_absence, self.oc_node, "pod",
+ pod_name)
+ self.addCleanup(
+ openshift_ops.oc_delete, self.oc_node, "pod", pod_name)
+ err_msg = (
+ "failed to execute command {} on pod {} with error: {}"
+ "".format(cmd, pod_name, err))
+ self.assertFalse(ret, err_msg)
+
+ # Validate LVM command is not executable in pod
+ cmd = "oc rsh {} lvs".format(pod_name)
+ stdout = command.cmd_run(cmd, self.oc_node, raise_on_error=False)
+ self.assertIn(
+ 'exec: \\"lvs\\": executable file not found in $PATH', stdout)
+
+ # Run LVM command with /usr/sbin/exec-on-host
+ cmd = "{} lvs".format(ENV_VALUE)
+ ret, out, err = openshift_ops.oc_rsh(self.oc_node, pod_name, cmd)
+ err_msg = (
+ "failed to execute command {} on pod {} with error: {}"
+ "".format(cmd, pod_name, err))
+ self.assertFalse(ret, err_msg)
+ self.assertIn("VG", out)
+
+ @pytest.mark.tier1
+ @ddt.data(ENV_FALSE_VALUE, ENV_VALUE, "")
+ def test_lvm_script_with_wrapper_environment_value(self, env_var_value):
+ """Validate the creation, deletion, etc operations when
+ HEKETI_LVM_WRAPPER has different values assigned"""
+
+ # Skip the TC if independent mode deployment
+ if not self.is_containerized_gluster():
+ self.skipTest(
+ "Skipping this test as LVM script is not available in "
+ "independent mode deployment")
+
+ h_client, h_url = self.heketi_client_node, self.heketi_server_url
+
+ # Set different values to HEKETI_LVM_WRAPPER
+ if env_var_value != ENV_VALUE:
+ cmd = 'oc set env dc/{} {}={}'
+ command.cmd_run(
+ cmd.format(self.heketi_dc_name, ENV_NAME, env_var_value),
+ self.oc_node)
+ self.addCleanup(
+ self._check_heketi_pod_to_come_up_after_changing_env)
+ self.addCleanup(
+ command.cmd_run,
+ cmd.format(self.heketi_dc_name, ENV_NAME, ENV_VALUE),
+ self.oc_node)
+ self._check_heketi_pod_to_come_up_after_changing_env()
+
+ # Get new value associated with HEKETI_LVM_WRAPPER
+ heketi_pod = openshift_ops.get_pod_names_from_dc(
+ self.oc_node, self.heketi_dc_name)[0]
+ custom = (
+ "{{.spec.containers[*].env[?(@.name==\"{}\")].value}}".format(
+ ENV_NAME))
+ cmd = ("oc get pod {} -o=jsonpath='{}'".format(heketi_pod, custom))
+ get_env_value = command.cmd_run(cmd, self.oc_node)
+
+ # Validate new value assigned to heketi pod
+ err_msg = "Failed to assign new value {} to {}".format(
+ env_var_value, heketi_pod)
+ self.assertEqual(get_env_value, env_var_value, err_msg)
+
+ # Get the date before creating heketi volume
+ cmd_date = "date -u '+%Y-%m-%d %T'"
+ _date, _time = command.cmd_run(cmd_date, self.oc_node).split(" ")
+
+ if env_var_value == ENV_FALSE_VALUE:
+ # Heketi volume creation should fail when HEKETI_LVM_WRAPPER is
+ # assigned to /usr/bin/false
+ err_msg = "Unexpectedly: volume has been created"
+ with self.assertRaises(AssertionError, msg=err_msg):
+ vol_info = heketi_ops.heketi_volume_create(
+ h_client, h_url, self.volume_size, json=True)
+ self.addCleanup(
+ heketi_ops.heketi_volume_delete, h_client,
+ h_url, vol_info["bricks"][0]["volume"])
+ else:
+ # Heketi volume creation should succeed when HEKETI_LVM_WRAPPER is
+ # assigned a value other than /usr/bin/false
+ vol_info = heketi_ops.heketi_volume_create(
+ h_client, h_url, self.volume_size, json=True)
+ self.addCleanup(
+ heketi_ops.heketi_volume_delete,
+ h_client, h_url, vol_info["bricks"][0]["volume"])
+ self.assertTrue(vol_info, ("Failed to create heketi "
+ "volume of size {}".format(self.volume_size)))
+
+ # Get heketi logs with specific time
+ cmd_logs = "oc logs {} --since-time {}T{}Z | grep {}".format(
+ heketi_pod, _date, _time, "/usr/sbin/lvm")
+
+ # Validate assigned value of HEKETI_LVM_WRAPPER is present in
+ # heketi log
+ for w in waiter.Waiter(60, 10):
+ logs = command.cmd_run(cmd_logs, self.oc_node)
+ status_match = re.search(env_var_value, logs)
+ if status_match:
+ break
+ err_msg = "Heketi unable to execute LVM commands with {}".format(
+ env_var_value)
+ self.assertTrue(status_match, err_msg)
+
+ @pytest.mark.tier2
+ def test_docker_service_restart(self):
+ """Validate docker service should not fail after restart"""
+
+ # Skip the TC if independent mode deployment
+ if not self.is_containerized_gluster():
+ self.skipTest(
+ "Skipping this test case as LVM script is not available in "
+ "independent mode deployment")
+
+ # Skip the TC if the docker storage driver is other than devicemapper
+ pod_host_ip = self.pod_name[0]["pod_host_ip"]
+ cmd = "docker info -f '{{json .Driver}}'"
+ device_driver = command.cmd_run(cmd, pod_host_ip)
+ if device_driver != '"devicemapper"':
+ self.skipTest(
+ "Skipping this test case as docker storage driver is not "
+ "set to devicemapper")
+
+ # Validate LVM environment is present
+ custom = (r'":spec.containers[*].env[?(@.name==\"{}\")]'
+ r'.value"'.format(ENV_NAME))
+ env_var_value = openshift_ops.oc_get_custom_resource(
+ self.oc_node, "pod", custom, self.h_pod_name)[0]
+ err_msg = "Heketi {} environment should has {}".format(
+ ENV_NAME, ENV_VALUE)
+ self.assertEqual(env_var_value, ENV_VALUE, err_msg)
+
+ # Check docker status is active
+ command.cmd_run(DOCKER_SERVICE.format("is-active"), pod_host_ip)
+
+ # Restart the docker service
+ self.addCleanup(self._check_docker_status_is_active, pod_host_ip)
+ command.cmd_run(DOCKER_SERVICE.format("restart"), pod_host_ip)
+
+ # Wait for docker service to become active
+ self._wait_for_docker_service_status(pod_host_ip, "active", "running")
+
+ # Wait for glusterfs pods to be ready
+ openshift_ops.wait_for_pods_be_ready(
+ self.oc_node, len(self.gluster_servers), "glusterfs=storage-pod")
+
+ # Check the docker pool is available after docker restart
+ cmd = "ls -lrt /dev/docker-vg/docker-pool"
+ command.cmd_run(cmd, pod_host_ip)
+
+ # Create PVC after docker restart
+ self.create_and_wait_for_pvcs()
diff --git a/tests/functional/heketi/test_heketi_metrics.py b/tests/functional/heketi/test_heketi_metrics.py
index 9f161607..2b59b7c7 100644
--- a/tests/functional/heketi/test_heketi_metrics.py
+++ b/tests/functional/heketi/test_heketi_metrics.py
@@ -172,12 +172,12 @@ class TestHeketiMetrics(BaseClass):
vol_count['cluster'], json=True)
self.assertEqual(vol_count['value'], len(cluster_info['volumes']))
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_heketi_metrics_with_topology_info(self):
"""Validate heketi metrics generation"""
self.verify_heketi_metrics_with_topology_info()
- @pytest.mark.tier1
+ @pytest.mark.tier4
def test_heketi_metrics_heketipod_failure(self):
"""Validate heketi metrics after heketi pod failure"""
scale_dc_pod_amount_and_wait(
@@ -222,7 +222,7 @@ class TestHeketiMetrics(BaseClass):
self.verify_heketi_metrics_with_topology_info()
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_heketi_metrics_validating_vol_count_on_vol_creation(self):
"""Validate heketi metrics VolumeCount after volume creation"""
@@ -247,7 +247,7 @@ class TestHeketiMetrics(BaseClass):
self.verify_volume_count()
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_heketi_metrics_validating_vol_count_on_vol_deletion(self):
"""Validate heketi metrics VolumeCount after volume deletion"""
@@ -287,7 +287,7 @@ class TestHeketiMetrics(BaseClass):
self.assertNotIn(vol['id'], volume_list)
self.verify_volume_count()
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_heketi_metrics_validating_cluster_count(self):
"""Validate 'cluster count' in heketi metrics"""
cluster_list = heketi_cluster_list(
@@ -305,7 +305,7 @@ class TestHeketiMetrics(BaseClass):
self.assertEqual(
len(cluster_list['clusters']), metrics['heketi_cluster_count'])
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_heketi_metrics_validating_existing_node_count(self):
"""Validate existing 'node count' in heketi metrics"""
metrics = get_heketi_metrics(
diff --git a/tests/functional/heketi/test_heketi_node_operations.py b/tests/functional/heketi/test_heketi_node_operations.py
index 387bfae4..72267c35 100644
--- a/tests/functional/heketi/test_heketi_node_operations.py
+++ b/tests/functional/heketi/test_heketi_node_operations.py
@@ -25,7 +25,7 @@ class TestHeketiNodeOperations(baseclass.BaseClass):
self.h_node = self.heketi_client_node
self.h_url = self.heketi_server_url
- @pytest.mark.tier0
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_heketi_node_list(self):
"""Test node list operation
@@ -54,7 +54,7 @@ class TestHeketiNodeOperations(baseclass.BaseClass):
"Heketi volume list %s is not equal to gluster volume list %s"
% (node_ips, hostnames))
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_heketi_node_info(self):
"""Test heketi node info operation
"""
@@ -73,7 +73,7 @@ class TestHeketiNodeOperations(baseclass.BaseClass):
"Failed to match node ID. Exp: %s, Act: %s" % (
node_id, node_info["id"]))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_heketi_node_states_enable_disable(self):
"""Test node enable and disable functionality
"""
@@ -237,7 +237,7 @@ class TestHeketiNodeOperations(baseclass.BaseClass):
storage_ip, ep_addresses)
self.assertIn(storage_ip, ep_addresses, err_msg)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_heketi_node_add_with_valid_cluster(self):
"""Test heketi node add operation with valid cluster id"""
if not self.is_containerized_gluster():
@@ -254,7 +254,7 @@ class TestHeketiNodeOperations(baseclass.BaseClass):
# Add node to valid cluster id
self.heketi_node_add_with_valid_cluster()
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_validate_heketi_node_add_with_db_check(self):
"""Test heketi db check after node add operation"""
if not self.is_containerized_gluster():
@@ -285,7 +285,7 @@ class TestHeketiNodeOperations(baseclass.BaseClass):
" not as expected".format(initial_node_count, final_node_count))
self.assertEqual(initial_node_count + 1, final_node_count, msg)
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_heketi_node_add_with_invalid_cluster(self):
"""Test heketi node add operation with invalid cluster id"""
storage_hostname, cluster_id = None, utils.get_random_str(size=33)
@@ -461,7 +461,7 @@ class TestHeketiNodeOperations(baseclass.BaseClass):
self.h_node, self.h_url, device['id'])
heketi_ops.heketi_node_delete(self.h_node, self.h_url, node_id)
- @pytest.mark.tier0
+ @pytest.mark.tier1
@ddt.data('remove', 'delete')
def test_heketi_node_remove_or_delete(self, operation='delete'):
"""Test node remove and delete functionality of heketi and validate
@@ -572,7 +572,7 @@ class TestHeketiNodeOperations(baseclass.BaseClass):
for node_id in h_nodes_list[2:]:
self.addCleanup(h.heketi_node_enable, h_node, h_url, node_id)
- @pytest.mark.tier2
+ @pytest.mark.tier4
@ddt.data(
("volume", "create"),
("volume", "delete"),
diff --git a/tests/functional/heketi/test_heketi_volume_operations.py b/tests/functional/heketi/test_heketi_volume_operations.py
index ef9ae213..b3192d02 100644
--- a/tests/functional/heketi/test_heketi_volume_operations.py
+++ b/tests/functional/heketi/test_heketi_volume_operations.py
@@ -32,7 +32,7 @@ class TestHeketiVolumeOperations(BaseClass):
super(TestHeketiVolumeOperations, cls).setUpClass()
cls.volume_size = 1
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_heketi_with_default_options(self):
"""
Test to create volume with default options.
@@ -52,7 +52,7 @@ class TestHeketiVolumeOperations(BaseClass):
"Expected Size: %s, Actual Size: %s"
% (self.volume_size, vol_info['size'])))
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_heketi_with_expand_volume(self):
"""
Test volume expand and size if updated correctly in heketi-cli info
@@ -86,7 +86,7 @@ class TestHeketiVolumeOperations(BaseClass):
"Size: %s" % (str(expected_size),
str(volume_info['size']))))
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_heketi_volume_mount(self):
self.node = self.ocp_master_node[0]
try:
@@ -179,7 +179,7 @@ class TestHeketiVolumeOperations(BaseClass):
pids.append([g_node, pid])
return pids
- @pytest.mark.tier1
+ @pytest.mark.tier2
@podcmd.GlustoPod()
def test_heketi_volume_snapshot_create_with_one_brick_down(self):
"""
@@ -242,7 +242,7 @@ class TestHeketiVolumeOperations(BaseClass):
"Expecting Snapshot count before {} and after creation {} to be "
"same".format(snap_list_before, snap_list_after))
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_heketi_volume_create_mutiple_sizes(self):
"""Validate creation of heketi volume with differnt sizes"""
sizes, required_space = [15, 50, 100], 495
@@ -259,7 +259,7 @@ class TestHeketiVolumeOperations(BaseClass):
vol_id = heketi_volume_create(h_node, h_url, size, json=True)['id']
self.addCleanup(heketi_volume_delete, h_node, h_url, vol_id)
- @pytest.mark.tier1
+ @pytest.mark.tier2
@podcmd.GlustoPod()
def test_heketi_volume_snapshot_delete(self):
"""Test heketi volume snapshot delete operation"""
diff --git a/tests/functional/heketi/test_heketi_zones.py b/tests/functional/heketi/test_heketi_zones.py
index a5e6fd3b..5c5d3df5 100644
--- a/tests/functional/heketi/test_heketi_zones.py
+++ b/tests/functional/heketi/test_heketi_zones.py
@@ -227,7 +227,7 @@ class TestHeketiZones(baseclass.BaseClass):
new_env_list = command.cmd_run(cmd_list_env, hostname=self.node)
self.assertIn(env, new_env_list, "Failed to set env {}".format(env))
- @pytest.mark.tier1
+ @pytest.mark.tier3
@ddt.data(
(1, "none"),
(2, "none"),
@@ -282,7 +282,7 @@ class TestHeketiZones(baseclass.BaseClass):
# Create app DC with the above PVC
self.create_dc_with_pvc(pvc_name, timeout=120, wait_step=3)
- @pytest.mark.tier1
+ @pytest.mark.tier3
@ddt.data(
(1, "none"),
(2, "none"),
@@ -341,7 +341,7 @@ class TestHeketiZones(baseclass.BaseClass):
# Create app DC with the above PVC
self.create_dc_with_pvc(pvc_name, timeout=120, wait_step=3)
- @pytest.mark.tier1
+ @pytest.mark.tier3
@ddt.data(
(3, "strict"),
(1, "none"),
@@ -379,7 +379,7 @@ class TestHeketiZones(baseclass.BaseClass):
# Create app DC with the above PVC
self.create_dc_with_pvc(pvc_name, timeout=120, wait_step=3)
- @pytest.mark.tier1
+ @pytest.mark.tier3
@ddt.data(
(3, "strict"),
(1, "none"),
@@ -419,7 +419,7 @@ class TestHeketiZones(baseclass.BaseClass):
# Create app DC with the above PVC
self.create_dc_with_pvc(pvc_name, timeout=120, wait_step=3)
- @pytest.mark.tier1
+ @pytest.mark.tier3
@ddt.data(3, 4)
def test_pvc_placement_with_zone_check_set_in_dc(self, zone_count):
heketi_zone_checking = "strict"
@@ -447,7 +447,7 @@ class TestHeketiZones(baseclass.BaseClass):
# Create app DC with the above PVC
self.create_dc_with_pvc(pvc_name, timeout=120, wait_step=3)
- @pytest.mark.tier1
+ @pytest.mark.tier3
@ddt.data(3, 4)
def test_check_arbiter_pvc_placement_zone_check_in_dc(self, zone_count):
heketi_zone_checking = "strict"
@@ -483,7 +483,7 @@ class TestHeketiZones(baseclass.BaseClass):
# Create app DC with the above PVC
self.create_dc_with_pvc(pvc_name, timeout=120, wait_step=3)
- @pytest.mark.tier1
+ @pytest.mark.tier3
@ddt.data(
(1, False),
(1, True),
@@ -526,7 +526,7 @@ class TestHeketiZones(baseclass.BaseClass):
# Create app DC with the above PVC
self.create_dc_with_pvc(pvc_name, timeout=120, wait_step=3)
- @pytest.mark.tier1
+ @pytest.mark.tier3
@ddt.data(
("strict", "strict"),
("none", "strict"),
@@ -626,7 +626,8 @@ class TestHeketiZones(baseclass.BaseClass):
# Create app dcs with I/O
for pvc_name in pvc_names:
app_dc = openshift_ops.oc_create_app_dc_with_io(
- self.node, pvc_name=pvc_name, dc_name_prefix=prefix)
+ self.node, pvc_name=pvc_name,
+ dc_name_prefix=prefix, image=self.io_container_image_cirros)
self.addCleanup(openshift_ops.oc_delete, self.node, 'dc', app_dc)
# Get pod names and label them
@@ -643,7 +644,7 @@ class TestHeketiZones(baseclass.BaseClass):
return app_pods
- @pytest.mark.tier1
+ @pytest.mark.tier3
@ddt.data(
(3, False),
(3, True),
@@ -733,7 +734,7 @@ class TestHeketiZones(baseclass.BaseClass):
openshift_ops.wait_for_pod_be_ready(
self.node, pod_name, timeout=5, wait_step=2)
- @pytest.mark.tier1
+ @pytest.mark.tier3
@ddt.data(
(3, False),
(3, True),
diff --git a/tests/functional/heketi/test_restart_heketi_pod.py b/tests/functional/heketi/test_restart_heketi_pod.py
index d88080c2..caab3407 100644
--- a/tests/functional/heketi/test_restart_heketi_pod.py
+++ b/tests/functional/heketi/test_restart_heketi_pod.py
@@ -7,8 +7,11 @@ except ImportError:
import json
import pytest
+import re
from openshiftstoragelibs.baseclass import BaseClass
+from openshiftstoragelibs.command import cmd_run
+from openshiftstoragelibs.exceptions import ExecutionError
from openshiftstoragelibs.heketi_ops import (
heketi_topology_info,
heketi_volume_create,
@@ -28,7 +31,25 @@ from openshiftstoragelibs.openshift_ops import (
class TestRestartHeketi(BaseClass):
- @pytest.mark.tier0
+ def _heketi_pod_delete_cleanup(self):
+ """Cleanup for deletion of heketi pod using force delete"""
+ try:
+ pod_name = get_pod_name_from_dc(
+ self.ocp_master_node[0], self.heketi_dc_name)
+
+ # Check if the heketi pod is in ready state
+ wait_for_pod_be_ready(self.ocp_master_node[0], pod_name, timeout=1)
+ except ExecutionError:
+ # Force delete and wait for new pod to come up
+ oc_delete(self.ocp_master_node[0], 'pod', pod_name, is_force=True)
+ wait_for_resource_absence(self.ocp_master_node[0], 'pod', pod_name)
+
+ # Fetch heketi pod after force delete
+ pod_name = get_pod_name_from_dc(
+ self.ocp_master_node[0], self.heketi_dc_name)
+ wait_for_pod_be_ready(self.ocp_master_node[0], pod_name)
+
+ @pytest.mark.tier1
def test_restart_heketi_pod(self):
"""Validate restarting heketi pod"""
@@ -81,7 +102,7 @@ class TestRestartHeketi(BaseClass):
heketi_volume_delete(
self.heketi_client_node, self.heketi_server_url, vol_info['id'])
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_set_heketi_vol_size_and_brick_amount_limits(self):
# Get Heketi secret name
cmd_get_heketi_secret_name = (
@@ -172,3 +193,39 @@ class TestRestartHeketi(BaseClass):
h_client, h_server, size=(brick_max_size_gb + 1), json=True)
self.addCleanup(heketi_volume_delete, h_client, h_server, vol_5['id'])
heketi_volume_expand(h_client, h_server, vol_5['id'], 2)
+
+ @pytest.mark.tier1
+ def test_heketi_logs_after_heketi_pod_restart(self):
+
+ h_node, h_server = self.heketi_client_node, self.heketi_server_url
+ find_string_in_log = r"Started background pending operations cleaner"
+ ocp_node = self.ocp_master_node[0]
+
+ # Restart heketi pod
+ heketi_pod_name = get_pod_name_from_dc(ocp_node, self.heketi_dc_name)
+ oc_delete(
+ ocp_node, 'pod', heketi_pod_name,
+ collect_logs=self.heketi_logs_before_delete)
+ self.addCleanup(self._heketi_pod_delete_cleanup)
+ wait_for_resource_absence(ocp_node, 'pod', heketi_pod_name)
+ heketi_pod_name = get_pod_name_from_dc(ocp_node, self.heketi_dc_name)
+ wait_for_pod_be_ready(ocp_node, heketi_pod_name)
+ self.assertTrue(
+ hello_heketi(h_node, h_server),
+ "Heketi server {} is not alive".format(h_server))
+
+ # Collect logs after heketi pod restart
+ cmd = "oc logs {}".format(heketi_pod_name)
+ out = cmd_run(cmd, hostname=ocp_node)
+
+ # Validate the string is present in heketi logs
+ pending_check = re.compile(find_string_in_log)
+ entry_list = pending_check.findall(out)
+ self.assertTrue(
+ entry_list, "Failed to find entries in heketi logs")
+
+ for entry in entry_list:
+ self.assertEqual(
+ entry, find_string_in_log,
+ "Failed to validate, Expected {}; Actual {}". format(
+ find_string_in_log, entry))
diff --git a/tests/functional/heketi/test_server_state_examine_gluster.py b/tests/functional/heketi/test_server_state_examine_gluster.py
index bbba966b..f802c68c 100644
--- a/tests/functional/heketi/test_server_state_examine_gluster.py
+++ b/tests/functional/heketi/test_server_state_examine_gluster.py
@@ -27,7 +27,7 @@ class TestHeketiServerStateExamineGluster(BaseClass):
self.skipTest("heketi-client package %s does not support server "
"state examine gluster" % version.v_str)
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_volume_inconsistencies(self):
# Examine Gluster cluster and Heketi that there is no inconsistencies
out = heketi_ops.heketi_examine_gluster(
@@ -59,7 +59,7 @@ class TestHeketiServerStateExamineGluster(BaseClass):
"heketi volume list matches with volume list of all nodes",
out['report'])
- @pytest.mark.tier0
+ @pytest.mark.tier1
@ddt.data('', 'block')
def test_compare_real_vol_count_with_db_check_info(self, vol_type):
"""Validate file/block volumes using heketi db check."""
@@ -83,7 +83,7 @@ class TestHeketiServerStateExamineGluster(BaseClass):
"%svolume count doesn't match expected "
"result %s, actual result is %s" % (vol_type, count, vol_count))
- @pytest.mark.tier0
+ @pytest.mark.tier1
@ddt.data('device_count', 'node_count', 'bricks_count')
def test_verify_db_check(self, count_type):
"""Validate the nodes, devices and bricks count in heketi db"""
@@ -130,7 +130,7 @@ class TestHeketiServerStateExamineGluster(BaseClass):
db_bricks_count, topology_bricks_count))
self.assertEqual(topology_bricks_count, db_bricks_count, msg)
- @pytest.mark.tier1
+ @pytest.mark.tier2
@ddt.data('', 'block')
def test_compare_heketi_volumes(self, vol_type):
"""Validate file/block volume count using heketi gluster examine"""
@@ -166,7 +166,7 @@ class TestHeketiServerStateExamineGluster(BaseClass):
"gluster examine {} are not same".format(
vol_type, heketi_volumes, examine_volumes))
- @pytest.mark.tier2
+ @pytest.mark.tier4
def test_validate_report_after_node_poweroff(self):
"""Validate node report in heketi gluster examine after poweroff"""
# Skip test if not able to connect to Cloud Provider
@@ -188,7 +188,7 @@ class TestHeketiServerStateExamineGluster(BaseClass):
examine_msg, msg, "Failed to generate error report for node {} in"
" gluster examine output".format(g_node))
- @pytest.mark.tier0
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_compare_brick_mount_status(self):
"""Compare the brick mount status from all nodes"""
diff --git a/tests/functional/heketi/test_volume_creation.py b/tests/functional/heketi/test_volume_creation.py
index 92b9dac3..0094c689 100644
--- a/tests/functional/heketi/test_volume_creation.py
+++ b/tests/functional/heketi/test_volume_creation.py
@@ -21,7 +21,7 @@ class TestVolumeCreationTestCases(BaseClass):
super(TestVolumeCreationTestCases, self).setUp()
self.node = self.ocp_master_node[0]
- @pytest.mark.tier0
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_create_heketi_volume(self):
"""Test heketi volume creation and background gluster validation"""
@@ -94,7 +94,7 @@ class TestVolumeCreationTestCases(BaseClass):
[brick_name]["status"]), 1,
"Brick %s is not up" % brick_name)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_volume_creation_no_free_devices(self):
"""Validate heketi error is returned when no free devices available"""
node, server_url = self.heketi_client_node, self.heketi_server_url
@@ -158,7 +158,7 @@ class TestVolumeCreationTestCases(BaseClass):
vol_fail,
"Volume should have not been created. Out: %s" % vol_fail)
- @pytest.mark.tier1
+ @pytest.mark.tier2
@podcmd.GlustoPod()
def test_volume_create_replica_2(self):
"""Validate creation of a replica 2 volume"""
@@ -189,7 +189,7 @@ class TestVolumeCreationTestCases(BaseClass):
"Brick amount is expected to be 2. "
"Actual amount is '%s'" % brick_amount)
- @pytest.mark.tier1
+ @pytest.mark.tier2
@podcmd.GlustoPod()
def test_volume_create_snapshot_enabled(self):
"""Validate volume creation with snapshot enabled"""
@@ -249,7 +249,7 @@ class TestVolumeCreationTestCases(BaseClass):
g_vol_info = g_vol_info.get(file_vol)
return g_vol_info
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_volume_creation_of_size_greater_than_the_device_size(self):
"""Validate creation of a volume of size greater than the size of a
device.
@@ -386,7 +386,7 @@ class TestVolumeCreationTestCases(BaseClass):
% (vol_name, gluster_v_info['brickCount']))
self.assertFalse(int(gluster_v_info['brickCount']) % 3, msg)
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_create_volume_with_same_name(self):
"""Test create two volumes with the same name and verify that 2nd one
is failing with the appropriate error.
@@ -419,7 +419,7 @@ class TestVolumeCreationTestCases(BaseClass):
'unexpectedly.' % (vol_info, vol_info_new))
self.assertFalse(vol_info_new, msg)
- @pytest.mark.tier2
+ @pytest.mark.tier4
def test_heketi_volume_provision_after_node_reboot(self):
"""Provision volume before and after node reboot"""
# Skip test if not able to connect to Cloud Provider
diff --git a/tests/functional/heketi/test_volume_deletion.py b/tests/functional/heketi/test_volume_deletion.py
index 6cb9018a..6d2e41e0 100644
--- a/tests/functional/heketi/test_volume_deletion.py
+++ b/tests/functional/heketi/test_volume_deletion.py
@@ -1,13 +1,23 @@
from __future__ import division
+from glusto.core import Glusto as g
+import mock
import pytest
-from openshiftstoragelibs.baseclass import BaseClass
-from openshiftstoragelibs.exceptions import ExecutionError
+from openshiftstoragelibs import baseclass
+from openshiftstoragelibs import command
+from openshiftstoragelibs import exceptions
from openshiftstoragelibs import heketi_ops
+from openshiftstoragelibs import openshift_ops
+from openshiftstoragelibs import waiter
-class TestVolumeDeleteTestCases(BaseClass):
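+# Heketi CLI command timeout taken from the test config (defaults to
+# 120 seconds); TIMEOUT_PREFIX wraps commands with the shell 'timeout'
+# utility using that value.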
+HEKETI_COMMAND_TIMEOUT = g.config.get("common", {}).get(
+ "heketi_command_timeout", 120)
+TIMEOUT_PREFIX = "timeout %s " % HEKETI_COMMAND_TIMEOUT
+
+
+class TestVolumeDeleteTestCases(baseclass.BaseClass):
"""
Class for volume deletion related test cases
@@ -32,7 +42,26 @@ class TestVolumeDeleteTestCases(BaseClass):
return total_free_space
- @pytest.mark.tier0
+ def _heketi_pod_delete_cleanup(self, ocp_node):
+ """Cleanup for deletion of heketi pod using force delete"""
+ try:
+ pod_name = openshift_ops.get_pod_name_from_dc(
+ ocp_node, self.heketi_dc_name)
+
+ # Check if the heketi pod is in ready state
+ openshift_ops.wait_for_pod_be_ready(ocp_node, pod_name, timeout=1)
+ except exceptions.ExecutionError:
+ # Force delete and wait for new pod to come up
+ openshift_ops.oc_delete(ocp_node, 'pod', pod_name, is_force=True)
+ openshift_ops.wait_for_resource_absence(
+ self.ocp_master_node[0], 'pod', pod_name)
+
+ # Fetch heketi pod after force delete
+ pod_name = openshift_ops.get_pod_name_from_dc(
+ ocp_node, self.heketi_dc_name)
+ openshift_ops.wait_for_pod_be_ready(ocp_node, pod_name)
+
+ @pytest.mark.tier1
def test_delete_heketi_volume(self):
"""
Method to test heketi volume deletion and whether it
@@ -60,7 +89,7 @@ class TestVolumeDeleteTestCases(BaseClass):
"Free space is not reclaimed after deletion "
"of %s" % volume_info["id"])
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_delete_heketidb_volume(self):
"""Method to test heketidb volume deletion via heketi-cli."""
for i in range(0, 2):
@@ -89,5 +118,81 @@ class TestVolumeDeleteTestCases(BaseClass):
heketi_ops.heketi_volume_delete,
self.heketi_client_node, self.heketi_server_url, volume_id)
return
- raise ExecutionError(
+ raise exceptions.ExecutionError(
"Warning: heketidbstorage doesn't exist in list of volumes")
+
+ @pytest.mark.tier2
+ def test_heketi_server_stale_operations_during_heketi_pod_reboot(self):
+ """
+ Validate failed/stale entries in the heketi db and perform a
+ cleanup of those entries
+ """
+ volume_id_list, async_obj, ocp_node = [], [], self.ocp_master_node[0]
+ h_node, h_server = self.heketi_client_node, self.heketi_server_url
+ for i in range(0, 8):
+ volume_info = heketi_ops.heketi_volume_create(
+ h_node, h_server, 1, json=True)
+ volume_id_list.append(volume_info["id"])
+ self.addCleanup(
+ heketi_ops.heketi_volume_delete, h_node, h_server,
+ volume_info["id"], raise_on_error=False)
+
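+ # Helper used to monkey-patch command.cmd_run: it launches the
+ # command asynchronously and records the handle in async_obj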
+ def run_async(cmd, hostname, raise_on_error=True):
+ async_op = g.run_async(host=hostname, command=cmd)
+ async_obj.append(async_op)
+ return async_op
+
+ # Temporarily replace command.cmd_run with g.run_async in
+ # heketi_volume_delete so that it runs in the background.
+ for vol_id in volume_id_list:
+ with mock.patch.object(command, 'cmd_run', side_effect=run_async):
+ heketi_ops.heketi_volume_delete(h_node, h_server, vol_id)
+
+ # Restart heketi pod and check pod is running
+ heketi_pod_name = openshift_ops.get_pod_name_from_dc(
+ ocp_node, self.heketi_dc_name)
+ openshift_ops.oc_delete(
+ ocp_node, 'pod', heketi_pod_name,
+ collect_logs=self.heketi_logs_before_delete)
+ self.addCleanup(self._heketi_pod_delete_cleanup, ocp_node)
+ openshift_ops.wait_for_resource_absence(
+ ocp_node, 'pod', heketi_pod_name)
+ heketi_pod_name = openshift_ops.get_pod_name_from_dc(
+ ocp_node, self.heketi_dc_name)
+ openshift_ops.wait_for_pod_be_ready(ocp_node, heketi_pod_name)
+ self.assertTrue(
+ heketi_ops.hello_heketi(h_node, h_server),
+ "Heketi server {} is not alive".format(h_server))
+
+ # Wait for pending operations to get generated
+ for w in waiter.Waiter(timeout=30, interval=3):
+ h_db_check = heketi_ops.heketi_db_check(h_node, h_server)
+ h_db_check_vol = h_db_check.get("volumes")
+ h_db_check_bricks = h_db_check.get("bricks")
+ if ((h_db_check_vol.get("pending"))
+ and (h_db_check_bricks.get("pending"))):
+ break
+ if w.expired:
+ raise exceptions.ExecutionError(
+ "No pending operations found during volume deletion. "
+ "Volumes: {}, Bricks: {}".format(
+ h_db_check_vol.get("pending"),
+ h_db_check_bricks.get("pending")))
+
+ # Verify pending brick count is a multiple of 3
+ self.assertFalse(
+ h_db_check_bricks.get("pending") % 3,
+ "Expecting pending brick count to be a multiple of 3 but "
+ "found {}".format(h_db_check_bricks.get("pending")))
+
+ # Verify and wait for pending operations to complete
+ for w in waiter.Waiter(timeout=120, interval=10):
+ h_db_check = heketi_ops.heketi_db_check(h_node, h_server)
+ h_db_check_vol = h_db_check.get("volumes")
+ h_db_check_bricks = h_db_check.get("bricks")
+ if ((not h_db_check_bricks.get("pending"))
+ and (not h_db_check_vol.get("pending"))):
+ break
+ if w.expired:
+ raise exceptions.ExecutionError(
+ "Failed to delete volumes after 120 secs")
diff --git a/tests/functional/heketi/test_volume_expansion_and_devices.py b/tests/functional/heketi/test_volume_expansion_and_devices.py
index 5270c7c8..fa78b1aa 100644
--- a/tests/functional/heketi/test_volume_expansion_and_devices.py
+++ b/tests/functional/heketi/test_volume_expansion_and_devices.py
@@ -10,6 +10,7 @@ from openshiftstoragelibs import (
heketi_ops,
podcmd,
)
+from openshiftstoragelibs import utils
class TestVolumeExpansionAndDevicesTestCases(BaseClass):
@@ -162,7 +163,7 @@ class TestVolumeExpansionAndDevicesTestCases(BaseClass):
device_delete, False,
"Device %s could not be deleted" % device_id)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_volume_expansion_expanded_volume(self):
"""Validate volume expansion with brick and check rebalance"""
creation_info = heketi_ops.heketi_volume_create(
@@ -302,7 +303,7 @@ class TestVolumeExpansionAndDevicesTestCases(BaseClass):
"Free space not reclaimed after deletion of %s"
% volume_id)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_volume_expansion_no_free_space(self):
"""Validate volume expansion when there is no free space"""
@@ -388,7 +389,7 @@ class TestVolumeExpansionAndDevicesTestCases(BaseClass):
self.disable_devices(additional_devices_attached)
# Create volume and save info about it
- vol_size = int(smallest_size / (1024**2)) - 1
+ vol_size = int(smallest_size / (1024**2) * 0.98)
creation_info = heketi_ops.heketi_volume_create(
h_node, h_server_url, vol_size, json=True)
volume_name, volume_id = creation_info["name"], creation_info["id"]
@@ -437,7 +438,7 @@ class TestVolumeExpansionAndDevicesTestCases(BaseClass):
free_space_after_deletion, free_space_after_expansion,
"Free space not reclaimed after deletion of volume %s" % volume_id)
- @pytest.mark.tier0
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_volume_expansion_rebalance_brick(self):
"""Validate volume expansion with brick and check rebalance"""
@@ -521,3 +522,44 @@ class TestVolumeExpansionAndDevicesTestCases(BaseClass):
free_space_after_deletion > free_space_after_expansion,
"Free space is not reclaimed after volume deletion of %s"
% volume_id)
+
+ @pytest.mark.tier2
+ @podcmd.GlustoPod()
+ def test_replica_volume_expand(self):
+ """
+ Test expansion of a replica volume
+ """
+ h_node, h_server = self.heketi_client_node, self.heketi_server_url
+ volume_name = (
+ "autotests-heketi-volume-{}".format(utils.get_random_str()))
+ volume_size = 10
+ creation_info = self.create_heketi_volume_with_name_and_wait(
+ volume_name, volume_size, json=True, raise_on_cleanup_error=False)
+ volume_id = creation_info["id"]
+ volume_info = heketi_ops.heketi_volume_info(
+ h_node, h_server, volume_id, json=True)
+
+ # Get gluster volume info
+ gluster_vol = volume_ops.get_volume_info(
+ 'auto_get_gluster_endpoint', volname=volume_name)
+ self.assertTrue(
+ gluster_vol, "Failed to get volume {} info".format(volume_name))
+ g_vol_info = gluster_vol[volume_name]
+ self.assertEqual(
+ g_vol_info['replicaCount'], "3",
+ "Replica count is different for volume {}. Actual: {}, "
+ "Expected: 3".format(volume_name, g_vol_info['replicaCount']))
+
+ expand_size = 5
+ heketi_ops.heketi_volume_expand(
+ h_node, h_server, volume_id, expand_size)
+ volume_info = heketi_ops.heketi_volume_info(
+ h_node, h_server, volume_id, json=True)
+ expected_size = volume_size + expand_size
+ self.assertEqual(
+ volume_info['size'], expected_size,
+ "Volume Expansion failed, Expected Size: {}, Actual "
+ "Size: {}".format(str(expected_size), str(volume_info['size'])))
+
+ self.get_brick_and_volume_status(volume_name)
+ self.get_rebalance_status(volume_name)
diff --git a/tests/functional/heketi/test_volume_multi_req.py b/tests/functional/heketi/test_volume_multi_req.py
index 3fdc4381..44ea803b 100644
--- a/tests/functional/heketi/test_volume_multi_req.py
+++ b/tests/functional/heketi/test_volume_multi_req.py
@@ -221,7 +221,7 @@ class TestVolumeMultiReq(BaseClass):
ocp_node = list(g.config['ocp_servers']['master'].keys())[0]
return len(_heketi_vols(ocp_node, self.heketi_server_url))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_simple_serial_vol_create(self):
"""Test that serially creating PVCs causes heketi to add volumes.
"""
@@ -282,7 +282,7 @@ class TestVolumeMultiReq(BaseClass):
self.assertIn(c2.heketiVolumeName, now_vols)
self.assertNotIn(c2.heketiVolumeName, orig_vols)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_multiple_vol_create(self):
"""Test creating two volumes via PVCs with no waiting between
the PVC requests.
@@ -336,7 +336,7 @@ class TestVolumeMultiReq(BaseClass):
# verify this volume in heketi
self.assertIn(c2.heketiVolumeName, now_vols)
- @pytest.mark.tier1
+ @pytest.mark.tier2
# NOTE(jjm): I've noticed that on the system I'm using (RHEL7).
# with count=8 things start to back up a bit.
# I needed to increase some timeouts to get this to pass.
@@ -385,7 +385,7 @@ class TestVolumeMultiReq(BaseClass):
c.update_pv_info(ocp_node)
self.assertIn(c.heketiVolumeName, now_vols)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_create_delete_volumes_concurrently(self):
"""Test creating volume when "other processes" are creating
and deleting other volumes in the background.
diff --git a/tests/functional/logging/__init__.py b/tests/functional/logging/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/functional/logging/__init__.py
diff --git a/tests/functional/logging/test_logging_validations.py b/tests/functional/logging/test_logging_validations.py
new file mode 100644
index 00000000..509c71d8
--- /dev/null
+++ b/tests/functional/logging/test_logging_validations.py
@@ -0,0 +1,396 @@
+from pkg_resources import parse_version
+
+import ddt
+from glusto.core import Glusto as g
+import pytest
+
+from openshiftstoragelibs.baseclass import GlusterBlockBaseClass
+from openshiftstoragelibs import command
+from openshiftstoragelibs import exceptions
+from openshiftstoragelibs import gluster_ops
+from openshiftstoragelibs import openshift_ops
+from openshiftstoragelibs import waiter
+
+
+@ddt.ddt
+class TestLoggingAndGlusterRegistryValidation(GlusterBlockBaseClass):
+
+ def setUp(self):
+ """Initialize all the variables necessary for test cases."""
+ super(TestLoggingAndGlusterRegistryValidation, self).setUp()
+
+ try:
+ logging_config = g.config['openshift']['logging']
+ self._logging_project_name = logging_config['logging_project_name']
+ self._logging_fluentd_ds = logging_config['logging_fluentd_ds']
+ self._logging_es_dc = logging_config['logging_es_dc']
+ self._logging_kibana_dc = logging_config['logging_kibana_dc']
+ self._registry_heketi_server_url = (
+ g.config['openshift']['registry_heketi_config'][
+ 'heketi_server_url'])
+ self._registry_project_name = (
+ g.config['openshift']['registry_project_name'])
+ self._registry_servers_info = g.config['gluster_registry_servers']
+ except KeyError as err:
+ msg = "Config file doesn't have key {}".format(err)
+ g.log.error(msg)
+ self.skipTest(msg)
+
+ # Skip the test if the iscsi-initiator-utils version is not the expected one
+ cmd = ("rpm -q iscsi-initiator-utils "
+ "--queryformat '%{version}-%{release}\n'"
+ "| cut -d '.' -f 1,2,3,4")
+ e_pkg_version = "6.2.0.874-17"
+ for g_server in self.gluster_servers:
+ out = self.cmd_run(cmd, g_server)
+ if parse_version(out) < parse_version(e_pkg_version):
+ msg = ("Skip test since isci initiator utils version actual: "
+ "{out} is less than expected: {ver} on node {server},"
+ " for more info refer to BZ-1624670"
+ .format(out=out, ver=e_pkg_version, server=g_server))
+ g.log.error(msg)
+ self.skipTest(msg)
+
+ self._master = self.ocp_master_node[0]
+ cmd = "oc project --short=true"
+ current_project = command.cmd_run(cmd, self._master)
+ openshift_ops.switch_oc_project(
+ self._master, self._logging_project_name)
+ self.addCleanup(
+ openshift_ops.switch_oc_project, self._master, current_project)
+
+ def _get_es_pod_and_verify_iscsi_sessions(self):
+ """Fetch es pod and verify iscsi sessions"""
+ pvc_custom = ":.spec.volumes[*].persistentVolumeClaim.claimName"
+
+ # Get the elasticsearch pod name and PVC name
+ es_pod = openshift_ops.get_pod_name_from_dc(
+ self._master, self._logging_es_dc)
+ pvc_name = openshift_ops.oc_get_custom_resource(
+ self._master, "pod", pvc_custom, es_pod)[0]
+
+ # Validate iscsi and multipath
+ self.verify_iscsi_sessions_and_multipath(
+ pvc_name, self._logging_es_dc,
+ heketi_server_url=self._registry_heketi_server_url,
+ is_registry_gluster=True)
+ return es_pod, pvc_name
+
+ def _get_newly_deployed_gluster_pod(self, g_pod_list_before):
+ # Fetch pod after delete
+ g_pod_list_after = [
+ pod["pod_name"]
+ for pod in openshift_ops.get_ocp_gluster_pod_details(self._master)]
+
+ # Fetch the new gluster pod
+ g_new_pod = list(set(g_pod_list_after) - set(g_pod_list_before))
+ self.assertTrue(g_new_pod, "No new gluster pod deployed after delete")
+ return g_new_pod
+
+ def _gluster_pod_delete_cleanup(self, g_pod_list_before):
+ """Cleanup for deletion of gluster pod using force delete"""
+ # Switch to gluster project
+ openshift_ops.switch_oc_project(
+ self._master, self._registry_project_name)
+ try:
+ # Fetch gluster pod after delete
+ pod_name = self._get_newly_deployed_gluster_pod(g_pod_list_before)
+
+ # Check if pod name is empty, i.e. no new pod came up, so use the old pod
+ openshift_ops.wait_for_pod_be_ready(
+ self._master,
+ pod_name[0] if pod_name else g_pod_list_before[0], timeout=1)
+ except exceptions.ExecutionError:
+ # Force delete and wait for new pod to come up
+ openshift_ops.oc_delete(
+ self._master, 'pod', g_pod_list_before[0], is_force=True)
+ openshift_ops.wait_for_resource_absence(
+ self._master, 'pod', g_pod_list_before[0])
+
+ # Fetch gluster pod after force delete
+ g_new_pod = self._get_newly_deployed_gluster_pod(g_pod_list_before)
+ openshift_ops.wait_for_pod_be_ready(self._master, g_new_pod[0])
+
+ @pytest.mark.tier3
+ def test_validate_logging_pods_and_pvc(self):
+ """Validate logging pods and PVC"""
+
+ # Wait for kibana pod to be ready
+ kibana_pod = openshift_ops.get_pod_name_from_dc(
+ self._master, self._logging_kibana_dc)
+ openshift_ops.wait_for_pod_be_ready(self._master, kibana_pod)
+
+ # Wait for fluentd pods to be ready
+ fluentd_custom = [":.status.desiredNumberScheduled",
+ ":.spec.template.metadata.labels"]
+ count_and_selector = openshift_ops.oc_get_custom_resource(
+ self._master, "ds", fluentd_custom, self._logging_fluentd_ds)
+ selector = count_and_selector[1][4:].replace(":", "=")
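+ # Derive the fluentd pod label selector from the daemonset's
+ # template labels fetched above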
+ openshift_ops.wait_for_pods_be_ready(
+ self._master, int(count_and_selector[0]), selector)
+
+ # Wait for PVC to be bound and elasticsearch pod to be ready
+ es_pod = openshift_ops.get_pod_name_from_dc(
+ self._master, self._logging_es_dc)
+ pvc_custom = ":.spec.volumes[*].persistentVolumeClaim.claimName"
+ pvc_name = openshift_ops.oc_get_custom_resource(
+ self._master, "pod", pvc_custom, es_pod)[0]
+ openshift_ops.verify_pvc_status_is_bound(self._master, pvc_name)
+ openshift_ops.wait_for_pod_be_ready(self._master, es_pod)
+
+ # Validate iscsi and multipath
+ self.verify_iscsi_sessions_and_multipath(
+ pvc_name, self._logging_es_dc,
+ heketi_server_url=self._registry_heketi_server_url,
+ is_registry_gluster=True)
+
+ @pytest.mark.tier3
+ def test_logging_es_pod_pvc_all_freespace_utilization(self):
+ """Validate logging by utilizing all the free space of block PVC bound
+ to elsaticsearch pod"""
+
+ # Fetch pod and validate iscsi and multipath
+ es_pod, _ = self._get_es_pod_and_verify_iscsi_sessions()
+
+ # Get the available free space
+ mount_point = '/elasticsearch/persistent'
+ cmd_free_space = (
+ "df -kh {} | awk '{{print $4}}' | tail -1".format(mount_point))
+ old_available_space = openshift_ops.oc_rsh(
+ self._master, es_pod, cmd_free_space)[1]
+
+ # Fill all the available space
+ file_name = '{}/file'.format(mount_point)
+ cmd_fill_space = (
+ "fallocate -l {} {}".format(old_available_space, file_name))
+ with self.assertRaises(AssertionError):
+ openshift_ops.oc_rsh(self._master, es_pod, cmd_fill_space)
+
+ # Cleanup the filled space
+ cmd_remove_file = 'rm {}'.format(file_name)
+ self.addCleanup(
+ openshift_ops.oc_rsh, self._master, es_pod, cmd_remove_file)
+
+ @pytest.mark.tier4
+ def test_respin_gluster_pod(self):
+ """Validate gluster pod restart with no disruption to elasticsearch pod
+ """
+ restart_custom = ":status.containerStatuses[0].restartCount"
+
+ # Fetch pod and validate iscsi and multipath
+ es_pod, _ = self._get_es_pod_and_verify_iscsi_sessions()
+
+ # Fetch the restart count for the es pod
+ restart_count_before = openshift_ops.oc_get_custom_resource(
+ self._master, "pod", restart_custom, es_pod)[0]
+
+ # Switch to gluster project
+ openshift_ops.switch_oc_project(
+ self._master, self._registry_project_name)
+
+ # Fetch the gluster pod list before
+ g_pod_list_before = [
+ pod["pod_name"]
+ for pod in openshift_ops.get_ocp_gluster_pod_details(self._master)]
+
+ # Respin a gluster pod
+ openshift_ops.oc_delete(self._master, "pod", g_pod_list_before[0])
+ self.addCleanup(self._gluster_pod_delete_cleanup, g_pod_list_before)
+
+ # Wait for pod to get absent
+ openshift_ops.wait_for_resource_absence(
+ self._master, "pod", g_pod_list_before[0])
+
+ # Fetch gluster pod after delete
+ g_new_pod = self._get_newly_deployed_gluster_pod(g_pod_list_before)
+ openshift_ops.wait_for_pod_be_ready(self._master, g_new_pod[0])
+
+ # Switch to logging project
+ openshift_ops.switch_oc_project(
+ self._master, self._logging_project_name)
+
+ # Fetch the restart count for the es pod
+ restart_count_after = openshift_ops.oc_get_custom_resource(
+ self._master, "pod", restart_custom, es_pod)[0]
+ self.assertEqual(
+ restart_count_before, restart_count_after,
+ "Failed disruption to es pod found expecting restart count before"
+ " {} and after {} for es pod to be equal after gluster pod"
+ " respin".format(restart_count_before, restart_count_after))
+
+ @pytest.mark.tier4
+ def test_kill_bhv_fsd_while_es_pod_running(self):
+ """Validate killing of bhv fsd won't effect es pod io's"""
+
+ # Fetch pod and PVC names and validate iscsi and multipath
+ es_pod, pvc_name = self._get_es_pod_and_verify_iscsi_sessions()
+
+ # Get the bhv name
+ gluster_node = list(self._registry_servers_info.keys())[0]
+ openshift_ops.switch_oc_project(
+ self._master, self._registry_project_name)
+ bhv_name = self.get_block_hosting_volume_by_pvc_name(
+ pvc_name, heketi_server_url=self._registry_heketi_server_url,
+ gluster_node=gluster_node)
+
+ # Get one of the bricks pid of the bhv
+ gluster_volume_status = gluster_ops.get_gluster_vol_status(bhv_name)
+ pid = None
+ for g_node, g_node_data in gluster_volume_status.items():
+ if g_node != gluster_node:
+ continue
+ for process_name, process_data in g_node_data.items():
+ if not process_name.startswith("/var"):
+ continue
+ pid = process_data["pid"]
+ # When a brick is down, the pid of the brick is returned as -1,
+ # which is an unexpected situation. So, add an appropriate assertion.
+ self.assertNotEqual(
+ pid, "-1", "Got unexpected PID (-1) for '{}' gluster vol "
+ "on '{}' node.".format(bhv_name, gluster_node))
+ break
+ self.assertTrue(
+ pid, "Could not find 'pid' in Gluster vol data for '{}' "
+ "Gluster node. Data: {}".format(
+ gluster_node, gluster_volume_status))
+ break
+
+ # Kill gluster vol brick process using found pid
+ cmd_kill = "kill -9 {}".format(pid)
+ cmd_start_vol = "gluster v start {} force".format(bhv_name)
+ openshift_ops.cmd_run_on_gluster_pod_or_node(
+ self._master, cmd_kill, gluster_node)
+ self.addCleanup(openshift_ops.cmd_run_on_gluster_pod_or_node,
+ self._master, cmd_start_vol, gluster_node)
+ self.addCleanup(openshift_ops.switch_oc_project,
+ self._master, self._registry_project_name)
+
+ # Run I/O on ES pod
+ openshift_ops.switch_oc_project(
+ self._master, self._logging_project_name)
+ file_name = '/elasticsearch/persistent/file1'
+ cmd_run_io = 'dd if=/dev/urandom of={} bs=4k count=10000'.format(
+ file_name)
+ cmd_remove_file = 'rm {}'.format(file_name)
+ openshift_ops.oc_rsh(self._master, es_pod, cmd_run_io)
+ self.addCleanup(
+ openshift_ops.oc_rsh, self._master, es_pod, cmd_remove_file)
+
+ def _delete_and_wait_for_new_es_pod_to_come_up(self):
+
+ # Force delete and wait for es pod to come up
+ openshift_ops.switch_oc_project(
+ self._master, self._logging_project_name)
+ pod_name = openshift_ops.get_pod_name_from_dc(
+ self._master, self._logging_es_dc)
+ openshift_ops.oc_delete(self._master, 'pod', pod_name, is_force=True)
+ openshift_ops.wait_for_resource_absence(self._master, 'pod', pod_name)
+ new_pod_name = openshift_ops.get_pod_name_from_dc(
+ self._master, self._logging_es_dc)
+ openshift_ops.wait_for_pod_be_ready(
+ self._master, new_pod_name, timeout=1800)
+
+ @pytest.mark.tier2
+ @ddt.data('delete', 'drain')
+ def test_respin_es_pod(self, motive):
+ """Validate respin of elastic search pod"""
+
+ # Get the pod name and PVC name
+ es_pod = openshift_ops.get_pod_name_from_dc(
+ self._master, self._logging_es_dc)
+ pvc_custom = ":.spec.volumes[*].persistentVolumeClaim.claimName"
+ pvc_name = openshift_ops.oc_get_custom_resource(
+ self._master, "pod", pvc_custom, es_pod)[0]
+
+ # Validate iscsi and multipath
+ _, _, node = self.verify_iscsi_sessions_and_multipath(
+ pvc_name, self._logging_es_dc,
+ heketi_server_url=self._registry_heketi_server_url,
+ is_registry_gluster=True)
+ if motive == 'delete':
+
+ # Delete the es pod
+ self.addCleanup(self._delete_and_wait_for_new_es_pod_to_come_up)
+ openshift_ops.oc_delete(self._master, "pod", es_pod)
+ elif motive == 'drain':
+
+ # Get the number of infra nodes
+ infra_node_count_cmd = (
+ 'oc get nodes '
+ '--no-headers -l node-role.kubernetes.io/infra=true|wc -l')
+ infra_node_count = command.cmd_run(
+ infra_node_count_cmd, self._master)
+
+ # Skip test case if the number of infra nodes is less than 2
+ if int(infra_node_count) < 2:
+ self.skipTest('Available number of infra nodes "{}", it should'
+ ' be more than 1'.format(infra_node_count))
+
+ # Cleanup to make node schedulable
+ cmd_schedule = (
+ 'oc adm manage-node {} --schedulable=true'.format(node))
+ self.addCleanup(
+ command.cmd_run, cmd_schedule, hostname=self._master)
+
+ # Drain the node
+ drain_cmd = ('oc adm drain {} --force=true --ignore-daemonsets '
+ '--delete-local-data'.format(node))
+ command.cmd_run(drain_cmd, hostname=self._master)
+
+ # Wait for pod to get absent
+ openshift_ops.wait_for_resource_absence(self._master, "pod", es_pod)
+
+ # Wait for new pod to come up
+ try:
+ pod_name = openshift_ops.get_pod_name_from_dc(
+ self._master, self._logging_es_dc)
+ openshift_ops.wait_for_pod_be_ready(self._master, pod_name)
+ except exceptions.ExecutionError:
+ self._delete_and_wait_for_new_es_pod_to_come_up()
+
+ # Validate iscsi and multipath
+ self.verify_iscsi_sessions_and_multipath(
+ pvc_name, self._logging_es_dc,
+ heketi_server_url=self._registry_heketi_server_url,
+ is_registry_gluster=True)
+
+ @pytest.mark.tier3
+ def test_run_workload_with_logging(self):
+ """Validate logs are being generated aifter running workload"""
+
+ # Get the size of used space of logs
+ es_pod = openshift_ops.get_pod_name_from_dc(
+ self._master, self._logging_es_dc)
+ mount_point = "/elasticsearch/persistent"
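+        # The pipeline below leaves only the used-space figure: the first
+        # sed drops the "Used" header line and the second strips a trailing
+        # "G" so the value can be compared numerically afterwards.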
+ cmd_space_check = ('df -kh --output=used {} | sed "/Used/d" |'
+ 'sed "s/G//"'.format(mount_point))
+ ret, initial_used_percent, err = openshift_ops.oc_rsh(
+ self._master, es_pod, cmd_space_check)
+ err_msg = "Failed to fetch the size of used space, error {}"
+ self.assertFalse(ret, err_msg.format(err))
+
+ # Create 20 pvcs and app pods with io
+ openshift_ops.switch_oc_project(
+ self._master, self.storage_project_name)
+ pvc_count, batch_count = 5, 4
+ for _ in range(batch_count):
+ pvcs = self.create_and_wait_for_pvcs(pvc_amount=pvc_count)
+ self.create_dcs_with_pvc(pvcs)
+ self.addCleanup(
+ openshift_ops.switch_oc_project,
+ self._master, self.storage_project_name)
+
+        # Get and verify the final used space of the logs
+ openshift_ops.switch_oc_project(
+ self._master, self._logging_project_name)
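+        # The Waiter(600, 30) loop polls roughly every 30 seconds for up to
+        # 600 seconds; once the timeout elapses w.expired turns True and the
+        # check below raises an assertion error.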
+ for w in waiter.Waiter(600, 30):
+ ret, final_used_percent, err = openshift_ops.oc_rsh(
+ self._master, es_pod, cmd_space_check)
+ self.assertFalse(ret, err_msg.format(err))
+ if int(initial_used_percent) < int(final_used_percent):
+ break
+ if w.expired:
+ raise AssertionError(
+ "Initial used space {} for logs is not less than final "
+ "used space {}".format(
+ initial_used_percent, final_used_percent))
diff --git a/tests/functional/metrics/test_metrics_validation.py b/tests/functional/metrics/test_metrics_validation.py
index 12e3b90d..e16fe349 100644
--- a/tests/functional/metrics/test_metrics_validation.py
+++ b/tests/functional/metrics/test_metrics_validation.py
@@ -27,6 +27,7 @@ from openshiftstoragelibs.openshift_storage_libs import (
get_iscsi_block_devices_by_path,
get_mpath_name_from_device_name,
)
+from openshiftstoragelibs import waiter
@ddt.ddt
@@ -114,7 +115,7 @@ class TestMetricsAndGlusterRegistryValidation(GlusterBlockBaseClass):
is_registry_gluster=True)
return hawkular_cassandra, pvc_name, iqn, hacount, node
- @pytest.mark.tier2
+ @pytest.mark.tier4
def test_verify_metrics_data_during_gluster_pod_respin(self):
# Add check for CRS version
switch_oc_project(self.master, self.registry_project_name)
@@ -180,7 +181,7 @@ class TestMetricsAndGlusterRegistryValidation(GlusterBlockBaseClass):
if raise_on_error:
raise err
- @pytest.mark.tier2
+ @pytest.mark.tier4
@ddt.data('delete', 'drain')
def test_metrics_during_cassandra_pod_respin(self, motive='delete'):
"""Validate cassandra pod respin"""
@@ -257,7 +258,7 @@ class TestMetricsAndGlusterRegistryValidation(GlusterBlockBaseClass):
self.addCleanup(
oc_rsh, self.master, hawkular_cassandra, cmd_remove_file)
- @pytest.mark.tier2
+ @pytest.mark.tier4
def test_metrics_cassandra_pod_with_bhv_brick_process_down(self):
"""Validate metrics during restart of brick process of bhv"""
@@ -274,3 +275,41 @@ class TestMetricsAndGlusterRegistryValidation(GlusterBlockBaseClass):
restart_gluster_vol_brick_processes(
self.master, bhv_name, list(self.registry_servers_info.keys()))
self.addCleanup(self.cassandra_pod_delete_cleanup, raise_on_error=True)
+
+ @pytest.mark.tier3
+ def test_run_workload_with_metrics(self):
+ """Validate if logs are being generated after running workload"""
+
+ # Get the size of used space of logs
+ cassandra_pod = get_pod_name_from_rc(
+ self.master, self.metrics_rc_hawkular_cassandra)
+ mount_point = "/cassandra_data"
+ cmd_space_check = ('df -k --output=used {} | sed "/Used/d" |'
+ 'sed "s/G//"'.format(mount_point))
+ ret, initial_used_percent, err = oc_rsh(
+ self.master, cassandra_pod, cmd_space_check)
+ err_msg = "Failed to fetch the size of used space, error {}"
+ self.assertFalse(ret, err_msg.format(err))
+
+ # Create 20 PVCs and app pods with IO
+ switch_oc_project(self.master, self.storage_project_name)
+ pvc_count, batch_count = 5, 4
+ for _ in range(batch_count):
+ pvcs = self.create_and_wait_for_pvcs(pvc_amount=pvc_count)
+ self.create_dcs_with_pvc(pvcs)
+ self.addCleanup(
+ switch_oc_project, self.master, self.storage_project_name)
+
+        # Get and verify the final used space of the Cassandra data mount
+ switch_oc_project(self.master, self.metrics_project_name)
+ for w in waiter.Waiter(600, 30):
+ ret, final_used_percent, err = oc_rsh(
+ self.master, cassandra_pod, cmd_space_check)
+ self.assertFalse(ret, err_msg.format(err))
+ if int(initial_used_percent) < int(final_used_percent):
+ break
+ if w.expired:
+ raise AssertionError(
+ "Initial used space {} for logs is not less than final "
+ "used space {}".format(
+ initial_used_percent, final_used_percent))
diff --git a/tests/functional/prometheous/__init__.py b/tests/functional/prometheous/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/functional/prometheous/__init__.py
diff --git a/tests/functional/prometheous/test_prometheus_validations.py b/tests/functional/prometheous/test_prometheus_validations.py
new file mode 100644
index 00000000..68b69212
--- /dev/null
+++ b/tests/functional/prometheous/test_prometheus_validations.py
@@ -0,0 +1,976 @@
+try:
+ # py2/3
+ import simplejson as json
+except ImportError:
+ # py2
+ import json
+from pkg_resources import parse_version
+from functools import reduce
+
+import ddt
+from glusto.core import Glusto as g
+from glustolibs.gluster import brick_libs
+from glustolibs.gluster import volume_ops
+import pytest
+
+from openshiftstoragelibs.baseclass import GlusterBlockBaseClass
+from openshiftstoragelibs import command
+from openshiftstoragelibs import exceptions
+from openshiftstoragelibs import heketi_ops
+from openshiftstoragelibs import gluster_ops
+from openshiftstoragelibs import node_ops
+from openshiftstoragelibs import openshift_ops
+from openshiftstoragelibs import openshift_storage_libs
+from openshiftstoragelibs import podcmd
+from openshiftstoragelibs import waiter
+
+
+@ddt.ddt
+class TestPrometheusAndGlusterRegistryValidation(GlusterBlockBaseClass):
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestPrometheusAndGlusterRegistryValidation, cls).setUpClass()
+
+ cls.metrics = ('heketi_volumes_count',
+ 'heketi_block_volumes_count',
+ 'heketi_device_brick_count',
+ 'heketi_device_free_bytes',
+ 'heketi_nodes_count',
+ 'heketi_device_used_bytes',
+ 'heketi_device_size_bytes')
+
+ def setUp(self):
+ """Initialize all the variables which are necessary for test cases"""
+ super(TestPrometheusAndGlusterRegistryValidation, self).setUp()
+
+ try:
+ prometheus_config = g.config['openshift']['prometheus']
+ self._prometheus_project_name = prometheus_config[
+ 'prometheus_project_name']
+ self._prometheus_resources_selector = prometheus_config[
+ 'prometheus_resources_selector']
+ self._alertmanager_resources_selector = prometheus_config[
+ 'alertmanager_resources_selector']
+ self._registry_heketi_server_url = (
+ g.config['openshift']['registry_heketi_config'][
+ 'heketi_server_url'])
+ self._registry_project_name = (
+ g.config['openshift']['registry_project_name'])
+ self._registry_servers_info = (
+ g.config['gluster_registry_servers'])
+ except KeyError as err:
+ self.skipTest("Config file doesn't have key {}".format(err))
+
+ # Skip the test if iscsi-initiator-utils version is not the expected
+ cmd = ("rpm -q iscsi-initiator-utils "
+ "--queryformat '%{version}-%{release}\n'"
+ "| cut -d '.' -f 1,2,3,4")
+ e_pkg_version = "6.2.0.874-17"
+ for g_server in self.gluster_servers:
+ out = self.cmd_run(cmd, g_server)
+ if parse_version(out) < parse_version(e_pkg_version):
+ self.skipTest(
+ "Skip the test as iscsi-initiator-utils package version {}"
+ "is less than version {} found on the node {}, for more "
+ "info refer to BZ-1624670".format(
+ out, e_pkg_version, g_server))
+
+ self._master = self.ocp_master_node[0]
+
+        # Switch to namespace containing prometheus pods
+ cmd = "oc project --short=true"
+ current_project = command.cmd_run(cmd, self._master)
+ openshift_ops.switch_oc_project(
+ self._master, self._prometheus_project_name)
+ self.addCleanup(
+ openshift_ops.switch_oc_project, self._master, current_project)
+
+ def _fetch_metric_from_promtheus_pod(self, metric):
+ """Fetch metric from prometheus pod using api call"""
+ prometheus_pods = list(openshift_ops.oc_get_pods(
+ self._master, selector=self._prometheus_resources_selector).keys())
+ fetch_metric_cmd = ("curl 'http://localhost:9090/api/v1/query"
+ "?query={}'".format(metric))
+ ret, metric_data, _ = openshift_ops.oc_rsh(
+ self._master, prometheus_pods[0], fetch_metric_cmd)
+ metric_result = json.loads(metric_data)["data"]["result"]
+ if (not metric_result) or ret:
+ raise exceptions.ExecutionError(
+ "Failed to fecth data for metric {}, output {}".format(
+ metric, metric_result))
+ return metric_result
+
+ def _get_pod_names_and_pvc_names(self):
+ # Get pod names and PVC names
+ pod_custom = ".:metadata.name"
+ pvc_custom = ":.spec.volumes[*].persistentVolumeClaim.claimName"
+ pvc_names, pod_names = [], []
+ for selector in (self._prometheus_resources_selector,
+ self._alertmanager_resources_selector):
+ pods = openshift_ops.oc_get_custom_resource(
+ self._master, "pod", pod_custom, selector=selector)
+ pod_names.extend(pods)
+ for pod_name in pods:
+ pvc_name = openshift_ops.oc_get_custom_resource(
+ self._master, "pod", pvc_custom, pod_name[0])[0]
+ pvc_names.append(pvc_name)
+
+ return pod_names, pvc_names
+
+ @podcmd.GlustoPod()
+ def _guster_volume_cleanup(self, vol_name):
+ # Check brick status. Restart vol if bricks are offline
+ openshift_ops.switch_oc_project(
+ self._master, self._registry_project_name)
+ brick_list = brick_libs.get_all_bricks(
+ "auto_get_gluster_endpoint", vol_name)
+ self.assertIsNotNone(brick_list, "Failed to get brick list")
+ check_bricks = brick_libs.are_bricks_online(
+ "auto_get_gluster_endpoint", vol_name, brick_list)
+ if not check_bricks:
+ start_vol, _, _ = volume_ops.volume_start(
+ "auto_get_gluster_endpoint", vol_name, force=True)
+ self.assertFalse(
+ start_vol, "Failed to start volume using force")
+
+ def _get_newly_deployed_gluster_pod(self, g_pod_list_before):
+
+ # Fetch pod after delete
+ g_pod_list_after = [
+ pod["pod_name"]
+ for pod in openshift_ops.get_ocp_gluster_pod_details(self._master)]
+
+ # Fetch the new gluster pod
+ g_new_pod = list(set(g_pod_list_after) - set(g_pod_list_before))
+ self.assertTrue(g_new_pod, "No new gluster pod deployed after delete")
+ return g_new_pod
+
+ def _guster_pod_delete(self, g_pod_list_before):
+ """Delete the gluster pod using force delete"""
+ openshift_ops.switch_oc_project(
+ self._master, self._registry_project_name)
+
+ # Fetch newly deployed gluster pod after delete
+ try:
+ pod_name = self._get_newly_deployed_gluster_pod(g_pod_list_before)
+ openshift_ops.wait_for_pod_be_ready(
+ self._master,
+ pod_name[0] if pod_name else g_pod_list_before[0],
+ timeout=120, wait_step=6)
+ except exceptions.ExecutionError:
+ openshift_ops.oc_delete(
+ self._master, 'pod', g_pod_list_before[0], is_force=True)
+ openshift_ops.wait_for_resource_absence(
+ self._master, 'pod', g_pod_list_before[0])
+ g_new_pod = self._get_newly_deployed_gluster_pod(g_pod_list_before)
+ openshift_ops.wait_for_pod_be_ready(self._master, g_new_pod[0])
+
+ def _wait_for_gluster_pod_be_ready(self, g_pod_list_before):
+ """Wait for the gluster pods to be in ready state"""
+ openshift_ops.switch_oc_project(
+ self._master, self._registry_project_name)
+
+ # Check if the gluster pods are in ready state
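+        # If they do not turn ready within the timeout, fall back to
+        # force-deleting the stale pod and waiting for its replacement.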
+ try:
+ pod_count = len(self._registry_servers_info.keys())
+ openshift_ops.wait_for_pods_be_ready(
+ self._master, pod_count, "glusterfs-node=pod",
+ timeout=120, wait_step=6)
+ except exceptions.ExecutionError:
+ self._guster_pod_delete(g_pod_list_before)
+
+ @pytest.mark.tier2
+ def test_promethoues_pods_and_pvcs(self):
+ """Validate prometheus pods and PVC"""
+ # Wait for PVCs to be bound
+ pod_names, pvc_names = self._get_pod_names_and_pvc_names()
+ openshift_ops.wait_for_pvcs_be_bound(self._master, pvc_names)
+
+ # Validate that there should be no or zero pods in non-running state
+ field_selector, pod_count = "status.phase!=Running", 0
+ openshift_ops.wait_for_pods_be_ready(
+ self._master, pod_count, field_selector=field_selector)
+
+ # Validate iscsi and multipath
+ for (pvc_name, pod_name) in zip(pvc_names, pod_names):
+ self.verify_iscsi_sessions_and_multipath(
+ pvc_name, pod_name[0], rtype='pod',
+ heketi_server_url=self._registry_heketi_server_url,
+ is_registry_gluster=True)
+
+ # Try to fetch metric from prometheus pod
+ self._fetch_metric_from_promtheus_pod(metric='kube_node_info')
+
+ @ddt.data('delete', 'drain')
+ @pytest.mark.tier2
+ def test_respin_prometheus_pod(self, motive="delete"):
+ """Validate respin of prometheus pod"""
+ if motive == 'drain':
+
+ # Get the number of infra nodes
+ infra_node_count_cmd = (
+ 'oc get nodes '
+ '--no-headers -l node-role.kubernetes.io/infra=true|wc -l')
+ infra_node_count = command.cmd_run(
+ infra_node_count_cmd, self._master)
+
+            # Skip the test if the number of infra nodes is less than 2
+ if int(infra_node_count) < 2:
+ self.skipTest('Available number of infra nodes "{}", it should'
+ ' be more than 1'.format(infra_node_count))
+
+ # Get PVC names and pod names
+ pod_names, pvc_names = self._get_pod_names_and_pvc_names()
+
+ # Validate iscsi and multipath
+ for (pvc_name, pod_name) in zip(pvc_names, pod_names):
+ _, _, node = self.verify_iscsi_sessions_and_multipath(
+ pvc_name, pod_name[0], rtype='pod',
+ heketi_server_url=self._registry_heketi_server_url,
+ is_registry_gluster=True)
+
+ # Delete the prometheus pods
+ if motive == 'delete':
+ for pod_name in pod_names:
+ openshift_ops.oc_delete(self._master, 'pod', pod_name[0])
+
+ # Drain the node
+ elif motive == 'drain':
+ drain_cmd = ('oc adm drain {} --force=true --ignore-daemonsets '
+ '--delete-local-data'.format(node))
+ command.cmd_run(drain_cmd, hostname=self._master)
+
+ # Cleanup to make node schedulable
+ cmd_schedule = (
+ 'oc adm manage-node {} --schedulable=true'.format(node))
+ self.addCleanup(
+ command.cmd_run, cmd_schedule, hostname=self._master)
+
+ # Validate that there should be no or zero pods in non-running state
+ field_selector, pod_count = "status.phase!=Running", 0
+ openshift_ops.wait_for_pods_be_ready(
+ self._master, pod_count, field_selector=field_selector)
+
+ # Validate iscsi and multipath
+ for (pvc_name, pod_name) in zip(pvc_names, pod_names):
+ self.verify_iscsi_sessions_and_multipath(
+ pvc_name, pod_name[0], rtype='pod',
+ heketi_server_url=self._registry_heketi_server_url,
+ is_registry_gluster=True)
+
+ # Try to fetch metric from prometheus pod
+ self._fetch_metric_from_promtheus_pod(metric='kube_node_info')
+
+ @pytest.mark.tier2
+ def test_heketi_and_prometheus_device_count(self):
+ """Check if device count is same in heketi and promtheus"""
+
+ cluster_ids_metrics, cluster_ids_promtheus = [], []
+ hostnames_metrics, hostnames_promtheus = [], []
+ total_value_metrics, total_value_promtheus = 0, 0
+
+ metrics = heketi_ops.get_heketi_metrics(
+ self.heketi_client_node, self.heketi_server_url)
+ heketi_device_count_metric = metrics.get('heketi_device_count')
+ for result in heketi_device_count_metric:
+ cluster_ids_metrics.append(result.get('cluster'))
+ hostnames_metrics.append(result.get('hostname'))
+ total_value_metrics += int(result.get('value'))
+
+ metric_result = self._fetch_metric_from_promtheus_pod(
+ metric='heketi_device_count')
+ for result in metric_result:
+ total_value_promtheus += int(result.get('value')[1])
+ cluster_ids_promtheus.append(result.get('metric')['cluster'])
+ hostnames_promtheus.append(result.get('metric')['hostname'])
+
+        self.assertEqual(cluster_ids_metrics, cluster_ids_promtheus,
+                         "Cluster IDs are not the same")
+        self.assertEqual(hostnames_metrics, hostnames_promtheus,
+                         "Hostnames are not the same")
+        self.assertEqual(total_value_metrics, total_value_promtheus,
+                         "Total device counts are not the same")
+
+ def _get_and_manipulate_metric_data(self, metrics):
+ """Create a dict of metric names and total values"""
+ metric_data = dict()
+ for metric in metrics:
+ out = self._fetch_metric_from_promtheus_pod(metric)
+ total_value = 0
+ for matric_result in out:
+ total_value += int(matric_result["value"][1])
+ metric_data[out[0]["metric"]["__name__"]] = total_value
+ return metric_data
+
+ @pytest.mark.tier2
+ @ddt.data('creation', 'expansion')
+ def test_promethoues_validation_while_creation_or_expansion(self, motive):
+ """Validate mertics data after volume creation or expansion"""
+
+ # Define the variables to perform validations
+ metrics = ['heketi_device_size_bytes', 'heketi_device_free_bytes',
+ 'heketi_device_used_bytes', 'heketi_device_brick_count']
+ h_client, h_server = self.heketi_client_node, self.heketi_server_url
+ vol_size = 1
+
+ # Collect the metrics data from prometheus pod
+ if motive == 'creation':
+ initial_result = self._get_and_manipulate_metric_data(metrics)
+
+ # Create a volume
+ volume_id = heketi_ops.heketi_volume_create(
+ h_client, h_server, vol_size, json=True)["bricks"][0]["volume"]
+ self.addCleanup(
+ heketi_ops.heketi_volume_delete, h_client, h_server, volume_id)
+
+ # Expand the volume
+ if motive == 'expansion':
+ initial_result = self._get_and_manipulate_metric_data(metrics)
+ heketi_ops.heketi_volume_expand(
+ h_client, h_server, volume_id, vol_size)
+
+        # Fetch the latest metrics data from the prometheus pod
+ final_result = self._get_and_manipulate_metric_data(metrics)
+
+ # Validate the data variation
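+        # Expected behaviour: the device size stays constant, free bytes
+        # shrink, and used bytes and the brick count grow, since the new or
+        # expanded volume carves bricks out of the same devices.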
+ for metric in metrics:
+            msg = (
+                "initial value {} and final value {} of metric '{}' should "
+                "be".format(
+                    initial_result[metric], final_result[metric], metric))
+            if metric == 'heketi_device_size_bytes':
+                self.assertEqual(
+                    initial_result[metric], final_result[metric],
+                    msg + " the same")
+            elif metric == 'heketi_device_free_bytes':
+                self.assertGreater(
+                    initial_result[metric], final_result[metric],
+                    msg + " different")
+            elif metric in ('heketi_device_used_bytes',
+                            'heketi_device_brick_count'):
+                self.assertLess(
+                    initial_result[metric], final_result[metric],
+                    msg + " different")
+
+ @ddt.data('add', 'delete')
+ @pytest.mark.tier3
+ def test_heketi_prometheus_device_count_after_operation(self, operation):
+ """Do operation and validate device count in heketi and prometheus"""
+ h_node, h_server = self.heketi_client_node, self.heketi_server_url
+
+ # Get list of additional devices for one of the Gluster nodes
+ gluster_server_0 = list(self.gluster_servers_info.values())[0]
+ manage_hostname = gluster_server_0.get("manage")
+ self.assertTrue(
+ manage_hostname, "IP Address is not specified for "
+ "node {}".format(gluster_server_0))
+ device_name = gluster_server_0.get("additional_devices")[0]
+ self.assertTrue(
+ device_name, "Additional devices are not specified for "
+ "node {}".format(gluster_server_0))
+
+ # Get node ID of the Gluster hostname
+ node_list = heketi_ops.heketi_topology_info(
+ h_node, h_server, json=True).get("clusters")[0].get("nodes")
+ self.assertTrue(
+ node_list, "Cluster info command returned empty list of nodes")
+ node_id = None
+ for node in node_list:
+ if manage_hostname == node.get("hostnames").get("manage")[0]:
+ node_id = node.get("id")
+ break
+ self.assertTrue(
+ node_id, "Failed to get node_id for {}".format(manage_hostname))
+
+ # Adding heketi device
+ heketi_ops.heketi_device_add(h_node, h_server, device_name, node_id)
+ node_info_after_addition = heketi_ops.heketi_node_info(
+ h_node, h_server, node_id, json=True)
+ device_id, bricks = None, None
+ for device in node_info_after_addition.get("devices"):
+ if device.get("name") == device_name:
+ device_id, bricks = (
+ device.get("id"), len(device.get("bricks")))
+ break
+ self.addCleanup(
+ heketi_ops.heketi_device_delete, h_node, h_server, device_id,
+ raise_on_error=False)
+ self.addCleanup(
+ heketi_ops.heketi_device_remove, h_node, h_server, device_id,
+ raise_on_error=False)
+ self.addCleanup(
+ heketi_ops.heketi_device_disable, h_node, h_server, device_id,
+ raise_on_error=False)
+
+ if operation == "delete":
+            # Disable, remove and delete the heketi device
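+            # (heketi only allows deleting a device after it has been
+            # disabled and its bricks removed, hence the three-step sequence)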
+ heketi_ops.heketi_device_disable(h_node, h_server, device_id)
+ heketi_ops.heketi_device_remove(h_node, h_server, device_id)
+ heketi_ops.heketi_device_delete(h_node, h_server, device_id)
+ # Verify zero bricks on the deleted device and device deletion
+ msg = (
+ "Number of bricks on the device {} of the nodes should be"
+ "zero".format(device_name))
+ self.assertFalse(bricks, msg)
+ node_info_after_deletion = (
+ heketi_ops.heketi_node_info(h_node, h_server, node_id))
+ msg = ("Device {} should not be shown in node info of the node {}"
+ "after the device deletion".format(device_id, node_id))
+ self.assertNotIn(device_id, node_info_after_deletion, msg)
+
+ # Validate heketi and prometheus device count
+ for w in waiter.Waiter(timeout=60, interval=10):
+ total_value_prometheus, total_value_metrics = 0, 0
+ openshift_ops.switch_oc_project(
+ self.ocp_master_node[0], 'openshift-monitoring')
+ metric_result = self._fetch_metric_from_promtheus_pod(
+ metric='heketi_device_count')
+ for result in metric_result:
+ total_value_prometheus += int(result.get('value')[1])
+ openshift_ops.switch_oc_project(
+ self.ocp_master_node[0], 'glusterfs')
+ metrics = heketi_ops.get_heketi_metrics(h_node, h_server)
+ heketi_device_count_metric = metrics.get('heketi_device_count')
+ for result in heketi_device_count_metric:
+ total_value_metrics += int(result.get('value'))
+
+ if total_value_prometheus == total_value_metrics:
+ break
+ if w.expired:
+ raise exceptions.ExecutionError(
+ "Failed to update device details in prometheus")
+
+ @ddt.data('usedbytes', 'brickcount')
+ @pytest.mark.tier3
+ def test_heketi_prometheus_usedbytes_brickcount_on_device_delete(
+ self, operation):
+ """Validate used bytes,device count on heketi and prometheus"""
+ h_node, h_server = self.heketi_client_node, self.heketi_server_url
+
+ # Get list of additional devices for one of the Gluster nodes
+ gluster_server_0 = list(self.gluster_servers_info.values())[0]
+ manage_hostname = gluster_server_0.get("manage")
+ self.assertTrue(
+ manage_hostname, "IP Address is not specified for "
+ "node {}".format(gluster_server_0))
+ device_name = gluster_server_0.get("additional_devices")[0]
+ self.assertTrue(
+ device_name, "Additional devices are not specified for "
+ "node {}".format(gluster_server_0))
+
+ # Get node ID of the Gluster hostname
+ node_list = heketi_ops.heketi_topology_info(
+ h_node, h_server, json=True).get("clusters")[0].get("nodes")
+ self.assertTrue(
+ node_list, "Cluster info command returned empty list of nodes")
+ node_id = [
+ node.get("id")
+ for node in node_list
+ if manage_hostname == node.get("hostnames").get("manage")[0]]
+ self.assertTrue(
+ node_id, "Failed to get node_id for {}".format(manage_hostname))
+ node_id = node_id[0]
+
+ # Adding heketi device
+ heketi_ops.heketi_device_add(h_node, h_server, device_name, node_id)
+ node_info_after_addition = heketi_ops.heketi_node_info(
+ h_node, h_server, node_id, json=True)
+ device_id, bricks = None, None
+ for device in node_info_after_addition.get("devices"):
+ if device.get("name") == device_name:
+ device_id, bricks = (
+ device.get("id"), len(device.get("bricks")))
+ break
+
+ # Verify zero bricks on the device
+ msg = (
+ "Number of bricks on the device {} of the nodes should be"
+ "zero".format(device_name))
+ self.assertFalse(bricks, msg)
+ self.addCleanup(
+ heketi_ops.heketi_device_delete, h_node, h_server, device_id,
+ raise_on_error=False)
+ self.addCleanup(
+ heketi_ops.heketi_device_remove, h_node, h_server, device_id,
+ raise_on_error=False)
+ self.addCleanup(
+ heketi_ops.heketi_device_disable, h_node, h_server, device_id,
+ raise_on_error=False)
+
+        # Disable, remove and delete the heketi device
+ heketi_ops.heketi_device_disable(h_node, h_server, device_id)
+ heketi_ops.heketi_device_remove(h_node, h_server, device_id)
+ heketi_ops.heketi_device_delete(h_node, h_server, device_id)
+
+ # Verify device deletion
+ node_info_after_deletion = (
+ heketi_ops.heketi_node_info(h_node, h_server, node_id))
+ msg = ("Device {} should not be shown in node info of the node {}"
+ "after the device deletion".format(device_id, node_id))
+ self.assertNotIn(device_id, node_info_after_deletion, msg)
+
+ if operation == "usedbytes":
+ # Validate heketi and prometheus device used bytes
+ for w in waiter.Waiter(timeout=60, interval=10):
+ device_used_bytes_prometheus = 0
+ device_used_bytes_metrics = 0
+ openshift_ops.switch_oc_project(
+ self.ocp_master_node[0], 'openshift-monitoring')
+ metric_result = self._fetch_metric_from_promtheus_pod(
+ metric='heketi_device_used_bytes')
+ for result in metric_result:
+ if (node_id == result.get('cluster')
+ and device_name == result.get('device')):
+ device_used_bytes_prometheus += (
+ int(result.get('value')[1]))
+ openshift_ops.switch_oc_project(
+ self.ocp_master_node[0], 'glusterfs')
+ metrics = heketi_ops.get_heketi_metrics(h_node, h_server)
+ heketi_device_count_metric = (
+ metrics.get('heketi_device_used_bytes'))
+ for result in heketi_device_count_metric:
+ if (node_id == result.get('cluster')
+ and device_name == result.get('device')):
+ device_used_bytes_metrics = int(result.get('value'))
+ if device_used_bytes_prometheus == device_used_bytes_metrics:
+ break
+ if w.expired:
+ raise exceptions.ExecutionError(
+ "Failed to update device details in prometheus")
+
+ elif operation == "brickcount":
+ # Validate heketi and prometheus device brick count
+ for w in waiter.Waiter(timeout=60, interval=10):
+ device_brick_count_prometheus = 0
+ device_brick_count_metrics = 0
+ metrics = heketi_ops.get_heketi_metrics(h_node, h_server)
+ heketi_device_count_metric = metrics.get(
+ 'heketi_device_brick_count')
+ for result in heketi_device_count_metric:
+ device_brick_count_metrics += int(result.get('value'))
+ openshift_ops.switch_oc_project(
+ self.ocp_master_node[0], 'openshift-monitoring')
+ metric_result = self._fetch_metric_from_promtheus_pod(
+ metric='heketi_device_brick_count')
+ for result in metric_result:
+ device_brick_count_prometheus += (
+ int(result.get('value')[1]))
+ if device_brick_count_prometheus == device_brick_count_metrics:
+ break
+ if w.expired:
+ raise exceptions.ExecutionError(
+ "Failed to update device details in prometheus")
+
+ @pytest.mark.tier2
+ @podcmd.GlustoPod()
+ def test_prometheous_kill_bhv_brick_process(self):
+ """Validate kill brick process of block hosting
+ volume with prometheus workload running"""
+
+ # Add check for CRS version
+ openshift_ops.switch_oc_project(
+ self._master, self._registry_project_name)
+ if not self.is_containerized_gluster():
+ self.skipTest("Skipping this test case as CRS"
+ " version check can not be implemented")
+
+ # Get one of the prometheus pod name and respective pvc name
+ openshift_ops.switch_oc_project(
+ self._master, self._prometheus_project_name)
+ prometheus_pods = openshift_ops.oc_get_pods(
+ self._master, selector=self._prometheus_resources_selector)
+ if not prometheus_pods:
+ self.skipTest(
+ prometheus_pods, "Skipping test as prometheus"
+ " pod is not present")
+
+ # Validate iscsi and multipath
+ prometheus_pod = list(prometheus_pods.keys())[0]
+ pvc_name = openshift_ops.oc_get_custom_resource(
+ self._master, "pod",
+ ":.spec.volumes[*].persistentVolumeClaim.claimName",
+ prometheus_pod)
+ self.assertTrue(pvc_name, "Failed to get PVC name")
+ pvc_name = pvc_name[0]
+ self.verify_iscsi_sessions_and_multipath(
+ pvc_name, prometheus_pod, rtype='pod',
+ heketi_server_url=self._registry_heketi_server_url,
+ is_registry_gluster=True)
+
+ # Try to fetch metric from prometheus pod
+ self._fetch_metric_from_promtheus_pod(
+ metric='heketi_device_brick_count')
+
+ # Kill the brick process of a BHV
+ gluster_node = list(self._registry_servers_info.keys())[0]
+ openshift_ops.switch_oc_project(
+ self._master, self._registry_project_name)
+ bhv_name = self.get_block_hosting_volume_by_pvc_name(
+ pvc_name, heketi_server_url=self._registry_heketi_server_url,
+ gluster_node=gluster_node, ocp_client_node=self._master)
+ vol_status = gluster_ops.get_gluster_vol_status(bhv_name)
+ gluster_node_ip, brick_pid = None, None
+ for g_node, g_node_data in vol_status.items():
+ for process_name, process_data in g_node_data.items():
+ if process_name.startswith("/var"):
+ gluster_node_ip = g_node
+ brick_pid = process_data["pid"]
+ break
+ if gluster_node_ip and brick_pid:
+ break
+ self.assertIsNotNone(brick_pid, "Could not find pid for brick")
+ cmd = "kill -9 {}".format(brick_pid)
+ openshift_ops.cmd_run_on_gluster_pod_or_node(
+ self._master, cmd, gluster_node_ip)
+ self.addCleanup(self._guster_volume_cleanup, bhv_name)
+
+ # Check if the brick-process has been killed
+ killed_pid_cmd = (
+ "ps -p {} -o pid --no-headers".format(brick_pid))
+ try:
+ openshift_ops.cmd_run_on_gluster_pod_or_node(
+ self._master, killed_pid_cmd, gluster_node_ip)
+ except exceptions.ExecutionError:
+ g.log.info("Brick process {} was killed"
+ "successfully".format(brick_pid))
+
+ # Try to fetch metric from prometheus pod
+ openshift_ops.switch_oc_project(
+ self._master, self._prometheus_project_name)
+ self._fetch_metric_from_promtheus_pod(
+ metric='heketi_device_brick_count')
+
+ # Start the bhv using force
+ openshift_ops.switch_oc_project(
+ self._master, self._registry_project_name)
+ start_vol, _, _ = volume_ops.volume_start(
+ gluster_node_ip, bhv_name, force=True)
+ self.assertFalse(
+ start_vol, "Failed to start volume {}"
+ " using force".format(bhv_name))
+
+ # Validate iscsi and multipath
+ openshift_ops.switch_oc_project(
+ self._master, self._prometheus_project_name)
+ self.verify_iscsi_sessions_and_multipath(
+ pvc_name, prometheus_pod, rtype='pod',
+ heketi_server_url=self._registry_heketi_server_url,
+ is_registry_gluster=True)
+
+ # Try to fetch metric from prometheus pod
+ self._fetch_metric_from_promtheus_pod(
+ metric='heketi_device_brick_count')
+
+ def _check_heketi_and_gluster_pod_after_node_reboot(self, heketi_node):
+ openshift_ops.switch_oc_project(
+ self._master, self.storage_project_name)
+ heketi_pod = openshift_ops.get_pod_names_from_dc(
+ self._master, self.heketi_dc_name)[0]
+
+ # Wait for heketi pod to become ready and running
+ openshift_ops.wait_for_pod_be_ready(self._master, heketi_pod)
+ heketi_ops.hello_heketi(self._master, self.heketi_server_url)
+
+ # Wait for glusterfs pods to become ready if hosted on same node
+ heketi_node_ip = openshift_ops.oc_get_custom_resource(
+ self._master, 'pod', '.:status.hostIP', heketi_pod)[0]
+ if heketi_node_ip in self.gluster_servers:
+ gluster_pod = openshift_ops.get_gluster_pod_name_for_specific_node(
+ self._master, heketi_node)
+
+ # Wait for glusterfs pod to become ready
+ openshift_ops.wait_for_pod_be_ready(self._master, gluster_pod)
+ services = (
+ ("glusterd", "running"), ("gluster-blockd", "running"),
+ ("tcmu-runner", "running"), ("gluster-block-target", "exited"))
+ for service, state in services:
+ openshift_ops.check_service_status_on_pod(
+ self._master, gluster_pod, service, "active", state)
+
+ @pytest.mark.tier4
+ def test_heketi_metrics_validation_with_node_reboot(self):
+ """Validate heketi metrics after node reboot using prometheus"""
+
+ initial_metrics, final_metrics = {}, {}
+
+ # Use storage project
+ openshift_ops.switch_oc_project(
+ self._master, self.storage_project_name)
+
+ # Get initial metrics result
+ h_node, h_server = self.heketi_client_node, self.heketi_server_url
+ initial_metrics = tuple(
+ heketi_ops.get_heketi_metrics(h_node, h_server).get(metric)[0]
+ for metric in self.metrics)
+
+ # Use prometheus project
+ openshift_ops.switch_oc_project(
+ self._master, self._prometheus_project_name)
+
+ # Get initial prometheus result
+ initial_prometheus = self._get_and_manipulate_metric_data(
+ self.metrics)
+
+ # Get hosted node IP of heketi pod
+ openshift_ops.switch_oc_project(
+ self._master, self.storage_project_name)
+ heketi_pod = openshift_ops.get_pod_name_from_dc(
+ self._master, self.heketi_dc_name)
+ heketi_node = openshift_ops.oc_get_custom_resource(
+ self._master, 'pod', '.:spec.nodeName', heketi_pod)[0]
+
+ # Reboot the node on which heketi pod is scheduled
+ self.addCleanup(
+ self._check_heketi_and_gluster_pod_after_node_reboot, heketi_node)
+ node_ops.node_reboot_by_command(heketi_node)
+
+ # Wait node to become NotReady
+ custom = r'":.status.conditions[?(@.type==\"Ready\")]".status'
+ for w in waiter.Waiter(300, 10):
+ status = openshift_ops.oc_get_custom_resource(
+ self._master, 'node', custom, heketi_node)
+ if status[0] == 'False':
+ break
+ if w.expired:
+ raise exceptions.ExecutionError(
+ "Failed to bring down node {}".format(heketi_node))
+
+ # Wait for node to become ready
+ openshift_ops.wait_for_ocp_node_be_ready(self._master, heketi_node)
+
+ # Wait for heketi and glusterfs pod to become ready
+ self._check_heketi_and_gluster_pod_after_node_reboot(heketi_node)
+
+ # Use prometheus project
+ openshift_ops.switch_oc_project(
+ self._master, self._prometheus_project_name)
+
+ # Get final metrics result
+ final_metrics = tuple(
+ heketi_ops.get_heketi_metrics(h_node, h_server).get(metric)[0]
+ for metric in self.metrics)
+
+ # Get final prometheus result
+ final_prometheus = self._get_and_manipulate_metric_data(
+ self.metrics)
+
+ err_msg = "Initial value {} is not same as final value {}"
+ self.assertEqual(
+ initial_metrics, final_metrics, err_msg.format(
+ initial_metrics, final_metrics))
+ self.assertEqual(
+ initial_prometheus, final_prometheus, err_msg.format(
+ initial_prometheus, final_prometheus))
+
+ @pytest.mark.tier4
+ @ddt.data('add', 'delete')
+ def test_heketi_metrics_validation_after_node(self, condition):
+ """Validate heketi metrics after adding and remove node"""
+
+ # Get additional node
+ additional_host_info = g.config.get("additional_gluster_servers")
+ if not additional_host_info:
+ self.skipTest(
+ "Skipping this test case as additional gluster server is "
+ "not provied in config file")
+
+ additional_host_info = list(additional_host_info.values())[0]
+ storage_hostname = additional_host_info.get("manage")
+ storage_ip = additional_host_info.get("storage")
+ if not (storage_hostname and storage_ip):
+ self.skipTest(
+ "Config options 'additional_gluster_servers.manage' "
+ "and 'additional_gluster_servers.storage' must be set.")
+
+ h_client, h_server = self.heketi_client_node, self.heketi_server_url
+ initial_node_count, final_node_count = 0, 0
+
+ # Get initial node count from prometheus metrics
+ metric_result = self._fetch_metric_from_promtheus_pod(
+ metric='heketi_nodes_count')
+        initial_node_count = reduce(
+            lambda x, y: x + y,
+            [int(result.get('value')[1]) for result in metric_result])
+
+ # Switch to storage project
+ openshift_ops.switch_oc_project(
+ self._master, self.storage_project_name)
+
+ # Configure node before adding node
+ self.configure_node_to_run_gluster(storage_hostname)
+
+ # Get cluster list
+ cluster_info = heketi_ops.heketi_cluster_list(
+ h_client, h_server, json=True)
+
+ # Add node to the cluster
+ heketi_node_info = heketi_ops.heketi_node_add(
+ h_client, h_server,
+ len(self.gluster_servers), cluster_info.get('clusters')[0],
+ storage_hostname, storage_ip, json=True)
+ heketi_node_id = heketi_node_info.get("id")
+ self.addCleanup(
+ heketi_ops.heketi_node_delete,
+ h_client, h_server, heketi_node_id, raise_on_error=False)
+ self.addCleanup(
+ heketi_ops.heketi_node_remove,
+ h_client, h_server, heketi_node_id, raise_on_error=False)
+ self.addCleanup(
+ heketi_ops.heketi_node_disable,
+ h_client, h_server, heketi_node_id, raise_on_error=False)
+ self.addCleanup(
+ openshift_ops.switch_oc_project,
+ self._master, self.storage_project_name)
+
+ if condition == 'delete':
+ # Switch to openshift-monitoring project
+ openshift_ops.switch_oc_project(
+ self.ocp_master_node[0], self._prometheus_project_name)
+
+            # Wait until prometheus reflects the newly added node
+ for w in waiter.Waiter(timeout=60, interval=10):
+ metric_result = self._fetch_metric_from_promtheus_pod(
+ metric='heketi_nodes_count')
+                node_count = reduce(
+                    lambda x, y: x + y,
+                    [int(result.get('value')[1]) for result in metric_result])
+ if node_count != initial_node_count:
+ break
+
+ if w.expired:
+ raise exceptions.ExecutionError(
+ "Failed to get updated node details from prometheus")
+
+ # Remove node from cluster
+ heketi_ops.heketi_node_disable(h_client, h_server, heketi_node_id)
+ heketi_ops.heketi_node_remove(h_client, h_server, heketi_node_id)
+ for device in heketi_node_info.get('devices'):
+ heketi_ops.heketi_device_delete(
+ h_client, h_server, device.get('id'))
+ heketi_ops.heketi_node_delete(h_client, h_server, heketi_node_id)
+
+ # Switch to openshift-monitoring project
+ openshift_ops.switch_oc_project(
+ self.ocp_master_node[0], self._prometheus_project_name)
+
+ # Get final node count from prometheus metrics
+ for w in waiter.Waiter(timeout=60, interval=10):
+ metric_result = self._fetch_metric_from_promtheus_pod(
+ metric='heketi_nodes_count')
+            final_node_count = reduce(
+                lambda x, y: x + y,
+                [int(result.get('value')[1]) for result in metric_result])
+
+ if condition == 'delete':
+ if final_node_count < node_count:
+ break
+ else:
+ if final_node_count > initial_node_count:
+ break
+
+ if w.expired:
+ raise exceptions.ExecutionError(
+ "Failed to update node details in prometheus")
+
+ @pytest.mark.tier2
+ def test_restart_prometheus_glusterfs_pod(self):
+ """Validate restarting glusterfs pod"""
+
+ # Add check for CRS version
+ openshift_ops.switch_oc_project(
+ self._master, self._registry_project_name)
+ if not self.is_containerized_gluster():
+ self.skipTest(
+ "Skipping this test case as CRS version check "
+ "can not be implemented")
+
+ # Get one of the prometheus pod name and respective pvc name
+ openshift_ops.switch_oc_project(
+ self._master, self._prometheus_project_name)
+ prometheus_pods = openshift_ops.oc_get_pods(
+ self._master, selector=self._prometheus_resources_selector)
+ if not prometheus_pods:
+ self.skipTest(
+ prometheus_pods, "Skipping test as prometheus"
+ " pod is not present")
+ prometheus_pod = list(prometheus_pods.keys())[0]
+ pvc_name = openshift_ops.oc_get_custom_resource(
+ self._master, "pod",
+ ":.spec.volumes[*].persistentVolumeClaim.claimName",
+ prometheus_pod)[0]
+ self.assertTrue(
+ pvc_name,
+ "Failed to get pvc name from {} pod".format(prometheus_pod))
+ iqn, _, node = self.verify_iscsi_sessions_and_multipath(
+ pvc_name, prometheus_pod, rtype='pod',
+ heketi_server_url=self._registry_heketi_server_url,
+ is_registry_gluster=True)
+
+ # Get the ip of active path
+ devices = openshift_storage_libs.get_iscsi_block_devices_by_path(
+ node, iqn)
+ mpath = openshift_storage_libs.get_mpath_name_from_device_name(
+ node, list(devices.keys())[0])
+ mpath_dev = (
+ openshift_storage_libs.get_active_and_enabled_devices_from_mpath(
+ node, mpath))
+ node_ip = devices[mpath_dev['active'][0]]
+
+ # Get the name of gluster pod from the ip
+ openshift_ops.switch_oc_project(
+ self._master, self._registry_project_name)
+ gluster_pods = openshift_ops.get_ocp_gluster_pod_details(
+ self._master)
+ active_pod_name = list(
+ filter(lambda pod: (pod["pod_host_ip"] == node_ip), gluster_pods)
+ )[0]["pod_name"]
+ err_msg = "Failed to get the gluster pod name {} with active path"
+ self.assertTrue(active_pod_name, err_msg.format(active_pod_name))
+ g_pods = [pod['pod_name'] for pod in gluster_pods]
+ g_pods.remove(active_pod_name)
+ pod_list = [active_pod_name, g_pods[0]]
+ for pod_name in pod_list:
+
+ # Delete the glusterfs pods
+ openshift_ops.switch_oc_project(
+ self._master, self._prometheus_project_name)
+ self._fetch_metric_from_promtheus_pod(
+ metric='heketi_device_brick_count')
+
+ openshift_ops.switch_oc_project(
+ self._master, self._registry_project_name)
+ g_pod_list_before = [
+ pod["pod_name"]
+ for pod in openshift_ops.get_ocp_gluster_pod_details(
+ self._master)]
+
+ openshift_ops.oc_delete(self._master, 'pod', pod_name)
+ self.addCleanup(
+ self._guster_pod_delete, g_pod_list_before)
+
+ # Wait for gluster pod to be absent
+ openshift_ops.wait_for_resource_absence(
+ self._master, 'pod', pod_name)
+
+ # Try to fetch metric from prometheus pod
+ openshift_ops.switch_oc_project(
+ self._master, self._prometheus_project_name)
+ self._fetch_metric_from_promtheus_pod(
+ metric='heketi_device_brick_count')
+
+ # Wait for new pod to come up
+ openshift_ops.switch_oc_project(
+ self._master, self._registry_project_name)
+ self.assertTrue(self._get_newly_deployed_gluster_pod(
+ g_pod_list_before), "Failed to get new pod")
+ self._wait_for_gluster_pod_be_ready(g_pod_list_before)
+
+ # Validate iscsi and multipath
+ openshift_ops.switch_oc_project(
+ self._master, self._prometheus_project_name)
+ self.verify_iscsi_sessions_and_multipath(
+ pvc_name, prometheus_pod, rtype='pod',
+ heketi_server_url=self._registry_heketi_server_url,
+ is_registry_gluster=True)
+
+ # Try to fetch metric from prometheus pod
+ self._fetch_metric_from_promtheus_pod(
+ metric='heketi_device_brick_count')
diff --git a/tests/functional/prometheous/test_prometheus_validations_file.py b/tests/functional/prometheous/test_prometheus_validations_file.py
new file mode 100644
index 00000000..bbf4aedc
--- /dev/null
+++ b/tests/functional/prometheous/test_prometheus_validations_file.py
@@ -0,0 +1,335 @@
+try:
+ # py2/3
+ import simplejson as json
+except ImportError:
+ # py2
+ import json
+import time
+
+import ddt
+from glusto.core import Glusto as g
+from glustolibs.gluster import rebalance_ops
+import pytest
+
+from openshiftstoragelibs import baseclass
+from openshiftstoragelibs import exceptions
+from openshiftstoragelibs import heketi_ops
+from openshiftstoragelibs import openshift_ops
+from openshiftstoragelibs import podcmd
+from openshiftstoragelibs import waiter
+
+
+@ddt.ddt
+class TestPrometheusValidationFile(baseclass.BaseClass):
+ """Prometheus Validations for file volumes"""
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestPrometheusValidationFile, cls).setUpClass()
+
+ # Metrics of which the data need to retrieve in this class
+ cls.metrics = ('kubelet_volume_stats_inodes_free',
+ 'kubelet_volume_stats_inodes',
+ 'kubelet_volume_stats_inodes_used',
+ 'kubelet_volume_stats_available_bytes',
+ 'kubelet_volume_stats_capacity_bytes',
+ 'kubelet_volume_stats_used_bytes')
+
+ def setUp(self):
+ """Initialize all the variables which are necessary for test cases"""
+ super(TestPrometheusValidationFile, self).setUp()
+
+ try:
+ prometheus_config = g.config['openshift']['prometheus']
+ self._prometheus_project_name = prometheus_config[
+ 'prometheus_project_name']
+ self._prometheus_resources_selector = prometheus_config[
+ 'prometheus_resources_selector']
+ self._alertmanager_resources_selector = prometheus_config[
+ 'alertmanager_resources_selector']
+ except KeyError as err:
+ self.skipTest("Config file doesn't have key {}".format(err))
+
+ self._master = self.ocp_master_node[0]
+
+ def _fetch_metric_from_promtheus_pod(self, metric):
+ """Fetch metric from prometheus pod using api call"""
+
+ prometheus_pods = list(openshift_ops.oc_get_pods(
+ self._master, selector=self._prometheus_resources_selector).keys())
+ fetch_metric_cmd = ("curl 'http://localhost:9090/api/v1/query"
+ "?query={}'".format(metric))
+ ret, metric_data, _ = openshift_ops.oc_rsh(
+ self._master, prometheus_pods[0], fetch_metric_cmd)
+ metric_result = json.loads(metric_data)["data"]["result"]
+ if (not metric_result) or ret:
+ raise exceptions.ExecutionError(
+ "Failed to fecth data for metric {}, output {}".format(
+ metric, metric_result))
+ return metric_result
+
+ def _get_and_manipulate_metric_data(self, metrics, pvc):
+ """Create a dict of metric names and total values"""
+
+ # Switch to namespace containing prometheus pods
+ openshift_ops.switch_oc_project(self._master,
+ self._prometheus_project_name)
+ self.addCleanup(openshift_ops.switch_oc_project,
+ self._master, self.storage_project_name)
+
+ metric_data = dict()
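+        # kubelet_volume_stats_* series carry a "persistentvolumeclaim"
+        # label, so only the sample belonging to the given PVC is kept for
+        # each metric name.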
+ for metric in metrics:
+ out = self._fetch_metric_from_promtheus_pod(metric)
+ for matric_result in out:
+ if matric_result["metric"]["persistentvolumeclaim"] == pvc:
+ metric_data[matric_result["metric"][
+ "__name__"]] = matric_result["value"][1]
+ return metric_data
+
+ def _fetch_initial_metrics(self, vol_name_prefix=None,
+ volume_expansion=False):
+
+ # Create PVC and wait for it to be in 'Bound' state
+ sc_name = self.create_storage_class(
+ vol_name_prefix=vol_name_prefix,
+ allow_volume_expansion=volume_expansion)
+ pvc_name = self.create_and_wait_for_pvc(
+ pvc_name_prefix=vol_name_prefix, sc_name=sc_name)
+
+ # Create DC and attach with pvc
+ self.dc_name, pod_name = self.create_dc_with_pvc(pvc_name)
+ for w in waiter.Waiter(120, 10):
+ initial_metrics = self._get_and_manipulate_metric_data(
+ self.metrics, pvc_name)
+ if bool(initial_metrics) and len(initial_metrics) == 6:
+ break
+ if w.expired:
+ raise AssertionError("Unable to fetch metrics for the pvc")
+ return pvc_name, pod_name, initial_metrics
+
+ def _perform_io_and_fetch_metrics(
+ self, pod_name, pvc_name, filename, dirname,
+ metric_data, operation):
+ """Create 1000 files and dirs and validate with old metrics"""
+ openshift_ops.switch_oc_project(
+ self._master, self.storage_project_name)
+ if operation == "create":
+ cmds = ("touch /mnt/{}{{1..1000}}".format(filename),
+ "mkdir /mnt/{}{{1..1000}}".format(dirname))
+ else:
+ cmds = ("rm -rf /mnt/large_file",
+ "rm -rf /mnt/{}{{1..1000}}".format(filename),
+ "rm -rf /mnt/{}{{1..1000}}".format(dirname))
+ for cmd in cmds:
+ self.cmd_run("oc rsh {} {}".format(pod_name, cmd))
+
+ # Fetch the new metrics and compare the inodes used and bytes used
+ for w in waiter.Waiter(120, 10):
+ after_io_metrics = self._get_and_manipulate_metric_data(
+ self.metrics, pvc_name)
+ if operation == "create":
+                inodes_used_grew = (
+                    int(after_io_metrics['kubelet_volume_stats_inodes_used'])
+                    > int(metric_data['kubelet_volume_stats_inodes_used']))
+                bytes_used_grew = (
+                    int(after_io_metrics['kubelet_volume_stats_used_bytes'])
+                    > int(metric_data['kubelet_volume_stats_used_bytes']))
+                if inodes_used_grew and bytes_used_grew:
+                    break
+            else:
+                used_before = int(
+                    metric_data['kubelet_volume_stats_used_bytes'])
+                used_after = int(
+                    after_io_metrics['kubelet_volume_stats_used_bytes'])
+                if used_before > used_after:
+                    break
+ if w.expired:
+ raise AssertionError(
+ "After data is modified metrics like bytes used and inodes "
+ "used are not reflected in prometheus")
+
+ def _run_io_on_the_pod(self, pod_name, number_of_files):
+ for each in range(number_of_files):
+ cmd = "touch /mnt/file{}".format(each)
+ ret, _, err = openshift_ops.oc_rsh(self._master, pod_name, cmd)
+ self.assertFalse(ret, "Failed to run the IO with error msg {}".
+ format(err))
+
+ @podcmd.GlustoPod()
+ def _rebalance_completion(self, volume_name):
+ """Rebalance start and completion after expansion."""
+ ret, _, err = rebalance_ops.rebalance_start(
+ 'auto_get_gluster_endpoint', volume_name)
+ self.assertFalse(
+ ret, "Rebalance for {} volume not started with error {}".format(
+ volume_name, err))
+
+ for w in waiter.Waiter(240, 10):
+ reb_status = rebalance_ops.get_rebalance_status(
+ 'auto_get_gluster_endpoint', volume_name)
+ if reb_status["aggregate"]["statusStr"] == "completed":
+ break
+ if w.expired:
+ raise AssertionError(
+ "Failed to complete the rebalance in 240 seconds")
+
+ @pytest.mark.tier2
+ def test_prometheus_volume_metrics_on_pod_restart(self):
+ """Validate volume metrics using prometheus before and after pod
+ restart"""
+
+ # Create PVC and wait for it to be in 'Bound' state
+ pvc_name = self.create_and_wait_for_pvc()
+ pod_name = openshift_ops.oc_create_tiny_pod_with_volume(
+ self._master, pvc_name, "autotest-volume",
+ image=self.io_container_image_cirros)
+ self.addCleanup(openshift_ops.oc_delete, self._master, 'pod', pod_name,
+ raise_on_absence=False)
+
+ # Wait for POD be up and running
+ openshift_ops.wait_for_pod_be_ready(
+ self._master, pod_name, timeout=60, wait_step=2)
+
+        # Write data on the volume and then wait for 2 minutes; the sleep is
+        # needed for prometheus to report the exact values of the metrics
+ self._run_io_on_the_pod(pod_name, 30)
+ time.sleep(120)
+
+ # Fetching the metrics and storing in initial_metrics as dictionary
+ initial_metrics = self._get_and_manipulate_metric_data(
+ self.metrics, pvc_name)
+
+ # Mark the current node unschedulable on which app pod is running
+ openshift_ops.switch_oc_project(
+ self._master, self.storage_project_name)
+ pod_info = openshift_ops.oc_get_pods(self._master, name=pod_name)
+ openshift_ops.oc_adm_manage_node(
+ self._master, '--schedulable=false',
+ nodes=[pod_info[pod_name]["node"]])
+ self.addCleanup(
+ openshift_ops.oc_adm_manage_node, self._master,
+ '--schedulable=true', nodes=[pod_info[pod_name]["node"]])
+
+ # Delete the existing pod and create a new pod
+ openshift_ops.oc_delete(self._master, 'pod', pod_name)
+ pod_name = openshift_ops.oc_create_tiny_pod_with_volume(
+ self._master, pvc_name, "autotest-volume")
+ self.addCleanup(openshift_ops.oc_delete, self._master, 'pod', pod_name)
+
+ # Wait for POD be up and running and prometheus to refresh the data
+ openshift_ops.wait_for_pod_be_ready(
+ self._master, pod_name, timeout=60, wait_step=2)
+ time.sleep(120)
+
+ # Fetching the metrics and storing in final_metrics as dictionary and
+ # validating with initial_metrics
+ final_metrics = self._get_and_manipulate_metric_data(
+ self.metrics, pvc_name)
+ self.assertEqual(dict(initial_metrics), dict(final_metrics),
+ "Metrics are different post pod restart")
+
+ @pytest.mark.tier2
+ def test_prometheus_basic_validation(self):
+ """ Validate basic volume metrics using prometheus """
+
+ # Fetch the metrics and storing initial_metrics as dictionary
+ pvc_name, pod_name, initial_metrics = self._fetch_initial_metrics(
+ volume_expansion=False)
+
+ # Create 1000 files and fetch the metrics that the data is updated
+ self._perform_io_and_fetch_metrics(
+ pod_name=pod_name, pvc_name=pvc_name,
+ filename="filename1", dirname="dirname1",
+ metric_data=initial_metrics, operation="create")
+
+        # Write IO of half the size of the volume and validate from the
+        # prometheus pod that the size change is reflected
+ size_to_write = int(initial_metrics[
+ 'kubelet_volume_stats_capacity_bytes']) // 2
+ openshift_ops.switch_oc_project(
+ self._master, self.storage_project_name)
+ cmd = ("dd if=/dev/urandom of=/mnt/large_file bs={} count=1024".
+ format(size_to_write // 1024))
+ ret, _, err = openshift_ops.oc_rsh(self._master, pod_name, cmd)
+ self.assertFalse(ret, 'Failed to write file due to err {}'.format(err))
+
+ # Fetching the metrics and validating the data change is reflected
+ for w in waiter.Waiter(120, 10):
+ half_io_metrics = self._get_and_manipulate_metric_data(
+ ['kubelet_volume_stats_used_bytes'], pvc_name)
+ if bool(half_io_metrics) and (int(
+ half_io_metrics['kubelet_volume_stats_used_bytes'])
+ > size_to_write):
+ break
+ if w.expired:
+ raise AssertionError(
+ "After Data is written on the pvc, metrics like inodes used "
+ "and bytes used are not reflected in the prometheus")
+
+ # Delete the files from the volume and wait for the
+ # updated details reflected in prometheus
+ self._perform_io_and_fetch_metrics(
+ pod_name=pod_name, pvc_name=pvc_name,
+ filename="filename1", dirname="dirname1",
+ metric_data=half_io_metrics, operation="delete")
+
+ @pytest.mark.tier2
+ def test_prometheus_pv_resize(self):
+ """ Validate prometheus metrics with pv resize"""
+
+ # Fetch the metrics and storing initial_metrics as dictionary
+ pvc_name, pod_name, initial_metrics = self._fetch_initial_metrics(
+ vol_name_prefix="for-pv-resize", volume_expansion=True)
+
+ # Write data on the pvc and confirm it is reflected in the prometheus
+ self._perform_io_and_fetch_metrics(
+ pod_name=pod_name, pvc_name=pvc_name,
+ filename="filename1", dirname="dirname1",
+ metric_data=initial_metrics, operation="create")
+
+ # Resize the pvc to 2GiB
+ openshift_ops.switch_oc_project(
+ self._master, self.storage_project_name)
+ pvc_size = 2
+ openshift_ops.resize_pvc(self._master, pvc_name, pvc_size)
+ openshift_ops.wait_for_events(self._master, obj_name=pvc_name,
+ event_reason='VolumeResizeSuccessful')
+ openshift_ops.verify_pvc_size(self._master, pvc_name, pvc_size)
+ pv_name = openshift_ops.get_pv_name_from_pvc(
+ self._master, pvc_name)
+ openshift_ops.verify_pv_size(self._master, pv_name, pvc_size)
+
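+        # heketi_volume_list_by_name_prefix is assumed to return
+        # (id, cluster, name) tuples, so [0][2] below picks the name of the
+        # first volume matching the "for-pv-resize" prefix.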
+ heketi_volume_name = heketi_ops.heketi_volume_list_by_name_prefix(
+ self.heketi_client_node, self.heketi_server_url,
+ "for-pv-resize", json=True)[0][2]
+ self.assertIsNotNone(
+ heketi_volume_name, "Failed to fetch volume with prefix {}".
+ format("for-pv-resize"))
+
+ openshift_ops.oc_delete(self._master, 'pod', pod_name)
+ openshift_ops.wait_for_resource_absence(self._master, 'pod', pod_name)
+ pod_name = openshift_ops.get_pod_name_from_dc(
+ self._master, self.dc_name)
+ openshift_ops.wait_for_pod_be_ready(self._master, pod_name)
+
+ # Check whether the metrics are updated or not
+ for w in waiter.Waiter(120, 10):
+ resize_metrics = self._get_and_manipulate_metric_data(
+ self.metrics, pvc_name)
+ if bool(resize_metrics) and int(resize_metrics[
+ 'kubelet_volume_stats_capacity_bytes']) > int(
+ initial_metrics['kubelet_volume_stats_capacity_bytes']):
+ break
+ if w.expired:
+ raise AssertionError("Failed to reflect PVC Size after resizing")
+ openshift_ops.switch_oc_project(
+ self._master, self.storage_project_name)
+ time.sleep(240)
+
+        # Look up files, trigger rebalance and wait for its completion
+ for _ in range(100):
+ self.cmd_run("oc rsh {} ls /mnt/".format(pod_name))
+ self._rebalance_completion(heketi_volume_name)
+
+ # Write data on the resized pvc and compared with the resized_metrics
+ self._perform_io_and_fetch_metrics(
+ pod_name=pod_name, pvc_name=pvc_name,
+ filename="secondfilename", dirname="seconddirname",
+ metric_data=resize_metrics, operation="create")
diff --git a/tests/functional/provisioning/test_dev_path_mapping_block.py b/tests/functional/provisioning/test_dev_path_mapping_block.py
new file mode 100644
index 00000000..b0955f5d
--- /dev/null
+++ b/tests/functional/provisioning/test_dev_path_mapping_block.py
@@ -0,0 +1,303 @@
+import ddt
+import pytest
+from glusto.core import Glusto as g
+from glustolibs.gluster import block_libs
+
+from openshiftstoragelibs import baseclass
+from openshiftstoragelibs import command
+from openshiftstoragelibs import heketi_ops
+from openshiftstoragelibs import node_ops
+from openshiftstoragelibs import openshift_ops
+from openshiftstoragelibs import openshift_storage_libs
+from openshiftstoragelibs import podcmd
+
+
+@ddt.ddt
+class TestDevPathMapping(baseclass.GlusterBlockBaseClass):
+    '''Class that contains dev path mapping test cases for
+ gluster file & block volumes
+ '''
+
+ def setUp(self):
+ super(TestDevPathMapping, self).setUp()
+ self.node = self.ocp_master_node[0]
+ self.h_node, self.h_server = (
+ self.heketi_client_node, self.heketi_server_url)
+ h_nodes_list = heketi_ops.heketi_node_list(self.h_node, self.h_server)
+ h_node_count = len(h_nodes_list)
+ if h_node_count < 3:
+ self.skipTest(
+ "At least 3 nodes are required, found {}".format(
+ h_node_count))
+
+ # Disable 4th and other nodes
+ for node_id in h_nodes_list[3:]:
+ self.addCleanup(
+ heketi_ops.heketi_node_enable,
+ self.h_node, self.h_server, node_id)
+ heketi_ops.heketi_node_disable(
+ self.h_node, self.h_server, node_id)
+
+ h_info = heketi_ops.heketi_node_info(
+ self.h_node, self.h_server, h_nodes_list[0], json=True)
+ self.assertTrue(
+ h_info, "Failed to get the heketi node info for node id"
+ " {}".format(h_nodes_list[0]))
+
+ self.node_ip = h_info['hostnames']['storage'][0]
+ self.node_hostname = h_info["hostnames"]["manage"][0]
+ self.vm_name = node_ops.find_vm_name_by_ip_or_hostname(
+ self.node_hostname)
+ self.devices_list = [device['name'] for device in h_info["devices"]]
+
+ # Get list of additional devices for one of the Gluster nodes
+ for gluster_server in list(g.config["gluster_servers"].values()):
+ if gluster_server['storage'] == self.node_ip:
+ additional_device = gluster_server.get("additional_devices")
+ if additional_device:
+ self.devices_list.extend(additional_device)
+
+ # sort the devices list
+ self.devices_list.sort()
+
+ def _cleanup_heketi_volumes(self, existing_volumes):
+ """Cleanup created BHV and BV"""
+
+ volumes = heketi_ops.heketi_volume_list(
+ self.h_node, self.h_server, json=True).get("volumes")
+ new_volumes = list(set(volumes) - set(existing_volumes))
+ for volume in new_volumes:
+ h_vol_info = heketi_ops.heketi_volume_info(
+ self.h_node, self.h_server, volume, json=True)
+ if h_vol_info.get("block"):
+ for block_vol in (
+ h_vol_info.get("blockinfo").get("blockvolume")):
+ heketi_ops.heketi_blockvolume_delete(
+ self.h_node, self.h_server, block_vol,
+ raise_on_error=False)
+ heketi_ops.heketi_volume_delete(
+ self.h_node, self.h_server, volume, raise_on_error=False)
+
+ @pytest.mark.tier4
+ @podcmd.GlustoPod()
+ def test_dev_path_block_volume_create(self):
+ """Validate dev path mapping for block volumes"""
+
+ pvc_size, pvc_amount = 2, 5
+ pvs_info_before = openshift_storage_libs.get_pvs_info(
+ self.node, self.node_ip, self.devices_list, raise_on_error=False)
+ self.detach_and_attach_vmdk(
+ self.vm_name, self.node_hostname, self.devices_list)
+ pvs_info_after = openshift_storage_libs.get_pvs_info(
+ self.node, self.node_ip, self.devices_list, raise_on_error=False)
+
+ # Compare pvs info before and after
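+ # Note: the detach/attach is expected to shift device paths by one
+ # position, hence entry i of 'before' is matched with entry i+1 of 'after'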
+ for (path, uuid, vg_name), (_path, _uuid, _vg_name) in zip(
+ pvs_info_before[:-1], pvs_info_after[1:]):
+ self.assertEqual(
+ uuid, _uuid, "pv_uuid check failed. Expected:{},"
+ "Actual: {}".format(uuid, _uuid))
+ self.assertEqual(
+ vg_name, _vg_name, "vg_name check failed. Expected:"
+ "{}, Actual:{}".format(vg_name, _vg_name))
+
+ # Create block volumes
+ pvcs = self.create_and_wait_for_pvcs(
+ pvc_size=pvc_size, pvc_amount=pvc_amount)
+ self.create_dcs_with_pvc(pvcs)
+ self.validate_block_volumes_count(
+ self.h_node, self.h_server, self.node_ip)
+
+ def _get_space_use_percent_in_app_pod(self, pod_name):
+ """Check if IO's are running in the app pod"""
+
+ use_percent = []
+ cmd = "oc exec {} -- df -h /mnt | tail -1"
+
+ # Run 10 times to track the percentage used
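+ # Column index 3 of the df output is assumed to hold the usage figure;
+ # the trailing character (unit or percent sign) is stripped before storing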
+ for _ in range(10):
+ out = command.cmd_run(cmd.format(pod_name), self.node).split()[3]
+ self.assertTrue(
+ out, "Failed to fetch mount point details from the pod "
+ "{}".format(pod_name))
+ use_percent.append(out[:-1])
+ return use_percent
+
+ def _create_app_pod_and_verify_pvs(self):
+ """Create block volume with app pod and verify IO's. Compare path,
+ uuid, vg_name.
+ """
+ pvc_size, pvc_amount = 2, 1
+
+ # Space to use for io's in KB
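+ # (104857600 KiB is roughly 100 GiB, large enough that IO keeps running
+ # for the whole test, assuming the helper treats this value as KiB)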
+ space_to_use = 104857600
+
+ # Create block volumes
+ pvc_name = self.create_and_wait_for_pvcs(
+ pvc_size=pvc_size, pvc_amount=pvc_amount)
+
+ # Create dcs and app pods with I/O on it
+ dc_name = self.create_dcs_with_pvc(pvc_name, space_to_use=space_to_use)
+
+ # Pod names list
+ pod_name = [pod_name for _, pod_name in list(dc_name.values())][0]
+ self.assertTrue(
+ pod_name, "Failed to get the pod name from {}".format(dc_name))
+
+ # Fetch dc list
+ dc_name = [pod for pod, _ in list(dc_name.values())][0]
+
+ pvs_info_before = openshift_storage_libs.get_pvs_info(
+ self.node, self.node_ip, self.devices_list, raise_on_error=False)
+
+ # Check if IO's are running
+ use_percent_before = self._get_space_use_percent_in_app_pod(pod_name)
+
+ # Compare volumes
+ self.validate_file_volumes_count(
+ self.h_node, self.h_server, self.node_ip)
+ self.detach_and_attach_vmdk(
+ self.vm_name, self.node_hostname, self.devices_list)
+
+ # Check if IO's are running
+ use_percent_after = self._get_space_use_percent_in_app_pod(pod_name)
+ self.assertNotEqual(
+ use_percent_before, use_percent_after,
+ "Failed to execute IO's in the app pod {}".format(
+ pod_name))
+
+ pvs_info_after = openshift_storage_libs.get_pvs_info(
+ self.node, self.node_ip, self.devices_list, raise_on_error=False)
+
+ # Compare pvs info before and after
+ for (path, uuid, vg_name), (_path, _uuid, _vg_name) in zip(
+ pvs_info_before[:-1], pvs_info_after[1:]):
+ self.assertEqual(
+ uuid, _uuid, "pv_uuid check failed. Expected: {},"
+ " Actual: {}".format(uuid, _uuid))
+ self.assertEqual(
+ vg_name, _vg_name, "vg_name check failed. Expected: {},"
+ " Actual:{}".format(vg_name, _vg_name))
+ return pod_name, dc_name, use_percent_before
+
+ @pytest.mark.tier4
+ @podcmd.GlustoPod()
+ def test_dev_path_mapping_app_pod_with_block_volume_reboot(self):
+ """Validate dev path mapping for app pods with block volume after reboot
+ """
+ # Create block volume with app pod and verify IO's
+ # and Compare path, uuid, vg_name
+ pod_name, dc_name, use_percent = self._create_app_pod_and_verify_pvs()
+
+ # Delete app pods
+ openshift_ops.oc_delete(self.node, 'pod', pod_name)
+ openshift_ops.wait_for_resource_absence(self.node, 'pod', pod_name)
+
+ # Wait for the new app pod to come up
+ self.assertTrue(
+ dc_name, "Failed to get the dc name from {}".format(dc_name))
+ pod_name = openshift_ops.get_pod_name_from_dc(self.node, dc_name)
+ openshift_ops.wait_for_pod_be_ready(self.node, pod_name)
+
+ # Check if IO's are running after respin of app pod
+ use_percent_after = self._get_space_use_percent_in_app_pod(pod_name)
+ self.assertNotEqual(
+ use_percent, use_percent_after,
+ "Failed to execute IO's in the app pod {} after respin".format(
+ pod_name))
+
+ @pytest.mark.tier4
+ @podcmd.GlustoPod()
+ def test_dev_path_block_volume_delete(self):
+ """Validate device path name changes the deletion of
+ already existing file volumes
+ """
+
+ pvc_size, pvc_amount = 2, 5
+ pvc_names, gluster_block_list, vol_details = [], [], []
+
+ # Fetch BHV list
+ h_bhv_list_before = heketi_ops.get_block_hosting_volume_list(
+ self.h_node, self.h_server).keys()
+
+ # Create storage class
+ sc_name = self.create_storage_class()
+
+ # Delete created BHV and BV as cleanup during failures
+ self.addCleanup(self._cleanup_heketi_volumes, h_bhv_list_before)
+
+ # Create PVC's
+ for i in range(0, pvc_amount):
+ pvc_name = openshift_ops.oc_create_pvc(
+ self.node, sc_name, pvc_size=pvc_size)
+ pvc_names.append(pvc_name)
+ self.addCleanup(
+ openshift_ops.wait_for_resource_absence,
+ self.node, 'pvc', pvc_name)
+ self.addCleanup(
+ openshift_ops.oc_delete, self.node, 'pvc', pvc_name,
+ raise_on_absence=False)
+
+ # Wait for PVC's to be bound
+ openshift_ops.wait_for_pvc_be_bound(self.node, pvc_names)
+
+ # Get volume name list
+ for pvc_name in pvc_names:
+ pv_name = openshift_ops.get_pv_name_from_pvc(self.node, pvc_name)
+ volume_name = openshift_ops.get_vol_names_from_pv(
+ self.node, pv_name, vol_type='block')
+ vol_details.append(volume_name)
+
+ # Get BHV list after BV creation
+ h_bhv_list_after = heketi_ops.get_block_hosting_volume_list(
+ self.h_node, self.h_server).keys()
+ self.assertTrue(h_bhv_list_after, "Failed to get the BHV list")
+
+ # Validate BV's count
+ self.validate_block_volumes_count(
+ self.h_node, self.h_server, self.node_ip)
+
+ # Collect pvs info and detach disks and collect pvs info
+ pvs_info_before = openshift_storage_libs.get_pvs_info(
+ self.node, self.node_ip, self.devices_list, raise_on_error=False)
+ self.detach_and_attach_vmdk(
+ self.vm_name, self.node_hostname, self.devices_list)
+ pvs_info_after = openshift_storage_libs.get_pvs_info(
+ self.node, self.node_ip, self.devices_list, raise_on_error=False)
+
+ # Compare pvs info before and after
+ for (path, uuid, vg_name), (_path, _uuid, _vg_name) in zip(
+ pvs_info_before[:-1], pvs_info_after[1:]):
+ self.assertEqual(
+ uuid, _uuid, "pv_uuid check failed. Expected:{},"
+ "Actual: {}".format(uuid, _uuid))
+ self.assertEqual(
+ vg_name, _vg_name, "vg_name check failed. Expected:"
+ "{}, Actual:{}".format(vg_name, _vg_name))
+
+ # Delete created PVC's
+ for pvc_name in pvc_names:
+ openshift_ops.oc_delete(self.node, 'pvc', pvc_name)
+
+ # Wait for pvc to get deleted
+ openshift_ops.wait_for_resources_absence(self.node, 'pvc', pvc_names)
+
+ # Get the gluster block list for each remaining BHV
+ for bhv_name in h_bhv_list_after:
+ b_list = block_libs.get_block_list(self.node_ip, volname=bhv_name)
+ self.assertIsNotNone(
+ b_list,
+ "Failed to get gluster block list for BHV {}".format(bhv_name))
+ gluster_block_list.append(b_list)
+
+ # Get list of block volumes using heketi
+ h_blockvol_list = heketi_ops.heketi_blockvolume_list(
+ self.h_node, self.h_server, json=True)
+
+ # Validate volumes created are not present
+ for vol in vol_details:
+ self.assertNotIn(
+ vol, gluster_block_list,
+ "Failed to delete volume {}".format(vol))
+ self.assertNotIn(
+ vol, h_blockvol_list['blockvolumes'],
+ "Failed to delete blockvolume '{}'".format(vol))
diff --git a/tests/functional/provisioning/test_dev_path_mapping_file.py b/tests/functional/provisioning/test_dev_path_mapping_file.py
new file mode 100644
index 00000000..fe4e9834
--- /dev/null
+++ b/tests/functional/provisioning/test_dev_path_mapping_file.py
@@ -0,0 +1,794 @@
+import ddt
+import pytest
+from glusto.core import Glusto as g
+from glustolibs.gluster import volume_ops
+
+from openshiftstoragelibs import baseclass
+from openshiftstoragelibs import command
+from openshiftstoragelibs import exceptions
+from openshiftstoragelibs import heketi_ops
+from openshiftstoragelibs import node_ops
+from openshiftstoragelibs import openshift_ops
+from openshiftstoragelibs import openshift_storage_libs
+from openshiftstoragelibs import podcmd
+
+
+@ddt.ddt
+class TestDevPathMapping(baseclass.BaseClass):
+ '''Class that contains dev path mapping test cases for
+ gluster file & block volumes
+ '''
+
+ def setUp(self):
+ super(TestDevPathMapping, self).setUp()
+ self.node = self.ocp_master_node[0]
+ self.h_node, self.h_server = (
+ self.heketi_client_node, self.heketi_server_url)
+ h_nodes_list = heketi_ops.heketi_node_list(self.h_node, self.h_server)
+ h_node_count = len(h_nodes_list)
+ if h_node_count < 3:
+ self.skipTest(
+ "At least 3 nodes are required, found {}".format(
+ h_node_count))
+
+ # Disable 4th and other nodes
+ for node_id in h_nodes_list[3:]:
+ self.addCleanup(
+ heketi_ops.heketi_node_enable,
+ self.h_node, self.h_server, node_id)
+ heketi_ops.heketi_node_disable(
+ self.h_node, self.h_server, node_id)
+
+ h_info = heketi_ops.heketi_node_info(
+ self.h_node, self.h_server, h_nodes_list[0], json=True)
+ self.assertTrue(
+ h_info, "Failed to get the heketi node info for node id"
+ " {}".format(h_nodes_list[0]))
+
+ self.node_ip = h_info['hostnames']['storage'][0]
+ self.node_hostname = h_info["hostnames"]["manage"][0]
+ self.vm_name = node_ops.find_vm_name_by_ip_or_hostname(
+ self.node_hostname)
+ self.devices_list = [device['name'] for device in h_info["devices"]]
+
+ # Get list of additional devices for one of the Gluster nodes
+ for gluster_server in list(g.config["gluster_servers"].values()):
+ if gluster_server['storage'] == self.node_ip:
+ additional_device = gluster_server.get("additional_devices")
+ if additional_device:
+ self.devices_list.extend(additional_device)
+
+ # sort the devices list
+ self.devices_list.sort()
+
+ @pytest.mark.tier4
+ @podcmd.GlustoPod()
+ def test_dev_path_file_volume_create(self):
+ """Validate dev path mapping for file volumes"""
+
+ pvc_size, pvc_amount = 2, 5
+ pvs_info_before = openshift_storage_libs.get_pvs_info(
+ self.node, self.node_ip, self.devices_list, raise_on_error=False)
+ self.detach_and_attach_vmdk(
+ self.vm_name, self.node_hostname, self.devices_list)
+ pvs_info_after = openshift_storage_libs.get_pvs_info(
+ self.node, self.node_ip, self.devices_list, raise_on_error=False)
+
+ # Compare pvs info before and after
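+ # Note: entry i of 'before' is compared with entry i+1 of 'after', since
+ # the detach/attach is expected to shift device paths by one position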
+ for (path, uuid, vg_name), (_path, _uuid, _vg_name) in zip(
+ pvs_info_before[:-1], pvs_info_after[1:]):
+ self.assertEqual(
+ uuid, _uuid, "pv_uuid check failed. Expected:{},"
+ "Actual: {}".format(uuid, _uuid))
+ self.assertEqual(
+ vg_name, _vg_name, "vg_name check failed. Expected:"
+ "{}, Actual:{}".format(vg_name, _vg_name))
+
+ # Create file volumes
+ pvcs = self.create_and_wait_for_pvcs(
+ pvc_size=pvc_size, pvc_amount=pvc_amount)
+ self.create_dcs_with_pvc(pvcs)
+ self.validate_file_volumes_count(
+ self.h_node, self.h_server, self.node_ip)
+
+ def _get_space_use_percent_in_app_pod(self, pod_name):
+ """Check if IO's are running in the app pod"""
+
+ use_percent = []
+ cmd = "oc exec {} -- df -h /mnt | tail -1"
+
+ # Run 10 times to track the percentage used
+ for _ in range(10):
+ out = command.cmd_run(cmd.format(pod_name), self.node).split()[3]
+ self.assertTrue(
+ out, "Failed to fetch mount point details from the pod "
+ "{}".format(pod_name))
+ use_percent.append(out[:-1])
+ return use_percent
+
+ def _create_app_pod_and_verify_pvs(self):
+ """Create file volume with app pod and verify IO's. Compare path,
+ uuid, vg_name.
+ """
+ pvc_size, pvc_amount = 2, 1
+
+ # Space to use for io's in KB
+ space_to_use = 104857600
+
+ # Create file volumes
+ pvc_name = self.create_and_wait_for_pvcs(
+ pvc_size=pvc_size, pvc_amount=pvc_amount)
+
+ # Create dcs and app pods with I/O on it
+ dc_name = self.create_dcs_with_pvc(pvc_name, space_to_use=space_to_use)
+
+ # Pod names list
+ pod_name = [pod_name for _, pod_name in list(dc_name.values())][0]
+ self.assertTrue(
+ pod_name, "Failed to get the pod name from {}".format(dc_name))
+ pvs_info_before = openshift_storage_libs.get_pvs_info(
+ self.node, self.node_ip, self.devices_list, raise_on_error=False)
+
+ # Check if IO's are running
+ use_percent_before = self._get_space_use_percent_in_app_pod(pod_name)
+
+ # Compare volumes
+ self.validate_file_volumes_count(
+ self.h_node, self.h_server, self.node_ip)
+ self.detach_and_attach_vmdk(
+ self.vm_name, self.node_hostname, self.devices_list)
+
+ # Check if IO's are running
+ use_percent_after = self._get_space_use_percent_in_app_pod(pod_name)
+ self.assertNotEqual(
+ use_percent_before, use_percent_after,
+ "Failed to execute IO's in the app pod {}".format(
+ pod_name))
+
+ pvs_info_after = openshift_storage_libs.get_pvs_info(
+ self.node, self.node_ip, self.devices_list, raise_on_error=False)
+
+ # Compare pvs info before and after
+ for (path, uuid, vg_name), (_path, _uuid, _vg_name) in zip(
+ pvs_info_before[:-1], pvs_info_after[1:]):
+ self.assertEqual(
+ uuid, _uuid, "pv_uuid check failed. Expected: {},"
+ " Actual: {}".format(uuid, _uuid))
+ self.assertEqual(
+ vg_name, _vg_name, "vg_name check failed. Expected: {},"
+ " Actual:{}".format(vg_name, _vg_name))
+ return pod_name, dc_name, use_percent_before
+
+ @pytest.mark.tier4
+ @podcmd.GlustoPod()
+ def test_dev_path_mapping_app_pod_with_file_volume_reboot(self):
+ """Validate dev path mapping for app pods with file volume after reboot
+ """
+ # Create file volume with app pod and verify IO's
+ # and Compare path, uuid, vg_name
+ pod_name, dc_name, use_percent = self._create_app_pod_and_verify_pvs()
+
+ # Delete app pods
+ openshift_ops.oc_delete(self.node, 'pod', pod_name)
+ openshift_ops.wait_for_resource_absence(self.node, 'pod', pod_name)
+
+ # Wait for the new app pod to come up
+ dc_name = [pod for pod, _ in list(dc_name.values())][0]
+ self.assertTrue(
+ dc_name, "Failed to get the dc name from {}".format(dc_name))
+ pod_name = openshift_ops.get_pod_name_from_dc(self.node, dc_name)
+ openshift_ops.wait_for_pod_be_ready(self.node, pod_name)
+
+ # Check if IO's are running after respin of app pod
+ use_percent_after = self._get_space_use_percent_in_app_pod(pod_name)
+ self.assertNotEqual(
+ use_percent, use_percent_after,
+ "Failed to execute IO's in the app pod {} after respin".format(
+ pod_name))
+
+ @pytest.mark.tier4
+ @podcmd.GlustoPod()
+ def test_dev_path_file_volume_delete(self):
+ """Validate device path name changes the deletion of
+ already existing file volumes
+ """
+
+ pvc_size, pvc_amount = 2, 5
+ vol_details, pvc_names = [], []
+
+ # Create PVC's
+ sc_name = self.create_storage_class()
+ for i in range(0, pvc_amount):
+ pvc_name = openshift_ops.oc_create_pvc(
+ self.node, sc_name, pvc_size=pvc_size)
+ pvc_names.append(pvc_name)
+ self.addCleanup(
+ openshift_ops.wait_for_resource_absence,
+ self.node, 'pvc', pvc_name)
+ self.addCleanup(
+ openshift_ops.oc_delete, self.node, 'pvc', pvc_name,
+ raise_on_absence=False)
+
+ # Wait for PVC's to be bound
+ openshift_ops.wait_for_pvcs_be_bound(self.node, pvc_names)
+
+ # Get volume names and validate volumes count
+ for pvc_name in pvc_names:
+ pv_name = openshift_ops.get_pv_name_from_pvc(self.node, pvc_name)
+ volume_name = openshift_ops.get_vol_names_from_pv(
+ self.node, pv_name)
+ vol_details.append(volume_name)
+
+ # Verify file volumes count
+ self.validate_file_volumes_count(
+ self.h_node, self.h_server, self.node_ip)
+
+ # Collect pvs info and detach disks and get pvs info
+ pvs_info_before = openshift_storage_libs.get_pvs_info(
+ self.node, self.node_ip, self.devices_list, raise_on_error=False)
+ self.detach_and_attach_vmdk(
+ self.vm_name, self.node_hostname, self.devices_list)
+ pvs_info_after = openshift_storage_libs.get_pvs_info(
+ self.node, self.node_ip, self.devices_list, raise_on_error=False)
+
+ # Compare pvs info before and after
+ for (path, uuid, vg_name), (_path, _uuid, _vg_name) in zip(
+ pvs_info_before[:-1], pvs_info_after[1:]):
+ self.assertEqual(
+ uuid, _uuid, "pv_uuid check failed. Expected:{},"
+ "Actual: {}".format(uuid, _uuid))
+ self.assertEqual(
+ vg_name, _vg_name, "vg_name check failed. Expected:"
+ "{}, Actual:{}".format(vg_name, _vg_name))
+
+ # Delete created PVC's
+ for pvc_name in pvc_names:
+ openshift_ops.oc_delete(self.node, 'pvc', pvc_name)
+
+ # Wait for resource absence and get volume list
+ openshift_ops.wait_for_resources_absence(self.node, 'pvc', pvc_names)
+ vol_list = volume_ops.get_volume_list(self.node_ip)
+ self.assertIsNotNone(vol_list, "Failed to get volumes list")
+
+ # Validate volumes created are not present
+ for vol in vol_details:
+ self.assertNotIn(
+ vol, vol_list, "Failed to delete volume {}".format(vol))
+
+ def _heketi_pod_delete_cleanup(self):
+ """Cleanup for deletion of heketi pod using force delete"""
+ try:
+ # Fetch heketi pod after delete
+ pod_name = openshift_ops.get_pod_name_from_dc(
+ self.node, self.heketi_dc_name)
+ openshift_ops.wait_for_pod_be_ready(
+ self.node, pod_name, timeout=1)
+ except exceptions.ExecutionError:
+
+ # Force delete and wait for new pod to come up
+ openshift_ops.oc_delete(
+ self.node, 'pod', pod_name, is_force=True)
+ openshift_ops.wait_for_resource_absence(
+ self.node, 'pod', pod_name)
+ new_pod_name = openshift_ops.get_pod_name_from_dc(
+ self.node, self.heketi_dc_name)
+ openshift_ops.wait_for_pod_be_ready(self.node, new_pod_name)
+
+ @pytest.mark.tier4
+ @podcmd.GlustoPod()
+ def test_dev_path_mapping_heketi_pod_reboot(self):
+ """Validate dev path mapping for heketi pod reboot
+ """
+ self.node = self.ocp_master_node[0]
+ h_node, h_url = self.heketi_client_node, self.heketi_server_url
+
+ # Create file volume with app pod and verify IO's
+ # and Compare path, uuid, vg_name
+ pod_name, dc_name, use_percent = self._create_app_pod_and_verify_pvs()
+
+ # Fetch heketi-pod name
+ heketi_pod_name = openshift_ops.get_pod_name_from_dc(
+ self.node, self.heketi_dc_name)
+
+ # Respin heketi-pod (it restarts the pod)
+ openshift_ops.oc_delete(
+ self.node, "pod", heketi_pod_name,
+ collect_logs=self.heketi_logs_before_delete)
+ self.addCleanup(self._heketi_pod_delete_cleanup)
+ openshift_ops.wait_for_resource_absence(
+ self.node, "pod", heketi_pod_name)
+
+ # Fetch new heketi-pod name
+ heketi_pod_name = openshift_ops.get_pod_name_from_dc(
+ self.node, self.heketi_dc_name)
+ openshift_ops.wait_for_pod_be_ready(self.node, heketi_pod_name)
+
+ # Check heketi server is running
+ self.assertTrue(
+ heketi_ops.hello_heketi(h_node, h_url),
+ "Heketi server {} is not alive".format(h_url))
+
+ # Check if IO's are running after respin of heketi pod
+ use_percent_after = self._get_space_use_percent_in_app_pod(pod_name)
+ self.assertNotEqual(
+ use_percent, use_percent_after,
+ "Failed to execute IO's in the app pod {} after respin".format(
+ pod_name))
+
+ def _get_gluster_pod(self):
+ """Fetch gluster pods"""
+ # Fetch one gluster pod from its nodes
+ g_hostname = list(self.gluster_servers_info.values())[0].get('manage')
+ self.assertTrue(g_hostname, "Failed to fetch gluster hostname")
+ g_pod = openshift_ops.get_gluster_pod_name_for_specific_node(
+ self.node, g_hostname)
+ return g_pod
+
+ def _gluster_pod_delete_cleanup(self):
+ """Cleanup for deletion of gluster pod using force delete"""
+ try:
+ # Fetch gluster pod after delete
+ pod_name = self._get_gluster_pod()
+
+ # Check if the gluster pod is in ready state
+ openshift_ops.wait_for_pod_be_ready(self.node, pod_name, timeout=1)
+ except exceptions.ExecutionError:
+ # Force delete and wait for new pod to come up
+ openshift_ops.oc_delete(self.node, 'pod', pod_name, is_force=True)
+ openshift_ops.wait_for_resource_absence(self.node, 'pod', pod_name)
+
+ # Fetch gluster pod after force delete
+ g_new_pod = self._get_gluster_pod()
+ openshift_ops.wait_for_pod_be_ready(self.node, g_new_pod)
+
+ @pytest.mark.tier4
+ @podcmd.GlustoPod()
+ def test_dev_path_mapping_gluster_pod_reboot(self):
+ """Validate dev path mapping for app pods with file volume after reboot
+ """
+ # Skip the tc for independent mode
+ if not self.is_containerized_gluster():
+ self.skipTest("Skip TC as it is not supported in independent mode")
+
+ # Create file volume with app pod and verify IO's
+ # and Compare path, uuid, vg_name
+ pod_name, dc_name, use_percent = self._create_app_pod_and_verify_pvs()
+
+ # Fetch the gluster pod name from node
+ g_pod = self._get_gluster_pod()
+
+ # Respin a gluster pod
+ openshift_ops.oc_delete(self.node, "pod", g_pod)
+ self.addCleanup(self._gluster_pod_delete_cleanup)
+
+ # Wait for pod to get absent
+ openshift_ops.wait_for_resource_absence(self.node, "pod", g_pod)
+
+ # Fetch gluster pod after delete
+ g_pod = self._get_gluster_pod()
+ openshift_ops.wait_for_pod_be_ready(self.node, g_pod)
+
+ # Check if IO's are running after respin of gluster pod
+ use_percent_after = self._get_space_use_percent_in_app_pod(pod_name)
+ self.assertNotEqual(
+ use_percent, use_percent_after,
+ "Failed to execute IO's in the app pod {} after respin".format(
+ pod_name))
+
+ def _get_bricks_and_device_details(self):
+ """Fetch bricks count and device id list from the node where dev path
+ operation is performed
+ """
+
+ h_client, h_url = self.heketi_client_node, self.heketi_server_url
+ h_node_details = []
+
+ # Fetch bricks on the devices
+ h_nodes = heketi_ops.heketi_node_list(h_client, h_url)
+ for h_node in h_nodes:
+ h_node_info = heketi_ops.heketi_node_info(
+ h_client, h_url, h_node, json=True)
+ h_node_hostname = h_node_info.get("hostnames").get("manage")[0]
+
+ # Fetch bricks count and device list
+ if h_node_hostname == self.node_hostname:
+ h_node_details = [
+ [node_info['id'], len(node_info['bricks']),
+ node_info['name']]
+ for node_info in h_node_info['devices']]
+ return h_node_details, h_node
+
+ @pytest.mark.tier4
+ @podcmd.GlustoPod()
+ def test_dev_path_mapping_heketi_device_delete(self):
+ """Validate dev path mapping for heketi device delete lifecycle"""
+ h_client, h_url = self.heketi_client_node, self.heketi_server_url
+
+ node_ids = heketi_ops.heketi_node_list(h_client, h_url)
+ self.assertTrue(node_ids, "Failed to get heketi node list")
+
+ # Fetch #4th node for the operations
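+ # (this node was disabled in setUp, so it serves as the spare node here)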
+ h_disable_node = node_ids[3]
+
+ # Fetch bricks on the devices before volume create
+ h_node_details_before, h_node = self._get_bricks_and_device_details()
+
+ # Bricks count on the node before pvc creation
+ brick_count_before = [count[1] for count in h_node_details_before]
+
+ # Create file volume with app pod and verify IO's
+ # and compare path, UUID, vg_name
+ pod_name, dc_name, use_percent = self._create_app_pod_and_verify_pvs()
+
+ # Check if IO's are running
+ use_percent_after = self._get_space_use_percent_in_app_pod(pod_name)
+ self.assertNotEqual(
+ use_percent, use_percent_after,
+ "Failed to execute IO's in the app pod {} after respin".format(
+ pod_name))
+
+ # Fetch bricks on the devices after volume create
+ h_node_details_after, h_node = self._get_bricks_and_device_details()
+
+ # Bricks count on the node after pvc creation
+ brick_count_after = [count[1] for count in h_node_details_after]
+
+ self.assertGreater(
+ sum(brick_count_after), sum(brick_count_before),
+ "Failed to add bricks on the node {}".format(h_node))
+
+ # Enable the #4th node
+ heketi_ops.heketi_node_enable(h_client, h_url, h_disable_node)
+ node_info = heketi_ops.heketi_node_info(
+ h_client, h_url, h_disable_node, json=True)
+ h_node_id = node_info['id']
+ self.assertEqual(
+ node_info['state'], "online",
+ "Failed to enable node {}".format(h_disable_node))
+
+ # Fetch the list of devices to be deleted
+ h_node_info = heketi_ops.heketi_node_info(
+ h_client, h_url, h_node, json=True)
+ devices_list = [
+ [device['id'], device['name']]
+ for device in h_node_info['devices']]
+
+ # Device deletion operation
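+ # heketi expects disable -> remove (migrates bricks off the device)
+ # -> delete for each device, which is the order followed below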
+ for device in devices_list:
+ device_id, device_name = device[0], device[1]
+ self.addCleanup(
+ heketi_ops.heketi_device_enable, h_client, h_url,
+ device_id, raise_on_error=False)
+
+ # Disable device from heketi
+ device_disable = heketi_ops.heketi_device_disable(
+ h_client, h_url, device_id)
+ self.assertTrue(
+ device_disable,
+ "Device {} could not be disabled".format(device_id))
+
+ device_info = heketi_ops.heketi_device_info(
+ h_client, h_url, device_id, json=True)
+ self.assertEqual(
+ device_info['state'], "offline",
+ "Failed to disable device {}".format(device_id))
+
+ # Remove device from heketi
+ device_remove = heketi_ops.heketi_device_remove(
+ h_client, h_url, device_id)
+ self.assertTrue(
+ device_remove,
+ "Device {} could not be removed".format(device_id))
+
+ # Bricks after device removal
+ device_info = heketi_ops.heketi_device_info(
+ h_client, h_url, device_id, json=True)
+ bricks_count_after = len(device_info['bricks'])
+ self.assertFalse(
+ bricks_count_after,
+ "Failed to remove the bricks from the device {}".format(
+ device_id))
+
+ # Delete device from heketi
+ self.addCleanup(
+ heketi_ops.heketi_device_add, h_client, h_url,
+ device_name, h_node, raise_on_error=False)
+ device_delete = heketi_ops.heketi_device_delete(
+ h_client, h_url, device_id)
+ self.assertTrue(
+ device_delete,
+ "Device {} could not be deleted".format(device_id))
+
+ # Check if IO's are running after device is deleted
+ use_percent_after = self._get_space_use_percent_in_app_pod(pod_name)
+ self.assertNotEqual(
+ use_percent, use_percent_after,
+ "Failed to execute IO's in the app pod {} after respin".format(
+ pod_name))
+
+ # Add device operations
+ for device in devices_list:
+ device_name = device[1]
+
+ # Add device back to the node
+ heketi_ops.heketi_device_add(h_client, h_url, device_name, h_node)
+
+ # Fetch device info after device add
+ node_info = heketi_ops.heketi_node_info(
+ h_client, h_url, h_node, json=True)
+ device_id = None
+ for device in node_info["devices"]:
+ if device["name"] == device_name:
+ device_id = device["id"]
+ break
+ self.assertTrue(
+ device_id,
+ "Failed to add device {} on node"
+ " {}".format(device_name, h_node))
+
+ # Disable the #4th node
+ heketi_ops.heketi_node_disable(h_client, h_url, h_node_id)
+ node_info = heketi_ops.heketi_node_info(
+ h_client, h_url, h_node_id, json=True)
+ self.assertEqual(
+ node_info['state'], "offline",
+ "Failed to disable node {}".format(h_node_id))
+ pvc_amount, pvc_size = 5, 1
+
+ # Fetch bricks on the devices before volume create
+ h_node_details_before, h_node = self._get_bricks_and_device_details()
+
+ # Bricks count on the node before pvc creation
+ brick_count_before = [count[1] for count in h_node_details_before]
+
+ # Create file volumes
+ pvc_name = self.create_and_wait_for_pvcs(
+ pvc_size=pvc_size, pvc_amount=pvc_amount)
+ self.assertEqual(
+ len(pvc_name), pvc_amount,
+ "Failed to create {} pvc".format(pvc_amount))
+
+ # Fetch bricks on the devices after volume create
+ h_node_details_after, h_node = self._get_bricks_and_device_details()
+
+ # Bricks count on the node after pvc creation
+ brick_count_after = [count[1] for count in h_node_details_after]
+
+ self.assertGreater(
+ sum(brick_count_after), sum(brick_count_before),
+ "Failed to add bricks on the node {}".format(h_node))
+
+ # Check if IO's are running after new device is added
+ use_percent_after = self._get_space_use_percent_in_app_pod(pod_name)
+ self.assertNotEqual(
+ use_percent, use_percent_after,
+ "Failed to execute IO's in the app pod {} after respin".format(
+ pod_name))
+
+ def _get_bricks_counts_and_device_name(self):
+ """Fetch bricks count and device name from all the nodes"""
+ h_client, h_url = self.heketi_client_node, self.heketi_server_url
+
+ # Fetch bricks on the devices
+ h_nodes = heketi_ops.heketi_node_list(h_client, h_url)
+
+ node_details = {}
+ for h_node in h_nodes:
+ h_node_info = heketi_ops.heketi_node_info(
+ h_client, h_url, h_node, json=True)
+ node_details[h_node] = [[], []]
+ for device in h_node_info['devices']:
+ node_details[h_node][0].append(len(device['bricks']))
+ node_details[h_node][1].append(device['id'])
+ return node_details
+
+ @pytest.mark.tier4
+ @podcmd.GlustoPod()
+ def test_dev_path_mapping_heketi_node_delete(self):
+ """Validate dev path mapping for heketi node deletion lifecycle"""
+ h_client, h_url = self.heketi_client_node, self.heketi_server_url
+
+ node_ids = heketi_ops.heketi_node_list(h_client, h_url)
+ self.assertTrue(node_ids, "Failed to get heketi node list")
+
+ # Fetch #4th node for the operations
+ h_disable_node = node_ids[3]
+
+ # Fetch bricks on the devices before volume create
+ h_node_details_before, h_node = self._get_bricks_and_device_details()
+
+ # Bricks count on the node before pvc creation
+ brick_count_before = [count[1] for count in h_node_details_before]
+
+ # Create file volume with app pod and verify IO's
+ # and compare path, UUID, vg_name
+ pod_name, dc_name, use_percent = self._create_app_pod_and_verify_pvs()
+
+ # Check if IO's are running
+ use_percent_after = self._get_space_use_percent_in_app_pod(pod_name)
+ self.assertNotEqual(
+ use_percent, use_percent_after,
+ "Failed to execute IO's in the app pod {} after respin".format(
+ pod_name))
+
+ # Fetch bricks on the devices after volume create
+ h_node_details_after, h_node = self._get_bricks_and_device_details()
+
+ # Bricks count on the node after pvc creation
+ brick_count_after = [count[1] for count in h_node_details_after]
+
+ self.assertGreater(
+ sum(brick_count_after), sum(brick_count_before),
+ "Failed to add bricks on the node {}".format(h_node))
+ self.addCleanup(
+ heketi_ops.heketi_node_disable, h_client, h_url, h_disable_node)
+
+ # Enable the #4th node
+ heketi_ops.heketi_node_enable(h_client, h_url, h_disable_node)
+ node_info = heketi_ops.heketi_node_info(
+ h_client, h_url, h_disable_node, json=True)
+ h_node_id = node_info['id']
+ self.assertEqual(
+ node_info['state'], "online",
+ "Failed to enable node {}".format(h_disable_node))
+
+ # Disable the node and check for brick migrations
+ self.addCleanup(
+ heketi_ops.heketi_node_enable, h_client, h_url, h_node,
+ raise_on_error=False)
+ heketi_ops.heketi_node_disable(h_client, h_url, h_node)
+ node_info = heketi_ops.heketi_node_info(
+ h_client, h_url, h_node, json=True)
+ self.assertEqual(
+ node_info['state'], "offline",
+ "Failed to disable node {}".format(h_node))
+
+ # Before bricks migration
+ h_node_info = heketi_ops.heketi_node_info(
+ h_client, h_url, h_node, json=True)
+
+ # Bricks before migration on the node to be deleted
+ bricks_counts_before = 0
+ for device in h_node_info['devices']:
+ bricks_counts_before += (len(device['bricks']))
+
+ # Remove the node
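+ # 'node remove' migrates all bricks off this node; brick counts before
+ # and after are compared below to confirm the migration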
+ heketi_ops.heketi_node_remove(h_client, h_url, h_node)
+
+ # After bricks migration
+ h_node_info_after = heketi_ops.heketi_node_info(
+ h_client, h_url, h_node, json=True)
+
+ # Bricks after migration on the node to be deleted
+ bricks_counts = 0
+ for device in h_node_info_after['devices']:
+ bricks_counts += (len(device['bricks']))
+
+ self.assertFalse(
+ bricks_counts,
+ "Failed to remove all the bricks from node {}".format(h_node))
+
+ # Old node which is to be deleted and new node where bricks reside
+ old_node, new_node = h_node, h_node_id
+
+ # Node info for the new node where bricks reside after migration
+ h_node_info_new = heketi_ops.heketi_node_info(
+ h_client, h_url, new_node, json=True)
+
+ bricks_counts_after = 0
+ for device in h_node_info_new['devices']:
+ bricks_counts_after += (len(device['bricks']))
+
+ self.assertEqual(
+ bricks_counts_before, bricks_counts_after,
+ "Failed to migrated bricks from {} node to {}".format(
+ old_node, new_node))
+
+ # Fetch the list of devices to be deleted
+ h_node_info = heketi_ops.heketi_node_info(
+ h_client, h_url, h_node, json=True)
+ devices_list = [
+ [device['id'], device['name']]
+ for device in h_node_info['devices']]
+
+ for device in devices_list:
+ device_id = device[0]
+ device_name = device[1]
+ self.addCleanup(
+ heketi_ops.heketi_device_add, h_client, h_url,
+ device_name, h_node, raise_on_error=False)
+
+ # Device deletion from heketi node
+ device_delete = heketi_ops.heketi_device_delete(
+ h_client, h_url, device_id)
+ self.assertTrue(
+ device_delete,
+ "Failed to delete the device {}".format(device_id))
+
+ node_info = heketi_ops.heketi_node_info(
+ h_client, h_url, h_node, json=True)
+ cluster_id = node_info['cluster']
+ zone = node_info['zone']
+ storage_hostname = node_info['hostnames']['manage'][0]
+ storage_ip = node_info['hostnames']['storage'][0]
+
+ # Delete the node
+ self.addCleanup(
+ heketi_ops.heketi_node_add, h_client, h_url,
+ zone, cluster_id, storage_hostname, storage_ip,
+ raise_on_error=False)
+ heketi_ops.heketi_node_delete(h_client, h_url, h_node)
+
+ # Verify if the node is deleted
+ node_ids = heketi_ops.heketi_node_list(h_client, h_url)
+ self.assertNotIn(
+ old_node, node_ids,
+ "Failed to delete the node {}".format(old_node))
+
+ # Check if IO's are running
+ use_percent_after = self._get_space_use_percent_in_app_pod(pod_name)
+ self.assertNotEqual(
+ use_percent, use_percent_after,
+ "Failed to execute IO's in the app pod {} after respin".format(
+ pod_name))
+
+ # Adding node back
+ h_node_info = heketi_ops.heketi_node_add(
+ h_client, h_url, zone, cluster_id,
+ storage_hostname, storage_ip, json=True)
+ self.assertTrue(
+ h_node_info,
+ "Failed to add the node in the cluster {}".format(cluster_id))
+ h_node_id = h_node_info["id"]
+
+ # Adding devices to the new node
+ for device in devices_list:
+ storage_device = device[1]
+
+ # Add device to the new heketi node
+ heketi_ops.heketi_device_add(
+ h_client, h_url, storage_device, h_node_id)
+ heketi_node_info = heketi_ops.heketi_node_info(
+ h_client, h_url, h_node_id, json=True)
+ device_id = None
+ for device in heketi_node_info["devices"]:
+ if device["name"] == storage_device:
+ device_id = device["id"]
+ break
+
+ self.assertTrue(
+ device_id, "Failed to add device {} on node {}".format(
+ storage_device, h_node_id))
+
+ # Create n PVCs in order to verify that the bricks reside on the new node
+ pvc_amount, pvc_size = 5, 1
+
+ # Fetch bricks on the devices before volume create
+ h_node_details_before, h_node = self._get_bricks_and_device_details()
+
+ # Bricks count on the node before pvc creation
+ brick_count_before = [count[1] for count in h_node_details_before]
+
+ # Create file volumes
+ pvc_name = self.create_and_wait_for_pvcs(
+ pvc_size=pvc_size, pvc_amount=pvc_amount)
+ self.assertEqual(
+ len(pvc_name), pvc_amount,
+ "Failed to create {} pvc".format(pvc_amount))
+
+ # Fetch bricks on the devices after volume create
+ h_node_details_after, h_node = self._get_bricks_and_device_details()
+
+ # Bricks count on the node after pvc creation
+ brick_count_after = [count[1] for count in h_node_details_after]
+
+ self.assertGreater(
+ sum(brick_count_after), sum(brick_count_before),
+ "Failed to add bricks on the new node {}".format(new_node))
+
+ # Check if IO's are running after new node is added
+ use_percent_after = self._get_space_use_percent_in_app_pod(pod_name)
+ self.assertNotEqual(
+ use_percent, use_percent_after,
+ "Failed to execute IO's in the app pod {} after respin".format(
+ pod_name))
diff --git a/tests/functional/provisioning/test_dynamic_provisioning_block.py b/tests/functional/provisioning/test_dynamic_provisioning_block.py
index 9d2b128d..c852f846 100755
--- a/tests/functional/provisioning/test_dynamic_provisioning_block.py
+++ b/tests/functional/provisioning/test_dynamic_provisioning_block.py
@@ -112,19 +112,19 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
self.addCleanup(
oc_delete, self.node, 'pvc', pvc_name, raise_on_absence=True)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_dynamic_provisioning_glusterblock_hacount_true(self):
"""Validate dynamic provisioning for glusterblock
"""
self.dynamic_provisioning_glusterblock(set_hacount=True)
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_dynamic_provisioning_glusterblock_hacount_false(self):
"""Validate storage-class mandatory parameters for block
"""
self.dynamic_provisioning_glusterblock(set_hacount=False)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_dynamic_provisioning_glusterblock_heketipod_failure(self):
"""Validate PVC with glusterblock creation when heketi pod is down"""
datafile_path = '/mnt/fake_file_for_%s' % self.id()
@@ -191,7 +191,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
ret, 0,
"Failed to execute command %s on %s" % (write_data_cmd, self.node))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_dynamic_provisioning_glusterblock_gluster_pod_or_node_failure(
self):
"""Create glusterblock PVC when gluster pod or node is down."""
@@ -255,7 +255,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
ret, out, err = async_io.async_communicate()
self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd, self.node))
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_glusterblock_logs_presence_verification(self):
"""Validate presence of glusterblock provisioner POD and it's status"""
@@ -290,7 +290,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
self.ocp_client[0], cmd % log, gluster_node=g_host)
self.assertTrue(out, "Command '%s' output is empty." % cmd)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_dynamic_provisioning_glusterblock_heketidown_pvc_delete(self):
"""Validate PVC deletion when heketi is down"""
@@ -328,7 +328,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
# create a new PVC
self.create_and_wait_for_pvc()
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_recreate_app_pod_with_attached_block_pv(self):
"""Validate app pod attached block device I/O after restart"""
datafile_path = '/mnt/temporary_test_file'
@@ -357,7 +357,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
# Perform I/O on the new POD
self.cmd_run(write_cmd % (new_pod_name, datafile_path))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_volname_prefix_glusterblock(self):
"""Validate custom volname prefix blockvol"""
@@ -377,7 +377,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
self.assertTrue(vol_name.startswith(
self.sc.get('volumenameprefix', 'autotest')))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_dynamic_provisioning_glusterblock_reclaim_policy_retain(self):
"""Validate retain policy for gluster-block after PVC deletion"""
@@ -467,7 +467,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
"only %s free space is available"
% (free_space, free_size))
- @pytest.mark.tier1
+ @pytest.mark.tier3
def test_creation_of_block_vol_greater_than_the_default_size_of_BHV_neg(
self):
"""Verify that block volume creation fails when we create block
@@ -523,7 +523,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
verify_pvc_status_is_bound(self.node, pvc_name)
- @pytest.mark.tier1
+ @pytest.mark.tier3
def test_creation_of_block_vol_greater_than_the_default_size_of_BHV_pos(
self):
"""Verify that block volume creation succeed when we create BHV
@@ -549,7 +549,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
# create a block pvc greater than default BHV size
self.create_and_wait_for_pvc(pvc_size=(default_bhv_size + 1))
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_expansion_of_block_hosting_volume_using_heketi(self):
"""Verify that after expanding block hosting volume we are able to
consume the expanded space"""
@@ -628,7 +628,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
pvc_size=(expand_size - 1), pvc_amount=1)
@skip("Blocked by BZ-1769426")
- @pytest.mark.tier1
+ @pytest.mark.tier4
def test_targetcli_failure_during_block_pvc_creation(self):
h_node, h_server = self.heketi_client_node, self.heketi_server_url
@@ -750,7 +750,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
# Wait for all the PVCs to be in bound state
wait_for_pvcs_be_bound(self.node, pvc_names, timeout=300, wait_step=5)
- @pytest.mark.tier2
+ @pytest.mark.tier3
def test_creation_of_pvc_when_one_node_is_down(self):
"""Test PVC creation when one node is down than hacount"""
node_count = len(self.gluster_servers)
@@ -787,7 +787,8 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
self.addCleanup(oc_delete, self.node, 'pvc', pvc_name)
events = wait_for_events(
self.node, obj_name=pvc_name, obj_type='PersistentVolumeClaim',
- event_type='Warning', event_reason='ProvisioningFailed')
+ event_type='Warning', event_reason='ProvisioningFailed',
+ timeout=180)
error = 'insufficient block hosts online'
err_msg = (
"Haven't found expected error message containing "
@@ -799,7 +800,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
sc_name = self.create_storage_class(hacount=(node_count - 1))
self.create_and_wait_for_pvc(sc_name=sc_name)
- @pytest.mark.tier1
+ @pytest.mark.tier3
def test_heketi_block_volume_create_with_size_more_than_bhv_free_space(
self):
""" Test to create heketi block volume of size greater than
@@ -838,7 +839,7 @@ class TestDynamicProvisioningBlockP0(GlusterBlockBaseClass):
" No. of BHV after the test : {}".format(len(bhv_list), bhv_post))
self.assertEqual(bhv_post, (len(bhv_list) + 2), err_msg)
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_100gb_block_pvc_create_and_delete_twice(self):
"""Validate creation and deletion of blockvoume of size 100GB"""
# Define required space, bhv size required for on 100GB block PVC
diff --git a/tests/functional/provisioning/test_dynamic_provisioning_file.py b/tests/functional/provisioning/test_dynamic_provisioning_file.py
index bc24d517..87ff754a 100644
--- a/tests/functional/provisioning/test_dynamic_provisioning_file.py
+++ b/tests/functional/provisioning/test_dynamic_provisioning_file.py
@@ -4,6 +4,7 @@ from glusto.core import Glusto as g
import pytest
from openshiftstoragelibs.baseclass import BaseClass
+from openshiftstoragelibs import command
from openshiftstoragelibs.exceptions import ExecutionError
from openshiftstoragelibs.heketi_ops import (
heketi_node_info,
@@ -13,7 +14,12 @@ from openshiftstoragelibs.heketi_ops import (
heketi_volume_list,
verify_volume_name_prefix,
)
-from openshiftstoragelibs.node_ops import node_reboot_by_command
+from openshiftstoragelibs.node_ops import (
+ find_vm_name_by_ip_or_hostname,
+ node_reboot_by_command,
+ power_off_vm_by_name,
+ power_on_vm_by_name
+)
from openshiftstoragelibs.openshift_ops import (
cmd_run_on_gluster_pod_or_node,
get_gluster_host_ips_by_pvc_name,
@@ -56,7 +62,8 @@ class TestDynamicProvisioningP0(BaseClass):
pvc_name = self.create_and_wait_for_pvc()
# Create DC with POD and attached PVC to it.
- dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
+ dc_name = oc_create_app_dc_with_io(
+ self.node, pvc_name, image=self.io_container_image_cirros)
self.addCleanup(oc_delete, self.node, 'dc', dc_name)
self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
@@ -108,13 +115,13 @@ class TestDynamicProvisioningP0(BaseClass):
ret, 0,
"Failed to execute '%s' command on %s" % (cmd, self.node))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_dynamic_provisioning_glusterfile(self):
"""Validate dynamic provisioning for gluster file"""
g.log.info("test_dynamic_provisioning_glusterfile")
self.dynamic_provisioning_glusterfile(False)
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_dynamic_provisioning_glusterfile_volname_prefix(self):
"""Validate dynamic provisioning for gluster file with vol name prefix
"""
@@ -126,7 +133,7 @@ class TestDynamicProvisioningP0(BaseClass):
g.log.info("test_dynamic_provisioning_glusterfile volname prefix")
self.dynamic_provisioning_glusterfile(True)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_dynamic_provisioning_glusterfile_heketipod_failure(self):
"""Validate dynamic provisioning for gluster file when heketi pod down
"""
@@ -144,7 +151,7 @@ class TestDynamicProvisioningP0(BaseClass):
# Create app POD with attached volume
app_1_pod_name = oc_create_tiny_pod_with_volume(
self.node, app_1_pvc_name, "test-pvc-mount-on-app-pod",
- mount_path=mount_path)
+ mount_path=mount_path, image=self.io_container_image_cirros)
self.addCleanup(
wait_for_resource_absence, self.node, 'pod', app_1_pod_name)
self.addCleanup(oc_delete, self.node, 'pod', app_1_pod_name)
@@ -184,7 +191,7 @@ class TestDynamicProvisioningP0(BaseClass):
# Create second app POD
app_2_pod_name = oc_create_tiny_pod_with_volume(
self.node, app_2_pvc_name, "test-pvc-mount-on-app-pod",
- mount_path=mount_path)
+ mount_path=mount_path, image=self.io_container_image_cirros)
self.addCleanup(
wait_for_resource_absence, self.node, 'pod', app_2_pod_name)
self.addCleanup(oc_delete, self.node, 'pod', app_2_pod_name)
@@ -209,7 +216,7 @@ class TestDynamicProvisioningP0(BaseClass):
ret, 0,
"Failed to execute command %s on %s" % (write_data_cmd, self.node))
- @pytest.mark.tier2
+ @pytest.mark.tier4
def test_dynamic_provisioning_glusterfile_gluster_pod_or_node_failure(
self):
"""Create glusterblock PVC when gluster pod or node is down."""
@@ -225,7 +232,7 @@ class TestDynamicProvisioningP0(BaseClass):
# Create app POD with attached volume
pod_name = oc_create_tiny_pod_with_volume(
self.node, pvc_name, "test-pvc-mount-on-app-pod",
- mount_path=mount_path)
+ mount_path=mount_path, image=self.io_container_image_cirros)
self.addCleanup(
wait_for_resource_absence, self.node, 'pod', pod_name)
self.addCleanup(oc_delete, self.node, 'pod', pod_name)
@@ -286,7 +293,7 @@ class TestDynamicProvisioningP0(BaseClass):
ret, out, err = async_io.async_communicate()
self.assertEqual(ret, 0, "IO %s failed on %s" % (io_cmd, self.node))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_storage_class_mandatory_params_glusterfile(self):
"""Validate storage-class creation with mandatory parameters"""
@@ -312,7 +319,8 @@ class TestDynamicProvisioningP0(BaseClass):
pvc_name = self.create_and_wait_for_pvc(sc_name=sc_name)
# Create DC with POD and attached PVC to it.
- dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
+ dc_name = oc_create_app_dc_with_io(
+ self.node, pvc_name, image=self.io_container_image_cirros)
self.addCleanup(oc_delete, self.node, 'dc', dc_name)
self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
@@ -336,7 +344,7 @@ class TestDynamicProvisioningP0(BaseClass):
self.assertEqual(
ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_dynamic_provisioning_glusterfile_heketidown_pvc_delete(self):
"""Validate deletion of PVC's when heketi is down"""
@@ -374,7 +382,7 @@ class TestDynamicProvisioningP0(BaseClass):
# create a new PVC
self.create_and_wait_for_pvc()
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_validate_pvc_in_multiple_app_pods(self):
"""Validate the use of a same claim in multiple app pods"""
replicas = 5
@@ -385,7 +393,8 @@ class TestDynamicProvisioningP0(BaseClass):
# Create DC with application PODs
dc_name = oc_create_app_dc_with_io(
- self.node, pvc_name, replicas=replicas)
+ self.node, pvc_name, replicas=replicas,
+ image=self.io_container_image_cirros)
self.addCleanup(oc_delete, self.node, 'dc', dc_name)
self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
@@ -404,7 +413,7 @@ class TestDynamicProvisioningP0(BaseClass):
for pod_name in pod_names:
self.assertIn("temp_%s" % pod_name, ls_out)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_pvc_deletion_while_pod_is_running(self):
"""Validate PVC deletion while pod is running"""
if get_openshift_version() <= "3.9":
@@ -432,7 +441,7 @@ class TestDynamicProvisioningP0(BaseClass):
self.assertEqual(
ret, 0, "Failed to execute command %s on %s" % (cmd, self.node))
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_dynamic_provisioning_glusterfile_reclaim_policy_retain(self):
"""Validate retain policy for glusterfs after deletion of pvc"""
@@ -456,7 +465,8 @@ class TestDynamicProvisioningP0(BaseClass):
# Create DC with POD and attached PVC to it.
try:
- dc_name = oc_create_app_dc_with_io(self.node, self.pvc_name)
+ dc_name = oc_create_app_dc_with_io(
+ self.node, self.pvc_name, image=self.io_container_image_cirros)
pod_name = get_pod_name_from_dc(self.node, dc_name)
wait_for_pod_be_ready(self.node, pod_name)
finally:
@@ -481,7 +491,7 @@ class TestDynamicProvisioningP0(BaseClass):
oc_delete(self.node, 'pv', pv_name)
wait_for_resource_absence(self.node, 'pv', pv_name)
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_usage_of_default_storage_class(self):
"""Validate PVs creation for SC with default custom volname prefix"""
@@ -541,3 +551,37 @@ class TestDynamicProvisioningP0(BaseClass):
"-o=custom-columns=:.spec.storageClassName" % pvc_name)
out = self.cmd_run(get_sc_of_pvc_cmd)
self.assertEqual(out, self.sc_name)
+
+ @pytest.mark.tier2
+ def test_node_failure_pv_mounted(self):
+ """Test node failure when PV is mounted with app pods running"""
+ filepath = "/mnt/file_for_testing_volume.log"
+ pvc_name = self.create_and_wait_for_pvc()
+
+ dc_and_pod_names = self.create_dcs_with_pvc(pvc_name)
+ dc_name, pod_name = dc_and_pod_names[pvc_name]
+
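+ # For a glusterfs mount the df source looks like '<gluster-host>:<volume>',
+ # so the host part identifies the VM to power off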
+ mount_point = "df -kh /mnt -P | tail -1 | awk '{{print $1}}'"
+ pod_cmd = "oc exec {} -- {}".format(pod_name, mount_point)
+ hostname = command.cmd_run(pod_cmd, hostname=self.node)
+ hostname = hostname.split(":")[0]
+
+ vm_name = find_vm_name_by_ip_or_hostname(hostname)
+ self.addCleanup(power_on_vm_by_name, vm_name)
+ power_off_vm_by_name(vm_name)
+
+ cmd = "dd if=/dev/urandom of={} bs=1K count=100".format(filepath)
+ ret, _, err = oc_rsh(self.node, pod_name, cmd)
+ self.assertFalse(
+ ret, "Failed to execute command {} on {} with error {}"
+ .format(cmd, self.node, err))
+
+ oc_delete(self.node, 'pod', pod_name)
+ wait_for_resource_absence(self.node, 'pod', pod_name)
+ pod_name = get_pod_name_from_dc(self.node, dc_name)
+ wait_for_pod_be_ready(self.node, pod_name)
+
+ ret, _, err = oc_rsh(self.node, pod_name, cmd)
+ self.assertFalse(
+ ret, "Failed to execute command {} on {} with error {}"
+ .format(cmd, self.node, err))
diff --git a/tests/functional/provisioning/test_pv_resize.py b/tests/functional/provisioning/test_pv_resize.py
index abca7c17..f5833a99 100644
--- a/tests/functional/provisioning/test_pv_resize.py
+++ b/tests/functional/provisioning/test_pv_resize.py
@@ -50,7 +50,7 @@ class TestPvResizeClass(BaseClass):
g.log.error(msg)
raise self.skipTest(msg)
- @pytest.mark.tier1
+ @pytest.mark.tier2
@ddt.data(
(True, True),
(False, True),
@@ -70,7 +70,8 @@ class TestPvResizeClass(BaseClass):
pvc_name = self.create_and_wait_for_pvc()
# Create DC with POD and attached PVC to it.
- dc_name = oc_create_app_dc_with_io(node, pvc_name)
+ dc_name = oc_create_app_dc_with_io(
+ node, pvc_name, image=self.io_container_image_cirros)
self.addCleanup(oc_delete, node, 'dc', dc_name)
self.addCleanup(scale_dc_pod_amount_and_wait,
node, dc_name, 0)
@@ -194,7 +195,8 @@ class TestPvResizeClass(BaseClass):
pvc_name = self.create_and_wait_for_pvc(pvc_size=pvc_size_gb)
# Create DC with POD and attached PVC to it
- dc_name = oc_create_app_dc_with_io(self.node, pvc_name)
+ dc_name = oc_create_app_dc_with_io(
+ self.node, pvc_name, image=self.io_container_image_cirros)
self.addCleanup(oc_delete, self.node, 'dc', dc_name)
self.addCleanup(scale_dc_pod_amount_and_wait, self.node, dc_name, 0)
pod_name = get_pod_name_from_dc(self.node, dc_name)
@@ -238,7 +240,7 @@ class TestPvResizeClass(BaseClass):
self.assertEqual(
ret, 0, "Failed to write data on the expanded PVC")
- @pytest.mark.tier2
+ @pytest.mark.tier3
def test_pv_resize_no_free_space(self):
"""Validate PVC resize fails if there is no free space available"""
if not self.is_containerized_gluster():
@@ -253,12 +255,12 @@ class TestPvResizeClass(BaseClass):
self._pv_resize(exceed_free_space=True)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_pv_resize_by_exact_free_space(self):
"""Validate PVC resize when resized by exact available free space"""
self._pv_resize(exceed_free_space=False)
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_pv_resize_try_shrink_pv_size(self):
"""Validate whether reducing the PV size is allowed"""
dir_path = "/mnt/"
@@ -270,7 +272,8 @@ class TestPvResizeClass(BaseClass):
pvc_name = self.create_and_wait_for_pvc(pvc_size=pv_size)
# Create DC with POD and attached PVC to it.
- dc_name = oc_create_app_dc_with_io(node, pvc_name)
+ dc_name = oc_create_app_dc_with_io(
+ node, pvc_name, image=self.io_container_image_cirros)
self.addCleanup(oc_delete, node, 'dc', dc_name)
self.addCleanup(scale_dc_pod_amount_and_wait,
node, dc_name, 0)
@@ -295,7 +298,7 @@ class TestPvResizeClass(BaseClass):
self.assertEqual(
ret, 0, "Failed to execute command %s on %s" % (cmd, node))
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_pv_resize_when_heketi_down(self):
"""Create a PVC and try to expand it when heketi is down, It should
fail. After heketi is up, expand PVC should work.
@@ -363,7 +366,7 @@ class TestPvResizeClass(BaseClass):
# Verify pod is running
wait_for_pod_be_ready(self.node, pod_name, 10, 5)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_pvc_resize_while_ios_are_running(self):
"""Re-size PVC while IO's are running"""
@@ -392,7 +395,7 @@ class TestPvResizeClass(BaseClass):
raise ExecutionError("Failed to run io, error {}".format(str(err)))
@skip("Blocked by BZ-1547069")
- @pytest.mark.tier2
+ @pytest.mark.tier3
def test_pvc_resize_size_greater_than_available_space(self):
"""Re-size PVC to greater value than available volume size and then
expand volume to support maximum size.
@@ -418,7 +421,7 @@ class TestPvResizeClass(BaseClass):
resize_pvc(self.ocp_master_node[0], pvc_name, available_size_gb)
verify_pvc_size(self.ocp_master_node[0], pvc_name, available_size_gb)
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_pv_resize_device_disabled(self):
"""Validate resize after disabling all devices except one"""
h_node, h_url = self.heketi_client_node, self.heketi_server_url
diff --git a/tests/functional/provisioning/test_storage_class_cases.py b/tests/functional/provisioning/test_storage_class_cases.py
index 96f56ceb..976398db 100644
--- a/tests/functional/provisioning/test_storage_class_cases.py
+++ b/tests/functional/provisioning/test_storage_class_cases.py
@@ -16,6 +16,7 @@ from openshiftstoragelibs.openshift_storage_libs import (
validate_multipath_pod,
)
from openshiftstoragelibs.openshift_ops import (
+ cmd_run_on_gluster_pod_or_node,
get_amount_of_gluster_nodes,
get_gluster_blockvol_info_by_pvc_name,
get_pod_name_from_dc,
@@ -27,11 +28,13 @@ from openshiftstoragelibs.openshift_ops import (
oc_delete,
oc_get_custom_resource,
oc_get_pods,
+ restart_service_on_gluster_pod_or_node,
scale_dc_pod_amount_and_wait,
verify_pvc_status_is_bound,
wait_for_events,
wait_for_pod_be_ready,
wait_for_resource_absence,
+ wait_for_service_status_on_gluster_pod_or_node,
)
from openshiftstoragelibs.openshift_storage_version import (
get_openshift_storage_version
@@ -143,7 +146,8 @@ class TestStorageClassCases(BaseClass):
"""
# create pod using pvc created
dc_name = oc_create_app_dc_with_io(
- self.ocp_master_node[0], self.pvc_name
+ self.ocp_master_node[0], self.pvc_name,
+ image=self.io_container_image_cirros
)
pod_name = get_pod_name_from_dc(self.ocp_master_node[0], dc_name)
self.addCleanup(oc_delete, self.ocp_master_node[0], "dc", dc_name)
@@ -181,7 +185,7 @@ class TestStorageClassCases(BaseClass):
validate_multipath_pod(
self.ocp_master_node[0], pod_name, hacount, list(mpaths)[0])
- @pytest.mark.tier1
+ @pytest.mark.tier2
@ddt.data(
{"volumetype": "dist-rep:3"},
{"resturl": "http://10.0.0.1:8080"},
@@ -195,7 +199,7 @@ class TestStorageClassCases(BaseClass):
"""Validate glusterfile storage with different incorrect parameters"""
self.create_sc_with_parameter("glusterfile", parameter=parameter)
- @pytest.mark.tier1
+ @pytest.mark.tier2
@ddt.data(
{"resturl": "http://10.0.0.1:8080"},
{"restsecretname": "fakerestsecretname",
@@ -207,7 +211,7 @@ class TestStorageClassCases(BaseClass):
"""Validate glusterblock storage with different incorrect parameters"""
self.create_sc_with_parameter("glusterblock", parameter=parameter)
- @pytest.mark.tier0
+ @pytest.mark.tier1
@ddt.data(1, 2)
def test_gluster_block_provisioning_with_valid_ha_count(self, hacount):
"""Validate gluster-block provisioning with different valid 'hacount'
@@ -239,7 +243,7 @@ class TestStorageClassCases(BaseClass):
if hacount > 1:
self.validate_multipath_info(hacount)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_gluster_block_provisioning_with_ha_count_as_glusterpod(self):
"""Validate gluster-block provisioning with "hacount" value equal
to the gluster pod count
@@ -269,7 +273,7 @@ class TestStorageClassCases(BaseClass):
)
self.validate_multipath_info(hacount)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_gluster_block_provisioning_with_invalid_ha_count(self):
"""Validate gluster-block provisioning with any invalid 'hacount'
value
@@ -301,7 +305,7 @@ class TestStorageClassCases(BaseClass):
)
self.validate_multipath_info(gluster_pod_count)
- @pytest.mark.tier0
+ @pytest.mark.tier1
@ddt.data('true', 'false', '')
def test_gluster_block_chapauthenabled_parameter(self, chapauthenabled):
"""Validate gluster-block provisioning with different
@@ -331,7 +335,7 @@ class TestStorageClassCases(BaseClass):
"Invalid chapauthenabled value '%s'" % chapauthenabled
)
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_create_and_verify_pvc_with_volume_name_prefix(self):
"""create and verify pvc with volname prefix on an app pod"""
if get_openshift_version() < "3.9":
@@ -358,7 +362,7 @@ class TestStorageClassCases(BaseClass):
"Failed to read Endpoints of %s on %s " % (
pv_name, self.ocp_master_node[0]))
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_try_to_create_sc_with_duplicated_name(self):
"""Verify SC creation fails with duplicate name"""
sc_name = "test-sc-duplicated-name-" + utils.get_random_str()
@@ -367,7 +371,7 @@ class TestStorageClassCases(BaseClass):
with self.assertRaises(AssertionError):
self.create_storage_class(sc_name=sc_name)
- @pytest.mark.tier1
+ @pytest.mark.tier2
@ddt.data('secretName', 'secretNamespace', None)
def test_sc_glusterfile_missing_parameter(self, parameter):
"""Validate glusterfile storage with missing parameters"""
@@ -397,7 +401,7 @@ class TestStorageClassCases(BaseClass):
with self.assertRaises(ExecutionError):
verify_pvc_status_is_bound(node, pvc_name, timeout=1)
- @pytest.mark.tier1
+ @pytest.mark.tier2
def test_sc_create_with_clusterid(self):
"""Create storage class with 'cluster id'"""
h_cluster_list = heketi_cluster_list(
@@ -421,3 +425,63 @@ class TestStorageClassCases(BaseClass):
"Cluster ID %s has NOT been used to"
"create the PVC %s. Found %s" %
(cluster_id, pvc_name, volume_info["cluster"]))
+
+ def _validate_permission(
+ self, ocp_node, gluster_node, dir_perm, file_perm):
+ """Validate /etc/target and /etc/target/backup permissions"""
+
+ target_dir_perm = "ls -ld /etc/target | awk '{print $1}'"
+ target_file_perm = (
+ "ls -l /etc/target/backup | awk '{print $1}' | sed 1D")
+
+ dir_perm_result = cmd_run_on_gluster_pod_or_node(
+ ocp_node, target_dir_perm, gluster_node)
+ self.assertEqual(
+ dir_perm_result, dir_perm,
+ "Failed to validate permission of '/etc/target'")
+ results = cmd_run_on_gluster_pod_or_node(
+ ocp_node, target_file_perm, gluster_node)
+ file_perm_results = list(results.split("\n"))
+ for perm in file_perm_results:
+ self.assertEqual(
+ perm, file_perm, "Failed to validate permission"
+ " in '/etc/target/backup'")
+
+ @pytest.mark.tier1
+ def test_targetcli_weak_permissions_config_files(self):
+ """Validate permissions on config files"""
+
+ ocp_node = self.ocp_master_node[0]
+ gluster_node = self.gluster_servers[0]
+ dir_perm_before, dir_perm_after = "drwxrwxrwx.", "drw-------."
+ file_perm_before, file_perm_after = "-rwxrwxrwx.", "-rw-------."
+ services = ("tcmu-runner", "gluster-block-target", "gluster-blockd")
+ cmd = "chmod -R 777 /etc/target/"
+
+ # Set weak permissions on '/etc/target' and '/etc/target/backup'
+ cmd_run_on_gluster_pod_or_node(ocp_node, cmd, gluster_node)
+ for service in services:
+ state = (
+ 'exited' if service == 'gluster-block-target' else 'running')
+ self.addCleanup(
+ wait_for_service_status_on_gluster_pod_or_node,
+ ocp_node, service, 'active', state, gluster_node)
+ self.addCleanup(
+ restart_service_on_gluster_pod_or_node,
+ ocp_node, service, gluster_node)
+
+ self._validate_permission(
+ ocp_node, gluster_node, dir_perm_before, file_perm_before)
+
+ # Restart the services
+ for service in services:
+ state = (
+ 'exited' if service == 'gluster-block-target' else 'running')
+ restart_service_on_gluster_pod_or_node(
+ ocp_node, service, gluster_node)
+ wait_for_service_status_on_gluster_pod_or_node(
+ ocp_node, service, 'active', state, gluster_node)
+
+ # Permissions on '/etc/target' should be back to the defaults
+ self._validate_permission(
+ ocp_node, gluster_node, dir_perm_after, file_perm_after)
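The permission check added above shells out to 'ls' and compares raw mode strings. The same idea expressed locally with the Python stdlib, as a rough sketch (the helper names are hypothetical, and note that stat.filemode() does not append the trailing SELinux '.' that 'ls -ld' prints, so the expected strings drop it):

import os
import stat


def mode_string(path):
    """Return an ls-style mode string such as 'drw-------' for path."""
    return stat.filemode(os.stat(path).st_mode)


def assert_target_perms(dir_perm="drw-------", file_perm="-rw-------"):
    """Check /etc/target and every entry under /etc/target/backup."""
    assert mode_string("/etc/target") == dir_perm, "unexpected /etc/target mode"
    backup = "/etc/target/backup"
    for name in os.listdir(backup):
        got = mode_string(os.path.join(backup, name))
        assert got == file_perm, "unexpected mode on %s: %s" % (name, got)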
diff --git a/tests/functional/test_gluster_ops_check.py b/tests/functional/test_gluster_ops_check.py
index a184aa2f..bf6db7b2 100644
--- a/tests/functional/test_gluster_ops_check.py
+++ b/tests/functional/test_gluster_ops_check.py
@@ -8,7 +8,7 @@ from openshiftstoragelibs import podcmd
class TestOpsCheck(BaseClass):
- @pytest.mark.tier0
+ @pytest.mark.tier1
@podcmd.GlustoPod()
def test_check_bmux_enabled(self):
"""Check if the brickmultiplexing is enalbed"""
@@ -19,7 +19,7 @@ class TestOpsCheck(BaseClass):
err_msg = ("Brick multiplex is not enabled")
self.assertTrue(bmux_status, err_msg)
- @pytest.mark.tier0
+ @pytest.mark.tier1
def test_check_max_brick_per_process(self):
"""Check if the max-brick process is set to 250"""
diff --git a/tests/functional/test_node_restart.py b/tests/functional/test_node_restart.py
index a03b6238..1d44f025 100644
--- a/tests/functional/test_node_restart.py
+++ b/tests/functional/test_node_restart.py
@@ -100,7 +100,7 @@ class TestNodeRestart(BaseClass):
self.oc_node, gluster_pod, service, "active", state)
@skip("Blocked by BZ-1652913")
- @pytest.mark.tier2
+ @pytest.mark.tier4
def test_node_restart_check_volume(self):
df_cmd = "df --out=target | sed 1d | grep /var/lib/heketi"
fstab_cmd = "grep '%s' /var/lib/heketi/fstab"
diff --git a/tests/glusterfs-containers-tests-config.yaml b/tests/glusterfs-containers-tests-config.yaml
index 3629f3a6..cbccfbf6 100644
--- a/tests/glusterfs-containers-tests-config.yaml
+++ b/tests/glusterfs-containers-tests-config.yaml
@@ -100,6 +100,26 @@ openshift:
metrics_rc_hawkular_metrics: "<hawkular-metrics-rc-name>"
metrics_rc_heapster: "<heapster-rc-name>"
+ # 'logging' section covers the details of resources related to OCP logging
+ logging:
+ logging_project_name: "<logging-project-name>"
+ logging_fluentd_ds: "<fluentd-ds-name>"
+ logging_es_dc: "<elasticsearch-dc-name>"
+ logging_kibana_dc: "<kibana-dc-name>"
+
+ # 'prometheus' section covers the details of resources related to
+ # prometheus
+ prometheus:
+ prometheus_project_name: "<prometheus_project_name>"
+ prometheus_resources_selector: "<prometheus_resources_selector>"
+ alertmanager_resources_selector: "<alertmanager_resources_selector>"
+
+ # 'io_container_images' section covers the details of container images
+ # used for I/O
+ io_container_images:
+ cirros: quay.io/libpod/cirros
+ busybox: quay.io/prometheus/busybox
+
common:
allow_heketi_zones_update: False
check_heketi_db_inconsistencies: True
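The new 'io_container_images' keys are what the image=self.io_container_image_cirros arguments in the test changes above resolve to. A minimal sketch of reading them from the config file (the load_io_images helper is hypothetical; the defaults are the values added in this change, under the 'openshift' section):

import yaml


def load_io_images(path="tests/glusterfs-containers-tests-config.yaml"):
    """Return the I/O container image map, with fallbacks if the section is absent."""
    with open(path) as f:
        config = yaml.safe_load(f) or {}
    defaults = {
        "cirros": "quay.io/libpod/cirros",
        "busybox": "quay.io/prometheus/busybox",
    }
    images = (config.get("openshift") or {}).get("io_container_images") or {}
    defaults.update(images)
    return defaults


# Example: image passed to oc_create_app_dc_with_io(..., image=...)
# image = load_io_images()["cirros"]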