-rw-r--r--  cns-libs/cnslibs/common/cns_libs.py                                          |  79
-rw-r--r--  cns-libs/cnslibs/common/exceptions.py                                        |   8
-rw-r--r--  cns-libs/cnslibs/common/heketi_ops.py                                        |  68
-rw-r--r--  cns-libs/cnslibs/common/openshift_ops.py                                     | 303
-rw-r--r--  cns-libs/cnslibs/common/podcmd.py                                            |  14
-rw-r--r--  cns-libs/cnslibs/common/waiter.py                                            |   1
-rw-r--r--  cns-libs/setup.py                                                            |   4
-rw-r--r--  cns-tools/setup.py                                                           |   4
-rw-r--r--  tests/functional/common/heketi/heketi_tests/test_check_entry.py             |  24
-rw-r--r--  tests/functional/common/heketi/heketi_tests/test_create_heketi_volume_size_60.py |  34
-rw-r--r--  tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py    |  59
-rw-r--r--  tests/functional/common/heketi/heketi_tests/test_node_enable_disable.py     | 140
-rw-r--r--  tests/functional/common/heketi/heketi_tests/test_node_info.py               |  18
-rw-r--r--  tests/functional/common/heketi/test_volume_deletion.py                      |   7
-rw-r--r--  tests/functional/common/heketi/test_volume_expansion_and_devices.py         |   3
-rw-r--r--  tests/functional/common/heketi/test_volume_multi_req.py                     |   2
-rw-r--r--  tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py |  42
-rw-r--r--  tests/functional/common/provisioning/test_pv_resize.py                      | 129
18 files changed, 830 insertions, 109 deletions
diff --git a/cns-libs/cnslibs/common/cns_libs.py b/cns-libs/cnslibs/common/cns_libs.py
index dbb78dcf..5b9a3027 100644
--- a/cns-libs/cnslibs/common/cns_libs.py
+++ b/cns-libs/cnslibs/common/cns_libs.py
@@ -1,8 +1,10 @@
from cnslibs.common.exceptions import (
- ExecutionError)
+ ExecutionError,
+ NotSupportedException)
from cnslibs.common.openshift_ops import (
get_ocp_gluster_pod_names,
- oc_rsh)
+ oc_rsh,
+ oc_version)
from cnslibs.common.waiter import Waiter
from glusto.core import Glusto as g
import yaml
@@ -464,3 +466,76 @@ def validate_gluster_blockd_service_gluster_pod(hostname):
g.log.info("gluster-blockd service is running on all "
"gluster-pods %s" % gluster_pod_list)
return True
+
+
+def enable_pvc_resize(master_node):
+    '''
+    Edit the /etc/origin/master/master-config.yaml file on the
+    master node to enable the pv_resize feature and restart the
+    atomic-openshift master services.
+    Args:
+        master_node (str): hostname of the master node on which
+                           the master-config.yaml file is to be
+                           edited
+    Returns:
+        bool: True if successful,
+        otherwise raise Exception
+    '''
+ version = oc_version(master_node)
+ if any(v in version for v in ("3.6", "3.7", "3.8")):
+ msg = ("pv resize is not available in openshift "
+ "version %s " % version)
+ g.log.error(msg)
+ raise NotSupportedException(msg)
+
+ try:
+ conn = g.rpyc_get_connection(master_node, user="root")
+ if conn is None:
+ err_msg = ("Failed to get rpyc connection of node %s"
+ % master_node)
+ g.log.error(err_msg)
+ raise ExecutionError(err_msg)
+
+ with conn.builtin.open(MASTER_CONFIG_FILEPATH, 'r') as f:
+ data = yaml.load(f)
+ dict_add = data['admissionConfig']['pluginConfig']
+ if "PersistentVolumeClaimResize" in dict_add:
+ g.log.info("master-config.yaml file is already edited")
+ return True
+ dict_add['PersistentVolumeClaimResize'] = {
+ 'configuration': {
+ 'apiVersion': 'v1',
+ 'disable': 'false',
+ 'kind': 'DefaultAdmissionConfig'}}
+ data['admissionConfig']['pluginConfig'] = dict_add
+ kube_config = data['kubernetesMasterConfig']
+ for key in ('apiServerArguments', 'controllerArguments'):
+ kube_config[key] = (
+ kube_config.get(key)
+ if isinstance(kube_config.get(key), dict) else {})
+ value = ['ExpandPersistentVolumes=true']
+ kube_config[key]['feature-gates'] = value
+ with conn.builtin.open(MASTER_CONFIG_FILEPATH, 'w+') as f:
+ yaml.dump(data, f, default_flow_style=False)
+ except Exception as err:
+        raise ExecutionError("failed to edit master-config.yaml file "
+                             "on %s: %s" % (master_node, err))
+ finally:
+ g.rpyc_close_connection(master_node, user="root")
+
+    g.log.info("successfully edited master-config.yaml file "
+               "on %s" % master_node)
+ if "3.9" in version:
+ cmd = ("systemctl restart atomic-openshift-master-api "
+ "atomic-openshift-master-controllers")
+ else:
+ cmd = ("/usr/local/bin/master-restart api && "
+ "/usr/local/bin/master-restart controllers")
+ ret, out, err = g.run(master_node, cmd, "root")
+ if ret != 0 or out == "":
+        err_msg = ("failed to execute cmd %s on %s, out: %s, err: %s"
+                   % (cmd, master_node, out, err))
+ g.log.error(err_msg)
+ raise ExecutionError(err_msg)
+
+ return True
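
For readability, the structures that enable_pvc_resize() writes into
master-config.yaml are summarized below as Python literals (a sketch mirroring
the code above, not an additional change to the patch):

    # pluginConfig entry added under admissionConfig:
    plugin_config_entry = {
        'PersistentVolumeClaimResize': {
            'configuration': {
                'apiVersion': 'v1',
                'disable': 'false',
                'kind': 'DefaultAdmissionConfig'}}}

    # feature gate set for both apiServerArguments and controllerArguments
    # under kubernetesMasterConfig:
    feature_gates = {'feature-gates': ['ExpandPersistentVolumes=true']}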
diff --git a/cns-libs/cnslibs/common/exceptions.py b/cns-libs/cnslibs/common/exceptions.py
index 38fb27e6..44daee12 100644
--- a/cns-libs/cnslibs/common/exceptions.py
+++ b/cns-libs/cnslibs/common/exceptions.py
@@ -11,5 +11,13 @@ class ExecutionError(Exception):
unrecoverable error.
For example, all hosts are not in peer state or a volume cannot be setup.
+ '''
+
+
+class NotSupportedException(Exception):
+ '''
+    Custom exception raised when a particular feature is not supported in a
+    particular product version.
+    For example, pv resize is not supported in OCP versions < 3.9.
'''
diff --git a/cns-libs/cnslibs/common/heketi_ops.py b/cns-libs/cnslibs/common/heketi_ops.py
index 3c3646ed..af021599 100644
--- a/cns-libs/cnslibs/common/heketi_ops.py
+++ b/cns-libs/cnslibs/common/heketi_ops.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-
"""
Description: Library for heketi operations.
"""
@@ -7,10 +5,9 @@
from glusto.core import Glusto as g
from collections import OrderedDict
import json
-import re
try:
from heketi import HeketiClient
-except:
+except ImportError:
g.log.error("Please install python-client for heketi and re-run the test")
from cnslibs.common import exceptions
@@ -130,7 +127,7 @@ def modify_heketi_executor(heketi_client_node, executor, keyfile, user, port,
with conn.builtin.open(heketi_config_file, 'w') as fh_write:
conn.modules.json.dump(config_data, fh_write, sort_keys=False,
indent=4, ensure_ascii=False)
- except:
+ except Exception:
g.log.error("Failed to modify heketi executor in %s"
% heketi_config_file)
finally:
@@ -335,7 +332,7 @@ def heketi_create_topology(heketi_client_node, topology_info,
with conn.builtin.open(topology_file, 'w') as fh_write:
conn.modules.json.dump(modified_topology_info, fh_write, indent=4)
- except:
+ except Exception:
g.log.error("Failed to create topology file in %s"
% heketi_client_node)
finally:
@@ -496,7 +493,8 @@ def heketi_volume_create(heketi_client_node, heketi_server_url, size,
% float(kwargs.get("snapshot_factor"))
if kwargs.get("snapshot_factor") else "")
json_arg = "--json" if kwargs.get("json") else ""
- secret_arg = "--secret %s" % kwargs.get("secret") if kwargs.get("secret") else ""
+ secret_arg = (
+ "--secret %s" % kwargs.get("secret") if kwargs.get("secret") else "")
user_arg = "--user %s" % kwargs.get("user") if kwargs.get("user") else ""
err_msg = "Failed to create volume. "
@@ -595,7 +593,7 @@ def heketi_volume_info(heketi_client_node, heketi_server_url, volume_id,
volume_info = conn.volume_info(volume_id)
if volume_info is None:
return False
- except:
+ except Exception:
g.log.error("Failed to get volume info using heketi")
return False
return volume_info
@@ -666,7 +664,7 @@ def heketi_volume_expand(heketi_client_node, heketi_server_url, volume_id,
vol_req = {}
vol_req['expand_size'] = int(expand_size)
volume_expand_info = conn.volume_expand(volume_id, vol_req)
- except:
+ except Exception:
g.log.error("Failed to do volume expansion info using heketi")
return False
@@ -781,7 +779,7 @@ def heketi_volume_list(heketi_client_node, heketi_server_url, mode='cli',
admin_key = admin_key.split('t ')[-1] if admin_key else admin_key
conn = HeketiClient(heketi_server_url, user, admin_key)
volume_list = conn.volume_list()
- except:
+ except Exception:
g.log.error("Failed to do volume list using heketi")
return False
return volume_list
@@ -879,7 +877,7 @@ def hello_heketi(heketi_client_node, heketi_server_url, mode='cli', **kwargs):
admin_key = admin_key.split('t ')[-1] if admin_key else admin_key
conn = HeketiClient(heketi_server_url, user, admin_key)
ret = conn.hello()
- except:
+ except Exception:
g.log.error("Failed to execute heketi hello command")
return False
return ret
@@ -960,7 +958,7 @@ def heketi_cluster_create(heketi_client_node, heketi_server_url,
cluster_req["block"] = block_arg
cluster_req["file"] = file_arg
cluster_create_info = conn.volume_create(cluster_req)
- except:
+ except Exception:
g.log.error("Failed to do cluster create using heketi")
return False
return cluster_create_info
@@ -1013,7 +1011,7 @@ def heketi_cluster_delete(heketi_client_node, heketi_server_url, cluster_id,
admin_key = admin_key.split('t ')[-1] if admin_key else admin_key
conn = HeketiClient(heketi_server_url, user, admin_key)
ret = conn.cluster_delete(cluster_id)
- except:
+ except Exception:
g.log.error("Failed to do volume delete using heketi")
return False
return ret
@@ -1067,7 +1065,7 @@ def heketi_cluster_info(heketi_client_node, heketi_server_url, cluster_id,
admin_key = admin_key.split('t ')[-1] if admin_key else admin_key
conn = HeketiClient(heketi_server_url, user, admin_key)
cluster_info = conn.cluster_info(cluster_id)
- except:
+ except Exception:
g.log.error("Failed to get cluster info using heketi")
return False
return cluster_info
@@ -1122,7 +1120,7 @@ def heketi_cluster_list(heketi_client_node, heketi_server_url, mode='cli',
admin_key = admin_key.split('t ')[-1] if admin_key else admin_key
conn = HeketiClient(heketi_server_url, user, admin_key)
cluster_list = conn.cluster_list()
- except:
+ except Exception:
g.log.error("Failed to do cluster list using heketi")
return False
return cluster_list
@@ -1188,7 +1186,7 @@ def heketi_device_add(heketi_client_node, heketi_server_url, device_name,
device_req["name"] = device_name
device_req["node"] = node_id
device = conn.device_add(device_req)
- except:
+ except Exception:
g.log.error("Failed to do device add using heketi")
return False
return device
@@ -1248,7 +1246,7 @@ def heketi_device_delete(heketi_client_node, heketi_server_url, device_id,
admin_key = admin_key.split('t ')[-1] if admin_key else admin_key
conn = HeketiClient(heketi_server_url, user, admin_key)
device = conn.device_delete(device_id)
- except:
+ except Exception:
g.log.error("Failed to do device delete using heketi")
return False
return device
@@ -1308,7 +1306,7 @@ def heketi_device_disable(heketi_client_node, heketi_server_url, device_id,
admin_key = admin_key.split('t ')[-1] if admin_key else admin_key
conn = HeketiClient(heketi_server_url, user, admin_key)
device = conn.device_disable(device_id)
- except:
+ except Exception:
g.log.error("Failed to do device disable using heketi")
return False
return device
@@ -1368,7 +1366,7 @@ def heketi_device_enable(heketi_client_node, heketi_server_url, device_id,
admin_key = admin_key.split('t ')[-1] if admin_key else admin_key
conn = HeketiClient(heketi_server_url, user, admin_key)
device = conn.device_enable(device_id)
- except:
+ except Exception:
g.log.error("Failed to do device enable using heketi")
return False
return device
@@ -1428,7 +1426,7 @@ def heketi_device_info(heketi_client_node, heketi_server_url, device_id,
admin_key = admin_key.split('t ')[-1] if admin_key else admin_key
conn = HeketiClient(heketi_server_url, user, admin_key)
cluster_info = conn.device_info(device_id)
- except:
+ except Exception:
g.log.error("Failed to get device info using heketi")
return False
return cluster_info
@@ -1488,7 +1486,7 @@ def heketi_device_remove(heketi_client_node, heketi_server_url, device_id,
admin_key = admin_key.split('t ')[-1] if admin_key else admin_key
conn = HeketiClient(heketi_server_url, user, admin_key)
device = conn.device_remove(device_id)
- except:
+ except Exception:
g.log.error("Failed to do device remove using heketi")
return False
return device
@@ -1558,7 +1556,7 @@ def heketi_node_add(heketi_client_node, heketi_server_url, zone, cluster_id,
node_req['hostnames'] = {"manage": [management_host_name],
"storage": [storage_host_name]}
node_add_info = conn.node_add(node_req)
- except:
+ except Exception:
g.log.error("Failed to do node add using heketi")
return False
return node_add_info
@@ -1613,7 +1611,7 @@ def heketi_node_delete(heketi_client_node, heketi_server_url, node_id,
admin_key = admin_key.split('t ')[-1] if admin_key else admin_key
conn = HeketiClient(heketi_server_url, user, admin_key)
device = conn.node_delete(node_id)
- except:
+ except Exception:
g.log.error("Failed to do node delete using heketi")
return False
return device
@@ -1668,7 +1666,7 @@ def heketi_node_disable(heketi_client_node, heketi_server_url, node_id,
admin_key = admin_key.split('t ')[-1] if admin_key else admin_key
conn = HeketiClient(heketi_server_url, user, admin_key)
node = conn.device_disable(node_id)
- except:
+ except Exception:
g.log.error("Failed to do node disable using heketi")
return False
return node
@@ -1723,7 +1721,7 @@ def heketi_node_enable(heketi_client_node, heketi_server_url, node_id,
admin_key = admin_key.split('t ')[-1] if admin_key else admin_key
conn = HeketiClient(heketi_server_url, user, admin_key)
device = conn.node_enable(node_id)
- except:
+ except Exception:
g.log.error("Failed to do node enable using heketi")
return False
return device
@@ -1778,7 +1776,7 @@ def heketi_node_info(heketi_client_node, heketi_server_url, node_id,
admin_key = admin_key.split('t ')[-1] if admin_key else admin_key
conn = HeketiClient(heketi_server_url, user, admin_key)
node_info = conn.node_info(node_id)
- except:
+ except Exception:
g.log.error("Failed to get node info using heketi")
return False
return node_info
@@ -1833,7 +1831,7 @@ def heketi_node_remove(heketi_client_node, heketi_server_url, node_id,
admin_key = admin_key.split('t ')[-1] if admin_key else admin_key
conn = HeketiClient(heketi_server_url, user, admin_key)
node = conn.node_remove(node_id)
- except:
+ except Exception:
g.log.error("Failed to do node remove using heketi")
return False
return node
@@ -1850,7 +1848,7 @@ def heketi_node_list(heketi_client_node, heketi_server_url,
heketi_secret (str): Secret for 'heketi_user'
Returns:
list of strings which are node IDs
- Raises: cnslibs.common.exceptions.ExecutionError in case CLI command failed.
+ Raises: cnslibs.common.exceptions.ExecutionError when CLI command fails.
"""
heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(
@@ -1925,7 +1923,7 @@ def heketi_blockvolume_info(heketi_client_node, heketi_server_url,
block_volume_info = conn.blockvolume_info(block_volume_id)
if block_volume_info is None:
return False
- except:
+ except Exception:
g.log.error("Failed to get blockvolume info using heketi")
return False
return block_volume_info
@@ -2000,9 +1998,9 @@ def heketi_blockvolume_create(heketi_client_node, heketi_server_url, size,
if mode == 'cli':
cmd = ("heketi-cli -s %s blockvolume create --size=%s %s %s %s %s "
- "%s %s %s" % (heketi_server_url, str(size), auth_arg,
- clusters_arg, ha_arg, name_arg, name_arg,
- admin_key, user))
+ "%s %s %s %s" % (heketi_server_url, str(size), auth_arg,
+ clusters_arg, ha_arg, name_arg, name_arg,
+ admin_key, user, json_arg))
ret, out, _ = g.run(heketi_client_node, cmd)
if ret != 0:
@@ -2023,7 +2021,7 @@ def heketi_blockvolume_create(heketi_client_node, heketi_server_url, size,
admin_key = admin_key.split('t ')[-1] if admin_key else admin_key
conn = HeketiClient(heketi_server_url, user, admin_key)
block_volume_create_info = conn.blockvolume_create(**kwargs)
- except:
+ except Exception:
g.log.error("Failed to do blockvolume create using heketi")
return False
@@ -2079,7 +2077,7 @@ def heketi_blockvolume_delete(heketi_client_node, heketi_server_url,
admin_key = admin_key.split('t ')[-1] if admin_key else admin_key
conn = HeketiClient(heketi_server_url, user, admin_key)
return conn.blockvolume_delete(block_volume_id)
- except:
+ except Exception:
g.log.error("Failed to do blockvolume delete using heketi")
return False
@@ -2134,7 +2132,7 @@ def heketi_blockvolume_list(heketi_client_node, heketi_server_url, mode='cli',
admin_key = admin_key.split('t ')[-1] if admin_key else admin_key
conn = HeketiClient(heketi_server_url, user, admin_key)
return conn.blockvolume_list()
- except:
+ except Exception:
g.log.error("Failed to do blockvolume list using heketi")
return False
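
Most of the heketi_ops.py changes above replace bare "except:" clauses with
"except Exception:". A minimal illustration of why that matters (not part of
the patch): a bare except also swallows BaseException subclasses such as
SystemExit and KeyboardInterrupt, which should normally propagate.

    def swallow_everything():
        try:
            raise SystemExit(1)
        except:            # bare except: catches SystemExit as well
            return "swallowed"

    def swallow_errors_only():
        try:
            raise SystemExit(1)
        except Exception:  # SystemExit is not an Exception subclass
            return "swallowed"

    print(swallow_everything())    # prints "swallowed"
    # swallow_errors_only()        # would let SystemExit propagate and exit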
diff --git a/cns-libs/cnslibs/common/openshift_ops.py b/cns-libs/cnslibs/common/openshift_ops.py
index f6d73992..830dc215 100644
--- a/cns-libs/cnslibs/common/openshift_ops.py
+++ b/cns-libs/cnslibs/common/openshift_ops.py
@@ -10,14 +10,20 @@ import types
from glusto.core import Glusto as g
from glustolibs.gluster import volume_ops
+from glustolibs.gluster.brick_libs import (
+ are_bricks_online,
+ get_all_bricks,
+ get_online_bricks_list)
import mock
import yaml
from cnslibs.common import command
from cnslibs.common import exceptions
+from cnslibs.common import podcmd
from cnslibs.common import utils
from cnslibs.common import waiter
+
PODS_WIDE_RE = re.compile(
'(\S+)\s+(\S+)\s+(\w+)\s+(\d+)\s+(\S+)\s+(\S+)\s+(\S+).*\n')
@@ -365,10 +371,10 @@ def oc_create_app_dc_with_io(
"-ec",
"trap \"rm -f /mnt/random-data-$HOSTNAME.log ; exit 0\" SIGTERM; "
"while true; do "
- " (mount | grep '/mnt') && "
- " (head -c %s < /dev/urandom > /mnt/random-data-$HOSTNAME.log) || "
- " exit 1; "
- " sleep 1 ; "
+ " (mount | grep '/mnt') && "
+ " (head -c %s < /dev/urandom > /mnt/random-data-$HOSTNAME.log) ||"
+ " exit 1; "
+ " sleep 1 ; "
"done" % space_to_use,
],
"livenessProbe": {
@@ -441,23 +447,27 @@ def oc_create_tiny_pod_with_volume(hostname, pvc_name, pod_name_prefix='',
return pod_name
-def oc_delete(ocp_node, rtype, name):
+def oc_delete(ocp_node, rtype, name, raise_on_absence=True):
"""Delete an OCP resource by name.
Args:
ocp_node (str): Node on which the ocp command will run.
rtype (str): Name of the resource type (pod, storageClass, etc).
name (str): Name of the resource to delete.
- Raises:
- AssertionError: Raised when resource fails to create.
+        raise_on_absence (bool): if True, raise an exception
+                                 when the resource is absent;
+                                 otherwise just return.
+                                 Default value: True
"""
+ if not oc_get_yaml(ocp_node, rtype, name,
+ raise_on_error=raise_on_absence):
+ return
ret, out, err = g.run(ocp_node, ['oc', 'delete', rtype, name])
if ret != 0:
g.log.error('Failed to delete resource: %s, %s: %r; %r',
rtype, name, out, err)
raise AssertionError('failed to delete resource: %r; %r' % (out, err))
g.log.info('Deleted resource: %r %r', rtype, name)
- return
def oc_get_yaml(ocp_node, rtype, name=None, raise_on_error=True):
@@ -901,3 +911,280 @@ def verify_pvc_status_is_bound(hostname, pvc_name, timeout=120, wait_step=3):
"to reach the 'Bound' status." % (timeout, pvc_name))
g.log.error(msg)
raise AssertionError(msg)
+
+
+def oc_version(hostname):
+ '''
+    Get the OpenShift version from the 'oc version' command
+ Args:
+ hostname (str): Node on which the ocp command will run.
+ Returns:
+ str : oc version if successful,
+ otherwise raise Exception
+ '''
+ cmd = "oc version | grep openshift | cut -d ' ' -f 2"
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret != 0:
+ msg = ("failed to get oc version err %s; out %s" % (err, out))
+ g.log.error(msg)
+ raise AssertionError(msg)
+ if not out:
+ error_msg = "Empty string found for oc version"
+ g.log.error(error_msg)
+ raise exceptions.ExecutionError(error_msg)
+
+ return out.strip()
+
+
+def resize_pvc(hostname, pvc_name, size):
+ '''
+ Resize PVC
+ Args:
+ hostname (str): hostname on which we want
+ to edit the pvc status
+        pvc_name (str): name of the pvc whose
+                        storage capacity is to be edited
+        size (int): new size of the pvc in Gi
+ Returns:
+ bool: True, if successful
+ otherwise raise Exception
+ '''
+ cmd = ("oc patch pvc %s "
+ "-p='{\"spec\": {\"resources\": {\"requests\": "
+ "{\"storage\": \"%dGi\"}}}}'" % (pvc_name, size))
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret != 0:
+ error_msg = ("failed to execute cmd %s "
+ "out- %s err %s" % (cmd, out, err))
+ g.log.error(error_msg)
+ raise exceptions.ExecutionError(error_msg)
+
+    g.log.info("successfully edited storage capacity "
+               "of pvc %s. out: %s" % (pvc_name, out))
+ return True
+
+
+def verify_pvc_size(hostname, pvc_name, size,
+ timeout=120, wait_step=5):
+ '''
+ Verify size of PVC
+ Args:
+ hostname (str): hostname on which we want
+ to verify the size of pvc
+        pvc_name (str): name of the pvc whose
+                        size is to be verified
+        size (int): expected size of the pvc in Gi
+        timeout (int): timeout value; the size is re-checked
+                       every wait_step seconds until the
+                       timeout expires,
+                       default value is 120 sec
+        wait_step (int): wait step,
+                         default value is 5 sec
+ Returns:
+ bool: True, if successful
+ otherwise raise Exception
+ '''
+ cmd = ("oc get pvc %s -o=custom-columns="
+ ":.spec.resources.requests.storage,"
+ ":.status.capacity.storage" % pvc_name)
+ for w in waiter.Waiter(timeout, wait_step):
+ sizes = command.cmd_run(cmd, hostname=hostname).split()
+ spec_size = int(sizes[0].replace("Gi", ""))
+ actual_size = int(sizes[1].replace("Gi", ""))
+ if spec_size == actual_size == size:
+ g.log.info("verification of pvc %s of size %d "
+ "successful" % (pvc_name, size))
+ return True
+ else:
+ g.log.info("sleeping for %s sec" % wait_step)
+ continue
+
+    err_msg = ("verification of pvc %s size of %d failed - "
+               "spec_size: %d actual_size: %d" % (
+                   pvc_name, size, spec_size, actual_size))
+ g.log.error(err_msg)
+ raise AssertionError(err_msg)
+
+
+def verify_pv_size(hostname, pv_name, size,
+ timeout=120, wait_step=5):
+ '''
+ Verify size of PV
+ Args:
+ hostname (str): hostname on which we want
+ to verify the size of pv
+ pv_name (str): pv_name for which we
+ verify its size
+ size (int): size of pv
+ timeout (int): timeout value,
+ verifies the size after wait_step
+ value till timeout
+ default value is 120 sec
+ wait_step( int): wait step,
+ default value is 5 sec
+ Returns:
+ bool: True, if successful
+ otherwise raise Exception
+ '''
+ cmd = ("oc get pv %s -o=custom-columns=:."
+ "spec.capacity.storage" % pv_name)
+ for w in waiter.Waiter(timeout, wait_step):
+ pv_size = command.cmd_run(cmd, hostname=hostname).split()[0]
+ pv_size = int(pv_size.replace("Gi", ""))
+ if pv_size == size:
+ g.log.info("verification of pv %s of size %d "
+ "successful" % (pv_name, size))
+ return True
+ else:
+ g.log.info("sleeping for %s sec" % wait_step)
+ continue
+
+    err_msg = ("verification of pv %s size of %d failed - "
+               "pv_size: %d" % (pv_name, size, pv_size))
+ g.log.error(err_msg)
+ raise AssertionError(err_msg)
+
+
+def get_pv_name_from_pvc(hostname, pvc_name):
+ '''
+    Returns the PV name corresponding to the given PVC
+ Args:
+ hostname (str): hostname on which we want
+ to find pv name
+ pvc_name (str): pvc_name for which we
+ want to find corresponding
+ pv name
+ Returns:
+ pv_name (str): pv name if successful,
+ otherwise raise Exception
+ '''
+ cmd = ("oc get pvc %s -o=custom-columns=:."
+ "spec.volumeName" % pvc_name)
+ pv_name = command.cmd_run(cmd, hostname=hostname)
+ g.log.info("pv name is %s for pvc %s" % (
+ pv_name, pvc_name))
+
+ return pv_name
+
+
+def get_vol_names_from_pv(hostname, pv_name):
+ '''
+ Returns the heketi and gluster
+ vol names of the corresponding PV
+ Args:
+ hostname (str): hostname on which we want
+ to find vol names
+ pv_name (str): pv_name for which we
+ want to find corresponding
+ vol names
+ Returns:
+        volname (dict): dict of volume names if successful,
+                        {"heketi_vol": heketi_vol_name,
+                         "gluster_vol": gluster_vol_name},
+                        e.g. {"heketi_vol": "xxxx",
+                              "gluster_vol": "vol_xxxx"},
+                        otherwise raise Exception
+ '''
+ vol_dict = {}
+ cmd = ("oc get pv %s -o=custom-columns="
+ ":.metadata.annotations."
+ "'gluster\.kubernetes\.io\/heketi\-volume\-id',"
+ ":.spec.glusterfs.path"
+ % pv_name)
+ vol_list = command.cmd_run(cmd, hostname=hostname).split()
+ vol_dict = {"heketi_vol": vol_list[0],
+ "gluster_vol": vol_list[1]}
+ g.log.info("gluster vol name is %s and heketi vol name"
+ " is %s for pv %s"
+ % (vol_list[1], vol_list[0], pv_name))
+ return vol_dict
+
+
+@podcmd.GlustoPod()
+def verify_brick_count_gluster_vol(hostname, brick_count,
+ gluster_vol):
+ '''
+ Verify brick count for gluster volume
+ Args:
+ hostname (str): hostname on which we want
+ to check brick count
+ brick_count (int): integer value to verify
+ gluster_vol (str): gluster vol name
+ Returns:
+ bool: True, if successful
+ otherwise raise Exception
+ '''
+ gluster_pod = get_ocp_gluster_pod_names(hostname)[1]
+ p = podcmd.Pod(hostname, gluster_pod)
+ out = get_online_bricks_list(p, gluster_vol)
+ if brick_count == len(out):
+ g.log.info("successfully verified brick count %s "
+ "for vol %s" % (brick_count, gluster_vol))
+ return True
+    err_msg = ("verification of brick count %s for vol %s "
+               "failed, count found %s" % (
+                   brick_count, gluster_vol, len(out)))
+ raise AssertionError(err_msg)
+
+
+@podcmd.GlustoPod()
+def verify_brick_status_online_gluster_vol(hostname,
+ gluster_vol):
+ '''
+ Verify if all the bricks are online for the
+ gluster volume
+ Args:
+ hostname (str): hostname on which we want
+ to check brick status
+ gluster_vol (str): gluster vol name
+ Returns:
+ bool: True, if successful
+ otherwise raise Exception
+ '''
+ gluster_pod = get_ocp_gluster_pod_names(hostname)[1]
+ p = podcmd.Pod(hostname, gluster_pod)
+ brick_list = get_all_bricks(p, gluster_vol)
+ if brick_list is None:
+ error_msg = ("failed to get brick list for vol"
+ " %s" % gluster_vol)
+ g.log.error(error_msg)
+ raise exceptions.ExecutionError(error_msg)
+ out = are_bricks_online(p, gluster_vol, brick_list)
+ if out:
+ g.log.info("verification of brick status as online"
+ " for gluster vol %s successful"
+ % gluster_vol)
+ return True
+ error_msg = ("verification of brick status as online"
+ " for gluster vol %s failed" % gluster_vol)
+
+ g.log.error(error_msg)
+ raise exceptions.ExecutionError(error_msg)
+
+
+def verify_gluster_vol_for_pvc(hostname, pvc_name):
+ '''
+ Verify gluster volume has been created for
+ the corresponding PVC
+ Also checks if all the bricks of that gluster
+ volume are online
+ Args:
+ hostname (str): hostname on which we want
+ to find gluster vol
+ pvc_name (str): pvc_name for which we
+ want to find corresponding
+ gluster vol
+ Returns:
+ bool: True if successful
+ otherwise raise Exception
+ '''
+ verify_pvc_status_is_bound(hostname, pvc_name)
+ pv_name = get_pv_name_from_pvc(hostname, pvc_name)
+ vol_dict = get_vol_names_from_pv(hostname, pv_name)
+ gluster_vol = vol_dict["gluster_vol"]
+ verify_brick_status_online_gluster_vol(hostname,
+ gluster_vol)
+
+    g.log.info("verification of gluster vol %s for pvc %s is "
+               "successful" % (gluster_vol, pvc_name))
+ return True
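
Taken together, the new openshift_ops helpers support a simple
resize-and-verify flow. A usage sketch (the master hostname and PVC name below
are hypothetical):

    from cnslibs.common import openshift_ops

    master = "ocp-master.example.com"   # hypothetical master hostname
    pvc = "pvc-example"                 # hypothetical PVC name

    openshift_ops.resize_pvc(master, pvc, 2)       # patch the PVC spec to 2Gi
    openshift_ops.verify_pvc_size(master, pvc, 2)  # wait until spec and status report 2Gi
    pv = openshift_ops.get_pv_name_from_pvc(master, pvc)
    openshift_ops.verify_pv_size(master, pv, 2)    # confirm the bound PV grew as well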
diff --git a/cns-libs/cnslibs/common/podcmd.py b/cns-libs/cnslibs/common/podcmd.py
index f8c89d5b..0613c206 100644
--- a/cns-libs/cnslibs/common/podcmd.py
+++ b/cns-libs/cnslibs/common/podcmd.py
@@ -47,11 +47,10 @@ lifetime of a function that addresses both hosts and pods.
from collections import namedtuple
from functools import partial, wraps
+import types
from glusto.core import Glusto as g
-from cnslibs.common.openshift_ops import oc_rsh
-
# Define a namedtuple that allows us to address pods instead of just
# hosts,
Pod = namedtuple('Pod', 'node podname')
@@ -80,8 +79,15 @@ def run(target, command, log_level=None, orig_run=g.run):
# definition time in order to capture the method before
# any additional monkeypatching by other code
if isinstance(target, Pod):
- return oc_rsh(target.node, target.podname, command,
- log_level=log_level)
+ prefix = ['oc', 'rsh', target.podname]
+ if isinstance(command, types.StringTypes):
+ cmd = ' '.join(prefix + [command])
+ else:
+ cmd = prefix + command
+
+ # unpack the tuple to make sure our return value exactly matches
+ # our docstring
+ return g.run(target.node, cmd, log_level=log_level)
else:
return orig_run(target, command, log_level=log_level)
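
With this change podcmd no longer imports openshift_ops; run() builds the
'oc rsh' command itself. A minimal sketch of the dispatch (node and pod names
are hypothetical):

    from cnslibs.common import podcmd

    # Pod targets are wrapped in "oc rsh <podname> <command>" and run on the
    # pod's node; plain string targets fall through to the original g.run.
    pod = podcmd.Pod('ocp-node.example.com', 'glusterfs-storage-abc12')
    ret, out, err = podcmd.run(pod, 'gluster volume list')
    ret, out, err = podcmd.run('ocp-node.example.com', 'oc get pods')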
diff --git a/cns-libs/cnslibs/common/waiter.py b/cns-libs/cnslibs/common/waiter.py
index 89a264df..0a6e72b5 100644
--- a/cns-libs/cnslibs/common/waiter.py
+++ b/cns-libs/cnslibs/common/waiter.py
@@ -7,6 +7,7 @@ It aims to avoid having to write boilerplate code comparing times.
import time
+
class Waiter(object):
"""A wait-retry loop as iterable.
This object abstracts away the wait logic allowing functions
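
The new verify_* helpers in openshift_ops.py rely on this Waiter class. A
sketch of the wait-retry idiom they use (assuming, as the verify_* code above
does, that iteration simply stops once the timeout elapses):

    from cnslibs.common import waiter

    def condition_is_met():
        return False                     # hypothetical check, stubbed out

    for w in waiter.Waiter(120, 5):      # timeout=120s, retry every 5s
        if condition_is_met():
            break
    else:
        # the loop exhausted the timeout without hitting the break
        raise AssertionError("timed out waiting for the condition")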
diff --git a/cns-libs/setup.py b/cns-libs/setup.py
index bc376b58..1a4fe293 100644
--- a/cns-libs/setup.py
+++ b/cns-libs/setup.py
@@ -23,5 +23,7 @@ setup(
'Topic :: Software Development :: Testing'
],
install_requires=['glusto', 'ddt', 'mock', 'rtyaml'],
- dependency_links=['http://github.com/loadtheaccumulator/glusto/tarball/master#egg=glusto'],
+ dependency_links=[
+ 'http://github.com/loadtheaccumulator/glusto/tarball/master#egg=glusto'
+ ],
)
diff --git a/cns-tools/setup.py b/cns-tools/setup.py
index 2a803c28..f29e6af8 100644
--- a/cns-tools/setup.py
+++ b/cns-tools/setup.py
@@ -22,5 +22,7 @@ setup(
'Topic :: Software Development :: Testing'
],
install_requires=['glusto'],
- dependency_links=['http://github.com/loadtheaccumulator/glusto/tarball/master#egg=glusto'],
+ dependency_links=[
+ 'http://github.com/loadtheaccumulator/glusto/tarball/master#egg=glusto'
+ ],
)
diff --git a/tests/functional/common/heketi/heketi_tests/test_check_entry.py b/tests/functional/common/heketi/heketi_tests/test_check_entry.py
index 16fbe085..47a0b3f2 100644
--- a/tests/functional/common/heketi/heketi_tests/test_check_entry.py
+++ b/tests/functional/common/heketi/heketi_tests/test_check_entry.py
@@ -1,13 +1,12 @@
-#!/usr/bin/python
-
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ConfigError
+
from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
from cnslibs.common.heketi_ops import (heketi_volume_create,
heketi_volume_list,
heketi_volume_delete)
-from cnslibs.common import heketi_ops, podcmd
-from cnslibs.common.openshift_ops import oc_rsh, get_ocp_gluster_pod_names
+from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names
+from cnslibs.common import podcmd
class TestHeketiVolume(HeketiClientSetupBaseClass):
@@ -51,11 +50,13 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
gluster_pod = get_ocp_gluster_pod_names(
self.heketi_client_node)[1]
- cmd = "oc rsync "+ gluster_pod +":/var/lib/heketi/fstab /tmp"
+ cmd = "oc rsync " + gluster_pod + ":/var/lib/heketi/fstab /tmp"
out = g.run(self.heketi_client_node, cmd)
self.assertTrue(out, ("Failed to copy the file"))
g.log.info("Copied the file")
- out = g.run_local("scp -r root@" +self.heketi_client_node+":/tmp/fstab /tmp/file.txt")
+ out = g.run_local(
+ "scp -r root@%s:/tmp/fstab "
+ "/tmp/file.txt" % self.heketi_client_node)
self.assertTrue(out, ("Failed to copy a file to /tmp/file.txt"))
g.log.info("Successfully copied to /tmp/file.txt")
out = g.run_local("ls /tmp")
@@ -67,7 +68,8 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
# Check if the brick is mounted
for i in path:
string_to_search = i
- rcode, rout, rerr = g.run_local('grep %s %s' % (string_to_search, "/tmp/file.txt"))
+ rcode, rout, rerr = g.run_local(
+ 'grep %s %s' % (string_to_search, "/tmp/file.txt"))
if rcode == 0:
g.log.info("Brick %s is mounted" % i)
datafile.close()
@@ -99,11 +101,12 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
gluster_pod = get_ocp_gluster_pod_names(
self.heketi_client_node)[0]
- cmd = "oc rsync "+ gluster_pod +":/var/lib/heketi/fstab /"
+ cmd = "oc rsync " + gluster_pod + ":/var/lib/heketi/fstab /"
out = g.run(self.heketi_client_node, cmd)
self.assertTrue(out, ("Failed to copy the file"))
g.log.info("Copied the file")
- out = g.run_local("scp -r root@" +self.heketi_client_node+":/fstab /tmp/newfile.txt")
+ out = g.run_local(
+ "scp -r root@%s:/fstab /tmp/newfile.txt" % self.heketi_client_node)
self.assertTrue(out, ("Failed to copy to the file newfile.txt"))
g.log.info("Successfully copied to the file newfile.txt")
out = g.run_local("ls /tmp")
@@ -115,7 +118,8 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
# Check if the brick is mounted
for i in path:
string_to_search = i
- rcode, rout, rerr = g.run_local('grep %s %s' % (string_to_search, "/tmp/newfile.txt"))
+ rcode, rout, rerr = g.run_local(
+ 'grep %s %s' % (string_to_search, "/tmp/newfile.txt"))
if rcode == 0:
raise ConfigError("Particular %s brick entry is found" % i)
datafile.close()
diff --git a/tests/functional/common/heketi/heketi_tests/test_create_heketi_volume_size_60.py b/tests/functional/common/heketi/heketi_tests/test_create_heketi_volume_size_60.py
index d871be30..29b39513 100644
--- a/tests/functional/common/heketi/heketi_tests/test_create_heketi_volume_size_60.py
+++ b/tests/functional/common/heketi/heketi_tests/test_create_heketi_volume_size_60.py
@@ -1,21 +1,19 @@
-#!/usr/bin/python
-
from __future__ import division
import math
from glusto.core import Glusto as g
-from glustolibs.gluster.exceptions import ConfigError
from glustolibs.gluster.volume_ops import get_volume_list, get_volume_info
+
+from cnslibs.common.exceptions import ExecutionError
from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
from cnslibs.common.heketi_ops import (heketi_node_list,
heketi_node_info,
heketi_volume_create,
heketi_volume_list,
heketi_volume_info,
- heketi_volume_delete,
- heketi_topology_info)
-from cnslibs.common import heketi_ops, podcmd
-from cnslibs.common.openshift_ops import oc_rsh, get_ocp_gluster_pod_names
+ heketi_volume_delete)
+from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names
+from cnslibs.common import podcmd
class TestHeketiVolume(HeketiClientSetupBaseClass):
@@ -25,7 +23,6 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
Get free space in each devices
"""
free_spaces = []
- device_list = []
heketi_node_id_list = heketi_node_list(
self.heketi_client_node, self.heketi_server_url)
for node_id in heketi_node_id_list:
@@ -36,10 +33,7 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
for device in node_info_dict["devices"]:
total_free_space += device["storage"]["free"]
free_spaces.append(total_free_space)
- min_free_space = min(free_spaces)
total_free_space = sum(free_spaces)/(1024**2)
- optimum_space = min_free_space / (1024 * 1024 * 10)
- free_space = int(math.floor(optimum_space))
total_free_space = int(math.floor(total_free_space))
return total_free_space, free_spaces
@@ -70,7 +64,9 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
mount_node = (out["mount"]["glusterfs"]
["device"].strip().split(":")[0])
hosts.append(mount_node)
- backup_volfile_server_list = (out["mount"]["glusterfs"]["options"] ["backup-volfile-servers"].strip().split(","))
+ backup_volfile_server_list = (
+ out["mount"]["glusterfs"]["options"][
+ "backup-volfile-servers"].strip().split(","))
for backup_volfile_server in backup_volfile_server_list:
hosts.append(backup_volfile_server)
for gluster_server in g.config["gluster_servers"].keys():
@@ -81,9 +77,9 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
# Retrieve heketi volume info
g.log.info("Retrieving heketi volume info")
- out = heketi_ops.heketi_volume_info(self.heketi_client_node,
- self.heketi_server_url,
- volume_id, json=True)
+ out = heketi_volume_info(
+ self.heketi_client_node, self.heketi_server_url, volume_id,
+ json=True)
self.assertTrue(out, ("Failed to get heketi volume info"))
g.log.info("Successfully got the heketi volume info")
name = out["name"]
@@ -187,9 +183,11 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
# Compare the free size before and after deleting volume
g.log.info("Comparing the free space before and after"
" deleting volume")
- self.assertTrue(free_space_after_creating_vol < free_space_after_deleting_vol)
+ self.assertTrue(
+ free_space_after_creating_vol < free_space_after_deleting_vol)
g.log.info("Volume successfully deleted and space is"
" reallocated. Free space after creating"
" volume %s, Free space after deleting"
- " volume %s" % ((free_space_after_creating_vol),
- (free_space_after_deleting_vol)))
+ " volume %s" % (
+ free_space_after_creating_vol,
+ free_space_after_deleting_vol))
diff --git a/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py b/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py
index 38f6c4e9..c28f455b 100644
--- a/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py
+++ b/tests/functional/common/heketi/heketi_tests/test_heketi_create_volume.py
@@ -1,19 +1,21 @@
-#!/usr/bin/python
-
from glustolibs.gluster.exceptions import ExecutionError, ConfigError
from glusto.core import Glusto as g
from glustolibs.gluster.volume_ops import get_volume_list, get_volume_info
+
from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
from cnslibs.common.heketi_ops import (heketi_volume_create,
heketi_volume_list,
heketi_volume_info,
- heketi_volume_delete,
+ heketi_blockvolume_create,
+ heketi_blockvolume_delete,
heketi_cluster_list,
heketi_cluster_delete,
+ heketi_node_info,
heketi_node_list,
heketi_node_delete)
-from cnslibs.common import heketi_ops, podcmd
-from cnslibs.common.openshift_ops import oc_rsh, get_ocp_gluster_pod_names
+from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names
+from cnslibs.common import podcmd
+
class TestHeketiVolume(HeketiClientSetupBaseClass):
"""
@@ -39,7 +41,6 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
g.log.info("Heketi volume successfully created" % out)
volume_id = out["bricks"][0]["volume"]
self.addCleanup(self.delete_volumes, volume_id)
- name = out["name"]
g.log.info("List heketi volumes")
volumes = heketi_volume_list(self.heketi_client_node,
@@ -85,9 +86,9 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
self.addCleanup(self.delete_volumes, volume_id)
g.log.info("Retrieving heketi volume info")
- out = heketi_ops.heketi_volume_info(self.heketi_client_node,
- self.heketi_server_url,
- volume_id, json=True)
+ out = heketi_volume_info(
+ self.heketi_client_node, self.heketi_server_url, volume_id,
+ json=True)
self.assertTrue(out, ("Failed to get heketi volume info"))
g.log.info("Successfully got the heketi volume info")
name = out["name"]
@@ -113,7 +114,7 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
volumes = heketi_volume_list(self.heketi_client_node,
self.heketi_server_url,
json=True)
- if (len(volumes["volumes"])== 0):
+ if (len(volumes["volumes"]) == 0):
g.log.info("Creating heketi volume")
out = heketi_volume_create(self.heketi_client_node,
self.heketi_server_url,
@@ -168,7 +169,7 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
g.log.info("Successfully got the list of nodes")
for node_id in heketi_node_id_list:
g.log.info("Retrieve the node info")
- node_info_dict = heketi_ops.heketi_node_info(
+ node_info_dict = heketi_node_info(
self.heketi_client_node, self.heketi_server_url,
node_id, json=True)
if not(node_info_dict["devices"][1]["storage"]["used"]):
@@ -196,3 +197,39 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
self.heketi_server_url)
self.assertTrue(node_list, ("Failed to list heketi nodes"))
g.log.info("Successfully got the list of nodes")
+
+ def test_blockvolume_create_no_free_space(self):
+ """Test case CNS-550"""
+
+ # Create first small blockvolume
+ blockvol1 = heketi_blockvolume_create(
+ self.heketi_client_node, self.heketi_server_url, 1, json=True)
+ self.assertTrue(blockvol1, "Failed to create block volume.")
+ self.addCleanup(
+ heketi_blockvolume_delete, self.heketi_client_node,
+ self.heketi_server_url, blockvol1['id'])
+
+ # Get info about block hosting volume available space
+ file_volumes = heketi_volume_list(
+ self.heketi_client_node, self.heketi_server_url, json=True)
+ self.assertTrue(file_volumes)
+ max_freesize = 0
+ for vol_id in file_volumes["volumes"]:
+ vol = heketi_volume_info(
+ self.heketi_client_node, self.heketi_server_url,
+ vol_id, json=True)
+ current_freesize = vol.get("blockinfo", {}).get("freesize", 0)
+ if current_freesize > max_freesize:
+ max_freesize = current_freesize
+ self.assertGreater(max_freesize, 0)
+
+ # Try to create blockvolume with size bigger than available
+ too_big_vol_size = max_freesize + 1
+ blockvol2 = heketi_blockvolume_create(
+ self.heketi_client_node, self.heketi_server_url,
+ too_big_vol_size, json=True)
+ if blockvol2 and blockvol2.get('id'):
+ self.addCleanup(
+ heketi_blockvolume_delete, self.heketi_client_node,
+ self.heketi_server_url, blockvol2['id'])
+ self.assertFalse(blockvol2, 'Volume unexpectedly was created')
diff --git a/tests/functional/common/heketi/heketi_tests/test_node_enable_disable.py b/tests/functional/common/heketi/heketi_tests/test_node_enable_disable.py
new file mode 100644
index 00000000..bce565c4
--- /dev/null
+++ b/tests/functional/common/heketi/heketi_tests/test_node_enable_disable.py
@@ -0,0 +1,140 @@
+"""Test cases to disable and enable node in heketi."""
+import json
+
+from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
+from cnslibs.common.heketi_ops import (heketi_node_enable,
+ heketi_node_info,
+ heketi_node_disable,
+ heketi_node_list,
+ heketi_volume_create)
+from glusto.core import Glusto as g
+
+
+class TestHeketiNodeState(HeketiClientSetupBaseClass):
+ """Test node enable and disable functionality."""
+
+ def enable_node(self, node_id):
+ """
+ Enable node through heketi-cli.
+
+ :param node_id: str node ID
+ """
+ out = heketi_node_enable(self.heketi_client_node,
+ self.heketi_server_url,
+ node_id)
+
+ self.assertNotEqual(out, False,
+ "Failed to enable node of"
+ " id %s" % node_id)
+
+ def disable_node(self, node_id):
+ """
+ Disable node through heketi-cli.
+
+ :param node_id: str node ID
+ """
+ out = heketi_node_disable(self.heketi_client_node,
+ self.heketi_server_url,
+ node_id)
+
+ self.assertNotEqual(out, False,
+ "Failed to disable node of"
+ " id %s" % node_id)
+
+ def get_node_info(self, node_id):
+ """
+ Get node information from node_id.
+
+ :param node_id: str node ID
+        :return node_info: dict node information
+ """
+ node_info = heketi_node_info(
+ self.heketi_client_node, self.heketi_server_url,
+ node_id, json=True)
+ self.assertNotEqual(node_info, False,
+ "Node info on %s failed" % node_id)
+ return node_info
+
+ def get_online_nodes(self, node_list):
+ """
+ Get online nodes information from node_list.
+
+ :param node_list: list of node ID's
+ :return: list node information of online nodes
+ """
+ online_hosts_info = []
+
+ for node in node_list:
+ node_info = self.get_node_info(node)
+ if node_info["state"] == "online":
+ online_hosts_info.append(node_info)
+
+ return online_hosts_info
+
+ def test_node_state(self):
+ """
+ Test node enable and disable functionality.
+
+        With 4 gluster servers, if we disable 1 of the 4 nodes in heketi
+        and create a volume, the volume creation should be successful.
+
+        If we disable 2 of the 4 nodes via heketi-cli and create a volume,
+        the volume creation should fail.
+
+        If we enable back one gluster server and create a volume,
+        the volume creation should be successful again.
+ """
+ g.log.info("Disable node in heketi")
+ node_list = heketi_node_list(self.heketi_client_node,
+ self.heketi_server_url)
+ self.assertTrue(node_list, "Failed to list heketi nodes")
+ g.log.info("Successfully got the list of nodes")
+ online_hosts = self.get_online_nodes(node_list)
+
+ if len(online_hosts) < 3:
+ raise self.skipTest(
+ "This test can run only if online hosts are more "
+ "than 2")
+ # if we have n nodes, disable n-3 nodes
+ for node_info in online_hosts[3:]:
+ node_id = node_info["id"]
+ g.log.info("going to disable node id %s", node_id)
+ self.disable_node(node_id)
+ self.addCleanup(self.enable_node, node_id)
+
+ vol_size = 1
+ # create volume when 3 nodes are online
+ vol_info = heketi_volume_create(self.heketi_client_node,
+ self.heketi_server_url, vol_size,
+ json=True)
+ self.assertTrue(vol_info, (
+ "Failed to create heketi volume of size %d" % vol_size))
+ self.addCleanup(self.delete_volumes, vol_info['id'])
+
+ node_id = online_hosts[0]['id']
+ g.log.info("going to disable node id %s", node_id)
+ self.disable_node(node_id)
+ self.addCleanup(self.enable_node, node_id)
+
+ # try to create a volume, volume creation should fail
+ ret, out, err = heketi_volume_create(
+ self.heketi_client_node, self.heketi_server_url,
+ vol_size, raw_cli_output=True)
+ if ret == 0:
+ out_json = json.loads(out)
+ self.addCleanup(self.delete_volumes, out_json["id"])
+ self.assertNotEqual(ret, 0,
+ ("Volume creation did not fail ret- %s "
+ "out- %s err- %s" % (ret, out, err)))
+
+ g.log.info("Volume creation failed as expected, err- %s", err)
+ # enable node
+ self.enable_node(node_id)
+
+ # create volume when node is enabled
+ vol_info = heketi_volume_create(self.heketi_client_node,
+ self.heketi_server_url, vol_size,
+ json=True)
+ self.assertTrue(vol_info, (
+ "Failed to create heketi volume of size %d" % vol_size))
+ self.addCleanup(self.delete_volumes, vol_info['id'])
diff --git a/tests/functional/common/heketi/heketi_tests/test_node_info.py b/tests/functional/common/heketi/heketi_tests/test_node_info.py
index 81462906..26ac56f7 100644
--- a/tests/functional/common/heketi/heketi_tests/test_node_info.py
+++ b/tests/functional/common/heketi/heketi_tests/test_node_info.py
@@ -1,13 +1,10 @@
-#!/usr/bin/python
-
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
-from glustolibs.gluster.peer_ops import (get_pool_list)
+from glustolibs.gluster.peer_ops import get_pool_list
+
from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
-from cnslibs.common.heketi_ops import (heketi_node_info,
- heketi_node_list)
from cnslibs.common import heketi_ops, podcmd
-from cnslibs.common.openshift_ops import oc_rsh, get_ocp_gluster_pod_names
+from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names
class TestHeketiVolume(HeketiClientSetupBaseClass):
@@ -25,7 +22,7 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
# List all list
ip = []
g.log.info("Listing the node id")
- heketi_node_id_list = heketi_node_list(
+ heketi_node_id_list = heketi_ops.heketi_node_list(
self.heketi_client_node, self.heketi_server_url)
g.log.info("Successfully listed the node")
@@ -61,8 +58,9 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
hostname.append(pool["hostname"])
if (len(heketi_node_id_list) != len(list_of_pools)):
- raise ExecutionError("Heketi volume list %s is not equal"
- " to gluster volume list %s" % ((ip), (hostname)))
+ raise ExecutionError(
+ "Heketi volume list %s is not equal "
+ "to gluster volume list %s" % ((ip), (hostname)))
g.log.info("The node IP's from node info and list"
" is : %s/n and pool list from gluster"
" pods/nodes is %s" % ((ip), (hostname)))
@@ -74,7 +72,7 @@ class TestHeketiVolume(HeketiClientSetupBaseClass):
# List all list
g.log.info("Listing the node id")
- heketi_node_id_list = heketi_node_list(
+ heketi_node_id_list = heketi_ops.heketi_node_list(
self.heketi_client_node, self.heketi_server_url)
self.assertTrue(heketi_node_id_list, ("Node Id list is empty."))
g.log.info("Successfully listed the node")
diff --git a/tests/functional/common/heketi/test_volume_deletion.py b/tests/functional/common/heketi/test_volume_deletion.py
index be7b2e61..8b0adf98 100644
--- a/tests/functional/common/heketi/test_volume_deletion.py
+++ b/tests/functional/common/heketi/test_volume_deletion.py
@@ -1,10 +1,6 @@
from __future__ import division
-import math
-import unittest
-from glusto.core import Glusto as g
-
-from cnslibs.common.exceptions import ExecutionError, ConfigError
+from cnslibs.common.exceptions import ExecutionError
from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
from cnslibs.common import heketi_ops
@@ -110,4 +106,3 @@ class TestVolumeDeleteTestCases(HeketiClientSetupBaseClass):
if not heketidbexists:
raise ExecutionError(
"Warning: heketidbstorage doesn't exist in list of volumes")
-
diff --git a/tests/functional/common/heketi/test_volume_expansion_and_devices.py b/tests/functional/common/heketi/test_volume_expansion_and_devices.py
index e1d912c3..17ed5d9d 100644
--- a/tests/functional/common/heketi/test_volume_expansion_and_devices.py
+++ b/tests/functional/common/heketi/test_volume_expansion_and_devices.py
@@ -6,7 +6,7 @@ import unittest
from glusto.core import Glusto as g
from glustolibs.gluster import volume_ops, rebalance_ops
-from cnslibs.common.exceptions import ExecutionError, ConfigError
+from cnslibs.common.exceptions import ExecutionError
from cnslibs.common.heketi_libs import HeketiClientSetupBaseClass
from cnslibs.common.openshift_ops import get_ocp_gluster_pod_names
from cnslibs.common import heketi_ops, podcmd
@@ -696,4 +696,3 @@ class TestVolumeExpansionAndDevicesTestCases(HeketiClientSetupBaseClass):
free_space_after_deletion > free_space_after_expansion,
"Free space is not reclaimed after volume deletion of %s"
% volume_id)
-
diff --git a/tests/functional/common/heketi/test_volume_multi_req.py b/tests/functional/common/heketi/test_volume_multi_req.py
index 957476eb..244131e9 100644
--- a/tests/functional/common/heketi/test_volume_multi_req.py
+++ b/tests/functional/common/heketi/test_volume_multi_req.py
@@ -397,6 +397,7 @@ class TestVolumeMultiReq(HeketiClientSetupBaseClass):
# make this a condition
done = threading.Event()
short_tc_name = "volumes-concurrently"
+
def background_ops():
subname = make_unique_label(short_tc_name)
for i, w in enumerate(Waiter(60 * 60)):
@@ -414,6 +415,7 @@ class TestVolumeMultiReq(HeketiClientSetupBaseClass):
if done.is_set():
break
failures = []
+
def checked_background_ops():
try:
background_ops()
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
index 2e0268cd..76e1d317 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
@@ -65,7 +65,8 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
self.addCleanup(
wait_for_resource_absence, self.node, 'pvc', pvc_name)
for pvc_name in pvc_names:
- self.addCleanup(oc_delete, self.node, 'pvc', pvc_name)
+ self.addCleanup(oc_delete, self.node, 'pvc', pvc_name,
+ raise_on_absence=False)
# Wait for PVCs to be in bound state
for pvc_name in pvc_names:
@@ -262,3 +263,42 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
self.assertFalse(err, "Error output is not empty: \n%s" % err)
self.assertEqual(ret, 0, "Failed to exec '%s' command." % cmd)
self.assertTrue(out, "Command '%s' output is empty." % cmd)
+
+ def test_dynamic_provisioning_glusterblock_heketidown_pvc_delete(self):
+        """ Delete PVCs when heketi is down CNS-439 """
+
+ # Create storage class and secret objects
+ self._create_storage_class()
+
+ self.pvc_name_list = self._create_and_wait_for_pvcs(
+ 1, 'pvc-heketi-down', 3)
+
+ # remove heketi-pod
+ scale_dc_pod_amount_and_wait(self.ocp_client[0],
+ self.heketi_dc_name,
+ 0,
+ self.cns_project_name)
+ try:
+ # delete pvc
+ for pvc in self.pvc_name_list:
+ oc_delete(self.ocp_client[0], 'pvc', pvc)
+ for pvc in self.pvc_name_list:
+ with self.assertRaises(ExecutionError):
+ wait_for_resource_absence(
+ self.ocp_client[0], 'pvc', pvc,
+ interval=3, timeout=30)
+ finally:
+ # bring back heketi-pod
+ scale_dc_pod_amount_and_wait(self.ocp_client[0],
+ self.heketi_dc_name,
+ 1,
+ self.cns_project_name)
+
+ # verify PVC's are deleted
+ for pvc in self.pvc_name_list:
+ wait_for_resource_absence(self.ocp_client[0], 'pvc',
+ pvc,
+ interval=1, timeout=120)
+
+ # create a new PVC
+ self._create_and_wait_for_pvc()
diff --git a/tests/functional/common/provisioning/test_pv_resize.py b/tests/functional/common/provisioning/test_pv_resize.py
new file mode 100644
index 00000000..1e92efe9
--- /dev/null
+++ b/tests/functional/common/provisioning/test_pv_resize.py
@@ -0,0 +1,129 @@
+import ddt
+from cnslibs.common.cns_libs import (
+ enable_pvc_resize)
+from cnslibs.common.heketi_ops import (
+ verify_volume_name_prefix)
+from cnslibs.common.openshift_ops import (
+ resize_pvc,
+ get_pod_name_from_dc,
+ get_pv_name_from_pvc,
+ oc_create_app_dc_with_io,
+ oc_create_pvc,
+ oc_create_secret,
+ oc_create_sc,
+ oc_delete,
+ oc_rsh,
+ oc_version,
+ scale_dc_pod_amount_and_wait,
+ verify_pv_size,
+ verify_pvc_size,
+ verify_pvc_status_is_bound,
+ wait_for_pod_be_ready,
+ wait_for_resource_absence)
+from cnslibs.cns.cns_baseclass import CnsBaseClass
+from glusto.core import Glusto as g
+
+
+@ddt.ddt
+class TestPvResizeClass(CnsBaseClass):
+ '''
+    Class that contains test cases for
+    pv resize
+ '''
+ @classmethod
+ def setUpClass(cls):
+ super(TestPvResizeClass, cls).setUpClass()
+ version = oc_version(cls.ocp_master_node[0])
+ if any(v in version for v in ("3.6", "3.7", "3.8")):
+ return
+ enable_pvc_resize(cls.ocp_master_node[0])
+
+ def setUp(self):
+ super(TestPvResizeClass, self).setUp()
+ version = oc_version(self.ocp_master_node[0])
+ if any(v in version for v in ("3.6", "3.7", "3.8")):
+ msg = ("pv resize is not available in openshift "
+ "version %s " % version)
+ g.log.error(msg)
+ raise self.skipTest(msg)
+
+ def _create_storage_class(self, volname_prefix=False):
+ sc = self.cns_storage_class['storage_class1']
+ secret = self.cns_secret['secret1']
+
+ # create secret
+ self.secret_name = oc_create_secret(
+ self.ocp_master_node[0],
+ namespace=secret['namespace'],
+ data_key=self.heketi_cli_key,
+ secret_type=secret['type'])
+ self.addCleanup(
+ oc_delete, self.ocp_master_node[0], 'secret', self.secret_name)
+
+ # create storageclass
+ self.sc_name = oc_create_sc(
+ self.ocp_master_node[0], provisioner='kubernetes.io/glusterfs',
+ resturl=sc['resturl'], restuser=sc['restuser'],
+ secretnamespace=sc['secretnamespace'],
+ secretname=self.secret_name,
+ allow_volume_expansion=True,
+ **({"volumenameprefix": sc['volumenameprefix']}
+ if volname_prefix else {})
+ )
+ self.addCleanup(oc_delete, self.ocp_master_node[0], 'sc', self.sc_name)
+
+ return self.sc_name
+
+ @ddt.data(False, True)
+ def test_pv_resize_with_prefix_for_name(self, volname_prefix=False):
+ """testcases CNS-1037 and CNS-1038 """
+ dir_path = "/mnt/"
+ self._create_storage_class(volname_prefix)
+ node = self.ocp_master_node[0]
+
+ # Create PVC
+ pvc_name = oc_create_pvc(node, self.sc_name, pvc_size=1)
+ self.addCleanup(wait_for_resource_absence,
+ node, 'pvc', pvc_name)
+ self.addCleanup(oc_delete, node, 'pvc', pvc_name)
+ verify_pvc_status_is_bound(node, pvc_name)
+
+ # Create DC with POD and attached PVC to it.
+ dc_name = oc_create_app_dc_with_io(node, pvc_name)
+ self.addCleanup(oc_delete, node, 'dc', dc_name)
+ self.addCleanup(scale_dc_pod_amount_and_wait,
+ node, dc_name, 0)
+
+ pod_name = get_pod_name_from_dc(node, dc_name)
+ wait_for_pod_be_ready(node, pod_name)
+ if volname_prefix:
+ storage_class = self.cns_storage_class['storage_class1']
+ ret = verify_volume_name_prefix(node,
+ storage_class['volumenameprefix'],
+ storage_class['secretnamespace'],
+ pvc_name, self.heketi_server_url)
+ self.assertTrue(ret, "verify volnameprefix failed")
+ cmd = ("dd if=/dev/urandom of=%sfile "
+ "bs=100K count=1000") % dir_path
+ ret, out, err = oc_rsh(node, pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, node))
+ cmd = ("dd if=/dev/urandom of=%sfile2 "
+ "bs=100K count=10000") % dir_path
+ ret, out, err = oc_rsh(node, pod_name, cmd)
+        self.assertNotEqual(ret, 0, "This IO was expected to fail, "
+                            "command %s on %s" % (cmd, node))
+ pvc_size = 2
+ resize_pvc(node, pvc_name, pvc_size)
+ verify_pvc_size(node, pvc_name, pvc_size)
+ pv_name = get_pv_name_from_pvc(node, pvc_name)
+ verify_pv_size(node, pv_name, pvc_size)
+ oc_delete(node, 'pod', pod_name)
+ wait_for_resource_absence(node, 'pod', pod_name)
+ pod_name = get_pod_name_from_dc(node, dc_name)
+ wait_for_pod_be_ready(node, pod_name)
+ cmd = ("dd if=/dev/urandom of=%sfile_new "
+ "bs=50K count=10000") % dir_path
+ ret, out, err = oc_rsh(node, pod_name, cmd)
+ self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
+ cmd, node))