summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorValerii Ponomarov <vponomar@redhat.com>2019-09-14 01:34:17 +0530
committervponomar <vponomar@redhat.com>2019-09-16 14:58:47 +0000
commite641245cf14f6dc014229805aecb88434bba6a0e (patch)
treeda6f1691c9e7432ac9a20e07328acbbe928db0ee
parent8dd2df0d75b6241da28ee24904ea00b718566360 (diff)
Fix multiple usages of waiters
The fix consists of 2 parts: - Correctly calculate the time left for waiting in the 'scale_dcs_pod_amount_and_wait' function. Before, it had a bug where we waited less time than requested, which led to unexpected timeout errors. Example: it could fail after waiting only 180sec despite having 600sec as the real timeout. - Reset 'attempts' on the instantiated 'waiter's to avoid redundant waiting when reusing a single waiter instance. Example: on deletion of 20 PVCs we save about 1.5 minutes. In the whole test suite we create many more PVCs than 20. Change-Id: I5d06a63dd0c2c5bd67fdb09fef87948d65e6bf22
-rw-r--r--openshift-storage-libs/openshiftstoragelibs/gluster_ops.py3
-rw-r--r--openshift-storage-libs/openshiftstoragelibs/node_ops.py3
-rw-r--r--openshift-storage-libs/openshiftstoragelibs/openshift_ops.py21
3 files changed, 17 insertions, 10 deletions
diff --git a/openshift-storage-libs/openshiftstoragelibs/gluster_ops.py b/openshift-storage-libs/openshiftstoragelibs/gluster_ops.py
index 1e80149d..785bde58 100644
--- a/openshift-storage-libs/openshiftstoragelibs/gluster_ops.py
+++ b/openshift-storage-libs/openshiftstoragelibs/gluster_ops.py
@@ -36,6 +36,9 @@ def wait_to_heal_complete(timeout=300, wait_step=5):
for gluster_vol in gluster_vol_list:
for w in _waiter:
if is_heal_complete("auto_get_gluster_endpoint", gluster_vol):
+ # NOTE(vponomar): Reset attempts for waiter to avoid redundant
+ # sleep equal to 'interval' on the next usage.
+ _waiter._attempt = 0
break
if w.expired:
diff --git a/openshift-storage-libs/openshiftstoragelibs/node_ops.py b/openshift-storage-libs/openshiftstoragelibs/node_ops.py
index f456b325..8ca5674b 100644
--- a/openshift-storage-libs/openshiftstoragelibs/node_ops.py
+++ b/openshift-storage-libs/openshiftstoragelibs/node_ops.py
@@ -160,6 +160,9 @@ def power_on_vm_by_name(name, timeout=600, interval=10):
for w in _waiter:
try:
hostname = cloudProvider.wait_for_hostname(name, 1, 1)
+ # NOTE(vponomar): Reset attempts for waiter to avoid redundant
+ # sleep equal to 'interval' on the next usage.
+ _waiter._attempt = 0
break
except Exception as e:
g.log.info(e)
diff --git a/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py b/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
index 0816785b..9b81cdf4 100644
--- a/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
+++ b/openshift-storage-libs/openshiftstoragelibs/openshift_ops.py
@@ -575,6 +575,9 @@ def wait_for_resource_absence(ocp_node, rtype, name,
try:
resource = oc_get_yaml(ocp_node, rtype, name, raise_on_error=True)
except AssertionError:
+ # NOTE(vponomar): Reset attempts for waiter to avoid redundant
+ # sleep equal to 'interval' on the next usage.
+ _waiter._attempt = 0
break
if rtype == 'pvc':
cmd = "oc get pv -o=custom-columns=:.spec.claimRef.name | grep %s" % (
@@ -583,6 +586,7 @@ def wait_for_resource_absence(ocp_node, rtype, name,
try:
_pv_name = command.cmd_run(cmd, hostname=ocp_node)
except AssertionError:
+ _waiter._attempt = 0
break
finally:
if _pv_name and not pv_name:
@@ -639,23 +643,20 @@ def scale_dcs_pod_amount_and_wait(hostname, dc_names, pod_amount=1,
command.cmd_run(scale_cmd, hostname=hostname)
- _start_time = time.time()
for dc_name in dc_names:
dc_and_pod_names[dc_name] = get_pod_names_from_dc(hostname, dc_name)
- for pod_name in dc_and_pod_names[dc_name]:
+ _start_time, _timeout = time.time(), timeout
+ for pod_names in dc_and_pod_names.values():
+ for pod_name in pod_names:
if pod_amount == 0:
wait_for_resource_absence(
hostname, 'pod', pod_name,
- interval=wait_step, timeout=timeout)
+ interval=wait_step, timeout=_timeout)
else:
wait_for_pod_be_ready(
- hostname, pod_name,
- timeout=timeout, wait_step=wait_step)
- time_diff = time.time() - _start_time
- if time_diff > timeout:
- timeout = wait_step
- else:
- timeout -= time_diff
+ hostname, pod_name, timeout=_timeout, wait_step=wait_step)
+ _diff = time.time() - _start_time
+ _timeout = wait_step if _diff > timeout else timeout - _diff
return dc_and_pod_names