 glustolibs-gluster/glustolibs/gluster/brick_libs.py  | 18
 glustolibs-gluster/glustolibs/gluster/brick_ops.py   |  5
 glustolibs-gluster/glustolibs/gluster/heal_libs.py   |  2
 glustolibs-gluster/glustolibs/gluster/heal_ops.py    |  4
 glustolibs-gluster/glustolibs/gluster/peer_ops.py    |  4
 glustolibs-gluster/glustolibs/gluster/volume_ops.py  |  4
 tests/functional/bvt/test_cvt.py                     | 25
 7 files changed, 40 insertions(+), 22 deletions(-)
diff --git a/glustolibs-gluster/glustolibs/gluster/brick_libs.py b/glustolibs-gluster/glustolibs/gluster/brick_libs.py
index b4d232859..6c32fe5b9 100644
--- a/glustolibs-gluster/glustolibs/gluster/brick_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/brick_libs.py
@@ -18,6 +18,7 @@
import random
from math import ceil
+import time
from glusto.core import Glusto as g
from glustolibs.gluster.volume_ops import (get_volume_info, get_volume_status)
from glustolibs.gluster.volume_libs import (get_subvols, is_tiered_volume,
@@ -245,6 +246,9 @@ def bring_bricks_online(mnode, volname, bricks_list,
elif isinstance(bring_bricks_online_methods, str):
bring_bricks_online_methods = [bring_bricks_online_methods]
+ g.log.info("Bringing bricks '%s' online with '%s'",
+ bricks_list, bring_bricks_online_methods)
+
_rc = True
failed_to_bring_online_list = []
for brick in bricks_list:
@@ -258,6 +262,9 @@ def bring_bricks_online(mnode, volname, bricks_list,
brick_node)
_rc = False
failed_to_bring_online_list.append(brick)
+ else:
+ g.log.info("Successfully restarted glusterd on node %s to "
+ "bring brick %s back online", brick_node, brick)
elif bring_brick_online_method == 'volume_start_force':
bring_brick_online_command = ("gluster volume start %s force" %
volname)
@@ -267,18 +274,17 @@ def bring_bricks_online(mnode, volname, bricks_list,
volname)
_rc = False
else:
+ g.log.info("Successfully restarted volume %s to bring all "
+ "the bricks '%s' online", volname, bricks_list)
break
else:
g.log.error("Invalid method '%s' to bring brick online",
bring_brick_online_method)
return False
- if not _rc:
- g.log.error("Unable to bring some of the bricks %s online",
- failed_to_bring_online_list)
- return False
- g.log.info("All the bricks : %s are brought online", bricks_list)
- return True
+ g.log.info("Waiting 30 seconds for all the bricks to come online")
+ time.sleep(30)
+ return _rc
def are_bricks_offline(mnode, volname, bricks_list):
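
A minimal usage sketch of the revised bring_bricks_online() (host, volume,
and brick paths below are placeholders, not values from this patch): the
function now logs the chosen method up front, sleeps a flat 30 seconds,
and returns _rc without logging the failed-brick list, so callers should
re-verify brick state themselves.

    from glusto.core import Glusto as g
    from glustolibs.gluster.brick_libs import (bring_bricks_online,
                                               are_bricks_offline)

    mnode = 'server1.example.com'   # placeholder management node
    volname = 'testvol'             # placeholder volume
    bricks = ['server1:/bricks/brick0', 'server2:/bricks/brick0']

    ret = bring_bricks_online(mnode, volname, bricks,
                              bring_bricks_online_methods='volume_start_force')
    # The return value is now _rc after the 30 second wait; confirm the
    # bricks really came back rather than trusting it alone.
    if not ret or are_bricks_offline(mnode, volname, bricks):
        g.log.error("Bricks %s did not come online", bricks)
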
diff --git a/glustolibs-gluster/glustolibs/gluster/brick_ops.py b/glustolibs-gluster/glustolibs/gluster/brick_ops.py
index 0e99b955e..7ac34c690 100644
--- a/glustolibs-gluster/glustolibs/gluster/brick_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/brick_ops.py
@@ -115,11 +115,14 @@ def remove_brick(mnode, volname, bricks_list, option, xml=False, **kwargs):
xml_str = ''
if xml:
xml_str = "--xml"
+ log_level = 'DEBUG'
+ else:
+ log_level = 'INFO'
cmd = ("gluster volume remove-brick %s %s %s %s %s" %
(volname, replica, ' '.join(bricks_list), option, xml_str))
- return g.run(mnode, cmd)
+ return g.run(mnode, cmd, log_level=log_level)
def replace_brick(mnode, volname, src_brick, dst_brick):
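
A hedged sketch of how the log_level routing in remove_brick() looks from
the caller's side (host, volume, and brick are placeholders): passing
xml=True now runs the command at DEBUG, keeping the XML payload out of
INFO-level logs, while plain invocations still log at INFO.

    from glusto.core import Glusto as g
    from glustolibs.gluster.brick_ops import remove_brick

    # xml=True -> command logged at DEBUG; xml omitted -> logged at INFO.
    ret, out, err = remove_brick('server1.example.com', 'testvol',
                                 ['server1:/bricks/brick1'],
                                 option='status', xml=True)
    if ret == 0:
        # 'out' carries the --xml output for parsing.
        g.log.debug("remove-brick status xml: %s", out)
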
diff --git a/glustolibs-gluster/glustolibs/gluster/heal_libs.py b/glustolibs-gluster/glustolibs/gluster/heal_libs.py
index 4d5594a11..10707df63 100644
--- a/glustolibs-gluster/glustolibs/gluster/heal_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/heal_libs.py
@@ -64,7 +64,7 @@ def is_heal_disabled(mnode, volname):
NoneType: None if unable to get the volume status shd or parse error.
"""
cmd = "gluster volume status %s shd --xml" % volname
- ret, out, _ = g.run(mnode, cmd)
+ ret, out, _ = g.run(mnode, cmd, log_level='DEBUG')
if ret != 0:
g.log.error("Failed to get the self-heal-daemon status for the "
"volume %s", volname)
diff --git a/glustolibs-gluster/glustolibs/gluster/heal_ops.py b/glustolibs-gluster/glustolibs/gluster/heal_ops.py
index 8c0b6c820..daae9a37d 100644
--- a/glustolibs-gluster/glustolibs/gluster/heal_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/heal_ops.py
@@ -271,7 +271,7 @@ def get_heal_info(mnode, volname):
heal_info data per brick.
"""
cmd = "gluster volume heal %s info --xml" % volname
- ret, out, _ = g.run(mnode, cmd)
+ ret, out, _ = g.run(mnode, cmd, log_level='DEBUG')
if ret != 0:
g.log.error("Failed to get the heal info xml output for the volume %s. "
"Hence failed to get the heal info summary.", volname)
@@ -357,7 +357,7 @@ def get_heal_info_split_brain(mnode, volname):
heal_info_split_brain data per brick.
"""
cmd = "gluster volume heal %s info split-brain --xml" % volname
- ret, out, _ = g.run(mnode, cmd)
+ ret, out, _ = g.run(mnode, cmd, log_level='DEBUG')
if ret != 0:
g.log.error("Failed to get the heal info xml output for the volume %s. "
"Hence failed to get the heal info summary.", volname)
diff --git a/glustolibs-gluster/glustolibs/gluster/peer_ops.py b/glustolibs-gluster/glustolibs/gluster/peer_ops.py
index e4d3e6ec3..54c2a38a9 100644
--- a/glustolibs-gluster/glustolibs/gluster/peer_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/peer_ops.py
@@ -275,7 +275,7 @@ def get_peer_status(mnode):
'stateStr': 'Peer in Cluster'}
]
"""
- ret, out, _ = g.run(mnode, "gluster peer status --xml")
+ ret, out, _ = g.run(mnode, "gluster peer status --xml", log_level='DEBUG')
if ret != 0:
g.log.error("Failed to execute peer status command on node '%s'. "
"Hence failed to parse the peer status.", mnode)
@@ -327,7 +327,7 @@ def get_pool_list(mnode):
'stateStr': 'Peer in Cluster'}
]
"""
- ret, out, _ = g.run(mnode, "gluster pool list --xml")
+ ret, out, _ = g.run(mnode, "gluster pool list --xml", log_level='DEBUG')
if ret != 0:
g.log.error("Failed to execute 'pool list' on node %s. "
"Hence failed to parse the pool list.", mnode)
diff --git a/glustolibs-gluster/glustolibs/gluster/volume_ops.py b/glustolibs-gluster/glustolibs/gluster/volume_ops.py
index cc400390b..6cf038dbf 100644
--- a/glustolibs-gluster/glustolibs/gluster/volume_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/volume_ops.py
@@ -384,7 +384,7 @@ def get_volume_status(mnode, volname='all', service='', options=''):
cmd = "gluster vol status %s %s %s --xml" % (volname, service, options)
- ret, out, _ = g.run(mnode, cmd)
+ ret, out, _ = g.run(mnode, cmd, log_level='DEBUG')
if ret != 0:
g.log.error("Failed to execute gluster volume status command")
return None
@@ -635,7 +635,7 @@ def get_volume_info(mnode, volname='all'):
"""
cmd = "gluster volume info %s --xml" % volname
- ret, out, _ = g.run(mnode, cmd)
+ ret, out, _ = g.run(mnode, cmd, log_level='DEBUG')
if ret != 0:
g.log.error("volume info returned error")
return None
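
Taken together, the changes to heal_libs.py, heal_ops.py, peer_ops.py and
volume_ops.py are one pattern: every helper that shells out to a --xml
gluster command now passes log_level='DEBUG' to g.run, so the bulky XML
appears only when debug logging is enabled. The parsed return values are
unchanged; a hedged caller-side sketch (host and volume are placeholders):

    from glustolibs.gluster.peer_ops import get_peer_status
    from glustolibs.gluster.volume_ops import get_volume_status

    # Both helpers return None when the command or XML parse fails.
    peers = get_peer_status('server1.example.com')
    status = get_volume_status('server1.example.com', volname='testvol')
    if peers is None or status is None:
        raise RuntimeError("gluster query failed; see DEBUG logs for the "
                           "raw --xml output")
    # Per the docstring above, each peer entry carries a 'stateStr' field.
    all_in_cluster = all(p.get('stateStr') == 'Peer in Cluster'
                         for p in peers)
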
diff --git a/tests/functional/bvt/test_cvt.py b/tests/functional/bvt/test_cvt.py
index b367004ea..e19adf862 100644
--- a/tests/functional/bvt/test_cvt.py
+++ b/tests/functional/bvt/test_cvt.py
@@ -684,14 +684,6 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
# Wait for gluster processes to be offline
time.sleep(10)
- # Validate if bricks are offline
- g.log.info("Validating if bricks: %s are offline",
- bricks_to_bring_offline)
- ret = are_bricks_offline(self.mnode, self.volname,
- bricks_to_bring_offline)
- self.assertTrue(ret, "Not all the bricks in list:%s are offline")
- g.log.info("Successfully validated that bricks: %s are all offline")
-
# Log Volume Info and Status
g.log.info("Logging volume info and Status after bringing bricks "
"offline from the volume %s", self.volname)
@@ -701,6 +693,14 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
g.log.info("Successful in logging volume info and status of volume %s",
self.volname)
+ # Validate if bricks are offline
+ g.log.info("Validating if bricks: %s are offline",
+ bricks_to_bring_offline)
+ ret = are_bricks_offline(self.mnode, self.volname,
+ bricks_to_bring_offline)
+ self.assertTrue(ret, "Not all the bricks in list: %s are offline"
+ % bricks_to_bring_offline)
+ g.log.info("Successfully validated that bricks: %s are all offline",
+ bricks_to_bring_offline)
+
# Add delay before bringing bricks online
time.sleep(40)
@@ -716,6 +716,15 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
# Wait for gluster processes to be online
time.sleep(10)
+ # Log Volume Info and Status
+ g.log.info("Logging volume info and Status after bringing bricks "
+ "online in the volume %s", self.volname)
+ ret = log_volume_info_and_status(self.mnode, self.volname)
+ self.assertTrue(ret, ("Logging volume info and status failed on "
+ "volume %s", self.volname))
+ g.log.info("Successful in logging volume info and status of volume %s",
+ self.volname)
+
# Verify volume's all process are online
g.log.info("Verifying volume's all process are online")
ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
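
The test still paces itself with fixed sleeps (10 and 40 seconds above,
plus the 30 seconds now inside bring_bricks_online). Purely as an
illustrative alternative, not part of this change: a polling helper built
on are_bricks_offline() from brick_libs.py could replace the fixed delays.

    import time

    from glustolibs.gluster.brick_libs import are_bricks_offline

    def wait_until_bricks_offline(mnode, volname, bricks, timeout=60):
        """Poll every 2 seconds until all bricks are offline or timeout."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            if are_bricks_offline(mnode, volname, bricks):
                return True
            time.sleep(2)
        return False
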