From dfe392e04ee3a126fad2a9b3b5c526f7f7594cf5 Mon Sep 17 00:00:00 2001
From: ShwethaHP
Date: Wed, 23 Aug 2017 17:24:57 +0530
Subject: 1) bring_bricks_online: Wait for bricks to be online for 30 seconds
 after bringing them online. 2) log all the xml output/error to DEBUG log
 level.

Change-Id: If6bb758ac728f299292def9d72c0ef166a1569ae
Signed-off-by: ShwethaHP
---
 glustolibs-gluster/glustolibs/gluster/brick_libs.py  | 18 ++++++++++++------
 glustolibs-gluster/glustolibs/gluster/brick_ops.py   |  5 ++++-
 glustolibs-gluster/glustolibs/gluster/heal_libs.py   |  2 +-
 glustolibs-gluster/glustolibs/gluster/heal_ops.py    |  4 ++--
 glustolibs-gluster/glustolibs/gluster/peer_ops.py    |  4 ++--
 glustolibs-gluster/glustolibs/gluster/volume_ops.py  |  4 ++--
 6 files changed, 23 insertions(+), 14 deletions(-)

diff --git a/glustolibs-gluster/glustolibs/gluster/brick_libs.py b/glustolibs-gluster/glustolibs/gluster/brick_libs.py
index b4d232859..6c32fe5b9 100644
--- a/glustolibs-gluster/glustolibs/gluster/brick_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/brick_libs.py
@@ -18,6 +18,7 @@
 
 import random
 from math import ceil
+import time
 from glusto.core import Glusto as g
 from glustolibs.gluster.volume_ops import (get_volume_info, get_volume_status)
 from glustolibs.gluster.volume_libs import (get_subvols, is_tiered_volume,
@@ -245,6 +246,9 @@ def bring_bricks_online(mnode, volname, bricks_list,
     elif isinstance(bring_bricks_online_methods, str):
         bring_bricks_online_methods = [bring_bricks_online_methods]
 
+    g.log.info("Bringing bricks '%s' online with '%s'",
+               bricks_list, bring_bricks_online_methods)
+
     _rc = True
     failed_to_bring_online_list = []
     for brick in bricks_list:
@@ -258,6 +262,9 @@ def bring_bricks_online(mnode, volname, bricks_list,
                             brick_node)
                 _rc = False
                 failed_to_bring_online_list.append(brick)
+            else:
+                g.log.info("Successfully restarted glusterd on node %s to "
+                           "bring back brick %s online", brick_node, brick)
         elif bring_brick_online_method == 'volume_start_force':
             bring_brick_online_command = ("gluster volume start %s force" %
                                           volname)
@@ -267,18 +274,17 @@ def bring_bricks_online(mnode, volname, bricks_list,
                             volname)
                 _rc = False
             else:
+                g.log.info("Successfully restarted volume %s to bring all "
+                           "the bricks '%s' online", volname, bricks_list)
                 break
         else:
             g.log.error("Invalid method '%s' to bring brick online",
                         bring_brick_online_method)
             return False
 
-    if not _rc:
-        g.log.error("Unable to bring some of the bricks %s online",
-                    failed_to_bring_online_list)
-        return False
-    g.log.info("All the bricks : %s are brought online", bricks_list)
-    return True
+    g.log.info("Waiting for 30 seconds for all the bricks to be online")
+    time.sleep(30)
+    return _rc
 
 
 def are_bricks_offline(mnode, volname, bricks_list):
diff --git a/glustolibs-gluster/glustolibs/gluster/brick_ops.py b/glustolibs-gluster/glustolibs/gluster/brick_ops.py
index 0e99b955e..7ac34c690 100644
--- a/glustolibs-gluster/glustolibs/gluster/brick_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/brick_ops.py
@@ -115,11 +115,14 @@ def remove_brick(mnode, volname, bricks_list, option, xml=False, **kwargs):
     xml_str = ''
     if xml:
         xml_str = "--xml"
+        log_level = 'DEBUG'
+    else:
+        log_level = 'INFO'
 
     cmd = ("gluster volume remove-brick %s %s %s %s %s"
            % (volname, replica, ' '.join(bricks_list), option, xml_str))
 
-    return g.run(mnode, cmd)
+    return g.run(mnode, cmd, log_level=log_level)
 
 
 def replace_brick(mnode, volname, src_brick, dst_brick):
diff --git a/glustolibs-gluster/glustolibs/gluster/heal_libs.py b/glustolibs-gluster/glustolibs/gluster/heal_libs.py
index 4d5594a11..10707df63 100644
--- a/glustolibs-gluster/glustolibs/gluster/heal_libs.py
+++ b/glustolibs-gluster/glustolibs/gluster/heal_libs.py
@@ -64,7 +64,7 @@ def is_heal_disabled(mnode, volname):
         NoneType: None if unable to get the volume status shd or parse error.
     """
     cmd = "gluster volume status %s shd --xml" % volname
-    ret, out, _ = g.run(mnode, cmd)
+    ret, out, _ = g.run(mnode, cmd, log_level='DEBUG')
     if ret != 0:
         g.log.error("Failed to get the self-heal-daemon status for the "
                     "volume" % volname)
diff --git a/glustolibs-gluster/glustolibs/gluster/heal_ops.py b/glustolibs-gluster/glustolibs/gluster/heal_ops.py
index 8c0b6c820..daae9a37d 100644
--- a/glustolibs-gluster/glustolibs/gluster/heal_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/heal_ops.py
@@ -271,7 +271,7 @@ def get_heal_info(mnode, volname):
             heal_info data per brick.
     """
     cmd = "gluster volume heal %s info --xml" % volname
-    ret, out, _ = g.run(mnode, cmd)
+    ret, out, _ = g.run(mnode, cmd, log_level='DEBUG')
     if ret != 0:
         g.log.error("Failed to get the heal info xml output for the volume %s."
                     "Hence failed to get the heal info summary." % volname)
@@ -357,7 +357,7 @@ def get_heal_info_split_brain(mnode, volname):
             heal_info_split_brain data per brick.
     """
     cmd = "gluster volume heal %s info split-brain --xml" % volname
-    ret, out, _ = g.run(mnode, cmd)
+    ret, out, _ = g.run(mnode, cmd, log_level='DEBUG')
     if ret != 0:
         g.log.error("Failed to get the heal info xml output for the volume %s."
                     "Hence failed to get the heal info summary." % volname)
diff --git a/glustolibs-gluster/glustolibs/gluster/peer_ops.py b/glustolibs-gluster/glustolibs/gluster/peer_ops.py
index e4d3e6ec3..54c2a38a9 100644
--- a/glustolibs-gluster/glustolibs/gluster/peer_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/peer_ops.py
@@ -275,7 +275,7 @@ def get_peer_status(mnode):
              'stateStr': 'Peer in Cluster'}
         ]
    """
-    ret, out, _ = g.run(mnode, "gluster peer status --xml")
+    ret, out, _ = g.run(mnode, "gluster peer status --xml", log_level='DEBUG')
     if ret != 0:
         g.log.error("Failed to execute peer status command on node '%s'. "
                     "Hence failed to parse the peer status.", mnode)
@@ -327,7 +327,7 @@ def get_pool_list(mnode):
             'stateStr': 'Peer in Cluster'}
         ]
    """
-    ret, out, _ = g.run(mnode, "gluster pool list --xml")
+    ret, out, _ = g.run(mnode, "gluster pool list --xml", log_level='DEBUG')
     if ret != 0:
         g.log.error("Failed to execute 'pool list' on node %s. "
                     "Hence failed to parse the pool list.", mnode)
diff --git a/glustolibs-gluster/glustolibs/gluster/volume_ops.py b/glustolibs-gluster/glustolibs/gluster/volume_ops.py
index cc400390b..6cf038dbf 100644
--- a/glustolibs-gluster/glustolibs/gluster/volume_ops.py
+++ b/glustolibs-gluster/glustolibs/gluster/volume_ops.py
@@ -384,7 +384,7 @@ def get_volume_status(mnode, volname='all', service='', options=''):
 
     cmd = "gluster vol status %s %s %s --xml" % (volname, service, options)
 
-    ret, out, _ = g.run(mnode, cmd)
+    ret, out, _ = g.run(mnode, cmd, log_level='DEBUG')
     if ret != 0:
         g.log.error("Failed to execute gluster volume status command")
         return None
@@ -635,7 +635,7 @@ def get_volume_info(mnode, volname='all'):
     """
 
     cmd = "gluster volume info %s --xml" % volname
-    ret, out, _ = g.run(mnode, cmd)
+    ret, out, _ = g.run(mnode, cmd, log_level='DEBUG')
     if ret != 0:
         g.log.error("volume info returned error")
         return None
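
The sketch below (not part of the patch) shows how a test case might call the
changed bring_bricks_online() and rely on the new 30-second wait before
checking brick state. It only uses functions visible in this diff
(bring_bricks_online, are_bricks_offline); the node, volume, and brick values
are hypothetical placeholders, not anything from this change.

from glusto.core import Glusto as g
from glustolibs.gluster.brick_libs import (bring_bricks_online,
                                           are_bricks_offline)

# Hypothetical management node, volume, and brick list for illustration only.
mnode = "server1.example.com"
volname = "testvol"
bricks_list = ["server1.example.com:/bricks/brick0",
               "server2.example.com:/bricks/brick1"]

# With this patch, the call below sleeps 30 seconds after issuing the
# bring-online method, so the bricks have time to come up before the
# caller inspects their status.
ret = bring_bricks_online(mnode, volname, bricks_list,
                          bring_bricks_online_methods=['volume_start_force'])
if not ret:
    g.log.error("Failed to bring bricks %s online", bricks_list)

# are_bricks_offline() returns True when all of the given bricks are still
# offline; after a successful bring_bricks_online() this should be False.
if are_bricks_offline(mnode, volname, bricks_list):
    g.log.error("Bricks %s are still offline after the 30 second wait",
                bricks_list)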