summaryrefslogtreecommitdiffstats
path: root/geo-replication/syncdaemon/monitor.py
diff options
context:
space:
mode:
authorKotresh HR <khiremat@redhat.com>2015-12-17 12:39:30 +0530
committerVenky Shankar <vshankar@redhat.com>2015-12-21 00:28:23 -0800
commitd677e195cb85bef28fcd9e2f45e487c9ea792311 (patch)
tree691ef8d6524b890e1815729fcae7332dc4389ecc /geo-replication/syncdaemon/monitor.py
parent2b7226f9d3470d8fe4c98c1fddb06e7f641e364d (diff)
geo-rep: Fix getting subvol number
Fix getting the subvol number if the volume type is tier. If the volume type was tier, the subvol number was calculated incorrectly, and hence a few of the workers didn't become ACTIVE, resulting in files not being replicated from the corresponding brick. This patch addresses the same. Change-Id: Ic10ad7f09a0fa91b4bf2aa361dea3bd48be74853 BUG: 1292084 Signed-off-by: Kotresh HR <khiremat@redhat.com> Reviewed-on: http://review.gluster.org/12994 Tested-by: NetBSD Build System <jenkins@build.gluster.org> Reviewed-by: Aravinda VK <avishwan@redhat.com> Tested-by: Gluster Build System <jenkins@build.gluster.com>
Diffstat (limited to 'geo-replication/syncdaemon/monitor.py')
-rw-r--r--geo-replication/syncdaemon/monitor.py62
1 files changed, 46 insertions, 16 deletions
diff --git a/geo-replication/syncdaemon/monitor.py b/geo-replication/syncdaemon/monitor.py
index 257a280b2f2..63c8e3365ca 100644
--- a/geo-replication/syncdaemon/monitor.py
+++ b/geo-replication/syncdaemon/monitor.py
@@ -32,14 +32,26 @@ from gsyncdstatus import GeorepStatus, set_monitor_status
ParseError = XET.ParseError if hasattr(XET, 'ParseError') else SyntaxError
-def get_subvol_num(brick_idx, replica_count, disperse_count):
+def get_subvol_num(brick_idx, vol, hot):
+ tier = vol.is_tier()
+ disperse_count = vol.disperse_count(tier, hot)
+ replica_count = vol.replica_count(tier, hot)
+
+ if (tier and not hot):
+ brick_idx = brick_idx - vol.get_hot_bricks_count(tier)
+
subvol_size = disperse_count if disperse_count > 0 else replica_count
cnt = int((brick_idx + 1) / subvol_size)
rem = (brick_idx + 1) % subvol_size
if rem > 0:
- return cnt + 1
+ cnt = cnt + 1
+
+ if (tier and hot):
+ return "hot_" + str(cnt)
+ elif (tier and not hot):
+ return "cold_" + str(cnt)
else:
- return cnt
+ return str(cnt)
def get_slave_bricks_status(host, vol):
@@ -99,6 +111,9 @@ class Volinfo(object):
def get(self, elem):
return self.tree.findall('.//' + elem)
+ def is_tier(self):
+ return (self.get('typeStr')[0].text == 'Tier')
+
def is_hot(self, brickpath):
logging.debug('brickpath: ' + repr(brickpath))
return brickpath in self.hot_bricks
@@ -120,21 +135,33 @@ class Volinfo(object):
"ambiguous uuid" % (self.volume, self.host))
return ids[0].text
- @property
- @memoize
- def replica_count(self):
- return int(self.get('replicaCount')[0].text)
+ def replica_count(self, tier, hot):
+ if (tier and hot):
+ return int(self.get('hotBricks/hotreplicaCount')[0].text)
+ elif (tier and not hot):
+ return int(self.get('coldBricks/coldreplicaCount')[0].text)
+ else:
+ return int(self.get('replicaCount')[0].text)
- @property
- @memoize
- def disperse_count(self):
- return int(self.get('disperseCount')[0].text)
+ def disperse_count(self, tier, hot):
+ if (tier and hot):
+ return int(self.get('hotBricks/hotdisperseCount')[0].text)
+ elif (tier and not hot):
+ return int(self.get('coldBricks/colddisperseCount')[0].text)
+ else:
+ return int(self.get('disperseCount')[0].text)
@property
@memoize
def hot_bricks(self):
return [b.text for b in self.get('hotBricks/brick')]
+ def get_hot_bricks_count(self, tier):
+ if (tier):
+ return int(self.get('hotBricks/hotbrickCount')[0].text)
+ else:
+ return 0
+
class Monitor(object):
"""class which spawns and manages gsyncd workers"""
@@ -417,11 +444,14 @@ def distribute(*resources):
else:
slaves = slavevols
- workerspex = [(brick['dir'], slaves[idx % len(slaves)],
- get_subvol_num(idx, mvol.replica_count, mvol.disperse_count),
- mvol.is_hot(":".join([brick['host'], brick['dir']])))
- for idx, brick in enumerate(mvol.bricks)
- if is_host_local(brick['host'])]
+ workerspex = []
+ for idx, brick in enumerate(mvol.bricks):
+ if is_host_local(brick['host']):
+ is_hot = mvol.is_hot(":".join([brick['host'], brick['dir']]))
+ workerspex.append((brick['dir'],
+ slaves[idx % len(slaves)],
+ get_subvol_num(idx, mvol, is_hot),
+ is_hot))
logging.info('worker specs: ' + repr(workerspex))
return workerspex, suuid, slave_vol, slave_host, master