author     Kotresh HR <khiremat@redhat.com>               2018-08-10 08:14:14 -0400
committer  Shyamsundar Ranganathan <srangana@redhat.com>  2018-09-21 13:25:43 +0000
commit     72514f20d2ae947529cd1c4b4b009f27bae7032a (patch)
tree       abe9f9399113fae47519ecd91f373b6f89ca4760 /geo-replication
parent     380ec25fdf439a0fd6a6fd01743625269ff85fa3 (diff)
geo-rep: Fix deadlock during worker start
Analysis: The monitor process spawns one monitor thread per brick. Each
monitor thread forks the worker and agent processes and, while
initializing, updates the monitor status file. Access to the status file
is synchronized with flock. The race: one thread can fork a worker while
another thread has the status file open, so the forked worker process
ends up holding a reference to the locked fd.

Cause: flock is released either by unlocking it explicitly or by closing
all duplicate fds referring to the file. The code relied on fd close, so
a reference carried into the worker/agent process by fork could cause the
deadlock.

Fix:
1. Unlock the flock explicitly.
2. Update the status file only in appropriate places so that no fd
   reference is leaked to the worker/agent process.

With this fix, both the deadlock and the possible fd leaks are solved.

Backport of:
> Patch: https://review.gluster.org/20704
> BUG: bz#1614799
> Change-Id: I0d1ce93072dab07d0dbcc7e779287368cd9f093d
> Signed-off-by: Kotresh HR <khiremat@redhat.com>

fixes: bz#1630145
Change-Id: I0d1ce93072dab07d0dbcc7e779287368cd9f093d
Signed-off-by: Kotresh HR <khiremat@redhat.com>
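To make the failure mode concrete, here is a minimal standalone sketch, not GlusterFS code: the status-file path, the sleep, and the fork placement are illustrative assumptions. It shows why relying on close() to drop an flock() deadlocks once a forked child inherits the fd, and why the explicit LOCK_UN this patch adds releases the lock regardless of inherited duplicates.

import fcntl
import os
import time

STATUS_FILE = "/tmp/monitor.status"   # hypothetical path, for illustration only

def update_status_unsafe(text):
    # Pre-patch pattern: lock, write, and rely on close() to unlock.
    f = open(STATUS_FILE, "a+")
    fcntl.flock(f, fcntl.LOCK_EX)
    pid = os.fork()
    if pid == 0:
        # The child (the worker/agent in geo-rep terms) inherits a duplicate
        # of the locked fd; as long as it stays open, the flock stays held.
        time.sleep(30)
        os._exit(0)
    f.write(text + "\n")
    # The parent's close() does NOT release the lock: flock is dropped only
    # when every duplicate of the fd is closed, so later lockers block.
    f.close()

def update_status_safe(text):
    # Patched pattern: unlock explicitly before close().
    f = open(STATUS_FILE, "a+")
    fcntl.flock(f, fcntl.LOCK_EX)
    try:
        f.write(text + "\n")
    finally:
        # LOCK_UN releases the lock immediately, even if a forked child
        # still holds a duplicate of the same open file description.
        fcntl.flock(f, fcntl.LOCK_UN)
        f.close()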
Diffstat (limited to 'geo-replication')
-rw-r--r--  geo-replication/syncdaemon/gsyncdstatus.py |  1
-rw-r--r--  geo-replication/syncdaemon/monitor.py      | 18
2 files changed, 15 insertions(+), 4 deletions(-)
diff --git a/geo-replication/syncdaemon/gsyncdstatus.py b/geo-replication/syncdaemon/gsyncdstatus.py
index e75f9dcd62c..e93e0519890 100644
--- a/geo-replication/syncdaemon/gsyncdstatus.py
+++ b/geo-replication/syncdaemon/gsyncdstatus.py
@@ -100,6 +100,7 @@ class LockedOpen(object):
         return f
 
     def __exit__(self, _exc_type, _exc_value, _traceback):
+        fcntl.flock(self.fileobj, fcntl.LOCK_UN)
         self.fileobj.close()
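For reference, a condensed sketch of a LockedOpen-style context manager with the explicit unlock this hunk adds. It is simplified (the real __enter__ in gsyncdstatus.py also re-opens the file in a loop to handle rename races), so treat it as illustrative rather than the exact upstream class.

import fcntl

class LockedOpen(object):
    def __init__(self, filename, *args, **kwargs):
        self.filename = filename
        self.open_args = args
        self.open_kwargs = kwargs
        self.fileobj = None

    def __enter__(self):
        f = open(self.filename, *self.open_args, **self.open_kwargs)
        fcntl.flock(f, fcntl.LOCK_EX)
        self.fileobj = f
        return f

    def __exit__(self, _exc_type, _exc_value, _traceback):
        # Unlock explicitly rather than relying on close(); a worker/agent
        # forked while the file is open would otherwise keep the lock held
        # through its inherited duplicate of the fd.
        fcntl.flock(self.fileobj, fcntl.LOCK_UN)
        self.fileobj.close()

Usage stays the same, e.g. "with LockedOpen(path, 'r+') as f: ...", but the lock is now guaranteed to drop at the end of the block.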
diff --git a/geo-replication/syncdaemon/monitor.py b/geo-replication/syncdaemon/monitor.py
index 40818427bfe..047efa75346 100644
--- a/geo-replication/syncdaemon/monitor.py
+++ b/geo-replication/syncdaemon/monitor.py
@@ -105,10 +105,6 @@ class Monitor(object):
                                                     master,
                                                     "%s::%s" % (slave_host,
                                                                 slave_vol))
-
-        set_monitor_status(gconf.get("state-file"), self.ST_STARTED)
-        self.status[w[0]['dir']].set_worker_status(self.ST_INIT)
-
         ret = 0
 
         def nwait(p, o=0):
@@ -153,6 +149,7 @@ class Monitor(object):
 
             # Spawn the worker and agent in lock to avoid fd leak
             self.lock.acquire()
+            self.status[w[0]['dir']].set_worker_status(self.ST_INIT)
             logging.info(lf('starting gsyncd worker',
                             brick=w[0]['dir'],
                             slave_node=remote_host))
@@ -349,6 +346,19 @@ class Monitor(object):
             t = Thread(target=wmon, args=[wx])
             t.start()
             ta.append(t)
+
+        # The monitor status was being updated in each monitor thread. That
+        # must not be done there, as it can deadlock a worker start.
+        # set_monitor_status uses flock to synchronize multiple instances
+        # updating the file. Since each monitor thread forks the worker and
+        # agent, those processes can hold a reference to the status file's
+        # fd, deadlocking workers that start later, because the flock is
+        # not released until all references to the same fd are closed.
+        # It would also leak fds.
+
+        self.lock.acquire()
+        set_monitor_status(gconf.get("state-file"), self.ST_STARTED)
+        self.lock.release()
 
         for t in ta:
             t.join()
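Taken together, the monitor.py hunks enforce one ordering rule: the status file is only opened while holding the same lock under which workers are forked, and it is updated once from the main thread. A minimal sketch of that pattern follows; the class and function names are invented for illustration and are not the gluster Monitor API.

import fcntl
import subprocess
import threading

class MiniMonitor(object):
    def __init__(self, state_file):
        self.lock = threading.Lock()        # plays the role of Monitor.lock
        self.state_file = state_file

    def set_monitor_status(self, status):
        # Called once from the main thread, never per monitor thread.
        with self.lock:
            f = open(self.state_file, "w")
            fcntl.flock(f, fcntl.LOCK_EX)
            try:
                f.write(status + "\n")
            finally:
                fcntl.flock(f, fcntl.LOCK_UN)   # never rely on close() alone
                f.close()

    def monitor_thread(self, worker_cmd):
        # One thread per brick; the worker is spawned under the same lock,
        # so no spawn can overlap a window where the status fd is open.
        with self.lock:
            worker = subprocess.Popen(worker_cmd)
        worker.wait()

def run(worker_cmds):
    mon = MiniMonitor("/tmp/monitor.status")        # hypothetical path
    threads = [threading.Thread(target=mon.monitor_thread, args=(cmd,))
               for cmd in worker_cmds]
    for t in threads:
        t.start()
    mon.set_monitor_status("Started")               # once, in the main thread
    for t in threads:
        t.join()

if __name__ == "__main__":
    run([["sleep", "1"], ["sleep", "1"]])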