-rw-r--r--  glusterfsd/src/glusterfsd.c                  | 73
-rw-r--r--  libglusterfs/src/glusterfs.h                 |  1
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-store.c   |  8
3 files changed, 47 insertions, 35 deletions
diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c
index 58dc521b565..771af423ea5 100644
--- a/glusterfsd/src/glusterfsd.c
+++ b/glusterfsd/src/glusterfsd.c
@@ -1525,43 +1525,44 @@ cleanup_and_exit(int signum)
if (ctx->cleanup_started)
return;
+ pthread_mutex_lock(&ctx->cleanup_lock);
+ {
+ ctx->cleanup_started = 1;
- ctx->cleanup_started = 1;
-
- /* signout should be sent to all the bricks in case brick mux is enabled
- * and multiple brick instances are attached to this process
- */
- if (ctx->active) {
- top = ctx->active->first;
- for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) {
- victim = (*trav_p)->xlator;
- rpc_clnt_mgmt_pmap_signout(ctx, victim->name);
+ /* signout should be sent to all the bricks in case brick mux is enabled
+ * and multiple brick instances are attached to this process
+ */
+ if (ctx->active) {
+ top = ctx->active->first;
+ for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) {
+ victim = (*trav_p)->xlator;
+ rpc_clnt_mgmt_pmap_signout(ctx, victim->name);
+ }
+ } else {
+ rpc_clnt_mgmt_pmap_signout(ctx, NULL);
}
- } else {
- rpc_clnt_mgmt_pmap_signout(ctx, NULL);
- }
- /* below part is a racy code where the rpcsvc object is freed.
- * But in another thread (epoll thread), upon poll error in the
- * socket the transports are cleaned up where again rpcsvc object
- * is accessed (which is already freed by the below function).
- * Since the process is about to be killed don't execute the function
- * below.
- */
- /* if (ctx->listener) { */
- /* (void) glusterfs_listener_stop (ctx); */
- /* } */
-
- /* Call fini() of FUSE xlator first:
- * so there are no more requests coming and
- * 'umount' of mount point is done properly */
- trav = ctx->master;
- if (trav && trav->fini) {
- THIS = trav;
- trav->fini(trav);
- }
+ /* below part is a racy code where the rpcsvc object is freed.
+ * But in another thread (epoll thread), upon poll error in the
+ * socket the transports are cleaned up where again rpcsvc object
+ * is accessed (which is already freed by the below function).
+ * Since the process is about to be killed don't execute the function
+ * below.
+ */
+ /* if (ctx->listener) { */
+ /* (void) glusterfs_listener_stop (ctx); */
+ /* } */
+
+ /* Call fini() of FUSE xlator first:
+ * so there are no more requests coming and
+ * 'umount' of mount point is done properly */
+ trav = ctx->master;
+ if (trav && trav->fini) {
+ THIS = trav;
+ trav->fini(trav);
+ }
- glusterfs_pidfile_cleanup(ctx);
+ glusterfs_pidfile_cleanup(ctx);
#if 0
/* TODO: Properly do cleanup_and_exit(), with synchronization */
@@ -1572,8 +1573,9 @@ cleanup_and_exit(int signum)
}
#endif
- trav = NULL;
-
+ trav = NULL;
+ }
+ pthread_mutex_unlock(&ctx->cleanup_lock);
/* NOTE: Only the least significant 8 bits i.e (signum & 255)
will be available to parent process on calling exit() */
exit(abs(signum));
@@ -1743,6 +1745,7 @@ glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
goto out;
pthread_mutex_init(&ctx->notify_lock, NULL);
+ pthread_mutex_init(&ctx->cleanup_lock, NULL);
pthread_cond_init(&ctx->notify_cond, NULL);
ctx->clienttable = gf_clienttable_alloc();
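
Taken together, the glusterfsd.c hunks above establish one pattern: the whole teardown in cleanup_and_exit() now runs inside a critical section guarded by the new ctx->cleanup_lock, with ctx->cleanup_started set under the lock, and the lock initialized alongside notify_lock in glusterfs_ctx_defaults_init(). Below is a minimal, stand-alone sketch of that pattern only; it is not the GlusterFS code, and app_ctx_t, app_ctx_init(), do_teardown() and cleanup_and_exit_sketch() are made-up names.

#include <pthread.h>
#include <stdlib.h>

/* Illustrative context, assumed to mirror the relevant fields of
 * glusterfs_ctx_t: the cleanup flag plus the mutex added by this patch. */
typedef struct {
    int cleanup_started;
    pthread_mutex_t cleanup_lock;
} app_ctx_t;

/* Mirrors the init added to glusterfs_ctx_defaults_init(). */
static void
app_ctx_init(app_ctx_t *ctx)
{
    ctx->cleanup_started = 0;
    pthread_mutex_init(&ctx->cleanup_lock, NULL);
}

/* Placeholder for the real work: pmap signout for the bricks, fini() of
 * the master xlator, pidfile cleanup, and so on. */
static void
do_teardown(app_ctx_t *ctx)
{
    (void)ctx;
}

/* Same shape as the patched cleanup_and_exit(): return early if cleanup
 * already ran, otherwise do all teardown under cleanup_lock so any other
 * thread holding the lock finishes its critical work first. */
static void
cleanup_and_exit_sketch(app_ctx_t *ctx, int signum)
{
    if (ctx->cleanup_started)
        return;

    pthread_mutex_lock(&ctx->cleanup_lock);
    {
        ctx->cleanup_started = 1;
        do_teardown(ctx);
    }
    pthread_mutex_unlock(&ctx->cleanup_lock);

    /* Only the least significant 8 bits of the status reach the parent. */
    exit(abs(signum));
}

int
main(void)
{
    app_ctx_t ctx;

    app_ctx_init(&ctx);
    cleanup_and_exit_sketch(&ctx, 15); /* e.g. SIGTERM */
    return 0;
}

Any thread that needs to finish critical work before the process exits can simply hold ctx->cleanup_lock for the duration of that work, which is exactly what the glusterd-store.c hunk below does.
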
diff --git a/libglusterfs/src/glusterfs.h b/libglusterfs/src/glusterfs.h
index 8335d170bd1..4d5bac322fd 100644
--- a/libglusterfs/src/glusterfs.h
+++ b/libglusterfs/src/glusterfs.h
@@ -689,6 +689,7 @@ struct _glusterfs_ctx {
char btbuf[GF_BACKTRACE_LEN];
pthread_mutex_t notify_lock;
+ pthread_mutex_t cleanup_lock;
pthread_cond_t notify_cond;
int notifying;
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
index e37d22616ac..57ad7ca501d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.c
+++ b/xlators/mgmt/glusterd/src/glusterd-store.c
@@ -1768,9 +1768,16 @@ glusterd_store_volinfo(glusterd_volinfo_t *volinfo,
glusterd_volinfo_ver_ac_t ac)
{
int32_t ret = -1;
+ glusterfs_ctx_t *ctx = NULL;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT(this);
+ ctx = this->ctx;
+ GF_ASSERT(ctx);
GF_ASSERT(volinfo);
+ pthread_mutex_lock(&ctx->cleanup_lock);
pthread_mutex_lock(&volinfo->store_volinfo_lock);
{
glusterd_perform_volinfo_version_action(volinfo, ac);
@@ -1812,6 +1819,7 @@ glusterd_store_volinfo(glusterd_volinfo_t *volinfo,
}
unlock:
pthread_mutex_unlock(&volinfo->store_volinfo_lock);
+ pthread_mutex_unlock(&ctx->cleanup_lock);
if (ret)
glusterd_store_volume_cleanup_tmp(volinfo);
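
On the glusterd side, taking the same lock is what serializes glusterd_store_volinfo() against a concurrent cleanup_and_exit(): the volume info is written out only while ctx->cleanup_lock is held, so a signal-triggered teardown cannot run mid-store. A hedged sketch of that consumer side, reusing the same illustrative context type (volinfo_t, write_volinfo_to_disk() and store_volinfo_sketch() are made-up names):

#include <pthread.h>

/* Mirrors the illustrative app_ctx_t from the previous sketch. */
typedef struct {
    int cleanup_started;
    pthread_mutex_t cleanup_lock;
} app_ctx_t;

/* Minimal stand-in for glusterd_volinfo_t: just the per-volume store lock. */
typedef struct {
    pthread_mutex_t store_volinfo_lock;
} volinfo_t;

/* Stand-in for the real store logic: version bump, writing the volume
 * files, renaming the tmp store.  Returns 0 on success, -1 on failure. */
static int
write_volinfo_to_disk(volinfo_t *volinfo)
{
    (void)volinfo;
    return 0;
}

/* Same locking shape as the patched glusterd_store_volinfo(): the
 * process-wide cleanup_lock is taken first, the per-volume
 * store_volinfo_lock second, so cleanup_and_exit() in another thread
 * blocks until the on-disk update is complete (and vice versa). */
static int
store_volinfo_sketch(app_ctx_t *ctx, volinfo_t *volinfo)
{
    int ret = -1;

    pthread_mutex_lock(&ctx->cleanup_lock);
    pthread_mutex_lock(&volinfo->store_volinfo_lock);
    {
        ret = write_volinfo_to_disk(volinfo);
    }
    pthread_mutex_unlock(&volinfo->store_volinfo_lock);
    pthread_mutex_unlock(&ctx->cleanup_lock);

    return ret;
}

int
main(void)
{
    app_ctx_t ctx = {0, PTHREAD_MUTEX_INITIALIZER};
    volinfo_t volinfo = {PTHREAD_MUTEX_INITIALIZER};

    return store_volinfo_sketch(&ctx, &volinfo);
}

The sketch keeps cleanup_lock strictly outside the per-volume store_volinfo_lock, matching the order used in the hunk above; keeping that ordering consistent is what closes the race without introducing a lock-ordering deadlock.
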