summaryrefslogtreecommitdiffstats
path: root/libglusterfs/src/stack.h
diff options
context:
space:
mode:
authorKrishnan Parthasarathi <kparthas@redhat.com>2015-06-05 10:33:11 +0530
committerNiels de Vos <ndevos@redhat.com>2015-06-22 00:41:43 -0700
commit79e4c7b2fad6db15863efb4e979525b1bd4862ea (patch)
tree5bcf810a74536b5751213661ebf90370b02d6ff3 /libglusterfs/src/stack.h
parent6b4add6b3f54b6d3202535a492eaf906d619ad75 (diff)
stack: use list_head for managing frames
PROBLEM
--------

statedump requests that traverse call frames of all call stacks in
execution may race with a STACK_RESET on a stack. This could crash the
corresponding glusterfs process. For e.g, recently we observed this in
a regression test case tests/basic/afr/sparse-self-heal.t.

FIX
---

gf_proc_dump_pending_frames takes a (TRY_LOCK) call_pool->lock before
iterating through call frames of all call stacks in progress. With this
fix, STACK_RESET removes its call frames under the same lock.

Additional info
----------------

This fix makes call_stack_t to use struct list_head in place of custom
doubly-linked list implementation. This makes call_frame_t manipulation
easier to maintain in the context of STACK_WIND et al.

BUG: 1229658
Change-Id: I7e43bccd3994cd9184ab982dba3dbc10618f0d94
Signed-off-by: Krishnan Parthasarathi <kparthas@redhat.com>
Reviewed-on: http://review.gluster.org/11095
Reviewed-by: Niels de Vos <ndevos@redhat.com>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
Tested-by: NetBSD Build System <jenkins@build.gluster.org>
Diffstat (limited to 'libglusterfs/src/stack.h')
-rw-r--r--libglusterfs/src/stack.h98
1 files changed, 62 insertions, 36 deletions
diff --git a/libglusterfs/src/stack.h b/libglusterfs/src/stack.h
index e0cc45eeb60..ffe5c1a1085 100644
--- a/libglusterfs/src/stack.h
+++ b/libglusterfs/src/stack.h
@@ -59,8 +59,7 @@ struct call_pool {
struct _call_frame_t {
call_stack_t *root; /* stack root */
call_frame_t *parent; /* previous BP */
- call_frame_t *next;
- call_frame_t *prev; /* maintenance list */
+ struct list_head frames;
void *local; /* local variables */
xlator_t *this; /* implicit object */
ret_fn_t ret; /* op_return address */
@@ -103,7 +102,8 @@ struct _call_stack_t {
gf_lkowner_t lk_owner;
glusterfs_ctx_t *ctx;
- call_frame_t frames;
+ struct list_head myframes; /* List of call_frame_t that go
+ to make the call stack */
int32_t op;
int8_t type;
@@ -133,10 +133,8 @@ static inline void
FRAME_DESTROY (call_frame_t *frame)
{
void *local = NULL;
- if (frame->next)
- frame->next->prev = frame->prev;
- if (frame->prev)
- frame->prev->next = frame->next;
+
+ list_del_init (&frame->frames);
if (frame->local) {
local = frame->local;
frame->local = NULL;
@@ -155,6 +153,8 @@ static inline void
STACK_DESTROY (call_stack_t *stack)
{
void *local = NULL;
+ call_frame_t *frame = NULL;
+ call_frame_t *tmp = NULL;
LOCK (&stack->pool->lock);
{
@@ -163,16 +163,10 @@ STACK_DESTROY (call_stack_t *stack)
}
UNLOCK (&stack->pool->lock);
- if (stack->frames.local) {
- local = stack->frames.local;
- stack->frames.local = NULL;
- }
-
- LOCK_DESTROY (&stack->frames.lock);
LOCK_DESTROY (&stack->stack_lock);
- while (stack->frames.next) {
- FRAME_DESTROY (stack->frames.next);
+ list_for_each_entry_safe (frame, tmp, &stack->myframes, frames) {
+ FRAME_DESTROY (frame);
}
GF_FREE (stack->groups_large);
@@ -187,14 +181,28 @@ static inline void
STACK_RESET (call_stack_t *stack)
{
void *local = NULL;
+ call_frame_t *frame = NULL;
+ call_frame_t *tmp = NULL;
+ call_frame_t *last = NULL;
+ struct list_head toreset = {0};
+
+ INIT_LIST_HEAD (&toreset);
+
+ /* We acquire call_pool->lock only to remove the frames from this stack
+ * to preserve atomicity. This synchronizes across concurrent requests
+ * like statedump, STACK_DESTROY etc. */
- if (stack->frames.local) {
- local = stack->frames.local;
- stack->frames.local = NULL;
+ LOCK (&stack->pool->lock);
+ {
+ last = list_last_entry (&stack->myframes, call_frame_t, frames);
+ list_del_init (&last->frames);
+ list_splice_init (&stack->myframes, &toreset);
+ list_add (&last->frames, &stack->myframes);
}
+ UNLOCK (&stack->pool->lock);
- while (stack->frames.next) {
- FRAME_DESTROY (stack->frames.next);
+ list_for_each_entry_safe (frame, tmp, &toreset, frames) {
+ FRAME_DESTROY (frame);
}
if (local)
@@ -244,11 +252,7 @@ STACK_RESET (call_stack_t *stack)
LOCK_INIT (&_new->lock); \
LOCK(&frame->root->stack_lock); \
{ \
- _new->next = frame->root->frames.next; \
- _new->prev = &frame->root->frames; \
- if (frame->root->frames.next) \
- frame->root->frames.next->prev = _new; \
- frame->root->frames.next = _new; \
+ list_add (&_new->frames, &frame->root->myframes);\
frame->ref_count++; \
} \
UNLOCK(&frame->root->stack_lock); \
@@ -298,12 +302,8 @@ STACK_RESET (call_stack_t *stack)
LOCK_INIT (&_new->lock); \
LOCK(&frame->root->stack_lock); \
{ \
+ list_add (&_new->frames, &frame->root->myframes);\
frame->ref_count++; \
- _new->next = frame->root->frames.next; \
- _new->prev = &frame->root->frames; \
- if (frame->root->frames.next) \
- frame->root->frames.next->prev = _new; \
- frame->root->frames.next = _new; \
} \
UNLOCK(&frame->root->stack_lock); \
fn##_cbk = rfn; \
@@ -391,11 +391,27 @@ call_stack_alloc_groups (call_stack_t *stack, int ngrps)
return 0;
}
+static inline
+int call_frames_count (call_stack_t *call_stack)
+{
+ call_frame_t *pos;
+ int32_t count = 0;
+
+ if (!call_stack)
+ return count;
+
+ list_for_each_entry (pos, &call_stack->myframes, frames)
+ count++;
+
+ return count;
+}
+
static inline call_frame_t *
copy_frame (call_frame_t *frame)
{
call_stack_t *newstack = NULL;
call_stack_t *oldstack = NULL;
+ call_frame_t *newframe = NULL;
if (!frame) {
return NULL;
@@ -406,6 +422,19 @@ copy_frame (call_frame_t *frame)
return NULL;
}
+ INIT_LIST_HEAD (&newstack->myframes);
+
+ newframe = mem_get0 (frame->root->pool->frame_mem_pool);
+ if (!newframe) {
+ mem_put (newstack);
+ return NULL;
+ }
+
+ newframe->this = frame->this;
+ newframe->root = newstack;
+ INIT_LIST_HEAD (&newframe->frames);
+ list_add (&newframe->frames, &newstack->myframes);
+
oldstack = frame->root;
newstack->uid = oldstack->uid;
@@ -421,9 +450,6 @@ copy_frame (call_frame_t *frame)
memcpy (newstack->groups, oldstack->groups,
sizeof (gid_t) * oldstack->ngrps);
newstack->unique = oldstack->unique;
-
- newstack->frames.this = frame->this;
- newstack->frames.root = newstack;
newstack->pool = oldstack->pool;
newstack->lk_owner = oldstack->lk_owner;
newstack->ctx = oldstack->ctx;
@@ -432,11 +458,11 @@ copy_frame (call_frame_t *frame)
if (gettimeofday (&newstack->tv, NULL) == -1)
gf_log ("stack", GF_LOG_ERROR, "gettimeofday () failed."
" (%s)", strerror (errno));
- memcpy (&newstack->frames.begin, &newstack->tv,
+ memcpy (&newframe->begin, &newstack->tv,
sizeof (newstack->tv));
}
- LOCK_INIT (&newstack->frames.lock);
+ LOCK_INIT (&newframe->lock);
LOCK_INIT (&newstack->stack_lock);
LOCK (&oldstack->pool->lock);
@@ -446,7 +472,7 @@ copy_frame (call_frame_t *frame)
}
UNLOCK (&oldstack->pool->lock);
- return &newstack->frames;
+ return newframe;
}
void gf_proc_dump_pending_frames(call_pool_t *call_pool);