/*
  Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
  This file is part of GlusterFS.

  This file is licensed to you under your choice of the GNU Lesser
  General Public License, version 3 or any later version (LGPLv3 or
  later), or the GNU General Public License, version 2 (GPLv2), in all
  cases as published by the Free Software Foundation.
*/


#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
#endif

#include "glusterfs.h"
#include "logging.h"
#include "dict.h"
#include "xlator.h"
#include "syncop.h"
#include "qemu-block-memory-types.h"

#include "qemu-block.h"

/*
 * This code serves as the bridge from the main glusterfs context to the qemu
 * coroutine context via synctask. We create a single-threaded syncenv with a
 * single synctask responsible for processing a queue of coroutines. The qemu
 * code invoked from within the synctask function handlers uses the ucontext
 * coroutine implementation and scheduling logic internal to qemu. This
 * effectively donates a thread of execution to qemu and its internal
 * coroutine management.
 *
 * NOTE: Running concurrent synctasks has proven quite racy with regard to
 * qemu coroutine management, particularly the lifecycle differences between
 * top-level synctasks and internally created coroutines, and their
 * interactions with qemu-internal queues (and, in turn, locks). We
 * explicitly disallow that scenario, via the queue, until it is better
 * supported.
 */
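
/*
 * For reference, this file relies on exactly two fields of qb_local_t,
 * whose real definition lives in qemu-block.h. A minimal sketch of the
 * assumed layout (field names as used below; everything else elided):
 *
 *	typedef struct {
 *		...
 *		struct list_head	list;		// linkage on qb_co.queue
 *		synctask_fn_t		synctask_fn;	// deferred handler
 *		...
 *	} qb_local_t;
 */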

static struct {
	struct list_head	queue;	/* pending qb_local_t requests */
	gf_lock_t		lock;	/* protects queue and task */
	struct synctask		*task;	/* the single active synctask, if any */
} qb_co;

/* one-time setup of the request queue and its lock */
static void
init_qbco (void)
{
	INIT_LIST_HEAD(&qb_co.queue);
	LOCK_INIT(&qb_co.lock);
}

/*
 * The synctask has no completion work of its own; each queued handler
 * unwinds its own frame before returning.
 */
static int
synctask_nop_cbk (int ret, call_frame_t *frame, void *opaque)
{
	return 0;
}

static int
qb_synctask_wrap (void *opaque)
{
	qb_local_t *qb_local;

	LOCK(&qb_co.lock);

	while (!list_empty(&qb_co.queue)) {
		/* pop the request at the head of the queue */
		qb_local = list_entry(qb_co.queue.next, qb_local_t, list);
		list_del_init(&qb_local->list);

		/* drop the lock across the handler: it may block, and new
		 * requests may be queued in the meantime */
		UNLOCK(&qb_co.lock);

		qb_local->synctask_fn(qb_local);
		/* qb_local is now unwound and gone! */

		LOCK(&qb_co.lock);
	}

	/* queue drained: clear the task pointer while still holding the
	 * lock, so the next qb_coroutine() call spawns a fresh synctask */
	qb_co.task = NULL;

	UNLOCK(&qb_co.lock);

	return 0;
}

int
qb_coroutine (call_frame_t *frame, synctask_fn_t fn)
{
	qb_local_t *qb_local = NULL;
	qb_conf_t *qb_conf = NULL;
	static int init = 0;

	qb_local = frame->local;
	qb_local->synctask_fn = fn;
	qb_conf = frame->this->private;

	/* one-time initialization; relies on the first call arriving before
	 * any concurrent use, since the lock cannot protect its own setup */
	if (!init) {
		init = 1;
		init_qbco();
	}

	LOCK(&qb_co.lock);

	/* spawn the processing synctask if one is not already running */
	if (!qb_co.task)
		qb_co.task = synctask_create(qb_conf->env, qb_synctask_wrap,
					     synctask_nop_cbk, frame, NULL);

	if (!qb_co.task) {
		/* could not spawn the synctask; leave the queue untouched
		 * so a later call may retry */
		UNLOCK(&qb_co.lock);
		return -1;
	}

	list_add_tail(&qb_local->list, &qb_co.queue);

	UNLOCK(&qb_co.lock);

	return 0;
}
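
/*
 * Usage sketch (illustrative only; qb_do_something is a hypothetical
 * handler, not part of this file): a fop entry point stores a qb_local_t
 * in frame->local and defers the heavy lifting into the single
 * qemu-owning synctask via qb_coroutine():
 *
 *	static int
 *	qb_do_something (void *opaque)
 *	{
 *		qb_local_t *local = opaque;
 *
 *		// runs inside the queue-processing synctask; may use qemu
 *		// coroutine primitives freely, and must unwind the frame
 *		// before returning (see qb_synctask_wrap above)
 *		...
 *		return 0;
 *	}
 *
 *	// from the fop entry point, after setting up frame->local:
 *	qb_coroutine(frame, qb_do_something);
 */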