Diffstat (limited to 'rpc/rpc-lib/src/rpcsvc.c')
-rw-r--r--   rpc/rpc-lib/src/rpcsvc.c | 563
1 file changed, 404 insertions(+), 159 deletions(-)
diff --git a/rpc/rpc-lib/src/rpcsvc.c b/rpc/rpc-lib/src/rpcsvc.c
index c6545193a11..39910d481bf 100644
--- a/rpc/rpc-lib/src/rpcsvc.c
+++ b/rpc/rpc-lib/src/rpcsvc.c
@@ -10,19 +10,16 @@
 #include "rpcsvc.h"
 #include "rpc-transport.h"
-#include "dict.h"
-#include "logging.h"
-#include "byte-order.h"
-#include "common-utils.h"
-#include "compat-errno.h"
-#include "list.h"
+#include <glusterfs/dict.h>
+#include <glusterfs/byte-order.h>
+#include <glusterfs/compat-errno.h>
+#include <glusterfs/statedump.h>
 #include "xdr-rpc.h"
-#include "iobuf.h"
-#include "globals.h"
+#include <glusterfs/iobuf.h>
 #include "xdr-common.h"
 #include "xdr-generic.h"
 #include "rpc-common-xdr.h"
-#include "syncop.h"
+#include <glusterfs/syncop.h>
 #include "rpc-drc.h"
 #include "protocol-common.h"
@@ -36,15 +33,20 @@
 #include <fnmatch.h>
 #include <stdarg.h>
 #include <stdio.h>
+#include <dlfcn.h>
 #ifdef IPV6_DEFAULT
 #include <netconfig.h>
 #endif
 #include "xdr-rpcclnt.h"
-#include "glusterfs-acl.h"
+#include <glusterfs/glusterfs-acl.h>
 
-struct rpcsvc_program gluster_dump_prog;
+#ifndef PTHREAD_MUTEX_ADAPTIVE_NP
+#define PTHREAD_MUTEX_ADAPTIVE_NP PTHREAD_MUTEX_DEFAULT
+#endif
+
+static struct rpcsvc_program gluster_dump_prog;
 
 #define rpcsvc_alloc_request(svc, request) \
     do { \
@@ -63,10 +65,41 @@
 rpcsvc_get_listener(rpcsvc_t *svc, uint16_t port, rpc_transport_t *trans);
 
 int
 rpcsvc_notify(rpc_transport_t *trans, void *mydata, rpc_transport_event_t event,
               void *data, ...);
+void *
+rpcsvc_request_handler(void *arg);
 
 static int
 rpcsvc_match_subnet_v4(const char *addrtok, const char *ipaddr);
 
+static void
+rpcsvc_toggle_queue_status(rpcsvc_program_t *prog,
+                           rpcsvc_request_queue_t *queue,
+                           unsigned long status[])
+{
+    unsigned queue_index = queue - prog->request_queue;
+
+    status[queue_index / __BITS_PER_LONG] ^= (1UL << (queue_index %
+                                                      __BITS_PER_LONG));
+}
+
+int
+rpcsvc_get_free_queue_index(rpcsvc_program_t *prog)
+{
+    unsigned i, j = 0;
+
+    for (i = 0; i < EVENT_MAX_THREADS / __BITS_PER_LONG; i++)
+        if (prog->request_queue_status[i] != ULONG_MAX) {
+            j = __builtin_ctzl(~prog->request_queue_status[i]);
+            break;
+        }
+
+    if (i == EVENT_MAX_THREADS / __BITS_PER_LONG)
+        return -1;
+
+    prog->request_queue_status[i] |= (1UL << j);
+    return i * __BITS_PER_LONG + j;
+}
+
 rpcsvc_notify_wrapper_t *
 rpcsvc_notify_wrapper_alloc(void)
 {
@@ -309,6 +342,10 @@ rpcsvc_program_actor(rpcsvc_request_t *req)
         goto err;
     }
 
+    if (svc->xl->ctx->measure_latency) {
+        timespec_now(&req->begin);
+    }
+
     req->ownthread = program->ownthread;
     req->synctask = program->synctask;
@@ -575,6 +612,73 @@ rpcsvc_check_and_reply_error(int ret, call_frame_t *frame, void *opaque)
     return 0;
 }
 
+void
+rpcsvc_queue_event_thread_death(rpcsvc_t *svc, rpcsvc_program_t *prog, int gen)
+{
+    rpcsvc_request_queue_t *queue = NULL;
+    int num = 0;
+    void *value = NULL;
+    rpcsvc_request_t *req = NULL;
+    char empty = 0;
+
+    value = pthread_getspecific(prog->req_queue_key);
+    if (value == NULL) {
+        return;
+    }
+
+    num = ((unsigned long)value) - 1;
+
+    queue = &prog->request_queue[num];
+
+    if (queue->gen == gen) {
+        /* duplicate event */
+        gf_log(GF_RPCSVC, GF_LOG_INFO,
+               "not queuing duplicate event thread death. "
+               "queue %d program %s",
+               num, prog->progname);
+        return;
+    }
+
+    rpcsvc_alloc_request(svc, req);
+    req->prognum = RPCSVC_INFRA_PROGRAM;
+    req->procnum = RPCSVC_PROC_EVENT_THREAD_DEATH;
+    gf_log(GF_RPCSVC, GF_LOG_INFO,
+           "queuing event thread death request to queue %d of program %s", num,
+           prog->progname);
+
+    pthread_mutex_lock(&queue->queue_lock);
+    {
+        empty = list_empty(&queue->request_queue);
+
+        list_add_tail(&req->request_list, &queue->request_queue);
+        queue->gen = gen;
+
+        if (empty && queue->waiting)
+            pthread_cond_signal(&queue->queue_cond);
+    }
+    pthread_mutex_unlock(&queue->queue_lock);
+
+    return;
+}
+
+int
+rpcsvc_handle_event_thread_death(rpcsvc_t *svc, rpc_transport_t *trans, int gen)
+{
+    rpcsvc_program_t *prog = NULL;
+
+    pthread_rwlock_rdlock(&svc->rpclock);
+    {
+        list_for_each_entry(prog, &svc->programs, program)
+        {
+            if (prog->ownthread)
+                rpcsvc_queue_event_thread_death(svc, prog, gen);
+        }
+    }
+    pthread_rwlock_unlock(&svc->rpclock);
+
+    return 0;
+}
+
 int
 rpcsvc_handle_rpc_call(rpcsvc_t *svc, rpc_transport_t *trans,
                        rpc_transport_pollin_t *msg)
@@ -585,9 +689,12 @@
     int ret = -1;
     uint16_t port = 0;
    gf_boolean_t is_unix = _gf_false, empty = _gf_false;
-    gf_boolean_t unprivileged = _gf_false;
+    gf_boolean_t unprivileged = _gf_false, spawn_request_handler = 0;
     drc_cached_op_t *reply = NULL;
     rpcsvc_drc_globals_t *drc = NULL;
+    rpcsvc_request_queue_t *queue = NULL;
+    long num = 0;
+    void *value = NULL;
 
     if (!trans || !svc)
         return -1;
@@ -700,19 +807,81 @@
             ret = synctask_new(THIS->ctx->env, (synctask_fn_t)actor_fn,
                                rpcsvc_check_and_reply_error, NULL, req);
         } else if (req->ownthread) {
-            pthread_mutex_lock(&req->prog->queue_lock);
+            value = pthread_getspecific(req->prog->req_queue_key);
+            if (value == NULL) {
+                pthread_mutex_lock(&req->prog->thr_lock);
+                {
+                    num = rpcsvc_get_free_queue_index(req->prog);
+                    if (num != -1) {
+                        num++;
+                        value = (void *)num;
+                        ret = pthread_setspecific(req->prog->req_queue_key,
+                                                  value);
+                        if (ret < 0) {
+                            gf_log(GF_RPCSVC, GF_LOG_WARNING,
+                                   "setting request queue in TLS failed");
+                            rpcsvc_toggle_queue_status(
+                                req->prog, &req->prog->request_queue[num - 1],
+                                req->prog->request_queue_status);
+                            num = -1;
+                        } else {
+                            spawn_request_handler = 1;
+                        }
+                    }
+                }
+                pthread_mutex_unlock(&req->prog->thr_lock);
+            }
+
+            if (num == -1)
+                goto noqueue;
+
+            num = ((unsigned long)value) - 1;
+
+            queue = &req->prog->request_queue[num];
+
+            if (spawn_request_handler) {
+                ret = gf_thread_create(&queue->thread, NULL,
+                                       rpcsvc_request_handler, queue,
+                                       "rpcrqhnd");
+                if (!ret) {
+                    gf_log(GF_RPCSVC, GF_LOG_INFO,
+                           "spawned a request handler thread for queue %d",
+                           (int)num);
+
+                    req->prog->threadcount++;
+                } else {
+                    gf_log(
+                        GF_RPCSVC, GF_LOG_INFO,
+                        "spawning a request handler thread for queue %d failed",
+                        (int)num);
+                    ret = pthread_setspecific(req->prog->req_queue_key, 0);
+                    if (ret < 0) {
+                        gf_log(GF_RPCSVC, GF_LOG_WARNING,
+                               "resetting request queue in TLS failed");
+                    }
+
+                    rpcsvc_toggle_queue_status(
+                        req->prog, &req->prog->request_queue[num - 1],
+                        req->prog->request_queue_status);
+
+                    goto noqueue;
+                }
+            }
+
+            pthread_mutex_lock(&queue->queue_lock);
             {
-                empty = list_empty(&req->prog->request_queue);
+                empty = list_empty(&queue->request_queue);
 
-                list_add_tail(&req->request_list, &req->prog->request_queue);
+                list_add_tail(&req->request_list, &queue->request_queue);
 
-                if (empty)
-                    pthread_cond_signal(&req->prog->queue_cond);
+                if (empty && queue->waiting)
+                    pthread_cond_signal(&queue->queue_cond);
             }
-            pthread_mutex_unlock(&req->prog->queue_lock);
+            pthread_mutex_unlock(&queue->queue_lock);
 
             ret = 0;
         } else {
+        noqueue:
             ret = actor_fn(req);
         }
     }
@@ -839,6 +1008,12 @@ rpcsvc_notify(rpc_transport_t *trans, void *mydata, rpc_transport_event_t event,
                    "got MAP_XID event, which should have not come");
             ret = 0;
             break;
+
+        case RPC_TRANSPORT_EVENT_THREAD_DIED:
+            rpcsvc_handle_event_thread_death(svc, trans,
+                                             (int)(unsigned long)data);
+            ret = 0;
+            break;
     }
 
 out:
@@ -1164,9 +1339,9 @@ rpcsvc_transport_submit(rpc_transport_t *trans, struct iovec *rpchdr,
                         int progpayloadcount, struct iobref *iobref, void *priv)
 {
     int ret = -1;
-    rpc_transport_reply_t reply = {{
+    rpc_transport_reply_t reply = {
         0,
-    }};
+    };
 
     if ((!trans) || (!rpchdr) || (!rpchdr->iov_base)) {
         goto out;
@@ -1320,10 +1495,18 @@ rpcsvc_submit_generic(rpcsvc_request_t *req, struct iovec *proghdr,
     size_t hdrlen = 0;
     char new_iobref = 0;
     rpcsvc_drc_globals_t *drc = NULL;
+    gf_latency_t *lat = NULL;
 
     if ((!req) || (!req->trans))
         return -1;
 
+    if (req->prog && req->begin.tv_sec) {
+        if ((req->procnum >= 0) && (req->procnum < req->prog->numactors)) {
+            timespec_now(&req->end);
+            lat = &req->prog->latencies[req->procnum];
+            gf_latency_update(lat, &req->begin, &req->end);
+        }
+    }
     trans = req->trans;
 
     for (i = 0; i < hdrcount; i++) {
@@ -1654,6 +1837,15 @@ rpcsvc_submit_message(rpcsvc_request_t *req, struct iovec *proghdr,
                                  iobref);
 }
 
+void
+rpcsvc_program_destroy(rpcsvc_program_t *program)
+{
+    if (program) {
+        GF_FREE(program->latencies);
+        GF_FREE(program);
+    }
+}
+
 int
 rpcsvc_program_unregister(rpcsvc_t *svc, rpcsvc_program_t *program)
 {
@@ -1663,6 +1855,18 @@ rpcsvc_program_unregister(rpcsvc_t *svc, rpcsvc_program_t *program)
         goto out;
     }
 
+    pthread_rwlock_rdlock(&svc->rpclock);
+    {
+        list_for_each_entry(prog, &svc->programs, program)
+        {
+            if ((prog->prognum == program->prognum) &&
+                (prog->progver == program->progver)) {
+                break;
+            }
+        }
+    }
+    pthread_rwlock_unlock(&svc->rpclock);
+
     ret = rpcsvc_program_unregister_portmap(program);
     if (ret == -1) {
         gf_log(GF_RPCSVC, GF_LOG_ERROR,
@@ -1679,17 +1883,6 @@ rpcsvc_program_unregister(rpcsvc_t *svc, rpcsvc_program_t *program)
         goto out;
     }
 #endif
-    pthread_rwlock_rdlock(&svc->rpclock);
-    {
-        list_for_each_entry(prog, &svc->programs, program)
-        {
-            if ((prog->prognum == program->prognum) &&
-                (prog->progver == program->progver)) {
-                break;
-            }
-        }
-    }
-    pthread_rwlock_unlock(&svc->rpclock);
 
     gf_log(GF_RPCSVC, GF_LOG_DEBUG,
            "Program unregistered: %s, Num: %d,"
@@ -1710,6 +1903,8 @@ rpcsvc_program_unregister(rpcsvc_t *svc, rpcsvc_program_t *program)
     ret = 0;
 
 out:
+    rpcsvc_program_destroy(prog);
+
     if (ret == -1) {
         if (program) {
             gf_log(GF_RPCSVC, GF_LOG_ERROR,
@@ -1804,6 +1999,7 @@ rpcsvc_create_listener(rpcsvc_t *svc, dict_t *options, char *name)
 
     listener = rpcsvc_listener_alloc(svc, trans);
     if (listener == NULL) {
+        ret = -1;
        goto out;
     }
 
@@ -1811,6 +2007,7 @@ rpcsvc_create_listener(rpcsvc_t *svc, dict_t *options, char *name)
 out:
     if (!listener && trans) {
         rpc_transport_disconnect(trans, _gf_true);
+        rpc_transport_cleanup(trans);
     }
 
     return ret;
@@ -1877,6 +2074,7 @@ rpcsvc_create_listeners(rpcsvc_t *svc, dict_t *options, char *name)
             goto out;
         }
 
+        dict_del(options, "notify-poller-death");
         GF_FREE(transport_name);
         transport_name = NULL;
         count++;
@@ -1961,55 +2159,84 @@ out:
 void *
 rpcsvc_request_handler(void *arg)
 {
-    rpcsvc_program_t *program = arg;
-    rpcsvc_request_t *req = NULL;
+    rpcsvc_request_queue_t *queue = NULL;
+    rpcsvc_program_t *program = NULL;
+    rpcsvc_request_t *req = NULL, *tmp_req = NULL;
     rpcsvc_actor_t *actor = NULL;
     gf_boolean_t done = _gf_false;
     int ret = 0;
+    struct list_head tmp_list;
+
+    queue = arg;
+    program = queue->program;
+
+    INIT_LIST_HEAD(&tmp_list);
 
     if (!program)
         return NULL;
 
     while (1) {
-        pthread_mutex_lock(&program->queue_lock);
+        pthread_mutex_lock(&queue->queue_lock);
         {
-            if (!program->alive && list_empty(&program->request_queue)) {
+            if (!program->alive && list_empty(&queue->request_queue)) {
                 done = 1;
                 goto unlock;
             }
 
-            while (list_empty(&program->request_queue) &&
-                   (program->threadcount <= program->eventthreadcount)) {
-                pthread_cond_wait(&program->queue_cond, &program->queue_lock);
+            while (list_empty(&queue->request_queue)) {
+                queue->waiting = _gf_true;
+                pthread_cond_wait(&queue->queue_cond, &queue->queue_lock);
             }
 
-            if (program->threadcount > program->eventthreadcount) {
-                done = 1;
-                program->threadcount--;
-
-                gf_log(GF_RPCSVC, GF_LOG_INFO,
-                       "program '%s' thread terminated; "
-                       "total count:%d",
-                       program->progname, program->threadcount);
-            } else if (!list_empty(&program->request_queue)) {
-                req = list_entry(program->request_queue.next, typeof(*req),
-                                 request_list);
+            queue->waiting = _gf_false;
 
-                list_del_init(&req->request_list);
+            if (!list_empty(&queue->request_queue)) {
+                INIT_LIST_HEAD(&tmp_list);
+                list_splice_init(&queue->request_queue, &tmp_list);
             }
         }
     unlock:
-        pthread_mutex_unlock(&program->queue_lock);
+        pthread_mutex_unlock(&queue->queue_lock);
 
-        if (req) {
-            THIS = req->svc->xl;
-            actor = rpcsvc_program_actor(req);
-            ret = actor->actor(req);
+        list_for_each_entry_safe(req, tmp_req, &tmp_list, request_list)
+        {
+            if (req) {
+                list_del_init(&req->request_list);
 
-            if (ret != 0) {
-                rpcsvc_check_and_reply_error(ret, NULL, req);
+                if (req->prognum == RPCSVC_INFRA_PROGRAM) {
+                    switch (req->procnum) {
+                        case RPCSVC_PROC_EVENT_THREAD_DEATH:
+                            gf_log(GF_RPCSVC, GF_LOG_INFO,
+                                   "event thread died, exiting request handler "
+                                   "thread for queue %d of program %s",
+                                   (int)(queue - &program->request_queue[0]),
+                                   program->progname);
+                            done = 1;
+                            pthread_mutex_lock(&program->thr_lock);
+                            {
+                                rpcsvc_toggle_queue_status(
+                                    program, queue,
+                                    program->request_queue_status);
+                                program->threadcount--;
+                            }
+                            pthread_mutex_unlock(&program->thr_lock);
+                            rpcsvc_request_destroy(req);
+                            break;
+
+                        default:
+                            break;
+                    }
+                } else {
+                    THIS = req->svc->xl;
+                    actor = rpcsvc_program_actor(req);
+                    ret = actor->actor(req);
+
+                    if (ret != 0) {
+                        rpcsvc_check_and_reply_error(ret, NULL, req);
+                    }
+                    req = NULL;
+                }
             }
-            req = NULL;
         }
 
         if (done)
@@ -2020,61 +2247,14 @@ rpcsvc_request_handler(void *arg)
 }
 
 int
-rpcsvc_spawn_threads(rpcsvc_t *svc, rpcsvc_program_t *program)
-{
-    int ret = 0, delta = 0, creates = 0;
-
-    if (!program || !svc)
-        goto out;
-
-    pthread_mutex_lock(&program->queue_lock);
-    {
-        delta = program->eventthreadcount - program->threadcount;
-
-        if (delta >= 0) {
-            while (delta--) {
-                ret = gf_thread_create(&program->thread, NULL,
-                                       rpcsvc_request_handler, program,
-                                       "rpcrqhnd");
-                if (!ret) {
-                    program->threadcount++;
-                    creates++;
-                }
-            }
-
-            if (creates) {
-                gf_log(GF_RPCSVC, GF_LOG_INFO,
-                       "spawned %d threads for program '%s'; "
-                       "total count:%d",
-                       creates, program->progname, program->threadcount);
-            }
-        } else {
-            gf_log(GF_RPCSVC, GF_LOG_INFO,
-                   "terminating %d threads for program '%s'", -delta,
-                   program->progname);
-
-            /* this signal is to just wake up the threads so they
-             * test for the change in eventthreadcount and kill
-             * themselves until the program thread count becomes
-             * equal to the event thread count
-             */
-            pthread_cond_broadcast(&program->queue_cond);
-        }
-    }
-    pthread_mutex_unlock(&program->queue_lock);
-
-out:
-    return creates;
-}
-
-int
 rpcsvc_program_register(rpcsvc_t *svc, rpcsvc_program_t *program,
                         gf_boolean_t add_to_head)
 {
-    int ret = -1;
-    int creates = -1;
+    int ret = -1, i = 0;
     rpcsvc_program_t *newprog = NULL;
     char already_registered = 0;
+    pthread_mutexattr_t attr[EVENT_MAX_THREADS];
+    pthread_mutexattr_t thr_attr;
 
     if (!svc) {
         goto out;
@@ -2108,25 +2288,45 @@ rpcsvc_program_register(rpcsvc_t *svc, rpcsvc_program_t *program,
     }
 
     memcpy(newprog, program, sizeof(*program));
+    newprog->latencies = gf_latency_new(program->numactors);
+    if (!newprog->latencies) {
+        rpcsvc_program_destroy(newprog);
+        goto out;
+    }
 
     INIT_LIST_HEAD(&newprog->program);
-    INIT_LIST_HEAD(&newprog->request_queue);
-    pthread_mutex_init(&newprog->queue_lock, NULL);
-    pthread_cond_init(&newprog->queue_cond, NULL);
+    pthread_mutexattr_init(&thr_attr);
+    pthread_mutexattr_settype(&thr_attr, PTHREAD_MUTEX_ADAPTIVE_NP);
+
+    for (i = 0; i < EVENT_MAX_THREADS; i++) {
+        pthread_mutexattr_init(&attr[i]);
+        pthread_mutexattr_settype(&attr[i], PTHREAD_MUTEX_ADAPTIVE_NP);
+        INIT_LIST_HEAD(&newprog->request_queue[i].request_queue);
+        pthread_mutex_init(&newprog->request_queue[i].queue_lock, &attr[i]);
+        pthread_cond_init(&newprog->request_queue[i].queue_cond, NULL);
+        newprog->request_queue[i].program = newprog;
+    }
+
+    pthread_mutex_init(&newprog->thr_lock, &thr_attr);
+    pthread_cond_init(&newprog->thr_cond, NULL);
 
     newprog->alive = _gf_true;
 
+    if (gf_async_ctrl.enabled) {
+        newprog->ownthread = _gf_false;
+        newprog->synctask = _gf_false;
+    }
+
     /* make sure synctask gets priority over ownthread */
     if (newprog->synctask)
         newprog->ownthread = _gf_false;
 
     if (newprog->ownthread) {
-        newprog->eventthreadcount = 1;
-        creates = rpcsvc_spawn_threads(svc, newprog);
+        struct event_pool *ep = svc->ctx->event_pool;
+        newprog->eventthreadcount = ep->eventthreadcount;
 
-        if (creates < 1) {
-            goto out;
-        }
+        pthread_key_create(&newprog->req_queue_key, NULL);
+        newprog->thr_queue = 1;
     }
 
     pthread_rwlock_wrlock(&svc->rpclock);
@@ -2366,7 +2566,7 @@ rpcsvc_reconfigure_options(rpcsvc_t *svc, dict_t *options)
      */
     dict_del(svc->options, srchkey);
     if (!dict_get_str(options, srchkey, &keyval)) {
-        ret = dict_set_str(svc->options, srchkey, keyval);
+        ret = dict_set_dynstr_with_alloc(svc->options, srchkey, keyval);
         if (ret < 0) {
             gf_log(GF_RPCSVC, GF_LOG_ERROR, "dict_set_str error");
             GF_FREE(srchkey);
@@ -2398,7 +2598,7 @@ rpcsvc_reconfigure_options(rpcsvc_t *svc, dict_t *options)
      */
     dict_del(svc->options, srchkey);
     if (!dict_get_str(options, srchkey, &keyval)) {
-        ret = dict_set_str(svc->options, srchkey, keyval);
+        ret = dict_set_dynstr_with_alloc(svc->options, srchkey, keyval);
         if (ret < 0) {
             gf_log(GF_RPCSVC, GF_LOG_ERROR, "dict_set_str error");
             GF_FREE(srchkey);
@@ -2418,18 +2618,13 @@ rpcsvc_reconfigure_options(rpcsvc_t *svc, dict_t *options)
 }
 
 int
-rpcsvc_transport_unix_options_build(dict_t **options, char *filepath)
+rpcsvc_transport_unix_options_build(dict_t *dict, char *filepath)
 {
-    dict_t *dict = NULL;
     char *fpath = NULL;
     int ret = -1;
 
     GF_ASSERT(filepath);
-    GF_ASSERT(options);
-
-    dict = dict_new();
-    if (!dict)
-        goto out;
+    GF_VALIDATE_OR_GOTO("rpcsvc", dict, out);
 
     fpath = gf_strdup(filepath);
     if (!fpath) {
@@ -2452,13 +2647,9 @@
     ret = dict_set_str(dict, "transport-type", "socket");
     if (ret)
         goto out;
-
-    *options = dict;
 out:
     if (ret) {
         GF_FREE(fpath);
-        if (dict)
-            dict_unref(dict);
     }
     return ret;
 }
@@ -2553,6 +2744,43 @@ rpcsvc_get_throttle(rpcsvc_t *svc)
     return svc->throttle;
 }
 
+/* Function call to cleanup resources for svc
+ */
+int
+rpcsvc_destroy(rpcsvc_t *svc)
+{
+    struct rpcsvc_auth_list *auth = NULL;
+    struct rpcsvc_auth_list *tmp = NULL;
+    rpcsvc_listener_t *listener = NULL;
+    rpcsvc_listener_t *next = NULL;
+    int ret = 0;
+
+    if (!svc)
+        return ret;
+
+    list_for_each_entry_safe(listener, next, &svc->listeners, list)
+    {
+        rpcsvc_listener_destroy(listener);
+    }
+
+    list_for_each_entry_safe(auth, tmp, &svc->authschemes, authlist)
+    {
+        list_del_init(&auth->authlist);
+        GF_FREE(auth);
+    }
+
+    rpcsvc_program_unregister(svc, &gluster_dump_prog);
+    if (svc->rxpool) {
+        mem_pool_destroy(svc->rxpool);
+        svc->rxpool = NULL;
+    }
+
+    pthread_rwlock_destroy(&svc->rpclock);
+    GF_FREE(svc);
+
+    return ret;
+}
+
 /* The global RPC service initializer.
  */
 rpcsvc_t *
@@ -2649,6 +2877,10 @@ rpcsvc_transport_peer_check_search(dict_t *options, char *pattern, char *ip,
     }
 
     dup_addrstr = gf_strdup(addrstr);
+    if (dup_addrstr == NULL) {
+        ret = -1;
+        goto err;
+    }
     addrtok = strtok_r(dup_addrstr, ",", &svptr);
     while (addrtok) {
         /* CASEFOLD not present on Solaris */
@@ -2972,10 +3204,6 @@ rpcsvc_match_subnet_v4(const char *addrtok, const char *ipaddr)
     if (inet_pton(AF_INET, ipaddr, &sin1.sin_addr) == 0)
         goto out;
 
-    /* Find the network socket addr of subnet pattern */
-    if (inet_pton(AF_INET, netaddr, &sin2.sin_addr) == 0)
-        goto out;
-
     slash = strchr(netaddr, '/');
     if (slash) {
         *slash = '\0';
@@ -2988,9 +3216,16 @@ rpcsvc_match_subnet_v4(const char *addrtok, const char *ipaddr)
         if (prefixlen > 31)
             goto out;
     } else {
+        /* if there is no '/', then this function wouldn't be called */
         goto out;
     }
 
+    /* Need to do this after removing '/', as inet_pton() take IP address as
+     * second argument. Once we get sin2, then comparison is oranges to orange
+     */
+    if (inet_pton(AF_INET, netaddr, &sin2.sin_addr) == 0)
+        goto out;
+
     shift = IPv4_ADDR_SIZE - prefixlen;
     mask.sin_addr.s_addr = htonl((uint32_t)~0 << shift);
 
@@ -3003,45 +3238,55 @@ out:
     return ret;
 }
 
-/* During reconfigure, Make sure to call this function after event-threads are
- * reconfigured as programs' threadcount will be made equal to event threads.
- */
-
-int
-rpcsvc_ownthread_reconf(rpcsvc_t *svc, int new_eventthreadcount)
+void
+rpcsvc_program_dump(rpcsvc_program_t *prog)
 {
-    int ret = -1;
-    rpcsvc_program_t *program = NULL;
+    char key_prefix[GF_DUMP_MAX_BUF_LEN];
+    char key[GF_DUMP_MAX_BUF_LEN];
+    int i;
 
-    if (!svc) {
-        ret = 0;
-        goto out;
+    snprintf(key_prefix, GF_DUMP_MAX_BUF_LEN, "%s", prog->progname);
+    gf_proc_dump_add_section("%s", key_prefix);
+
+    gf_proc_dump_build_key(key, key_prefix, "program-number");
+    gf_proc_dump_write(key, "%d", prog->prognum);
+
+    gf_proc_dump_build_key(key, key_prefix, "program-version");
+    gf_proc_dump_write(key, "%d", prog->progver);
+
+    strncat(key_prefix, ".latency",
+            sizeof(key_prefix) - strlen(key_prefix) - 1);
+
+    for (i = 0; i < prog->numactors; i++) {
+        gf_proc_dump_build_key(key, key_prefix, "%s", prog->actors[i].procname);
+        gf_latency_statedump_and_reset(key, &prog->latencies[i]);
     }
+}
 
-    pthread_rwlock_wrlock(&svc->rpclock);
+void
+rpcsvc_statedump(rpcsvc_t *svc)
+{
+    rpcsvc_program_t *prog = NULL;
+    int ret = 0;
+    ret = pthread_rwlock_tryrdlock(&svc->rpclock);
+    if (ret)
+        return;
     {
-        list_for_each_entry(program, &svc->programs, program)
+        list_for_each_entry(prog, &svc->programs, program)
         {
-            if (program->ownthread) {
-                program->eventthreadcount = new_eventthreadcount;
-                rpcsvc_spawn_threads(svc, program);
-            }
+            rpcsvc_program_dump(prog);
         }
     }
     pthread_rwlock_unlock(&svc->rpclock);
-
-    ret = 0;
-out:
-    return ret;
 }
 
-rpcsvc_actor_t gluster_dump_actors[GF_DUMP_MAXVALUE] = {
-    [GF_DUMP_NULL] = {"NULL", GF_DUMP_NULL, NULL, NULL, 0, DRC_NA},
-    [GF_DUMP_DUMP] = {"DUMP", GF_DUMP_DUMP, rpcsvc_dump, NULL, 0, DRC_NA},
-    [GF_DUMP_PING] = {"PING", GF_DUMP_PING, rpcsvc_ping, NULL, 0, DRC_NA},
+static rpcsvc_actor_t gluster_dump_actors[GF_DUMP_MAXVALUE] = {
+    [GF_DUMP_NULL] = {"NULL", NULL, NULL, GF_DUMP_NULL, DRC_NA, 0},
+    [GF_DUMP_DUMP] = {"DUMP", rpcsvc_dump, NULL, GF_DUMP_DUMP, DRC_NA, 0},
+    [GF_DUMP_PING] = {"PING", rpcsvc_ping, NULL, GF_DUMP_PING, DRC_NA, 0},
 };
 
-struct rpcsvc_program gluster_dump_prog = {
+static struct rpcsvc_program gluster_dump_prog = {
     .progname = "GF-DUMP",
    .prognum = GLUSTER_DUMP_PROGRAM,
    .progver = GLUSTER_DUMP_VERSION,
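Editor's note (not part of the patch): the heart of the per-thread queueing above is the small bitmap allocator, rpcsvc_get_free_queue_index() and rpcsvc_toggle_queue_status(). Below is a minimal standalone sketch of the same technique; MAX_QUEUES stands in for EVENT_MAX_THREADS, and the locking the real code does under prog->thr_lock is omitted.

/* Sketch of a bitmap index allocator: one bit per queue slot.
 * __builtin_ctzl() is a GCC/Clang builtin, as in the patch. */
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define MAX_QUEUES 1024 /* assumed stand-in for EVENT_MAX_THREADS */

static unsigned long queue_status[MAX_QUEUES / BITS_PER_LONG];

/* Claim the lowest free index, or return -1 if every slot is taken. */
static int
get_free_queue_index(void)
{
    unsigned long i, j = 0;

    for (i = 0; i < MAX_QUEUES / BITS_PER_LONG; i++)
        if (queue_status[i] != ULONG_MAX) {
            /* ~word turns free slots into 1-bits; ctzl finds the lowest */
            j = __builtin_ctzl(~queue_status[i]);
            break;
        }

    if (i == MAX_QUEUES / BITS_PER_LONG)
        return -1;

    queue_status[i] |= (1UL << j); /* mark the slot busy */
    return (int)(i * BITS_PER_LONG + j);
}

/* Flip a slot's bit, as rpcsvc_toggle_queue_status() does; the patch
 * uses this both to release a slot and to roll back a failed claim. */
static void
toggle_queue_index(int idx)
{
    queue_status[idx / BITS_PER_LONG] ^= (1UL << (idx % BITS_PER_LONG));
}

int
main(void)
{
    int a = get_free_queue_index();                     /* 0 */
    int b = get_free_queue_index();                     /* 1 */
    toggle_queue_index(a);                              /* free slot 0 */
    printf("%d %d %d\n", a, b, get_free_queue_index()); /* 0 1 0 */
    return 0;
}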
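Editor's note (not part of the patch): rpcsvc_handle_rpc_call() caches each poller thread's queue number in thread-local storage with an off-by-one encoding, because pthread_getspecific() returns NULL both when the key was never set and when a literal 0 was stored. A sketch of just that encoding follows; get_or_assign_queue and dummy_alloc_index are illustrative names, not GlusterFS symbols.

#include <pthread.h>
#include <stdio.h>

static pthread_key_t req_queue_key;

static int
dummy_alloc_index(void) /* stand-in for rpcsvc_get_free_queue_index() */
{
    static int next = 0;
    return next++;
}

static int
get_or_assign_queue(int (*alloc_index)(void))
{
    void *value = pthread_getspecific(req_queue_key);
    long num;

    if (value == NULL) { /* first request seen on this thread */
        num = alloc_index();
        if (num == -1)
            return -1; /* no free queue: caller handles the request inline */
        /* store index + 1 so a valid index 0 is distinguishable from NULL */
        if (pthread_setspecific(req_queue_key, (void *)(num + 1)) != 0)
            return -1;
        return (int)num;
    }
    return (int)((long)value - 1); /* decode the cached index */
}

int
main(void)
{
    pthread_key_create(&req_queue_key, NULL);
    printf("%d\n", get_or_assign_queue(dummy_alloc_index)); /* 0 */
    printf("%d\n", get_or_assign_queue(dummy_alloc_index)); /* 0: cached */
    return 0;
}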
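Editor's note (not part of the patch): the latency hooks added in rpcsvc_program_actor() and rpcsvc_submit_generic() bracket each actor with two monotonic-clock stamps and fold the difference into a per-procedure accumulator. A simplified model, with struct latency standing in for gf_latency_t, whose real definition lives elsewhere in libglusterfs:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct latency { /* simplified stand-in for gf_latency_t */
    uint64_t count;
    uint64_t total_ns;
};

/* Fold one (begin, end) interval into the accumulator, roughly what
 * gf_latency_update() does for req->prog->latencies[req->procnum]. */
static void
latency_update(struct latency *lat, const struct timespec *begin,
               const struct timespec *end)
{
    int64_t ns = (int64_t)(end->tv_sec - begin->tv_sec) * 1000000000 +
                 (end->tv_nsec - begin->tv_nsec);
    lat->count++;
    lat->total_ns += (uint64_t)ns;
}

int
main(void)
{
    struct latency lat = {0};
    struct timespec begin, end;

    clock_gettime(CLOCK_MONOTONIC, &begin); /* like timespec_now(&req->begin) */
    /* ... the actor would run here ... */
    clock_gettime(CLOCK_MONOTONIC, &end);   /* stamped just before the reply */
    latency_update(&lat, &begin, &end);

    printf("avg %llu ns over %llu calls\n",
           (unsigned long long)(lat.total_ns / lat.count),
           (unsigned long long)lat.count);
    return 0;
}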
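Editor's note (not part of the patch): the reordering in rpcsvc_match_subnet_v4() matters because inet_pton() cannot parse a string like "10.0.0.0/8"; with the old ordering the parse ran before the "/8" suffix was stripped and so failed for every subnet pattern. A self-contained approximation of the corrected flow (match_subnet_v4 here is a simplified stand-in, not the GlusterFS function):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return 1 if ipaddr falls inside pattern ("a.b.c.d/prefix"), else 0. */
static int
match_subnet_v4(const char *pattern, const char *ipaddr)
{
    char netaddr[32];
    struct in_addr ip, net;
    char *slash;
    long prefix;
    uint32_t mask;

    snprintf(netaddr, sizeof(netaddr), "%s", pattern);
    slash = strchr(netaddr, '/');
    if (!slash)
        return 0;
    *slash = '\0'; /* "10.0.0.0/8" -> "10.0.0.0", parseable by inet_pton() */
    prefix = strtol(slash + 1, NULL, 10);
    if (prefix < 1 || prefix > 31)
        return 0;

    /* both parses must happen on plain dotted quads */
    if (inet_pton(AF_INET, ipaddr, &ip) != 1 ||
        inet_pton(AF_INET, netaddr, &net) != 1)
        return 0;

    /* build the network mask and compare the network parts */
    mask = htonl((uint32_t)~0 << (32 - prefix));
    return (ip.s_addr & mask) == (net.s_addr & mask);
}

int
main(void)
{
    printf("%d\n", match_subnet_v4("10.0.0.0/8", "10.1.2.3"));    /* 1 */
    printf("%d\n", match_subnet_v4("10.0.0.0/8", "192.168.1.1")); /* 0 */
    return 0;
}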
