From e9a37d5a3471db02e1c7922cee93bf290e2f326d Mon Sep 17 00:00:00 2001
From: Avra Sengupta
Date: Tue, 5 May 2015 10:58:10 +0530
Subject: glusterd/snapshot: Return correct errno in events of failure - PATCH 1

Backport of patch http://review.gluster.org/#/c/10313/

RETCODE        ERROR
-------------------------------------------
30800          Internal Error
30801          Another Transaction In Progress

>Change-Id: Ica7fd2e513b2c28717b6df73cfb2667725dbf057
>BUG: 1226117
>Signed-off-by: Avra Sengupta
>Reviewed-on: http://review.gluster.org/10313
>Reviewed-by: Rajesh Joseph
>Tested-by: Gluster Build System
>Tested-by: NetBSD Build System
>Reviewed-by: Krishnan Parthasarathi
>(cherry picked from commit 4397d7e72fdff6f01c59b72eebea421f23c1a392)

Change-Id: Iace6ac0f150919cead94e6c5d99a23d28d45046e
BUG: 1226117
Signed-off-by: Avra Sengupta
Reviewed-on: http://review.gluster.org/11011
Tested-by: NetBSD Build System
Reviewed-by: Kaushal M
---
 xlators/mgmt/glusterd/src/Makefile.am             |  2 +-
 xlators/mgmt/glusterd/src/glusterd-errno.h        | 18 ++++++++++++++++
 xlators/mgmt/glusterd/src/glusterd-handler.c      |  4 +++-
 xlators/mgmt/glusterd/src/glusterd-locks.c        | 19 +++++++++++------
 xlators/mgmt/glusterd/src/glusterd-locks.h        |  5 +++--
 xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c | 10 +++++----
 xlators/mgmt/glusterd/src/glusterd-mgmt.c         | 26 +++++++++++++++++------
 xlators/mgmt/glusterd/src/glusterd-op-sm.c        |  5 +++--
 xlators/mgmt/glusterd/src/glusterd-rpc-ops.c      |  3 ++-
 xlators/mgmt/glusterd/src/glusterd-syncop.c       | 13 +++++++++---
 10 files changed, 78 insertions(+), 27 deletions(-)
 create mode 100644 xlators/mgmt/glusterd/src/glusterd-errno.h

diff --git a/xlators/mgmt/glusterd/src/Makefile.am b/xlators/mgmt/glusterd/src/Makefile.am
index 74198271f48..a8f27fb5310 100644
--- a/xlators/mgmt/glusterd/src/Makefile.am
+++ b/xlators/mgmt/glusterd/src/Makefile.am
@@ -38,7 +38,7 @@ noinst_HEADERS = glusterd.h glusterd-utils.h glusterd-op-sm.h \
 	glusterd-quotad-svc.h glusterd-svc-helper.h glusterd-snapd-svc.h \
 	glusterd-snapd-svc-helper.h glusterd-rcu.h glusterd-bitd-svc.h \
 	glusterd-scrub-svc.h $(CONTRIBDIR)/userspace-rcu/rculist-extra.h \
-	glusterd-server-quorum.h
+	glusterd-server-quorum.h glusterd-errno.h
 
 AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
 	-I$(rpclibdir) -I$(CONTRIBDIR)/rbtree \
diff --git a/xlators/mgmt/glusterd/src/glusterd-errno.h b/xlators/mgmt/glusterd/src/glusterd-errno.h
new file mode 100644
index 00000000000..435b050c7da
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-errno.h
@@ -0,0 +1,18 @@
+/*
+   Copyright (c) 2015 Red Hat, Inc.
+   This file is part of GlusterFS.
+
+   This file is licensed to you under your choice of the GNU Lesser
+   General Public License, version 3 or any later version (LGPLv3 or
+   later), or the GNU General Public License, version 2 (GPLv2), in all
+   cases as published by the Free Software Foundation.
+*/
+#ifndef _GLUSTERD_ERRNO_H
+#define _GLUSTERD_ERRNO_H
+
+enum glusterd_op_errno {
+        EINTRNL   = 30800, /* Internal Error */
+        EANOTRANS = 30801, /* Another Transaction in Progress */
+};
+
+#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index b297ed16443..564d78796be 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -633,6 +633,7 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
         uuid_t                      *txn_id = NULL;
         glusterd_op_info_t          txn_op_info = {{0},};
         glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
+        uint32_t                    op_errno = 0;
 
         GF_ASSERT (req);
         GF_ASSERT ((op > GD_OP_NONE) && (op < GD_OP_MAX));
@@ -696,7 +697,8 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
                 goto out;
         }
 
-        ret = glusterd_mgmt_v3_lock (volname, MY_UUID, "vol");
+        ret = glusterd_mgmt_v3_lock (volname, MY_UUID, &op_errno,
+                                     "vol");
         if (ret) {
                 gf_log (this->name, GF_LOG_ERROR,
                         "Unable to acquire lock for %s", volname);
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.c b/xlators/mgmt/glusterd/src/glusterd-locks.c
index c86dc8069da..0f9c2b26084 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.c
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.c
@@ -21,6 +21,7 @@
 #include "glusterd-utils.h"
 #include "glusterd-volgen.h"
 #include "glusterd-locks.h"
+#include "glusterd-errno.h"
 #include "run.h"
 #include "syscall.h"
 
@@ -191,6 +192,7 @@ out:
  * volumes */
 static int32_t
 glusterd_acquire_multiple_locks_per_entity (dict_t *dict, uuid_t uuid,
+                                            uint32_t *op_errno,
                                             int32_t count, char *type)
 {
         char           name_buf[PATH_MAX] = "";
@@ -220,7 +222,7 @@ glusterd_acquire_multiple_locks_per_entity (dict_t *dict, uuid_t uuid,
                         break;
                 }
 
-                ret = glusterd_mgmt_v3_lock (name, uuid, type);
+                ret = glusterd_mgmt_v3_lock (name, uuid, op_errno, type);
                 if (ret) {
                         gf_log (this->name, GF_LOG_ERROR,
                                 "Failed to acquire lock for %s %s "
@@ -330,8 +332,8 @@ out:
  * if the type is "vol", this function will accordingly lock a single volume *
  * or multiple volumes */
 static int32_t
-glusterd_mgmt_v3_lock_entity (dict_t *dict, uuid_t uuid, char *type,
-                              gf_boolean_t default_value)
+glusterd_mgmt_v3_lock_entity (dict_t *dict, uuid_t uuid, uint32_t *op_errno,
+                              char *type, gf_boolean_t default_value)
 {
         char name_buf[PATH_MAX] = "";
         char *name = NULL;
@@ -369,7 +371,7 @@ glusterd_mgmt_v3_lock_entity (dict_t *dict, uuid_t uuid, char *type,
                         goto out;
                 }
 
-                ret = glusterd_mgmt_v3_lock (name, uuid, type);
+                ret = glusterd_mgmt_v3_lock (name, uuid, op_errno, type);
                 if (ret) {
                         gf_log (this->name, GF_LOG_ERROR,
                                 "Failed to acquire lock for %s %s "
@@ -381,6 +383,7 @@ glusterd_mgmt_v3_lock_entity (dict_t *dict, uuid_t uuid, char *type,
 
         /* Locking one element name after another */
         ret = glusterd_acquire_multiple_locks_per_entity (dict, uuid,
+                                                          op_errno,
                                                           count, type);
         if (ret) {
@@ -437,7 +440,7 @@ out:
 /* Try to acquire locks on multiple entities like *
  * volume, snaps etc. */
 int32_t
-glusterd_multiple_mgmt_v3_lock (dict_t *dict, uuid_t uuid)
+glusterd_multiple_mgmt_v3_lock (dict_t *dict, uuid_t uuid, uint32_t *op_errno)
 {
         int32_t    i   = -1;
         int32_t    ret = -1;
@@ -456,7 +459,7 @@ glusterd_multiple_mgmt_v3_lock (dict_t *dict, uuid_t uuid)
         /* Locking one entity after other */
         for (i = 0; valid_types[i].type; i++) {
                 ret = glusterd_mgmt_v3_lock_entity
-                                   (dict, uuid,
+                                   (dict, uuid, op_errno,
                                     valid_types[i].type,
                                     valid_types[i].default_value);
                 if (ret) {
@@ -494,7 +497,8 @@ out:
 
 int32_t
-glusterd_mgmt_v3_lock (const char *name, uuid_t uuid, char *type)
+glusterd_mgmt_v3_lock (const char *name, uuid_t uuid, uint32_t *op_errno,
+                       char *type)
 {
         char key[PATH_MAX] = "";
         int32_t ret = -1;
@@ -550,6 +554,7 @@ glusterd_mgmt_v3_lock (const char *name, uuid_t uuid, char *type)
                         "Lock for %s held by %s",
                         name, uuid_utoa (owner));
                 ret = -1;
+                *op_errno = EANOTRANS;
                 goto out;
         }
 
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.h b/xlators/mgmt/glusterd/src/glusterd-locks.h
index b9cc8c0d1e4..de4d8fcd4d5 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.h
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.h
@@ -37,13 +37,14 @@ int32_t
 glusterd_get_mgmt_v3_lock_owner (char *volname, uuid_t *uuid);
 
 int32_t
-glusterd_mgmt_v3_lock (const char *key, uuid_t uuid, char *type);
+glusterd_mgmt_v3_lock (const char *key, uuid_t uuid, uint32_t *op_errno,
+                       char *type);
 
 int32_t
 glusterd_mgmt_v3_unlock (const char *key, uuid_t uuid, char *type);
 
 int32_t
-glusterd_multiple_mgmt_v3_lock (dict_t *dict, uuid_t uuid);
+glusterd_multiple_mgmt_v3_lock (dict_t *dict, uuid_t uuid, uint32_t *op_errno);
 
 int32_t
 glusterd_multiple_mgmt_v3_unlock (dict_t *dict, uuid_t uuid);
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
index db5a19bf675..9ebaf00d32f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
@@ -28,7 +28,8 @@ glusterd_mgmt_v3_null (rpcsvc_request_t *req)
 }
 
 static int
-glusterd_mgmt_v3_lock_send_resp (rpcsvc_request_t *req, int32_t status)
+glusterd_mgmt_v3_lock_send_resp (rpcsvc_request_t *req, int32_t status,
+                                 uint32_t op_errno)
 {
 
         gd1_mgmt_v3_lock_rsp    rsp = {{0},};
@@ -41,7 +42,7 @@ glusterd_mgmt_v3_lock_send_resp (rpcsvc_request_t *req, int32_t status)
 
         rsp.op_ret = status;
         if (rsp.op_ret)
-                rsp.op_errno = errno;
+                rsp.op_errno = op_errno;
 
         glusterd_get_uuid (&rsp.uuid);
 
@@ -61,6 +62,7 @@ glusterd_synctasked_mgmt_v3_lock (rpcsvc_request_t *req,
 {
         int32_t                  ret      = -1;
         xlator_t                *this     = NULL;
+        uint32_t                 op_errno = 0;
 
         this = THIS;
         GF_ASSERT (this);
@@ -69,14 +71,14 @@ glusterd_synctasked_mgmt_v3_lock (rpcsvc_request_t *req,
         GF_ASSERT (ctx->dict);
 
         /* Trying to acquire multiple mgmt_v3 locks */
-        ret = glusterd_multiple_mgmt_v3_lock (ctx->dict, ctx->uuid);
+        ret = glusterd_multiple_mgmt_v3_lock (ctx->dict, ctx->uuid, &op_errno);
         if (ret)
                 gf_msg (this->name, GF_LOG_ERROR, 0,
                         GD_MSG_MGMTV3_LOCK_GET_FAIL,
                         "Failed to acquire mgmt_v3 locks for %s",
                         uuid_utoa (ctx->uuid));
 
-        ret = glusterd_mgmt_v3_lock_send_resp (req, ret);
+        ret = glusterd_mgmt_v3_lock_send_resp (req, ret, op_errno);
 
         gf_msg_trace (this->name, 0, "Returning %d", ret);
         return ret;
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
index 85d8c777884..a3eaf2ebdcd 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
@@ -23,6 +23,7 @@
 #include "glusterd-store.h"
 #include "glusterd-snapshot-utils.h"
 #include "glusterd-messages.h"
"glusterd-messages.h" +#include "glusterd-errno.h" extern struct rpc_clnt_program gd_mgmt_v3_prog; @@ -397,7 +398,7 @@ out: int glusterd_mgmt_v3_initiate_lockdown (glusterd_op_t op, dict_t *dict, - char **op_errstr, + char **op_errstr, uint32_t *op_errno, gf_boolean_t *is_acquired, uint32_t txn_generation) { @@ -420,7 +421,7 @@ glusterd_mgmt_v3_initiate_lockdown (glusterd_op_t op, dict_t *dict, GF_ASSERT (is_acquired); /* Trying to acquire multiple mgmt_v3 locks on local node */ - ret = glusterd_multiple_mgmt_v3_lock (dict, MY_UUID); + ret = glusterd_multiple_mgmt_v3_lock (dict, MY_UUID, op_errno); if (ret) { gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_GET_FAIL, @@ -466,6 +467,7 @@ glusterd_mgmt_v3_initiate_lockdown (glusterd_op_t op, dict_t *dict, *op_errstr = gf_strdup (args.errstr); ret = args.op_ret; + *op_errno = args.op_errno; gf_msg_debug (this->name, 0, "Sent lock op req for %s " "to %d peers. Returning %d", gd_op_list[op], peer_cnt, ret); @@ -1701,6 +1703,7 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op, gf_boolean_t is_acquired = _gf_false; uuid_t *originator_uuid = NULL; uint32_t txn_generation = 0; + uint32_t op_errno = 0; this = THIS; GF_ASSERT (this); @@ -1758,7 +1761,8 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op, /* LOCKDOWN PHASE - Acquire mgmt_v3 locks */ ret = glusterd_mgmt_v3_initiate_lockdown (op, dict, &op_errstr, - &is_acquired, txn_generation); + &op_errno, &is_acquired, + txn_generation); if (ret) { gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCKDOWN_FAIL, @@ -1828,8 +1832,12 @@ out: } } + if (ret && (op_errno == 0)) + op_errno = EINTRNL; + /* SEND CLI RESPONSE */ - glusterd_op_send_cli_response (op, op_ret, 0, req, dict, op_errstr); + glusterd_op_send_cli_response (op, op_ret, op_errno, req, + dict, op_errstr); if (req_dict) dict_unref (req_dict); @@ -1929,6 +1937,7 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op, gf_boolean_t success = _gf_false; char *cli_errstr = NULL; uint32_t txn_generation = 0; + uint32_t op_errno = 0; this = THIS; GF_ASSERT (this); @@ -1986,7 +1995,8 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op, /* LOCKDOWN PHASE - Acquire mgmt_v3 locks */ ret = glusterd_mgmt_v3_initiate_lockdown (op, dict, &op_errstr, - &is_acquired, txn_generation); + &op_errno, &is_acquired, + txn_generation); if (ret) { gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCKDOWN_FAIL, @@ -2161,8 +2171,12 @@ out: } } + if (ret && (op_errno == 0)) + op_errno = EINTRNL; + /* SEND CLI RESPONSE */ - glusterd_op_send_cli_response (op, op_ret, 0, req, dict, op_errstr); + glusterd_op_send_cli_response (op, op_ret, op_errno, req, + dict, op_errstr); if (req_dict) dict_unref (req_dict); diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c index 5a23d2bf0c7..326968ea6b8 100644 --- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c +++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c @@ -3210,6 +3210,7 @@ glusterd_op_ac_lock (glusterd_op_sm_event_t *event, void *ctx) glusterd_op_lock_ctx_t *lock_ctx = NULL; glusterd_conf_t *priv = NULL; xlator_t *this = NULL; + uint32_t op_errno = 0; GF_ASSERT (event); GF_ASSERT (ctx); @@ -3232,7 +3233,7 @@ glusterd_op_ac_lock (glusterd_op_sm_event_t *event, void *ctx) "Unable to acquire volname"); else { ret = glusterd_mgmt_v3_lock (volname, lock_ctx->uuid, - "vol"); + &op_errno, "vol"); if (ret) gf_log (this->name, GF_LOG_ERROR, "Unable to acquire 
@@ -3242,7 +3243,7 @@ glusterd_op_ac_lock (glusterd_op_sm_event_t *event, void *ctx)
                 ret = dict_get_str (lock_ctx->dict, "globalname", &globalname);
                 if (!ret) {
                         ret = glusterd_mgmt_v3_lock (globalname, lock_ctx->uuid,
-                                                     "global");
+                                                     &op_errno, "global");
                         if (ret)
                                 gf_log (this->name, GF_LOG_ERROR,
                                         "Unable to acquire lock for %s",
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 231c2f720ba..415d04f96f1 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -165,7 +165,8 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret,
         }
 
         rsp.op_ret = op_ret;
-        rsp.op_errno = errno;
+        rsp.op_errno = op_errno;
+
         if (errstr)
                 rsp.op_errstr = errstr;
         else if (op_errstr)
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index 6f36478aeca..2ac947fd710 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -1692,6 +1692,7 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
         gf_boolean_t                is_global  = _gf_false;
         uuid_t                      *txn_id    = NULL;
         glusterd_op_info_t          txn_opinfo = {{0},};
+        uint32_t                    op_errno   = 0;
 
         this = THIS;
         GF_ASSERT (this);
@@ -1768,7 +1769,8 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
                         goto out;
                 }
 
-                ret = glusterd_mgmt_v3_lock (volname, MY_UUID, "vol");
+                ret = glusterd_mgmt_v3_lock (volname, MY_UUID,
+                                             &op_errno, "vol");
                 if (ret) {
                         gf_log (this->name, GF_LOG_ERROR,
                                 "Unable to acquire lock for %s", volname);
@@ -1782,7 +1784,8 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
 
 global:
         if (is_global) {
-                ret = glusterd_mgmt_v3_lock (global, MY_UUID, "global");
+                ret = glusterd_mgmt_v3_lock (global, MY_UUID, &op_errno,
+                                             "global");
                 if (ret) {
                         gf_log (this->name, GF_LOG_ERROR,
                                 "Unable to acquire lock for %s", global);
@@ -1856,7 +1859,11 @@ out:
                         uuid_utoa (*txn_id));
         }
 
-        glusterd_op_send_cli_response (op, op_ret, 0, req, op_ctx, op_errstr);
+        if (ret && (op_errno == 0))
+                op_errno = -1;
+
+        glusterd_op_send_cli_response (op, op_ret, op_errno, req,
+                                       op_ctx, op_errstr);
 
         if (volname)
                 GF_FREE (volname);
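
A note on how these retcodes reach the user: with this patch, glusterd_mgmt_v3_lock() reports contention through its op_errno out-parameter instead of leaking whatever value the global errno happened to hold, and the CLI-facing paths fall back to EINTRNL (30800) before the response is sent. The standalone C sketch below imitates that contract; toy_lock() and its owner bookkeeping are hypothetical stand-ins for glusterd's mgmt_v3 lock table, and only the EINTRNL/EANOTRANS values are taken from the patch.

/* Minimal sketch of the op_errno contract introduced above.
 * toy_lock() is an invented stand-in, not glusterd code. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

enum glusterd_op_errno {
        EINTRNL   = 30800, /* Internal Error */
        EANOTRANS = 30801, /* Another Transaction in Progress */
};

/* Fails with EANOTRANS when another owner already holds the lock,
 * mimicking glusterd_mgmt_v3_lock() setting *op_errno on contention. */
static int32_t
toy_lock (const char *name, const char *owner, uint32_t *op_errno)
{
        static char current_owner[64] = "";

        (void) name; /* a real lock table would key on this */
        if (current_owner[0] && strcmp (current_owner, owner) != 0) {
                *op_errno = EANOTRANS;
                return -1;
        }
        snprintf (current_owner, sizeof (current_owner), "%s", owner);
        return 0;
}

int
main (void)
{
        uint32_t op_errno = 0;
        int32_t  ret      = 0;

        (void) toy_lock ("patchy", "node-1", &op_errno); /* succeeds  */
        ret = toy_lock ("patchy", "node-2", &op_errno);  /* contended */

        /* Same fallback the mgmt_v3 paths apply before replying to
         * the CLI: an unset op_errno on failure becomes EINTRNL. */
        if (ret && (op_errno == 0))
                op_errno = EINTRNL;

        if (ret)
                printf ("op failed: %s (%u)\n",
                        (op_errno == EANOTRANS) ?
                        "Another transaction is in progress" :
                        "Internal error", op_errno);
        return 0;
}

Compiled on its own, the sketch prints "op failed: Another transaction is in progress (30801)" for the contended call, which is the distinction this patch gives the CLI: 30801 for lock contention versus 30800 for an otherwise-unclassified internal failure.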