author     Niels de Vos <ndevos@redhat.com>        2014-04-17 18:32:07 +0200
committer  Anand Avati <avati@redhat.com>          2014-05-09 12:22:39 -0700
commit     2fd499d148fc8865c77de8b2c73fe0b7e1737882 (patch)
tree       368fe211b31d82cd14c6efc773fd91693855f3de /xlators/protocol
parent     47c33dd27150039a6e5e3295eacd8d2d5a7e0ce0 (diff)
rpc: implement server.manage-gids for group resolving on the bricks
The new volume option 'server.manage-gids' can be enabled in environments where a user belongs to more than the current absolute maximum of 93 groups. This option triggers the following behavior:

1. The AUTH_GLUSTERFS structure sent by GlusterFS clients (fuse, nfs or libgfapi) will contain only one (1) auxiliary group, instead of the full list. This reduces network usage and prevents problems in encoding the AUTH_GLUSTERFS structure, which should fit in 400 bytes.

2. The single group in the RPC calls received by the server is replaced by resolving the groups server-side. Permission checks and similar operations in lower xlators are applied against the full list of groups the user belongs to, not the single auxiliary group that the client sent.

Change-Id: I9e540de13e3022f8b63ff893ecba511129a47b91
BUG: 1053579
Signed-off-by: Niels de Vos <ndevos@redhat.com>
Reviewed-on: http://review.gluster.org/7501
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Santosh Pradhan <spradhan@redhat.com>
Reviewed-by: Harshavardhana <harsha@harshavardhana.net>
Reviewed-by: Anand Avati <avati@redhat.com>
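For context, the server-side half of this change boils down to the standard getpwuid_r()/getgrouplist() sequence that the new gid_resolve() wraps with a gid cache. A minimal standalone sketch, independent of the GlusterFS internal types and using a hypothetical resolve_aux_groups() helper (not part of the patch), could look roughly like this:

    #include <pwd.h>
    #include <grp.h>
    #include <sys/types.h>

    /* Hypothetical helper: expand the uid/gid pair taken from an incoming
     * RPC call into the user's full auxiliary group list. The caller passes
     * an array of up to *ngroups entries (the patch uses GF_MAX_AUX_GROUPS). */
    static int
    resolve_aux_groups (uid_t uid, gid_t gid, gid_t *groups, int *ngroups)
    {
            struct passwd  pw;
            struct passwd *result = NULL;
            char           buf[1024];

            /* map the uid to a user name */
            if (getpwuid_r (uid, &pw, buf, sizeof (buf), &result) != 0 || !result)
                    return -1;

            /* expand the user name into its group list; getgrouplist()
             * returns -1 if the user is in more than *ngroups groups */
            if (getgrouplist (result->pw_name, gid, groups, ngroups) == -1)
                    return -1;

            return 0;
    }

Once the bricks run this patch, the behavior is toggled like any other volume option, e.g. 'gluster volume set <VOLNAME> server.manage-gids on' (the volume name is a placeholder).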
Diffstat (limited to 'xlators/protocol')
-rw-r--r--  xlators/protocol/client/src/client.c           29
-rw-r--r--  xlators/protocol/client/src/client.h             2
-rw-r--r--  xlators/protocol/server/src/server-helpers.c   114
-rw-r--r--  xlators/protocol/server/src/server.c            36
-rw-r--r--  xlators/protocol/server/src/server.h             5
5 files changed, 185 insertions, 1 deletion
diff --git a/xlators/protocol/client/src/client.c b/xlators/protocol/client/src/client.c
index 7726c0b8445..aecd8f8fb07 100644
--- a/xlators/protocol/client/src/client.c
+++ b/xlators/protocol/client/src/client.c
@@ -158,6 +158,8 @@ client_submit_request (xlator_t *this, void *req, call_frame_t *frame,
struct iobref *new_iobref = NULL;
ssize_t xdr_size = 0;
struct rpc_req rpcreq = {0, };
+ uint64_t ngroups = 0;
+ uint64_t gid = 0;
GF_VALIDATE_OR_GOTO ("client", this, out);
GF_VALIDATE_OR_GOTO (this->name, prog, out);
@@ -224,6 +226,18 @@ client_submit_request (xlator_t *this, void *req, call_frame_t *frame,
count = 1;
}
+ /* do not send all groups if they are resolved server-side */
+ if (!conf->send_gids) {
+ /* copy some values for restoring later */
+ ngroups = frame->root->ngrps;
+ frame->root->ngrps = 1;
+ if (ngroups <= SMALL_GROUP_COUNT) {
+ gid = frame->root->groups_small[0];
+ frame->root->groups_small[0] = frame->root->gid;
+ frame->root->groups = frame->root->groups_small;
+ }
+ }
+
/* Send the msg */
ret = rpc_clnt_submit (conf->rpc, prog, procnum, cbkfn, &iov, count,
NULL, 0, new_iobref, frame, rsphdr, rsphdr_count,
@@ -233,6 +247,13 @@ client_submit_request (xlator_t *this, void *req, call_frame_t *frame,
gf_log (this->name, GF_LOG_DEBUG, "rpc_clnt_submit failed");
}
+ if (!conf->send_gids) {
+ /* restore previous values */
+ frame->root->ngrps = ngroups;
+ if (ngroups <= SMALL_GROUP_COUNT)
+ frame->root->groups_small[0] = gid;
+ }
+
ret = 0;
if (new_iobref)
@@ -2314,6 +2335,8 @@ build_client_config (xlator_t *this, clnt_conf_t *conf)
GF_OPTION_INIT ("filter-O_DIRECT", conf->filter_o_direct,
bool, out);
+ GF_OPTION_INIT ("send-gids", conf->send_gids, bool, out);
+
ret = 0;
out:
return ret;
@@ -2501,6 +2524,8 @@ reconfigure (xlator_t *this, dict_t *options)
GF_OPTION_RECONF ("filter-O_DIRECT", conf->filter_o_direct,
options, bool, out);
+ GF_OPTION_RECONF ("send-gids", conf->send_gids, options, bool, out);
+
ret = client_init_grace_timer (this, options, conf);
if (ret)
goto out;
@@ -2856,5 +2881,9 @@ struct volume_options options[] = {
"still continue to cache the file. This works similar to NFS's "
"behavior of O_DIRECT",
},
+ { .key = {"send-gids"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "on",
+ },
{ .key = {NULL} },
};
diff --git a/xlators/protocol/client/src/client.h b/xlators/protocol/client/src/client.h
index bc0f5d0e9d2..7f7d511910e 100644
--- a/xlators/protocol/client/src/client.h
+++ b/xlators/protocol/client/src/client.h
@@ -125,6 +125,8 @@ typedef struct clnt_conf {
* how many times set_volume is called
*/
uint64_t setvol_count;
+
+ gf_boolean_t send_gids; /* let the server resolve gids */
} clnt_conf_t;
typedef struct _client_fd_ctx {
diff --git a/xlators/protocol/server/src/server-helpers.c b/xlators/protocol/server/src/server-helpers.c
index b349d7de1eb..9dcb55ce3aa 100644
--- a/xlators/protocol/server/src/server-helpers.c
+++ b/xlators/protocol/server/src/server-helpers.c
@@ -15,8 +15,117 @@
#include "server.h"
#include "server-helpers.h"
+#include "gidcache.h"
#include <fnmatch.h>
+#include <pwd.h>
+#include <grp.h>
+
+/* based on nfs_fix_aux_groups() */
+int
+gid_resolve (server_conf_t *conf, call_stack_t *root)
+{
+ int ret = 0;
+ struct passwd mypw;
+ char mystrs[1024];
+ struct passwd *result;
+ gid_t mygroups[GF_MAX_AUX_GROUPS];
+ gid_list_t gl;
+ const gid_list_t *agl;
+ int ngroups, i;
+
+ agl = gid_cache_lookup (&conf->gid_cache, root->uid, 0, 0);
+ if (agl) {
+ root->ngrps = agl->gl_count;
+ goto fill_groups;
+ }
+
+ ret = getpwuid_r (root->uid, &mypw, mystrs, sizeof(mystrs), &result);
+ if (ret != 0) {
+ gf_log("gid-cache", GF_LOG_ERROR, "getpwuid_r(%u) failed",
+ root->uid);
+ return -1;
+ }
+
+ if (!result) {
+ gf_log ("gid-cache", GF_LOG_ERROR, "getpwuid_r(%u) found "
+ "nothing", root->uid);
+ return -1;
+ }
+
+ gf_log ("gid-cache", GF_LOG_TRACE, "mapped %u => %s", root->uid,
+ result->pw_name);
+
+ ngroups = GF_MAX_AUX_GROUPS;
+ ret = getgrouplist (result->pw_name, root->gid, mygroups, &ngroups);
+ if (ret == -1) {
+ gf_log ("gid-cache", GF_LOG_ERROR, "could not map %s to group "
+ "list (%d gids)", result->pw_name, root->ngrps);
+ return -1;
+ }
+ root->ngrps = (uint16_t) ngroups;
+
+fill_groups:
+ if (agl) {
+ /* the gl is not complete, we only use gl.gl_list later on */
+ gl.gl_list = agl->gl_list;
+ } else {
+ /* setup a full gid_list_t to add it to the gid_cache */
+ gl.gl_id = root->uid;
+ gl.gl_uid = root->uid;
+ gl.gl_gid = root->gid;
+ gl.gl_count = root->ngrps;
+
+ gl.gl_list = GF_MALLOC (root->ngrps * sizeof(gid_t),
+ gf_common_mt_groups_t);
+ if (gl.gl_list)
+ memcpy (gl.gl_list, mygroups,
+ sizeof(gid_t) * root->ngrps);
+ else
+ return -1;
+ }
+
+ if (root->ngrps == 0) {
+ ret = 0;
+ goto out;
+ }
+
+ if (call_stack_alloc_groups (root, root->ngrps) != 0) {
+ ret = -1;
+ goto out;
+ }
+
+ /* finally fill the groups from the gid list */
+ for (i = 0; i < root->ngrps; ++i)
+ root->groups[i] = gl.gl_list[i];
+
+out:
+ if (agl) {
+ gid_cache_release (&conf->gid_cache, agl);
+ } else {
+ if (gid_cache_add (&conf->gid_cache, &gl) != 1)
+ GF_FREE (gl.gl_list);
+ }
+
+ return ret;
+}
+
+int
+server_resolve_groups (call_frame_t *frame, rpcsvc_request_t *req)
+{
+ xlator_t *this = NULL;
+ server_conf_t *conf = NULL;
+
+ GF_VALIDATE_OR_GOTO ("server", frame, out);
+ GF_VALIDATE_OR_GOTO ("server", req, out);
+
+ this = req->trans->xl;
+ conf = this->private;
+
+ return gid_resolve (conf, frame->root);
+out:
+ return -1;
+}
int
server_decode_groups (call_frame_t *frame, rpcsvc_request_t *req)
@@ -379,7 +488,10 @@ get_frame_from_request (rpcsvc_request_t *req)
frame->root->client = client;
frame->root->lk_owner = req->lk_owner;
- server_decode_groups (frame, req);
+ if (priv->server_manage_gids)
+ server_resolve_groups (frame, req);
+ else
+ server_decode_groups (frame, req);
frame->local = req;
out:
diff --git a/xlators/protocol/server/src/server.c b/xlators/protocol/server/src/server.c
index 3d8e3d66d14..e551fd757a1 100644
--- a/xlators/protocol/server/src/server.c
+++ b/xlators/protocol/server/src/server.c
@@ -736,6 +736,17 @@ reconfigure (xlator_t *this, dict_t *options)
goto out;
}
+ GF_OPTION_RECONF ("manage-gids", conf->server_manage_gids, options,
+ bool, out);
+
+ GF_OPTION_RECONF ("gid-timeout", conf->gid_cache_timeout, options,
+ int32, out);
+ if (gid_cache_reconf (&conf->gid_cache, conf->gid_cache_timeout) < 0) {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to reconfigure group "
+ "cache.");
+ goto out;
+ }
+
rpc_conf = conf->rpc;
if (!rpc_conf) {
gf_log (this->name, GF_LOG_ERROR, "No rpc_conf !!!!");
@@ -863,6 +874,19 @@ init (xlator_t *this)
goto out;
}
+ ret = dict_get_str_boolean (this->options, "manage-gids", _gf_false);
+ if (ret == -1)
+ conf->server_manage_gids = _gf_false;
+ else
+ conf->server_manage_gids = ret;
+
+ GF_OPTION_INIT("gid-timeout", conf->gid_cache_timeout, int32, out);
+ if (gid_cache_init (&conf->gid_cache, conf->gid_cache_timeout) < 0) {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to initialize "
+ "group cache.");
+ goto out;
+ }
+
/* RPC related */
conf->rpc = rpcsvc_init (this, this->ctx, this->options, 0);
if (conf->rpc == NULL) {
@@ -1141,5 +1165,17 @@ struct volume_options options[] = {
"requests from a client. 0 means no limit (can "
"potentially run out of memory)"
},
+
+ { .key = {"manage-gids"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "off",
+ .description = "Resolve groups on the server-side."
+ },
+ { .key = {"gid-timeout"},
+ .type = GF_OPTION_TYPE_INT,
+ .default_value = "2",
+ .description = "Timeout in seconds for the cached groups to expire."
+ },
+
{ .key = {NULL} },
};
diff --git a/xlators/protocol/server/src/server.h b/xlators/protocol/server/src/server.h
index 4a1e10ca8b5..3e1feacb94b 100644
--- a/xlators/protocol/server/src/server.h
+++ b/xlators/protocol/server/src/server.h
@@ -22,6 +22,7 @@
#include "glusterfs3.h"
#include "timer.h"
#include "client_t.h"
+#include "gidcache.h"
#define DEFAULT_BLOCK_SIZE 4194304 /* 4MB */
#define DEFAULT_VOLUME_FILE_PATH CONFDIR "/glusterfs.vol"
@@ -58,6 +59,10 @@ struct server_conf {
pthread_mutex_t mutex;
struct list_head xprt_list;
pthread_t barrier_th;
+
+ gf_boolean_t server_manage_gids; /* resolve gids on brick */
+ gid_cache_t gid_cache;
+ int32_t gid_cache_timeout;
};
typedef struct server_conf server_conf_t;