| author | Niels de Vos <ndevos@redhat.com> | 2014-03-20 18:13:49 +0100 |
|---|---|---|
| committer | Vijay Bellur <vbellur@redhat.com> | 2014-04-08 10:50:52 -0700 |
| commit | 8235de189845986a535d676b1fd2c894b9c02e52 | |
| tree | 6f5ea06d6b0b9b3f8091e0b9e7b34a7158afe3d1 | |
| parent | 07df69edc8165d875edd42a4080a494e09b98de5 | |
rpc: warn and truncate grouplist if RPC/AUTH can not hold everything
The GlusterFS protocol currently uses AUTH_GLUSTERFS_V2 in the RPC/AUTH
header. This header contains the uid, gid and auxiliary groups of the
user/process that accesses the Gluster Volume.
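For orientation, here is a rough sketch of the rpcgen-style layout of that credential structure. The field names (pid, uid, gid, groups_len/groups_val, lk_owner_len/lk_owner_val) match those used in the diff below; the exact types in the generated header may differ slightly, so treat this as illustrative only:

```c
#include <rpc/types.h>   /* u_int */

/* Sketch of the AUTH_GLUSTERFS_V2 credential as rpcgen would emit it from
 * "unsigned int groups<>" and "opaque lk_owner<>"; illustrative only. */
struct auth_glusterfs_parms_v2 {
        int   pid;
        u_int uid;
        u_int gid;
        struct {
                u_int  groups_len;    /* number of auxiliary groups */
                u_int *groups_val;
        } groups;
        struct {
                u_int  lk_owner_len;  /* length of the lock-owner blob */
                char  *lk_owner_val;
        } lk_owner;
};
```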
The AUTH_GLUSTERFS_V2 structure allows up to 65535 auxiliary groups to
be passed on. Unfortunately, the RPC/AUTH header is limited to 400 bytes
by the RPC specification: http://tools.ietf.org/html/rfc5531#section-8.2
In order not to cause complete failures on the client side when trying
to encode an AUTH_GLUSTERFS_V2 structure that would exceed 400 bytes, we
can calculate the expected size of the other elements (a worked example
follows the note below):
    1 | pid
    1 | uid
    1 | gid
    1 | groups_len
   XX | groups_val (GF_MAX_AUX_GROUPS=65535)
    1 | lk_owner_len
   YY | lk_owner_val (GF_MAX_LOCK_OWNER_LEN=1024)
  ----+-------------------------------------------
    5 | total xdr-units
  one XDR-unit is defined as BYTES_PER_XDR_UNIT = 4 bytes
  MAX_AUTH_BYTES = 400 is the maximum, this is 100 xdr-units.
  XX + YY can be 95 to fill the 100 xdr-units.
  Note that the on-wire protocol has tighter requirements than the
  internal structures. It is possible for xlators to use more groups and
  a bigger lk_owner than can be sent by a GlusterFS client.
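As a sanity check on the arithmetic above, here is a small standalone sketch (not part of the patch) that derives the shared 95-unit budget from the system RPC headers:

```c
#include <rpc/rpc.h>   /* MAX_AUTH_BYTES (400), BYTES_PER_XDR_UNIT (4) */
#include <stdio.h>

int
main (void)
{
        /* 400 bytes / 4 bytes-per-unit = 100 xdr-units in total */
        int total_units = MAX_AUTH_BYTES / BYTES_PER_XDR_UNIT;

        /* pid, uid, gid, groups_len and lk_owner_len are one unit each */
        int fixed_units = 5;

        /* what remains is shared by groups_val (XX) and lk_owner_val (YY) */
        printf ("XX + YY may use at most %d xdr-units\n",
                total_units - fixed_units);   /* prints 95 */
        return 0;
}
```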
This change prevents overflows when allocating the RPC/AUTH header. Two
new macros are introduced to calculate the number of groups that fit in
the RPC/AUTH header, taking the size of the lk_owner into account. If
the list of groups exceeds this maximum, only the groups that fit are
passed over the RPC/GlusterFS protocol to the bricks. A warning is
logged so that system administrators are informed.
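The two macro names below come straight from the diff; the clamp_groups() helper around them is only a hypothetical sketch of the truncation rule (groups may be truncated, the lk_owner never is), not code from the patch:

```c
/* Macros as added to rpc/rpc-lib/src/xdr-common.h by this change:
 * both carve up the 95 remaining xdr-units between groups and lk_owner. */
#define GF_AUTH_GLUSTERFS_MAX_GROUPS(lk_owner_len)  (95 - lk_owner_len)
#define GF_AUTH_GLUSTERFS_MAX_LKOWNER(groups_len)   (95 - groups_len)

/* Hypothetical helper: clamp a grouplist to what fits next to the lk_owner. */
static int
clamp_groups (int groups_len, int lk_owner_len)
{
        int max_groups = GF_AUTH_GLUSTERFS_MAX_GROUPS (lk_owner_len);

        /* truncate the groups if needed; never shrink the lk_owner itself */
        return (groups_len > max_groups) ? max_groups : groups_len;
}
```

On the server side the same budget is applied in reverse: the received group count bounds how large the lk_owner may be, and an oversized lk_owner fails authentication rather than being truncated.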
Reducing the number of groups is not a new invention. The RPC/AUTH
header (AUTH_SYS or AUTH_UNIX) that NFS uses has a limit of 16 groups.
Most, if not all, NFS-clients reduce any larger number of groups to 16.
(nfs.server-aux-gids can be used to work around the limit of 16 groups,
but the Gluster NFS-server will be limited to a maximum of 93 groups, or
fewer in case the lk_owner structure contains more items.)
Change-Id: I8410e59d0fd246d601b54b961d3ae9cb5a858c10
BUG: 1053579
Signed-off-by: Niels de Vos <ndevos@redhat.com>
Reviewed-on: http://review.gluster.org/7202
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Harshavardhana <harsha@harshavardhana.net>
Reviewed-by: Santosh Pradhan <spradhan@redhat.com>
Reviewed-by: Vijay Bellur <vbellur@redhat.com>
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | rpc/rpc-lib/src/auth-glusterfs.c | 22 |
| -rw-r--r-- | rpc/rpc-lib/src/rpc-clnt.c | 43 |
| -rw-r--r-- | rpc/rpc-lib/src/xdr-common.h | 30 |
| -rwxr-xr-x | tests/bugs/bug-1053579.t | 46 |
| -rw-r--r-- | xlators/nfs/server/src/nfs-fops.c | 27 |
5 files changed, 159 insertions, 9 deletions
diff --git a/rpc/rpc-lib/src/auth-glusterfs.c b/rpc/rpc-lib/src/auth-glusterfs.c
index 7bafa82fb82..c3fc166b731 100644
--- a/rpc/rpc-lib/src/auth-glusterfs.c
+++ b/rpc/rpc-lib/src/auth-glusterfs.c
@@ -171,8 +171,10 @@ auth_glusterfs_v2_request_init (rpcsvc_request_t *req, void *priv)
 int auth_glusterfs_v2_authenticate (rpcsvc_request_t *req, void *priv)
 {
         struct auth_glusterfs_parms_v2  au = {0,};
-        int ret = RPCSVC_AUTH_REJECT;
-        int i   = 0;
+        int ret                            = RPCSVC_AUTH_REJECT;
+        int i                              = 0;
+        int max_groups                     = 0;
+        int max_lk_owner_len               = 0;
 
         if (!req)
                 return ret;
@@ -191,17 +193,23 @@ int auth_glusterfs_v2_authenticate (rpcsvc_request_t *req, void *priv)
         req->lk_owner.len = au.lk_owner.lk_owner_len;
         req->auxgidcount = au.groups.groups_len;
 
-        if (req->auxgidcount > GF_MAX_AUX_GROUPS) {
+        /* the number of groups and size of lk_owner depend on each other */
+        max_groups = GF_AUTH_GLUSTERFS_MAX_GROUPS (req->lk_owner.len);
+        max_lk_owner_len = GF_AUTH_GLUSTERFS_MAX_LKOWNER (req->auxgidcount);
+
+        if (req->auxgidcount > max_groups) {
                 gf_log ("", GF_LOG_WARNING,
                         "more than max aux gids found (%d) , truncating it "
                         "to %d and continuing", au.groups.groups_len,
-                        GF_MAX_AUX_GROUPS);
-                req->auxgidcount = GF_MAX_AUX_GROUPS;
+                        max_groups);
+                req->auxgidcount = max_groups;
         }
 
-        if (req->lk_owner.len > GF_MAX_LOCK_OWNER_LEN) {
+        if (req->lk_owner.len > max_lk_owner_len) {
                 gf_log ("", GF_LOG_WARNING,
-                        "lkowner field > 1k, failing authentication");
+                        "lkowner field to big (%d), depends on the number of "
+                        "groups (%d), failing authentication",
+                        req->lk_owner.len, req->auxgidcount);
                 ret = RPCSVC_AUTH_REJECT;
                 goto err;
         }
diff --git a/rpc/rpc-lib/src/rpc-clnt.c b/rpc/rpc-lib/src/rpc-clnt.c
index 22513b789d6..e095c55b3d9 100644
--- a/rpc/rpc-lib/src/rpc-clnt.c
+++ b/rpc/rpc-lib/src/rpc-clnt.c
@@ -1133,17 +1133,34 @@ rpc_clnt_register_notify (struct rpc_clnt *rpc, rpc_clnt_notify_t fn,
         return 0;
 }
 
+/* used for GF_LOG_OCCASIONALLY() */
+static int gf_auth_max_groups_log = 0;
+
 ssize_t
 xdr_serialize_glusterfs_auth (char *dest, struct auth_glusterfs_parms_v2 *au)
 {
         ssize_t ret = -1;
         XDR     xdr;
+        uint64_t ngroups = 0;
+        int     max_groups = 0;
 
         if ((!dest) || (!au))
                 return -1;
 
+        max_groups = GF_AUTH_GLUSTERFS_MAX_GROUPS (au->lk_owner.lk_owner_len);
+
         xdrmem_create (&xdr, dest, GF_MAX_AUTH_BYTES, XDR_ENCODE);
 
+        if (au->groups.groups_len > max_groups) {
+                ngroups = au->groups.groups_len;
+                au->groups.groups_len = max_groups;
+
+                GF_LOG_OCCASIONALLY (gf_auth_max_groups_log,
+                                     THIS->name, GF_LOG_WARNING,
+                                     "too many groups, reducing %ld -> %d",
+                                     ngroups, max_groups);
+        }
+
         if (!xdr_auth_glusterfs_parms_v2 (&xdr, au)) {
                 gf_log (THIS->name, GF_LOG_WARNING,
                         "failed to encode auth glusterfs elements");
@@ -1154,6 +1171,9 @@ xdr_serialize_glusterfs_auth (char *dest, struct auth_glusterfs_parms_v2 *au)
         ret = (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base));
 
 ret:
+        if (ngroups)
+                au->groups.groups_len = ngroups;
+
         return ret;
 }
 
@@ -1319,6 +1339,8 @@ rpc_clnt_record (struct rpc_clnt *clnt, call_frame_t *call_frame,
         struct auth_glusterfs_parms_v2  au          = {0, };
         struct iobuf                   *request_iob = NULL;
         char                            owner[4] = {0,};
+        int                             max_groups = 0;
+        int                             max_lkowner_len = 0;
 
         if (!prog || !rpchdr || !call_frame) {
                 goto out;
@@ -1345,6 +1367,27 @@ rpc_clnt_record (struct rpc_clnt *clnt, call_frame_t *call_frame,
                 au.lk_owner.lk_owner_len = 4;
         }
 
+        /* The number of groups and the size of lk_owner depend on oneother.
+         * We can truncate the groups, but should not touch the lk_owner. */
+        max_groups = GF_AUTH_GLUSTERFS_MAX_GROUPS (au.lk_owner.lk_owner_len);
+        if (au.groups.groups_len > max_groups) {
+                GF_LOG_OCCASIONALLY (gf_auth_max_groups_log, clnt->conn.name,
+                                     GF_LOG_WARNING, "truncating grouplist "
+                                     "from %d to %d", au.groups.groups_len,
+                                     max_groups);
+
+                au.groups.groups_len = max_groups;
+        }
+
+        max_lkowner_len = GF_AUTH_GLUSTERFS_MAX_LKOWNER (au.groups.groups_len);
+        if (au.lk_owner.lk_owner_len > max_lkowner_len) {
+                gf_log (clnt->conn.name, GF_LOG_ERROR, "lkowner field is too "
+                        "big (%d), it does not fit in the rpc-header",
+                        au.lk_owner.lk_owner_len);
+                errno = E2BIG;
+                goto out;
+        }
+
         gf_log (clnt->conn.name, GF_LOG_TRACE, "Auth Info: pid: %u, uid: %d"
                 ", gid: %d, owner: %s", au.pid, au.uid, au.gid,
                 lkowner_utoa (&call_frame->root->lk_owner));
diff --git a/rpc/rpc-lib/src/xdr-common.h b/rpc/rpc-lib/src/xdr-common.h
index 34dc9c6a228..f221192adbe 100644
--- a/rpc/rpc-lib/src/xdr-common.h
+++ b/rpc/rpc-lib/src/xdr-common.h
@@ -18,6 +18,7 @@
 
 #include <rpc/types.h>
 #include <sys/types.h>
+#include <rpc/auth.h>
 #include <rpc/xdr.h>
 #include <sys/uio.h>
 
@@ -34,7 +35,34 @@ enum gf_dump_procnum {
 #define GLUSTER_DUMP_PROGRAM 123451501 /* Completely random */
 #define GLUSTER_DUMP_VERSION 1
 
-#define GF_MAX_AUTH_BYTES   2048
+/* MAX_AUTH_BYTES is restricted to 400 bytes, see
+ * http://tools.ietf.org/html/rfc5531#section-8.2 */
+#define GF_MAX_AUTH_BYTES   MAX_AUTH_BYTES
+
+/* The size of an AUTH_GLUSTERFS_V2 structure:
+ *
+ *   1 | pid
+ *   1 | uid
+ *   1 | gid
+ *   1 | groups_len
+ *  XX | groups_val (GF_MAX_AUX_GROUPS=65535)
+ *   1 | lk_owner_len
+ *  YY | lk_owner_val (GF_MAX_LOCK_OWNER_LEN=1024)
+ * ----+-------------------------------------------
+ *   5 | total xdr-units
+ *
+ * one XDR-unit is defined as BYTES_PER_XDR_UNIT = 4 bytes
+ * MAX_AUTH_BYTES = 400 is the maximum, this is 100 xdr-units.
+ * XX + YY can be 95 to fill the 100 xdr-units.
+ *
+ * Note that the on-wire protocol has tighter requirements than the internal
+ * structures. It is possible for xlators to use more groups and a bigger
+ * lk_owner than that can be sent by a GlusterFS-client.
+ */
+#define GF_AUTH_GLUSTERFS_MAX_GROUPS(lk_owner_len) \
+           (95 - lk_owner_len)
+#define GF_AUTH_GLUSTERFS_MAX_LKOWNER(groups_len)  \
+           (95 - groups_len)
 
 #if GF_DARWIN_HOST_OS
 #define xdr_u_quad_t xdr_u_int64_t
diff --git a/tests/bugs/bug-1053579.t b/tests/bugs/bug-1053579.t
new file mode 100755
index 00000000000..0b6eb4331c1
--- /dev/null
+++ b/tests/bugs/bug-1053579.t
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../nfs.rc
+
+cleanup
+
+# prepare the users and groups
+NEW_USER=bug1053579
+NEW_UID=1053579
+NEW_GID=1053579
+
+# create many groups, $NEW_USER will have 200 groups
+NEW_GIDS=1053580
+groupadd -o -g ${NEW_GID} gid${NEW_GID} 2> /dev/null
+for G in $(seq 1053581 1053279)
+do
+        groupadd -o -g ${G} gid${G} 2> /dev/null
+        NEW_GIDS="${GIDS},${G}"
+done
+
+# create a user that belongs to many groups
+groupadd -o -g ${NEW_GID} gid${NEW_GID}
+useradd -o -u ${NEW_UID} -g ${NEW_GID} -G ${NEW_GIDS} ${NEW_USER}
+
+# preparation done, start the tests
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}1
+TEST $CLI volume set $V0 nfs.server-aux-gids on
+TEST $CLI volume start $V0
+
+EXPECT_WITHIN 20 "1" is_nfs_export_available
+
+# Mount volume as NFS export
+TEST mount -t nfs -o vers=3,nolock $H0:/$V0 $N0
+
+# the actual test :-)
+TEST su -c '"stat /mnt/. > /dev/null"' ${USER}
+
+TEST umount $N0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/xlators/nfs/server/src/nfs-fops.c b/xlators/nfs/server/src/nfs-fops.c
index 14bc0f33ba5..b91f73a5378 100644
--- a/xlators/nfs/server/src/nfs-fops.c
+++ b/xlators/nfs/server/src/nfs-fops.c
@@ -30,6 +30,8 @@
 #include <libgen.h>
 #include <semaphore.h>
 
+static int gf_auth_max_groups_nfs_log = 0;
+
 void
 nfs_fix_groups (xlator_t *this, call_stack_t *root)
 {
@@ -39,6 +41,7 @@ nfs_fix_groups (xlator_t *this, call_stack_t *root)
         gid_t            mygroups[GF_MAX_AUX_GROUPS];
         int              ngroups;
         int              i;
+        int              max_groups;
         struct nfs_state *priv = this->private;
         const gid_list_t *agl;
 	gid_list_t gl;
@@ -47,10 +50,22 @@ nfs_fix_groups (xlator_t *this, call_stack_t *root)
                 return;
         }
 
+	/* RPC enforces the GF_AUTH_GLUSTERFS_MAX_GROUPS limit */
+	max_groups = GF_AUTH_GLUSTERFS_MAX_GROUPS(root->lk_owner.len);
+
 	agl = gid_cache_lookup(&priv->gid_cache, root->uid, 0, 0);
 	if (agl) {
-		for (ngroups = 0; ngroups < agl->gl_count; ngroups++)
+		if (agl->gl_count > max_groups) {
+			GF_LOG_OCCASIONALLY (gf_auth_max_groups_nfs_log,
+					this->name, GF_LOG_WARNING,
+					"too many groups, reducing %d -> %d",
+					agl->gl_count, max_groups);
+		}
+
+		for (ngroups = 0; ngroups < agl->gl_count
+				&& ngroups <= max_groups; ngroups++) {
 			root->groups[ngroups] = agl->gl_list[ngroups];
+		}
 		root->ngrps = ngroups;
 		gid_cache_release(&priv->gid_cache, agl);
 		return;
@@ -92,6 +107,16 @@ nfs_fix_groups (xlator_t *this, call_stack_t *root)
 			GF_FREE(gl.gl_list);
 	}
 
+	/* RPC enforces the GF_AUTH_GLUSTERFS_MAX_GROUPS limit */
+	if (ngroups > max_groups) {
+		GF_LOG_OCCASIONALLY (gf_auth_max_groups_nfs_log,
+				     this->name, GF_LOG_WARNING,
+				     "too many groups, reducing %d -> %d",
+				     ngroups, max_groups);
+
+		ngroups = max_groups;
+	}
+
 	/* Copy data to the frame. */
         for (i = 0; i < ngroups; ++i) {
                 gf_log (this->name, GF_LOG_TRACE,
