/* net/sunrpc/svcauth_unix.c — kernel release 4.11 */
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/sunrpc/addr.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/hash.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <linux/kernel.h>
#include <linux/user_namespace.h>
#define RPCDBG_FACILITY RPCDBG_AUTH
#include "netns.h"
/*
* AUTHUNIX and AUTHNULL credentials are both handled here.
* AUTHNULL is treated just like AUTHUNIX except that the uid/gid
* are always nobody (-2). i.e. we do the same IP address checks for
* AUTHNULL as for AUTHUNIX, and that is done here.
*/
/* AUTH_UNIX flavour of auth_domain; currently carries no extra state
 * beyond the embedded generic auth_domain. */
struct unix_domain {
	struct auth_domain h;
	/* other stuff later */
};
extern struct auth_ops svcauth_null;
extern struct auth_ops svcauth_unix;
/* Free a unix auth_domain once its last reference is dropped. */
static void svcauth_unix_domain_release(struct auth_domain *dom)
{
	struct unix_domain *ud;

	ud = container_of(dom, struct unix_domain, h);
	kfree(dom->name);
	kfree(ud);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
J. Bruce Fields | 38 | 100.00% | 1 | 100.00% |
Total | 38 | 100.00% | 1 | 100.00% |
/*
 * unix_domain_find - find or create the AUTH_UNIX auth_domain named @name.
 *
 * auth_domain_lookup() either returns an existing entry or inserts the
 * hint we pass and returns that, so the loop below resolves the
 * allocate/insert race: if another thread inserted @name first, our
 * speculative allocation is released.
 *
 * Returns a referenced auth_domain on success; NULL if the name exists
 * with a different flavour, or on allocation failure.
 */
struct auth_domain *unix_domain_find(char *name)
{
	struct auth_domain *rv;
	struct unix_domain *new = NULL;

	rv = auth_domain_lookup(name, NULL);
	while (1) {
		if (rv) {
			/* Someone (possibly us) inserted an entry for @name;
			 * drop our speculative allocation if it lost the race. */
			if (new && rv != &new->h)
				svcauth_unix_domain_release(&new->h);
			if (rv->flavour != &svcauth_unix) {
				/* Name exists but is not an AUTH_UNIX domain. */
				auth_domain_put(rv);
				return NULL;
			}
			return rv;
		}
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL)
			return NULL;
		kref_init(&new->h.ref);
		new->h.name = kstrdup(name, GFP_KERNEL);
		if (new->h.name == NULL) {
			kfree(new);
			return NULL;
		}
		new->h.flavour = &svcauth_unix;
		/* Retry the lookup, offering our new entry for insertion. */
		rv = auth_domain_lookup(name, &new->h);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Neil Brown | 155 | 90.64% | 4 | 50.00% |
Petri T. Koistinen | 9 | 5.26% | 1 | 12.50% |
J. Bruce Fields | 4 | 2.34% | 2 | 25.00% |
Paulo Marques | 3 | 1.75% | 1 | 12.50% |
Total | 171 | 100.00% | 8 | 100.00% |
EXPORT_SYMBOL_GPL(unix_domain_find);
/**************************************************
* cache for IP address to unix_domain
* as needed by AUTH_UNIX
*/
#define IP_HASHBITS 8
#define IP_HASHMAX (1<<IP_HASHBITS)
/* One auth.unix.ip cache entry: maps (class, IP address) to a client
 * auth_domain.  IPv4 peers are stored as IPv4-mapped IPv6 addresses. */
struct ip_map {
	struct cache_head h;	/* generic sunrpc cache bookkeeping */
	char m_class[8]; /* e.g. "nfsd" */
	struct in6_addr m_addr;	/* peer address (v4-mapped for IPv4) */
	struct unix_domain *m_client;	/* only meaningful when VALID && !NEGATIVE */
};
static void ip_map_put(struct kref *kref)
{
struct cache_head *item = container_of(kref, struct cache_head, ref);
struct ip_map *im = container_of(item, struct ip_map,h);
if (test_bit(CACHE_VALID, &item->flags) &&
!test_bit(CACHE_NEGATIVE, &item->flags))
auth_domain_put(&im->m_client->h);
kfree(im);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Neil Brown | 78 | 98.73% | 3 | 75.00% |
Adrian Bunk | 1 | 1.27% | 1 | 25.00% |
Total | 79 | 100.00% | 4 | 100.00% |
static inline int hash_ip6(const struct in6_addr *ip)
{
return hash_32(ipv6_addr_hash(ip), IP_HASHBITS);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Aurélien Charbon | 18 | 75.00% | 1 | 50.00% |
Eric Dumazet | 6 | 25.00% | 1 | 50.00% |
Total | 24 | 100.00% | 2 | 100.00% |
/* Two ip_map entries match when both the class string and address agree. */
static int ip_map_match(struct cache_head *corig, struct cache_head *cnew)
{
	struct ip_map *a = container_of(corig, struct ip_map, h);
	struct ip_map *b = container_of(cnew, struct ip_map, h);

	if (strcmp(a->m_class, b->m_class) != 0)
		return 0;
	return ipv6_addr_equal(&a->m_addr, &b->m_addr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Neil Brown | 67 | 91.78% | 3 | 75.00% |
Aurélien Charbon | 6 | 8.22% | 1 | 25.00% |
Total | 73 | 100.00% | 4 | 100.00% |
/* Copy the lookup key (class + address) from @citem into @cnew. */
static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
{
	struct ip_map *dst = container_of(cnew, struct ip_map, h);
	struct ip_map *src = container_of(citem, struct ip_map, h);

	strcpy(dst->m_class, src->m_class);
	dst->m_addr = src->m_addr;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Neil Brown | 61 | 93.85% | 3 | 60.00% |
Andrew Morton | 3 | 4.62% | 1 | 20.00% |
Alexey Dobriyan | 1 | 1.54% | 1 | 20.00% |
Total | 65 | 100.00% | 5 | 100.00% |
/* Install @citem's client domain into @cnew, taking an extra reference
 * so both entries own one. */
static void update(struct cache_head *cnew, struct cache_head *citem)
{
	struct ip_map *dst = container_of(cnew, struct ip_map, h);
	struct ip_map *src = container_of(citem, struct ip_map, h);

	kref_get(&src->m_client->h.ref);
	dst->m_client = src->m_client;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Neil Brown | 66 | 100.00% | 3 | 100.00% |
Total | 66 | 100.00% | 3 | 100.00% |
/* Allocate a blank ip_map for the cache layer; returns its cache_head,
 * or NULL on allocation failure. */
static struct cache_head *ip_map_alloc(void)
{
	struct ip_map *im = kmalloc(sizeof(*im), GFP_KERNEL);

	return im ? &im->h : NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Neil Brown | 40 | 100.00% | 1 | 100.00% |
Total | 40 | 100.00% | 1 | 100.00% |
/*
 * ip_map_request - format an upcall line for an ip_map entry.
 *
 * Writes "<class> <address>\n" into the buffer at *bpp for the
 * userspace cache daemon.  An IPv4-mapped IPv6 address is presented in
 * dotted-quad form; anything else as a full IPv6 address.
 */
static void ip_map_request(struct cache_detail *cd,
		  struct cache_head *h,
		  char **bpp, int *blen)
{
	char text_addr[40];
	struct ip_map *im = container_of(h, struct ip_map, h);

	/* Size the formats to the buffer rather than repeating magic
	 * constants (the old 20/40 had to track the array by hand). */
	if (ipv6_addr_v4mapped(&(im->m_addr))) {
		snprintf(text_addr, sizeof(text_addr), "%pI4",
			 &im->m_addr.s6_addr32[3]);
	} else {
		snprintf(text_addr, sizeof(text_addr), "%pI6", &im->m_addr);
	}
	qword_add(bpp, blen, im->m_class);
	qword_add(bpp, blen, text_addr);
	/* qword_add left a trailing space; turn it into the record newline. */
	(*bpp)[-1] = '\n';
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Neil Brown | 89 | 70.08% | 2 | 33.33% |
Aurélien Charbon | 35 | 27.56% | 1 | 16.67% |
Harvey Harrison | 3 | 2.36% | 3 | 50.00% |
Total | 127 | 100.00% | 6 | 100.00% |
static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class, struct in6_addr *addr);
static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, struct unix_domain *udom, time_t expiry);
/*
 * ip_map_parse - parse an administrative update written to the
 * auth.unix.ip channel file.
 *
 * Expected line: "class ipaddress expiry [domainname]".  An empty
 * domainname creates a NEGATIVE (rejecting) entry.  Returns 0 on
 * success or a negative errno.
 */
static int ip_map_parse(struct cache_detail *cd,
			  char *mesg, int mlen)
{
	/* class ipaddress [domainname] */
	/* should be safe just to use the start of the input buffer
	 * for scratch: */
	char *buf = mesg;
	int len;
	char class[8];
	union {
		struct sockaddr sa;
		struct sockaddr_in s4;
		struct sockaddr_in6 s6;
	} address;
	struct sockaddr_in6 sin6;
	int err;
	struct ip_map *ipmp;
	struct auth_domain *dom;
	time_t expiry;

	/* Input must be a complete, newline-terminated line. */
	if (mesg[mlen-1] != '\n')
		return -EINVAL;
	mesg[mlen-1] = 0;

	/* class */
	len = qword_get(&mesg, class, sizeof(class));
	if (len <= 0) return -EINVAL;

	/* ip address */
	len = qword_get(&mesg, buf, mlen);
	if (len <= 0) return -EINVAL;

	if (rpc_pton(cd->net, buf, len, &address.sa, sizeof(address)) == 0)
		return -EINVAL;
	/* Normalise every address into sin6 so the cache key is always
	 * an in6_addr (IPv4 becomes a v4-mapped IPv6 address). */
	switch (address.sa.sa_family) {
	case AF_INET:
		/* Form a mapped IPv4 address in sin6 */
		sin6.sin6_family = AF_INET6;
		ipv6_addr_set_v4mapped(address.s4.sin_addr.s_addr,
				&sin6.sin6_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		memcpy(&sin6, &address.s6, sizeof(sin6));
		break;
#endif
	default:
		return -EINVAL;
	}

	expiry = get_expiry(&mesg);
	if (expiry ==0)
		return -EINVAL;

	/* domainname, or empty for NEGATIVE */
	len = qword_get(&mesg, buf, mlen);
	if (len < 0) return -EINVAL;
	if (len) {
		dom = unix_domain_find(buf);
		if (dom == NULL)
			return -ENOENT;
	} else
		dom = NULL;

	/* IPv6 scope IDs are ignored for now */
	ipmp = __ip_map_lookup(cd, class, &sin6.sin6_addr);
	if (ipmp) {
		/* dom == NULL yields a NULL unix_domain (h is the first
		 * member), which __ip_map_update treats as NEGATIVE. */
		err = __ip_map_update(cd, ipmp,
			     container_of(dom, struct unix_domain, h),
			     expiry);
	} else
		err = -ENOMEM;

	if (dom)
		auth_domain_put(dom);

	cache_flush();
	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Neil Brown | 261 | 69.41% | 6 | 46.15% |
Chuck Lever | 78 | 20.74% | 1 | 7.69% |
Aurélien Charbon | 19 | 5.05% | 1 | 7.69% |
Pavel Emelyanov | 13 | 3.46% | 2 | 15.38% |
Stanislav Kinsbursky | 4 | 1.06% | 2 | 15.38% |
Eric Dumazet | 1 | 0.27% | 1 | 7.69% |
Total | 376 | 100.00% | 13 | 100.00% |
/*
 * ip_map_show - emit one line of the auth.unix.ip content file.
 *
 * With h == NULL prints the header; otherwise prints
 * "class address domain".  Entries that are not valid, or are
 * negative, show the placeholder domain "-no-domain-".
 */
static int ip_map_show(struct seq_file *m,
		       struct cache_detail *cd,
		       struct cache_head *h)
{
	struct ip_map *im;
	struct in6_addr addr;
	char *dom = "-no-domain-";

	if (h == NULL) {
		seq_puts(m, "#class IP domain\n");
		return 0;
	}
	im = container_of(h, struct ip_map, h);
	/* class addr domain */
	/* NOTE(review): local copy presumably taken for a stable snapshot
	 * while the entry may change under us — confirm locking rules. */
	addr = im->m_addr;

	if (test_bit(CACHE_VALID, &h->flags) &&
	    !test_bit(CACHE_NEGATIVE, &h->flags))
		dom = im->m_client->h.name;

	if (ipv6_addr_v4mapped(&addr)) {
		seq_printf(m, "%s %pI4 %s\n",
			im->m_class, &addr.s6_addr32[3], dom);
	} else {
		seq_printf(m, "%s %pI6 %s\n", im->m_class, &addr, dom);
	}
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Neil Brown | 123 | 77.36% | 2 | 28.57% |
Aurélien Charbon | 32 | 20.13% | 1 | 14.29% |
Harvey Harrison | 3 | 1.89% | 3 | 42.86% |
Alexey Dobriyan | 1 | 0.63% | 1 | 14.29% |
Total | 159 | 100.00% | 7 | 100.00% |
static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class,
struct in6_addr *addr)
{
struct ip_map ip;
struct cache_head *ch;
strcpy(ip.m_class, class);
ip.m_addr = *addr;
ch = sunrpc_cache_lookup(cd, &ip.h,
hash_str(class, IP_HASHBITS) ^
hash_ip6(addr));
if (ch)
return container_of(ch, struct ip_map, h);
else
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Neil Brown | 78 | 86.67% | 3 | 50.00% |
Pavel Emelyanov | 7 | 7.78% | 1 | 16.67% |
Aurélien Charbon | 3 | 3.33% | 1 | 16.67% |
Alexey Dobriyan | 2 | 2.22% | 1 | 16.67% |
Total | 90 | 100.00% | 6 | 100.00% |
/* Per-netns front end for __ip_map_lookup(). */
static inline struct ip_map *ip_map_lookup(struct net *net, char *class,
		struct in6_addr *addr)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	return __ip_map_lookup(sn->ip_map_cache, class, addr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pavel Emelyanov | 49 | 100.00% | 3 | 100.00% |
Total | 49 | 100.00% | 3 | 100.00% |
/*
 * Rebind @ipm to client @udom with a new expiry.  A NULL @udom marks the
 * entry NEGATIVE (known, but rejected).  Returns 0 or -ENOMEM.
 */
static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
		struct unix_domain *udom, time_t expiry)
{
	struct ip_map tmp;
	struct cache_head *ch;
	int hash = hash_str(ipm->m_class, IP_HASHBITS) ^ hash_ip6(&ipm->m_addr);

	tmp.m_client = udom;
	tmp.h.flags = 0;
	if (udom == NULL)
		set_bit(CACHE_NEGATIVE, &tmp.h.flags);
	tmp.h.expiry_time = expiry;
	ch = sunrpc_cache_update(cd, &tmp.h, &ipm->h, hash);
	if (ch == NULL)
		return -ENOMEM;
	cache_put(ch, cd);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Neil Brown | 115 | 92.00% | 3 | 50.00% |
Pavel Emelyanov | 8 | 6.40% | 1 | 16.67% |
Aurélien Charbon | 1 | 0.80% | 1 | 16.67% |
Eric Dumazet | 1 | 0.80% | 1 | 16.67% |
Total | 125 | 100.00% | 6 | 100.00% |
/* Per-netns front end for __ip_map_update(). */
static inline int ip_map_update(struct net *net, struct ip_map *ipm,
		struct unix_domain *udom, time_t expiry)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pavel Emelyanov | 53 | 100.00% | 3 | 100.00% |
Total | 53 | 100.00% | 3 | 100.00% |
/* Discard every entry in this network namespace's auth.unix.ip cache. */
void svcauth_unix_purge(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	cache_purge(sn->ip_map_cache);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pavel Emelyanov | 21 | 67.74% | 1 | 33.33% |
Neil Brown | 9 | 29.03% | 1 | 33.33% |
Stanislav Kinsbursky | 1 | 3.23% | 1 | 33.33% |
Total | 31 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(svcauth_unix_purge);
/*
 * ip_map_cached_get - fetch the ip_map cached on a transport, if any.
 *
 * Per-transport caching avoids a full ip_map_cache lookup on every
 * request.  Returns a referenced ip_map, or NULL when nothing usable is
 * cached.  xpt_lock serialises against ip_map_cached_put() and release.
 */
static inline struct ip_map *
ip_map_cached_get(struct svc_xprt *xprt)
{
	struct ip_map *ipm = NULL;
	struct sunrpc_net *sn;

	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
		spin_lock(&xprt->xpt_lock);
		ipm = xprt->xpt_auth_cache;
		if (ipm != NULL) {
			sn = net_generic(xprt->xpt_net, sunrpc_net_id);
			if (cache_is_expired(sn->ip_map_cache, &ipm->h)) {
				/*
				 * The entry has been invalidated since it was
				 * remembered, e.g. by a second mount from the
				 * same IP address.
				 */
				xprt->xpt_auth_cache = NULL;
				spin_unlock(&xprt->xpt_lock);
				/* Drop the reference the cache slot held. */
				cache_put(&ipm->h, sn->ip_map_cache);
				return NULL;
			}
			/* Take a reference on behalf of the caller. */
			cache_get(&ipm->h);
		}
		spin_unlock(&xprt->xpt_lock);
	}
	return ipm;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Banks | 53 | 38.41% | 1 | 12.50% |
Neil Brown | 39 | 28.26% | 4 | 50.00% |
Tom Tucker | 26 | 18.84% | 1 | 12.50% |
Pavel Emelyanov | 20 | 14.49% | 2 | 25.00% |
Total | 138 | 100.00% | 8 | 100.00% |
/*
 * ip_map_cached_put - offer @ipm (with the caller's reference) to the
 * transport's auth-cache slot.  If the slot is empty the reference is
 * transferred to the transport; otherwise it is dropped here.
 */
static inline void
ip_map_cached_put(struct svc_xprt *xprt, struct ip_map *ipm)
{
	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
		spin_lock(&xprt->xpt_lock);
		if (xprt->xpt_auth_cache == NULL) {
			/* newly cached, keep the reference */
			xprt->xpt_auth_cache = ipm;
			ipm = NULL;
		}
		spin_unlock(&xprt->xpt_lock);
	}
	if (ipm) {
		struct sunrpc_net *sn;

		sn = net_generic(xprt->xpt_net, sunrpc_net_id);
		/* Slot already occupied (or caching disabled): drop ref. */
		cache_put(&ipm->h, sn->ip_map_cache);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Banks | 35 | 34.31% | 1 | 16.67% |
Neil Brown | 23 | 22.55% | 2 | 33.33% |
Pavel Emelyanov | 22 | 21.57% | 2 | 33.33% |
Tom Tucker | 22 | 21.57% | 1 | 16.67% |
Total | 102 | 100.00% | 6 | 100.00% |
/* Release the ip_map reference a transport may still hold at teardown. */
void
svcauth_unix_info_release(struct svc_xprt *xpt)
{
	struct ip_map *ipm = xpt->xpt_auth_cache;

	if (ipm) {
		struct sunrpc_net *sn;

		sn = net_generic(xpt->xpt_net, sunrpc_net_id);
		cache_put(&ipm->h, sn->ip_map_cache);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pavel Emelyanov | 35 | 61.40% | 2 | 66.67% |
Greg Banks | 22 | 38.60% | 1 | 33.33% |
Total | 57 | 100.00% | 3 | 100.00% |
/****************************************************************************
* auth.unix.gid cache
* simple cache to map a UID to a list of GIDs
* because AUTH_UNIX aka AUTH_SYS has a max of UNX_NGROUPS
*/
#define GID_HASHBITS 8
#define GID_HASHMAX (1<<GID_HASHBITS)
/* One auth.unix.gid cache entry: the supplementary group list for a uid,
 * supplied by userspace because AUTH_UNIX carries at most UNX_NGROUPS. */
struct unix_gid {
	struct cache_head h;	/* generic sunrpc cache bookkeeping */
	kuid_t uid;		/* lookup key */
	struct group_info *gi;	/* only meaningful when VALID && !NEGATIVE */
};
/* Hash a kuid into the GID_HASHBITS-wide unix_gid bucket index. */
static int unix_gid_hash(kuid_t uid)
{
	uid_t id = from_kuid(&init_user_ns, uid);

	return hash_long(id, GID_HASHBITS);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric W. Biedermann | 23 | 100.00% | 1 | 100.00% |
Total | 23 | 100.00% | 1 | 100.00% |
static void unix_gid_put(struct kref *kref)
{
struct cache_head *item = container_of(kref, struct cache_head, ref);
struct unix_gid *ug = container_of(item, struct unix_gid, h);
if (test_bit(CACHE_VALID, &item->flags) &&
!test_bit(CACHE_NEGATIVE, &item->flags))
put_group_info(ug->gi);
kfree(ug);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Neil Brown | 76 | 100.00% | 1 | 100.00% |
Total | 76 | 100.00% | 1 | 100.00% |
/* unix_gid entries are keyed solely by uid. */
static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew)
{
	struct unix_gid *a = container_of(corig, struct unix_gid, h);
	struct unix_gid *b = container_of(cnew, struct unix_gid, h);

	return uid_eq(a->uid, b->uid);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Neil Brown | 54 | 93.10% | 1 | 50.00% |
Eric W. Biedermann | 4 | 6.90% | 1 | 50.00% |
Total | 58 | 100.00% | 2 | 100.00% |
/* Copy the lookup key (uid) from @citem into @cnew. */
static void unix_gid_init(struct cache_head *cnew, struct cache_head *citem)
{
	struct unix_gid *dst = container_of(cnew, struct unix_gid, h);
	struct unix_gid *src = container_of(citem, struct unix_gid, h);

	dst->uid = src->uid;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Neil Brown | 54 | 100.00% | 1 | 100.00% |
Total | 54 | 100.00% | 1 | 100.00% |
/* Install @citem's group_info into @cnew, taking an extra reference
 * so both entries own one. */
static void unix_gid_update(struct cache_head *cnew, struct cache_head *citem)
{
	struct unix_gid *dst = container_of(cnew, struct unix_gid, h);
	struct unix_gid *src = container_of(citem, struct unix_gid, h);

	get_group_info(src->gi);
	dst->gi = src->gi;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Neil Brown | 61 | 100.00% | 1 | 100.00% |
Total | 61 | 100.00% | 1 | 100.00% |
/* Allocate a blank unix_gid for the cache layer; returns its cache_head,
 * or NULL on allocation failure. */
static struct cache_head *unix_gid_alloc(void)
{
	struct unix_gid *ug = kmalloc(sizeof(*ug), GFP_KERNEL);

	return ug ? &ug->h : NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Neil Brown | 40 | 100.00% | 1 | 100.00% |
Total | 40 | 100.00% | 1 | 100.00% |
/*
 * unix_gid_request - format an upcall line for a unix_gid entry.
 *
 * Writes "<uid>\n" (decimal, init_user_ns-relative) into the buffer at
 * *bpp for the userspace cache daemon to answer.
 */
static void unix_gid_request(struct cache_detail *cd,
		     struct cache_head *h,
		     char **bpp, int *blen)
{
	char tuid[20];
	struct unix_gid *ug = container_of(h, struct unix_gid, h);

	/* Size the format to the buffer instead of repeating a magic 20. */
	snprintf(tuid, sizeof(tuid), "%u", from_kuid(&init_user_ns, ug->uid));
	qword_add(bpp, blen, tuid);
	/* qword_add left a trailing space; turn it into the record newline. */
	(*bpp)[-1] = '\n';
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Neil Brown | 79 | 92.94% | 1 | 50.00% |
Eric W. Biedermann | 6 | 7.06% | 1 | 50.00% |
Total | 85 | 100.00% | 2 | 100.00% |
static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid);
/*
 * unix_gid_parse - parse an administrative update written to the
 * auth.unix.gid channel file.
 *
 * Expected line: "uid expiry Ngid gid0 gid1 ... gidN-1".  Builds a
 * group_info from the listed gids and installs it in the cache entry
 * for the uid.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): the gid list is stored in the order given; nothing here
 * sorts it — confirm that consumers of ug.gi tolerate unsorted
 * group_info.  Also, make_kuid()'s result is not checked with
 * uid_valid() — verify an invalid uid is harmless here.
 */
static int unix_gid_parse(struct cache_detail *cd,
			char *mesg, int mlen)
{
	/* uid expiry Ngid gid0 gid1 ... gidN-1 */
	int id;
	kuid_t uid;
	int gids;
	int rv;
	int i;
	int err;
	time_t expiry;
	struct unix_gid ug, *ugp;

	/* Input must be a complete, newline-terminated line. */
	if (mesg[mlen - 1] != '\n')
		return -EINVAL;
	mesg[mlen-1] = 0;

	rv = get_int(&mesg, &id);
	if (rv)
		return -EINVAL;
	uid = make_kuid(&init_user_ns, id);
	ug.uid = uid;

	expiry = get_expiry(&mesg);
	if (expiry == 0)
		return -EINVAL;

	/* Bound the group count to keep the allocation sane. */
	rv = get_int(&mesg, &gids);
	if (rv || gids < 0 || gids > 8192)
		return -EINVAL;

	ug.gi = groups_alloc(gids);
	if (!ug.gi)
		return -ENOMEM;

	for (i = 0 ; i < gids ; i++) {
		int gid;
		kgid_t kgid;
		rv = get_int(&mesg, &gid);
		err = -EINVAL;
		if (rv)
			goto out;
		kgid = make_kgid(&init_user_ns, gid);
		if (!gid_valid(kgid))
			goto out;
		ug.gi->gid[i] = kgid;
	}

	ugp = unix_gid_lookup(cd, uid);
	if (ugp) {
		struct cache_head *ch;
		ug.h.flags = 0;
		ug.h.expiry_time = expiry;
		/* sunrpc_cache_update copies ug into the cached entry,
		 * taking its own group_info ref via unix_gid_update(). */
		ch = sunrpc_cache_update(cd,
					 &ug.h, &ugp->h,
					 unix_gid_hash(uid));
		if (!ch)
			err = -ENOMEM;
		else {
			err = 0;
			cache_put(ch, cd);
		}
	} else
		err = -ENOMEM;
out:
	/* Drop our local reference; on success the cache holds its own. */
	if (ug.gi)
		put_group_info(ug.gi);
	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Neil Brown | 305 | 86.40% | 1 | 16.67% |
Eric W. Biedermann | 40 | 11.33% | 3 | 50.00% |
Stanislav Kinsbursky | 4 | 1.13% | 1 | 16.67% |
Alexey Dobriyan | 4 | 1.13% | 1 | 16.67% |
Total | 353 | 100.00% | 6 | 100.00% |
static int unix_gid_show(struct seq_file