Release 4.15: include/linux/bpf-cgroup.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/jump_label.h>
#include <uapi/linux/bpf.h>

struct sock;
struct cgroup;
struct sk_buff;
struct bpf_sock_ops_kern;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;

#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)


struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
};

struct bpf_prog_array;


struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* attached progs to this cgroup and attach flags
	 * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
	 * have either zero or one element
	 * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array __rcu *inactive;
};
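
As a rough illustration of how these per-type lists get populated from user space (not part of this header; the helper below is hypothetical), a program can be appended to the list rather than replacing the current one by passing BPF_F_ALLOW_MULTI to the BPF_PROG_ATTACH command of bpf(2):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Hypothetical helper: attach prog_fd to the cgroup referred to by cgroup_fd
 * (an fd on the cgroup directory) in multi-program mode.
 */
static int attach_ingress_multi(int cgroup_fd, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd     = cgroup_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
	attr.attach_flags  = BPF_F_ALLOW_MULTI;	/* append instead of replace */

	return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
}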

void cgroup_bpf_put(struct cgroup *cgrp);
int cgroup_bpf_inherit(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrappers for __cgroup_bpf_*(), protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);
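
A matching user-space sketch of the query side, assuming the BPF_PROG_QUERY command and the attr.query fields added in this release (the helper name is made up):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Hypothetical helper: fill ids[] with the program IDs attached to the cgroup
 * for the egress hook. *cnt is the capacity on entry and the count on return.
 */
static int query_egress_progs(int cgroup_fd, __u32 *ids, __u32 *cnt)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.query.target_fd   = cgroup_fd;
	attr.query.attach_type = BPF_CGROUP_INET_EGRESS;
	attr.query.prog_ids    = (__u64)(unsigned long)ids;
	attr.query.prog_cnt    = *cnt;

	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
	if (!err)
		*cnt = attr.query.prog_cnt;
	return err;
}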

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */

#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled)                                               \
                __ret = __cgroup_bpf_run_filter_skb(sk, skb,                  \
                                                    BPF_CGROUP_INET_INGRESS); \
                                                                              \
        __ret;                                                                \
})
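
A minimal sketch of how a receive-path call site uses this wrapper (the function is hypothetical; the pattern follows sk_filter_trim_cap() in net/core/filter.c). Thanks to the static key, the block compiles down to __ret = 0 when no cgroup BPF program has ever been attached:

/* Hypothetical ingress hook site: a non-zero return drops the packet. */
static inline int example_ingress_hook(struct sock *sk, struct sk_buff *skb)
{
	int err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);

	if (err)
		return err;	/* an attached program rejected the skb */

	/* ... continue with the normal receive path ... */
	return 0;
}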


#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled && sk && sk == skb->sk) {                       \
                typeof(sk) __sk = sk_to_full_sk(sk);                           \
                if (sk_fullsock(__sk))                                         \
                        __ret = __cgroup_bpf_run_filter_skb(__sk, skb,         \
                                                      BPF_CGROUP_INET_EGRESS); \
        }                                                                      \
        __ret;                                                                 \
})


#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled && sk) {                                        \
                __ret = __cgroup_bpf_run_filter_sk(sk,                         \
                                                 BPF_CGROUP_INET_SOCK_CREATE); \
        }                                                                      \
        __ret;                                                                 \
})
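
For the program side of this hook, a hedged sketch of a BPF_PROG_TYPE_CGROUP_SOCK program (built separately with clang -target bpf; the section name is a libbpf/samples convention, not something this header defines). Returning 1 allows the socket to be created, 0 makes socket(2) fail:

#include <linux/bpf.h>

/* Hypothetical BPF program for BPF_CGROUP_INET_SOCK_CREATE:
 * only permit TCP sockets in this cgroup, as an illustration.
 */
__attribute__((section("cgroup/sock"), used))
int sock_create_prog(struct bpf_sock *ctx)
{
	if (ctx->protocol != 6 /* IPPROTO_TCP */)
		return 0;	/* deny */
	return 1;		/* allow */
}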


#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled && (sock_ops)->sk) {                           \
                typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk);               \
                if (__sk && sk_fullsock(__sk))                                 \
                        __ret = __cgroup_bpf_run_filter_sock_ops(__sk,         \
                                                                 sock_ops,     \
                                                         BPF_CGROUP_SOCK_OPS); \
        }                                                                      \
        __ret;                                                                 \
})
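
The sock_ops wrapper is written to be expanded where a local variable named sk is in scope (note the typeof(sk) above); below is a sketch following the shape of tcp_call_bpf() in include/net/tcp.h, with the op value chosen by the caller:

#include <linux/string.h>	/* memset() */
#include <linux/filter.h>	/* struct bpf_sock_ops_kern */

/* Hypothetical call site: run the attached sock_ops programs for this socket. */
static inline int example_call_sock_ops(struct sock *sk, int op)
{
	struct bpf_sock_ops_kern sock_ops;

	memset(&sock_ops, 0, sizeof(sock_ops));
	sock_ops.sk = sk;	/* the macro dereferences (sock_ops)->sk */
	sock_ops.op = op;	/* e.g. BPF_SOCK_OPS_TIMEOUT_INIT */

	return BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
}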


#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)	      \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled)                                               \
                __ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
                                                          access,             \
                                                          BPF_CGROUP_DEVICE); \
                                                                              \
        __ret;                                                                \
})
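
And a sketch of the device-cgroup check added in this release (the wrapper function here is hypothetical; the real call site lives in the device cgroup code). dev_type and access use the BPF_DEVCG_* constants from uapi/linux/bpf.h, and a non-zero return is treated as a denial:

/* Hypothetical check: may the current task access device (major, minor)? */
static inline int example_devcg_may_access(short dev_type, u32 major,
					   u32 minor, short access)
{
	/* e.g. dev_type == BPF_DEVCG_DEV_CHAR,
	 * access == BPF_DEVCG_ACC_READ | BPF_DEVCG_ACC_WRITE
	 */
	return BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(dev_type, major, minor, access);
}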
#else


struct cgroup_bpf {};

static inline void cgroup_bpf_put(struct cgroup *cgrp) {}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Daniel Mack              11  100.00%        1     100.00%
Total                    11  100.00%        1     100.00%


static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }

Contributors

Person               Tokens     Prop  Commits  CommitProp
Daniel Mack               9   60.00%        1      50.00%
Alexei Starovoitov        6   40.00%        1      50.00%
Total                    15  100.00%        2     100.00%

#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */

Overall Contributors

Person               Tokens     Prop  Commits  CommitProp
Daniel Mack             188   42.92%        2      18.18%
Alexei Starovoitov      134   30.59%        3      27.27%
Roman Gushchin           47   10.73%        1       9.09%
Lawrence Brakmo          35    7.99%        1       9.09%
David Ahern              32    7.31%        2      18.18%
Américo Wang              1    0.23%        1       9.09%
Greg Kroah-Hartman        1    0.23%        1       9.09%
Total                   438  100.00%       11     100.00%