Linux 4.9: include/linux/netfilter.h
#ifndef __LINUX_NETFILTER_H
#define __LINUX_NETFILTER_H
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/static_key.h>
#include <linux/netfilter_defs.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#ifdef CONFIG_NETFILTER
static inline int NF_DROP_GETERR(int verdict)
{
        return -(verdict >> NF_VERDICT_QBITS);
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| Florian Westphal | 18 | 100.00% | 1 | 100.00% |
| Total | 18 | 100.00% | 1 | 100.00% |
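As a side note on the encoding NF_DROP_GETERR() reverses: a drop verdict carries a negative errno in its upper bits. A minimal userspace sketch of the round trip, assuming the NF_DROP_ERR()-style encoding from the netfilter uapi header and NF_VERDICT_QBITS == 16:

#include <stdio.h>

#define NF_DROP           0
#define NF_VERDICT_QBITS  16
#define NF_DROP_ERR(x)    (((-x) << 16) | NF_DROP)

static int nf_drop_geterr(int verdict)
{
        /* Mirrors NF_DROP_GETERR() above: recover the embedded errno. */
        return -(verdict >> NF_VERDICT_QBITS);
}

int main(void)
{
        int verdict = NF_DROP_ERR(-111);         /* -ECONNREFUSED on Linux */

        printf("%d\n", nf_drop_geterr(verdict)); /* prints -111 */
        return 0;
}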
static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
                                   const union nf_inet_addr *a2)
{
        return a1->all[0] == a2->all[0] &&
               a1->all[1] == a2->all[1] &&
               a1->all[2] == a2->all[2] &&
               a1->all[3] == a2->all[3];
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| Patrick McHardy | 76 | 100.00% | 1 | 100.00% |
| Total | 76 | 100.00% | 1 | 100.00% |
static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
                                     union nf_inet_addr *result,
                                     const union nf_inet_addr *mask)
{
        result->all[0] = a1->all[0] & mask->all[0];
        result->all[1] = a1->all[1] & mask->all[1];
        result->all[2] = a1->all[2] & mask->all[2];
        result->all[3] = a1->all[3] & mask->all[3];
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| Denys Fedoryshchenko | 108 | 100.00% | 1 | 100.00% |
| Total | 108 | 100.00% | 1 | 100.00% |
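A userspace sketch of what these two helpers compute, using a pared-down stand-in for union nf_inet_addr (the real union also overlays __be32 ip, struct in_addr, and struct in6_addr views); here a /24 mask is applied to an IPv4 address:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

union inet_addr_sketch {                /* illustrative stand-in */
        uint32_t all[4];
        uint32_t ip;                    /* IPv4 lives in the first word */
};

int main(void)
{
        union inet_addr_sketch addr = { .all = { htonl(0xc0a80117) } }; /* 192.168.1.23 */
        union inet_addr_sketch mask = { .all = { htonl(0xffffff00) } }; /* 255.255.255.0 */
        union inet_addr_sketch net;

        for (int i = 0; i < 4; i++)     /* the loop nf_inet_addr_mask() unrolls */
                net.all[i] = addr.all[i] & mask.all[i];

        printf("%08x\n", ntohl(net.ip)); /* c0a80100, i.e. 192.168.1.0 */
        return 0;
}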
int netfilter_init(void);
struct sk_buff;
struct nf_hook_ops;
struct sock;
struct nf_hook_state {
        unsigned int hook;
        int thresh;
        u_int8_t pf;
        struct net_device *in;
        struct net_device *out;
        struct sock *sk;
        struct net *net;
        struct nf_hook_entry __rcu *hook_entries;
        int (*okfn)(struct net *, struct sock *, struct sk_buff *);
};

typedef unsigned int nf_hookfn(void *priv,
                               struct sk_buff *skb,
                               const struct nf_hook_state *state);

struct nf_hook_ops {
        struct list_head list;

        /* User fills in from here down. */
        nf_hookfn *hook;
        struct net_device *dev;
        void *priv;
        u_int8_t pf;
        unsigned int hooknum;
        /* Hooks are ordered in ascending priority. */
        int priority;
};

struct nf_hook_entry {
        struct nf_hook_entry __rcu *next;
        struct nf_hook_ops ops;
        const struct nf_hook_ops *orig_ops;
};
static inline void nf_hook_state_init(struct nf_hook_state *p,
                                      struct nf_hook_entry *hook_entry,
                                      unsigned int hook,
                                      int thresh, u_int8_t pf,
                                      struct net_device *indev,
                                      struct net_device *outdev,
                                      struct sock *sk,
                                      struct net *net,
                                      int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        p->hook = hook;
        p->thresh = thresh;
        p->pf = pf;
        p->in = indev;
        p->out = outdev;
        p->sk = sk;
        p->net = net;
        RCU_INIT_POINTER(p->hook_entries, hook_entry);
        p->okfn = okfn;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| David S. Miller | 94 | 76.42% | 3 | 42.86% |
| Eric W. Biederman | 15 | 12.20% | 2 | 28.57% |
| Aaron Conole | 8 | 6.50% | 1 | 14.29% |
| Pablo Neira Ayuso | 6 | 4.88% | 1 | 14.29% |
| Total | 123 | 100.00% | 7 | 100.00% |
struct nf_sockopt_ops {
        struct list_head list;

        u_int8_t pf;

        /* Non-inclusive ranges: use 0/0/NULL to never get called. */
        int set_optmin;
        int set_optmax;
        int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len);
#ifdef CONFIG_COMPAT
        int (*compat_set)(struct sock *sk, int optval,
                          void __user *user, unsigned int len);
#endif
        int get_optmin;
        int get_optmax;
        int (*get)(struct sock *sk, int optval, void __user *user, int *len);
#ifdef CONFIG_COMPAT
        int (*compat_get)(struct sock *sk, int optval,
                          void __user *user, int *len);
#endif
        /* Use the module struct to lock set/get code in place */
        struct module *owner;
};
/* Function to register/unregister hook points. */
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops);
int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
                          unsigned int n);
void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
                             unsigned int n);
int nf_register_hook(struct nf_hook_ops *reg);
void nf_unregister_hook(struct nf_hook_ops *reg);
int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
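A minimal sketch of how the per-netns registration functions above are typically used from a module; the hook function, the ops name, and the use of init_net are illustrative, not part of this header:

#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/net_namespace.h>

static unsigned int my_prerouting_hook(void *priv, struct sk_buff *skb,
                                       const struct nf_hook_state *state)
{
        /* Inspect or modify skb here; NF_ACCEPT lets it continue. */
        return NF_ACCEPT;
}

static struct nf_hook_ops my_ops = {
        .hook     = my_prerouting_hook,
        .pf       = NFPROTO_IPV4,
        .hooknum  = NF_INET_PRE_ROUTING,
        .priority = NF_IP_PRI_FIRST,
};

static int __init my_init(void)
{
        return nf_register_net_hook(&init_net, &my_ops);
}

static void __exit my_exit(void)
{
        nf_unregister_net_hook(&init_net, &my_ops);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");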
/* Functions to register get/setsockopt ranges (non-inclusive). You
   need to check permissions yourself! */
int nf_register_sockopt(struct nf_sockopt_ops *reg);
void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
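Correspondingly, a hedged sketch of a sockopt range registration; the option numbers and the handler body are hypothetical (ranges are non-inclusive, so get_optmax is one past the last option handled):

static int my_get(struct sock *sk, int optval, void __user *user, int *len)
{
        return 0;       /* would copy_to_user() the requested data here */
}

static struct nf_sockopt_ops my_sockopts = {
        .pf         = PF_INET,
        .get_optmin = 200,              /* hypothetical option number */
        .get_optmax = 201,              /* non-inclusive upper bound */
        .get        = my_get,
        .owner      = THIS_MODULE,
};

/* nf_register_sockopt(&my_sockopts) at module init,
 * nf_unregister_sockopt(&my_sockopts) at exit. */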
#ifdef HAVE_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
#endif
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
/**
* nf_hook_thresh - call a netfilter hook
*
* Returns 1 if the hook has allowed the packet to pass. The function
* okfn must be invoked by the caller in this case. Any other return
* value indicates the packet has been consumed by the hook.
*/
static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
                                 struct net *net,
                                 struct sock *sk,
                                 struct sk_buff *skb,
                                 struct net_device *indev,
                                 struct net_device *outdev,
                                 int (*okfn)(struct net *, struct sock *, struct sk_buff *),
                                 int thresh)
{
        struct nf_hook_entry *hook_head;
        int ret = 1;

#ifdef HAVE_JUMP_LABEL
        if (__builtin_constant_p(pf) &&
            __builtin_constant_p(hook) &&
            !static_key_false(&nf_hooks_needed[pf][hook]))
                return 1;
#endif

        rcu_read_lock();
        hook_head = rcu_dereference(net->nf.hooks[pf][hook]);
        if (hook_head) {
                struct nf_hook_state state;

                nf_hook_state_init(&state, hook_head, hook, thresh,
                                   pf, indev, outdev, sk, net, okfn);

                ret = nf_hook_slow(skb, &state);
        }
        rcu_read_unlock();

        return ret;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| Florian Westphal | 44 | 25.29% | 2 | 11.11% |
| David S. Miller | 37 | 21.26% | 4 | 22.22% |
| Patrick McHardy | 32 | 18.39% | 1 | 5.56% |
| Eric W. Biederman | 24 | 13.79% | 5 | 27.78% |
| Aaron Conole | 17 | 9.77% | 1 | 5.56% |
| Linus Torvalds | 14 | 8.05% | 1 | 5.56% |
| Bart De Schuymer | 3 | 1.72% | 1 | 5.56% |
| Jan Engelhardt | 1 | 0.57% | 1 | 5.56% |
| Pablo Neira Ayuso | 1 | 0.57% | 1 | 5.56% |
| Herbert Xu | 1 | 0.57% | 1 | 5.56% |
| Total | 174 | 100.00% | 18 | 100.00% |
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
                          struct sock *sk, struct sk_buff *skb,
                          struct net_device *indev, struct net_device *outdev,
                          int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        return nf_hook_thresh(pf, hook, net, sk, skb, indev, outdev, okfn, INT_MIN);
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| pre-git | 32 | 40.00% | 1 | 12.50% |
| Patrick McHardy | 23 | 28.75% | 1 | 12.50% |
| Eric W. Biederman | 11 | 13.75% | 3 | 37.50% |
| David S. Miller | 11 | 13.75% | 1 | 12.50% |
| Herbert Xu | 2 | 2.50% | 1 | 12.50% |
| Jan Engelhardt | 1 | 1.25% | 1 | 12.50% |
| Total | 80 | 100.00% | 8 | 100.00% |
/* Activate hook; either okfn or kfree_skb called, unless a hook
   returns NF_STOLEN (in which case, it's up to the hook to deal with
   the consequences).

   Returns -ERRNO if packet dropped.  Zero means queued, stolen or
   accepted.
*/

/* RR:
   > I don't want nf_hook to return anything because people might forget
   > about async and trust the return value to mean "packet was ok".

   AK:
   Just document it clearly, then you can expect some sense from kernel
   coders :)
*/
static inline int
NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
               struct sk_buff *skb, struct net_device *in,
               struct net_device *out,
               int (*okfn)(struct net *, struct sock *, struct sk_buff *),
               int thresh)
{
        int ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, thresh);
        if (ret == 1)
                ret = okfn(net, sk, skb);
        return ret;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| Jan Engelhardt | 65 | 61.90% | 1 | 12.50% |
| David S. Miller | 13 | 12.38% | 1 | 12.50% |
| Eric W. Biederman | 13 | 12.38% | 3 | 37.50% |
| Patrick McHardy | 11 | 10.48% | 1 | 12.50% |
| Bart De Schuymer | 2 | 1.90% | 1 | 12.50% |
| pre-git | 1 | 0.95% | 1 | 12.50% |
| Total | 105 | 100.00% | 8 | 100.00% |
static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
             struct sk_buff *skb, struct net_device *in, struct net_device *out,
             int (*okfn)(struct net *, struct sock *, struct sk_buff *),
             bool cond)
{
        int ret;

        if (!cond ||
            ((ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, INT_MIN)) == 1))
                ret = okfn(net, sk, skb);
        return ret;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| Jan Engelhardt | 69 | 61.61% | 1 | 12.50% |
| Patrick McHardy | 15 | 13.39% | 2 | 25.00% |
| David S. Miller | 13 | 11.61% | 1 | 12.50% |
| Eric W. Biederman | 13 | 11.61% | 3 | 37.50% |
| Eric Paris | 2 | 1.79% | 1 | 12.50% |
| Total | 112 | 100.00% | 8 | 100.00% |
static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb,
        struct net_device *in, struct net_device *out,
        int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        return NF_HOOK_THRESH(pf, hook, net, sk, skb, in, out, okfn, INT_MIN);
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| Jan Engelhardt | 46 | 57.50% | 1 | 20.00% |
| Patrick McHardy | 12 | 15.00% | 1 | 20.00% |
| David S. Miller | 11 | 13.75% | 1 | 20.00% |
| Eric W. Biederman | 11 | 13.75% | 2 | 40.00% |
| Total | 80 | 100.00% | 5 | 100.00% |
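For reference, this is how the IPv4 receive path invokes the wrapper in this kernel series (net/ipv4/ip_input.c): if no hook consumes the skb, ip_rcv_finish() runs as the okfn continuation.

        return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
                       net, NULL, skb, dev, NULL,
                       ip_rcv_finish);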
/* Call setsockopt() */
int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
                  unsigned int len);
int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
                  int *len);
#ifdef CONFIG_COMPAT
int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval,
                         char __user *opt, unsigned int len);
int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
                         char __user *opt, int *len);
#endif
#endif
/* Call this before modifying an existing packet: ensures it is
   modifiable and linear to the point you care about (writable_len).
   Returns true or false. */
int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
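A sketch of the usual call pattern before rewriting a header in place; iph and new_tos are illustrative, and the surrounding hook function is assumed:

        if (!skb_make_writable(skb, sizeof(struct iphdr)))
                return NF_DROP;

        iph = ip_hdr(skb);      /* re-fetch: the data may have been reallocated */
        iph->tos = new_tos;     /* now safe to modify in place */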
struct flowi;
struct nf_queue_entry;
struct nf_afinfo {
        unsigned short family;
        __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook,
                            unsigned int dataoff, u_int8_t protocol);
        __sum16 (*checksum_partial)(struct sk_buff *skb,
                                    unsigned int hook,
                                    unsigned int dataoff,
                                    unsigned int len,
                                    u_int8_t protocol);
        int (*route)(struct net *net, struct dst_entry **dst,
                     struct flowi *fl, bool strict);
        void (*saveroute)(const struct sk_buff *skb,
                          struct nf_queue_entry *entry);
        int (*reroute)(struct net *net, struct sk_buff *skb,
                       const struct nf_queue_entry *entry);
        int route_key_size;
};
extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO];
static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family)
{
        return rcu_dereference(nf_afinfo[family]);
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| Patrick McHardy | 21 | 91.30% | 2 | 66.67% |
| Harald Welte | 2 | 8.70% | 1 | 33.33% |
| Total | 23 | 100.00% | 3 | 100.00% |
static inline __sum16
nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff,
            u_int8_t protocol, unsigned short family)
{
        const struct nf_afinfo *afinfo;
        __sum16 csum = 0;

        rcu_read_lock();
        afinfo = nf_get_afinfo(family);
        if (afinfo)
                csum = afinfo->checksum(skb, hook, dataoff, protocol);
        rcu_read_unlock();
        return csum;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| Patrick McHardy | 71 | 97.26% | 2 | 66.67% |
| Al Viro | 2 | 2.74% | 1 | 33.33% |
| Total | 73 | 100.00% | 3 | 100.00% |
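A hedged sketch of a caller validating a TCP checksum from inside a hook; state and the enclosing function are assumed context, and a non-zero folded sum means the checksum is bad:

        __sum16 csum = nf_checksum(skb, state->hook, ip_hdrlen(skb),
                                   IPPROTO_TCP, NFPROTO_IPV4);
        if (csum)
                return NF_DROP; /* corrupted packet */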
static inline __sum16
nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
                    unsigned int dataoff, unsigned int len,
                    u_int8_t protocol, unsigned short family)
{
        const struct nf_afinfo *afinfo;
        __sum16 csum = 0;

        rcu_read_lock();
        afinfo = nf_get_afinfo(family);
        if (afinfo)
                csum = afinfo->checksum_partial(skb, hook, dataoff, len,
                                                protocol);
        rcu_read_unlock();
        return csum;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| Patrick McHardy | 79 | 100.00% | 1 | 100.00% |
| Total | 79 | 100.00% | 1 | 100.00% |
int nf_register_afinfo(const struct nf_afinfo *afinfo);
void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
#include <net/flow.h>
extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
#ifdef CONFIG_NF_NAT_NEEDED
        void (*decodefn)(struct sk_buff *, struct flowi *);

        rcu_read_lock();
        decodefn = rcu_dereference(nf_nat_decode_session_hook);
        if (decodefn)
                decodefn(skb, fl);
        rcu_read_unlock();
#endif
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| Patrick McHardy | 62 | 96.88% | 3 | 60.00% |
| Jozsef Kadlecsik | 1 | 1.56% | 1 | 20.00% |
| Jan Engelhardt | 1 | 1.56% | 1 | 20.00% |
| Total | 64 | 100.00% | 5 | 100.00% |
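This hook is consulted around policy lookups so the flow key reflects pre-NAT addresses; schematically (the flowi population is omitted):

        struct flowi fl;

        /* ... populate fl from the packet ... */
        nf_nat_decode_session(skb, &fl, NFPROTO_IPV4);
        /* fl now carries the reverse-NAT view for the xfrm lookup */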
#else /* !CONFIG_NETFILTER */
static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
             struct sk_buff *skb, struct net_device *in, struct net_device *out,
             int (*okfn)(struct net *, struct sock *, struct sk_buff *),
             bool cond)
{
        return okfn(net, sk, skb);
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| Arnd Bergmann | 56 | 78.87% | 1 | 25.00% |
| pre-git | 11 | 15.49% | 1 | 25.00% |
| David S. Miller | 2 | 2.82% | 1 | 25.00% |
| Eric W. Biederman | 2 | 2.82% | 1 | 25.00% |
| Total | 71 | 100.00% | 4 | 100.00% |
static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
        struct sk_buff *skb, struct net_device *in, struct net_device *out,
        int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        return okfn(net, sk, skb);
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| Arnd Bergmann | 52 | 76.47% | 1 | 25.00% |
| Patrick McHardy | 12 | 17.65% | 1 | 25.00% |
| David S. Miller | 2 | 2.94% | 1 | 25.00% |
| Eric W. Biederman | 2 | 2.94% | 1 | 25.00% |
| Total | 68 | 100.00% | 4 | 100.00% |
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
                          struct sock *sk, struct sk_buff *skb,
                          struct net_device *indev, struct net_device *outdev,
                          int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        return 1;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| David S. Miller | 49 | 80.33% | 2 | 28.57% |
| Eric W. Biederman | 9 | 14.75% | 2 | 28.57% |
| Patrick McHardy | 1 | 1.64% | 1 | 14.29% |
| Herbert Xu | 1 | 1.64% | 1 | 14.29% |
| Jan Engelhardt | 1 | 1.64% | 1 | 14.29% |
| Total | 61 | 100.00% | 7 | 100.00% |
struct flowi;
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| Patrick McHardy | 17 | 89.47% | 1 | 50.00% |
| Jan Engelhardt | 2 | 10.53% | 1 | 50.00% |
| Total | 19 | 100.00% | 2 | 100.00% |
#endif /*CONFIG_NETFILTER*/
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <linux/netfilter/nf_conntrack_zones_common.h>
extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| Pablo Neira Ayuso | 16 | 100.00% | 1 | 100.00% |
| Total | 16 | 100.00% | 1 | 100.00% |
#endif
struct nf_conn;
enum ip_conntrack_info;
struct nlattr;
struct nfnl_ct_hook {
        struct nf_conn *(*get_ct)(const struct sk_buff *skb,
                                  enum ip_conntrack_info *ctinfo);
        size_t (*build_size)(const struct nf_conn *ct);
        int (*build)(struct sk_buff *skb, struct nf_conn *ct,
                     enum ip_conntrack_info ctinfo,
                     u_int16_t ct_attr, u_int16_t ct_info_attr);
        int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
        int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct,
                             u32 portid, u32 report);
        void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
                           enum ip_conntrack_info ctinfo, s32 off);
};
extern struct nfnl_ct_hook __rcu *nfnl_ct_hook;
/**
* nf_skb_duplicated - TEE target has sent a packet
*
* When a xtables target sends a packet, the OUTPUT and POSTROUTING
* hooks are traversed again, i.e. nft and xtables are invoked recursively.
*
* This is used by xtables TEE target to prevent the duplicated skb from
* being duplicated again.
*/
DECLARE_PER_CPU(bool, nf_skb_duplicated);
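Schematically, the duplication code brackets its send with this per-cpu flag so that re-entering the OUTPUT/POSTROUTING hooks does not duplicate again (modelled on the nf_dup infrastructure; error handling omitted):

        if (__this_cpu_read(nf_skb_duplicated))
                return;                 /* already inside a duplicated send */

        __this_cpu_write(nf_skb_duplicated, true);
        /* ... clone the skb and transmit the copy ... */
        __this_cpu_write(nf_skb_duplicated, false);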
#endif /*__LINUX_NETFILTER_H*/
Overall Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
| Patrick McHardy | 603 | 25.17% | 20 | 23.81% |
| David S. Miller | 287 | 11.98% | 6 | 7.14% |
| pre-git | 243 | 10.14% | 5 | 5.95% |
| Jan Engelhardt | 198 | 8.26% | 4 | 4.76% |
| Eric W. Biederman | 188 | 7.85% | 7 | 8.33% |
| Pablo Neira Ayuso | 172 | 7.18% | 9 | 10.71% |
| Denys Fedoryshchenko | 108 | 4.51% | 1 | 1.19% |
| Arnd Bergmann | 108 | 4.51% | 1 | 1.19% |
| Aaron Conole | 104 | 4.34% | 1 | 1.19% |
| Dmitry Mishin | 93 | 3.88% | 1 | 1.19% |
| Florian Westphal | 78 | 3.26% | 6 | 7.14% |
| Harald Welte | 61 | 2.55% | 2 | 2.38% |
| Eric Dumazet | 30 | 1.25% | 2 | 2.38% |
| Mahesh Bandewar | 26 | 1.09% | 1 | 1.19% |
| Yasuyuki Kozakai | 26 | 1.09% | 2 | 2.38% |
| Alexey Dobriyan | 15 | 0.63% | 1 | 1.19% |
| Linus Torvalds | 14 | 0.58% | 1 | 1.19% |
| Al Viro | 7 | 0.29% | 2 | 2.38% |
| Bart De Schuymer | 7 | 0.29% | 1 | 1.19% |
| Herbert Xu | 6 | 0.25% | 2 | 2.38% |
| Zhouyi Zhou | 6 | 0.25% | 1 | 1.19% |
| Ken-ichirou Matsuzawa | 4 | 0.17% | 2 | 2.38% |
| Neil Horman | 3 | 0.13% | 1 | 1.19% |
| Daniel Borkmann | 3 | 0.13% | 1 | 1.19% |
| Hideaki Yoshifuji | 2 | 0.08% | 1 | 1.19% |
| Eric Paris | 2 | 0.08% | 1 | 1.19% |
| Jozsef Kadlecsik | 1 | 0.04% | 1 | 1.19% |
| Ingo Molnar | 1 | 0.04% | 1 | 1.19% |
| Total | 2396 | 100.00% | 84 | 100.00% |