Release 4.15 kernel/audit.c
/* audit.c -- Auditing support
* Gateway between the kernel (e.g., selinux) and the user-space audit daemon.
* System-call specific features have moved to auditsc.c
*
* Copyright 2003-2007 Red Hat Inc., Durham, North Carolina.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Written by Rickard E. (Rik) Faith <faith@redhat.com>
*
* Goals: 1) Integrate fully with Security Modules.
* 2) Minimal run-time overhead:
* a) Minimal when syscall auditing is disabled (audit_enable=0).
* b) Small when syscall auditing is enabled and no audit record
* is generated (defer as much work as possible to record
* generation time):
* i) context is allocated,
* ii) names from getname are stored without a copy, and
* iii) inode information stored from path_lookup.
* 3) Ability to disable syscall auditing at boot time (audit=0).
* 4) Usable by other parts of the kernel (if audit_log* is called,
* then a syscall record will be generated automatically for the
* current syscall).
* 5) Netlink interface to user-space.
* 6) Support low-overhead kernel-based filtering to minimize the
* information that must be passed to user-space.
*
* Example user-space utilities: http://people.redhat.com/sgrubb/audit/
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/file.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/pid.h>
#include <linux/audit.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#ifdef CONFIG_SECURITY
#include <linux/security.h>
#endif
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <net/netns/generic.h>
#include "audit.h"
/* No auditing will take place until audit_initialized == AUDIT_INITIALIZED.
* (Initialization happens after skb_init is called.) */
#define AUDIT_DISABLED -1
#define AUDIT_UNINITIALIZED 0
#define AUDIT_INITIALIZED 1
static int audit_initialized;
#define AUDIT_OFF 0
#define AUDIT_ON 1
#define AUDIT_LOCKED 2
u32 audit_enabled = AUDIT_OFF;
bool audit_ever_enabled = !!AUDIT_OFF;
EXPORT_SYMBOL_GPL(audit_enabled);
/* Default state when kernel boots without any parameters. */
static u32 audit_default = AUDIT_OFF;
/* If auditing cannot proceed, audit_failure selects what happens. */
static u32 audit_failure = AUDIT_FAIL_PRINTK;
/* private audit network namespace index */
static unsigned int audit_net_id;
/**
* struct audit_net - audit private network namespace data
* @sk: communication socket
*/
struct audit_net {
struct sock *sk;
};
/**
* struct auditd_connection - kernel/auditd connection state
* @pid: auditd PID
* @portid: netlink portid
* @net: the associated network namespace
* @rcu: RCU head
*
* Description:
* This struct is RCU protected; you must either hold the RCU lock for reading
* or the associated spinlock for writing.
*/
static struct auditd_connection {
struct pid *pid;
u32 portid;
struct net *net;
struct rcu_head rcu;
} *auditd_conn = NULL;
static DEFINE_SPINLOCK(auditd_conn_lock);
/* If audit_rate_limit is non-zero, limit the rate of sending audit records
* to that number per second. This prevents DoS attacks, but results in
* audit records being dropped. */
static u32 audit_rate_limit;
/* Number of outstanding audit_buffers allowed.
* When set to zero, this means unlimited. */
static u32 audit_backlog_limit = 64;
#define AUDIT_BACKLOG_WAIT_TIME (60 * HZ)
static u32 audit_backlog_wait_time = AUDIT_BACKLOG_WAIT_TIME;
/* The identity of the user shutting down the audit system. */
kuid_t audit_sig_uid = INVALID_UID;
pid_t audit_sig_pid = -1;
u32 audit_sig_sid = 0;
/* Records can be lost in several ways:
0) [suppressed in audit_alloc]
1) out of memory in audit_log_start [kmalloc of struct audit_buffer]
2) out of memory in audit_log_move [alloc_skb]
3) suppressed due to audit_rate_limit
4) suppressed due to audit_backlog_limit
*/
static atomic_t audit_lost = ATOMIC_INIT(0);
/* Hash for inode-based rules */
struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS];
static struct kmem_cache *audit_buffer_cache;
/* queue msgs to send via kauditd_task */
static struct sk_buff_head audit_queue;
/* queue msgs due to temporary unicast send problems */
static struct sk_buff_head audit_retry_queue;
/* queue msgs waiting for new auditd connection */
static struct sk_buff_head audit_hold_queue;
/* queue servicing thread */
static struct task_struct *kauditd_task;
static DECLARE_WAIT_QUEUE_HEAD(kauditd_wait);
/* waitqueue for callers who are blocked on the audit backlog */
static DECLARE_WAIT_QUEUE_HEAD(audit_backlog_wait);
static struct audit_features af = {.vers = AUDIT_FEATURE_VERSION,
.mask = -1,
.features = 0,
.lock = 0,};
static char *audit_feature_names[2] = {
"only_unset_loginuid",
"loginuid_immutable",
};
/* Serialize requests from userspace. */
DEFINE_MUTEX(audit_cmd_mutex);
/* AUDIT_BUFSIZ is the size of the temporary buffer used for formatting
* audit records. Since printk uses a 1024 byte buffer, this buffer
* should be at least that large. */
#define AUDIT_BUFSIZ 1024
/* The audit_buffer is used when formatting an audit record. The caller
* locks briefly to get the record off the freelist or to allocate the
* buffer, and locks briefly to send the buffer to the netlink layer or
* to place it on a transmit queue. Multiple audit_buffers can be in
* use simultaneously. */
struct audit_buffer {
struct sk_buff *skb; /* formatted skb ready to send */
struct audit_context *ctx; /* NULL or associated context */
gfp_t gfp_mask;
};
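As a minimal sketch of the audit_buffer lifecycle described above, a caller elsewhere in the kernel might drive the public audit_log_start()/audit_log_format()/audit_log_end() interface as follows; the record type and fields are illustrative only, mirroring the audit_log_start(NULL, GFP_KERNEL, ...) pattern used later in audit_log_config_change():

/* illustrative sketch only -- not part of audit.c */
static void example_emit_record(void)
{
	struct audit_buffer *ab;

	/* allocate a buffer (and its backing skb) for one record */
	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;	/* rate/backlog limited or out of memory */

	/* append formatted text to the skb held by the buffer */
	audit_log_format(ab, "op=%s res=%d", "example", 1);

	/* hand the skb to kauditd's queue and free the buffer */
	audit_log_end(ab);
}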
struct audit_reply {
__u32 portid;
struct net *net;
struct sk_buff *skb;
};
/**
* auditd_test_task - Check to see if a given task is an audit daemon
* @task: the task to check
*
* Description:
* Return 1 if the task is a registered audit daemon, 0 otherwise.
*/
int auditd_test_task(struct task_struct *task)
{
int rc;
struct auditd_connection *ac;
rcu_read_lock();
ac = rcu_dereference(auditd_conn);
rc = (ac && ac->pid == task_tgid(task) ? 1 : 0);
rcu_read_unlock();
return rc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Moore | 53 | 100.00% | 3 | 100.00% |
Total | 53 | 100.00% | 3 | 100.00% |
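A brief sketch of why auditd_test_task() matters to callers: audit_log_start(), defined further down in audit.c (outside this excerpt), uses it so that backlog throttling is never applied to the audit daemon itself. The helper below is illustrative and simplified, not the exact audit_log_start() logic:

/* illustrative sketch only -- not part of audit.c */
static void example_backlog_wait(void)
{
	/* never make auditd wait on the queue it is meant to drain */
	if (auditd_test_task(current))
		return;

	/* ordinary tasks wait (up to audit_backlog_wait_time) for room */
	if (audit_backlog_limit &&
	    skb_queue_len(&audit_queue) > audit_backlog_limit)
		wait_event_freezable_timeout(audit_backlog_wait,
			skb_queue_len(&audit_queue) <= audit_backlog_limit,
			audit_backlog_wait_time);
}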
/**
* auditd_pid_vnr - Return the auditd PID relative to the namespace
*
* Description:
* Returns the PID in relation to the namespace, 0 on failure.
*/
static pid_t auditd_pid_vnr(void)
{
pid_t pid;
const struct auditd_connection *ac;
rcu_read_lock();
ac = rcu_dereference(auditd_conn);
if (!ac || !ac->pid)
pid = 0;
else
pid = pid_vnr(ac->pid);
rcu_read_unlock();
return pid;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Moore | 57 | 100.00% | 2 | 100.00% |
Total | 57 | 100.00% | 2 | 100.00% |
/**
* audit_get_sk - Return the audit socket for the given network namespace
* @net: the destination network namespace
*
* Description:
* Returns the sock pointer if valid, NULL otherwise. The caller must ensure
* that a reference is held for the network namespace while the sock is in use.
*/
static struct sock *audit_get_sk(const struct net *net)
{
struct audit_net *aunet;
if (!net)
return NULL;
aunet = net_generic(net, audit_net_id);
return aunet->sk;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Moore | 41 | 100.00% | 1 | 100.00% |
Total | 41 | 100.00% | 1 | 100.00% |
void audit_panic(const char *message)
{
switch (audit_failure) {
case AUDIT_FAIL_SILENT:
break;
case AUDIT_FAIL_PRINTK:
if (printk_ratelimit())
pr_err("%s\n", message);
break;
case AUDIT_FAIL_PANIC:
panic("audit: %s\n", message);
break;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 38 | 80.85% | 1 | 25.00% |
Eric Paris | 5 | 10.64% | 1 | 25.00% |
Joe Perches | 2 | 4.26% | 1 | 25.00% |
Chris Wright | 2 | 4.26% | 1 | 25.00% |
Total | 47 | 100.00% | 4 | 100.00% |
static inline int audit_rate_check(void)
{
static unsigned long last_check = 0;
static int messages = 0;
static DEFINE_SPINLOCK(lock);
unsigned long flags;
unsigned long now;
unsigned long elapsed;
int retval = 0;
if (!audit_rate_limit) return 1;
spin_lock_irqsave(&lock, flags);
if (++messages < audit_rate_limit) {
retval = 1;
} else {
now = jiffies;
elapsed = now - last_check;
if (elapsed > HZ) {
last_check = now;
messages = 0;
retval = 1;
}
}
spin_unlock_irqrestore(&lock, flags);
return retval;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 114 | 96.61% | 1 | 50.00% |
Thomas Gleixner | 4 | 3.39% | 1 | 50.00% |
Total | 118 | 100.00% | 2 | 100.00% |
/**
* audit_log_lost - conditionally log lost audit message event
* @message: the message stating reason for lost audit message
*
* Emit at least 1 message per second, even if audit_rate_check is
* throttling.
* Always increment the lost messages counter.
*/
void audit_log_lost(const char *message)
{
static unsigned long last_msg = 0;
static DEFINE_SPINLOCK(lock);
unsigned long flags;
unsigned long now;
int print;
atomic_inc(&audit_lost);
print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
if (!print) {
spin_lock_irqsave(&lock, flags);
now = jiffies;
if (now - last_msg > HZ) {
print = 1;
last_msg = now;
}
spin_unlock_irqrestore(&lock, flags);
}
if (print) {
if (printk_ratelimit())
pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
atomic_read(&audit_lost),
audit_rate_limit,
audit_backlog_limit);
audit_panic(message);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 116 | 91.34% | 1 | 20.00% |
Eric Paris | 5 | 3.94% | 1 | 20.00% |
Thomas Gleixner | 4 | 3.15% | 1 | 20.00% |
Joe Perches | 2 | 1.57% | 2 | 40.00% |
Total | 127 | 100.00% | 5 | 100.00% |
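The two helpers above pair up on the record-emission path: audit_rate_check() decides whether a record may be emitted at all, and audit_log_lost() accounts for the drop. audit_log_start(), further down in audit.c, performs essentially the check sketched below:

/* illustrative sketch only -- not part of audit.c */
static struct audit_buffer *example_rate_limited_start(int type)
{
	/* over the configured rate? account for the drop and bail out */
	if (!audit_rate_check()) {
		audit_log_lost("rate limit exceeded");
		return NULL;
	}
	return audit_log_start(NULL, GFP_KERNEL, type);
}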
static int audit_log_config_change(char *function_name, u32 new, u32 old,
int allow_changes)
{
struct audit_buffer *ab;
int rc = 0;
ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
if (unlikely(!ab))
return rc;
audit_log_format(ab, "%s=%u old=%u", function_name, new, old);
audit_log_session_info(ab);
rc = audit_log_task_context(ab);
if (rc)
allow_changes = 0; /* Something weird, deny request */
audit_log_format(ab, " res=%d", allow_changes);
audit_log_end(ab);
return rc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Paris | 58 | 56.86% | 3 | 27.27% |
Andrew Morton | 13 | 12.75% | 1 | 9.09% |
Steve Grubb | 13 | 12.75% | 2 | 18.18% |
Kees Cook | 11 | 10.78% | 1 | 9.09% |
Joe Perches | 3 | 2.94% | 1 | 9.09% |
Serge E. Hallyn | 2 | 1.96% | 1 | 9.09% |
Adrian Bunk | 1 | 0.98% | 1 | 9.09% |
Eric W. Biedermann | 1 | 0.98% | 1 | 9.09% |
Total | 102 | 100.00% | 11 | 100.00% |
static int audit_do_config_change(char *function_name, u32 *to_change, u32 new)
{
int allow_changes, rc = 0;
u32 old = *to_change;
/* check if we are locked */
if (audit_enabled == AUDIT_LOCKED)
allow_changes = 0;
else
allow_changes = 1;
if (audit_enabled != AUDIT_OFF) {
rc = audit_log_config_change(function_name, new, old, allow_changes);
if (rc)
allow_changes = 0;
}
/* If we are allowed, make the change */
if (allow_changes == 1)
*to_change = new;
/* Not allowed, update reason */
else if (rc == 0)
rc = -EPERM;
return rc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steve Grubb | 54 | 52.43% | 2 | 28.57% |
Eric Paris | 31 | 30.10% | 1 | 14.29% |
Andrew Morton | 12 | 11.65% | 1 | 14.29% |
Joe Perches | 4 | 3.88% | 1 | 14.29% |
Serge E. Hallyn | 1 | 0.97% | 1 | 14.29% |
Adrian Bunk | 1 | 0.97% | 1 | 14.29% |
Total | 103 | 100.00% | 7 | 100.00% |
static int audit_set_rate_limit(u32 limit)
{
return audit_do_config_change("audit_rate_limit", &audit_rate_limit, limit);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Paris | 7 | 35.00% | 1 | 16.67% |
Steve Grubb | 6 | 30.00% | 2 | 33.33% |
Andrew Morton | 5 | 25.00% | 1 | 16.67% |
Adrian Bunk | 1 | 5.00% | 1 | 16.67% |
Joe Perches | 1 | 5.00% | 1 | 16.67% |
Total | 20 | 100.00% | 6 | 100.00% |
static int audit_set_backlog_limit(u32 limit)
{
return audit_do_config_change("audit_backlog_limit", &audit_backlog_limit, limit);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Paris | 15 | 75.00% | 1 | 33.33% |
Andrew Morton | 4 | 20.00% | 1 | 33.33% |
Joe Perches | 1 | 5.00% | 1 | 33.33% |
Total | 20 | 100.00% | 3 | 100.00% |
static int audit_set_backlog_wait_time(u32 timeout)
{
return audit_do_config_change("audit_backlog_wait_time",
&audit_backlog_wait_time, timeout);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Richard Guy Briggs | 18 | 90.00% | 1 | 33.33% |
Joe Perches | 1 | 5.00% | 1 | 33.33% |
Paul Moore | 1 | 5.00% | 1 | 33.33% |
Total | 20 | 100.00% | 3 | 100.00% |
static int audit_set_enabled(u32 state)
{
int rc;
if (state > AUDIT_LOCKED)
return -EINVAL;
rc = audit_do_config_change("audit_enabled", &audit_enabled, state);
if (!rc)
audit_ever_enabled |= !!state;
return rc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Paris | 40 | 83.33% | 2 | 33.33% |
Steve Grubb | 5 | 10.42% | 2 | 33.33% |
Andrew Morton | 2 | 4.17% | 1 | 16.67% |
Joe Perches | 1 | 2.08% | 1 | 16.67% |
Total | 48 | 100.00% | 6 | 100.00% |
static int audit_set_failure(u32 state)
{
if (state != AUDIT_FAIL_SILENT
&& state != AUDIT_FAIL_PRINTK
&& state != AUDIT_FAIL_PANIC)
return -EINVAL;
return audit_do_config_change("audit_failure", &audit_failure, state);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 28 | 73.68% | 1 | 20.00% |
Steve Grubb | 4 | 10.53% | 1 | 20.00% |
Eric Paris | 4 | 10.53% | 1 | 20.00% |
Adrian Bunk | 1 | 2.63% | 1 | 20.00% |
Joe Perches | 1 | 2.63% | 1 | 20.00% |
Total | 38 | 100.00% | 5 | 100.00% |
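These audit_set_*() helpers are reached from userspace through an AUDIT_SET netlink message carrying a struct audit_status, handled by audit_receive_msg() further down in audit.c (outside this excerpt). In practice this is done with auditctl/libaudit; the standalone user-space sketch below (requires CAP_AUDIT_CONTROL, ACK handling omitted) only illustrates the message layout and which fields map onto the helpers above:

/* illustrative user-space sketch only -- normally done via auditctl/libaudit */
#include <linux/audit.h>
#include <linux/netlink.h>
#include <sys/socket.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_nl kaddr = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nlh;
		struct audit_status s;
	} req;
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_AUDIT);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(req.s));
	req.nlh.nlmsg_type  = AUDIT_SET;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	/* only the fields named in .mask are applied by the kernel */
	req.s.mask          = AUDIT_STATUS_BACKLOG_LIMIT | AUDIT_STATUS_RATE_LIMIT;
	req.s.backlog_limit = 8192;	/* lands in audit_set_backlog_limit() */
	req.s.rate_limit    = 0;	/* 0 == unlimited, see audit_rate_check() */

	if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
		   (struct sockaddr *)&kaddr, sizeof(kaddr)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}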
/**
* auditd_conn_free - RCU helper to release an auditd connection struct
* @rcu: RCU head
*
* Description:
* Drop any references inside the auditd connection tracking struct and free
* the memory.
*/
static void auditd_conn_free(struct rcu_head *rcu)
{
struct auditd_connection *ac;
ac = container_of(rcu, struct auditd_connection, rcu);
put_pid(ac->pid);
put_net(ac->net);
kfree(ac);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Moore | 44 | 93.62% | 1 | 50.00% |
Eric Paris | 3 | 6.38% | 1 | 50.00% |
Total | 47 | 100.00% | 2 | 100.00% |
/**
* auditd_set - Set/Reset the auditd connection state
* @pid: auditd PID
* @portid: auditd netlink portid
* @net: auditd network namespace pointer
*
* Description:
* This function will obtain and drop network namespace references as
* necessary. Returns zero on success, negative values on failure.
*/
static int auditd_set(struct pid *pid, u32 portid, struct net *net)
{
unsigned long flags;
struct auditd_connection *ac_old, *ac_new;
if (!pid || !net)
return -EINVAL;
ac_new = kzalloc(sizeof(*ac_new), GFP_KERNEL);
if (!ac_new)
return -ENOMEM;
ac_new->pid = get_pid(pid);
ac_new->portid = portid;
ac_new->net = get_net(net);
spin_lock_irqsave(&auditd_conn_lock, flags);
ac_old = rcu_dereference_protected(auditd_conn,
lockdep_is_held(&auditd_conn_lock));
rcu_assign_pointer(auditd_conn, ac_new);
spin_unlock_irqrestore(&auditd_conn_lock, flags);
if (ac_old)
call_rcu(&ac_old->rcu, auditd_conn_free);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Moore | 134 | 94.37% | 3 | 75.00% |
Eric Paris | 8 | 5.63% | 1 | 25.00% |
Total | 142 | 100.00% | 4 | 100.00% |
/**
* kauditd_printk_skb - Print the audit record to the kernel ring buffer
* @skb: audit record
*
* Whatever the reason, this packet may not make it to the auditd connection
* so write it via printk so the information isn't completely lost.
*/
static void kauditd_printk_skb(struct sk_buff *skb)
{
struct nlmsghdr *nlh = nlmsg_hdr(skb);
char *data = nlmsg_data(nlh);
if (nlh->nlmsg_type != AUDIT_EOE && printk_ratelimit())
pr_notice("type=%d %s\n", nlh->nlmsg_type, data);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Moore | 42 | 80.77% | 1 | 50.00% |
Eric Paris | 10 | 19.23% | 1 | 50.00% |
Total | 52 | 100.00% | 2 | 100.00% |
/**
* kauditd_rehold_skb - Handle an audit record send failure in the hold queue
* @skb: audit record
*
* Description:
* This should only be used by the kauditd_thread when it fails to flush the
* hold queue.
*/
static void kauditd_rehold_skb(struct sk_buff *skb)
{
/* put the record back in the queue at the same place */
skb_queue_head(&audit_hold_queue, skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Moore | 16 | 80.00% | 1 | 50.00% |
Eric Paris | 4 | 20.00% | 1 | 50.00% |
Total | 20 | 100.00% | 2 | 100.00% |
/**
* kauditd_hold_skb - Queue an audit record, waiting for auditd
* @skb: audit record
*
* Description:
* Queue the audit record, waiting for an instance of auditd. When this
* function is called we haven't given up yet on sending the record, but things
* are not looking good. First try to write the record via printk, then decide
* whether to hold on to the record by queueing it, provided there is room. If
* we want to keep the record but there is no room, count it as a lost record.
*/
static void kauditd_hold_skb(struct sk_buff *skb)
{
/* at this point it is uncertain if we will ever send this to auditd so
* try to send the message via printk before we go any further */
kauditd_printk_skb(skb);
/* can we just silently drop the message? */
if (!audit_default) {
kfree_skb(skb);
return;
}
/* if we have room, queue the message */
if (!audit_backlog_limit ||
skb_queue_len(&audit_hold_queue) < audit_backlog_limit) {
skb_queue_tail(&audit_hold_queue, skb);
return;
}
/* we have no other options - drop the message */
audit_log_lost("kauditd hold queue overflow");
kfree_skb(skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Moore | 27 | 40.30% | 1 | 16.67% |
Eric Paris | 18 | 26.87% | 2 | 33.33% |
David Woodhouse | 12 | 17.91% | 1 | 16.67% |
Richard Guy Briggs | 10 | 14.93% | 2 | 33.33% |
Total | 67 | 100.00% | 6 | 100.00% |
/**
* kauditd_retry_skb - Queue an audit record, attempt to send again to auditd
* @skb: audit record
*
* Description:
* Not as serious as kauditd_hold_skb() since we still have a connected auditd,
* but for some reason we are having trouble sending audit records to it, so
* queue the given record and attempt to resend.
*/
static void kauditd_retry_skb(struct sk_buff *skb)
{
/* NOTE: because records should only live in the retry queue for a
* short period of time, before either being sent or moved to the hold
* queue, we don't currently enforce a limit on this queue */
skb_queue_tail(&audit_retry_queue, skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Richard Guy Briggs | 13 | 65.00% | 1 | 33.33% |
Paul Moore | 6 | 30.00% | 1 | 33.33% |
Eric Paris | 1 | 5.00% | 1 | 33.33% |
Total | 20 | 100.00% | 3 | 100.00% |
/**
* auditd_reset - Disconnect the auditd connection
* @ac: auditd connection state
*
* Description:
* Break the auditd/kauditd connection and move all the queued records into the
* hold queue in case auditd reconnects. It is important to note that the @ac
* pointer should never be dereferenced inside this function as it may be NULL
* or invalid, you can only compare the memory address! If @ac is NULL then
* the connection will always be reset.
*/
static void auditd_reset(const struct auditd_connection *ac)
{
unsigned long flags;
struct sk_buff *skb;
struct auditd_connection *ac_old;
/* if it isn't already broken, break the connection */
spin_lock_irqsave(&auditd_conn_lock, flags);
ac_old = rcu_dereference_protected(auditd_conn,
lockdep_is_held(&auditd_conn_lock));
if (ac && ac != ac_old) {
/* someone already registered a new auditd connection */
spin_unlock_irqrestore(&auditd_conn_lock, flags);
return;
}
rcu_assign_pointer(auditd_conn, NULL);
spin_unlock_irqrestore(&auditd_conn_lock, flags);
if (ac_old)
call_rcu(&ac_old->rcu, auditd_conn_free);
/* flush the retry queue to the hold queue, but don't touch the main
* queue since we need to process that normally for multicast */
while ((skb = skb_dequeue(&audit_retry_queue)))
kauditd_hold_skb(skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Moore | 115 | 100.00% | 4 | 100.00% |
Total | 115 | 100.00% | 4 | 100.00% |
/**
* auditd_send_unicast_skb - Send a record via unicast to auditd
* @skb: audit record
*
* Description:
* Send a skb to the audit daemon, returns positive/zero values on success and
* negative values on failure; in all cases the skb will be consumed by this
* function. If the send results in -ECONNREFUSED the connection with auditd
* will be reset. This function may sleep so callers should not hold any locks
* where this would cause a problem.
*/
static int auditd_send_unicast_skb(struct sk_buff *skb)
{
int rc;
u32 portid;
struct net *net;
struct sock *sk;
struct auditd_connection *ac;
/* NOTE: we can't call netlink_unicast while in the RCU section so
* take a reference to the network namespace and grab local
* copies of the namespace, the sock, and the portid; the
* namespace and sock aren't going to go away while we hold a
* reference and if the portid does become invalid after the RCU
* section netlink_unicast() should safely return an error */
rcu_read_lock();
ac = rcu_dereference(auditd_conn);
if (!ac) {
rcu_read_unlock();
kfree_skb(skb);
rc = -ECONNREFUSED;
goto err;
}
net = get_net(ac->net);
sk = audit_get_sk(net);
portid = ac->portid;
rcu_read_unlock();
rc = netlink_unicast(sk, skb, portid, 0);
put_net(net);
if (rc < 0)
goto err;
return rc;
err:
if (ac && rc == -ECONNREFUSED)
auditd_reset(ac);
return rc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Moore | 116 | 82.86% | 4 | 36.36% |
Richard Guy Briggs | 18 | 12.86% | 5 | 45.45% |
Shu Wang | 5 | 3.57% | 1 | 9.09% |
David Woodhouse | 1 | 0.71% | 1 | 9.09% |
Total | 140 | 100.00% | 11 | 100.00% |
/**
* kauditd_send_queue - Helper for kauditd_thread to flush skb queues
* @sk: the sending sock
* @portid: the netlink destination
* @queue: the skb queue to process
* @retry_limit: limit on number of netlink unicast failures
* @skb_hook: per-skb hook for additional processing
* @err_hook: hook called if the skb fails the netlink unicast send
*
* Description:
* Run through the given queue and attempt to send the audit records to auditd,
* returns zero on success, negative values on failure. It is up to the caller
* to ensure that the @sk is valid for the duration of this function.
*
*/
static int kauditd_send_queue(struct sock *sk, u32 portid,
struct sk_buff_head *queue,
unsigned int retry_limit,
void (*skb_hook)(struct sk_buff *skb),
void (*err_hook)(struct sk_buff *skb))
{
int rc = 0;
struct sk_buff *skb;
static unsigned int failed = 0;
/* NOTE: kauditd_thread takes care of all our locking, we just use
* the netlink info passed to us (e.g. sk and portid) */
while ((skb = skb_dequeue(queue))) {
/* call the skb_hook for each skb we touch */
if (skb_hook)
(*skb_hook)(skb);
/* can we send to anyone via unicast? */
if (!sk) {
if (err_hook)
(*err_hook)(skb);
continue;
}
/* grab an extra skb reference in case of error */
skb_get(skb);
rc = netlink_unicast(sk, skb, portid, 0);
if (rc < 0) {
/* fatal failure for our queue flush attempt? */
if (++failed >= retry_limit ||
rc == -ECONNREFUSED || rc == -EPERM) {
/* yes - error processing for the queue */
sk = NULL;
if (err_hook)
(*err_hook)(skb);
if (!skb_hook)
goto out;
/* keep processing with the skb_hook */
continue;
} else
/* no - requeue to preserve ordering */
skb_queue_head(queue, skb);
} else {
/* it worked - drop the extra reference and continue */
consume_skb(skb);
failed = 0;
}
}
out:
return (rc >= 0 ? 0 : rc);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Moore | 211 | 95.48% | 3 | 60.00% |
Eric Paris | 9 | 4.07% | 1 | 20.00% |
Neil Horman | 1 | 0.45% | 1 | 20.00% |
Total | 221 | 100.00% | 5 | 100.00% |
/**
* kauditd_send_multicast_skb - Send a record to any multicast listeners
* @skb: audit record
*
* Description:
* Write a multicast message to anyone listening in the initial network
* namespace. This function doesn't consume an skb as might be expected since
* it has to copy it anyways.
*/
static void kauditd_send_multicast_skb(struct sk_buff *skb)
{
struct sk_buff *copy;
struct sock *sock = audit_get_sk(&init_net);
struct nlmsghdr *nlh;
/* NOTE: we are not taking an additional reference for init_net since
* we don't have to worry about it going away */
if (!netlink_has_listeners(sock, AUDIT_NLGRP_READLOG))
return;
/*
* The seemingly wasteful skb_copy() rather than bumping the refcount
* using skb_get() is necessary because non-standard mods are made to
* the skb by the original kaudit unicast socket send routine. The
* existing auditd daemon assumes this breakage. Fixing this would
* require co-ordinating a change in the established protocol between
* the kaudit kernel subsystem and the auditd userspace code. There is
* no reason for new multicast clients to continue with this
* non-compliance.
*/
copy = skb_copy(skb, GFP_KERNEL);
if (!copy)
return;
nlh = nlmsg_hdr(copy);
nlh->nlmsg_len = skb->len;
nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, GFP_KERNEL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Richard Guy Briggs | 63 | 71.59% | 2 | 50.00% |
Paul Moore | 25 | 28.41% | 2 | 50.00% |
Total | 88 | 100.00% | 4 | 100.00% |
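Besides the unicast path to auditd, every record on the main queue is broadcast on the AUDIT_NLGRP_READLOG multicast group by the function above. A minimal user-space sketch of a read-only listener follows (requires CAP_AUDIT_READ; SOL_NETLINK is defined locally in case the libc headers lack it):

/* illustrative user-space sketch only */
#include <linux/audit.h>
#include <linux/netlink.h>
#include <sys/socket.h>
#include <stdio.h>
#include <unistd.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

int main(void)
{
	char buf[8192];
	int fd, grp = AUDIT_NLGRP_READLOG;
	ssize_t len;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_AUDIT);
	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* join the read-only log multicast group */
	if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
		       &grp, sizeof(grp)) < 0) {
		perror("setsockopt");
		return 1;
	}
	while ((len = recv(fd, buf, sizeof(buf), 0)) > 0) {
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

		for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len))
			printf("type=%u %.*s\n", nlh->nlmsg_type,
			       (int)(nlh->nlmsg_len - NLMSG_HDRLEN),
			       (char *)NLMSG_DATA(nlh));
	}
	close(fd);
	return 0;
}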
/**
* kauditd_thread - Worker thread to send audit records to userspace
* @dummy: unused
*/
static int kauditd_thread(void *dummy)
{
int rc;
u32 portid = 0;
struct net *net = NULL;
struct sock *sk = NULL;
struct auditd_connection *ac;
#define UNICAST_RETRIES 5
set_freezable();
while (!kthread_should_stop()) {
/* NOTE: see the lock comments in auditd_send_unicast_skb() */
rcu_read_lock();
ac = rcu_dereference(auditd_conn);
if (!ac) {
rcu_read_unlock();
goto main_queue;
}
net = get_net(ac->net);
sk = audit_get_sk(net);
portid = ac->portid;
rcu_read_unlock();
/* attempt to flush the hold queue */
rc = kauditd_send_queue(sk, portid,
&audit_hold_queue, UNICAST_RETRIES,
NULL, kauditd_rehold_skb);
if (ac && rc < 0) {
sk = NULL;
auditd_reset(ac);
goto main_queue;
}
/* attempt to flush the retry queue */
rc = kauditd_send_queue(sk, portid,
&audit_retry_queue, UNICAST_RETRIES,
NULL, kauditd_hold_skb);
if (ac && rc < 0) {
sk = NULL;
auditd_reset(ac);
goto main_queue;
}
main_queue:
/* process the main queue - do the multicast send and attempt
* unicast, dump failed record sends to the retry queue; if
* sk == NULL due to previous failures we will just do the
* multicast send and move the record to the hold queue */
rc = kauditd_send_queue(sk, portid, &audit_queue, 1,
kauditd_send_multicast_skb,
(sk ?
kauditd_retry_skb : kauditd_hold_skb));
if (ac && rc < 0)
auditd_reset(ac);
sk = NULL;
/* drop our netns reference, no auditd sends past this line */
if (net) {
put_net(net);
net = NULL;
}
/* we have processed all the queues so wake everyone */
wake_up(&audit_backlog_wait);
/* NOTE: we want to wake up if there is anything on the queue,
* regardless of if an auditd is connected, as we need to
* do the multicast send and rotate records from the
* main queue to the retry/hold queues */
wait_event_freezable(kauditd_wait,
(skb_queue_len(&audit_queue) ? 1 : 0));
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Moore | 219 | 80.81% | 6 | 46.15% |
Eric Paris | 23 | 8.49% | 1 | 7.69% |
Richard Guy Briggs | 10 | 3.69% | 2 | 15.38% |
Dan Duval | 8 | 2.95% | 1 | 7.69% |
David Woodhouse | 7 | 2.58% | 1 | 7.69% |
Andrew Morton | 3 | 1.11% | 1 | 7.69% |
Peter Zijlstra | 1 | 0.37% | 1 | 7.69% |
Total | 271 | 100.00% | 13 | 100.00% |
int audit_send_list(void *_dest)
{
struct audit_netlink_list *dest = _dest;
struct sk_buff *skb;
struct sock *sk = audit_get_sk(dest->net);
/* wait for parent to finish and send an ACK */
mutex_lock(&audit_cmd_mutex);
mutex_unlock(&audit_cmd_mutex);
while ((skb = __skb_dequeue(&dest->q)) != NULL)
netlink_unicast(sk, skb, dest->portid, 0);
put_net(dest->net);
kfree(dest);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Al Viro | 63 | 70.00% | 1 | 14.29% |
Richard Guy Briggs | 11 | 12.22% | 2 | 28.57% |
Paul Moore | 7 | 7.78% | 1 | 14.29% |
Eric W. Biedermann | 6 | 6.67% | 1 | 14.29% |
Amy Griffis | 2 | 2.22% | 1 | 14.29% |
Andrew Morton | 1 | 1.11% | 1 | 14.29% |
Total | 90 | 100.00% | 7 | 100.00% |
struct sk_buff *audit_make_reply(int seq, int type, int done,
int multi, const void *payload, int size)
{
struct sk_buff *skb;
struct nlmsghdr *nlh;
void *data;
int flags = multi ? NLM_F_MULTI : 0;
int t = done ? NLMSG_DONE : type;
skb = nlmsg_new(size, GFP_KERNEL);
if (!skb)
return NULL;
nlh = nlmsg_put(skb, 0, seq, t, size, flags);
if (!nlh)
goto out_kfree_skb;
data = nlmsg_data(nlh);
memcpy(data, payload, size);
return skb;
out_kfree_skb:
kfree_skb(skb);
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 94 | 72.31% | 1 | 14.29% |
Al Viro | 18 | 13.85% | 1 | 14.29% |
David S. Miller | 11 | 8.46% | 1 | 14.29% |
Eric Paris | 4 | 3.08% | 1 | 14.29% |
David Woodhouse | 1 | 0.77% | 1 | 14.29% |
Stephen Hemminger | 1 | 0.77% | 1 | 14.29% |
Paul Moore | 1 | 0.77% | 1 | 14.29% |
Total | 130 | 100.00% | 7 | 100.00% |
static int audit_send_reply_thread(void *arg)
{
struct audit_reply *reply = (struct audit_reply *)arg;
struct sock *sk = audit_get_sk(reply->net);
mutex_lock(&audit_cmd_mutex);
mutex_unlock(&audit_cmd_mutex);
/* Ignore failure. It'll only happen if the sender goes away,
because our timeout is set to infinite. */
netlink_unicast(sk, reply->skb, reply->portid, 0);
put_net(reply->net);
kfree(reply);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Paris | 56 | 72.73% | 1 | 20.00% |
Richard Guy Briggs | 8 | 10.39% | 2 | 40.00% |
Paul Moore | 7 | 9.09% | 1 | 20.00% |
Eric W. Biedermann | 6 | 7.79% | 1 | 20.00% |
Total | 77 | 100.00% | 5 | 100.00% |