Release 4.12 include/linux/uaccess.h
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/kasan-checks.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
#include <asm/uaccess.h>
/*
* Architectures should provide two primitives (raw_copy_{to,from}_user())
* and get rid of their private instances of copy_{to,from}_user() and
* __copy_{to,from}_user{,_inatomic}().
*
* raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
* return the amount left to copy. They should assume that access_ok() has
* already been checked (and succeeded); they should *not* zero-pad anything.
* No KASAN or object size checks either - those belong here.
*
* Both of these functions should attempt to copy size bytes starting at from
* into the area starting at to. They must not fetch or store anything
* outside of those areas. Return value must be between 0 (everything
* copied successfully) and size (nothing copied).
*
* If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
* at to must become equal to the bytes fetched from the corresponding area
* starting at from. All data past to + size - N must be left unmodified.
*
* If copying succeeds, the return value must be 0. If some data cannot be
* fetched, it is permitted to store less than was actually fetched; the only
* hard requirement is that returning size (i.e. storing nothing at all) may
* happen only when nothing could be copied. In other words, you don't have
* to squeeze out as much as possible - that is allowed, but not required.
*
* For raw_copy_from_user(), the destination (to) always points to kernel
* memory, so no faults on store should happen; interpretation of from is
* affected by set_fs(). For raw_copy_to_user() it's the other way round.
*
* Both can be inlined - it's up to architectures whether they want to bother
* with that. They should not be used directly; they are used to implement
* the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
* that are used instead. Out of those, __... ones are inlined. Plain
* copy_{to,from}_user() might or might not be inlined. If you want them
* inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
*
* NOTE: only copy_from_user() zero-pads the destination in case of short copy.
* Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
* at all; their callers absolutely must check the return value.
*
* Biarch ones should also provide raw_copy_in_user() - similar to the above,
* but both source and destination are __user pointers (affected by set_fs()
* as usual) and both source and destination can trigger faults.
*/
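/*
 * Illustrative only (not part of this header): a worst-case-conformant
 * raw_copy_from_user() for a hypothetical architecture, byte-at-a-time via
 * __get_user(). It honours the contract above: access_ok() is assumed, it
 * stops at the first fault, never zero-pads, and returns the amount left.
 * Real architectures use optimized assembly instead.
 */
static inline unsigned long
example_raw_copy_from_user(void *to, const void __user *from, unsigned long size)
{
	const char __user *src = from;
	char *dst = to;
	unsigned long left = size;
	char c;

	while (left) {
		if (__get_user(c, src))
			break;		/* fault: return what is left, no padding */
		*dst++ = c;
		src++;
		left--;
	}
	return left;			/* 0 means everything was copied */
}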
static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
kasan_check_write(to, n);
check_object_size(to, n, false);
return raw_copy_from_user(to, from, n);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Al Viro | 48 | 100.00% | 1 | 100.00% |
Total | 48 | 100.00% | 1 | 100.00% |
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_fault();
kasan_check_write(to, n);
check_object_size(to, n, false);
return raw_copy_from_user(to, from, n);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Al Viro | 51 | 100.00% | 1 | 100.00% |
Total | 51 | 100.00% | 1 | 100.00% |
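/*
 * Hedged caller sketch (hypothetical, not part of this header): because
 * __copy_from_user() never zero-pads, the destination must not be used
 * after a short copy. access_ok() is assumed to have been checked earlier.
 */
static inline int
example_get_struct(void *k, const void __user *u, unsigned long size)
{
	if (__copy_from_user(k, u, size))
		return -EFAULT;	/* @k may hold a partial, un-padded copy */
	return 0;
}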
/**
* __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
* @to: Destination address, in user space.
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only.
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
* The caller should also make sure the user space pages are pinned, so that
* the access does not fault and sleep.
*/
static __always_inline unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
kasan_check_read(from, n);
check_object_size(from, n, true);
return raw_copy_to_user(to, from, n);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Al Viro | 48 | 100.00% | 1 | 100.00% |
Total | 48 | 100.00% | 1 | 100.00% |
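/*
 * Hedged sketch of the convention described above (hypothetical caller):
 * the _inatomic variant is typically bracketed by pagefault_disable()/
 * pagefault_enable() (defined later in this header), so a non-resident
 * page is reported as a short copy instead of a sleeping fault.
 */
static inline unsigned long
example_put_user_atomic(void __user *to, const void *from, unsigned long n)
{
	unsigned long left;

	pagefault_disable();
	left = __copy_to_user_inatomic(to, from, n); /* access_ok() done by caller */
	pagefault_enable();
	return left;	/* bytes *not* copied; nonzero if a fault was suppressed */
}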
static __always_inline unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
kasan_check_read(from, n);
check_object_size(from, n, true);
return raw_copy_to_user(to, from, n);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Al Viro | 51 | 100.00% | 1 | 100.00% |
Total | 51 | 100.00% | 1 | 100.00% |
#ifdef INLINE_COPY_FROM_USER
static inline unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
if (likely(access_ok(VERIFY_READ, from, n)))
res = raw_copy_from_user(to, from, n);
if (unlikely(res))
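/* zero-pad the uncopied tail so no uninitialized kernel data leaks */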
memset(to + (n - res), 0, res);
return res;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Al Viro | 78 | 100.00% | 1 | 100.00% |
Total | 78 | 100.00% | 1 | 100.00% |
#else
extern unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif
#ifdef INLINE_COPY_TO_USER
static inline unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
n = raw_copy_to_user(to, from, n);
return n;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Al Viro | 47 | 100.00% | 1 | 100.00% |
Total | 47 | 100.00% | 1 | 100.00% |
#else
extern unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif
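/*
 * Illustrative only: as the comment block at the top of this file says, an
 * architecture opts into the inline variants by defining these macros in
 * its own asm/uaccess.h (they do not belong here):
 *
 * #define INLINE_COPY_FROM_USER
 * #define INLINE_COPY_TO_USER
 */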
extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);
static inline void copy_user_overflow(int size, unsigned long count)
{
WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Al Viro | 25 | 100.00% | 1 | 100.00% |
Total | 25 | 100.00% | 1 | 100.00% |
static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
int sz = __compiletime_object_size(to);
might_fault();
kasan_check_write(to, n);
if (likely(sz < 0 || sz >= n)) {
check_object_size(to, n, false);
n = _copy_from_user(to, from, n);
} else if (!__builtin_constant_p(n))
copy_user_overflow(sz, n);
else
__bad_copy_user();
return n;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Al Viro | 99 | 100.00% | 1 | 100.00% |
Total | 99 | 100.00% | 1 | 100.00% |
static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
int sz = __compiletime_object_size(from);
kasan_check_read(from, n);
might_fault();
if (likely(sz < 0 || sz >= n)) {
check_object_size(from, n, true);
n = _copy_to_user(to, from, n);
} else if (!__builtin_constant_p(n))
copy_user_overflow(sz, n);
else
__bad_copy_user();
return n;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Al Viro | 99 | 100.00% | 1 | 100.00% |
Total | 99 | 100.00% | 1 | 100.00% |
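/*
 * Hypothetical end-to-end sketch of the two checked primitives above
 * (struct example_args is invented for illustration): a partial copy in
 * either direction is surfaced to user space as -EFAULT.
 */
struct example_args {
	int in;
	int result;
};

static inline long example_double(void __user *uarg)
{
	struct example_args a;

	if (copy_from_user(&a, uarg, sizeof(a)))
		return -EFAULT;
	a.result = a.in * 2;	/* operate on the kernel-side copy */
	if (copy_to_user(uarg, &a, sizeof(a)))
		return -EFAULT;
	return 0;
}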
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
might_fault();
return raw_copy_in_user(to, from, n);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Al Viro | 36 | 100.00% | 1 | 100.00% |
Total | 36 | 100.00% | 1 | 100.00% |
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
might_fault();
if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
n = raw_copy_in_user(to, from, n);
return n;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Al Viro | 60 | 100.00% | 1 | 100.00% |
Total | 60 | 100.00% | 1 | 100.00% |
#endif
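/*
 * Hedged sketch of the compat-only helper above (hypothetical caller):
 * copying directly between two user buffers, as compat thunks commonly do.
 */
static inline int
example_user_to_user(void __user *dst, const void __user *src, unsigned long n)
{
	if (copy_in_user(dst, src, n))
		return -EFAULT;	/* short copy on either side */
	return 0;
}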
static __always_inline void pagefault_disabled_inc(void)
{
current->pagefault_disabled++;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Hildenbrand | 14 | 100.00% | 1 | 100.00% |
Total | 14 | 100.00% | 1 | 100.00% |
static __always_inline void pagefault_disabled_dec(void)
{
current->pagefault_disabled--;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Hildenbrand | 14 | 100.00% | 1 | 100.00% |
Total | 14 | 100.00% | 1 | 100.00% |
/*
* These routines enable/disable the pagefault handler. If disabled, it will
* not take any locks and go straight to the fixup table.
*
* User access methods will not sleep when called from a pagefault_disabled()
* environment.
*/
static inline void pagefault_disable(void)
{
pagefault_disabled_inc();
/*
* make sure to have issued the store before a pagefault
* can hit.
*/
barrier();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 13 | 81.25% | 1 | 50.00% |
David Hildenbrand | 3 | 18.75% | 1 | 50.00% |
Total | 16 | 100.00% | 2 | 100.00% |
static inline void pagefault_enable(void)
{
/*
* make sure to issue those last loads/stores before enabling
* the pagefault handler again.
*/
barrier();
pagefault_disabled_dec();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 13 | 81.25% | 1 | 50.00% |
David Hildenbrand | 3 | 18.75% | 1 | 50.00% |
Total | 16 | 100.00% | 2 | 100.00% |
/*
* Is the pagefault handler disabled? If so, user access methods will not sleep.
*/
#define pagefault_disabled() (current->pagefault_disabled != 0)
/*
* The pagefault handler is in general disabled by pagefault_disable() or
* when in irq context (via in_atomic()).
*
* This function should only be used by the fault handlers. Other users should
* stick to pagefault_disabled().
* Please NEVER use preempt_disable() to disable the fault handler: with
* !CONFIG_PREEMPT_COUNT it is a no-op, so the handler won't actually be
* disabled. in_atomic() will report different values depending on
* CONFIG_PREEMPT_COUNT.
*/
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
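/*
 * Minimal sketch (hypothetical, not part of this header) of how the
 * helpers above compose: inside a disable/enable pair pagefault_disabled()
 * is true, and user accesses report faults as short copies instead of
 * sleeping.
 */
static inline unsigned long
example_peek_user(void *dst, const void __user *src, unsigned long n)
{
	unsigned long left;

	pagefault_disable();
	WARN_ON(!pagefault_disabled());
	/* a fault here goes straight to the fixup table: no locks, no sleep */
	left = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();
	return left;
}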
#ifndef ARCH_HAS_NOCACHE_UACCESS
static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
const void __user *from, unsigned long n)
{
return __copy_from_user_inatomic(to, from, n);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hiro Yoshioka | 32 | 100.00% | 1 | 100.00% |
Total | 32 | 100.00% | 1 | 100.00% |
#endif /* ARCH_HAS_NOCACHE_UACCESS */
/*
* probe_kernel_read(): safely attempt to read from a location
* @dst: pointer to the buffer that shall take the data
* @src: address to read from
* @size: size of the data chunk
*
* Safely read from address @src to the buffer at @dst. If a kernel fault
* happens, handle that and return -EFAULT.
*/
extern long probe_kernel_read(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);
/*
* probe_kernel_write(): safely attempt to write to a location
* @dst: address to write to
* @src: pointer to the data that shall be written
* @size: size of the data chunk
*
* Safely write to address @dst from the buffer at @src. If a kernel fault
* happens, handle that and return -EFAULT.
*/
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
/**
* probe_kernel_address(): safely attempt to read from a location
* @addr: address to read from
* @retval: read into this variable
*
* Returns 0 on success, or -EFAULT.
*/
#define probe_kernel_address(addr, retval) \
probe_kernel_read(&retval, addr, sizeof(retval))
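/*
 * Hedged usage sketch (hypothetical caller): dereferencing a kernel
 * pointer that may be bogus without risking an oops.
 */
static inline int example_read_maybe_bad(const int *addr, int *out)
{
	int val;

	if (probe_kernel_address(addr, val))
		return -EFAULT;	/* the read faulted */
	*out = val;
	return 0;
}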
#ifndef user_access_begin
#define user_access_begin() do { } while (0)
#define user_access_end() do { } while (0)
#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
#endif
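/*
 * Hedged sketch of the unsafe_*_user() convention (hypothetical caller):
 * access_ok() first, the accesses bracketed by user_access_begin()/
 * user_access_end(), and a label for the fault path.
 */
static inline int example_get_two(int *a, int *b, const int __user *up)
{
	if (!access_ok(VERIFY_READ, up, 2 * sizeof(int)))
		return -EFAULT;
	user_access_begin();
	unsafe_get_user(*a, up, efault);
	unsafe_get_user(*b, up + 1, efault);
	user_access_end();
	return 0;
efault:
	user_access_end();
	return -EFAULT;
}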
#endif /* __LINUX_UACCESS_H__ */
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Al Viro | 727 | 73.43% | 5 | 29.41% |
Hiro Yoshioka | 50 | 5.05% | 1 | 5.88% |
David Hildenbrand | 50 | 5.05% | 3 | 17.65% |
Linus Torvalds | 37 | 3.74% | 2 | 11.76% |
Ingo Molnar | 34 | 3.43% | 1 | 5.88% |
Jason Wessel | 34 | 3.43% | 1 | 5.88% |
Peter Zijlstra | 26 | 2.63% | 1 | 5.88% |
Alexei Starovoitov | 17 | 1.72% | 1 | 5.88% |
Andrew Morton | 11 | 1.11% | 1 | 5.88% |
Steven Rostedt | 4 | 0.40% | 1 | 5.88% |
Total | 990 | 100.00% | 17 | 100.00% |