Release 4.7 include/linux/uaccess.h
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__
#include <linux/sched.h>
#include <asm/uaccess.h>
/*
 * Raise the current task's pagefault-disable depth by one.
 *
 * The counter nests, so pagefault_disable()/pagefault_enable() sections
 * may be stacked; pairs with pagefault_disabled_dec().
 */
static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| david hildenbrand | 14 | 100.00% | 1 | 100.00% |
| Total | 14 | 100.00% | 1 | 100.00% |
/*
 * Drop the current task's pagefault-disable depth by one.
 *
 * The WARN_ON catches underflow, i.e. a pagefault_enable() without a
 * matching earlier pagefault_disable().
 */
static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
	WARN_ON(current->pagefault_disabled < 0);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| david hildenbrand | 23 | 100.00% | 1 | 100.00% |
| Total | 23 | 100.00% | 1 | 100.00% |
/*
* These routines enable/disable the pagefault handler. If disabled, it will
* not take any locks and go straight to the fixup table.
*
* User access methods will not sleep when called from a pagefault_disabled()
* environment.
*/
/*
 * Disable the pagefault handler for the current task (see the block
 * comment above for the overall contract).
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 * (barrier() orders only against this CPU's own fault path;
	 * pagefault_disabled is a per-task field, so no SMP ordering
	 * is needed here.)
	 */
	barrier();
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| peter zijlstra | 13 | 81.25% | 1 | 50.00% |
| david hildenbrand | 3 | 18.75% | 1 | 50.00% |
| Total | 16 | 100.00% | 2 | 100.00% |
/*
 * Re-enable the pagefault handler for the current task; counterpart of
 * pagefault_disable(). Must balance a prior pagefault_disable() call.
 */
static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| peter zijlstra | 13 | 81.25% | 1 | 50.00% |
| david hildenbrand | 3 | 18.75% | 1 | 50.00% |
| Total | 16 | 100.00% | 2 | 100.00% |
/*
* Is the pagefault handler disabled? If so, user access methods will not sleep.
*/
#define pagefault_disabled() (current->pagefault_disabled != 0)
/*
* The pagefault handler is in general disabled by pagefault_disable() or
* when in irq context (via in_atomic()).
*
* This function should only be used by the fault handlers. Other users should
* stick to pagefault_disabled().
* Please NEVER use preempt_disable() to disable the fault handler. With
* !CONFIG_PREEMPT_COUNT, this is like a NOP. So the handler won't be disabled.
* in_atomic() will report different values based on !CONFIG_PREEMPT_COUNT.
*/
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
#ifndef ARCH_HAS_NOCACHE_UACCESS
/*
 * Generic fallback for architectures without ARCH_HAS_NOCACHE_UACCESS:
 * there is no cache-avoiding primitive, so forward to the ordinary
 * atomic-context user copy. The return value of
 * __copy_from_user_inatomic() is propagated unchanged.
 */
static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
				const void __user *from, unsigned long n)
{
	unsigned long uncopied;

	uncopied = __copy_from_user_inatomic(to, from, n);
	return uncopied;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| hiro yoshioka | 32 | 100.00% | 1 | 100.00% |
| Total | 32 | 100.00% | 1 | 100.00% |
/*
 * Generic fallback for architectures without ARCH_HAS_NOCACHE_UACCESS:
 * forward straight to the regular __copy_from_user(), propagating its
 * return value unchanged.
 */
static inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
{
	unsigned long uncopied;

	uncopied = __copy_from_user(to, from, n);
	return uncopied;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| hiro yoshioka | 32 | 100.00% | 1 | 100.00% |
| Total | 32 | 100.00% | 1 | 100.00% |
#endif /* ARCH_HAS_NOCACHE_UACCESS */
/*
* probe_kernel_read(): safely attempt to read from a location
* @dst: pointer to the buffer that shall take the data
* @src: address to read from
* @size: size of the data chunk
*
* Safely read from address @src to the buffer at @dst. If a kernel fault
* happens, handle that and return -EFAULT.
*/
extern long probe_kernel_read(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);
/*
* probe_kernel_write(): safely attempt to write to a location
* @dst: address to write to
* @src: pointer to the data that shall be written
* @size: size of the data chunk
*
* Safely write to address @dst from the buffer at @src. If a kernel fault
* happens, handle that and return -EFAULT.
*/
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
/**
 * probe_kernel_address(): safely attempt to read from a location
 * @addr: address to read from
 * @retval: lvalue to read the value into
 *
 * Returns 0 on success, or -EFAULT.
 *
 * Both arguments are parenthesized on expansion so that expressions
 * such as "p + 1" for @addr expand correctly; the original unguarded
 * expansion would mis-bind operators inside macro arguments.
 */
#define probe_kernel_address(addr, retval)		\
	probe_kernel_read(&(retval), (addr), sizeof(retval))
/*
 * Default implementations for architectures that do not provide their
 * own user_access_begin()/user_access_end() hooks: the begin/end
 * markers are no-ops, and the unsafe accessors degrade to the regular
 * __get_user()/__put_user().
 */
#ifndef user_access_begin
#define user_access_begin() do { } while (0)
#define user_access_end() do { } while (0)
#define unsafe_get_user(x, ptr) __get_user(x, ptr)
#define unsafe_put_user(x, ptr) __put_user(x, ptr)
#endif
#endif /* __LINUX_UACCESS_H__ */
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| hiro yoshioka | 82 | 27.33% | 1 | 9.09% |
| david hildenbrand | 59 | 19.67% | 3 | 27.27% |
| jason wessel | 34 | 11.33% | 1 | 9.09% |
| ingo molnar | 34 | 11.33% | 1 | 9.09% |
| linus torvalds | 33 | 11.00% | 1 | 9.09% |
| peter zijlstra | 26 | 8.67% | 1 | 9.09% |
| alexei starovoitov | 17 | 5.67% | 1 | 9.09% |
| andrew morton | 11 | 3.67% | 1 | 9.09% |
| steven rostedt | 4 | 1.33% | 1 | 9.09% |
| Total | 300 | 100.00% | 11 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.