Release 4.11 arch/x86/include/asm/uaccess_32.h
#ifndef _ASM_X86_UACCESS_32_H
#define _ASM_X86_UACCESS_32_H
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
unsigned long __must_check __copy_to_user_ll
		(void __user *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nozero
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
		(void *to, const void __user *from, unsigned long n);
/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space. The caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also ensure the user space pages are pinned, so
 * that the copy cannot take a page fault and sleep.
 */
static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	check_object_size(from, n, true);
	return __copy_to_user_ll(to, from, n);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
|---|---|---|---|---|
| Linus Torvalds (pre-git) | 16 | 38.10% | 1 | 14.29% |
| Andrew Morton | 14 | 33.33% | 2 | 28.57% |
| Kees Cook | 9 | 21.43% | 1 | 14.29% |
| Ingo Molnar | 2 | 4.76% | 2 | 28.57% |
| Linus Torvalds | 1 | 2.38% | 1 | 14.29% |
| Total | 42 | 100.00% | 7 | 100.00% |
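As a usage illustration (not part of this header), the sketch below shows how a caller might use __copy_to_user_inatomic() from a region where sleeping is forbidden. The wrapper name copy_out_atomic is hypothetical; pagefault_disable()/pagefault_enable() make a faulting access fail immediately instead of sleeping.

```c
/*
 * Hypothetical sketch: copy to user space from atomic context.
 * Assumes 'ubuf' was already validated with access_ok() and that the
 * destination pages are faulted in / pinned, since a fault here
 * cannot be serviced.
 */
#include <linux/uaccess.h>

static unsigned long copy_out_atomic(void __user *ubuf,
				     const void *kbuf, unsigned long len)
{
	unsigned long left;

	pagefault_disable();	/* a fault now fails instead of sleeping */
	left = __copy_to_user_inatomic(ubuf, kbuf, len);
	pagefault_enable();

	return left;		/* bytes *not* copied; 0 means success */
}
```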
/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, n);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
|---|---|---|---|---|
| Ingo Molnar | 34 | 94.44% | 2 | 50.00% |
| Andrew Morton | 1 | 2.78% | 1 | 25.00% |
| Nicholas Piggin | 1 | 2.78% | 1 | 25.00% |
| Total | 36 | 100.00% | 4 | 100.00% |
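For illustration, a minimal sketch of a read-style handler built on __copy_to_user(), which may sleep on a page fault. The names demo_read and msg are invented, and the three-argument access_ok() form is the 4.11-era one that still takes a VERIFY_* type argument.

```c
/* Hypothetical sketch: copy a kernel buffer out in process context. */
#include <linux/uaccess.h>

static ssize_t demo_read(char __user *buf, size_t count)
{
	static const char msg[] = "hello\n";

	if (count > sizeof(msg))
		count = sizeof(msg);
	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	if (__copy_to_user(buf, msg, count))
		return -EFAULT;		/* some bytes were not copied */
	return count;
}
```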
static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	return __copy_from_user_ll_nozero(to, from, n);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
|---|---|---|---|---|
| Linus Torvalds (pre-git) | 16 | 50.00% | 1 | 16.67% |
| Andrew Morton | 12 | 37.50% | 1 | 16.67% |
| Ingo Molnar | 2 | 6.25% | 2 | 33.33% |
| Neil Brown | 1 | 3.12% | 1 | 16.67% |
| Linus Torvalds | 1 | 3.12% | 1 | 16.67% |
| Total | 32 | 100.00% | 6 | 100.00% |
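The doc comment for __copy_from_user() below explains why this _inatomic variant exists. As a sketch of the classic pattern (compare the fs/filemap.c write path), a caller might first attempt the copy with page faults disabled and fall back to the sleeping variant on failure; pull_from_user is a hypothetical name.

```c
/* Hypothetical sketch: try an atomic copy first; on a fault, retry
 * with the sleeping variant. Note __copy_from_user_inatomic() does
 * not zero-pad the destination on failure. */
#include <linux/uaccess.h>

static int pull_from_user(void *dst, const void __user *src,
			  unsigned long len)
{
	unsigned long left;

	pagefault_disable();
	left = __copy_from_user_inatomic(dst, src, len);
	pagefault_enable();

	if (left)	/* faulted: retry where sleeping is allowed */
		left = __copy_from_user(dst, src, len);

	return left ? -EFAULT : 0;
}
```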
/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - may be called from
 * atomic context and will fail rather than sleep. In this case the
 * uncopied bytes will *NOT* be padded with zeros. See fs/filemap.h
 * for an explanation of why this is needed.
 */
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	check_object_size(to, n, false);
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__uaccess_begin();
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			__uaccess_end();
			return ret;
		case 2:
			__uaccess_begin();
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			__uaccess_end();
			return ret;
		case 4:
			__uaccess_begin();
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			__uaccess_end();
			return ret;
		}
	}
	return __copy_from_user_ll(to, from, n);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
|---|---|---|---|---|
| Hiro Yoshioka | 121 | 79.08% | 1 | 20.00% |
| Linus Torvalds | 18 | 11.76% | 1 | 20.00% |
| Kees Cook | 9 | 5.88% | 1 | 20.00% |
| Neil Brown | 4 | 2.61% | 1 | 20.00% |
| Nicholas Piggin | 1 | 0.65% | 1 | 20.00% |
| Total | 153 | 100.00% | 5 | 100.00% |
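One point worth illustrating: because of the __builtin_constant_p() fast path above, a fixed-size copy of 1, 2, or 4 bytes compiles down to a single inlined __get_user_size() access instead of a call into __copy_from_user_ll(). A hedged sketch (read_user_u32 is an invented name):

```c
#include <linux/types.h>
#include <linux/uaccess.h>

static int read_user_u32(u32 *val, const void __user *uptr)
{
	if (!access_ok(VERIFY_READ, uptr, sizeof(*val)))
		return -EFAULT;
	/* sizeof(*val) == 4 is a compile-time constant, so this takes
	 * the inlined case-4 path rather than __copy_from_user_ll() */
	return __copy_from_user(val, uptr, sizeof(*val)) ? -EFAULT : 0;
}
```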
static __always_inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
{
	might_fault();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__uaccess_begin();
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			__uaccess_end();
			return ret;
		case 2:
			__uaccess_begin();
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			__uaccess_end();
			return ret;
		case 4:
			__uaccess_begin();
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			__uaccess_end();
			return ret;
		}
	}
	return __copy_from_user_ll_nocache(to, from, n);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
|---|---|---|---|---|
| Neil Brown | 93 | 64.58% | 1 | 20.00% |
| Ingo Molnar | 32 | 22.22% | 2 | 40.00% |
| Linus Torvalds | 18 | 12.50% | 1 | 20.00% |
| Nicholas Piggin | 1 | 0.69% | 1 | 20.00% |
| Total | 144 | 100.00% | 5 | 100.00% |
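For illustration, the _nocache variant suits large copies whose destination will not be read again soon: the backing __copy_from_user_ll_nocache() can use non-temporal stores on the kernel side so the copy does not evict useful cache lines. A hedged sketch (slurp_bulk is hypothetical):

```c
#include <linux/uaccess.h>

static int slurp_bulk(void *dst, const void __user *src, unsigned long len)
{
	/* process context: __copy_from_user_nocache() may sleep on a fault */
	if (!access_ok(VERIFY_READ, src, len))
		return -EFAULT;
	return __copy_from_user_nocache(dst, src, len) ? -EFAULT : 0;
}
```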
static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_ll_nocache_nozero(to, from, n);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
|---|---|---|---|---|
| Hiro Yoshioka | 30 | 93.75% | 1 | 50.00% |
| Neil Brown | 2 | 6.25% | 1 | 50.00% |
| Total | 32 | 100.00% | 2 | 100.00% |
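Finally, the _inatomic_nocache variant combines both properties: callable with page faults disabled and cache-bypassing, with no zero-padding of uncopied bytes, so the caller must cope with a short copy itself. A hypothetical sketch (grab_bulk_atomic is an invented name):

```c
#include <linux/uaccess.h>

static unsigned long grab_bulk_atomic(void *dst, const void __user *src,
				      unsigned long len)
{
	unsigned long left;

	pagefault_disable();
	left = __copy_from_user_inatomic_nocache(dst, src, len);
	pagefault_enable();

	return left;	/* bytes not copied; caller handles short copies */
}
```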
#endif /* _ASM_X86_UACCESS_32_H */
Overall Contributors
| Person | Tokens | Token % | Commits | Commit % |
|---|---|---|---|---|
| Hiro Yoshioka | 171 | 30.16% | 1 | 4.55% |
| Neil Brown | 140 | 24.69% | 1 | 4.55% |
| Linus Torvalds (pre-git) | 77 | 13.58% | 4 | 18.18% |
| Ingo Molnar | 70 | 12.35% | 2 | 9.09% |
| Linus Torvalds | 41 | 7.23% | 3 | 13.64% |
| Andrew Morton | 35 | 6.17% | 4 | 18.18% |
| Kees Cook | 18 | 3.17% | 1 | 4.55% |
| H. Peter Anvin | 6 | 1.06% | 2 | 9.09% |
| Dave Jones | 3 | 0.53% | 1 | 4.55% |
| Nicholas Piggin | 3 | 0.53% | 1 | 4.55% |
| David Hildenbrand | 2 | 0.35% | 1 | 4.55% |
| Arnaldo Carvalho de Melo | 1 | 0.18% | 1 | 4.55% |
| Total | 567 | 100.00% | 22 | 100.00% |