Contributors: 18
| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Andi Kleen | 232 | 39.93% | 8 | 19.05% |
| Linus Torvalds | 170 | 29.26% | 12 | 28.57% |
| Kirill A. Shutemov | 64 | 11.02% | 3 | 7.14% |
| Al Viro | 27 | 4.65% | 3 | 7.14% |
| Andrey Ryabinin | 17 | 2.93% | 1 | 2.38% |
| Borislav Petkov | 16 | 2.75% | 2 | 4.76% |
| Jan Beulich | 13 | 2.24% | 1 | 2.38% |
| Dan J Williams | 10 | 1.72% | 1 | 2.38% |
| H. Peter Anvin | 6 | 1.03% | 2 | 4.76% |
| Ross Zwisler | 5 | 0.86% | 1 | 2.38% |
| Gerd Hoffmann | 5 | 0.86% | 1 | 2.38% |
| Uros Bizjak | 4 | 0.69% | 1 | 2.38% |
| Nicholas Piggin | 3 | 0.52% | 1 | 2.38% |
| Frédéric Weisbecker | 3 | 0.52% | 1 | 2.38% |
| Thomas Gleixner | 2 | 0.34% | 1 | 2.38% |
| Ingo Molnar | 2 | 0.34% | 1 | 2.38% |
| Greg Kroah-Hartman | 1 | 0.17% | 1 | 2.38% |
| Mateusz Guzik | 1 | 0.17% | 1 | 2.38% |
| Total | 581 | | 42 | |
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>

#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/runtime-const.h>

/*
 * Virtual variable: there's no actual backing store for this,
 * it can purely be used as 'runtime_const_ptr(USER_PTR_MAX)'
 */
extern unsigned long USER_PTR_MAX;

#ifdef CONFIG_ADDRESS_MASKING
/*
 * Mask out tag bits from the address.
 */
static inline unsigned long __untagged_addr(unsigned long addr)
{
	asm (ALTERNATIVE("",
			 "and " __percpu_arg([mask]) ", %[addr]", X86_FEATURE_LAM)
	     : [addr] "+r" (addr)
	     : [mask] "m" (__my_cpu_var(tlbstate_untag_mask)));
	return addr;
}

#define untagged_addr(addr) ({ \
	unsigned long __addr = (__force unsigned long)(addr); \
	(__force __typeof__(addr))__untagged_addr(__addr); \
})

static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
						   unsigned long addr)
{
	mmap_assert_locked(mm);
	return addr & (mm)->context.untag_mask;
}

#define untagged_addr_remote(mm, addr) ({ \
	unsigned long __addr = (__force unsigned long)(addr); \
	(__force __typeof__(addr))__untagged_addr_remote(mm, __addr); \
})

#endif
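
/*
 * Illustrative sketch, not part of the original header: with LAM, the
 * tag lives in the upper bits of a user pointer (bits 62:57 for
 * LAM_U57) and untagging is a plain AND with the per-CPU untag mask,
 * which clears those tag bits while leaving the rest of the address
 * alone.  The helper and parameter names below are hypothetical and
 * only model what __untagged_addr() does once the X86_FEATURE_LAM
 * alternative is patched in.
 */
static inline unsigned long lam_untag_sketch(unsigned long addr,
					     unsigned long untag_mask_example)
{
	/* e.g. for LAM_U57 the mask clears bits 62:57 and keeps all others */
	return addr & untag_mask_example;
}
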
#define valid_user_address(x) \
	((__force unsigned long)(x) <= runtime_const_ptr(USER_PTR_MAX))

/*
 * Masking the user address is an alternative to a conditional
 * user_access_begin that can avoid the fencing. This only works
 * for dense accesses starting at the address.
 */
static inline void __user *mask_user_address(const void __user *ptr)
{
	unsigned long mask;

	asm("cmp %1,%0\n\t"
	    "sbb %0,%0"
		:"=r" (mask)
		:"r" (ptr),
		 "0" (runtime_const_ptr(USER_PTR_MAX)));
	return (__force void __user *)(mask | (__force unsigned long)ptr);
}

#define masked_user_access_begin(x) ({ \
	__auto_type __masked_ptr = (x); \
	__masked_ptr = mask_user_address(__masked_ptr); \
	__uaccess_begin(); __masked_ptr; })
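
/*
 * Illustrative sketch, not part of the original header: the cmp/sbb
 * pair in mask_user_address() is a branchless form of the C below.
 * An in-range pointer passes through unchanged; an out-of-range one
 * becomes an all-ones address that is guaranteed to fault, so no
 * speculation barrier is needed before the access.  The function and
 * parameter names are hypothetical.
 */
static inline unsigned long mask_user_address_sketch(unsigned long ptr,
						     unsigned long user_ptr_max)
{
	/* all-ones if ptr is above the user/kernel boundary, zero otherwise */
	unsigned long mask = (ptr > user_ptr_max) ? ~0UL : 0UL;

	return mask | ptr;
}
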
/*
 * User pointers can have tag bits on x86-64. This scheme tolerates
 * arbitrary values in those bits rather than masking them off.
 *
 * Enforce two rules:
 * 1. 'ptr' must be in the user part of the address space
 * 2. 'ptr+size' must not overflow into kernel addresses
 *
 * Note that we always have at least one guard page between the
 * max user address and the non-canonical gap, allowing us to
 * ignore small sizes entirely.
 *
 * In fact, we could probably remove the size check entirely, since
 * any kernel accesses will be in increasing address order starting
 * at 'ptr'.
 *
 * That's a separate optimization, for now just handle the small
 * constant case.
 */
static inline bool __access_ok(const void __user *ptr, unsigned long size)
{
	if (__builtin_constant_p(size <= PAGE_SIZE) && size <= PAGE_SIZE) {
		return valid_user_address(ptr);
	} else {
		unsigned long sum = size + (__force unsigned long)ptr;

		return valid_user_address(sum) && sum >= (__force unsigned long)ptr;
	}
}
#define __access_ok __access_ok
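
/*
 * Illustrative sketch, not part of the original header: for a
 * non-constant or large size, __access_ok() boils down to the
 * overflow-aware range check modelled here; small constant sizes can
 * skip the addition because of the guard page between the maximum
 * user address and the non-canonical gap.  The function name is
 * hypothetical.
 */
static inline bool access_ok_sketch(unsigned long ptr, unsigned long size,
				    unsigned long user_ptr_max)
{
	unsigned long end = ptr + size;

	/* 'end' must stay at or below the user boundary and must not wrap */
	return end <= user_ptr_max && end >= ptr;
}
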
/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
rep_movs_alternative(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned long len)
{
	stac();
	/*
	 * If CPU has FSRM feature, use 'rep movs'.
	 * Otherwise, use rep_movs_alternative.
	 */
	asm volatile(
		"1:\n\t"
		ALTERNATIVE("rep movsb",
			    "call rep_movs_alternative", ALT_NOT(X86_FEATURE_FSRM))
		"2:\n"
		_ASM_EXTABLE_UA(1b, 2b)
		:"+c" (len), "+D" (to), "+S" (from), ASM_CALL_CONSTRAINT
		: : "memory", "rax");
	clac();
	return len;
}

static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic(dst, (__force void *)src, size);
}

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst, src, size);
}
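
/*
 * Illustrative sketch, not part of the original header: the raw copy
 * helpers above return the number of bytes that could NOT be copied
 * (0 on full success) and skip the access_ok() check, so a caller is
 * expected to have validated the user pointer first.  The wrapper
 * name below is hypothetical.
 */
static inline bool copy_in_sketch(void *dst, const void __user *src,
				  unsigned long size)
{
	/* Assumes the caller has already validated 'src' with access_ok(). */
	return raw_copy_from_user(dst, src, size) == 0;
}
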
extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size);
extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	long ret;
	kasan_check_write(dst, size);
	stac();
	ret = __copy_user_nocache(dst, src, size);
	clac();
	return ret;
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}
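
/*
 * Illustrative sketch, not part of the original header: the nocache
 * and flushcache variants above matter when the destination must not
 * linger in the CPU cache (e.g. persistent memory).  This hypothetical
 * wrapper only shows the selection between the two paths and assumes
 * both helpers report the residue of bytes left uncopied.
 */
static inline unsigned long pmem_copy_sketch(void *dst, const void __user *src,
					     unsigned size, bool dst_is_pmem)
{
	if (dst_is_pmem)
		return __copy_from_user_flushcache(dst, src, size);

	/* ordinary cached copy for regular kernel memory destinations */
	return raw_copy_from_user(dst, src, size);
}
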
/*
 * Zero Userspace.
 */

__must_check unsigned long
rep_stos_alternative(void __user *addr, unsigned long len);

static __always_inline __must_check unsigned long __clear_user(void __user *addr, unsigned long size)
{
	might_fault();
	stac();

	/*
	 * No memory constraint because it doesn't change any memory gcc
	 * knows about.
	 */
	asm volatile(
		"1:\n\t"
		ALTERNATIVE("rep stosb",
			    "call rep_stos_alternative", ALT_NOT(X86_FEATURE_FSRS))
		"2:\n"
		_ASM_EXTABLE_UA(1b, 2b)
		: "+c" (size), "+D" (addr), ASM_CALL_CONSTRAINT
		: "a" (0));

	clac();

	return size;
}

static __always_inline unsigned long clear_user(void __user *to, unsigned long n)
{
	if (__access_ok(to, n))
		return __clear_user(to, n);
	return n;
}
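
/*
 * Illustrative sketch, not part of the original header: clear_user()
 * returns the number of bytes that could not be zeroed, so a zero
 * return means the whole range was cleared.  The wrapper name below
 * is hypothetical.
 */
static inline bool zero_user_range_sketch(void __user *to, unsigned long n)
{
	return clear_user(to, n) == 0;
}
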
#endif /* _ASM_X86_UACCESS_64_H */