Release 4.14 arch/x86/lib/usercopy_64.c
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
/*
 * Zero Userspace
 */
unsigned long __clear_user(void __user *addr, unsigned long size)
{
	long __d0;

	might_fault();
	/* no memory constraint because it doesn't change any memory gcc knows
	   about */
	stac();
	asm volatile(
		" testq %[size8],%[size8]\n"
		" jz 4f\n"
		"0: movq %[zero],(%[dst])\n"
		" addq %[eight],%[dst]\n"
		" decl %%ecx ; jnz 0b\n"
		"4: movq %[size1],%%rcx\n"
		" testl %%ecx,%%ecx\n"
		" jz 2f\n"
		"1: movb %b[zero],(%[dst])\n"
		" incq %[dst]\n"
		" decl %%ecx ; jnz 1b\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3: lea 0(%[size1],%[size8],8),%[size8]\n"
		" jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE(0b,3b)
		_ASM_EXTABLE(1b,2b)
		: [size8] "=&c"(size), [dst] "=&D" (__d0)
		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
		  [zero] "r" (0UL), [eight] "r" (8UL));
	clac();
	return size;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andi Kleen | 26 | 76.47% | 4 | 57.14% |
H. Peter Anvin | 6 | 17.65% | 1 | 14.29% |
Nicholas Piggin | 1 | 2.94% | 1 | 14.29% |
Al Viro | 1 | 2.94% | 1 | 14.29% |
Total | 34 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL(__clear_user);
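For readers who prefer not to decode the inline assembly, __clear_user() behaves roughly like the following C sketch (illustrative only, not part of this file): it zeroes size / 8 quadwords, then the remaining size & 7 bytes; the .fixup code at label 3 converts the loop counters back into a count of bytes left unwritten when a fault occurs.

/*
 * Illustrative C equivalent of the asm in __clear_user() (a sketch, not
 * kernel code). The real function runs with SMAP opened via stac()/clac()
 * and recovers from faults through the exception table.
 */
static unsigned long clear_user_sketch(unsigned char *dst, unsigned long size)
{
	unsigned long qwords = size / 8;	/* "0:" loop, one quadword per pass */
	unsigned long bytes  = size & 7;	/* "1:" loop, trailing bytes */

	while (qwords--) {
		*(unsigned long *)dst = 0;
		dst += 8;
	}
	while (bytes--)
		*dst++ = 0;

	return 0;	/* the asm returns the number of bytes NOT cleared */
}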
unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		return __clear_user(to, n);
	return n;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andi Kleen | 36 | 97.30% | 2 | 66.67% |
Al Viro | 1 | 2.70% | 1 | 33.33% |
Total | 37 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(clear_user);
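clear_user() is the checked entry point: it validates the range with access_ok() and returns the number of bytes it could not zero. A hypothetical caller (not from this file) would typically turn a non-zero return into -EFAULT:

/* Hypothetical caller of clear_user(); names are illustrative. */
static int zero_user_buffer(void __user *buf, unsigned long len)
{
	if (clear_user(buf, len))	/* non-zero: some bytes were not cleared */
		return -EFAULT;
	return 0;
}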
/*
 * Try to copy last bytes and clear the rest if needed.
 * Since protection fault in copy_from/to_user is not a normal situation,
 * it is not necessary to optimize tail handling.
 */
__visible unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len)
{
	for (; len; --len, to++) {
		char c;

		if (__get_user_nocheck(c, from++, sizeof(char)))
			break;
		if (__put_user_nocheck(c, to, sizeof(char)))
			break;
	}
	clac();
	return len;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vitaly Mayatskikh | 61 | 85.92% | 1 | 25.00% |
Linus Torvalds | 6 | 8.45% | 1 | 25.00% |
CQ Tang | 3 | 4.23% | 1 | 25.00% |
Andi Kleen | 1 | 1.41% | 1 | 25.00% |
Total | 71 | 100.00% | 4 | 100.00% |
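copy_user_handle_tail() is reached from the fixup path of the optimized copy_user_generic_*() routines: after a fault in the fast copy, it retries the remaining bytes one at a time and returns how many could not be transferred, and that count becomes the non-zero return value of copy_from_user()/copy_to_user(). A hypothetical caller (not from this file) interprets it like this:

/* Hypothetical user of copy_from_user(); a non-zero return means the tail
 * handler above hit an unreadable byte and that many bytes are missing. */
static int fetch_from_user(void *kbuf, const void __user *ubuf, size_t len)
{
	if (copy_from_user(kbuf, ubuf, len))
		return -EFAULT;
	return 0;
}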
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr: virtual start address
 * @size: number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dan J Williams | 72 | 100.00% | 1 | 100.00% |
Total | 72 | 100.00% | 1 | 100.00% |
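The rounding mentioned in the kerneldoc can be traced with a concrete case: with 64-byte cache lines, a call covering addr = 0x1005 and size = 8 starts the loop at 0x1005 & ~63 = 0x1000 and stops once p reaches vend = 0x100d, so a single CLWB covers the whole range. A stand-alone sketch of the same loop bounds (assuming a 64-byte line; not kernel code):

/* Sketch of clean_cache_range()'s loop bounds, assuming 64-byte lines. */
#include <stdio.h>

int main(void)
{
	unsigned long clflush_size = 64;	/* stands in for boot_cpu_data.x86_clflush_size */
	unsigned long clflush_mask = clflush_size - 1;
	unsigned long addr = 0x1005, size = 8;
	unsigned long vend = addr + size;
	unsigned long p;

	for (p = addr & ~clflush_mask; p < vend; p += clflush_size)
		printf("clwb on line %#lx\n", p);	/* prints only 0x1000 */
	return 0;
}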
void arch_wb_cache_pmem(void *addr, size_t size)
{
	clean_cache_range(addr, size);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dan J Williams | 19 | 100.00% | 1 | 100.00% |
Total | 19 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	unsigned long flushed, dest = (unsigned long) dst;
	long rc = __copy_user_nocache(dst, src, size, 0);

	/*
	 * __copy_user_nocache() uses non-temporal stores for the bulk
	 * of the transfer, but we need to manually flush if the
	 * transfer is unaligned. A cached memory copy is used when
	 * destination or size is not naturally aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 */
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			clean_cache_range(dst, 1);
	} else {
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
			clean_cache_range(dst, 1);
		}

		flushed = dest - (unsigned long) dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			clean_cache_range(dst + size - 1, 1);
	}

	return rc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dan J Williams | 147 | 100.00% | 1 | 100.00% |
Total | 147 | 100.00% | 1 | 100.00% |
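The alignment rules in the comment can be checked against a concrete copy: for dst = 0x1000 (8-byte aligned) and size = 20, the head branch is skipped, flushed stays 0, and because 20 is not a multiple of 8 the last four bytes went through the cache, so clean_cache_range(dst + 19, 1) writes back the line holding the tail. A stand-alone trace of the same decisions (assuming a 64-byte line; not kernel code):

/* Stand-alone trace of __copy_user_flushcache()'s flush decisions (sketch). */
#include <stdio.h>

#define IS_ALIGNED(x, a)	(((x) & ((unsigned long)(a) - 1)) == 0)
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long clflush_size = 64;	/* boot_cpu_data.x86_clflush_size */
	unsigned long dst = 0x1000, size = 20;	/* example copy */
	unsigned long dest = dst, flushed;

	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			printf("flush line of first byte\n");
	} else {
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, clflush_size);
			printf("flush line of first byte\n");
		}
		flushed = dest - dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			printf("flush line of last byte\n");	/* taken: 20 is not 8-aligned */
	}
	return 0;
}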
void memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
	unsigned long dest = (unsigned long) _dst;
	unsigned long source = (unsigned long) _src;

	/* cache copy and flush to align dest */
	if (!IS_ALIGNED(dest, 8)) {
		unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);

		memcpy((void *) dest, (void *) source, len);
		clean_cache_range((void *) dest, len);
		dest += len;
		source += len;
		size -= len;
		if (!size)
			return;
	}

	/* 4x8 movnti loop */
	while (size >= 32) {
		asm("movq (%0), %%r8\n"
		    "movq 8(%0), %%r9\n"
		    "movq 16(%0), %%r10\n"
		    "movq 24(%0), %%r11\n"
		    "movnti %%r8, (%1)\n"
		    "movnti %%r9, 8(%1)\n"
		    "movnti %%r10, 16(%1)\n"
		    "movnti %%r11, 24(%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8", "r9", "r10", "r11");
		dest += 32;
		source += 32;
		size -= 32;
	}

	/* 1x8 movnti loop */
	while (size >= 8) {
		asm("movq (%0), %%r8\n"
		    "movnti %%r8, (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 8;
		source += 8;
		size -= 8;
	}

	/* 1x4 movnti loop */
	while (size >= 4) {
		asm("movl (%0), %%r8d\n"
		    "movnti %%r8d, (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 4;
		source += 4;
		size -= 4;
	}

	/* cache copy for remaining bytes */
	if (size) {
		memcpy((void *) dest, (void *) source, size);
		clean_cache_range((void *) dest, size);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dan J Williams | 216 | 100.00% | 1 | 100.00% |
Total | 216 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(memcpy_flushcache);
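memcpy_flushcache() is aimed at the pmem/dax path: the movnti stores bypass the cache for the aligned bulk of the copy, and the head/tail bytes that had to go through the cache are written back with clean_cache_range(), so the data is headed to the persistence domain when the call returns. Store ordering is still the caller's job; a hypothetical write helper (illustrative, not from this file) might look like:

/* Hypothetical persistent-memory write helper (illustrative only). */
static void pmem_write_record(void *pmem_dst, const void *src, size_t len)
{
	memcpy_flushcache(pmem_dst, src, len);	/* non-temporal copy + tail write-back */
	wmb();	/* callers still need a store fence before relying on durability */
}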
void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
		size_t len)
{
	char *from = kmap_atomic(page);

	memcpy_flushcache(to, from + offset, len);
	kunmap_atomic(from);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dan J Williams | 45 | 100.00% | 1 | 100.00% |
Total | 45 | 100.00% | 1 | 100.00% |
#endif
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dan J Williams | 518 | 76.29% | 2 | 12.50% |
Andi Kleen | 79 | 11.63% | 6 | 37.50% |
Vitaly Mayatskikh | 62 | 9.13% | 1 | 6.25% |
Linus Torvalds | 6 | 0.88% | 1 | 6.25% |
H. Peter Anvin | 6 | 0.88% | 1 | 6.25% |
CQ Tang | 3 | 0.44% | 1 | 6.25% |
Al Viro | 2 | 0.29% | 1 | 6.25% |
Andrew Lutomirski | 1 | 0.15% | 1 | 6.25% |
Nicholas Piggin | 1 | 0.15% | 1 | 6.25% |
Paul Gortmaker | 1 | 0.15% | 1 | 6.25% |
Total | 679 | 100.00% | 16 | 100.00% |