Contributors: 10

Author              | Tokens | Token Proportion | Commits | Commit Proportion
--------------------|--------|------------------|---------|------------------
Andi Kleen          | 198    | 48.53%           | 5       | 26.32%
Matthew Wilcox      | 78     | 19.12%           | 1       | 5.26%
Mikulas Patocka     | 38     | 9.31%            | 1       | 5.26%
Alexander Potapenko | 26     | 6.37%            | 3       | 15.79%
Tony Luck           | 22     | 5.39%            | 3       | 15.79%
Alexey Dobriyan     | 21     | 5.15%            | 1       | 5.26%
Dan J Williams      | 15     | 3.68%            | 2       | 10.53%
Andrey Ryabinin     | 6      | 1.47%            | 1       | 5.26%
H. Peter Anvin      | 3      | 0.74%            | 1       | 5.26%
Greg Kroah-Hartman  | 1      | 0.25%            | 1       | 5.26%
Total               | 408    |                  | 19      |
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H
#ifdef __KERNEL__
#include <linux/jump_label.h>
/* Written 2002 by Andi Kleen */
/*
 * Even with __builtin_ the compiler may decide to use the out of line
 * function.
 */
#if defined(__SANITIZE_MEMORY__) && defined(__NO_FORTIFY)
#include <linux/kmsan_string.h>
#endif
#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);
#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);
/*
* KMSAN needs to instrument as much code as possible. Use C versions of
* memsetXX() from lib/string.c under KMSAN.
*/
#if !defined(CONFIG_KMSAN)
#define __HAVE_ARCH_MEMSET16
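/*
 * Store n copies of the 16-bit pattern v at s. "rep stosw" writes AX
 * to [RDI] n times, advancing RDI and decrementing RCX as it goes,
 * which is what the "+D" and "+c" read-write constraints express; the
 * original pointer is saved in s0 so it can be returned.
 */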
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
	const __auto_type s0 = s;

	asm volatile("rep stosw"
		     : "+D" (s), "+c" (n)
		     : "a" (v)
		     : "memory");
	return s0;
}
#define __HAVE_ARCH_MEMSET32
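/* As memset16(), but "rep stosl" stores n 32-bit values from EAX. */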
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
	const __auto_type s0 = s;

	asm volatile("rep stosl"
		     : "+D" (s), "+c" (n)
		     : "a" (v)
		     : "memory");
	return s0;
}
#define __HAVE_ARCH_MEMSET64
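/* As memset16(), but "rep stosq" stores n 64-bit values from RAX. */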
static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
{
	const __auto_type s0 = s;

	asm volatile("rep stosq"
		     : "+D" (s), "+c" (n)
		     : "a" (v)
		     : "memory");
	return s0;
}
#endif /* !CONFIG_KMSAN */
#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);
int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void __memcpy_flushcache(void *dst, const void *src, size_t cnt);
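/*
 * For small compile-time-constant sizes, emit movnti non-temporal
 * stores inline, moving the data to memory without polluting the
 * cache; all other sizes fall back to the out-of-line routine.
 */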
static __always_inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
	if (__builtin_constant_p(cnt)) {
		switch (cnt) {
		case 4:
			asm ("movntil %1, %0" : "=m"(*(u32 *)dst) : "r"(*(u32 *)src));
			return;
		case 8:
			asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
			return;
		case 16:
			asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
			asm ("movntiq %1, %0" : "=m"(*(u64 *)(dst + 8)) : "r"(*(u64 *)(src + 8)));
			return;
		}
	}
	__memcpy_flushcache(dst, src, cnt);
}
#endif /* CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE */
#endif /* __KERNEL__ */
#endif /* _ASM_X86_STRING_64_H */
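
To make the memset16() pattern above concrete, here is a minimal standalone sketch, assuming an x86-64 userspace build with GCC or Clang; demo_memset16() and the test in main() are illustrative names, not part of the kernel header:

/* Hypothetical demo; build with: gcc -O2 demo.c && ./a.out */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Same inline-asm shape as the kernel's memset16() above. */
static inline void *demo_memset16(uint16_t *s, uint16_t v, size_t n)
{
	void *s0 = s;

	/* rep stosw stores AX to [RDI] n times; RDI and RCX are both
	 * read and modified, hence the "+" on their constraints. */
	asm volatile("rep stosw"
		     : "+D" (s), "+c" (n)
		     : "a" (v)
		     : "memory");
	return s0;
}

int main(void)
{
	uint16_t buf[4];
	size_t i;

	demo_memset16(buf, 0xBEEF, 4);
	for (i = 0; i < 4; i++)
		printf("buf[%zu] = 0x%04X\n", i, (unsigned int)buf[i]);
	return 0;
}

The "memory" clobber is what prevents the compiler from caching buf across the asm statement, which is the same reason the kernel versions declare it.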