Contributors: 19
Author Tokens Token Proportion Commits Commit Proportion
Dan J Williams 196 51.99% 7 26.92%
Arnaldo Carvalho de Melo 62 16.45% 1 3.85%
Alexander Potapenko 37 9.81% 1 3.85%
Aleksa Sarai 20 5.31% 1 3.85%
David Howells 16 4.24% 1 3.85%
Andi Kleen 10 2.65% 2 7.69%
Al Viro 8 2.12% 1 3.85%
Linus Torvalds 6 1.59% 1 3.85%
Shaohua Li 3 0.80% 1 3.85%
Andrew Morton 3 0.80% 1 3.85%
Matt Domsch 3 0.80% 1 3.85%
Tony Luck 3 0.80% 1 3.85%
René Herman 2 0.53% 1 3.85%
Dave Jones 2 0.53% 1 3.85%
H. Peter Anvin 2 0.53% 1 3.85%
Andrew Lutomirski 1 0.27% 1 3.85%
Josh Poimboeuf 1 0.27% 1 3.85%
Ingo Molnar 1 0.27% 1 3.85%
Mikulas Patocka 1 0.27% 1 3.85%
Total 377 26

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-2020 Intel Corporation. All rights reserved. */

#include <linux/jump_label.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/instrumented.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/mce.h>

#ifdef CONFIG_X86_MCE
static DEFINE_STATIC_KEY_FALSE(copy_mc_fragile_key);

/*
 * Route copy_mc_to_kernel()/copy_mc_to_user() through the careful
 * copy_mc_fragile() path.  Meant for platforms that benefit from short,
 * restartable copy chunks (see the copy_mc_to_kernel() kerneldoc below).
 * static_branch_inc() counts enables, so multiple callers are tolerated.
 * NOTE(review): callers are outside this file — presumably early platform
 * setup code; confirm against the call sites.
 */
void enable_copy_mc_fragile(void)
{
	static_branch_inc(&copy_mc_fragile_key);
}
/* Patched-branch test: near-zero cost on the common (key-off) path */
#define copy_mc_fragile_enabled (static_branch_unlikely(&copy_mc_fragile_key))

/*
 * Fault/exception tail handler, in the spirit of copy_user_handle_tail():
 * retry the copy one byte at a time to pinpoint the exact write-fault or
 * source-exception offset.  Returns the number of bytes left uncopied.
 */
__visible notrace unsigned long
copy_mc_fragile_handle_tail(char *to, char *from, unsigned len)
{
	while (len) {
		if (copy_mc_fragile(to, from, 1))
			break;
		to++;
		from++;
		len--;
	}
	return len;
}
#else
/*
 * No point in doing careful copying, or consulting a static key when
 * there is no #MC handler in the CONFIG_X86_MCE=n case.
 */
/* No-op: with no #MC handler there is nothing to enable */
void enable_copy_mc_fragile(void)
{
}
/* Constant zero lets the compiler drop the fragile branches entirely */
#define copy_mc_fragile_enabled (0)
#endif

/*
 * ERMS-based recoverable copy; the implementation lives outside this file
 * (presumably in assembly — confirm).  Same convention as the callers
 * below: returns the number of bytes not copied.
 */
unsigned long copy_mc_enhanced_fast_string(void *dst, const void *src, unsigned len);

/**
 * copy_mc_to_kernel - memory copy that handles source exceptions
 *
 * @dst:	destination address
 * @src:	source address
 * @len:	number of bytes to copy
 *
 * Call into the 'fragile' version on systems that benefit from avoiding
 * corner case poison consumption scenarios, For example, accessing
 * poison across 2 cachelines with a single instruction. Almost all
 * other uses case can use copy_mc_enhanced_fast_string() for a fast
 * recoverable copy, or fallback to plain memcpy.
 *
 * Return 0 for success, or number of bytes not copied if there was an
 * exception.
 */
unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigned len)
{
	unsigned long rem;

	if (copy_mc_fragile_enabled) {
		/* Careful chunked copy for poison corner cases */
		instrument_memcpy_before(dst, src, len);
		rem = copy_mc_fragile(dst, src, len);
		instrument_memcpy_after(dst, src, len, rem);
	} else if (static_cpu_has(X86_FEATURE_ERMS)) {
		/* Fast recoverable copy via enhanced rep-movsb */
		instrument_memcpy_before(dst, src, len);
		rem = copy_mc_enhanced_fast_string(dst, src, len);
		instrument_memcpy_after(dst, src, len, rem);
	} else {
		/* No recoverable path: plain memcpy, nothing left over */
		memcpy(dst, src, len);
		rem = 0;
	}

	return rem;
}
EXPORT_SYMBOL_GPL(copy_mc_to_kernel);

/**
 * copy_mc_to_user - copy to user memory, handling source exceptions
 *
 * @dst:	destination user address
 * @src:	kernel source address
 * @len:	number of bytes to copy
 *
 * Same path selection as copy_mc_to_kernel(): 'fragile' when enabled,
 * otherwise the ERMS fast string copy, otherwise the generic user copy.
 * The machine-check-recoverable copies run inside a
 * __uaccess_begin()/__uaccess_end() window; instrumentation is done
 * before opening it so the window stays minimal.
 *
 * Return 0 for success, or number of bytes not copied if there was an
 * exception.
 */
unsigned long __must_check copy_mc_to_user(void __user *dst, const void *src, unsigned len)
{
	unsigned long rem;

	if (copy_mc_fragile_enabled) {
		instrument_copy_to_user(dst, src, len);
		__uaccess_begin();
		rem = copy_mc_fragile((__force void *)dst, src, len);
		__uaccess_end();
	} else if (static_cpu_has(X86_FEATURE_ERMS)) {
		instrument_copy_to_user(dst, src, len);
		__uaccess_begin();
		rem = copy_mc_enhanced_fast_string((__force void *)dst, src, len);
		__uaccess_end();
	} else {
		/* Fallback handles its own user-access window */
		rem = copy_user_generic((__force void *)dst, src, len);
	}

	return rem;
}