Contributors: 10

Author                    Tokens  Token Proportion  Commits  Commit Proportion
Oliver O'Halloran            178            40.45%        2             15.38%
Christophe Leroy             150            34.09%        2             15.38%
Aneesh Kumar K.V              62            14.09%        2             15.38%
Anton Blanchard               28             6.36%        1              7.69%
Benjamin Herrenschmidt         8             1.82%        1              7.69%
Stephen Rothwell               4             0.91%        1              7.69%
Alastair D'Silva               4             0.91%        1              7.69%
Cédric Le Goater               3             0.68%        1              7.69%
Thomas Gleixner                2             0.45%        1              7.69%
Al Viro                        1             0.23%        1              7.69%
Total                        440                         13


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 IBM Corporation. All rights reserved.
 */

#include <linux/string.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/libnvdimm.h>

#include <asm/cacheflush.h>

/*
 * Write each cache line in [start, stop) back to persistent storage
 * using dcbstps (data cache block store to persistent storage).
 */
static inline void __clean_pmem_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
	/* Align the start address down to a cache line boundary ... */
	void *addr = (void *)(start & ~(bytes - 1));
	/* ... and round the length up so the final partial line is covered. */
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		asm volatile(PPC_DCBSTPS(%0, %1): :"i"(0), "r"(addr): "memory");
}

/*
 * Flush (write back and invalidate) each cache line in [start, stop)
 * using dcbfps (data cache block flush to persistent storage).
 */
static inline void __flush_pmem_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		asm volatile(PPC_DCBFPS(%0, %1): :"i"(0), "r"(addr): "memory");
}

/* Only issue the persistent-storage cache ops on ISA 2.07 or later CPUs. */
static inline void clean_pmem_range(unsigned long start, unsigned long stop)
{
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		return __clean_pmem_range(start, stop);
}

static inline void flush_pmem_range(unsigned long start, unsigned long stop)
{
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		return __flush_pmem_range(start, stop);
}

/*
 * CONFIG_ARCH_HAS_PMEM_API symbols
 */
/* Write the cache lines covering [addr, addr + size) back to persistent storage. */
void arch_wb_cache_pmem(void *addr, size_t size)
{
	unsigned long start = (unsigned long) addr;
	clean_pmem_range(start, start + size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

/* Flush and invalidate the cache lines covering [addr, addr + size). */
void arch_invalidate_pmem(void *addr, size_t size)
{
	unsigned long start = (unsigned long) addr;
	flush_pmem_range(start, start + size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
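
/*
 * Illustrative sketch, not part of this file: a hypothetical caller could
 * pair plain stores with arch_wb_cache_pmem() to push a structure it has
 * just written out towards the persistence domain.  The type and function
 * names below are invented for the example.
 */
struct example_record {
	u64 seq;
	u64 payload;
};

static void example_write_record(struct example_record *slot, u64 seq, u64 payload)
{
	slot->seq = seq;
	slot->payload = payload;

	/* Write back the cache lines covering the record just stored. */
	arch_wb_cache_pmem(slot, sizeof(*slot));
}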

/*
 * CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE symbols
 */
/*
 * Copy from user space and write the destination cache lines back to
 * persistent storage.  Returns the number of bytes that could not be
 * copied, like __copy_from_user().
 */
long __copy_from_user_flushcache(void *dest, const void __user *src,
		unsigned size)
{
	unsigned long copied, start = (unsigned long) dest;

	copied = __copy_from_user(dest, src, size);
	clean_pmem_range(start, start + size);

	return copied;
}

/* memcpy() followed by a write-back of the destination cache lines. */
void memcpy_flushcache(void *dest, const void *src, size_t size)
{
	unsigned long start = (unsigned long) dest;

	memcpy(dest, src, size);
	clean_pmem_range(start, start + size);
}
EXPORT_SYMBOL(memcpy_flushcache);
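
/*
 * Illustrative sketch, not part of this file: a hypothetical helper that
 * appends an entry to a log kept in persistent memory.  memcpy_flushcache()
 * both copies the data and writes the destination cache lines back, so no
 * separate arch_wb_cache_pmem() call is needed for the copied range.  All
 * names are invented for the example, and a real implementation would also
 * need an ordering barrier between the entry write-back and the tail update
 * for crash consistency; this sketch omits it.
 */
static void example_append_log(void *pmem_log, u64 *pmem_tail,
			       const void *entry, size_t len)
{
	size_t off = *pmem_tail;

	/* Copy the entry and write its cache lines back in one step. */
	memcpy_flushcache(pmem_log + off, entry, len);

	/* Publish the new tail, then write that line back as well. */
	*pmem_tail = off + len;
	arch_wb_cache_pmem(pmem_tail, sizeof(*pmem_tail));
}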