Contributors: 16

Author                      Tokens  Token %  Commits  Commit %
Matthew Wilcox                 123   28.60%        9    23.08%
John David Anglin               74   17.21%        5    12.82%
Andrew Morton                   43   10.00%        2     5.13%
Randolph Chung                  42    9.77%        1     2.56%
James Bottomley                 40    9.30%        3     7.69%
Helge Deller                    39    9.07%        3     7.69%
Linus Torvalds (pre-git)        33    7.67%        7    17.95%
Ralf Baechle                     7    1.63%        1     2.56%
Hugh Dickins                     6    1.40%        1     2.56%
Russell King                     5    1.16%        1     2.56%
Christoph Hellwig                4    0.93%        1     2.56%
Ilya Loginov                     4    0.93%        1     2.56%
Alexandre Ghiti                  4    0.93%        1     2.56%
Fabio M. De Francesco            3    0.70%        1     2.56%
Ira Weiny                        2    0.47%        1     2.56%
Greg Kroah-Hartman               1    0.23%        1     2.56%
Total                          430  100.00%       39   100.00%


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/tlbflush.h>

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

#include <linux/jump_label.h>

DECLARE_STATIC_KEY_TRUE(parisc_has_cache);
DECLARE_STATIC_KEY_TRUE(parisc_has_dcache);
DECLARE_STATIC_KEY_TRUE(parisc_has_icache);
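
/*
 * Illustrative sketch (not part of this header): arch code can gate the
 * flush paths on these static keys so machines without a given cache skip
 * the work at near-zero cost.  Assuming a kernel virtual range
 * [start, end):
 *
 *	if (static_branch_likely(&parisc_has_dcache))
 *		flush_kernel_dcache_range_asm(start, end);
 *	if (static_branch_likely(&parisc_has_icache))
 *		flush_kernel_icache_range_asm(start, end);
 */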

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

void flush_user_icache_range_asm(unsigned long, unsigned long);
void flush_kernel_icache_range_asm(unsigned long, unsigned long);
void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
void purge_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(const void *addr);
void flush_kernel_icache_page(void *addr);

/* Cache flush operations */

void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);

#define flush_kernel_dcache_range(start, size) \
	flush_kernel_dcache_range_asm((start), (start)+(size))
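
/*
 * Usage sketch (illustrative): write back dirty kernel-mapping lines for a
 * buffer the CPU has just filled, e.g. before a non-coherent device reads
 * it.  `buf` and `len` are hypothetical; note the macro takes a size, not
 * an end address:
 *
 *	memset(buf, 0, len);
 *	flush_kernel_dcache_range((unsigned long)buf, len);
 */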

/* The only way to flush a vmap range is to flush the whole cache. */
#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
void flush_kernel_vmap_range(void *vaddr, int size);
void invalidate_kernel_vmap_range(void *vaddr, int size);
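
/*
 * Usage sketch (illustrative) for I/O through a vmap alias, following the
 * protocol in Documentation/core-api/cachetlb.rst.  `vaddr` and `size` are
 * hypothetical:
 *
 *	flush_kernel_vmap_range(vaddr, size);      // CPU wrote via the alias
 *	... perform I/O against the underlying physical pages ...
 *	invalidate_kernel_vmap_range(vaddr, size); // before the CPU reads
 *	                                           // data the device wrote
 */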

void flush_cache_vmap(unsigned long start, unsigned long end);
#define flush_cache_vmap_early(start, end)	do { } while (0)
void flush_cache_vunmap(unsigned long start, unsigned long end);

void flush_dcache_folio(struct folio *folio);
#define flush_dcache_folio flush_dcache_folio
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}
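
/*
 * Usage sketch (illustrative): the portable pattern after the kernel
 * writes to a page cache page, per Documentation/core-api/cachetlb.rst.
 * `src` and `len` are hypothetical:
 *
 *	void *va = kmap_local_page(page);
 *	memcpy(va, src, len);
 *	kunmap_local(va);          // on parisc this also flushes; see
 *	flush_dcache_page(page);   // kunmap_flush_on_unmap() below
 */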

#define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)
#define flush_dcache_mmap_lock_irqsave(mapping, flags)		\
		xa_lock_irqsave(&mapping->i_pages, flags)
#define flush_dcache_mmap_unlock_irqrestore(mapping, flags)	\
		xa_unlock_irqrestore(&mapping->i_pages, flags)
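
/*
 * Sketch (illustrative) of the locking pattern: the i_pages lock doubles
 * as the flush lock while walking a file's user mappings, roughly as the
 * parisc flush_dcache_folio() implementation does:
 *
 *	flush_dcache_mmap_lock_irqsave(mapping, flags);
 *	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff)
 *		... flush each user alias of the folio ...
 *	flush_dcache_mmap_unlock_irqrestore(mapping, flags);
 */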

void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
		unsigned int nr);
#define flush_icache_pages flush_icache_pages

#define flush_icache_range(s, e)	do {		\
	flush_kernel_dcache_range_asm((s), (e));	\
	flush_kernel_icache_range_asm((s), (e));	\
} while (0)
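
/*
 * Usage sketch (illustrative): after storing instructions the kernel will
 * execute (module loading, kprobes, ...), both caches must be flushed.
 * `code`, `insns` and `len` are hypothetical:
 *
 *	memcpy(code, insns, len);
 *	flush_icache_range((unsigned long)code, (unsigned long)code + len);
 */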

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long user_vaddr, void *dst, void *src, int len);
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long user_vaddr, void *dst, void *src, int len);
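
/*
 * Usage sketch (illustrative): roughly the pattern access_process_vm()
 * uses when ptrace writes into another process's pages; all cache
 * maintenance happens inside copy_to_user_page():
 *
 *	maddr = kmap_local_page(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *	set_page_dirty_lock(page);
 *	kunmap_local(maddr);
 */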
void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		unsigned long pfn);
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
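
/*
 * Usage sketch (illustrative): flush_cache_range() runs before the page
 * tables for a user range change (munmap, mprotect, ...), paired with a
 * TLB flush afterwards:
 *
 *	flush_cache_range(vma, start, end); // write back user dirty lines
 *	... modify the page tables for [start, end) ...
 *	flush_tlb_range(vma, start, end);
 */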

#define ARCH_HAS_FLUSH_ANON_PAGE
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
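
/*
 * Usage sketch (illustrative): per Documentation/core-api/cachetlb.rst,
 * this is called when the kernel accesses the contents of an anonymous
 * page, e.g. one obtained via get_user_pages():
 *
 *	flush_anon_page(vma, page, user_vaddr); // user alias -> memory
 *	flush_dcache_page(page);                // kernel alias coherency
 *	... read the page through its kernel mapping ...
 */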

#define ARCH_HAS_FLUSH_ON_KUNMAP
void kunmap_flush_on_unmap(const void *addr);
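
/*
 * Illustrative effect: with ARCH_HAS_FLUSH_ON_KUNMAP defined,
 * kunmap_local()/kunmap_atomic() call kunmap_flush_on_unmap() so the
 * temporary kernel alias is flushed as it goes away:
 *
 *	void *va = kmap_local_page(page);
 *	... modify the data through va ...
 *	kunmap_local(va);   // kunmap_flush_on_unmap(va) runs here
 */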

#endif /* _PARISC_CACHEFLUSH_H */