Contributors: 12
Author Tokens Token Proportion Commits Commit Proportion
Andrew Morton 56 23.14% 2 6.45%
Linus Torvalds (pre-git) 56 23.14% 16 51.61%
David S. Miller 37 15.29% 4 12.90%
Matthew Wilcox 33 13.64% 1 3.23%
Pete Zaitcev 27 11.16% 1 3.23%
David Howells 10 4.13% 1 3.23%
Sam Ravnborg 8 3.31% 1 3.23%
Ralf Baechle 6 2.48% 1 3.23%
Ilya Loginov 4 1.65% 1 3.23%
Alexandre Ghiti 3 1.24% 1 3.23%
Greg Kroah-Hartman 1 0.41% 1 3.23%
Rob Radez 1 0.41% 1 3.23%
Total 242 31


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPARC_CACHEFLUSH_H
#define _SPARC_CACHEFLUSH_H

#include <linux/page-flags.h>
#include <asm/cachetlb_32.h>

/*
 * Cache-flush primitives are dispatched through the sparc32_cachetlb_ops
 * table (declared in asm/cachetlb_32.h) -- presumably filled in at boot
 * for the specific CPU/MMU variant; confirm against the cachetlb setup code.
 */
#define flush_cache_all() \
	sparc32_cachetlb_ops->cache_all()
#define flush_cache_mm(mm) \
	sparc32_cachetlb_ops->cache_mm(mm)
/* dup_mm uses the same op as flush_cache_mm() */
#define flush_cache_dup_mm(mm) \
	sparc32_cachetlb_ops->cache_mm(mm)
#define flush_cache_range(vma,start,end) \
	sparc32_cachetlb_ops->cache_range(vma, start, end)
/* Note: the pfn argument is accepted for the generic API but unused here */
#define flush_cache_page(vma,addr,pfn) \
	sparc32_cachetlb_ops->cache_page(vma, addr)
/* Defined away: no explicit I-cache range flush is performed on sparc32 */
#define flush_icache_range(start, end)		do { } while (0)

/*
 * Copy data to/from a page that is mapped into user space.  Both
 * directions flush the page's cache lines for the user mapping first,
 * then perform a plain memcpy; no post-copy flush is done here.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		flush_cache_page(vma, vaddr, page_to_pfn(page));\
		memcpy(dst, src, len);				\
	} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		flush_cache_page(vma, vaddr, page_to_pfn(page));\
		memcpy(dst, src, len);				\
	} while (0)

/* Push a page's cache contents out to RAM (ops-table dispatch). */
#define __flush_page_to_ram(addr) \
	sparc32_cachetlb_ops->page_to_ram(addr)
/* Flush after writing signal trampoline instructions at insn_addr. */
#define flush_sig_insns(mm,insn_addr) \
	sparc32_cachetlb_ops->sig_insns(mm, insn_addr)
/* Make a page consistent before a DMA transfer touches it. */
#define flush_page_for_dma(addr) \
	sparc32_cachetlb_ops->page_for_dma(addr)

/* Out-of-line implementations (defined elsewhere in arch/sparc). */
void sparc_flush_page_to_ram(struct page *page);
void sparc_flush_folio_to_ram(struct folio *folio);

/* Tell generic mm code this arch supplies its own flush_dcache_page(). */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
#define flush_dcache_folio(folio)		sparc_flush_folio_to_ram(folio)
/* Page-based wrapper: resolve the page's folio, then flush that folio. */
static inline void flush_dcache_page(struct page *page)
{
	struct folio *folio = page_folio(page);

	flush_dcache_folio(folio);
}
/* No locking needed around dcache/mmap flushing on sparc32: no-ops. */
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

/* vmap/vunmap of kernel virtual ranges triggers a full cache flush ... */
#define flush_cache_vmap(start, end)		flush_cache_all()
/* ... except for the early-boot variant, which is a no-op here */
#define flush_cache_vmap_early(start, end)	do { } while (0)
#define flush_cache_vunmap(start, end)		flush_cache_all()

/* When a context switch happens we must flush all user windows so that
 * the windows of the current process are flushed onto its stack. This
 * way the windows are all clean for the next process and the stack
 * frames are up to date.
 */
void flush_user_windows(void);	/* spill user register windows to the stack */
void kill_user_windows(void);	/* presumably discards windows without saving -- confirm at definition */
void flushw_all(void);		/* flush every register window (flushw-style) -- confirm at definition */

#endif /* _SPARC_CACHEFLUSH_H */