Release 4.14 arch/m68k/include/asm/cacheflush_no.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68KNOMMU_CACHEFLUSH_H
#define _M68KNOMMU_CACHEFLUSH_H
/*
 * (C) Copyright 2000-2010, Greg Ungerer <gerg@snapgear.com>
 */
#include <linux/mm.h>
#include <asm/mcfsim.h>
#define flush_cache_all() __flush_cache_all()
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_dcache_range(start, len) __flush_dcache_all()
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, len) __flush_icache_all()
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
void mcf_cache_push(void);
static inline void __clear_cache_all(void)
{
#ifdef CACHE_INVALIDATE
	/* write the invalidate command into the cache control register (CACR) */
	__asm__ __volatile__ (
		"movec %0, %%CACR\n\t"
		"nop\n\t"
		: : "r" (CACHE_INVALIDATE) );
#endif
}
static inline void __flush_cache_all(void)
{
#ifdef CACHE_PUSH
	mcf_cache_push();
#endif
	__clear_cache_all();
}
/*
 * Some ColdFire parts implement separate instruction and data caches;
 * on those we should just flush the appropriate cache. If we don't need
 * to do any specific flushing then this will be optimized away.
 */
static inline void __flush_icache_all(void)
{
#ifdef CACHE_INVALIDATEI
	__asm__ __volatile__ (
		"movec %0, %%CACR\n\t"
		"nop\n\t"
		: : "r" (CACHE_INVALIDATEI) );
#endif
}
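/*
 * Illustrative sketch only, not part of the API above: on a part whose
 * cache definitions (pulled in via <asm/mcfsim.h>) provide
 * CACHE_INVALIDATEI, flush_icache_range(start, len) reduces to the single
 * CACR write shown below once __flush_icache_all() is inlined; on a part
 * with no separate instruction cache the #ifdef body is empty and the
 * call compiles away entirely.
 */
#if 0	/* example expansion, for illustration only */
	__asm__ __volatile__ (
		"movec %0, %%CACR\n\t"
		"nop\n\t"
		: : "r" (CACHE_INVALIDATEI) );
#endif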
static inline void __flush_dcache_all(void)
{
#ifdef CACHE_PUSH
	mcf_cache_push();
#endif
#ifdef CACHE_INVALIDATED
	__asm__ __volatile__ (
		"movec %0, %%CACR\n\t"
		"nop\n\t"
		: : "r" (CACHE_INVALIDATED) );
#else
	/* Flush the write buffer */
	__asm__ __volatile__ ( "nop" );
#endif
}
/*
 * Push cache entries at supplied address. We want to write back any dirty
 * data and then invalidate the cache lines associated with this address.
 */
static inline void cache_push(unsigned long paddr, int len)
{
	__flush_cache_all();
}
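/*
 * Illustrative sketch only (hypothetical driver code, not part of this
 * header): cache_push() is the sort of call a nommu driver would make
 * before handing a buffer to a device for DMA, so that any dirty cache
 * lines covering the buffer are written back to memory first.
 */
#if 0	/* example usage, for illustration only */
static void example_dma_to_device(void *buf, int len)
{
	unsigned long paddr = virt_to_phys(buf);	/* flat mapping on nommu */

	cache_push(paddr, len);		/* write back dirty data, then invalidate */
	/* ... program the DMA controller with paddr and len here ... */
}
#endif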
/*
 * Clear cache entries at the supplied address (that is, don't write back
 * any dirty data).
 */
static inline void cache_clear(unsigned long paddr, int len)
{
	__clear_cache_all();
}
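/*
 * Illustrative sketch only (hypothetical driver code): cache_clear() fits
 * the receive side of the same hypothetical driver, after a device has
 * DMAed data into a buffer, where stale cache lines must be discarded but
 * nothing needs writing back.
 */
#if 0	/* example usage, for illustration only */
static void example_dma_from_device(void *buf, int len)
{
	unsigned long paddr = virt_to_phys(buf);

	/* ... wait for the DMA transfer into buf to complete ... */
	cache_clear(paddr, len);	/* discard stale lines, no write back */
	/* CPU reads of buf now see the freshly DMAed data */
}
#endif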
#endif /* _M68KNOMMU_CACHEFLUSH_H */
Overall Contributors
Person | Tokens | Token % | Commits | Commit % |
Greg Ungerer | 149 | 49.17% | 9 | 52.94% |
Alan Cox | 69 | 22.77% | 1 | 5.88% |
Andrew Morton | 66 | 21.78% | 2 | 11.76% |
Ralf Bächle | 7 | 2.31% | 1 | 5.88% |
Philippe De Muyter | 7 | 2.31% | 2 | 11.76% |
Ilya Loginov | 4 | 1.32% | 1 | 5.88% |
Greg Kroah-Hartman | 1 | 0.33% | 1 | 5.88% |
Total | 303 | 100.00% | 17 | 100.00% |