
Release 4.14 arch/alpha/include/asm/cacheflush.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_CACHEFLUSH_H
#define _ALPHA_CACHEFLUSH_H

#include <linux/mm.h>

/* Caches aren't brain-dead on the Alpha. */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page)			do { } while (0)
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)
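
Every flush helper above expands to do { } while (0) rather than to nothing or to empty braces, so a call followed by a semicolon remains a single well-formed statement even as the lone body of an if/else. A minimal, self-contained sketch (plain userspace C, not kernel code; touch_page is a made-up caller, the macro is copied from above):

#include <stdio.h>

/* Same shape as the Alpha definitions above: a statement-like no-op. */
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)

static void touch_page(int have_vma)
{
	/* Because the macro expands to one statement, it is safe as an
	   un-braced if body; an expansion to bare "{}" followed by the
	   caller's semicolon would orphan the else and fail to compile. */
	if (have_vma)
		flush_cache_page(NULL, 0, 0);
	else
		printf("no vma\n");
}

int main(void)
{
	touch_page(1);
	touch_page(0);
	return 0;
}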

/* Note that the following two definitions are _highly_ dependent
   on the contexts in which they are used in the kernel.  I personally
   think it is criminal how loosely defined these macros are.  */

/* We need to flush the kernel's icache after loading modules.  The
   only other use of this macro is in load_aout_interp which is not
   used on Alpha. 

   Note that this definition should *not* be used for userspace
   icache flushing.  While functional, it is _way_ overkill.  The
   icache is tagged with ASNs and it suffices to allocate a new ASN
   for the process.  */
#ifndef CONFIG_SMP
#define flush_icache_range(start, end)		imb()
#else
#define flush_icache_range(start, end)		smp_imb()
extern void smp_imb(void);
#endif
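
As the comment above says, the expected caller is the module loader: copy instructions into kernel memory, then flush the just-written range before executing it. A hedged sketch of such a caller; install_kernel_text and its parameters are illustrative only, and just flush_icache_range comes from this header:

/* Hypothetical caller, not kernel API: dst/new_text/text_size are illustrative. */
static void install_kernel_text(void *dst, const void *new_text, size_t text_size)
{
	memcpy(dst, new_text, text_size);

	/* On UP this is a single imb(); on SMP it broadcasts via smp_imb(). */
	flush_icache_range((unsigned long)dst,
			   (unsigned long)dst + text_size);
}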

/* We need to flush the userspace icache after setting breakpoints in
   ptrace.

   Instead of indiscriminately using imb, take advantage of the fact
   that icache entries are tagged with the ASN and load a new mm context.  */
/* ??? Ought to use this in arch/alpha/kernel/signal.c too.  */

#ifndef CONFIG_SMP
#include <linux/sched.h>
extern void __load_new_mm_context(struct mm_struct *);
static inline void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	if (vma->vm_flags & VM_EXEC) {
		struct mm_struct *mm = vma->vm_mm;
		if (current->active_mm == mm)
			__load_new_mm_context(mm);
		else
			mm->context[smp_processor_id()] = 0;
	}
}

Contributors

Person               Tokens   Prop      Commits   Commit Prop
Richard Henderson    67       100.00%   1         100.00%
Total                67       100.00%   1         100.00%

#else
extern void flush_icache_user_range(struct vm_area_struct *vma,
		struct page *page, unsigned long addr, int len);
#endif

/* This is used only in __do_fault and do_swap_page. */
#define flush_icache_page(vma, page) \
	flush_icache_user_range((vma), (page), 0, 0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { memcpy(dst, src, len); \
     flush_icache_user_range(vma, page, vaddr, len); \
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif /* _ALPHA_CACHEFLUSH_H */
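
copy_to_user_page is the hook used by the ptrace path described above: generic code writes the breakpoint bytes through a kernel mapping of the target page, and the macro pairs the memcpy with flush_icache_user_range so the traced task's icache picks up the new instruction. A hedged sketch of such a caller; poke_breakpoint and BREAK-style opcode handling are illustrative, while kmap/kunmap and copy_to_user_page are the interfaces assumed here:

/* Illustrative only: patch one instruction word in a traced process's page.
   vma/page/vaddr describe the user mapping; insn is the breakpoint opcode. */
static void poke_breakpoint(struct vm_area_struct *vma, struct page *page,
			    unsigned long vaddr, u32 insn)
{
	void *kaddr = kmap(page);

	/* memcpy into the kernel mapping, then flush_icache_user_range()
	   so the user's icache sees the new instruction (see above). */
	copy_to_user_page(vma, page, vaddr,
			  kaddr + (vaddr & ~PAGE_MASK), &insn, sizeof(insn));

	kunmap(page);
}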

Overall Contributors

Person               Tokens   Prop      Commits   Commit Prop
Richard Henderson    203      70.24%    1         11.11%
Andrew Morton        68       23.53%    2         22.22%
Ralf Bächle          7        2.42%     1         11.11%
Ilya Loginov         4        1.38%     1         11.11%
Tejun Heo            3        1.04%     1         11.11%
David S. Miller      2        0.69%     1         11.11%
Ryota Ozaki          1        0.35%     1         11.11%
Greg Kroah-Hartman   1        0.35%     1         11.11%
Total                289      100.00%   9         100.00%
Created with cregit.