cregit-Linux: how code gets into the kernel

Release 4.14 arch/sh/mm/mmap.c

Directory: arch/sh/mm
/*
 * arch/sh/mm/mmap.c
 *
 * Copyright (C) 2008 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>


/*
 * Mask used to give shared mappings the same cache colour.  The default
 * of PAGE_SIZE - 1 corresponds to sane (non-aliasing) caches.
 */
unsigned long shm_align_mask = PAGE_SIZE - 1;
EXPORT_SYMBOL(shm_align_mask);

#ifdef CONFIG_MMU
/*
 * To avoid cache aliases, we map the shared page with same color.
 */

/*
 * Round @addr up to the next boundary that has the same cache colour
 * as file offset @pgoff, so shared mappings do not alias in virtually
 * indexed caches.
 */
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}

Contributors

PersonTokensPropCommitsCommitProp
Paul Mundt46100.00%2100.00%
Total46100.00%2100.00%


unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; int do_colour_align; struct vm_unmapped_area_info info; if (flags & MAP_FIXED) { /* We do not accept a shared mapping if it would violate * cache aliasing constraints. */ if ((flags & MAP_SHARED) && ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) return -EINVAL; return addr; } if (unlikely(len > TASK_SIZE)) return -ENOMEM; do_colour_align = 0; if (filp || (flags & MAP_SHARED)) do_colour_align = 1; if (addr) { if (do_colour_align) addr = COLOUR_ALIGN(addr, pgoff); else addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && (!vma || addr + len <= vm_start_gap(vma))) return addr; } info.flags = 0; info.length = len; info.low_limit = TASK_UNMAPPED_BASE; info.high_limit = TASK_SIZE; info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0; info.align_offset = pgoff << PAGE_SHIFT; return vm_unmapped_area(&info); }

Contributors

PersonTokensPropCommitsCommitProp
Paul Mundt18178.35%125.00%
Michel Lespinasse3916.88%125.00%
Al Viro83.46%125.00%
Hugh Dickins31.30%125.00%
Total231100.00%4100.00%


unsigned long arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, const unsigned long flags) { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; unsigned long addr = addr0; int do_colour_align; struct vm_unmapped_area_info info; if (flags & MAP_FIXED) { /* We do not accept a shared mapping if it would violate * cache aliasing constraints. */ if ((flags & MAP_SHARED) && ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) return -EINVAL; return addr; } if (unlikely(len > TASK_SIZE)) return -ENOMEM; do_colour_align = 0; if (filp || (flags & MAP_SHARED)) do_colour_align = 1; /* requesting a specific address */ if (addr) { if (do_colour_align) addr = COLOUR_ALIGN(addr, pgoff); else addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && (!vma || addr + len <= vm_start_gap(vma))) return addr; } info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; info.low_limit = PAGE_SIZE; info.high_limit = mm->mmap_base; info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0; info.align_offset = pgoff << PAGE_SHIFT; addr = vm_unmapped_area(&info); /* * A failed mmap() very likely causes application failure, * so fall back to the bottom-up function here. This scenario * can happen with large stack limits and large mmap() * allocations. */ if (addr & ~PAGE_MASK) { VM_BUG_ON(addr != -ENOMEM); info.flags = 0; info.low_limit = TASK_UNMAPPED_BASE; info.high_limit = TASK_SIZE; addr = vm_unmapped_area(&info); } return addr; }

Contributors

PersonTokensPropCommitsCommitProp
Paul Mundt22476.71%133.33%
Michel Lespinasse6522.26%133.33%
Hugh Dickins31.03%133.33%
Total292100.00%3100.00%

#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem. This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	/*
	 * Reject ranges starting below physical RAM or extending past
	 * the end of directly mapped memory.
	 */
	if (addr < __MEMORY_START || addr + count > __pa(high_memory))
		return 0;

	return 1;
}

Contributors

PersonTokensPropCommitsCommitProp
Paul Mundt3697.30%266.67%
Cyril Chemparathy12.70%133.33%
Total37100.00%3100.00%


/* mmap() of any physical address range is permitted on sh. */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}

Contributors

PersonTokensPropCommitsCommitProp
Paul Mundt15100.00%1100.00%
Total15100.00%1100.00%


Overall Contributors

PersonTokensPropCommitsCommitProp
Paul Mundt54381.65%550.00%
Michel Lespinasse10415.64%110.00%
Al Viro81.20%110.00%
Hugh Dickins60.90%110.00%
Ingo Molnar30.45%110.00%
Cyril Chemparathy10.15%110.00%
Total665100.00%10100.00%
Directory: arch/sh/mm
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.