Linux 4.14: arch/sh/mm/mmap.c
/*
 * arch/sh/mm/mmap.c
 *
 * Copyright (C) 2008 - 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>
/* Mask covering the cache-alias ("colour") bits of a virtual address. */
unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);
#ifdef CONFIG_MMU
/*
 * To avoid cache aliases, we map the shared page with the same colour.
 */
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Paul Mundt | 46 | 100.00% | 2 | 100.00% |
Total | 46 | 100.00% | 2 | 100.00% |
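COLOUR_ALIGN() rounds the hint address up to an alias boundary, then adds back the colour bits of the file offset, so every mapping of a given page lands in the same cache set. A minimal user-space sketch of the same arithmetic, assuming a 16 KiB alias span (shm_align_mask = 0x3fff) and 4 KiB pages; the addresses are illustrative only:

#include <stdio.h>

#define PAGE_SHIFT 12
static unsigned long shm_align_mask = 0x3fff;	/* assumed: 16 KiB span - 1 */

static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
	return base + off;
}

int main(void)
{
	/* Hint 0x10001000 with a 3-page file offset: the result,
	 * 0x10007000, keeps colour bits 0x3000 to match the offset. */
	printf("0x%lx\n", colour_align(0x10001000UL, 3));
	return 0;
}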
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Paul Mundt | 181 | 78.35% | 1 | 25.00% |
Michel Lespinasse | 39 | 16.88% | 1 | 25.00% |
Al Viro | 8 | 3.46% | 1 | 25.00% |
Hugh Dickins | 3 | 1.30% | 1 | 25.00% |
Total | 231 | 100.00% | 4 | 100.00% |
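Note that the MAP_FIXED branch above is the only path that fails with -EINVAL: a fixed shared mapping whose address colour disagrees with its file offset's colour would create an illegal alias. A user-space sketch of triggering that rejection; the file path, address, and offset are assumed for illustration, and the rejection only occurs on a CPU whose shm_align_mask spans more than one page:

#include <errno.h>
#include <stdio.h>
#include <fcntl.h>
#include <sys/mman.h>

int main(void)
{
	int fd = open("/tmp/shared.bin", O_RDWR);	/* assumed to exist, >= 8 KiB */
	if (fd < 0)
		return 1;

	/* pgoff is one page (colour bits 0x1000), but 0x10000000 has
	 * colour 0; with shm_align_mask = 0x3fff the difference check
	 * in arch_get_unmapped_area() yields 0x3000, so mmap() fails. */
	void *p = mmap((void *)0x10000000, 4096, PROT_READ,
		       MAP_SHARED | MAP_FIXED, fd, 4096);
	if (p == MAP_FAILED && errno == EINVAL)
		printf("rejected: mapping would violate cache aliasing\n");
	return 0;
}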
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Paul Mundt | 224 | 76.71% | 1 | 33.33% |
Michel Lespinasse | 65 | 22.26% | 1 | 33.33% |
Hugh Dickins | 3 | 1.03% | 1 | 33.33% |
Total | 292 | 100.00% | 3 | 100.00% |
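The fallback test works because vm_unmapped_area() packs a negative errno into the returned unsigned long: a real mapping address is always page aligned, so any set bits below the page boundary mark an error. A standalone sketch of that encoding, with constants assumed for a 32-bit target with 4 KiB pages:

#include <stdio.h>

#define PAGE_MASK	(~0xfffUL)	/* assumed: 4 KiB pages */
#define ENOMEM		12

int main(void)
{
	unsigned long ok  = 0x29551000UL;		/* page-aligned address */
	unsigned long err = (unsigned long)-ENOMEM;	/* 0xfffffff4 on 32-bit */

	/* Valid results have no bits below the page boundary set;
	 * -ENOMEM always does, so the check singles it out. */
	printf("ok  is error? %d\n", (ok  & ~PAGE_MASK) != 0);
	printf("err is error? %d\n", (err & ~PAGE_MASK) != 0);
	return 0;
}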
#endif /* CONFIG_MMU */
/*
 * You really shouldn't be using read() or write() on /dev/mem. This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	if (addr < __MEMORY_START)
		return 0;
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Paul Mundt | 36 | 97.30% | 2 | 66.67% |
Cyril Chemparathy | 1 | 2.70% | 1 | 33.33% |
Total | 37 | 100.00% | 3 | 100.00% |
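The check brackets a /dev/mem access between the start of RAM and the top of lowmem; anything outside [__MEMORY_START, __pa(high_memory)) is refused. A worked sketch of the same logic with assumed values (RAM at physical 0x0c000000 and 64 MiB of lowmem, typical of many SH boards):

#include <stdio.h>

#define MEMORY_START	0x0c000000UL	/* assumed start of RAM */
#define HIGH_MEM_PA	0x10000000UL	/* assumed __pa(high_memory) */

static int in_range(unsigned long addr, unsigned long count)
{
	if (addr < MEMORY_START)
		return 0;
	if (addr + count > HIGH_MEM_PA)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", in_range(0x0c001000UL, 4096));	/* 1: inside RAM */
	printf("%d\n", in_range(0x00000000UL, 4096));	/* 0: below RAM */
	printf("%d\n", in_range(0x0ffff000UL, 8192));	/* 0: runs past lowmem */
	return 0;
}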
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Paul Mundt | 15 | 100.00% | 1 | 100.00% |
Total | 15 | 100.00% | 1 | 100.00% |
Overall Contributors
Person | Tokens | Token % | Commits | Commit % |
Paul Mundt | 543 | 81.65% | 5 | 50.00% |
Michel Lespinasse | 104 | 15.64% | 1 | 10.00% |
Al Viro | 8 | 1.20% | 1 | 10.00% |
Hugh Dickins | 6 | 0.90% | 1 | 10.00% |
Ingo Molnar | 3 | 0.45% | 1 | 10.00% |
Cyril Chemparathy | 1 | 0.15% | 1 | 10.00% |
Total | 665 | 100.00% | 10 | 100.00% |