Release 4.11 mm/mremap.c
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/mm-arch-hooks.h>
#include <linux/userfaultfd_k.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "internal.h"
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        if (pgd_none_or_clear_bad(pgd))
                return NULL;

        p4d = p4d_offset(pgd, addr);
        if (p4d_none_or_clear_bad(p4d))
                return NULL;

        pud = pud_offset(p4d, addr);
        if (pud_none_or_clear_bad(pud))
                return NULL;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return NULL;

        return pmd;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Andrew Morton | 58 | 51.79% | 2 | 25.00% |
Kirill A. Shutemov | 24 | 21.43% | 1 | 12.50% |
Andi Kleen | 22 | 19.64% | 1 | 12.50% |
Linus Torvalds (pre-git) | 3 | 2.68% | 1 | 12.50% |
Hugh Dickins | 3 | 2.68% | 2 | 25.00% |
Andrea Arcangeli | 2 | 1.79% | 1 | 12.50% |
Total | 112 | 100.00% | 8 | 100.00% |
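get_old_pmd() walks the existing page-table hierarchy (pgd → p4d → pud → pmd) for the source address and bails out at the first missing or bad level; it deliberately stops at the pmd because move_ptes() maps and locks the pte level itself. Purely as a hedged sketch (not part of this file), a helper that continues the same walk one level further might look like the following, assuming the caller has already excluded huge pmds the way move_page_tables() does:

/* Illustrative sketch only: extend the walk from get_old_pmd() down to the
 * pte level, returning the mapped pte with its page-table lock held.
 * Assumes the caller has already ruled out a transparent huge pmd, as
 * move_page_tables() does before calling move_ptes(). */
static pte_t *get_old_pte(struct mm_struct *mm, unsigned long addr,
                          spinlock_t **ptlp)
{
        pmd_t *pmd = get_old_pmd(mm, addr);

        if (!pmd)
                return NULL;
        /* Maps the pte page and acquires its lock; pair with pte_unmap_unlock(). */
        return pte_offset_map_lock(mm, pmd, addr, ptlp);
}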
static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
                            unsigned long addr)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        p4d = p4d_alloc(mm, pgd, addr);
        if (!p4d)
                return NULL;
        pud = pud_alloc(mm, p4d, addr);
        if (!pud)
                return NULL;
        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
                return NULL;

        VM_BUG_ON(pmd_trans_huge(*pmd));

        return pmd;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Linus Torvalds (pre-git) | 34 | 29.57% | 1 | 14.29% |
Andi Kleen | 30 | 26.09% | 1 | 14.29% |
Kirill A. Shutemov | 24 | 20.87% | 1 | 14.29% |
Andrea Arcangeli | 14 | 12.17% | 1 | 14.29% |
Hugh Dickins | 11 | 9.57% | 2 | 28.57% |
Linus Torvalds | 2 | 1.74% | 1 | 14.29% |
Total | 115 | 100.00% | 7 | 100.00% |
static void take_rmap_locks(struct vm_area_struct *vma)
{
        if (vma->vm_file)
                i_mmap_lock_write(vma->vm_file->f_mapping);
        if (vma->anon_vma)
                anon_vma_lock_write(vma->anon_vma);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Hugh Dickins | 39 | 100.00% | 1 | 100.00% |
Total | 39 | 100.00% | 1 | 100.00% |
static void drop_rmap_locks(struct vm_area_struct *vma)
{
        if (vma->anon_vma)
                anon_vma_unlock_write(vma->anon_vma);
        if (vma->vm_file)
                i_mmap_unlock_write(vma->vm_file->f_mapping);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Hugh Dickins | 39 | 100.00% | 1 | 100.00% |
Total | 39 | 100.00% | 1 | 100.00% |
static pte_t move_soft_dirty_pte(pte_t pte)
{
        /*
         * Set soft dirty bit so we can notice
         * in userspace the ptes were moved.
         */
#ifdef CONFIG_MEM_SOFT_DIRTY
        if (pte_present(pte))
                pte = pte_mksoft_dirty(pte);
        else if (is_swap_pte(pte))
                pte = pte_swp_mksoft_dirty(pte);
#endif
        return pte;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Cyrill V. Gorcunov | 47 | 100.00% | 1 | 100.00% |
Total | 47 | 100.00% | 1 | 100.00% |
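move_soft_dirty_pte() marks moved ptes soft-dirty so that userspace change tracking (e.g. CRIU-style incremental memory snapshots) does not lose sight of pages whose ptes were rewritten by mremap rather than by a store. As a hedged userspace sketch (not from this file), the bit can be observed through /proc/self/pagemap, where soft-dirty is bit 55 of each 64-bit entry when CONFIG_MEM_SOFT_DIRTY is enabled:

/* Hedged userspace sketch: read the soft-dirty bit (bit 55 of a
 * /proc/self/pagemap entry) for the page containing addr.
 * Assumes a kernel built with CONFIG_MEM_SOFT_DIRTY. */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

static int page_soft_dirty(void *addr)
{
        uint64_t entry;
        long pagesize = sysconf(_SC_PAGESIZE);
        off_t off = ((uintptr_t)addr / pagesize) * sizeof(entry);
        int fd = open("/proc/self/pagemap", O_RDONLY);

        if (fd < 0)
                return -1;
        if (pread(fd, &entry, sizeof(entry), off) != sizeof(entry)) {
                close(fd);
                return -1;
        }
        close(fd);
        return (entry >> 55) & 1;       /* bit 55: soft-dirty */
}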
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                unsigned long old_addr, unsigned long old_end,
                struct vm_area_struct *new_vma, pmd_t *new_pmd,
                unsigned long new_addr, bool need_rmap_locks, bool *need_flush)
{
        struct mm_struct *mm = vma->vm_mm;
        pte_t *old_pte, *new_pte, pte;
        spinlock_t *old_ptl, *new_ptl;
        bool force_flush = false;
        unsigned long len = old_end - old_addr;

        /*
         * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
         * locks to ensure that rmap will always observe either the old or the
         * new ptes. This is the easiest way to avoid races with
         * truncate_pagecache(), page migration, etc...
         *
         * When need_rmap_locks is false, we use other ways to avoid
         * such races:
         *
         * - During exec() shift_arg_pages(), we use a specially tagged vma
         *   which rmap call sites look for using is_vma_temporary_stack().
         *
         * - During mremap(), new_vma is often known to be placed after vma
         *   in rmap traversal order. This ensures rmap will always observe
         *   either the old pte, or the new pte, or both (the page table locks
         *   serialize access to individual ptes, but only rmap traversal
         *   order guarantees that we won't miss both the old and new ptes).
         */
        if (need_rmap_locks)
                take_rmap_locks(vma);

        /*
         * We don't have to worry about the ordering of src and dst
         * pte locks because exclusive mmap_sem prevents deadlock.
         */
        old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
        new_pte = pte_offset_map(new_pmd, new_addr);
        new_ptl = pte_lockptr(mm, new_pmd);
        if (new_ptl != old_ptl)
                spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
        arch_enter_lazy_mmu_mode();

        for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
                                   new_pte++, new_addr += PAGE_SIZE) {
                if (pte_none(*old_pte))
                        continue;

                pte = ptep_get_and_clear(mm, old_addr, old_pte);
                /*
                 * If we are remapping a dirty PTE, make sure
                 * to flush TLB before we drop the PTL for the
                 * old PTE or we may race with page_mkclean().
                 *
                 * This check has to be done after we removed the
                 * old PTE from page tables or another thread may
                 * dirty it after the check and before the removal.
                 */
                if (pte_present(pte) && pte_dirty(pte))
                        force_flush = true;
                pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
                pte = move_soft_dirty_pte(pte);
                set_pte_at(mm, new_addr, new_pte, pte);
        }

        arch_leave_lazy_mmu_mode();
        if (new_ptl != old_ptl)
                spin_unlock(new_ptl);
        pte_unmap(new_pte - 1);
        if (force_flush)
                flush_tlb_range(vma, old_end - len, old_end);
        else
                *need_flush = true;
        pte_unmap_unlock(old_pte - 1, old_ptl);
        if (need_rmap_locks)
                drop_rmap_locks(vma);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Hugh Dickins | 110 | 37.54% | 5 | 19.23% |
Aaron Lu | 66 | 22.53% | 2 | 7.69% |
Linus Torvalds (pre-git) | 26 | 8.87% | 1 | 3.85% |
Andrew Morton | 26 | 8.87% | 5 | 19.23% |
Nicholas Piggin | 13 | 4.44% | 2 | 7.69% |
Kanoj Sarcar | 11 | 3.75% | 1 | 3.85% |
Michel Lespinasse | 10 | 3.41% | 2 | 7.69% |
Ingo Molnar | 8 | 2.73% | 2 | 7.69% |
Cyrill V. Gorcunov | 7 | 2.39% | 1 | 3.85% |
Zachary Amsden | 6 | 2.05% | 1 | 3.85% |
David S. Miller | 5 | 1.71% | 1 | 3.85% |
Linus Torvalds | 2 | 0.68% | 1 | 3.85% |
Peter Zijlstra | 2 | 0.68% | 1 | 3.85% |
Davidlohr Bueso A | 1 | 0.34% | 1 | 3.85% |
Total | 293 | 100.00% | 26 | 100.00% |
#define LATENCY_LIMIT (64 * PAGE_SIZE)
unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
                unsigned long new_addr, unsigned long len,
                bool need_rmap_locks)
{
        unsigned long extent, next, old_end;
        pmd_t *old_pmd, *new_pmd;
        bool need_flush = false;
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */

        old_end = old_addr + len;
        flush_cache_range(vma, old_addr, old_end);

        mmun_start = old_addr;
        mmun_end = old_end;
        mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);

        for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
                cond_resched();
                next = (old_addr + PMD_SIZE) & PMD_MASK;
                /* even if next overflowed, extent below will be ok */
                extent = next - old_addr;
                if (extent > old_end - old_addr)
                        extent = old_end - old_addr;
                old_pmd = get_old_pmd(vma->vm_mm, old_addr);
                if (!old_pmd)
                        continue;
                new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
                if (!new_pmd)
                        break;
                if (pmd_trans_huge(*old_pmd)) {
                        if (extent == HPAGE_PMD_SIZE) {
                                bool moved;
                                /* See comment in move_ptes() */
                                if (need_rmap_locks)
                                        take_rmap_locks(vma);
                                moved = move_huge_pmd(vma, old_addr, new_addr,
                                                      old_end, old_pmd, new_pmd,
                                                      &need_flush);
                                if (need_rmap_locks)
                                        drop_rmap_locks(vma);
                                if (moved)
                                        continue;
                        }
                        split_huge_pmd(vma, old_pmd, old_addr);
                        if (pmd_trans_unstable(old_pmd))
                                continue;
                }
                if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr))
                        break;
                next = (new_addr + PMD_SIZE) & PMD_MASK;
                if (extent > next - new_addr)
                        extent = next - new_addr;
                if (extent > LATENCY_LIMIT)
                        extent = LATENCY_LIMIT;
                move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
                          new_pmd, new_addr, need_rmap_locks, &need_flush);
        }
        if (need_flush)
                flush_tlb_range(vma, old_end - len, old_addr);

        mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);

        return len + old_addr - old_end;        /* how much done */
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Hugh Dickins | 145 | 39.19% | 4 | 17.39% |
Andrea Arcangeli | 108 | 29.19% | 4 | 17.39% |
Linus Torvalds (pre-git) | 36 | 9.73% | 2 | 8.70% |
Kirill A. Shutemov | 35 | 9.46% | 6 | 26.09% |
Sagi Grimberg | 22 | 5.95% | 1 | 4.35% |
Andrew Morton | 8 | 2.16% | 1 | 4.35% |
Aaron Lu | 6 | 1.62% | 1 | 4.35% |
Michel Lespinasse | 5 | 1.35% | 1 | 4.35% |
Linus Torvalds | 3 | 0.81% | 1 | 4.35% |
Naoya Horiguchi | 1 | 0.27% | 1 | 4.35% |
Kanoj Sarcar | 1 | 0.27% | 1 | 4.35% |
Total | 370 | 100.00% | 23 | 100.00% |
static unsigned long move_vma(struct vm_area_struct *vma,
                unsigned long old_addr, unsigned long old_len,
                unsigned long new_len, unsigned long new_addr,
                bool *locked, struct vm_userfaultfd_ctx *uf,
                struct list_head *uf_unmap)
{
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *new_vma;
        unsigned long vm_flags = vma->vm_flags;
        unsigned long new_pgoff;
        unsigned long moved_len;
        unsigned long excess = 0;
        unsigned long hiwater_vm;
        int split = 0;
        int err;
        bool need_rmap_locks;

        /*
         * We'd prefer to avoid failure later on in do_munmap:
         * which may split one vma into three before unmapping.
         */
        if (mm->map_count >= sysctl_max_map_count - 3)
                return -ENOMEM;

        /*
         * Advise KSM to break any KSM pages in the area to be moved:
         * it would be confusing if they were to turn up at the new
         * location, where they happen to coincide with different KSM
         * pages recently unmapped. But leave vma->vm_flags as it was,
         * so KSM can come around to merge on vma and new_vma afterwards.
         */
        err = ksm_madvise(vma, old_addr, old_addr + old_len,
                          MADV_UNMERGEABLE, &vm_flags);
        if (err)
                return err;

        new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
        new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
                           &need_rmap_locks);
        if (!new_vma)
                return -ENOMEM;

        moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
                                     need_rmap_locks);
        if (moved_len < old_len) {
                err = -ENOMEM;
        } else if (vma->vm_ops && vma->vm_ops->mremap) {
                err = vma->vm_ops->mremap(new_vma);
        }

        if (unlikely(err)) {
                /*
                 * On error, move entries back from new area to old,
                 * which will succeed since page tables still there,
                 * and then proceed to unmap new area instead of old.
                 */
                move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
                                 true);
                vma = new_vma;
                old_len = new_len;
                old_addr = new_addr;
                new_addr = err;
        } else {
                mremap_userfaultfd_prep(new_vma, uf);
                arch_remap(mm, old_addr, old_addr + old_len,
                           new_addr, new_addr + new_len);
        }

        /* Conceal VM_ACCOUNT so old reservation is not undone */
        if (vm_flags & VM_ACCOUNT) {
                vma->vm_flags &= ~VM_ACCOUNT;
                excess = vma->vm_end - vma->vm_start - old_len;
                if (old_addr > vma->vm_start &&
                    old_addr + old_len < vma->vm_end)
                        split = 1;
        }

        /*
         * If we failed to move page tables we still do total_vm increment
         * since do_munmap() will decrement it by old_len == new_len.
         *
         * Since total_vm is about to be raised artificially high for a
         * moment, we need to restore high watermark afterwards: if stats
         * are taken meanwhile, total_vm and hiwater_vm appear too high.
         * If this were a serious issue, we'd add a flag to do_munmap().
         */
        hiwater_vm = mm->hiwater_vm;
        vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

        /* Tell pfnmap has moved from this vma */
        if (unlikely(vma->vm_flags & VM_PFNMAP))
                untrack_pfn_moved(vma);

        if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
                /* OOM: unable to split vma, just get accounts right */
                vm_unacct_memory(excess >> PAGE_SHIFT);
                excess = 0;
        }
        mm->hiwater_vm = hiwater_vm;

        /* Restore VM_ACCOUNT if one or two pieces of vma left */
        if (excess) {
                vma->vm_flags |= VM_ACCOUNT;
                if (split)
                        vma->vm_next->vm_flags |= VM_ACCOUNT;
        }

        if (vm_flags & VM_LOCKED) {
                mm->locked_vm += new_len >> PAGE_SHIFT;
                *locked = true;
        }

        return new_addr;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Hugh Dickins | 124 | 26.33% | 8 | 29.63% |
Andrew Morton | 109 | 23.14% | 2 | 7.41% |
Linus Torvalds (pre-git) | 72 | 15.29% | 5 | 18.52% |
Pavel Emelyanov | 30 | 6.37% | 2 | 7.41% |
Oleg Nesterov | 27 | 5.73% | 2 | 7.41% |
Linus Torvalds | 26 | 5.52% | 2 | 7.41% |
Al Viro | 25 | 5.31% | 1 | 3.70% |
Laurent Dufour | 18 | 3.82% | 1 | 3.70% |
Toshi Kani | 17 | 3.61% | 1 | 3.70% |
Michel Lespinasse | 16 | 3.40% | 2 | 7.41% |
Mike Rapoport | 7 | 1.49% | 1 | 3.70% |
Total | 471 | 100.00% | 27 | 100.00% |
static struct vm_area_struct *vma_to_resize(unsigned long addr,
        unsigned long old_len, unsigned long new_len, unsigned long *p)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = find_vma(mm, addr);
        unsigned long pgoff;

        if (!vma || vma->vm_start > addr)
                return ERR_PTR(-EFAULT);

        if (is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        /* We can't remap across vm area boundaries */
        if (old_len > vma->vm_end - addr)
                return ERR_PTR(-EFAULT);

        if (new_len == old_len)
                return vma;

        /* Need to be careful about a growing mapping */
        pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
                return ERR_PTR(-EINVAL);

        if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
                return ERR_PTR(-EFAULT);

        if (vma->vm_flags & VM_LOCKED) {
                unsigned long locked, lock_limit;
                locked = mm->locked_vm << PAGE_SHIFT;
                lock_limit = rlimit(RLIMIT_MEMLOCK);
                locked += new_len - old_len;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        return ERR_PTR(-EAGAIN);
        }

        if (!may_expand_vm(mm, vma->vm_flags,
                                (new_len - old_len) >> PAGE_SHIFT))
                return ERR_PTR(-ENOMEM);

        if (vma->vm_flags & VM_ACCOUNT) {
                unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
                if (security_vm_enough_memory_mm(mm, charged))
                        return ERR_PTR(-ENOMEM);
                *p = charged;
        }

        return vma;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Al Viro | 180 | 61.02% | 2 | 28.57% |
Derek | 51 | 17.29% | 1 | 14.29% |
Linus Torvalds | 31 | 10.51% | 1 | 14.29% |
Oleg Nesterov | 26 | 8.81% | 1 | 14.29% |
Konstantin Khlebnikov | 4 | 1.36% | 1 | 14.29% |
Jiri Slaby | 3 | 1.02% | 1 | 14.29% |
Total | 295 | 100.00% | 7 | 100.00% |
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
                unsigned long new_addr, unsigned long new_len, bool *locked,
                struct vm_userfaultfd_ctx *uf,
                struct list_head *uf_unmap)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;
        unsigned long charged = 0;
        unsigned long map_flags;

        if (offset_in_page(new_addr))
                goto out;

        if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
                goto out;

        /* Ensure the old/new locations do not overlap */
        if (addr + old_len > new_addr && new_addr + new_len > addr)
                goto out;

        ret = do_munmap(mm, new_addr, new_len, NULL);
        if (ret)
                goto out;

        if (old_len >= new_len) {
                ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
                if (ret && old_len != new_len)
                        goto out;
                old_len = new_len;
        }

        vma = vma_to_resize(addr, old_len, new_len, &charged);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out;
        }

        map_flags = MAP_FIXED;
        if (vma->vm_flags & VM_MAYSHARE)
                map_flags |= MAP_SHARED;

        ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
                                ((addr - vma->vm_start) >> PAGE_SHIFT),
                                map_flags);
        if (offset_in_page(ret))
                goto out1;

        ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, uf,
                       uf_unmap);
        if (!(offset_in_page(ret)))
                goto out;
out1:
        vm_unacct_memory(charged);

out:
        return ret;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Al Viro | 132 | 43.00% | 2 | 13.33% |
Linus Torvalds (pre-git) | 96 | 31.27% | 5 | 33.33% |
Andrew Morton | 32 | 10.42% | 2 | 13.33% |
Mike Rapoport | 11 | 3.58% | 1 | 6.67% |
Hugh Dickins | 9 | 2.93% | 1 | 6.67% |
Alexander Kuleshov | 9 | 2.93% | 1 | 6.67% |
Pavel Emelyanov | 7 | 2.28% | 1 | 6.67% |
Michel Lespinasse | 6 | 1.95% | 1 | 6.67% |
Oleg Nesterov | 5 | 1.63% | 1 | 6.67% |
Total | 307 | 100.00% | 15 | 100.00% |
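mremap_to() implements the MREMAP_FIXED path: it unmaps whatever currently occupies the destination, validates and possibly shrinks the source, reserves the destination through get_unmapped_area() with MAP_FIXED, and then hands off to move_vma(). A hedged userspace sketch of exercising that path follows; the destination hint address and sizes are purely illustrative assumptions.

/* Hedged userspace sketch: move an anonymous mapping to a caller-chosen
 * address with MREMAP_FIXED.  The hint address is illustrative only. */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 4096 * 16;
        void *old = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        void *hint = (void *)0x700000000000UL;  /* assumed free; illustrative */
        void *new;

        if (old == MAP_FAILED)
                return 1;
        memset(old, 0x5a, len);

        /* MREMAP_FIXED requires MREMAP_MAYMOVE, as the syscall below checks. */
        new = mremap(old, len, len, MREMAP_MAYMOVE | MREMAP_FIXED, hint);
        if (new == MAP_FAILED) {
                perror("mremap");
                return 1;
        }
        printf("moved to %p, first byte 0x%x\n", new, ((unsigned char *)new)[0]);
        return 0;
}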
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
        unsigned long end = vma->vm_end + delta;
        if (end < vma->vm_end) /* overflow */
                return 0;
        if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
                return 0;
        if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
                              0, MAP_FIXED) & ~PAGE_MASK)
                return 0;
        return 1;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Al Viro | 85 | 100.00% | 3 | 100.00% |
Total | 85 | 100.00% | 3 | 100.00% |
/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
                unsigned long, new_len, unsigned long, flags,
                unsigned long, new_addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;
        unsigned long charged = 0;
        bool locked = false;
        struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
        LIST_HEAD(uf_unmap);

        if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
                return ret;

        if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
                return ret;

        if (offset_in_page(addr))
                return ret;

        old_len = PAGE_ALIGN(old_len);
        new_len = PAGE_ALIGN(new_len);

        /*
         * We allow a zero old-len as a special case
         * for DOS-emu "duplicate shm area" thing. But
         * a zero new-len is nonsensical.
         */
        if (!new_len)
                return ret;

        if (down_write_killable(&current->mm->mmap_sem))
                return -EINTR;

        if (flags & MREMAP_FIXED) {
                ret = mremap_to(addr, old_len, new_addr, new_len,
                                &locked, &uf, &uf_unmap);
                goto out;
        }

        /*
         * Always allow a shrinking remap: that just unmaps
         * the unnecessary pages..
         * do_munmap does all the needed commit accounting
         */
        if (old_len >= new_len) {
                ret = do_munmap(mm, addr+new_len, old_len - new_len, &uf_unmap);
                if (ret && old_len != new_len)
                        goto out;
                ret = addr;
                goto out;
        }

        /*
         * Ok, we need to grow..
         */
        vma = vma_to_resize(addr, old_len, new_len, &charged);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out;
        }

        /* old_len exactly to the end of the area..
         */
        if (old_len == vma->vm_end - addr) {
                /* can we just expand the current mapping? */
                if (vma_expandable(vma, new_len - old_len)) {
                        int pages = (new_len - old_len) >> PAGE_SHIFT;

                        if (vma_adjust(vma, vma->vm_start, addr + new_len,
                                       vma->vm_pgoff, NULL)) {
                                ret = -ENOMEM;
                                goto out;
                        }

                        vm_stat_account(mm, vma->vm_flags, pages);
                        if (vma->vm_flags & VM_LOCKED) {
                                mm->locked_vm += pages;
                                locked = true;
                                new_addr = addr;
                        }
                        ret = addr;
                        goto out;
                }
        }

        /*
         * We weren't able to just expand or shrink the area,
         * we need to create a new one and move it..
         */
        ret = -ENOMEM;
        if (flags & MREMAP_MAYMOVE) {
                unsigned long map_flags = 0;
                if (vma->vm_flags & VM_MAYSHARE)
                        map_flags |= MAP_SHARED;

                new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
                                        vma->vm_pgoff +
                                        ((addr - vma->vm_start) >> PAGE_SHIFT),
                                        map_flags);
                if (offset_in_page(new_addr)) {
                        ret = new_addr;
                        goto out;
                }

                ret = move_vma(vma, addr, old_len, new_len, new_addr,
                               &locked, &uf, &uf_unmap);
        }
out:
        if (offset_in_page(ret)) {
                vm_unacct_memory(charged);
                locked = 0;
        }
        up_write(&current->mm->mmap_sem);
        if (locked && new_len > old_len)
                mm_populate(new_addr + old_len, new_len - old_len);
        mremap_userfaultfd_complete(&uf, addr, new_addr, old_len);
        userfaultfd_unmap_complete(mm, &uf_unmap);
        return ret;
}
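The syscall above tries the cheap cases first: a pure shrink is just a do_munmap(), and a grow at the end of a vma is expanded in place when vma_expandable() finds room; only then does it fall back to allocating a new range and calling move_vma(), and only if MREMAP_MAYMOVE was passed. A hedged userspace sketch of that distinction (sizes illustrative) is:

/* Hedged userspace sketch: without MREMAP_MAYMOVE the kernel may only
 * grow the mapping in place and can fail with ENOMEM if a neighbouring
 * vma is in the way; with MREMAP_MAYMOVE it may relocate the mapping. */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t old_len = 4096, new_len = 4096 * 1024;
        void *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        void *q;

        if (p == MAP_FAILED)
                return 1;

        /* Try to expand in place first (flags == 0). */
        q = mremap(p, old_len, new_len, 0);
        if (q == MAP_FAILED && errno == ENOMEM)
                q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);

        printf("grown mapping at %p\n", q == MAP_FAILED ? NULL : q);
        return 0;
}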
Overall Contributors
Person | Tokens | Token % | Commits | Commit % |
Al Viro | 597 | 21.46% | 10 | 8.70% |
Hugh Dickins | 493 | 17.72% | 15 | 13.04% |
Linus Torvalds (pre-git) | 419 | 15.06% | 11 | 9.57% |
Andrew Morton | 266 | 9.56% | 14 | 12.17% |
Andrea Arcangeli | 128 | 4.60% | 6 | 5.22% |
Linus Torvalds | 95 | 3.41% | 6 | 5.22% |
Kirill A. Shutemov | 83 | 2.98% | 7 | 6.09% |
Michel Lespinasse | 73 | 2.62% | 3 | 2.61% |
Aaron Lu | 72 | 2.59% | 2 | 1.74% |
Oleg Nesterov | 64 | 2.30% | 5 | 4.35% |
Pavel Emelyanov | 63 | 2.26% | 2 | 1.74% |
Cyrill V. Gorcunov | 57 | 2.05% | 1 | 0.87% |
Andi Kleen | 52 | 1.87% | 1 | 0.87% |
Derek | 51 | 1.83% | 1 | 0.87% |
Mike Rapoport | 40 | 1.44% | 1 | 0.87% |
Rasmus Villemoes | 33 | 1.19% | 1 | 0.87% |
Sagi Grimberg | 22 | 0.79% | 1 | 0.87% |
Laurent Dufour | 21 | 0.75% | 1 | 0.87% |
Alexander Kuleshov | 18 | 0.65% | 1 | 0.87% |
Toshi Kani | 17 | 0.61% | 1 | 0.87% |
Rik Van Riel | 16 | 0.58% | 3 | 2.61% |
Nicholas Piggin | 13 | 0.47% | 2 | 1.74% |
Kanoj Sarcar | 12 | 0.43% | 1 | 0.87% |
Eric Paris | 11 | 0.40% | 1 | 0.87% |
William Lee Irwin III | 9 | 0.32% | 1 | 0.87% |
Michal Hocko | 8 | 0.29% | 1 | 0.87% |
Ingo Molnar | 8 | 0.29% | 2 | 1.74% |
Richard Henderson | 7 | 0.25% | 1 | 0.87% |
Zachary Amsden | 6 | 0.22% | 1 | 0.87% |
David S. Miller | 5 | 0.18% | 1 | 0.87% |
Konstantin Khlebnikov | 4 | 0.14% | 1 | 0.87% |
Randy Dunlap | 3 | 0.11% | 1 | 0.87% |
Jiri Slaby | 3 | 0.11% | 1 | 0.87% |
Arnd Bergmann | 3 | 0.11% | 1 | 0.87% |
Dave Jones | 3 | 0.11% | 1 | 0.87% |
Peter Zijlstra | 2 | 0.07% | 1 | 0.87% |
Alan Cox | 1 | 0.04% | 1 | 0.87% |
Paul McQuade | 1 | 0.04% | 1 | 0.87% |
Naoya Horiguchi | 1 | 0.04% | 1 | 0.87% |
James Bottomley | 1 | 0.04% | 1 | 0.87% |
Davidlohr Bueso A | 1 | 0.04% | 1 | 0.87% |
Total | 2782 | 100.00% | 115 | 100.00% |