cregit-Linux: how code gets into the kernel

Release 4.14 arch/powerpc/mm/subpage-prot.c

Directory: arch/powerpc/mm
/*
 * Copyright 2007-2008 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/tlbflush.h>

/*
 * Free all pages allocated for subpage protection maps and pointers.
 * Also makes sure that the subpage_prot_table structure is
 * reinitialized for the next user.
 */

void subpage_prot_free(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = &mm->context.spt;
	unsigned long i, j, addr;
	u32 **p;

	for (i = 0; i < 4; ++i) {
		if (spt->low_prot[i]) {
			free_page((unsigned long)spt->low_prot[i]);
			spt->low_prot[i] = NULL;
		}
	}
	addr = 0;
	for (i = 0; i < (TASK_SIZE_USER64 >> 43); ++i) {
		p = spt->protptrs[i];
		if (!p)
			continue;
		spt->protptrs[i] = NULL;
		for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
		     ++j, addr += PAGE_SIZE)
			if (p[j])
				free_page((unsigned long)p[j]);
		free_page((unsigned long)p);
	}
	spt->maxaddr = 0;
}

Contributors

Person               Tokens   Prop      Commits   Commit Prop
Paul Mackerras       174      92.55%    1         33.33%
David Gibson         9        4.79%     1         33.33%
Aneesh Kumar K.V     5        2.66%     1         33.33%
Total                188      100.00%   3         100.00%
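
For orientation, the three-level map that subpage_prot_free() tears down lives in mm->context.spt. Below is a rough sketch of its shape and of the SBP_* geometry used throughout this file, assuming 64k pages; it is paraphrased from the book3s hash-MMU header of this era (arch/powerpc/include/asm/book3s/64/mmu-hash.h) and is illustrative, not authoritative.

/*
 * Sketch, not part of this file.  With PAGE_SHIFT == 16 (64k pages):
 * a leaf page holds 2^14 u32s and so maps 2^14 * 64k = 1GB
 * (SBP_L2_SHIFT == 30); a page of leaf pointers holds 2^13 entries
 * and so spans 2^43 bytes = 8TB (SBP_L3_SHIFT == 43), which is why
 * subpage_prot_free() iterates protptrs up to TASK_SIZE_USER64 >> 43.
 */
#define SBP_L1_BITS	(PAGE_SHIFT - 2)	/* log2(u32s per page) */
#define SBP_L2_BITS	(PAGE_SHIFT - 3)	/* log2(pointers per page) */
#define SBP_L1_COUNT	(1 << SBP_L1_BITS)
#define SBP_L2_COUNT	(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT	(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT	(SBP_L2_SHIFT + SBP_L2_BITS)

struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	u32 **protptrs[(TASK_SIZE_USER64 >> 43)];	/* maps above 4GB */
	u32 *low_prot[4];	/* first 4GB, 1GB per leaf page */
};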


void subpage_prot_init_new_context(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = &mm->context.spt;

	memset(spt, 0, sizeof(*spt));
}

Contributors

Person               Tokens   Prop      Commits   Commit Prop
David Gibson         35       100.00%   1         100.00%
Total                35       100.00%   1         100.00%


static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
			     int npages)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return;
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	for (; npages > 0; --npages) {
		pte_update(mm, addr, pte, 0, 0, 0);
		addr += PAGE_SIZE;
		++pte;
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}

Contributors

Person               Tokens   Prop      Commits   Commit Prop
Paul Mackerras       153      98.71%    1         50.00%
Aneesh Kumar K.V     2        1.29%     1         50.00%
Total                155      100.00%   2         100.00%

/*
 * Clear the subpage protection map for an address range, allowing
 * all accesses that are allowed by the pte permissions.
 */
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt = &mm->context.spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;

	down_write(&mm->mmap_sem);
	limit = addr + len;
	if (limit > spt->maxaddr)
		limit = spt->maxaddr;
	for (; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		if (addr < 0x100000000UL) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm)
				continue;
		}
		spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
		if (!spp)
			continue;
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		memset(spp, 0, nw * sizeof(u32));

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	up_write(&mm->mmap_sem);
}

Contributors

Person               Tokens   Prop      Commits   Commit Prop
Paul Mackerras       242      96.41%    1         25.00%
Joe MacDonald        4        1.59%     1         25.00%
David Gibson         4        1.59%     1         25.00%
Anton Blanchard      1        0.40%     1         25.00%
Total                251      100.00%   4         100.00%
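
As the sys_subpage_prot() entry point later in this file shows, this clear path is reached from user space by passing a NULL map pointer. A minimal hedged sketch of that call (the syscall number 310 is assumed to be __NR_subpage_prot on powerpc, for illustration only):

/* Hedged user-space sketch, not part of this file. */
#include <sys/syscall.h>
#include <unistd.h>

static long clear_subpage_prot(unsigned long addr, unsigned long len)
{
	/* a NULL map clears the subpage protections for [addr, addr+len) */
	return syscall(310, addr, len, (void *)0);
}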

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
				  unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;

	split_huge_pmd(vma, pmd, addr);
	return 0;
}

Contributors

Person               Tokens   Prop      Commits   Commit Prop
Aneesh Kumar K.V     40       90.91%    1         33.33%
Kirill A. Shutemov   3        6.82%     1         33.33%
Naoya Horiguchi      1        2.27%     1         33.33%
Total                44       100.00%   3         100.00%


static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	struct vm_area_struct *vma;
	struct mm_walk subpage_proto_walk = {
		.mm = mm,
		.pmd_entry = subpage_walk_pmd_entry,
	};

	/*
	 * We don't try too hard, we just mark all the vma in that range
	 * VM_NOHUGEPAGE and split them.
	 */
	vma = find_vma(mm, addr);
	/*
	 * If the range is in unmapped range, just return
	 */
	if (vma && ((addr + len) <= vma->vm_start))
		return;

	while (vma) {
		if (vma->vm_start >= (addr + len))
			break;
		vma->vm_flags |= VM_NOHUGEPAGE;
		walk_page_vma(vma, &subpage_proto_walk);
		vma = vma->vm_next;
	}
}

Contributors

Person               Tokens   Prop      Commits   Commit Prop
Aneesh Kumar K.V     106      99.07%    1         50.00%
Naoya Horiguchi      1        0.93%     1         50.00%
Total                107      100.00%   2         100.00%

#else
static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	return;
}

Contributors

Person               Tokens   Prop      Commits   Commit Prop
Aneesh Kumar K.V     20       100.00%   1         100.00%
Total                20       100.00%   1         100.00%

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Copy in a subpage protection map for an address range.
 * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
 * Each 2-bit field is 0 to allow any access, 1 to prevent writes,
 * 2 or 3 to prevent all accesses.
 * Note that the normal page protections also apply; the subpage
 * protection mechanism is an additional constraint, so putting 0
 * in a 2-bit field won't allow writes to a page that is otherwise
 * write-protected.
 */
long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt = &mm->context.spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;
	int err;

	/* Check parameters */
	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
	    addr >= mm->task_size || len >= mm->task_size ||
	    addr + len > mm->task_size)
		return -EINVAL;

	if (is_hugepage_only_range(mm, addr, len))
		return -EINVAL;

	if (!map) {
		/* Clear out the protection map for the address range */
		subpage_prot_clear(addr, len);
		return 0;
	}

	if (!access_ok(VERIFY_READ, map, (len >> PAGE_SHIFT) * sizeof(u32)))
		return -EFAULT;

	down_write(&mm->mmap_sem);
	subpage_mark_vma_nohuge(mm, addr, len);
	for (limit = addr + len; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		err = -ENOMEM;
		if (addr < 0x100000000UL) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm) {
				spm = (u32 **)get_zeroed_page(GFP_KERNEL);
				if (!spm)
					goto out;
				spt->protptrs[addr >> SBP_L3_SHIFT] = spm;
			}
		}
		spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1);
		spp = *spm;
		if (!spp) {
			spp = (u32 *)get_zeroed_page(GFP_KERNEL);
			if (!spp)
				goto out;
			*spm = spp;
		}
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		local_irq_disable();
		demote_segment_4k(mm, addr);
		local_irq_enable();

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		up_write(&mm->mmap_sem);
		if (__copy_from_user(spp, map, nw * sizeof(u32)))
			return -EFAULT;
		map += nw;
		down_write(&mm->mmap_sem);

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	if (limit > spt->maxaddr)
		spt->maxaddr = limit;
	err = 0;
 out:
	up_write(&mm->mmap_sem);
	return err;
}

Contributors

Person               Tokens   Prop      Commits   Commit Prop
Paul Mackerras       446      93.50%    1         12.50%
Aneesh Kumar K.V     18       3.77%     3         37.50%
SF Markus Elfring    4        0.84%     1         12.50%
Joe MacDonald        4        0.84%     1         12.50%
David Gibson         4        0.84%     1         12.50%
Anton Blanchard      1        0.21%     1         12.50%
Total                477      100.00%   8         100.00%
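
To make the 2-bits-per-4k encoding concrete, here is a hedged user-space sketch that write-protects the odd-numbered 4k subpages of a single 64k page. Two assumptions are made for illustration: the powerpc syscall number is 310, and subpage 0 occupies the most-significant 2-bit field of each word, matching the fault-time lookup spp >> (30 - 2 * ((ea >> 12) & 0xf)) in the hash fault handler of this era. Neither is confirmed by this file.

/* Hedged user-space sketch, not part of this file. */
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#define PAGE_64K	0x10000UL
#define NR_SUBPAGE_PROT	310	/* assumed __NR_subpage_prot on powerpc */

int main(void)
{
	char *buf = mmap(NULL, PAGE_64K, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	uint32_t map = 0;
	int i;

	if (buf == MAP_FAILED)
		return 1;

	/* One 2-bit field per 4k subpage, 16 fields per u32.
	 * Field value 1 = prevent writes.  Subpage 0 is assumed to be
	 * the most-significant field, so subpage i sits at bit 30 - 2*i. */
	for (i = 1; i < 16; i += 2)
		map |= 1u << (30 - 2 * i);

	if (syscall(NR_SUBPAGE_PROT, (unsigned long)buf, PAGE_64K, &map)) {
		perror("subpage_prot");
		return 1;
	}

	buf[0] = 'x';			/* subpage 0 is unprotected: succeeds */
	printf("subpage 0 still writable\n");
	/* buf[0x1000] = 'y';	subpage 1 is write-protected: would SIGSEGV */

	munmap(buf, PAGE_64K);
	return 0;
}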


Overall Contributors

Person               Tokens   Prop      Commits   Commit Prop
Paul Mackerras       1045     79.47%    1         7.69%
Aneesh Kumar K.V     198      15.06%    5         38.46%
David Gibson         52       3.95%     1         7.69%
Joe MacDonald        8        0.61%     1         7.69%
SF Markus Elfring    4        0.30%     1         7.69%
Kirill A. Shutemov   3        0.23%     1         7.69%
Naoya Horiguchi      2        0.15%     1         7.69%
Anton Blanchard      2        0.15%     1         7.69%
Linus Torvalds       1        0.08%     1         7.69%
Total                1315     100.00%   13        100.00%
Created with cregit.