cregit-Linux: how code gets into the kernel

Release 4.14: arch/arm64/mm/pageattr.c

Directory: arch/arm64/mm
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>


struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};


static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = *ptep;

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}

Contributors

Person              Tokens    Prop      Commits   CommitProp
Laura Abbott        66        100.00%   1         100.00%
Total               66        100.00%   1         100.00%
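
change_page_range() is the per-PTE callback handed to apply_to_page_range() below. The clear_pte_bit() and set_pte_bit() helpers it uses come from asm/pgtable.h; conceptually they just mask the raw PTE value, roughly as in this illustrative sketch (the example_* names are not kernel functions):

/* Illustrative only: roughly what the asm/pgtable.h helpers do. */
static inline pte_t example_clear_pte_bit(pte_t pte, pgprot_t prot)
{
	return __pte(pte_val(pte) & ~pgprot_val(prot));	/* drop the bits in prot */
}

static inline pte_t example_set_pte_bit(pte_t pte, pgprot_t prot)
{
	return __pte(pte_val(pte) | pgprot_val(prot));	/* add the bits in prot */
}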

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
					&data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}

Contributors

Person              Tokens    Prop      Commits   CommitProp
Laura Abbott        68        100.00%   1         100.00%
Total               68        100.00%   1         100.00%
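
__change_memory_common() delegates the actual page-table walk to the generic apply_to_page_range() (mm/memory.c), which visits every PTE in [start, start + size) and invokes change_page_range() on it; the TLB is then flushed for the whole range. A conceptual sketch of that walk, assuming a hypothetical lookup_kernel_pte() helper (the real walker descends through all table levels):

/* Conceptual sketch only, not the real mm/memory.c implementation. */
for (addr = start; addr < start + size; addr += PAGE_SIZE) {
	pte_t *ptep = lookup_kernel_pte(addr);	/* hypothetical helper */
	change_page_range(ptep, NULL, addr, &data);
}
flush_tlb_kernel_range(start, start + size);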


static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE*numpages;
	unsigned long end = start + size;
	struct vm_struct *area;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)area->addr + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	return __change_memory_common(start, size, set_mask, clear_mask);
}

Contributors

Person              Tokens    Prop      Commits   CommitProp
Laura Abbott        87        64.44%    4         57.14%
Ard Biesheuvel      39        28.89%    1         14.29%
Mika Penttilä       8         5.93%     1         14.29%
Alexander Kuleshov  1         0.74%     1         14.29%
Total               135       100.00%   7         100.00%
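
The VM_ALLOC check above means the set_memory_* wrappers below only accept addresses that lie inside a single vmalloc/vmap area; a linear-map or unmapped address returns -EINVAL. A hedged usage sketch (the buffer and function name are illustrative, not from this file):

/* Illustrative caller: seal a vmalloc'd page read-only, then restore it. */
static int protect_table(void)
{
	void *buf = vmalloc(PAGE_SIZE);	/* page-granular VM_ALLOC area */
	int ret;

	if (!buf)
		return -ENOMEM;

	/* ... fill buf ... */

	ret = set_memory_ro((unsigned long)buf, 1);	/* drop write permission */
	if (ret) {
		vfree(buf);
		return ret;
	}

	/* ... use buf read-only ... */

	set_memory_rw((unsigned long)buf, 1);	/* restore defaults before freeing */
	vfree(buf);
	return 0;
}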


int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}

Contributors

Person              Tokens    Prop      Commits   CommitProp
Laura Abbott        30        100.00%   1         100.00%
Total               30        100.00%   1         100.00%


int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}

Contributors

Person              Tokens    Prop      Commits   CommitProp
Laura Abbott        30        100.00%   1         100.00%
Total               30        100.00%   1         100.00%


int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(0));
}

Contributors

Person              Tokens    Prop      Commits   CommitProp
Laura Abbott        30        100.00%   1         100.00%
Total               30        100.00%   1         100.00%

EXPORT_SYMBOL_GPL(set_memory_nx);
int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(0),
					__pgprot(PTE_PXN));
}

Contributors

Person              Tokens    Prop      Commits   CommitProp
Laura Abbott        30        100.00%   1         100.00%
Total               30        100.00%   1         100.00%

EXPORT_SYMBOL_GPL(set_memory_x);
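
set_memory_nx() and set_memory_x() toggle only PTE_PXN, so they are typically paired with the read-only helpers above to get a W^X transition on a vmap'd code buffer. A hedged sketch under that assumption (seal_code_buffer() is hypothetical):

/* Illustrative W^X-style transition for a vmap'd buffer that now holds code. */
static int seal_code_buffer(void *buf, int npages)
{
	int ret;

	ret = set_memory_ro((unsigned long)buf, npages);	/* no longer writable */
	if (ret)
		return ret;
	return set_memory_x((unsigned long)buf, npages);	/* clear PTE_PXN */
}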
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
	if (enable)
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(PTE_VALID),
					__pgprot(0));
	else
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(PTE_VALID));
}

Contributors

Person              Tokens    Prop      Commits   CommitProp
Laura Abbott        50        83.33%    1         50.00%
AKASHI Takahiro     10        16.67%    1         50.00%
Total               60        100.00%   2         100.00%

#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	set_memory_valid((unsigned long)page_address(page), numpages, enable);
}

Contributors

Person              Tokens    Prop      Commits   CommitProp
AKASHI Takahiro     30        93.75%    1         50.00%
Laura Abbott        2         6.25%     1         50.00%
Total               32        100.00%   2         100.00%
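
With CONFIG_DEBUG_PAGEALLOC active, the core page allocator invokes this hook as pages leave and re-enter the free lists, so stray accesses to freed memory through the linear map fault immediately. Illustrative calls only (the real call sites live in the generic allocator):

/* Illustrative only: the effect of the hook on a single page. */
__kernel_map_pages(page, 1, 0);	/* on free: clear PTE_VALID, accesses now fault */
__kernel_map_pages(page, 1, 1);	/* on reallocation: restore PTE_VALID */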

#ifdef CONFIG_HIBERNATION
/*
 * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
 * is used to determine if a linear map page has been marked as not-valid by
 * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
 * This is based on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr = (unsigned long)page_address(page);

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return false;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return false;
	if (pud_sect(*pud))
		return true;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;
	if (pmd_sect(*pmd))
		return true;

	pte = pte_offset_kernel(pmd, addr);
	return pte_valid(*pte);
}

Contributors

Person              Tokens    Prop      Commits   CommitProp
James Morse         135       100.00%   1         100.00%
Total               135       100.00%   1         100.00%

#endif /* CONFIG_HIBERNATION */
#endif /* CONFIG_DEBUG_PAGEALLOC */
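
kernel_page_present() exists for the hibernation snapshot code (kernel/power/snapshot.c), which must not read linear-map pages that debug_pagealloc has made invalid. Roughly, the caller looks like the sketch below (paraphrased, not copied from snapshot.c):

/* Rough sketch of the hibernation-side usage. */
if (kernel_page_present(s_page)) {
	do_copy_page(dst, page_address(s_page));
} else {
	kernel_map_pages(s_page, 1, 1);	/* temporarily make the page valid */
	do_copy_page(dst, page_address(s_page));
	kernel_map_pages(s_page, 1, 0);	/* restore the not-present state */
}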

Overall Contributors

Person              Tokens    Prop      Commits   CommitProp
Laura Abbott        439       64.94%    5         50.00%
James Morse         143       21.15%    1         10.00%
AKASHI Takahiro     43        6.36%     1         10.00%
Ard Biesheuvel      42        6.21%     1         10.00%
Mika Penttilä       8         1.18%     1         10.00%
Alexander Kuleshov  1         0.15%     1         10.00%
Total               676       100.00%   10        100.00%
Created with cregit.