cregit-Linux: how code gets into the kernel

Release 4.11: mm/pagewalk.c

Directory: mm
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>


static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pte_t *pte;
        int err = 0;

        pte = pte_offset_map(pmd, addr);
        for (;;) {
                err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
                if (err)
                        break;
                addr += PAGE_SIZE;
                if (addr == end)
                        break;
                pte++;
        }

        pte_unmap(pte);
        return err;
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Matt Mackall             77   83.70%        1      33.33%
Johannes Weiner          14   15.22%        1      33.33%
Dave Hansen               1    1.09%        1      33.33%
Total                    92  100.00%        3     100.00%


static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pmd_t *pmd;
        unsigned long next;
        int err = 0;

        pmd = pmd_offset(pud, addr);
        do {
again:
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd) || !walk->vma) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                /*
                 * This implies that each ->pmd_entry() handler
                 * needs to know about pmd_trans_huge() pmds
                 */
                if (walk->pmd_entry)
                        err = walk->pmd_entry(pmd, addr, next, walk);
                if (err)
                        break;

                /*
                 * Check this here so we only break down trans_huge
                 * pages when we _need_ to
                 */
                if (!walk->pte_entry)
                        continue;

                split_huge_pmd(walk->vma, pmd, addr);
                if (pmd_trans_unstable(pmd))
                        goto again;
                err = walk_pte_range(pmd, addr, next, walk);
                if (err)
                        break;
        } while (pmd++, addr = next, addr != end);

        return err;
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Matt Mackall            151   79.06%        1      14.29%
Dave Hansen              29   15.18%        2      28.57%
Naoya Horiguchi           6    3.14%        2      28.57%
Kirill A. Shutemov        5    2.62%        2      28.57%
Total                   191  100.00%        7     100.00%
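
The comment above walk_pmd_range()'s ->pmd_entry() call notes that every pmd_entry() handler must cope with transparent huge pmds itself. A minimal, hypothetical sketch of that pattern follows; the callback name is an assumption, not code from this file.

/*
 * Hypothetical ->pmd_entry() callback illustrating the pattern required by
 * walk_pmd_range(): take the huge-pmd lock, handle a mapped huge pmd as one
 * unit, and fall through for normal pmds so ->pte_entry() (if any) sees the
 * individual ptes.
 */
static int sample_pmd_entry(pmd_t *pmd, unsigned long addr,
                            unsigned long next, struct mm_walk *walk)
{
        spinlock_t *ptl = pmd_trans_huge_lock(pmd, walk->vma);

        if (ptl) {
                /* the whole range [addr, next) is backed by one huge pmd */
                spin_unlock(ptl);
                return 0;
        }

        /* not (or no longer) a huge pmd; let the walker descend to ptes */
        return 0;
}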


static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pud_t *pud;
        unsigned long next;
        int err = 0;

        pud = pud_offset(p4d, addr);
        do {
again:
                next = pud_addr_end(addr, end);
                if (pud_none(*pud) || !walk->vma) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }

                if (walk->pud_entry) {
                        spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);

                        if (ptl) {
                                err = walk->pud_entry(pud, addr, next, walk);
                                spin_unlock(ptl);
                                if (err)
                                        break;
                                continue;
                        }
                }

                split_huge_pud(walk->vma, pud, addr);
                if (pud_none(*pud))
                        goto again;

                if (walk->pmd_entry || walk->pte_entry)
                        err = walk_pmd_range(pud, addr, next, walk);
                if (err)
                        break;
        } while (pud++, addr = next, addr != end);

        return err;
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Matt Mackall            131   59.82%        1      25.00%
Matthew Wilcox           84   38.36%        1      25.00%
Kirill A. Shutemov        3    1.37%        1      25.00%
Dave Hansen               1    0.46%        1      25.00%
Total                   219  100.00%        4     100.00%


static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        p4d_t *p4d;
        unsigned long next;
        int err = 0;

        p4d = p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);
                if (p4d_none_or_clear_bad(p4d)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pmd_entry || walk->pte_entry)
                        err = walk_pud_range(p4d, addr, next, walk);
                if (err)
                        break;
        } while (p4d++, addr = next, addr != end);

        return err;
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Kirill A. Shutemov      136  100.00%        1     100.00%
Total                   136  100.00%        1     100.00%


static int walk_pgd_range(unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pgd_t *pgd;
        unsigned long next;
        int err = 0;

        pgd = pgd_offset(walk->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pmd_entry || walk->pte_entry)
                        err = walk_p4d_range(pgd, addr, next, walk);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);

        return err;
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Naoya Horiguchi         133   99.25%        1      50.00%
Kirill A. Shutemov        1    0.75%        1      50.00%
Total                   134  100.00%        2     100.00%

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
                                       unsigned long end)
{
        unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);

        return boundary < end ? boundary : end;
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Naoya Horiguchi          47  100.00%        1     100.00%
Total                    47  100.00%        1     100.00%


static int walk_hugetlb_range(unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        struct hstate *h = hstate_vma(vma);
        unsigned long next;
        unsigned long hmask = huge_page_mask(h);
        pte_t *pte;
        int err = 0;

        do {
                next = hugetlb_entry_end(h, addr, end);
                pte = huge_pte_offset(walk->mm, addr & hmask);
                if (pte && walk->hugetlb_entry)
                        err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
                if (err)
                        break;
        } while (addr = next, addr != end);

        return err;
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Naoya Horiguchi         131  100.00%        2     100.00%
Total                   131  100.00%        2     100.00%
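
For illustration, a hypothetical ->hugetlb_entry() callback that consumes the arguments walk_hugetlb_range() passes in; the counter kept in walk->private and the function name are assumptions, not code from this file.

/*
 * Hypothetical ->hugetlb_entry() callback: count present huge pages.
 * Each invocation covers one huge page, i.e. the range [addr & hmask, next).
 */
static int count_hugetlb_entry(pte_t *pte, unsigned long hmask,
                               unsigned long addr, unsigned long next,
                               struct mm_walk *walk)
{
        unsigned long *count = walk->private;
        pte_t entry = huge_ptep_get(pte);

        if (pte_present(entry))
                (*count)++;
        return 0;
}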

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        return 0;
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Motohiro Kosaki          22  100.00%        1     100.00%
Total                    22  100.00%        1     100.00%

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. Negative values mean
 * error, where we abort the current walk.
 */
static int walk_page_test(unsigned long start, unsigned long end,
                          struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;

        if (walk->test_walk)
                return walk->test_walk(start, end, walk);

        /*
         * vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP
         * range, so we don't walk over it as we do for normal vmas. However,
         * some callers are interested in handling hole ranges and they don't
         * want to just ignore any single address range. Such users certainly
         * define their ->pte_hole() callbacks, so let's delegate them to handle
         * vma(VM_PFNMAP).
         */
        if (vma->vm_flags & VM_PFNMAP) {
                int err = 1;

                if (walk->pte_hole)
                        err = walk->pte_hole(start, end, walk);
                return err ? err : 1;
        }

        return 0;
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Naoya Horiguchi          74   81.32%        2      66.67%
Matt Mackall             17   18.68%        1      33.33%
Total                    91  100.00%        3     100.00%
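
As a hypothetical example of the convention described above walk_page_test(), here is a ->test_walk() callback that skips any vma that is not readable; the function name and the VM_READ policy are assumptions, not code from this file.

static int skip_unreadable_test(unsigned long start, unsigned long end,
                                struct mm_walk *walk)
{
        if (!(walk->vma->vm_flags & VM_READ))
                return 1;       /* skip this vma but keep walking the rest */
        return 0;               /* walk over this vma */
}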


static int __walk_page_range(unsigned long start, unsigned long end,
                             struct mm_walk *walk)
{
        int err = 0;
        struct vm_area_struct *vma = walk->vma;

        if (vma && is_vm_hugetlb_page(vma)) {
                if (walk->hugetlb_entry)
                        err = walk_hugetlb_range(start, end, walk);
        } else
                err = walk_pgd_range(start, end, walk);

        return err;
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Naoya Horiguchi          73   96.05%        1      50.00%
Matt Mackall              3    3.95%        1      50.00%
Total                    76  100.00%        2     100.00%

/**
 * walk_page_range - walk page tables with caller-specific callbacks
 *
 * Recursively walk the page table tree of the process represented by @walk->mm
 * within the virtual address range [@start, @end). While walking, caller-specific
 * work can be done for each entry by setting up pmd_entry(), pte_entry(),
 * and/or hugetlb_entry(). If some of these callbacks are not set up, the
 * corresponding entries/pages are simply ignored.
 * The return values of these callbacks are commonly defined as follows:
 *  - 0  : succeeded in handling the current entry; if the end address has not
 *         been reached yet, continue to walk.
 *  - >0 : succeeded in handling the current entry, and return to the caller
 *         with a caller-specific value.
 *  - <0 : failed to handle the current entry, and return to the caller
 *         with an error code.
 *
 * Before starting to walk the page tables, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @walk->test_walk() are used for this
 * purpose.
 *
 * struct mm_walk keeps current values of some common data such as vma and pmd,
 * which are useful for access from the callbacks. To pass caller-specific data
 * to the callbacks, use @walk->private.
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold
 *   @walk->mm->mmap_sem, because these functions traverse the vma list and/or
 *   access vma data.
 */
int walk_page_range(unsigned long start, unsigned long end,
                    struct mm_walk *walk)
{
        int err = 0;
        unsigned long next;
        struct vm_area_struct *vma;

        if (start >= end)
                return -EINVAL;

        if (!walk->mm)
                return -EINVAL;

        VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);

        vma = find_vma(walk->mm, start);
        do {
                if (!vma) { /* after the last vma */
                        walk->vma = NULL;
                        next = end;
                } else if (start < vma->vm_start) { /* outside vma */
                        walk->vma = NULL;
                        next = min(end, vma->vm_start);
                } else { /* inside vma */
                        walk->vma = vma;
                        next = min(end, vma->vm_end);
                        vma = vma->vm_next;

                        err = walk_page_test(start, next, walk);
                        if (err > 0) {
                                /*
                                 * positive return values are purely for
                                 * controlling the pagewalk, so should never
                                 * be passed to the callers.
                                 */
                                err = 0;
                                continue;
                        }
                        if (err < 0)
                                break;
                }
                if (walk->vma || walk->pte_hole)
                        err = __walk_page_range(start, next, walk);
                if (err)
                        break;
        } while (start = next, start < end);

        return err;
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Naoya Horiguchi         114   49.35%        5      41.67%
Matt Mackall             65   28.14%        1       8.33%
Cliff Wickman            30   12.99%        1       8.33%
Dave Hansen              13    5.63%        1       8.33%
Sasha Levin               5    2.16%        1       8.33%
Shiraz Hashim             2    0.87%        1       8.33%
Chen LinX                 1    0.43%        1       8.33%
David Sterba              1    0.43%        1       8.33%
Total                   231  100.00%       12     100.00%
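
A minimal usage sketch for walk_page_range(): fill in struct mm_walk with a ->pte_entry() callback and a private counter, take mmap_sem for reading as the locking comment above requires, and start the walk. The helper names (struct count_private, count_pte_entry, count_present_ptes) are hypothetical, not taken from this file.

/* Hypothetical caller-private state passed through walk->private. */
struct count_private {
        unsigned long present;
};

/* Hypothetical ->pte_entry() callback: count present ptes in the range. */
static int count_pte_entry(pte_t *pte, unsigned long addr,
                           unsigned long next, struct mm_walk *walk)
{
        struct count_private *cp = walk->private;

        if (pte_present(*pte))
                cp->present++;
        return 0;               /* 0 == keep walking */
}

static unsigned long count_present_ptes(struct mm_struct *mm,
                                        unsigned long start, unsigned long end)
{
        struct count_private cp = { 0 };
        struct mm_walk count_walk = {
                .pte_entry = count_pte_entry,
                .mm        = mm,
                .private   = &cp,
        };

        down_read(&mm->mmap_sem);       /* walk_page_range() asserts this is held */
        walk_page_range(start, end, &count_walk);
        up_read(&mm->mmap_sem);

        return cp.present;
}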


int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk)
{
        int err;

        if (!walk->mm)
                return -EINVAL;

        VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
        VM_BUG_ON(!vma);

        walk->vma = vma;
        err = walk_page_test(vma->vm_start, vma->vm_end, walk);
        if (err > 0)
                return 0;
        if (err < 0)
                return err;
        return __walk_page_range(vma->vm_start, vma->vm_end, walk);
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Naoya Horiguchi         102  100.00%        1     100.00%
Total                   102  100.00%        1     100.00%
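
A hypothetical wrapper showing how walk_page_vma() can be applied to every vma of a process one at a time; the wrapper name is an assumption, and it presumes walk->mm has already been set to @mm.

static int walk_all_vmas(struct mm_struct *mm, struct mm_walk *walk)
{
        struct vm_area_struct *vma;
        int err = 0;

        down_read(&mm->mmap_sem);       /* walk_page_vma() asserts mmap_sem is held */
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                err = walk_page_vma(vma, walk);
                if (err)
                        break;
        }
        up_read(&mm->mmap_sem);
        return err;
}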


Overall Contributors

Person               Tokens     Prop  Commits  CommitProp
Naoya Horiguchi         690   46.15%        7      33.33%
Matt Mackall            453   30.30%        1       4.76%
Kirill A. Shutemov      145    9.70%        3      14.29%
Matthew Wilcox           84    5.62%        1       4.76%
Dave Hansen              44    2.94%        2       9.52%
Cliff Wickman            30    2.01%        1       4.76%
Motohiro Kosaki          26    1.74%        1       4.76%
Johannes Weiner          14    0.94%        1       4.76%
Sasha Levin               5    0.33%        1       4.76%
Shiraz Hashim             2    0.13%        1       4.76%
David Sterba              1    0.07%        1       4.76%
Chen LinX                 1    0.07%        1       4.76%
Total                  1495  100.00%       21     100.00%
Created with cregit.