cregit-Linux: how code gets into the kernel

Release 4.8 mm/huge_memory.c

/*
 *  Copyright (C) 2009  Red Hat, Inc.
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */


#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

/*
 * By default, transparent hugepage support is disabled so as not to risk
 * increasing the memory footprint of applications without a guaranteed
 * benefit. When transparent hugepage support is enabled, it is enabled for
 * all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */

unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
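
The defaults above are what an administrator later sees through the sysfs interface that hugepage_init_sysfs() registers further down in this file. As a minimal userspace sketch (not kernel code; it only assumes the standard /sys/kernel/mm/transparent_hugepage/ paths created here), the program below prints the current "enabled", "defrag" and "use_zero_page" settings, whose contents are formatted by enabled_show(), defrag_show() and use_zero_page_show():

/* Userspace sketch: read the THP policy files exposed by this file's sysfs code. */
#include <stdio.h>

static void show(const char *path)
{
	char line[128];
	FILE *f = fopen(path, "r");

	if (!f)
		return;
	if (fgets(line, sizeof(line), f))
		printf("%s: %s", path, line);
	fclose(f);
}

int main(void)
{
	show("/sys/kernel/mm/transparent_hugepage/enabled");
	show("/sys/kernel/mm/transparent_hugepage/defrag");
	show("/sys/kernel/mm/transparent_hugepage/use_zero_page");
	return 0;
}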


static struct shrinker deferred_split_shrinker;


static atomic_t huge_zero_refcount;

struct page *huge_zero_page __read_mostly;


struct page *get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return READ_ONCE(huge_zero_page);

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return NULL;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return READ_ONCE(huge_zero_page);
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
kirill a. shutemov  99      82.50%   6        60.00%
andrea arcangeli    14      11.67%   2        20.00%
yu zhao             6       5.00%    1        10.00%
jason low           1       0.83%    1        10.00%
Total               120     100.00%  10       100.00%
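
Callers are expected to pair get_huge_zero_page() with put_huge_zero_page(); the extra reference set here (refcount of 2) belongs to the shrinker below, which frees the page once only that reference remains. A minimal sketch of the caller-side pattern, modeled on do_huge_pmd_anonymous_page() later in this file (page table locking is omitted, and set_huge_zero_page() must really be called with that lock held; this helper is illustrative only, not kernel code):

/*
 * Illustrative sketch: take a zero-page reference, try to install it,
 * and drop the reference again if it was not used.
 */
static int huge_zero_example(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long haddr, pmd_t *pmd, pgtable_t pgtable)
{
	struct page *zero_page = get_huge_zero_page();

	if (unlikely(!zero_page))
		return VM_FAULT_FALLBACK;	/* allocation failed, caller falls back */

	if (!set_huge_zero_page(pgtable, mm, vma, haddr, pmd, zero_page))
		put_huge_zero_page();		/* pmd was already populated */
	return 0;
}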


void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
kirill a. shutemov  17      100.00%  2        100.00%
Total               17      100.00%  2        100.00%


static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
kirill a. shutemov  27      87.10%   2        66.67%
glauber costa       4       12.90%   1        33.33%
Total               31      100.00%  3        100.00%


static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
kirill a. shutemov  44      63.77%   3        60.00%
glauber costa       19      27.54%   1        20.00%
yu zhao             6       8.70%    1        20.00%
Total               69      100.00%  5        100.00%

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};

#ifdef CONFIG_SYSFS
static ssize_t triple_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag enabled,
				 enum transparent_hugepage_flag deferred,
				 enum transparent_hugepage_flag req_madv)
{
	if (!memcmp("defer", buf,
		    min(sizeof("defer")-1, count))) {
		if (enabled == deferred)
			return -EINVAL;
		clear_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
		set_bit(deferred, &transparent_hugepage_flags);
	} else if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		clear_bit(deferred, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
		set_bit(enabled, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		clear_bit(deferred, &transparent_hugepage_flags);
		set_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
		clear_bit(deferred, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
andrea arcangeli    178     71.49%   2        66.67%
mel gorman          71      28.51%   1        33.33%
Total               249     100.00%  3        100.00%


static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "[always] madvise never\n");
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always [madvise] never\n");
	else
		return sprintf(buf, "always madvise [never]\n");
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
mel gorman          37      56.06%   1        50.00%
andrea arcangeli    29      43.94%   1        50.00%
Total               66      100.00%  2        100.00%


static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret;

	ret = triple_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);

	if (ret > 0) {
		int err = start_stop_khugepaged();
		if (err)
			ret = err;
	}

	return ret;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
andrea arcangeli    65      91.55%   2        40.00%
mel gorman          3       4.23%    1        20.00%
kirill a. shutemov  3       4.23%    2        40.00%
Total               71      100.00%  5        100.00%

static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);

ssize_t single_hugepage_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	return sprintf(buf, "%d\n",
		       !!test_bit(flag, &transparent_hugepage_flags));
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
andrea arcangeli    31      75.61%   1        33.33%
ben hutchings       9       21.95%   1        33.33%
kirill a. shutemov  1       2.44%    1        33.33%
Total               41      100.00%  3        100.00%


ssize_t single_hugepage_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
andrea arcangeli    51      57.30%   1        33.33%
ben hutchings       37      41.57%   1        33.33%
kirill a. shutemov  1       1.12%    1        33.33%
Total               89      100.00%  3        100.00%
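
single_hugepage_flag_show() and single_hugepage_flag_store() are the generic show/store pair for any single-bit THP flag; use_zero_page and, under CONFIG_DEBUG_VM, debug_cow are wired through them below. As a sketch of that pattern, here is how a hypothetical boolean attribute would be added (TRANSPARENT_HUGEPAGE_EXAMPLE_FLAG and the "example" file are inventions for illustration, not part of the kernel):

/* Sketch only: a hypothetical single-bit sysfs attribute built on the helpers above. */
static ssize_t example_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_EXAMPLE_FLAG);
}

static ssize_t example_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_EXAMPLE_FLAG);
}

static struct kobj_attribute example_attr =
	__ATTR(example, 0644, example_show, example_store);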

/*
 * Currently defrag only disables __GFP_NOWAIT for allocation. A blind
 * __GFP_REPEAT is too aggressive, it's never worth swapping tons of
 * memory just to allocate one more hugepage.
 */
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "[always] defer madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always [defer] madvise never\n");
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always defer [madvise] never\n");
	else
		return sprintf(buf, "always defer madvise [never]\n");
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
mel gorman          55      65.48%   1        50.00%
andrea arcangeli    29      34.52%   1        50.00%
Total               84      100.00%  2        100.00%


static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return triple_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
				 TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
				 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
andrea arcangeli    38      90.48%   1        50.00%
mel gorman          4       9.52%    1        50.00%
Total               42      100.00%  2        100.00%

static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);

static ssize_t use_zero_page_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
kirill a. shutemov  32      100.00%  2        100.00%
Total               32      100.00%  2        100.00%


static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
kirill a. shutemov  38      100.00%  2        100.00%
Total               38      100.00%  2        100.00%

static struct kobj_attribute use_zero_page_attr =
	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);

#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
andrea arcangeli    31      96.88%   1        50.00%
kirill a. shutemov  1       3.12%    1        50.00%
Total               32      100.00%  2        100.00%


static ssize_t debug_cow_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
andrea arcangeli    37      97.37%   1        50.00%
kirill a. shutemov  1       2.63%    1        50.00%
Total               38      100.00%  2        100.00%

static struct kobj_attribute debug_cow_attr =
	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
	&shmem_enabled_attr.attr,
#endif
#ifdef CONFIG_DEBUG_VM
	&debug_cow_attr.attr,
#endif
	NULL,
};

static struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};
static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
kirill a. shutemov  70      57.85%   1        25.00%
andrea arcangeli    50      41.32%   2        50.00%
david rientjes      1       0.83%    1        25.00%
Total               121     100.00%  4        100.00%


static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
kirill a. shutemov  27      81.82%   1        33.33%
andrea arcangeli    6       18.18%   2        66.67%
Total               33      100.00%  3        100.00%

#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
andrea arcangeli    11      68.75%   2        66.67%
kirill a. shutemov  5       31.25%   1        33.33%
Total               16      100.00%  3        100.00%


static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
kirill a. shutemov  6       54.55%   1        33.33%
andrea arcangeli    5       45.45%   2        66.67%
Total               11      100.00%  3        100.00%

#endif /* CONFIG_SYSFS */
static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 0;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = register_shrinker(&huge_zero_page_shrinker);
	if (err)
		goto err_hzp_shrinker;
	err = register_shrinker(&deferred_split_shrinker);
	if (err)
		goto err_split_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save. The admin can still enable it through /sys.
	 */
	if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	unregister_shrinker(&deferred_split_shrinker);
err_split_shrinker:
	unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
kirill a. shutemov  151     84.83%   1        33.33%
andrea arcangeli    27      15.17%   2        66.67%
Total               178     100.00%  3        100.00%

subsys_initcall(hugepage_init);
static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
kirill a. shutemov  97      70.80%   1        25.00%
andrea arcangeli    40      29.20%   3        75.00%
Total               137     100.00%  4        100.00%

__setup("transparent_hugepage=", setup_transparent_hugepage);
pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
kirill a. shutemov  30      88.24%   1        50.00%
ebru akagunduz      4       11.76%   1        50.00%
Total               34      100.00%  2        100.00%


static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * ->lru in the tail pages is occupied by compound_head.
	 * Let's use ->mapping + ->index in the second tail page as list_head.
	 */
	return (struct list_head *)&page[2].mapping;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
kirill a. shutemov  25      86.21%   1        50.00%
ebru akagunduz      4       13.79%   1        50.00%
Total               29      100.00%  2        100.00%
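
The deferred-split machinery stores its list linkage in the second tail page because ->lru in tail pages is already taken by compound_head. A sketch of how a THP would be queued through this helper (the queue, lock and function names here are illustrative only; this file's real deferred-split code keeps its own queue and locking):

/* Illustrative sketch: queue a THP on a deferred-split list via page_deferred_list(). */
static LIST_HEAD(example_split_queue);
static DEFINE_SPINLOCK(example_split_lock);

static void example_defer_split(struct page *page)
{
	spin_lock(&example_split_lock);
	if (list_empty(page_deferred_list(page)))
		list_add_tail(page_deferred_list(page), &example_split_queue);
	spin_unlock(&example_split_lock);
}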


void prep_transhuge_page(struct page *page)
{
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	INIT_LIST_HEAD(page_deferred_list(page));
	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
kirill a. shutemov  22      84.62%   1        50.00%
ebru akagunduz      4       15.38%   1        50.00%
Total               26      100.00%  2        100.00%


static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page,
		gfp_t gfp)
{
	struct vm_area_struct *vma = fe->vma;
	struct mem_cgroup *memcg;
	pgtable_t pgtable;
	unsigned long haddr = fe->address & HPAGE_PMD_MASK;

	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (mem_cgroup_try_charge(page, vma->vm_mm, gfp, &memcg, true)) {
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}

	pgtable = pte_alloc_one(vma->vm_mm, haddr);
	if (unlikely(!pgtable)) {
		mem_cgroup_cancel_charge(page, memcg, true);
		put_page(page);
		return VM_FAULT_OOM;
	}

	clear_huge_page(page, haddr, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__SetPageUptodate(page);

	fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);
	if (unlikely(!pmd_none(*fe->pmd))) {
		spin_unlock(fe->ptl);
		mem_cgroup_cancel_charge(page, memcg, true);
		put_page(page);
		pte_free(vma->vm_mm, pgtable);
	} else {
		pmd_t entry;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			int ret;

			spin_unlock(fe->ptl);
			mem_cgroup_cancel_charge(page, memcg, true);
			put_page(page);
			pte_free(vma->vm_mm, pgtable);
			ret = handle_userfault(fe, VM_UFFD_MISSING);
			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			return ret;
		}

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		page_add_new_anon_rmap(page, vma, haddr, true);
		mem_cgroup_commit_charge(page, memcg, false, true);
		lru_cache_add_active_or_unevictable(page, vma);
		pgtable_trans_huge_deposit(vma->vm_mm, fe->pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, fe->pmd, entry);
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		atomic_long_inc(&vma->vm_mm->nr_ptes);
		spin_unlock(fe->ptl);
		count_vm_event(THP_FAULT_ALLOC);
	}

	return 0;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
kirill a. shutemov  292     75.26%   5        62.50%
andrea arcangeli    35      9.02%    1        12.50%
ebru akagunduz      33      8.51%    1        12.50%
li shaohua          28      7.22%    1        12.50%
Total               388     100.00%  8        100.00%

/*
 * If THP defrag is set to always then directly reclaim/compact as necessary
 * If set to defer then do only background reclaim/compact and defer to khugepaged
 * If set to madvise and the VMA is flagged then directly reclaim/compact
 * When direct reclaim/compact is allowed, don't retry except for flagged VMA's
 */
static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
{
	bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);

	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
				&transparent_hugepage_flags) && vma_madvised)
		return GFP_TRANSHUGE;
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
						&transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
						&transparent_hugepage_flags))
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);

	return GFP_TRANSHUGE_LIGHT;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
andrea arcangeli    34      41.98%   1        33.33%
vlastimil babka     29      35.80%   1        33.33%
kirill a. shutemov  18      22.22%   1        33.33%
Total               81      100.00%  3        100.00%
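
Spelled out, the if/else chain in alloc_hugepage_direct_gfpmask() amounts to the following policy (a restatement for illustration, not additional kernel code):

/*
 * defrag=always,  VMA madvised      -> GFP_TRANSHUGE
 * defrag=always,  VMA not madvised  -> GFP_TRANSHUGE | __GFP_NORETRY
 * defrag=defer                      -> GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM
 * defrag=madvise, VMA madvised      -> GFP_TRANSHUGE
 * anything else                     -> GFP_TRANSHUGE_LIGHT
 */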

/* Caller must hold page table lock. */
static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return false;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	if (pgtable)
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	atomic_long_inc(&mm->nr_ptes);
	return true;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
kirill a. shutemov  77      77.00%   1        50.00%
andrea arcangeli    23      23.00%   1        50.00%
Total               100     100.00%  2        100.00%


int do_huge_pmd_anonymous_page(struct fault_env *fe)
{
	struct vm_area_struct *vma = fe->vma;
	gfp_t gfp;
	struct page *page;
	unsigned long haddr = fe->address & HPAGE_PMD_MASK;

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
		return VM_FAULT_OOM;
	if (!(fe->flags & FAULT_FLAG_WRITE) &&
			!mm_forbids_zeropage(vma->vm_mm) &&
			transparent_hugepage_use_zero_page()) {
		pgtable_t pgtable;
		struct page *zero_page;
		bool set;
		int ret;
		pgtable = pte_alloc_one(vma->vm_mm, haddr);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = get_huge_zero_page();
		if (unlikely(!zero_page)) {
			pte_free(vma->vm_mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);
		ret = 0;
		set = false;
		if (pmd_none(*fe->pmd)) {
			if (userfaultfd_missing(vma)) {
				spin_unlock(fe->ptl);
				ret = handle_userfault(fe, VM_UFFD_MISSING);
				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			} else {
				set_huge_zero_page(pgtable, vma->vm_mm, vma,
						   haddr, fe->pmd, zero_page);
				spin_unlock(fe->ptl);
				set = true;
			}
		} else
			spin_unlock(fe->ptl);
		if (!set) {
			pte_free(vma->vm_mm, pgtable);
			put_huge_zero_page();
		}
		return ret;
	}
	gfp = alloc_hugepage_direct_gfpmask(vma);
	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
	if (unlikely(!page)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	prep_transhuge_page(page);
	return __do_huge_pmd_anonymous_page(fe, page, gfp);
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
kirill a. shutemov  252     69.81%   5        55.56%
andrea arcangeli    95      26.32%   3        33.33%
johannes weiner     14      3.88%    1        11.11%
Total               361     100.00%  9        100.00%


static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write)
{
	struct mm_struct *mm = vma->