Contributors: 20

Author                     Tokens  Token Proportion  Commits  Commit Proportion
Pavel Tatashin                892            72.46%        4             14.81%
Peter Xu                      153            12.43%        2              7.41%
Linus Torvalds (pre-git)       45             3.66%        4             14.81%
Charan Teja Reddy              24             1.95%        1              3.70%
Ruihan Li                      24             1.95%        1              3.70%
Matthew Wilcox                 18             1.46%        1              3.70%
David Hildenbrand              12             0.97%        1              3.70%
Kamezawa Hiroyuki              10             0.81%        1              3.70%
Hugh Dickins                    9             0.73%        1              3.70%
Miaohe Lin                      7             0.57%        1              3.70%
Stanislaw Gruszka               6             0.49%        1              3.70%
Ryan Roberts                    6             0.49%        1              3.70%
KeMeng Shi                      5             0.41%        1              3.70%
Michal Nazarewicz               4             0.32%        1              3.70%
Alexander Potapenko             4             0.32%        1              3.70%
tongtiangen                     4             0.32%        1              3.70%
Christophe Jaillet              4             0.32%        1              3.70%
Chin En Lin                     2             0.16%        1              3.70%
David Alan Gilbert              1             0.08%        1              3.70%
Greg Kroah-Hartman              1             0.08%        1              3.70%
Total                        1231                          27


// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (c) 2021, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/page_table_check.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#undef pr_fmt
#define pr_fmt(fmt)	"page_table_check: " fmt

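/*
 * Per-page counters, kept in page_ext, tracking how many times the page is
 * currently mapped into user page tables as anonymous memory and as
 * file-backed memory.  The checks below enforce that a page is never mapped
 * as both types at once and that an anonymous page is never mapped writable
 * more than once.
 */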
struct page_table_check {
	atomic_t anon_map_count;
	atomic_t file_map_count;
};

static bool __page_table_check_enabled __initdata =
				IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);

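/*
 * Checks default to off: the static key starts "true" (disabled) and
 * init_page_table_check() flips it once the feature is requested and
 * page_ext is ready.  Roughly (a sketch, not a verbatim copy of the
 * header), the inline wrappers in <linux/page_table_check.h> gate on it
 * before calling the __page_table_check_*() helpers below:
 *
 *	static inline void page_table_check_pte_clear(struct mm_struct *mm,
 *						      pte_t pte)
 *	{
 *		if (static_branch_likely(&page_table_check_disabled))
 *			return;
 *
 *		__page_table_check_pte_clear(mm, pte);
 *	}
 */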
DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);

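/*
 * Parse the "page_table_check=" early boot parameter, e.g.
 * "page_table_check=on", which enables the checks when they are not
 * already enforced by CONFIG_PAGE_TABLE_CHECK_ENFORCED.
 */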
static int __init early_page_table_check_param(char *buf)
{
	return kstrtobool(buf, &__page_table_check_enabled);
}

early_param("page_table_check", early_page_table_check_param);

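/* page_ext ->need callback: allocate the per-page data only when requested. */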
static bool __init need_page_table_check(void)
{
	return __page_table_check_enabled;
}

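/*
 * page_ext ->init callback: turn the checks on by disabling the
 * "disabled" static key.
 */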
static void __init init_page_table_check(void)
{
	if (!__page_table_check_enabled)
		return;
	static_branch_disable(&page_table_check_disabled);
}

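/* Descriptor handed to page_ext: per-page data size plus the callbacks above. */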
struct page_ext_operations page_table_check_ops = {
	.size = sizeof(struct page_table_check),
	.need = need_page_table_check,
	.init = init_page_table_check,
	.need_shared_flags = false,
};

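/* Return this page's page_table_check data stored in its page_ext. */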
static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
{
	BUG_ON(!page_ext);
	return page_ext_data(page_ext, &page_table_check_ops);
}

/*
 * An entry is removed from the page table: decrement the counters for that
 * page, verify that the page is of the expected type, and that no counter
 * becomes negative.
 */
static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = page_ext_get(page);

	if (!page_ext)
		return;

	BUG_ON(PageSlab(page));
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

/*
 * A new entry is added to the page table: increment the counters for that
 * page and verify that the page is of the expected type and is not being
 * mapped with a different type by another process.
 */
static void page_table_check_set(unsigned long pfn, unsigned long pgcnt,
				 bool rw)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = page_ext_get(page);

	if (!page_ext)
		return;

	BUG_ON(PageSlab(page));
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}
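
/*
 * Example of what the two helpers above allow: a file-backed page may be
 * mapped, read-only or writable, any number of times, and an anonymous page
 * may be mapped read-only several times (e.g. after fork()); but installing
 * a writable mapping of an anonymous page that is already mapped elsewhere,
 * or mixing anonymous and file-backed mappings of the same page, triggers a
 * BUG.
 */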

/*
 * The page is on the free list or is being allocated: verify that the
 * counters are zero and crash if they are not.
 */
void __page_table_check_zero(struct page *page, unsigned int order)
{
	struct page_ext *page_ext;
	unsigned long i;

	BUG_ON(PageSlab(page));

	page_ext = page_ext_get(page);

	if (!page_ext)
		return;

	for (i = 0; i < (1ul << order); i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		BUG_ON(atomic_read(&ptc->anon_map_count));
		BUG_ON(atomic_read(&ptc->file_map_count));
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

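/*
 * Entry-clear hooks for each page-table level: account the unmap of every
 * user-accessible page the entry covered.  Kernel (init_mm) page tables are
 * never checked.
 */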
void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
{
	if (&init_mm == mm)
		return;

	if (pte_user_accessible_page(pte)) {
		page_table_check_clear(pte_pfn(pte), PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pte_clear);

void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (pmd_user_accessible_page(pmd)) {
		page_table_check_clear(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_clear);

void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
{
	if (&init_mm == mm)
		return;

	if (pud_user_accessible_page(pud)) {
		page_table_check_clear(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pud_clear);

/* Whether the swap entry cached the writable bit of the original mapping */
static inline bool swap_cached_writable(swp_entry_t entry)
{
	return is_writable_device_exclusive_entry(entry) ||
	    is_writable_device_private_entry(entry) ||
	    is_writable_migration_entry(entry);
}

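/*
 * A uffd-wp protected entry must never be writable: a present pte must have
 * its write bit clear, and a non-present (swap-type) pte must not carry a
 * cached writable bit either.
 */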
static inline void page_table_check_pte_flags(pte_t pte)
{
	if (pte_present(pte) && pte_uffd_wp(pte))
		WARN_ON_ONCE(pte_write(pte));
	else if (is_swap_pte(pte) && pte_swp_uffd_wp(pte))
		WARN_ON_ONCE(swap_cached_writable(pte_to_swp_entry(pte)));
}

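/*
 * 'nr' contiguous ptes, starting with 'pte', are installed at 'ptep': first
 * account the entries being overwritten as cleared, then count the new
 * mappings, remembering whether they are writable.
 */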
void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
		unsigned int nr)
{
	unsigned int i;

	if (&init_mm == mm)
		return;

	page_table_check_pte_flags(pte);

	for (i = 0; i < nr; i++)
		__page_table_check_pte_clear(mm, ptep_get(ptep + i));
	if (pte_user_accessible_page(pte))
		page_table_check_set(pte_pfn(pte), nr, pte_write(pte));
}
EXPORT_SYMBOL(__page_table_check_ptes_set);

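/* Same uffd-wp vs. writable consistency check as above, at the pmd level. */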
static inline void page_table_check_pmd_flags(pmd_t pmd)
{
	if (pmd_present(pmd) && pmd_uffd_wp(pmd))
		WARN_ON_ONCE(pmd_write(pmd));
	else if (is_swap_pmd(pmd) && pmd_swp_uffd_wp(pmd))
		WARN_ON_ONCE(swap_cached_writable(pmd_to_swp_entry(pmd)));
}

void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	page_table_check_pmd_flags(pmd);

	__page_table_check_pmd_clear(mm, *pmdp);
	if (pmd_user_accessible_page(pmd)) {
		page_table_check_set(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT,
				     pmd_write(pmd));
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_set);

void __page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pud_clear(mm, *pudp);
	if (pud_user_accessible_page(pud)) {
		page_table_check_set(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT,
				     pud_write(pud));
	}
}
EXPORT_SYMBOL(__page_table_check_pud_set);

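/*
 * The page table mapped by a non-leaf pmd is being torn down: account a
 * clear for every pte it holds.
 */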
void __page_table_check_pte_clear_range(struct mm_struct *mm,
					unsigned long addr,
					pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
		pte_t *ptep = pte_offset_map(&pmd, addr);
		unsigned long i;

		if (WARN_ON(!ptep))
			return;
		for (i = 0; i < PTRS_PER_PTE; i++) {
			__page_table_check_pte_clear(mm, ptep_get(ptep));
			addr += PAGE_SIZE;
			ptep++;
		}
		pte_unmap(ptep - PTRS_PER_PTE);
	}
}