Contributors: 10
Author Tokens Token Proportion Commits Commit Proportion
Vincenzo Frascino 92 33.45% 1 6.67%
Yang Shi 85 30.91% 1 6.67%
Catalin Marinas 71 25.82% 5 33.33%
Ashok Kumar 6 2.18% 1 6.67%
Suzuki K. Poulose 5 1.82% 1 6.67%
Peter Collingbourne 5 1.82% 2 13.33%
Mark Salter 4 1.45% 1 6.67%
Marc Zyngier 3 1.09% 1 6.67%
tongtiangen 2 0.73% 1 6.67%
Thomas Gleixner 2 0.73% 1 6.67%
Total 275 15

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/copypage.c
 *
 * Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bitops.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/mte.h>

/*
 * Copy page data from @from to @to, and — when the CPU supports MTE —
 * also copy the page's allocation tags so the destination is an exact
 * replacement for the source.
 */
void copy_highpage(struct page *to, struct page *from)
{
	void *kto = page_address(to);
	void *kfrom = page_address(from);
	struct folio *src = page_folio(from);
	struct folio *dst = page_folio(to);
	unsigned int i, nr_pages;

	/* Copy the data first; tags are copied separately below. */
	copy_page(kto, kfrom);

	/*
	 * Reset the KASAN tag of the destination page before tags are
	 * (re)populated, so HW tag-based KASAN does not report a
	 * mismatch against stale tag metadata.
	 */
	if (kasan_hw_tags_enabled())
		page_kasan_tag_reset(to);

	/* Without MTE there are no allocation tags to copy. */
	if (!system_supports_mte())
		return;

	if (folio_test_hugetlb(src)) {
		/*
		 * For hugetlb the tagged state is tracked per folio, not
		 * per page. Only copy tags when the source folio is
		 * tagged, and only once — when @from is the head page —
		 * since this function may be called for each subpage of
		 * the huge page.
		 */
		if (!folio_test_hugetlb_mte_tagged(src) ||
		    from != folio_page(src, 0))
			return;

		/* Destination is a fresh folio; it must not be tagged yet. */
		WARN_ON_ONCE(!folio_try_hugetlb_mte_tagging(dst));

		/*
		 * Populate tags for all subpages.
		 *
		 * Don't assume the first page is head page since
		 * huge page copy may start from any subpage.
		 */
		nr_pages = folio_nr_pages(src);
		for (i = 0; i < nr_pages; i++) {
			kfrom = page_address(folio_page(src, i));
			kto = page_address(folio_page(dst, i));
			mte_copy_page_tags(kto, kfrom);
		}
		folio_set_hugetlb_mte_tagged(dst);
	} else if (page_mte_tagged(from)) {
		/* It's a new page, shouldn't have been tagged yet */
		WARN_ON_ONCE(!try_page_mte_tagging(to));

		mte_copy_page_tags(kto, kfrom);
		set_page_mte_tagged(to);
	}
}
EXPORT_SYMBOL(copy_highpage);

/*
 * Copy a page on behalf of a userspace mapping: duplicate the data (and
 * MTE tags, via copy_highpage()), then flush the destination's D-cache
 * so the user mapping observes the new contents.
 *
 * @vaddr and @vma are part of the generic interface but are not needed
 * here — no virtually-indexed cache maintenance keyed on the user
 * address is required on this architecture.
 */
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	copy_highpage(to, from);
	flush_dcache_page(to);
}
EXPORT_SYMBOL_GPL(copy_user_highpage);