Contributors: 5

Author             | Tokens | Token Proportion | Commits | Commit Proportion
-------------------|--------|------------------|---------|------------------
Vincenzo Frascino  |    109 |           72.19% |       2 |            33.33%
Catalin Marinas    |     27 |           17.88% |       1 |            16.67%
Ashok Kumar        |      9 |            5.96% |       1 |            16.67%
Mark Salter        |      4 |            2.65% |       1 |            16.67%
Thomas Gleixner    |      2 |            1.32% |       1 |            16.67%
Total              |    151 |                  |       6 |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Based on arch/arm/mm/copypage.c
*
* Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
* Copyright (C) 2012 ARM Ltd.
*/
#include <linux/bitops.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/mte.h>
/*
 * Copy the contents of @from into @to. When the CPU implements MTE and
 * the source page has been tagged (PG_mte_tagged set), also copy the
 * allocation tags so that @to is a faithful replacement for @from.
 */
void copy_highpage(struct page *to, struct page *from)
{
	/*
	 * page_address() yields the page's mapped kernel virtual address;
	 * keep these as void * (they are memory addresses, not struct page
	 * descriptors) for copy_page()/mte_copy_page_tags() below.
	 */
	void *kto = page_address(to);
	void *kfrom = page_address(from);

	copy_page(kto, kfrom);

	if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) {
		set_bit(PG_mte_tagged, &to->flags);
		page_kasan_tag_reset(to);
		/*
		 * We need smp_wmb() in between setting the flags and clearing the
		 * tags because if another thread reads page->flags and builds a
		 * tagged address out of it, there is an actual dependency to the
		 * memory access, but on the current thread we do not guarantee that
		 * the new page->flags are visible before the tags were updated.
		 */
		smp_wmb();
		mte_copy_page_tags(kto, kfrom);
	}
}
EXPORT_SYMBOL(copy_highpage);
/*
 * Copy a page destined for a userspace mapping: duplicate the data (and
 * MTE tags, when the source page carries them) via copy_highpage(), then
 * flush the destination's D-cache so userspace observes the new contents.
 * @vaddr and @vma are part of the generic interface and are unused here.
 */
void copy_user_highpage(struct page *to, struct page *from,
		unsigned long vaddr, struct vm_area_struct *vma)
{
	/* Data (and tag) duplication first ... */
	copy_highpage(to, from);
	/* ... then make it visible to the user mapping. */
	flush_dcache_page(to);
}
EXPORT_SYMBOL_GPL(copy_user_highpage);