Contributors: 8

| Author                   | Tokens | Token Proportion | Commits | Commit Proportion |
|--------------------------|--------|------------------|---------|-------------------|
| Christophe Leroy         | 109    | 58.92%           | 2       | 18.18%            |
| Nicholas Miehlbradt      | 33     | 17.84%           | 1       | 9.09%             |
| Hari Bathini             | 24     | 12.97%           | 1       | 9.09%             |
| Aneesh Kumar K.V         | 7      | 3.78%            | 1       | 9.09%             |
| Linus Torvalds (pre-git) | 5      | 2.70%            | 3       | 27.27%            |
| Linus Walleij            | 4      | 2.16%            | 1       | 9.09%             |
| Andrew Morton            | 2      | 1.08%            | 1       | 9.09%             |
| Paul Mackerras           | 1      | 0.54%            | 1       | 9.09%             |
| Total                    | 185    |                  | 11      |                   |
/* SPDX-License-Identifier: GPL-2.0 */
/*
* powerpc KFENCE support.
*
* Copyright (C) 2020 CS GROUP France
*/
#ifndef __ASM_POWERPC_KFENCE_H
#define __ASM_POWERPC_KFENCE_H
#include <linux/mm.h>
#include <asm/pgtable.h>
#ifdef CONFIG_PPC64_ELF_ABI_V1
#define ARCH_FUNC_PREFIX "."
#endif
#ifdef CONFIG_KFENCE
extern bool kfence_disabled;
/*
 * Mark KFENCE as disabled for this boot; arch_kfence_init_pool() reads
 * this flag and refuses pool initialisation once it is set.
 */
static inline void disable_kfence(void)
{
kfence_disabled = true;
}
static inline bool arch_kfence_init_pool(void)
{
return !kfence_disabled;
}
#endif
#ifdef CONFIG_PPC64
/*
 * Toggle protection of one KFENCE guard page on 64-bit: delegate to
 * __kernel_map_pages(), unmapping the page when @protect is true and
 * re-mapping it when @protect is false. Always reports success.
 */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	__kernel_map_pages(virt_to_page((void *)addr), 1, !protect);

	return true;
}
#else
/*
 * Toggle protection of one KFENCE guard page on 32-bit by editing the
 * kernel PTE directly: clear _PAGE_PRESENT to protect, set it again to
 * unprotect. Always reports success.
 */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	pte_t *ptep = virt_to_kpte(addr);

	if (!protect) {
		/*
		 * Set _PAGE_PRESENT again. NOTE(review): no TLB flush here,
		 * unlike the protect path — presumably stale non-present
		 * entries are harmless / refilled on access; confirm.
		 */
		pte_update(&init_mm, addr, ptep, 0, _PAGE_PRESENT, 0);
	} else {
		/* Clear _PAGE_PRESENT and flush the page's TLB entry. */
		pte_update(&init_mm, addr, ptep, _PAGE_PRESENT, 0, 0);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}

	return true;
}
#endif
#endif /* __ASM_POWERPC_KFENCE_H */