Contributors: 20
Author                    Tokens  Token Proportion  Commits  Commit Proportion
JoonSoo Kim                  141            40.17%        5             15.62%
Charan Teja Reddy             34             9.69%        1              3.12%
Vlastimil Babka               29             8.26%        3              9.38%
Li Zhe                        28             7.98%        1              3.12%
KeMeng Shi                    26             7.41%        1              3.12%
Vladimir Davydov              19             5.41%        1              3.12%
Andrew Morton                 14             3.99%        1              3.12%
Linus Torvalds (pre-git)      11             3.13%        5             15.62%
Zhenhua HUANG                 10             2.85%        1              3.12%
Len Brown                      7             1.99%        2              6.25%
Kamezawa Hiroyuki              6             1.71%        1              3.12%
Pavel Tatashin                 5             1.42%        1              3.12%
Arjan van de Ven               5             1.42%        1              3.12%
Al Viro                        4             1.14%        1              3.12%
Andy Whitcroft                 3             0.85%        1              3.12%
Rusty Russell                  3             0.85%        1              3.12%
Mel Gorman                     3             0.85%        2              6.25%
Greg Kroah-Hartman             1             0.28%        1              3.12%
SeongJae Park                  1             0.28%        1              3.12%
Rik Van Riel                   1             0.28%        1              3.12%
Total                        351                          32


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PAGE_EXT_H
#define __LINUX_PAGE_EXT_H

#include <linux/types.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>

struct pglist_data;

#ifdef CONFIG_PAGE_EXTENSION
/**
 * struct page_ext_operations - per page_ext client operations
 * @offset: Offset to the client's data within page_ext. Offset is returned to
 *          the client by page_ext_init.
 * @size: The size of the client data within page_ext.
 * @need: Function that returns true if client requires page_ext.
 * @init: (optional) Called to initialize client once page_exts are allocated.
 * @need_shared_flags: True when client is using shared page_ext->flags
 *                     field.
 *
 * Each Page Extension client must define page_ext_operations in
 * page_ext_ops array.
 */
struct page_ext_operations {
	size_t offset;
	size_t size;
	bool (*need)(void);
	void (*init)(void);
	bool need_shared_flags;
};
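
/*
 * Illustrative sketch (not part of this header): a hypothetical client
 * "foo" could describe its per-page data roughly as below and would then
 * need an entry in the page_ext_ops array in mm/page_ext.c.  The foo_*
 * names are made up purely for illustration.
 *
 *	struct foo_ext_data {
 *		unsigned long last_pfn;
 *	};
 *
 *	static bool need_foo(void)
 *	{
 *		return true;	// e.g. check a boot parameter here
 *	}
 *
 *	static void init_foo(void)
 *	{
 *		// one-time setup, runs once page_ext storage exists
 *	}
 *
 *	struct page_ext_operations foo_ext_ops = {
 *		.size			= sizeof(struct foo_ext_data),
 *		.need			= need_foo,
 *		.init			= init_foo,
 *		.need_shared_flags	= false,
 *	};
 */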

/*
 * Users of page_ext_flags must set need_shared_flags to true.
 */
enum page_ext_flags {
	PAGE_EXT_OWNER,
	PAGE_EXT_OWNER_ALLOCATED,
#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
	PAGE_EXT_YOUNG,
	PAGE_EXT_IDLE,
#endif
};
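
/*
 * Sketch of how a client that sets .need_shared_flags = true might use
 * these bits on the shared page_ext->flags word with the usual bitops
 * (simplified, loosely modeled on the page owner client; page_ext_get()
 * and page_ext_put() are declared further down in this header):
 *
 *	struct page_ext *page_ext = page_ext_get(page);
 *
 *	if (page_ext) {
 *		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
 *		if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
 *			__set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
 *		page_ext_put(page_ext);
 *	}
 */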

/*
 * Page Extension can be considered as an extended mem_map:
 * a page_ext is associated with every page descriptor and
 * lets clients attach extra information to the page.
 * All page_ext are allocated at boot or on memory hotplug,
 * so the page_ext for a pfn always exists.
 */
struct page_ext {
	unsigned long flags;
};

extern bool early_page_ext;
extern unsigned long page_ext_size;
extern void pgdat_page_ext_init(struct pglist_data *pgdat);

static inline bool early_page_ext_enabled(void)
{
	return early_page_ext;
}

#ifdef CONFIG_SPARSEMEM
static inline void page_ext_init_flatmem(void)
{
}
extern void page_ext_init(void);
static inline void page_ext_init_flatmem_late(void)
{
}
#else
extern void page_ext_init_flatmem(void);
extern void page_ext_init_flatmem_late(void);
static inline void page_ext_init(void)
{
}
#endif

extern struct page_ext *page_ext_get(struct page *page);
extern void page_ext_put(struct page_ext *page_ext);

static inline void *page_ext_data(struct page_ext *page_ext,
				  struct page_ext_operations *ops)
{
	return (void *)(page_ext) + ops->offset;
}

static inline struct page_ext *page_ext_next(struct page_ext *curr)
{
	void *next = curr;
	next += page_ext_size;
	return next;
}
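
/*
 * Sketch of the typical lookup pattern for a client (foo_ext_data and
 * foo_ext_ops refer to the hypothetical client sketched above, and
 * nr_pages stands for the number of pages in a higher-order allocation).
 * page_ext_get() may return NULL when page_ext is not ready and must be
 * paired with page_ext_put():
 *
 *	struct page_ext *page_ext = page_ext_get(page);
 *	struct page_ext *cursor = page_ext;
 *	struct foo_ext_data *data;
 *	int i;
 *
 *	if (!page_ext)
 *		return;
 *	for (i = 0; i < nr_pages; i++) {
 *		data = page_ext_data(cursor, &foo_ext_ops);
 *		data->last_pfn = page_to_pfn(page) + i;
 *		cursor = page_ext_next(cursor);
 *	}
 *	page_ext_put(page_ext);
 */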

#else /* !CONFIG_PAGE_EXTENSION */
struct page_ext;

static inline bool early_page_ext_enabled(void)
{
	return false;
}

static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

static inline void page_ext_init(void)
{
}

static inline void page_ext_init_flatmem_late(void)
{
}

static inline void page_ext_init_flatmem(void)
{
}

static inline struct page_ext *page_ext_get(struct page *page)
{
	return NULL;
}

static inline void page_ext_put(struct page_ext *page_ext)
{
}
#endif /* CONFIG_PAGE_EXTENSION */
#endif /* __LINUX_PAGE_EXT_H */