Contributors: 11

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| JoonSoo Kim | 194 | 45.75% | 4 | 19.05% |
| Vlastimil Babka | 114 | 26.89% | 4 | 19.05% |
| Hugh Dickins | 48 | 11.32% | 1 | 4.76% |
| Matthew Wilcox | 21 | 4.95% | 2 | 9.52% |
| Zi Yan | 17 | 4.01% | 2 | 9.52% |
| Andrew Morton | 13 | 3.07% | 3 | 14.29% |
| Christoph Lameter | 7 | 1.65% | 1 | 4.76% |
| Yixuan Cao | 5 | 1.18% | 1 | 4.76% |
| Mel Gorman | 2 | 0.47% | 1 | 4.76% |
| Linus Torvalds (pre-git) | 2 | 0.47% | 1 | 4.76% |
| Greg Kroah-Hartman | 1 | 0.24% | 1 | 4.76% |
| Total | 424 | | 21 | |
 
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PAGE_OWNER_H
#define __LINUX_PAGE_OWNER_H
#include <linux/jump_label.h>
#ifdef CONFIG_PAGE_OWNER
/* True once the page_owner infrastructure has been initialised; the
 * inline wrappers below test this static key before calling the real
 * implementations. */
extern struct static_key_false page_owner_inited;
extern struct page_ext_operations page_owner_ops;
/* Out-of-line implementations (defined in mm/page_owner.c — verify). */
extern void __reset_page_owner(struct page *page, unsigned short order);
extern void __set_page_owner(struct page *page,
			unsigned short order, gfp_t gfp_mask);
extern void __split_page_owner(struct page *page, int old_order,
			int new_order);
extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
extern void __set_page_owner_migrate_reason(struct page *page, int reason);
extern void __dump_page_owner(const struct page *page);
extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone);
/*
 * Clear the page_owner record for @page (of the given @order).
 * Cheap no-op until page_owner has been initialised.
 */
static inline void reset_page_owner(struct page *page, unsigned short order)
{
	if (!static_branch_unlikely(&page_owner_inited))
		return;
	__reset_page_owner(page, order);
}
/*
 * Record ownership information for a freshly allocated @page.
 * Cheap no-op until page_owner has been initialised.
 */
static inline void set_page_owner(struct page *page,
			unsigned short order, gfp_t gfp_mask)
{
	if (!static_branch_unlikely(&page_owner_inited))
		return;
	__set_page_owner(page, order, gfp_mask);
}
/*
 * Propagate ownership data when a page of @old_order is split into
 * pages of @new_order. Cheap no-op until page_owner has been
 * initialised.
 */
static inline void split_page_owner(struct page *page, int old_order,
			int new_order)
{
	if (!static_branch_unlikely(&page_owner_inited))
		return;
	__split_page_owner(page, old_order, new_order);
}
/*
 * Copy the owner record from @old to @newfolio (used on migration —
 * TODO confirm against callers). Cheap no-op until page_owner has
 * been initialised.
 */
static inline void folio_copy_owner(struct folio *newfolio, struct folio *old)
{
	if (!static_branch_unlikely(&page_owner_inited))
		return;
	__folio_copy_owner(newfolio, old);
}
/*
 * Annotate @page's owner record with the migration @reason.
 * Cheap no-op until page_owner has been initialised.
 */
static inline void set_page_owner_migrate_reason(struct page *page, int reason)
{
	if (!static_branch_unlikely(&page_owner_inited))
		return;
	__set_page_owner_migrate_reason(page, reason);
}
/*
 * Print the recorded owner information for @page.
 * Cheap no-op until page_owner has been initialised.
 */
static inline void dump_page_owner(const struct page *page)
{
	if (!static_branch_unlikely(&page_owner_inited))
		return;
	__dump_page_owner(page);
}
#else
/* CONFIG_PAGE_OWNER disabled: no-op stub. */
static inline void reset_page_owner(struct page *page, unsigned short order)
{
}
/* CONFIG_PAGE_OWNER disabled: no-op stub. */
static inline void set_page_owner(struct page *page,
			unsigned short order, gfp_t gfp_mask)
{
}
/* CONFIG_PAGE_OWNER disabled: no-op stub. */
static inline void split_page_owner(struct page *page, int old_order,
			int new_order)
{
}
/* CONFIG_PAGE_OWNER disabled: no-op stub. */
static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio)
{
}
/* CONFIG_PAGE_OWNER disabled: no-op stub. */
static inline void set_page_owner_migrate_reason(struct page *page, int reason)
{
}
/* CONFIG_PAGE_OWNER disabled: no-op stub. */
static inline void dump_page_owner(const struct page *page)
{
}
#endif /* CONFIG_PAGE_OWNER */
#endif /* __LINUX_PAGE_OWNER_H */