Contributors: 21

Author                 Tokens  Token %  Commits  Commit %
Joonsoo Kim               150   32.47%        5    15.62%
Yu Zhao                    73   15.80%        1     3.12%
Marek Szyprowski           54   11.69%        5    15.62%
Laura Abbott               44    9.52%        2     6.25%
Frank van der Linden       43    9.31%        3     9.38%
Aslan Bakirov              31    6.71%        1     3.12%
Hari Bathini               10    2.16%        1     3.12%
Sasha Levin                10    2.16%        1     3.12%
David Hildenbrand           7    1.52%        1     3.12%
Weijie Yang                 6    1.30%        1     3.12%
Minchan Kim                 6    1.30%        1     3.12%
Thierry Reding              6    1.30%        1     3.12%
Pintu Kumar                 5    1.08%        1     3.12%
Barry Song                  4    0.87%        1     3.12%
Mike Travis                 4    0.87%        1     3.12%
Mike Kravetz                3    0.65%        1     3.12%
Zi Yan                      2    0.43%        1     3.12%
KOSAKI Motohiro             1    0.22%        1     3.12%
Greg Kroah-Hartman          1    0.22%        1     3.12%
Anshuman Khandual           1    0.22%        1     3.12%
Lucas Stach                 1    0.22%        1     3.12%
Total                     462  100.00%       32   100.00%


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CMA_H__
#define __CMA_H__

#include <linux/init.h>
#include <linux/types.h>
#include <linux/numa.h>

#ifdef CONFIG_CMA_AREAS
#define MAX_CMA_AREAS	CONFIG_CMA_AREAS
#endif

#define CMA_MAX_NAME 64

/*
 * TODO: once the buddy -- especially pageblock merging and alloc_contig_range()
 * -- can deal with only some pageblocks of a higher-order page being
 * MIGRATE_CMA, we can use pageblock_nr_pages.
 */
#define CMA_MIN_ALIGNMENT_PAGES pageblock_nr_pages
#define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES)
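/*
 * Worked example (config-dependent, not from this header): with 4 KiB
 * pages and a pageblock order of 9, as is typical on x86-64,
 * CMA_MIN_ALIGNMENT_PAGES is 512 and CMA_MIN_ALIGNMENT_BYTES works out
 * to 512 * 4 KiB = 2 MiB.
 */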

struct cma;

extern unsigned long totalcma_pages;
extern phys_addr_t cma_get_base(const struct cma *cma);
extern unsigned long cma_get_size(const struct cma *cma);
extern const char *cma_get_name(const struct cma *cma);

extern int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid);
static inline int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	return cma_declare_contiguous_nid(base, size, limit, alignment,
			order_per_bit, fixed, name, res_cma, NUMA_NO_NODE);
}
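
/*
 * Illustrative sketch, not part of this header: an early-boot caller
 * might reserve a 16 MiB area anywhere below 4 GiB like this. The
 * example_cma variable and "example" area name are hypothetical;
 * assumes <linux/sizes.h> and <linux/printk.h>.
 */
static struct cma *example_cma;

static void __init example_cma_reserve(void)
{
	int ret;

	/* base 0 and fixed = false let the allocator pick the placement */
	ret = cma_declare_contiguous(0, SZ_16M, SZ_4G, 0, 0, false,
				     "example", &example_cma);
	if (ret)
		pr_warn("example CMA reservation failed: %d\n", ret);
}
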
extern int __init cma_declare_contiguous_multi(phys_addr_t size,
			phys_addr_t align, unsigned int order_per_bit,
			const char *name, struct cma **res_cma, int nid);
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
					unsigned int order_per_bit,
					const char *name,
					struct cma **res_cma);
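
/*
 * Illustrative sketch: handing an already-reserved region (e.g. from a
 * devicetree reserved-memory node) over to CMA. The base and size here
 * are hypothetical and must be aligned to CMA_MIN_ALIGNMENT_BYTES;
 * order_per_bit = 0 tracks the area at single-page granularity.
 */
static int __init example_register_reserved(phys_addr_t base,
					    phys_addr_t size)
{
	struct cma *cma;

	return cma_init_reserved_mem(base, size, 0, "example-rmem", &cma);
}
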
extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
			      bool no_warn);
extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
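
/*
 * Illustrative sketch: allocate 64 physically contiguous pages from an
 * area, aligned to order 4 (a 16-page boundary), and release them
 * again. The helper names are hypothetical.
 */
static struct page *example_grab_pages(struct cma *cma)
{
	return cma_alloc(cma, 64, 4, false);	/* no_warn = false */
}

static void example_drop_pages(struct cma *cma, struct page *pages)
{
	if (pages)
		cma_release(cma, pages, 64);	/* count must match the alloc */
}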

extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
extern bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end);
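
/*
 * Illustrative sketch: a cma_for_each_area() callback; returning
 * non-zero stops the walk early, so this one visits every area. The
 * helper name is hypothetical and assumes <linux/printk.h>.
 */
static int example_print_area(struct cma *cma, void *data)
{
	phys_addr_t base = cma_get_base(cma);

	pr_info("CMA area %s: base %pa, size %lu bytes\n",
		cma_get_name(cma), &base, cma_get_size(cma));
	return 0;
}
/* ... cma_for_each_area(example_print_area, NULL); ... */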

extern void cma_reserve_pages_on_error(struct cma *cma);

#ifdef CONFIG_CMA
struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
bool cma_free_folio(struct cma *cma, const struct folio *folio);
bool cma_validate_zones(struct cma *cma);
#else
static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
{
	return NULL;
}

static inline bool cma_free_folio(struct cma *cma, const struct folio *folio)
{
	return false;
}
static inline bool cma_validate_zones(struct cma *cma)
{
	return false;
}
#endif
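
/*
 * Illustrative sketch: an order-4 folio round trip. With CONFIG_CMA=n
 * the stubs above make cma_alloc_folio() return NULL, so callers
 * degrade gracefully. The helper name is hypothetical; assumes
 * <linux/gfp.h> and <linux/printk.h>.
 */
static inline void example_folio_roundtrip(struct cma *cma)
{
	struct folio *folio = cma_alloc_folio(cma, 4, GFP_KERNEL);

	if (!folio)
		return;
	/* ... use the 16-page folio ... */
	if (!cma_free_folio(cma, folio))
		pr_warn("folio was not allocated from this CMA area\n");
}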

#endif