Contributors: 18
Author              Tokens  Token Proportion  Commits  Commit Proportion
JoonSoo Kim            136            44.16%        5             18.52%
Marek Szyprowski        43            13.96%        5             18.52%
Laura Abbott            39            12.66%        2              7.41%
Aslan Bakirov           28             9.09%        1              3.70%
Hari Bathini            10             3.25%        1              3.70%
Sasha Levin              8             2.60%        1              3.70%
David Hildenbrand        7             2.27%        1              3.70%
Weijie Yang              6             1.95%        1              3.70%
Thierry Reding           6             1.95%        1              3.70%
Pintu Kumar              5             1.62%        1              3.70%
Barry Song               4             1.30%        1              3.70%
MinChan Kim              4             1.30%        1              3.70%
Mike Travis              4             1.30%        1              3.70%
Mike Kravetz             3             0.97%        1              3.70%
Zi Yan                   2             0.65%        1              3.70%
Motohiro Kosaki          1             0.32%        1              3.70%
Greg Kroah-Hartman       1             0.32%        1              3.70%
Lucas Stach              1             0.32%        1              3.70%
Total                  308                         27


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CMA_H__
#define __CMA_H__

#include <linux/init.h>
#include <linux/types.h>
#include <linux/numa.h>

/*
 * There is always at least the global CMA area and a few optional
 * areas configured in the kernel .config.
 */
#ifdef CONFIG_CMA_AREAS
#define MAX_CMA_AREAS	(1 + CONFIG_CMA_AREAS)
#endif

#define CMA_MAX_NAME 64

/*
 * Since the buddy -- especially pageblock merging and alloc_contig_range() --
 * can deal with only some pageblocks of a higher-order page being
 * MIGRATE_CMA, we can use pageblock_nr_pages as the minimum CMA alignment.
 */
#define CMA_MIN_ALIGNMENT_PAGES pageblock_nr_pages
#define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES)

struct cma;

extern unsigned long totalcma_pages;
extern phys_addr_t cma_get_base(const struct cma *cma);
extern unsigned long cma_get_size(const struct cma *cma);
extern const char *cma_get_name(const struct cma *cma);

extern int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid);
static inline int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	return cma_declare_contiguous_nid(base, size, limit, alignment,
			order_per_bit, fixed, name, res_cma, NUMA_NO_NODE);
}
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
					unsigned int order_per_bit,
					const char *name,
					struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
			      bool no_warn);
extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);

extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);

extern void cma_reserve_pages_on_error(struct cma *cma);
#endif
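
Taken together, the declarations above cover the whole lifecycle of a CMA area: a region is reserved early in boot with cma_declare_contiguous() (or registered over an already-reserved memblock range with cma_init_reserved_mem()), and physically contiguous pages are later taken from it with cma_alloc() and handed back with cma_release(). The sketch below is not part of the header; the foo_* names, the 16 MiB size, and the call sites are made up purely to illustrate, under those assumptions, how the declarations fit together.

#include <linux/cma.h>
#include <linux/sizes.h>
#include <linux/printk.h>
#include <linux/mm.h>

/* Hypothetical driver-owned CMA area. */
static struct cma *foo_cma;

/*
 * Reserve a 16 MiB area named "foo". This must run from early
 * architecture/platform setup code, while memblock is still the boot
 * allocator. Passing 0 for base, limit and alignment lets the core choose;
 * the alignment is raised to at least CMA_MIN_ALIGNMENT_BYTES internally.
 */
static int __init foo_cma_reserve(void)
{
	return cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false,
				      "foo", &foo_cma);
}

/* At runtime: take a physically contiguous, page-aligned run of pages... */
static struct page *foo_alloc_buffer(unsigned long nr_pages)
{
	return cma_alloc(foo_cma, nr_pages, 0, false);
}

/* ...and return it to the area when the device is done with it. */
static void foo_free_buffer(struct page *pages, unsigned long nr_pages)
{
	if (!cma_release(foo_cma, pages, nr_pages))
		pr_warn("foo: buffer did not come from the CMA area\n");
}

The remaining helpers are introspection and error handling: cma_for_each_area(), together with cma_get_name(), cma_get_base() and cma_get_size(), lets callers outside mm/ visit every registered area, and cma_reserve_pages_on_error() asks the core to keep an area's memory reserved rather than releasing it to the buddy allocator if the area later fails to activate.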