Contributors: 6

Author                  Tokens  Token Proportion  Commits  Commit Proportion
Ben Hutchings              208            90.83%        2             25.00%
David Decotigny              8             3.49%        1             12.50%
Amir Vadai                   7             3.06%        1             12.50%
Eli Cohen                    3             1.31%        2             25.00%
Thomas Gleixner              2             0.87%        1             12.50%
Gustavo A. R. Silva          1             0.44%        1             12.50%
Total                      229                          8


/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __LINUX_CPU_RMAP_H
#define __LINUX_CPU_RMAP_H

/*
 * cpu_rmap.h: CPU affinity reverse-map support
 * Copyright 2011 Solarflare Communications Inc.
 */

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/kref.h>

/**
 * struct cpu_rmap - CPU affinity reverse-map
 * @refcount: kref for object
 * @size: Number of objects to be reverse-mapped
 * @obj: Pointer to array of object pointers
 * @near: For each CPU, the index and distance to the nearest object,
 *      based on affinity masks
 */
struct cpu_rmap {
	struct kref	refcount;
	u16		size;
	void		**obj;
	struct {
		u16	index;
		u16	dist;
	}		near[];
};
#define CPU_RMAP_DIST_INF 0xffff
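
/*
 * The near[] table is what makes this a *reverse* map: it is indexed by CPU
 * rather than by object.  Every entry's distance starts out as
 * CPU_RMAP_DIST_INF, meaning no object has been mapped near that CPU yet.
 * A minimal sketch of inspecting the table directly (normally the lookup
 * helpers below are used instead; "rmap" is an already-populated map):
 *
 *	unsigned int cpu = raw_smp_processor_id();
 *
 *	if (rmap->near[cpu].dist == CPU_RMAP_DIST_INF)
 *		pr_debug("cpu%u: no object mapped yet\n", cpu);
 *	else
 *		pr_debug("cpu%u -> object index %u\n", cpu, rmap->near[cpu].index);
 */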

extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags);
extern int cpu_rmap_put(struct cpu_rmap *rmap);

extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj);
extern int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
			   const struct cpumask *affinity);
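
/*
 * Typical construction sequence, as a sketch rather than code from any
 * particular caller ("nobjs", "objs[]" and "masks[]" are hypothetical):
 *
 *	struct cpu_rmap *rmap;
 *	int i, index, err;
 *
 *	rmap = alloc_cpu_rmap(nobjs, GFP_KERNEL);
 *	if (!rmap)
 *		return -ENOMEM;
 *
 *	for (i = 0; i < nobjs; i++) {
 *		index = cpu_rmap_add(rmap, objs[i]);	// new object's index, or -errno
 *		if (index < 0)
 *			goto fail;
 *		err = cpu_rmap_update(rmap, index, masks[i]);	// record its CPU affinity
 *		if (err)
 *			goto fail;
 *	}
 *	...
 *	cpu_rmap_put(rmap);	// drop the reference taken by alloc_cpu_rmap()
 */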

static inline u16 cpu_rmap_lookup_index(struct cpu_rmap *rmap, unsigned int cpu)
{
	return rmap->near[cpu].index;
}

static inline void *cpu_rmap_lookup_obj(struct cpu_rmap *rmap, unsigned int cpu)
{
	return rmap->obj[rmap->near[cpu].index];
}
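
/*
 * Lookup is the hot-path side: given the CPU we are running on, find the
 * nearest mapped object.  A sketch ("struct my_queue" is a hypothetical
 * object type; the caller must hold a reference to rmap):
 *
 *	struct my_queue *q;
 *
 *	q = cpu_rmap_lookup_obj(rmap, raw_smp_processor_id());
 *
 * or, when only the index is needed:
 *
 *	u16 index = cpu_rmap_lookup_index(rmap, raw_smp_processor_id());
 */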

/**
 * alloc_irq_cpu_rmap - allocate CPU affinity reverse-map for IRQs
 * @size: Number of IRQs to be mapped
 *
 * Must be called in process context.
 */
static inline struct cpu_rmap *alloc_irq_cpu_rmap(unsigned int size)
{
	return alloc_cpu_rmap(size, GFP_KERNEL);
}
extern void free_irq_cpu_rmap(struct cpu_rmap *rmap);

int irq_cpu_rmap_remove(struct cpu_rmap *rmap, int irq);
extern int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq);
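
/*
 * The IRQ wrappers are what network drivers typically use for accelerated
 * RFS: build the rmap at setup time, add each RX interrupt to it, and let
 * the stack map a CPU back to the IRQ (and hence queue) whose affinity is
 * nearest.  A sketch assuming a hypothetical MSI-X setup ("nvec", "pdev"):
 *
 *	struct cpu_rmap *rmap;
 *	int i, err;
 *
 *	rmap = alloc_irq_cpu_rmap(nvec);
 *	if (!rmap)
 *		return -ENOMEM;
 *
 *	for (i = 0; i < nvec; i++) {
 *		err = irq_cpu_rmap_add(rmap, pci_irq_vector(pdev, i));
 *		if (err)
 *			goto fail;
 *	}
 *	...
 *	free_irq_cpu_rmap(rmap);	// also unregisters the IRQ affinity notifiers
 */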

#endif /* __LINUX_CPU_RMAP_H */