Contributors: 36
| Author |
Tokens |
Token Proportion |
Commits |
Commit Proportion |
| David Hildenbrand |
245 |
36.68% |
16 |
25.00% |
| Dave Hansen |
146 |
21.86% |
3 |
4.69% |
| Yasunori Goto |
48 |
7.19% |
4 |
6.25% |
| Gregory Price |
31 |
4.64% |
1 |
1.56% |
| Israel Batista |
27 |
4.04% |
3 |
4.69% |
| Nathan Fontenot |
20 |
2.99% |
2 |
3.12% |
| Liu Shixin |
19 |
2.84% |
1 |
1.56% |
| Naoya Horiguchi |
16 |
2.40% |
1 |
1.56% |
| Gary Hade |
12 |
1.80% |
1 |
1.56% |
| Donet Tom |
11 |
1.65% |
1 |
1.56% |
| Aneesh Kumar K.V |
8 |
1.20% |
1 |
1.56% |
| Heiko Carstens |
8 |
1.20% |
1 |
1.56% |
| Mike Travis |
8 |
1.20% |
1 |
1.56% |
| Nadia Derbey |
7 |
1.05% |
2 |
3.12% |
| Dave Jiang |
7 |
1.05% |
3 |
4.69% |
| Rashika Kheria |
7 |
1.05% |
1 |
1.56% |
| Mathieu Desnoyers |
6 |
0.90% |
1 |
1.56% |
| Wen Congyang |
5 |
0.75% |
1 |
1.56% |
| Seth Jennings |
4 |
0.60% |
1 |
1.56% |
| Hannes Reinecke |
4 |
0.60% |
2 |
3.12% |
| 권오훈 |
3 |
0.45% |
1 |
1.56% |
| Linus Torvalds |
3 |
0.45% |
1 |
1.56% |
| Tim Schmielau |
3 |
0.45% |
1 |
1.56% |
| Rafael J. Wysocki |
2 |
0.30% |
1 |
1.56% |
| Daniel Kiper |
2 |
0.30% |
1 |
1.56% |
| Oscar Salvador |
2 |
0.30% |
1 |
1.56% |
| Wei Yang |
2 |
0.30% |
1 |
1.56% |
| Kay Sievers |
2 |
0.30% |
1 |
1.56% |
| Andrew Morton |
2 |
0.30% |
2 |
3.12% |
| Randy Dunlap |
2 |
0.30% |
1 |
1.56% |
| Linus Torvalds (pre-git) |
1 |
0.15% |
1 |
1.56% |
| Daniel Walker |
1 |
0.15% |
1 |
1.56% |
| Jianguo Wu |
1 |
0.15% |
1 |
1.56% |
| Vitaly Kuznetsov |
1 |
0.15% |
1 |
1.56% |
| Greg Kroah-Hartman |
1 |
0.15% |
1 |
1.56% |
| Gavin Shan |
1 |
0.15% |
1 |
1.56% |
| Total |
668 |
|
64 |
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/memory.h - generic memory definition
*
* This is mainly for topological representation. We define the
* basic "struct memory_block" here, which can be embedded in per-arch
* definitions or NUMA information.
*
* Basic handling of the devices is done in drivers/base/memory.c
* and system devices are handled in drivers/base/sys.c.
*
* Memory block are exported via sysfs in the class/memory/devices/
* directory.
*
*/
#ifndef _LINUX_MEMORY_H_
#define _LINUX_MEMORY_H_
#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/mutex.h>
#define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS)
/**
* struct memory_group - a logical group of memory blocks
* @nid: The node id for all memory blocks inside the memory group.
* @memory_blocks: List of all memory blocks belonging to this memory group.
* @present_kernel_pages: Present (online) memory outside ZONE_MOVABLE of this
* memory group.
* @present_movable_pages: Present (online) memory in ZONE_MOVABLE of this
* memory group.
* @is_dynamic: The memory group type: static vs. dynamic
* @s.max_pages: Valid with &memory_group.is_dynamic == false. The maximum
* number of pages we'll have in this static memory group.
* @d.unit_pages: Valid with &memory_group.is_dynamic == true. Unit in pages
* in which memory is added/removed in this dynamic memory group.
* This granularity defines the alignment of a unit in physical
* address space; it has to be at least as big as a single
* memory block.
*
* A memory group logically groups memory blocks; each memory block
* belongs to at most one memory group. A memory group corresponds to
* a memory device, such as a DIMM or a NUMA node, which spans multiple
* memory blocks and might even span multiple non-contiguous physical memory
* ranges.
*
* Modification of members after registration is serialized by memory
* hot(un)plug code.
*/
struct memory_group {
int nid;
struct list_head memory_blocks;
unsigned long present_kernel_pages;
unsigned long present_movable_pages;
bool is_dynamic;
union {
struct {
unsigned long max_pages;
} s;
struct {
unsigned long unit_pages;
} d;
};
};
enum memory_block_state {
/* These states are exposed to userspace as text strings in sysfs */
MEM_ONLINE, /* exposed to userspace */
MEM_GOING_OFFLINE, /* exposed to userspace */
MEM_OFFLINE, /* exposed to userspace */
MEM_GOING_ONLINE,
MEM_CANCEL_ONLINE,
MEM_CANCEL_OFFLINE,
};
struct memory_block {
unsigned long start_section_nr;
enum memory_block_state state; /* serialized by the dev->lock */
int online_type; /* for passing data to online routine */
int nid; /* NID for this memory block */
/*
* The single zone of this memory block if all PFNs of this memory block
* that are System RAM (not a memory hole, not ZONE_DEVICE ranges) are
* managed by a single zone. NULL if multiple zones (including nodes)
* apply.
*/
struct zone *zone;
struct device dev;
struct vmem_altmap *altmap;
struct memory_group *group; /* group (if any) for this block */
struct list_head group_next; /* next block inside memory group */
#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
atomic_long_t nr_hwpoison;
#endif
};
int arch_get_memory_phys_device(unsigned long start_pfn);
unsigned long memory_block_size_bytes(void);
int set_memory_block_size_order(unsigned int order);
struct memory_notify {
unsigned long start_pfn;
unsigned long nr_pages;
};
struct notifier_block;
struct mem_section;
/*
* Priorities for the hotplug memory callback routines. Invoked from
* high to low. Higher priorities correspond to higher numbers.
*/
#define DEFAULT_CALLBACK_PRI 0
#define SLAB_CALLBACK_PRI 1
#define CXL_CALLBACK_PRI 5
#define HMAT_CALLBACK_PRI 6
#define MM_COMPUTE_BATCH_PRI 10
#define CPUSET_CALLBACK_PRI 10
#define MEMTIER_HOTPLUG_PRI 100
#define KSM_CALLBACK_PRI 100
#ifndef CONFIG_MEMORY_HOTPLUG
static inline void memory_dev_init(void)
{
return;
}
static inline int register_memory_notifier(struct notifier_block *nb)
{
return 0;
}
static inline void unregister_memory_notifier(struct notifier_block *nb)
{
}
static inline int memory_notify(enum memory_block_state state, void *v)
{
return 0;
}
static inline int hotplug_memory_notifier(notifier_fn_t fn, int pri)
{
return 0;
}
static inline int memory_block_advise_max_size(unsigned long size)
{
return -ENODEV;
}
static inline unsigned long memory_block_advised_max_size(void)
{
return 0;
}
#else /* CONFIG_MEMORY_HOTPLUG */
extern int register_memory_notifier(struct notifier_block *nb);
extern void unregister_memory_notifier(struct notifier_block *nb);
int create_memory_block_devices(unsigned long start, unsigned long size,
int nid, struct vmem_altmap *altmap,
struct memory_group *group);
void remove_memory_block_devices(unsigned long start, unsigned long size);
extern void memory_dev_init(void);
extern int memory_notify(enum memory_block_state state, void *v);
extern struct memory_block *find_memory_block(unsigned long section_nr);
typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
extern int walk_memory_blocks(unsigned long start, unsigned long size,
void *arg, walk_memory_blocks_func_t func);
extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func);
extern int memory_group_register_static(int nid, unsigned long max_pages);
extern int memory_group_register_dynamic(int nid, unsigned long unit_pages);
extern int memory_group_unregister(int mgid);
struct memory_group *memory_group_find_by_id(int mgid);
typedef int (*walk_memory_groups_func_t)(struct memory_group *, void *);
int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
struct memory_group *excluded, void *arg);
struct memory_block *find_memory_block_by_id(unsigned long block_id);
#define hotplug_memory_notifier(fn, pri) ({ \
static __meminitdata struct notifier_block fn##_mem_nb =\
{ .notifier_call = fn, .priority = pri };\
register_memory_notifier(&fn##_mem_nb); \
})
extern int sections_per_block;
static inline unsigned long memory_block_id(unsigned long section_nr)
{
return section_nr / sections_per_block;
}
static inline unsigned long pfn_to_block_id(unsigned long pfn)
{
return memory_block_id(pfn_to_section_nr(pfn));
}
static inline unsigned long phys_to_block_id(unsigned long phys)
{
return pfn_to_block_id(PFN_DOWN(phys));
}
#ifdef CONFIG_NUMA
void memory_block_add_nid_early(struct memory_block *mem, int nid);
#endif /* CONFIG_NUMA */
int memory_block_advise_max_size(unsigned long size);
unsigned long memory_block_advised_max_size(void);
#endif /* CONFIG_MEMORY_HOTPLUG */
/*
* Kernel text modification mutex, used for code patching. Users of this lock
* can sleep.
*/
extern struct mutex text_mutex;
#endif /* _LINUX_MEMORY_H_ */