Contributors: 16
| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Mel Gorman | 156 | 37.59% | 6 | 25.00% |
| Kamezawa Hiroyuki | 122 | 29.40% | 2 | 8.33% |
| Konstantin Khlebnikov | 48 | 11.57% | 1 | 4.17% |
| Peter Zijlstra | 25 | 6.02% | 2 | 8.33% |
| Peter Collingbourne | 19 | 4.58% | 1 | 4.17% |
| Alex Shi | 8 | 1.93% | 1 | 4.17% |
| Hugh Dickins | 6 | 1.45% | 1 | 4.17% |
| Kefeng Wang | 5 | 1.20% | 1 | 4.17% |
| Yu Zhao | 5 | 1.20% | 1 | 4.17% |
| Daniel Kiper | 5 | 1.20% | 1 | 4.17% |
| Andrew Morton | 5 | 1.20% | 1 | 4.17% |
| Nhat Pham | 5 | 1.20% | 1 | 4.17% |
| Linus Torvalds (pre-git) | 3 | 0.72% | 2 | 8.33% |
| Greg Kroah-Hartman | 1 | 0.24% | 1 | 4.17% |
| Steven Rostedt | 1 | 0.24% | 1 | 4.17% |
| Andi Kleen | 1 | 0.24% | 1 | 4.17% |
| **Total** | **415** | | **24** | |

// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/mmzone.c
 *
 * management codes for pgdats, zones and page flags
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

struct pglist_data *first_online_pgdat(void)
{
        return NODE_DATA(first_online_node);
}

struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
{
        int nid = next_online_node(pgdat->node_id);

        if (nid == MAX_NUMNODES)
                return NULL;
        return NODE_DATA(nid);
}
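
/*
 * Illustrative sketch, not part of mmzone.c: the two helpers above exist to
 * back the node iterator macro for_each_online_pgdat() in
 * include/linux/mmzone.h, which expands to roughly the loop below. The
 * function name and the pr_info() output are hypothetical.
 */
#ifdef MMZONE_EXAMPLE_SKETCH
static void example_walk_online_nodes(void)
{
        struct pglist_data *pgdat;

        for (pgdat = first_online_pgdat();
             pgdat;
             pgdat = next_online_pgdat(pgdat))
                pr_info("node %d spans %lu pages\n",
                        pgdat->node_id, pgdat->node_spanned_pages);
}
#endif /* MMZONE_EXAMPLE_SKETCH */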

/*
 * next_zone - helper magic for for_each_zone()
 */
struct zone *next_zone(struct zone *zone)
{
        pg_data_t *pgdat = zone->zone_pgdat;

        if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
                zone++;
        else {
                pgdat = next_online_pgdat(pgdat);
                if (pgdat)
                        zone = pgdat->node_zones;
                else
                        zone = NULL;
        }
        return zone;
}
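
/*
 * Illustrative sketch, not part of mmzone.c: next_zone() is the step
 * function behind for_each_zone(). Starting from the first online node's
 * node_zones[] array, the hypothetical helper below visits every zone of
 * every online node, skipping zones that hold no pages.
 */
#ifdef MMZONE_EXAMPLE_SKETCH
static void example_walk_all_zones(void)
{
        struct zone *zone;

        for (zone = (first_online_pgdat())->node_zones;
             zone;
             zone = next_zone(zone)) {
                if (populated_zone(zone))
                        pr_info("zone %s on node %d\n",
                                zone->name, zone_to_nid(zone));
        }
}
#endif /* MMZONE_EXAMPLE_SKETCH */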

static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
{
#ifdef CONFIG_NUMA
        return node_isset(zonelist_node_idx(zref), *nodes);
#else
        return 1;
#endif /* CONFIG_NUMA */
}

/* Returns the next zone at or below highest_zoneidx in a zonelist */
struct zoneref *__next_zones_zonelist(struct zoneref *z,
                                        enum zone_type highest_zoneidx,
                                        nodemask_t *nodes)
{
        /*
         * Find the next suitable zone to use for the allocation.
         * Only filter based on nodemask if it's set
         */
        if (unlikely(nodes == NULL))
                while (zonelist_zone_idx(z) > highest_zoneidx)
                        z++;
        else
                while (zonelist_zone_idx(z) > highest_zoneidx ||
                                (z->zone && !zref_in_nodemask(z, nodes)))
                        z++;

        return z;
}
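
/*
 * Illustrative sketch, not part of mmzone.c: __next_zones_zonelist() is the
 * out-of-line slow path behind the zonelist iterators the page allocator
 * uses (first_zones_zonelist()/next_zones_zonelist() in
 * include/linux/mmzone.h). The hypothetical helper below walks a node's
 * GFP_KERNEL fallback zonelist up to a ceiling zone index, honouring an
 * optional nodemask.
 */
#ifdef MMZONE_EXAMPLE_SKETCH
static void example_walk_zonelist(int nid, enum zone_type highest_zoneidx,
                                  nodemask_t *nodes)
{
        struct zonelist *zonelist = node_zonelist(nid, GFP_KERNEL);
        struct zoneref *z;
        struct zone *zone;

        for (z = first_zones_zonelist(zonelist, highest_zoneidx, nodes),
             zone = zonelist_zone(z);
             zone;
             z = next_zones_zonelist(++z, highest_zoneidx, nodes),
             zone = zonelist_zone(z))
                pr_info("candidate zone %s\n", zone->name);
}
#endif /* MMZONE_EXAMPLE_SKETCH */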

void lruvec_init(struct lruvec *lruvec)
{
        enum lru_list lru;

        memset(lruvec, 0, sizeof(struct lruvec));
        spin_lock_init(&lruvec->lru_lock);
        zswap_lruvec_state_init(lruvec);

        for_each_lru(lru)
                INIT_LIST_HEAD(&lruvec->lists[lru]);
        /*
         * The "Unevictable LRU" is imaginary: though its size is maintained,
         * it is never scanned, and unevictable pages are not threaded on it
         * (so that their lru fields can be reused to hold mlock_count).
         * Poison its list head, so that any operations on it would crash.
         */
        list_del(&lruvec->lists[LRU_UNEVICTABLE]);

        lru_gen_init_lruvec(lruvec);
}
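
/*
 * Illustrative sketch, not part of mmzone.c: list_del() leaves the
 * LRU_UNEVICTABLE head pointing at LIST_POISON1/LIST_POISON2 (see
 * include/linux/poison.h), so a later list_add() onto it faults
 * immediately. The hypothetical check below makes that state explicit.
 */
#ifdef MMZONE_EXAMPLE_SKETCH
static void example_check_unevictable_poison(struct lruvec *lruvec)
{
        WARN_ON(lruvec->lists[LRU_UNEVICTABLE].next != LIST_POISON1);
        WARN_ON(lruvec->lists[LRU_UNEVICTABLE].prev != LIST_POISON2);
}
#endif /* MMZONE_EXAMPLE_SKETCH */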

#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{
        unsigned long old_flags, flags;
        int last_cpupid;

        old_flags = READ_ONCE(folio->flags);
        do {
                flags = old_flags;

                last_cpupid = (flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
                flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
                flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
        } while (unlikely(!try_cmpxchg(&folio->flags, &old_flags, flags)));

        return last_cpupid;
}
#endif
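
/*
 * Illustrative sketch, not part of mmzone.c: folio_xchg_last_cpupid() above
 * is the standard lockless read-modify-write pattern for a field packed
 * into one word. On failure, try_cmpxchg() refreshes the expected value
 * with the current word, so the loop simply recomputes and retries. The
 * generic helper below, with hypothetical name and parameters, applies the
 * same pattern to any mask/shift pair and returns the field's old value.
 */
#ifdef MMZONE_EXAMPLE_SKETCH
static unsigned long example_xchg_field(unsigned long *word,
                                        unsigned long mask,
                                        unsigned int shift,
                                        unsigned long val)
{
        unsigned long old = READ_ONCE(*word), new;

        do {
                new = (old & ~(mask << shift)) | ((val & mask) << shift);
        } while (unlikely(!try_cmpxchg(word, &old, new)));

        return (old >> shift) & mask;
}
#endif /* MMZONE_EXAMPLE_SKETCH */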