cregit-Linux: how code gets into the kernel

Release 4.14: include/linux/vmstat.h

Directory: include/linux
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */


struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */

static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Christoph Lameter    21      100.00%  4        100.00%
Total                21      100.00%  4        100.00%


static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Christoph Lameter    21      100.00%  3        100.00%
Total                21      100.00%  3        100.00%


static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Christoph Lameter    26      100.00%  4        100.00%
Total                26      100.00%  4        100.00%


static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Christoph Lameter    25      96.15%   3        75.00%
Andrew Morton        1       3.85%    1        25.00%
Total                26      100.00%  4        100.00%
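The two flavors above compile to a simple per-cpu increment and differ only in the primitive used: count_vm_event()/count_vm_events() use this_cpu_inc()/this_cpu_add(), which are safe against preemption, while the __ variants use raw_cpu_inc()/raw_cpu_add() and may occasionally lose an update if the caller can be preempted mid-operation. Since these counters are explicitly allowed to be racy, callers that already run with preemption or interrupts disabled pick the cheaper __ form. A minimal usage sketch (PGFAULT, PGREFILL, and all_vm_events() are real kernel symbols; the surrounding context and nr_scanned are illustrative):

/* Hot path: charge one page fault to this CPU's counter. */
count_vm_event(PGFAULT);

/* Caller already runs with interrupts disabled: the raw form suffices. */
__count_vm_events(PGREFILL, nr_scanned);

/* Reader side (e.g. /proc/vmstat): fold every CPU's counters together. */
unsigned long events[NR_VM_EVENT_ITEMS];
all_vm_events(events);
/* events[PGFAULT] now holds an approximate system-wide fault count. */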

extern void all_vm_events(unsigned long *);
extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item) { }

Contributors

Person               Tokens  Prop     Commits  CommitProp
Andrew Morton        7       70.00%   1        33.33%
Christoph Lameter    3       30.00%   2        66.67%
Total                10      100.00%  3        100.00%


static inline void count_vm_events(enum vm_event_item item, long delta) { }

Contributors

Person               Tokens  Prop     Commits  CommitProp
Andrew Morton        9       69.23%   1        33.33%
Christoph Lameter    4       30.77%   2        66.67%
Total                13      100.00%  3        100.00%


static inline void __count_vm_event(enum vm_event_item item) { }

Contributors

Person               Tokens  Prop     Commits  CommitProp
Andrew Morton        7       70.00%   1        33.33%
Christoph Lameter    3       30.00%   2        66.67%
Total                10      100.00%  3        100.00%


static inline void __count_vm_events(enum vm_event_item item, long delta) { }

Contributors

Person               Tokens  Prop     Commits  CommitProp
Andrew Morton        9       69.23%   1        33.33%
Christoph Lameter    4       30.77%   2        66.67%
Total                13      100.00%  3        100.00%


static inline void all_vm_events(unsigned long *ret) { }

Contributors

Person               Tokens  Prop     Commits  CommitProp
Andrew Morton        11      100.00%  1        100.00%
Total                11      100.00%  1        100.00%


static inline void vm_events_fold_cpu(int cpu) { }

Contributors

Person               Tokens  Prop     Commits  CommitProp
Andrew Morton        6       66.67%   1        33.33%
Christoph Lameter    3       33.33%   2        66.67%
Total                9       100.00%  3        100.00%

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)

/*
 * Zone and node-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_state_add(long x, struct zone *zone,
				 enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_stat[item]);
	atomic_long_add(x, &vm_numa_stat[item]);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Kemi Wang            43      100.00%  1        100.00%
Total                43      100.00%  1        100.00%


static inline unsigned long global_numa_state(enum numa_stat_item item)
{
	long x = atomic_long_read(&vm_numa_stat[item]);

	return x;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Kemi Wang            27      100.00%  1        100.00%
Total                27      100.00%  1        100.00%


static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
					enum numa_stat_item item)
{
	long x = atomic_long_read(&zone->vm_numa_stat[item]);
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];

	return x;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Kemi Wang            57      100.00%  2        100.00%
Total                57      100.00%  2        100.00%

#endif /* CONFIG_NUMA */
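Worth pausing on the __count_zid_vm_events() macro defined earlier: it relies on per-zone event items being laid out contiguously in enum vm_event_item (PGALLOC_DMA, ..., PGALLOC_NORMAL, ..., PGALLOC_MOVABLE), so offsetting from the item##_NORMAL entry by (zid - ZONE_NORMAL) selects the counter for an arbitrary zone. A hedged sketch of the pattern, modeled on the page allocator (page and order are illustrative; the symbols themselves are real):

/* Charge 2^order freshly allocated pages to the zone the page sits in. */
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);

/* The NUMA-balancing wrappers compile to no-ops unless
 * CONFIG_NUMA_BALANCING=y, so call sites need no #ifdef of their own. */
count_vm_numa_event(NUMA_HINT_FAULTS);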
static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Christoph Lameter    36      83.72%   1        50.00%
Mel Gorman           7       16.28%   1        50.00%
Total                43      100.00%  2        100.00%


static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Mel Gorman           36      83.72%   1        50.00%
Christoph Lameter    7       16.28%   1        50.00%
Total                43      100.00%  2        100.00%


static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Mel Gorman           25      59.52%   1        33.33%
Christoph Lameter    16      38.10%   1        33.33%
Michal Hocko         1       2.38%    1        33.33%
Total                42      100.00%  3        100.00%
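The "if (x < 0) x = 0" clamp exists because on SMP the global atomic only reflects per-cpu deltas after they have been folded in, so a concurrent reader can transiently observe a small negative sum. A hedged sketch of a typical cheap read (NR_FREE_PAGES is a real zone_stat_item; the surrounding heuristic is illustrative):

/* Cheap, possibly slightly stale, read of the folded global counter. */
unsigned long free = global_zone_page_state(NR_FREE_PAGES);

if (free < min_free_heuristic)		/* min_free_heuristic is illustrative */
	consider_background_reclaim();	/* illustrative helper, not a kernel API */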


static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Christoph Lameter    24      57.14%   1        50.00%
Mel Gorman           18      42.86%   1        50.00%
Total                42      100.00%  2        100.00%


static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Christoph Lameter    49      100.00%  1        100.00%
Total                49      100.00%  1        100.00%

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Christoph Lameter    72      100.00%  1        100.00%
Total                72      100.00%  1        100.00%


static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat,
					enum node_stat_item item)
{
	long x = atomic_long_read(&pgdat->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->vm_node_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Mel Gorman           71      100.00%  1        100.00%
Total                71      100.00%  1        100.00%
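The snapshot variants trade speed for accuracy: instead of reading only the folded atomic, they also walk every online CPU's pending differential, making them O(nr_cpus) and, as the comment above notes, still only approximate, since the counters keep moving during the walk. A hedged sketch of when each form is appropriate (the pattern mirrors the page allocator's use of the snapshot near watermarks; the variable names are illustrative):

/* Fast path: the folded value is accurate enough. */
unsigned long free = zone_page_state(zone, NR_FREE_PAGES);

/*
 * Near a watermark, pending per-cpu deltas could flip the decision,
 * so pay for the per-CPU walk to get a tighter bound.
 */
if (free <= watermark)	/* watermark is illustrative */
	free = zone_page_state_snapshot(zone, NR_FREE_PAGES);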

#ifdef CONFIG_NUMA
extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
#define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d)
#define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d))

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Christoph Lameter    27      96.43%   1        50.00%
Heiko Carstens       1       3.57%    1        50.00%
Total                28      100.00%  2        100.00%


static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, int delta)
{
	node_page_state_add(delta, pgdat, item);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Mel Gorman           28      100.00%  1        100.00%
Total                28      100.00%  1        100.00%


static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Christoph Lameter    29      80.56%   2        66.67%
Mel Gorman           7       19.44%   1        33.33%
Total                36      100.00%  3        100.00%


static inline void __inc_node_state(struct pglist_data *pgdat,
				enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Mel Gorman           29      80.56%   1        50.00%
Christoph Lameter    7       19.44%   1        50.00%
Total                36      100.00%  2        100.00%


static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Johannes Weiner      15      41.67%   1        33.33%
Christoph Lameter    14      38.89%   1        33.33%
Mel Gorman           7       19.44%   1        33.33%
Total                36      100.00%  3        100.00%


static inline void __dec_node_state(struct pglist_data *pgdat,
				enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Mel Gorman           29      80.56%   1        33.33%
Christoph Lameter    4       11.11%   1        33.33%
Johannes Weiner      3       8.33%    1        33.33%
Total                36      100.00%  3        100.00%


static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Christoph Lameter    18      69.23%   1        50.00%
Johannes Weiner      8       30.77%   1        50.00%
Total                26      100.00%  2        100.00%


static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Mel Gorman           26      100.00%  1        100.00%
Total                26      100.00%  1        100.00%


static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Christoph Lameter    24      92.31%   1        50.00%
Uwe Kleine-König     2       7.69%    1        50.00%
Total                26      100.00%  2        100.00%


static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Mel Gorman           26      100.00%  1        100.00%
Total                26      100.00%  1        100.00%

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }
static inline void refresh_zone_stat_thresholds(void) { }

Contributors

Person               Tokens  Prop     Commits  CommitProp
Motohiro Kosaki      8       100.00%  1        100.00%
Total                8       100.00%  1        100.00%


static inline void cpu_vm_stats_fold(int cpu) { }

Contributors

Person               Tokens  Prop     Commits  CommitProp
Christoph Lameter    9       100.00%  1        100.00%
Total                9       100.00%  1        100.00%


static inline void quiet_vmstat(void) { }

Contributors

Person               Tokens  Prop     Commits  CommitProp
Christoph Lameter    8       100.00%  1        100.00%
Total                8       100.00%  1        100.00%


static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }

Contributors

Person               Tokens  Prop     Commits  CommitProp
MinChan Kim          16      100.00%  1        100.00%
Total                16      100.00%  1        100.00%

#endif /* CONFIG_SMP */
static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
Bartlomiej Zolnierkiewicz  43      100.00%  1        100.00%
Total                      43      100.00%  1        100.00%
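This helper keeps NR_FREE_CMA_PAGES in step with NR_FREE_PAGES, so the allocator can tell how much of the "free" total is only usable by movable/CMA allocations. A hedged sketch of the calling pattern in the buddy allocator (the call sites are paraphrased; the function and stat items are real):

/* A 2^order block enters the free lists: count it, and count it
 * again as CMA free memory if it came from a CMA pageblock. */
__mod_zone_freepage_state(zone, 1 << order, migratetype);

/* On allocation the same helper is used with a negative delta. */
__mod_zone_freepage_state(zone, -(1 << order), migratetype);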

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */

Overall Contributors

Person                     Tokens  Prop     Commits  CommitProp
Christoph Lameter          722     41.16%   11       28.95%
Mel Gorman                 616     35.12%   8        21.05%
Kemi Wang                  167     9.52%    2        5.26%
Andrew Morton              59      3.36%    4        10.53%
Bartlomiej Zolnierkiewicz  43      2.45%    1        2.63%
Johannes Weiner            34      1.94%    1        2.63%
MinChan Kim                29      1.65%    1        2.63%
Hugh Dickins               27      1.54%    1        2.63%
Motohiro Kosaki            23      1.31%    2        5.26%
Davidlohr Bueso A          21      1.20%    1        2.63%
Adrian Bunk                4       0.23%    1        2.63%
Heiko Carstens             3       0.17%    1        2.63%
Michal Hocko               2       0.11%    1        2.63%
Uwe Kleine-König           2       0.11%    1        2.63%
Greg Kroah-Hartman         1       0.06%    1        2.63%
Arun Sharma                1       0.06%    1        2.63%
Total                      1754    100.00%  38       100.00%
Created with cregit.