cregit-Linux: how code gets into the kernel

Release 4.7 include/linux/vmstat.h

Directory: include/linux
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Light weight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */


struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */

static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
christoph lameter          21      100.00%  4        100.00%
Total                      21      100.00%  4        100.00%


static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
christoph lameter          21      100.00%  3        100.00%
Total                      21      100.00%  3        100.00%
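
The two variants differ only in how the per-cpu slot is touched: count_vm_event() uses this_cpu_inc(), which is safe to call from any context, while __count_vm_event() uses raw_cpu_inc() and expects the caller to already run with preemption disabled (or to tolerate the rare misattributed increment). A minimal sketch of both call styles; the wrapper functions and the choice of PGFAULT are illustrative, not taken from this header:

#include <linux/vmstat.h>

/* Hypothetical hook on a path that may be preempted. */
static void hypothetical_fault_hook(void)
{
	/* this_cpu_inc() performs a preemption-safe per-cpu increment. */
	count_vm_event(PGFAULT);
}

/* Hypothetical hook on a path that already disabled preemption. */
static void hypothetical_atomic_hook(void)
{
	/* raw_cpu_inc() skips the protection; a lost increment is
	 * acceptable because nothing critical relies on these counters. */
	__count_vm_event(PGFAULT);
}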


static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
christoph lameter          26      100.00%  4        100.00%
Total                      26      100.00%  4        100.00%


static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
christoph lameter          25      96.15%   3        75.00%
andrew morton              1       3.85%    1        25.00%
Total                      26      100.00%  4        100.00%
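
For batched updates the *_events() variants take a delta, so a loop that processes many pages can account all of them with a single per-cpu addition. A hedged sketch; the helper name and the PGDEACTIVATE usage are illustrative:

#include <linux/vmstat.h>

/* Hypothetical reclaim helper: account a whole batch at once. */
static void hypothetical_account_deactivated(unsigned long nr_pages)
{
	if (nr_pages)
		count_vm_events(PGDEACTIVATE, nr_pages);
}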

extern void all_vm_events(unsigned long *);
extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item) { }

Contributors

Person                     Tokens  Prop     Commits  CommitProp
andrew morton              7       70.00%   1        33.33%
christoph lameter          3       30.00%   2        66.67%
Total                      10      100.00%  3        100.00%


static inline void count_vm_events(enum vm_event_item item, long delta) { }

Contributors

Person                     Tokens  Prop     Commits  CommitProp
andrew morton              9       69.23%   1        33.33%
christoph lameter          4       30.77%   2        66.67%
Total                      13      100.00%  3        100.00%


static inline void __count_vm_event(enum vm_event_item item) { }

Contributors

Person                     Tokens  Prop     Commits  CommitProp
andrew morton              7       70.00%   1        33.33%
christoph lameter          3       30.00%   2        66.67%
Total                      10      100.00%  3        100.00%


static inline void __count_vm_events(enum vm_event_item item, long delta) { }

Contributors

Person                     Tokens  Prop     Commits  CommitProp
andrew morton              9       69.23%   1        33.33%
christoph lameter          4       30.77%   2        66.67%
Total                      13      100.00%  3        100.00%


static inline void all_vm_events(unsigned long *ret) { }

Contributors

Person                     Tokens  Prop     Commits  CommitProp
andrew morton              11      100.00%  1        100.00%
Total                      11      100.00%  1        100.00%


static inline void vm_events_fold_cpu(int cpu) { }

Contributors

Person                     Tokens  Prop     Commits  CommitProp
andrew morton              6       66.67%   1        33.33%
christoph lameter          3       33.33%   2        66.67%
Total                      9       100.00%  3        100.00%

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zone_vm_events(item, zone, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
	zone_idx(zone), delta)
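
These wrappers compile away completely when the corresponding config option is off, so call sites need no #ifdefs of their own; the (void)(y) in the disabled variants keeps a computed delta from producing an unused-value warning. __count_zone_vm_events() exploits the per-zone layout of the event items: the item for an arbitrary zone is computed from the _NORMAL item plus the zone index. A sketch of typical call styles; both function names are illustrative:

#include <linux/mmzone.h>
#include <linux/vmstat.h>

/* Hypothetical NUMA-balancing hook: compiles to nothing unless
 * CONFIG_NUMA_BALANCING is enabled. */
static void hypothetical_numa_fault_hook(void)
{
	count_vm_numa_event(NUMA_HINT_FAULTS);
}

/* Hypothetical allocation hook: credit PGALLOC to whichever zone
 * the pages were taken from. */
static void hypothetical_alloc_hook(struct zone *zone, unsigned int order)
{
	__count_zone_vm_events(PGALLOC, zone, 1UL << order);
}
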
/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
christoph lameter          43      100.00%  1        100.00%
Total                      43      100.00%  1        100.00%


static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
christoph lameter          42      100.00%  1        100.00%
Total                      42      100.00%  1        100.00%
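
Because per-cpu deltas are folded into the global array only periodically, a reader can observe a transiently negative sum; on SMP the accessor clamps the result to zero instead of reporting an impossible value. A short sketch of a typical read; the helper name is illustrative:

#include <linux/vmstat.h>

/* Hypothetical reporting helper, e.g. for meminfo-style output. */
static unsigned long hypothetical_free_pages(void)
{
	/* Never returns a negative artifact of pending per-cpu deltas. */
	return global_page_state(NR_FREE_PAGES);
}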


static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
christoph lameter          49      100.00%  1        100.00%
Total                      49      100.00%  1        100.00%

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
christoph lameter          72      100.00%  1        100.00%
Total                      72      100.00%  1        100.00%
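
The snapshot variant trades speed for accuracy: it walks the pending vm_stat_diff of every online CPU, so it is considerably more expensive than zone_page_state() yet still only approximate, since the deltas keep changing while it runs. A hedged sketch of the usual pattern, a cheap read first and the expensive snapshot only when the fast value fails the check; the helper and its logic are illustrative:

#include <linux/mmzone.h>
#include <linux/vmstat.h>

/* Hypothetical check: trust the fast read when it passes, otherwise
 * fold in the per-cpu deltas before giving up. */
static bool hypothetical_enough_free(struct zone *zone, unsigned long min)
{
	if (zone_page_state(zone, NR_FREE_PAGES) > min)
		return true;
	return zone_page_state_snapshot(zone, NR_FREE_PAGES) > min;
}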

#ifdef CONFIG_NUMA

extern unsigned long node_page_state(int node, enum zone_stat_item item);

#else

#define node_page_state(node, item) global_page_state(item)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
christoph lameter          27      96.43%   1        50.00%
heiko carstens             1       3.57%    1        50.00%
Total                      28      100.00%  2        100.00%


static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
christoph lameter          36      100.00%  2        100.00%
Total                      36      100.00%  2        100.00%


static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
christoph lameter          18      50.00%   1        50.00%
johannes weiner            18      50.00%   1        50.00%
Total                      36      100.00%  2        100.00%


static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
christoph lameter          18      69.23%   1        50.00%
johannes weiner            8       30.77%   1        50.00%
Total                      26      100.00%  2        100.00%


static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
christoph lameter          24      92.31%   1        50.00%
uwe kleine-koenig          2       7.69%    1        50.00%
Total                      26      100.00%  2        100.00%

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_zone_state __inc_zone_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }
static inline void refresh_zone_stat_thresholds(void) { }

Contributors

Person                     Tokens  Prop     Commits  CommitProp
kosaki motohiro            8       100.00%  1        100.00%
Total                      8       100.00%  1        100.00%


static inline void cpu_vm_stats_fold(int cpu) { }

Contributors

Person                     Tokens  Prop     Commits  CommitProp
christoph lameter          9       100.00%  1        100.00%
Total                      9       100.00%  1        100.00%


static inline void quiet_vmstat(void) { }

Contributors

Person                     Tokens  Prop     Commits  CommitProp
christoph lameter          8       100.00%  1        100.00%
Total                      8       100.00%  1        100.00%


static inline void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset) { }

Contributors

Person                     Tokens  Prop     Commits  CommitProp
minchan kim                16      100.00%  1        100.00%
Total                      16      100.00%  1        100.00%

#endif /* CONFIG_SMP */
static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
bartlomiej zolnierkiewicz  43      100.00%  1        100.00%
Total                      43      100.00%  1        100.00%
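
CMA pages are deliberately accounted twice: once in NR_FREE_PAGES like any other free page, and once in NR_FREE_CMA_PAGES so the allocator can tell how much of the free memory is only usable by movable allocations. A sketch of how a free path might call it; the helper is illustrative (the real call sites live in mm/page_alloc.c):

#include <linux/mmzone.h>
#include <linux/vmstat.h>

/* Hypothetical free-path hook: 2^order pages of 'migratetype' are
 * returned to the free lists of 'zone'. */
static void hypothetical_return_pages(struct zone *zone, unsigned int order,
				      int migratetype)
{
	__mod_zone_freepage_state(zone, 1 << order, migratetype);
}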

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */

Overall Contributors

Person                     Tokens  Prop     Commits  CommitProp
christoph lameter          746     66.79%   13       39.39%
mel gorman                 125     11.19%   5        15.15%
andrew morton              59      5.28%    4        12.12%
bartlomiej zolnierkiewicz  43      3.85%    1        3.03%
johannes weiner            34      3.04%    1        3.03%
minchan kim                29      2.60%    1        3.03%
hugh dickins               27      2.42%    1        3.03%
kosaki motohiro            23      2.06%    2        6.06%
davidlohr bueso            21      1.88%    1        3.03%
adrian bunk                4       0.36%    1        3.03%
heiko carstens             3       0.27%    1        3.03%
uwe kleine-koenig          2       0.18%    1        3.03%
arun sharma                1       0.09%    1        3.03%
Total                      1117    100.00%  33       100.00%