Release 4.7 include/linux/vmstat.h
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H
#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
extern int sysctl_stat_interval;
#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Light weight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */
struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};
DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph lameter | 21 | 100.00% | 4 | 100.00% |
| Total | 21 | 100.00% | 4 | 100.00% |
static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph lameter | 21 | 100.00% | 3 | 100.00% |
| Total | 21 | 100.00% | 3 | 100.00% |
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph lameter | 26 | 100.00% | 4 | 100.00% |
| Total | 26 | 100.00% | 4 | 100.00% |
static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph lameter | 25 | 96.15% | 3 | 75.00% |
| andrew morton | 1 | 3.85% | 1 | 25.00% |
| Total | 26 | 100.00% | 4 | 100.00% |
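For orientation, a minimal usage sketch of the two flavours above. The surrounding function and its nr_freed parameter are hypothetical; PGFAULT and PGFREE are existing vm_event_item values in this series.

/* Hypothetical caller, shown only to illustrate the API shape above. */
static void example_vm_event_usage(unsigned long nr_freed)
{
	/* Preemption-safe per-CPU increment (this_cpu op), usable anywhere. */
	count_vm_event(PGFAULT);

	/*
	 * Cheaper raw_cpu batch update: no protection against migrating
	 * between CPUs mid-update, which is acceptable because these
	 * counters are explicitly allowed to be racy.
	 */
	__count_vm_events(PGFREE, nr_freed);
}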
extern void all_vm_events(unsigned long *);
extern void vm_events_fold_cpu(int cpu);
#else
/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| andrew morton | 7 | 70.00% | 1 | 33.33% |
| christoph lameter | 3 | 30.00% | 2 | 66.67% |
| Total | 10 | 100.00% | 3 | 100.00% |
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| andrew morton | 9 | 69.23% | 1 | 33.33% |
| christoph lameter | 4 | 30.77% | 2 | 66.67% |
| Total | 13 | 100.00% | 3 | 100.00% |
static inline void __count_vm_event(enum vm_event_item item)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| andrew morton | 7 | 70.00% | 1 | 33.33% |
| christoph lameter | 3 | 30.00% | 2 | 66.67% |
| Total | 10 | 100.00% | 3 | 100.00% |
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| andrew morton | 9 | 69.23% | 1 | 33.33% |
| christoph lameter | 4 | 30.77% | 2 | 66.67% |
| Total | 13 | 100.00% | 3 | 100.00% |
static inline void all_vm_events(unsigned long *ret)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| andrew morton | 11 | 100.00% | 1 | 100.00% |
| Total | 11 | 100.00% | 1 | 100.00% |
static inline void vm_events_fold_cpu(int cpu)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| andrew morton | 6 | 66.67% | 1 | 33.33% |
| christoph lameter | 3 | 33.33% | 2 | 66.67% |
| Total | 9 | 100.00% | 3 | 100.00% |
#endif /* CONFIG_VM_EVENT_COUNTERS */
#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x) count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x) count_vm_event(x)
#define count_vm_tlb_events(x, y) count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif
#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif
#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
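To make the token pasting concrete, a sketch of how one call expands; PGALLOC is an existing per-zone event prefix in this series, and the expansion below is an approximation of what the preprocessor produces rather than a quote.

/*
 * __count_zone_vm_events(PGALLOC, zone, 1) expands roughly to:
 *
 *	__count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + zone_idx(zone), 1);
 *
 * i.e. the per-zone event item is selected by offsetting the _NORMAL
 * variant by the distance between this zone's index and ZONE_NORMAL,
 * yielding PGALLOC_DMA, PGALLOC_NORMAL, PGALLOC_MOVABLE, ... as needed.
 */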
/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph lameter | 43 | 100.00% | 1 | 100.00% |
| Total | 43 | 100.00% | 1 | 100.00% |
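For context, a rough sketch of how the SMP implementation in mm/vmstat.c (only declared further below) feeds this helper; this is a from-memory paraphrase, not the actual source.

/*
 * Simplified SMP update path (paraphrased):
 *
 *	__mod_zone_page_state(zone, item, delta)
 *		diff = this-CPU vm_stat_diff[item] + delta;
 *		if |diff| exceeds the per-CPU stat_threshold:
 *			zone_page_state_add(diff, zone, item);	// fold into atomics
 *			diff = 0;
 *		store diff back into this-CPU vm_stat_diff[item];
 *
 * Most updates therefore touch only the per-CPU differential; the atomic
 * counters above are written only when a differential overflows its
 * threshold or when the differentials are folded (e.g. by the vmstat
 * worker or cpu_vm_stats_fold()).
 */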
static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph lameter | 42 | 100.00% | 1 | 100.00% |
| Total | 42 | 100.00% | 1 | 100.00% |
static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph lameter | 49 | 100.00% | 1 | 100.00% |
| Total | 49 | 100.00% | 1 | 100.00% |
/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
	if (x < 0)
		x = 0;
#endif
	return x;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph lameter | 72 | 100.00% | 1 | 100.00% |
| Total | 72 | 100.00% | 1 | 100.00% |
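A hedged sketch of when the snapshot variant is worth its extra cost; the watermark-style check below is illustrative rather than a quote of in-tree code, while NR_FREE_PAGES is a real zone_stat_item.

/* Hypothetical check, shown only to contrast the two readers above. */
static bool example_zone_free_pages_low(struct zone *zone, unsigned long min)
{
	/* Cheap but possibly stale: reads only the folded atomic counter. */
	if (zone_page_state(zone, NR_FREE_PAGES) > min)
		return false;

	/*
	 * Near the threshold, pay for the per-CPU walk so the pending
	 * vm_stat_diff deltas are included; still unsynchronized, but a
	 * closer estimate.
	 */
	return zone_page_state_snapshot(zone, NR_FREE_PAGES) <= min;
}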
#ifdef CONFIG_NUMA
extern unsigned long node_page_state(int node, enum zone_stat_item item);
#else
#define node_page_state(node, item) global_page_state(item)
#endif /* CONFIG_NUMA */
#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);
void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);
extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);
struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos);
void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */
/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph lameter | 27 | 96.43% | 1 | 50.00% |
| heiko carstens | 1 | 3.57% | 1 | 50.00% |
| Total | 28 | 100.00% | 2 | 100.00% |
static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph lameter | 36 | 100.00% | 2 | 100.00% |
| Total | 36 | 100.00% | 2 | 100.00% |
static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph lameter | 18 | 50.00% | 1 | 50.00% |
| johannes weiner | 18 | 50.00% | 1 | 50.00% |
| Total | 36 | 100.00% | 2 | 100.00% |
static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph lameter | 18 | 69.23% | 1 | 50.00% |
| johannes weiner | 8 | 30.77% | 1 | 50.00% |
| Total | 26 | 100.00% | 2 | 100.00% |
static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph lameter | 24 | 92.31% | 1 | 50.00% |
| uwe kleine-koenig | 2 | 7.69% | 1 | 50.00% |
| Total | 26 | 100.00% | 2 | 100.00% |
/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state
#define inc_zone_state __inc_zone_state
#define dec_zone_state __dec_zone_state
#define set_pgdat_percpu_threshold(pgdat, callback) { }
static inline void refresh_zone_stat_thresholds(void) { }
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| kosaki motohiro | 8 | 100.00% | 1 | 100.00% |
| Total | 8 | 100.00% | 1 | 100.00% |
static inline void cpu_vm_stats_fold(int cpu) { }
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph lameter | 9 | 100.00% | 1 | 100.00% |
| Total | 9 | 100.00% | 1 | 100.00% |
static inline void quiet_vmstat(void) { }
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph lameter | 8 | 100.00% | 1 | 100.00% |
| Total | 8 | 100.00% | 1 | 100.00% |
static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| minchan kim | 16 | 100.00% | 1 | 100.00% |
| Total | 16 | 100.00% | 1 | 100.00% |
#endif /* CONFIG_SMP */
static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| bartlomiej zolnierkiewicz | 43 | 100.00% | 1 | 100.00% |
| Total | 43 | 100.00% | 1 | 100.00% |
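A minimal usage sketch; the caller and its order/migratetype arguments are hypothetical stand-ins for what the page allocator passes when pages are placed on a free list.

/* Hypothetical caller, illustrating the accounting helper above. */
static void example_account_freed_pages(struct zone *zone,
					unsigned int order, int migratetype)
{
	/*
	 * 1 << order pages go back to the free lists: NR_FREE_PAGES is
	 * always adjusted, and NR_FREE_CMA_PAGES only when the pages belong
	 * to a CMA pageblock.
	 */
	__mod_zone_freepage_state(zone, 1 << order, migratetype);
}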
extern const char * const vmstat_text[];
#endif /* _LINUX_VMSTAT_H */
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph lameter | 746 | 66.79% | 13 | 39.39% |
| mel gorman | 125 | 11.19% | 5 | 15.15% |
| andrew morton | 59 | 5.28% | 4 | 12.12% |
| bartlomiej zolnierkiewicz | 43 | 3.85% | 1 | 3.03% |
| johannes weiner | 34 | 3.04% | 1 | 3.03% |
| minchan kim | 29 | 2.60% | 1 | 3.03% |
| hugh dickins | 27 | 2.42% | 1 | 3.03% |
| kosaki motohiro | 23 | 2.06% | 2 | 6.06% |
| davidlohr bueso | 21 | 1.88% | 1 | 3.03% |
| adrian bunk | 4 | 0.36% | 1 | 3.03% |
| heiko carstens | 3 | 0.27% | 1 | 3.03% |
| uwe kleine-koenig | 2 | 0.18% | 1 | 3.03% |
| arun sharma | 1 | 0.09% | 1 | 3.03% |
| Total | 1117 | 100.00% | 33 | 100.00% |