cregit-Linux: how code gets into the kernel

Release 4.8, mm/vmstat.c (directory: mm)
/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *              Christoph Lameter <christoph@lameter.com>
 *  Copyright (C) 2008-2014 Christoph Lameter
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmstat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/writeback.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/page_ext.h>
#include <linux/page_owner.h>

#include "internal.h"

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};

EXPORT_PER_CPU_SYMBOL(vm_event_states);


static void sum_vm_events(unsigned long *ret)
{
        int cpu;
        int i;

        memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

        for_each_online_cpu(cpu) {
                struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

                for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
                        ret[i] += this->event[i];
        }
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
christoph lameter   75      98.68%   4        80.00%
minchan kim         1       1.32%    1        20.00%
Total               76      100.00%  5        100.00%

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
        get_online_cpus();
        sum_vm_events(ret);
        put_online_cpus();
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
christoph lameter   15      71.43%   2        66.67%
kosaki motohiro     6       28.57%   1        33.33%
Total               21      100.00%  3        100.00%
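The comment above all_vm_events() notes that the sum is unavoidably approximate, because the per-cpu counters keep changing while they are read. A minimal userspace sketch of the same pattern (not kernel code; the thread count, event count, and helper names are invented for illustration): each thread owns its own counter array, and a reader adds the arrays together without any locking, accepting a slightly stale total.

/* Userspace analog of sum_vm_events(): per-thread counters, lockless sum. */
#include <stdio.h>

#define NR_EVENTS  4
#define NR_THREADS 8    /* hypothetical CPU/thread count */

/* One counter array per thread, updated only by its owner. */
static unsigned long thread_events[NR_THREADS][NR_EVENTS];

static void sum_events(unsigned long *ret)
{
        int t, i;

        for (i = 0; i < NR_EVENTS; i++)
                ret[i] = 0;
        for (t = 0; t < NR_THREADS; t++)                /* like for_each_online_cpu() */
                for (i = 0; i < NR_EVENTS; i++)
                        ret[i] += thread_events[t][i]; /* may race with writers: approximate */
}

int main(void)
{
        unsigned long total[NR_EVENTS];

        thread_events[0][1] = 5;        /* pretend some events happened */
        thread_events[3][1] = 2;
        sum_events(total);
        printf("event 1 total: %lu\n", total[1]);       /* prints 7 */
        return 0;
}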

EXPORT_SYMBOL_GPL(all_vm_events);

/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
        struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
        int i;

        for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
                count_vm_events(i, fold_state->event[i]);
                fold_state->event[i] = 0;
        }
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
christoph lameter   60      100.00%  2        100.00%
Total               60      100.00%  2        100.00%

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
EXPORT_SYMBOL(vm_zone_stat);
EXPORT_SYMBOL(vm_node_stat);

#ifdef CONFIG_SMP

int calculate_pressure_threshold(struct zone *zone)
{
        int threshold;
        int watermark_distance;

        /*
         * As vmstats are not up to date, there is drift between the estimated
         * and real values. For high thresholds and a high number of CPUs, it
         * is possible for the min watermark to be breached while the estimated
         * value looks fine. The pressure threshold is a reduced value such
         * that even the maximum amount of drift will not accidentally breach
         * the min watermark
         */
        watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
        threshold = max(1, (int)(watermark_distance / num_online_cpus()));

        /*
         * Maximum threshold is 125
         */
        threshold = min(125, threshold);

        return threshold;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
mel gorman          59      100.00%  1        100.00%
Total               59      100.00%  1        100.00%
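A hedged worked example with invented numbers: if low_wmark_pages() - min_wmark_pages() is 1024 pages and 16 CPUs are online, the pressure threshold is max(1, 1024 / 16) = 64, well under the 125 cap; with 1024 CPUs it drops to max(1, 1) = 1. The point is that the worst-case accumulated drift (CPUs x threshold) stays no larger than the gap between the two watermarks, so a stale NR_FREE_PAGES estimate cannot silently breach the min watermark.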


int calculate_normal_threshold(struct zone *zone)
{
        int threshold;
        int mem;        /* memory in 128 MB units */

        /*
         * The threshold scales with the number of processors and the amount
         * of memory per zone. More memory means that we can defer updates for
         * longer, more processors could lead to more contention.
         * fls() is used to have a cheap way of logarithmic scaling.
         *
         * Some sample thresholds:
         *
         * Threshold    Processors      (fls)   Zonesize        fls(mem+1)
         * ------------------------------------------------------------------
         * 8            1               1       0.9-1 GB        4
         * 16           2               2       0.9-1 GB        4
         * 20           2               2       1-2 GB          5
         * 24           2               2       2-4 GB          6
         * 28           2               2       4-8 GB          7
         * 32           2               2       8-16 GB         8
         * 4            2               2       <128M           1
         * 30           4               3       2-4 GB          5
         * 48           4               3       8-16 GB         8
         * 32           8               4       1-2 GB          4
         * 32           8               4       0.9-1GB         4
         * 10           16              5       <128M           1
         * 40           16              5       900M            4
         * 70           64              7       2-4 GB          5
         * 84           64              7       4-8 GB          6
         * 108          512             9       4-8 GB          6
         * 125          1024            10      8-16 GB         8
         * 125          1024            10      16-32 GB        9
         */
        mem = zone->managed_pages >> (27 - PAGE_SHIFT);

        threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

        /*
         * Maximum threshold is 125
         */
        threshold = min(125, threshold);

        return threshold;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
christoph lameter   60      96.77%   2        50.00%
mel gorman          1       1.61%    1        25.00%
jiang liu           1       1.61%    1        25.00%
Total               62      100.00%  4        100.00%
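The sample-threshold table above follows directly from threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem)), capped at 125, where mem is the zone size in 128 MB units. The standalone sketch below (zone sizes and CPU counts are hypothetical, and fls_local() is a local substitute since the kernel's fls() is not available in userspace) reproduces a couple of the rows.

/* Reproduce the calculate_normal_threshold() formula outside the kernel. */
#include <stdio.h>

static int fls_local(unsigned long long x)      /* 1-based index of highest set bit, 0 for x == 0 */
{
        int bit = 0;

        while (x) {
                bit++;
                x >>= 1;
        }
        return bit;
}

static int normal_threshold(int ncpus, unsigned long long zone_bytes)
{
        unsigned long long mem = zone_bytes >> 27;      /* zone size in 128 MB units */
        int threshold = 2 * fls_local(ncpus) * (1 + fls_local(mem));

        return threshold < 125 ? threshold : 125;       /* maximum threshold is 125 */
}

int main(void)
{
        /* Matches the sample rows for 1 CPU / ~0.9 GB (8) and 1024 CPUs / 8-16 GB (125). */
        printf("%d\n", normal_threshold(1, 960ULL << 20));      /* 8 */
        printf("%d\n", normal_threshold(1024, 12ULL << 30));    /* 125 */
        return 0;
}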

/*
 * Refresh the thresholds for each zone.
 */
void refresh_zone_stat_thresholds(void)
{
        struct pglist_data *pgdat;
        struct zone *zone;
        int cpu;
        int threshold;

        /* Zero current pgdat thresholds */
        for_each_online_pgdat(pgdat) {
                for_each_online_cpu(cpu) {
                        per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
                }
        }

        for_each_populated_zone(zone) {
                struct pglist_data *pgdat = zone->zone_pgdat;
                unsigned long max_drift, tolerate_drift;

                threshold = calculate_normal_threshold(zone);

                for_each_online_cpu(cpu) {
                        int pgdat_threshold;

                        per_cpu_ptr(zone->pageset, cpu)->stat_threshold = threshold;

                        /* Base nodestat threshold on the largest populated zone. */
                        pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
                        per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold =
                                max(threshold, pgdat_threshold);
                }

                /*
                 * Only set percpu_drift_mark if there is a danger that
                 * NR_FREE_PAGES reports the low watermark is ok when in fact
                 * the min watermark could be breached by an allocation
                 */
                tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
                max_drift = num_online_cpus() * threshold;
                if (max_drift > tolerate_drift)
                        zone->percpu_drift_mark = high_wmark_pages(zone) + max_drift;
        }
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
christoph lameter   89      52.98%   4        57.14%
mel gorman          78      46.43%   2        28.57%
kosaki motohiro     1       0.60%    1        14.29%
Total               168     100.00%  7        100.00%
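To make the drift-mark arithmetic concrete (the numbers are made up): a zone whose low and min watermarks are 1200 and 1000 pages tolerates 200 pages of drift. With 16 online CPUs and a normal threshold of 32, max_drift = 16 * 32 = 512 pages, which exceeds 200, so percpu_drift_mark is set to high_wmark_pages(zone) + 512; readers that see NR_FREE_PAGES below this mark know the cheap per-cpu estimate can no longer be trusted near the watermarks.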


void set_pgdat_percpu_threshold(pg_data_t *pgdat,
                                int (*calculate_pressure)(struct zone *))
{
        struct zone *zone;
        int cpu;
        int threshold;
        int i;

        for (i = 0; i < pgdat->nr_zones; i++) {
                zone = &pgdat->node_zones[i];
                if (!zone->percpu_drift_mark)
                        continue;

                threshold = (*calculate_pressure)(zone);
                for_each_online_cpu(cpu)
                        per_cpu_ptr(zone->pageset, cpu)->stat_threshold = threshold;
        }
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
mel gorman          96      100.00%  3        100.00%
Total               96      100.00%  3        100.00%

/*
 * For use when we know that interrupts are disabled,
 * or when we know that preemption is disabled and that
 * particular counter cannot be updated from interrupt context.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
                           long delta)
{
        struct per_cpu_pageset __percpu *pcp = zone->pageset;
        s8 __percpu *p = pcp->vm_stat_diff + item;
        long x;
        long t;

        x = delta + __this_cpu_read(*p);

        t = __this_cpu_read(pcp->stat_threshold);

        if (unlikely(x > t || x < -t)) {
                zone_page_state_add(x, zone, item);
                x = 0;
        }
        __this_cpu_write(*p, x);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
christoph lameter   99      99.00%   4        80.00%
heiko carstens      1       1.00%    1        20.00%
Total               100     100.00%  5        100.00%

EXPORT_SYMBOL(__mod_zone_page_state);
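__mod_zone_page_state() keeps a small per-cpu signed diff and only folds it into the shared counter once the diff crosses the per-cpu stat_threshold, so the common case touches only a CPU-local cacheline. A minimal userspace analog of that batching idea (thread-local diff plus a C11 atomic global; the names and the threshold value are invented, and this is a sketch rather than the kernel mechanism):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long global_count;                /* like vm_zone_stat[item] */
static _Thread_local long local_diff;           /* like pcp->vm_stat_diff[item] */
static const long threshold = 32;               /* like pcp->stat_threshold */

static void mod_count(long delta)
{
        long x = local_diff + delta;

        if (x > threshold || x < -threshold) {
                /* Fold the batched delta into the shared counter. */
                atomic_fetch_add(&global_count, x);
                x = 0;
        }
        local_diff = x;
}

static long read_count(void)
{
        /* Cheap read: ignores other threads' unfolded diffs (approximate). */
        return atomic_load(&global_count);
}

int main(void)
{
        for (int i = 0; i < 100; i++)
                mod_count(1);
        printf("global %ld, still local %ld\n", read_count(), local_diff);
        return 0;
}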
void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
                           long delta)
{
        struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
        s8 __percpu *p = pcp->vm_node_stat_diff + item;
        long x;
        long t;

        x = delta + __this_cpu_read(*p);

        t = __this_cpu_read(pcp->stat_threshold);

        if (unlikely(x > t || x < -t)) {
                node_page_state_add(x, pgdat, item);
                x = 0;
        }
        __this_cpu_write(*p, x);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
mel gorman          100     100.00%  1        100.00%
Total               100     100.00%  1        100.00%

EXPORT_SYMBOL(__mod_node_page_state);

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        struct per_cpu_pageset __percpu *pcp = zone->pageset;
        s8 __percpu *p = pcp->vm_stat_diff + item;
        s8 v, t;

        v = __this_cpu_inc_return(*p);
        t = __this_cpu_read(pcp->stat_threshold);
        if (unlikely(v > t)) {
                s8 overstep = t >> 1;

                zone_page_state_add(v + overstep, zone, item);
                __this_cpu_write(*p, -overstep);
        }
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
christoph lameter   95      100.00%  7        100.00%
Total               95      100.00%  7        100.00%


void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
        struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
        s8 __percpu *p = pcp->vm_node_stat_diff + item;
        s8 v, t;

        v = __this_cpu_inc_return(*p);
        t = __this_cpu_read(pcp->stat_threshold);
        if (unlikely(v > t)) {
                s8 overstep = t >> 1;

                node_page_state_add(v + overstep, pgdat, item);
                __this_cpu_write(*p, -overstep);
        }
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
christoph lameter   81      85.26%   4        80.00%
mel gorman          14      14.74%   1        20.00%
Total               95      100.00%  5        100.00%


void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
        __inc_zone_state(page_zone(page), item);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
christoph lameter   22      91.67%   1        50.00%
mel gorman          2       8.33%    1        50.00%
Total               24      100.00%  2        100.00%

EXPORT_SYMBOL(__inc_zone_page_state);
void __inc_node_page_state(struct page *page, enum node_stat_item item)
{
        __inc_node_state(page_pgdat(page), item);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
mel gorman          24      100.00%  1        100.00%
Total               24      100.00%  1        100.00%

EXPORT_SYMBOL(__inc_node_page_state);
void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
        struct per_cpu_pageset __percpu *pcp = zone->pageset;
        s8 __percpu *p = pcp->vm_stat_diff + item;
        s8 v, t;

        v = __this_cpu_dec_return(*p);
        t = __this_cpu_read(pcp->stat_threshold);
        if (unlikely(v < - t)) {
                s8 overstep = t >> 1;

                zone_page_state_add(v - overstep, zone, item);
                __this_cpu_write(*p, overstep);
        }
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
mel gorman          95      100.00%  1        100.00%
Total               95      100.00%  1        100.00%


void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
        struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
        s8 __percpu *p = pcp->vm_node_stat_diff + item;
        s8 v, t;

        v = __this_cpu_dec_return(*p);
        t = __this_cpu_read(pcp->stat_threshold);
        if (unlikely(v < - t)) {
                s8 overstep = t >> 1;

                node_page_state_add(v - overstep, pgdat, item);
                __this_cpu_write(*p, overstep);
        }
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
mel gorman          95      100.00%  1        100.00%
Total               95      100.00%  1        100.00%


void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
        __dec_zone_state(page_zone(page), item);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
mel gorman          24      100.00%  1        100.00%
Total               24      100.00%  1        100.00%

EXPORT_SYMBOL(__dec_zone_page_state);
void __dec_node_page_state(struct page *page, enum node_stat_item item)
{
        __dec_node_state(page_pgdat(page), item);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
mel gorman          24      100.00%  1        100.00%
Total               24      100.00%  1        100.00%

EXPORT_SYMBOL(__dec_node_page_state);

#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
/*
 * If we have cmpxchg_local support then we do not need to incur the overhead
 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 *
 * mod_state() modifies the zone counter state through atomic per cpu
 * operations.
 *
 * Overstep mode specifies how overstep should handled:
 *        0       No overstepping
 *        1       Overstepping half of threshold
 *        -1      Overstepping minus half of threshold
 */
static inline void mod_zone_state(struct zone *zone, enum zone_stat_item item,
                                  long delta, int overstep_mode)
{
        struct per_cpu_pageset __percpu *pcp = zone->pageset;
        s8 __percpu *p = pcp->vm_stat_diff + item;
        long o, n, t, z;

        do {
                z = 0;  /* overflow to zone counters */

                /*
                 * The fetching of the stat_threshold is racy. We may apply
                 * a counter threshold to the wrong the cpu if we get
                 * rescheduled while executing here. However, the next
                 * counter update will apply the threshold again and
                 * therefore bring the counter under the threshold again.
                 *
                 * Most of the time the thresholds are the same anyways
                 * for all cpus in a zone.
                 */
                t = this_cpu_read(pcp->stat_threshold);

                o = this_cpu_read(*p);
                n = delta + o;

                if (n > t || n < -t) {
                        int os = overstep_mode * (t >> 1) ;

                        /* Overflow must be added to zone counters */
                        z = n + os;
                        n = -os;
                }
        } while (this_cpu_cmpxchg(*p, o, n) != o);

        if (z)
                zone_page_state_add(z, zone, item);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
mel gorman          145     97.97%   1        33.33%
christoph lameter   3       2.03%    2        66.67%
Total               148     100.00%  3        100.00%
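The mod_zone_state() loop above avoids disabling interrupts by retrying a this_cpu_cmpxchg() until the per-cpu diff has been updated atomically. As a hedged userspace analog, the same retry shape can be written with C11 atomic_compare_exchange_weak on an ordinary shared slot (the kernel version operates on a per-cpu slot and uses this_cpu_* primitives; this sketch only demonstrates the loop structure and the overstep handling, with invented names and threshold):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long global_count;        /* overflow target, like the zone counter */
static atomic_long diff;                /* stand-in for the per-cpu diff slot */
static const long threshold = 32;

static void mod_state(long delta, int overstep_mode)
{
        long o, n, z;

        do {
                z = 0;                          /* overflow to the global counter */
                o = atomic_load(&diff);
                n = o + delta;
                if (n > threshold || n < -threshold) {
                        long os = overstep_mode * (threshold >> 1);

                        z = n + os;             /* push the batched value (plus overstep) out */
                        n = -os;                /* leave headroom in the local diff */
                }
        } while (!atomic_compare_exchange_weak(&diff, &o, n));

        if (z)
                atomic_fetch_add(&global_count, z);
}

int main(void)
{
        for (int i = 0; i < 100; i++)
                mod_state(1, 1);        /* increment with positive overstep */
        printf("global %ld, diff %ld\n", atomic_load(&global_count),
               atomic_load(&diff));
        return 0;
}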


void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
                         long delta)
{
        mod_zone_state(zone, item, delta, 0);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
mel gorman          16      57.14%   1        20.00%
christoph lameter   11      39.29%   3        60.00%
heiko carstens      1       3.57%    1        20.00%
Total               28      100.00%  5        100.00%

EXPORT_SYMBOL(mod_zone_page_state);
void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
        mod_zone_state(page_zone(page), item, 1, 1);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
mel gorman          28      100.00%  1        100.00%
Total               28      100.00%  1        100.00%

EXPORT_SYMBOL(inc_zone_page_state);
void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
        mod_zone_state(page_zone(page), item, -1, -1);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
mel gorman          30      100.00%  1        100.00%
Total               30      100.00%  1        100.00%

EXPORT_SYMBOL(dec_zone_page_state);
static inline void mod_node_state(struct pglist_data *pgdat,
                                  enum node_stat_item item, int delta, int overstep_mode)
{
        struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
        s8 __percpu *p = pcp->vm_node_stat_diff + item;
        long o, n, t, z;

        do {
                z = 0;  /* overflow to node counters */

                /*
                 * The fetching of the stat_threshold is racy. We may apply
                 * a counter threshold to the wrong the cpu if we get
                 * rescheduled while executing here. However, the next
                 * counter update will apply the threshold again and
                 * therefore bring the counter under the threshold again.
                 *
                 * Most of the time the thresholds are the same anyways
                 * for all cpus in a node.
                 */
                t = this_cpu_read(pcp->stat_threshold);

                o = this_cpu_read(*p);
                n = delta + o;

                if (n > t || n < -t) {
                        int os = overstep_mode * (t >> 1) ;

                        /* Overflow must be added to node counters */
                        z = n + os;
                        n = -os;
                }
        } while (this_cpu_cmpxchg(*p, o, n) != o);

        if (z)
                node_page_state_add(z, pgdat, item);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
christoph lameter   123     83.11%   4        80.00%
mel gorman          25      16.89%   1        20.00%
Total               148     100.00%  5        100.00%


void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
                         long delta)
{
        mod_node_state(pgdat, item, delta, 0);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
christoph lameter   21      75.00%   1        33.33%
mel gorman          6       21.43%   1        33.33%
heiko carstens      1       3.57%    1        33.33%
Total               28      100.00%  3        100.00%

EXPORT_SYMBOL(mod_node_page_state);
void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
        mod_node_state(pgdat, item, 1, 1);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
christoph lameter   19      76.00%   2        66.67%
mel gorman          6       24.00%   1        33.33%
Total               25      100.00%  3        100.00%


void inc_node_page_state(struct page *page, enum node_stat_item item)
{
        mod_node_state(page_pgdat(page), item, 1, 1);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
christoph lameter   24      85.71%   2        66.67%
mel gorman          4       14.29%   1        33.33%
Total               28      100.00%  3        100.00%

EXPORT_SYMBOL(inc_node_page_state);
void dec_node_page_state(struct page *page, enum node_stat_item item)
{
        mod_node_state(page_pgdat(page), item, -1, -1);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
christoph lameter   26      86.67%   2        66.67%
mel gorman          4       13.33%   1        33.33%
Total               30      100.00%  3        100.00%

EXPORT_SYMBOL(dec_node_page_state);
#else

/*
 * Use interrupt disable to serialize counter updates
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
                         long delta)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_zone_page_state(zone, item, delta);
        local_irq_restore(flags);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
christoph lameter   39      97.50%   1        50.00%
heiko carstens      1       2.50%    1        50.00%
Total               40      100.00%  2        100.00%

EXPORT_SYMBOL(mod_zone_page_state);
void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
        unsigned long flags;
        struct zone *zone;

        zone = page_zone(page);
        local_irq_save(flags);
        __inc_zone_state(zone, item);
        local_irq_restore(flags);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
christoph lameter   47      100.00%  2        100.00%
Total               47      100.00%  2        100.00%

EXPORT_SYMBOL(inc_zone_page_state);
void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __dec_zone_page_state(page, item);
        local_irq_restore(flags);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
christoph lameter   35      100.00%  2        100.00%
Total               35      100.00%  2        100.00%

EXPORT_SYMBOL(dec_zone_page_state);
void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __inc_node_state(pgdat, item);
        local_irq_restore(flags);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
mel gorman          35      100.00%  1        100.00%
Total               35      100.00%  1        100.00%

EXPORT_SYMBOL(inc_node_state);
void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
                         long delta)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_node_page_state(pgdat, item, delta);
        local_irq_restore(flags);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
mel gorman          40      100.00%  1        100.00%
Total               40      100.00%  1        100.00%

EXPORT_SYMBOL(mod_node_page_state);
void inc_node_page_state(struct page *page, enum node_stat_item item)
{
        unsigned long flags;
        struct pglist_data *pgdat;

        pgdat = page_pgdat(page);
        local_irq_save(flags);
        __inc_node_state(pgdat, item);
        local_irq_restore(flags);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
mel gorman          47      100.00%  1        100.00%
Total               47      100.00%  1        100.00%

EXPORT_SYMBOL(inc_node_page_state);
void dec_node_page_state(struct page *page, enum node_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __dec_node_page_state(page, item);
        local_irq_restore(flags);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
mel gorman          35      100.00%  1        100.00%
Total               35      100.00%  1        100.00%

EXPORT_SYMBOL(dec_node_page_state);
#endif

/*
 * Fold a differential into the global counters.
 * Returns the number of counters updated.
 */
static int fold_diff(int *zone_diff, int *node_diff)
{
        int i;
        int changes = 0;

        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                if (zone_diff[i]) {
                        atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
                        changes++;
                }

        for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
                if (node_diff[i]) {
                        atomic_long_add(node_diff[i], &vm_node_stat[i]);
                        changes++;
                }

        return changes;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
christoph lameter   56      54.37%   2        66.67%
mel gorman          47      45.63%   1        33.33%
Total               103     100.00%  3        100.00%

/*
 * Update the zone counters for the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and will have to be only done when necessary.
 *
 * The function returns the number of global counters updated.
 */
static int refresh_cpu_vm_stats(bool do_pagesets)
{
        struct pglist_data *pgdat;
        struct zone *zone;
        int i;
        int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
        int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
        int changes = 0;

        for_each_populated_zone(zone) {
                struct per_cpu_pageset __percpu *p = zone->pageset;

                for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
                        int v;

                        v = this_cpu_xchg(p->vm_stat_diff[i], 0);
                        if (v) {
                                atomic_long_add(v, &zone->vm_stat[i]);
                                global_zone_diff[i] += v;
#ifdef CONFIG_NUMA
                                /* 3 seconds idle till flush */
                                __this_cpu_write(p->expire, 3);
#endif
                        }
                }
#ifdef CONFIG_NUMA
                if (do_pagesets) {
                        cond_resched();
                        /*
                         * Deal with draining the remote pageset of this
                         * processor
                         *
                         * Check if there are pages remaining in this pageset
                         * if not then there is nothing to expire.
                         */
                        if (!__this_cpu_read(p->expire) ||
                            !__this_cpu_read(p->pcp.count))
                                continue;

                        /*
                         * We never drain zones local to this processor.
                         */
                        if (zone_to_nid(zone) == numa_node_id()) {
                                __this_cpu_write(p->expire, 0);
                                continue;
                        }

                        if (__this_cpu_dec_return(p->expire))
                                continue;

                        if (__this_cpu_read(p->pcp.count)) {
                                drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
                                changes++;
                        }
                }
#endif
        }

        for_each_online_pgdat(pgdat) {
                struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;

                for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
                        int v;

                        v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
                        if (v) {
                                atomic_long_add(v, &pgdat->vm_stat[i]);
                                global_node_diff[i] += v;
                        }
                }
        }

        changes += fold_diff(global_zone_diff, global_node_diff);
        return changes;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
christoph lameter   225     70.31%   12       85.71%
mel gorman          94      29.38%   1        7.14%
kosaki motohiro     1       0.31%    1        7.14%
Total               320     100.00%  14       100.00%
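On NUMA systems, refresh_cpu_vm_stats() also ages the per-cpu page lists held for remote zones: every local update re-arms an "expire" countdown to 3, and a pageset that still holds remote pages is drained only after it has sat idle for a few refresh intervals. The following sketch is only the decision logic with invented names and a fake page count, not the kernel's data structures:

#include <stdio.h>
#include <stdbool.h>

struct pageset {
        int expire;     /* refresh intervals left before a drain is allowed */
        int count;      /* cached pages held for a remote zone */
        bool local;     /* pageset belongs to this node: never drained here */
};

/* Called once per stats refresh interval; returns true if a drain happened. */
static bool maybe_drain(struct pageset *p)
{
        if (!p->expire || !p->count)
                return false;           /* nothing cached, nothing to age out */
        if (p->local) {
                p->expire = 0;          /* we never drain zones local to this processor */
                return false;
        }
        if (--p->expire)
                return false;           /* still counting down */
        if (p->count) {
                p->count = 0;           /* drain_zone_pages() in the kernel */
                return true;
        }
        return false;
}

int main(void)
{
        struct pageset remote = { .expire = 3, .count = 17, .local = false };

        for (int tick = 1; tick <= 4; tick++)
                printf("tick %d: drained=%d\n", tick, maybe_drain(&remote));
        return 0;
}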

/*
 * Fold the data for an offline cpu into the global array.
 * There cannot be any access by the offline cpu and therefore
 * synchronization is simplified.
 */
void cpu_vm_stats_fold(int cpu)
{
        struct pglist_data *pgdat;
        struct zone *zone;
        int i;
        int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
        int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };

        for_each_populated_zone(zone) {
                struct per_cpu_pageset *p;

                p = per_cpu_ptr(zone->pageset, cpu);

                for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                        if (p->vm_stat_diff[i]) {
                                int v;

                                v = p->vm_stat_diff[i];
                                p->vm_stat_diff[i] = 0;
                                atomic_long_add(v, &zone->vm_stat[i]);
                                global_zone_diff[