Release 4.13 lib/percpu_counter.c
/*
* Fast batching percpu counters.
*/
#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>
#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif
#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
static struct debug_obj_descr percpu_counter_debug_descr;
static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
struct percpu_counter *fbc = addr;
switch (state) {
case ODEBUG_STATE_ACTIVE:
percpu_counter_destroy(fbc);
debug_object_free(fbc, &percpu_counter_debug_descr);
return true;
default:
return false;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 47 | 94.00% | 1 | 50.00% |
Changbin Du | 3 | 6.00% | 1 | 50.00% |
Total | 50 | 100.00% | 2 | 100.00% |
static struct debug_obj_descr percpu_counter_debug_descr = {
.name = "percpu_counter",
.fixup_free = percpu_counter_fixup_free,
};
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
debug_object_init(fbc, &percpu_counter_debug_descr);
debug_object_activate(fbc, &percpu_counter_debug_descr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 28 | 100.00% | 1 | 100.00% |
Total | 28 | 100.00% | 1 | 100.00% |
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
debug_object_deactivate(fbc, &percpu_counter_debug_descr);
debug_object_free(fbc, &percpu_counter_debug_descr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 28 | 100.00% | 1 | 100.00% |
Total | 28 | 100.00% | 1 | 100.00% |
#else /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 11 | 100.00% | 1 | 100.00% |
Total | 11 | 100.00% | 1 | 100.00% |
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 11 | 100.00% | 1 | 100.00% |
Total | 11 | 100.00% | 1 | 100.00% |
#endif /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
int cpu;
unsigned long flags;
raw_spin_lock_irqsave(&fbc->lock, flags);
for_each_possible_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
*pcount = 0;
}
fbc->count = amount;
raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 60 | 85.71% | 1 | 50.00% |
Shaohua Li | 10 | 14.29% | 1 | 50.00% |
Total | 70 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(percpu_counter_set);
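A minimal usage sketch for the function above; the nr_items counter and the reset helper are hypothetical, not part of this file. percpu_counter_set() zeroes every per-CPU delta and stores the new value in the central count while holding fbc->lock, so it is the way to (re)baseline a live counter.
#include <linux/percpu_counter.h>

static struct percpu_counter nr_items;	/* hypothetical counter, initialized elsewhere */

static void nr_items_reset(s64 baseline)
{
	/* Holds fbc->lock, zeroes every per-CPU delta and stores baseline in
	 * the central count, so a concurrent sum sees a consistent value. */
	percpu_counter_set(&nr_items, baseline);
}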
/**
* This function is both preempt and irq safe. The former is due to explicit
* preemption disable. The latter is guaranteed by the fact that the slow path
* is explicitly protected by an irq-safe spinlock whereas the fast path uses
* this_cpu_add(), which is irq-safe by definition. Hence there is no need to
* muck with the irq state before calling this one.
*/
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
s64 count;
preempt_disable();
count = __this_cpu_read(*fbc->counters) + amount;
if (count >= batch || count <= -batch) {
unsigned long flags;
raw_spin_lock_irqsave(&fbc->lock, flags);
fbc->count += count;
__this_cpu_sub(*fbc->counters, count - amount);
raw_spin_unlock_irqrestore(&fbc->lock, flags);
} else {
this_cpu_add(*fbc->counters, amount);
}
preempt_enable();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ravikiran G. Thirumalai | 60 | 57.14% | 1 | 10.00% |
Christoph Lameter | 16 | 15.24% | 2 | 20.00% |
Lei Ming | 10 | 9.52% | 1 | 10.00% |
Shaohua Li | 8 | 7.62% | 1 | 10.00% |
Peter Zijlstra | 7 | 6.67% | 2 | 20.00% |
Hugh Dickins | 2 | 1.90% | 1 | 10.00% |
Fan Du | 1 | 0.95% | 1 | 10.00% |
Nikolay Borisov | 1 | 0.95% | 1 | 10.00% |
Total | 105 | 100.00% | 10 | 100.00% |
EXPORT_SYMBOL(percpu_counter_add_batch);
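To illustrate the batching described in the comment above, here is a hedged sketch of typical callers; the pages_used counter and both helpers are hypothetical. percpu_counter_add() (from <linux/percpu_counter.h>) is the common wrapper that passes the global percpu_counter_batch; callers that tolerate more per-CPU drift in exchange for fewer lock acquisitions can pass a larger batch to percpu_counter_add_batch() directly.
#include <linux/percpu_counter.h>

static struct percpu_counter pages_used;	/* hypothetical counter */

static void account_pages(long nr)
{
	/* Default batch: folds into fbc->count once the per-CPU delta
	 * reaches percpu_counter_batch in either direction. */
	percpu_counter_add(&pages_used, nr);
}

static void account_pages_coarse(long nr)
{
	/* Larger batch: the approximate count may drift further, but the
	 * spinlocked slow path runs less often. */
	percpu_counter_add_batch(&pages_used, nr, 256);
}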
/*
* Add up all the per-cpu counts, return the result. This is a more accurate
* but much slower version of percpu_counter_read_positive()
*/
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
s64 ret;
int cpu;
unsigned long flags;
raw_spin_lock_irqsave(&fbc->lock, flags);
ret = fbc->count;
for_each_online_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
ret += *pcount;
}
raw_spin_unlock_irqrestore(&fbc->lock, flags);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ravikiran G. Thirumalai | 58 | 79.45% | 1 | 20.00% |
Shaohua Li | 10 | 13.70% | 1 | 20.00% |
Mingming Cao | 3 | 4.11% | 1 | 20.00% |
Peter Zijlstra | 1 | 1.37% | 1 | 20.00% |
Andrew Morton | 1 | 1.37% | 1 | 20.00% |
Total | 73 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(__percpu_counter_sum);
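The trade-off noted in the comment above can be shown with a small hedged sketch; the pages_used counter and both readers are hypothetical. percpu_counter_read_positive() only looks at fbc->count and is lock-free but approximate, whereas percpu_counter_sum() (which wraps __percpu_counter_sum()) takes the lock and walks every online CPU.
#include <linux/percpu_counter.h>

static struct percpu_counter pages_used;	/* hypothetical counter */

static s64 pages_used_estimate(void)
{
	/* Cheap: central count only; may lag the true total by roughly
	 * percpu_counter_batch * num_online_cpus(). */
	return percpu_counter_read_positive(&pages_used);
}

static s64 pages_used_exact(void)
{
	/* Accurate but slower: locks the counter and adds every CPU's delta. */
	return percpu_counter_sum(&pages_used);
}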
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
struct lock_class_key *key)
{
unsigned long flags __maybe_unused;
raw_spin_lock_init(&fbc->lock);
lockdep_set_class(&fbc->lock, key);
fbc->count = amount;
fbc->counters = alloc_percpu_gfp(s32, gfp);
if (!fbc->counters)
return -ENOMEM;
debug_percpu_counter_activate(fbc);
#ifdef CONFIG_HOTPLUG_CPU
INIT_LIST_HEAD(&fbc->list);
spin_lock_irqsave(&percpu_counters_lock, flags);
list_add(&fbc->list, &percpu_counters);
spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 58 | 48.33% | 1 | 12.50% |
Peter Zijlstra | 31 | 25.83% | 2 | 25.00% |
Tejun Heo | 22 | 18.33% | 3 | 37.50% |
Masanori ITOH | 8 | 6.67% | 1 | 12.50% |
Thomas Gleixner | 1 | 0.83% | 1 | 12.50% |
Total | 120 | 100.00% | 8 | 100.00% |
EXPORT_SYMBOL(__percpu_counter_init);
void percpu_counter_destroy(struct percpu_counter *fbc)
{
unsigned long flags __maybe_unused;
if (!fbc->counters)
return;
debug_percpu_counter_deactivate(fbc);
#ifdef CONFIG_HOTPLUG_CPU
spin_lock_irqsave(&percpu_counters_lock, flags);
list_del(&fbc->list);
spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
free_percpu(fbc->counters);
fbc->counters = NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 33 | 47.14% | 1 | 20.00% |
Tejun Heo | 16 | 22.86% | 2 | 40.00% |
Eric Dumazet | 13 | 18.57% | 1 | 20.00% |
Peter Zijlstra | 8 | 11.43% | 1 | 20.00% |
Total | 70 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(percpu_counter_destroy);
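A hedged lifecycle sketch for the two functions above; the nr_objects counter and the my_subsys_* hooks are hypothetical. Callers normally use the percpu_counter_init() macro from <linux/percpu_counter.h>, which supplies the lock_class_key and forwards to __percpu_counter_init(); the return value must be checked because the per-CPU allocation can fail.
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/percpu_counter.h>

static struct percpu_counter nr_objects;	/* hypothetical counter */

static int __init my_subsys_init(void)
{
	/* Allocates the per-CPU array and, with CONFIG_HOTPLUG_CPU, links
	 * the counter onto the global percpu_counters list. */
	return percpu_counter_init(&nr_objects, 0, GFP_KERNEL);
}

static void __exit my_subsys_exit(void)
{
	/* Unlinks from the hotplug list and frees the per-CPU array; safe
	 * even if init failed, since fbc->counters is then NULL. */
	percpu_counter_destroy(&nr_objects);
}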
int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);
static int compute_batch_value(unsigned int cpu)
{
int nr = num_online_cpus();
percpu_counter_batch = max(32, nr*2);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 23 | 76.67% | 1 | 50.00% |
Sebastian Andrzej Siewior | 7 | 23.33% | 1 | 50.00% |
Total | 30 | 100.00% | 2 | 100.00% |
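As a hedged worked example of what this sizing means (the 64-CPU machine is hypothetical): once 64 CPUs are online the callback sets percpu_counter_batch = max(32, 2 * 64) = 128, so a plain percpu_counter_read() can lag the true total by up to roughly batch * num_online_cpus() = 128 * 64 = 8192. That product is exactly the slack __percpu_counter_compare() below allows itself before falling back to a precise sum.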
static int percpu_counter_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
struct percpu_counter *fbc;
compute_batch_value(cpu);
spin_lock_irq(&percpu_counters_lock);
list_for_each_entry(fbc, &percpu_counters, list) {
s32 *pcount;
raw_spin_lock(&fbc->lock);
pcount = per_cpu_ptr(fbc->counters, cpu);
fbc->count += *pcount;
*pcount = 0;
raw_spin_unlock(&fbc->lock);
}
spin_unlock_irq(&percpu_counters_lock);
#endif
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 77 | 82.80% | 1 | 20.00% |
Eric Dumazet | 8 | 8.60% | 2 | 40.00% |
Sebastian Andrzej Siewior | 6 | 6.45% | 1 | 20.00% |
Tejun Heo | 2 | 2.15% | 1 | 20.00% |
Total | 93 | 100.00% | 5 | 100.00% |
/*
* Compare counter against given value.
* Return 1 if greater, 0 if equal, and -1 if less.
*/
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
s64 count;
count = percpu_counter_read(fbc);
/* Check to see if rough count will be sufficient for comparison */
if (abs(count - rhs) > (batch * num_online_cpus())) {
if (count > rhs)
return 1;
else
return -1;
}
/* Need to use precise count */
count = percpu_counter_sum(fbc);
if (count > rhs)
return 1;
else if (count < rhs)
return -1;
else
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tim Chen | 86 | 94.51% | 1 | 50.00% |
Dave Chinner | 5 | 5.49% | 1 | 50.00% |
Total | 91 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(__percpu_counter_compare);
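A hedged sketch of a typical limit check built on the function above; the nr_allocated counter, the limit, and the helper are hypothetical. Most callers use the percpu_counter_compare() wrapper, which passes the global percpu_counter_batch, so the expensive precise sum only runs when the approximate count is within batch * num_online_cpus() of the value being compared against.
#include <linux/percpu_counter.h>

static struct percpu_counter nr_allocated;	/* hypothetical counter */

static bool over_limit(s64 limit)
{
	/* > 0 means the count exceeds the limit; the cheap approximate read
	 * is used whenever it is conclusive. */
	return percpu_counter_compare(&nr_allocated, limit) > 0;
}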
static int __init percpu_counter_startup(void)
{
int ret;
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
compute_batch_value, NULL);
WARN_ON(ret < 0);
ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
"lib/percpu_cnt:dead", NULL,
percpu_counter_cpu_dead);
WARN_ON(ret < 0);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sebastian Andrzej Siewior | 36 | 65.45% | 1 | 33.33% |
Andrew Morton | 17 | 30.91% | 1 | 33.33% |
Eric Dumazet | 2 | 3.64% | 1 | 33.33% |
Total | 55 | 100.00% | 3 | 100.00% |
module_init(percpu_counter_startup);
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 223 | 23.38% | 2 | 6.25% |
Tejun Heo | 199 | 20.86% | 3 | 9.38% |
Ravikiran G. Thirumalai | 134 | 14.05% | 1 | 3.12% |
Peter Zijlstra | 114 | 11.95% | 6 | 18.75% |
Tim Chen | 91 | 9.54% | 1 | 3.12% |
Eric Dumazet | 57 | 5.97% | 3 | 9.38% |
Sebastian Andrzej Siewior | 49 | 5.14% | 1 | 3.12% |
Shaohua Li | 28 | 2.94% | 1 | 3.12% |
Christoph Lameter | 16 | 1.68% | 2 | 6.25% |
Lei Ming | 10 | 1.05% | 1 | 3.12% |
Masanori ITOH | 8 | 0.84% | 1 | 3.12% |
Dave Chinner | 6 | 0.63% | 1 | 3.12% |
Glauber de Oliveira Costa | 5 | 0.52% | 1 | 3.12% |
Mingming Cao | 3 | 0.31% | 1 | 3.12% |
Nikolay Borisov | 3 | 0.31% | 2 | 6.25% |
Changbin Du | 3 | 0.31% | 1 | 3.12% |
Hugh Dickins | 2 | 0.21% | 1 | 3.12% |
Fan Du | 1 | 0.10% | 1 | 3.12% |
Thomas Gleixner | 1 | 0.10% | 1 | 3.12% |
Al Viro | 1 | 0.10% | 1 | 3.12% |
Total | 954 | 100.00% | 32 | 100.00% |