Release 4.12 drivers/cpuidle/cpuidle.c
  
  
  
/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */
#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <trace/events/power.h>
#include "cpuidle.h"
DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);
DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);
static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;
int cpuidle_disabled(void)
{
	return off;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Len Brown | 10 | 100.00% | 1 | 100.00% | 
| Total | 10 | 100.00% | 1 | 100.00% | 
void disable_cpuidle(void)
{
	off = 1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Len Brown | 11 | 100.00% | 1 | 100.00% | 
| Total | 11 | 100.00% | 1 | 100.00% | 
bool cpuidle_not_available(struct cpuidle_driver *drv,
			   struct cpuidle_device *dev)
{
	return off || !initialized || !drv || !dev || !dev->enabled;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rafael J. Wysocki | 32 | 100.00% | 1 | 100.00% | 
| Total | 32 | 100.00% | 1 | 100.00% | 
/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns only in case of an error or if no driver state is able to put the CPU offline
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int i;
	if (!drv)
		return -ENODEV;
	/* Find lowest-power state that supports long-term idle */
	for (i = drv->state_count - 1; i >= 0; i--)
		if (drv->states[i].enter_dead)
			return drv->states[i].enter_dead(dev, i);
	return -ENODEV;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Boris Ostrovsky | 62 | 71.26% | 1 | 20.00% | 
| Daniel Lezcano | 15 | 17.24% | 2 | 40.00% | 
| Toshi Kani | 9 | 10.34% | 1 | 20.00% | 
| Rafael J. Wysocki | 1 | 1.15% | 1 | 20.00% | 
| Total | 87 | 100.00% | 5 | 100.00% | 
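For reference, a driver opts into cpuidle_play_dead() by supplying an ->enter_dead callback in one of its states, matching the call made in the loop above. A hedged sketch, where my_power_down_cpu() is a hypothetical helper:

/* Hedged sketch of a driver state providing ->enter_dead; the callback runs
 * on the CPU being offlined and is normally not expected to return. */
static int my_enter_dead(struct cpuidle_device *dev, int index)
{
	my_power_down_cpu();	/* hypothetical: deepest platform power-down */
	return 0;
}

/* In the driver's state table (other fields of the state omitted):
 *	.enter_dead = my_enter_dead,
 */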
static int find_deepest_state(struct cpuidle_driver *drv,
			      struct cpuidle_device *dev,
			      unsigned int max_latency,
			      unsigned int forbidden_flags,
			      bool freeze)
{
	unsigned int latency_req = 0;
	int i, ret = 0;
	for (i = 1; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];
		if (s->disabled || su->disable || s->exit_latency <= latency_req
		    || s->exit_latency > max_latency
		    || (s->flags & forbidden_flags)
		    || (freeze && !s->enter_freeze))
			continue;
		latency_req = s->exit_latency;
		ret = i;
	}
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rafael J. Wysocki | 136 | 100.00% | 5 | 100.00% | 
| Total | 136 | 100.00% | 5 | 100.00% | 
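As an illustration of the selection loop above, consider a hypothetical three-state table (values invented for the example); note that the loop starts at index 1, so state 0 is returned only when nothing else qualifies:

/*
 * Hypothetical table (index: exit_latency, flags, ->enter_freeze):
 *   1:  10 us, no flags,                   no ->enter_freeze
 *   2: 100 us, CPUIDLE_FLAG_TIMER_STOP,    has ->enter_freeze
 *   3: 500 us, no flags,                   has ->enter_freeze
 *
 * find_deepest_state(drv, dev, 200, 0, false)                      -> 2
 *	(state 3 exceeds max_latency)
 * find_deepest_state(drv, dev, UINT_MAX, CPUIDLE_FLAG_TIMER_STOP, false) -> 3
 *	(state 2 carries a forbidden flag; state 3 is deeper anyway)
 * find_deepest_state(drv, dev, UINT_MAX, 0, true)                  -> 3
 *	(deepest state providing ->enter_freeze)
 */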
/**
 * cpuidle_use_deepest_state - Set/clear governor override flag.
 * @enable: New value of the flag.
 *
 * Set/unset the current CPU to use the deepest idle state (override governors
 * going forward if set).
 */
void cpuidle_use_deepest_state(bool enable)
{
	struct cpuidle_device *dev;
	preempt_disable();
	dev = cpuidle_get_device();
	if (dev)
		dev->use_deepest_state = enable;
	preempt_enable();
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Jacob jun Pan | 30 | 88.24% | 1 | 50.00% | 
| Li Fei | 4 | 11.76% | 1 | 50.00% | 
| Total | 34 | 100.00% | 2 | 100.00% | 
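One intended use of this override is idle injection: a caller sets the flag on the CPU it runs on, idles for a while, then clears it. A hedged sketch, where do_forced_idle() stands in for whatever mechanism actually keeps the CPU idle:

static void forced_idle_window(void)
{
	cpuidle_use_deepest_state(true);	/* bypass the governor on this CPU */
	do_forced_idle();			/* hypothetical forced-idle period */
	cpuidle_use_deepest_state(false);	/* back to normal governor selection */
}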
/**
 * cpuidle_find_deepest_state - Find the deepest available idle state.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 */
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	return find_deepest_state(drv, dev, UINT_MAX, 0, false);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rafael J. Wysocki | 29 | 100.00% | 2 | 100.00% | 
| Total | 29 | 100.00% | 2 | 100.00% | 
#ifdef CONFIG_SUSPEND
static void enter_freeze_proper(struct cpuidle_driver *drv,
				struct cpuidle_device *dev, int index)
{
	/*
         * trace_suspend_resume() called by tick_freeze() for the last CPU
         * executing it contains RCU usage regarded as invalid in the idle
         * context, so tell RCU about that.
         */
	RCU_NONIDLE(tick_freeze());
	/*
         * The state used here cannot be a "coupled" one, because the "coupled"
         * cpuidle mechanism enables interrupts and doing that with timekeeping
         * suspended is generally unsafe.
         */
	stop_critical_timings();
	drv->states[index].enter_freeze(dev, drv, index);
	WARN_ON(!irqs_disabled());
	/*
         * timekeeping_resume() that will be called by tick_unfreeze() for the
         * first CPU executing it calls functions containing RCU read-side
         * critical sections, so tell RCU about that.
         */
	RCU_NONIDLE(tick_unfreeze());
	start_critical_timings();
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rafael J. Wysocki | 57 | 90.48% | 2 | 66.67% | 
| Lucas Stach | 6 | 9.52% | 1 | 33.33% | 
| Total | 63 | 100.00% | 3 | 100.00% | 
/**
 * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 *
 * If there are states with the ->enter_freeze callback, find the deepest of
 * them and enter it with frozen tick.
 */
int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	int index;
	/*
         * Find the deepest state with ->enter_freeze present, which guarantees
         * that interrupts won't be enabled when it exits and allows the tick to
         * be frozen safely.
         */
	index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
	if (index > 0)
		enter_freeze_proper(drv, dev, index);
	return index;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rafael J. Wysocki | 51 | 98.08% | 4 | 80.00% | 
| Sudeep Holla | 1 | 1.92% | 1 | 20.00% | 
| Total | 52 | 100.00% | 5 | 100.00% | 
#endif /* CONFIG_SUSPEND */
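A state becomes usable for suspend-to-idle by providing an ->enter_freeze callback, invoked as shown in enter_freeze_proper(). A hedged sketch, with my_low_power_wait() as a hypothetical low-level wait:

/* Must not re-enable interrupts and must not rely on the tick, since
 * timekeeping is suspended while this runs. */
static void my_enter_freeze(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv, int index)
{
	my_low_power_wait();
}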
/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @index: index into the states table in @drv of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
			int index)
{
	int entered_state;
	struct cpuidle_state *target_state = &drv->states[index];
	bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
	ktime_t time_start, time_end;
	s64 diff;
	/*
         * Tell the time framework to switch to a broadcast timer because our
         * local timer will be shut down.  If a local timer is used from another
         * CPU as a broadcast timer, this call may fail if it is not available.
         */
	if (broadcast && tick_broadcast_enter()) {
		index = find_deepest_state(drv, dev, target_state->exit_latency,
					   CPUIDLE_FLAG_TIMER_STOP, false);
		if (index < 0) {
			default_idle_call();
			return -EBUSY;
		}
		target_state = &drv->states[index];
	}
	/* Take note of the planned idle state. */
	sched_idle_set_state(target_state);
	trace_cpu_idle_rcuidle(index, dev->cpu);
	time_start = ns_to_ktime(local_clock());
	stop_critical_timings();
	entered_state = target_state->enter(dev, drv, index);
	start_critical_timings();
	time_end = ns_to_ktime(local_clock());
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
	/* The cpu is no longer idle or about to enter idle. */
	sched_idle_set_state(NULL);
	if (broadcast) {
		if (WARN_ON_ONCE(!irqs_disabled()))
			local_irq_disable();
		tick_broadcast_exit();
	}
	if (!cpuidle_state_is_coupled(drv, index))
		local_irq_enable();
	diff = ktime_us_delta(time_end, time_start);
	if (diff > INT_MAX)
		diff = INT_MAX;
	dev->last_residency = (int) diff;
	if (entered_state >= 0) {
		/* Update cpuidle counters */
		/* This can be moved to within driver enter routine
                 * but that results in multiple copies of same code.
                 */
		dev->states_usage[entered_state].time += dev->last_residency;
		dev->states_usage[entered_state].usage++;
	} else {
		dev->last_residency = 0;
	}
	return entered_state;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rafael J. Wysocki | 98 | 35.13% | 4 | 33.33% | 
| Colin Cross | 74 | 26.52% | 1 | 8.33% | 
| Daniel Lezcano | 63 | 22.58% | 3 | 25.00% | 
| Sandeep Tripathy | 18 | 6.45% | 1 | 8.33% | 
| Shreyas B. Prabhu | 11 | 3.94% | 1 | 8.33% | 
| Paul Burton | 9 | 3.23% | 1 | 8.33% | 
| Lucas Stach | 6 | 2.15% | 1 | 8.33% | 
| Total | 279 | 100.00% | 12 | 100.00% | 
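The contract for a driver's ->enter callback, as exercised above: it is typically called with interrupts disabled and returns the index of the state actually entered, or a negative error. A hedged sketch, with my_halt() as a hypothetical architecture-specific idle instruction:

static int my_enter(struct cpuidle_device *dev,
		    struct cpuidle_driver *drv, int index)
{
	my_halt();		/* hypothetical: architecture-specific idle instruction */
	return index;		/* report the state that was actually entered */
}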
/**
 * cpuidle_select - ask the cpuidle framework to choose an idle state
 *
 * @drv: the cpuidle driver
 * @dev: the cpuidle device
 *
 * Returns the index of the idle state.  The return value must not be negative.
 */
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	return cpuidle_curr_governor->select(drv, dev);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Len Brown | 12 | 48.00% | 2 | 40.00% | 
| Daniel Lezcano | 7 | 28.00% | 1 | 20.00% | 
| Deepthi Dharwar | 5 | 20.00% | 1 | 20.00% | 
| Kevin Hilman | 1 | 4.00% | 1 | 20.00% | 
| Total | 25 | 100.00% | 5 | 100.00% | 
/**
 * cpuidle_enter - enter into the specified idle state
 *
 * @drv:   the cpuidle driver tied with the cpu
 * @dev:   the cpuidle device
 * @index: the index in the idle state table
 *
 * Returns the index of the idle state entered, or a negative value in case of error.
 * The error code depends on the backend driver
 */
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		  int index)
{
	if (cpuidle_state_is_coupled(drv, index))
		return cpuidle_enter_state_coupled(dev, drv, index);
	return cpuidle_enter_state(dev, drv, index);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Daniel Lezcano | 27 | 57.45% | 2 | 28.57% | 
| Colin Cross | 14 | 29.79% | 2 | 28.57% | 
| Len Brown | 3 | 6.38% | 1 | 14.29% | 
| Deepthi Dharwar | 2 | 4.26% | 1 | 14.29% | 
| Preeti U. Murthy | 1 | 2.13% | 1 | 14.29% | 
| Total | 47 | 100.00% | 7 | 100.00% | 
/**
 * cpuidle_reflect - tell the underlying governor which state we were in
 *
 * @dev  : the cpuidle device
 * @index: the index in the idle state table
 *
 */
void cpuidle_reflect(struct cpuidle_device *dev, int index)
{
	if (cpuidle_curr_governor->reflect && index >= 0)
		cpuidle_curr_governor->reflect(dev, index);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Len Brown | 14 | 43.75% | 1 | 20.00% | 
| Daniel Lezcano | 12 | 37.50% | 1 | 20.00% | 
| Rafael J. Wysocki | 4 | 12.50% | 1 | 20.00% | 
| Deepthi Dharwar | 1 | 3.12% | 1 | 20.00% | 
| Thomas Renninger | 1 | 3.12% | 1 | 20.00% | 
| Total | 32 | 100.00% | 5 | 100.00% | 
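Taken together, cpuidle_select(), cpuidle_enter() and cpuidle_reflect() form the path the idle loop walks on each idle entry. A condensed, hedged sketch of that caller (loosely modelled on kernel/sched/idle.c, details elided):

static void idle_call_sketch(struct cpuidle_driver *drv,
			     struct cpuidle_device *dev)
{
	int next_state, entered_state;

	next_state = cpuidle_select(drv, dev);			/* governor picks a state */
	entered_state = cpuidle_enter(drv, dev, next_state);	/* enter it (may demote or fail) */
	cpuidle_reflect(dev, entered_state);			/* feed the outcome back to the governor */
}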
/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		initialized = 1;
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Len Brown | 21 | 100.00% | 2 | 100.00% | 
| Total | 21 | 100.00% | 2 | 100.00% | 
/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		wake_up_all_idle_cpus();
	}
	/*
         * Make sure external observers (such as the scheduler)
         * are done looking at pointed idle states.
         */
	synchronize_rcu();
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Len Brown | 19 | 79.17% | 2 | 50.00% | 
| Daniel Lezcano | 4 | 16.67% | 1 | 25.00% | 
| Chuansheng Liu | 1 | 4.17% | 1 | 25.00% | 
| Total | 24 | 100.00% | 4 | 100.00% | 
/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Len Brown | 16 | 100.00% | 1 | 100.00% | 
| Total | 16 | 100.00% | 1 | 100.00% | 
EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);
/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Len Brown | 16 | 100.00% | 1 | 100.00% | 
| Total | 16 | 100.00% | 1 | 100.00% | 
EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
/* Currently used in suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
	mutex_unlock(&cpuidle_lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Preeti U. Murthy | 22 | 100.00% | 1 | 100.00% | 
| Total | 22 | 100.00% | 1 | 100.00% | 
/* Currently used in suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Preeti U. Murthy | 22 | 100.00% | 1 | 100.00% | 
| Total | 22 | 100.00% | 1 | 100.00% | 
/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv;
	if (!dev)
		return -EINVAL;
	if (dev->enabled)
		return 0;
	drv = cpuidle_get_cpu_driver(dev);
	if (!drv || !cpuidle_curr_governor)
		return -EIO;
	if (!dev->registered)
		return -EINVAL;
	ret = cpuidle_add_device_sysfs(dev);
	if (ret)
		return ret;
	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(drv, dev)))
		goto fail_sysfs;
	smp_wmb();
	dev->enabled = 1;
	enabled_devices++;
	return 0;
fail_sysfs:
	cpuidle_remove_device_sysfs(dev);
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Len Brown | 91 | 71.65% | 1 | 16.67% | 
| Daniel Lezcano | 19 | 14.96% | 2 | 33.33% | 
| Srivatsa S. Bhat | 9 | 7.09% | 1 | 16.67% | 
| Robert Lee | 7 | 5.51% | 1 | 16.67% | 
| Deepthi Dharwar | 1 | 0.79% | 1 | 16.67% | 
| Total | 127 | 100.00% | 6 | 100.00% | 
EXPORT_SYMBOL_GPL(cpuidle_enable_device);
/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	if (!dev || !dev->enabled)
		return;
	if (!drv || !cpuidle_curr_governor)
		return;
	dev->enabled = 0;
	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(drv, dev);
	cpuidle_remove_device_sysfs(dev);
	enabled_devices--;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Len Brown | 52 | 75.36% | 1 | 25.00% | 
| Daniel Lezcano | 13 | 18.84% | 1 | 25.00% | 
| Srivatsa S. Bhat | 3 | 4.35% | 1 | 25.00% | 
| Deepthi Dharwar | 1 | 1.45% | 1 | 25.00% | 
| Total | 69 | 100.00% | 4 | 100.00% | 
EXPORT_SYMBOL_GPL(cpuidle_disable_device);
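As the kernel-doc for both functions notes, external callers bracket cpuidle_enable_device()/cpuidle_disable_device() with the pause/resume helpers. A hedged sketch of that pattern:

static void reconfigure_device_sketch(struct cpuidle_device *dev)
{
	cpuidle_pause_and_lock();	/* uninstall the idle handler, take cpuidle_lock */
	cpuidle_disable_device(dev);
	/* ... change the device or driver state here ... */
	cpuidle_enable_device(dev);
	cpuidle_resume_and_unlock();	/* reinstall the handler, drop cpuidle_lock */
}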
static void __cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;
	module_put(drv->owner);
	dev->registered = 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Daniel Lezcano | 47 | 88.68% | 1 | 50.00% | 
| Dave Gerlach | 6 | 11.32% | 1 | 50.00% | 
| Total | 53 | 100.00% | 2 | 100.00% | 
static void __cpuidle_device_init(struct cpuidle_device *dev)
{
	memset(dev->states_usage, 0, sizeof(dev->states_usage));
	dev->last_residency = 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Daniel Lezcano | 32 | 96.97% | 1 | 50.00% | 
| Viresh Kumar | 1 | 3.03% | 1 | 50.00% | 
| Total | 33 | 100.00% | 2 | 100.00% | 
/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	if (!try_module_get(drv->owner))
		return -EINVAL;
	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);
	ret = cpuidle_coupled_register_device(dev);
	if (ret)
		__cpuidle_unregister_device(dev);
	else
		dev->registered = 1;
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Len Brown | 56 | 65.12% | 2 | 22.22% | 
| Venkatesh Pallipadi | 16 | 18.60% | 1 | 11.11% | 
| Daniel Lezcano | 9 | 10.47% | 3 | 33.33% | 
| Colin Cross | 3 | 3.49% | 2 | 22.22% | 
| Viresh Kumar | 2 | 2.33% | 1 | 11.11% | 
| Total | 86 | 100.00% | 9 | 100.00% | 
/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret = -EBUSY;
	if (!dev)
		return -EINVAL;
	mutex_lock(&cpuidle_lock);
	if (dev->registered)
		goto out_unlock;
	__cpuidle_device_init(dev);
	ret = __cpuidle_register_device(dev);
	if (ret)
		goto out_unlock;
	ret = cpuidle_add_sysfs(dev);
	if (ret)
		goto out_unregister;
	ret = cpuidle_enable_device(dev);
	if (ret)
		goto out_sysfs;
	cpuidle_install_idle_handler();
out_unlock:
	mutex_unlock(&cpuidle_lock);
	return ret;
out_sysfs:
	cpuidle_remove_sysfs(dev);
out_unregister:
	__cpuidle_unregister_device(dev);
	goto out_unlock;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Daniel Lezcano | 72 | 61.02% | 4 | 57.14% | 
| Len Brown | 19 | 16.10% | 1 | 14.29% | 
| Venkatesh Pallipadi | 18 | 15.25% | 1 | 14.29% | 
| Srivatsa S. Bhat | 9 | 7.63% | 1 | 14.29% | 
| Total | 118 | 100.00% | 7 | 100.00% | 
EXPORT_SYMBOL_GPL(cpuidle_register_device);
/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	if (!dev || dev->registered == 0)
		return;
	cpuidle_pause_and_lock();
	cpuidle_disable_device(dev);
	cpuidle_remove_sysfs(dev);
	__cpuidle_unregister_device(dev);
	cpuidle_coupled_unregister_device(dev);
	cpuidle_resume_and_unlock();
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Len Brown | 29 | 60.42% | 1 | 16.67% | 
| Venkatesh Pallipadi | 9 | 18.75% | 1 | 16.67% | 
| Colin Cross | 5 | 10.42% | 1 | 16.67% | 
| Konrad Rzeszutek Wilk | 3 | 6.25% | 1 | 16.67% | 
| Daniel Lezcano | 2 | 4.17% | 2 | 33.33% | 
| Total | 48 | 100.00% | 6 | 100.00% | 
EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
/**
 * cpuidle_unregister: unregister a driver and the devices. This function
 * can be used only if the driver has been previously registered through
 * the cpuidle_register function.
 *
 * @drv: a valid pointer to a struct cpuidle_driver
 */
void cpuidle_unregister(struct cpuidle_driver *drv)
{
	int cpu;
	struct cpuidle_device *device;
	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		cpuidle_unregister_device(device);
	}
	cpuidle_unregister_driver(drv);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Daniel Lezcano | 46 | 100.00% | 2 | 100.00% | 
| Total | 46 | 100.00% | 2 | 100.00% | 
EXPORT_SYMBOL_GPL(cpuidle_unregister);
/**
 * cpuidle_register: registers the driver and the cpu devices with the
 * coupled_cpus passed as parameter. This function handles the common
 * initialization pattern shared by the arch-specific drivers. The
 * devices are defined globally in this file.
 *
 * @drv         : a valid pointer to a struct cpuidle_driver
 * @coupled_cpus: a cpumask for the coupled states
 *
 * Returns 0 on success, < 0 otherwise
 */
int cpuidle_register(struct cpuidle_driver *drv,
		     const struct cpumask *const coupled_cpus)
{
	int ret, cpu;
	struct cpuidle_device *device;
	ret = cpuidle_register_driver(drv);
	if (ret) {
		pr_err("failed to register cpuidle driver\n");
		return ret;
	}
	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
		/*
                 * On multiplatform for ARM, the coupled idle states could be
                 * enabled in the kernel even if the cpuidle driver does not
                 * use it. Note, coupled_cpus is a struct copy.
                 */
		if (coupled_cpus)
			device->coupled_cpus = *coupled_cpus;
#endif
		ret = cpuidle_register_device(device);
		if (!ret)
			continue;
		pr_err("Failed to register cpuidle device for cpu%d\n", cpu);
		cpuidle_unregister(drv);
		break;
	}
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Daniel Lezcano | 117 | 99.15% | 2 | 66.67% | 
| Viresh Kumar | 1 | 0.85% | 1 | 33.33% | 
| Total | 118 | 100.00% | 3 | 100.00% | 
EXPORT_SYMBOL_GPL(cpuidle_register);
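A typical arch driver built on this helper defines its state table and calls cpuidle_register() once. A minimal hedged sketch; the state values and the my_enter() callback are illustrative only:

static struct cpuidle_driver my_idle_driver = {
	.name        = "my_idle",
	.owner       = THIS_MODULE,
	.states[0]   = {
		.name             = "WFI",
		.desc             = "shallow wait state",
		.exit_latency     = 1,		/* us */
		.target_residency = 1,		/* us */
		.enter            = my_enter,	/* hypothetical ->enter callback */
	},
	.state_count = 1,
};

static int __init my_idle_init(void)
{
	/* NULL: no coupled states; one device is registered per CPU in drv->cpumask. */
	return cpuidle_register(&my_idle_driver, NULL);
}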
#ifdef CONFIG_SMP
/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
		unsigned long l, void *v)
{
	wake_up_all_idle_cpus();
	return NOTIFY_OK;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Len Brown | 23 | 92.00% | 1 | 50.00% | 
| Chuansheng Liu | 2 | 8.00% | 1 | 50.00% | 
| Total | 25 | 100.00% | 2 | 100.00% | 
static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};
static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Mark Gross | 16 | 84.21% | 1 | 50.00% | 
| Len Brown | 3 | 15.79% | 1 | 50.00% | 
| Total | 19 | 100.00% | 2 | 100.00% | 
#else /* CONFIG_SMP */
#define latency_notifier_init(x) do { } while (0)
#endif /* CONFIG_SMP */
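The notifier above fires whenever a PM QoS CPU latency request changes. A hedged sketch of a driver expressing a 20 us wakeup-latency requirement; adding or removing the request runs the PM QoS notifier chain, which reaches cpuidle_latency_notify() and kicks every idle CPU so the governors can re-evaluate:

static struct pm_qos_request my_latency_req;

static void my_start_low_latency(void)
{
	pm_qos_add_request(&my_latency_req, PM_QOS_CPU_DMA_LATENCY, 20);
}

static void my_stop_low_latency(void)
{
	pm_qos_remove_request(&my_latency_req);
}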
/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;
	if (cpuidle_disabled())
		return -ENODEV;
	ret = cpuidle_add_interface(cpu_subsys.dev_root);
	if (ret)
		return ret;
	latency_notifier_init(&cpuidle_latency_notifier);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Len Brown | 42 | 91.30% | 2 | 66.67% | 
| Kay Sievers | 4 | 8.70% | 1 | 33.33% | 
| Total | 46 | 100.00% | 3 | 100.00% | 
module_param(off, int, 0444);
core_initcall(cpuidle_init);
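Because this file is built into the kernel, the read-only off parameter declared above is set from the kernel command line rather than at module load time, e.g.:

	cpuidle.off=1

With this set, cpuidle_disabled() returns true, cpuidle_init() bails out, and the idle loop falls back to the architecture's default idle call.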
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Len Brown | 575 | 29.53% | 5 | 6.85% | 
| Daniel Lezcano | 510 | 26.19% | 16 | 21.92% | 
| Rafael J. Wysocki | 422 | 21.67% | 16 | 21.92% | 
| Colin Cross | 96 | 4.93% | 3 | 4.11% | 
| Boris Ostrovsky | 62 | 3.18% | 1 | 1.37% | 
| Venkatesh Pallipadi | 48 | 2.47% | 2 | 2.74% | 
| Preeti U. Murthy | 47 | 2.41% | 2 | 2.74% | 
| Jacob jun Pan | 33 | 1.69% | 1 | 1.37% | 
| Srivatsa S. Bhat | 21 | 1.08% | 2 | 2.74% | 
| Sandeep Tripathy | 18 | 0.92% | 1 | 1.37% | 
| Mark Gross | 16 | 0.82% | 1 | 1.37% | 
| Lucas Stach | 12 | 0.62% | 1 | 1.37% | 
| Shreyas B. Prabhu | 11 | 0.56% | 1 | 1.37% | 
| Deepthi Dharwar | 10 | 0.51% | 2 | 2.74% | 
| Toshi Kani | 10 | 0.51% | 1 | 1.37% | 
| Paul Burton | 9 | 0.46% | 1 | 1.37% | 
| Robert Lee | 7 | 0.36% | 1 | 1.37% | 
| Dave Gerlach | 6 | 0.31% | 1 | 1.37% | 
| Arjan van de Ven | 6 | 0.31% | 2 | 2.74% | 
| Viresh Kumar | 4 | 0.21% | 3 | 4.11% | 
| Kay Sievers | 4 | 0.21% | 1 | 1.37% | 
| Li Fei | 4 | 0.21% | 1 | 1.37% | 
| Chuansheng Liu | 3 | 0.15% | 1 | 1.37% | 
| Ingo Molnar | 3 | 0.15% | 1 | 1.37% | 
| Konrad Rzeszutek Wilk | 3 | 0.15% | 1 | 1.37% | 
| Paul Gortmaker | 3 | 0.15% | 1 | 1.37% | 
| Kevin Hilman | 1 | 0.05% | 1 | 1.37% | 
| Thomas Renninger | 1 | 0.05% | 1 | 1.37% | 
| Jean Pihet | 1 | 0.05% | 1 | 1.37% | 
| Sudeep Holla | 1 | 0.05% | 1 | 1.37% | 
| Total | 1947 | 100.00% | 73 | 100.00% | 