Contributors: 17

Author                      Tokens  Token Proportion  Commits  Commit Proportion
Len Brown                      499            64.30%        3             11.11%
Venkatesh Pallipadi             81            10.44%        2              7.41%
Rafael J. Wysocki               69             8.89%        4             14.81%
Deepthi Dharwar                 43             5.54%        2              7.41%
Carsten Emde                    25             3.22%        1              3.70%
Marcelo Tosatti                 17             2.19%        1              3.70%
Jean Delvare                    15             1.93%        1              3.70%
Linus Torvalds (pre-git)         5             0.64%        3             11.11%
James Ketrenos                   5             0.64%        1              3.70%
Andy Grover                      4             0.52%        1              3.70%
Rusty Russell                    4             0.52%        2              7.41%
Ramesh Thomas                    3             0.39%        1              3.70%
Christoph Lameter                2             0.26%        1              3.70%
Daniel Lezcano                   1             0.13%        1              3.70%
Fieah Lim                        1             0.13%        1              3.70%
Jeff Johnson                     1             0.13%        1              3.70%
Linus Torvalds                   1             0.13%        1              3.70%
Total                          776                          27


/*
 * ladder.c - the residency ladder algorithm
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licensed under the GPL.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/jiffies.h>
#include <linux/tick.h>

#include <linux/io.h>
#include <linux/uaccess.h>

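/*
 * The measured residency must exceed the promotion threshold on
 * PROMOTION_COUNT consecutive wakeups before the governor climbs to the
 * next deeper state; a residency below the demotion threshold on
 * DEMOTION_COUNT consecutive wakeups steps it back down.
 */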
#define PROMOTION_COUNT 4
#define DEMOTION_COUNT 1

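/*
 * Per-state bookkeeping: the threshold block holds the residency limits
 * (in ns) and the number of consecutive qualifying wakeups required to
 * move, while the stats block counts how many consecutive wakeups have
 * argued for promotion or demotion so far.
 */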
struct ladder_device_state {
	struct {
		u32 promotion_count;
		u32 demotion_count;
		u64 promotion_time_ns;
		u64 demotion_time_ns;
	} threshold;
	struct {
		int promotion_count;
		int demotion_count;
	} stats;
};

struct ladder_device {
	struct ladder_device_state states[CPUIDLE_STATE_MAX];
};

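/* One ladder instance per CPU, with one entry per available idle state. */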
static DEFINE_PER_CPU(struct ladder_device, ladder_devices);

/**
 * ladder_do_selection - prepares private data for a state change
 * @dev: the CPU
 * @ldev: the ladder device
 * @old_idx: the current state index
 * @new_idx: the new target state index
 */
static inline void ladder_do_selection(struct cpuidle_device *dev,
				       struct ladder_device *ldev,
				       int old_idx, int new_idx)
{
	ldev->states[old_idx].stats.promotion_count = 0;
	ldev->states[old_idx].stats.demotion_count = 0;
	dev->last_state_idx = new_idx;
}

/**
 * ladder_select_state - selects the next state to enter
 * @drv: cpuidle driver
 * @dev: the CPU
 * @dummy: not used
 */
static int ladder_select_state(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev, bool *dummy)
{
	struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
	struct ladder_device_state *last_state;
	int last_idx = dev->last_state_idx;
	int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
	s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
	s64 last_residency;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0)) {
		ladder_do_selection(dev, ldev, last_idx, 0);
		return 0;
	}

	last_state = &ldev->states[last_idx];

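	/*
	 * Estimate the time actually spent idle by subtracting the state's
	 * exit latency from the measured residency.
	 */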
	last_residency = dev->last_residency_ns - drv->states[last_idx].exit_latency_ns;

	/* consider promotion: the next state is usable and the last residency was long enough */
	if (last_idx < drv->state_count - 1 &&
	    !dev->states_usage[last_idx + 1].disable &&
	    last_residency > last_state->threshold.promotion_time_ns &&
	    drv->states[last_idx + 1].exit_latency_ns <= latency_req) {
		last_state->stats.promotion_count++;
		last_state->stats.demotion_count = 0;
		if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
			ladder_do_selection(dev, ldev, last_idx, last_idx + 1);
			return last_idx + 1;
		}
	}

	/* demote immediately if this state is disabled or exceeds the latency limit */
	if (last_idx > first_idx &&
	    (dev->states_usage[last_idx].disable ||
	    drv->states[last_idx].exit_latency_ns > latency_req)) {
		int i;

		for (i = last_idx - 1; i > first_idx; i--) {
			if (drv->states[i].exit_latency_ns <= latency_req)
				break;
		}
		ladder_do_selection(dev, ldev, last_idx, i);
		return i;
	}

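	/* consider demotion: the last residency was below the demotion threshold */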
	if (last_idx > first_idx &&
	    last_residency < last_state->threshold.demotion_time_ns) {
		last_state->stats.demotion_count++;
		last_state->stats.promotion_count = 0;
		if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) {
			ladder_do_selection(dev, ldev, last_idx, last_idx - 1);
			return last_idx - 1;
		}
	}

	/* otherwise remain at the current state */
	return last_idx;
}

/**
 * ladder_enable_device - setup for the governor
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int ladder_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	int i;
	int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
	struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
	struct ladder_device_state *lstate;
	struct cpuidle_state *state;

	dev->last_state_idx = first_idx;

	for (i = first_idx; i < drv->state_count; i++) {
		state = &drv->states[i];
		lstate = &ldev->states[i];

		lstate->stats.promotion_count = 0;
		lstate->stats.demotion_count = 0;

		lstate->threshold.promotion_count = PROMOTION_COUNT;
		lstate->threshold.demotion_count = DEMOTION_COUNT;

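		/*
		 * Use the state's own exit latency as both the promotion and
		 * demotion residency threshold.
		 */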
		if (i < drv->state_count - 1)
			lstate->threshold.promotion_time_ns = state->exit_latency_ns;
		if (i > first_idx)
			lstate->threshold.demotion_time_ns = state->exit_latency_ns;
	}

	return 0;
}

/**
 * ladder_reflect - update last_state_idx with the state actually entered
 * @dev: the CPU
 * @index: the index of the state actually entered
 */
static void ladder_reflect(struct cpuidle_device *dev, int index)
{
	if (index > 0)
		dev->last_state_idx = index;
}

static struct cpuidle_governor ladder_governor = {
	.name =		"ladder",
	.rating =	10,
	.enable =	ladder_enable_device,
	.select =	ladder_select_state,
	.reflect =	ladder_reflect,
};

/**
 * init_ladder - initializes the governor
 */
static int __init init_ladder(void)
{
	/*
	 * When NO_HZ is disabled, or when booting with nohz=off, the ladder
	 * governor is better so give it a higher rating than the menu
	 * governor.
	 */
	if (!tick_nohz_enabled)
		ladder_governor.rating = 25;

	return cpuidle_register_governor(&ladder_governor);
}

postcore_initcall(init_ladder);
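
/*
 * Usage note: depending on kernel version and configuration, the active
 * cpuidle governor can typically be chosen at boot time with the
 * cpuidle.governor=ladder command line option, or at run time by writing
 * "ladder" to /sys/devices/system/cpu/cpuidle/current_governor.
 */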