Release 4.17 drivers/cpuidle/governors/menu.c

/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/math64.h>
#include <linux/cpu.h>

/*
 * Please note when changing the tuning values:
 * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the multiplication in
 * a scaling operation may overflow on 32 bit platforms.
 * In that case, #define RESOLUTION as ULL to get a 64 bit result:
 * #define RESOLUTION 1024ULL
 *
 * The default values do not overflow.
 */

#define BUCKETS 12		/* correction-factor buckets: 6 magnitudes x 2 IO states */
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)	/* 8 most recent idle intervals */
#define RESOLUTION 1024		/* fixed-point scale of the correction factor */
#define DECAY 8			/* running-average decay divisor */
#define MAX_INTERESTING 50000	/* sleeps >= 50 ms count as perfect predictions */
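
/*
 * A minimal compile-time guard for the invariant described above; this
 * is an illustrative sketch, not used by the governor, and assumes
 * BUILD_BUG_ON() from <linux/build_bug.h> (pulled in via kernel.h).
 * With the defaults, (50000 - 1) * 1024 is about 5.1e7, comfortably
 * below UINT_MAX (~4.3e9), so the condition is false and nothing fires.
 */
static inline void menu_tuning_sanity_check(void)
{
	BUILD_BUG_ON((u64)(MAX_INTERESTING - 1) * RESOLUTION > UINT_MAX);
}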


/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * that is based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor; however, it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual factor is whether there is (disk) IO outstanding.
 * (as a special twist, we consider every sleep longer than 50 milliseconds
 * as perfect; there are no power gains for sleeping longer than this)
 *
 * For these two reasons we keep an array of 12 independent factors that is
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed-transfer-rate devices such as
 * mice.
 * For this, we use a different predictor: we track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as the prediction.
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real,
 * noticeable impact on workloads, which is not acceptable for most sysadmins;
 * in addition, lost performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier, matching the code
 * in performance_multiplier() below:
 * a value of 20 is added for each unit of "per cpu load average" we have;
 * a value of 10 is added for each process that is waiting for
 * IO on this CPU.
 * (these values are experimentally determined)
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a CPU-local, instantaneous input.
 * The iowait factor may look low, but realize that iowait is also already
 * represented in the system load average.
 *
 */
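
/*
 * Illustrative sketch of the correction-factor arithmetic described
 * above (a hypothetical helper mirroring what menu_select() computes):
 * factors are stored as fixed point, with RESOLUTION * DECAY == 8192
 * meaning unity.  A stored factor of 4096 says "historically we slept
 * for half of the next-timer estimate", so next_timer_us == 1000
 * scales to a prediction of 500 us.
 */
static inline unsigned int menu_scaled_prediction(unsigned int next_timer_us,
						  unsigned int factor)
{
	return DIV_ROUND_CLOSEST_ULL((uint64_t)next_timer_us * factor,
				     RESOLUTION * DECAY);
}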


struct menu_device {
	int		last_state_idx;
	int		needs_update;
	int		tick_wakeup;

	unsigned int	next_timer_us;
	unsigned int	predicted_us;
	unsigned int	bucket;
	unsigned int	correction_factor[BUCKETS];
	unsigned int	intervals[INTERVALS];
	int		interval_ptr;
};

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

static inline int get_loadavg(unsigned long load)
{
	return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10;
}
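
/*
 * Worked example of the fixed-point decoding above, assuming the
 * standard FSHIFT == 11 (FIXED_1 == 2048): a raw load of 3072 encodes
 * a load average of 1.50, so LOAD_INT() yields 1, LOAD_FRAC() yields
 * 50, and get_loadavg() returns 1 * 10 + 50 / 10 == 15, i.e. ten
 * points per whole load plus one point per tenth of load.
 */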

static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with IO pending, one without.
	 * This allows us to calculate E(duration)|iowait
	 */
	if (nr_iowaiters)
		bucket = BUCKETS/2;

	if (duration < 10)
		return bucket;
	if (duration < 100)
		return bucket + 1;
	if (duration < 1000)
		return bucket + 2;
	if (duration < 10000)
		return bucket + 3;
	if (duration < 100000)
		return bucket + 4;
	return bucket + 5;
}
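
/*
 * Illustrative self-test of the mapping above (hypothetical helper,
 * not part of the governor): a 250 us duration with no IO waiters
 * falls in bucket 2 (the 100..999 us magnitude), while the same
 * duration with IO pending shifts into the upper half of the array.
 */
static inline bool which_bucket_example(void)
{
	return which_bucket(250, 0) == 2 &&
	       which_bucket(250, 1) == BUCKETS / 2 + 2;
}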

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(unsigned long nr_iowaiters, unsigned long load)
{
	int mult = 1;

	/* for higher loadavg, we are more reluctant */
	mult += 2 * get_loadavg(load);

	/* for IO wait tasks (per cpu!) we add 10x each */
	mult += 10 * nr_iowaiters;

	return mult;
}
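
/*
 * Worked example of the multiplier above: with a per-CPU load average
 * of 1.00, get_loadavg() returns 10, so two IO waiters give
 * mult = 1 + 2 * 10 + 10 * 2 == 41; a C state is then only a
 * candidate if its exit latency fits 41 times into the predicted idle
 * interval.
 */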

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static unsigned int get_typical_interval(struct menu_device *data)
{
	int i, divisor;
	unsigned int max, thresh, avg;
	uint64_t sum, variance;

	thresh = UINT_MAX; /* Discard outliers above this value */

again:

	/* First calculate the average of past intervals */
	max = 0;
	sum = 0;
	divisor = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			sum += value;
			divisor++;
			if (value > max)
				max = value;
		}
	}
	if (divisor == INTERVALS)
		avg = sum >> INTERVAL_SHIFT;
	else
		avg = div_u64(sum, divisor);

	/* Then try to determine variance */
	variance = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			int64_t diff = (int64_t)value - avg;
			variance += diff * diff;
		}
	}
	if (divisor == INTERVALS)
		variance >>= INTERVAL_SHIFT;
	else
		do_div(variance, divisor);

	/*
	 * The typical interval is obtained when standard deviation is
	 * small (stddev <= 20 us, variance <= 400 us^2) or standard
	 * deviation is small compared to the average interval (avg >
	 * 6*stddev, avg^2 > 36*variance). The average is smaller than
	 * UINT_MAX aka U32_MAX, so computing its square does not
	 * overflow a u64. We simply reject this candidate average if
	 * the standard deviation is greater than 715 s (which is
	 * rather unlikely).
	 *
	 * Use this result only if there is no timer to wake us up sooner.
	 */
	if (likely(variance <= U64_MAX/36)) {
		if ((((u64)avg*avg > variance*36) &&
		     (divisor * 4 >= INTERVALS * 3)) ||
		    variance <= 400) {
			return avg;
		}
	}

	/*
	 * If we have outliers to the upside in our distribution, discard
	 * those by setting the threshold to exclude these outliers, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the bottom 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
	 */
	if ((divisor * 4) <= INTERVALS * 3)
		return UINT_MAX;

	thresh = max - 1;
	goto again;
}
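
/*
 * Numerical illustration of the detector above (sample values are
 * hypothetical): for intervals {98, 100, 102, 100, 99, 101, 100, 100}
 * the average is 100 us and the variance is about 1 us^2, well under
 * 400, so 100 is returned.  For {100, 100, 100, 10000, 100, 100, 100,
 * 100} the first pass fails both variance tests, the 10000 us outlier
 * is excluded by lowering thresh to max - 1, and the second pass over
 * the remaining seven samples returns 100.
 */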

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 * @stop_tick: indication on whether or not to stop the tick
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		       bool *stop_tick)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	struct device *device = get_cpu_device(dev->cpu);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int i;
	int first_idx;
	int idx;
	unsigned int interactivity_req;
	unsigned int expected_interval;
	unsigned long nr_iowaiters, cpu_load;
	int resume_latency = dev_pm_qos_raw_read_value(device);
	ktime_t delta_next;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	if (resume_latency < latency_req &&
	    resume_latency != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
		latency_req = resume_latency;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0)) {
		*stop_tick = false;
		return 0;
	}

	/* determine the expected residency time, round up */
	data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));

	get_iowait_load(&nr_iowaiters, &cpu_load);
	data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);

	/*
	 * Force the result of multiplication to be 64 bits even if both
	 * operands are 32 bits.
	 * Make sure to round up for half microseconds.
	 */
	data->predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
					data->correction_factor[data->bucket],
					RESOLUTION * DECAY);

	expected_interval = get_typical_interval(data);
	expected_interval = min(expected_interval, data->next_timer_us);

	first_idx = 0;
	if (drv->states[0].flags & CPUIDLE_FLAG_POLLING) {
		struct cpuidle_state *s = &drv->states[1];
		unsigned int polling_threshold;

		/*
		 * We want to default to C1 (hlt), not to busy polling
		 * unless the timer is happening really really soon, or
		 * C1's exit latency exceeds the user configured limit.
		 */
		polling_threshold = max_t(unsigned int, 20, s->target_residency);
		if (data->next_timer_us > polling_threshold &&
		    latency_req > s->exit_latency && !s->disabled &&
		    !dev->states_usage[1].disable)
			first_idx = 1;
	}

	/*
	 * Use the lowest expected idle interval to pick the idle state.
	 */
	data->predicted_us = min(data->predicted_us, expected_interval);

	if (tick_nohz_tick_stopped()) {
		/*
		 * If the tick is already stopped, the cost of possible short
		 * idle duration misprediction is much higher, because the CPU
		 * may be stuck in a shallow idle state for a long time as a
		 * result of it.  In that case say we might mispredict and try
		 * to force the CPU into a state for which we would have stopped
		 * the tick, unless a timer is going to expire really soon
		 * anyway.
		 */
		if (data->predicted_us < TICK_USEC)
			data->predicted_us = min_t(unsigned int, TICK_USEC,
						   ktime_to_us(delta_next));
	} else {
		/*
		 * Use the performance multiplier and the user-configurable
		 * latency_req to determine the maximum exit latency.
		 */
		interactivity_req = data->predicted_us /
				    performance_multiplier(nr_iowaiters, cpu_load);
		if (latency_req > interactivity_req)
			latency_req = interactivity_req;
	}

	expected_interval = data->predicted_us;
	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	idx = -1;
	for (i = first_idx; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;
		if (idx == -1)
			idx = i; /* first enabled state */
		if (s->target_residency > data->predicted_us)
			break;
		if (s->exit_latency > latency_req) {
			/*
			 * If we break out of the loop for latency reasons, use
			 * the target residency of the selected state as the
			 * expected idle duration so that the tick is retained
			 * as long as that target residency is low enough.
			 */
			expected_interval = drv->states[idx].target_residency;
			break;
		}
		idx = i;
	}

	if (idx == -1)
		idx = 0; /* No states enabled. Must use 0. */

	/*
	 * Don't stop the tick if the selected state is a polling one or if the
	 * expected idle duration is shorter than the tick period length.
	 */
	if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
	    expected_interval < TICK_USEC) {
		unsigned int delta_next_us = ktime_to_us(delta_next);

		*stop_tick = false;

		if (!tick_nohz_tick_stopped() && idx > 0 &&
		    drv->states[idx].target_residency > delta_next_us) {
			/*
			 * The tick is not going to be stopped and the target
			 * residency of the state to be returned is not within
			 * the time until the next timer event including the
			 * tick, so try to correct that.
			 */
			for (i = idx - 1; i >= 0; i--) {
				if (drv->states[i].disabled ||
				    dev->states_usage[i].disable)
					continue;

				idx = i;
				if (drv->states[i].target_residency <= delta_next_us)
					break;
			}
		}
	}

	data->last_state_idx = idx;

	return data->last_state_idx;
}
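
/*
 * Sketch of the caller side (simplified from the 4.17 idle loop in
 * kernel/sched/idle.c; shown here only for illustration, hence kept
 * under #if 0): the governor is reached via cpuidle_select() and
 * *stop_tick tells the idle loop whether stopping the scheduler tick
 * is worthwhile before the state is actually entered.
 */
#if 0	/* illustrative only */
	bool stop_tick = true;

	next_state = cpuidle_select(drv, dev, &stop_tick);
	if (stop_tick)
		tick_nohz_idle_stop_tick();
	else
		tick_nohz_idle_retain_tick();
	entered_state = call_cpuidle(drv, dev, next_state);
#endif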

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);

	data->last_state_idx = index;
	data->needs_update = 1;
	data->tick_wakeup = tick_nohz_idle_got_tick();
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int last_idx = data->last_state_idx;
	struct cpuidle_state *target = &drv->states[last_idx];
	unsigned int measured_us;
	unsigned int new_factor;

	/*
	 * Try to figure out how much time passed between entry to low
	 * power state and occurrence of the wakeup event.
	 *
	 * If the entered idle state didn't support residency measurements,
	 * we use them anyway if they are short, and if long,
	 * truncate to the whole expected time.
	 *
	 * Any measured amount of time will include the exit latency.
	 * Since we are interested in when the wakeup begun, not when it
	 * was completed, we must subtract the exit latency. However, if
	 * the measured amount of time is less than the exit latency,
	 * assume the state was never reached and the exit latency is 0.
	 */

	if (data->tick_wakeup && data->next_timer_us > TICK_USEC) {
		/*
		 * The nohz code said that there wouldn't be any events within
		 * the tick boundary (if the tick was stopped), but the idle
		 * duration predictor had a differing opinion.  Since the CPU
		 * was woken up by a tick (that wasn't stopped after all), the
		 * predictor was not quite right, so assume that the CPU could
		 * have been idle long (but not forever) to help the idle
		 * duration predictor do a better job next time.
		 */
		measured_us = 9 * MAX_INTERESTING / 10;
	} else {
		/* measured value */
		measured_us = cpuidle_get_last_residency(dev);

		/* Deduct exit latency */
		if (measured_us > 2 * target->exit_latency)
			measured_us -= target->exit_latency;
		else
			measured_us /= 2;
	}

	/* Make sure our coefficients do not exceed unity */
	if (measured_us > data->next_timer_us)
		measured_us = data->next_timer_us;

	/* Update our correction ratio */
	new_factor = data->correction_factor[data->bucket];
	new_factor -= new_factor / DECAY;

	if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
		new_factor += RESOLUTION * measured_us / data->next_timer_us;
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time. Fortunately, due to rounding,
	 * new_factor will stay nonzero regardless of measured_us values
	 * and the compiler can eliminate this test as long as DECAY > 1.
	 */
	if (DECAY == 1 && unlikely(new_factor == 0))
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = measured_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}
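
/*
 * Worked example of the running-average update above: with DECAY == 8
 * and a current factor of 8192 (unity), a sleep that measured half of
 * next_timer_us gives
 *   new_factor = 8192 - 8192/8 + 1024/2 == 7680,
 * i.e. the factor moves one eighth of the way from unity (8192) toward
 * the 0.5 encoding (4096).
 */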

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
	int i;

	memset(data, 0, sizeof(struct menu_device));

	/*
	 * if the correction factor is 0 (eg first time init or cpu hotplug
	 * etc), we actually want to start out with a unity factor.
	 */
	for (i = 0; i < BUCKETS; i++)
		data->correction_factor[i] = RESOLUTION * DECAY;

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);
