cregit-Linux how code gets into the kernel

Release 4.14 drivers/cpufreq/cpufreq_conservative.c

Directory: drivers/cpufreq
/*
 *  drivers/cpufreq/cpufreq_conservative.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *            (C)  2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include "cpufreq_governor.h"


struct cs_policy_dbs_info {
	struct policy_dbs_info policy_dbs;
	unsigned int down_skip;
	unsigned int requested_freq;
};


static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
{
	return container_of(policy_dbs, struct cs_policy_dbs_info, policy_dbs);
}

Contributors

Person                Tokens  Prop     Commits  Commit Prop
Rafael J. Wysocki     25      100.00%  1        100.00%
Total                 25      100.00%  1        100.00%

struct cs_dbs_tuners {
	unsigned int down_threshold;
	unsigned int freq_step;
};

/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
#define DEF_FREQUENCY_STEP			(5)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(10)

static inline unsigned int get_freq_step(struct cs_dbs_tuners *cs_tuners,
					 struct cpufreq_policy *policy)
{
	unsigned int freq_step = (cs_tuners->freq_step * policy->max) / 100;

	/* max freq cannot be less than 100. But who knows... */
	if (unlikely(freq_step == 0))
		freq_step = DEF_FREQUENCY_STEP;

	return freq_step;
}

Contributors

Person                Tokens  Prop     Commits  Commit Prop
Stratos Karafotis     46      90.20%   1        50.00%
Viresh Kumar          5       9.80%    1        50.00%
Total                 51      100.00%  2        100.00%
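
As a quick illustration of the arithmetic in get_freq_step() above, the standalone sketch below (user-space code with hypothetical values, not part of the kernel source) computes the step size for a policy whose maximum is 2,400,000 kHz with the default 5% freq_step:

#include <stdio.h>

int main(void)
{
	unsigned int freq_step_pct = 5;      /* cs_tuners->freq_step (default) */
	unsigned int policy_max = 2400000;   /* hypothetical policy->max, in kHz */
	unsigned int freq_step = (freq_step_pct * policy_max) / 100;

	/* 5% of 2,400,000 kHz = 120,000 kHz per step */
	printf("freq_step = %u kHz\n", freq_step);
	return 0;
}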

/*
 * Every sampling_rate, we check, if current idle time is less than 20%
 * (default), then we try to increase frequency. Every sampling_rate *
 * sampling_down_factor, we check, if current idle time is more than 80%
 * (default), then we try to decrease frequency
 *
 * Frequency updates happen at minimum steps of 5% (default) of maximum
 * frequency
 */
static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	unsigned int requested_freq = dbs_info->requested_freq;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int load = dbs_update(policy);
	unsigned int freq_step;

	/*
	 * break out if we 'cannot' reduce the speed as the user might
	 * want freq_step to be zero
	 */
	if (cs_tuners->freq_step == 0)
		goto out;

	/*
	 * If requested_freq is out of range, it is likely that the limits
	 * changed in the meantime, so fall back to current frequency in that
	 * case.
	 */
	if (requested_freq > policy->max || requested_freq < policy->min)
		requested_freq = policy->cur;

	freq_step = get_freq_step(cs_tuners, policy);

	/*
	 * Decrease requested_freq one freq_step for each idle period that
	 * we didn't update the frequency.
	 */
	if (policy_dbs->idle_periods < UINT_MAX) {
		unsigned int freq_steps = policy_dbs->idle_periods * freq_step;

		if (requested_freq > freq_steps)
			requested_freq -= freq_steps;
		else
			requested_freq = policy->min;

		policy_dbs->idle_periods = UINT_MAX;
	}

	/* Check for frequency increase */
	if (load > dbs_data->up_threshold) {
		dbs_info->down_skip = 0;

		/* if we are already at full speed then break out early */
		if (requested_freq == policy->max)
			goto out;

		requested_freq += freq_step;
		if (requested_freq > policy->max)
			requested_freq = policy->max;

		__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_H);
		dbs_info->requested_freq = requested_freq;
		goto out;
	}

	/* if sampling_down_factor is active break out early */
	if (++dbs_info->down_skip < dbs_data->sampling_down_factor)
		goto out;

	dbs_info->down_skip = 0;

	/* Check for frequency decrease */
	if (load < cs_tuners->down_threshold) {
		/*
		 * if we cannot reduce the frequency anymore, break out early
		 */
		if (requested_freq == policy->min)
			goto out;

		if (requested_freq > freq_step)
			requested_freq -= freq_step;
		else
			requested_freq = policy->min;

		__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_L);
		dbs_info->requested_freq = requested_freq;
	}

 out:
	return dbs_data->sampling_rate;
}

Contributors

Person                Tokens  Prop     Commits  Commit Prop
Rafael J. Wysocki     105     34.54%   5        25.00%
Viresh Kumar          77      25.33%   7        35.00%
Stratos Karafotis     76      25.00%   3        15.00%
Xiaoguang Chen        15      4.93%    1        5.00%
Elias Oltmanns        12      3.95%    1        5.00%
Alexander Clouter     10      3.29%    1        5.00%
Namhyung Kim          9       2.96%    2        10.00%
Total                 304     100.00%  20       100.00%
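
For readers tracing cs_dbs_update() above, the following user-space sketch (hypothetical helper names; it deliberately ignores the idle_periods and sampling_down_factor handling) models just the up/down threshold decision:

#include <stdio.h>

/*
 * Simplified model of the threshold decision in cs_dbs_update() (hypothetical
 * user-space helper, not the kernel code path): raise the requested frequency
 * by one step when load exceeds up_threshold, lower it by one step when load
 * falls below down_threshold, clamped to the policy limits. The kernel version
 * additionally handles idle periods, sampling_down_factor and freq_step == 0.
 */
static unsigned int model_next_freq(unsigned int load, unsigned int requested,
				    unsigned int min, unsigned int max,
				    unsigned int step, unsigned int up_thr,
				    unsigned int down_thr)
{
	if (load > up_thr) {
		requested += step;
		if (requested > max)
			requested = max;
	} else if (load < down_thr) {
		requested = (requested > min + step) ? requested - step : min;
	}
	return requested;
}

int main(void)
{
	/* Hypothetical policy: 800,000..2,400,000 kHz, 120,000 kHz per step. */
	unsigned int freq = 800000;

	freq = model_next_freq(90, freq, 800000, 2400000, 120000, 80, 20);
	printf("after a busy sample: %u kHz\n", freq);	/* 920000 */
	freq = model_next_freq(5, freq, 800000, 2400000, 120000, 80, 20);
	printf("after an idle sample: %u kHz\n", freq);	/* 800000 */
	return 0;
}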

/************************** sysfs interface ************************/
static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
					  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	dbs_data->sampling_down_factor = input;
	return count;
}

Contributors

Person                Tokens  Prop     Commits  Commit Prop
Dave Jones            45      60.00%   1        16.67%
Rafael J. Wysocki     12      16.00%   1        16.67%
Viresh Kumar          10      13.33%   3        50.00%
Alexander Clouter     8       10.67%   1        16.67%
Total                 75      100.00%  6        100.00%


static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
		return -EINVAL;

	dbs_data->up_threshold = input;
	return count;
}

Contributors

Person                Tokens  Prop     Commits  Commit Prop
Dave Jones            60      69.77%   1        16.67%
Viresh Kumar          13      15.12%   3        50.00%
Rafael J. Wysocki     12      13.95%   1        16.67%
Alexander Clouter     1       1.16%    1        16.67%
Total                 86      100.00%  6        100.00%


static ssize_t store_down_threshold(struct gov_attr_set *attr_set,
				    const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);

	/* cannot be lower than 1 otherwise freq will not fall */
	if (ret != 1 || input < 1 || input > 100 ||
	    input >= dbs_data->up_threshold)
		return -EINVAL;

	cs_tuners->down_threshold = input;
	return count;
}

Contributors

Person                Tokens  Prop     Commits  Commit Prop
Dave Jones            60      65.93%   1        12.50%
Viresh Kumar          13      14.29%   3        37.50%
Rafael J. Wysocki     12      13.19%   1        12.50%
Alexander Clouter     4       4.40%    2        25.00%
Tomasz Wilczyński     2       2.20%    1        12.50%
Total                 91      100.00%  8        100.00%


static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
				      const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_data->ignore_nice_load) /* nothing to do */
		return count;

	dbs_data->ignore_nice_load = input;

	/* we need to re-evaluate prev_cpu_idle */
	gov_update_cpu_data(dbs_data);

	return count;
}

Contributors

Person                Tokens  Prop     Commits  Commit Prop
Dave Jones            71      74.74%   1        12.50%
Rafael J. Wysocki     13      13.68%   2        25.00%
Viresh Kumar          8       8.42%    3        37.50%
Alexander Clouter     2       2.11%    1        12.50%
Thomas Renninger      1       1.05%    1        12.50%
Total                 95      100.00%  8        100.00%


static ssize_t store_freq_step(struct gov_attr_set *attr_set, const char *buf,
			       size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 100)
		input = 100;

	/*
	 * no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :)
	 */
	cs_tuners->freq_step = input;
	return count;
}

Contributors

Person                Tokens  Prop     Commits  Commit Prop
Dave Jones            62      71.26%   1        20.00%
Viresh Kumar          12      13.79%   2        40.00%
Rafael J. Wysocki     12      13.79%   1        20.00%
Thomas Renninger      1       1.15%    1        20.00%
Total                 87      100.00%  5        100.00%

gov_show_one_common(sampling_rate);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(up_threshold);
gov_show_one_common(ignore_nice_load);
gov_show_one(cs, down_threshold);
gov_show_one(cs, freq_step);

gov_attr_rw(sampling_rate);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(up_threshold);
gov_attr_rw(ignore_nice_load);
gov_attr_rw(down_threshold);
gov_attr_rw(freq_step);

static struct attribute *cs_attributes[] = {
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&down_threshold.attr,
	&ignore_nice_load.attr,
	&freq_step.attr,
	NULL
};

/************************** sysfs end ************************/
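
Once the governor is in use, the read/write attributes declared above typically appear under /sys/devices/system/cpu/cpufreq/conservative/ (or under a policy directory when the driver requests per-policy tunables). A minimal user-space sketch, assuming that path, could adjust freq_step like this:

#include <stdio.h>

int main(void)
{
	/*
	 * Assumed path for globally shared tunables; per-policy setups expose
	 * the same files under /sys/devices/system/cpu/cpufreq/policyN/conservative/.
	 */
	const char *path =
		"/sys/devices/system/cpu/cpufreq/conservative/freq_step";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "10\n");	/* move in 10% steps instead of the default 5% */
	fclose(f);
	return 0;
}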
static struct policy_dbs_info *cs_alloc(void)
{
	struct cs_policy_dbs_info *dbs_info;

	dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
	return dbs_info ? &dbs_info->policy_dbs : NULL;
}

Contributors

Person                Tokens  Prop     Commits  Commit Prop
Rafael J. Wysocki     38      100.00%  1        100.00%
Total                 38      100.00%  1        100.00%


static void cs_free(struct policy_dbs_info *policy_dbs)
{
	kfree(to_dbs_info(policy_dbs));
}

Contributors

Person                Tokens  Prop     Commits  Commit Prop
Rafael J. Wysocki     19      100.00%  1        100.00%
Total                 19      100.00%  1        100.00%


static int cs_init(struct dbs_data *dbs_data)
{
	struct cs_dbs_tuners *tuners;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners)
		return -ENOMEM;

	tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
	tuners->freq_step = DEF_FREQUENCY_STEP;
	dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
	dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	dbs_data->ignore_nice_load = 0;

	dbs_data->tuners = tuners;
	return 0;
}

Contributors

Person                Tokens  Prop     Commits  Commit Prop
Viresh Kumar          77      100.00%  4        100.00%
Total                 77      100.00%  4        100.00%


static void cs_exit(struct dbs_data *dbs_data)
{
	kfree(dbs_data->tuners);
}

Contributors

Person                Tokens  Prop     Commits  Commit Prop
Viresh Kumar          18      100.00%  1        100.00%
Total                 18      100.00%  1        100.00%


static void cs_start(struct cpufreq_policy *policy)
{
	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);

	dbs_info->down_skip = 0;
	dbs_info->requested_freq = policy->cur;
}

Contributors

Person                Tokens  Prop     Commits  Commit Prop
Rafael J. Wysocki     37      100.00%  3        100.00%
Total                 37      100.00%  3        100.00%

static struct dbs_governor cs_governor = {
	.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("conservative"),
	.kobj_type = { .default_attrs = cs_attributes },
	.gov_dbs_update = cs_dbs_update,
	.alloc = cs_alloc,
	.free = cs_free,
	.init = cs_init,
	.exit = cs_exit,
	.start = cs_start,
};

#define CPU_FREQ_GOV_CONSERVATIVE	(&cs_governor.gov)
static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(CPU_FREQ_GOV_CONSERVATIVE);
}

Contributors

Person                Tokens  Prop     Commits  Commit Prop
Dave Jones            14      93.33%   1        50.00%
Rafael J. Wysocki     1       6.67%    1        50.00%
Total                 15      100.00%  2        100.00%


static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(CPU_FREQ_GOV_CONSERVATIVE);
}

Contributors

Person                Tokens  Prop     Commits  Commit Prop
Dave Jones            13      92.86%   1        50.00%
Rafael J. Wysocki     1       7.14%    1        50.00%
Total                 14      100.00%  2        100.00%

MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return CPU_FREQ_GOV_CONSERVATIVE;
}

Contributors

Person                Tokens  Prop     Commits  Commit Prop
Rafael J. Wysocki     12      100.00%  2        100.00%
Total                 12      100.00%  2        100.00%

fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);

Overall Contributors

Person                Tokens  Prop     Commits  Commit Prop
Dave Jones            389     29.74%   1        2.38%
Rafael J. Wysocki     358     27.37%   13       30.95%
Viresh Kumar          348     26.61%   11       26.19%
Stratos Karafotis     128     9.79%    6        14.29%
Alexander Clouter     32      2.45%    4        9.52%
Xiaoguang Chen        15      1.15%    1        2.38%
Elias Oltmanns        12      0.92%    1        2.38%
Johannes Weiner       12      0.92%    1        2.38%
Namhyung Kim          9       0.69%    2        4.76%
Thomas Renninger      3       0.23%    1        2.38%
Tomasz Wilczyński     2       0.15%    1        2.38%
Total                 1308    100.00%  42       100.00%
Created with cregit.