Release 4.15 kernel/power/qos.c
/*
* This module exposes the interface to kernel space for specifying
* QoS dependencies. It provides infrastructure for registration of:
*
* Dependents on a QoS value : register requests
* Watchers of QoS value : get notified when target QoS value changes
*
* This QoS design is best effort based. Dependents register their QoS needs.
* Watchers register to keep track of the current QoS needs of the system.
*
* There are 3 basic classes of QoS parameter: latency, timeout, throughput;
* each has defined units:
* latency: usec
* timeout: usec <-- currently not used.
* throughput: kbs (kilo byte / sec)
*
* There are lists of pm_qos_objects, each one wrapping requests and notifiers.
*
* User mode requests on a QoS parameter register themselves with the
* subsystem by opening the device node /dev/... and writing their request to
* the node. As long as the process holds a file handle open to the node, the
* client continues to be accounted for. Upon file release the usermode
* request is removed and a new qos target is computed. This way, when the
* application closes the file descriptor or exits, its request is cleaned
* up and the pm_qos_object gets an opportunity to recompute the target.
*
* Mark Gross <mgross@linux.intel.com>
*/
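As a concrete illustration of the user-space interface described in the comment above, here is a minimal sketch of a client. It is hypothetical and not part of this file; the cpu_dma_latency class and the 25 usec value are arbitrary choices. The request stays in force only while the file descriptor remains open.

/* Hypothetical user-space client, not part of qos.c. */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	int32_t latency_us = 25;	/* request: no more than 25 usec */
	int fd = open("/dev/cpu_dma_latency", O_RDWR);

	if (fd < 0)
		return 1;
	/*
	 * open() already registered a request at the default value;
	 * a raw 32-bit write updates it to latency_us.
	 */
	if (write(fd, &latency_us, sizeof(latency_us)) != sizeof(latency_us))
		return 1;
	/* ... do latency-sensitive work while the fd stays open ... */
	close(fd);	/* releasing the fd drops the request */
	return 0;
}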
/*#define DEBUG*/
#include <linux/pm_qos.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <trace/events/power.h>
/*
* locking rule: all changes to constraints or notifiers lists, to the
* pm_qos_object list, and to pm_qos_objects themselves must happen with
* pm_qos_lock held, taken with _irqsave. One lock to rule them all.
*/
struct pm_qos_object {
struct pm_qos_constraints *constraints;
struct miscdevice pm_qos_power_miscdev;
char *name;
};
static DEFINE_SPINLOCK(pm_qos_lock);
static struct pm_qos_object null_pm_qos;
static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
static struct pm_qos_constraints cpu_dma_constraints = {
.list = PLIST_HEAD_INIT(cpu_dma_constraints.list),
.target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
.default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
.no_constraint_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
.type = PM_QOS_MIN,
.notifiers = &cpu_dma_lat_notifier,
};
static struct pm_qos_object cpu_dma_pm_qos = {
.constraints = &cpu_dma_constraints,
.name = "cpu_dma_latency",
};
static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
static struct pm_qos_constraints network_lat_constraints = {
.list = PLIST_HEAD_INIT(network_lat_constraints.list),
.target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
.default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
.no_constraint_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
.type = PM_QOS_MIN,
.notifiers = &network_lat_notifier,
};
static struct pm_qos_object network_lat_pm_qos = {
.constraints = &network_lat_constraints,
.name = "network_latency",
};
static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
static struct pm_qos_constraints network_tput_constraints = {
.list = PLIST_HEAD_INIT(network_tput_constraints.list),
.target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
.default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
.no_constraint_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
.type = PM_QOS_MAX,
.notifiers = &network_throughput_notifier,
};
static struct pm_qos_object network_throughput_pm_qos = {
.constraints = &network_tput_constraints,
.name = "network_throughput",
};
static BLOCKING_NOTIFIER_HEAD(memory_bandwidth_notifier);
static struct pm_qos_constraints memory_bw_constraints = {
.list = PLIST_HEAD_INIT(memory_bw_constraints.list),
.target_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
.default_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
.no_constraint_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
.type = PM_QOS_SUM,
.notifiers = &memory_bandwidth_notifier,
};
static struct pm_qos_object memory_bandwidth_pm_qos = {
.constraints = &memory_bw_constraints,
.name = "memory_bandwidth",
};
static struct pm_qos_object *pm_qos_array[] = {
&null_pm_qos,
&cpu_dma_pm_qos,
&network_lat_pm_qos,
&network_throughput_pm_qos,
&memory_bandwidth_pm_qos,
};
static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
size_t count, loff_t *f_pos);
static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
size_t count, loff_t *f_pos);
static int pm_qos_power_open(struct inode *inode, struct file *filp);
static int pm_qos_power_release(struct inode *inode, struct file *filp);
static const struct file_operations pm_qos_power_fops = {
.write = pm_qos_power_write,
.read = pm_qos_power_read,
.open = pm_qos_power_open,
.release = pm_qos_power_release,
.llseek = noop_llseek,
};
/* unlocked internal variant */
static inline int pm_qos_get_value(struct pm_qos_constraints *c)
{
struct plist_node *node;
int total_value = 0;
if (plist_head_empty(&c->list))
return c->no_constraint_value;
switch (c->type) {
case PM_QOS_MIN:
return plist_first(&c->list)->prio;
case PM_QOS_MAX:
return plist_last(&c->list)->prio;
case PM_QOS_SUM:
plist_for_each(node, &c->list)
total_value += node->prio;
return total_value;
default:
/* runtime check for not using enum */
BUG();
return PM_QOS_DEFAULT_VALUE;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
James Bottomley | 37 | 37.37% | 1 | 12.50% |
Tomeu Vizoso | 28 | 28.28% | 1 | 12.50% |
Jean Pihet | 15 | 15.15% | 2 | 25.00% |
Mark Gross | 13 | 13.13% | 1 | 12.50% |
Luis Gonzalez Fernandez | 3 | 3.03% | 1 | 12.50% |
Colin Cross | 2 | 2.02% | 1 | 12.50% |
Rafael J. Wysocki | 1 | 1.01% | 1 | 12.50% |
Total | 99 | 100.00% | 8 | 100.00% |
s32 pm_qos_read_value(struct pm_qos_constraints *c)
{
return c->target_value;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tim Chen | 11 | 73.33% | 1 | 33.33% |
Jean Pihet | 4 | 26.67% | 2 | 66.67% |
Total | 15 | 100.00% | 3 | 100.00% |
static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
{
c->target_value = value;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tim Chen | 17 | 80.95% | 1 | 33.33% |
Jean Pihet | 4 | 19.05% | 2 | 66.67% |
Total | 21 | 100.00% | 3 | 100.00% |
static int pm_qos_dbg_show_requests(struct seq_file *s, void *unused)
{
struct pm_qos_object *qos = (struct pm_qos_object *)s->private;
struct pm_qos_constraints *c;
struct pm_qos_request *req;
char *type;
unsigned long flags;
int tot_reqs = 0;
int active_reqs = 0;
if (IS_ERR_OR_NULL(qos)) {
pr_err("%s: bad qos param!\n", __func__);
return -EINVAL;
}
c = qos->constraints;
if (IS_ERR_OR_NULL(c)) {
pr_err("%s: Bad constraints on qos?\n", __func__);
return -EINVAL;
}
/* Lock to ensure we have a snapshot */
spin_lock_irqsave(&pm_qos_lock, flags);
if (plist_head_empty(&c->list)) {
seq_puts(s, "Empty!\n");
goto out;
}
switch (c->type) {
case PM_QOS_MIN:
type = "Minimum";
break;
case PM_QOS_MAX:
type = "Maximum";
break;
case PM_QOS_SUM:
type = "Sum";
break;
default:
type = "Unknown";
}
plist_for_each_entry(req, &c->list, node) {
char *state = "Default";
if ((req->node).prio != c->default_value) {
active_reqs++;
state = "Active";
}
tot_reqs++;
seq_printf(s, "%d: %d: %s\n", tot_reqs,
(req->node).prio, state);
}
seq_printf(s, "Type=%s, Value=%d, Requests: active=%d / total=%d\n",
type, pm_qos_get_value(c), active_reqs, tot_reqs);
out:
spin_unlock_irqrestore(&pm_qos_lock, flags);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nishanth Menon | 263 | 100.00% | 1 | 100.00% |
Total | 263 | 100.00% | 1 | 100.00% |
static int pm_qos_dbg_open(struct inode *inode, struct file *file)
{
return single_open(file, pm_qos_dbg_show_requests,
inode->i_private);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nishanth Menon | 28 | 100.00% | 1 | 100.00% |
Total | 28 | 100.00% | 1 | 100.00% |
static const struct file_operations pm_qos_debug_fops = {
.open = pm_qos_dbg_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/**
* pm_qos_update_target - manages the constraints list and calls the notifiers
* if needed
* @c: constraints data struct
* @node: request to add to the list, to update or to remove
* @action: action to take on the constraints list
* @value: value of the request to add or update
*
* This function returns 1 if the aggregated constraint value has changed, 0
* otherwise.
*/
int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
enum pm_qos_req_action action, int value)
{
unsigned long flags;
int prev_value, curr_value, new_value;
int ret;
spin_lock_irqsave(&pm_qos_lock, flags);
prev_value = pm_qos_get_value(c);
if (value == PM_QOS_DEFAULT_VALUE)
new_value = c->default_value;
else
new_value = value;
switch (action) {
case PM_QOS_REMOVE_REQ:
plist_del(node, &c->list);
break;
case PM_QOS_UPDATE_REQ:
/*
* to change the list, we atomically remove, reinit
* with new value and add, then see if the extremal
* changed
*/
plist_del(node, &c->list);
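/* fall through: re-add the node below with the new value */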
case PM_QOS_ADD_REQ:
plist_node_init(node, new_value);
plist_add(node, &c->list);
break;
default:
/* no action */
;
}
curr_value = pm_qos_get_value(c);
pm_qos_set_value(c, curr_value);
spin_unlock_irqrestore(&pm_qos_lock, flags);
trace_pm_qos_update_target(action, prev_value, curr_value);
if (prev_value != curr_value) {
ret = 1;
if (c->notifiers)
blocking_notifier_call_chain(c->notifiers,
(unsigned long)curr_value,
NULL);
} else {
ret = 0;
}
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jean Pihet | 63 | 31.50% | 2 | 25.00% |
Mark Gross | 53 | 26.50% | 1 | 12.50% |
James Bottomley | 43 | 21.50% | 1 | 12.50% |
Rafael J. Wysocki | 19 | 9.50% | 1 | 12.50% |
Keun-o Park | 9 | 4.50% | 1 | 12.50% |
John Kacur | 7 | 3.50% | 1 | 12.50% |
Tim Chen | 6 | 3.00% | 1 | 12.50% |
Total | 200 | 100.00% | 8 | 100.00% |
/**
* pm_qos_flags_remove_req - Remove device PM QoS flags request.
* @pqf: Device PM QoS flags set to remove the request from.
* @req: Request to remove from the set.
*/
static void pm_qos_flags_remove_req(struct pm_qos_flags *pqf,
struct pm_qos_flags_request *req)
{
s32 val = 0;
list_del(&req->node);
list_for_each_entry(req, &pqf->list, node)
val |= req->flags;
pqf->effective_flags = val;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rafael J. Wysocki | 49 | 100.00% | 1 | 100.00% |
Total | 49 | 100.00% | 1 | 100.00% |
/**
* pm_qos_update_flags - Update a set of PM QoS flags.
* @pqf: Set of flags to update.
* @req: Request to add to the set, to modify, or to remove from the set.
* @action: Action to take on the set.
* @val: Value of the request to add or modify.
*
* Update the given set of PM QoS flags and call notifiers if the aggregate
* value has changed. Returns true if the aggregate constraint value has
* changed, false otherwise.
*/
bool pm_qos_update_flags(struct pm_qos_flags *pqf,
struct pm_qos_flags_request *req,
enum pm_qos_req_action action, s32 val)
{
unsigned long irqflags;
s32 prev_value, curr_value;
spin_lock_irqsave(&pm_qos_lock, irqflags);
prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;
switch (action) {
case PM_QOS_REMOVE_REQ:
pm_qos_flags_remove_req(pqf, req);
break;
case PM_QOS_UPDATE_REQ:
pm_qos_flags_remove_req(pqf, req);
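/* fall through: re-add the request below with the new value */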
case PM_QOS_ADD_REQ:
req->flags = val;
INIT_LIST_HEAD(&req->node);
list_add_tail(&req->node, &pqf->list);
pqf->effective_flags |= val;
break;
default:
/* no action */
;
}
curr_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;
spin_unlock_irqrestore(&pm_qos_lock, irqflags);
trace_pm_qos_update_flags(action, prev_value, curr_value);
return prev_value != curr_value;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rafael J. Wysocki | 151 | 94.38% | 1 | 50.00% |
Keun-o Park | 9 | 5.62% | 1 | 50.00% |
Total | 160 | 100.00% | 2 | 100.00% |
/**
* pm_qos_request - returns current system wide qos expectation
* @pm_qos_class: identification of which qos value is requested
*
* This function returns the current target value.
*/
int pm_qos_request(int pm_qos_class)
{
return pm_qos_read_value(pm_qos_array[pm_qos_class]->constraints);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mark Gross | 14 | 73.68% | 2 | 40.00% |
Jean Pihet | 2 | 10.53% | 1 | 20.00% |
Tim Chen | 2 | 10.53% | 1 | 20.00% |
James Bottomley | 1 | 5.26% | 1 | 20.00% |
Total | 19 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL_GPL(pm_qos_request);
int pm_qos_request_active(struct pm_qos_request *req)
{
return req->pm_qos_class != 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
James Bottomley | 16 | 94.12% | 1 | 50.00% |
Jean Pihet | 1 | 5.88% | 1 | 50.00% |
Total | 17 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(pm_qos_request_active);
static void __pm_qos_update_request(struct pm_qos_request *req,
s32 new_value)
{
trace_pm_qos_update_request(req->pm_qos_class, new_value);
if (new_value != req->node.prio)
pm_qos_update_target(
pm_qos_array[req->pm_qos_class]->constraints,
&req->node, PM_QOS_UPDATE_REQ, new_value);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Boyd | 54 | 100.00% | 1 | 100.00% |
Total | 54 | 100.00% | 1 | 100.00% |
/**
* pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
* @work: work struct for the delayed work (timeout)
*
* When the timeout expires, this resets the request back to the default
* value, effectively cancelling the temporary request.
*/
static void pm_qos_work_fn(struct work_struct *work)
{
struct pm_qos_request *req = container_of(to_delayed_work(work),
struct pm_qos_request,
work);
__pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
MyungJoo Ham | 35 | 97.22% | 1 | 50.00% |
Stephen Boyd | 1 | 2.78% | 1 | 50.00% |
Total | 36 | 100.00% | 2 | 100.00% |
/**
* pm_qos_add_request - inserts new qos request into the list
* @req: pointer to a preallocated handle
* @pm_qos_class: identifies which list of qos request to use
* @value: defines the qos request
*
* This function inserts a new entry in the pm_qos_class list of requested qos
* performance characteristics. It recomputes the aggregate QoS expectations
* for the pm_qos_class of parameters and initializes the pm_qos_request
* handle. Caller needs to save this handle for later use in updates and
* removal.
*/
void pm_qos_add_request(struct pm_qos_request *req,
int pm_qos_class, s32 value)
{
if (!req) /* guard against callers passing in null */
return;
if (pm_qos_request_active(req)) {
WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
return;
}
req->pm_qos_class = pm_qos_class;
INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
trace_pm_qos_add_request(pm_qos_class, value);
pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
&req->node, PM_QOS_ADD_REQ, value);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mark Gross | 25 | 30.12% | 2 | 28.57% |
Jean Pihet | 22 | 26.51% | 2 | 28.57% |
James Bottomley | 19 | 22.89% | 1 | 14.29% |
MyungJoo Ham | 10 | 12.05% | 1 | 14.29% |
Keun-o Park | 7 | 8.43% | 1 | 14.29% |
Total | 83 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL_GPL(pm_qos_add_request);
/**
* pm_qos_update_request - modifies an existing qos request
* @req : handle to list element holding a pm_qos request to use
* @value: defines the qos request
*
* Updates an existing qos request for the pm_qos_class of parameters along
* with updating the target pm_qos_class value.
*
* Attempts are made to make this code callable on hot code paths.
*/
void pm_qos_update_request(struct pm_qos_request *req,
s32 new_value)
{
if (!req) /* guard against callers passing in null */
return;
if (!pm_qos_request_active(req)) {
WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n");
return;
}
cancel_delayed_work_sync(&req->work);
__pm_qos_update_request(req, new_value);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
James Bottomley | 21 | 38.89% | 2 | 20.00% |
Mark Gross | 17 | 31.48% | 3 | 30.00% |
MyungJoo Ham | 8 | 14.81% | 1 | 10.00% |
Jean Pihet | 5 | 9.26% | 2 | 20.00% |
Keun-o Park | 2 | 3.70% | 1 | 10.00% |
Stephen Boyd | 1 | 1.85% | 1 | 10.00% |
Total | 54 | 100.00% | 10 | 100.00% |
EXPORT_SYMBOL_GPL(pm_qos_update_request);
/**
* pm_qos_update_request_timeout - modifies an existing qos request temporarily.
* @req : handle to list element holding a pm_qos request to use
* @new_value: defines the temporary qos request
* @timeout_us: the effective duration of this qos request in usecs.
*
* After timeout_us, this qos request is cancelled automatically.
*/
void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
unsigned long timeout_us)
{
if (!req)
return;
if (WARN(!pm_qos_request_active(req),
"%s called for unknown object.", __func__))
return;
cancel_delayed_work_sync(&req->work);
trace_pm_qos_update_request_timeout(req->pm_qos_class,
new_value, timeout_us);
if (new_value != req->node.prio)
pm_qos_update_target(
pm_qos_array[req->pm_qos_class]->constraints,
&req->node, PM_QOS_UPDATE_REQ, new_value);
schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
MyungJoo Ham | 91 | 89.22% | 1 | 50.00% |
Keun-o Park | 11 | 10.78% | 1 | 50.00% |
Total | 102 | 100.00% | 2 | 100.00% |
/**
* pm_qos_remove_request - removes an existing qos request
* @req: handle to request list element
*
* Will remove pm qos request from the list of constraints and
* recompute the current target value for the pm_qos_class. Call this
* on slow code paths.
*/
void pm_qos_remove_request(struct pm_qos_request *req)
{
if (!req) /* guard against callers passing in null */
return; /* silent return to keep pcm code cleaner */
if (!pm_qos_request_active(req)) {
WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
return;
}
cancel_delayed_work_sync(&req->work);
trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE);
pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
&req->node, PM_QOS_REMOVE_REQ,
PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
James Bottomley | 32 | 36.36% | 2 | 25.00% |
Mark Gross | 23 | 26.14% | 2 | 25.00% |
Jean Pihet | 16 | 18.18% | 2 | 25.00% |
Keun-o Park | 9 | 10.23% | 1 | 12.50% |
MyungJoo Ham | 8 | 9.09% | 1 | 12.50% |
Total | 88 | 100.00% | 8 | 100.00% |
EXPORT_SYMBOL_GPL(pm_qos_remove_request);
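The request API above is normally exercised from drivers. The following sketch is hypothetical and not part of this file; the my_req name, the 20/100 usec values and the 10 ms timeout are arbitrary. It walks the usual lifecycle through pm_qos_add_request(), pm_qos_update_request(), pm_qos_update_request_timeout(), pm_qos_request() and pm_qos_remove_request().

/* Hypothetical in-kernel usage sketch, not part of qos.c. */
#include <linux/kernel.h>
#include <linux/pm_qos.h>

static struct pm_qos_request my_req;	/* must stay allocated while active */

/* called from the driver's probe/start path */
static void my_driver_start(void)
{
	/* register: cap CPU/DMA latency at 20 usec */
	pm_qos_add_request(&my_req, PM_QOS_CPU_DMA_LATENCY, 20);

	/* later: relax the constraint to 100 usec */
	pm_qos_update_request(&my_req, 100);

	/* or: tighten it again, but only for the next 10 ms */
	pm_qos_update_request_timeout(&my_req, 20, 10 * USEC_PER_MSEC);

	/* the current aggregate target for the class can be read back */
	pr_debug("cpu_dma_latency target: %d usec\n",
		 pm_qos_request(PM_QOS_CPU_DMA_LATENCY));
}

/* called from the driver's remove/stop path */
static void my_driver_stop(void)
{
	/* drop the constraint; the target is recomputed without it */
	pm_qos_remove_request(&my_req);
}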
/**
* pm_qos_add_notifier - sets notification entry for changes to target value
* @pm_qos_class: identifies which qos target changes should be notified.
* @notifier: notifier block managed by caller.
*
* will register the notifier into a notification chain that gets called
* upon changes to the pm_qos_class target value.
*/
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
{
int retval;
retval = blocking_notifier_chain_register(
pm_qos_array[pm_qos_class]->constraints->notifiers,
notifier);
return retval;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mark Gross | 33 | 94.29% | 1 | 50.00% |
Jean Pihet | 2 | 5.71% | 1 | 50.00% |
Total | 35 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
/**
* pm_qos_remove_notifier - deletes notification entry from chain.
* @pm_qos_class: identifies which qos target changes are notified.
* @notifier: notifier block to be removed.
*
* will remove the notifier from the notification chain that gets called
* upon changes to the pm_qos_class target value.
*/
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
{
int retval;
retval = blocking_notifier_chain_unregister(
pm_qos_array[pm_qos_class]->constraints->notifiers,
notifier);
return retval;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mark Gross | 33 | 94.29% | 1 | 50.00% |
Jean Pihet | 2 | 5.71% | 1 | 50.00% |
Total | 35 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
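A sketch of the watcher side follows; it is hypothetical and not part of this file. A callback registered with pm_qos_add_notifier() runs on the class's blocking notifier chain whenever the aggregate target changes, with the new target passed as the unsigned long argument (see the call in pm_qos_update_target() above).

/* Hypothetical notifier usage sketch, not part of qos.c. */
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>

static int my_lat_notify(struct notifier_block *nb,
			 unsigned long new_target, void *data)
{
	pr_info("cpu_dma_latency target is now %lu usec\n", new_target);
	return NOTIFY_OK;
}

static struct notifier_block my_lat_nb = {
	.notifier_call = my_lat_notify,
};

static int my_driver_register_watcher(void)
{
	return pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, &my_lat_nb);
}

static void my_driver_unregister_watcher(void)
{
	pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY, &my_lat_nb);
}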
/* User space interface to PM QoS classes via misc devices */
static int register_pm_qos_misc(struct pm_qos_object *qos, struct dentry *d)
{
qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
qos->pm_qos_power_miscdev.name = qos->name;
qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
if (d) {
(void)debugfs_create_file(qos->name, S_IRUGO, d,
(void *)qos, &pm_qos_debug_fops);
}
return misc_register(&qos->pm_qos_power_miscdev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jean Pihet | 47 | 58.02% | 1 | 50.00% |
Nishanth Menon | 34 | 41.98% | 1 | 50.00% |
Total | 81 | 100.00% | 2 | 100.00% |
static int find_pm_qos_object_by_minor(int minor)
{
int pm_qos_class;
for (pm_qos_class = PM_QOS_CPU_DMA_LATENCY;
pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
if (minor ==
pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
return pm_qos_class;
}
return -1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jean Pihet | 46 | 97.87% | 1 | 50.00% |
Keun-o Park | 1 | 2.13% | 1 | 50.00% |
Total | 47 | 100.00% | 2 | 100.00% |
static int pm_qos_power_open(struct inode *inode, struct file *filp)
{
long pm_qos_class;
pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
if (pm_qos_class >= PM_QOS_CPU_DMA_LATENCY) {
struct pm_qos_request *req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
filp->private_data = req;
return 0;
}
return -EPERM;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mark Gross | 51 | 60.71% | 2 | 33.33% |
James Bottomley | 29 | 34.52% | 1 | 16.67% |
David Alan Gilbert | 2 | 2.38% | 1 | 16.67% |
Keun-o Park | 1 | 1.19% | 1 | 16.67% |
Jean Pihet | 1 | 1.19% | 1 | 16.67% |
Total | 84 | 100.00% | 6 | 100.00% |
static int pm_qos_power_release(struct inode *inode, struct file *filp)
{
struct pm_qos_request *req;
req = filp->private_data;
pm_qos_remove_request(req);
kfree(req);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mark Gross | 33 | 82.50% | 2 | 40.00% |
James Bottomley | 5 | 12.50% | 1 | 20.00% |
Jonathan Corbet | 1 | 2.50% | 1 | 20.00% |
Jean Pihet | 1 | 2.50% | 1 | 20.00% |
Total | 40 | 100.00% | 5 | 100.00% |
static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
size_t count, loff_t *f_pos)
{
s32 value;
unsigned long flags;
struct pm_qos_request *req = filp->private_data;
if (!req)
return -EINVAL;
if (!pm_qos_request_active(req))
return -EINVAL;
spin_lock_irqsave(&pm_qos_lock, flags);
value = pm_qos_get_value(pm_qos_array[req->pm_qos_class]->constraints);
spin_unlock_irqrestore(&pm_qos_lock, flags);
return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Renninger | 96 | 88.89% | 1 | 33.33% |
Jean Pihet | 12 | 11.11% | 2 | 66.67% |
Total | 108 | 100.00% | 3 | 100.00% |
static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
size_t count, loff_t *f_pos)
{
s32 value;
struct pm_qos_request *req;
if (count == sizeof(s32)) {
if (copy_from_user(&value, buf, sizeof(s32)))
return -EFAULT;
} else {
int ret;
ret = kstrtos32_from_user(buf, count, 16, &value);
if (ret)
return ret;
}
req = filp->private_data;
pm_qos_update_request(req, value);
return count;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mark Gross | 86 | 81.90% | 2 | 33.33% |
Rafael J. Wysocki | 8 | 7.62% | 1 | 16.67% |
Andy Shevchenko | 6 | 5.71% | 1 | 16.67% |
Jean Pihet | 4 | 3.81% | 1 | 16.67% |
Jonathan Corbet | 1 | 0.95% | 1 | 16.67% |
Total | 105 | 100.00% | 6 | 100.00% |
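As pm_qos_power_write() above shows, the device node accepts two input formats: a raw s32 when exactly sizeof(s32) bytes are written, and otherwise an ASCII string parsed as hexadecimal. A hypothetical user-space sketch of the ASCII form (the "1f4" string, i.e. 500 usec, is arbitrary):

/* Hypothetical user-space sketch, not part of qos.c. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char hex_request[] = "1f4";	/* parsed base-16 -> 500 usec */
	int fd = open("/dev/cpu_dma_latency", O_RDWR);

	if (fd < 0)
		return 1;
	/* any length other than sizeof(s32) takes the ASCII-hex path */
	if (write(fd, hex_request, strlen(hex_request)) < 0)
		return 1;
	/* the request remains in force until the fd is closed */
	pause();
	return 0;
}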
static int __init pm_qos_power_init(void)
{
int ret = 0;
int i;
struct dentry *d;
BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES);
d = debugfs_create_dir("pm_qos", NULL);
if (IS_ERR_OR_NULL(d))
d = NULL;
for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) {
ret = register_pm_qos_misc(pm_qos_array[i], d);
if (ret < 0) {
pr_err("%s: %s setup failed\n",
__func__, pm_qos_array[i]->name);
return ret;
}
}
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mark Gross | 42 | 39.25% | 1 | 20.00% |
Alex Frid | 33 | 30.84% | 1 | 20.00% |
Nishanth Menon | 27 | 25.23% | 1 | 20.00% |
Joe Perches | 4 | 3.74% | 1 | 20.00% |
Keun-o Park | 1 | 0.93% | 1 | 20.00% |
Total | 107 | 100.00% | 5 | 100.00% |
late_initcall(pm_qos_power_init);
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mark Gross | 678 | 27.07% | 3 | 8.57% |
Nishanth Menon | 397 | 15.85% | 1 | 2.86% |
Jean Pihet | 339 | 13.53% | 5 | 14.29% |
Rafael J. Wysocki | 248 | 9.90% | 4 | 11.43% |
James Bottomley | 229 | 9.14% | 2 | 5.71% |
MyungJoo Ham | 154 | 6.15% | 1 | 2.86% |
Thomas Renninger | 123 | 4.91% | 1 | 2.86% |
Tomeu Vizoso | 99 | 3.95% | 1 | 2.86% |
Stephen Boyd | 56 | 2.24% | 1 | 2.86% |
Tim Chen | 55 | 2.20% | 1 | 2.86% |
Keun-o Park | 53 | 2.12% | 3 | 8.57% |
Alex Frid | 33 | 1.32% | 1 | 2.86% |
John Kacur | 7 | 0.28% | 1 | 2.86% |
Andy Shevchenko | 6 | 0.24% | 1 | 2.86% |
Dominik Brodowski | 5 | 0.20% | 1 | 2.86% |
Arnd Bergmann | 5 | 0.20% | 1 | 2.86% |
Joe Perches | 4 | 0.16% | 1 | 2.86% |
Paul Gortmaker | 3 | 0.12% | 1 | 2.86% |
Luis Gonzalez Fernandez | 3 | 0.12% | 1 | 2.86% |
Jonathan Corbet | 2 | 0.08% | 1 | 2.86% |
Colin Cross | 2 | 0.08% | 1 | 2.86% |
Richard Hughes | 2 | 0.08% | 1 | 2.86% |
David Alan Gilbert | 2 | 0.08% | 1 | 2.86% |
Total | 2505 | 100.00% | 35 | 100.00% |