Release 4.18 block/elevator.c
/*
* Block device elevator/IO-scheduler.
*
* Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
*
* 30042000 Jens Axboe <axboe@kernel.dk> :
*
* Split the elevator a bit so that it is possible to choose a different
* one or even write a new "plug in". There are three pieces:
* - elevator_fn, inserts a new request in the queue list
* - elevator_merge_fn, decides whether a new buffer can be merged with
* an existing request
* - elevator_dequeue_fn, called when a request is taken off the active list
*
* 20082000 Dave Jones <davej@suse.de> :
* Removed tests for max-bomb-segments, which was breaking elvtune
* when run without -bN
*
* Jens:
* - Rework again to work with bio instead of buffer_heads
* - lose bi_dev comparisons, partition handling is correct now
* - completely modularize elevator setup and teardown
*
*/
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <trace/events/block.h>
#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);
/*
* Merge hash stuff.
*/
#define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
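The key is the request's end sector (start plus length), so a lookup keyed on a bio's start sector finds requests the bio could be appended to; elv_merge() below does exactly that via elv_rqhash_find(). A minimal sketch of the test this enables, using a hypothetical helper name:
/* hypothetical helper: true if bio starts exactly where rq ends */
static bool example_is_back_merge_candidate(struct request *rq, struct bio *bio)
{
	return rq_hash_key(rq) == bio->bi_iter.bi_sector;
}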
/*
* Query the io scheduler to see if the bio being issued by the current
* process may be merged with rq.
*/
static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
struct request_queue *q = rq->q;
struct elevator_queue *e = q->elevator;
if (e->uses_mq && e->type->ops.mq.allow_merge)
return e->type->ops.mq.allow_merge(q, rq, bio);
else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)
return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);
return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 98 | 91.59% | 5 | 71.43% |
Tejun Heo | 6 | 5.61% | 1 | 14.29% |
Tahsin Erdogan | 3 | 2.80% | 1 | 14.29% |
Total | 107 | 100.00% | 7 | 100.00% |
/*
* can we safely merge with this request?
*/
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
if (!blk_rq_merge_ok(rq, bio))
return false;
if (!elv_iosched_allow_bio_merge(rq, bio))
return false;
return true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 20 | 45.45% | 3 | 42.86% |
Jens Axboe | 11 | 25.00% | 1 | 14.29% |
Tahsin Erdogan | 5 | 11.36% | 1 | 14.29% |
Martin K. Petersen | 5 | 11.36% | 1 | 14.29% |
Tejun Heo | 3 | 6.82% | 1 | 14.29% |
Total | 44 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL(elv_bio_merge_ok);
static bool elevator_match(const struct elevator_type *e, const char *name)
{
if (!strcmp(e->elevator_name, name))
return true;
if (e->elevator_alias && !strcmp(e->elevator_alias, name))
return true;
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 54 | 100.00% | 1 | 100.00% |
Total | 54 | 100.00% | 1 | 100.00% |
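The alias lets a scheduler answer to a second name; in this release, for instance, the blk-mq deadline scheduler also registers the legacy name "deadline" through its elevator_alias field.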
/*
* Return the scheduler with name 'name' and with matching 'mq' capability
*/
static struct elevator_type *elevator_find(const char *name, bool mq)
{
struct elevator_type *e;
list_for_each_entry(e, &elv_list, list) {
if (elevator_match(e, name) && (mq == e->uses_mq))
return e;
}
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 45 | 83.33% | 3 | 50.00% |
Matthias Kaehlcke | 4 | 7.41% | 1 | 16.67% |
Vasily Tarasov | 4 | 7.41% | 1 | 16.67% |
Adrian Bunk | 1 | 1.85% | 1 | 16.67% |
Total | 54 | 100.00% | 6 | 100.00% |
static void elevator_put(struct elevator_type *e)
{
module_put(e->elevator_owner);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 18 | 100.00% | 1 | 100.00% |
Total | 18 | 100.00% | 1 | 100.00% |
static struct elevator_type *elevator_get(struct request_queue *q,
const char *name, bool try_loading)
{
struct elevator_type *e;
spin_lock(&elv_list_lock);
e = elevator_find(name, q->mq_ops != NULL);
if (!e && try_loading) {
spin_unlock(&elv_list_lock);
request_module("%s-iosched", name);
spin_lock(&elv_list_lock);
e = elevator_find(name, q->mq_ops != NULL);
}
if (e && !try_module_get(e->elevator_owner))
e = NULL;
spin_unlock(&elv_list_lock);
return e;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 86 | 77.48% | 4 | 50.00% |
Tejun Heo | 23 | 20.72% | 2 | 25.00% |
wzt wzt | 1 | 0.90% | 1 | 12.50% |
Kees Cook | 1 | 0.90% | 1 | 12.50% |
Total | 111 | 100.00% | 8 | 100.00% |
static char chosen_elevator[ELV_NAME_MAX];
static int __init elevator_setup(char *str)
{
/*
* Be backwards-compatible with previous kernels, so users
* won't get the wrong elevator.
*/
strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 17 | 58.62% | 2 | 28.57% |
Nate Diller | 6 | 20.69% | 1 | 14.29% |
Linus Torvalds (pre-git) | 2 | 6.90% | 1 | 14.29% |
Linus Torvalds | 2 | 6.90% | 1 | 14.29% |
Hirofumi Ogawa | 1 | 3.45% | 1 | 14.29% |
Chuck Ebbert | 1 | 3.45% | 1 | 14.29% |
Total | 29 | 100.00% | 7 | 100.00% |
__setup("elevator=", elevator_setup);
/* called during boot to load the elevator chosen by the elevator param */
void __init load_default_elevator_module(void)
{
struct elevator_type *e;
if (!chosen_elevator[0])
return;
/*
* The boot parameter is deprecated; it has never been supported for MQ.
* Only look for non-mq schedulers from here.
*/
spin_lock(&elv_list_lock);
e = elevator_find(chosen_elevator, false);
spin_unlock(&elv_list_lock);
if (!e)
request_module("%s-iosched", chosen_elevator);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 53 | 94.64% | 1 | 50.00% |
Jens Axboe | 3 | 5.36% | 1 | 50.00% |
Total | 56 | 100.00% | 2 | 100.00% |
static struct kobj_type elv_ktype;
struct elevator_queue *elevator_alloc(struct request_queue *q,
struct elevator_type *e)
{
struct elevator_queue *eq;
eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
if (unlikely(!eq))
return NULL;
eq->type = e;
kobject_init(&eq->kobj, &elv_ktype);
mutex_init(&eq->sysfs_lock);
hash_init(eq->hash);
eq->uses_mq = e->uses_mq;
return eq;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Al Viro | 44 | 47.31% | 1 | 7.69% |
Jens Axboe | 40 | 43.01% | 5 | 38.46% |
Greg Kroah-Hartman | 3 | 3.23% | 3 | 23.08% |
Chao Yu | 3 | 3.23% | 1 | 7.69% |
Joe Perches | 1 | 1.08% | 1 | 7.69% |
Sasha Levin | 1 | 1.08% | 1 | 7.69% |
Tejun Heo | 1 | 1.08% | 1 | 7.69% |
Total | 93 | 100.00% | 13 | 100.00% |
EXPORT_SYMBOL(elevator_alloc);
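As a sketch of how this helper is consumed on the legacy path (hypothetical names; the pattern follows the in-tree noop scheduler's init function), a single-queue scheduler's ->elevator_init_fn allocates the elevator_queue, hangs its private data off ->elevator_data, and publishes it under the queue lock:
struct example_data {
	struct list_head queue;
};

static int example_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct example_data *ed;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	ed = kmalloc_node(sizeof(*ed), GFP_KERNEL, q->node);
	if (!ed) {
		/* drop the reference taken above; frees eq via elevator_release() */
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = ed;
	INIT_LIST_HEAD(&ed->queue);

	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);
	return 0;
}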
static void elevator_release(struct kobject *kobj)
{
struct elevator_queue *e;
e = container_of(kobj, struct elevator_queue, kobj);
elevator_put(e->type);
kfree(e);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Al Viro | 31 | 77.50% | 1 | 33.33% |
Jens Axboe | 8 | 20.00% | 1 | 33.33% |
Tejun Heo | 1 | 2.50% | 1 | 33.33% |
Total | 40 | 100.00% | 3 | 100.00% |
/*
* For non-mq devices, use the default elevator specified by the boot parameter,
* or the one chosen by config option. Don't try to load modules, as we could be
* running off async and request_module() isn't allowed from async.
*/
int elevator_init(struct request_queue *q)
{
struct elevator_type *e = NULL;
int err = 0;
/*
* q->sysfs_lock must be held to provide mutual exclusion between
* elevator_switch() and here.
*/
mutex_lock(&q->sysfs_lock);
if (unlikely(q->elevator))
goto out_unlock;
if (*chosen_elevator) {
e = elevator_get(q, chosen_elevator, false);
if (!e)
printk(KERN_ERR "I/O scheduler %s not found\n",
chosen_elevator);
}
if (!e)
e = elevator_get(q, CONFIG_DEFAULT_IOSCHED, false);
if (!e) {
printk(KERN_ERR
"Default I/O scheduler not found. Using noop.\n");
e = elevator_get(q, "noop", false);
}
err = e->ops.sq.elevator_init_fn(q, e);
if (err)
elevator_put(e);
out_unlock:
mutex_unlock(&q->sysfs_lock);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 49 | 32.45% | 8 | 40.00% |
Nate Diller | 32 | 21.19% | 2 | 10.00% |
Christoph Hellwig | 17 | 11.26% | 2 | 10.00% |
Tejun Heo | 14 | 9.27% | 3 | 15.00% |
Linus Torvalds | 10 | 6.62% | 1 | 5.00% |
Sudip Mukherjee | 10 | 6.62% | 1 | 5.00% |
Mike Snitzer | 9 | 5.96% | 1 | 5.00% |
Tomoki Sekiyama | 8 | 5.30% | 1 | 5.00% |
Jianpeng Ma (马建朋) | 2 | 1.32% | 1 | 5.00% |
Total | 151 | 100.00% | 20 | 100.00% |
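For reference, CONFIG_DEFAULT_IOSCHED is a Kconfig-generated string naming the default legacy scheduler (typically "cfq", "deadline" or "noop"); if neither the boot-time choice nor that default is registered, the code falls back to noop, which is always built in.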
void elevator_exit(struct request_queue *q, struct elevator_queue *e)
{
mutex_lock(&e->sysfs_lock);
if (e->uses_mq && e->type->ops.mq.exit_sched)
blk_mq_exit_sched(q, e);
else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
e->type->ops.sq.elevator_exit_fn(e);
mutex_unlock(&e->sysfs_lock);
kobject_put(&e->kobj);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 62 | 66.67% | 4 | 57.14% |
Al Viro | 17 | 18.28% | 1 | 14.29% |
Omar Sandoval | 8 | 8.60% | 1 | 14.29% |
Tejun Heo | 6 | 6.45% | 1 | 14.29% |
Total | 93 | 100.00% | 7 | 100.00% |
static inline void __elv_rqhash_del(struct request *rq)
{
hash_del(&rq->hash);
rq->rq_flags &= ~RQF_HASHED;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 24 | 88.89% | 2 | 50.00% |
Christoph Hellwig | 2 | 7.41% | 1 | 25.00% |
Sasha Levin | 1 | 3.70% | 1 | 25.00% |
Total | 27 | 100.00% | 4 | 100.00% |
void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
if (ELV_ON_HASH(rq))
__elv_rqhash_del(rq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 27 | 100.00% | 2 | 100.00% |
Total | 27 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(elv_rqhash_del);
void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
struct elevator_queue *e = q->elevator;
BUG_ON(ELV_ON_HASH(rq));
hash_add(e->hash, &rq->hash, rq_hash_key(rq));
rq->rq_flags |= RQF_HASHED;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 49 | 89.09% | 4 | 66.67% |
Sasha Levin | 4 | 7.27% | 1 | 16.67% |
Christoph Hellwig | 2 | 3.64% | 1 | 16.67% |
Total | 55 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL_GPL(elv_rqhash_add);
void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
__elv_rqhash_del(rq);
elv_rqhash_add(q, rq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 27 | 100.00% | 2 | 100.00% |
Total | 27 | 100.00% | 2 | 100.00% |
struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
struct elevator_queue *e = q->elevator;
struct hlist_node *next;
struct request *rq;
hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
BUG_ON(!ELV_ON_HASH(rq));
if (unlikely(!rq_mergeable(rq))) {
__elv_rqhash_del(rq);
continue;
}
if (rq_hash_key(rq) == offset)
return rq;
}
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 86 | 94.51% | 3 | 75.00% |
Sasha Levin | 5 | 5.49% | 1 | 25.00% |
Total | 91 | 100.00% | 4 | 100.00% |
/*
* RB-tree support functions for inserting/lookup/removal of requests
* in a sorted RB tree.
*/
void elv_rb_add(struct rb_root *root, struct request *rq)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct request *__rq;
while (*p) {
parent = *p;
__rq = rb_entry(parent, struct request, rb_node);
if (blk_rq_pos(rq) < blk_rq_pos(__rq))
p = &(*p)->rb_left;
else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
p = &(*p)->rb_right;
}
rb_link_node(&rq->rb_node, parent, p);
rb_insert_color(&rq->rb_node, root);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 115 | 89.15% | 1 | 33.33% |
Tejun Heo | 12 | 9.30% | 1 | 33.33% |
Jeff Moyer | 2 | 1.55% | 1 | 33.33% |
Total | 129 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(elv_rb_add);
void elv_rb_del(struct rb_root *root, struct request *rq)
{
BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
rb_erase(&rq->rb_node, root);
RB_CLEAR_NODE(&rq->rb_node);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 44 | 100.00% | 1 | 100.00% |
Total | 44 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(elv_rb_del);
struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
struct rb_node *n = root->rb_node;
struct request *rq;
while (n) {
rq = rb_entry(n, struct request, rb_node);
if (sector < blk_rq_pos(rq))
n = n->rb_left;
else if (sector > blk_rq_pos(rq))
n = n->rb_right;
else
return rq;
}
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 79 | 92.94% | 1 | 50.00% |
Tejun Heo | 6 | 7.06% | 1 | 50.00% |
Total | 85 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(elv_rb_find);
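A brief sketch of how a sector-sorted scheduler might use these helpers (all names here are hypothetical; the pattern mirrors the deadline-style schedulers): keep an rb_root in the per-queue scheduler data, add and remove requests as they flow through, and probe the tree with a bio's end sector to find a front-merge candidate:
struct example_sched_data {
	struct rb_root sort_list;	/* requests ordered by blk_rq_pos() */
};

static void example_add_request(struct request_queue *q, struct request *rq)
{
	struct example_sched_data *ed = q->elevator->elevator_data;

	elv_rb_add(&ed->sort_list, rq);
}

static void example_remove_request(struct request_queue *q, struct request *rq)
{
	struct example_sched_data *ed = q->elevator->elevator_data;

	elv_rb_del(&ed->sort_list, rq);
}

static struct request *example_find_front_merge(struct request_queue *q,
						struct bio *bio)
{
	struct example_sched_data *ed = q->elevator->elevator_data;

	/* a request that starts right where this bio ends can front-merge */
	return elv_rb_find(&ed->sort_list, bio_end_sector(bio));
}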
/*
* Insert rq into dispatch queue of q. Queue lock must be held on
* entry. rq is sorted into the dispatch queue. To be used by
* specific elevators.
*/
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
sector_t boundary;
struct list_head *entry;
if (q->last_merge == rq)
q->last_merge = NULL;
elv_rqhash_del(q, rq);
q->nr_sorted--;
boundary = q->end_sector;
list_for_each_prev(entry, &q->queue_head) {
struct request *pos = list_entry_rq(entry);
if (req_op(rq) != req_op(pos))
break;
if (rq_data_dir(rq) != rq_data_dir(pos))
break;
if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
break;
if (blk_rq_pos(rq) >= boundary) {
if (blk_rq_pos(pos) < boundary)
continue;
} else {
if (blk_rq_pos(pos) >= boundary)
break;
}
if (blk_rq_pos(rq) >= blk_rq_pos(pos))
break;
}
list_add(&rq->queuelist, entry);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 126 | 74.56% | 4 | 36.36% |
Jens Axboe | 24 | 14.20% | 4 | 36.36% |
David Woodhouse | 7 | 4.14% | 1 | 9.09% |
Christoph Hellwig | 6 | 3.55% | 1 | 9.09% |
Michael Christie | 6 | 3.55% | 1 | 9.09% |
Total | 169 | 100.00% | 11 | 100.00% |
EXPORT_SYMBOL(elv_dispatch_sort);
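In effect the scan keeps the dispatch list in one-way elevator order relative to the boundary q->end_sector: requests at or beyond the boundary come first in ascending sector order, followed by the below-boundary requests, again ascending, and the walk never sorts past a started/softbarrier request or across a change in operation or data direction. A small worked illustration (hypothetical sector numbers): with a boundary of 500 and a dispatch list of 600, 700, 100, 200, a request at sector 650 is inserted between 600 and 700, while one at sector 150 lands between 100 and 200.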
/*
* Insert rq into dispatch queue of q. Queue lock must be held on
* entry. rq is added to the back of the dispatch queue. To be used by
* specific elevators.
*/
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
if (q->last_merge == rq)
q->last_merge = NULL;
elv_rqhash_del(q, rq);
q->nr_sorted--;
q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq;
list_add_tail(&rq->queuelist, &q->queue_head);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 69 | 100.00% | 1 | 100.00% |
Total | 69 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(elv_dispatch_add_tail);
enum elv_merge elv_merge(struct request_queue *q, struct request **req,
struct bio *bio)
{
struct elevator_queue *e = q->elevator;
struct request *__rq;
/*
* Levels of merges:
* nomerges: No merges at all attempted
* noxmerges: Only simple one-hit cache try
* merges: All merge tries attempted
*/
if (blk_queue_nomerges(q) || !bio_mergeable(bio))
return ELEVATOR_NO_MERGE;
/*
* First try one-hit cache.
*/
if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
enum elv_merge ret = blk_try_merge(q->last_merge, bio);
if (ret != ELEVATOR_NO_MERGE) {
*req = q->last_merge;
return ret;
}
}
if (blk_queue_noxmerges(q))
return ELEVATOR_NO_MERGE;
/*
* See if our hash lookup can find a potential backmerge.
*/
__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
if (__rq && elv_bio_merge_ok(__rq, bio)) {
*req = __rq;
return ELEVATOR_BACK_MERGE;
}
if (e->uses_mq && e->type->ops.mq.request_merge)
return e->type->ops.mq.request_merge(q, req, bio);
else if (!e->uses_mq && e->type->ops.sq.elevator_merge_fn)
return e->type->ops.sq.elevator_merge_fn(q, req, bio);
return ELEVATOR_NO_MERGE;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 114 | 51.82% | 7 | 41.18% |
Tejun Heo | 49 | 22.27% | 3 | 17.65% |
Alan D. Brunelle | 21 | 9.55% | 2 | 11.76% |
Linus Torvalds | 20 | 9.09% | 1 | 5.88% |
Ming Lei | 6 | 2.73% | 1 | 5.88% |
Christoph Hellwig | 6 | 2.73% | 1 | 5.88% |
Kent Overstreet | 2 | 0.91% | 1 | 5.88% |
Tahsin Erdogan | 2 | 0.91% | 1 | 5.88% |
Total | 220 | 100.00% | 17 | 100.00% |
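For reference, these three levels correspond to the request queue's "nomerges" sysfs attribute on a stock kernel (0: all merge tries attempted, 1: only the one-hit cache, 2: no merges at all); this function only consumes the resulting queue flags via blk_queue_nomerges() and blk_queue_noxmerges().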
/*
* Attempt to do an insertion back merge. Only check for the case where
* we can append 'rq' to an existing request, so we can throw 'rq' away
* afterwards.
*
* Returns true if we merged, false otherwise
*/
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
{
struct request *__rq;
bool ret;
if (blk_queue_nomerges(q))
return false;
/*
* First try one-hit cache.
*/
if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
return true;
if (blk_queue_noxmerges(q))
return false;
ret = false;
/*
* See if our hash lookup can find a potential backmerge.
*/
while (1) {
__rq = elv_rqhash_find(q, blk_rq_pos(rq));
if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
break;
/* The merged request could be merged with others, try again */
ret = true;
rq = __rq;
}
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 89 | 77.39% | 1 | 50.00% |
Shaohua Li | 26 | 22.61% | 1 | 50.00% |
Total | 115 | 100.00% | 2 | 100.00% |
void elv_merged_request(struct request_queue *q, struct request *rq,
enum elv_merge type)
{
struct elevator_queue *e = q->elevator;
if (e->uses_mq && e->type->ops.mq.request_merged)
e->type->ops.mq.request_merged(q, rq, type);
else if (!e->uses_mq && e->type->ops.sq.elevator_merged_fn)
e->type->ops.sq.elevator_merged_fn(q, rq, type);
if (type == ELEVATOR_BACK_MERGE)
elv_rqhash_reposition(q, rq);
q->last_merge = rq;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 101 | 87.83% | 8 | 72.73% |
Tejun Heo | 12 | 10.43% | 2 | 18.18% |
Christoph Hellwig | 2 | 1.74% | 1 | 9.09% |
Total | 115 | 100.00% | 11 | 100.00% |
void elv_merge_requests(struct request_queue *q, struct request *rq,
struct request *next)
{
struct elevator_queue *e = q->elevator;
bool next_sorted = false;
if (e->uses_mq && e->type->ops.mq.requests_merged)
e->type->ops.mq.requests_merged(q, rq, next);
else if (e->type->ops.sq.elevator_merge_req_fn) {
next_sorted = (__force bool)(next->rq_flags & RQF_SORTED);
if (next_sorted)
e->type->ops.sq.elevator_merge_req_fn(q, rq, next);
}
elv_rqhash_reposition(q, rq);
if (next_sorted) {
elv_rqhash_del(q, next);
q->nr_sorted--;
}
q->last_merge = rq;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 87 | 58.78% | 8 | 61.54% |
Linus Torvalds | 38 | 25.68% | 1 | 7.69% |
Tejun Heo | 17 | 11.49% | 3 | 23.08% |
Bart Van Assche | 6 | 4.05% | 1 | 7.69% |
Total | 148 | 100.00% | 13 | 100.00% |
void elv_bio_merged(struct request_queue *q, struct request *rq,
struct bio *bio)
{
struct elevator_queue *e = q->elevator;
if (WARN_ON_ONCE(e->uses_mq))
return;
if (e->type->ops.sq.elevator_bio_merged_fn)
e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Divyesh Shah | 48 | 70.59% | 1 | 25.00% |
Jens Axboe | 14 | 20.59% | 2 | 50.00% |
Tejun Heo | 6 | 8.82% | 1 | 25.00% |
Total | 68 | 100.00% | 4 | 100.00% |
#ifdef CONFIG_PM
static void blk_pm_requeue_request(struct request *rq)
{
if (rq->q->dev && !(rq->rq_flags & RQF_PM))
rq->q->nr_pending--;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Lin Ming | 33 | 94.29% | 1 | 50.00% |
Christoph Hellwig | 2 | 5.71% | 1 | 50.00% |
Total | 35 | 100.00% | 2 | 100.00% |
static void blk_pm_add_request(struct request_queue *q, struct request *rq)
{
if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
(q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
pm_request_resume(q->dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Lin Ming | 57 | 96.61% | 1 | 50.00% |
Christoph Hellwig | 2 | 3.39% | 1 | 50.00% |
Total | 59 | 100.00% | 2 | 100.00% |
#else
static inline void blk_pm_requeue_request(struct request *rq) {}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Lin Ming | 11 | 100.00% | 1 | 100.00% |
Total | 11 | 100.00% | 1 | 100.00% |
static inline void blk_pm_add_request(struct request_queue *q,
struct request *rq)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Lin Ming | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
#endif
void elv_requeue_request(struct request_queue *q, struct request *rq)
{
/*
* it already went through dequeue, we need to decrement the
* in_flight count again
*/
if (blk_account_rq(rq)) {
q->in_flight[rq_is_sync(rq)]--;
if (rq->rq_flags & RQF_SORTED)
elv_deactivate_rq(q, rq);
}
rq->rq_flags &= ~RQF_STARTED;
blk_pm_requeue_request(rq);
__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 41 | 56.94% | 6 | 50.00% |
Andrew Morton | 13 | 18.06% | 1 | 8.33% |
Tejun Heo | 7 | 9.72% | 2 | 16.67% |
Christoph Hellwig | 6 | 8.33% | 2 | 16.67% |
Lin Ming | 5 | 6.94% | 1 | 8.33% |
Total | 72 | 100.00% | 12 | 100.00% |
void elv_drain_elevator(struct request_queue *q)
{
struct elevator_queue *e = q->elevator;
static int printed;
if (WARN_ON_ONCE(e->uses_mq))
return;
lockdep_assert_held(q->queue_lock);
while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
;
if (q->nr_sorted && printed++ < 10) {
printk(KERN_ERR "%s: forced dispatching is broken "
"(nr_sorted=%u), please report this\n",
q->elevator->type->elevator_name, q->nr_sorted);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 66 | 73.33% | 3 | 50.00% |
Jens Axboe | 24 | 26.67% | 3 | 50.00% |
Total | 90 | 100.00% | 6 | 100.00% |
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
trace_block_rq_insert(q, rq);
blk_pm_add_request(q, rq);
rq->q = q;
if (rq->rq_flags & RQF_SOFTBARRIER) {
/* barriers are scheduling boundary, update end_sector */
if (!blk_rq_is_passthrough(rq)) {
q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq;