Release 4.12 block/blk-mq.c
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>
#include <trace/events/block.h>
#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
#include "blk-wbt.h"
#include "blk-mq-sched.h"
static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);
static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
static void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync);
static int blk_mq_poll_stats_bkt(const struct request *rq)
{
        int ddir, bytes, bucket;

        ddir = rq_data_dir(rq);
        bytes = blk_rq_bytes(rq);

        bucket = ddir + 2*(ilog2(bytes) - 9);

        if (bucket < 0)
                return -1;
        else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
                return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

        return bucket;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Bates | 74 | 98.67% | 1 | 50.00% |
Jens Axboe | 1 | 1.33% | 1 | 50.00% |
Total | 75 | 100.00% | 2 | 100.00% |
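The bucket layout interleaves read and write buckets by request size: 512-byte requests map to buckets 0 (read) and 1 (write), each doubling in size moves two buckets up, and oversized requests are clamped into the top read/write pair. A small standalone sketch of the same arithmetic, not part of blk-mq.c; BLK_MQ_POLL_STATS_BKTS is assumed to be 16 here, and ilog2_u() stands in for the kernel's ilog2():

/* Standalone sketch (user-space C): reproduces the bucket math above to show
 * where a few common request sizes land. */
#include <stdio.h>

#define BLK_MQ_POLL_STATS_BKTS 16       /* assumed value from blk-mq.h */

static int ilog2_u(unsigned int v)      /* integer log2, like the kernel's ilog2() */
{
        int l = -1;

        while (v) {
                v >>= 1;
                l++;
        }
        return l;
}

static int poll_stats_bkt(int ddir, unsigned int bytes)
{
        int bucket = ddir + 2 * (ilog2_u(bytes) - 9);

        if (bucket < 0)
                return -1;
        else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
                return ddir + BLK_MQ_POLL_STATS_BKTS - 2;
        return bucket;
}

int main(void)
{
        /* 512B read -> 0, 512B write -> 1, 4KiB read -> 6, 4KiB write -> 7 */
        printf("%d %d %d %d\n",
               poll_stats_bkt(0, 512), poll_stats_bkt(1, 512),
               poll_stats_bkt(0, 4096), poll_stats_bkt(1, 4096));
        return 0;
}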
/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
        return sbitmap_any_bit_set(&hctx->ctx_map) ||
                        !list_empty_careful(&hctx->dispatch) ||
                        blk_mq_sched_has_work(hctx);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 30 | 90.91% | 3 | 75.00% |
Omar Sandoval | 3 | 9.09% | 1 | 25.00% |
Total | 33 | 100.00% | 4 | 100.00% |
/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
                                     struct blk_mq_ctx *ctx)
{
        if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
                sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 33 | 76.74% | 2 | 66.67% |
Omar Sandoval | 10 | 23.26% | 1 | 33.33% |
Total | 43 | 100.00% | 3 | 100.00% |
static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
                                      struct blk_mq_ctx *ctx)
{
        sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 23 | 82.14% | 2 | 66.67% |
Omar Sandoval | 5 | 17.86% | 1 | 33.33% |
Total | 28 | 100.00% | 3 | 100.00% |
void blk_freeze_queue_start(struct request_queue *q)
{
        int freeze_depth;

        freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
        if (freeze_depth == 1) {
                percpu_ref_kill(&q->q_usage_counter);
                blk_mq_run_hw_queues(q, false);
        }
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 17 | 36.96% | 1 | 10.00% |
Tejun Heo | 15 | 32.61% | 5 | 50.00% |
Christoph Hellwig | 9 | 19.57% | 1 | 10.00% |
Mike Snitzer | 3 | 6.52% | 1 | 10.00% |
Dan J Williams | 1 | 2.17% | 1 | 10.00% |
Lei Ming | 1 | 2.17% | 1 | 10.00% |
Total | 46 | 100.00% | 10 | 100.00% |
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
void blk_mq_freeze_queue_wait(struct request_queue *q)
{
        wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 15 | 60.00% | 2 | 40.00% |
Lei Ming | 6 | 24.00% | 1 | 20.00% |
Jens Axboe | 3 | 12.00% | 1 | 20.00% |
Dan J Williams | 1 | 4.00% | 1 | 20.00% |
Total | 25 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
                                     unsigned long timeout)
{
        return wait_event_timeout(q->mq_freeze_wq,
                                  percpu_ref_is_zero(&q->q_usage_counter),
                                  timeout);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Keith Busch | 32 | 100.00% | 1 | 100.00% |
Total | 32 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
        /*
         * In the !blk_mq case we are only calling this to kill the
         * q_usage_counter, otherwise this increases the freeze depth
         * and waits for it to return to zero.  For this reason there is
         * no blk_unfreeze_queue(), and blk_freeze_queue() is not
         * exported to drivers as the only user for unfreeze is blk_mq.
         */
        blk_freeze_queue_start(q);
        blk_mq_freeze_queue_wait(q);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 18 | 85.71% | 1 | 33.33% |
Dan J Williams | 2 | 9.52% | 1 | 33.33% |
Lei Ming | 1 | 4.76% | 1 | 33.33% |
Total | 21 | 100.00% | 3 | 100.00% |
void blk_mq_freeze_queue(struct request_queue *q)
{
        /*
         * ...just an alias to keep freeze and unfreeze actions balanced
         * in the blk_mq_* namespace
         */
        blk_freeze_queue(q);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dan J Williams | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
void blk_mq_unfreeze_queue(struct request_queue *q)
{
        int freeze_depth;

        freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
        WARN_ON_ONCE(freeze_depth < 0);
        if (!freeze_depth) {
                percpu_ref_reinit(&q->q_usage_counter);
                wake_up_all(&q->mq_freeze_wq);
        }
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 31 | 58.49% | 1 | 20.00% |
Tejun Heo | 11 | 20.75% | 2 | 40.00% |
Christoph Hellwig | 10 | 18.87% | 1 | 20.00% |
Dan J Williams | 1 | 1.89% | 1 | 20.00% |
Total | 53 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
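The freeze/unfreeze pair above is how drivers and the core drain a queue before changing its configuration: blk_mq_freeze_queue() bumps mq_freeze_depth, kills q_usage_counter and waits for all in-flight requests to finish, and blk_mq_unfreeze_queue() reinitializes the counter and wakes waiters once the depth drops back to zero. A minimal, hedged driver-side sketch, not part of blk-mq.c; the example_ helper name is an assumption:

/* Hypothetical example: freeze a queue around a limits update so that no
 * request is in flight while the configuration changes. */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static void example_update_queue_limits(struct request_queue *q,
                                        unsigned int new_max_sectors)
{
        blk_mq_freeze_queue(q);         /* drain: waits for q_usage_counter == 0 */

        blk_queue_max_hw_sectors(q, new_max_sectors);

        blk_mq_unfreeze_queue(q);       /* reinit the ref and wake up waiters */
}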
/**
 * blk_mq_quiesce_queue() - wait until all ongoing queue_rq calls have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. New queue_rq() calls may still occur
 * unless the queue has been stopped first.
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;
        bool rcu = false;

        __blk_mq_stop_hw_queues(q, true);

        queue_for_each_hw_ctx(q, hctx, i) {
                if (hctx->flags & BLK_MQ_F_BLOCKING)
                        synchronize_srcu(&hctx->queue_rq_srcu);
                else
                        rcu = true;
        }
        if (rcu)
                synchronize_rcu();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Bart Van Assche | 66 | 95.65% | 1 | 50.00% |
Jens Axboe | 3 | 4.35% | 1 | 50.00% |
Total | 69 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
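Because blk_mq_quiesce_queue() in this release stops the hardware queues before synchronizing, a driver that wants dispatch to resume afterwards has to restart the stopped queues itself. A hedged sketch of that pattern, loosely modeled on how NVMe-style drivers suspend and resume I/O around a reset; the example_ names are assumptions and this is not part of blk-mq.c:

/* Hypothetical example: pause ->queue_rq() calls while a controller is being
 * reset, then let dispatch continue afterwards. */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static void example_pause_io(struct request_queue *q)
{
        /* Stops the hw queues and waits for in-progress ->queue_rq() calls. */
        blk_mq_quiesce_queue(q);
}

static void example_resume_io(struct request_queue *q)
{
        /* Quiescing left the hw queues stopped; restart them and rerun. */
        blk_mq_start_stopped_hw_queues(q, true);
        blk_mq_kick_requeue_list(q);
}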
void blk_mq_wake_waiters(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;

        queue_for_each_hw_ctx(q, hctx, i)
                if (blk_mq_hw_queue_mapped(hctx))
                        blk_mq_tag_wakeup_all(hctx->tags, true);

        /*
         * If we are called because the queue has now been marked as
         * dying, we need to ensure that processes currently waiting on
         * the queue are notified as well.
         */
        wake_up_all(&q->mq_freeze_wq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 43 | 82.69% | 1 | 50.00% |
Keith Busch | 9 | 17.31% | 1 | 50.00% |
Total | 52 | 100.00% | 2 | 100.00% |
bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
        return blk_mq_has_free_tags(hctx->tags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 18 | 100.00% | 1 | 100.00% |
Total | 18 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(blk_mq_can_queue);
void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
                        struct request *rq, unsigned int op)
{
        INIT_LIST_HEAD(&rq->queuelist);
        /* csd/requeue_work/fifo_time is initialized before use */
        rq->q = q;
        rq->mq_ctx = ctx;
        rq->cmd_flags = op;
        if (blk_queue_io_stat(q))
                rq->rq_flags |= RQF_IO_STAT;
        /* do not touch atomic flags, it needs atomic ops against the timer */
        rq->cpu = -1;
        INIT_HLIST_NODE(&rq->hash);
        RB_CLEAR_NODE(&rq->rb_node);
        rq->rq_disk = NULL;
        rq->part = NULL;
        rq->start_time = jiffies;
#ifdef CONFIG_BLK_CGROUP
        rq->rl = NULL;
        set_start_time_ns(rq);
        rq->io_start_time_ns = 0;
#endif
        rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        rq->nr_integrity_segments = 0;
#endif
        rq->special = NULL;
        /* tag was already set */
        rq->extra_len = 0;

        INIT_LIST_HEAD(&rq->timeout_list);
        rq->timeout = 0;

        rq->end_io = NULL;
        rq->end_io_data = NULL;
        rq->next_rq = NULL;

        ctx->rq_dispatched[op_is_sync(op)]++;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 145 | 71.08% | 3 | 33.33% |
Jens Axboe | 52 | 25.49% | 4 | 44.44% |
Lei Ming | 5 | 2.45% | 1 | 11.11% |
Michael Christie | 2 | 0.98% | 1 | 11.11% |
Total | 204 | 100.00% | 9 | 100.00% |
EXPORT_SYMBOL_GPL(blk_mq_rq_ctx_init);
struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
                                       unsigned int op)
{
        struct request *rq;
        unsigned int tag;

        tag = blk_mq_get_tag(data);
        if (tag != BLK_MQ_TAG_FAIL) {
                struct blk_mq_tags *tags = blk_mq_tags_from_data(data);

                rq = tags->static_rqs[tag];

                if (data->flags & BLK_MQ_REQ_INTERNAL) {
                        rq->tag = -1;
                        rq->internal_tag = tag;
                } else {
                        if (blk_mq_tag_busy(data->hctx)) {
                                rq->rq_flags = RQF_MQ_INFLIGHT;
                                atomic_inc(&data->hctx->nr_active);
                        }
                        rq->tag = tag;
                        rq->internal_tag = -1;
                        data->hctx->tags->rqs[rq->tag] = rq;
                }

                blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
                return rq;
        }

        return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 83 | 51.55% | 3 | 33.33% |
Jens Axboe | 50 | 31.06% | 3 | 33.33% |
Omar Sandoval | 15 | 9.32% | 1 | 11.11% |
Lei Ming | 11 | 6.83% | 1 | 11.11% |
Michael Christie | 2 | 1.24% | 1 | 11.11% |
Total | 161 | 100.00% | 9 | 100.00% |
EXPORT_SYMBOL_GPL(__blk_mq_alloc_request);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
                unsigned int flags)
{
        struct blk_mq_alloc_data alloc_data = { .flags = flags };
        struct request *rq;
        int ret;

        ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
        if (ret)
                return ERR_PTR(ret);

        rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);

        blk_mq_put_ctx(alloc_data.ctx);
        blk_queue_exit(q);

        if (!rq)
                return ERR_PTR(-EWOULDBLOCK);

        rq->__data_len = 0;
        rq->__sector = (sector_t) -1;
        rq->bio = rq->biotail = NULL;
        return rq;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 48 | 38.10% | 3 | 25.00% |
Christoph Hellwig | 46 | 36.51% | 4 | 33.33% |
Joe Lawrence | 20 | 15.87% | 1 | 8.33% |
Keith Busch | 5 | 3.97% | 2 | 16.67% |
Lei Ming | 5 | 3.97% | 1 | 8.33% |
Dan J Williams | 2 | 1.59% | 1 | 8.33% |
Total | 126 | 100.00% | 12 | 100.00% |
EXPORT_SYMBOL(blk_mq_alloc_request);
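blk_mq_alloc_request() is the entry point drivers use to obtain a passthrough-style request outside the normal bio path; the caller checks the IS_ERR() return and eventually releases the request with blk_mq_free_request(). A hedged sketch, not part of blk-mq.c; the example_ function is hypothetical:

/* Hypothetical example: allocate a request without sleeping, fail fast if no
 * tag is available, and free it again. */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/err.h>

static int example_try_alloc(struct request_queue *q)
{
        struct request *rq;

        rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
        if (IS_ERR(rq))
                return PTR_ERR(rq);     /* e.g. -EWOULDBLOCK when tags are exhausted */

        /* ... fill in rq->timeout, the payload, etc. and issue it ... */

        blk_mq_free_request(rq);
        return 0;
}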
struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
                unsigned int flags, unsigned int hctx_idx)
{
        struct blk_mq_alloc_data alloc_data = { .flags = flags };
        struct request *rq;
        unsigned int cpu;
        int ret;

        /*
         * If the tag allocator sleeps we could get an allocation for a
         * different hardware context.  No need to complicate the low level
         * allocator for this for the rare use case of a command tied to
         * a specific queue.
         */
        if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
                return ERR_PTR(-EINVAL);

        if (hctx_idx >= q->nr_hw_queues)
                return ERR_PTR(-EIO);

        ret = blk_queue_enter(q, true);
        if (ret)
                return ERR_PTR(ret);

        /*
         * Check if the hardware context is actually mapped to anything.
         * If not tell the caller that it should skip this queue.
         */
        alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
        if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
                blk_queue_exit(q);
                return ERR_PTR(-EXDEV);
        }
        cpu = cpumask_first(alloc_data.hctx->cpumask);
        alloc_data.ctx = __blk_mq_get_ctx(q, cpu);

        rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);

        blk_queue_exit(q);

        if (!rq)
                return ERR_PTR(-EWOULDBLOCK);

        return rq;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ming Lin | 127 | 66.15% | 1 | 33.33% |
Omar Sandoval | 52 | 27.08% | 1 | 33.33% |
Christoph Hellwig | 13 | 6.77% | 1 | 33.33% |
Total | 192 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
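blk_mq_alloc_request_hctx() exists for the rare case where a command must be issued on one specific hardware queue (NVMe over Fabrics uses it for its per-queue connect commands); it only supports BLK_MQ_REQ_NOWAIT, as the WARN_ON_ONCE above enforces. A hedged sketch, not part of blk-mq.c; the example_ name is hypothetical:

/* Hypothetical example: allocate a driver-internal setup command that must
 * run on hardware queue 'qid'. */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/err.h>

static struct request *example_alloc_on_queue(struct request_queue *q,
                                              unsigned int qid)
{
        /* Reserved tags are commonly used for such setup commands. */
        return blk_mq_alloc_request_hctx(q, REQ_OP_DRV_OUT,
                                         BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED,
                                         qid);
}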
void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                             struct request *rq)
{
        const int sched_tag = rq->internal_tag;
        struct request_queue *q = rq->q;

        if (rq->rq_flags & RQF_MQ_INFLIGHT)
                atomic_dec(&hctx->nr_active);

        wbt_done(q->rq_wb, &rq->issue_stat);
        rq->rq_flags = 0;

        clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
        if (rq->tag != -1)
                blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
        if (sched_tag != -1)
                blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
        blk_mq_sched_restart(hctx);
        blk_queue_exit(q);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 115 | 79.31% | 8 | 53.33% |
Christoph Hellwig | 13 | 8.97% | 2 | 13.33% |
Omar Sandoval | 10 | 6.90% | 2 | 13.33% |
David Hildenbrand | 5 | 3.45% | 1 | 6.67% |
Bart Van Assche | 1 | 0.69% | 1 | 6.67% |
Dan J Williams | 1 | 0.69% | 1 | 6.67% |
Total | 145 | 100.00% | 15 | 100.00% |
static void blk_mq_finish_hctx_request(struct blk_mq_hw_ctx *hctx,
                                       struct request *rq)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;

        ctx->rq_completed[rq_is_sync(rq)]++;
        __blk_mq_finish_request(hctx, ctx, rq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 45 | 100.00% | 4 | 100.00% |
Total | 45 | 100.00% | 4 | 100.00% |
void blk_mq_finish_request(struct request *rq)
{
        blk_mq_finish_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 25 | 89.29% | 3 | 75.00% |
Christoph Hellwig | 3 | 10.71% | 1 | 25.00% |
Total | 28 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL_GPL(blk_mq_finish_request);
void blk_mq_free_request(struct request *rq)
{
        blk_mq_sched_put_request(rq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 15 | 100.00% | 1 | 100.00% |
Total | 15 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(blk_mq_free_request);
inline void __blk_mq_end_request(struct request *rq, int error)
{
        blk_account_io_done(rq);

        if (rq->end_io) {
                wbt_done(rq->q->rq_wb, &rq->issue_stat);
                rq->end_io(rq, error);
        } else {
                if (unlikely(blk_bidi_rq(rq)))
                        blk_mq_free_request(rq->next_rq);
                blk_mq_free_request(rq);
        }
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 46 | 61.33% | 2 | 33.33% |
Christoph Hellwig | 24 | 32.00% | 3 | 50.00% |
Lei Ming | 5 | 6.67% | 1 | 16.67% |
Total | 75 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(__blk_mq_end_request);
void blk_mq_end_request(struct request *rq, int error)
{
        if (blk_update_request(rq, error, blk_rq_bytes(rq)))
                BUG();
        __blk_mq_end_request(rq, error);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 37 | 100.00% | 2 | 100.00% |
Total | 37 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(blk_mq_end_request);
static void __blk_mq_complete_request_remote(void *data)
{
        struct request *rq = data;

        rq->q->softirq_done_fn(rq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 18 | 69.23% | 1 | 33.33% |
Christoph Hellwig | 8 | 30.77% | 2 | 66.67% |
Total | 26 | 100.00% | 3 | 100.00% |
static void __blk_mq_complete_request(struct request *rq)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        bool shared = false;
        int cpu;

        if (rq->internal_tag != -1)
                blk_mq_sched_completed_request(rq);
        if (rq->rq_flags & RQF_STATS) {
                blk_mq_poll_stats_start(rq->q);
                blk_stat_add(rq);
        }

        if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
                rq->q->softirq_done_fn(rq);
                return;
        }

        cpu = get_cpu();
        if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
                shared = cpus_share_cache(cpu, ctx->cpu);

        if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
                rq->csd.func = __blk_mq_complete_request_remote;
                rq->csd.info = rq;
                rq->csd.flags = 0;
                smp_call_function_single_async(ctx->cpu, &rq->csd);
        } else {
                rq->q->softirq_done_fn(rq);
        }
        put_cpu();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 132 | 68.39% | 4 | 57.14% |
Jens Axboe | 60 | 31.09% | 2 | 28.57% |
Frédéric Weisbecker | 1 | 0.52% | 1 | 14.29% |
Total | 193 | 100.00% | 7 | 100.00% |
/**
 * blk_mq_complete_request - end I/O on a request
 * @rq: the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        if (unlikely(blk_should_fake_timeout(q)))
                return;
        if (!blk_mark_rq_complete(rq))
                __blk_mq_complete_request(rq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 34 | 79.07% | 1 | 50.00% |
Jens Axboe | 9 | 20.93% | 1 | 50.00% |
Total | 43 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(blk_mq_complete_request);
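A driver reports a hardware completion by calling blk_mq_complete_request() from its interrupt handler; the code above then funnels the request into the queue's softirq_done_fn, which blk-mq wires up from the driver's blk_mq_ops .complete callback, raising an IPI when the completion should run on the submitting CPU. A hedged sketch of that wiring, not part of blk-mq.c; all example_ names are hypothetical:

/* Hypothetical example: the usual split between the hard-IRQ side
 * (blk_mq_complete_request) and the .complete callback that ends the request. */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static void example_complete_rq(struct request *rq)
{
        /* Runs as the queue's softirq_done_fn, possibly on the submitting CPU. */
        int error = 0;          /* would be derived from the device status */

        blk_mq_end_request(rq, error);
}

static void example_irq_seen_completion(struct request *rq)
{
        /* Called from the device interrupt handler for each finished command. */
        blk_mq_complete_request(rq);
}

static const struct blk_mq_ops example_mq_ops = {
        /* .queue_rq and the other mandatory callbacks omitted in this sketch */
        .complete       = example_complete_rq,
};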
int blk_mq_request_started(struct request *rq)
{
        return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Keith Busch | 21 | 100.00% | 1 | 100.00% |
Total | 21 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(blk_mq_request_started);
void blk_mq_start_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        blk_mq_sched_started_request(rq);

        trace_block_rq_issue(q, rq);

        if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
                blk_stat_set_issue(&rq->issue_stat, blk_rq_sectors(rq));
                rq->rq_flags |= RQF_STATS;
                wbt_issue(q->rq_wb, &rq->issue_stat);
        }

        blk_add_timer(rq);

        /*
         * Ensure that ->deadline is visible before we set the started
         * flag and clear the completed flag.
         */
        smp_mb__before_atomic();

        /*
         * Mark us as started and clear complete. Complete might have been
         * set if requeue raced with timeout, which then marked it as
         * complete. So be sure to clear complete again when we start
         * the request, otherwise we'll ignore the completion event.
         */
        if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
                set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
                clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);

        if (q->dma_drain_size && blk_rq_bytes(rq)) {
                /*
                 * Make sure space for the drain appears.  We know we can do
                 * this because max_hw_segments has been adjusted to be one
                 * fewer than the device can handle.
                 */
                rq->nr_phys_segments++;
        }
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 122 | 81.33% | 8 | 72.73% |
Christoph Hellwig | 19 | 12.67% | 1 | 9.09% |
Shaohua Li | 6 | 4.00% | 1 | 9.09% |
Lei Ming | 3 | 2.00% | 1 | 9.09% |
Total | 150 | 100.00% | 11 | 100.00% |
EXPORT_SYMBOL(blk_mq_start_request);
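blk_mq_start_request() is what a driver's .queue_rq handler calls once it has decided to issue a request, arming the timeout and marking the request started before the command is handed to the hardware. A minimal, hedged .queue_rq sketch using the 4.12-era BLK_MQ_RQ_QUEUE_* return codes, not part of blk-mq.c; the example_ names and the device-submit helper are assumptions:

/* Hypothetical example: the canonical ordering inside a driver's ->queue_rq():
 * start the request, then hand it to the hardware. */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static int example_submit_to_hw(struct request *rq)
{
        /* Placeholder for ringing the device doorbell; assume success here. */
        (void)rq;
        return 0;
}

static int example_queue_rq(struct blk_mq_hw_ctx *hctx,
                            const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;

        blk_mq_start_request(rq);       /* arms the timer, sets REQ_ATOM_STARTED */

        if (example_submit_to_hw(rq) < 0) {
                /* Let blk-mq hold on to the request and retry dispatch later. */
                return BLK_MQ_RQ_QUEUE_BUSY;
        }
        return BLK_MQ_RQ_QUEUE_OK;
}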
/*
 * When we reach here because queue is busy, REQ_ATOM_COMPLETE
 * flag isn't set yet, so there may be a race with the timeout handler,
 * but given rq->deadline is just set in .queue_rq() under
 * this situation, the race won't be possible in reality because
 * rq->timeout should be big enough to cover the window
 * between blk_mq_start_request() called from .queue_rq() and
 * clearing REQ_ATOM_STARTED here.
 */
static void __blk_mq_requeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        trace_block_rq_requeue(q, rq);
        wbt_requeue(q->rq_wb, &rq->issue_stat);
        blk_mq_sched_requeue_request(rq);

        if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
                if (q->dma_drain_size && blk_rq_bytes(rq))
                        rq->nr_phys_segments--;
        }
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 51 | 68.92% | 3 | 50.00% |
Christoph Hellwig | 23 | 31.08% | 3 | 50.00% |
Total | 74 | 100.00% | 6 | 100.00% |
void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
        __blk_mq_requeue_request(rq);

        BUG_ON(blk_queued_rq(rq));
        blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 30 | 85.71% | 2 | 66.67% |
Bart Van Assche | 5 | 14.29% | 1 | 33.33% |
Total | 35 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(blk_mq_requeue_request);
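blk_mq_requeue_request() lets a driver push back a request it has already started (for example after a transient transport error) so the requeue worker reinserts it; passing kick_requeue_list == true schedules that worker immediately, while false lets the caller batch requests and kick the list once. A hedged sketch, not part of blk-mq.c; the example_ name is hypothetical:

/* Hypothetical example: retry a started request after a transient failure
 * instead of completing it with an error. */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static void example_retry_later(struct request *rq)
{
        /* Undo the "started" state and park rq on q->requeue_list ... */
        blk_mq_requeue_request(rq, false);

        /* ... but only reprocess the list after a short back-off. */
        blk_mq_delay_kick_requeue_list(rq->q, 100 /* ms */);
}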
static void blk_mq_requeue_work(struct work_struct *work)
{
        struct request_queue *q =
                container_of(work, struct request_queue, requeue_work.work);
        LIST_HEAD(rq_list);
        struct request *rq, *next;
        unsigned long flags;

        spin_lock_irqsave(&q->requeue_lock, flags);
        list_splice_init(&q->requeue_list, &rq_list);
        spin_unlock_irqrestore(&q->requeue_lock, flags);

        list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
                if (!(rq->rq_flags & RQF_SOFTBARRIER))
                        continue;

                rq->rq_flags &= ~RQF_SOFTBARRIER;
                list_del_init(&rq->queuelist);
                blk_mq_sched_insert_request(rq, true, false, false, true);
        }

        while (!list_empty(&rq_list)) {
                rq = list_entry(rq_list.next, struct request, queuelist);
                list_del_init(&rq->queuelist);
                blk_mq_sched_insert_request(rq, false, false, false, true);
        }

        blk_mq_run_hw_queues(q, false);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 170 | 93.92% | 3 | 42.86% |
Jens Axboe | 6 | 3.31% | 2 | 28.57% |
Bart Van Assche | 3 | 1.66% | 1 | 14.29% |
Mike Snitzer | 2 | 1.10% | 1 | 14.29% |
Total | 181 | 100.00% | 7 | 100.00% |
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
                                bool kick_requeue_list)
{
        struct request_queue *q = rq->q;
        unsigned long flags;

        /*
         * We abuse this flag that is otherwise used by the I/O scheduler to
         * request head insertion from the workqueue.
         */
        BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

        spin_lock_irqsave(&q->requeue_lock, flags);
        if (at_head) {
                rq->rq_flags |= RQF_SOFTBARRIER;
                list_add(&rq->queuelist, &q->requeue_list);
        } else {
                list_add_tail(&rq->queuelist, &q->requeue_list);
        }
        spin_unlock_irqrestore(&q->requeue_lock, flags);

        if (kick_requeue_list)
                blk_mq_kick_requeue_list(q);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 97 | 88.99% | 3 | 75.00% |
Bart Van Assche | 12 | 11.01% | 1 | 25.00% |
Total | 109 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q)
{
        kblockd_schedule_delayed_work(&q->requeue_work, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 17 | 85.00% | 1 | 50.00% |
Mike Snitzer | 3 | 15.00% | 1 | 50.00% |
Total | 20 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(blk_mq_kick_requeue_list);
void blk_mq_delay_kick_requeue_list(struct request_queue *q,
                                    unsigned long msecs)
{
        kblockd_schedule_delayed_work(&q->requeue_work,
                                      msecs_to_jiffies(msecs));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 14 | 51.85% | 1 | 50.00% |
Mike Snitzer | 13 | 48.15% | 1 | 50.00% |
Total | 27 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
        if (tag < tags->nr_tags) {
                prefetch(tags->rqs[tag]);
                return tags->rqs[tag];
        }

        return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 27 | 57.45% | 2 | 40.00% |
Hannes Reinecke | 11 | 23.40% | 1 | 20.00% |
Christoph Hellwig | 8 | 17.02% | 1 | 20.00% |
Ming Lei | 1 | 2.13% | 1 | 20.00% |
Total | 47 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(blk_mq_tag_to_rq);
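blk_mq_tag_to_rq() is the lookup a driver's completion or timeout path uses to turn a hardware-reported tag back into the request that owns it; the bounds check above returns NULL for tags the device should never have reported. A hedged sketch, not part of blk-mq.c, assuming the driver stashed the hctx->tags pointer when the hardware context was initialized; all example_ names are hypothetical:

/* Hypothetical example: map a tag from a device completion entry back to its
 * request and complete it. */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

struct example_hw_queue {
        struct blk_mq_tags *tags;       /* saved from hctx->tags in .init_hctx */
};

static void example_handle_completion(struct example_hw_queue *hwq,
                                      unsigned int tag)
{
        struct request *rq;

        rq = blk_mq_tag_to_rq(hwq->tags, tag);
        if (!rq) {
                /* Bogus or stale tag from the device; nothing to complete. */
                return;
        }
        blk_mq_complete_request(rq);
}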