Release 4.12 block/blk-core.c
/*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1994, Karl Keyte: Added support for disk statistics
* Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
* Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
* kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
* - July 2000
* bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
*/
/*
* This handles all read/write requests to block devices
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>
#define CREATE_TRACE_POINTS
#include <trace/events/block.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"
#ifdef CONFIG_DEBUG_FS
struct dentry *blk_debugfs_root;
#endif
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
DEFINE_IDA(blk_queue_ida);
/*
* For the allocated request tables
*/
struct kmem_cache *request_cachep;
/*
* For queue allocation
*/
struct kmem_cache *blk_requestq_cachep;
/*
* Controlling structure to kblockd
*/
static struct workqueue_struct *kblockd_workqueue;
static void blk_clear_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
clear_wb_congested(rl->blkg->wb_congested, sync);
#else
/*
* If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't
* flip its congestion state for events on other blkcgs.
*/
if (rl == &rl->q->root_rl)
clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 58 | 98.31% | 2 | 66.67% |
Jan Kara | 1 | 1.69% | 1 | 33.33% |
Total | 59 | 100.00% | 3 | 100.00% |
static void blk_set_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
set_wb_congested(rl->blkg->wb_congested, sync);
#else
/* see blk_clear_congested() */
if (rl == &rl->q->root_rl)
set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 58 | 98.31% | 2 | 66.67% |
Jan Kara | 1 | 1.69% | 1 | 33.33% |
Total | 59 | 100.00% | 3 | 100.00% |
void blk_queue_congestion_threshold(struct request_queue *q)
{
int nr;
nr = q->nr_requests - (q->nr_requests / 8) + 1;
if (nr > q->nr_requests)
nr = q->nr_requests;
q->nr_congestion_on = nr;
nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
if (nr < 1)
nr = 1;
q->nr_congestion_off = nr;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 79 | 88.76% | 4 | 66.67% |
Jens Axboe | 10 | 11.24% | 2 | 33.33% |
Total | 89 | 100.00% | 6 | 100.00% |
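The two thresholds form a hysteresis band just below the full request pool, so the congested state does not flap on and off with every allocation and completion. An illustrative sketch of the arithmetic (not part of blk-core.c), assuming the default queue depth of 128 requests (BLKDEV_MAX_RQ):

/* Illustrative arithmetic only, not kernel code: reproduces the thresholds
 * that blk_queue_congestion_threshold() computes for an assumed nr_requests. */
#include <stdio.h>

static void congestion_thresholds(int nr_requests)
{
	int on, off;

	on = nr_requests - (nr_requests / 8) + 1;	/* ~7/8 of the pool */
	if (on > nr_requests)
		on = nr_requests;

	off = nr_requests - (nr_requests / 8) - (nr_requests / 16) - 1;
	if (off < 1)
		off = 1;

	printf("nr_requests=%d: congestion on at %d, off at %d\n",
	       nr_requests, on, off);
}

int main(void)
{
	congestion_thresholds(128);	/* prints: on at 113, off at 103 */
	return 0;
}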
void blk_rq_init(struct request_queue *q, struct request *rq)
{
memset(rq, 0, sizeof(*rq));
INIT_LIST_HEAD(&rq->queuelist);
INIT_LIST_HEAD(&rq->timeout_list);
rq->cpu = -1;
rq->q = q;
rq->__sector = (sector_t) -1;
INIT_HLIST_NODE(&rq->hash);
RB_CLEAR_NODE(&rq->rb_node);
rq->tag = -1;
rq->internal_tag = -1;
rq->start_time = jiffies;
set_start_time_ns(rq);
rq->part = NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 70 | 61.40% | 9 | 52.94% |
FUJITA Tomonori | 14 | 12.28% | 2 | 11.76% |
Linus Torvalds | 10 | 8.77% | 1 | 5.88% |
Jerome Marchand | 6 | 5.26% | 1 | 5.88% |
Divyesh Shah | 5 | 4.39% | 1 | 5.88% |
Tejun Heo | 5 | 4.39% | 2 | 11.76% |
Arnaldo Carvalho de Melo | 4 | 3.51% | 1 | 5.88% |
Total | 114 | 100.00% | 17 | 100.00% |
EXPORT_SYMBOL(blk_rq_init);
static void req_bio_endio(struct request *rq, struct bio *bio,
unsigned int nbytes, int error)
{
if (error)
bio->bi_error = error;
if (unlikely(rq->rq_flags & RQF_QUIET))
bio_set_flag(bio, BIO_QUIET);
bio_advance(bio, nbytes);
/* don't actually finish bio if it's part of flush sequence */
if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
bio_endio(bio);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 46 | 55.42% | 4 | 33.33% |
Tejun Heo | 11 | 13.25% | 2 | 16.67% |
Keith Mannthey | 9 | 10.84% | 1 | 8.33% |
Martin K. Petersen | 8 | 9.64% | 1 | 8.33% |
Christoph Hellwig | 6 | 7.23% | 2 | 16.67% |
Kent Overstreet | 3 | 3.61% | 2 | 16.67% |
Total | 83 | 100.00% | 12 | 100.00% |
void blk_dump_rq_flags(struct request *rq, char *msg)
{
printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
rq->rq_disk ? rq->rq_disk->disk_name : "?",
(unsigned long long) rq->cmd_flags);
printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
(unsigned long long)blk_rq_pos(rq),
blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
printk(KERN_INFO " bio %p, biotail %p, len %u\n",
rq->bio, rq->biotail, blk_rq_bytes(rq));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 62 | 70.45% | 8 | 57.14% |
Tejun Heo | 24 | 27.27% | 4 | 28.57% |
Kiyoshi Ueda | 1 | 1.14% | 1 | 7.14% |
Christoph Hellwig | 1 | 1.14% | 1 | 7.14% |
Total | 88 | 100.00% | 14 | 100.00% |
EXPORT_SYMBOL(blk_dump_rq_flags);
static void blk_delay_work(struct work_struct *work)
{
struct request_queue *q;
q = container_of(work, struct request_queue, delay_work.work);
spin_lock_irq(q->queue_lock);
__blk_run_queue(q);
spin_unlock_irq(q->queue_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 44 | 89.80% | 4 | 80.00% |
Tejun Heo | 5 | 10.20% | 1 | 20.00% |
Total | 49 | 100.00% | 5 | 100.00% |
/**
* blk_delay_queue - restart queueing after defined interval
* @q: The &struct request_queue in question
* @msecs: Delay in msecs
*
* Description:
* Sometimes queueing needs to be postponed for a little while, to allow
* resources to come back. This function will make sure that queueing is
* restarted around the specified time. Queue lock must be held.
*/
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
if (likely(!blk_queue_dead(q)))
queue_delayed_work(kblockd_workqueue, &q->delay_work,
msecs_to_jiffies(msecs));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 29 | 72.50% | 3 | 75.00% |
Bart Van Assche | 11 | 27.50% | 1 | 25.00% |
Total | 40 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(blk_delay_queue);
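A hedged usage sketch for blk_delay_queue(), with hypothetical mydev_* hooks: a legacy request_fn that runs out of a device resource requeues the request and asks for the queue to be re-run a few milliseconds later instead of busy-looping. The request_fn is entered with the queue lock held, as the kerneldoc above requires.

#include <linux/blkdev.h>

/* Hypothetical driver hooks; these names are assumptions, not kernel APIs. */
bool mydev_can_issue(void *priv);
void mydev_issue(void *priv, struct request *rq);

/* Legacy request_fn: called with the queue lock held. */
static void mydev_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (!mydev_can_issue(q->queuedata)) {
			/* out of resources: put the request back, retry in ~3ms */
			blk_requeue_request(q, rq);
			blk_delay_queue(q, 3);
			break;
		}
		mydev_issue(q->queuedata, rq);
	}
}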
/**
* blk_start_queue_async - asynchronously restart a previously stopped queue
* @q: The &struct request_queue in question
*
* Description:
* blk_start_queue_async() will clear the stop flag on the queue, and
* ensure that the request_fn for the queue is run from an async
* context.
**/
void blk_start_queue_async(struct request_queue *q)
{
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
blk_run_queue_async(q);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(blk_start_queue_async);
/**
* blk_start_queue - restart a previously stopped queue
* @q: The &struct request_queue in question
*
* Description:
* blk_start_queue() will clear the stop flag on the queue, and call
* the request_fn for the queue if it was in a stopped state when
* entered. Also see blk_stop_queue(). Queue lock must be held.
**/
void blk_start_queue(struct request_queue *q)
{
WARN_ON(!irqs_disabled());
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
__blk_run_queue(q);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 23 | 79.31% | 5 | 71.43% |
Tejun Heo | 6 | 20.69% | 2 | 28.57% |
Total | 29 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL(blk_start_queue);
/**
* blk_stop_queue - stop a queue
* @q: The &struct request_queue in question
*
* Description:
* The Linux block layer assumes that a block driver will consume all
* entries on the request queue when the request_fn strategy is called.
* Often this will not happen, because of hardware limitations (queue
* depth settings). If a device driver gets a 'queue full' response,
* or if it simply chooses not to queue more I/O at one point, it can
* call this function to prevent the request_fn from being called until
* the driver has signalled it's ready to go again. This happens by calling
* blk_start_queue() to restart queue operations. Queue lock must be held.
**/
void blk_stop_queue(struct request_queue *q)
{
cancel_delayed_work(&q->delay_work);
queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 21 | 84.00% | 4 | 50.00% |
Tejun Heo | 2 | 8.00% | 2 | 25.00% |
Andrew Morton | 1 | 4.00% | 1 | 12.50% |
Nicholas Piggin | 1 | 4.00% | 1 | 12.50% |
Total | 25 | 100.00% | 8 | 100.00% |
EXPORT_SYMBOL(blk_stop_queue);
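To make the 'queue full' protocol above concrete, a hedged sketch with hypothetical mydev_* names: the request_fn stops the queue when the hardware reports it is full, and the completion interrupt restarts it once a slot frees. blk_start_queue() is legal in the handler because the queue lock is held with interrupts disabled; a context that cannot guarantee that would use blk_start_queue_async() instead.

#include <linux/blkdev.h>
#include <linux/interrupt.h>

/* Hypothetical driver state and hooks; all mydev_* names are assumptions. */
struct mydev {
	struct request_queue *queue;
};
bool mydev_hw_queue_full(struct mydev *dev);
void mydev_issue(struct mydev *dev, struct request *rq);
struct request *mydev_pop_done(struct mydev *dev);

static void mydev_request_fn(struct request_queue *q)
{
	struct mydev *dev = q->queuedata;
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (mydev_hw_queue_full(dev)) {
			blk_requeue_request(q, rq);
			blk_stop_queue(q);	/* queue lock is already held */
			break;
		}
		mydev_issue(dev, rq);
	}
}

/* Completion IRQ: a device slot freed up, let the request_fn run again. */
static irqreturn_t mydev_irq(int irq, void *data)
{
	struct mydev *dev = data;
	unsigned long flags;

	spin_lock_irqsave(dev->queue->queue_lock, flags);
	__blk_end_request_all(mydev_pop_done(dev), 0);
	blk_start_queue(dev->queue);	/* lock held, interrupts disabled */
	spin_unlock_irqrestore(dev->queue->queue_lock, flags);
	return IRQ_HANDLED;
}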
/**
* blk_sync_queue - cancel any pending callbacks on a queue
* @q: the queue
*
* Description:
* The block layer may perform asynchronous callback activity
* on a queue, such as calling the unplug function after a timeout.
* A block device may call blk_sync_queue to ensure that any
* such activity is cancelled, thus allowing it to release resources
* that the callbacks might use. The caller must already have made sure
* that its ->make_request_fn will not re-add plugging prior to calling
* this function.
*
* This function does not cancel any asynchronous activity arising
* out of elevator or throttling code. That would require elevator_exit()
* and blkcg_exit_queue() to be called with queue lock initialized.
*
*/
void blk_sync_queue(struct request_queue *q)
{
del_timer_sync(&q->timeout);
if (q->mq_ops) {
struct blk_mq_hw_ctx *hctx;
int i;
queue_for_each_hw_ctx(q, hctx, i)
cancel_delayed_work_sync(&hctx->run_work);
} else {
cancel_delayed_work_sync(&q->delay_work);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 26 | 42.62% | 7 | 70.00% |
Ming Lei | 26 | 42.62% | 1 | 10.00% |
Christoph Hellwig | 8 | 13.11% | 1 | 10.00% |
Andrew Morton | 1 | 1.64% | 1 | 10.00% |
Total | 61 | 100.00% | 10 | 100.00% |
EXPORT_SYMBOL(blk_sync_queue);
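A hedged teardown sketch (hypothetical driver): after stopping new dispatch and quiescing the hardware, blk_sync_queue() makes sure neither the timeout timer nor the delayed run work can fire once the driver's own resources are gone.

#include <linux/blkdev.h>

/* Hypothetical driver state and quiesce hook; assumptions, not kernel APIs. */
struct mydev {
	struct request_queue *queue;
};
void mydev_quiesce_hw(struct mydev *dev);

static void mydev_shutdown(struct mydev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(dev->queue->queue_lock, flags);
	blk_stop_queue(dev->queue);	/* no further request_fn invocations */
	spin_unlock_irqrestore(dev->queue->queue_lock, flags);

	mydev_quiesce_hw(dev);		/* wait for commands already on the wire */
	blk_sync_queue(dev->queue);	/* cancel timeout timer and delay work */
}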
/**
* __blk_run_queue_uncond - run a queue whether or not it has been stopped
* @q: The queue to run
*
* Description:
* Invoke request handling on a queue if there are any pending requests.
* May be used to restart request handling after a request has completed.
* This variant runs the queue whether or not the queue has been
* stopped. Must be called with the queue lock held and interrupts
* disabled. See also @blk_run_queue.
*/
inline void __blk_run_queue_uncond(struct request_queue *q)
{
if (unlikely(blk_queue_dead(q)))
return;
/*
* Some request_fn implementations, e.g. scsi_request_fn(), unlock
* the queue lock internally. As a result multiple threads may be
* running such a request function concurrently. Keep track of the
* number of active request_fn invocations such that blk_drain_queue()
* can wait until all these request_fn calls have finished.
*/
q->request_fn_active++;
q->request_fn(q);
q->request_fn_active--;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Bart Van Assche | 40 | 100.00% | 2 | 100.00% |
Total | 40 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);
/**
* __blk_run_queue - run a single device queue
* @q: The queue to run
*
* Description:
* See @blk_run_queue. This variant must be called with the queue lock
* held and interrupts disabled.
*/
void __blk_run_queue(struct request_queue *q)
{
if (unlikely(blk_queue_stopped(q)))
return;
__blk_run_queue_uncond(q);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 15 | 57.69% | 4 | 50.00% |
Andrew Morton | 8 | 30.77% | 1 | 12.50% |
Nicholas Piggin | 1 | 3.85% | 1 | 12.50% |
Bart Van Assche | 1 | 3.85% | 1 | 12.50% |
Tejun Heo | 1 | 3.85% | 1 | 12.50% |
Total | 26 | 100.00% | 8 | 100.00% |
EXPORT_SYMBOL(__blk_run_queue);
/**
* blk_run_queue_async - run a single device queue in workqueue context
* @q: The queue to run
*
* Description:
* Tells kblockd to perform the equivalent of @blk_run_queue on behalf
* of us. The caller must hold the queue lock.
*/
void blk_run_queue_async(struct request_queue *q)
{
if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 32 | 82.05% | 1 | 33.33% |
Bart Van Assche | 6 | 15.38% | 1 | 33.33% |
Tejun Heo | 1 | 2.56% | 1 | 33.33% |
Total | 39 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(blk_run_queue_async);
/**
* blk_run_queue - run a single device queue
* @q: The queue to run
*
* Description:
* Invoke request handling on this queue, if it has pending work to do.
* May be used to restart queueing when a request has completed.
*/
void blk_run_queue(struct request_queue *q)
{
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
__blk_run_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 26 | 70.27% | 3 | 50.00% |
Nicholas Piggin | 9 | 24.32% | 1 | 16.67% |
Andrew Morton | 1 | 2.70% | 1 | 16.67% |
Tejun Heo | 1 | 2.70% | 1 | 16.67% |
Total | 37 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(blk_run_queue);
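A hedged sketch of the calling-context difference: blk_run_queue() takes the queue lock itself, so it fits process context such as a worker that has just replenished a driver resource, whereas blk_run_queue_async() is for callers that already hold the lock and want kblockd to do the run. The mydev_* names are assumptions.

#include <linux/blkdev.h>
#include <linux/workqueue.h>

/* Hypothetical: a worker replenishes a driver resource, then kicks the queue. */
struct mydev {
	struct request_queue *queue;
	struct work_struct replenish_work;
};
void mydev_refill_pool(struct mydev *dev);

static void mydev_replenish_work(struct work_struct *work)
{
	struct mydev *dev = container_of(work, struct mydev, replenish_work);

	mydev_refill_pool(dev);
	blk_run_queue(dev->queue);	/* takes queue_lock itself; process context */
}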
void blk_put_queue(struct request_queue *q)
{
kobject_put(&q->kobj);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 18 | 100.00% | 2 | 100.00% |
Total | 18 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(blk_put_queue);
/**
* __blk_drain_queue - drain requests from request_queue
* @q: queue to drain
* @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
*
* Drain requests from @q. If @drain_all is set, all requests are drained.
* If not, only ELVPRIV requests are drained. The caller is responsible
* for ensuring that no new requests which need to be drained are queued.
*/
static void __blk_drain_queue(struct request_queue *q, bool drain_all)
__releases(q->queue_lock)
__acquires(q->queue_lock)
{
int i;
lockdep_assert_held(q->queue_lock);
while (true) {
bool drain = false;
/*
* The caller might be trying to drain @q before its
* elevator is initialized.
*/
if (q->elevator)
elv_drain_elevator(q);
blkcg_drain_queue(q);
/*
* This function might be called on a queue which failed
* driver init after queue creation or is not yet fully
* active. Some drivers (e.g. fd and loop) get unhappy
* in such cases. Kick queue iff dispatch queue has
* something on it and @q has request_fn set.
*/
if (!list_empty(&q->queue_head) && q->request_fn)
__blk_run_queue(q);
drain |= q->nr_rqs_elvpriv;
drain |= q->request_fn_active;
/*
* Unfortunately, requests are queued at and tracked from
* multiple places and there's no single counter which can
* be drained. Check all the queues and counters.
*/
if (drain_all) {
struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
drain |= !list_empty(&q->queue_head);
for (i = 0; i < 2; i++) {
drain |= q->nr_rqs[i];
drain |= q->in_flight[i];
if (fq)
drain |= !list_empty(&fq->flush_queue[i]);
}
}
if (!drain)
break;
spin_unlock_irq(q->queue_lock);
msleep(10);
spin_lock_irq(q->queue_lock);
}
/*
* With queue marked dead, any woken up waiter will fail the
* allocation path, so the wakeup chaining is lost and we're
* left with hung waiters. We need to wake up those waiters.
*/
if (q->request_fn) {
struct request_list *rl;
blk_queue_for_each_rl(rl, q)
for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
wake_up_all(&rl->wait[i]);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 156 | 61.90% | 8 | 61.54% |
Bart Van Assche | 40 | 15.87% | 2 | 15.38% |
Asias He | 39 | 15.48% | 1 | 7.69% |
Ming Lei | 17 | 6.75% | 2 | 15.38% |
Total | 252 | 100.00% | 13 | 100.00% |
/**
* blk_queue_bypass_start - enter queue bypass mode
* @q: queue of interest
*
* In bypass mode, only the dispatch FIFO queue of @q is used. This
* function makes @q enter bypass mode and drains all requests which were
* throttled or issued before. On return, it's guaranteed that no request
* is being throttled or has ELVPRIV set and blk_queue_bypass() is %true
* inside queue or RCU read lock.
*/
void blk_queue_bypass_start(struct request_queue *q)
{
spin_lock_irq(q->queue_lock);
q->bypass_depth++;
queue_flag_set(QUEUE_FLAG_BYPASS, q);
spin_unlock_irq(q->queue_lock);
/*
* Queues start drained. Skip actual draining till init is
* complete. This avoids lengthy delays during queue init which
* can happen many times during boot.
*/
if (blk_queue_init_done(q)) {
spin_lock_irq(q->queue_lock);
__blk_drain_queue(q, false);
spin_unlock_irq(q->queue_lock);
/* ensure blk_queue_bypass() is %true inside RCU read lock */
synchronize_rcu();
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 56 | 78.87% | 4 | 80.00% |
Bart Van Assche | 15 | 21.13% | 1 | 20.00% |
Total | 71 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
/**
* blk_queue_bypass_end - leave queue bypass mode
* @q: queue of interest
*
* Leave bypass mode and restore the normal queueing behavior.
*/
void blk_queue_bypass_end(struct request_queue *q)
{
spin_lock_irq(q->queue_lock);
if (!--q->bypass_depth)
queue_flag_clear(QUEUE_FLAG_BYPASS, q);
WARN_ON_ONCE(q->bypass_depth < 0);
spin_unlock_irq(q->queue_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 48 | 100.00% | 1 | 100.00% |
Total | 48 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
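A hedged sketch of how the bypass pair is used in practice: the reconfiguration of a legacy (!mq) queue is bracketed by start/end so that no throttled or ELVPRIV request is in flight while the change happens. reconfigure_something() is a stand-in, not a kernel function.

#include <linux/blkdev.h>

/* Illustrative only: reconfigure_something() stands in for the kind of
 * change (elevator, blk-cgroup policy) that must not race with ELVPRIV
 * requests on a legacy queue. */
int reconfigure_something(struct request_queue *q);

static int mydev_reconfigure(struct request_queue *q)
{
	int ret;

	blk_queue_bypass_start(q);	/* drain throttled/ELVPRIV requests */
	ret = reconfigure_something(q);
	blk_queue_bypass_end(q);	/* restore normal queueing */

	return ret;
}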
void blk_set_queue_dying(struct request_queue *q)
{
spin_lock_irq(q->queue_lock);
queue_flag_set(QUEUE_FLAG_DYING, q);
spin_unlock_irq(q->queue_lock);
/*
* When queue DYING flag is set, we need to block new req
* entering queue, so we call blk_freeze_queue_start() to
* prevent I/O from crossing blk_queue_enter().
*/
blk_freeze_queue_start(q);
if (q->mq_ops)
blk_mq_wake_waiters(q);
else {
struct request_list *rl;
spin_lock_irq(q->queue_lock);
blk_queue_for_each_rl(rl, q) {
if (rl->rq_pool) {
wake_up(&rl->wait[BLK_RW_SYNC]);
wake_up(&rl->wait[BLK_RW_ASYNC]);
}
}
spin_unlock_irq(q->queue_lock);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 73 | 67.59% | 1 | 25.00% |
Bart Van Assche | 15 | 13.89% | 1 | 25.00% |
Tahsin Erdogan | 14 | 12.96% | 1 | 25.00% |
Ming Lei | 6 | 5.56% | 1 | 25.00% |
Total | 108 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL_GPL(blk_set_queue_dying);
/**
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
*
* Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
* put it. All future requests will be failed immediately with -ENODEV.
*/
void blk_cleanup_queue(struct request_queue *q)
{
spinlock_t *lock = q->queue_lock;
/* mark @q DYING, no new request or merges will be allowed afterwards */
mutex_lock(&q->sysfs_lock);
blk_set_queue_dying(q);
spin_lock_irq(lock);
/*
* A dying queue is permanently in bypass mode till released. Note
* that, unlike blk_queue_bypass_start(), we aren't performing
* synchronize_rcu() after entering bypass mode to avoid the delay
* as some drivers create and destroy a lot of queues while
* probing. This is still safe because blk_release_queue() will be
* called only after the queue refcnt drops to zero and nothing,
* RCU or not, would be traversing the queue by then.
*/
q->bypass_depth++;
queue_flag_set(QUEUE_FLAG_BYPASS, q);
queue_flag_set(QUEUE_FLAG_NOMERGES, q);
queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
queue_flag_set(QUEUE_FLAG_DYING, q);
spin_unlock_irq(lock);
mutex_unlock(&q->sysfs_lock);
/*
* Drain all requests queued before DYING marking. Set DEAD flag to
* prevent that q->request_fn() gets invoked after draining finished.
*/
blk_freeze_queue(q);
spin_lock_irq(lock);
if (!q->mq_ops)
__blk_drain_queue(q, true);
queue_flag_set(QUEUE_FLAG_DEAD, q);
spin_unlock_irq(lock);
/* for synchronous bio-based driver finish in-flight integrity i/o */
blk_flush_integrity();
/* @q won't process any more request, flush async actions */
del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
blk_sync_queue(q);
if (q->mq_ops)
blk_mq_free_queue(q);
percpu_ref_exit(&q->q_usage_counter);
spin_lock_irq(lock);
if (q->queue_lock != &q->__queue_lock)
q->queue_lock = &q->__queue_lock;
spin_unlock_irq(lock);
/* @q is and will stay empty, shutdown and put */
blk_put_queue(q);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 74 | 37.76% | 2 | 10.00% |
Asias He | 30 | 15.31% | 1 | 5.00% |
Bart Van Assche | 28 | 14.29% | 4 | 20.00% |
Jens Axboe | 23 | 11.73% | 4 | 20.00% |
Dan J Williams | 20 | 10.20% | 2 | 10.00% |
Andrew Morton | 6 | 3.06% | 2 | 10.00% |
Omar Sandoval | 5 | 2.55% | 1 | 5.00% |
Vivek Goyal | 4 | 2.04% | 1 | 5.00% |
Ming Lei | 4 | 2.04% | 1 | 5.00% |
Jan Kara | 1 | 0.51% | 1 | 5.00% |
Linus Torvalds | 1 | 0.51% | 1 | 5.00% |
Total | 196 | 100.00% | 20 | 100.00% |
EXPORT_SYMBOL(blk_cleanup_queue);
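A hedged removal-path sketch (hypothetical driver): blk_cleanup_queue() is normally the driver's last word on the queue, since it drops the reference taken at allocation time, so the driver must not touch the queue afterwards; the gendisk is torn down first so no new I/O arrives.

#include <linux/blkdev.h>
#include <linux/genhd.h>

/* Hypothetical driver removal; struct mydev and its fields are assumptions. */
struct mydev {
	struct gendisk *disk;
	struct request_queue *queue;
};

static void mydev_remove(struct mydev *dev)
{
	del_gendisk(dev->disk);		/* stop the block layer sending new I/O */
	blk_cleanup_queue(dev->queue);	/* drain, mark DEAD, drop the queue ref */
	dev->queue = NULL;
	put_disk(dev->disk);
}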
/* Allocate memory local to the request queue */
static void *alloc_request_simple(gfp_t gfp_mask, void *data)
{
struct request_queue *q = data;
return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Rientjes | 25 | 75.76% | 1 | 50.00% |
Christoph Hellwig | 8 | 24.24% | 1 | 50.00% |
Total | 33 | 100.00% | 2 | 100.00% |
static void free_request_simple(void *element, void *data)
{
kmem_cache_free(request_cachep, element);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Rientjes | 19 | 90.48% | 1 | 50.00% |
Christoph Hellwig | 2 | 9.52% | 1 | 50.00% |
Total | 21 | 100.00% | 2 | 100.00% |
static void *alloc_request_size(gfp_t gfp_mask, void *data)
{
struct request_queue *q = data;
struct request *rq;
rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask,
q->node);
if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) {
kfree(rq);
rq = NULL;
}
return rq;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 82 | 100.00% | 1 | 100.00% |
Total | 82 | 100.00% | 1 | 100.00% |
static void free_request_size(void *element, void *data)
{
struct request_queue *q = data;
if (q->exit_rq_fn)
q->exit_rq_fn(q, element);
kfree(element);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 41 | 100.00% | 1 | 100.00% |
Total | 41 | 100.00% | 1 | 100.00% |
int blk_init_rl(struct request_list *rl, struct request_queue *q,
gfp_t gfp_mask)
{
if (unlikely(rl->rq_pool))
return 0;
rl->q = q;
rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
if (q->cmd_size) {
rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
alloc_request_size, free_request_size,
q, gfp_mask, q->node);
} else {
rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
alloc_request_simple, free_request_simple,
q, gfp_mask, q->node);
}
if (!rl->rq_pool)
return -ENOMEM;
if (rl != &q->root_rl)
WARN_ON_ONCE(!blk_get_queue(q));
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 75 | 42.86% | 7 | 43.75% |
Tejun Heo | 33 | 18.86% | 3 | 18.75% |
Christoph Hellwig | 32 | 18.29% | 1 | 6.25% |
Bart Van Assche | 18 | 10.29% | 1 | 6.25% |
Martin Dalecki | 9 | 5.14% | 2 | 12.50% |
Mike Snitzer | 5 | 2.86% | 1 | 6.25% |
David Rientjes | 3 | 1.71% | 1 | 6.25% |
Total | 175 | 100.00% | 16 | 100.00% |
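A hedged sketch of what selects the alloc_request_size() path above: a legacy driver that wants per-request driver data sets q->cmd_size (and, if needed, init_rq_fn/exit_rq_fn) before calling blk_init_allocated_queue(), which reaches blk_init_rl() and therefore the sized mempool. The queue fields are real; the mydev_* names and the exact setup order are assumptions for illustration.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical per-request driver data; mydev_* names are assumptions. */
struct mydev_cmd {
	int tag;
};
void mydev_request_fn(struct request_queue *q);

static int mydev_init_rq(struct request_queue *q, struct request *rq,
			 gfp_t gfp)
{
	struct mydev_cmd *cmd = blk_mq_rq_to_pdu(rq);	/* data sits after *rq */

	cmd->tag = -1;
	return 0;
}

static struct request_queue *mydev_setup_queue(spinlock_t *lock)
{
	struct request_queue *q;

	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
	if (!q)
		return NULL;

	q->request_fn = mydev_request_fn;
	q->queue_lock = lock;
	q->cmd_size = sizeof(struct mydev_cmd);	/* selects alloc_request_size() */
	q->init_rq_fn = mydev_init_rq;

	if (blk_init_allocated_queue(q) < 0) {
		blk_cleanup_queue(q);
		return NULL;
	}
	return q;
}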
void blk_exit_rl(struct request_queue *q, struct request_list *rl)
{
if (rl->rq_pool) {
mempool_destroy(rl->rq_pool);
if (rl != &q->root_rl)
blk_put_queue(q);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 23 | 52.27% | 1 | 50.00% |
Bart Van Assche | 21 | 47.73% | 1 | 50.00% |
Total | 44 | 100.00% | 2 | 100.00% |
struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 11 | 61.11% | 1 | 25.00% |
Nicholas Piggin | 5 | 27.78% | 1 | 25.00% |
Ezequiel García | 1 | 5.56% | 1 | 25.00% |
Martin Dalecki | 1 | 5.56% | 1 | 25.00% |
Total | 18 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(blk_alloc_queue);
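For a purely bio-based driver the request machinery above is never used: blk_alloc_queue() plus a make_request_fn is enough. A hedged sketch with hypothetical names; the q->bio_split pool it passes to blk_queue_split() is the one set up in blk_alloc_queue_node() below.

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Hypothetical bio-based driver; mydev_handle_bio() is an assumption. */
struct mydev {
	struct request_queue *queue;
};
void mydev_handle_bio(struct mydev *dev, struct bio *bio);

static blk_qc_t mydev_make_request(struct request_queue *q, struct bio *bio)
{
	struct mydev *dev = q->queuedata;

	blk_queue_split(q, &bio, q->bio_split);	/* honour queue limits */
	mydev_handle_bio(dev, bio);
	return BLK_QC_T_NONE;
}

static int mydev_create_queue(struct mydev *dev)
{
	dev->queue = blk_alloc_queue(GFP_KERNEL);
	if (!dev->queue)
		return -ENOMEM;

	dev->queue->queuedata = dev;
	blk_queue_make_request(dev->queue, mydev_make_request);
	return 0;
}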
int blk_queue_enter(struct request_queue *q, bool nowait)
{
while (true) {
int ret;
if (percpu_ref_tryget_live(&q->q_usage_counter))
return 0;
if (nowait)
return -EBUSY;
/*
* read pair of barrier in blk_freeze_queue_start(),
* we need to order reading __PERCPU_REF_DEAD flag of
* .q_usage_counter and reading .mq_freeze_depth or
* queue dying flag, otherwise the following wait may
* never return if the two reads are reordered.
*/
smp_rmb();
ret = wait_event_interruptible(q->mq_freeze_wq,
!atomic_read(&q->mq_freeze_depth) ||
blk_queue_dying(q));
if (blk_queue_dying(q))
return -ENODEV;
if (ret)
return ret;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dan J Williams | 81 | 92.05% | 1 | 25.00% |
Ming Lei | 4 | 4.55% | 2 | 50.00% |
Christoph Hellwig | 3 | 3.41% | 1 | 25.00% |
Total | 88 | 100.00% | 4 | 100.00% |
void blk_queue_exit(struct request_queue *q)
{
percpu_ref_put(&q->q_usage_counter);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dan J Williams | 18 | 100.00% | 1 | 100.00% |
Total | 18 | 100.00% | 1 | 100.00% |
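A hedged sketch of the enter/exit pairing, roughly the shape of what generic_make_request() does around submission: every submitter takes a q_usage_counter reference for the duration of the submission so queue freezing and teardown can wait for it. do_submit() is a stand-in, not a kernel function.

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Illustrative only: do_submit() stands in for the real dispatch step. */
blk_qc_t do_submit(struct request_queue *q, struct bio *bio);

static blk_qc_t mydev_submit(struct request_queue *q, struct bio *bio)
{
	blk_qc_t ret;

	if (blk_queue_enter(q, false)) {	/* may block while frozen */
		bio_io_error(bio);		/* queue is dying: fail the bio */
		return BLK_QC_T_NONE;
	}

	ret = do_submit(q, bio);

	blk_queue_exit(q);			/* drop the q_usage_counter ref */
	return ret;
}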
static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
struct request_queue *q =
container_of(ref, struct request_queue, q_usage_counter);
wake_up_all(&q->mq_freeze_wq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dan J Williams | 34 | 100.00% | 1 | 100.00% |
Total | 34 | 100.00% | 1 | 100.00% |
static void blk_rq_timed_out_timer(unsigned long data)
{
struct request_queue *q = (struct request_queue *)data;
kblockd_schedule_work(&q->timeout_work);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 30 | 100.00% | 1 | 100.00% |
Total | 30 | 100.00% | 1 | 100.00% |
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
struct request_queue *q;
q = kmem_cache_alloc_node(blk_requestq_cachep,
gfp_mask | __GFP_ZERO, node_id);
if (!q)
return NULL;
q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
if (q->id < 0)
goto fail_q;
q->bio_split = bioset_create(BIO_POOL_SIZE, 0);
if (!q->bio_split)
goto fail_id;
q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
if (!q->backing_dev_info)
goto fail_split;
q->stats = blk_alloc_queue_stats();
if (!q->stats)
goto fail_stats;
q->backing_dev_info->ra_pages =
(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
q->backing_dev_info->name = "block";
q->node = node_id;
setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
laptop_mode_timer_fn, (unsigned long) q);
setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
INIT_LIST_HEAD(&q->queue_head);
INIT_LIST_HEAD(&q->timeout_list);
INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
INIT_LIST_HEAD(&q->blkg_list);
#endif
INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
kobject_init(&q->kobj, &blk_queue_ktype);
mutex_init(&q->sysfs_lock);
spin_lock_init(&q->__queue_lock);
/*
* By default initialize queue_lock to internal lock and driver can
* override it later if need be.
*/
q->queue_lock = &q->__queue_lock;
/*
* A queue starts its life with bypass turned on to avoid
* unnecessary bypass on/off overhead and nasty surprises during
* init. The initial bypass will be finished when the queue is
* registered by blk_register_queue().
*/
q->bypass_depth = 1;
__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
init_waitqueue_head(&q->mq_freeze_wq);
/*
* Init percpu_ref in atomic mode so that it's faster to shutdown.
* See blk_register_queue() for details.
*/
if (percpu_ref_init(&q->q_usage_counter,
blk_queue_usage_counter_release,
PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
goto fail_bdi;
if (blkcg_init_queue(q))