cregit-Linux: how code gets into the kernel

Release 4.18, block/blk-core.c (directory: block)
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *      - July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>


#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"

#ifdef CONFIG_DEBUG_FS

struct dentry *blk_debugfs_root;
#endif

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

/*
 * For the allocated request tables
 */

struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */

struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */

static struct workqueue_struct *kblockd_workqueue;
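
A hedged sketch of how kblockd is used: block-layer code defers work to this workqueue through helpers such as kblockd_schedule_work(), defined further down in this file. The my_deferred_fn/my_work names below are illustrative only, not part of blk-core.c.

/*
 * Illustrative sketch (not from this file): handing a work item to kblockd.
 * kblockd_schedule_work() is the real helper exported by blk-core.c;
 * my_deferred_fn and my_work are hypothetical names.
 */
static void my_deferred_fn(struct work_struct *work)
{
        /* runs later in kblockd_workqueue context */
}
static DECLARE_WORK(my_work, my_deferred_fn);

/* somewhere in block-layer code: kblockd_schedule_work(&my_work); */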

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */

void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        queue_flag_set(flag, q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Bart Van Assche            43       100.00%   1         100.00%
Total                      43       100.00%   1         100.00%

EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        queue_flag_clear(flag, q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Bart Van Assche            43       100.00%   1         100.00%
Total                      43       100.00%   1         100.00%

EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
        unsigned long flags;
        bool res;

        spin_lock_irqsave(q->queue_lock, flags);
        res = queue_flag_test_and_set(flag, q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        return res;
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Bart Van Assche            51       100.00%   1         100.00%
Total                      51       100.00%   1         100.00%

EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);

/**
 * blk_queue_flag_test_and_clear - atomically test and clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was set.
 */
bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q)
{
        unsigned long flags;
        bool res;

        spin_lock_irqsave(q->queue_lock, flags);
        res = queue_flag_test_and_clear(flag, q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        return res;
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Bart Van Assche            51       100.00%   1         100.00%
Total                      51       100.00%   1         100.00%

EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_clear);
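
As a hedged usage sketch (hypothetical driver code, not taken from this file), the four helpers above let callers toggle queue flags without open-coding the queue_lock dance:

/*
 * Illustrative only: QUEUE_FLAG_NOMERGES and QUEUE_FLAG_NONROT are real
 * queue flags; my_configure_queue is a hypothetical driver function.
 */
static void my_configure_queue(struct request_queue *q)
{
        blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);

        if (!blk_queue_flag_test_and_set(QUEUE_FLAG_NONROT, q))
                pr_debug("NONROT was not set before\n");

        blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
}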
static void blk_clear_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
        clear_wb_congested(rl->blkg->wb_congested, sync);
#else
        /*
         * If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't
         * flip its congestion state for events on other blkcgs.
         */
        if (rl == &rl->q->root_rl)
                clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Tejun Heo                  58       98.31%    2         66.67%
Jan Kara                   1        1.69%     1         33.33%
Total                      59       100.00%   3         100.00%


static void blk_set_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
        set_wb_congested(rl->blkg->wb_congested, sync);
#else
        /* see blk_clear_congested() */
        if (rl == &rl->q->root_rl)
                set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Tejun Heo                  58       98.31%    2         66.67%
Jan Kara                   1        1.69%     1         33.33%
Total                      59       100.00%   3         100.00%


void blk_queue_congestion_threshold(struct request_queue *q)
{
        int nr;

        nr = q->nr_requests - (q->nr_requests / 8) + 1;
        if (nr > q->nr_requests)
                nr = q->nr_requests;
        q->nr_congestion_on = nr;

        nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
        if (nr < 1)
                nr = 1;
        q->nr_congestion_off = nr;
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Andrew Morton              79       88.76%    4         66.67%
Jens Axboe                 10       11.24%    2         33.33%
Total                      89       100.00%   6         100.00%


void blk_rq_init(struct request_queue *q, struct request *rq)
{
        memset(rq, 0, sizeof(*rq));

        INIT_LIST_HEAD(&rq->queuelist);
        INIT_LIST_HEAD(&rq->timeout_list);
        rq->cpu = -1;
        rq->q = q;
        rq->__sector = (sector_t) -1;
        INIT_HLIST_NODE(&rq->hash);
        RB_CLEAR_NODE(&rq->rb_node);
        rq->tag = -1;
        rq->internal_tag = -1;
        rq->start_time_ns = ktime_get_ns();
        rq->part = NULL;
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Jens Axboe                 70       63.64%    9         50.00%
FUJITA Tomonori            14       12.73%    2         11.11%
Linus Torvalds             10       9.09%     1         5.56%
Jerome Marchand            6        5.45%     1         5.56%
Arnaldo Carvalho de Melo   4        3.64%     1         5.56%
Omar Sandoval              3        2.73%     1         5.56%
Tejun Heo                  2        1.82%     2         11.11%
Divyesh Shah               1        0.91%     1         5.56%
Total                      110      100.00%   18        100.00%

EXPORT_SYMBOL(blk_rq_init);

static const struct {
        int             errno;
        const char      *name;
} blk_errors[] = {
        [BLK_STS_OK]            = { 0,          "" },
        [BLK_STS_NOTSUPP]       = { -EOPNOTSUPP, "operation not supported" },
        [BLK_STS_TIMEOUT]       = { -ETIMEDOUT, "timeout" },
        [BLK_STS_NOSPC]         = { -ENOSPC,    "critical space allocation" },
        [BLK_STS_TRANSPORT]     = { -ENOLINK,   "recoverable transport" },
        [BLK_STS_TARGET]        = { -EREMOTEIO, "critical target" },
        [BLK_STS_NEXUS]         = { -EBADE,     "critical nexus" },
        [BLK_STS_MEDIUM]        = { -ENODATA,   "critical medium" },
        [BLK_STS_PROTECTION]    = { -EILSEQ,    "protection" },
        [BLK_STS_RESOURCE]      = { -ENOMEM,    "kernel resource" },
        [BLK_STS_DEV_RESOURCE]  = { -EBUSY,     "device resource" },
        [BLK_STS_AGAIN]         = { -EAGAIN,    "nonblocking retry" },

        /* device mapper special case, should not leak out: */
        [BLK_STS_DM_REQUEUE]    = { -EREMCHG,   "dm internal retry" },

        /* everything else not covered above: */
        [BLK_STS_IOERR]         = { -EIO,       "I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
                if (blk_errors[i].errno == errno)
                        return (__force blk_status_t)i;
        }

        return BLK_STS_IOERR;
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Christoph Hellwig          50       100.00%   1         100.00%
Total                      50       100.00%   1         100.00%

EXPORT_SYMBOL_GPL(errno_to_blk_status);
int blk_status_to_errno(blk_status_t status)
{
        int idx = (__force int)status;

        if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
                return -EIO;
        return blk_errors[idx].errno;
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Christoph Hellwig          40       97.56%    1         50.00%
Bart Van Assche            1        2.44%     1         50.00%
Total                      41       100.00%   2         100.00%

EXPORT_SYMBOL_GPL(blk_status_to_errno);
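
A brief hedged sketch of how the two converters above round-trip an error code (illustrative, not from this file; my_status_roundtrip is a hypothetical name):

/* Illustrative only: map an errno into blk_status_t and back. */
static int my_status_roundtrip(void)
{
        blk_status_t sts = errno_to_blk_status(-ENOSPC);   /* BLK_STS_NOSPC */

        return blk_status_to_errno(sts);                   /* -ENOSPC again */
}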
static void print_req_error(struct request *req, blk_status_t status)
{
        int idx = (__force int)status;

        if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
                return;

        printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
                           __func__, blk_errors[idx].name,
                           req->rq_disk ? req->rq_disk->disk_name : "?",
                           (unsigned long long)blk_rq_pos(req));
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Christoph Hellwig          72       98.63%    1         50.00%
Bart Van Assche            1        1.37%     1         50.00%
Total                      73       100.00%   2         100.00%


static void req_bio_endio(struct request *rq, struct bio *bio,
                          unsigned int nbytes, blk_status_t error)
{
        if (error)
                bio->bi_status = error;

        if (unlikely(rq->rq_flags & RQF_QUIET))
                bio_set_flag(bio, BIO_QUIET);

        bio_advance(bio, nbytes);

        /* don't actually finish bio if it's part of flush sequence */
        if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
                bio_endio(bio);
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Jens Axboe                 45       54.22%    4         28.57%
Tejun Heo                  11       13.25%    2         14.29%
Keith Mannthey             9        10.84%    1         7.14%
Martin K. Petersen         8        9.64%     1         7.14%
Christoph Hellwig          7        8.43%     4         28.57%
Kent Overstreet            3        3.61%     2         14.29%
Total                      83       100.00%   14        100.00%


void blk_dump_rq_flags(struct request *rq, char *msg)
{
        printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
                rq->rq_disk ? rq->rq_disk->disk_name : "?",
                (unsigned long long) rq->cmd_flags);

        printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
               (unsigned long long)blk_rq_pos(rq),
               blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
        printk(KERN_INFO " bio %p, biotail %p, len %u\n",
               rq->bio, rq->biotail, blk_rq_bytes(rq));
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Jens Axboe                 62       70.45%    8         57.14%
Tejun Heo                  24       27.27%    4         28.57%
Christoph Hellwig          1        1.14%     1         7.14%
Kiyoshi Ueda               1        1.14%     1         7.14%
Total                      88       100.00%   14        100.00%

EXPORT_SYMBOL(blk_dump_rq_flags);
static void blk_delay_work(struct work_struct *work)
{
        struct request_queue *q;

        q = container_of(work, struct request_queue, delay_work.work);
        spin_lock_irq(q->queue_lock);
        __blk_run_queue(q);
        spin_unlock_irq(q->queue_lock);
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Jens Axboe                 44       89.80%    4         80.00%
Tejun Heo                  5        10.20%    1         20.00%
Total                      49       100.00%   5         100.00%

/**
 * blk_delay_queue - restart queueing after defined interval
 * @q: The &struct request_queue in question
 * @msecs: Delay in msecs
 *
 * Description:
 *   Sometimes queueing needs to be postponed for a little while, to allow
 *   resources to come back. This function will make sure that queueing is
 *   restarted around the specified time.
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
        lockdep_assert_held(q->queue_lock);
        WARN_ON_ONCE(q->mq_ops);

        if (likely(!blk_queue_dead(q)))
                queue_delayed_work(kblockd_workqueue, &q->delay_work,
                                   msecs_to_jiffies(msecs));
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Jens Axboe                 29       53.70%    3         50.00%
Bart Van Assche            25       46.30%    3         50.00%
Total                      54       100.00%   6         100.00%

EXPORT_SYMBOL(blk_delay_queue);

/**
 * blk_start_queue_async - asynchronously restart a previously stopped queue
 * @q: The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue_async() will clear the stop flag on the queue, and
 *   ensure that the request_fn for the queue is run from an async
 *   context.
 **/
void blk_start_queue_async(struct request_queue *q)
{
        lockdep_assert_held(q->queue_lock);
        WARN_ON_ONCE(q->mq_ops);

        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
        blk_run_queue_async(q);
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Jens Axboe                 22       61.11%    1         33.33%
Bart Van Assche            14       38.89%    2         66.67%
Total                      36       100.00%   3         100.00%

EXPORT_SYMBOL(blk_start_queue_async);

/**
 * blk_start_queue - restart a previously stopped queue
 * @q: The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue().
 **/
void blk_start_queue(struct request_queue *q)
{
        lockdep_assert_held(q->queue_lock);
        WARN_ON_ONCE(q->mq_ops);

        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
        __blk_run_queue(q);
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Jens Axboe                 18       50.00%    5         55.56%
Bart Van Assche            14       38.89%    2         22.22%
Tejun Heo                  4        11.11%    2         22.22%
Total                      36       100.00%   9         100.00%

EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q: The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations.
 **/
void blk_stop_queue(struct request_queue *q)
{
        lockdep_assert_held(q->queue_lock);
        WARN_ON_ONCE(q->mq_ops);

        cancel_delayed_work(&q->delay_work);
        queue_flag_set(QUEUE_FLAG_STOPPED, q);
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Jens Axboe                 22       56.41%    5         50.00%
Bart Van Assche            14       35.90%    2         20.00%
Tejun Heo                  2        5.13%     2         20.00%
Nicholas Piggin            1        2.56%     1         10.00%
Total                      39       100.00%   10        100.00%

EXPORT_SYMBOL(blk_stop_queue);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *   The block layer may perform asynchronous callback activity
 *   on a queue, such as calling the unplug function after a timeout.
 *   A block device may call blk_sync_queue to ensure that any
 *   such activity is cancelled, thus allowing it to release resources
 *   that the callbacks might use. The caller must already have made sure
 *   that its ->make_request_fn will not re-add plugging prior to calling
 *   this function.
 *
 *   This function does not cancel any asynchronous activity arising
 *   out of elevator or throttling code. That would require elevator_exit()
 *   and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
        del_timer_sync(&q->timeout);
        cancel_work_sync(&q->timeout_work);

        if (q->mq_ops) {
                struct blk_mq_hw_ctx *hctx;
                int i;

                cancel_delayed_work_sync(&q->requeue_work);
                queue_for_each_hw_ctx(q, hctx, i)
                        cancel_delayed_work_sync(&hctx->run_work);
        } else {
                cancel_delayed_work_sync(&q->delay_work);
        }
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Jens Axboe                 26       33.77%    6         54.55%
Lei Ming                   26       33.77%    1         9.09%
Bart Van Assche            16       20.78%    2         18.18%
Christoph Hellwig          8        10.39%    1         9.09%
Brian King                 1        1.30%     1         9.09%
Total                      77       100.00%   11        100.00%

EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_preempt_only - set QUEUE_FLAG_PREEMPT_ONLY
 * @q: request queue pointer
 *
 * Returns the previous value of the PREEMPT_ONLY flag - 0 if the flag was not
 * set and 1 if the flag was already set.
 */
int blk_set_preempt_only(struct request_queue *q)
{
        return blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Bart Van Assche            18       100.00%   2         100.00%
Total                      18       100.00%   2         100.00%

EXPORT_SYMBOL_GPL(blk_set_preempt_only);
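
A hedged sketch of the PREEMPT_ONLY pattern this flag supports (the real callers live outside this file, e.g. in SCSI power management in this era); blk_clear_preempt_only(), defined just below, ends the window. The function name is hypothetical.

/* Illustrative only: admit only preempt requests for a while, then reopen. */
static void my_preempt_only_window(struct request_queue *q)
{
        if (blk_set_preempt_only(q))
                pr_debug("PREEMPT_ONLY was already set\n");

        /* ... issue preempt-only requests here ... */

        blk_clear_preempt_only(q);      /* also wakes tasks blocked in blk_queue_enter() */
}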
void blk_clear_preempt_only(struct request_queue *q)
{
        blk_queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
        wake_up_all(&q->mq_freeze_wq);
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Bart Van Assche            25       100.00%   3         100.00%
Total                      25       100.00%   3         100.00%

EXPORT_SYMBOL_GPL(blk_clear_preempt_only);

/**
 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
 * @q: The queue to run
 *
 * Description:
 *   Invoke request handling on a queue if there are any pending requests.
 *   May be used to restart request handling after a request has completed.
 *   This variant runs the queue whether or not the queue has been
 *   stopped. Must be called with the queue lock held and interrupts
 *   disabled. See also @blk_run_queue.
 */
inline void __blk_run_queue_uncond(struct request_queue *q)
{
        lockdep_assert_held(q->queue_lock);
        WARN_ON_ONCE(q->mq_ops);

        if (unlikely(blk_queue_dead(q)))
                return;

        /*
         * Some request_fn implementations, e.g. scsi_request_fn(), unlock
         * the queue lock internally. As a result multiple threads may be
         * running such a request function concurrently. Keep track of the
         * number of active request_fn invocations such that blk_drain_queue()
         * can wait until all these request_fn calls have finished.
         */
        q->request_fn_active++;
        q->request_fn(q);
        q->request_fn_active--;
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Bart Van Assche            54       100.00%   4         100.00%
Total                      54       100.00%   4         100.00%

EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);

/**
 * __blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *   See @blk_run_queue.
 */
void __blk_run_queue(struct request_queue *q)
{
        lockdep_assert_held(q->queue_lock);
        WARN_ON_ONCE(q->mq_ops);

        if (unlikely(blk_queue_stopped(q)))
                return;

        __blk_run_queue_uncond(q);
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Jens Axboe                 21       52.50%    5         45.45%
Bart Van Assche            15       37.50%    3         27.27%
Andrew Morton              2        5.00%     1         9.09%
Tejun Heo                  1        2.50%     1         9.09%
Nicholas Piggin            1        2.50%     1         9.09%
Total                      40       100.00%   11        100.00%

EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q: The queue to run
 *
 * Description:
 *   Tells kblockd to perform the equivalent of @blk_run_queue on behalf
 *   of us.
 *
 * Note:
 *   Since it is not allowed to run q->delay_work after blk_cleanup_queue()
 *   has canceled q->delay_work, callers must hold the queue lock to avoid
 *   race conditions between blk_cleanup_queue() and blk_run_queue_async().
 */
void blk_run_queue_async(struct request_queue *q)
{
        lockdep_assert_held(q->queue_lock);
        WARN_ON_ONCE(q->mq_ops);

        if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
                mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Christoph Hellwig          32       60.38%    1         20.00%
Bart Van Assche            20       37.74%    3         60.00%
Tejun Heo                  1        1.89%     1         20.00%
Total                      53       100.00%   5         100.00%

EXPORT_SYMBOL(blk_run_queue_async);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *   Invoke request handling on this queue, if it has pending work to do.
 *   May be used to restart queueing when a request has completed.
 */
void blk_run_queue(struct request_queue *q)
{
        unsigned long flags;

        WARN_ON_ONCE(q->mq_ops);

        spin_lock_irqsave(q->queue_lock, flags);
        __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Jens Axboe                 27       61.36%    3         50.00%
Nicholas Piggin            9        20.45%    1         16.67%
Bart Van Assche            7        15.91%    1         16.67%
Tejun Heo                  1        2.27%     1         16.67%
Total                      44       100.00%   6         100.00%

EXPORT_SYMBOL(blk_run_queue);
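
A hedged sketch of how a legacy (single-queue) driver pairs the stop/start/run helpers above (hypothetical driver code, not from this file):

/* Illustrative only: throttle the queue on "device full", kick it again later. */
static void my_device_full(struct request_queue *q)
{
        /* called from request_fn, so q->queue_lock is already held */
        blk_stop_queue(q);
}

static void my_completion_handler(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_start_queue(q);             /* clears STOPPED and runs request_fn */
        spin_unlock_irqrestore(q->queue_lock, flags);
}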
void blk_put_queue(struct request_queue *q)
{
        kobject_put(&q->kobj);
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Jens Axboe                 16       88.89%    4         80.00%
Linus Torvalds (pre-git)   2        11.11%    1         20.00%
Total                      18       100.00%   5         100.00%

EXPORT_SYMBOL(blk_put_queue);

/**
 * __blk_drain_queue - drain requests from request_queue
 * @q: queue to drain
 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 *
 * Drain requests from @q. If @drain_all is set, all requests are drained.
 * If not, only ELVPRIV requests are drained. The caller is responsible
 * for ensuring that no new requests which need to be drained are queued.
 */
static void __blk_drain_queue(struct request_queue *q, bool drain_all)
        __releases(q->queue_lock)
        __acquires(q->queue_lock)
{
        int i;

        lockdep_assert_held(q->queue_lock);
        WARN_ON_ONCE(q->mq_ops);

        while (true) {
                bool drain = false;

                /*
                 * The caller might be trying to drain @q before its
                 * elevator is initialized.
                 */
                if (q->elevator)
                        elv_drain_elevator(q);

                blkcg_drain_queue(q);

                /*
                 * This function might be called on a queue which failed
                 * driver init after queue creation or is not yet fully
                 * active yet. Some drivers (e.g. fd and loop) get unhappy
                 * in such cases. Kick queue iff dispatch queue has
                 * something on it and @q has request_fn set.
                 */
                if (!list_empty(&q->queue_head) && q->request_fn)
                        __blk_run_queue(q);

                drain |= q->nr_rqs_elvpriv;
                drain |= q->request_fn_active;

                /*
                 * Unfortunately, requests are queued at and tracked from
                 * multiple places and there's no single counter which can
                 * be drained. Check all the queues and counters.
                 */
                if (drain_all) {
                        struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

                        drain |= !list_empty(&q->queue_head);
                        for (i = 0; i < 2; i++) {
                                drain |= q->nr_rqs[i];
                                drain |= q->in_flight[i];
                                if (fq)
                                        drain |= !list_empty(&fq->flush_queue[i]);
                        }
                }

                if (!drain)
                        break;

                spin_unlock_irq(q->queue_lock);

                msleep(10);

                spin_lock_irq(q->queue_lock);
        }

        /*
         * With queue marked dead, any woken up waiter will fail the
         * allocation path, so the wakeup chaining is lost and we're
         * left with hung waiters. We need to wake up those waiters.
         */
        if (q->request_fn) {
                struct request_list *rl;

                blk_queue_for_each_rl(rl, q)
                        for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
                                wake_up_all(&rl->wait[i]);
        }
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Tejun Heo                  156      60.23%    8         57.14%
Bart Van Assche            47       18.15%    3         21.43%
Asias He                   39       15.06%    1         7.14%
Ming Lei                   17       6.56%     2         14.29%
Total                      259      100.00%   14        100.00%


void blk_drain_queue(struct request_queue *q)
{
        spin_lock_irq(q->queue_lock);
        __blk_drain_queue(q, true);
        spin_unlock_irq(q->queue_lock);
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Ming Lei                   31       100.00%   1         100.00%
Total                      31       100.00%   1         100.00%

/**
 * blk_queue_bypass_start - enter queue bypass mode
 * @q: queue of interest
 *
 * In bypass mode, only the dispatch FIFO queue of @q is used. This
 * function makes @q enter bypass mode and drains all requests which were
 * throttled or issued before. On return, it's guaranteed that no request
 * is being throttled or has ELVPRIV set and blk_queue_bypass() %true
 * inside queue or RCU read lock.
 */
void blk_queue_bypass_start(struct request_queue *q)
{
        WARN_ON_ONCE(q->mq_ops);

        spin_lock_irq(q->queue_lock);
        q->bypass_depth++;
        queue_flag_set(QUEUE_FLAG_BYPASS, q);
        spin_unlock_irq(q->queue_lock);

        /*
         * Queues start drained. Skip actual draining till init is
         * complete. This avoids lengthy delays during queue init which
         * can happen many times during boot.
         */
        if (blk_queue_init_done(q)) {
                spin_lock_irq(q->queue_lock);
                __blk_drain_queue(q, false);
                spin_unlock_irq(q->queue_lock);

                /* ensure blk_queue_bypass() is %true inside RCU read lock */
                synchronize_rcu();
        }
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Tejun Heo                  56       71.79%    4         66.67%
Bart Van Assche            22       28.21%    2         33.33%
Total                      78       100.00%   6         100.00%

EXPORT_SYMBOL_GPL(blk_queue_bypass_start);

/**
 * blk_queue_bypass_end - leave queue bypass mode
 * @q: queue of interest
 *
 * Leave bypass mode and restore the normal queueing behavior.
 *
 * Note: although blk_queue_bypass_start() is only called for blk-sq queues,
 * this function is called for both blk-sq and blk-mq queues.
 */
void blk_queue_bypass_end(struct request_queue *q)
{
        spin_lock_irq(q->queue_lock);
        if (!--q->bypass_depth)
                queue_flag_clear(QUEUE_FLAG_BYPASS, q);
        WARN_ON_ONCE(q->bypass_depth < 0);
        spin_unlock_irq(q->queue_lock);
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Tejun Heo                  48       100.00%   1         100.00%
Total                      48       100.00%   1         100.00%

EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
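
For context, a hedged sketch of how bypass mode is used as a matched pair elsewhere in the block layer (for example around elevator or blkcg policy reconfiguration); the surrounding name is illustrative:

/* Illustrative only: quiesce the elevator path, reconfigure, then restore. */
static void my_reconfigure(struct request_queue *q)
{
        blk_queue_bypass_start(q);      /* drains ELVPRIV requests, sets BYPASS */

        /* ... switch elevator / update policy data here ... */

        blk_queue_bypass_end(q);        /* restores normal queueing */
}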
void blk_set_queue_dying(struct request_queue *q)
{
        blk_queue_flag_set(QUEUE_FLAG_DYING, q);

        /*
         * When queue DYING flag is set, we need to block new req
         * entering queue, so we call blk_freeze_queue_start() to
         * prevent I/O from crossing blk_queue_enter().
         */
        blk_freeze_queue_start(q);

        if (q->mq_ops)
                blk_mq_wake_waiters(q);
        else {
                struct request_list *rl;

                spin_lock_irq(q->queue_lock);
                blk_queue_for_each_rl(rl, q) {
                        if (rl->rq_pool) {
                                wake_up_all(&rl->wait[BLK_RW_SYNC]);
                                wake_up_all(&rl->wait[BLK_RW_ASYNC]);
                        }
                }
                spin_unlock_irq(q->queue_lock);
        }

        /* Make blk_queue_enter() reexamine the DYING flag. */
        wake_up_all(&q->mq_freeze_wq);
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Jens Axboe                 71       68.93%    1         16.67%
Tahsin Erdogan             14       13.59%    1         16.67%
Ming Lei                   11       10.68%    2         33.33%
Lei Ming                   6        5.83%     1         16.67%
Bart Van Assche            1        0.97%     1         16.67%
Total                      103      100.00%   6         100.00%

EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it. All future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
        spinlock_t *lock = q->queue_lock;

        /* mark @q DYING, no new request or merges will be allowed afterwards */
        mutex_lock(&q->sysfs_lock);
        blk_set_queue_dying(q);
        spin_lock_irq(lock);

        /*
         * A dying queue is permanently in bypass mode till released. Note
         * that, unlike blk_queue_bypass_start(), we aren't performing
         * synchronize_rcu() after entering bypass mode to avoid the delay
         * as some drivers create and destroy a lot of queues while
         * probing. This is still safe because blk_release_queue() will be
         * called only after the queue refcnt drops to zero and nothing,
         * RCU or not, would be traversing the queue by then.
         */
        q->bypass_depth++;
        queue_flag_set(QUEUE_FLAG_BYPASS, q);

        queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
        queue_flag_set(QUEUE_FLAG_DYING, q);
        spin_unlock_irq(lock);
        mutex_unlock(&q->sysfs_lock);

        /*
         * Drain all requests queued before DYING marking. Set DEAD flag to
         * prevent that q->request_fn() gets invoked after draining finished.
         */
        blk_freeze_queue(q);
        spin_lock_irq(lock);
        queue_flag_set(QUEUE_FLAG_DEAD, q);
        spin_unlock_irq(lock);

        /*
         * make sure all in-progress dispatch are completed because
         * blk_freeze_queue() can only complete all requests, and
         * dispatch may still be in-progress since we dispatch requests
         * from more than one contexts
         */
        if (q->mq_ops)
                blk_mq_quiesce_queue(q);

        /* for synchronous bio-based driver finish in-flight integrity i/o */
        blk_flush_integrity();

        /* @q won't process any more request, flush async actions */
        del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
        blk_sync_queue(q);

        /*
         * I/O scheduler exit is only safe after the sysfs scheduler attribute
         * has been removed.
         */
        WARN_ON_ONCE(q->kobj.state_in_sysfs);

        /*
         * Since the I/O scheduler exit code may access cgroup information,
         * perform I/O scheduler exit before disassociating from the block
         * cgroup controller.
         */
        if (q->elevator) {
                ioc_clear_queue(q);
                elevator_exit(q, q->elevator);
                q->elevator = NULL;
        }

        /*
         * Remove all references to @q from the block cgroup controller before
         * restoring @q->queue_lock to avoid that restoring this pointer causes
         * e.g. blkcg_print_blkgs() to crash.
         */
        blkcg_exit_queue(q);

        /*
         * Since the cgroup code may dereference the @q->backing_dev_info
         * pointer, only decrease its reference count after having removed the
         * association with the block cgroup controller.
         */
        bdi_put(q->backing_dev_info);

        if (q->mq_ops)
                blk_mq_free_queue(q);
        percpu_ref_exit(&q->q_usage_counter);

        spin_lock_irq(lock);
        if (q->queue_lock != &q->__queue_lock)
                q->queue_lock = &q->__queue_lock;
        spin_unlock_irq(lock);

        /* @q is and will stay empty, shutdown and put */
        blk_put_queue(q);
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
Bart Van Assche            76       30.77%    5         21.74%
Tejun Heo                  73       29.55%    3         13.04%
Asias He                   30       12.15%    1         4.35%
Jens Axboe                 27       10.93%    5         21.74%
Dan J Williams             13       5.26%     2         8.70%
Ming Lei                   12       4.86%     1         4.35%
Omar Sandoval              5        2.02%     1         4.35%
Lei Ming                   4        1.62%     1         4.35%
Vivek Goyal                4        1.62%     1         4.35%
Andrew Morton              2        0.81%     2         8.70%
Jan Kara                   1        0.40%     1         4.35%
Total                      247      100.00%   23        100.00%

EXPORT_SYMBOL(blk_cleanup_queue);

/* Allocate memory local to the request queue */
static void *alloc_request_simple(gfp_t gfp_mask, void *data)
{
        struct request_queue *q = data;

        return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
David Rientjes             25       75.76%    1         50.00%
Christoph Hellwig          8        24.24%    1         50.00%
Total                      33       100.00%   2         100.00%


static void free_request_simple(void *element, void *data)
{
        kmem_cache_free(request_cachep, element);
}

Contributors

Person                     Tokens   Prop      Commits   CommitProp
David Rientjes             19       90.48%    1         50.00%
Christoph Hellwig          2        9.52%     1         50.00%
Total                      21       100.00%   2         100.00%


static void *alloc_request_size(gfp_t gfp_mask, void *data)
{
        struct request_queue *q = data;
        struct request *rq