cregit-Linux how code gets into the kernel

Release 4.12 block/blk-core.c

/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *      -  July2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>


#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"

#ifdef CONFIG_DEBUG_FS

struct dentry *blk_debugfs_root;
#endif


EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);

EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);

EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);

EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);


DEFINE_IDA(blk_queue_ida);

/*
 * For the allocated request tables
 */

struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */

struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */

static struct workqueue_struct *kblockd_workqueue;


static void blk_clear_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
        clear_wb_congested(rl->blkg->wb_congested, sync);
#else
        /*
         * If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't
         * flip its congestion state for events on other blkcgs.
         */
        if (rl == &rl->q->root_rl)
                clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Tejun Heo                   58      98.31%   2        66.67%
Jan Kara                    1       1.69%    1        33.33%
Total                       59      100.00%  3        100.00%


static void blk_set_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
        set_wb_congested(rl->blkg->wb_congested, sync);
#else
        /* see blk_clear_congested() */
        if (rl == &rl->q->root_rl)
                set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Tejun Heo                   58      98.31%   2        66.67%
Jan Kara                    1       1.69%    1        33.33%
Total                       59      100.00%  3        100.00%


void blk_queue_congestion_threshold(struct request_queue *q)
{
        int nr;

        nr = q->nr_requests - (q->nr_requests / 8) + 1;
        if (nr > q->nr_requests)
                nr = q->nr_requests;
        q->nr_congestion_on = nr;

        nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
        if (nr < 1)
                nr = 1;
        q->nr_congestion_off = nr;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               79      88.76%   4        66.67%
Jens Axboe                  10      11.24%   2        33.33%
Total                       89      100.00%  6        100.00%
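
As a worked example of the thresholds above: with the default nr_requests of 128 (BLKDEV_MAX_RQ), nr_congestion_on becomes 128 - 16 + 1 = 113 and nr_congestion_off becomes 128 - 16 - 8 - 1 = 103, so the queue is flagged congested as it approaches full and only cleared once it has drained well below that point.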


void blk_rq_init(struct request_queue *q, struct request *rq)
{
        memset(rq, 0, sizeof(*rq));

        INIT_LIST_HEAD(&rq->queuelist);
        INIT_LIST_HEAD(&rq->timeout_list);
        rq->cpu = -1;
        rq->q = q;
        rq->__sector = (sector_t) -1;
        INIT_HLIST_NODE(&rq->hash);
        RB_CLEAR_NODE(&rq->rb_node);
        rq->tag = -1;
        rq->internal_tag = -1;
        rq->start_time = jiffies;
        set_start_time_ns(rq);
        rq->part = NULL;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Jens Axboe                  70      61.40%   9        52.94%
FUJITA Tomonori             14      12.28%   2        11.76%
Linus Torvalds              10      8.77%    1        5.88%
Jerome Marchand             6       5.26%    1        5.88%
Divyesh Shah                5       4.39%    1        5.88%
Tejun Heo                   5       4.39%    2        11.76%
Arnaldo Carvalho de Melo    4       3.51%    1        5.88%
Total                       114     100.00%  17       100.00%

EXPORT_SYMBOL(blk_rq_init);
static void req_bio_endio(struct request *rq, struct bio *bio,
                          unsigned int nbytes, int error)
{
        if (error)
                bio->bi_error = error;

        if (unlikely(rq->rq_flags & RQF_QUIET))
                bio_set_flag(bio, BIO_QUIET);

        bio_advance(bio, nbytes);

        /* don't actually finish bio if it's part of flush sequence */
        if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
                bio_endio(bio);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Jens Axboe                  46      55.42%   4        33.33%
Tejun Heo                   11      13.25%   2        16.67%
Keith Mannthey              9       10.84%   1        8.33%
Martin K. Petersen          8       9.64%    1        8.33%
Christoph Hellwig           6       7.23%    2        16.67%
Kent Overstreet             3       3.61%    2        16.67%
Total                       83      100.00%  12       100.00%


void blk_dump_rq_flags(struct request *rq, char *msg)
{
        printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
                rq->rq_disk ? rq->rq_disk->disk_name : "?",
                (unsigned long long) rq->cmd_flags);

        printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
                (unsigned long long)blk_rq_pos(rq),
                blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
        printk(KERN_INFO " bio %p, biotail %p, len %u\n",
                rq->bio, rq->biotail, blk_rq_bytes(rq));
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Jens Axboe                  62      70.45%   8        57.14%
Tejun Heo                   24      27.27%   4        28.57%
Kiyoshi Ueda                1       1.14%    1        7.14%
Christoph Hellwig           1       1.14%    1        7.14%
Total                       88      100.00%  14       100.00%

EXPORT_SYMBOL(blk_dump_rq_flags);
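
As a usage illustration only (not part of this file), a driver's error path might dump a misbehaving request before deciding how to handle it. The my_driver_timeout() wrapper below is hypothetical; only the blk_dump_rq_flags() call shape is taken from the code above.

/* Hypothetical sketch: log a request from a driver's timeout handler. */
static enum blk_eh_timer_return my_driver_timeout(struct request *rq)
{
        blk_dump_rq_flags(rq, "my_driver: request timed out");
        return BLK_EH_RESET_TIMER;      /* give the hardware another chance */
}
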
static void blk_delay_work(struct work_struct *work)
{
        struct request_queue *q;

        q = container_of(work, struct request_queue, delay_work.work);
        spin_lock_irq(q->queue_lock);
        __blk_run_queue(q);
        spin_unlock_irq(q->queue_lock);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Jens Axboe                  44      89.80%   4        80.00%
Tejun Heo                   5       10.20%   1        20.00%
Total                       49      100.00%  5        100.00%

/**
 * blk_delay_queue - restart queueing after defined interval
 * @q:		The &struct request_queue in question
 * @msecs:	Delay in msecs
 *
 * Description:
 *   Sometimes queueing needs to be postponed for a little while, to allow
 *   resources to come back. This function will make sure that queueing is
 *   restarted around the specified time. Queue lock must be held.
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
        if (likely(!blk_queue_dead(q)))
                queue_delayed_work(kblockd_workqueue, &q->delay_work,
                                   msecs_to_jiffies(msecs));
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Jens Axboe                  29      72.50%   3        75.00%
Bart Van Assche             11      27.50%   1        25.00%
Total                       40      100.00%  4        100.00%
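
As a minimal sketch of the pattern described in the kernel-doc above: a request_fn that temporarily runs out of device resources can back off with blk_delay_queue() instead of busy-looping. The my_dev, my_dev_has_resources() and my_dev_issue() names below are hypothetical stand-ins for a real driver; request_fn is invoked with q->queue_lock held, which satisfies the "queue lock must be held" requirement.

/* Hypothetical sketch: defer queue processing when resources run out. */
static void my_request_fn(struct request_queue *q)
{
        struct my_dev *dev = q->queuedata;
        struct request *rq;

        while ((rq = blk_peek_request(q)) != NULL) {
                if (!my_dev_has_resources(dev)) {
                        /* back off; kblockd re-runs the queue in ~5 ms */
                        blk_delay_queue(q, 5);
                        return;
                }
                blk_start_request(rq);  /* dequeue and start the request */
                my_dev_issue(dev, rq);
        }
}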

EXPORT_SYMBOL(blk_delay_queue);

/**
 * blk_start_queue_async - asynchronously restart a previously stopped queue
 * @q: The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue_async() will clear the stop flag on the queue, and
 *   ensure that the request_fn for the queue is run from an async
 *   context.
 **/
void blk_start_queue_async(struct request_queue *q)
{
        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
        blk_run_queue_async(q);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Jens Axboe                  22      100.00%  1        100.00%
Total                       22      100.00%  1        100.00%

EXPORT_SYMBOL(blk_start_queue_async);

/**
 * blk_start_queue - restart a previously stopped queue
 * @q: The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue(). Queue lock must be held.
 **/
void blk_start_queue(struct request_queue *q)
{
        WARN_ON(!irqs_disabled());

        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
        __blk_run_queue(q);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Jens Axboe                  23      79.31%   5        71.43%
Tejun Heo                   6       20.69%   2        28.57%
Total                       29      100.00%  7        100.00%

EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q: The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(struct request_queue *q)
{
        cancel_delayed_work(&q->delay_work);
        queue_flag_set(QUEUE_FLAG_STOPPED, q);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Jens Axboe                  21      84.00%   4        50.00%
Tejun Heo                   2       8.00%    2        25.00%
Andrew Morton               1       4.00%    1        12.50%
Nicholas Piggin             1       4.00%    1        12.50%
Total                       25      100.00%  8        100.00%
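
The following sketch illustrates the 'queue full' scenario from the blk_stop_queue() kernel-doc: stop the queue when the hardware cannot take more work, then restart it from the completion path. It is hypothetical and not part of this file; my_hw_queue_full(), my_hw_submit() and my_complete_one() stand in for driver code. request_fn already runs with q->queue_lock held, and the completion helper takes the same lock with interrupts disabled, which blk_start_queue() asserts.

/* Hypothetical sketch: stop on "queue full", restart on completion. */
static void my_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_fetch_request(q)) != NULL) {
                if (my_hw_queue_full(q->queuedata)) {
                        /* put the request back and wait for completions */
                        blk_requeue_request(q, rq);
                        blk_stop_queue(q);
                        return;
                }
                my_hw_submit(q->queuedata, rq);
        }
}

static void my_complete_one(struct request_queue *q, struct request *rq)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __blk_end_request_all(rq, 0);
        blk_start_queue(q);             /* clear STOPPED and run request_fn again */
        spin_unlock_irqrestore(q->queue_lock, flags);
}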

EXPORT_SYMBOL(blk_stop_queue);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *   The block layer may perform asynchronous callback activity
 *   on a queue, such as calling the unplug function after a timeout.
 *   A block device may call blk_sync_queue to ensure that any
 *   such activity is cancelled, thus allowing it to release resources
 *   that the callbacks might use. The caller must already have made sure
 *   that its ->make_request_fn will not re-add plugging prior to calling
 *   this function.
 *
 *   This function does not cancel any asynchronous activity arising
 *   out of elevator or throttling code. That would require elevator_exit()
 *   and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
        del_timer_sync(&q->timeout);

        if (q->mq_ops) {
                struct blk_mq_hw_ctx *hctx;
                int i;

                queue_for_each_hw_ctx(q, hctx, i)
                        cancel_delayed_work_sync(&hctx->run_work);
        } else {
                cancel_delayed_work_sync(&q->delay_work);
        }
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Jens Axboe                  26      42.62%   7        70.00%
Lei Ming                    26      42.62%   1        10.00%
Christoph Hellwig           8       13.11%   1        10.00%
Andrew Morton               1       1.64%    1        10.00%
Total                       61      100.00%  10       100.00%

EXPORT_SYMBOL(blk_sync_queue);

/**
 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
 * @q:	The queue to run
 *
 * Description:
 *   Invoke request handling on a queue if there are any pending requests.
 *   May be used to restart request handling after a request has completed.
 *   This variant runs the queue whether or not the queue has been
 *   stopped. Must be called with the queue lock held and interrupts
 *   disabled. See also @blk_run_queue.
 */
inline void __blk_run_queue_uncond(struct request_queue *q)
{
        if (unlikely(blk_queue_dead(q)))
                return;

        /*
         * Some request_fn implementations, e.g. scsi_request_fn(), unlock
         * the queue lock internally. As a result multiple threads may be
         * running such a request function concurrently. Keep track of the
         * number of active request_fn invocations such that blk_drain_queue()
         * can wait until all these request_fn calls have finished.
         */
        q->request_fn_active++;
        q->request_fn(q);
        q->request_fn_active--;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Bart Van Assche             40      100.00%  2        100.00%
Total                       40      100.00%  2        100.00%

EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);

/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *   See @blk_run_queue. This variant must be called with the queue lock
 *   held and interrupts disabled.
 */
void __blk_run_queue(struct request_queue *q)
{
        if (unlikely(blk_queue_stopped(q)))
                return;

        __blk_run_queue_uncond(q);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Jens Axboe                  15      57.69%   4        50.00%
Andrew Morton               8       30.77%   1        12.50%
Nicholas Piggin             1       3.85%    1        12.50%
Bart Van Assche             1       3.85%    1        12.50%
Tejun Heo                   1       3.85%    1        12.50%
Total                       26      100.00%  8        100.00%

EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q:	The queue to run
 *
 * Description:
 *   Tells kblockd to perform the equivalent of @blk_run_queue on behalf
 *   of us. The caller must hold the queue lock.
 */
void blk_run_queue_async(struct request_queue *q)
{
        if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
                mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Christoph Hellwig           32      82.05%   1        33.33%
Bart Van Assche             6       15.38%   1        33.33%
Tejun Heo                   1       2.56%    1        33.33%
Total                       39      100.00%  3        100.00%

EXPORT_SYMBOL(blk_run_queue_async);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *   Invoke request handling on this queue, if it has pending work to do.
 *   May be used to restart queueing when a request has completed.
 */
void blk_run_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Jens Axboe                  26      70.27%   3        50.00%
Nicholas Piggin             9       24.32%   1        16.67%
Andrew Morton               1       2.70%    1        16.67%
Tejun Heo                   1       2.70%    1        16.67%
Total                       37      100.00%  6        100.00%
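
A short hypothetical sketch of the locking difference: blk_run_queue() takes q->queue_lock itself, so it is the variant to call when the lock is not already held (for example from process context once driver resources are replenished); __blk_run_queue() is the one to use when the caller already holds the lock. The my_dev structure and its ->queue field are assumptions, not part of this file.

/* Hypothetical sketch: kick dispatch again once resources are back. */
static void my_resources_replenished(struct my_dev *dev)
{
        blk_run_queue(dev->queue);      /* lock not held here; helper takes it */
}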

EXPORT_SYMBOL(blk_run_queue);
void blk_put_queue(struct request_queue *q)
{
        kobject_put(&q->kobj);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Jens Axboe                  18      100.00%  2        100.00%
Total                       18      100.00%  2        100.00%

EXPORT_SYMBOL(blk_put_queue);

/**
 * __blk_drain_queue - drain requests from request_queue
 * @q: queue to drain
 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 *
 * Drain requests from @q. If @drain_all is set, all requests are drained.
 * If not, only ELVPRIV requests are drained. The caller is responsible
 * for ensuring that no new requests which need to be drained are queued.
 */
static void __blk_drain_queue(struct request_queue *q, bool drain_all)
        __releases(q->queue_lock)
        __acquires(q->queue_lock)
{
        int i;

        lockdep_assert_held(q->queue_lock);

        while (true) {
                bool drain = false;

                /*
                 * The caller might be trying to drain @q before its
                 * elevator is initialized.
                 */
                if (q->elevator)
                        elv_drain_elevator(q);

                blkcg_drain_queue(q);

                /*
                 * This function might be called on a queue which failed
                 * driver init after queue creation or is not yet fully
                 * active yet. Some drivers (e.g. fd and loop) get unhappy
                 * in such cases. Kick queue iff dispatch queue has
                 * something on it and @q has request_fn set.
                 */
                if (!list_empty(&q->queue_head) && q->request_fn)
                        __blk_run_queue(q);

                drain |= q->nr_rqs_elvpriv;
                drain |= q->request_fn_active;

                /*
                 * Unfortunately, requests are queued at and tracked from
                 * multiple places and there's no single counter which can
                 * be drained. Check all the queues and counters.
                 */
                if (drain_all) {
                        struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
                        drain |= !list_empty(&q->queue_head);
                        for (i = 0; i < 2; i++) {
                                drain |= q->nr_rqs[i];
                                drain |= q->in_flight[i];
                                if (fq)
                                        drain |= !list_empty(&fq->flush_queue[i]);
                        }
                }

                if (!drain)
                        break;

                spin_unlock_irq(q->queue_lock);

                msleep(10);

                spin_lock_irq(q->queue_lock);
        }

        /*
         * With queue marked dead, any woken up waiter will fail the
         * allocation path, so the wakeup chaining is lost and we're
         * left with hung waiters. We need to wake up those waiters.
         */
        if (q->request_fn) {
                struct request_list *rl;

                blk_queue_for_each_rl(rl, q)
                        for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
                                wake_up_all(&rl->wait[i]);
        }
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Tejun Heo                   156     61.90%   8        61.54%
Bart Van Assche             40      15.87%   2        15.38%
Asias He                    39      15.48%   1        7.69%
Ming Lei                    17      6.75%    2        15.38%
Total                       252     100.00%  13       100.00%

/**
 * blk_queue_bypass_start - enter queue bypass mode
 * @q: queue of interest
 *
 * In bypass mode, only the dispatch FIFO queue of @q is used. This
 * function makes @q enter bypass mode and drains all requests which were
 * throttled or issued before. On return, it's guaranteed that no request
 * is being throttled or has ELVPRIV set and blk_queue_bypass() %true
 * inside queue or RCU read lock.
 */
void blk_queue_bypass_start(struct request_queue *q)
{
        spin_lock_irq(q->queue_lock);
        q->bypass_depth++;
        queue_flag_set(QUEUE_FLAG_BYPASS, q);
        spin_unlock_irq(q->queue_lock);

        /*
         * Queues start drained. Skip actual draining till init is
         * complete. This avoids lenghty delays during queue init which
         * can happen many times during boot.
         */
        if (blk_queue_init_done(q)) {
                spin_lock_irq(q->queue_lock);
                __blk_drain_queue(q, false);
                spin_unlock_irq(q->queue_lock);

                /* ensure blk_queue_bypass() is %true inside RCU read lock */
                synchronize_rcu();
        }
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Tejun Heo                   56      78.87%   4        80.00%
Bart Van Assche             15      21.13%   1        20.00%
Total                       71      100.00%  5        100.00%

EXPORT_SYMBOL_GPL(blk_queue_bypass_start);

/**
 * blk_queue_bypass_end - leave queue bypass mode
 * @q: queue of interest
 *
 * Leave bypass mode and restore the normal queueing behavior.
 */
void blk_queue_bypass_end(struct request_queue *q)
{
        spin_lock_irq(q->queue_lock);
        if (!--q->bypass_depth)
                queue_flag_clear(QUEUE_FLAG_BYPASS, q);
        WARN_ON_ONCE(q->bypass_depth < 0);
        spin_unlock_irq(q->queue_lock);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Tejun Heo                   48      100.00%  1        100.00%
Total                       48      100.00%  1        100.00%

EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
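
As a minimal sketch of how the bypass pair is meant to bracket work that must not race with ELVPRIV or throttled requests (per the kernel-doc above): my_change_policy() and my_update_queue_policy() are hypothetical names, not part of this file.

/* Hypothetical sketch: quiesce the elevator path around a policy update. */
static void my_change_policy(struct request_queue *q)
{
        blk_queue_bypass_start(q);      /* drain ELVPRIV/throttled requests */
        my_update_queue_policy(q);
        blk_queue_bypass_end(q);        /* resume normal queueing */
}
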
void blk_set_queue_dying(struct request_queue *q)
{
        spin_lock_irq(q->queue_lock);
        queue_flag_set(QUEUE_FLAG_DYING, q);
        spin_unlock_irq(q->queue_lock);

        /*
         * When queue DYING flag is set, we need to block new req
         * entering queue, so we call blk_freeze_queue_start() to
         * prevent I/O from crossing blk_queue_enter().
         */
        blk_freeze_queue_start(q);

        if (q->mq_ops)
                blk_mq_wake_waiters(q);
        else {
                struct request_list *rl;

                spin_lock_irq(q->queue_lock);
                blk_queue_for_each_rl(rl, q) {
                        if (rl->rq_pool) {
                                wake_up(&rl->wait[BLK_RW_SYNC]);
                                wake_up(&rl->wait[BLK_RW_ASYNC]);
                        }
                }
                spin_unlock_irq(q->queue_lock);
        }
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Jens Axboe                  73      67.59%   1        25.00%
Bart Van Assche             15      13.89%   1        25.00%
Tahsin Erdogan              14      12.96%   1        25.00%
Lei Ming                    6       5.56%    1        25.00%
Total                       108     100.00%  4        100.00%

EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it. All future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
        spinlock_t *lock = q->queue_lock;

        /* mark @q DYING, no new request or merges will be allowed afterwards */
        mutex_lock(&q->sysfs_lock);
        blk_set_queue_dying(q);
        spin_lock_irq(lock);

        /*
         * A dying queue is permanently in bypass mode till released. Note
         * that, unlike blk_queue_bypass_start(), we aren't performing
         * synchronize_rcu() after entering bypass mode to avoid the delay
         * as some drivers create and destroy a lot of queues while
         * probing. This is still safe because blk_release_queue() will be
         * called only after the queue refcnt drops to zero and nothing,
         * RCU or not, would be traversing the queue by then.
         */
        q->bypass_depth++;
        queue_flag_set(QUEUE_FLAG_BYPASS, q);

        queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
        queue_flag_set(QUEUE_FLAG_DYING, q);
        spin_unlock_irq(lock);
        mutex_unlock(&q->sysfs_lock);

        /*
         * Drain all requests queued before DYING marking. Set DEAD flag to
         * prevent that q->request_fn() gets invoked after draining finished.
         */
        blk_freeze_queue(q);
        spin_lock_irq(lock);
        if (!q->mq_ops)
                __blk_drain_queue(q, true);
        queue_flag_set(QUEUE_FLAG_DEAD, q);
        spin_unlock_irq(lock);

        /* for synchronous bio-based driver finish in-flight integrity i/o */
        blk_flush_integrity();

        /* @q won't process any more request, flush async actions */
        del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
        blk_sync_queue(q);

        if (q->mq_ops)
                blk_mq_free_queue(q);
        percpu_ref_exit(&q->q_usage_counter);

        spin_lock_irq(lock);
        if (q->queue_lock != &q->__queue_lock)
                q->queue_lock = &q->__queue_lock;
        spin_unlock_irq(lock);

        /* @q is and will stay empty, shutdown and put */
        blk_put_queue(q);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Tejun Heo                   74      37.76%   2        10.00%
Asias He                    30      15.31%   1        5.00%
Bart Van Assche             28      14.29%   4        20.00%
Jens Axboe                  23      11.73%   4        20.00%
Dan J Williams              20      10.20%   2        10.00%
Andrew Morton               6       3.06%    2        10.00%
Omar Sandoval               5       2.55%    1        5.00%
Vivek Goyal                 4       2.04%    1        5.00%
Lei Ming                    4       2.04%    1        5.00%
Jan Kara                    1       0.51%    1        5.00%
Linus Torvalds              1       0.51%    1        5.00%
Total                       196     100.00%  20       100.00%
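
A hedged sketch of where blk_cleanup_queue() fits in a driver's removal path: the gendisk is unregistered first, the queue is then marked DYING, drained and released, and finally the disk reference is dropped. The my_dev structure and its ->disk/->queue fields are illustrative assumptions only.

/* Hypothetical sketch: tearing down a block driver's queue on remove. */
static void my_dev_remove(struct my_dev *dev)
{
        del_gendisk(dev->disk);         /* stop new I/O arriving via the disk */
        blk_cleanup_queue(dev->queue);  /* mark DYING, drain, drop the queue ref */
        put_disk(dev->disk);
}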

EXPORT_SYMBOL(blk_cleanup_queue);

/* Allocate memory local to the request queue */
static void *alloc_request_simple(gfp_t gfp_mask, void *data)
{
        struct request_queue *q = data;

        return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
David Rientjes              25      75.76%   1        50.00%
Christoph Hellwig           8       24.24%   1        50.00%
Total                       33      100.00%  2        100.00%


static void free_request_simple(void *element, void *data)
{
        kmem_cache_free(request_cachep, element);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
David Rientjes              19      90.48%   1        50.00%
Christoph Hellwig           2       9.52%    1        50.00%
Total                       21      100.00%  2        100.00%


static void *alloc_request_size(gfp_t gfp_mask, void *data)
{
        struct request_queue *q = data;
        struct request *rq;

        rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask,
                          q->node);
        if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) {
                kfree(rq);
                rq = NULL;
        }
        return rq;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Christoph Hellwig           82      100.00%  1        100.00%
Total                       82      100.00%  1        100.00%


static void free_request_size(void *element, void *data)
{
        struct request_queue *q = data;

        if (q->exit_rq_fn)
                q->exit_rq_fn(q, element);
        kfree(element);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Christoph Hellwig           41      100.00%  1        100.00%
Total                       41      100.00%  1        100.00%


int blk_init_rl(struct request_list *rl, struct request_queue *q,
                gfp_t gfp_mask)
{
        if (unlikely(rl->rq_pool))
                return 0;

        rl->q = q;
        rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
        rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
        init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
        init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

        if (q->cmd_size) {
                rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
                                alloc_request_size, free_request_size,
                                q, gfp_mask, q->node);
        } else {
                rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
                                alloc_request_simple, free_request_simple,
                                q, gfp_mask, q->node);
        }
        if (!rl->rq_pool)
                return -ENOMEM;

        if (rl != &q->root_rl)
                WARN_ON_ONCE(!blk_get_queue(q));

        return 0;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Jens Axboe                  75      42.86%   7        43.75%
Tejun Heo                   33      18.86%   3        18.75%
Christoph Hellwig           32      18.29%   1        6.25%
Bart Van Assche             18      10.29%   1        6.25%
Martin Dalecki              9       5.14%    2        12.50%
Mike Snitzer                5       2.86%    1        6.25%
David Rientjes              3       1.71%    1        6.25%
Total                       175     100.00%  16       100.00%


void blk_exit_rl(struct request_queue *q, struct request_list *rl)
{
        if (rl->rq_pool) {
                mempool_destroy(rl->rq_pool);
                if (rl != &q->root_rl)
                        blk_put_queue(q);
        }
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Tejun Heo                   23      52.27%   1        50.00%
Bart Van Assche             21      47.73%   1        50.00%
Total                       44      100.00%  2        100.00%


struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
        return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Jens Axboe                  11      61.11%   1        25.00%
Nicholas Piggin             5       27.78%   1        25.00%
Ezequiel García             1       5.56%    1        25.00%
Martin Dalecki              1       5.56%    1        25.00%
Total                       18      100.00%  4        100.00%

EXPORT_SYMBOL(blk_alloc_queue);
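
A minimal sketch of the typical caller: a bio-based driver allocates a queue with blk_alloc_queue() and installs its own make_request function. The my_make_request()/my_create_queue() names are hypothetical; the sketch completes every bio immediately just to keep the example self-contained.

/* Hypothetical sketch: queue setup for a bio-based (make_request) driver. */
static blk_qc_t my_make_request(struct request_queue *q, struct bio *bio)
{
        /* a real driver would map and service the bio here */
        bio_endio(bio);
        return BLK_QC_T_NONE;
}

static struct request_queue *my_create_queue(void)
{
        struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

        if (!q)
                return NULL;
        blk_queue_make_request(q, my_make_request);
        return q;
}
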
int blk_queue_enter(struct request_queue *q, bool nowait)
{
        while (true) {
                int ret;

                if (percpu_ref_tryget_live(&q->q_usage_counter))
                        return 0;

                if (nowait)
                        return -EBUSY;

                /*
                 * read pair of barrier in blk_freeze_queue_start(),
                 * we need to order reading __PERCPU_REF_DEAD flag of
                 * .q_usage_counter and reading .mq_freeze_depth or
                 * queue dying flag, otherwise the following wait may
                 * never return if the two reads are reordered.
                 */
                smp_rmb();

                ret = wait_event_interruptible(q->mq_freeze_wq,
                                !atomic_read(&q->mq_freeze_depth) ||
                                blk_queue_dying(q));
                if (blk_queue_dying(q))
                        return -ENODEV;
                if (ret)
                        return ret;
        }
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Dan J Williams              81      92.05%   1        25.00%
Lei Ming                    4       4.55%    2        50.00%
Christoph Hellwig           3       3.41%    1        25.00%
Total                       88      100.00%  4        100.00%


void blk_queue_exit(struct request_queue *q)
{
        percpu_ref_put(&q->q_usage_counter);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Dan J Williams              18      100.00%  1        100.00%
Total                       18      100.00%  1        100.00%
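
The pair above guards submission paths against frozen or dying queues via q_usage_counter. The sketch below is hypothetical (my_submit_locked() is a stand-in for the real work) and only illustrates the enter/exit bracketing and the error codes blk_queue_enter() can return.

/* Hypothetical sketch: bracketing a submission with the usage counter. */
static int my_submit(struct request_queue *q, struct bio *bio, bool nowait)
{
        int ret;

        ret = blk_queue_enter(q, nowait);  /* -EBUSY or -ENODEV if frozen/dying */
        if (ret)
                return ret;
        ret = my_submit_locked(q, bio);
        blk_queue_exit(q);                 /* drop the q_usage_counter reference */
        return ret;
}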


static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
        struct request_queue *q =
                container_of(ref, struct request_queue, q_usage_counter);

        wake_up_all(&q->mq_freeze_wq);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Dan J Williams              34      100.00%  1        100.00%
Total                       34      100.00%  1        100.00%


static void blk_rq_timed_out_timer(unsigned long data)
{
        struct request_queue *q = (struct request_queue *)data;

        kblockd_schedule_work(&q->timeout_work);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Christoph Hellwig           30      100.00%  1        100.00%
Total                       30      100.00%  1        100.00%


struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
        struct request_queue *q;

        q = kmem_cache_alloc_node(blk_requestq_cachep,
                                  gfp_mask | __GFP_ZERO, node_id);
        if (!q)
                return NULL;

        q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
        if (q->id < 0)
                goto fail_q;

        q->bio_split = bioset_create(BIO_POOL_SIZE, 0);
        if (!q->bio_split)
                goto fail_id;

        q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
        if (!q->backing_dev_info)
                goto fail_split;

        q->stats = blk_alloc_queue_stats();
        if (!q->stats)
                goto fail_stats;

        q->backing_dev_info->ra_pages =
                        (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
        q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
        q->backing_dev_info->name = "block";
        q->node = node_id;

        setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
                    laptop_mode_timer_fn, (unsigned long) q);
        setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
        INIT_LIST_HEAD(&q->queue_head);
        INIT_LIST_HEAD(&q->timeout_list);
        INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
        INIT_LIST_HEAD(&q->blkg_list);
#endif
        INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);

        kobject_init(&q->kobj, &blk_queue_ktype);

        mutex_init(&q->sysfs_lock);
        spin_lock_init(&q->__queue_lock);

        /*
         * By default initialize queue_lock to internal lock and driver can
         * override it later if need be.
         */
        q->queue_lock = &q->__queue_lock;

        /*
         * A queue starts its life with bypass turned on to avoid
         * unnecessary bypass on/off overhead and nasty surprises during
         * init. The initial bypass will be finished when the queue is
         * registered by blk_register_queue().
         */
        q->bypass_depth = 1;
        __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);

        init_waitqueue_head(&q->mq_freeze_wq);

        /*
         * Init percpu_ref in atomic mode so that it's faster to shutdown.
         * See blk_register_queue() for details.
         */
        if (percpu_ref_init(&q->q_usage_counter,
                            blk_queue_usage_counter_release,
                            PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
                goto fail_bdi;

        if (blkcg_init_queue(q))