cregit-Linux: how code gets into the kernel

Release 4.12, block/blk-mq.c (directory: block)

/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
#include "blk-wbt.h"
#include "blk-mq-sched.h"

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
static void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync);


static int blk_mq_poll_stats_bkt(const struct request *rq)
{
        int ddir, bytes, bucket;

        ddir = rq_data_dir(rq);
        bytes = blk_rq_bytes(rq);

        bucket = ddir + 2*(ilog2(bytes) - 9);

        if (bucket < 0)
                return -1;
        else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
                return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

        return bucket;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Stephen Bates        74       98.67%    1         50.00%
Jens Axboe           1        1.33%     1         50.00%
Total                75       100.00%   2         100.00%
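As a worked example of the bucketing above: a 4 KiB read (ddir = 0, bytes = 4096, ilog2(4096) = 12) lands in bucket 0 + 2*(12 - 9) = 6, and a 4 KiB write (ddir = 1) in bucket 7, while anything smaller than 512 bytes makes the expression negative and the function returns -1.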

/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
        return sbitmap_any_bit_set(&hctx->ctx_map) ||
                !list_empty_careful(&hctx->dispatch) ||
                blk_mq_sched_has_work(hctx);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jens Axboe           30       90.91%    3         75.00%
Omar Sandoval        3        9.09%     1         25.00%
Total                33       100.00%   4         100.00%

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
                                     struct blk_mq_ctx *ctx)
{
        if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
                sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jens Axboe           33       76.74%    2         66.67%
Omar Sandoval        10       23.26%    1         33.33%
Total                43       100.00%   3         100.00%


static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
                                      struct blk_mq_ctx *ctx)
{
        sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jens Axboe           23       82.14%    2         66.67%
Omar Sandoval        5        17.86%    1         33.33%
Total                28       100.00%   3         100.00%


void blk_freeze_queue_start(struct request_queue *q)
{
        int freeze_depth;

        freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
        if (freeze_depth == 1) {
                percpu_ref_kill(&q->q_usage_counter);
                blk_mq_run_hw_queues(q, false);
        }
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jens Axboe           17       36.96%    1         10.00%
Tejun Heo            15       32.61%    5         50.00%
Christoph Hellwig    9        19.57%    1         10.00%
Mike Snitzer         3        6.52%     1         10.00%
Dan J Williams       1        2.17%     1         10.00%
Lei Ming             1        2.17%     1         10.00%
Total                46       100.00%   10        100.00%

EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
void blk_mq_freeze_queue_wait(struct request_queue *q)
{
        wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Tejun Heo            15       60.00%    2         40.00%
Lei Ming             6        24.00%    1         20.00%
Jens Axboe           3        12.00%    1         20.00%
Dan J Williams       1        4.00%     1         20.00%
Total                25       100.00%   5         100.00%

EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
                                     unsigned long timeout)
{
        return wait_event_timeout(q->mq_freeze_wq,
                                        percpu_ref_is_zero(&q->q_usage_counter),
                                        timeout);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Keith Busch          32       100.00%   1         100.00%
Total                32       100.00%   1         100.00%

EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
        /*
         * In the !blk_mq case we are only calling this to kill the
         * q_usage_counter, otherwise this increases the freeze depth
         * and waits for it to return to zero. For this reason there is
         * no blk_unfreeze_queue(), and blk_freeze_queue() is not
         * exported to drivers as the only user for unfreeze is blk_mq.
         */
        blk_freeze_queue_start(q);
        blk_mq_freeze_queue_wait(q);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Tejun Heo            18       85.71%    1         33.33%
Dan J Williams       2        9.52%     1         33.33%
Lei Ming             1        4.76%     1         33.33%
Total                21       100.00%   3         100.00%


void blk_mq_freeze_queue(struct request_queue *q)
{
        /*
         * ...just an alias to keep freeze and unfreeze actions balanced
         * in the blk_mq_* namespace
         */
        blk_freeze_queue(q);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Dan J Williams       16       100.00%   1         100.00%
Total                16       100.00%   1         100.00%

EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
void blk_mq_unfreeze_queue(struct request_queue *q)
{
        int freeze_depth;

        freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
        WARN_ON_ONCE(freeze_depth < 0);
        if (!freeze_depth) {
                percpu_ref_reinit(&q->q_usage_counter);
                wake_up_all(&q->mq_freeze_wq);
        }
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jens Axboe           31       58.49%    1         20.00%
Tejun Heo            11       20.75%    2         40.00%
Christoph Hellwig    10       18.87%    1         20.00%
Dan J Williams       1        1.89%     1         20.00%
Total                53       100.00%   5         100.00%

EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
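As a usage note, drivers bracket changes to queue state with the freeze/unfreeze pair exported above. The following is a minimal hypothetical sketch (struct my_dev, my_dev_reconfigure() and its max_sectors field are invented for illustration, not part of blk-mq):

        /* Hypothetical example only: my_dev and its fields are illustrative. */
        struct my_dev {
                struct request_queue *queue;
                unsigned int max_sectors;
        };

        static void my_dev_reconfigure(struct my_dev *dev)
        {
                blk_mq_freeze_queue(dev->queue);        /* wait for q_usage_counter to drain */
                dev->max_sectors = 1024;                /* no request is in flight on the queue */
                blk_mq_unfreeze_queue(dev->queue);      /* reinit the ref, wake mq_freeze_wq waiters */
        }

While the queue is frozen the usage counter is held at zero, so the update cannot race with any request in flight.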
/**
 * blk_mq_quiesce_queue() - wait until all ongoing queue_rq calls have finished
 * @q: request queue.
 *
 * Note: this function does not prevent that the struct request end_io()
 * callback function is invoked. Additionally, it is not prevented that
 * new queue_rq() calls occur unless the queue has been stopped first.
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;
        bool rcu = false;

        __blk_mq_stop_hw_queues(q, true);

        queue_for_each_hw_ctx(q, hctx, i) {
                if (hctx->flags & BLK_MQ_F_BLOCKING)
                        synchronize_srcu(&hctx->queue_rq_srcu);
                else
                        rcu = true;
        }
        if (rcu)
                synchronize_rcu();
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Bart Van Assche      66       95.65%    1         50.00%
Jens Axboe           3        4.35%     1         50.00%
Total                69       100.00%   2         100.00%

EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
void blk_mq_wake_waiters(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;

        queue_for_each_hw_ctx(q, hctx, i)
                if (blk_mq_hw_queue_mapped(hctx))
                        blk_mq_tag_wakeup_all(hctx->tags, true);

        /*
         * If we are called because the queue has now been marked as
         * dying, we need to ensure that processes currently waiting on
         * the queue are notified as well.
         */
        wake_up_all(&q->mq_freeze_wq);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jens Axboe           43       82.69%    1         50.00%
Keith Busch          9        17.31%    1         50.00%
Total                52       100.00%   2         100.00%


bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
        return blk_mq_has_free_tags(hctx->tags);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jens Axboe           18       100.00%   1         100.00%
Total                18       100.00%   1         100.00%

EXPORT_SYMBOL(blk_mq_can_queue);
void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
                        struct request *rq, unsigned int op)
{
        INIT_LIST_HEAD(&rq->queuelist);
        /* csd/requeue_work/fifo_time is initialized before use */
        rq->q = q;
        rq->mq_ctx = ctx;
        rq->cmd_flags = op;
        if (blk_queue_io_stat(q))
                rq->rq_flags |= RQF_IO_STAT;
        /* do not touch atomic flags, it needs atomic ops against the timer */
        rq->cpu = -1;
        INIT_HLIST_NODE(&rq->hash);
        RB_CLEAR_NODE(&rq->rb_node);
        rq->rq_disk = NULL;
        rq->part = NULL;
        rq->start_time = jiffies;
#ifdef CONFIG_BLK_CGROUP
        rq->rl = NULL;
        set_start_time_ns(rq);
        rq->io_start_time_ns = 0;
#endif
        rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        rq->nr_integrity_segments = 0;
#endif
        rq->special = NULL;
        /* tag was already set */
        rq->extra_len = 0;

        INIT_LIST_HEAD(&rq->timeout_list);
        rq->timeout = 0;

        rq->end_io = NULL;
        rq->end_io_data = NULL;
        rq->next_rq = NULL;

        ctx->rq_dispatched[op_is_sync(op)]++;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Christoph Hellwig    145      71.08%    3         33.33%
Jens Axboe           52       25.49%    4         44.44%
Lei Ming             5        2.45%     1         11.11%
Michael Christie     2        0.98%     1         11.11%
Total                204      100.00%   9         100.00%

EXPORT_SYMBOL_GPL(blk_mq_rq_ctx_init);
struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
                                       unsigned int op)
{
        struct request *rq;
        unsigned int tag;

        tag = blk_mq_get_tag(data);
        if (tag != BLK_MQ_TAG_FAIL) {
                struct blk_mq_tags *tags = blk_mq_tags_from_data(data);

                rq = tags->static_rqs[tag];

                if (data->flags & BLK_MQ_REQ_INTERNAL) {
                        rq->tag = -1;
                        rq->internal_tag = tag;
                } else {
                        if (blk_mq_tag_busy(data->hctx)) {
                                rq->rq_flags = RQF_MQ_INFLIGHT;
                                atomic_inc(&data->hctx->nr_active);
                        }
                        rq->tag = tag;
                        rq->internal_tag = -1;
                        data->hctx->tags->rqs[rq->tag] = rq;
                }

                blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
                return rq;
        }

        return NULL;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Christoph Hellwig    83       51.55%    3         33.33%
Jens Axboe           50       31.06%    3         33.33%
Omar Sandoval        15       9.32%     1         11.11%
Lei Ming             11       6.83%     1         11.11%
Michael Christie     2        1.24%     1         11.11%
Total                161      100.00%   9         100.00%

EXPORT_SYMBOL_GPL(__blk_mq_alloc_request);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
                unsigned int flags)
{
        struct blk_mq_alloc_data alloc_data = { .flags = flags };
        struct request *rq;
        int ret;

        ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
        if (ret)
                return ERR_PTR(ret);

        rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);

        blk_mq_put_ctx(alloc_data.ctx);
        blk_queue_exit(q);

        if (!rq)
                return ERR_PTR(-EWOULDBLOCK);

        rq->__data_len = 0;
        rq->__sector = (sector_t) -1;
        rq->bio = rq->biotail = NULL;
        return rq;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jens Axboe           48       38.10%    3         25.00%
Christoph Hellwig    46       36.51%    4         33.33%
Joe Lawrence         20       15.87%    1         8.33%
Keith Busch          5        3.97%     2         16.67%
Lei Ming             5        3.97%     1         8.33%
Dan J Williams       2        1.59%     1         8.33%
Total                126      100.00%   12        100.00%

EXPORT_SYMBOL(blk_mq_alloc_request);
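A hedged sketch of a caller of blk_mq_alloc_request(); the helper name my_alloc_private_cmd() is invented, and REQ_OP_DRV_IN is used only as a plausible opcode for a driver-private command:

        /* Hypothetical example: allocate a driver-private request without sleeping. */
        static struct request *my_alloc_private_cmd(struct request_queue *q)
        {
                struct request *rq;

                rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
                if (IS_ERR(rq))
                        return NULL;    /* e.g. -EWOULDBLOCK when no tag is free */

                /* ... fill in rq->end_io, timeout and payload, then issue it ... */
                return rq;
        }

With BLK_MQ_REQ_NOWAIT the allocation fails fast instead of sleeping on blk_queue_enter() or on tag exhaustion, which matches the ERR_PTR paths shown above.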
struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
                unsigned int flags, unsigned int hctx_idx)
{
        struct blk_mq_alloc_data alloc_data = { .flags = flags };
        struct request *rq;
        unsigned int cpu;
        int ret;

        /*
         * If the tag allocator sleeps we could get an allocation for a
         * different hardware context. No need to complicate the low level
         * allocator for this for the rare use case of a command tied to
         * a specific queue.
         */
        if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
                return ERR_PTR(-EINVAL);

        if (hctx_idx >= q->nr_hw_queues)
                return ERR_PTR(-EIO);

        ret = blk_queue_enter(q, true);
        if (ret)
                return ERR_PTR(ret);

        /*
         * Check if the hardware context is actually mapped to anything.
         * If not tell the caller that it should skip this queue.
         */
        alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
        if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
                blk_queue_exit(q);
                return ERR_PTR(-EXDEV);
        }
        cpu = cpumask_first(alloc_data.hctx->cpumask);
        alloc_data.ctx = __blk_mq_get_ctx(q, cpu);

        rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);

        blk_queue_exit(q);

        if (!rq)
                return ERR_PTR(-EWOULDBLOCK);

        return rq;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Ming Lin             127      66.15%    1         33.33%
Omar Sandoval        52       27.08%    1         33.33%
Christoph Hellwig    13       6.77%     1         33.33%
Total                192      100.00%   3         100.00%

EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                             struct request *rq)
{
        const int sched_tag = rq->internal_tag;
        struct request_queue *q = rq->q;

        if (rq->rq_flags & RQF_MQ_INFLIGHT)
                atomic_dec(&hctx->nr_active);

        wbt_done(q->rq_wb, &rq->issue_stat);
        rq->rq_flags = 0;

        clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
        if (rq->tag != -1)
                blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
        if (sched_tag != -1)
                blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
        blk_mq_sched_restart(hctx);
        blk_queue_exit(q);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jens Axboe           115      79.31%    8         53.33%
Christoph Hellwig    13       8.97%     2         13.33%
Omar Sandoval        10       6.90%     2         13.33%
David Hildenbrand    5        3.45%     1         6.67%
Bart Van Assche      1        0.69%     1         6.67%
Dan J Williams       1        0.69%     1         6.67%
Total                145      100.00%   15        100.00%


static void blk_mq_finish_hctx_request(struct blk_mq_hw_ctx *hctx,
                                       struct request *rq)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;

        ctx->rq_completed[rq_is_sync(rq)]++;
        __blk_mq_finish_request(hctx, ctx, rq);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jens Axboe           45       100.00%   4         100.00%
Total                45       100.00%   4         100.00%


void blk_mq_finish_request(struct request *rq)
{
        blk_mq_finish_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jens Axboe           25       89.29%    3         75.00%
Christoph Hellwig    3        10.71%    1         25.00%
Total                28       100.00%   4         100.00%

EXPORT_SYMBOL_GPL(blk_mq_finish_request);
void blk_mq_free_request(struct request *rq)
{
        blk_mq_sched_put_request(rq);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jens Axboe           15       100.00%   1         100.00%
Total                15       100.00%   1         100.00%

EXPORT_SYMBOL_GPL(blk_mq_free_request);
inline void __blk_mq_end_request(struct request *rq, int error)
{
        blk_account_io_done(rq);

        if (rq->end_io) {
                wbt_done(rq->q->rq_wb, &rq->issue_stat);
                rq->end_io(rq, error);
        } else {
                if (unlikely(blk_bidi_rq(rq)))
                        blk_mq_free_request(rq->next_rq);
                blk_mq_free_request(rq);
        }
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jens Axboe           46       61.33%    2         33.33%
Christoph Hellwig    24       32.00%    3         50.00%
Lei Ming             5        6.67%     1         16.67%
Total                75       100.00%   6         100.00%

EXPORT_SYMBOL(__blk_mq_end_request);
void blk_mq_end_request(struct request *rq, int error)
{
        if (blk_update_request(rq, error, blk_rq_bytes(rq)))
                BUG();
        __blk_mq_end_request(rq, error);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Christoph Hellwig    37       100.00%   2         100.00%
Total                37       100.00%   2         100.00%

EXPORT_SYMBOL(blk_mq_end_request);
static void __blk_mq_complete_request_remote(void *data)
{
        struct request *rq = data;

        rq->q->softirq_done_fn(rq);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jens Axboe           18       69.23%    1         33.33%
Christoph Hellwig    8        30.77%    2         66.67%
Total                26       100.00%   3         100.00%


static void __blk_mq_complete_request(struct request *rq)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        bool shared = false;
        int cpu;

        if (rq->internal_tag != -1)
                blk_mq_sched_completed_request(rq);
        if (rq->rq_flags & RQF_STATS) {
                blk_mq_poll_stats_start(rq->q);
                blk_stat_add(rq);
        }

        if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
                rq->q->softirq_done_fn(rq);
                return;
        }

        cpu = get_cpu();
        if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
                shared = cpus_share_cache(cpu, ctx->cpu);

        if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
                rq->csd.func = __blk_mq_complete_request_remote;
                rq->csd.info = rq;
                rq->csd.flags = 0;
                smp_call_function_single_async(ctx->cpu, &rq->csd);
        } else {
                rq->q->softirq_done_fn(rq);
        }
        put_cpu();
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Christoph Hellwig    132      68.39%    4         57.14%
Jens Axboe           60       31.09%    2         28.57%
Frédéric Weisbecker  1        0.52%     1         14.29%
Total                193      100.00%   7         100.00%

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through a IPI handler.
 **/
void blk_mq_complete_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        if (unlikely(blk_should_fake_timeout(q)))
                return;
        if (!blk_mark_rq_complete(rq))
                __blk_mq_complete_request(rq);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Christoph Hellwig    34       79.07%    1         50.00%
Jens Axboe           9        20.93%    1         50.00%
Total                43       100.00%   2         100.00%

EXPORT_SYMBOL(blk_mq_complete_request);
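To illustrate the split described above, a driver typically calls blk_mq_complete_request() from its hardware completion path and finishes the request in its ->complete callback (installed as q->softirq_done_fn), which ends with blk_mq_end_request(). The sketch below is hypothetical; my_rq_status() and my_handle_hw_completion() are invented names:

        /* Hypothetical driver completion callback, registered as blk_mq_ops.complete. */
        static void my_softirq_done(struct request *rq)
        {
                blk_mq_end_request(rq, my_rq_status(rq));       /* my_rq_status() is illustrative */
        }

        /* Hypothetical hard-irq side: defer the actual completion work. */
        static void my_handle_hw_completion(struct request *rq)
        {
                blk_mq_complete_request(rq);    /* may bounce to the submitting CPU via IPI */
        }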
int blk_mq_request_started(struct request *rq)
{
        return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Keith Busch          21       100.00%   1         100.00%
Total                21       100.00%   1         100.00%

EXPORT_SYMBOL_GPL(blk_mq_request_started);
void blk_mq_start_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        blk_mq_sched_started_request(rq);

        trace_block_rq_issue(q, rq);

        if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
                blk_stat_set_issue(&rq->issue_stat, blk_rq_sectors(rq));
                rq->rq_flags |= RQF_STATS;
                wbt_issue(q->rq_wb, &rq->issue_stat);
        }

        blk_add_timer(rq);

        /*
         * Ensure that ->deadline is visible before set the started
         * flag and clear the completed flag.
         */
        smp_mb__before_atomic();

        /*
         * Mark us as started and clear complete. Complete might have been
         * set if requeue raced with timeout, which then marked it as
         * complete. So be sure to clear complete again when we start
         * the request, otherwise we'll ignore the completion event.
         */
        if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
                set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
                clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);

        if (q->dma_drain_size && blk_rq_bytes(rq)) {
                /*
                 * Make sure space for the drain appears. We know we can do
                 * this because max_hw_segments has been adjusted to be one
                 * fewer than the device can handle.
                 */
                rq->nr_phys_segments++;
        }
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jens Axboe           122      81.33%    8         72.73%
Christoph Hellwig    19       12.67%    1         9.09%
Shaohua Li           6        4.00%     1         9.09%
Lei Ming             3        2.00%     1         9.09%
Total                150      100.00%   11        100.00%

EXPORT_SYMBOL(blk_mq_start_request);
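For context, blk_mq_start_request() is called from the driver's ->queue_rq() before the request is handed to hardware. A minimal hypothetical sketch, assuming an invented my_hw_submit() helper and the integer BLK_MQ_RQ_QUEUE_* return codes of this era:

        /* Hypothetical ->queue_rq() implementation. */
        static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
                               const struct blk_mq_queue_data *bd)
        {
                struct request *rq = bd->rq;

                blk_mq_start_request(rq);       /* arms the timeout, sets REQ_ATOM_STARTED */

                if (!my_hw_submit(hctx->driver_data, rq))       /* my_hw_submit() is illustrative */
                        return BLK_MQ_RQ_QUEUE_BUSY;    /* core requeues and re-runs the queue */

                return BLK_MQ_RQ_QUEUE_OK;
        }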
/*
 * When we reach here because queue is busy, REQ_ATOM_COMPLETE
 * flag isn't set yet, so there may be race with timeout handler,
 * but given rq->deadline is just set in .queue_rq() under
 * this situation, the race won't be possible in reality because
 * rq->timeout should be set as big enough to cover the window
 * between blk_mq_start_request() called from .queue_rq() and
 * clearing REQ_ATOM_STARTED here.
 */
static void __blk_mq_requeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        trace_block_rq_requeue(q, rq);
        wbt_requeue(q->rq_wb, &rq->issue_stat);
        blk_mq_sched_requeue_request(rq);

        if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
                if (q->dma_drain_size && blk_rq_bytes(rq))
                        rq->nr_phys_segments--;
        }
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jens Axboe           51       68.92%    3         50.00%
Christoph Hellwig    23       31.08%    3         50.00%
Total                74       100.00%   6         100.00%


void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
        __blk_mq_requeue_request(rq);

        BUG_ON(blk_queued_rq(rq));
        blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Christoph Hellwig    30       85.71%    2         66.67%
Bart Van Assche      5        14.29%    1         33.33%
Total                35       100.00%   3         100.00%

EXPORT_SYMBOL(blk_mq_requeue_request);
static void blk_mq_requeue_work(struct work_struct *work)
{
        struct request_queue *q =
                container_of(work, struct request_queue, requeue_work.work);
        LIST_HEAD(rq_list);
        struct request *rq, *next;
        unsigned long flags;

        spin_lock_irqsave(&q->requeue_lock, flags);
        list_splice_init(&q->requeue_list, &rq_list);
        spin_unlock_irqrestore(&q->requeue_lock, flags);

        list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
                if (!(rq->rq_flags & RQF_SOFTBARRIER))
                        continue;

                rq->rq_flags &= ~RQF_SOFTBARRIER;
                list_del_init(&rq->queuelist);
                blk_mq_sched_insert_request(rq, true, false, false, true);
        }

        while (!list_empty(&rq_list)) {
                rq = list_entry(rq_list.next, struct request, queuelist);
                list_del_init(&rq->queuelist);
                blk_mq_sched_insert_request(rq, false, false, false, true);
        }

        blk_mq_run_hw_queues(q, false);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Christoph Hellwig    170      93.92%    3         42.86%
Jens Axboe           6        3.31%     2         28.57%
Bart Van Assche      3        1.66%     1         14.29%
Mike Snitzer         2        1.10%     1         14.29%
Total                181      100.00%   7         100.00%


void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
                                bool kick_requeue_list)
{
        struct request_queue *q = rq->q;
        unsigned long flags;

        /*
         * We abuse this flag that is otherwise used by the I/O scheduler to
         * request head insertation from the workqueue.
         */
        BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

        spin_lock_irqsave(&q->requeue_lock, flags);
        if (at_head) {
                rq->rq_flags |= RQF_SOFTBARRIER;
                list_add(&rq->queuelist, &q->requeue_list);
        } else {
                list_add_tail(&rq->queuelist, &q->requeue_list);
        }
        spin_unlock_irqrestore(&q->requeue_lock, flags);

        if (kick_requeue_list)
                blk_mq_kick_requeue_list(q);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Christoph Hellwig    97       88.99%    3         75.00%
Bart Van Assche      12       11.01%    1         25.00%
Total                109      100.00%   4         100.00%

EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q)
{
        kblockd_schedule_delayed_work(&q->requeue_work, 0);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Christoph Hellwig    17       85.00%    1         50.00%
Mike Snitzer         3        15.00%    1         50.00%
Total                20       100.00%   2         100.00%

EXPORT_SYMBOL(blk_mq_kick_requeue_list);
void blk_mq_delay_kick_requeue_list(struct request_queue *q,
                                    unsigned long msecs)
{
        kblockd_schedule_delayed_work(&q->requeue_work,
                                      msecs_to_jiffies(msecs));
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jens Axboe           14       51.85%    1         50.00%
Mike Snitzer         13       48.15%    1         50.00%
Total                27       100.00%   2         100.00%

EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
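Putting the requeue helpers together: a driver that hits a transient resource shortage can park a request on the requeue list without an immediate kick and re-run the list after a delay. A brief hypothetical sketch (my_retry_later() is an invented name):

        /* Hypothetical example: requeue now, dispatch again after a delay. */
        static void my_retry_later(struct request *rq, unsigned long delay_ms)
        {
                blk_mq_requeue_request(rq, false);              /* add to requeue_list, no kick yet */
                blk_mq_delay_kick_requeue_list(rq->q, delay_ms);        /* schedules requeue_work */
        }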
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
        if (tag < tags->nr_tags) {
                prefetch(tags->rqs[tag]);
                return tags->rqs[tag];
        }

        return NULL;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jens Axboe           27       57.45%    2         40.00%
Hannes Reinecke      11       23.40%    1         20.00%
Christoph Hellwig    8        17.02%    1         20.00%
Ming Lei             1        2.13%     1         20.00%
Total                47       100.00%   5         100.00%

EXPORT_SYMBOL(blk_mq_tag_to_rq);
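A common pattern, shown here as a hypothetical sketch (my_complete_tag() is an invented name): a driver's completion handler maps a hardware tag back to its request with blk_mq_tag_to_rq() and completes it:

        /* Hypothetical example: look up the request for a completed hardware tag. */
        static void my_complete_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag)
        {
                struct request *rq = blk_mq_tag_to_rq(hctx->tags, tag);

                if (rq)
                        blk_mq_complete_request(rq);
        }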