cregit-Linux: how code gets into the kernel

Release 4.18: block/blk-mq.c

/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
#include "blk-wbt.h"
#include "blk-mq-sched.h"

static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);


static int blk_mq_poll_stats_bkt(const struct request *rq)
{
        int ddir, bytes, bucket;

        ddir = rq_data_dir(rq);
        bytes = blk_rq_bytes(rq);

        bucket = ddir + 2*(ilog2(bytes) - 9);

        if (bucket < 0)
                return -1;
        else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
                return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

        return bucket;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Stephen Bates       74      98.67%    1        50.00%
Jens Axboe          1       1.33%     1        50.00%
Total               75      100.00%   2        100.00%
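
To make the bucket arithmetic above concrete, here is a small standalone sketch of the same formula (the BLK_MQ_POLL_STATS_BKTS value of 16 and the __builtin_clz-based ilog2() stand-in are assumptions for illustration, not taken from this file):

/* Standalone illustration of the bucketing formula; not kernel code. */
#include <stdio.h>

#define BLK_MQ_POLL_STATS_BKTS 16       /* assumed value, for illustration only */

static int poll_stats_bkt(int ddir, unsigned int bytes)
{
        int bucket = ddir + 2 * ((31 - __builtin_clz(bytes)) - 9); /* ilog2(bytes) */

        if (bucket < 0)
                return -1;
        else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
                return ddir + BLK_MQ_POLL_STATS_BKTS - 2;
        return bucket;
}

int main(void)
{
        /* 4 KiB read: ilog2(4096) = 12, so bucket = 0 + 2 * (12 - 9) = 6. */
        printf("4 KiB read  -> bucket %d\n", poll_stats_bkt(0, 4096));
        /* A write of the same size lands one bucket higher. */
        printf("4 KiB write -> bucket %d\n", poll_stats_bkt(1, 4096));
        /* Anything below 512 bytes yields a negative bucket and is ignored. */
        printf("256 B read  -> bucket %d\n", poll_stats_bkt(0, 256));
        return 0;
}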

/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
        return !list_empty_careful(&hctx->dispatch) ||
                sbitmap_any_bit_set(&hctx->ctx_map) ||
                blk_mq_sched_has_work(hctx);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          32      94.12%    4        80.00%
Omar Sandoval       2       5.88%     1        20.00%
Total               34      100.00%   5        100.00%

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
                                     struct blk_mq_ctx *ctx)
{
        if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
                sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          33      76.74%    2        66.67%
Omar Sandoval       10      23.26%    1        33.33%
Total               43      100.00%   3        100.00%


static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
                                      struct blk_mq_ctx *ctx)
{
        sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          23      82.14%    2        66.67%
Omar Sandoval       5       17.86%    1        33.33%
Total               28      100.00%   3        100.00%
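
The three helpers above form a small lifecycle around hctx->ctx_map. A hedged kernel-context sketch of how they compose (illustrative only; example_pending_lifecycle() is a hypothetical caller, and in the real code these steps happen in the insert, run and dispatch paths respectively):

/*
 * Illustrative sketch (not from blk-mq.c): how the pending-bit helpers
 * above are meant to compose around hctx->ctx_map.
 */
static void example_pending_lifecycle(struct blk_mq_hw_ctx *hctx,
                                      struct blk_mq_ctx *ctx)
{
        /* Inserting a request for @ctx marks that software queue pending. */
        blk_mq_hctx_mark_pending(hctx, ctx);

        /* The run path only dispatches when some ctx (or the dispatch list) has work. */
        if (blk_mq_hctx_has_pending(hctx)) {
                /* ... flush requests from the pending software queues ... */
        }

        /* Once @ctx has been drained, its bit is cleared again. */
        blk_mq_hctx_clear_pending(hctx, ctx);
}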

struct mq_inflight {
        struct hd_struct *part;
        unsigned int *inflight;
};

static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
                                  struct request *rq, void *priv,
                                  bool reserved)
{
        struct mq_inflight *mi = priv;

        /*
         * index[0] counts the specific partition that was asked for. index[1]
         * counts the ones that are active on the whole device, so increment
         * that if mi->part is indeed a partition, and not a whole device.
         */
        if (rq->part == mi->part)
                mi->inflight[0]++;
        if (mi->part->partno)
                mi->inflight[1]++;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          64      98.46%    2        66.67%
Omar Sandoval       1       1.54%     1        33.33%
Total               65      100.00%   3        100.00%


void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
                      unsigned int inflight[2])
{
        struct mq_inflight mi = { .part = part, .inflight = inflight, };

        inflight[0] = inflight[1] = 0;
        blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          60      100.00%   2        100.00%
Total               60      100.00%   2        100.00%


static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
                                     struct request *rq, void *priv,
                                     bool reserved)
{
        struct mq_inflight *mi = priv;

        if (rq->part == mi->part)
                mi->inflight[rq_data_dir(rq)]++;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Omar Sandoval       51      100.00%   1        100.00%
Total               51      100.00%   1        100.00%


void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
                         unsigned int inflight[2])
{
        struct mq_inflight mi = { .part = part, .inflight = inflight, };

        inflight[0] = inflight[1] = 0;
        blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Omar Sandoval       60      100.00%   1        100.00%
Total               60      100.00%   1        100.00%
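
Both variants follow the same pattern: a struct mq_inflight is filled in on the stack and blk_mq_queue_tag_busy_iter() walks the busy requests, calling the check helper for each one. A hedged sketch of a caller reading the counters (the q and part variables are assumed to be in scope):

/* Illustrative sketch (not from blk-mq.c): consuming the in-flight counters. */
unsigned int inflight[2], rw_inflight[2];

/* inflight[0]: requests on @part; inflight[1]: requests on the whole device. */
blk_mq_in_flight(q, part, inflight);

/* rw_inflight[0]: in-flight reads; rw_inflight[1]: in-flight writes. */
blk_mq_in_flight_rw(q, part, rw_inflight);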


void blk_freeze_queue_start(struct request_queue *q)
{
        int freeze_depth;

        freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
        if (freeze_depth == 1) {
                percpu_ref_kill(&q->q_usage_counter);
                if (q->mq_ops)
                        blk_mq_run_hw_queues(q, false);
        }
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          17      32.69%    1        9.09%
Tejun Heo           15      28.85%    5        45.45%
Christoph Hellwig   9       17.31%    1        9.09%
Ming Lei            6       11.54%    1        9.09%
Mike Snitzer        3       5.77%     1        9.09%
Dan J Williams      1       1.92%     1        9.09%
Lei Ming            1       1.92%     1        9.09%
Total               52      100.00%   11       100.00%

EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
void blk_mq_freeze_queue_wait(struct request_queue *q)
{
        wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Tejun Heo           15      60.00%    2        40.00%
Lei Ming            6       24.00%    1        20.00%
Jens Axboe          3       12.00%    1        20.00%
Dan J Williams      1       4.00%     1        20.00%
Total               25      100.00%   5        100.00%

EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
                                     unsigned long timeout)
{
        return wait_event_timeout(q->mq_freeze_wq,
                                  percpu_ref_is_zero(&q->q_usage_counter),
                                  timeout);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Keith Busch         32      100.00%   1        100.00%
Total               32      100.00%   1        100.00%

EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
        /*
         * In the !blk_mq case we are only calling this to kill the
         * q_usage_counter, otherwise this increases the freeze depth
         * and waits for it to return to zero. For this reason there is
         * no blk_unfreeze_queue(), and blk_freeze_queue() is not
         * exported to drivers as the only user for unfreeze is blk_mq.
         */
        blk_freeze_queue_start(q);
        if (!q->mq_ops)
                blk_drain_queue(q);
        blk_mq_freeze_queue_wait(q);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Tejun Heo           18      54.55%    1        25.00%
Ming Lei            12      36.36%    1        25.00%
Dan J Williams      2       6.06%     1        25.00%
Lei Ming            1       3.03%     1        25.00%
Total               33      100.00%   4        100.00%


void blk_mq_freeze_queue(struct request_queue *q)
{
        /*
         * ...just an alias to keep freeze and unfreeze actions balanced
         * in the blk_mq_* namespace
         */
        blk_freeze_queue(q);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Dan J Williams      16      100.00%   1        100.00%
Total               16      100.00%   1        100.00%

EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
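
blk_mq_freeze_queue() and its counterpart blk_mq_unfreeze_queue() just below are the exported pair drivers use to drain and block all I/O around a change to queue state. A hedged kernel-context sketch of the usual bracket (update_queue_configuration() is a hypothetical stand-in for whatever must happen while no requests are in flight):

/* Illustrative sketch (not from blk-mq.c): freezing a queue around a change. */
static void example_reconfigure(struct request_queue *q)
{
        blk_mq_freeze_queue(q);         /* waits for q_usage_counter to reach zero */

        /* No new requests can enter and none are in flight here. */
        update_queue_configuration(q);  /* hypothetical driver-specific work */

        blk_mq_unfreeze_queue(q);       /* reinits q_usage_counter and wakes waiters */
}
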
void blk_mq_unfreeze_queue(struct request_queue *q)
{
        int freeze_depth;

        freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
        WARN_ON_ONCE(freeze_depth < 0);
        if (!freeze_depth) {
                percpu_ref_reinit(&q->q_usage_counter);
                wake_up_all(&q->mq_freeze_wq);
        }
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          31      58.49%    1        20.00%
Tejun Heo           11      20.75%    2        40.00%
Christoph Hellwig   10      18.87%    1        20.00%
Dan J Williams      1       1.89%     1        20.00%
Total               53      100.00%   5        100.00%

EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
        blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Bart Van Assche     17      100.00%   2        100.00%
Total               17      100.00%   2        100.00%

EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent that the struct request end_io()
 * callback function is invoked. Once this function is returned, we make
 * sure no dispatch can happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;
        bool rcu = false;

        blk_mq_quiesce_queue_nowait(q);

        queue_for_each_hw_ctx(q, hctx, i) {
                if (hctx->flags & BLK_MQ_F_BLOCKING)
                        synchronize_srcu(hctx->srcu);
                else
                        rcu = true;
        }
        if (rcu)
                synchronize_rcu();
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Bart Van Assche     60      90.91%    1        25.00%
Ming Lei            5       7.58%     2        50.00%
Tejun Heo           1       1.52%     1        25.00%
Total               66      100.00%   4        100.00%

EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function recovers queue into the state before quiescing
 * which is done by blk_mq_quiesce_queue.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
        blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);

        /* dispatch requests which are inserted during quiescing */
        blk_mq_run_hw_queues(q, true);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Ming Lei            24      96.00%    3        75.00%
Bart Van Assche     1       4.00%     1        25.00%
Total               25      100.00%   4        100.00%

EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
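
Quiescing is weaker than freezing: it only guarantees that no dispatch (->queue_rq) is running once blk_mq_quiesce_queue() returns, while requests may still be allocated and inserted. A hedged sketch of the pairing as a driver might use it:

/* Illustrative sketch (not from blk-mq.c): a temporary dispatch-free window. */
static void example_quiesce_window(struct request_queue *q)
{
        blk_mq_quiesce_queue(q);        /* no dispatch can run after this returns */

        /* Safe to change state that the dispatch path reads. */

        blk_mq_unquiesce_queue(q);      /* clears QUEUE_FLAG_QUIESCED, reruns hw queues */
}
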
void blk_mq_wake_waiters(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;

        queue_for_each_hw_ctx(q, hctx, i)
                if (blk_mq_hw_queue_mapped(hctx))
                        blk_mq_tag_wakeup_all(hctx->tags, true);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          43      100.00%   1        100.00%
Total               43      100.00%   1        100.00%


bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
        return blk_mq_has_free_tags(hctx->tags);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          18      100.00%   1        100.00%
Total               18      100.00%   1        100.00%

EXPORT_SYMBOL(blk_mq_can_queue);
static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
                                          unsigned int tag, unsigned int op)
{
        struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
        struct request *rq = tags->static_rqs[tag];
        req_flags_t rq_flags = 0;

        if (data->flags & BLK_MQ_REQ_INTERNAL) {
                rq->tag = -1;
                rq->internal_tag = tag;
        } else {
                if (blk_mq_tag_busy(data->hctx)) {
                        rq_flags = RQF_MQ_INFLIGHT;
                        atomic_inc(&data->hctx->nr_active);
                }
                rq->tag = tag;
                rq->internal_tag = -1;
                data->hctx->tags->rqs[rq->tag] = rq;
        }

        /* csd/requeue_work/fifo_time is initialized before use */
        rq->q = data->q;
        rq->mq_ctx = data->ctx;
        rq->rq_flags = rq_flags;
        rq->cpu = -1;
        rq->cmd_flags = op;
        if (data->flags & BLK_MQ_REQ_PREEMPT)
                rq->rq_flags |= RQF_PREEMPT;
        if (blk_queue_io_stat(data->q))
                rq->rq_flags |= RQF_IO_STAT;
        INIT_LIST_HEAD(&rq->queuelist);
        INIT_HLIST_NODE(&rq->hash);
        RB_CLEAR_NODE(&rq->rb_node);
        rq->rq_disk = NULL;
        rq->part = NULL;
        rq->start_time_ns = ktime_get_ns();
        rq->io_start_time_ns = 0;
        rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        rq->nr_integrity_segments = 0;
#endif
        rq->special = NULL;
        /* tag was already set */
        rq->extra_len = 0;
        rq->__deadline = 0;

        INIT_LIST_HEAD(&rq->timeout_list);
        rq->timeout = 0;

        rq->end_io = NULL;
        rq->end_io_data = NULL;
        rq->next_rq = NULL;

#ifdef CONFIG_BLK_CGROUP
        rq->rl = NULL;
#endif

        data->ctx->rq_dispatched[op_is_sync(op)]++;
        refcount_set(&rq->ref, 1);
        return rq;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Christoph Hellwig   235     67.34%    6        31.58%
Jens Axboe          77      22.06%    7        36.84%
Bart Van Assche     16      4.58%     2        10.53%
Keith Busch         10      2.87%     1        5.26%
Omar Sandoval       9       2.58%     2        10.53%
Michael Christie    2       0.57%     1        5.26%
Total               349     100.00%   19       100.00%


static struct request *blk_mq_get_request(struct request_queue *q,
                                          struct bio *bio, unsigned int op,
                                          struct blk_mq_alloc_data *data)
{
        struct elevator_queue *e = q->elevator;
        struct request *rq;
        unsigned int tag;
        bool put_ctx_on_error = false;

        blk_queue_enter_live(q);
        data->q = q;
        if (likely(!data->ctx)) {
                data->ctx = blk_mq_get_ctx(q);
                put_ctx_on_error = true;
        }
        if (likely(!data->hctx))
                data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
        if (op & REQ_NOWAIT)
                data->flags |= BLK_MQ_REQ_NOWAIT;

        if (e) {
                data->flags |= BLK_MQ_REQ_INTERNAL;

                /*
                 * Flush requests are special and go directly to the
                 * dispatch list. Don't include reserved tags in the
                 * limiting, as it isn't useful.
                 */
                if (!op_is_flush(op) && e->type->ops.mq.limit_depth &&
                    !(data->flags & BLK_MQ_REQ_RESERVED))
                        e->type->ops.mq.limit_depth(op, data);
        }

        tag = blk_mq_get_tag(data);
        if (tag == BLK_MQ_TAG_FAIL) {
                if (put_ctx_on_error) {
                        blk_mq_put_ctx(data->ctx);
                        data->ctx = NULL;
                }
                blk_queue_exit(q);
                return NULL;
        }

        rq = blk_mq_rq_ctx_init(data, tag, op);
        if (!op_is_flush(op)) {
                rq->elv.icq = NULL;
                if (e && e->type->ops.mq.prepare_request) {
                        if (e->type->icq_cache && rq_ioc(bio))
                                blk_mq_sched_assign_ioc(rq, bio);

                        e->type->ops.mq.prepare_request(rq, bio);
                        rq->rq_flags |= RQF_ELVPRIV;
                }
        }
        data->hctx->queued++;
        return rq;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Christoph Hellwig   264     83.54%    5        55.56%
Ming Lei            17      5.38%     1        11.11%
Bart Van Assche     13      4.11%     1        11.11%
Goldwyn Rodrigues   12      3.80%     1        11.11%
Jens Axboe          10      3.16%     1        11.11%
Total               316     100.00%   9        100.00%


struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
                                     blk_mq_req_flags_t flags)
{
        struct blk_mq_alloc_data alloc_data = { .flags = flags };
        struct request *rq;
        int ret;

        ret = blk_queue_enter(q, flags);
        if (ret)
                return ERR_PTR(ret);

        rq = blk_mq_get_request(q, NULL, op, &alloc_data);
        blk_queue_exit(q);

        if (!rq)
                return ERR_PTR(-EWOULDBLOCK);

        blk_mq_put_ctx(alloc_data.ctx);

        rq->__data_len = 0;
        rq->__sector = (sector_t) -1;
        rq->bio = rq->biotail = NULL;
        return rq;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Christoph Hellwig   42      33.87%    5        31.25%
Jens Axboe          39      31.45%    3        18.75%
Joe Lawrence        20      16.13%    1        6.25%
Ming Lei            7       5.65%     1        6.25%
Keith Busch         6       4.84%     2        12.50%
Lei Ming            5       4.03%     1        6.25%
Bart Van Assche     4       3.23%     2        12.50%
Dan J Williams      1       0.81%     1        6.25%
Total               124     100.00%   16       100.00%

EXPORT_SYMBOL(blk_mq_alloc_request);
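
blk_mq_alloc_request() is the exported entry point for allocating a request outside the normal bio submission path; the request is later released with blk_mq_free_request() (defined further down). A hedged sketch of the call pattern (the op argument and error handling are illustrative; BLK_MQ_REQ_NOWAIT is the flag handled above for callers that must not sleep):

/* Illustrative sketch (not from blk-mq.c): allocating and freeing a request. */
static int example_alloc_one(struct request_queue *q, unsigned int op)
{
        struct request *rq;

        rq = blk_mq_alloc_request(q, op, BLK_MQ_REQ_NOWAIT);
        if (IS_ERR(rq))
                return PTR_ERR(rq);     /* e.g. -EWOULDBLOCK if no tag was free */

        /* ... set up and issue the request, or simply drop it again ... */

        blk_mq_free_request(rq);
        return 0;
}
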
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
        unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
        struct blk_mq_alloc_data alloc_data = { .flags = flags };
        struct request *rq;
        unsigned int cpu;
        int ret;

        /*
         * If the tag allocator sleeps we could get an allocation for a
         * different hardware context. No need to complicate the low level
         * allocator for this for the rare use case of a command tied to
         * a specific queue.
         */
        if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
                return ERR_PTR(-EINVAL);

        if (hctx_idx >= q->nr_hw_queues)
                return ERR_PTR(-EIO);

        ret = blk_queue_enter(q, flags);
        if (ret)
                return ERR_PTR(ret);

        /*
         * Check if the hardware context is actually mapped to anything.
         * If not tell the caller that it should skip this queue.
         */
        alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
        if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
                blk_queue_exit(q);
                return ERR_PTR(-EXDEV);
        }
        cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
        alloc_data.ctx = __blk_mq_get_ctx(q, cpu);

        rq = blk_mq_get_request(q, NULL, op, &alloc_data);
        blk_queue_exit(q);

        if (!rq)
                return ERR_PTR(-EWOULDBLOCK);

        return rq;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Ming Lin            116     59.79%    1        11.11%
Omar Sandoval       51      26.29%    1        11.11%
Christoph Hellwig   17      8.76%     3        33.33%
Bart Van Assche     5       2.58%     3        33.33%
Keith Busch         5       2.58%     1        11.11%
Total               194     100.00%   9        100.00%

EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
static void __blk_mq_free_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
        const int sched_tag = rq->internal_tag;

        if (rq->tag != -1)
                blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
        if (sched_tag != -1)
                blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
        blk_mq_sched_restart(hctx);
        blk_queue_exit(q);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Keith Busch         105     100.00%   1        100.00%
Total               105     100.00%   1        100.00%


void blk_mq_free_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

        if (rq->rq_flags & RQF_ELVPRIV) {
                if (e && e->type->ops.mq.finish_request)
                        e->type->ops.mq.finish_request(rq);
                if (rq->elv.icq) {
                        put_io_context(rq->elv.icq->ioc);
                        rq->elv.icq = NULL;
                }
        }

        ctx->rq_completed[rq_is_sync(rq)]++;
        if (rq->rq_flags & RQF_MQ_INFLIGHT)
                atomic_dec(&hctx->nr_active);

        if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
                laptop_io_completion(q->backing_dev_info);

        wbt_done(q->rq_wb, rq);

        if (blk_rq_rl(rq))
                blk_put_rl(blk_rq_rl(rq));

        WRITE_ONCE(rq->state, MQ_RQ_IDLE);
        if (refcount_dec_and_test(&rq->ref))
                __blk_mq_free_request(rq);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Christoph Hellwig   112     52.83%    3        23.08%
Jens Axboe          76      35.85%    7        53.85%
Shaohua Li          15      7.08%     1        7.69%
Keith Busch         8       3.77%     1        7.69%
Tejun Heo           1       0.47%     1        7.69%
Total               212     100.00%   13       100.00%

EXPORT_SYMBOL_GPL(blk_mq_free_request);
inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
        u64 now = ktime_get_ns();

        if (rq->rq_flags & RQF_STATS) {
                blk_mq_poll_stats_start(rq->q);
                blk_stat_add(rq, now);
        }

        blk_account_io_done(rq, now);

        if (rq->end_io) {
                wbt_done(rq->q->rq_wb, rq);
                rq->end_io(rq, error);
        } else {
                if (unlikely(blk_bidi_rq(rq)))
                        blk_mq_free_request(rq->next_rq);
                blk_mq_free_request(rq);
        }
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          42      40.38%    2        22.22%
Omar Sandoval       32      30.77%    2        22.22%
Christoph Hellwig   25      24.04%    4        44.44%
Lei Ming            5       4.81%     1        11.11%
Total               104     100.00%   9        100.00%

EXPORT_SYMBOL(__blk_mq_end_request);
void blk_mq_end_request(struct request *rq, blk_status_t error)
{
        if (blk_update_request(rq, error, blk_rq_bytes(rq)))
                BUG();
        __blk_mq_end_request(rq, error);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Christoph Hellwig   37      100.00%   3        100.00%
Total               37      100.00%   3        100.00%

EXPORT_SYMBOL(blk_mq_end_request);
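
A driver's completion path typically finishes a request with blk_mq_end_request(), which updates the data bytes via blk_update_request() and then runs __blk_mq_end_request(). A hedged sketch of such a handler (the error mapping is illustrative; BLK_STS_OK and BLK_STS_IOERR are generic blk_status_t values):

/* Illustrative sketch (not from blk-mq.c): completing a request from a driver. */
static void example_complete_rq(struct request *rq, bool hw_error)
{
        blk_status_t error = hw_error ? BLK_STS_IOERR : BLK_STS_OK;

        blk_mq_end_request(rq, error);  /* accounts the I/O and frees the request */
}
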
static void __blk_mq_complete_request_remote(void *data)
{
        struct request *rq = data;

        rq->q->softirq_done_fn(rq);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          18      69.23%    1        33.33%
Christoph Hellwig   8       30.77%    2        66.67%
Total               26      100.00%   3        100.00%


static void __blk_mq_complete_request(struct request *rq)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        bool shared = false;
        int cpu;

        if (!blk_mq_mark_complete(rq))
                return;
        if (rq->internal_tag != -1)
                blk_mq_sched_completed_request(rq);

        if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
                rq->q->softirq_done_fn(rq);
                return;
        }

        cpu = get_cpu();
        if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
                shared = cpus_share_cache(cpu, ctx->cpu);

        if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
                rq->csd.func = __blk_mq_complete_request_remote;
                rq->csd.info = rq;
                rq->csd.flags = 0;
                smp_call_function_single_async(ctx->cpu, &rq->csd);
        } else {
                rq->q->softirq_done_fn(rq);
        }
        put_cpu();
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Christoph Hellwig   110     61.11%    4        36.36%
Jens Axboe          60      33.33%    2        18.18%
Keith Busch         6       3.33%     2        18.18%
Tejun Heo           3       1.67%     2        18.18%
Frédéric Weisbecker 1       0.56%     1        9.09%
Total               180     100.00%   11       100.00%


static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
        __releases(hctx->srcu)
{
        if (!(hctx->flags & BLK_MQ_F_BLOCKING))
                rcu_read_unlock();
        else
                srcu_read_unlock(hctx->srcu, srcu_idx);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          25      56.82%    2        40.00%
Christoph Hellwig   13      29.55%    1        20.00%
Bart Van Assche     5       11.36%    1        20.00%
Tejun Heo           1       2.27%     1        20.00%
Total               44      100.00%   5        100.00%


static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
        __acquires(hctx->srcu)
{
        if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
                /* shut up gcc false positive */
                *srcu_idx = 0;
                rcu_read_lock();
        } else
                *srcu_idx = srcu_read_lock(hctx->srcu);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          38      70.37%    2        40.00%
Keith Busch         10      18.52%    1        20.00%
Bart Van Assche     5       9.26%     1        20.00%
Tejun Heo           1       1.85%     1        20.00%
Total               54      100.00%   5        100.00%
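
hctx_lock() and hctx_unlock() hide whether a hardware queue protects its dispatch side with plain RCU or with SRCU (BLK_MQ_F_BLOCKING). Callers inside this file bracket dispatch work with them and thread srcu_idx through; a hedged sketch of that pattern (the dispatch work itself is elided):

/* Illustrative sketch (not from blk-mq.c): the hctx_lock()/hctx_unlock() bracket. */
static void example_dispatch(struct blk_mq_hw_ctx *hctx)
{
        int srcu_idx;

        hctx_lock(hctx, &srcu_idx);     /* rcu_read_lock() or srcu_read_lock() */

        /* ... dispatch requests; quiescing cannot complete while we are here ... */

        hctx_unlock(hctx, srcu_idx);    /* matching rcu/srcu unlock */
}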

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq: the request being processed
 *
 * Description:
 *      Ends all I/O on a request. It does not handle partial completions.
 *      The actual completion happens out-of-order, through a IPI handler.
 **/
void blk_mq_complete_request(struct request *