cregit-Linux: how code gets into the kernel

Release 4.12: block/blk-mq-sched.c

Directory: block
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"


void blk_mq_sched_free_hctx_data(struct request_queue *q,
                                 void (*exit)(struct blk_mq_hw_ctx *))
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                if (exit && hctx->sched_data)
                        exit(hctx);
                kfree(hctx->sched_data);
                hctx->sched_data = NULL;
        }
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          65      100.00%   1        100.00%
Total               65      100.00%   1        100.00%

EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);
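
The helper above is exported for schedulers that keep per-hardware-queue state. As an illustration, here is a minimal, hypothetical sketch of how a scheduler's exit_sched hook might use it; the foo_* names and types are placeholders, not kernel API:

struct foo_hctx_data {
        struct list_head pending;       /* per-hctx work tracked by the scheduler */
};

struct foo_queue_data {
        struct request_queue *q;
};

/* Invoked for each hardware context that still has sched_data set;
 * the framework kfree()s hctx->sched_data after this returns. */
static void foo_exit_hctx_data(struct blk_mq_hw_ctx *hctx)
{
        struct foo_hctx_data *fd = hctx->sched_data;

        WARN_ON(!list_empty(&fd->pending));
}

static void foo_exit_sched(struct elevator_queue *e)
{
        struct foo_queue_data *fqd = e->elevator_data;

        blk_mq_sched_free_hctx_data(fqd->q, foo_exit_hctx_data);
        kfree(fqd);
}
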
static void __blk_mq_sched_assign_ioc(struct request_queue *q,
                                      struct request *rq,
                                      struct bio *bio,
                                      struct io_context *ioc)
{
        struct io_cq *icq;

        spin_lock_irq(q->queue_lock);
        icq = ioc_lookup_icq(ioc, q);
        spin_unlock_irq(q->queue_lock);

        if (!icq) {
                icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
                if (!icq)
                        return;
        }

        rq->elv.icq = icq;
        if (!blk_mq_sched_get_rq_priv(q, rq, bio)) {
                rq->rq_flags |= RQF_ELVPRIV;
                get_io_context(icq->ioc);
                return;
        }

        rq->elv.icq = NULL;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          115     94.26%    1        50.00%
Paolo Valente       7       5.74%     1        50.00%
Total               122     100.00%   2        100.00%


static void blk_mq_sched_assign_ioc(struct request_queue *q,
                                    struct request *rq, struct bio *bio)
{
        struct io_context *ioc;

        ioc = rq_ioc(bio);
        if (ioc)
                __blk_mq_sched_assign_ioc(q, rq, bio, ioc);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          46      95.83%    1        50.00%
Paolo Valente       2       4.17%     1        50.00%
Total               48      100.00%   2        100.00%

/*
 * Mark a hardware queue as needing a restart. For shared queues, maintain
 * a count of how many hardware queues are marked for restart.
 */
static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
        if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                return;

        if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
                struct request_queue *q = hctx->queue;

                if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                        atomic_inc(&q->shared_hctx_restart);
        } else
                set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          75      100.00%   1        100.00%
Total               75      100.00%   1        100.00%


static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
        if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                return false;

        if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
                struct request_queue *q = hctx->queue;

                if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                        atomic_dec(&q->shared_hctx_restart);
        } else
                clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

        if (blk_mq_hctx_has_pending(hctx)) {
                blk_mq_run_hw_queue(hctx, true);
                return true;
        }

        return false;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          99      100.00%   1        100.00%
Total               99      100.00%   1        100.00%


struct request *blk_mq_sched_get_request(struct request_queue *q,
                                         struct bio *bio,
                                         unsigned int op,
                                         struct blk_mq_alloc_data *data)
{
        struct elevator_queue *e = q->elevator;
        struct request *rq;

        blk_queue_enter_live(q);
        data->q = q;
        if (likely(!data->ctx))
                data->ctx = blk_mq_get_ctx(q);
        if (likely(!data->hctx))
                data->hctx = blk_mq_map_queue(q, data->ctx->cpu);

        if (e) {
                data->flags |= BLK_MQ_REQ_INTERNAL;

                /*
                 * Flush requests are special and go directly to the
                 * dispatch list.
                 */
                if (!op_is_flush(op) && e->type->ops.mq.get_request) {
                        rq = e->type->ops.mq.get_request(q, op, data);
                        if (rq)
                                rq->rq_flags |= RQF_QUEUED;
                } else
                        rq = __blk_mq_alloc_request(data, op);
        } else {
                rq = __blk_mq_alloc_request(data, op);
        }

        if (rq) {
                if (!op_is_flush(op)) {
                        rq->elv.icq = NULL;
                        if (e && e->type->icq_cache)
                                blk_mq_sched_assign_ioc(q, rq, bio);
                }
                data->hctx->queued++;
                return rq;
        }

        blk_queue_exit(q);
        return NULL;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          200     83.33%    2        50.00%
Omar Sandoval       32      13.33%    1        25.00%
Christoph Hellwig   8       3.33%     1        25.00%
Total               240     100.00%   4        100.00%


void blk_mq_sched_put_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (rq->rq_flags & RQF_ELVPRIV) {
                blk_mq_sched_put_rq_priv(rq->q, rq);
                if (rq->elv.icq) {
                        put_io_context(rq->elv.icq->ioc);
                        rq->elv.icq = NULL;
                }
        }

        if ((rq->rq_flags & RQF_QUEUED) && e && e->type->ops.mq.put_request)
                e->type->ops.mq.put_request(rq);
        else
                blk_mq_finish_request(rq);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          117     100.00%   1        100.00%
Total               117     100.00%   1        100.00%


void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct elevator_queue *e = q->elevator;
        const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
        bool did_work = false;
        LIST_HEAD(rq_list);

        if (unlikely(blk_mq_hctx_stopped(hctx)))
                return;

        hctx->run++;

        /*
         * If we have previous entries on our dispatch list, grab them first for
         * more fair dispatch.
         */
        if (!list_empty_careful(&hctx->dispatch)) {
                spin_lock(&hctx->lock);
                if (!list_empty(&hctx->dispatch))
                        list_splice_init(&hctx->dispatch, &rq_list);
                spin_unlock(&hctx->lock);
        }

        /*
         * Only ask the scheduler for requests, if we didn't have residual
         * requests from the dispatch list. This is to avoid the case where
         * we only ever dispatch a fraction of the requests available because
         * of low device queue depth. Once we pull requests out of the IO
         * scheduler, we can no longer merge or sort them. So it's best to
         * leave them there for as long as we can. Mark the hw queue as
         * needing a restart in that case.
         */
        if (!list_empty(&rq_list)) {
                blk_mq_sched_mark_restart_hctx(hctx);
                did_work = blk_mq_dispatch_rq_list(q, &rq_list);
        } else if (!has_sched_dispatch) {
                blk_mq_flush_busy_ctxs(hctx, &rq_list);
                blk_mq_dispatch_rq_list(q, &rq_list);
        }

        /*
         * We want to dispatch from the scheduler if we had no work left
         * on the dispatch list, OR if we did have work but weren't able
         * to make progress.
         */
        if (!did_work && has_sched_dispatch) {
                do {
                        struct request *rq;

                        rq = e->type->ops.mq.dispatch_request(hctx);
                        if (!rq)
                                break;
                        list_add(&rq->queuelist, &rq_list);
                } while (blk_mq_dispatch_rq_list(q, &rq_list));
        }
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          221     94.44%    3        60.00%
Omar Sandoval       13      5.56%     2        40.00%
Total               234     100.00%   5        100.00%
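
The dispatch loop above pulls one request per call from the scheduler and stops when the hook returns NULL or the driver can make no further progress. A minimal, hypothetical FIFO implementation of the dispatch_request hook might look like this (the foo_* names, the per-scheduler lock, and the list are assumptions, not kernel API):

struct foo_data {
        spinlock_t lock;
        struct list_head fifo;
};

/* Hypothetical .dispatch_request hook: hand back one queued request,
 * or NULL, which terminates the loop in blk_mq_sched_dispatch_requests(). */
static struct request *foo_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
        struct foo_data *fd = hctx->queue->elevator->elevator_data;
        struct request *rq = NULL;

        spin_lock(&fd->lock);
        if (!list_empty(&fd->fifo)) {
                rq = list_first_entry(&fd->fifo, struct request, queuelist);
                list_del_init(&rq->queuelist);
        }
        spin_unlock(&fd->lock);
        return rq;
}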


bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
                            struct request **merged_request)
{
        struct request *rq;

        switch (elv_merge(q, &rq, bio)) {
        case ELEVATOR_BACK_MERGE:
                if (!blk_mq_sched_allow_merge(q, rq, bio))
                        return false;
                if (!bio_attempt_back_merge(q, rq, bio))
                        return false;
                *merged_request = attempt_back_merge(q, rq);
                if (!*merged_request)
                        elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
                return true;
        case ELEVATOR_FRONT_MERGE:
                if (!blk_mq_sched_allow_merge(q, rq, bio))
                        return false;
                if (!bio_attempt_front_merge(q, rq, bio))
                        return false;
                *merged_request = attempt_front_merge(q, rq);
                if (!*merged_request)
                        elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
                return true;
        default:
                return false;
        }
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          147     88.55%    2        66.67%
Christoph Hellwig   19      11.45%    1        33.33%
Total               166     100.00%   3        100.00%

EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
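
Schedulers typically call blk_mq_sched_try_merge() from their bio_merge hook while holding their own lock, then free any request that a successful back/front merge made redundant. A sketch modeled on the mq-deadline pattern, with hypothetical foo_* names and a per-scheduler spinlock assumed:

static bool foo_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
{
        struct request_queue *q = hctx->queue;
        struct foo_data *fd = q->elevator->elevator_data;
        struct request *free = NULL;
        bool ret;

        spin_lock(&fd->lock);
        ret = blk_mq_sched_try_merge(q, bio, &free);
        spin_unlock(&fd->lock);

        /* A successful merge can leave one request fully absorbed into
         * another; the caller is responsible for freeing it. */
        if (free)
                blk_mq_free_request(free);

        return ret;
}
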
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.mq.bio_merge) {
                struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
                struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

                blk_mq_put_ctx(ctx);
                return e->type->ops.mq.bio_merge(hctx, bio);
        }

        return false;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          86      100.00%   1        100.00%
Total               86      100.00%   1        100.00%


bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
        return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          28      100.00%   1        100.00%
Total               28      100.00%   1        100.00%

EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
void blk_mq_sched_request_inserted(struct request *rq)
{
        trace_block_rq_insert(rq->q, rq);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          19      100.00%   1        100.00%
Total               19      100.00%   1        100.00%

EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);
static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
                                       struct request *rq)
{
        if (rq->tag == -1) {
                rq->rq_flags |= RQF_SORTED;
                return false;
        }

        /*
         * If we already have a real request tag, send directly to
         * the dispatch list.
         */
        spin_lock(&hctx->lock);
        list_add(&rq->queuelist, &hctx->dispatch);
        spin_unlock(&hctx->lock);
        return true;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          68      98.55%    1        50.00%
Omar Sandoval       1       1.45%     1        50.00%
Total               69      100.00%   2        100.00%

/**
 * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
 * @pos:    loop cursor.
 * @skip:   the list element that will not be examined. Iteration starts at
 *          @skip->next.
 * @head:   head of the list to examine. This list must have at least one
 *          element, namely @skip.
 * @member: name of the list_head structure within typeof(*pos).
 */
#define list_for_each_entry_rcu_rr(pos, skip, head, member)            \
        for ((pos) = (skip);                                           \
             (pos = (pos)->member.next != (head) ? list_entry_rcu(     \
                        (pos)->member.next, typeof(*pos), member) :    \
                        list_entry_rcu((pos)->member.next->next,       \
                                       typeof(*pos), member)),         \
             (pos) != (skip); )

/*
 * Called after a driver tag has been freed to check whether a hctx needs to
 * be restarted. Restarts @hctx if its tag set is not shared. Restarts hardware
 * queues in a round-robin fashion if the tag set of @hctx is shared with other
 * hardware queues.
 */
void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
{
        struct blk_mq_tags *const tags = hctx->tags;
        struct blk_mq_tag_set *const set = hctx->queue->tag_set;
        struct request_queue *const queue = hctx->queue, *q;
        struct blk_mq_hw_ctx *hctx2;
        unsigned int i, j;

        if (set->flags & BLK_MQ_F_TAG_SHARED) {
                /*
                 * If this is 0, then we know that no hardware queues
                 * have RESTART marked. We're done.
                 */
                if (!atomic_read(&queue->shared_hctx_restart))
                        return;

                rcu_read_lock();
                list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
                                           tag_set_list) {
                        queue_for_each_hw_ctx(q, hctx2, i)
                                if (hctx2->tags == tags &&
                                    blk_mq_sched_restart_hctx(hctx2))
                                        goto done;
                }
                j = hctx->queue_num + 1;
                for (i = 0; i < queue->nr_hw_queues; i++, j++) {
                        if (j == queue->nr_hw_queues)
                                j = 0;
                        hctx2 = queue->queue_hw_ctx[j];
                        if (hctx2->tags == tags &&
                            blk_mq_sched_restart_hctx(hctx2))
                                break;
                }
done:
                rcu_read_unlock();
        } else {
                blk_mq_sched_restart_hctx(hctx);
        }
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Bart Van Assche     131     67.18%    1        25.00%
Jens Axboe          50      25.64%    2        50.00%
Omar Sandoval       14      7.18%     1        25.00%
Total               195     100.00%   4        100.00%
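
The round-robin iterator is the subtle piece here: starting at @skip->next, it visits every other element exactly once, stepping over the list head when it wraps, and never visits @skip itself. A small, self-contained sketch of its behavior with a hypothetical item type:

struct item {
        struct list_head node;
        int id;
};

/* Visit all peers of @self on @head exactly once, starting just after
 * @self and wrapping past the head; @self itself is never visited. */
static void visit_peers(struct item *self, struct list_head *head)
{
        struct item *pos;

        rcu_read_lock();
        list_for_each_entry_rcu_rr(pos, self, head, node)
                pr_info("visiting peer %d\n", pos->id);
        rcu_read_unlock();
}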

/*
 * Add flush/fua to the queue. If we fail getting a driver tag, then
 * punt to the requeue list. Requeue will re-invoke us from a context
 * that's safe to block from.
 */
static void blk_mq_sched_insert_flush(struct blk_mq_hw_ctx *hctx,
                                      struct request *rq, bool can_block)
{
        if (blk_mq_get_driver_tag(rq, &hctx, can_block)) {
                blk_insert_flush(rq);
                blk_mq_run_hw_queue(hctx, true);
        } else
                blk_mq_add_to_requeue_list(rq, false, true);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          55      100.00%   2        100.00%
Total               55      100.00%   2        100.00%


void blk_mq_sched_insert_request(struct request *rq, bool at_head,
                                 bool run_queue, bool async, bool can_block)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

        if (rq->tag == -1 && op_is_flush(rq->cmd_flags)) {
                blk_mq_sched_insert_flush(hctx, rq, can_block);
                return;
        }

        if (e && blk_mq_sched_bypass_insert(hctx, rq))
                goto run;

        if (e && e->type->ops.mq.insert_requests) {
                LIST_HEAD(list);

                list_add(&rq->queuelist, &list);
                e->type->ops.mq.insert_requests(hctx, &list, at_head);
        } else {
                spin_lock(&ctx->lock);
                __blk_mq_insert_request(hctx, rq, at_head);
                spin_unlock(&ctx->lock);
        }

run:
        if (run_queue)
                blk_mq_run_hw_queue(hctx, async);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          180     91.84%    2        66.67%
Omar Sandoval       16      8.16%     1        33.33%
Total               196     100.00%   3        100.00%


void blk_mq_sched_insert_requests(struct request_queue *q,
                                  struct blk_mq_ctx *ctx,
                                  struct list_head *list, bool run_queue_async)
{
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
        struct elevator_queue *e = hctx->queue->elevator;

        if (e) {
                struct request *rq, *next;

                /*
                 * We bypass requests that already have a driver tag assigned,
                 * which should only be flushes. Flushes are only ever inserted
                 * as single requests, so we shouldn't ever hit the
                 * WARN_ON_ONCE() below (but let's handle it just in case).
                 */
                list_for_each_entry_safe(rq, next, list, queuelist) {
                        if (WARN_ON_ONCE(rq->tag != -1)) {
                                list_del_init(&rq->queuelist);
                                blk_mq_sched_bypass_insert(hctx, rq);
                        }
                }
        }

        if (e && e->type->ops.mq.insert_requests)
                e->type->ops.mq.insert_requests(hctx, list, false);
        else
                blk_mq_insert_requests(hctx, ctx, list);

        blk_mq_run_hw_queue(hctx, run_queue_async);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          96      63.16%    1        50.00%
Omar Sandoval       56      36.84%    1        50.00%
Total               152     100.00%   2        100.00%


static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
                                   struct blk_mq_hw_ctx *hctx,
                                   unsigned int hctx_idx)
{
        if (hctx->sched_tags) {
                blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
                blk_mq_free_rq_map(hctx->sched_tags);
                hctx->sched_tags = NULL;
        }
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          52      100.00%   1        100.00%
Total               52      100.00%   1        100.00%


static int blk_mq_sched_alloc_tags(struct request_queue *q,
                                   struct blk_mq_hw_ctx *hctx,
                                   unsigned int hctx_idx)
{
        struct blk_mq_tag_set *set = q->tag_set;
        int ret;

        hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
                                               set->reserved_tags);
        if (!hctx->sched_tags)
                return -ENOMEM;

        ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
        if (ret)
                blk_mq_sched_free_tags(set, hctx, hctx_idx);

        return ret;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          76      80.00%    1        33.33%
Omar Sandoval       16      16.84%    1        33.33%
Sagi Grimberg       3       3.16%     1        33.33%
Total               95      100.00%   3        100.00%


static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
        struct blk_mq_tag_set *set = q->tag_set;
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_sched_free_tags(set, hctx, i);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          43      95.56%    1        50.00%
Omar Sandoval       2       4.44%     1        50.00%
Total               45      100.00%   2        100.00%


int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
                           unsigned int hctx_idx)
{
        struct elevator_queue *e = q->elevator;
        int ret;

        if (!e)
                return 0;

        ret = blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
        if (ret)
                return ret;

        if (e->type->ops.mq.init_hctx) {
                ret = e->type->ops.mq.init_hctx(hctx, hctx_idx);
                if (ret) {
                        blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
                        return ret;
                }
        }

        blk_mq_debugfs_register_sched_hctx(q, hctx);

        return 0;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Omar Sandoval       118     100.00%   3        100.00%
Total               118     100.00%   3        100.00%


void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
                            unsigned int hctx_idx)
{
        struct elevator_queue *e = q->elevator;

        if (!e)
                return;

        blk_mq_debugfs_unregister_sched_hctx(hctx);

        if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
                e->type->ops.mq.exit_hctx(hctx, hctx_idx);
                hctx->sched_data = NULL;
        }

        blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Omar Sandoval       89      100.00%   3        100.00%
Total               89      100.00%   3        100.00%


int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
        struct blk_mq_hw_ctx *hctx;
        struct elevator_queue *eq;
        unsigned int i;
        int ret;

        if (!e) {
                q->elevator = NULL;
                return 0;
        }

        /*
         * Default to 256, since we don't split into sync/async like the
         * old code did. Additionally, this is a per-hw queue depth.
         */
        q->nr_requests = 2 * BLKDEV_MAX_RQ;

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_sched_alloc_tags(q, hctx, i);
                if (ret)
                        goto err;
        }

        ret = e->ops.mq.init_sched(q, e);
        if (ret)
                goto err;

        blk_mq_debugfs_register_sched(q);

        queue_for_each_hw_ctx(q, hctx, i) {
                if (e->ops.mq.init_hctx) {
                        ret = e->ops.mq.init_hctx(hctx, i);
                        if (ret) {
                                eq = q->elevator;
                                blk_mq_exit_sched(q, eq);
                                kobject_put(&eq->kobj);
                                return ret;
                        }
                }
                blk_mq_debugfs_register_sched_hctx(q, hctx);
        }

        return 0;

err:
        blk_mq_sched_tags_teardown(q);
        q->elevator = NULL;
        return ret;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Omar Sandoval       205     100.00%   4        100.00%
Total               205     100.00%   4        100.00%
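
blk_mq_init_sched() drives a fixed sequence of elevator hooks: per-hctx sched_tags allocation, one init_sched call per queue, then init_hctx per hardware queue, unwinding through blk_mq_exit_sched() if any step fails. For orientation, here is a hypothetical skeleton of the operations table a 4.12-era blk-mq scheduler would register with elv_register(); the foo_* symbols are placeholders for hooks defined elsewhere:

static struct elevator_type foo_sched = {
        .ops.mq = {
                .init_sched       = foo_init_sched,       /* once per queue */
                .exit_sched       = foo_exit_sched,
                .init_hctx        = foo_init_hctx,        /* per hardware queue */
                .exit_hctx        = foo_exit_hctx,
                .insert_requests  = foo_insert_requests,
                .dispatch_request = foo_dispatch_request,
                .bio_merge        = foo_bio_merge,
        },
        .uses_mq        = true,
        .elevator_name  = "foo",
        .elevator_owner = THIS_MODULE,
};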


void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                blk_mq_debugfs_unregister_sched_hctx(hctx);
                if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
                        e->type->ops.mq.exit_hctx(hctx, i);
                        hctx->sched_data = NULL;
                }
        }
        blk_mq_debugfs_unregister_sched(q);
        if (e->type->ops.mq.exit_sched)
                e->type->ops.mq.exit_sched(e);
        blk_mq_sched_tags_teardown(q);
        q->elevator = NULL;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Omar Sandoval       119     100.00%   3        100.00%
Total               119     100.00%   3        100.00%


int blk_mq_sched_init(struct request_queue *q)
{
        int ret;

        mutex_lock(&q->sysfs_lock);
        ret = elevator_init(q, NULL);
        mutex_unlock(&q->sysfs_lock);

        return ret;
}

Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          41      100.00%   1        100.00%
Total               41      100.00%   1        100.00%


Overall Contributors

Person              Tokens  Prop      Commits  CommitProp
Jens Axboe          1929    68.92%    11       44.00%
Omar Sandoval       684     24.44%    9        36.00%
Bart Van Assche     147     5.25%     1        4.00%
Christoph Hellwig   27      0.96%     2        8.00%
Paolo Valente       9       0.32%     1        4.00%
Sagi Grimberg       3       0.11%     1        4.00%
Total               2799    100.00%   25       100.00%
Created with cregit.