Release 4.11 drivers/mmc/core/queue.c
/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#define MMC_QUEUE_BOUNCESZ 65536
/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->rq_flags |= RQF_DONTPREP;

	return BLKPREP_OK;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sujit Reddy Thumma | 23 | 41.82% | 1 | 20.00% |
Russell King | 22 | 40.00% | 1 | 20.00% |
Chuanxiao Dong | 7 | 12.73% | 1 | 20.00% |
Christoph Hellwig | 2 | 3.64% | 1 | 20.00% |
Pierre Ossman | 1 | 1.82% | 1 | 20.00% |
Total | 55 | 100.00% | 5 | 100.00% |
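For orientation, the prep hook's contract can be modeled outside the kernel: the block layer invokes the prep callback before dispatching a request, a KILL status ends the request with an error instead of issuing it, and RQF_DONTPREP marks the request so it is not prepared a second time. A minimal userspace sketch of that contract (the fake_request type and prep() helper are invented for illustration, not kernel API):

#include <stdio.h>
#include <stdbool.h>

enum prep_status { PREP_OK, PREP_KILL };	/* models BLKPREP_OK / BLKPREP_KILL */

struct fake_request { bool dont_prep; };	/* models RQF_DONTPREP */

/* Models mmc_prep_request(): reject when the device is inaccessible. */
static enum prep_status prep(struct fake_request *req, bool card_removed)
{
	if (card_removed)
		return PREP_KILL;	/* the dispatch loop ends the request with an error */
	req->dont_prep = true;		/* don't prepare the same request twice */
	return PREP_OK;
}

int main(void)
{
	struct fake_request req = { false };
	printf("card present: %s\n", prep(&req, false) == PREP_OK ? "dispatch" : "kill");
	printf("card removed: %s\n", prep(&req, true) == PREP_OK ? "dispatch" : "kill");
	return 0;
}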
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct mmc_context_info *cntx = &mq->card->host->context_info;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->asleep = false;
		cntx->is_waiting_last_req = false;
		cntx->is_new_req = false;
		if (!req) {
			/*
			 * Dispatch queue is empty so set flags for
			 * mmc_request_fn() to wake us up.
			 */
			if (mq->mqrq_prev->req)
				cntx->is_waiting_last_req = true;
			else
				mq->asleep = true;
		}
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			bool req_is_special = mmc_req_is_special(req);

			set_current_state(TASK_RUNNING);
			mmc_blk_issue_rq(mq, req);
			cond_resched();
			if (mq->new_request) {
				mq->new_request = false;
				continue; /* fetch again */
			}

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 * In case of special requests, current request
			 * has been finished. Do not assign it to previous
			 * request.
			 */
			if (req_is_special)
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			swap(mq->mqrq_prev, mq->mqrq_cur);
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Russell King | 101 | 34.71% | 4 | 23.53% |
Adrian Hunter | 70 | 24.05% | 2 | 11.76% |
Seungwon Jeon | 48 | 16.49% | 2 | 11.76% |
Per Forlin | 41 | 14.09% | 2 | 11.76% |
Konstantin Dorfman | 12 | 4.12% | 1 | 5.88% |
Juha Yrjölä | 6 | 2.06% | 1 | 5.88% |
Linus Walleij | 5 | 1.72% | 2 | 11.76% |
Fabian Frederick | 4 | 1.37% | 1 | 5.88% |
Rabin Vincent | 3 | 1.03% | 1 | 5.88% |
Tejun Heo | 1 | 0.34% | 1 | 5.88% |
Total | 291 | 100.00% | 17 | 100.00% |
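The swap of mqrq_prev and mqrq_cur at the end of each iteration is what gives the driver its two-slot pipeline: the request just issued becomes "previous" while the next one is fetched into the freed slot, so preparation of one request can overlap the hardware's work on another. A minimal userspace sketch of the same two-slot rotation (the slot type and request strings are invented for illustration):

#include <stdio.h>

struct slot { const char *req; };

int main(void)
{
	struct slot a = { NULL }, b = { NULL };
	struct slot *cur = &a, *prev = &b;
	const char *incoming[] = { "req1", "req2", "req3" };

	for (int i = 0; i < 3; i++) {
		cur->req = incoming[i];		/* fetch into the current slot */
		printf("issue %s (prev: %s)\n",
		       cur->req, prev->req ? prev->req : "none");
		prev->req = NULL;		/* previous request has finished */
		struct slot *tmp = cur;		/* swap(), as at the end of the loop */
		cur = prev;
		prev = tmp;
	}
	return 0;
}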
/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->rq_flags |= RQF_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;

	if (cntx->is_waiting_last_req) {
		cntx->is_new_req = true;
		wake_up_interruptible(&cntx->wait);
	}

	if (mq->asleep)
		wake_up_process(mq->thread);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Konstantin Dorfman | 38 | 33.63% | 1 | 7.69% |
Pierre Ossman | 30 | 26.55% | 1 | 7.69% |
Russell King | 25 | 22.12% | 1 | 7.69% |
Adrian Hunter | 7 | 6.19% | 2 | 15.38% |
Christoph Hellwig | 4 | 3.54% | 2 | 15.38% |
Jens Axboe | 2 | 1.77% | 1 | 7.69% |
Tejun Heo | 2 | 1.77% | 2 | 15.38% |
Per Forlin | 2 | 1.77% | 1 | 7.69% |
Kiyoshi Ueda | 2 | 1.77% | 1 | 7.69% |
Venkatraman Sathiyamoorthy | 1 | 0.88% | 1 | 7.69% |
Total | 113 | 100.00% | 13 | 100.00% |
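Both wake-ups follow the usual flag-then-wake pattern: mmc_request_fn() publishes that new work exists and wakes whichever waiter the flags indicate. A rough pthreads analogue of the is_new_req path (illustrative only; the kernel uses wait queues, wake_up_interruptible() and wake_up_process(), not condition variables):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int is_new_req;	/* models cntx->is_new_req */

static void *consumer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!is_new_req)			/* models the worker waiting for work */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	printf("woken: new request available\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, consumer, NULL);
	pthread_mutex_lock(&lock);
	is_new_req = 1;			/* models mmc_request_fn() setting the flag */
	pthread_cond_signal(&cond);	/* models wake_up_interruptible() */
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}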
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Per Forlin | 58 | 90.62% | 1 | 33.33% |
SF Markus Elfring | 5 | 7.81% | 1 | 33.33% |
Venkatraman Sathiyamoorthy | 1 | 1.56% | 1 | 33.33% |
Total | 64 | 100.00% | 3 | 100.00% |
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Adrian Hunter | 106 | 95.50% | 2 | 50.00% |
Jens Axboe | 4 | 3.60% | 1 | 25.00% |
Christoph Hellwig | 1 | 0.90% | 1 | 25.00% |
Total | 111 | 100.00% | 4 | 100.00% |
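The shift by 9 converts pref_erase from 512-byte sectors to bytes. A small standalone check of the arithmetic (the pref_erase value of 8192 sectors is a made-up example):

#include <stdio.h>

int main(void)
{
	unsigned int pref_erase = 8192;	/* hypothetical card: 8192-sector erase unit */
	unsigned long long granularity = (unsigned long long)pref_erase << 9;

	printf("discard granularity: %llu bytes (%llu MiB)\n",
	       granularity, granularity >> 20);
	return 0;
}

So a card preferring 8192-sector erase units advertises a 4 MiB discard granularity; had pref_erase exceeded max_discard, the granularity would instead be dropped to 0, per the comment in the function above.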
#ifdef CONFIG_MMC_BLOCK_BOUNCE
static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
					unsigned int bouncesz)
{
	int i;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->mqrq[i].bounce_buf)
			goto out_err;
	}

	return true;

out_err:
	while (--i >= 0) {
		kfree(mq->mqrq[i].bounce_buf);
		mq->mqrq[i].bounce_buf = NULL;
	}
	pr_warn("%s: unable to allocate bounce buffers\n",
		mmc_card_name(mq->card));
	return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Adrian Hunter | 118 | 100.00% | 2 | 100.00% |
Total | 118 | 100.00% | 2 | 100.00% |
static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
				      unsigned int bouncesz)
{
	int i, ret;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].sg = mmc_alloc_sg(1, &ret);
		if (ret)
			return ret;

		mq->mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
		if (ret)
			return ret;
	}

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Adrian Hunter | 90 | 100.00% | 2 | 100.00% |
Total | 90 | 100.00% | 2 | 100.00% |
#endif
static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
{
	int i, ret;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
		if (ret)
			return ret;
	}

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Adrian Hunter | 63 | 100.00% | 2 | 100.00% |
Total | 63 | 100.00% | 2 | 100.00% |
static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
{
	kfree(mqrq->bounce_sg);
	mqrq->bounce_sg = NULL;

	kfree(mqrq->sg);
	mqrq->sg = NULL;

	kfree(mqrq->bounce_buf);
	mqrq->bounce_buf = NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Adrian Hunter | 50 | 100.00% | 2 | 100.00% |
Total | 50 | 100.00% | 2 | 100.00% |
static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
{
	int i;

	for (i = 0; i < mq->qdepth; i++)
		mmc_queue_req_free_bufs(&mq->mqrq[i]);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Adrian Hunter | 40 | 100.00% | 2 | 100.00% |
Total | 40 | 100.00% | 2 | 100.00% |
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach to this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	bool bounce = false;
	int ret = -ENOMEM;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->qdepth = 2;
	mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
			   GFP_KERNEL);
	if (!mq->mqrq)
		goto blk_cleanup;
	mq->mqrq_cur = &mq->mqrq[0];
	mq->mqrq_prev = &mq->mqrq[1];
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512 &&
		    mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
			if (ret)
				goto cleanup_queue;
			bounce = true;
		}
	}
#endif

	if (!bounce) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		ret = mmc_queue_alloc_sgs(mq, host->max_segs);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto cleanup_queue;
	}

	return 0;

cleanup_queue:
	mmc_queue_reqs_free_bufs(mq);
	kfree(mq->mqrq);
	mq->mqrq = NULL;
blk_cleanup:
	blk_cleanup_queue(mq->queue);
	return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Russell King | 170 | 31.95% | 6 | 18.18% |
Pierre Ossman | 150 | 28.20% | 6 | 18.18% |
Adrian Hunter | 115 | 21.62% | 9 | 27.27% |
Per Forlin | 45 | 8.46% | 2 | 6.06% |
Christoph Hellwig | 15 | 2.82% | 1 | 3.03% |
Mike Snitzer | 9 | 1.69% | 1 | 3.03% |
Greg Kroah-Hartman | 9 | 1.69% | 1 | 3.03% |
Martin K. Petersen | 6 | 1.13% | 3 | 9.09% |
Santosh Shilimkar | 5 | 0.94% | 1 | 3.03% |
Ethan Du | 4 | 0.75% | 1 | 3.03% |
Thomas Gleixner | 3 | 0.56% | 1 | 3.03% |
Venkatraman Sathiyamoorthy | 1 | 0.19% | 1 | 3.03% |
Total | 532 | 100.00% | 33 | 100.00% |
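The bounce path in mmc_init_queue() only engages when the host can take a single segment, and bouncesz ends up as the minimum of the 64 KiB MMC_QUEUE_BOUNCESZ ceiling and three host limits; the queue's sector and segment limits are then derived from it. A standalone sketch of the clamping and the derived limits (the host limit values are invented for illustration):

#include <stdio.h>

#define MMC_QUEUE_BOUNCESZ 65536

int main(void)
{
	/* hypothetical single-segment host */
	unsigned int max_req_size = 131072, max_seg_size = 65536, max_blk_count = 64;
	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;

	if (bouncesz > max_req_size)
		bouncesz = max_req_size;
	if (bouncesz > max_seg_size)
		bouncesz = max_seg_size;
	if (bouncesz > max_blk_count * 512)
		bouncesz = max_blk_count * 512;	/* 64 * 512 = 32768 wins here */

	printf("bouncesz = %u bytes -> %u sectors, %u segments\n",
	       bouncesz, bouncesz / 512, bouncesz / 512);
	return 0;
}

With these example limits the host's block-count cap dominates, so the queue would advertise 64-sector transfers backed by a 32 KiB bounce buffer.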
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	mmc_queue_reqs_free_bufs(mq);
	kfree(mq->mqrq);
	mq->mqrq = NULL;

	mq->card = NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Adrian Hunter | 35 | 38.46% | 3 | 33.33% |
Pierre Ossman | 27 | 29.67% | 1 | 11.11% |
Russell King | 25 | 27.47% | 3 | 33.33% |
Per Forlin | 2 | 2.20% | 1 | 11.11% |
Jens Axboe | 2 | 2.20% | 1 | 11.11% |
Total | 91 | 100.00% | 9 | 100.00% |
EXPORT_SYMBOL(mmc_cleanup_queue);
/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!mq->suspended) {
		mq->suspended |= true;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Russell King | 64 | 92.75% | 1 | 33.33% |
Linus Walleij | 3 | 4.35% | 1 | 33.33% |
Jens Axboe | 2 | 2.90% | 1 | 33.33% |
Total | 69 | 100.00% | 3 | 100.00% |
/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->suspended) {
		mq->suspended = false;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Russell King | 62 | 91.18% | 1 | 33.33% |
Linus Walleij | 4 | 5.88% | 1 | 33.33% |
Jens Axboe | 2 | 2.94% | 1 | 33.33% |
Total | 68 | 100.00% | 3 | 100.00% |
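thread_sem is what makes suspend safe: mmc_queue_thread() holds the semaphore while it is doing work and releases it only around schedule(), so the down() in mmc_queue_suspend() cannot return while a request is mid-flight. A simplified POSIX-semaphore analogue (the worker body and timings are illustrative; exact output order depends on scheduling):

#include <semaphore.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static sem_t thread_sem;

static void *worker(void *arg)
{
	(void)arg;
	sem_wait(&thread_sem);	/* hold the gate while busy, as the mmc thread does */
	printf("worker: processing request\n");
	usleep(100 * 1000);
	printf("worker: idle, releasing gate\n");
	sem_post(&thread_sem);	/* models up(&mq->thread_sem) before schedule() */
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&thread_sem, 0, 1);
	pthread_create(&t, NULL, worker, NULL);
	usleep(10 * 1000);	/* give the worker a head start */
	sem_wait(&thread_sem);	/* models mmc_queue_suspend(): blocks until idle */
	printf("suspend: worker is parked, safe to suspend\n");
	sem_post(&thread_sem);	/* models mmc_queue_resume() */
	pthread_join(t, NULL);
	sem_destroy(&thread_sem);
	return 0;
}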
/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mqrq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pierre Ossman | 95 | 84.07% | 2 | 50.00% |
Per Forlin | 14 | 12.39% | 1 | 25.00% |
Jens Axboe | 4 | 3.54% | 1 | 25.00% |
Total | 113 | 100.00% | 4 | 100.00% |
/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pierre Ossman | 46 | 85.19% | 2 | 66.67% |
Per Forlin | 8 | 14.81% | 1 | 33.33% |
Total | 54 | 100.00% | 3 | 100.00% |
/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pierre Ossman | 46 | 85.19% | 2 | 66.67% |
Per Forlin | 8 | 14.81% | 1 | 33.33% |
Total | 54 | 100.00% | 3 | 100.00% |
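Taken together, mmc_queue_map_sg() and the two bounce hooks flatten a scattered request into one contiguous segment: the lengths of the bounce_sg entries are summed, sg_init_one() points a single entry at the bounce buffer, and the pre/post hooks do the actual copy for writes and reads respectively. A userspace model of the gather step (the seg type and buffer contents are invented):

#include <stdio.h>
#include <string.h>

/* models one bounce_sg entry */
struct seg { const char *buf; size_t len; };

int main(void)
{
	char page1[] = "hello ", page2[] = "world";
	struct seg sgl[] = { { page1, 6 }, { page2, 5 } };
	char bounce_buf[64];
	size_t buflen = 0, off = 0;

	/* mmc_queue_map_sg(): sum the segment lengths */
	for (size_t i = 0; i < 2; i++)
		buflen += sgl[i].len;

	/* mmc_queue_bounce_pre() (write path): gather into one buffer */
	for (size_t i = 0; i < 2; i++) {
		memcpy(bounce_buf + off, sgl[i].buf, sgl[i].len);
		off += sgl[i].len;
	}

	printf("one %zu-byte segment: \"%.*s\"\n", buflen, (int)buflen, bounce_buf);
	return 0;
}

The read path is the mirror image: mmc_queue_bounce_post() scatters the bounce buffer's contents back out to the request's pages after the host has filled it.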
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Adrian Hunter | 700 | 34.35% | 13 | 18.84% |
Russell King | 491 | 24.09% | 9 | 13.04% |
Pierre Ossman | 404 | 19.82% | 9 | 13.04% |
Per Forlin | 178 | 8.73% | 3 | 4.35% |
Konstantin Dorfman | 50 | 2.45% | 1 | 1.45% |
Seungwon Jeon | 48 | 2.36% | 2 | 2.90% |
Christoph Hellwig | 25 | 1.23% | 3 | 4.35% |
Sujit Reddy Thumma | 23 | 1.13% | 1 | 1.45% |
Jens Axboe | 19 | 0.93% | 3 | 4.35% |
Linus Walleij | 15 | 0.74% | 2 | 2.90% |
Greg Kroah-Hartman | 9 | 0.44% | 1 | 1.45% |
Mike Snitzer | 9 | 0.44% | 1 | 1.45% |
Santosh Shilimkar | 8 | 0.39% | 1 | 1.45% |
Ulf Hansson | 7 | 0.34% | 3 | 4.35% |
Chuanxiao Dong | 7 | 0.34% | 1 | 1.45% |
Tejun Heo | 6 | 0.29% | 3 | 4.35% |
Juha Yrjölä | 6 | 0.29% | 1 | 1.45% |
Martin K. Petersen | 6 | 0.29% | 3 | 4.35% |
SF Markus Elfring | 5 | 0.25% | 1 | 1.45% |
Fabian Frederick | 4 | 0.20% | 1 | 1.45% |
Ethan Du | 4 | 0.20% | 1 | 1.45% |
Rabin Vincent | 3 | 0.15% | 1 | 1.45% |
Thomas Gleixner | 3 | 0.15% | 1 | 1.45% |
Venkatraman Sathiyamoorthy | 3 | 0.15% | 2 | 2.90% |
Rafael J. Wysocki | 3 | 0.15% | 1 | 1.45% |
Kiyoshi Ueda | 2 | 0.10% | 1 | 1.45% |
Total | 2038 | 100.00% | 69 | 100.00% |