Release 4.7 drivers/mmc/card/queue.c
  
  
/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"
#define MMC_QUEUE_BOUNCESZ	65536
/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;
	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}
	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;
	req->cmd_flags |= REQ_DONTPREP;
	return BLKPREP_OK;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| russell king | 36 | 42.35% | 1 | 14.29% |
| sujit reddy thumma | 23 | 27.06% | 1 | 14.29% |
| adrian hunter | 10 | 11.76% | 1 | 14.29% |
| chuanxiao dong | 7 | 8.24% | 1 | 14.29% |
| pierre ossman | 4 | 4.71% | 1 | 14.29% |
| christoph hellwig | 4 | 4.71% | 1 | 14.29% |
| jens axboe | 1 | 1.18% | 1 | 14.29% |
| Total | 85 | 100.00% | 7 | 100.00% |
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	current->flags |= PF_MEMALLOC;
	down(&mq->thread_sem);
	do {
		struct request *req = NULL;
		unsigned int cmd_flags = 0;
		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);
		if (req || mq->mqrq_prev->req) {
			set_current_state(TASK_RUNNING);
			cmd_flags = req ? req->cmd_flags : 0;
			mq->issue_fn(mq, req);
			cond_resched();
			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
				continue; /* fetch again */
			}
			/*
			 * Current request becomes previous request
			 * and vice versa.
			 * In case of special requests, current request
			 * has been finished. Do not assign it to previous
			 * request.
			 */
			if (cmd_flags & MMC_REQ_SPECIAL_MASK)
				mq->mqrq_cur->req = NULL;
			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			swap(mq->mqrq_prev, mq->mqrq_cur);
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| russell king | 101 | 41.22% | 4 | 30.77% |
| seungwon jeon | 67 | 27.35% | 2 | 15.38% |
| per forlin | 44 | 17.96% | 2 | 15.38% |
| konstantin dorfman | 19 | 7.76% | 1 | 7.69% |
| juha yrjola | 6 | 2.45% | 1 | 7.69% |
| fabian frederick | 4 | 1.63% | 1 | 7.69% |
| rabin vincent | 3 | 1.22% | 1 | 7.69% |
| tejun heo | 1 | 0.41% | 1 | 7.69% |
| Total | 245 | 100.00% | 13 | 100.00% |
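The cur/prev pair of mmc_queue_req slots in mmc_queue_thread() is what lets the block driver keep one request in flight while the next one is fetched and prepared. As a rough illustration of that bookkeeping, the loop below is a minimal userspace sketch, not kernel code: the slot and request names are made up, special-request handling and sleeping are omitted, and only the fetch, issue, clear and swap sequence is modelled.

```c
/*
 * Illustrative userspace model of the two-slot bookkeeping in
 * mmc_queue_thread(): the next request is fetched into the "current"
 * slot while the "previous" slot may still hold an in-flight request,
 * and the two slots are swapped after each issue.  All names here are
 * hypothetical.
 */
#include <stdio.h>

struct slot { const char *req; };

static void issue(struct slot *cur, struct slot *prev)
{
	/* issue_fn() sees both the newly fetched and the in-flight request */
	printf("issue: cur=%s prev=%s\n",
	       cur->req ? cur->req : "-", prev->req ? prev->req : "-");
}

int main(void)
{
	const char *pending[] = { "req0", "req1", "req2", NULL };
	struct slot a = { NULL }, b = { NULL };
	struct slot *cur = &a, *prev = &b, *tmp;
	int i = 0;

	while (pending[i] || prev->req) {
		cur->req = pending[i];		/* blk_fetch_request() */
		if (pending[i])
			i++;
		issue(cur, prev);
		prev->req = NULL;		/* previous request is done */
		tmp = prev;			/* swap(mq->mqrq_prev, mq->mqrq_cur) */
		prev = cur;
		cur = tmp;
	}
	return 0;
}
```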
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	unsigned long flags;
	struct mmc_context_info *cntx;
	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}
	cntx = &mq->card->host->context_info;
	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
		/*
		 * New MMC request arrived when MMC thread may be
		 * blocked on the previous request to be complete
		 * with no current request fetched
		 */
		spin_lock_irqsave(&cntx->lock, flags);
		if (cntx->is_waiting_last_req) {
			cntx->is_new_req = true;
			wake_up_interruptible(&cntx->wait);
		}
		spin_unlock_irqrestore(&cntx->lock, flags);
	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| konstantin dorfman | 81 | 48.80% | 1 | 8.33% |
| pierre ossman | 30 | 18.07% | 1 | 8.33% |
| russell king | 29 | 17.47% | 1 | 8.33% |
| per forlin | 9 | 5.42% | 2 | 16.67% |
| adrian hunter | 8 | 4.82% | 1 | 8.33% |
| jens axboe | 2 | 1.20% | 1 | 8.33% |
| tejun heo | 2 | 1.20% | 2 | 16.67% |
| christoph hellwig | 2 | 1.20% | 1 | 8.33% |
| kiyoshi ueda | 2 | 1.20% | 1 | 8.33% |
| venkatraman sathiyamoorthy | 1 | 0.60% | 1 | 8.33% |
| Total | 166 | 100.00% | 12 | 100.00% |
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;
	sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}
	return sg;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| per forlin | 63 | 98.44% | 1 | 50.00% |
| venkatraman sathiyamoorthy | 1 | 1.56% | 1 | 50.00% |
| Total | 64 | 100.00% | 2 | 100.00% |
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;
	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| adrian hunter | 107 | 96.40% | 2 | 66.67% |
| jens axboe | 4 | 3.60% | 1 | 33.33% |
| Total | 111 | 100.00% | 3 | 100.00% |
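Note that card->pref_erase is measured in 512-byte sectors, so the shift by 9 in mmc_queue_setup_discard() converts it to bytes for discard_granularity, and the granularity is dropped to 0 when the preferred erase size exceeds the largest discard the card accepts. A small worked example of that arithmetic, with made-up card values:

```c
/*
 * Worked example (illustrative only, hypothetical card values) of the
 * discard limits set up by mmc_queue_setup_discard().
 */
#include <stdio.h>

int main(void)
{
	unsigned int pref_erase = 1024;		/* hypothetical card->pref_erase, in sectors */
	unsigned int max_discard = 65536;	/* hypothetical mmc_calc_max_discard(), in sectors */

	unsigned int granularity = pref_erase << 9;	/* sectors -> bytes */
	if (pref_erase > max_discard)
		granularity = 0;	/* granularity must not exceed max. discard */

	printf("discard granularity: %u bytes (%u KiB)\n",
	       granularity, granularity >> 10);
	printf("max discard: %u sectors\n", max_discard);
	return 0;
}
```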
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;
	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;
	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;
		bouncesz = MMC_QUEUE_BOUNCESZ;
		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;
		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				pr_warn("%s: unable to allocate bounce cur buffer\n",
					mmc_card_name(card));
			} else {
				mqrq_prev->bounce_buf =
						kmalloc(bouncesz, GFP_KERNEL);
				if (!mqrq_prev->bounce_buf) {
					pr_warn("%s: unable to allocate bounce prev buffer\n",
						mmc_card_name(card));
					kfree(mqrq_cur->bounce_buf);
					mqrq_cur->bounce_buf = NULL;
				}
			}
		}
		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);
			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;
			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;
			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif
	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}
	sema_init(&mq->thread_sem, 1);
	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}
	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;
 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;
	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pierre ossman | 236 | 32.11% | 6 | 20.00% |
| per forlin | 216 | 29.39% | 2 | 6.67% |
| russell king | 194 | 26.39% | 6 | 20.00% |
| adrian hunter | 28 | 3.81% | 4 | 13.33% |
| christoph hellwig | 16 | 2.18% | 1 | 3.33% |
| greg kroah-hartman | 9 | 1.22% | 1 | 3.33% |
| mike snitzer | 9 | 1.22% | 1 | 3.33% |
| martin k. petersen | 7 | 0.95% | 3 | 10.00% |
| santosh shilimkar | 5 | 0.68% | 1 | 3.33% |
| joe perches | 4 | 0.54% | 1 | 3.33% |
| ethan du | 4 | 0.54% | 1 | 3.33% |
| bhuvanesh surachari | 3 | 0.41% | 1 | 3.33% |
| thomas gleixner | 3 | 0.41% | 1 | 3.33% |
| venkatraman sathiyamoorthy | 1 | 0.14% | 1 | 3.33% |
| Total | 735 | 100.00% | 30 | 100.00% |
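The CONFIG_MMC_BLOCK_BOUNCE branch of mmc_init_queue() sizes a single bounce buffer by clamping MMC_QUEUE_BOUNCESZ against the host limits, then derives the queue limits from the result. Below is a small worked example of that arithmetic as a standalone program; the host values are made up for illustration (the real ones come from struct mmc_host).

```c
/*
 * Worked example (userspace, illustrative only) of the bounce buffer
 * sizing in mmc_init_queue(): MMC_QUEUE_BOUNCESZ is clamped to the
 * host's request, segment and block-count limits, and the queue limits
 * follow from the final size.  The host values are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned int bouncesz = 65536;		/* MMC_QUEUE_BOUNCESZ */
	unsigned int max_req_size = 524288;	/* hypothetical host->max_req_size */
	unsigned int max_seg_size = 65536;	/* hypothetical host->max_seg_size */
	unsigned int max_blk_count = 65535;	/* hypothetical host->max_blk_count */

	if (bouncesz > max_req_size)
		bouncesz = max_req_size;
	if (bouncesz > max_seg_size)
		bouncesz = max_seg_size;
	if (bouncesz > max_blk_count * 512)
		bouncesz = max_blk_count * 512;

	if (bouncesz > 512) {
		/* same derivation used for the blk_queue_max_* limits */
		printf("bounce buffer:    %u bytes\n", bouncesz);
		printf("max hw sectors:   %u (bouncesz / 512)\n", bouncesz / 512);
		printf("max segments:     %u (bouncesz / 512)\n", bouncesz / 512);
		printf("max segment size: %u bytes\n", bouncesz);
	}
	return 0;
}
```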
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);
	/* Then terminate our worker thread */
	kthread_stop(mq->thread);
	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;
	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;
	mq->card = NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| per forlin | 63 | 37.28% | 2 | 22.22% |
| pierre ossman | 51 | 30.18% | 2 | 22.22% |
| russell king | 33 | 19.53% | 3 | 33.33% |
| adrian hunter | 20 | 11.83% | 1 | 11.11% |
| jens axboe | 2 | 1.18% | 1 | 11.11% |
| Total | 169 | 100.00% | 9 | 100.00% |
EXPORT_SYMBOL(mmc_cleanup_queue);
int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
	int ret = 0;
	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_cur->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
			mmc_card_name(card));
		ret = -ENOMEM;
		goto out;
	}
	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_prev->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
			mmc_card_name(card));
		kfree(mqrq_cur->packed);
		mqrq_cur->packed = NULL;
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&mqrq_cur->packed->list);
	INIT_LIST_HEAD(&mqrq_prev->packed->list);
out:
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| seungwon jeon | 168 | 100.00% | 1 | 100.00% |
| Total | 168 | 100.00% | 1 | 100.00% |
void mmc_packed_clean(struct mmc_queue *mq)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
	kfree(mqrq_cur->packed);
	mqrq_cur->packed = NULL;
	kfree(mqrq_prev->packed);
	mqrq_prev->packed = NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| seungwon jeon | 62 | 100.00% | 1 | 100.00% |
| Total | 62 | 100.00% | 1 | 100.00% |
/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;
		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
		down(&mq->thread_sem);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| russell king | 71 | 97.26% | 1 | 50.00% |
| jens axboe | 2 | 2.74% | 1 | 50.00% |
| Total | 73 | 100.00% | 2 | 100.00% |
/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;
		up(&mq->thread_sem);
		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| russell king | 69 | 97.18% | 1 | 50.00% |
| jens axboe | 2 | 2.82% | 1 | 50.00% |
| Total | 71 | 100.00% | 2 | 100.00% |
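The suspend/resume pair above relies on thread_sem: mmc_queue_thread() holds the semaphore while processing requests and releases it only around schedule(), so the down() in mmc_queue_suspend() cannot return while a request is being issued. The following is a minimal userspace model of that handshake using POSIX threads and semaphores; the names and timings are hypothetical and it is not kernel code.

```c
/*
 * Illustrative userspace model of the thread_sem handshake used by
 * mmc_queue_suspend()/mmc_queue_resume(): the worker holds the
 * semaphore while "processing" and releases it only when idle, so the
 * suspender's sem_wait() blocks until no request is being processed.
 * Build with -lpthread.
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

static sem_t thread_sem;
static volatile int stop;

static void *worker(void *arg)
{
	sem_wait(&thread_sem);			/* down(&mq->thread_sem) */
	while (!stop) {
		/* ... fetch and issue requests while holding the semaphore ... */
		sem_post(&thread_sem);		/* idle: up(&mq->thread_sem) */
		usleep(1000);			/* schedule() */
		sem_wait(&thread_sem);		/* down(&mq->thread_sem) */
	}
	sem_post(&thread_sem);
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&thread_sem, 0, 1);
	pthread_create(&t, NULL, worker, NULL);

	/* "suspend": blocks until the worker is idle, then keeps it parked */
	sem_wait(&thread_sem);
	printf("suspended: worker cannot pick up new requests\n");

	/* "resume": let the worker run again */
	sem_post(&thread_sem);
	printf("resumed\n");

	stop = 1;
	pthread_join(t, NULL);
	sem_destroy(&thread_sem);
	return 0;
}
```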
static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
					    struct mmc_packed *packed,
					    struct scatterlist *sg,
					    enum mmc_packed_type cmd_type)
{
	struct scatterlist *__sg = sg;
	unsigned int sg_len = 0;
	struct request *req;
	if (mmc_packed_wr(cmd_type)) {
		unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
		unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
		unsigned int len, remain, offset = 0;
		u8 *buf = (u8 *)packed->cmd_hdr;
		remain = hdr_sz;
		do {
			len = min(remain, max_seg_sz);
			sg_set_buf(__sg, buf + offset, len);
			offset += len;
			remain -= len;
			sg_unmark_end(__sg++);
			sg_len++;
		} while (remain);
	}
	list_for_each_entry(req, &packed->list, queuelist) {
		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
		__sg = sg + (sg_len - 1);
		sg_unmark_end(__sg++);
	}
	sg_mark_end(sg + (sg_len - 1));
	return sg_len;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| seungwon jeon | 197 | 97.04% | 1 | 50.00% |
| dan williams | 6 | 2.96% | 1 | 50.00% |
| Total | 203 | 100.00% | 2 | 100.00% |
/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	enum mmc_packed_type cmd_type;
	int i;
	cmd_type = mqrq->cmd_type;
	if (!mqrq->bounce_buf) {
		if (mmc_packed_cmd(cmd_type))
			return mmc_queue_packed_map_sg(mq, mqrq->packed,
						       mqrq->sg, cmd_type);
		else
			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
	}
	BUG_ON(!mqrq->bounce_sg);
	if (mmc_packed_cmd(cmd_type))
		sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
						 mqrq->bounce_sg, cmd_type);
	else
		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
	mqrq->bounce_sg_len = sg_len;
	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;
	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
	return 1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pierre ossman | 102 | 56.04% | 2 | 40.00% |
| seungwon jeon | 61 | 33.52% | 1 | 20.00% |
| per forlin | 15 | 8.24% | 1 | 20.00% |
| jens axboe | 4 | 2.20% | 1 | 20.00% |
| Total | 182 | 100.00% | 5 | 100.00% |
/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;
	if (rq_data_dir(mqrq->req) != WRITE)
		return;
	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pierre ossman | 46 | 85.19% | 2 | 66.67% |
| per forlin | 8 | 14.81% | 1 | 33.33% |
| Total | 54 | 100.00% | 3 | 100.00% |
/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;
	if (rq_data_dir(mqrq->req) != READ)
		return;
	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pierre ossman | 46 | 85.19% | 2 | 66.67% |
| per forlin | 8 | 14.81% | 1 | 33.33% |
| Total | 54 | 100.00% | 3 | 100.00% |
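Together with mmc_queue_map_sg(), the two helpers above implement the bounce path: for hosts limited to a single segment, the scattered request data is gathered into one contiguous buffer before a write and scattered back after a read. Below is a minimal userspace model of that gather/scatter step; it is illustrative only, with hypothetical names, whereas the kernel uses sg_copy_to_buffer()/sg_copy_from_buffer() on the real scatterlists.

```c
/*
 * Illustrative userspace model of what the bounce helpers do: gather
 * scattered segments into one contiguous buffer before the transfer
 * (mmc_queue_bounce_pre) and scatter the data back afterwards
 * (mmc_queue_bounce_post).
 */
#include <stdio.h>
#include <string.h>

struct seg { char *buf; size_t len; };

/* gather: rough model of sg_copy_to_buffer() on the bounce_sg list */
static size_t bounce_pre(struct seg *sg, int nsegs, char *bounce)
{
	size_t off = 0;
	for (int i = 0; i < nsegs; i++) {
		memcpy(bounce + off, sg[i].buf, sg[i].len);
		off += sg[i].len;
	}
	return off;	/* single contiguous segment handed to the host */
}

/* scatter: rough model of sg_copy_from_buffer() after the transfer */
static void bounce_post(struct seg *sg, int nsegs, const char *bounce)
{
	size_t off = 0;
	for (int i = 0; i < nsegs; i++) {
		memcpy(sg[i].buf, bounce + off, sg[i].len);
		off += sg[i].len;
	}
}

int main(void)
{
	char a[4] = "abc", b[4] = "def";
	struct seg sg[2] = { { a, 3 }, { b, 3 } };
	char bounce[8] = { 0 };

	size_t len = bounce_pre(sg, 2, bounce);
	printf("contiguous %zu bytes: %.6s\n", len, bounce);

	bounce_post(sg, 2, bounce);	/* data unchanged here, shown for symmetry */
	return 0;
}
```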
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| russell king | 555 | 22.29% | 9 | 14.75% |
| seungwon jeon | 555 | 22.29% | 3 | 4.92% |
| pierre ossman | 525 | 21.08% | 10 | 16.39% |
| per forlin | 426 | 17.11% | 3 | 4.92% |
| adrian hunter | 174 | 6.99% | 6 | 9.84% |
| konstantin dorfman | 100 | 4.02% | 1 | 1.64% |
| christoph hellwig | 25 | 1.00% | 2 | 3.28% |
| sujit reddy thumma | 23 | 0.92% | 1 | 1.64% |
| jens axboe | 20 | 0.80% | 4 | 6.56% |
| mike snitzer | 9 | 0.36% | 1 | 1.64% |
| greg kroah-hartman | 9 | 0.36% | 1 | 1.64% |
| santosh shilimkar | 8 | 0.32% | 1 | 1.64% |
| martin k. petersen | 7 | 0.28% | 3 | 4.92% |
| chuanxiao dong | 7 | 0.28% | 1 | 1.64% |
| juha yrjola | 6 | 0.24% | 1 | 1.64% |
| tejun heo | 6 | 0.24% | 3 | 4.92% |
| dan williams | 6 | 0.24% | 1 | 1.64% |
| fabian frederick | 4 | 0.16% | 1 | 1.64% |
| ethan du | 4 | 0.16% | 1 | 1.64% |
| joe perches | 4 | 0.16% | 1 | 1.64% |
| rabin vincent | 3 | 0.12% | 1 | 1.64% |
| venkatraman sathiyamoorthy | 3 | 0.12% | 2 | 3.28% |
| thomas gleixner | 3 | 0.12% | 1 | 1.64% |
| bhuvanesh surachari | 3 | 0.12% | 1 | 1.64% |
| rafael j. wysocki | 3 | 0.12% | 1 | 1.64% |
| kiyoshi ueda | 2 | 0.08% | 1 | 1.64% |
| Total | 2490 | 100.00% | 61 | 100.00% |
  