Release 4.7 drivers/mmc/card/block.c
  
  
/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>
#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <asm/uaccess.h>
#include "queue.h"
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
#define INAND_CMD38_ARG_EXT_CSD  113
#define INAND_CMD38_ARG_ERASE    0x00
#define INAND_CMD38_ARG_TRIM     0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
#define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
#define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \
                                  (rq_data_dir(req) == WRITE))
#define PACKED_CMD_VER	0x01
#define PACKED_CMD_WR	0x02
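
A note on the macros above: the argument of an eMMC SWITCH (CMD6) command packs the access mode in bits [25:24], the EXT_CSD byte index in bits [23:16] and the value in bits [15:8], which is what MMC_EXTRACT_INDEX_FROM_ARG() undoes. A minimal sketch, assuming the spec's 0x03 ("write byte") access mode; illustrative only, not part of the driver:

	u32 arg = (0x03 << 24) |			/* access mode: write byte */
		  (EXT_CSD_SANITIZE_START << 16) |	/* EXT_CSD index 165 */
		  (0x01 << 8);				/* value to write */
	/* MMC_EXTRACT_INDEX_FROM_ARG(arg) == EXT_CSD_SANITIZE_START */
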
static DEFINE_MUTEX(block_mutex);
/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
/*
 * We've only got one major, so number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device.  It is also
 * limited by the MAX_DEVICES below.
 */
static int max_devices;
#define MAX_DEVICES 256
static DEFINE_IDA(mmc_blk_ida);
static DEFINE_SPINLOCK(mmc_blk_lock);
/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;
	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
#define MMC_BLK_PACKED_CMD	(1 << 2)	/* MMC packed command support */
	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)

	/*
         * Only set in main mmc_blk_data associated
         * with mmc_card with dev_set_drvdata, and keeps
         * track of the current selected device partition.
         */
	unsigned int	part_curr;
	struct device_attribute force_ro;
	struct device_attribute power_ro_lock;
	int	area_type;
};
static DEFINE_MUTEX(open_lock);
enum {
	MMC_PACKED_NR_IDX = -1,
	MMC_PACKED_NR_ZERO,
	MMC_PACKED_NR_SINGLE,
};
module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");
static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md);
static int get_card_status(struct mmc_card *card, u32 *status, int retries);
static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
{
	struct mmc_packed *packed = mqrq->packed;
	BUG_ON(!packed);
	mqrq->cmd_type = MMC_PACKED_NONE;
	packed->nr_entries = MMC_PACKED_NR_ZERO;
	packed->idx_failure = MMC_PACKED_NR_IDX;
	packed->retries = 0;
	packed->blocks = 0;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| seungwon jeon | 57 | 100.00% | 1 | 100.00% |
| Total | 57 | 100.00% | 1 | 100.00% |
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;
	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);
	return md;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| russell king | 60 | 96.77% | 1 | 50.00% |
| arjan van de ven | 2 | 3.23% | 1 | 50.00% |
| Total | 62 | 100.00% | 2 | 100.00% |
static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devidx = disk->first_minor / perdev_minors;
	return devidx;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| andrei warkentin | 9 | 37.50% | 1 | 16.67% |
| russell king | 6 | 25.00% | 1 | 16.67% |
| david woodhouse | 3 | 12.50% | 1 | 16.67% |
| anna lemehova | 3 | 12.50% | 1 | 16.67% |
| olof johansson | 2 | 8.33% | 1 | 16.67% |
| colin cross | 1 | 4.17% | 1 | 16.67% |
| Total | 24 | 100.00% | 6 | 100.00% |
static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
		blk_cleanup_queue(md->queue.queue);
		spin_lock(&mmc_blk_lock);
		ida_remove(&mmc_blk_ida, devidx);
		spin_unlock(&mmc_blk_lock);
		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| andrei warkentin | 40 | 44.94% | 1 | 16.67% |
| russell king | 19 | 21.35% | 1 | 16.67% |
| ulf hansson | 16 | 17.98% | 1 | 16.67% |
| adrian hunter | 9 | 10.11% | 1 | 16.67% |
| david woodhouse | 4 | 4.49% | 1 | 16.67% |
| arjan van de ven | 1 | 1.12% | 1 | 16.67% |
| Total | 89 | 100.00% | 6 | 100.00% |
static ssize_t power_ro_lock_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card = md->queue.card;
	int locked = 0;
	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
		locked = 1;
	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
	mmc_blk_put(md);
	return ret;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| johan rudholm | 97 | 95.10% | 1 | 50.00% |
| tomas winkler | 5 | 4.90% | 1 | 50.00% |
| Total | 102 | 100.00% | 2 | 100.00% |
static ssize_t power_ro_lock_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_card *card;
	unsigned long set;
	if (kstrtoul(buf, 0, &set))
		return -EINVAL;
	if (set != 1)
		return count;
	md = mmc_blk_get(dev_to_disk(dev));
	card = md->queue.card;
	mmc_get_card(card);
	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
				card->ext_csd.boot_ro_lock |
				EXT_CSD_BOOT_WP_B_PWR_WP_EN,
				card->ext_csd.part_time);
	if (ret)
		pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
	else
		card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
	mmc_put_card(card);
	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);
		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}
	mmc_blk_put(md);
	return count;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| johan rudholm | 219 | 99.10% | 1 | 50.00% |
| ulf hansson | 2 | 0.90% | 1 | 50.00% |
| Total | 221 | 100.00% | 2 | 100.00% |
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	ret = snprintf(buf, PAGE_SIZE, "%d\n",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| andrei warkentin | 66 | 98.51% | 1 | 50.00% |
| baruch siach | 1 | 1.49% | 1 | 50.00% |
| Total | 67 | 100.00% | 2 | 100.00% |
static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);
	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}
	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| andrei warkentin | 102 | 100.00% | 1 | 100.00% |
| Total | 102 | 100.00% | 1 | 100.00% |
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;
	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;
		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);
	return ret;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| russell king | 47 | 50.00% | 2 | 28.57% |
| pierre ossman | 15 | 15.96% | 1 | 14.29% |
| al viro | 13 | 13.83% | 1 | 14.29% |
| arnd bergmann | 12 | 12.77% | 2 | 28.57% |
| andrew morton | 7 | 7.45% | 1 | 14.29% |
| Total | 94 | 100.00% | 7 | 100.00% |
static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;
	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| russell king | 22 | 55.00% | 1 | 20.00% |
| arnd bergmann | 12 | 30.00% | 2 | 40.00% |
| al viro | 6 | 15.00% | 2 | 40.00% |
| Total | 40 | 100.00% | 5 | 100.00% |
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| russell king | 40 | 83.33% | 1 | 50.00% |
| christoph hellwig | 8 | 16.67% | 1 | 50.00% |
| Total | 48 | 100.00% | 2 | 100.00% |
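
mmc_blk_getgeo() fabricates a fixed 4-head, 16-sector geometry purely to satisfy legacy HDIO_GETGEO users; only the cylinder count varies with capacity. A worked example, assuming a hypothetical 4 GiB card (illustrative only):

	sector_t capacity = 8388608;			/* 4 GiB in 512-byte sectors */
	unsigned int cylinders = capacity / (4 * 16);	/* 131072 cylinders */
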
struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
};
static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;
	idata = kmalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}
	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}
	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}
	if (!idata->buf_bytes) {
		idata->buf = NULL;
		return idata;
	}
	idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
	if (!idata->buf) {
		err = -ENOMEM;
		goto idata_err;
	}
	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
					idata->ic.data_ptr, idata->buf_bytes)) {
		err = -EFAULT;
		goto copy_err;
	}
	return idata;
copy_err:
	kfree(idata->buf);
idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| john calixto | 192 | 83.84% | 1 | 16.67% |
| johan rudholm | 10 | 4.37% | 1 | 16.67% |
| russell king | 9 | 3.93% | 1 | 16.67% |
| vladimir motyka | 8 | 3.49% | 1 | 16.67% |
| ville viinikka | 8 | 3.49% | 1 | 16.67% |
| yalin wang | 2 | 0.87% | 1 | 16.67% |
| Total | 229 | 100.00% | 6 | 100.00% |
static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
				      struct mmc_blk_ioc_data *idata)
{
	struct mmc_ioc_cmd *ic = &idata->ic;
	if (copy_to_user(&(ic_ptr->response), ic->response,
			 sizeof(ic->response)))
		return -EFAULT;
	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
				 idata->buf, idata->buf_bytes))
			return -EFAULT;
	}
	return 0;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| jon hunter | 98 | 100.00% | 1 | 100.00% |
| Total | 98 | 100.00% | 1 | 100.00% |
static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
				       u32 retries_max)
{
	int err;
	u32 retry_count = 0;
	if (!status || !retries_max)
		return -EINVAL;
	do {
		err = get_card_status(card, status, 5);
		if (err)
			break;
		if (!R1_STATUS(*status) &&
				(R1_CURRENT_STATE(*status) != R1_STATE_PRG))
			break; /* RPMB programming operation complete */
		/*
                 * Reschedule to give the MMC device a chance to continue
                 * processing the previous command without being polled too
                 * frequently.
                 */
		usleep_range(1000, 5000);
	} while (++retry_count < retries_max);
	if (retry_count == retries_max)
		err = -EPERM;
	return err;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| loic pallardy | 108 | 100.00% | 1 | 100.00% |
| Total | 108 | 100.00% | 1 | 100.00% |
static int ioctl_do_sanitize(struct mmc_card *card)
{
	int err;
	if (!mmc_can_sanitize(card)) {
			pr_warn("%s: %s - SANITIZE is not supported\n",
				mmc_hostname(card->host), __func__);
			err = -EOPNOTSUPP;
			goto out;
	}
	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
		mmc_hostname(card->host), __func__);
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_SANITIZE_START, 1,
					MMC_SANITIZE_REQ_TIMEOUT);
	if (err)
		pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
		       mmc_hostname(card->host), __func__, err);
	pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
					     __func__);
out:
	return err;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| maya erez | 114 | 100.00% | 1 | 100.00% |
| Total | 114 | 100.00% | 1 | 100.00% |
static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
			       struct mmc_blk_ioc_data *idata)
{
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {NULL};
	struct scatterlist sg;
	int err;
	int is_rpmb = false;
	u32 status = 0;
	if (!card || !md || !idata)
		return -EINVAL;
	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		is_rpmb = true;
	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;
	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;
		sg_init_one(data.sg, idata->buf, idata->buf_bytes);
		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;
		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);
		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;
		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
			/*
                         * Pretend this is a data transfer and rely on the
                         * host driver to compute timeout.  When all host
                         * drivers support cmd.cmd_timeout for R1B, this
                         * can be changed to:
                         *
                         *     mrq.data = NULL;
                         *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
                         */
			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
		}
		mrq.data = &data;
	}
	mrq.cmd = &cmd;
	err = mmc_blk_part_switch(card, md);
	if (err)
		return err;
	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			return err;
	}
	if (is_rpmb) {
		err = mmc_set_blockcount(card, data.blocks,
			idata->ic.write_flag & (1 << 31));
		if (err)
			return err;
	}
	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
	    (cmd.opcode == MMC_SWITCH)) {
		err = ioctl_do_sanitize(card);
		if (err)
			pr_err("%s: ioctl_do_sanitize() failed. err = %d",
			       __func__, err);
		return err;
	}
	mmc_wait_for_req(card->host, &mrq);
	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
						__func__, cmd.error);
		return cmd.error;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
						__func__, data.error);
		return data.error;
	}
	/*
         * According to the SD specs, some commands require a delay after
         * issuing the command.
         */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
	memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
	if (is_rpmb) {
		/*
                 * Ensure RPMB command has completed by polling CMD13
                 * "Send Status".
                 */
		err = ioctl_rpmb_card_status_poll(card, &status, 5);
		if (err)
			dev_err(mmc_dev(card->host),
					"%s: Card Status=0x%08X, error %d\n",
					__func__, status, err);
	}
	return err;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| john calixto | 317 | 57.22% | 1 | 14.29% |
| jon hunter | 72 | 13.00% | 1 | 14.29% |
| loic pallardy | 68 | 12.27% | 1 | 14.29% |
| johan rudholm | 53 | 9.57% | 1 | 14.29% |
| maya erez | 33 | 5.96% | 1 | 14.29% |
| yaniv gardi | 10 | 1.81% | 1 | 14.29% |
| venkatraman sathiyamoorthy | 1 | 0.18% | 1 | 14.29% |
| Total | 554 | 100.00% | 7 | 100.00% |
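
Note the RPMB handling in __mmc_blk_ioctl_cmd(): bit 31 of the user-supplied write_flag is forwarded to mmc_set_blockcount(), which sets the reliable-write bit in CMD23 as the RPMB protocol requires. A userspace-side sketch (illustrative; only the write_flag convention is taken from the code above):

	struct mmc_ioc_cmd ic = {};

	ic.write_flag = 1 | (1U << 31);	/* write, requesting reliable write */
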
static int mmc_blk_ioctl_cmd(struct block_device *bdev,
			     struct mmc_ioc_cmd __user *ic_ptr)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_data *md;
	struct mmc_card *card;
	int err = 0, ioc_err = 0;
	/*
         * The caller must have CAP_SYS_RAWIO, and must be calling this on the
         * whole block device, not on a partition.  This prevents overspray
         * between sibling partitions.
         */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;
	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);
	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}
	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}
	mmc_get_card(card);
	ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
	/* Always switch back to main area after RPMB access */
	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
	mmc_put_card(card);
	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
cmd_done:
	mmc_blk_put(md);
cmd_err:
	kfree(idata->buf);
	kfree(idata);
	return ioc_err ? ioc_err : err;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| jon hunter | 128 | 59.53% | 1 | 16.67% |
| shawn lin | 23 | 10.70% | 1 | 16.67% |
| adrian hunter | 22 | 10.23% | 1 | 16.67% |
| john calixto | 17 | 7.91% | 1 | 16.67% |
| loic pallardy | 14 | 6.51% | 1 | 16.67% |
| grant grundler | 11 | 5.12% | 1 | 16.67% |
| Total | 215 | 100.00% | 6 | 100.00% |
static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
				   struct mmc_ioc_multi_cmd __user *user)
{
	struct mmc_blk_ioc_data **idata = NULL;
	struct mmc_ioc_cmd __user *cmds = user->cmds;
	struct mmc_card *card;
	struct mmc_blk_data *md;
	int i, err = 0, ioc_err = 0;
	__u64 num_of_cmds;
	/*
         * The caller must have CAP_SYS_RAWIO, and must be calling this on the
         * whole block device, not on a partition.  This prevents overspray
         * between sibling partitions.
         */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;
	if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
			   sizeof(num_of_cmds)))
		return -EFAULT;
	if (num_of_cmds > MMC_IOC_MAX_CMDS)
		return -EINVAL;
	idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
	if (!idata)
		return -ENOMEM;
	for (i = 0; i < num_of_cmds; i++) {
		idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
		if (IS_ERR(idata[i])) {
			err = PTR_ERR(idata[i]);
			num_of_cmds = i;
			goto cmd_err;
		}
	}
	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}
	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}
	mmc_get_card(card);
	for (i = 0; i < num_of_cmds && !ioc_err; i++)
		ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);
	/* Always switch back to main area after RPMB access */
	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
	mmc_put_card(card);
	/* copy to user if data and response */
	for (i = 0; i < num_of_cmds && !err; i++)
		err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
cmd_done:
	mmc_blk_put(md);
cmd_err:
	for (i = 0; i < num_of_cmds; i++) {
		kfree(idata[i]->buf);
		kfree(idata[i]);
	}
	kfree(idata);
	return ioc_err ? ioc_err : err;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| jon hunter | 291 | 73.48% | 1 | 11.11% |
| john calixto | 27 | 6.82% | 1 | 11.11% |
| shawn lin | 23 | 5.81% | 1 | 11.11% |
| adrian hunter | 22 | 5.56% | 1 | 11.11% |
| grant grundler | 16 | 4.04% | 1 | 11.11% |
| loic pallardy | 7 | 1.77% | 1 | 11.11% |
| olof johansson | 7 | 1.77% | 1 | 11.11% |
| philippe de swert | 2 | 0.51% | 1 | 11.11% |
| ulf hansson | 1 | 0.25% | 1 | 11.11% |
| Total | 396 | 100.00% | 9 | 100.00% |
static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case MMC_IOC_CMD:
		return mmc_blk_ioctl_cmd(bdev,
				(struct mmc_ioc_cmd __user *)arg);
	case MMC_IOC_MULTI_CMD:
		return mmc_blk_ioctl_multi_cmd(bdev,
				(struct mmc_ioc_multi_cmd __user *)arg);
	default:
		return -EINVAL;
	}
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| john calixto | 38 | 56.72% | 1 | 50.00% |
| jon hunter | 29 | 43.28% | 1 | 50.00% |
| Total | 67 | 100.00% | 2 | 100.00% |
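
For context, a minimal userspace sketch of driving MMC_IOC_CMD, modeled on what mmc-utils does. The MMC_RSP_*/MMC_CMD_* flag values are not part of the UAPI, so the 0xb5 below mirrors MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC from the kernel's private headers (illustrative, assuming /dev/mmcblk0 exists):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/mmc/ioctl.h>

int main(void)
{
	__u8 ext_csd[512];
	struct mmc_ioc_cmd ic = {};
	/* Whole-device node: the CAP_SYS_RAWIO/bd_contains checks above apply. */
	int fd = open("/dev/mmcblk0", O_RDWR);

	if (fd < 0)
		return 1;
	ic.opcode = 8;		/* MMC_SEND_EXT_CSD */
	ic.flags = 0xb5;	/* MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC, mirrored */
	ic.blksz = 512;
	ic.blocks = 1;
	mmc_ioc_cmd_set_data(ic, ext_csd);
	if (ioctl(fd, MMC_IOC_CMD, &ic) < 0)
		perror("MMC_IOC_CMD");
	return 0;
}
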
#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| john calixto | 41 | 100.00% | 1 | 100.00% |
| Total | 41 | 100.00% | 1 | 100.00% |
#endif
static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};
static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md)
{
	int ret;
	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
	if (main_md->part_curr == md->part_type)
		return 0;
	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;
		if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
			mmc_retune_pause(card->host);
		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= md->part_type;
		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret) {
			if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
				mmc_retune_unpause(card->host);
			return ret;
		}
		card->ext_csd.part_config = part_config;
		if (main_md->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB)
			mmc_retune_unpause(card->host);
	}
	main_md->part_curr = md->part_type;
	return 0;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| andrei warkentin | 91 | 54.49% | 1 | 16.67% |
| adrian hunter | 60 | 35.93% | 2 | 33.33% |
| pierre ossman | 10 | 5.99% | 1 | 16.67% |
| ulf hansson | 4 | 2.40% | 1 | 16.67% |
| ben dooks | 2 | 1.20% | 1 | 16.67% |
| Total | 167 | 100.00% | 6 | 100.00% |
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 result;
	__be32 *blocks;
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;
	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	mrq.cmd = &cmd;
	mrq.data = &data;
	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;
	sg_init_one(&sg, blocks, 4);
	mmc_wait_for_req(card->host, &mrq);
	result = ntohl(*blocks);
	kfree(blocks);
	if (cmd.error || data.error)
		result = (u32)-1;
	return result;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| pierre ossman | 191 | 65.19% | 1 | 10.00% |
| ben dooks | 38 | 12.97% | 1 | 10.00% |
| david brownell | 22 | 7.51% | 1 | 10.00% |
| andrei warkentin | 21 | 7.17% | 1 | 10.00% |
| chris ball | 11 | 3.75% | 3 | 30.00% |
| subhash jadavani | 8 | 2.73% | 1 | 10.00% |
| harvey harrison | 1 | 0.34% | 1 | 10.00% |
| venkatraman sathiyamoorthy | 1 | 0.34% | 1 | 10.00% |
| Total | 293 | 100.00% | 10 | 100.00% |
static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
	struct mmc_command cmd = {0};
	int err;
	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| adrian hunter | 65 | 66.33% | 1 | 25.00% |
| russell king | 29 | 29.59% | 2 | 50.00% |
| chris ball | 4 | 4.08% | 1 | 25.00% |
| Total | 98 | 100.00% | 4 | 100.00% |
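
As a reading aid: cmd.resp[0] above is the raw R1 status word. R1_CURRENT_STATE() extracts bits [12:9] and R1_READY_FOR_DATA is bit 8, so for example (values from the JEDEC spec):

	/*
	 * R1_CURRENT_STATE(status) == 7 (R1_STATE_PRG) while a write is
	 * being programmed, == 4 (R1_STATE_TRAN) once the card is idle
	 * again; callers below poll until READY_FOR_DATA && !PRG.
	 */
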
static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
		bool hw_busy_detect, struct request *req, int *gen_err)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
	int err = 0;
	u32 status;
	do {
		err = get_card_status(card, &status, 5);
		if (err) {
			pr_err("%s: error %d requesting status\n",
			       req->rq_disk->disk_name, err);
			return err;
		}
		if (status & R1_ERROR) {
			pr_err("%s: %s: error sending status cmd, status %#x\n",
				req->rq_disk->disk_name, __func__, status);
			*gen_err = 1;
		}
		/* We may rely on the host hw to handle busy detection.*/
		if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
			hw_busy_detect)
			break;
		/*
                 * Timeout if the device never becomes ready for data and never
                 * leaves the program state.
                 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s %s\n",
				mmc_hostname(card->host),
				req->rq_disk->disk_name, __func__);
			return -ETIMEDOUT;
		}
		/*
                 * Some cards mishandle the status bits,
                 * so make sure to check both the busy
                 * indication and the card state.
                 */
	} while (!(status & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
	return err;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| ulf hansson | 186 | 100.00% | 2 | 100.00% |
| Total | 186 | 100.00% | 2 | 100.00% |
static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
		struct request *req, int *gen_err, u32 *stop_status)
{
	struct mmc_host *host = card->host;
	struct mmc_command cmd = {0};
	int err;
	bool use_r1b_resp = rq_data_dir(req) == WRITE;
	/*
         * Normally we use R1B responses for WRITE, but in cases where the host
         * has specified a max_busy_timeout we need to validate it. A failure
         * means we need to prevent the host from doing hw busy detection, which
         * is done by converting to a R1 response instead.
         */
	if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;
	cmd.opcode = MMC_STOP_TRANSMISSION;
	if (use_r1b_resp) {
		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	}
	err = mmc_wait_for_cmd(host, &cmd, 5);
	if (err)
		return err;
	*stop_status = cmd.resp[0];
	/* No need to check card status in case of READ. */
	if (rq_data_dir(req) == READ)
		return 0;
	if (!mmc_host_is_spi(host) &&
		(*stop_status & R1_ERROR)) {
		pr_err("%s: %s: general error sending stop command, resp %#x\n",
			req->rq_disk->disk_name, __func__, *stop_status);
		*gen_err = 1;
	}
	return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| ulf hansson | 212 | 100.00% | 1 | 100.00% |
| Total | 212 | 100.00% | 1 | 100.00% |
#define ERR_NOMEDIUM	3
#define ERR_RETRY	2
#define ERR_ABORT	1
#define ERR_CONTINUE	0
static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
	bool status_valid, u32 status)
{
	switch (error) {
	case -EILSEQ:
		/* response crc error, retry the r/w cmd */
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "response CRC error",
			name, status);
		return ERR_RETRY;
	case -ETIMEDOUT:
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "timed out", name, status);
		/* If the status cmd initially failed, retry the r/w cmd */
		if (!status_valid) {
			pr_err("%s: status not valid, retrying timeout\n",
				req->rq_disk->disk_name);
			return ERR_RETRY;
		}
		/*
                 * If it was a r/w cmd crc error, or illegal command
                 * (eg, issued in wrong state) then retry - we should
                 * have corrected the state problem above.
                 */
		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
			pr_err("%s: command error, retrying timeout\n",
				req->rq_disk->disk_name);
			return ERR_RETRY;
		}
		/* Otherwise abort the command */
		return ERR_ABORT;
	default:
		/* We don't understand the error code the driver gave us */
		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
		       req->rq_disk->disk_name, error, status);
		return ERR_ABORT;
	}
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| russell king | 124 | 82.67% | 1 | 50.00% |
| ken sumrall | 26 | 17.33% | 1 | 50.00% |
| Total | 150 | 100.00% | 2 | 100.00% |
/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
	struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
{
	bool prev_cmd_status_valid = true;
	u32 status, stop_status = 0;
	int err, retry;
	if (mmc_card_removed(card))
		return ERR_NOMEDIUM;
	/*
         * Try to get card status which indicates both the card state
         * and why there was no response.  If the first attempt fails,
         * we can't be sure the returned status is for the r/w command.
         */
	for (retry = 2; retry >= 0; retry--) {
		err = get_card_status(card, &status, 0);
		if (!err)
			break;
		/* Re-tune if needed */
		mmc_retune_recheck(card->host);
		prev_cmd_status_valid = false;
		pr_err("%s: error %d sending status command, %sing\n",
		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
	}
	/* We couldn't get a response from the card.  Give up. */
	if (err) {
		/* Check if the card is removed */
		if (mmc_detect_card_removed(card->host))
			return ERR_NOMEDIUM;
		return ERR_ABORT;
	}
	/* Flag ECC errors */
	if ((status & R1_CARD_ECC_FAILED) ||
	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
		*ecc_err = 1;
	/* Flag General errors */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
		if ((status & R1_ERROR) ||
			(brq->stop.resp[0] & R1_ERROR)) {
			pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0], status);
			*gen_err = 1;
		}
	/*
         * Check the current card state.  If it is in some data transfer
         * mode, tell it to stop (and hopefully transition back to TRAN.)
         */
	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
		err = send_stop(card,
			DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
			req, gen_err, &stop_status);
		if (err) {
			pr_err("%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, err);
			/*
                         * If the stop cmd also timed out, the card is probably
                         * not present, so abort. Other errors are bad news too.
                         */
			return ERR_ABORT;
		}
		if (stop_status & R1_CARD_ECC_FAILED)
			*ecc_err = 1;
	}
	/* Check for set block count errors */
	if (brq->sbc.error)
		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
				prev_cmd_status_valid, status);
	/* Check for r/w command errors */
	if (brq->cmd.error)
		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
				prev_cmd_status_valid, status);
	/* Data errors */
	if (!brq->stop.error)
		return ERR_CONTINUE;
	/* Now for stop errors.  These aren't fatal to the transfer. */
	pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
	       req->rq_disk->disk_name, brq->stop.error,
	       brq->cmd.resp[0], status);
	/*
         * Substitute in our own stop status as this will give the error
         * state which happened during the execution of the r/w command.
         */
	if (stop_status) {
		brq->stop.resp[0] = stop_status;
		brq->stop.error = 0;
	}
	return ERR_CONTINUE;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| russell king | 251 | 54.92% | 2 | 22.22% |
| adrian hunter | 88 | 19.26% | 3 | 33.33% |
| kobayashi yoshitake | 74 | 16.19% | 1 | 11.11% |
| sujit reddy thumma | 25 | 5.47% | 1 | 11.11% |
| ulf hansson | 18 | 3.94% | 1 | 11.11% |
| johan rudholm | 1 | 0.22% | 1 | 11.11% |
| Total | 457 | 100.00% | 9 | 100.00% |
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;
	if (md->reset_done & type)
		return -EEXIST;
	md->reset_done |= type;
	err = mmc_hw_reset(host);
	/* Ensure we switch back to the correct partition */
	if (err != -EOPNOTSUPP) {
		struct mmc_blk_data *main_md =
			dev_get_drvdata(&host->card->dev);
		int part_err;
		main_md->part_curr = main_md->part_type;
		part_err = mmc_blk_part_switch(host->card, md);
		if (part_err) {
			/*
                         * We have failed to get back into the correct
                         * partition, so we need to abort the whole request.
                         */
			return -ENODEV;
		}
	}
	return err;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| adrian hunter | 104 | 96.30% | 1 | 50.00% |
| ulf hansson | 4 | 3.70% | 1 | 50.00% |
| Total | 108 | 100.00% | 2 | 100.00% |
static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| adrian hunter | 22 | 100.00% | 1 | 100.00% |
| Total | 22 | 100.00% | 1 | 100.00% |
int mmc_access_rpmb(struct mmc_queue *mq)
{
	struct mmc_blk_data *md = mq->data;
	/*
         * If this is an RPMB partition access, return true
         */
	if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
		return true;
	return false;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| chuanxiao dong | 36 | 100.00% | 1 | 100.00% |
| Total | 36 | 100.00% | 1 | 100.00% |
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_DISCARD;
	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}
	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);
	if (mmc_can_discard(card))
		arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_TRIM_ARG ?
				 INAND_CMD38_ARG_TRIM :
				 INAND_CMD38_ARG_ERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
out:
	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
	blk_end_request(req, err, blk_rq_bytes(req));
	return err ? 0 : 1;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| adrian hunter | 168 | 76.02% | 2 | 33.33% |
| andrei warkentin | 38 | 17.19% | 1 | 16.67% |
| kyungmin park | 12 | 5.43% | 1 | 16.67% |
| russell king | 2 | 0.90% | 1 | 16.67% |
| subhash jadavani | 1 | 0.45% | 1 | 16.67% |
| Total | 221 | 100.00% | 6 | 100.00% |
static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_SECDISCARD;
	if (!(mmc_can_secure_erase_trim(card))) {
		err = -EOPNOTSUPP;
		goto out;
	}
	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);
	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 0);
		if (err)
			goto out_retry;
	}
	err = mmc_erase(card, from, nr, arg);
	if (err == -EIO)
		goto out_retry;
	if (err)
		goto out;
	if (arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 0);
			if (err)
				goto out_retry;
		}
		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
		if (err == -EIO)
			goto out_retry;
		if (err)
			goto out;
	}
out_retry:
	if (err && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
out:
	blk_end_request(req, err, blk_rq_bytes(req));
	return err ? 0 : 1;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| adrian hunter | 229 | 74.59% | 3 | 42.86% |
| andrei warkentin | 69 | 22.48% | 1 | 14.29% |
| maya erez | 6 | 1.95% | 1 | 14.29% |
| kyungmin park | 2 | 0.65% | 1 | 14.29% |
| subhash jadavani | 1 | 0.33% | 1 | 14.29% |
| Total | 307 | 100.00% | 7 | 100.00% |
static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	int ret = 0;
	ret = mmc_flush_cache(card);
	if (ret)
		ret = -EIO;
	blk_end_request_all(req, ret);
	return ret ? 0 : 1;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| seungwon jeon | 37 | 52.11% | 1 | 33.33% |
| andrei warkentin | 33 | 46.48% | 1 | 33.33% |
| subhash jadavani | 1 | 1.41% | 1 | 33.33% |
| Total | 71 | 100.00% | 3 | 100.00% |
/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
			brq->data.blocks = 1;
		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| andrei warkentin | 113 | 100.00% | 2 | 100.00% |
| Total | 113 | 100.00% | 2 | 100.00% |
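
A worked example of the legacy clamping in mmc_apply_rel_rw(), assuming a typical ext_csd.rel_sectors of 8 (illustrative only):

	/*
	 * - a 64-block write starting on an 8-sector boundary is clamped
	 *   to 8 blocks, so the request completes in partial chunks;
	 * - a 3-block write, or any write with an unaligned start, is
	 *   collapsed to a single block.
	 */
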
#define CMD_ERRORS							\
	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
         R1_ADDRESS_ERROR |	/* Misaligned address */		\
         R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
         R1_WP_VIOLATION |	/* Tried to write to protected block */	\
         R1_CC_ERROR |		/* Card controller error */		\
         R1_ERROR)		/* General/unknown error */
static int mmc_blk_err_check(struct mmc_card *card,
			     struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
						    mmc_active);
	struct mmc_blk_request *brq = &mq_mrq->brq;
	struct request *req = mq_mrq->req;
	int need_retune = card->host->need_retune;
	int ecc_err = 0, gen_err = 0;
	/*
         * sbc.error indicates a problem with the set block count
         * command.  No data will have been transferred.
         *
         * cmd.error indicates a problem with the r/w command.  No
         * data will have been transferred.
         *
         * stop.error indicates a problem with the stop command.  Data
         * may have been transferred, or may still be transferring.
         */
	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
	    brq->data.error) {
		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
		case ERR_RETRY:
			return MMC_BLK_RETRY;
		case ERR_ABORT:
			return MMC_BLK_ABORT;
		case ERR_NOMEDIUM:
			return MMC_BLK_NOMEDIUM;
		case ERR_CONTINUE:
			break;
		}
	}
	/*
         * Check for errors relating to the execution of the
         * initial command - such as address errors.  No data
         * has been transferred.
         */
	if (brq->cmd.resp[0] & CMD_ERRORS) {
		pr_err("%s: r/w command failed, status = %#x\n",
		       req->rq_disk->disk_name, brq->cmd.resp[0]);
		return MMC_BLK_ABORT;
	}
	/*
         * Everything else is either success, or a data error of some
         * kind.  If it was a write, we may have transitioned to
         * program mode, which we have to wait for to complete.
         */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
		int err;
		/* Check stop command response */
		if (brq->stop.resp[0] & R1_ERROR) {
			pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0]);
			gen_err = 1;
		}
		err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
					&gen_err);
		if (err)
			return MMC_BLK_CMD_ERR;
	}
	/* if general error occurs, retry the write operation. */
	if (gen_err) {
		pr_warn("%s: retrying write for general error\n",
				req->rq_disk->disk_name);
		return MMC_BLK_RETRY;
	}
	if (brq->data.error) {
		if (need_retune && !brq->retune_retry_done) {
			pr_debug("%s: retrying because a re-tune was needed\n",
				 req->rq_disk->disk_name);
			brq->retune_retry_done = 1;
			return MMC_BLK_RETRY;
		}
		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
		       req->rq_disk->disk_name, brq->data.error,
		       (unsigned)blk_rq_pos(req),
		       (unsigned)blk_rq_sectors(req),
		       brq->cmd.resp[0], brq->stop.resp[0]);
		if (rq_data_dir(req) == READ) {
			if (ecc_err)
				return MMC_BLK_ECC_ERR;
			return MMC_BLK_DATA_ERR;
		} else {
			return MMC_BLK_CMD_ERR;
		}
	}
	if (!brq->data.bytes_xfered)
		return MMC_BLK_RETRY;
	if (mmc_packed_cmd(mq_mrq->cmd_type)) {
		if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
			return MMC_BLK_PARTIAL;
		else
			return MMC_BLK_SUCCESS;
	}
	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
		return MMC_BLK_PARTIAL;
	return MMC_BLK_SUCCESS;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| per forlin | 240 | 50.74% | 4 | 21.05% |
| kobayashi yoshitake | 73 | 15.43% | 1 | 5.26% |
| adrian hunter | 70 | 14.80% | 2 | 10.53% |
| seungwon jeon | 37 | 7.82% | 1 | 5.26% |
| russell king | 18 | 3.81% | 3 | 15.79% |
| ulf hansson | 9 | 1.90% | 2 | 10.53% |
| philip langdale | 8 | 1.69% | 1 | 5.26% |
| trey ramsay | 8 | 1.69% | 1 | 5.26% |
| sujit reddy thumma | 6 | 1.27% | 1 | 5.26% |
| andrei warkentin | 3 | 0.63% | 2 | 10.53% |
| tejun heo | 1 | 0.21% | 1 | 5.26% |
| Total | 473 | 100.00% | 19 | 100.00% |
static int mmc_blk_packed_err_check(struct mmc_card *card,
				    struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
			mmc_active);
	struct request *req = mq_rq->req;
	struct mmc_packed *packed = mq_rq->packed;
	int err, check, status;
	u8 *ext_csd;
	BUG_ON(!packed);
	packed->retries--;
	check = mmc_blk_err_check(card, areq);
	err = get_card_status(card, &status, 0);
	if (err) {
		pr_err("%s: error %d sending status command\n",
		       req->rq_disk->disk_name, err);
		return MMC_BLK_ABORT;
	}
	if (status & R1_EXCEPTION_EVENT) {
		err = mmc_get_ext_csd(card, &ext_csd);
		if (err) {
			pr_err("%s: error %d sending ext_csd\n",
			       req->rq_disk->disk_name, err);
			return MMC_BLK_ABORT;
		}
		if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
		     EXT_CSD_PACKED_FAILURE) &&
		    (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
		     EXT_CSD_PACKED_GENERIC_ERROR)) {
			if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
			    EXT_CSD_PACKED_INDEXED_ERROR) {
				packed->idx_failure =
				  ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
				check = MMC_BLK_PARTIAL;
			}
			pr_err("%s: packed cmd failed, nr %u, sectors %u, "
			       "failure index: %d\n",
			       req->rq_disk->disk_name, packed->nr_entries,
			       packed->blocks, packed->idx_failure);
		}
		kfree(ext_csd);
	}
	return check;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| seungwon jeon | 150 | 64.10% | 1 | 7.14% |
| per forlin | 47 | 20.09% | 2 | 14.29% |
| pierre ossman | 8 | 3.42% | 2 | 14.29% |
| paul walmsley | 7 | 2.99% | 1 | 7.14% |
| adrian hunter | 6 | 2.56% | 1 | 7.14% |
| russell king | 4 | 1.71% | 2 | 14.29% |
| ulf hansson | 4 | 1.71% | 1 | 7.14% |
| david brownell | 4 | 1.71% | 1 | 7.14% |
| tejun heo | 2 | 0.85% | 1 | 7.14% |
| pavel pisa | 1 | 0.43% | 1 | 7.14% |
| saugata das | 1 | 0.43% | 1 | 7.14% |
| Total | 234 | 100.00% | 14 | 100.00% |
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int disable_multi,
			       struct mmc_queue *mq)
{
	u32 readcmd, writecmd;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct mmc_blk_data *md = mq->data;
	bool do_data_tag;
	/*
         * Reliable writes are used to implement Forced Unit Access and
         * are supported only on MMCs.
         */
	bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
		(rq_data_dir(req) == WRITE) &&
		(md->flags & MMC_BLK_REL_WR);
	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;
	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
	brq->data.blksz = 512;
	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->data.blocks = blk_rq_sectors(req);
	/*
         * The block layer doesn't support all sector count
         * restrictions, so we need to be prepared for too big
         * requests.
         */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;
	if (brq->data.blocks > 1) {
		/*
                 * After a read error, we redo the request one sector
                 * at a time in order to accurately determine which
                 * sectors can be read successfully.
                 */
		if (disable_multi)
			brq->data.blocks = 1;
		/*
                 * Some controllers have HW issues while operating
                 * in multiple I/O mode
                 */
		if (card->host->ops->multi_io_quirk)
			brq->data.blocks = card->host->ops->multi_io_quirk(card,
						(rq_data_dir(req) == READ) ?
						MMC_DATA_READ : MMC_DATA_WRITE,
						brq->data.blocks);
	}
	if (brq->data.blocks > 1 || do_rel_wr) {
		/* SPI multiblock writes terminate using a special
                 * token, not a STOP_TRANSMISSION request.
                 */
		if (!mmc_host_is_spi(card->host) ||
		    rq_data_dir(req) == READ)
			brq->mrq.stop = &brq->stop;
		readcmd = MMC_READ_MULTIPLE_BLOCK;
		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
	} else {
		brq->mrq.stop = NULL;
		readcmd = MMC_READ_SINGLE_BLOCK;
		writecmd = MMC_WRITE_BLOCK;
	}
	if (rq_data_dir(req) == READ) {
		brq->cmd.opcode = readcmd;
		brq->data.flags = MMC_DATA_READ;
		if (brq->mrq.stop)
			brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
					MMC_CMD_AC;
	} else {
		brq->cmd.opcode = writecmd;
		brq->data.flags = MMC_DATA_WRITE;
		if (brq->mrq.stop)
			brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
					MMC_CMD_AC;
	}
	if (do_rel_wr)
		mmc_apply_rel_rw(brq, card, req);
	/*
         * Data tag is used only during writing meta data to speed
         * up write and any subsequent read of this meta data
         */
	do_data_tag = (card->ext_csd.data_tag_unit_size) &&
		(req->cmd_flags & REQ_META) &&
		(rq_data_dir(req) == WRITE) &&
		((brq->data.blocks * brq->data.blksz) >=
		 card->ext_csd.data_tag_unit_size);
	/*
         * Pre-defined multi-block transfers are preferable to
         * open-ended ones (and necessary for reliable writes).
         * However, it is not sufficient to just send CMD23,
         * and avoid the final CMD12, as on an error condition
         * CMD12 (stop) needs to be sent anyway. This, coupled
         * with Auto-CMD23 enhancements provided by some
         * hosts, means that the complexity of dealing
         * with this is best left to the host. If CMD23 is
         * supported by card and host, we'll fill sbc in and let
         * the host deal with handling it correctly. This means
         * that for hosts that don't expose MMC_CAP_CMD23, no
         * change of behavior will be observed.
         *
         * N.B: Some MMC cards experience perf degradation.
         * We'll avoid using CMD23-bounded multiblock writes for
         * these, while retaining features like reliable writes.
         */
	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
	     do_data_tag)) {
		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0) |
			(do_data_tag ? (1 << 29) : 0);
		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		brq->mrq.sbc = &brq->sbc;
	}
	mmc_set_data_timeout(&brq->data, card);
	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
	/*
         * Adjust the sg list so it is the same size as the
         * request.
         */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;
		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}
	mqrq->mmc_active.mrq = &brq->mrq;
	mqrq->mmc_active.err_check = mmc_blk_err_check;
	mmc_queue_bounce_pre(mqrq);
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| seungwon jeon | 605 | 80.35% | 1 | 9.09% |
| kuninori morimoto | 47 | 6.24% | 1 | 9.09% |
| ulf hansson | 40 | 5.31% | 1 | 9.09% |
| andrei warkentin | 23 | 3.05% | 2 | 18.18% |
| saugata das | 23 | 3.05% | 1 | 9.09% |
| per forlin | 7 | 0.93% | 1 | 9.09% |
| russell king | 5 | 0.66% | 2 | 18.18% |
| jaehoon chung | 2 | 0.27% | 1 | 9.09% |
| luca porzio | 1 | 0.13% | 1 | 9.09% |
| Total | 753 | 100.00% | 11 | 100.00% |
static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
					  struct mmc_card *card)
{
	unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
	unsigned int max_seg_sz = queue_max_segment_size(q);
	unsigned int len, nr_segs = 0;
	do {
		len = min(hdr_sz, max_seg_sz);
		hdr_sz -= len;
		nr_segs++;
	} while (hdr_sz);
	return nr_segs;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| seungwon jeon | 74 | 100.00% | 1 | 100.00% |
| Total | 74 | 100.00% | 1 | 100.00% |
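
A worked example for mmc_calc_packed_hdr_segs(), with hypothetical host limits (illustrative only):

	/* hdr_sz 4096 (large-sector card), max_seg_sz 1024 -> nr_segs 4;
	 * hdr_sz 512 on a typical host -> a single segment. */
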
static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
{
	struct request_queue *q = mq->queue;
	struct mmc_card *card = mq->card;
	struct request *cur = req, *next = NULL;
	struct mmc_blk_data *md = mq->data;
	struct mmc_queue_req *mqrq = mq->mqrq_cur;
	bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
	unsigned int req_sectors = 0, phys_segments = 0;
	unsigned int max_blk_count, max_phys_segs;
	bool put_back = true;
	u8 max_packed_rw = 0;
	u8 reqs = 0;
	if (!(md->flags & MMC_BLK_PACKED_CMD))
		goto no_packed;
	if ((rq_data_dir(cur) == WRITE) &&
	    mmc_host_packed_wr(card->host))
		max_packed_rw = card->ext_csd.max_packed_writes;
	if (max_packed_rw == 0)
		goto no_packed;
	if (mmc_req_rel_wr(cur) &&
	    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
		goto no_packed;
	if (mmc_large_sector(card) &&
	    !IS_ALIGNED(blk_rq_sectors(cur), 8))
		goto no_packed;
	mmc_blk_clear_packed(mqrq);
	max_blk_count = min(card->host->max_blk_count,
			    card->host->max_req_size >> 9);
	if (unlikely(max_blk_count > 0xffff))
		max_blk_count = 0xffff;
	max_phys_segs = queue_max_segments(q);
	req_sectors += blk_rq_sectors(cur);
	phys_segments += cur->nr_phys_segments;
	if (rq_data_dir(cur) == WRITE) {
		req_sectors += mmc_large_sector(card) ? 8 : 1;
		phys_segments += mmc_calc_packed_hdr_segs(q, card);
	}
	do {
		if (reqs >= max_packed_rw - 1) {
			put_back = false;
			break;
		}
		spin_lock_irq(q->queue_lock);
		next = blk_fetch_request(q);
		spin_unlock_irq(q->queue_lock);
		if (!next) {
			put_back = false;
			break;
		}
		if (mmc_large_sector(card) &&
		    !IS_ALIGNED(blk_rq_sectors(next), 8))
			break;
		if (next->cmd_flags & REQ_DISCARD ||
		    next->cmd_flags & REQ_FLUSH)
			break;
		if (rq_data_dir(cur) != rq_data_dir(next))
			break;
		if (mmc_req_rel_wr(next) &&
		    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
			break;
		req_sectors += blk_rq_sectors(next);
		if (req_sectors > max_blk_count)
			break;
		phys_segments += next->nr_phys_segments;
		if (phys_segments > max_phys_segs)
			break;
		list_add_tail(&next->queuelist, &mqrq->packed->list);
		cur = next;
		reqs++;
	} while (1);
	if (put_back) {
		spin_lock_irq(q->queue_lock);
		blk_requeue_request(q, next);
		spin_unlock_irq(q->queue_lock);
	}
	if (reqs > 0) {
		list_add(&req->queuelist, &mqrq->packed->list);
		mqrq->packed->nr_entries = ++reqs;
		mqrq->packed->retries = reqs;
		return reqs;
	}
no_packed:
	mqrq->cmd_type = MMC_PACKED_NONE;
	return 0;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| seungwon jeon | 537 | 100.00% | 1 | 100.00% |
| Total | 537 | 100.00% | 1 | 100.00% |
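The fetch loop in mmc_blk_prep_packed_list() keeps admitting requests into the pack while two running totals stay within the host limits: total sectors against max_blk_count and total physical segments against max_phys_segs. A hypothetical distillation of just that admission test (the function name and the check-before-add ordering are ours; the driver adds first and breaks on overflow):
#include <assert.h>
#include <stdbool.h>

/* Admit a request of rq_sectors/rq_segs into the pack only if the
 * running totals stay within the host's block and segment budgets. */
static bool fits_in_pack(unsigned int *req_sectors, unsigned int *phys_segments,
			 unsigned int rq_sectors, unsigned int rq_segs,
			 unsigned int max_blk_count, unsigned int max_phys_segs)
{
	if (*req_sectors + rq_sectors > max_blk_count)
		return false;
	if (*phys_segments + rq_segs > max_phys_segs)
		return false;
	*req_sectors += rq_sectors;
	*phys_segments += rq_segs;
	return true;
}

int main(void)
{
	unsigned int sectors = 0, segs = 0;

	/* a 512 KiB request fits within a 0xffff-sector budget */
	assert(fits_in_pack(&sectors, &segs, 1024, 8, 0xffff, 128));
	/* a second request overflowing the sector budget is rejected */
	assert(!fits_in_pack(&sectors, &segs, 0xffff, 8, 0xffff, 128));
	return 0;
}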
static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
					struct mmc_card *card,
					struct mmc_queue *mq)
{
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct request *prq;
	struct mmc_blk_data *md = mq->data;
	struct mmc_packed *packed = mqrq->packed;
	bool do_rel_wr, do_data_tag;
	u32 *packed_cmd_hdr;
	u8 hdr_blocks;
	u8 i = 1;
	BUG_ON(!packed);
	mqrq->cmd_type = MMC_PACKED_WRITE;
	packed->blocks = 0;
	packed->idx_failure = MMC_PACKED_NR_IDX;
	packed_cmd_hdr = packed->cmd_hdr;
	memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
	packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
		(PACKED_CMD_WR << 8) | PACKED_CMD_VER);
	hdr_blocks = mmc_large_sector(card) ? 8 : 1;
	/*
	 * Argument for each entry of the packed group
	 */
	list_for_each_entry(prq, &packed->list, queuelist) {
		do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
		do_data_tag = (card->ext_csd.data_tag_unit_size) &&
			(prq->cmd_flags & REQ_META) &&
			(rq_data_dir(prq) == WRITE) &&
			((brq->data.blocks * brq->data.blksz) >=
			 card->ext_csd.data_tag_unit_size);
		/* Argument of CMD23 */
		packed_cmd_hdr[i * 2] = cpu_to_le32(
			(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
			(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
			blk_rq_sectors(prq));
		/* Argument of CMD18 or CMD25 */
		packed_cmd_hdr[(i * 2) + 1] = cpu_to_le32(
			mmc_card_blockaddr(card) ?
			blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
		packed->blocks += blk_rq_sectors(prq);
		i++;
	}
	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;
	brq->mrq.sbc = &brq->sbc;
	brq->mrq.stop = &brq->stop;
	brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
	brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
	brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
	brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
	brq->data.blksz = 512;
	brq->data.blocks = packed->blocks + hdr_blocks;
	brq->data.flags = MMC_DATA_WRITE;
	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	mmc_set_data_timeout(&brq->data, card);
	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
	mqrq->mmc_active.mrq = &brq->mrq;
	mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
	mmc_queue_bounce_pre(mqrq);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| seungwon jeon | 555 | 98.23% | 1 | 33.33% |
| taras kondratiuk | 9 | 1.59% | 1 | 33.33% |
| jaehoon chung | 1 | 0.18% | 1 | 33.33% |
| Total | 565 | 100.00% | 3 | 100.00% |
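The header built by mmc_blk_packed_hdr_wrq_prep() occupies hdr_blocks sectors and consists of little-endian 32-bit words: word 0 carries the entry count, direction, and version, and each packed entry then contributes a CMD23-argument word and a start-address word. A sketch of the word-0 encoding, before the cpu_to_le32() conversion the driver applies:
#include <assert.h>
#include <stdint.h>

#define PACKED_CMD_VER	0x01
#define PACKED_CMD_WR	0x02

/* Word 0 of the packed write header: entry count in bits [23:16],
 * direction in bits [15:8], version in bits [7:0]. */
static uint32_t packed_hdr_word0(unsigned int nr_entries)
{
	return (nr_entries << 16) | (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
}

int main(void)
{
	/* two packed writes -> 0x00020201 */
	assert(packed_hdr_word0(2) == 0x00020201u);
	return 0;
}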
static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
			   struct mmc_blk_request *brq, struct request *req,
			   int ret)
{
	struct mmc_queue_req *mq_rq;
	mq_rq = container_of(brq, struct mmc_queue_req, brq);
	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still mark as ok the written
	 * sectors reported by the controller (which might be fewer than
	 * the real number of written sectors, but never more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;
		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			ret = blk_end_request(req, 0, blocks << 9);
		}
	} else {
		if (!mmc_packed_cmd(mq_rq->cmd_type))
			ret = blk_end_request(req, 0, brq->data.bytes_xfered);
	}
	return ret;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| seungwon jeon | 119 | 97.54% | 1 | 50.00% |
| saugata das | 3 | 2.46% | 1 | 50.00% |
| Total | 122 | 100.00% | 2 | 100.00% |
static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct mmc_packed *packed = mq_rq->packed;
	int idx = packed->idx_failure, i = 0;
	int ret = 0;
	BUG_ON(!packed);
	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.next);
		if (idx == i) {
			/* retry from error index */
			packed->nr_entries -= idx;
			mq_rq->req = prq;
			ret = 1;
			if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
				list_del_init(&prq->queuelist);
				mmc_blk_clear_packed(mq_rq);
			}
			return ret;
		}
		list_del_init(&prq->queuelist);
		blk_end_request(prq, 0, blk_rq_bytes(prq));
		i++;
	}
	mmc_blk_clear_packed(mq_rq);
	return ret;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| seungwon jeon | 133 | 86.93% | 1 | 14.29% |
| andrei warkentin | 7 | 4.58% | 1 | 14.29% |
| pierre ossman | 6 | 3.92% | 2 | 28.57% |
| per forlin | 4 | 2.61% | 1 | 14.29% |
| tejun heo | 2 | 1.31% | 1 | 14.29% |
| russell king | 1 | 0.65% | 1 | 14.29% |
| Total | 153 | 100.00% | 7 | 100.00% |
static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct mmc_packed *packed = mq_rq->packed;
	BUG_ON(!packed);
	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.next);
		list_del_init(&prq->queuelist);
		blk_end_request(prq, -EIO, blk_rq_bytes(prq));
	}
	mmc_blk_clear_packed(mq_rq);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| seungwon jeon | 59 | 72.84% | 1 | 14.29% |
| russell king | 11 | 13.58% | 2 | 28.57% |
| per forlin | 9 | 11.11% | 2 | 28.57% |
| adrian hunter | 2 | 2.47% | 2 | 28.57% |
| Total | 81 | 100.00% | 7 | 100.00% |
static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
				      struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct request_queue *q = mq->queue;
	struct mmc_packed *packed = mq_rq->packed;
	BUG_ON(!packed);
	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.prev);
		if (prq->queuelist.prev != &packed->list) {
			list_del_init(&prq->queuelist);
			spin_lock_irq(q->queue_lock);
			blk_requeue_request(mq->queue, prq);
			spin_unlock_irq(q->queue_lock);
		} else {
			list_del_init(&prq->queuelist);
		}
	}
	mmc_blk_clear_packed(mq_rq);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| seungwon jeon | 93 | 70.99% | 1 | 50.00% |
| adrian hunter | 38 | 29.01% | 1 | 50.00% |
| Total | 131 | 100.00% | 2 | 100.00% |
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
	int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
	enum mmc_blk_status status;
	struct mmc_queue_req *mq_rq;
	struct request *req = rqc;
	struct mmc_async_req *areq;
	const u8 packed_nr = 2;
	u8 reqs = 0;
	if (!rqc && !mq->mqrq_prev->req)
		return 0;
	if (rqc)
		reqs = mmc_blk_prep_packed_list(mq, rqc);
	do {
		if (rqc) {
			/*
			 * When the 4KB native sector size is enabled, reads
			 * and writes are only allowed in multiples of 8 blocks.
			 */
			if ((brq->data.blocks & 0x07) &&
			    (card->ext_csd.data_sector_size == 4096)) {
				pr_err("%s: Transfer size is not 4KB sector size aligned\n",
					req->rq_disk->disk_name);
				mq_rq = mq->mqrq_cur;
				goto cmd_abort;
			}
			if (reqs >= packed_nr)
				mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
							    card, mq);
			else
				mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
			areq = &mq->mqrq_cur->mmc_active;
		} else
			areq = NULL;
		areq = mmc_start_req(card->host, areq, (int *) &status);
		if (!areq) {
			if (status == MMC_BLK_NEW_REQUEST)
				mq->flags |= MMC_QUEUE_NEW_REQUEST;
			return 0;
		}
		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
		brq = &mq_rq->brq;
		req = mq_rq->req;
		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
		mmc_queue_bounce_post(mq_rq);
		switch (status) {
		case MMC_BLK_SUCCESS:
		case MMC_BLK_PARTIAL:
			/*
                         * A block was successfully transferred.
                         */
			mmc_blk_reset_success(md, type);
			if (mmc_packed_cmd(mq_rq->cmd_type)) {
				ret = mmc_blk_end_packed_req(mq_rq);
				break;
			} else {
				ret = blk_end_request(req, 0,
						brq->data.bytes_xfered);
			}
			/*
                         * If the blk_end_request function returns non-zero even
                         * though all data has been transferred and no errors
                         * were returned by the host controller, it's a bug.
                         */
			if (status == MMC_BLK_SUCCESS && ret) {
				pr_err("%s BUG rq_tot %d d_xfer %d\n",
				       __func__, blk_rq_bytes(req),
				       brq->data.bytes_xfered);
				rqc = NULL;
				goto cmd_abort;
			}
			break;
		case MMC_BLK_CMD_ERR:
			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
			if (mmc_blk_reset(md, card->host, type))
				goto cmd_abort;
			if (!ret)
				goto start_new_req;
			break;
		case MMC_BLK_RETRY:
			retune_retry_done = brq->retune_retry_done;
			if (retry++ < 5)
				break;
			/* Fall through */
		case MMC_BLK_ABORT:
			if (!mmc_blk_reset(md, card->host, type))
				break;
			goto cmd_abort;
		case MMC_BLK_DATA_ERR: {
			int err;
			err = mmc_blk_reset(md, card->host, type);
			if (!err)
				break;
			if (err == -ENODEV ||
				mmc_packed_cmd(mq_rq->cmd_type))
				goto cmd_abort;
			/* Fall through */
		}
		case MMC_BLK_ECC_ERR:
			if (brq->data.blocks > 1) {
				/* Redo read one sector at a time */
				pr_warn("%s: retrying using single block read\n",
					req->rq_disk->disk_name);
				disable_multi = 1;
				break;
			}
			/*
                         * After an error, we redo I/O one sector at a
                         * time, so we only reach here after trying to
                         * read a single sector.
                         */
			ret = blk_end_request(req, -EIO,
						brq->data.blksz);
			if (!ret)
				goto start_new_req;
			break;
		case MMC_BLK_NOMEDIUM:
			goto cmd_abort;
		default:
			pr_err("%s: Unhandled return value (%d)",
					req->rq_disk->disk_name, status);
			goto cmd_abort;
		}
		if (ret) {
			if (mmc_packed_cmd(mq_rq->cmd_type)) {
				if (!mq_rq->packed->retries)
					goto cmd_abort;
				mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
				mmc_start_req(card->host,
					      &mq_rq->mmc_active, NULL);
			} else {
				/*
				 * In case of an incomplete request,
				 * prepare it again and resend.
				 */
				mmc_blk_rw_rq_prep(mq_rq, card,
						disable_multi, mq);
				mmc_start_req(card->host,
						&mq_rq->mmc_active, NULL);
			}
			mq_rq->brq.retune_retry_done = retune_retry_done;
		}
	} while (ret);
	return 1;
 cmd_abort:
	if (mmc_packed_cmd(mq_rq->cmd_type)) {
		mmc_blk_abort_packed_req(mq_rq);
	} else {
		if (mmc_card_removed(card))
			req->cmd_flags |= REQ_QUIET;
		while (ret)
			ret = blk_end_request(req, -EIO,
					blk_rq_cur_bytes(req));
	}
 start_new_req:
	if (rqc) {
		if (mmc_card_removed(card)) {
			rqc->cmd_flags |= REQ_QUIET;
			blk_end_request_all(rqc, -EIO);
		} else {
			/*
			 * If the current request is packed, it needs to be put back.
			 */
			if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
				mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
			mmc_start_req(card->host,
				      &mq->mqrq_cur->mmc_active, NULL);
		}
	}
	return 0;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| per forlin | 318 | 37.50% | 4 | 17.39% |
| seungwon jeon | 193 | 22.76% | 2 | 8.70% |
| adrian hunter | 163 | 19.22% | 3 | 13.04% |
| russell king | 62 | 7.31% | 5 | 21.74% |
| saugata das | 41 | 4.83% | 1 | 4.35% |
| konstantin dorfman | 31 | 3.66% | 1 | 4.35% |
| sujit reddy thumma | 19 | 2.24% | 1 | 4.35% |
| ding wang | 9 | 1.06% | 1 | 4.35% |
| pierre ossman | 4 | 0.47% | 1 | 4.35% |
| kiyoshi ueda | 3 | 0.35% | 1 | 4.35% |
| subhash jadavani | 3 | 0.35% | 1 | 4.35% |
| joe perches | 1 | 0.12% | 1 | 4.35% |
| girish k s | 1 | 0.12% | 1 | 4.35% |
| Total | 848 | 100.00% | 23 | 100.00% |
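One recovery path in mmc_blk_issue_rw_rq() is worth calling out: on MMC_BLK_ECC_ERR, a failing multi-block transfer is not aborted outright but redone one sector at a time via disable_multi, and only a failing single-sector read is finally completed with -EIO. A hypothetical distillation of that decision (the helper name is ours):
#include <assert.h>
#include <stdbool.h>

/* On an ECC error, fall back to single-sector transfers for a
 * multi-block request; a single-sector request has nowhere left
 * to fall back to and is reported as failed. */
static bool should_retry_single(unsigned int blocks, int *disable_multi)
{
	if (blocks > 1) {
		*disable_multi = 1;	/* redo the request one sector at a time */
		return true;
	}
	return false;			/* already single-sector: give up on it */
}

int main(void)
{
	int disable_multi = 0;

	assert(should_retry_single(8, &disable_multi) && disable_multi == 1);
	assert(!should_retry_single(1, &disable_multi));
	return 0;
}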
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	int ret;
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_host *host = card->host;
	unsigned long flags;
	unsigned int cmd_flags = req ? req->cmd_flags : 0;
	if (req && !mq->mqrq_prev->req)
		/* claim host only for the first request */
		mmc_get_card(card);
	ret = mmc_blk_part_switch(card, md);
	if (ret) {
		if (req) {
			blk_end_request_all(req, -EIO);
		}
		ret = 0;
		goto out;
	}
	mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
	if (cmd_flags & REQ_DISCARD) {
		/* complete ongoing async transfer before issuing discard */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		if (req->cmd_flags & REQ_SECURE)
			ret = mmc_blk_issue_secdiscard_rq(mq, req);
		else
			ret = mmc_blk_issue_discard_rq(mq, req);
	} else if (cmd_flags & REQ_FLUSH) {
		/* complete ongoing async transfer before issuing flush */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		ret = mmc_blk_issue_flush(mq, req);
	} else {
		if (!req && host->areq) {
			spin_lock_irqsave(&host->context_info.lock, flags);
			host->context_info.is_waiting_last_req = true;
			spin_unlock_irqrestore(&host->context_info.lock, flags);
		}
		ret = mmc_blk_issue_rw_rq(mq, req);
	}
out:
	if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
	     (cmd_flags & MMC_REQ_SPECIAL_MASK))
		/*
		 * Release the host when there are no more requests
		 * and after a special request (discard, flush) is done.
		 * In case of a special request, there is no reentry to
		 * 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
		 */
		mmc_put_card(card);
	return ret;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| andrei warkentin | 85 | 28.62% | 3 | 23.08% |
| konstantin dorfman | 72 | 24.24% | 1 | 7.69% |
| adrian hunter | 67 | 22.56% | 3 | 23.08% |
| per forlin | 33 | 11.11% | 1 | 7.69% |
| jaehoon chung | 16 | 5.39% | 1 | 7.69% |
| ray jui | 12 | 4.04% | 1 | 7.69% |
| seungwon jeon | 9 | 3.03% | 1 | 7.69% |
| ulf hansson | 2 | 0.67% | 1 | 7.69% |
| subhash jadavani | 1 | 0.34% | 1 | 7.69% |
| Total | 297 | 100.00% | 13 | 100.00% |
static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| russell king | 29 | 100.00% | 1 | 100.00% |
| Total | 29 | 100.00% | 1 | 100.00% |
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
					      struct device *parent,
					      sector_t size,
					      bool default_ro,
					      const char *subname,
					      int area_type)
{
	struct mmc_blk_data *md;
	int devidx, ret;
again:
	if (!ida_pre_get(&mmc_blk_ida, GFP_KERNEL))
		return ERR_PTR(-ENOMEM);
	spin_lock(&mmc_blk_lock);
	ret = ida_get_new(&mmc_blk_ida, &devidx);
	spin_unlock(&mmc_blk_lock);
	if (ret == -EAGAIN)
		goto again;
	else if (ret)
		return ERR_PTR(ret);
	if (devidx >= max_devices) {
		ret = -ENOSPC;
		goto out;
	}
	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}
	md->area_type = area_type;
	/*
         * Set the read-only status based on the supported commands
         * and the write protect switch.
         */
	md->read_only = mmc_blk_readonly(card);
	md->disk = alloc_disk(perdev_minors);
	if (md->disk == NULL) {
		ret = -ENOMEM;
		goto err_kfree;
	}
	spin_lock_init(&md->lock);
	INIT_LIST_HEAD(&md->part);
	md->usage = 1;
	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
	if (ret)
		goto err_putdisk;
	md->queue.issue_fn = mmc_blk_issue_rq;
	md->queue.data = md;
	md->disk->major	= MMC_BLOCK_MAJOR;
	md->disk->first_minor = devidx * perdev_minors;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->disk->queue = md->queue.queue;
	md->disk->driverfs_dev = parent;
	set_disk_ro(md->disk, md->read_only || default_ro);
	md->disk->flags = GENHD_FL_EXT_DEVT;
	if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
		md->disk->flags |= GENHD_FL_NO_PART_SCAN;
	/*
         * As discussed on lkml, GENHD_FL_REMOVABLE should:
         *
         * - be set for removable media with permanent block devices
         * - be unset for removable block devices with permanent media
         *
         * Since MMC block devices clearly fall under the second
         * case, we do not set GENHD_FL_REMOVABLE.  Userspace
         * should use the block device creation/destruction hotplug
         * messages to tell when the card is present.
         */
	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
		 "mmcblk%u%s", card->host->index, subname ? subname : "");
	if (mmc_card_mmc(card))
		blk_queue_logical_block_size(md->queue.queue,
					     card->ext_csd.data_sector_size);
	else
		blk_queue_logical_block_size(md->queue.queue, 512);
	set_capacity(md->disk, size);
	if (mmc_host_cmd23(card->host)) {
		if (mmc_card_mmc(card) ||
		    (mmc_card_sd(card) &&
		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
			md->flags |= MMC_BLK_CMD23;
	}
	if (mmc_card_mmc(card) &&
	    md->flags & MMC_BLK_CMD23 &&
	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
	     card->ext_csd.rel_sectors)) {
		md->flags |= MMC_BLK_REL_WR;
		blk_queue_write_cache(md->queue.queue, true, true);
	}
	if (mmc_card_mmc(card) &&
	    (area_type == MMC_BLK_DATA_AREA_MAIN) &&
	    (md->flags & MMC_BLK_CMD23) &&
	    card->ext_csd.packed_event_en) {
		if (!mmc_packed_init(&md->queue, card))
			md->flags |= MMC_BLK_PACKED_CMD;
	}
	return md;
 err_putdisk:
	put_disk(md->disk);
 err_kfree:
	kfree(md);
 out:
	spin_lock(&mmc_blk_lock);
	ida_remove(&mmc_blk_ida, devidx);
	spin_unlock(&mmc_blk_lock);
	return ERR_PTR(ret);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| russell king | 249 | 40.62% | 2 | 8.33% |
| andrei warkentin | 131 | 21.37% | 4 | 16.67% |
| ulf hansson | 93 | 15.17% | 4 | 16.67% |
| seungwon jeon | 48 | 7.83% | 1 | 4.17% |
| saugata das | 23 | 3.75% | 1 | 4.17% |
| loic pallardy | 14 | 2.28% | 1 | 4.17% |
| marc-andre hebert | 11 | 1.79% | 1 | 4.17% |
| jiebing li | 10 | 1.63% | 1 | 4.17% |
| johan rudholm | 9 | 1.47% | 1 | 4.17% |
| colin cross | 8 | 1.31% | 1 | 4.17% |
| pierre ossman | 6 | 0.98% | 2 | 8.33% |
| jens axboe | 4 | 0.65% | 1 | 4.17% |
| olof johansson | 3 | 0.49% | 1 | 4.17% |
| adrian hunter | 2 | 0.33% | 1 | 4.17% |
| yoann padioleau | 1 | 0.16% | 1 | 4.17% |
| asaf vertz | 1 | 0.16% | 1 | 4.17% |
| Total | 613 | 100.00% | 24 | 100.00% |
static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
	sector_t size;
	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
		/*
		 * The EXT_CSD sector count is in number of 512 byte
		 * sectors.
		 */
		size = card->ext_csd.sectors;
	} else {
		/*
		 * The CSD capacity field is in units of 2^read_blkbits bytes;
		 * set_capacity() takes units of 512 bytes.
		 */
		size = (typeof(sector_t))card->csd.capacity
			<< (card->csd.read_blkbits - 9);
	}
	return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
					MMC_BLK_DATA_AREA_MAIN);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| andrei warkentin | 58 | 66.67% | 1 | 16.67% |
| russell king | 19 | 21.84% | 1 | 16.67% |
| kuninori morimoto | 6 | 6.90% | 1 | 16.67% |
| johan rudholm | 2 | 2.30% | 1 | 16.67% |
| tobias klauser | 1 | 1.15% | 1 | 16.67% |
| pierre ossman | 1 | 1.15% | 1 | 16.67% |
| Total | 87 | 100.00% | 6 | 100.00% |
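The shift in the byte-addressed branch of mmc_blk_alloc() converts CSD capacity units to the 512-byte sectors set_capacity() expects: capacity is counted in 2^read_blkbits-byte blocks, so it is scaled by 2^(read_blkbits - 9). A worked sketch with illustrative values:
#include <assert.h>
#include <stdint.h>

/* CSD capacity is counted in 2^read_blkbits-byte blocks; convert to
 * the 512-byte sectors set_capacity() expects, as the shift above does. */
static uint64_t csd_capacity_to_sectors(uint32_t capacity, unsigned int read_blkbits)
{
	return (uint64_t)capacity << (read_blkbits - 9);
}

int main(void)
{
	/* 977920 blocks of 2048 bytes (read_blkbits = 11) is
	 * 977920 << 2 = 3911680 sectors of 512 bytes. */
	assert(csd_capacity_to_sectors(977920, 11) == 3911680ull);
	return 0;
}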
static int mmc_blk_alloc_part(struct mmc_card *card,
			      struct mmc_blk_data *md,
			      unsigned int part_type,
			      sector_t size,
			      bool default_ro,
			      const char *subname,
			      int area_type)
{
	char cap_str[10];
	struct mmc_blk_data *part_md;
	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
				    subname, area_type);
	if (IS_ERR(part_md))
		return PTR_ERR(part_md);
	part_md->part_type = part_type;
	list_add(&part_md->part, &md->part);
	string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s partition %u %s\n",
	       part_md->disk->disk_name, mmc_card_id(card),
	       mmc_card_name(card), part_md->part_type, cap_str);
	return 0;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| andrei warkentin | 145 | 94.77% | 1 | 25.00% |
| johan rudholm | 5 | 3.27% | 1 | 25.00% |
| james bottomley | 2 | 1.31% | 1 | 25.00% |
| girish k s | 1 | 0.65% | 1 | 25.00% |
| Total | 153 | 100.00% | 4 | 100.00% |
/* MMC physical partitions consist of two boot partitions and
 * up to four general purpose partitions.
 * For each partition enabled in EXT_CSD a block device will be allocated
 * to provide access to the partition.
 */
static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
	int idx, ret = 0;
	if (!mmc_card_mmc(card))
		return 0;
	for (idx = 0; idx < card->nr_parts; idx++) {
		if (card->part[idx].size) {
			ret = mmc_blk_alloc_part(card, md,
				card->part[idx].part_cfg,
				card->part[idx].size >> 9,
				card->part[idx].force_ro,
				card->part[idx].name,
				card->part[idx].area_type);
			if (ret)
				return ret;
		}
	}
	return ret;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| andrei warkentin | 72 | 55.38% | 1 | 33.33% |
| namjae jeon | 49 | 37.69% | 1 | 33.33% |
| johan rudholm | 9 | 6.92% | 1 | 33.33% |
| Total | 130 | 100.00% | 3 | 100.00% |
static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
	struct mmc_card *card;
	if (md) {
		/*
                 * Flush remaining requests and free queues. It
                 * is freeing the queue that stops new requests
                 * from being accepted.
                 */
		card = md->queue.card;
		mmc_cleanup_queue(&md->queue);
		if (md->flags & MMC_BLK_PACKED_CMD)
			mmc_packed_clean(&md->queue);
		if (md->disk->flags & GENHD_FL_UP) {
			device_remove_file(disk_to_dev(md->disk), &md->force_ro);
			if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
					card->ext_csd.boot_ro_lockable)
				device_remove_file(disk_to_dev(md->disk),
					&md->power_ro_lock);
			del_gendisk(md->disk);
		}
		mmc_blk_put(md);
	}
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| andrei warkentin | 56 | 44.80% | 1 | 25.00% |
| johan rudholm | 36 | 28.80% | 1 | 25.00% |
| paul taysom | 25 | 20.00% | 1 | 25.00% |
| franck jullien | 8 | 6.40% | 1 | 25.00% |
| Total | 125 | 100.00% | 4 | 100.00% |
static void mmc_blk_remove_parts(struct mmc_card *card,
				 struct mmc_blk_data *md)
{
	struct list_head *pos, *q;
	struct mmc_blk_data *part_md;
	list_for_each_safe(pos, q, &md->part) {
		part_md = list_entry(pos, struct mmc_blk_data, part);
		list_del(pos);
		mmc_blk_remove_req(part_md);
	}
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| andrei warkentin | 59 | 96.72% | 1 | 50.00% |
| russell king | 2 | 3.28% | 1 | 50.00% |
| Total | 61 | 100.00% | 2 | 100.00% |
static int mmc_add_disk(struct mmc_blk_data *md)
{
	int ret;
	struct mmc_card *card = md->queue.card;
	add_disk(md->disk);
	md->force_ro.show = force_ro_show;
	md->force_ro.store = force_ro_store;
	sysfs_attr_init(&md->force_ro.attr);
	md->force_ro.attr.name = "force_ro";
	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
	if (ret)
		goto force_ro_fail;
	if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
	     card->ext_csd.boot_ro_lockable) {
		umode_t mode;
		if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
			mode = S_IRUGO;
		else
			mode = S_IRUGO | S_IWUSR;
		md->power_ro_lock.show = power_ro_lock_show;
		md->power_ro_lock.store = power_ro_lock_store;
		sysfs_attr_init(&md->power_ro_lock.attr);
		md->power_ro_lock.attr.mode = mode;
		md->power_ro_lock.attr.name =
					"ro_lock_until_next_power_on";
		ret = device_create_file(disk_to_dev(md->disk),
				&md->power_ro_lock);
		if (ret)
			goto power_ro_lock_fail;
	}
	return ret;
power_ro_lock_fail:
	device_remove_file(disk_to_dev(md->disk), &md->force_ro);
force_ro_fail:
	del_gendisk(md->disk);
	return ret;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| johan rudholm | 137 | 55.24% | 1 | 16.67% |
| andrei warkentin | 80 | 32.26% | 1 | 16.67% |
| rabin vincent | 20 | 8.06% | 2 | 33.33% |
| russell king | 10 | 4.03% | 1 | 16.67% |
| al viro | 1 | 0.40% | 1 | 16.67% |
| Total | 248 | 100.00% | 6 | 100.00% |
#define CID_MANFID_SANDISK	0x2
#define CID_MANFID_TOSHIBA	0x11
#define CID_MANFID_MICRON	0x13
#define CID_MANFID_SAMSUNG	0x15
#define CID_MANFID_KINGSTON	0x70
static const struct mmc_fixup blk_fixups[] =
{
	MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	/*
	 * Some MMC cards experience performance degradation with CMD23
	 * instead of CMD12-bounded multiblock transfers. For now we'll
	 * blacklist what's bad...
	 * - Certain Toshiba cards.
	 *
	 * N.B. This doesn't affect SD cards.
	 */
	MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	/*
         * Some MMC cards need longer data read timeout than indicated in CSD.
         */
	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
		  MMC_QUIRK_LONG_READ_TIME),
	MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_LONG_READ_TIME),
	/*
         * On these Samsung MoviNAND parts, performing secure erase or
         * secure trim can result in unrecoverable corruption due to a
         * firmware bug.
         */
	MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	/*
	 * On some Kingston eMMCs, performing trim can occasionally result
	 * in unrecoverable data corruption due to a firmware bug.
	 */
	MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_TRIM_BROKEN),
	MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_TRIM_BROKEN),
	END_FIXUP
};
static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md, *part_md;
	char cap_str[10];
	/*
         * Check that the card supports the command class(es) we need.
         */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;
	mmc_fixup_device(card, blk_fixups);
	md = mmc_blk_alloc(card);
	if (IS_ERR(md))
		return PTR_ERR(md);
	string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s %s %s\n",
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
		cap_str, md->read_only ? "(ro)" : "");
	if (mmc_blk_alloc_parts(card, md))
		goto out;
	dev_set_drvdata(&card->dev, md);
	if (mmc_add_disk(md))
		goto out;
	list_for_each_entry(part_md, &md->part, part) {
		if (mmc_add_disk(part_md))
			goto out;
	}
	pm_runtime_set_autosuspend_delay(&card->dev, 3000);
	pm_runtime_use_autosuspend(&card->dev);
	/*
         * Don't enable runtime PM for SD-combo cards here. Leave that
         * decision to be taken during the SDIO init sequence instead.
         */
	if (card->type != MMC_TYPE_SD_COMBO) {
		pm_runtime_set_active(&card->dev);
		pm_runtime_enable(&card->dev);
	}
	return 0;
 out:
	mmc_blk_remove_parts(card, md);
	mmc_blk_remove_req(md);
	return 0;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| russell king | 85 | 35.12% | 2 | 12.50% |
| ulf hansson | 57 | 23.55% | 5 | 31.25% |
| andrei warkentin | 46 | 19.01% | 1 | 6.25% |
| pierre ossman | 37 | 15.29% | 3 | 18.75% |
| lukas czerner | 7 | 2.89% | 1 | 6.25% |
| jarkko lavinen | 4 | 1.65% | 1 | 6.25% |
| yi li | 3 | 1.24% | 1 | 6.25% |
| james bottomley | 2 | 0.83% | 1 | 6.25% |
| girish k s | 1 | 0.41% | 1 | 6.25% |
| Total | 242 | 100.00% | 16 | 100.00% |
static void mmc_blk_remove(struct mmc_card *card)
{
	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
	mmc_blk_remove_parts(card, md);
	pm_runtime_get_sync(&card->dev);
	mmc_claim_host(card->host);
	mmc_blk_part_switch(card, md);
	mmc_release_host(card->host);
	if (card->type != MMC_TYPE_SD_COMBO)
		pm_runtime_disable(&card->dev);
	pm_runtime_put_noidle(&card->dev);
	mmc_blk_remove_req(md);
	dev_set_drvdata(&card->dev, NULL);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| ulf hansson | 49 | 49.49% | 4 | 50.00% |
| russell king | 23 | 23.23% | 1 | 12.50% |
| adrian hunter | 21 | 21.21% | 1 | 12.50% |
| andrei warkentin | 4 | 4.04% | 1 | 12.50% |
| pierre ossman | 2 | 2.02% | 1 | 12.50% |
| Total | 99 | 100.00% | 8 | 100.00% |
static int _mmc_blk_suspend(struct mmc_card *card)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
	if (md) {
		mmc_queue_suspend(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_suspend(&part_md->queue);
		}
	}
	return 0;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| russell king | 33 | 51.56% | 3 | 42.86% |
| andrei warkentin | 23 | 35.94% | 1 | 14.29% |
| ulf hansson | 8 | 12.50% | 3 | 42.86% |
| Total | 64 | 100.00% | 7 | 100.00% |
static void mmc_blk_shutdown(struct mmc_card *card)
{
	_mmc_blk_suspend(card);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| ulf hansson | 16 | 100.00% | 2 | 100.00% |
| Total | 16 | 100.00% | 2 | 100.00% |
#ifdef CONFIG_PM_SLEEP
static int mmc_blk_suspend(struct device *dev)
{
	struct mmc_card *card = mmc_dev_to_card(dev);
	return _mmc_blk_suspend(card);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| ulf hansson | 27 | 100.00% | 3 | 100.00% |
| Total | 27 | 100.00% | 3 | 100.00% |
static int mmc_blk_resume(struct device *dev)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = dev_get_drvdata(dev);
	if (md) {
		/*
                 * Resume involves the card going into idle state,
                 * so current partition is always the main one.
                 */
		md->part_curr = md->part_type;
		mmc_queue_resume(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_resume(&part_md->queue);
		}
	}
	return 0;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| russell king | 34 | 48.57% | 3 | 50.00% |
| andrei warkentin | 32 | 45.71% | 1 | 16.67% |
| ulf hansson | 4 | 5.71% | 2 | 33.33% |
| Total | 70 | 100.00% | 6 | 100.00% |
#endif
static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
		.pm	= &mmc_blk_pm_ops,
        },
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.shutdown	= mmc_blk_shutdown,
};
static int __init mmc_blk_init(void)
{
	int res;
	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
		pr_info("mmcblk: using %d minors per device\n", perdev_minors);
	max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
	if (res)
		goto out;
	res = mmc_register_driver(&mmc_driver);
	if (res)
		goto out2;
	return 0;
 out2:
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
	return res;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| russell king | 38 | 43.18% | 1 | 20.00% |
| akinobu mita | 21 | 23.86% | 1 | 20.00% |
| olof johansson | 18 | 20.45% | 1 | 20.00% |
| ben hutchings | 10 | 11.36% | 1 | 20.00% |
| pierre ossman | 1 | 1.14% | 1 | 20.00% |
| Total | 88 | 100.00% | 5 | 100.00% |
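The max_devices computation in mmc_blk_init() bounds the device count two ways: by the 20-bit minor number space divided by perdev_minors, and by the MAX_DEVICES cap. A sketch using the kernel's MINORBITS value of 20:
#include <assert.h>

#define MINORBITS	20	/* kernel value: 20-bit minor numbers */
#define MAX_DEVICES	256

/* Mirror of max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors). */
static int calc_max_devices(int perdev_minors)
{
	int by_minors = (1 << MINORBITS) / perdev_minors;

	return by_minors < MAX_DEVICES ? by_minors : MAX_DEVICES;
}

int main(void)
{
	/* With the default of 8 minors per device the minor space would
	 * allow 131072 devices, so the MAX_DEVICES cap of 256 wins. */
	assert(calc_max_devices(8) == 256);
	return 0;
}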
static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| russell king | 21 | 95.45% | 1 | 50.00% |
| pierre ossman | 1 | 4.55% | 1 | 50.00% |
| Total | 22 | 100.00% | 2 | 100.00% |
module_init(mmc_blk_init);
module_exit(mmc_blk_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
Overall Contributors
| Person | Tokens | Token % | Commits | Commit % |
| seungwon jeon | 2737 | 22.17% | 4 | 2.41% |
| andrei warkentin | 1569 | 12.71% | 8 | 4.82% |
| russell king | 1398 | 11.33% | 13 | 7.83% |
| adrian hunter | 1178 | 9.54% | 14 | 8.43% |
| ulf hansson | 796 | 6.45% | 16 | 9.64% |
| john calixto | 709 | 5.74% | 1 | 0.60% |
| per forlin | 658 | 5.33% | 4 | 2.41% |
| jon hunter | 618 | 5.01% | 1 | 0.60% |
| johan rudholm | 585 | 4.74% | 3 | 1.81% |
| pierre ossman | 295 | 2.39% | 17 | 10.24% |
| loic pallardy | 244 | 1.98% | 2 | 1.20% |
| maya erez | 164 | 1.33% | 1 | 0.60% |
| kobayashi yoshitake | 147 | 1.19% | 1 | 0.60% |
| ian chen | 109 | 0.88% | 1 | 0.60% |
| konstantin dorfman | 103 | 0.83% | 1 | 0.60% |
| saugata das | 91 | 0.74% | 2 | 1.20% |
| shawn lin | 77 | 0.62% | 2 | 1.20% |
| olof johansson | 67 | 0.54% | 2 | 1.20% |
| sujit reddy thumma | 54 | 0.44% | 1 | 0.60% |
| kuninori morimoto | 53 | 0.43% | 2 | 1.20% |
| namjae jeon | 50 | 0.41% | 1 | 0.60% |
| ben dooks | 40 | 0.32% | 1 | 0.60% |
| chuanxiao dong | 36 | 0.29% | 1 | 0.60% |
| chris ball | 36 | 0.29% | 4 | 2.41% |
| arnd bergmann | 30 | 0.24% | 2 | 1.20% |
| grant grundler | 27 | 0.22% | 1 | 0.60% |
| david brownell | 26 | 0.21% | 1 | 0.60% |
| yangbo lu | 26 | 0.21% | 1 | 0.60% |
| ken sumrall | 26 | 0.21% | 1 | 0.60% |
| paul taysom | 25 | 0.20% | 1 | 0.60% |
| akinobu mita | 21 | 0.17% | 1 | 0.60% |
| al viro | 20 | 0.16% | 3 | 1.81% |
| rabin vincent | 20 | 0.16% | 2 | 1.20% |
| jaehoon chung | 19 | 0.15% | 2 | 1.20% |
| subhash jadavani | 15 | 0.12% | 2 | 1.20% |
| kyungmin park | 14 | 0.11% | 2 | 1.20% |
| matt gumbel | 14 | 0.11% | 1 | 0.60% |
| ben hutchings | 14 | 0.11% | 1 | 0.60% |
| trey ramsay | 13 | 0.11% | 1 | 0.60% |
| stefan nilsson xk | 12 | 0.10% | 1 | 0.60% |
| ray jui | 12 | 0.10% | 1 | 0.60% |
| marc-andre hebert | 11 | 0.09% | 1 | 0.60% |
| david woodhouse | 10 | 0.08% | 1 | 0.60% |
| jiebing li | 10 | 0.08% | 1 | 0.60% |
| yaniv gardi | 10 | 0.08% | 1 | 0.60% |
| taras kondratiuk | 9 | 0.07% | 1 | 0.60% |
| colin cross | 9 | 0.07% | 1 | 0.60% |
| ding wang | 9 | 0.07% | 1 | 0.60% |
| vladimir motyka | 8 | 0.06% | 1 | 0.60% |
| tejun heo | 8 | 0.06% | 2 | 1.20% |
| franck jullien | 8 | 0.06% | 1 | 0.60% |
| ville viinikka | 8 | 0.06% | 1 | 0.60% |
| christoph hellwig | 8 | 0.06% | 1 | 0.60% |
| philip langdale | 8 | 0.06% | 1 | 0.60% |
| arjan van de ven | 7 | 0.06% | 1 | 0.60% |
| lukas czerner | 7 | 0.06% | 1 | 0.60% |
| andrew morton | 7 | 0.06% | 1 | 0.60% |
| paul walmsley | 7 | 0.06% | 1 | 0.60% |
| andy whitcroft | 5 | 0.04% | 1 | 0.60% |
| tomas winkler | 5 | 0.04% | 1 | 0.60% |
| james bottomley | 4 | 0.03% | 1 | 0.60% |
| jens axboe | 4 | 0.03% | 1 | 0.60% |
| jarkko lavinen | 4 | 0.03% | 1 | 0.60% |
| girish k s | 3 | 0.02% | 1 | 0.60% |
| ben collins | 3 | 0.02% | 1 | 0.60% |
| anna lemehova | 3 | 0.02% | 1 | 0.60% |
| kiyoshi ueda | 3 | 0.02% | 1 | 0.60% |
| yi li | 3 | 0.02% | 1 | 0.60% |
| yalin wang | 2 | 0.02% | 1 | 0.60% |
| venkatraman sathiyamoorthy | 2 | 0.02% | 1 | 0.60% |
| philippe de swert | 2 | 0.02% | 1 | 0.60% |
| luca porzio | 2 | 0.02% | 1 | 0.60% |
| yoann padioleau | 1 | 0.01% | 1 | 0.60% |
| asaf vertz | 1 | 0.01% | 1 | 0.60% |
| harvey harrison | 1 | 0.01% | 1 | 0.60% |
| pavel pisa | 1 | 0.01% | 1 | 0.60% |
| tobias klauser | 1 | 0.01% | 1 | 0.60% |
| baruch siach | 1 | 0.01% | 1 | 0.60% |
| joe perches | 1 | 0.01% | 1 | 0.60% |
| Total | 12344 | 100.00% | 166 | 100.00% |
  