Release 4.11 drivers/mmc/core/sd_ops.c
/*
 *  linux/drivers/mmc/core/sd_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include "core.h"
#include "sd_ops.h"
int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
{
        int err;
        struct mmc_command cmd = {};

        if (WARN_ON(card && card->host != host))
                return -EINVAL;

        cmd.opcode = MMC_APP_CMD;

        if (card) {
                cmd.arg = card->rca << 16;
                cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        } else {
                cmd.arg = 0;
                cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_BCR;
        }

        err = mmc_wait_for_cmd(host, &cmd, 0);
        if (err)
                return err;

        /* Check that card supported application commands */
        if (!mmc_host_is_spi(host) && !(cmd.resp[0] & R1_APP_CMD))
                return -EOPNOTSUPP;

        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Adrian Bunk | 116 | 83.45% | 1 | 16.67% |
David Brownell | 10 | 7.19% | 1 | 16.67% |
Shawn Lin | 8 | 5.76% | 1 | 16.67% |
Pierre Ossman | 3 | 2.16% | 1 | 16.67% |
Masahiro Yamada | 1 | 0.72% | 1 | 16.67% |
Chris Ball | 1 | 0.72% | 1 | 16.67% |
Total | 139 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL_GPL(mmc_app_cmd);
/**
 *      mmc_wait_for_app_cmd - start an application command and wait for
 *                             completion
 *      @host: MMC host to start command
 *      @card: Card to send MMC_APP_CMD to
 *      @cmd: MMC command to start
 *      @retries: maximum number of retries
 *
 *      Sends an MMC_APP_CMD, checks the card response, sends the command
 *      given in @cmd and waits for it to complete. Returns any error
 *      that occurred while the command was executing; the response is
 *      not parsed.
 */
int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
        struct mmc_command *cmd, int retries)
{
        struct mmc_request mrq = {};
        int i, err;

        if (retries < 0)
                retries = MMC_CMD_RETRIES;

        err = -EIO;

        /*
         * We have to resend MMC_APP_CMD for each attempt so
         * we cannot use the retries field in mmc_command.
         */
        for (i = 0; i <= retries; i++) {
                err = mmc_app_cmd(host, card);
                if (err) {
                        /* no point in retrying; no APP commands allowed */
                        if (mmc_host_is_spi(host)) {
                                if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
                                        break;
                        }
                        continue;
                }

                memset(&mrq, 0, sizeof(struct mmc_request));

                memset(cmd->resp, 0, sizeof(cmd->resp));
                cmd->retries = 0;

                mrq.cmd = cmd;
                cmd->data = NULL;

                mmc_wait_for_req(host, &mrq);

                err = cmd->error;
                if (!cmd->error)
                        break;

                /* no point in retrying illegal APP commands */
                if (mmc_host_is_spi(host)) {
                        if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
                                break;
                }
        }

        return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pierre Ossman | 144 | 72.73% | 2 | 33.33% |
David Brownell | 46 | 23.23% | 1 | 16.67% |
Shawn Lin | 6 | 3.03% | 1 | 16.67% |
Masahiro Yamada | 1 | 0.51% | 1 | 16.67% |
Chris Ball | 1 | 0.51% | 1 | 16.67% |
Total | 198 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(mmc_wait_for_app_cmd);
int mmc_app_set_bus_width(struct mmc_card *card, int width)
{
        struct mmc_command cmd = {};

        cmd.opcode = SD_APP_SET_BUS_WIDTH;
        cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

        switch (width) {
        case MMC_BUS_WIDTH_1:
                cmd.arg = SD_BUS_WIDTH_1;
                break;
        case MMC_BUS_WIDTH_4:
                cmd.arg = SD_BUS_WIDTH_4;
                break;
        default:
                return -EINVAL;
        }

        return mmc_wait_for_app_cmd(card->host, card, &cmd, MMC_CMD_RETRIES);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pierre Ossman | 76 | 96.20% | 2 | 40.00% |
Masahiro Yamada | 2 | 2.53% | 2 | 40.00% |
Chris Ball | 1 | 1.27% | 1 | 20.00% |
Total | 79 | 100.00% | 5 | 100.00% |
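For orientation, here is a minimal sketch (not part of sd_ops.c) of how an initialization path might pair mmc_app_set_bus_width() with the core helper mmc_set_bus_width(); the example_enable_wide_bus() wrapper is a hypothetical name, and checking only the host capability is an illustrative simplification, since a real caller would also consult the card's SCR.

/* Illustrative sketch only: switch an SD card and the host controller
 * to a 4-bit data bus, assuming the host advertises 4-bit support.
 * A real caller would also check the card's SCR for SD_SCR_BUS_WIDTH_4.
 */
static int example_enable_wide_bus(struct mmc_card *card)
{
        int err;

        if (!(card->host->caps & MMC_CAP_4_BIT_DATA))
                return 0;       /* stay on the 1-bit bus */

        /* ACMD6: ask the card to switch its data bus to 4 bits */
        err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
        if (err)
                return err;

        /* reconfigure the host controller side to match the card */
        mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);

        return 0;
}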
int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
        struct mmc_command cmd = {};
        int i, err = 0;

        cmd.opcode = SD_APP_OP_COND;
        if (mmc_host_is_spi(host))
                cmd.arg = ocr & (1 << 30); /* SPI only defines one bit */
        else
                cmd.arg = ocr;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

        for (i = 100; i; i--) {
                err = mmc_wait_for_app_cmd(host, NULL, &cmd, MMC_CMD_RETRIES);
                if (err)
                        break;

                /* if we're just probing, do a single pass */
                if (ocr == 0)
                        break;

                /* otherwise wait until reset completes */
                if (mmc_host_is_spi(host)) {
                        if (!(cmd.resp[0] & R1_SPI_IDLE))
                                break;
                } else {
                        if (cmd.resp[0] & MMC_CARD_BUSY)
                                break;
                }

                err = -ETIMEDOUT;

                mmc_delay(10);
        }

        if (!i)
                pr_err("%s: card never left busy state\n", mmc_hostname(host));

        if (rocr && !mmc_host_is_spi(host))
                *rocr = cmd.resp[0];

        return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pierre Ossman | 119 | 59.20% | 2 | 33.33% |
David Brownell | 65 | 32.34% | 1 | 16.67% |
Johan Rudholm | 15 | 7.46% | 1 | 16.67% |
Chris Ball | 1 | 0.50% | 1 | 16.67% |
Masahiro Yamada | 1 | 0.50% | 1 | 16.67% |
Total | 201 | 100.00% | 6 | 100.00% |
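As an illustration (not part of this file), the typical attach-time pattern is a zero-OCR probe followed by a second call with the chosen operating conditions. The example_start_sd_init() helper below is hypothetical, and a real caller would also trim the reported OCR against the voltage window the host can actually supply before the second call.

/* Illustrative sketch only: probe the card's OCR with a single pass,
 * then restart initialization with the requested operating conditions.
 * Native (non-SPI) hosts are assumed.
 */
static int example_start_sd_init(struct mmc_host *host, u32 *rocr)
{
        u32 ocr = 0;
        int err;

        /* ocr == 0: one pass, just fetch the card's OCR register */
        err = mmc_send_app_op_cond(host, 0, &ocr);
        if (err)
                return err;

        /* second call with a voltage window plus the HCS bit (1 << 30)
         * to announce SDHC/SDXC support; this time the helper loops
         * until the card leaves the busy state
         */
        return mmc_send_app_op_cond(host, ocr | (1 << 30), rocr);
}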
int mmc_send_if_cond(struct mmc_host *host, u32 ocr)
{
        struct mmc_command cmd = {};
        int err;
        static const u8 test_pattern = 0xAA;
        u8 result_pattern;

        /*
         * To support SD 2.0 cards, we must always invoke SD_SEND_IF_COND
         * before SD_APP_OP_COND. This command will harmlessly fail for
         * SD 1.0 cards.
         */
        cmd.opcode = SD_SEND_IF_COND;
        cmd.arg = ((ocr & 0xFF8000) != 0) << 8 | test_pattern;
        cmd.flags = MMC_RSP_SPI_R7 | MMC_RSP_R7 | MMC_CMD_BCR;

        err = mmc_wait_for_cmd(host, &cmd, 0);
        if (err)
                return err;

        if (mmc_host_is_spi(host))
                result_pattern = cmd.resp[1] & 0xFF;
        else
                result_pattern = cmd.resp[0] & 0xFF;

        if (result_pattern != test_pattern)
                return -EIO;

        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pierre Ossman | 99 | 76.74% | 2 | 40.00% |
David Brownell | 28 | 21.71% | 1 | 20.00% |
Masahiro Yamada | 1 | 0.78% | 1 | 20.00% |
Chris Ball | 1 | 0.78% | 1 | 20.00% |
Total | 129 | 100.00% | 5 | 100.00% |
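A short worked example of how the SD_SEND_IF_COND (CMD8) argument built above decomposes may help; the example_if_cond_arg() helper is purely illustrative and not part of this file.

/* Illustrative sketch only: how the CMD8 argument above is built.
 * Per SD 2.0, bits 11:8 carry the supplied-voltage (VHS) field and
 * bits 7:0 carry a check pattern that the card echoes back in its R7
 * response, which is why the function compares the low response byte
 * against test_pattern.
 */
static u32 example_if_cond_arg(u32 ocr)
{
        u32 vhs = (ocr & 0xFF8000) != 0;        /* 1 = 2.7-3.6V requested */

        return vhs << 8 | 0xAA;                 /* e.g. 0x1AA for a 3.3V host */
}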
int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca)
{
        int err;
        struct mmc_command cmd = {};

        cmd.opcode = SD_SEND_RELATIVE_ADDR;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR;

        err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        *rca = cmd.resp[0] >> 16;

        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pierre Ossman | 76 | 97.44% | 2 | 50.00% |
Masahiro Yamada | 1 | 1.28% | 1 | 25.00% |
Chris Ball | 1 | 1.28% | 1 | 25.00% |
Total | 78 | 100.00% | 4 | 100.00% |
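As a sketch of how the published RCA is typically consumed (the example_assign_rca() helper is hypothetical, and mmc_select_card() lives in mmc_ops.c, not this file), the address is stored in the card structure and later placed in the argument of the card-selection command:

/* Illustrative sketch only: store the RCA the card publishes and then
 * select the card.  mmc_select_card() issues CMD7 with card->rca << 16,
 * the same encoding used for the ACMD prefix above.
 */
static int example_assign_rca(struct mmc_host *host, struct mmc_card *card)
{
        int err;

        err = mmc_send_relative_addr(host, &card->rca);
        if (err)
                return err;

        return mmc_select_card(card);   /* move the card to transfer state */
}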
int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
{
        int err;
        struct mmc_request mrq = {};
        struct mmc_command cmd = {};
        struct mmc_data data = {};
        struct scatterlist sg;
        void *data_buf;

        /* NOTE: caller guarantees scr is heap-allocated */

        err = mmc_app_cmd(card->host, card);
        if (err)
                return err;

        /* dma onto stack is unsafe/nonportable, but callers to this
         * routine normally provide temporary on-stack buffers ...
         */
        data_buf = kmalloc(sizeof(card->raw_scr), GFP_KERNEL);
        if (data_buf == NULL)
                return -ENOMEM;

        mrq.cmd = &cmd;
        mrq.data = &data;

        cmd.opcode = SD_APP_SEND_SCR;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = 8;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;

        sg_init_one(&sg, data_buf, 8);

        mmc_set_data_timeout(&data, card);

        mmc_wait_for_req(card->host, &mrq);

        memcpy(scr, data_buf, sizeof(card->raw_scr));
        kfree(data_buf);

        if (cmd.error)
                return cmd.error;
        if (data.error)
                return data.error;

        scr[0] = be32_to_cpu(scr[0]);
        scr[1] = be32_to_cpu(scr[1]);

        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pierre Ossman | 194 | 76.98% | 3 | 33.33% |
Yoshihiro Shimoda | 49 | 19.44% | 1 | 11.11% |
Masahiro Yamada | 3 | 1.19% | 1 | 11.11% |
Chris Ball | 3 | 1.19% | 3 | 33.33% |
David Brownell | 3 | 1.19% | 1 | 11.11% |
Total | 252 | 100.00% | 9 | 100.00% |
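For context, a minimal sketch (not part of sd_ops.c) of how the SD setup path typically feeds this function; card->raw_scr is the two-word buffer whose size is used in the kmalloc() above, and the example_read_scr() wrapper is an illustrative name.

/* Illustrative sketch only: read the SCR into the card structure.
 * Decoding the individual SCR fields is left to the caller.
 */
static int example_read_scr(struct mmc_card *card)
{
        int err;

        err = mmc_app_send_scr(card, card->raw_scr);
        if (err)
                return err;

        /* card->raw_scr[0] and [1] now hold the SCR in host byte order */
        return 0;
}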
int mmc_sd_switch(struct mmc_card *card, int mode, int group,
        u8 value, u8 *resp)
{
        struct mmc_request mrq = {};
        struct mmc_command cmd = {};
        struct mmc_data data = {};
        struct scatterlist sg;

        /* NOTE: caller guarantees resp is heap-allocated */

        mode = !!mode;
        value &= 0xF;

        mrq.cmd = &cmd;
        mrq.data = &data;

        cmd.opcode = SD_SWITCH;
        cmd.arg = mode << 31 | 0x00FFFFFF;
        cmd.arg &= ~(0xF << (group * 4));
        cmd.arg |= value << (group * 4);
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = 64;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;

        sg_init_one(&sg, resp, 64);

        mmc_set_data_timeout(&data, card);

        mmc_wait_for_req(card->host, &mrq);

        if (cmd.error)
                return cmd.error;
        if (data.error)
                return data.error;

        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pierre Ossman | 198 | 95.65% | 2 | 28.57% |
David Brownell | 3 | 1.45% | 1 | 14.29% |
Chris Ball | 3 | 1.45% | 3 | 42.86% |
Masahiro Yamada | 3 | 1.45% | 1 | 14.29% |
Total | 207 | 100.00% | 7 | 100.00% |
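As a worked illustration (the example_switch_to_high_speed() helper below is hypothetical), switching function group 1 to high-speed mode produces the following argument; note the comment above about the 64-byte response buffer needing to be heap-allocated.

/* Illustrative sketch only: switch function group 1 (access mode) to
 * high speed.  With mode = 1, group = 0 and value = 1 the argument
 * built above becomes:
 *
 *      arg  = 1 << 31 | 0x00FFFFFF     = 0x80FFFFFF
 *      arg &= ~(0xF << 0)              = 0x80FFFFF0
 *      arg |=  1 << 0                  = 0x80FFFFF1
 *
 * i.e. request function 1 in group 1 and leave the other groups as-is.
 */
static int example_switch_to_high_speed(struct mmc_card *card)
{
        u8 *status;
        int err;

        status = kmalloc(64, GFP_KERNEL);       /* SD switch status block */
        if (!status)
                return -ENOMEM;

        err = mmc_sd_switch(card, 1, 0, 1, status);
        /* on success, the low nibble of status[16] (bits 379:376) reports
         * the function actually selected for group 1; checking it is
         * omitted from this sketch
         */

        kfree(status);
        return err;
}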
int mmc_app_sd_status(struct mmc_card *card, void *ssr)
{
        int err;
        struct mmc_request mrq = {};
        struct mmc_command cmd = {};
        struct mmc_data data = {};
        struct scatterlist sg;

        /* NOTE: caller guarantees ssr is heap-allocated */

        err = mmc_app_cmd(card->host, card);
        if (err)
                return err;

        mrq.cmd = &cmd;
        mrq.data = &data;

        cmd.opcode = SD_APP_SD_STATUS;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = 64;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;

        sg_init_one(&sg, ssr, 64);

        mmc_set_data_timeout(&data, card);

        mmc_wait_for_req(card->host, &mrq);

        if (cmd.error)
                return cmd.error;
        if (data.error)
                return data.error;

        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Adrian Hunter | 172 | 96.63% | 1 | 20.00% |
Chris Ball | 3 | 1.69% | 3 | 60.00% |
Masahiro Yamada | 3 | 1.69% | 1 | 20.00% |
Total | 178 | 100.00% | 5 | 100.00% |
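Finally, a sketch (not part of this file) of how a caller might read and byte-swap the SD Status block; the example_read_sd_status() helper and its raw_ssr output array are illustrative assumptions.

/* Illustrative sketch only: read the 64-byte SD Status (SSR) block.
 * The buffer must be heap-allocated (it is mapped for DMA), and the
 * data arrives big-endian, so it is converted before fields such as
 * the speed class or erase timeout are parsed.
 */
static int example_read_sd_status(struct mmc_card *card, u32 raw_ssr[16])
{
        __be32 *ssr;
        int err, i;

        ssr = kmalloc(64, GFP_KERNEL);
        if (!ssr)
                return -ENOMEM;

        err = mmc_app_sd_status(card, ssr);
        if (!err)
                for (i = 0; i < 16; i++)
                        raw_ssr[i] = be32_to_cpu(ssr[i]);

        kfree(ssr);
        return err;
}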
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pierre Ossman | 940 | 62.54% | 5 | 27.78% |
Adrian Hunter | 172 | 11.44% | 1 | 5.56% |
David Brownell | 155 | 10.31% | 1 | 5.56% |
Adrian Bunk | 116 | 7.72% | 1 | 5.56% |
Yoshihiro Shimoda | 52 | 3.46% | 1 | 5.56% |
Masahiro Yamada | 16 | 1.06% | 2 | 11.11% |
Chris Ball | 15 | 1.00% | 3 | 16.67% |
Johan Rudholm | 15 | 1.00% | 1 | 5.56% |
Shawn Lin | 14 | 0.93% | 1 | 5.56% |
John Calixto | 5 | 0.33% | 1 | 5.56% |
Paul Gortmaker | 3 | 0.20% | 1 | 5.56% |
Total | 1503 | 100.00% | 18 | 100.00% |