Release 4.10 drivers/mmc/core/sd_ops.c
/*
 *  linux/drivers/mmc/core/sd_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include "core.h"
#include "sd_ops.h"
int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
{
	int err;
	struct mmc_command cmd = {0};

	if (WARN_ON(card && card->host != host))
		return -EINVAL;

	cmd.opcode = MMC_APP_CMD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_BCR;
	}

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		return err;

	/* Check that the card supports application commands */
	if (!mmc_host_is_spi(host) && !(cmd.resp[0] & R1_APP_CMD))
		return -EOPNOTSUPP;

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_app_cmd);

Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| adrian bunk | 116 | 82.27% | 1 | 20.00% |
| david brownell | 10 | 7.09% | 1 | 20.00% |
| shawn lin | 8 | 5.67% | 1 | 20.00% |
| chris ball | 4 | 2.84% | 1 | 20.00% |
| pierre ossman | 3 | 2.13% | 1 | 20.00% |
| Total | 141 | 100.00% | 5 | 100.00% |
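mmc_app_cmd() issues the bare CMD55 prefix that marks the next command as an application command. A brief illustration of the two addressing cases and the response check, added as an editor's note (not part of sd_ops.c):

/*
 * Editor's illustration, not part of sd_ops.c. CMD55 addresses the card
 * by its relative card address, or broadcasts when no card object exists
 * yet (the ACMD41 path later in this file passes card == NULL):
 *
 *   selected card:  cmd.arg = card->rca << 16;   e.g. RCA 0x1234 -> 0x12340000
 *   no card yet:    cmd.arg = 0;                 broadcast, hence MMC_CMD_BCR
 *
 * On a native (non-SPI) bus the R1 response must have R1_APP_CMD set,
 * confirming that the card will treat the following command as an ACMD.
 */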
/**
 * mmc_wait_for_app_cmd - start an application command and wait for completion
 * @host: MMC host to start command
 * @card: Card to send MMC_APP_CMD to
 * @cmd: MMC command to start
 * @retries: maximum number of retries
 *
 * Sends an MMC_APP_CMD, checks the card response, sends the command
 * in the parameter and waits for it to complete. Returns any error
 * that occurred while the command was executing. Does not attempt to
 * parse the response.
 */
int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
	struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {NULL};

	int i, err;

	if (retries < 0)
		retries = MMC_CMD_RETRIES;

	err = -EIO;

	/*
	 * We have to resend MMC_APP_CMD for each attempt so
	 * we cannot use the retries field in mmc_command.
	 */
	for (i = 0; i <= retries; i++) {
		err = mmc_app_cmd(host, card);
		if (err) {
			/* no point in retrying; no APP commands allowed */
			if (mmc_host_is_spi(host)) {
				if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
					break;
			}
			continue;
		}

		memset(&mrq, 0, sizeof(struct mmc_request));

		memset(cmd->resp, 0, sizeof(cmd->resp));
		cmd->retries = 0;

		mrq.cmd = cmd;
		cmd->data = NULL;

		mmc_wait_for_req(host, &mrq);

		err = cmd->error;
		if (!cmd->error)
			break;

		/* no point in retrying illegal APP commands */
		if (mmc_host_is_spi(host)) {
			if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
				break;
		}
	}

	return err;
}
EXPORT_SYMBOL(mmc_wait_for_app_cmd);

Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pierre ossman | 144 | 72.00% | 2 | 33.33% |
| david brownell | 46 | 23.00% | 1 | 16.67% |
| shawn lin | 6 | 3.00% | 1 | 16.67% |
| chris ball | 3 | 1.50% | 1 | 16.67% |
| venkatraman sathiyamoorthy | 1 | 0.50% | 1 | 16.67% |
| Total | 200 | 100.00% | 6 | 100.00% |
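mmc_app_set_bus_width() just below is the canonical in-file caller of mmc_wait_for_app_cmd(). A minimal sketch of that calling pattern, added as an editor's note (not part of sd_ops.c; the wrapper name and parameters are hypothetical):

/* Editor's sketch, not part of sd_ops.c. */
static int example_acmd_no_data(struct mmc_card *card, u32 opcode, u32 arg)
{
	struct mmc_command cmd = {0};

	cmd.opcode = opcode;		/* an application command, e.g. ACMD6 */
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	/* the CMD55 prefix is resent automatically on each retry */
	return mmc_wait_for_app_cmd(card->host, card, &cmd, MMC_CMD_RETRIES);
}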
int mmc_app_set_bus_width(struct mmc_card *card, int width)
{
	struct mmc_command cmd = {0};

	cmd.opcode = SD_APP_SET_BUS_WIDTH;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	switch (width) {
	case MMC_BUS_WIDTH_1:
		cmd.arg = SD_BUS_WIDTH_1;
		break;
	case MMC_BUS_WIDTH_4:
		cmd.arg = SD_BUS_WIDTH_4;
		break;
	default:
		return -EINVAL;
	}

	return mmc_wait_for_app_cmd(card->host, card, &cmd, MMC_CMD_RETRIES);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pierre ossman | 76 | 93.83% | 2 | 50.00% |
| chris ball | 4 | 4.94% | 1 | 25.00% |
| masahiro yamada | 1 | 1.23% | 1 | 25.00% |
| Total | 81 | 100.00% | 4 | 100.00% |
int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {0};
	int i, err = 0;

	cmd.opcode = SD_APP_OP_COND;
	if (mmc_host_is_spi(host))
		cmd.arg = ocr & (1 << 30); /* SPI only defines one bit */
	else
		cmd.arg = ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_app_cmd(host, NULL, &cmd, MMC_CMD_RETRIES);
		if (err)
			break;

		/* if we're just probing, do a single pass */
		if (ocr == 0)
			break;

		/* otherwise wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);
	}

	if (!i)
		pr_err("%s: card never left busy state\n", mmc_hostname(host));

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pierre ossman | 119 | 58.62% | 2 | 40.00% |
| david brownell | 65 | 32.02% | 1 | 20.00% |
| johan rudholm | 15 | 7.39% | 1 | 20.00% |
| chris ball | 4 | 1.97% | 1 | 20.00% |
| Total | 203 | 100.00% | 5 | 100.00% |
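mmc_send_app_op_cond() polls ACMD41 until the card reports that its power-up sequence is complete. An editor's note (not part of sd_ops.c) illustrating the OCR bits the loop consults:

/*
 * Editor's illustration, not part of sd_ops.c.
 * OCR bits consulted by mmc_send_app_op_cond():
 *
 *   bit 31       power-up status (busy while 0)  -> MMC_CARD_BUSY (0x80000000)
 *   bit 30       host/card capacity status        -> the (1 << 30) SPI mask above
 *   bits 23..15  supported voltage window         -> passed in via 'ocr'
 *
 * The loop keeps reissuing ACMD41 until the card sets bit 31 (native
 * bus) or clears R1_SPI_IDLE (SPI bus), or roughly 100 * 10 ms of
 * polling elapse; ocr == 0 means "just probing", so a single pass.
 */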
int mmc_send_if_cond(struct mmc_host *host, u32 ocr)
{
	struct mmc_command cmd = {0};
	int err;
	static const u8 test_pattern = 0xAA;
	u8 result_pattern;

	/*
	 * To support SD 2.0 cards, we must always invoke SD_SEND_IF_COND
	 * before SD_APP_OP_COND. This command will harmlessly fail for
	 * SD 1.0 cards.
	 */
	cmd.opcode = SD_SEND_IF_COND;
	cmd.arg = ((ocr & 0xFF8000) != 0) << 8 | test_pattern;
	cmd.flags = MMC_RSP_SPI_R7 | MMC_RSP_R7 | MMC_CMD_BCR;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		return err;

	if (mmc_host_is_spi(host))
		result_pattern = cmd.resp[1] & 0xFF;
	else
		result_pattern = cmd.resp[0] & 0xFF;

	if (result_pattern != test_pattern)
		return -EIO;

	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pierre ossman | 99 | 75.57% | 2 | 50.00% |
| david brownell | 28 | 21.37% | 1 | 25.00% |
| chris ball | 4 | 3.05% | 1 | 25.00% |
| Total | 131 | 100.00% | 4 | 100.00% |
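mmc_send_if_cond() builds the CMD8 argument from a voltage-supplied flag and a fixed check pattern that the card must echo back. An editor's note (not part of sd_ops.c) working through that arithmetic:

/*
 * Editor's illustration, not part of sd_ops.c.
 * For a host OCR advertising any voltage in the 2.7-3.6V window
 * ((ocr & 0xFF8000) != 0), the argument becomes:
 *
 *   cmd.arg = (1 << 8) | 0xAA = 0x1AA
 *              ^^^^^^    ^^^^
 *              VHS field check pattern
 *
 * An SD 2.0 (or later) card echoes the check pattern in its R7
 * response, so result_pattern == 0xAA. An SD 1.x card does not
 * respond at all, mmc_wait_for_cmd() returns an error, and the
 * caller treats that failure as harmless.
 */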
int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca)
{
	int err;
	struct mmc_command cmd = {0};

	cmd.opcode = SD_SEND_RELATIVE_ADDR;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	*rca = cmd.resp[0] >> 16;

	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pierre ossman | 76 | 95.00% | 2 | 66.67% |
| chris ball | 4 | 5.00% | 1 | 33.33% |
| Total | 80 | 100.00% | 3 | 100.00% |
int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
{
	int err;
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	void *data_buf;

	/* NOTE: caller guarantees scr is heap-allocated */

	err = mmc_app_cmd(card->host, card);
	if (err)
		return err;

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(sizeof(card->raw_scr), GFP_KERNEL);
	if (data_buf == NULL)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = SD_APP_SEND_SCR;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, data_buf, 8);

	mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(card->host, &mrq);

	memcpy(scr, data_buf, sizeof(card->raw_scr));
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	scr[0] = be32_to_cpu(scr[0]);
	scr[1] = be32_to_cpu(scr[1]);

	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pierre ossman | 194 | 75.19% | 3 | 33.33% |
| yoshihiro shimoda | 49 | 18.99% | 1 | 11.11% |
| chris ball | 11 | 4.26% | 3 | 33.33% |
| david brownell | 3 | 1.16% | 1 | 11.11% |
| venkatraman sathiyamoorthy | 1 | 0.39% | 1 | 11.11% |
| Total | 258 | 100.00% | 9 | 100.00% |
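The SCR arrives as eight big-endian bytes, so mmc_app_send_scr() byte-swaps the two 32-bit words before returning them. An editor's note (not part of sd_ops.c; the register contents are made-up sample values):

/*
 * Editor's illustration, not part of sd_ops.c; sample bytes are
 * hypothetical. Raw SCR as read from the card (big-endian on the wire):
 *
 *   02 35 80 00 00 00 00 00
 *
 * After the two be32_to_cpu() conversions, regardless of CPU endianness:
 *
 *   scr[0] = 0x02358000   (SCR bits 63:32 - structure, SD spec version,
 *                          security, supported bus widths, ...)
 *   scr[1] = 0x00000000   (SCR bits 31:0 - mostly reserved here)
 *
 * mmc_decode_scr() in sd.c then extracts the individual fields.
 */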
int mmc_sd_switch(struct mmc_card *card, int mode, int group,
	u8 value, u8 *resp)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;

	/* NOTE: caller guarantees resp is heap-allocated */

	mode = !!mode;
	value &= 0xF;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = SD_SWITCH;
	cmd.arg = mode << 31 | 0x00FFFFFF;
	cmd.arg &= ~(0xF << (group * 4));
	cmd.arg |= value << (group * 4);
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, resp, 64);

	mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pierre ossman | 198 | 92.96% | 2 | 28.57% |
| chris ball | 11 | 5.16% | 3 | 42.86% |
| david brownell | 3 | 1.41% | 1 | 14.29% |
| venkatraman sathiyamoorthy | 1 | 0.47% | 1 | 14.29% |
| Total | 213 | 100.00% | 7 | 100.00% |
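mmc_sd_switch() encodes the CMD6 argument so that every function group except the one being changed reads 0xF, meaning "no change". An editor's note (not part of sd_ops.c) working through a typical call:

/*
 * Editor's illustration, not part of sd_ops.c. For a call such as
 * mmc_sd_switch(card, 1, 0, 1, resp) - set mode, group parameter 0,
 * function value 1 - the argument is built as:
 *
 *   cmd.arg  = 1 << 31 | 0x00FFFFFF;    -> 0x80FFFFFF (set mode,
 *                                          all groups "no change")
 *   cmd.arg &= ~(0xF << (0 * 4));       -> 0x80FFFFF0
 *   cmd.arg |= 1 << (0 * 4);            -> 0x80FFFFF1
 *
 * The 64-byte block read into 'resp' is the switch function status,
 * which the caller inspects to confirm the switch took effect.
 */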
int mmc_app_sd_status(struct mmc_card *card, void *ssr)
{
	int err;
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;

	/* NOTE: caller guarantees ssr is heap-allocated */

	err = mmc_app_cmd(card->host, card);
	if (err)
		return err;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = SD_APP_SD_STATUS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, ssr, 64);

	mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| adrian hunter | 172 | 93.48% | 1 | 20.00% |
| chris ball | 11 | 5.98% | 3 | 60.00% |
| venkatraman sathiyamoorthy | 1 | 0.54% | 1 | 20.00% |
| Total | 184 | 100.00% | 5 | 100.00% |
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pierre ossman | 940 | 61.32% | 5 | 27.78% |
| adrian hunter | 172 | 11.22% | 1 | 5.56% |
| david brownell | 155 | 10.11% | 1 | 5.56% |
| adrian bunk | 116 | 7.57% | 1 | 5.56% |
| chris ball | 56 | 3.65% | 3 | 16.67% |
| yoshihiro shimoda | 52 | 3.39% | 1 | 5.56% |
| johan rudholm | 15 | 0.98% | 1 | 5.56% |
| shawn lin | 14 | 0.91% | 1 | 5.56% |
| john calixto | 5 | 0.33% | 1 | 5.56% |
| venkatraman sathiyamoorthy | 4 | 0.26% | 1 | 5.56% |
| paul gortmaker | 3 | 0.20% | 1 | 5.56% |
| masahiro yamada | 1 | 0.07% | 1 | 5.56% |
| Total | 1533 | 100.00% | 18 | 100.00% |