Release 4.11 drivers/staging/goldfish/goldfish_nand.c
/*
 * drivers/mtd/devices/goldfish_nand.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (C) 2012 Intel, Inc.
 * Copyright (C) 2013 Intel, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/io.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/vmalloc.h>
#include <linux/mtd/mtd.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/goldfish.h>
#include <asm/div64.h>
#include <linux/dma-mapping.h>
#include "goldfish_nand_reg.h"

struct goldfish_nand {
        /* lock protects access to the device registers */
        struct mutex lock;
        unsigned char __iomem *base;
        struct cmd_params *cmd_params;
        size_t mtd_count;
        struct mtd_info mtd[0];
};

static u32 goldfish_nand_cmd_with_params(struct mtd_info *mtd,
                                         enum nand_cmd cmd, u64 addr, u32 len,
                                         void *ptr, u32 *rv)
{
        u32 cmdp;
        struct goldfish_nand *nand = mtd->priv;
        struct cmd_params *cps = nand->cmd_params;
        unsigned char __iomem *base = nand->base;

        if (!cps)
                return -1;

        /* Only erase, read and write have a parameter-block variant. */
        switch (cmd) {
        case NAND_CMD_ERASE:
                cmdp = NAND_CMD_ERASE_WITH_PARAMS;
                break;
        case NAND_CMD_READ:
                cmdp = NAND_CMD_READ_WITH_PARAMS;
                break;
        case NAND_CMD_WRITE:
                cmdp = NAND_CMD_WRITE_WITH_PARAMS;
                break;
        default:
                return -1;
        }

        /*
         * Fill the shared DMA parameter block and issue a single command
         * write; the device reads the parameters and stores its status in
         * cps->result.
         */
        cps->dev = mtd - nand->mtd;
        cps->addr_high = (u32)(addr >> 32);
        cps->addr_low = (u32)addr;
        cps->transfer_size = len;
        cps->data = (unsigned long)ptr;
        writel(cmdp, base + NAND_COMMAND);
        *rv = cps->result;
        return 0;
}

Contributors
Person           | Tokens | Token Share | Commits | Commit Share
Arve Hjønnevåg   |    168 |      98.25% |       1 |       33.33%
Jun Tian         |      2 |       1.17% |       1 |       33.33%
Ravi Teja Darbha |      1 |       0.58% |       1 |       33.33%
Total            |    171 |     100.00% |       3 |      100.00%
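
The cmd_params block that this helper fills in is defined in goldfish_nand_reg.h, which this page does not reproduce. Below is a plausible sketch of its layout, inferred only from the field accesses above; the exact field widths (in particular whether data is 32 or 64 bits) are an assumption here, not taken from the header:

/*
 * Sketch only: layout inferred from goldfish_nand_cmd_with_params();
 * see goldfish_nand_reg.h for the authoritative definition.
 */
struct cmd_params {
        u32 dev;                /* device index: mtd - nand->mtd */
        u32 addr_low;           /* low 32 bits of the raw byte address */
        u32 addr_high;          /* high 32 bits of the raw byte address */
        u32 transfer_size;      /* transfer length in bytes */
        u64 data;               /* pointer to the data buffer (width assumed) */
        u32 result;             /* status written back by the device */
};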

static u32 goldfish_nand_cmd(struct mtd_info *mtd, enum nand_cmd cmd,
                             u64 addr, u32 len, void *ptr)
{
        struct goldfish_nand *nand = mtd->priv;
        u32 rv;
        unsigned char __iomem *base = nand->base;

        mutex_lock(&nand->lock);
        if (goldfish_nand_cmd_with_params(mtd, cmd, addr, len, ptr, &rv)) {
                /*
                 * No parameter block (or a command without a parameter-block
                 * variant): fall back to programming each register directly.
                 */
                writel(mtd - nand->mtd, base + NAND_DEV);
                writel((u32)(addr >> 32), base + NAND_ADDR_HIGH);
                writel((u32)addr, base + NAND_ADDR_LOW);
                writel(len, base + NAND_TRANSFER_SIZE);
                gf_write_ptr(ptr, base + NAND_DATA, base + NAND_DATA_HIGH);
                writel(cmd, base + NAND_COMMAND);
                rv = readl(base + NAND_RESULT);
        }
        mutex_unlock(&nand->lock);
        return rv;
}

Contributors
Person               | Tokens | Token Share | Commits | Commit Share
Arve Hjønnevåg       |    158 |      94.61% |       1 |       25.00%
Jun Tian             |      6 |       3.59% |       1 |       25.00%
Kristina Martšenko   |      2 |       1.20% |       1 |       25.00%
Peter Senna Tschudin |      1 |       0.60% |       1 |       25.00%
Total                |    167 |     100.00% |       4 |      100.00%
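
gf_write_ptr() comes from <linux/goldfish.h> and hides a 32/64-bit difference: the device exposes two 32-bit data registers so 64-bit guests can pass a full pointer. A sketch of what it does, under the assumption that the helper in this release follows the usual Goldfish pattern (consult the header for the authoritative version):

/*
 * Sketch of gf_write_ptr(): write the low 32 bits of the pointer, and on
 * 64-bit kernels also the high 32 bits, so the emulated device sees the
 * full address.
 */
static inline void gf_write_ptr(const void *ptr, void __iomem *portl,
                                void __iomem *porth)
{
        writel((u32)(unsigned long)ptr, portl);
#ifdef CONFIG_64BIT
        writel((unsigned long)ptr >> 32, porth);
#endif
}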

static int goldfish_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
        loff_t ofs = instr->addr;
        u32 len = instr->len;
        s32 rem;

        if (ofs + len > mtd->size)
                goto invalid_arg;
        ofs = div_s64_rem(ofs, mtd->writesize, &rem);
        if (rem)
                goto invalid_arg;
        /* Convert the page index to a raw offset that includes OOB bytes. */
        ofs *= (mtd->writesize + mtd->oobsize);

        if (len % mtd->writesize)
                goto invalid_arg;
        /* Scale the length the same way: raw pages are writesize + oobsize. */
        len = len / mtd->writesize * (mtd->writesize + mtd->oobsize);

        if (goldfish_nand_cmd(mtd, NAND_CMD_ERASE, ofs, len, NULL) != len) {
                pr_err("goldfish_nand_erase: erase failed, start %llx, len %x, dev_size %llx, erase_size %x\n",
                       ofs, len, mtd->size, mtd->erasesize);
                return -EIO;
        }

        instr->state = MTD_ERASE_DONE;
        mtd_erase_callback(instr);
        return 0;

invalid_arg:
        pr_err("goldfish_nand_erase: invalid erase, start %llx, len %x, dev_size %llx, erase_size %x\n",
               ofs, len, mtd->size, mtd->erasesize);
        return -EINVAL;
}

Contributors
Person         | Tokens | Token Share | Commits | Commit Share
Arve Hjønnevåg |    179 |      96.76% |       1 |       50.00%
Arnd Bergmann  |      6 |       3.24% |       1 |       50.00%
Total          |    185 |     100.00% |       2 |      100.00%
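
The div_s64_rem()/multiply pair above converts a logical byte offset, which counts data bytes only, into a raw device offset in which every page occupies writesize + oobsize bytes. The same arithmetic as a standalone, compilable sketch; the helper name and the 2048/64 geometry are hypothetical examples, not values from the driver:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the offset math in goldfish_nand_erase(). */
static uint64_t logical_to_raw(uint64_t ofs, uint32_t writesize,
                               uint32_t oobsize)
{
        return (ofs / writesize) * (writesize + oobsize);
}

int main(void)
{
        uint32_t writesize = 2048, oobsize = 64;        /* example geometry */
        uint64_t ofs = 4ULL * writesize;                /* start of page 4 */

        printf("logical 0x%llx -> raw 0x%llx\n",
               (unsigned long long)ofs,
               (unsigned long long)logical_to_raw(ofs, writesize, oobsize));
        return 0;
}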

static int goldfish_nand_read_oob(struct mtd_info *mtd, loff_t ofs,
                                  struct mtd_oob_ops *ops)
{
        s32 rem;

        if (ofs + ops->len > mtd->size)
                goto invalid_arg;
        if (ops->datbuf && ops->len && ops->len != mtd->writesize)
                goto invalid_arg;
        if (ops->ooblen + ops->ooboffs > mtd->oobsize)
                goto invalid_arg;

        ofs = div_s64_rem(ofs, mtd->writesize, &rem);
        if (rem)
                goto invalid_arg;
        ofs *= (mtd->writesize + mtd->oobsize);

        if (ops->datbuf)
                ops->retlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, ofs,
                                                ops->len, ops->datbuf);
        /* The OOB area is stored right after the page's data bytes. */
        ofs += mtd->writesize + ops->ooboffs;
        if (ops->oobbuf)
                ops->oobretlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, ofs,
                                                   ops->ooblen, ops->oobbuf);
        return 0;

invalid_arg:
        pr_err("goldfish_nand_read_oob: invalid read, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n",
               ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize);
        return -EINVAL;
}

Contributors
Person         | Tokens | Token Share | Commits | Commit Share
Arve Hjønnevåg |    197 |      96.57% |       1 |       33.33%
Arnd Bergmann  |      6 |       2.94% |       1 |       33.33%
Peter Hüwe     |      1 |       0.49% |       1 |       33.33%
Total          |    204 |     100.00% |       3 |      100.00%
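
The ofs += mtd->writesize + ops->ooboffs step works because, in the raw layout, each page's OOB bytes sit immediately after its data bytes. A compilable sketch of both raw addresses for a given page; the helper names and geometry are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helpers mirroring the addressing in goldfish_nand_read_oob(). */
static uint64_t raw_data_addr(uint64_t page, uint32_t writesize,
                              uint32_t oobsize)
{
        return page * (uint64_t)(writesize + oobsize);
}

static uint64_t raw_oob_addr(uint64_t page, uint32_t writesize,
                             uint32_t oobsize, uint32_t ooboffs)
{
        return raw_data_addr(page, writesize, oobsize) + writesize + ooboffs;
}

int main(void)
{
        printf("page 3: data @ 0x%llx, oob @ 0x%llx\n",
               (unsigned long long)raw_data_addr(3, 2048, 64),
               (unsigned long long)raw_oob_addr(3, 2048, 64, 0));
        return 0;
}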

static int goldfish_nand_write_oob(struct mtd_info *mtd, loff_t ofs,
                                   struct mtd_oob_ops *ops)
{
        s32 rem;

        if (ofs + ops->len > mtd->size)
                goto invalid_arg;
        if (ops->len && ops->len != mtd->writesize)
                goto invalid_arg;
        if (ops->ooblen + ops->ooboffs > mtd->oobsize)
                goto invalid_arg;

        ofs = div_s64_rem(ofs, mtd->writesize, &rem);
        if (rem)
                goto invalid_arg;
        ofs *= (mtd->writesize + mtd->oobsize);

        if (ops->datbuf)
                ops->retlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, ofs,
                                                ops->len, ops->datbuf);
        /* As in the read path, the OOB area follows the page's data bytes. */
        ofs += mtd->writesize + ops->ooboffs;
        if (ops->oobbuf)
                ops->oobretlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, ofs,
                                                   ops->ooblen, ops->oobbuf);
        return 0;

invalid_arg:
        pr_err("goldfish_nand_write_oob: invalid write, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n",
               ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize);
        return -EINVAL;
}

Contributors
Person         | Tokens | Token Share | Commits | Commit Share
Arve Hjønnevåg |    193 |      96.50% |       1 |       33.33%
Arnd Bergmann  |      6 |       3.00% |       1 |       33.33%
Peter Hüwe     |      1 |       0.50% |       1 |       33.33%
Total          |    200 |     100.00% |       3 |      100.00%

static int goldfish_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
                              size_t *retlen, u_char *buf)
{
        s32 rem;

        if (from + len > mtd->size)
                goto invalid_arg;

        from = div_s64_rem(from, mtd->writesize, &rem);
        if (rem)
                goto invalid_arg;
        from *= (mtd->writesize + mtd->oobsize);

        *retlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, from, len, buf);
        return 0;

invalid_arg:
        pr_err("goldfish_nand_read: invalid read, start %llx, len %zx, dev_size %llx, write_size %x\n",
               from, len, mtd->size, mtd->writesize);
        return -EINVAL;
}

Contributors
Person         | Tokens | Token Share | Commits | Commit Share
Arve Hjønnevåg |    109 |      93.97% |       1 |       33.33%
Arnd Bergmann  |      6 |       5.17% |       1 |       33.33%
Peter Hüwe     |      1 |       0.86% |       1 |       33.33%
Total          |    116 |     100.00% |       3 |      100.00%

static int goldfish_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
                               size_t *retlen, const u_char *buf)
{
        s32 rem;

        if (to + len > mtd->size)
                goto invalid_arg;

        to = div_s64_rem(to, mtd->writesize, &rem);
        if (rem)
                goto invalid_arg;
        to *= (mtd->writesize + mtd->oobsize);

        /* goldfish_nand_cmd() takes a non-const pointer, hence the cast. */
        *retlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, to, len, (void *)buf);
        return 0;

invalid_arg:
        pr_err("goldfish_nand_write: invalid write, start %llx, len %zx, dev_size %llx, write_size %x\n",
               to, len, mtd->size, mtd->writesize);
        return -EINVAL;
}

Contributors
Person         | Tokens | Token Share | Commits | Commit Share
Arve Hjønnevåg |    114 |      94.21% |       1 |       33.33%
Arnd Bergmann  |      6 |       4.96% |       1 |       33.33%
Peter Hüwe     |      1 |       0.83% |       1 |       33.33%
Total          |    121 |     100.00% |       3 |      100.00%

static int goldfish_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
        s32 rem;

        if (ofs >= mtd->size)
                goto invalid_arg;

        ofs = div_s64_rem(ofs, mtd->writesize, &rem);
        if (rem)
                goto invalid_arg;
        /* Scale the page index by pages-per-block, then by the raw page size. */
        ofs *= mtd->erasesize / mtd->writesize;
        ofs *= (mtd->writesize + mtd->oobsize);

        return goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_GET, ofs, 0, NULL);

invalid_arg:
        pr_err("goldfish_nand_block_isbad: invalid arg, ofs %llx, dev_size %llx, write_size %x\n",
               ofs, mtd->size, mtd->writesize);
        return -EINVAL;
}

Contributors
Person         | Tokens | Token Share | Commits | Commit Share
Arve Hjønnevåg |     99 |      93.40% |       1 |       50.00%
Arnd Bergmann  |      7 |       6.60% |       1 |       50.00%
Total          |    106 |     100.00% |       2 |      100.00%

static int goldfish_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
        s32 rem;

        if (ofs >= mtd->size)
                goto invalid_arg;

        ofs = div_s64_rem(ofs, mtd->writesize, &rem);
        if (rem)
                goto invalid_arg;
        /* Same scaling as goldfish_nand_block_isbad(). */
        ofs *= mtd->erasesize / mtd->writesize;
        ofs *= (mtd->writesize + mtd->oobsize);

        /* The device reports 1 on a successful bad-block mark. */
        if (goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_SET, ofs, 0, NULL) != 1)
                return -EIO;
        return 0;

invalid_arg:
        pr_err("goldfish_nand_block_markbad: invalid arg, ofs %llx, dev_size %llx, write_size %x\n",
               ofs, mtd->size, mtd->writesize);
        return -EINVAL;
}

Contributors
Person         | Tokens | Token Share | Commits | Commit Share
Arve Hjønnevåg |    109 |      93.97% |       1 |       50.00%
Arnd Bergmann  |      7 |       6.03% |       1 |       50.00%
Total          |    116 |     100.00% |       2 |      100.00%

static int nand_setup_cmd_params(struct platform_device *pdev,
                                 struct goldfish_nand *nand)
{
        dma_addr_t dma_handle;
        unsigned char __iomem *base = nand->base;

        nand->cmd_params = dmam_alloc_coherent(&pdev->dev,
                                               sizeof(struct cmd_params),
                                               &dma_handle, GFP_KERNEL);
        if (!nand->cmd_params) {
                dev_err(&pdev->dev, "allocate buffer failed\n");
                return -ENOMEM;
        }
        /* Tell the device where the shared parameter block lives. */
        writel((u32)((u64)dma_handle >> 32), base + NAND_CMD_PARAMS_ADDR_HIGH);
        writel((u32)dma_handle, base + NAND_CMD_PARAMS_ADDR_LOW);
        return 0;
}

Contributors
Person         | Tokens | Token Share | Commits | Commit Share
Arve Hjønnevåg |     86 |      78.90% |       1 |       50.00%
Shraddha Barke |     23 |      21.10% |       1 |       50.00%
Total          |    109 |     100.00% |       2 |      100.00%
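
dmam_alloc_coherent() is the device-managed form of dma_alloc_coherent(), so the parameter block is freed automatically when the device is unbound; that is why the driver has no matching free call. The bus address is programmed as two 32-bit halves with plain shift arithmetic, sketched below with a hypothetical value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical 64-bit bus address, split the same way
         * nand_setup_cmd_params() programs NAND_CMD_PARAMS_ADDR_HIGH/LOW.
         */
        uint64_t dma_handle = 0x0000000123456000ULL;
        uint32_t high = (uint32_t)(dma_handle >> 32);
        uint32_t low = (uint32_t)dma_handle;

        printf("high 0x%08x low 0x%08x\n", high, low);
        return 0;
}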

static int goldfish_nand_init_device(struct platform_device *pdev,
                                     struct goldfish_nand *nand, int id)
{
        u32 name_len;
        u32 result;
        u32 flags;
        unsigned char __iomem *base = nand->base;
        struct mtd_info *mtd = &nand->mtd[id];
        char *name;

        mutex_lock(&nand->lock);
        writel(id, base + NAND_DEV);
        flags = readl(base + NAND_DEV_FLAGS);
        name_len = readl(base + NAND_DEV_NAME_LEN);
        mtd->writesize = readl(base + NAND_DEV_PAGE_SIZE);
        mtd->size = readl(base + NAND_DEV_SIZE_LOW);
        mtd->size |= (u64)readl(base + NAND_DEV_SIZE_HIGH) << 32;
        mtd->oobsize = readl(base + NAND_DEV_EXTRA_SIZE);
        mtd->oobavail = mtd->oobsize;
        /*
         * The registers report raw sizes that include the OOB area;
         * convert them to the data-only values MTD expects.
         */
        mtd->erasesize = readl(base + NAND_DEV_ERASE_SIZE) /
                         (mtd->writesize + mtd->oobsize) * mtd->writesize;
        mtd->size = div_s64(mtd->size, mtd->writesize + mtd->oobsize);
        mtd->size *= mtd->writesize;
        dev_dbg(&pdev->dev,
                "goldfish nand dev%d: size %llx, page %d, extra %d, erase %d\n",
                id, mtd->size, mtd->writesize, mtd->oobsize, mtd->erasesize);
        mutex_unlock(&nand->lock);

        mtd->priv = nand;

        name = devm_kzalloc(&pdev->dev, name_len + 1, GFP_KERNEL);
        if (!name)
                return -ENOMEM;
        mtd->name = name;

        result = goldfish_nand_cmd(mtd, NAND_CMD_GET_DEV_NAME, 0, name_len,
                                   name);
        if (result != name_len) {
                dev_err(&pdev->dev,
                        "goldfish_nand_init_device failed to get dev name %d != %d\n",
                        result, name_len);
                return -ENODEV;
        }
        ((char *)mtd->name)[name_len] = '\0';

        /* Set up the MTD structure */
        mtd->type = MTD_NANDFLASH;
        mtd->flags = MTD_CAP_NANDFLASH;
        if (flags & NAND_DEV_FLAG_READ_ONLY)
                mtd->flags &= ~MTD_WRITEABLE;
        if (flags & NAND_DEV_FLAG_CMD_PARAMS_CAP)
                nand_setup_cmd_params(pdev, nand);

        mtd->owner = THIS_MODULE;
        mtd->_erase = goldfish_nand_erase;
        mtd->_read = goldfish_nand_read;
        mtd->_write = goldfish_nand_write;
        mtd->_read_oob = goldfish_nand_read_oob;
        mtd->_write_oob = goldfish_nand_write_oob;
        mtd->_block_isbad = goldfish_nand_block_isbad;
        mtd->_block_markbad = goldfish_nand_block_markbad;

        if (mtd_device_register(mtd, NULL, 0))
                return -EIO;
        return 0;
}

Contributors
Person             | Tokens | Token Share | Commits | Commit Share
Arve Hjønnevåg     |    419 |      96.77% |       1 |       20.00%
Loic Pefferkorn    |      6 |       1.39% |       1 |       20.00%
Arnd Bergmann      |      5 |       1.15% |       1 |       20.00%
Kristina Martšenko |      2 |       0.46% |       1 |       20.00%
Somya Anand        |      1 |       0.23% |       1 |       20.00%
Total              |    433 |     100.00% |       5 |      100.00%
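
The geometry registers report raw sizes that include the OOB area, so the init path rescales them to the data-only values MTD expects: erasesize becomes the number of whole raw pages per erase unit times writesize, and the total size is rescaled the same way. The same computation as a standalone sketch; the register values are hypothetical examples:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical register values, mirroring the arithmetic in
         * goldfish_nand_init_device().
         */
        uint32_t writesize = 2048, oobsize = 64;
        uint32_t raw_erase = 64 * (2048 + 64);          /* NAND_DEV_ERASE_SIZE */
        uint64_t raw_size = 4096ULL * (2048 + 64);      /* NAND_DEV_SIZE_HIGH/LOW */

        uint32_t erasesize = raw_erase / (writesize + oobsize) * writesize;
        uint64_t size = raw_size / (writesize + oobsize) * writesize;

        printf("erasesize %u bytes, size %llu bytes\n",
               erasesize, (unsigned long long)size);
        return 0;
}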

static int goldfish_nand_probe(struct platform_device *pdev)
{
        u32 num_dev;
        int i;
        int err;
        u32 num_dev_working;
        u32 version;
        struct resource *r;
        struct goldfish_nand *nand;
        unsigned char __iomem *base;

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r)
                return -ENODEV;

        base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
        if (!base)
                return -ENOMEM;

        version = readl(base + NAND_VERSION);
        if (version != NAND_VERSION_CURRENT) {
                dev_err(&pdev->dev,
                        "goldfish_nand_init: version mismatch, got %d, expected %d\n",
                        version, NAND_VERSION_CURRENT);
                return -ENODEV;
        }

        num_dev = readl(base + NAND_NUM_DEV);
        if (num_dev == 0)
                return -ENODEV;

        nand = devm_kzalloc(&pdev->dev, sizeof(*nand) +
                            sizeof(struct mtd_info) * num_dev, GFP_KERNEL);
        if (!nand)
                return -ENOMEM;

        mutex_init(&nand->lock);
        nand->base = base;
        nand->mtd_count = num_dev;
        platform_set_drvdata(pdev, nand);

        num_dev_working = 0;
        for (i = 0; i < num_dev; i++) {
                err = goldfish_nand_init_device(pdev, nand, i);
                if (err == 0)
                        num_dev_working++;
        }
        if (num_dev_working == 0)
                return -ENODEV;
        return 0;
}

Contributors
Person             | Tokens | Token Share | Commits | Commit Share
Arve Hjønnevåg     |    251 |      98.43% |       1 |       25.00%
Somya Anand        |      2 |       0.78% |       1 |       25.00%
Ravi Teja Darbha   |      1 |       0.39% |       1 |       25.00%
Kristina Martšenko |      1 |       0.39% |       1 |       25.00%
Total              |    255 |     100.00% |       4 |      100.00%

static int goldfish_nand_remove(struct platform_device *pdev)
{
        struct goldfish_nand *nand = platform_get_drvdata(pdev);
        int i;

        for (i = 0; i < nand->mtd_count; i++) {
                /* Only devices that initialized successfully have a name. */
                if (nand->mtd[i].name)
                        mtd_device_unregister(&nand->mtd[i]);
        }
        return 0;
}

Contributors
Person         | Tokens | Token Share | Commits | Commit Share
Arve Hjønnevåg |     66 |     100.00% |       1 |      100.00%
Total          |     66 |     100.00% |       1 |      100.00%

static struct platform_driver goldfish_nand_driver = {
        .probe = goldfish_nand_probe,
        .remove = goldfish_nand_remove,
        .driver = {
                .name = "goldfish_nand"
        }
};

module_platform_driver(goldfish_nand_driver);
MODULE_LICENSE("GPL");

Overall Contributors
Person               | Tokens | Token Share | Commits | Commit Share
Arve Hjønnevåg       |   2244 |      95.21% |       1 |        8.33%
Arnd Bergmann        |     49 |       2.08% |       1 |        8.33%
Shraddha Barke       |     26 |       1.10% |       1 |        8.33%
Kristina Martšenko   |     10 |       0.42% |       1 |        8.33%
Jun Tian             |      8 |       0.34% |       1 |        8.33%
Loic Pefferkorn      |      7 |       0.30% |       2 |       16.67%
Peter Hüwe           |      4 |       0.17% |       1 |        8.33%
Somya Anand          |      3 |       0.13% |       1 |        8.33%
Alan Cox             |      3 |       0.13% |       1 |        8.33%
Ravi Teja Darbha     |      2 |       0.08% |       1 |        8.33%
Peter Senna Tschudin |      1 |       0.04% |       1 |        8.33%
Total                |   2357 |     100.00% |      12 |      100.00%