Release 4.7 drivers/spi/spi-dw.c
/*
* Designware SPI core controller driver (refer to pxa2xx_spi.c)
*
* Copyright (c) 2009, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include "spi-dw.h"
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
/* Slave spi_dev related */
struct chip_data {
u8 cs; /* chip select pin */
u8 tmode; /* TR/TO/RO/EEPROM */
u8 type; /* SPI/SSP/MicroWire */
u8 poll_mode; /* 1 means use poll mode */
u8 enable_dma;
u16 clk_div; /* baud rate divider */
u32 speed_hz; /* baud rate */
void (*cs_control)(u32 command);
};
#ifdef CONFIG_DEBUG_FS
#define SPI_REGS_BUFSIZE 1024
static ssize_t dw_spi_show_regs(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct dw_spi *dws = file->private_data;
char *buf;
u32 len = 0;
ssize_t ret;
buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
if (!buf)
return 0;
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"%s registers:\n", dev_name(&dws->master->dev));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"=================================\n");
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"CTRL0: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL0));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"CTRL1: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL1));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"SSIENR: \t0x%08x\n", dw_readl(dws, DW_SPI_SSIENR));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"SER: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SER));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"BAUDR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_BAUDR));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"TXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_TXFLTR));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"RXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_RXFLTR));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"TXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_TXFLR));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"RXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_RXFLR));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"SR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SR));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"IMR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_IMR));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"ISR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_ISR));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"DMACR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_DMACR));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"DMATDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMATDLR));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"DMARDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMARDLR));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"=================================\n");
ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| feng tang | 438 | 93.39% | 1 | 33.33% |
| andy shevchenko | 16 | 3.41% | 1 | 33.33% |
| h hartley sweeten | 15 | 3.20% | 1 | 33.33% |
| Total | 469 | 100.00% | 3 | 100.00% |
static const struct file_operations dw_spi_regs_ops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = dw_spi_show_regs,
.llseek = default_llseek,
};
static int dw_spi_debugfs_init(struct dw_spi *dws)
{
dws->debugfs = debugfs_create_dir("dw_spi", NULL);
if (!dws->debugfs)
return -ENOMEM;
debugfs_create_file("registers", S_IFREG | S_IRUGO,
dws->debugfs, (void *)dws, &dw_spi_regs_ops);
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| feng tang | 55 | 94.83% | 1 | 50.00% |
| andy shevchenko | 3 | 5.17% | 1 | 50.00% |
| Total | 58 | 100.00% | 2 | 100.00% |
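For illustration (not part of spi-dw.c): once dw_spi_debugfs_init() has created the "dw_spi" directory, the register dump produced by dw_spi_show_regs() can be read from userspace. A minimal sketch, assuming debugfs is mounted at the conventional /sys/kernel/debug path:

#include <stdio.h>

int main(void)
{
        char line[128];
        FILE *f = fopen("/sys/kernel/debug/dw_spi/registers", "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* Echo the formatted register dump to stdout */
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}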
static void dw_spi_debugfs_remove(struct dw_spi *dws)
{
debugfs_remove_recursive(dws->debugfs);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| feng tang | 17 | 94.44% | 1 | 50.00% |
| andy shevchenko | 1 | 5.56% | 1 | 50.00% |
| Total | 18 | 100.00% | 2 | 100.00% |
#else
static inline int dw_spi_debugfs_init(struct dw_spi *dws)
{
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| feng tang | 9 | 60.00% | 1 | 33.33% |
| george shore | 5 | 33.33% | 1 | 33.33% |
| andy shevchenko | 1 | 6.67% | 1 | 33.33% |
| Total | 15 | 100.00% | 3 | 100.00% |
static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| feng tang | 10 | 90.91% | 1 | 50.00% |
| andy shevchenko | 1 | 9.09% | 1 | 50.00% |
| Total | 11 | 100.00% | 2 | 100.00% |
#endif /* CONFIG_DEBUG_FS */
static void dw_spi_set_cs(struct spi_device *spi, bool enable)
{
struct dw_spi *dws = spi_master_get_devdata(spi->master);
struct chip_data *chip = spi_get_ctldata(spi);
/* Chip select logic is inverted from spi_set_cs() */
if (chip && chip->cs_control)
chip->cs_control(!enable);
if (!enable)
dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| andy shevchenko | 72 | 100.00% | 2 | 100.00% |
| Total | 72 | 100.00% | 2 | 100.00% |
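For illustration (not part of spi-dw.c): a minimal sketch of the optional cs_control hook that dw_spi_set_cs() invokes. The GPIO number and the polarity handling are assumptions for a hypothetical board; the hook is handed to the driver through struct dw_spi_chip, as dw_spi_setup() further down shows.

#include <linux/gpio.h>

#define MY_BOARD_CS_GPIO        42      /* assumed, board-specific GPIO number */

/*
 * Receives !enable, i.e. the logic already inverted from spi_set_cs(), so on
 * this reading a non-zero command asks for the device to be selected.
 * Assuming an active-low chip select wired to a GPIO that board init code
 * has already requested and configured as an output:
 */
static void my_board_cs_control(u32 command)
{
        gpio_set_value(MY_BOARD_CS_GPIO, command ? 0 : 1);
}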
/* Return the max entries we can fill into tx fifo */
static inline u32 tx_max(struct dw_spi *dws)
{
u32 tx_left, tx_room, rxtx_gap;
tx_left = (dws->tx_end - dws->tx) / dws->n_bytes;
tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);
/*
* Another concern is the tx/rx mismatch: we considered using
* (dws->fifo_len - rxflr - txflr) as the maximum value for tx,
* but that doesn't account for data that has already left the
* tx/rx FIFOs and is sitting in the shift registers. So the
* limit is enforced from the software side instead.
*/
rxtx_gap = ((dws->rx_end - dws->rx) - (dws->tx_end - dws->tx))
/ dws->n_bytes;
return min3(tx_left, tx_room, (u32) (dws->fifo_len - rxtx_gap));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| alek du | 94 | 97.92% | 1 | 33.33% |
| h hartley sweeten | 1 | 1.04% | 1 | 33.33% |
| thor thayer | 1 | 1.04% | 1 | 33.33% |
| Total | 96 | 100.00% | 3 | 100.00% |
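For illustration (not part of spi-dw.c): a small standalone program with made-up numbers showing how the rxtx_gap term caps the next fill, so the receive side can never fall more than one FIFO's worth of entries behind the transmit side.

#include <stdio.h>

int main(void)
{
        unsigned int fifo_len = 16;     /* assumed FIFO depth */
        unsigned int tx_left = 100;     /* entries still to send */
        unsigned int tx_room = 12;      /* free TX FIFO slots right now */
        unsigned int rxtx_gap = 10;     /* entries pushed to TX but not yet read back */

        /* Same min3() logic as tx_max() */
        unsigned int max = tx_left;
        if (tx_room < max)
                max = tx_room;
        if (fifo_len - rxtx_gap < max)
                max = fifo_len - rxtx_gap;

        /* Prints 6: here the gap, not the free TX space, is the limit */
        printf("next TX fill: %u entries\n", max);
        return 0;
}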
/* Return the max entries we should read out of rx fifo */
static inline u32 rx_max(struct dw_spi *dws)
{
u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes;
return min_t(u32, rx_left, dw_readl(dws, DW_SPI_RXFLR));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| alek du | 39 | 88.64% | 1 | 25.00% |
| jingoo han | 3 | 6.82% | 1 | 25.00% |
| h hartley sweeten | 1 | 2.27% | 1 | 25.00% |
| thor thayer | 1 | 2.27% | 1 | 25.00% |
| Total | 44 | 100.00% | 4 | 100.00% |
static void dw_writer(struct dw_spi *dws)
{
u32 max = tx_max(dws);
u16 txw = 0;
while (max--) {
/* Set the tx word if the transfer's original "tx" is not null */
if (dws->tx_end - dws->len) {
if (dws->n_bytes == 1)
txw = *(u8 *)(dws->tx);
else
txw = *(u16 *)(dws->tx);
}
dw_write_io_reg(dws, DW_SPI_DR, txw);
dws->tx += dws->n_bytes;
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| feng tang | 80 | 83.33% | 2 | 33.33% |
| alek du | 14 | 14.58% | 2 | 33.33% |
| michael van der westhuizen | 1 | 1.04% | 1 | 16.67% |
| h hartley sweeten | 1 | 1.04% | 1 | 16.67% |
| Total | 96 | 100.00% | 6 | 100.00% |
static void dw_reader(struct dw_spi *dws)
{
u32 max = rx_max(dws);
u16 rxw;
while (max--) {
rxw = dw_read_io_reg(dws, DW_SPI_DR);
/* Store the rx word only if the transfer's original "rx" is not null */
if (dws->rx_end - dws->len) {
if (dws->n_bytes == 1)
*(u8 *)(dws->rx) = rxw;
else
*(u16 *)(dws->rx) = rxw;
}
dws->rx += dws->n_bytes;
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| feng tang | 81 | 86.17% | 2 | 33.33% |
| alek du | 11 | 11.70% | 2 | 33.33% |
| michael van der westhuizen | 1 | 1.06% | 1 | 16.67% |
| h hartley sweeten | 1 | 1.06% | 1 | 16.67% |
| Total | 94 | 100.00% | 6 | 100.00% |
static void int_error_stop(struct dw_spi *dws, const char *msg)
{
spi_reset_chip(dws);
dev_err(&dws->master->dev, "%s\n", msg);
dws->master->cur_msg->status = -EIO;
spi_finalize_current_transfer(dws->master);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| feng tang | 47 | 88.68% | 1 | 33.33% |
| andy shevchenko | 6 | 11.32% | 2 | 66.67% |
| Total | 53 | 100.00% | 3 | 100.00% |
static irqreturn_t interrupt_transfer(struct dw_spi *dws)
{
u16 irq_status = dw_readl(dws, DW_SPI_ISR);
/* Error handling */
if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
dw_readl(dws, DW_SPI_ICR);
int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
return IRQ_HANDLED;
}
dw_reader(dws);
if (dws->rx_end == dws->rx) {
spi_mask_intr(dws, SPI_INT_TXEI);
spi_finalize_current_transfer(dws->master);
return IRQ_HANDLED;
}
if (irq_status & SPI_INT_TXEI) {
spi_mask_intr(dws, SPI_INT_TXEI);
dw_writer(dws);
/* Always re-enable the TX irq; it is masked again once RX has finished */
spi_umask_intr(dws, SPI_INT_TXEI);
}
return IRQ_HANDLED;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| feng tang | 81 | 68.64% | 2 | 28.57% |
| alek du | 30 | 25.42% | 1 | 14.29% |
| andy shevchenko | 3 | 2.54% | 1 | 14.29% |
| thor thayer | 3 | 2.54% | 2 | 28.57% |
| h hartley sweeten | 1 | 0.85% | 1 | 14.29% |
| Total | 118 | 100.00% | 7 | 100.00% |
static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
struct spi_master *master = dev_id;
struct dw_spi *dws = spi_master_get_devdata(master);
u16 irq_status = dw_readl(dws, DW_SPI_ISR) & 0x3f;
if (!irq_status)
return IRQ_NONE;
if (!master->cur_msg) {
spi_mask_intr(dws, SPI_INT_TXEI);
return IRQ_HANDLED;
}
return dws->transfer_handler(dws);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| feng tang | 45 | 58.44% | 1 | 16.67% |
| yong wang | 16 | 20.78% | 1 | 16.67% |
| andy shevchenko | 12 | 15.58% | 1 | 16.67% |
| alek du | 2 | 2.60% | 1 | 16.67% |
| h hartley sweeten | 1 | 1.30% | 1 | 16.67% |
| thor thayer | 1 | 1.30% | 1 | 16.67% |
| Total | 77 | 100.00% | 6 | 100.00% |
/* Must be called inside pump_transfers() */
static int poll_transfer(struct dw_spi *dws)
{
do {
dw_writer(dws);
dw_reader(dws);
cpu_relax();
} while (dws->rx_end > dws->rx);
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| alek du | 17 | 41.46% | 1 | 20.00% |
| feng tang | 16 | 39.02% | 2 | 40.00% |
| major lee | 4 | 9.76% | 1 | 20.00% |
| andy shevchenko | 4 | 9.76% | 1 | 20.00% |
| Total | 41 | 100.00% | 5 | 100.00% |
static int dw_spi_transfer_one(struct spi_master *master,
struct spi_device *spi, struct spi_transfer *transfer)
{
struct dw_spi *dws = spi_master_get_devdata(master);
struct chip_data *chip = spi_get_ctldata(spi);
u8 imask = 0;
u16 txlevel = 0;
u16 clk_div;
u32 cr0;
int ret;
dws->dma_mapped = 0;
dws->tx = (void *)transfer->tx_buf;
dws->tx_end = dws->tx + transfer->len;
dws->rx = transfer->rx_buf;
dws->rx_end = dws->rx + transfer->len;
dws->len = transfer->len;
spi_enable_chip(dws, 0);
/* Handle per transfer options for bpw and speed */
if (transfer->speed_hz != chip->speed_hz) {
/* clk_div doesn't support odd numbers */
clk_div = (dws->max_freq / transfer->speed_hz + 1) & 0xfffe;
chip->speed_hz = transfer->speed_hz;
chip->clk_div = clk_div;
spi_set_clk(dws, chip->clk_div);
}
if (transfer->bits_per_word == 8) {
dws->n_bytes = 1;
dws->dma_width = 1;
} else if (transfer->bits_per_word == 16) {
dws->n_bytes = 2;
dws->dma_width = 2;
} else {
return -EINVAL;
}
/* Default SPI mode is SCPOL = 0, SCPH = 0 */
cr0 = (transfer->bits_per_word - 1)
| (chip->type << SPI_FRF_OFFSET)
| (spi->mode << SPI_MODE_OFFSET)
| (chip->tmode << SPI_TMOD_OFFSET);
/*
* Adjust the transfer mode if necessary. Requires a platform-dependent
* chip-select mechanism.
*/
if (chip->cs_control) {
if (dws->rx && dws->tx)
chip->tmode = SPI_TMOD_TR;
else if (dws->rx)
chip->tmode = SPI_TMOD_RO;
else
chip->tmode = SPI_TMOD_TO;
cr0 &= ~SPI_TMOD_MASK;
cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
}
dw_writel(dws, DW_SPI_CTRL0, cr0);
/* Check if current transfer is a DMA transaction */
if (master->can_dma && master->can_dma(master, spi, transfer))
dws->dma_mapped = master->cur_msg_mapped;
/* For poll mode just disable all interrupts */
spi_mask_intr(dws, 0xff);
/*
* Interrupt mode:
* we only need to set the TXEI IRQ, as TX and RX always happen synchronously
*/
if (dws->dma_mapped) {
ret = dws->dma_ops->dma_setup(dws, transfer);
if (ret < 0) {
spi_enable_chip(dws, 1);
return ret;
}
} else if (!chip->poll_mode) {
txlevel = min_t(u16, dws->fifo_len / 2, dws->len / dws->n_bytes);
dw_writel(dws, DW_SPI_TXFLTR, txlevel);
/* Set the interrupt mask */
imask |= SPI_INT_TXEI | SPI_INT_TXOI |
SPI_INT_RXUI | SPI_INT_RXOI;
spi_umask_intr(dws, imask);
dws->transfer_handler = interrupt_transfer;
}
spi_enable_chip(dws, 1);
if (dws->dma_mapped) {
ret = dws->dma_ops->dma_transfer(dws, transfer);
if (ret < 0)
return ret;
}
if (chip->poll_mode)
return poll_transfer(dws);
return 1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| feng tang | 239 | 45.61% | 4 | 22.22% |
| andy shevchenko | 220 | 41.98% | 10 | 55.56% |
| george shore | 55 | 10.50% | 1 | 5.56% |
| alek du | 6 | 1.15% | 1 | 5.56% |
| thor thayer | 4 | 0.76% | 2 | 11.11% |
| Total | 524 | 100.00% | 18 | 100.00% |
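For illustration (not part of spi-dw.c): the divider math in dw_spi_transfer_one() with made-up clock numbers. BAUDR only accepts even dividers, so the computed quotient is rounded up to the next even value.

#include <stdio.h>

int main(void)
{
        unsigned int max_freq = 100000000;      /* assumed SSI input clock, Hz */
        unsigned int speed_hz = 4000000;        /* requested transfer rate, Hz */

        /* Same expression as in dw_spi_transfer_one(): even dividers only */
        unsigned int clk_div = (max_freq / speed_hz + 1) & 0xfffe;

        /* 100 MHz / 4 MHz = 25, rounded up to the even divider 26 (~3.85 MHz) */
        printf("clk_div = %u, actual rate = %u Hz\n",
               clk_div, max_freq / clk_div);
        return 0;
}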
static void dw_spi_handle_err(struct spi_master *master,
struct spi_message *msg)
{
struct dw_spi *dws = spi_master_get_devdata(master);
if (dws->dma_mapped)
dws->dma_ops->dma_stop(dws);
spi_reset_chip(dws);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| feng tang | 19 | 41.30% | 1 | 25.00% |
| andy shevchenko | 18 | 39.13% | 2 | 50.00% |
| baruch siach | 9 | 19.57% | 1 | 25.00% |
| Total | 46 | 100.00% | 4 | 100.00% |
/* This may be called twice for each spi dev */
static int dw_spi_setup(struct spi_device *spi)
{
struct dw_spi_chip *chip_info = NULL;
struct chip_data *chip;
int ret;
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
if (!chip) {
chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
if (!chip)
return -ENOMEM;
spi_set_ctldata(spi, chip);
}
/*
* Protocol drivers may change the chip settings, so...
* if chip_info exists, use it
*/
chip_info = spi->controller_data;
/* chip_info doesn't always exist */
if (chip_info) {
if (chip_info->cs_control)
chip->cs_control = chip_info->cs_control;
chip->poll_mode = chip_info->poll_mode;
chip->type = chip_info->type;
}
chip->tmode = SPI_TMOD_TR;
if (gpio_is_valid(spi->cs_gpio)) {
ret = gpio_direction_output(spi->cs_gpio,
!(spi->mode & SPI_CS_HIGH));
if (ret)
return ret;
}
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| baruch siach | 103 | 64.78% | 2 | 33.33% |
| feng tang | 52 | 32.70% | 1 | 16.67% |
| andy shevchenko | 2 | 1.26% | 1 | 16.67% |
| jisheng zhang | 1 | 0.63% | 1 | 16.67% |
| axel lin | 1 | 0.63% | 1 | 16.67% |
| Total | 159 | 100.00% | 6 | 100.00% |
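For illustration (not part of spi-dw.c): how legacy board code might pass the per-device knobs that dw_spi_setup() reads from spi->controller_data. struct dw_spi_chip itself is declared in spi-dw.h; the device name, numbers and the cs_control hook (the hypothetical helper sketched earlier) are assumptions.

#include <linux/spi/spi.h>
#include "spi-dw.h"

static struct dw_spi_chip my_flash_chip_info = {
        .poll_mode  = 1,                        /* busy-wait in poll_transfer() */
        .type       = SSI_MOTO_SPI,             /* Motorola SPI frame format */
        .cs_control = my_board_cs_control,      /* optional board CS hook */
};

static struct spi_board_info my_board_spi_devices[] __initdata = {
        {
                .modalias        = "m25p80",    /* hypothetical SPI flash */
                .max_speed_hz    = 10000000,
                .bus_num         = 0,
                .chip_select     = 0,
                .controller_data = &my_flash_chip_info,
        },
};

/* Registered from board init code with spi_register_board_info() */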
static void dw_spi_cleanup(struct spi_device *spi)
{
struct chip_data *chip = spi_get_ctldata(spi);
kfree(chip);
spi_set_ctldata(spi, NULL);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| axel lin | 33 | 100.00% | 1 | 100.00% |
| Total | 33 | 100.00% | 1 | 100.00% |
/* Restart the controller, disable all interrupts, clean rx fifo */
static void spi_hw_init(struct device *dev, struct dw_spi *dws)
{
spi_reset_chip(dws);
/*
* Try to detect the FIFO depth if not set by the interface driver;
* per the HW spec the depth can range from 2 to 256
*/
if (!dws->fifo_len) {
u32 fifo;
for (fifo = 1; fifo < 256; fifo++) {
dw_writel(dws, DW_SPI_TXFLTR, fifo);
if (fifo != dw_readl(dws, DW_SPI_TXFLTR))
break;
}
dw_writel(dws, DW_SPI_TXFLTR, 0);
dws->fifo_len = (fifo == 1) ? 0 : fifo;
dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| feng tang | 75 | 72.12% | 2 | 25.00% |
| andy shevchenko | 23 | 22.12% | 3 | 37.50% |
| thor thayer | 3 | 2.88% | 1 | 12.50% |
| h hartley sweeten | 2 | 1.92% | 1 | 12.50% |
| axel lin | 1 | 0.96% | 1 | 12.50% |
| Total | 104 | 100.00% | 8 | 100.00% |
int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
{
struct spi_master *master;
int ret;
BUG_ON(dws == NULL);
master = spi_alloc_master(dev, 0);
if (!master)
return -ENOMEM;
dws->master = master;
dws->type = SSI_MOTO_SPI;
dws->dma_inited = 0;
dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
snprintf(dws->name, sizeof(dws->name), "dw_spi%d", dws->bus_num);
ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dws->name, master);
if (ret < 0) {
dev_err(dev, "can not get IRQ\n");
goto err_free_master;
}
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
master->bus_num = dws->bus_num;
master->num_chipselect = dws->num_cs;
master->setup = dw_spi_setup;
master->cleanup = dw_spi_cleanup;
master->set_cs = dw_spi_set_cs;
master->transfer_one = dw_spi_transfer_one;
master->handle_err = dw_spi_handle_err;
master->max_speed_hz = dws->max_freq;
master->dev.of_node = dev->of_node;
/* Basic HW init */
spi_hw_init(dev, dws);
if (dws->dma_ops && dws->dma_ops->dma_init) {
ret = dws->dma_ops->dma_init(dws);
if (ret) {
dev_warn(dev, "DMA init failed\n");
dws->dma_inited = 0;
} else {
master->can_dma = dws->dma_ops->can_dma;
}
}
spi_master_set_devdata(master, dws);
ret = devm_spi_register_master(dev, master);
if (ret) {
dev_err(&master->dev, "problem registering spi master\n");
goto err_dma_exit;
}
dw_spi_debugfs_init(dws);
return 0;
err_dma_exit:
if (dws->dma_ops && dws->dma_ops->dma_exit)
dws->dma_ops->dma_exit(dws);
spi_enable_chip(dws, 0);
free_irq(dws->irq, master);
err_free_master:
spi_master_put(master);
return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| feng tang | 264 | 68.93% | 2 | 11.76% |
| andy shevchenko | 44 | 11.49% | 7 | 41.18% |
| liu shuox | 23 | 6.01% | 1 | 5.88% |
| stephen warren | 14 | 3.66% | 1 | 5.88% |
| axel lin | 14 | 3.66% | 2 | 11.76% |
| baruch siach | 13 | 3.39% | 2 | 11.76% |
| thor thayer | 10 | 2.61% | 1 | 5.88% |
| yong wang | 1 | 0.26% | 1 | 5.88% |
| Total | 383 | 100.00% | 17 | 100.00% |
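For illustration (not part of spi-dw.c): a trimmed-down sketch of the glue an interface driver supplies before calling dw_spi_add_host() (spi-dw-mmio.c and spi-dw-pci.c are the in-tree callers). Clock handling and some error paths are elided; the function name, num_cs and max_freq values are assumptions.

#include <linux/platform_device.h>
#include <linux/err.h>
#include "spi-dw.h"

static int my_dw_spi_probe(struct platform_device *pdev)
{
        struct dw_spi *dws;
        struct resource *mem;
        int ret;

        dws = devm_kzalloc(&pdev->dev, sizeof(*dws), GFP_KERNEL);
        if (!dws)
                return -ENOMEM;

        /* Map the controller registers and remember the physical base */
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dws->regs = devm_ioremap_resource(&pdev->dev, mem);
        if (IS_ERR(dws->regs))
                return PTR_ERR(dws->regs);
        dws->paddr = mem->start;                /* used above to derive dma_addr */

        dws->irq = platform_get_irq(pdev, 0);
        if (dws->irq < 0)
                return dws->irq;

        dws->bus_num = pdev->id;
        dws->num_cs = 4;                        /* assumed number of chip selects */
        dws->max_freq = 100000000;              /* assumed SSI clock rate, Hz */

        ret = dw_spi_add_host(&pdev->dev, dws);
        if (ret)
                return ret;

        platform_set_drvdata(pdev, dws);
        return 0;
}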
EXPORT_SYMBOL_GPL(dw_spi_add_host);
void dw_spi_remove_host(struct dw_spi *dws)
{
dw_spi_debugfs_remove(dws);
if (dws->dma_ops && dws->dma_ops->dma_exit)
dws->dma_ops->dma_exit(dws);
spi_shutdown_chip(dws);
free_irq(dws->irq, dws->master);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| feng tang | 39 | 75.00% | 2 | 40.00% |
| andy shevchenko | 13 | 25.00% | 3 | 60.00% |
| Total | 52 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL_GPL(dw_spi_remove_host);
int dw_spi_suspend_host(struct dw_spi *dws)
{
int ret;
ret = spi_master_suspend(dws->master);
if (ret)
return ret;
spi_shutdown_chip(dws);
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| feng tang | 32 | 86.49% | 1 | 33.33% |
| baruch siach | 3 | 8.11% | 1 | 33.33% |
| andy shevchenko | 2 | 5.41% | 1 | 33.33% |
| Total | 37 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(dw_spi_suspend_host);
int dw_spi_resume_host(struct dw_spi *dws)
{
int ret;
spi_hw_init(&dws->master->dev, dws);
ret = spi_master_resume(dws->master);
if (ret)
dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| feng tang | 45 | 81.82% | 1 | 33.33% |
| andy shevchenko | 7 | 12.73% | 1 | 33.33% |
| baruch siach | 3 | 5.45% | 1 | 33.33% |
| Total | 55 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(dw_spi_resume_host);
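For illustration (not part of spi-dw.c): a minimal sketch of how an interface driver might wire the two suspend/resume helpers into its dev_pm_ops; the my_dw_spi_* names are assumptions and drvdata is assumed to hold the struct dw_spi pointer, as in the probe sketch above.

#include <linux/device.h>
#include <linux/pm.h>
#include "spi-dw.h"

#ifdef CONFIG_PM_SLEEP
static int my_dw_spi_suspend(struct device *dev)
{
        struct dw_spi *dws = dev_get_drvdata(dev);

        return dw_spi_suspend_host(dws);
}

static int my_dw_spi_resume(struct device *dev)
{
        struct dw_spi *dws = dev_get_drvdata(dev);

        return dw_spi_resume_host(dws);
}
#endif

static SIMPLE_DEV_PM_OPS(my_dw_spi_pm_ops, my_dw_spi_suspend, my_dw_spi_resume);
/* The interface driver then points its platform_driver .driver.pm at &my_dw_spi_pm_ops */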
MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
MODULE_LICENSE("GPL v2");
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| feng tang | 1780 | 63.26% | 8 | 14.04% |
| andy shevchenko | 451 | 16.03% | 21 | 36.84% |
| alek du | 215 | 7.64% | 2 | 3.51% |
| baruch siach | 135 | 4.80% | 3 | 5.26% |
| george shore | 60 | 2.13% | 2 | 3.51% |
| axel lin | 49 | 1.74% | 3 | 5.26% |
| liu shuox | 23 | 0.82% | 1 | 1.75% |
| h hartley sweeten | 23 | 0.82% | 1 | 1.75% |
| thor thayer | 23 | 0.82% | 4 | 7.02% |
| yong wang | 17 | 0.60% | 1 | 1.75% |
| stephen warren | 14 | 0.50% | 1 | 1.75% |
| arnd bergmann | 5 | 0.18% | 1 | 1.75% |
| major lee | 4 | 0.14% | 1 | 1.75% |
| tejun heo | 3 | 0.11% | 1 | 1.75% |
| paul gortmaker | 3 | 0.11% | 1 | 1.75% |
| jingoo han | 3 | 0.11% | 1 | 1.75% |
| grant likely | 2 | 0.07% | 2 | 3.51% |
| michael van der westhuizen | 2 | 0.07% | 1 | 1.75% |
| stephen boyd | 1 | 0.04% | 1 | 1.75% |
| jisheng zhang | 1 | 0.04% | 1 | 1.75% |
| Total | 2814 | 100.00% | 57 | 100.00% |