Author | Tokens | Token Proportion | Commits | Commit Proportion |
---|---|---|---|---|
Gustavo Pimentel | 2349 | 96.59% | 6 | 50.00% |
Frank Li | 41 | 1.69% | 3 | 25.00% |
Arnd Bergmann | 37 | 1.52% | 2 | 16.67% |
Herve Codina | 5 | 0.21% | 1 | 8.33% |
Total | 2432 | 100.00% | 12 | 100.00% |
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. * Synopsys DesignWare eDMA v0 core * * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> */ #include <linux/bitfield.h> #include "dw-edma-core.h" #include "dw-edma-v0-core.h" #include "dw-edma-v0-regs.h" #include "dw-edma-v0-debugfs.h" enum dw_edma_control { DW_EDMA_V0_CB = BIT(0), DW_EDMA_V0_TCB = BIT(1), DW_EDMA_V0_LLP = BIT(2), DW_EDMA_V0_LIE = BIT(3), DW_EDMA_V0_RIE = BIT(4), DW_EDMA_V0_CCS = BIT(8), DW_EDMA_V0_LLE = BIT(9), }; static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw) { return dw->chip->reg_base; } #define SET_32(dw, name, value) \ writel(value, &(__dw_regs(dw)->name)) #define GET_32(dw, name) \ readl(&(__dw_regs(dw)->name)) #define SET_RW_32(dw, dir, name, value) \ do { \ if ((dir) == EDMA_DIR_WRITE) \ SET_32(dw, wr_##name, value); \ else \ SET_32(dw, rd_##name, value); \ } while (0) #define GET_RW_32(dw, dir, name) \ ((dir) == EDMA_DIR_WRITE \ ? GET_32(dw, wr_##name) \ : GET_32(dw, rd_##name)) #define SET_BOTH_32(dw, name, value) \ do { \ SET_32(dw, wr_##name, value); \ SET_32(dw, rd_##name, value); \ } while (0) #ifdef CONFIG_64BIT #define SET_64(dw, name, value) \ writeq(value, &(__dw_regs(dw)->name)) #define GET_64(dw, name) \ readq(&(__dw_regs(dw)->name)) #define SET_RW_64(dw, dir, name, value) \ do { \ if ((dir) == EDMA_DIR_WRITE) \ SET_64(dw, wr_##name, value); \ else \ SET_64(dw, rd_##name, value); \ } while (0) #define GET_RW_64(dw, dir, name) \ ((dir) == EDMA_DIR_WRITE \ ? 
GET_64(dw, wr_##name) \ : GET_64(dw, rd_##name)) #define SET_BOTH_64(dw, name, value) \ do { \ SET_64(dw, wr_##name, value); \ SET_64(dw, rd_##name, value); \ } while (0) #endif /* CONFIG_64BIT */ #define SET_COMPAT(dw, name, value) \ writel(value, &(__dw_regs(dw)->type.unroll.name)) #define SET_RW_COMPAT(dw, dir, name, value) \ do { \ if ((dir) == EDMA_DIR_WRITE) \ SET_COMPAT(dw, wr_##name, value); \ else \ SET_COMPAT(dw, rd_##name, value); \ } while (0) static inline struct dw_edma_v0_ch_regs __iomem * __dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch) { if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) return &(__dw_regs(dw)->type.legacy.ch); if (dir == EDMA_DIR_WRITE) return &__dw_regs(dw)->type.unroll.ch[ch].wr; return &__dw_regs(dw)->type.unroll.ch[ch].rd; } static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, u32 value, void __iomem *addr) { if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) { u32 viewport_sel; unsigned long flags; raw_spin_lock_irqsave(&dw->lock, flags); viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); if (dir == EDMA_DIR_READ) viewport_sel |= BIT(31); writel(viewport_sel, &(__dw_regs(dw)->type.legacy.viewport_sel)); writel(value, addr); raw_spin_unlock_irqrestore(&dw->lock, flags); } else { writel(value, addr); } } static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, const void __iomem *addr) { u32 value; if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) { u32 viewport_sel; unsigned long flags; raw_spin_lock_irqsave(&dw->lock, flags); viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); if (dir == EDMA_DIR_READ) viewport_sel |= BIT(31); writel(viewport_sel, &(__dw_regs(dw)->type.legacy.viewport_sel)); value = readl(addr); raw_spin_unlock_irqrestore(&dw->lock, flags); } else { value = readl(addr); } return value; } #define SET_CH_32(dw, dir, ch, name, value) \ writel_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name)) #define GET_CH_32(dw, dir, ch, name) \ readl_ch(dw, dir, ch, 
&(__dw_ch_regs(dw, dir, ch)->name)) #define SET_LL_32(ll, value) \ writel(value, ll) #ifdef CONFIG_64BIT static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, u64 value, void __iomem *addr) { if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) { u32 viewport_sel; unsigned long flags; raw_spin_lock_irqsave(&dw->lock, flags); viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); if (dir == EDMA_DIR_READ) viewport_sel |= BIT(31); writel(viewport_sel, &(__dw_regs(dw)->type.legacy.viewport_sel)); writeq(value, addr); raw_spin_unlock_irqrestore(&dw->lock, flags); } else { writeq(value, addr); } } static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, const void __iomem *addr) { u32 value; if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) { u32 viewport_sel; unsigned long flags; raw_spin_lock_irqsave(&dw->lock, flags); viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); if (dir == EDMA_DIR_READ) viewport_sel |= BIT(31); writel(viewport_sel, &(__dw_regs(dw)->type.legacy.viewport_sel)); value = readq(addr); raw_spin_unlock_irqrestore(&dw->lock, flags); } else { value = readq(addr); } return value; } #define SET_CH_64(dw, dir, ch, name, value) \ writeq_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name)) #define GET_CH_64(dw, dir, ch, name) \ readq_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name)) #define SET_LL_64(ll, value) \ writeq(value, ll) #endif /* CONFIG_64BIT */ /* eDMA management callbacks */ void dw_edma_v0_core_off(struct dw_edma *dw) { SET_BOTH_32(dw, int_mask, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK); SET_BOTH_32(dw, int_clear, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK); SET_BOTH_32(dw, engine_en, 0); } u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir) { u32 num_ch; if (dir == EDMA_DIR_WRITE) num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK, GET_32(dw, ctrl)); else num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK, GET_32(dw, ctrl)); if (num_ch > EDMA_V0_MAX_NR_CH) num_ch = EDMA_V0_MAX_NR_CH; 
return (u16)num_ch; } enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan) { struct dw_edma *dw = chan->dw; u32 tmp; tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK, GET_CH_32(dw, chan->dir, chan->id, ch_control1)); if (tmp == 1) return DMA_IN_PROGRESS; else if (tmp == 3) return DMA_COMPLETE; else return DMA_ERROR; } void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan) { struct dw_edma *dw = chan->dw; SET_RW_32(dw, chan->dir, int_clear, FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id))); } void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan) { struct dw_edma *dw = chan->dw; SET_RW_32(dw, chan->dir, int_clear, FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id))); } u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir) { return FIELD_GET(EDMA_V0_DONE_INT_MASK, GET_RW_32(dw, dir, int_status)); } u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir) { return FIELD_GET(EDMA_V0_ABORT_INT_MASK, GET_RW_32(dw, dir, int_status)); } static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk) { struct dw_edma_burst *child; struct dw_edma_chan *chan = chunk->chan; struct dw_edma_v0_lli __iomem *lli; struct dw_edma_v0_llp __iomem *llp; u32 control = 0, i = 0; int j; lli = chunk->ll_region.vaddr; if (chunk->cb) control = DW_EDMA_V0_CB; j = chunk->bursts_alloc; list_for_each_entry(child, &chunk->burst->list, list) { j--; if (!j) { control |= DW_EDMA_V0_LIE; if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL)) control |= DW_EDMA_V0_RIE; } /* Channel control */ SET_LL_32(&lli[i].control, control); /* Transfer size */ SET_LL_32(&lli[i].transfer_size, child->sz); /* SAR */ #ifdef CONFIG_64BIT SET_LL_64(&lli[i].sar.reg, child->sar); #else /* CONFIG_64BIT */ SET_LL_32(&lli[i].sar.lsb, lower_32_bits(child->sar)); SET_LL_32(&lli[i].sar.msb, upper_32_bits(child->sar)); #endif /* CONFIG_64BIT */ /* DAR */ #ifdef CONFIG_64BIT SET_LL_64(&lli[i].dar.reg, child->dar); #else /* CONFIG_64BIT */ 
SET_LL_32(&lli[i].dar.lsb, lower_32_bits(child->dar)); SET_LL_32(&lli[i].dar.msb, upper_32_bits(child->dar)); #endif /* CONFIG_64BIT */ i++; } llp = (void __iomem *)&lli[i]; control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB; if (!chunk->cb) control |= DW_EDMA_V0_CB; /* Channel control */ SET_LL_32(&llp->control, control); /* Linked list */ #ifdef CONFIG_64BIT SET_LL_64(&llp->llp.reg, chunk->ll_region.paddr); #else /* CONFIG_64BIT */ SET_LL_32(&llp->llp.lsb, lower_32_bits(chunk->ll_region.paddr)); SET_LL_32(&llp->llp.msb, upper_32_bits(chunk->ll_region.paddr)); #endif /* CONFIG_64BIT */ } void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first) { struct dw_edma_chan *chan = chunk->chan; struct dw_edma *dw = chan->dw; u32 tmp; dw_edma_v0_core_write_chunk(chunk); if (first) { /* Enable engine */ SET_RW_32(dw, chan->dir, engine_en, BIT(0)); if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) { switch (chan->id) { case 0: SET_RW_COMPAT(dw, chan->dir, ch0_pwr_en, BIT(0)); break; case 1: SET_RW_COMPAT(dw, chan->dir, ch1_pwr_en, BIT(0)); break; case 2: SET_RW_COMPAT(dw, chan->dir, ch2_pwr_en, BIT(0)); break; case 3: SET_RW_COMPAT(dw, chan->dir, ch3_pwr_en, BIT(0)); break; case 4: SET_RW_COMPAT(dw, chan->dir, ch4_pwr_en, BIT(0)); break; case 5: SET_RW_COMPAT(dw, chan->dir, ch5_pwr_en, BIT(0)); break; case 6: SET_RW_COMPAT(dw, chan->dir, ch6_pwr_en, BIT(0)); break; case 7: SET_RW_COMPAT(dw, chan->dir, ch7_pwr_en, BIT(0)); break; } } /* Interrupt unmask - done, abort */ tmp = GET_RW_32(dw, chan->dir, int_mask); tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)); tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)); SET_RW_32(dw, chan->dir, int_mask, tmp); /* Linked list error */ tmp = GET_RW_32(dw, chan->dir, linked_list_err_en); tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id)); SET_RW_32(dw, chan->dir, linked_list_err_en, tmp); /* Channel control */ SET_CH_32(dw, chan->dir, chan->id, ch_control1, (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE)); /* Linked list */ /* llp 
is not aligned on 64bit -> keep 32bit accesses */ SET_CH_32(dw, chan->dir, chan->id, llp.lsb, lower_32_bits(chunk->ll_region.paddr)); SET_CH_32(dw, chan->dir, chan->id, llp.msb, upper_32_bits(chunk->ll_region.paddr)); } /* Doorbell */ SET_RW_32(dw, chan->dir, doorbell, FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id)); } int dw_edma_v0_core_device_config(struct dw_edma_chan *chan) { struct dw_edma *dw = chan->dw; u32 tmp = 0; /* MSI done addr - low, high */ SET_RW_32(dw, chan->dir, done_imwr.lsb, chan->msi.address_lo); SET_RW_32(dw, chan->dir, done_imwr.msb, chan->msi.address_hi); /* MSI abort addr - low, high */ SET_RW_32(dw, chan->dir, abort_imwr.lsb, chan->msi.address_lo); SET_RW_32(dw, chan->dir, abort_imwr.msb, chan->msi.address_hi); /* MSI data - low, high */ switch (chan->id) { case 0: case 1: tmp = GET_RW_32(dw, chan->dir, ch01_imwr_data); break; case 2: case 3: tmp = GET_RW_32(dw, chan->dir, ch23_imwr_data); break; case 4: case 5: tmp = GET_RW_32(dw, chan->dir, ch45_imwr_data); break; case 6: case 7: tmp = GET_RW_32(dw, chan->dir, ch67_imwr_data); break; } if (chan->id & BIT(0)) { /* Channel odd {1, 3, 5, 7} */ tmp &= EDMA_V0_CH_EVEN_MSI_DATA_MASK; tmp |= FIELD_PREP(EDMA_V0_CH_ODD_MSI_DATA_MASK, chan->msi.data); } else { /* Channel even {0, 2, 4, 6} */ tmp &= EDMA_V0_CH_ODD_MSI_DATA_MASK; tmp |= FIELD_PREP(EDMA_V0_CH_EVEN_MSI_DATA_MASK, chan->msi.data); } switch (chan->id) { case 0: case 1: SET_RW_32(dw, chan->dir, ch01_imwr_data, tmp); break; case 2: case 3: SET_RW_32(dw, chan->dir, ch23_imwr_data, tmp); break; case 4: case 5: SET_RW_32(dw, chan->dir, ch45_imwr_data, tmp); break; case 6: case 7: SET_RW_32(dw, chan->dir, ch67_imwr_data, tmp); break; } return 0; } /* eDMA debugfs callbacks */ void dw_edma_v0_core_debugfs_on(struct dw_edma *dw) { dw_edma_v0_debugfs_on(dw); } void dw_edma_v0_core_debugfs_off(struct dw_edma *dw) { dw_edma_v0_debugfs_off(dw); }
Information contained on this website is provided for historical purposes only and does not indicate or represent copyright ownership.
Created with Cregit http://github.com/cregit/cregit
Version 2.0-RC1