Release 4.11 arch/sparc/mm/iommu.c
/*
* iommu.c: IOMMU specific routines for memory management.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>
#include "mm_32.h"
/*
 * This can be sized dynamically, but we will do this
 * only when we have a guidance about actual I/O pressures.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB	/* DVMA window size selector */
#define IOMMU_START	0xF0000000		/* first bus address in the window */
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6			/* 4096 * (1<<6) — page order of the IOPTE table */

static int viking_flush;	/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */

/* Default IOPTE permissions for streaming mappings. */
#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
/* Build an IOPTE from a pfn and permission bits, clearing write-as-zero bits. */
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
/*
 * Probe and initialize one SBus IOMMU instance described by @op.
 *
 * Maps the IOMMU control registers, enables the unit with the configured
 * DVMA window, allocates and zeroes the physically contiguous,
 * size-aligned IOPTE table, points the hardware at it, and builds the
 * allocation bitmap consumed by bit_map_string_get().
 *
 * Any failure here is fatal: the machine cannot do SBus DMA without the
 * IOMMU, so we report through the PROM and halt.
 */
static void __init sbus_iommu_init(struct platform_device *op)
{
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;
	unsigned long control;
	unsigned long base;
	unsigned long tmp;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}

	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
				 "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}

	/* Read implementation/version, then program the window size and
	 * enable bit.  Use IOMMU_RNGE (defined above) rather than a
	 * hardcoded range constant so the window size is set in one place.
	 */
	control = sbus_readl(&iommu->regs->control);
	impl = (control & IOMMU_CTRL_IMPL) >> 28;
	vers = (control & IOMMU_CTRL_VERS) >> 24;
	control &= ~(IOMMU_CTRL_RNGE);
	control |= (IOMMU_RNGE | IOMMU_CTRL_ENAB);
	sbus_writel(control, &iommu->regs->control);

	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/* Allocate IOMMU page table */
	/* Stupid alignment constraints give me a headache.
	   We need 256K or 512K or 1M or 2M area aligned to
	   its size and current gfp will fortunately give
	   it to us. */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%lx]\n",
			    IOMMU_NPTES * sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();

	/* Hand the (physical) table base to the hardware; the base register
	 * takes the address shifted right by 4.
	 */
	base = __pa((unsigned long)iommu->page_table) >> 4;
	sbus_writel(base, &iommu->regs->base);
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES>>3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;

	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

	op->dev.archdata.iommu = iommu;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 149 | 37.25% | 5 | 33.33% |
Pete Zaitcev | 137 | 34.25% | 1 | 6.67% |
David S. Miller | 42 | 10.50% | 2 | 13.33% |
Sam Ravnborg | 37 | 9.25% | 1 | 6.67% |
William Lee Irwin III | 26 | 6.50% | 1 | 6.67% |
Robert Reif | 4 | 1.00% | 1 | 6.67% |
Bob Breuer | 2 | 0.50% | 1 | 6.67% |
Julia Lawall | 1 | 0.25% | 1 | 6.67% |
Grant C. Likely | 1 | 0.25% | 1 | 6.67% |
Akinobu Mita | 1 | 0.25% | 1 | 6.67% |
Total | 400 | 100.00% | 15 | 100.00% |
/*
 * Find every "iommu" node in the device tree, initialize the
 * corresponding SBus IOMMU, and propagate the iommu archdata to the
 * node's children so their drivers can DMA.
 */
static int __init iommu_init(void)
{
	struct device_node *dp;

	for_each_node_by_name(dp, "iommu") {
		struct platform_device *op = of_find_device_by_node(dp);

		/* of_find_device_by_node() may return NULL if no platform
		 * device was registered for this node; skip rather than
		 * dereference a NULL op in sbus_iommu_init().
		 */
		if (!op)
			continue;

		sbus_iommu_init(op);
		of_propagate_archdata(op);
	}

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 43 | 95.56% | 2 | 50.00% |
Pete Zaitcev | 1 | 2.22% | 1 | 25.00% |
Grant C. Likely | 1 | 2.22% | 1 | 25.00% |
Total | 45 | 100.00% | 4 | 100.00% |
subsys_initcall(iommu_init);
/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long addr = (unsigned long)iopte & PAGE_MASK;
	unsigned long limit = PAGE_ALIGN((unsigned long)iopte +
					 niopte * sizeof(iopte_t));

	/* Pick the flush primitive matching the CPU's cache hardware and
	 * walk the page range covering the touched IOPTEs.
	 */
	for (; addr < limit; addr += PAGE_SIZE) {
		if (viking_mxcc_present)
			viking_mxcc_flush_page(addr);
		else if (viking_flush)
			viking_flush_page(addr);
		else
			__flush_page_to_ram(addr);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 55 | 47.83% | 2 | 40.00% |
Pete Zaitcev | 35 | 30.43% | 1 | 20.00% |
William Lee Irwin III | 21 | 18.26% | 1 | 20.00% |
Bob Breuer | 4 | 3.48% | 1 | 20.00% |
Total | 115 | 100.00% | 5 | 100.00% |
static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
{
struct iommu_struct *iommu = dev->archdata.iommu;
int ioptex;
iopte_t *iopte, *iopte0;
unsigned int busa, busa0;
int i;
/* page color = pfn of page */
ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
if (ioptex < 0)
panic("iommu out");
busa0 = iommu->start + (ioptex << PAGE_SHIFT);
iopte0 = &iommu->page_table[ioptex];
busa = busa0;
iopte = iopte0;
for (i = 0; i < npages; i++) {
iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
iommu_invalidate_page(iommu->regs, busa);
busa += PAGE_SIZE;
iopte++;
page++;
}
iommu_flush_iotlb(iopte0, npages);
return busa0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pete Zaitcev | 146 | 86.90% | 1 | 20.00% |
Linus Torvalds (pre-git) | 7 | 4.17% | 1 | 20.00% |
David S. Miller | 6 | 3.57% | 1 | 20.00% |
William Lee Irwin III | 6 | 3.57% | 1 | 20.00% |
Robert Reif | 3 | 1.79% | 1 | 20.00% |
Total | 168 | 100.00% | 5 | 100.00% |
/*
 * Map an arbitrary kernel virtual buffer for streaming DMA and return
 * its bus address, preserving the sub-page offset.
 */
static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
{
	unsigned long offset = (unsigned long)vaddr & ~PAGE_MASK;
	int npages = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct page *first = virt_to_page((unsigned long)vaddr & PAGE_MASK);

	/* Map whole pages, then add the intra-page offset back in. */
	return iommu_get_one(dev, first, npages) + offset;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pete Zaitcev | 79 | 89.77% | 1 | 33.33% |
David S. Miller | 7 | 7.95% | 1 | 33.33% |
Linus Torvalds (pre-git) | 2 | 2.27% | 1 | 33.33% |
Total | 88 | 100.00% | 3 | 100.00% |
/*
 * Variant for CPUs where flush_page_for_dma(0) flushes globally:
 * one flush covers the whole buffer, then map it.
 */
static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
{
	flush_page_for_dma(0);

	return iommu_get_scsi_one(dev, vaddr, len);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 22 | 64.71% | 1 | 33.33% |
David S. Miller | 7 | 20.59% | 1 | 33.33% |
Pete Zaitcev | 5 | 14.71% | 1 | 33.33% |
Total | 34 | 100.00% | 3 | 100.00% |
/*
 * Variant for CPUs needing per-page flushes: flush every CPU page
 * overlapping [vaddr, vaddr+len) before mapping the buffer.
 */
static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len)
{
	unsigned long p;

	for (p = (unsigned long)vaddr & PAGE_MASK;
	     p < (unsigned long)(vaddr + len);
	     p += PAGE_SIZE)
		flush_page_for_dma(p);

	return iommu_get_scsi_one(dev, vaddr, len);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 58 | 82.86% | 1 | 33.33% |
David S. Miller | 7 | 10.00% | 1 | 33.33% |
Pete Zaitcev | 5 | 7.14% | 1 | 33.33% |
Total | 70 | 100.00% | 3 | 100.00% |
/*
 * Map a scatterlist for DMA on global-flush CPUs: a single
 * flush_page_for_dma(0) covers everything, then each entry is mapped
 * and its dma_address/dma_length filled in.
 */
static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
{
	int npages;

	flush_page_for_dma(0);

	for (; sz != 0; sz--, sg = sg_next(sg)) {
		npages = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
		sg->dma_address = iommu_get_one(dev, sg_page(sg), npages) +
				  sg->offset;
		sg->dma_length = sg->length;
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pete Zaitcev | 37 | 40.66% | 2 | 22.22% |
Linus Torvalds (pre-git) | 32 | 35.16% | 2 | 22.22% |
Jens Axboe | 8 | 8.79% | 2 | 22.22% |
David S. Miller | 7 | 7.69% | 1 | 11.11% |
Linus Torvalds | 5 | 5.49% | 1 | 11.11% |
Robert Reif | 2 | 2.20% | 1 | 11.11% |
Total | 91 | 100.00% | 9 | 100.00% |
/*
 * Map a scatterlist for DMA on per-page-flush CPUs.  Each entry's pages
 * are flushed individually before mapping; @prev remembers the last
 * flushed page across entries so back-to-back duplicates are skipped.
 */
static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz)
{
	unsigned long addr, prev = 0;
	int npages, i;

	for (; sz != 0; sz--, sg = sg_next(sg)) {
		npages = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;

		/*
		 * We expect unmapped highmem pages to be not in the cache.
		 * XXX Is this a good assumption?
		 * XXX What if someone else unmaps it here and races us?
		 */
		addr = (unsigned long)page_address(sg_page(sg));
		if (addr != 0) {
			for (i = 0; i < npages; i++, addr += PAGE_SIZE) {
				if (addr != prev) { /* Already flushed? */
					flush_page_for_dma(addr);
					prev = addr;
				}
			}
		}

		sg->dma_address = iommu_get_one(dev, sg_page(sg), npages) +
				  sg->offset;
		sg->dma_length = sg->length;
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pete Zaitcev | 73 | 46.79% | 2 | 20.00% |
Linus Torvalds (pre-git) | 53 | 33.97% | 2 | 20.00% |
Jens Axboe | 11 | 7.05% | 2 | 20.00% |
Linus Torvalds | 9 | 5.77% | 1 | 10.00% |
David S. Miller | 7 | 4.49% | 1 | 10.00% |
Robert Reif | 2 | 1.28% | 1 | 10.00% |
Rob Radez | 1 | 0.64% | 1 | 10.00% |
Total | 156 | 100.00% | 10 | 100.00% |
/*
 * Tear down @npages IOPTE entries starting at bus address @busa:
 * clear each IOPTE, invalidate its cached translation, and return the
 * slots to the allocation bitmap.
 */
static void iommu_release_one(struct device *dev, u32 busa, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int slot, i;

	BUG_ON(busa < iommu->start);
	slot = (busa - iommu->start) >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[slot + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa + (i << PAGE_SHIFT));
	}

	bit_map_clear(&iommu->usemap, slot, npages);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pete Zaitcev | 87 | 79.82% | 1 | 16.67% |
Linus Torvalds (pre-git) | 10 | 9.17% | 2 | 33.33% |
David S. Miller | 6 | 5.50% | 1 | 16.67% |
Robert Reif | 3 | 2.75% | 1 | 16.67% |
Eric Sesterhenn / Snakebyte | 3 | 2.75% | 1 | 16.67% |
Total | 109 | 100.00% | 6 | 100.00% |
/* Unmap a single-buffer mapping previously returned by iommu_get_scsi_one(). */
static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
{
	unsigned long offset = vaddr & ~PAGE_MASK;
	int npages = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	iommu_release_one(dev, vaddr & PAGE_MASK, npages);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pete Zaitcev | 39 | 68.42% | 1 | 33.33% |
Linus Torvalds (pre-git) | 11 | 19.30% | 1 | 33.33% |
David S. Miller | 7 | 12.28% | 1 | 33.33% |
Total | 57 | 100.00% | 3 | 100.00% |
/* Unmap every entry of a scatterlist mapped by the sgl get routines. */
static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
{
	int npages;

	for (; sz != 0; sz--, sg = sg_next(sg)) {
		npages = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
		iommu_release_one(dev, sg->dma_address & PAGE_MASK, npages);
		/* Poison the handle so stale use stands out in a crash. */
		sg->dma_address = 0x21212121;
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pete Zaitcev | 51 | 66.23% | 1 | 16.67% |
Linus Torvalds (pre-git) | 12 | 15.58% | 2 | 33.33% |
David S. Miller | 7 | 9.09% | 1 | 16.67% |
Jens Axboe | 5 | 6.49% | 1 | 16.67% |
Robert Reif | 2 | 2.60% | 1 | 16.67% |
Total | 77 | 100.00% | 6 | 100.00% |
#ifdef CONFIG_SBUS
/*
 * Map [va, va+len) of kernel memory into the DVMA window for consistent
 * DMA.  @addr is the physical address backing @va and doubles as the
 * allocation color; on return *pba holds the bus address of the mapping.
 * Returns 0.  Panics if the DVMA window is exhausted.
 *
 * The CPU-side kernel PTEs are rewritten with dvma_prot and the IOPTEs
 * built with ioperm_noc; both are precomputed per CPU type in
 * ld_mmu_iommu() (uncacheable on non-I/O-coherent CPUs).
 */
static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va,
    unsigned long addr, int len)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long page, end;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	/* All three ranges must be page aligned. */
	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
			addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while(addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			/* Push any dirty lines for this page to RAM before
			 * remapping it and exposing it to the device.
			 */
			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			/* Rewrite the kernel PTE with the DVMA protection
			 * so CPU accesses match the device's view.
			 */
			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface. Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches. The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*pba = iommu->start + (ioptex << PAGE_SHIFT);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 182 | 60.87% | 4 | 40.00% |
Pete Zaitcev | 88 | 29.43% | 2 | 20.00% |
David S. Miller | 12 | 4.01% | 1 | 10.00% |
Eric Sesterhenn / Snakebyte | 9 | 3.01% | 1 | 10.00% |
William Lee Irwin III | 5 | 1.67% | 1 | 10.00% |
Robert Reif | 3 | 1.00% | 1 | 10.00% |
Total | 299 | 100.00% | 10 | 100.00% |
/*
 * Undo iommu_map_dma_area(): clear the IOPTEs covering the region,
 * flush translations, and return the slots to the bitmap.
 */
static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int slot = (busa - iommu->start) >> PAGE_SHIFT;
	iopte_t *iopte = iommu->page_table + slot;
	unsigned long limit = busa + len;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	for (; busa < limit; busa += PAGE_SIZE)
		iopte_val(*iopte++) = 0;

	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, slot, len >> PAGE_SHIFT);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 80 | 60.15% | 2 | 33.33% |
Pete Zaitcev | 38 | 28.57% | 1 | 16.67% |
Eric Sesterhenn / Snakebyte | 6 | 4.51% | 1 | 16.67% |
David S. Miller | 6 | 4.51% | 1 | 16.67% |
Robert Reif | 3 | 2.26% | 1 | 16.67% |
Total | 133 | 100.00% | 6 | 100.00% |
#endif
/* DMA ops for CPUs whose flush_page_for_dma(0) flushes globally. */
static const struct sparc32_dma_ops iommu_dma_gflush_ops = {
	.get_scsi_one		= iommu_get_scsi_one_gflush,
	.get_scsi_sgl		= iommu_get_scsi_sgl_gflush,
	.release_scsi_one	= iommu_release_scsi_one,
	.release_scsi_sgl	= iommu_release_scsi_sgl,
#ifdef CONFIG_SBUS
	.map_dma_area		= iommu_map_dma_area,
	.unmap_dma_area		= iommu_unmap_dma_area,
#endif
};

/* DMA ops for CPUs requiring a flush per page; release/consistent paths
 * are shared with the global-flush variant.
 */
static const struct sparc32_dma_ops iommu_dma_pflush_ops = {
	.get_scsi_one		= iommu_get_scsi_one_pflush,
	.get_scsi_sgl		= iommu_get_scsi_sgl_pflush,
	.release_scsi_one	= iommu_release_scsi_one,
	.release_scsi_sgl	= iommu_release_scsi_sgl,
#ifdef CONFIG_SBUS
	.map_dma_area		= iommu_map_dma_area,
	.unmap_dma_area		= iommu_unmap_dma_area,
#endif
};
/*
 * Late MMU setup: select the DMA op table matching the CPU's flush
 * behavior, and precompute the consistent-mapping PTE/IOPTE flags
 * (cacheable only on I/O-coherent CPUs).
 */
void __init ld_mmu_iommu(void)
{
	/* flush_page_for_dma flushes everything, no matter of what page is it */
	sparc32_dma_ops = flush_page_for_dma_global ?
			  &iommu_dma_gflush_ops : &iommu_dma_pflush_ops;

	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		/* I/O coherent: consistent mappings may stay cacheable. */
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pete Zaitcev | 47 | 62.67% | 1 | 25.00% |
Linus Torvalds (pre-git) | 20 | 26.67% | 2 | 50.00% |
David S. Miller | 8 | 10.67% | 1 | 25.00% |
Total | 75 | 100.00% | 4 | 100.00% |
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pete Zaitcev | 923 | 42.99% | 3 | 8.33% |
Linus Torvalds (pre-git) | 758 | 35.31% | 9 | 25.00% |
David S. Miller | 271 | 12.62% | 6 | 16.67% |
William Lee Irwin III | 60 | 2.79% | 1 | 2.78% |
Sam Ravnborg | 40 | 1.86% | 2 | 5.56% |
Jens Axboe | 25 | 1.16% | 2 | 5.56% |
Robert Reif | 22 | 1.02% | 2 | 5.56% |
Eric Sesterhenn / Snakebyte | 18 | 0.84% | 1 | 2.78% |
Linus Torvalds | 15 | 0.70% | 2 | 5.56% |
Bob Breuer | 6 | 0.28% | 2 | 5.56% |
Keith M. Wesolowski | 3 | 0.14% | 1 | 2.78% |
Grant C. Likely | 2 | 0.09% | 1 | 2.78% |
Akinobu Mita | 2 | 0.09% | 2 | 5.56% |
Julia Lawall | 1 | 0.05% | 1 | 2.78% |
Rob Radez | 1 | 0.05% | 1 | 2.78% |
Total | 2147 | 100.00% | 36 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.