arch/powerpc/platforms/pseries/iommu.c (Linux 4.14)
/*
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
*
* Rewrite, cleanup:
*
* Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
* Copyright (C) 2006 Olof Johansson <olof@lixom.net>
*
* Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/of.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/ppc-pci.h>
#include <asm/udbg.h>
#include <asm/mmzone.h>
#include <asm/plpar_wrappers.h>
#include "pseries.h"
static struct iommu_table_group *iommu_pseries_alloc_group(int node)
{
struct iommu_table_group *table_group = NULL;
struct iommu_table *tbl = NULL;
struct iommu_table_group_link *tgl = NULL;
table_group = kzalloc_node(sizeof(struct iommu_table_group), GFP_KERNEL,
node);
if (!table_group)
goto fail_exit;
tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, node);
if (!tbl)
goto fail_exit;
tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
node);
if (!tgl)
goto fail_exit;
INIT_LIST_HEAD_RCU(&tbl->it_group_list);
kref_init(&tbl->it_kref);
tgl->table_group = table_group;
list_add_rcu(&tgl->next, &tbl->it_group_list);
table_group->tables[0] = tbl;
return table_group;
fail_exit:
kfree(tgl);
kfree(table_group);
kfree(tbl);
return NULL;
}
static void iommu_pseries_free_group(struct iommu_table_group *table_group,
const char *node_name)
{
struct iommu_table *tbl;
#ifdef CONFIG_IOMMU_API
struct iommu_table_group_link *tgl;
#endif
if (!table_group)
return;
tbl = table_group->tables[0];
#ifdef CONFIG_IOMMU_API
tgl = list_first_entry_or_null(&tbl->it_group_list,
struct iommu_table_group_link, next);
WARN_ON_ONCE(!tgl);
if (tgl) {
list_del_rcu(&tgl->next);
kfree(tgl);
}
if (table_group->group) {
iommu_group_put(table_group->group);
BUG_ON(table_group->group);
}
#endif
iommu_tce_table_put(tbl);
kfree(table_group);
}
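/*
 * Illustrative usage sketch (not part of the original file): the two
 * helpers above form an alloc/free pair.  A caller allocates one group
 * per PHB node, pulls tables[0] out of it, fills in the it_* fields and
 * it_ops, and later tears the whole group down:
 *
 *	struct iommu_table_group *grp = iommu_pseries_alloc_group(nid);
 *	if (grp) {
 *		struct iommu_table *tbl = grp->tables[0];
 *		// ... set up tbl and register the group ...
 *		iommu_pseries_free_group(grp, NULL);
 *	}
 *
 * pci_dma_bus_setup_pSeries() further down follows this pattern for the
 * setup half.
 */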
static int tce_build_pSeries(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
unsigned long attrs)
{
u64 proto_tce;
__be64 *tcep, *tces;
u64 rpn;
proto_tce = TCE_PCI_READ; // Read allowed
if (direction != DMA_TO_DEVICE)
proto_tce |= TCE_PCI_WRITE;
tces = tcep = ((__be64 *)tbl->it_base) + index;
while (npages--) {
/* can't move this out since we might cross MEMBLOCK boundary */
rpn = __pa(uaddr) >> TCE_SHIFT;
*tcep = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
uaddr += TCE_PAGE_SIZE;
tcep++;
}
return 0;
}
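/*
 * Worked example (illustrative; uses the TCE_* constants from
 * <asm/tce.h> exactly as the code above does, with 4 KiB TCE pages so
 * TCE_SHIFT == TCE_RPN_SHIFT == 12 and TCE_PCI_READ | TCE_PCI_WRITE ==
 * 0x3): for a bidirectional mapping, if __pa(uaddr) == 0x123456000 then
 *
 *	rpn = 0x123456000 >> 12 = 0x123456
 *	tce = proto_tce | (rpn & TCE_RPN_MASK) << 12 = 0x123456003
 *
 * i.e. the real page number is placed back at bit 12 and up, and the
 * low bits carry the PCI read/write permission flags.  The value is
 * stored with cpu_to_be64() because the table entries are big-endian.
 */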
static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
__be64 *tcep, *tces;
tces = tcep = ((__be64 *)tbl->it_base) + index;
while (npages--)
*(tcep++) = 0;
}
static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
{
__be64 *tcep;
tcep = ((__be64 *)tbl->it_base) + index;
return be64_to_cpu(*tcep);
}
static void tce_free_pSeriesLP(struct iommu_table*, long, long);
static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);
static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
unsigned long attrs)
{
u64 rc = 0;
u64 proto_tce, tce;
u64 rpn;
int ret = 0;
long tcenum_start = tcenum, npages_start = npages;
rpn = __pa(uaddr) >> TCE_SHIFT;
proto_tce = TCE_PCI_READ;
if (direction != DMA_TO_DEVICE)
proto_tce |= TCE_PCI_WRITE;
while (npages--) {
tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);
if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
ret = (int)rc;
tce_free_pSeriesLP(tbl, tcenum_start,
(npages_start - (npages + 1)));
break;
}
if (rc && printk_ratelimit()) {
printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
printk("\ttcenum = 0x%llx\n", (u64)tcenum);
printk("\ttce val = 0x%llx\n", tce );
dump_stack();
}
tcenum++;
rpn++;
}
return ret;
}
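/*
 * Illustrative note (not in the original source): the rollback above
 * undoes exactly the entries that were already installed.  With, say,
 * npages_start == 8 the loop body sees npages == 7, 6, 5, ...; if the
 * k-th plpar_tce_put() call (counting from 0) fails with
 * H_NOT_ENOUGH_RESOURCES, then npages == 8 - (k + 1) at that point, so
 *
 *	npages_start - (npages + 1) == 8 - (8 - (k + 1) + 1) == k
 *
 * which is precisely the number of TCEs successfully written before the
 * failing call.
 */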
static DEFINE_PER_CPU(__be64 *, tce_page);
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
unsigned long attrs)
{
u64 rc = 0;
u64 proto_tce;
__be64 *tcep;
u64 rpn;
long l, limit;
long tcenum_start = tcenum, npages_start = npages;
int ret = 0;
unsigned long flags;
if ((npages == 1) || !firmware_has_feature(FW_FEATURE_MULTITCE)) {
return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
direction, attrs);
}
local_irq_save(flags); /* to protect tcep and the page behind it */
tcep = __this_cpu_read(tce_page);
/* This is safe to do since interrupts are off when we're called
* from iommu_alloc{,_sg}()
*/
if (!tcep) {
tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
/* If allocation fails, fall back to the loop implementation */
if (!tcep) {
local_irq_restore(flags);
return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
direction, attrs);
}
__this_cpu_write(tce_page, tcep);
}
rpn = __pa(uaddr) >> TCE_SHIFT;
proto_tce = TCE_PCI_READ;
if (direction != DMA_TO_DEVICE)
proto_tce |= TCE_PCI_WRITE;
/* We can map max one pageful of TCEs at a time */
do {
/*
* Set up the page with TCE data, looping through and setting
* the values.
*/
limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);
for (l = 0; l < limit; l++) {
tcep[l] = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
rpn++;
}
rc = plpar_tce_put_indirect((u64)tbl->it_index,
(u64)tcenum << 12,
(u64)__pa(tcep),
limit);
npages -= limit;
tcenum += limit;
} while (npages > 0 && !rc);
local_irq_restore(flags);
if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
ret = (int)rc;
tce_freemulti_pSeriesLP(tbl, tcenum_start,
(npages_start - (npages + limit)));
return ret;
}
if (rc && printk_ratelimit()) {
printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
printk("\tnpages = 0x%llx\n", (u64)npages);
printk("\ttce[0] val = 0x%llx\n", tcep[0]);
dump_stack();
}
return ret;
}
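/*
 * Illustrative arithmetic (not in the original source): one per-CPU page
 * holds 4096 / TCE_ENTRY_SIZE == 512 TCEs, so mapping e.g. 1300 pages
 * takes three plpar_tce_put_indirect() calls (512 + 512 + 276 entries)
 * instead of 1300 individual plpar_tce_put() calls as in
 * tce_build_pSeriesLP() above.  For single-page mappings, or when the
 * per-CPU page cannot be allocated, the code falls back to the
 * one-TCE-at-a-time path.
 */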
static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
u64 rc;
while (npages--) {
rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);
if (rc && printk_ratelimit()) {
printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
printk("\ttcenum = 0x%llx\n", (u64)tcenum);
dump_stack();
}
tcenum++;
}
}
static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
u64 rc;
if (!firmware_has_feature(FW_FEATURE_MULTITCE))
return tce_free_pSeriesLP(tbl, tcenum, npages);
rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);
if (rc && printk_ratelimit()) {
printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
printk("\trc = %lld\n", rc);
printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
printk("\tnpages = 0x%llx\n", (u64)npages);
dump_stack();
}
}
static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
{
u64 rc;
unsigned long tce_ret;
rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);
if (rc && printk_ratelimit()) {
printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
printk("\ttcenum = 0x%llx\n", (u64)tcenum);
dump_stack();
}
return tce_ret;
}
/* This layout is compatible, cell for cell, with the device tree property
 * that describes a dynamic DMA window. */
struct dynamic_dma_window_prop {
__be32 liobn; /* tce table number */
__be64 dma_base; /* address hi,lo */
__be32 tce_shift; /* ilog2(tce_page_size) */
__be32 window_shift; /* ilog2(tce_window_size) */
};
struct direct_window {
struct device_node *device;
const struct dynamic_dma_window_prop *prop;
struct list_head list;
};
/* Dynamic DMA Window support */
struct ddw_query_response {
u32 windows_available;
u32 largest_available_block;
u32 page_size;
u32 migration_capable;
};
struct ddw_create_response {
u32 liobn;
u32 addr_hi;
u32 addr_lo;
};
static LIST_HEAD(direct_window_list);
/* prevents races between memory on/offline and window creation */
static DEFINE_SPINLOCK(direct_window_list_lock);
/* protects initializing window twice for same device */
static DEFINE_MUTEX(direct_window_init_mutex);
#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"
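/*
 * Illustrative example (assumed values, not taken from a real machine):
 * a DIRECT64_PROPNAME property describing a 1 TiB direct-mapped window
 * of 64 KiB TCE pages starting at an (assumed) bus address of
 * 0x8_0000_0000 would carry the cells
 *
 *	liobn        = <LIOBN of the window>      (1 cell)
 *	dma_base     = 0x00000008 0x00000000      (2 cells, hi/lo)
 *	tce_shift    = 16                         (ilog2(64 KiB))
 *	window_shift = 40                         (ilog2(1 TiB))
 *
 * tce_clearrange_multi_pSeriesLP() and tce_setrange_multi_pSeriesLP()
 * below take a pointer to a struct with this layout as their opaque
 * 'arg' and use dma_base/tce_shift to translate memory ranges into TCE
 * ranges.
 */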
static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn,
unsigned long num_pfn, const void *arg)
{
const struct dynamic_dma_window_prop *maprange = arg;
int rc;
u64 tce_size, num_tce, dma_offset, next;
u32 tce_shift;
long limit;
tce_shift = be32_to_cpu(maprange->tce_shift);
tce_size = 1ULL << tce_shift;
next = start_pfn << PAGE_SHIFT;
num_tce = num_pfn << PAGE_SHIFT;
/* round back to the beginning of the tce page size */
num_tce += next & (tce_size - 1);
next &= ~(tce_size - 1);
/* convert to the number of TCEs */
num_tce |= tce_size - 1;
num_tce >>= tce_shift;
do {
/*
 * Clear the range in chunks: plpar_tce_stuff() is handed at most
 * 512 TCEs per call.
 */
limit = min_t(long, num_tce, 512);
dma_offset = next + be64_to_cpu(maprange->dma_base);
rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn),
dma_offset,
0, limit);
next += limit * tce_size;
num_tce -= limit;
} while (num_tce > 0 && !rc);
return rc;
}
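/*
 * Worked example (illustrative; assumes 64 KiB kernel pages, i.e.
 * PAGE_SHIFT == 16, and a 16 MiB TCE page size, i.e. tce_shift == 24):
 * for start_pfn == 0x100 and num_pfn == 0x300 the range starts at byte
 * 0x1000000 and is 0x3000000 bytes long.  The start is already 16 MiB
 * aligned, so the adjustment lines leave it untouched and
 *
 *	num_tce = (0x3000000 | 0xffffff) >> 24 = 3
 *
 * i.e. three 16 MiB TCEs cover the 48 MiB range, cleared in a single
 * plpar_tce_stuff() call (limit == 3 < 512).  When the start is not
 * aligned, the "round back" lines fold the start's offset within its
 * TCE page into num_tce and pull next back to the TCE page boundary
 * before the count is taken.
 */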
static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
unsigned long num_pfn, const void *arg)
{
const struct dynamic_dma_window_prop *maprange = arg;
u64 tce_size, num_tce, dma_offset, next, proto_tce, liobn;
__be64 *tcep;
u32 tce_shift;
u64 rc = 0;
long l, limit;
local_irq_disable(); /* to protect tcep and the page behind it */
tcep = __this_cpu_read(tce_page);
if (!tcep) {
tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
if (!tcep) {
local_irq_enable();
return -ENOMEM;
}
__this_cpu_write(tce_page, tcep);
}
proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;
liobn = (u64)be32_to_cpu(maprange->liobn);
tce_shift = be32_to_cpu(maprange->tce_shift);
tce_size = 1ULL << tce_shift;
next = start_pfn << PAGE_SHIFT;
num_tce = num_pfn << PAGE_SHIFT;
/* round back to the beginning of the tce page size */
num_tce += next & (tce_size - 1);
next &= ~(tce_size - 1);
/* convert to the number of TCEs */
num_tce |= tce_size - 1;
num_tce >>= tce_shift;
/* We can map max one pageful of TCEs at a time */
do {
/*
* Set up the page with TCE data, looping through and setting
* the values.
*/
limit = min_t(long, num_tce, 4096/TCE_ENTRY_SIZE);
dma_offset = next + be64_to_cpu(maprange->dma_base);
for (l = 0; l < limit; l++) {
tcep[l] = cpu_to_be64(proto_tce | next);
next += tce_size;
}
rc = plpar_tce_put_indirect(liobn,
dma_offset,
(u64)__pa(tcep),
limit);
num_tce -= limit;
} while (num_tce > 0 && !rc);
/* error cleanup: caller will clear whole range */
local_irq_enable();
return rc;
}
static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn,
unsigned long num_pfn, void *arg)
{
return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg);
}
static void iommu_table_setparms(struct pci_controller *phb,
struct device_node *dn,
struct iommu_table *tbl)
{
struct device_node *node;
const unsigned long *basep;
const u32 *sizep;
node = phb->dn;
basep = of_get_property(node, "linux,tce-base", NULL);
sizep = of_get_property(node, "linux,tce-size", NULL);
if (basep == NULL || sizep == NULL) {
printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %pOF has "
"missing tce entries !\n", dn);
return;
}
tbl->it_base = (unsigned long)__va(*basep);
if (!is_kdump_kernel())
memset((void *)tbl->it_base, 0, *sizep);
tbl->it_busno = phb->bus->number;
tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
/* Units of tce entries */
tbl->it_offset = phb->dma_window_base_cur >> tbl->it_page_shift;
/* Test if we are going over 2GB of DMA space */
if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
}
phb->dma_window_base_cur += phb->dma_window_size;
/* Set the tce table size - measured in entries */
tbl->it_size = phb->dma_window_size >> tbl->it_page_shift;
tbl->it_index = 0;
tbl->it_blocksize = 16;
tbl->it_type = TCE_PCI;
}
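/*
 * Worked example (illustrative): for the ISA/IDE case set up by
 * pci_dma_bus_setup_pSeries() below, dma_window_base_cur and
 * dma_window_size are both 0x8000000 (128 MiB) and the page shift is
 * IOMMU_PAGE_SHIFT_4K (12), so
 *
 *	it_offset = 0x8000000 >> 12 = 0x8000
 *	it_size   = 0x8000000 >> 12 = 0x8000   (32768 4 KiB entries = 128 MiB)
 *
 * i.e. the window starts 32768 IOMMU pages (128 MiB) into the bus
 * address space, and the next table under this PHB would start at
 * 0x10000000.  The panic above fires if the accumulated windows would
 * ever cross the 2 GiB (0x80000000) boundary.
 */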
/*
* iommu_table_setparms_lpar
*
* Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
*/
static void iommu_table_setparms_lpar(struct pci_controller *phb,
struct device_node *dn,
struct iommu_table *tbl,
struct iommu_table_group *table_group,
const __be32 *dma_window)
{
unsigned long offset, size;
of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);
tbl->it_busno = phb->bus->number;
tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
tbl->it_base = 0;
tbl->it_blocksize = 16;
tbl->it_type = TCE_PCI;
tbl->it_offset = offset >> tbl->it_page_shift;
tbl->it_size = size >> tbl->it_page_shift;
table_group->tce32_start = offset;
table_group->tce32_size = size;
}
struct iommu_table_ops iommu_table_pseries_ops = {
.set = tce_build_pSeries,
.clear = tce_free_pSeries,
.get = tce_get_pseries
};
static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
{
struct device_node *dn;
struct iommu_table *tbl;
struct device_node *isa_dn, *isa_dn_orig;
struct device_node *tmp;
struct pci_dn *pci;
int children;
dn = pci_bus_to_OF_node(bus);
pr_debug("pci_dma_bus_setup_pSeries: setting up bus %pOF\n", dn);
if (bus->self) {
/* This is not a root bus, any setup will be done for the
* device-side of the bridge in iommu_dev_setup_pSeries().
*/
return;
}
pci = PCI_DN(dn);
/* Check if the ISA bus on the system is under
* this PHB.
*/
isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa");
while (isa_dn && isa_dn != dn)
isa_dn = isa_dn->parent;
of_node_put(isa_dn_orig);
/* Count number of direct PCI children of the PHB. */
for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
children++;
pr_debug("Children: %d\n", children);
/* Calculate amount of DMA window per slot. Each window must be
* a power of two (due to pci_alloc_consistent requirements).
*
* Keep 256MB aside for PHBs with ISA.
*/
if (!isa_dn) {
/* No ISA/IDE - just set window size and return */
pci->phb->dma_window_size = 0x80000000ul; /* To be divided */
while (pci->phb->dma_window_size * children > 0x80000000ul)
pci->phb->dma_window_size >>= 1;
pr_debug("No ISA/IDE, window size is 0x%llx\n",
pci->phb->dma_window_size);
pci->phb->dma_window_base_cur = 0;
return;
}
/* If we have ISA, then we probably have an IDE
* controller too. Allocate a 128MB table but
* skip the first 128MB to avoid stepping on ISA
* space.
*/
pci->phb->dma_window_size = 0x8000000ul;
pci->phb->dma_window_base_cur = 0x8000000ul;
pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
tbl = pci->table_group->tables[0];
iommu_table_setparms(pci->phb, dn, tbl);
tbl->it_ops = &iommu_table_pseries_ops;
iommu_init_table(tbl, pci->phb->node);
iommu_register_group(pci->table_group, pci_domain_nr(bus), 0);
/* Divide the rest (1.75GB) among the children */
pci->phb->dma_window_size = 0x80000000ul;
while (pci->phb->dma_window_size * children > 0x70000000ul)
pci->phb->dma_window_size >>= 1;
pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);
}
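/*
 * Worked example (illustrative): for a PHB with three direct children
 * and no ISA bus, the loop starts at 0x80000000 (2 GiB) and halves until
 * size * children fits: 2 GiB * 3 and 1 GiB * 3 both exceed 2 GiB, but
 * 512 MiB * 3 == 1.5 GiB does not, so each child slot gets a 512 MiB
 * window carved out from bus address 0.  With ISA present, the PHB keeps
 * a 128 MiB table at bus offset 128 MiB for itself and the same halving
 * runs against 0x70000000 (1.75 GiB), which for three children again
 * yields 512 MiB per slot.
 */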
#ifdef CONFIG_IOMMU_API
static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned
long *tce, enum dma_data_direction *direction)
{
long rc;
unsigned long ioba = (unsigned long) index << tbl->it_page_shift;
unsigned long flags, oldtce = 0;
u64 proto_tce = iommu_direction_to_tce_perm(*direction);
unsigned long newtce = *tce | proto_tce;
spin_lock_irqsave(&tbl->large_pool.lock, flags);
rc = plpar_tce_get((u64)tbl->it_index, ioba, &oldtce);
if (!rc)
rc = plpar_tce_put((u64)tbl->it_index, ioba, newtce);
if (!rc) {
*direction = iommu_tce_direction(oldtce);
*tce = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
}
spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
return rc;
}
#endif
struct iommu_table_ops iommu_table_lpar_multi_ops