cregit-Linux: how code gets into the kernel

Release 4.14: arch/powerpc/kernel/pci-common.c

/*
 * Contains common pci routines for ALL ppc platforms
 * (based on pci_32.c and pci_64.c)
 *
 * Port for PPC64 David Engebretsen, IBM Corp.
 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
 *
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 *   Rework, based on alpha PCI code.
 *
 * Common pmac/prep/chrp pci routines. -- Cort
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/mm.h>
#include <linux/shmem_fs.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/eeh.h>

/* hose_spinlock protects accesses to the phb_bitmap. */
static DEFINE_SPINLOCK(hose_spinlock);

LIST_HEAD(hose_list);

/* For dynamic PHB numbering in get_phb_number(): max number of PHBs. */
#define MAX_PHBS	0x10000

/*
 * For dynamic PHB numbering: used/free PHBs tracking bitmap.
 * Accesses to this bitmap should be protected by hose_spinlock.
 */
static DECLARE_BITMAP(phb_bitmap, MAX_PHBS);

/* ISA Memory physical address */
resource_size_t isa_mem_base;
EXPORT_SYMBOL(isa_mem_base);

static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;

void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
{
        pci_dma_ops = dma_ops;
}

Contributors

  Person                        Tokens     Prop  Commits  CommitProp
  Becky Bruce                       13   86.67%        1      33.33%
  FUJITA Tomonori                    1    6.67%        1      33.33%
  Bart Van Assche                    1    6.67%        1      33.33%
  Total                             15  100.00%        3     100.00%


const struct dma_map_ops *get_pci_dma_ops(void)
{
        return pci_dma_ops;
}

Contributors

  Person                        Tokens     Prop  Commits  CommitProp
  Becky Bruce                       11   84.62%        1      33.33%
  FUJITA Tomonori                    1    7.69%        1      33.33%
  Bart Van Assche                    1    7.69%        1      33.33%
  Total                             13  100.00%        3     100.00%

EXPORT_SYMBOL(get_pci_dma_ops);
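These accessors are the intended way for platform code to swap in its own DMA
implementation before PCI devices are set up. A minimal sketch of such an
override, assuming a hypothetical platform file and a platform-provided
my_platform_dma_ops symbol (neither appears in this file):

/* Hypothetical platform setup -- a sketch, not part of this file. */
extern const struct dma_map_ops my_platform_dma_ops;	/* assumed symbol */

static void __init my_platform_setup_pci(void)
{
        /* PCI devices probed afterwards pick up these ops. */
        set_pci_dma_ops(&my_platform_dma_ops);
}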
/*
 * This function should run under locking protection, specifically
 * hose_spinlock.
 */
static int get_phb_number(struct device_node *dn)
{
        int ret, phb_id = -1;
        u32 prop_32;
        u64 prop;

        /*
         * Try fixed PHB numbering first, by checking archs and reading
         * the respective device-tree properties. Firstly, try powernv by
         * reading "ibm,opal-phbid", only present in OPAL environment.
         */
        ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
        if (ret) {
                ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
                prop = prop_32;
        }

        if (!ret)
                phb_id = (int)(prop & (MAX_PHBS - 1));

        /* We need to be sure to not use the same PHB number twice. */
        if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
                return phb_id;

        /*
         * If not pseries nor powernv, or if fixed PHB numbering tried to add
         * the same PHB number twice, then fallback to dynamic PHB numbering.
         */
        phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
        BUG_ON(phb_id >= MAX_PHBS);
        set_bit(phb_id, phb_bitmap);

        return phb_id;
}

Contributors

  Person                        Tokens     Prop  Commits  CommitProp
  Guilherme G. Piccoli             119   92.25%        1      50.00%
  Michael Ellerman                  10    7.75%        1      50.00%
  Total                            129  100.00%        2     100.00%


struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
        struct pci_controller *phb;

        phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
        if (phb == NULL)
                return NULL;
        spin_lock(&hose_spinlock);
        phb->global_number = get_phb_number(dev);
        list_add_tail(&phb->list_node, &hose_list);
        spin_unlock(&hose_spinlock);
        phb->dn = dev;
        phb->is_dynamic = slab_is_available();
#ifdef CONFIG_PPC64
        if (dev) {
                int nid = of_node_to_nid(dev);

                if (nid < 0 || !node_online(nid))
                        nid = -1;

                PHB_SET_NODE(phb, nid);
        }
#endif
        return phb;
}

Contributors

  Person                        Tokens     Prop  Commits  CommitProp
  Kumar Gala                        98   75.38%        1      20.00%
  Stephen Rothwell                  26   20.00%        2      40.00%
  Guilherme G. Piccoli               4    3.08%        1      20.00%
  Michael Ellerman                   2    1.54%        1      20.00%
  Total                            130  100.00%        5     100.00%

EXPORT_SYMBOL_GPL(pcibios_alloc_controller);
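A typical call site pairs this allocator with the PHB's device node, then
fills in the window resources from the device tree. A hedged sketch of a
platform probe (the function name my_platform_add_phb is illustrative;
pcibios_alloc_controller() and pci_process_bridge_OF_ranges() are from this
file):

/* Sketch of a platform bringing up one PHB -- illustrative only. */
static int __init my_platform_add_phb(struct device_node *np)
{
        struct pci_controller *hose;

        hose = pcibios_alloc_controller(np);	/* picks a unique PHB number */
        if (!hose)
                return -ENOMEM;

        /* Parse the "ranges" property into the hose's IO/MEM resources. */
        pci_process_bridge_OF_ranges(hose, np, 1);
        return 0;
}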
void pcibios_free_controller(struct pci_controller *phb)
{
        spin_lock(&hose_spinlock);

        /* Clear bit of phb_bitmap to allow reuse of this PHB number. */
        if (phb->global_number < MAX_PHBS)
                clear_bit(phb->global_number, phb_bitmap);

        list_del(&phb->list_node);
        spin_unlock(&hose_spinlock);

        if (phb->is_dynamic)
                kfree(phb);
}

Contributors

  Person                        Tokens     Prop  Commits  CommitProp
  Kumar Gala                        41   69.49%        1      50.00%
  Guilherme G. Piccoli              18   30.51%        1      50.00%
  Total                             59  100.00%        2     100.00%

EXPORT_SYMBOL_GPL(pcibios_free_controller);

/*
 * This function is used to call pcibios_free_controller()
 * in a deferred manner: a callback from the PCI subsystem.
 *
 * _*DO NOT*_ call pcibios_free_controller() explicitly if
 * this is used (or it may access an invalid *phb pointer).
 *
 * The callback occurs when all references to the root bus
 * are dropped (e.g., child buses/devices and their users).
 *
 * It's called as .release_fn() of 'struct pci_host_bridge'
 * which is associated with the 'struct pci_controller.bus'
 * (root bus) - it expects .release_data to hold a pointer
 * to 'struct pci_controller'.
 *
 * In order to use it, register .release_fn()/release_data
 * like this:
 *
 * pci_set_host_bridge_release(bridge,
 *                             pcibios_free_controller_deferred,
 *                             (void *) phb);
 *
 * e.g. in the pcibios_root_bridge_prepare() callback from
 * pci_create_root_bus().
 */
void pcibios_free_controller_deferred(struct pci_host_bridge *bridge)
{
        struct pci_controller *phb = (struct pci_controller *)
                                         bridge->release_data;

        pr_debug("domain %d, dynamic %d\n", phb->global_number, phb->is_dynamic);

        pcibios_free_controller(phb);
}

Contributors

  Person                        Tokens     Prop  Commits  CommitProp
  Mauricio Faria de Oliveira        42  100.00%        1     100.00%
  Total                             42  100.00%        1     100.00%

EXPORT_SYMBOL_GPL(pcibios_free_controller_deferred);
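Written out, the registration recipe from the comment above might look like
this inside a platform's pcibios_root_bridge_prepare() hook; a sketch under
the assumption that the bridge's root bus is already associated with the
pci_controller:

/* Sketch: arrange for the deferred free when the root bus goes away. */
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
        struct pci_controller *phb = pci_bus_to_host(bridge->bus);

        /* pcibios_free_controller_deferred() runs as the bridge release_fn. */
        pci_set_host_bridge_release(bridge,
                                    pcibios_free_controller_deferred,
                                    (void *) phb);
        return 0;
}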
/*
 * The function is used to return the minimal alignment
 * for memory or I/O windows of the associated P2P bridge.
 * By default, 4KiB alignment for I/O windows and 1MiB for
 * memory windows.
 */
resource_size_t pcibios_window_alignment(struct pci_bus *bus,
                                         unsigned long type)
{
        struct pci_controller *phb = pci_bus_to_host(bus);

        if (phb->controller_ops.window_alignment)
                return phb->controller_ops.window_alignment(bus, type);

        /*
         * PCI core will figure out the default
         * alignment: 4KiB for I/O and 1MiB for
         * memory window.
         */
        return 1;
}

Contributors

  Person                        Tokens     Prop  Commits  CommitProp
  Daniel Axtens                     27   56.25%        1      50.00%
  Gavin Shan                        21   43.75%        1      50.00%
  Total                             48  100.00%        2     100.00%


void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
{
        struct pci_controller *hose = pci_bus_to_host(bus);

        if (hose->controller_ops.setup_bridge)
                hose->controller_ops.setup_bridge(bus, type);
}

Contributors

  Person                        Tokens     Prop  Commits  CommitProp
  Gavin Shan                        43  100.00%        1     100.00%
  Total                             43  100.00%        1     100.00%


void pcibios_reset_secondary_bus(struct pci_dev *dev)
{
        struct pci_controller *phb = pci_bus_to_host(dev->bus);

        if (phb->controller_ops.reset_secondary_bus) {
                phb->controller_ops.reset_secondary_bus(dev);
                return;
        }

        pci_reset_secondary_bus(dev);
}

Contributors

  Person                        Tokens     Prop  Commits  CommitProp
  Daniel Axtens                     33   70.21%        1      50.00%
  Gavin Shan                        14   29.79%        1      50.00%
  Total                             47  100.00%        2     100.00%


resource_size_t pcibios_default_alignment(void)
{
        if (ppc_md.pcibios_default_alignment)
                return ppc_md.pcibios_default_alignment();

        return 0;
}

Contributors

  Person                        Tokens     Prop  Commits  CommitProp
  Yongji Xie                        22  100.00%        1     100.00%
  Total                             22  100.00%        1     100.00%

#ifdef CONFIG_PCI_IOV
resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
{
        if (ppc_md.pcibios_iov_resource_alignment)
                return ppc_md.pcibios_iov_resource_alignment(pdev, resno);

        return pci_iov_resource_size(pdev, resno);
}

Contributors

  Person                        Tokens     Prop  Commits  CommitProp
  Wei Yang                          37  100.00%        1     100.00%
  Total                             37  100.00%        1     100.00%

#endif /* CONFIG_PCI_IOV */
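The pcibios_*() wrappers above dispatch through optional hooks: machine-wide
ones in ppc_md and per-PHB ones in pci_controller.controller_ops. A
hypothetical platform would populate the per-PHB table like this (the
my_phb_* callbacks are assumptions, not symbols from this file; the hook
signatures match the calls made above):

/* Sketch: per-PHB hook table -- callback implementations assumed. */
static resource_size_t my_phb_window_alignment(struct pci_bus *bus,
                                               unsigned long type);
static void my_phb_setup_bridge(struct pci_bus *bus, unsigned long type);
static void my_phb_reset_secondary_bus(struct pci_dev *dev);

static const struct pci_controller_ops my_phb_controller_ops = {
        .window_alignment       = my_phb_window_alignment,
        .setup_bridge           = my_phb_setup_bridge,
        .reset_secondary_bus    = my_phb_reset_secondary_bus,
};

/* At PHB setup time the platform copies it into the controller:
 *      hose->controller_ops = my_phb_controller_ops;
 */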
static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
#ifdef CONFIG_PPC64
        return hose->pci_io_size;
#else
        return resource_size(&hose->io_resource);
#endif
}

Contributors

  Person                        Tokens     Prop  Commits  CommitProp
  Milton D. Miller II               29   87.88%        1      50.00%
  Joe Perches                        4   12.12%        1      50.00%
  Total                             33  100.00%        2     100.00%


int pcibios_vaddr_is_ioport(void __iomem *address)
{
        int ret = 0;
        struct pci_controller *hose;
        resource_size_t size;

        spin_lock(&hose_spinlock);
        list_for_each_entry(hose, &hose_list, list_node) {
                size = pcibios_io_size(hose);
                if (address >= hose->io_base_virt &&
                    address < (hose->io_base_virt + size)) {
                        ret = 1;
                        break;
                }
        }
        spin_unlock(&hose_spinlock);
        return ret;
}

Contributors

  Person                        Tokens     Prop  Commits  CommitProp
  Benjamin Herrenschmidt            47   58.75%        1      50.00%
  Milton D. Miller II               33   41.25%        1      50.00%
  Total                             80  100.00%        2     100.00%


unsigned long pci_address_to_pio(phys_addr_t address)
{
        struct pci_controller *hose;
        resource_size_t size;
        unsigned long ret = ~0;

        spin_lock(&hose_spinlock);
        list_for_each_entry(hose, &hose_list, list_node) {
                size = pcibios_io_size(hose);
                if (address >= hose->io_base_phys &&
                    address < (hose->io_base_phys + size)) {
                        unsigned long base =
                                (unsigned long)hose->io_base_virt - _IO_BASE;
                        ret = base + (address - hose->io_base_phys);
                        break;
                }
        }
        spin_unlock(&hose_spinlock);

        return ret;
}

Contributors

  Person                        Tokens     Prop  Commits  CommitProp
  Milton D. Miller II               70   67.96%        1      50.00%
  Benjamin Herrenschmidt            33   32.04%        1      50.00%
  Total                            103  100.00%        2     100.00%

EXPORT_SYMBOL_GPL(pci_address_to_pio);

/*
 * Return the domain number for this bus.
 */
int pci_domain_nr(struct pci_bus *bus)
{
        struct pci_controller *hose = pci_bus_to_host(bus);

        return hose->global_number;
}

Contributors

  Person                        Tokens     Prop  Commits  CommitProp
  Kumar Gala                        25  100.00%        1     100.00%
  Total                             25  100.00%        1     100.00%

EXPORT_SYMBOL(pci_domain_nr);

/* This routine is meant to be used early during boot, when the
 * PCI bus numbers have not yet been assigned, and you need to
 * issue PCI config cycles to an OF device.
 * It could also be used to "fix" RTAS config cycles if you want
 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
 * config cycles.
 */
struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
{
        while (node) {
                struct pci_controller *hose, *tmp;

                list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
                        if (hose->dn == node)
                                return hose;
                node = node->parent;
        }
        return NULL;
}

Contributors

  Person                        Tokens     Prop  Commits  CommitProp
  Kumar Gala                        55   98.21%        1      50.00%
  Stephen Rothwell                   1    1.79%        1      50.00%
  Total                             56  100.00%        2     100.00%
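In practice this lookup feeds the early config-access helpers: given only a
device node, a platform can find the hose and issue config cycles before bus
numbers exist, exactly as the comment above describes. A small sketch
(early_read_config_dword() is the early accessor declared in
asm/pci-bridge.h; the surrounding function is hypothetical):

/* Sketch: early-boot config read for an OF device -- illustrative. */
static u32 my_early_read_vendor(struct device_node *np, int bus, int devfn)
{
        struct pci_controller *hose = pci_find_hose_for_OF_device(np);
        u32 val = 0xffffffff;

        if (hose)
                early_read_config_dword(hose, bus, devfn, PCI_VENDOR_ID, &val);
        return val;
}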

/*
 * Reads the interrupt pin to determine if the interrupt is used by the card.
 * If the interrupt is used, then gets the interrupt line from the
 * Open Firmware tree and sets it in the pci_dev and the PCI config line.
 */
static int pci_read_irq_line(struct pci_dev *pci_dev)
{
        struct of_phandle_args oirq;
        unsigned int virq;

        pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));

#ifdef DEBUG
        memset(&oirq, 0xff, sizeof(oirq));
#endif
        /* Try to get a mapping from the device-tree */
        if (of_irq_parse_pci(pci_dev, &oirq)) {
                u8 line, pin;

                /* If that fails, let's fall back to what is in the config
                 * space and map that through the default controller. We
                 * also set the type to level low since that's what PCI
                 * interrupts are. If your platform does differently, then
                 * either provide a proper interrupt tree or don't use this
                 * function.
                 */
                if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
                        return -1;
                if (pin == 0)
                        return -1;
                if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
                    line == 0xff || line == 0) {
                        return -1;
                }
                pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
                         line, pin);

                virq = irq_create_mapping(NULL, line);
                if (virq)
                        irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
        } else {
                pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %pOF\n",
                         oirq.args_count, oirq.args[0], oirq.args[1], oirq.np);

                virq = irq_create_of_mapping(&oirq);
        }

        if (!virq) {
                pr_debug(" Failed to map !\n");
                return -1;
        }

        pr_debug(" Mapped to linux irq %d\n", virq);

        pci_dev->irq = virq;

        return 0;
}

Contributors

  Person                        Tokens     Prop  Commits  CommitProp
  Kumar Gala                       190   87.56%        1       9.09%
  Benjamin Herrenschmidt            16    7.37%        3      27.27%
  Grant C. Likely                    8    3.69%        4      36.36%
  Thomas Gleixner                    1    0.46%        1       9.09%
  Michael Ellerman                   1    0.46%        1       9.09%
  Rob Herring                        1    0.46%        1       9.09%
  Total                            217  100.00%       11     100.00%

/*
 * Platform support for /proc/bus/pci/X/Y mmap()s,
 * modelled on the sparc64 implementation by Dave Miller.
 *  -- paulus.
 */

/*
 * Adjust vm_pgoff of VMA such that it is the physical page offset
 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, the user finds the base address of the device they wish
 * to mmap. They read the 32-bit value from the config space base register,
 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
 *
 * Returns negative error code on failure, zero on success.
 */
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
                                               resource_size_t *offset,
                                               enum pci_mmap_state mmap_state)
{
        struct pci_controller *hose = pci_bus_to_host(dev->bus);
        unsigned long io_offset = 0;
        int i, res_bit;

        if (hose == NULL)
                return NULL;            /* should never happen */

        /* If memory, add on the PCI bridge address offset */
        if (mmap_state == pci_mmap_mem) {
#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
                *offset += hose->pci_mem_offset;
#endif
                res_bit = IORESOURCE_MEM;
        } else {
                io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
                *offset += io_offset;
                res_bit = IORESOURCE_IO;
        }

        /*
         * Check that the offset requested corresponds to one of the
         * resources of the device.
         */
        for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
                struct resource *rp = &dev->resource[i];
                int flags = rp->flags;

                /* treat ROM as memory (should be already) */
                if (i == PCI_ROM_RESOURCE)
                        flags |= IORESOURCE_MEM;

                /* Active and same type? */
                if ((flags & res_bit) == 0)
                        continue;

                /* In the range of this resource? */
                if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
                        continue;

                /* found it! construct the final physical address */
                if (mmap_state == pci_mmap_io)
                        *offset += hose->io_base_phys - io_offset;
                return rp;
        }

        return NULL;
}

Contributors

  Person                        Tokens     Prop  Commits  CommitProp
  Kumar Gala                       200   99.50%        1      50.00%
  Anton Blanchard                    1    0.50%        1      50.00%
  Total                            201  100.00%        2     100.00%
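Seen from user space, the comment above boils down to: read a BAR from the
device's config file, mask off its flag bits, and pass the result as the
mmap offset on the same /proc/bus/pci node. A hedged user-space sketch (the
path and BAR index are examples; error handling is trimmed):

/* User-space sketch of the /proc/bus/pci mmap recipe -- illustrative. */
#include <fcntl.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

static void *map_bar0(const char *path)  /* e.g. a /proc/bus/pci device node */
{
        uint32_t bar0 = 0;
        int fd = open(path, O_RDWR);

        if (fd < 0)
                return MAP_FAILED;

        /* BAR0 sits at config offset 0x10; mask the low flag bits. */
        pread(fd, &bar0, sizeof(bar0), 0x10);
        bar0 &= ~(uint32_t)0xf;

        /* The masked BAR value becomes the page-aligned mmap offset. */
        return mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, (off_t)bar0);
}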

/*
 * This one is used by /dev/mem and fbdev, which have no clue about the
 * PCI device; it tries to find the PCI device first and then calls the
 * routine above.
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
                                  unsigned long pfn,
                                  unsigned long size,
                                  pgprot_t prot)
{
        struct pci_dev *pdev = NULL;
        struct resource *found = NULL;
        resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
        int i;

        if (page_is_ram(pfn))
                return prot;

        prot = pgprot_noncached(prot);
        for_each_pci_dev(pdev) {
                for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
                        struct resource *rp = &pdev->resource[i];
                        int flags = rp->flags;

                        /* Active and same type? */
                        if ((flags & IORESOURCE_MEM) == 0)
                                continue;
                        /* In the range of this resource? */
                        if (offset < (rp->start & PAGE_MASK) ||
                            offset > rp->end)
                                continue;
                        found = rp;
                        break;
                }
                if (found)
                        break;
        }
        if (found) {
                if (found->flags & IORESOURCE_PREFETCH)
                        prot = pgprot_noncached_wc(prot);
                pci_dev_put(pdev);
        }

        pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
                 (unsigned long long)offset, pgprot_val(prot));

        return prot;
}

Contributors

  Person                        Tokens     Prop  Commits  CommitProp
  Kumar Gala                       160   81.63%        1      20.00%
  Benjamin Herrenschmidt            36   18.37%        4      80.00%
  Total                            196  100.00%        5     100.00%

/*
 * Perform the actual remap of the pages for a PCI device mapping, as
 * appropriate for this architecture. The region in the process to map
 * is described by vm_start and vm_end members of VMA, the base physical
 * address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make mapping
 * decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, int bar,
                        struct vm_area_struct *vma,
                        enum pci_mmap_state mmap_state, int write_combine)
{
        resource_size_t offset =
                ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
        struct resource *rp;
        int ret;

        rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
        if (rp == NULL)
                return -EINVAL;

        vma->vm_pgoff = offset >> PAGE_SHIFT;
        if (write_combine)
                vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
        else
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                              vma->vm_end - vma->vm_start, vma->vm_page_prot);

        return ret;
}

Contributors

  Person                        Tokens     Prop  Commits  CommitProp
  Benjamin Herrenschmidt           113   84.33%        1      25.00%
  Yinghai Lu                        17   12.69%        1      25.00%
  David Woodhouse                    3    2.24%        1      25.00%
  Kumar Gala                         1    0.75%        1      25.00%
  Total                            134  100.00%        4     100.00%

/* This provides legacy IO read access on a bus */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
        unsigned long offset;
        struct pci_controller *hose = pci_bus_to_host(bus);
        struct resource *rp = &hose->io_resource;
        void __iomem *addr;

        /* Check if port can be supported by that bus. We only check
         * the ranges of the PHB though, not the bus itself as the rules
         * for forwarding legacy cycles down bridges are not our problem
         * here. So if the host bridge supports it, we do it.
         */
        offset = (unsigned long)hose->io_base_virt - _IO_BASE;
        offset += port;

        if (!(rp->flags & IORESOURCE_IO))
                return -ENXIO;
        if (offset < rp->start || (offset + size) > rp->end)
                return -ENXIO;
        addr = hose->io_base_virt + port;

        switch (size) {
        case 1:
                *((u8 *)val) = in_8(addr);
                return 1;
        case 2:
                if (port & 1)
                        return -EINVAL;
                *((u16 *)val) = in_le16(addr);
                return 2;
        case 4:
                if (port & 3)
                        return -EINVAL;
                *((u32 *)val) = in_le32(addr);
                return 4;
        }
        return -EINVAL;
}

Contributors

  Person                        Tokens     Prop  Commits  CommitProp
  Benjamin Herrenschmidt           201  100.00%        1     100.00%
  Total                            201  100.00%        1     100.00%

/* This provides legacy IO write access on a bus */
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
        unsigned long offset;
        struct pci_controller *hose = pci_bus_to_host(bus);
        struct resource *rp = &hose->io_resource;
        void __iomem *addr;

        /* Check if port can be supported by that bus. We only check
         * the ranges of the PHB though, not the bus itself as the rules
         * for forwarding legacy cycles down bridges are not our problem
         * here. So if the host bridge supports it, we do it.
         */
        offset = (unsigned long)hose->io_base_virt - _IO_BASE;
        offset += port;

        if (!(rp->flags & IORESOURCE_IO))
                return -ENXIO;
        if (offset < rp->start || (offset + size) > rp->end)
                return -ENXIO;
        addr = hose->io_base_virt + port;

        /* WARNING: The generic code is idiotic. It gets passed a pointer
         * to what can be a 1, 2 or 4 byte quantity and always reads that
         * as a u32, which means that we have to correct the location of
         * the data read within those 32 bits for size 1 and 2
         */
        switch (size) {
        case 1:
                out_8(addr, val >> 24);
                return 1;
        case 2:
                if (port & 1)
                        return -EINVAL;
                out_le16(addr, val >> 16);
                return 2;
        case 4:
                if (port & 3)
                        return -EINVAL;
                out_le32(addr, val);
                return 4;
        }
        return -EINVAL;
}

Contributors

  Person                        Tokens     Prop  Commits  CommitProp
  Benjamin Herrenschmidt           176   95.65%        1      50.00%
  Kumar Gala                         8    4.35%        1      50.00%
  Total                            184  100.00%        2     100.00%

/* This provides legacy IO or memory mmap access on a bus */
int pci_mmap_legacy_page_range(struct pci_bus *bus,
                               struct vm_area_struct *vma,
                               enum pci_mmap_state mmap_state)
{
        struct pci_controller *hose = pci_bus_to_host(bus);
        resource_size_t offset =
                ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
        resource_size_t size = vma->vm_end - vma->vm_start;
        struct resource *rp;

        pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
                 pci_domain_nr(bus), bus->number,
                 mmap_state == pci_mmap_mem ? "MEM" : "IO",
                 (unsigned long long)offset,
                 (unsigned long long)(offset + size - 1));

        if (mmap_state == pci_mmap_mem) {
                /* Hack alert !
                 *
                 * Because X is lame and can fail starting if it gets an error trying
                 * to mmap legacy_mem (instead of just moving on without legacy memory
                 * access) we fake it here by giving it anonymous memory, effectively
                 * behaving just like /dev/zero
                 */
                if ((offset + size) > hose->isa_mem_size) {
                        printk(KERN_DEBUG
                               "Process %s (pid:%d) mapped non-existing PCI legacy memory for 0%04x:%02x\n",
                               current->comm, current->pid,
                               pci_domain_nr(bus), bus->number);
                        if (vma->vm_flags & VM_SHARED)
                                return shmem_zero_setup(vma);
                        return 0;
                }
                offset += hose->isa_mem_phys;
        } else {
                unsigned long io_offset =
                        (unsigned long)hose->io_base_virt - _IO_BASE;
                unsigned long roffset = offset + io_offset;

                rp = &hose->io_resource;
                if (!(rp->flags & IORESOURCE_IO))
                        return -ENXIO;
                if (roffset < rp->start || (roffset + size) > rp->end)
                        return -ENXIO;
                offset += hose->io_base_phys;
        }
        pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);

        vma->vm_pgoff = offset >> PAGE_SHIFT;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}

Contributors

  Person                        Tokens     Prop  Commits  CommitProp
  Benjamin Herrenschmidt           217   71.85%        4      66.67%
  Kumar Gala                        79   26.16%        1      16.67%
  David Gibson                       6    1.99%        1      16.67%
  Total                            302  100.00%        6     100.00%


void pci_resource_to_user(const struct pci_dev *dev, int bar,
                          const struct resource *rsrc,
                          resource_size_t *start, resource_size_t *end)
{
        struct pci_bus_region region;

        if (rsrc->flags & IORESOURCE_IO) {
                pcibios_resource_to_bus(dev->bus, &region,
                                        (struct resource *) rsrc);
                *start = region.start;
                *end = region.end;
                return;
        }

        /* We pass a CPU physical address to userland for MMIO instead of a
         * BAR value because X is lame and expects to be able to use that
         * to pass to /dev/mem!
         *
         * That means we may have 64-bit values where some apps only expect
         * 32 (like X itself since it thinks only Sparc has 64-bit MMIO).
         */
        *start = rsrc->start;
        *end = rsrc->end;
}

Contributors

  Person                        Tokens     Prop  Commits  CommitProp
  Kumar Gala                        55   61.80%        1      50.00%
  Björn Helgaas                     34   38.20%        1      50.00%
  Total                             89  100.00%        2     100.00%
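As a worked illustration of what the parser below derives, consider one
hypothetical MEM entry; the numbers are invented, but the arithmetic mirrors
the IORESOURCE_MEM case in the code:

/* Hypothetical "ranges" entry (all values are examples):
 *
 *      range.pci_addr = 0x80000000;    PCI-side window start
 *      range.cpu_addr = 0xc0000000;    CPU physical address
 *      range.size     = 0x10000000;    256MB window
 *
 * The IORESOURCE_MEM arm below then logs
 *      MEM 0x00000000c0000000..0x00000000cfffffff -> 0x0000000080000000
 * and, because range.pci_addr != 0, leaves isa_mem_base untouched.
 */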

/**
 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
 * @hose: newly allocated pci_controller to be setup
 * @dev: device node of the host bridge
 * @primary: set if primary bus (32 bits only, soon to be deprecated)
 *
 * This function will parse the "ranges" property of a PCI host bridge device
 * node and setup the resource mapping of a pci controller based on its
 * content.
 *
 * Life would be boring if it wasn't for a few issues that we have to deal
 * with here:
 *
 *   - We can only cope with one IO space range and up to 3 Memory space
 *     ranges. However, some machines (thanks Apple !) tend to split their
 *     space into lots of small contiguous ranges. So we have to coalesce.
 *
 *   - Some busses have IO space not starting at 0, which causes trouble with
 *     the way we do our IO resource renumbering. The code somewhat deals with
 *     it for 64 bits but I would expect problems on 32 bits.
 *
 *   - Some 32 bits platforms such as 4xx can have physical space larger than
 *     32 bits so we need to use 64 bits values for the parsing
 */
void pci_process_bridge_OF_ranges(struct pci_controller *hose,
                                  struct device_node *dev, int primary)
{
        int memno = 0;
        struct resource *res;
        struct of_pci_range range;
        struct of_pci_range_parser parser;

        printk(KERN_INFO "PCI host bridge %pOF %s ranges:\n",
               dev, primary ? "(primary)" : "");

        /* Check for ranges property */
        if (of_pci_range_parser_init(&parser, dev))
                return;

        /* Parse it */
        for_each_of_pci_range(&parser, &range) {
                /* If we failed translation or got a zero-sized region
                 * (some FW try to feed us with non sensical zero sized regions
                 * such as power3 which look like some kind of attempt at exposing
                 * the VGA memory hole)
                 */
                if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
                        continue;

                /* Act based on address space type */
                res = NULL;
                switch (range.flags & IORESOURCE_TYPE_BITS) {
                case IORESOURCE_IO:
                        printk(KERN_INFO
                               "  IO 0x%016llx..0x%016llx -> 0x%016llx\n",
                               range.cpu_addr, range.cpu_addr + range.size - 1,
                               range.pci_addr);

                        /* We support only one IO range */
                        if (hose->pci_io_size) {
                                printk(KERN_INFO
                                       " \\--> Skipped (too many) !\n");
                                continue;
                        }
#ifdef CONFIG_PPC32
                        /* On 32 bits, limit I/O space to 16MB */
                        if (range.size > 0x01000000)
                                range.size = 0x01000000;

                        /* 32 bits needs to map IOs here */
                        hose->io_base_virt = ioremap(range.cpu_addr,
                                                     range.size);

                        /* Expect trouble if pci_addr is not 0 */
                        if (primary)
                                isa_io_base =
                                        (unsigned long)hose->io_base_virt;
#endif /* CONFIG_PPC32 */
                        /* pci_io_size and io_base_phys always represent IO
                         * space starting at 0 so we factor in pci_addr
                         */
                        hose->pci_io_size = range.pci_addr + range.size;
                        hose->io_base_phys = range.cpu_addr - range.pci_addr;

                        /* Build resource */
                        res = &hose->io_resource;
                        range.cpu_addr = range.pci_addr;
                        break;
                case IORESOURCE_MEM:
                        printk(KERN_INFO
                               " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
                               range.cpu_addr, range.cpu_addr + range.size - 1,
                               range.pci_addr,
                               (range.pci_space & 0x40000000) ?
                               "Prefetch" : "");

                        /* We support only 3 memory ranges */
                        if (memno >= 3) {
                                printk(KERN_INFO
                                       " \\--> Skipped (too many) !\n");
                                continue;
                        }
                        /* Handles ISA memory hole space here */
                        if (range.pci_addr == 0) {
                                if (primary || isa_mem_base == 0)
                                        isa_mem_base = range.cpu_addr;
                                hose->isa_mem_phys