cregit-Linux: how code gets into the kernel

Release 4.14: arch/powerpc/sysdev/fsl_pci.c

/*
 * MPC83xx/85xx/86xx PCI/PCIE support routing.
 *
 * Copyright 2007-2012 Freescale Semiconductor, Inc.
 * Copyright 2008-2009 MontaVista Software, Inc.
 *
 * Initial author: Xianghua Xiao <x.xiao@freescale.com>
 * Recode: ZHANG WEI <wei.zhang@freescale.com>
 * Rewrite the routing for Freescale PCI and PCI Express
 *      Roy Zang <tie-fei.zang@freescale.com>
 * MPC83xx PCI-Express support:
 *      Tony Li <tony.li@freescale.com>
 *      Anton Vorontsov <avorontsov@ru.mvista.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/fsl/edac.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/memblock.h>
#include <linux/log2.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/uaccess.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/machdep.h>
#include <asm/mpc85xx.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>



static int fsl_pcie_bus_fixup, is_mpc83xx_pci;


static void quirk_fsl_pcie_early(struct pci_dev *dev)
{
	u8 hdr_type;

	/* if we aren't a PCIe don't bother */
	if (!pci_is_pcie(dev))
		return;

	/* if we aren't in host mode don't bother */
	pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type);
	if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
		return;

	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
	fsl_pcie_bus_fixup = 1;
	return;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Anton Vorontsov         32      54.24%   1        20.00%
Kumar Gala              16      27.12%   1        20.00%
Minghuan Lian           9       15.25%   1        20.00%
Yijing Wang             1       1.69%    1        20.00%
Chunhe Lan              1       1.69%    1        20.00%
Total                   59      100.00%  5        100.00%

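A note on the header-type test above: bit 7 of the PCI Header Type register is only the multi-function flag, so the low seven bits are masked off before the layout is compared against PCI_HEADER_TYPE_BRIDGE (a type 1, bridge-style header, which these controllers present in host/root-complex mode). Below is a minimal standalone C sketch of that masking; the sample value 0x81 is illustrative and not taken from this driver.

#include <stdio.h>
#include <stdint.h>

#define PCI_HEADER_TYPE_BRIDGE	1	/* type 1 (bridge) layout, as in pci_regs.h */

int main(void)
{
	uint8_t hdr_type = 0x81;	/* hypothetical read-back: multi-function bit + type 1 */

	/* Strip the multi-function flag (bit 7) before comparing the layout. */
	if ((hdr_type & 0x7f) == PCI_HEADER_TYPE_BRIDGE)
		printf("type 1 header: controller is in host (RC) mode\n");
	else
		printf("type 0 header: controller is an endpoint/agent\n");

	return 0;
}
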
static int fsl_indirect_read_config(struct pci_bus *, unsigned int, int, int, u32 *);
static int fsl_pcie_check_link(struct pci_controller *hose)
{
	u32 val = 0;

	if (hose->indirect_type & PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK) {
		if (hose->ops->read == fsl_indirect_read_config)
			__indirect_read_config(hose, hose->first_busno, 0,
					       PCIE_LTSSM, 4, &val);
		else
			early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val);
		if (val < PCIE_LTSSM_L0)
			return 1;
	} else {
		struct ccsr_pci __iomem *pci = hose->private_data;
		/* for PCIe IP rev 3.0 or greater use CSR0 for link state */
		val = (in_be32(&pci->pex_csr0) & PEX_CSR0_LTSSM_MASK)
				>> PEX_CSR0_LTSSM_SHIFT;
		if (val != PEX_CSR0_LTSSM_L0)
			return 1;
	}

	return 0;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Roy Zang                44      36.67%   1        20.00%
Kumar Gala              28      23.33%   1        20.00%
Rojhalat Ibrahim        27      22.50%   1        20.00%
Anton Vorontsov         17      14.17%   1        20.00%
Kim Phillips            4       3.33%    1        20.00%
Total                   120     100.00%  5        100.00%


static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
				    int offset, int len, u32 *val)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	if (fsl_pcie_check_link(hose))
		hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
	else
		hose->indirect_type &= ~PPC_INDIRECT_TYPE_NO_PCIE_LINK;

	return indirect_read_config(bus, devfn, offset, len, val);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Rojhalat Ibrahim        70      100.00%  1        100.00%
Total                   70      100.00%  1        100.00%

#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)

static struct pci_ops fsl_indirect_pcie_ops =
{
	.read = fsl_indirect_read_config,
	.write = indirect_write_config,
};

static u64 pci64_dma_offset;

#ifdef CONFIG_SWIOTLB
static void setup_swiotlb_ops(struct pci_controller *hose)
{
	if (ppc_swiotlb_enable) {
		hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb;
		set_pci_dma_ops(&swiotlb_dma_ops);
	}
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Daniel Axtens           31      100.00%  1        100.00%
Total                   31      100.00%  1        100.00%

#else
static inline void setup_swiotlb_ops(struct pci_controller *hose) {}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Daniel Axtens           11      100.00%  1        100.00%
Total                   11      100.00%  1        100.00%

#endif
static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	/*
	 * Fix up PCI devices that are able to DMA to the large inbound
	 * mapping that allows addressing any RAM address from across PCI.
	 */
	if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
		set_dma_ops(dev, &dma_direct_ops);
		set_dma_offset(dev, pci64_dma_offset);
	}

	*dev->dma_mask = dma_mask;
	return 0;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Kumar Gala              67      88.16%   1        33.33%
Scott Wood              6       7.89%    1        33.33%
Yijing Wang             3       3.95%    1        33.33%
Total                   76      100.00%  3        100.00%

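To make the mask check above concrete: setup_pci_atmu() below programs the large 64-bit inbound window with a PCI base address of pci64_dma_offset and the same value as its size, so it spans [pci64_dma_offset, 2 * pci64_dma_offset) on the PCI side while mapping to RAM starting at zero. A device therefore needs a DMA mask covering 2 * pci64_dma_offset - 1 before dma_direct_ops plus the offset can be used. The following is a standalone sketch of that arithmetic, assuming 8 GiB of RAM (the numbers are illustrative only).

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t mem = 8ULL << 30;	/* assumed RAM size: 8 GiB */
	unsigned int mem_log = 0;

	/* Round the RAM size up to a power of two, as setup_pci_atmu() does. */
	while ((1ULL << mem_log) < mem)
		mem_log++;

	uint64_t pci64_dma_offset = 1ULL << mem_log;
	uint64_t window_last = pci64_dma_offset * 2 - 1;

	printf("inbound window: PCI 0x%llx..0x%llx -> RAM 0x0..0x%llx\n",
	       (unsigned long long)pci64_dma_offset,
	       (unsigned long long)window_last,
	       (unsigned long long)(pci64_dma_offset - 1));
	printf("required DMA mask: at least 0x%llx\n",
	       (unsigned long long)window_last);
	return 0;
}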

static int setup_one_atmu(struct ccsr_pci __iomem *pci,
	unsigned int index, const struct resource *res,
	resource_size_t offset)
{
	resource_size_t pci_addr = res->start - offset;
	resource_size_t phys_addr = res->start;
	resource_size_t size = resource_size(res);
	u32 flags = 0x80044000; /* enable & mem R/W */
	unsigned int i;

	pr_debug("PCI MEM resource start 0x%016llx, size 0x%016llx.\n",
		(u64)res->start, (u64)size);

	if (res->flags & IORESOURCE_PREFETCH)
		flags |= 0x10000000; /* enable relaxed ordering */

	for (i = 0; size > 0; i++) {
		unsigned int bits = min_t(u32, ilog2(size),
					__ffs(pci_addr | phys_addr));

		if (index + i >= 5)
			return -1;

		out_be32(&pci->pow[index + i].potar, pci_addr >> 12);
		out_be32(&pci->pow[index + i].potear, (u64)pci_addr >> 44);
		out_be32(&pci->pow[index + i].powbar, phys_addr >> 12);
		out_be32(&pci->pow[index + i].powar, flags | (bits - 1));

		pci_addr += (resource_size_t)1U << bits;
		phys_addr += (resource_size_t)1U << bits;
		size -= (resource_size_t)1U << bits;
	}

	return i;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Trent Piepho            243     97.20%   2        40.00%
Anton Blanchard         3       1.20%    1        20.00%
Joe Perches             3       1.20%    1        20.00%
Roy Zang                1       0.40%    1        20.00%
Total                   250     100.00%  5        100.00%

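The loop in setup_one_atmu() splits each outbound resource into at most a few windows because every ATMU window must be a power of two in size and naturally aligned on both its PCI and physical base addresses; `bits` is the largest exponent satisfying both constraints at the current position. The following is a standalone sketch of just that splitting arithmetic, with made-up addresses and simple stand-ins for the kernel's ilog2() and __ffs().

#include <stdio.h>
#include <stdint.h>

static unsigned int ilog2_u64(uint64_t v)	/* floor(log2(v)), v > 0 */
{
	unsigned int r = 0;
	while (v >>= 1)
		r++;
	return r;
}

static unsigned int ffs0_u64(uint64_t v)	/* index of lowest set bit, v != 0 */
{
	unsigned int r = 0;
	while (!(v & 1)) {
		v >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	/* Hypothetical resource: PCI 0xc0000000, CPU 0x1b0000000, 768 MiB long. */
	uint64_t pci_addr  = 0xc0000000ULL;
	uint64_t phys_addr = 0x1b0000000ULL;
	uint64_t size      = 0x30000000ULL;

	while (size > 0) {
		unsigned int bits = ilog2_u64(size);
		unsigned int align = ffs0_u64(pci_addr | phys_addr);

		if (align < bits)		/* same as the min_t(u32, ...) above */
			bits = align;

		/* In the driver each window also consumes one of the usable
		 * outbound ATMUs (pow[1]..pow[4]); here we just print it. */
		printf("window: pci 0x%09llx  phys 0x%09llx  size 2^%u\n",
		       (unsigned long long)pci_addr,
		       (unsigned long long)phys_addr, bits);

		pci_addr  += 1ULL << bits;
		phys_addr += 1ULL << bits;
		size      -= 1ULL << bits;
	}
	return 0;
}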

static bool is_kdump(void)
{
	struct device_node *node;

	node = of_find_node_by_type(NULL, "memory");
	if (!node) {
		WARN_ON_ONCE(1);
		return false;
	}

	return of_property_read_bool(node, "linux,usable-memory");
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Scott Wood              45      100.00%  1        100.00%
Total                   45      100.00%  1        100.00%

/* atmu setup for fsl pci/pcie controller */
static void setup_pci_atmu(struct pci_controller *hose)
{
	struct ccsr_pci __iomem *pci = hose->private_data;
	int i, j, n, mem_log, win_idx = 3, start_idx = 1, end_idx = 4;
	u64 mem, sz, paddr_hi = 0;
	u64 offset = 0, paddr_lo = ULLONG_MAX;
	u32 pcicsrbar = 0, pcicsrbar_sz;
	u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL |
			PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP;
	const u64 *reg;
	int len;
	bool setup_inbound;

	/*
	 * If this is kdump, we don't want to trigger a bunch of PCI
	 * errors by closing the window on in-flight DMA.
	 *
	 * We still run most of the function's logic so that things like
	 * hose->dma_window_size still get set.
	 */
	setup_inbound = !is_kdump();

	if (of_device_is_compatible(hose->dn, "fsl,bsc9132-pcie")) {
		/*
		 * BSC9132 Rev1.0 has an issue where all the PEX inbound
		 * windows have implemented the default target value as 0xf
		 * for CCSR space. In all Freescale legacy devices the target
		 * of 0xf is reserved for local memory space. 9132 Rev1.0
		 * now has local memory space mapped to target 0x0 instead of
		 * 0xf. Hence adding a workaround to remove the target 0xf
		 * defined for memory space from Inbound window attributes.
		 */
		piwar &= ~PIWAR_TGI_LOCAL;
	}

	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
		if (in_be32(&pci->block_rev1) >= PCIE_IP_REV_2_2) {
			win_idx = 2;
			start_idx = 0;
			end_idx = 3;
		}
	}

	/* Disable all windows (except powar0 since it's ignored) */
	for (i = 1; i < 5; i++)
		out_be32(&pci->pow[i].powar, 0);

	if (setup_inbound) {
		for (i = start_idx; i < end_idx; i++)
			out_be32(&pci->piw[i].piwar, 0);
	}

	/* Setup outbound MEM window */
	for (i = 0, j = 1; i < 3; i++) {
		if (!(hose->mem_resources[i].flags & IORESOURCE_MEM))
			continue;

		paddr_lo = min(paddr_lo, (u64)hose->mem_resources[i].start);
		paddr_hi = max(paddr_hi, (u64)hose->mem_resources[i].end);

		/* We assume all memory resources have the same offset */
		offset = hose->mem_offset[i];
		n = setup_one_atmu(pci, j, &hose->mem_resources[i], offset);

		if (n < 0 || j >= 5) {
			pr_err("Ran out of outbound PCI ATMUs for resource %d!\n", i);
			hose->mem_resources[i].flags |= IORESOURCE_DISABLED;
		} else
			j += n;
	}

	/* Setup outbound IO window */
	if (hose->io_resource.flags & IORESOURCE_IO) {
		if (j >= 5) {
			pr_err("Ran out of outbound PCI ATMUs for IO resource\n");
		} else {
			pr_debug("PCI IO resource start 0x%016llx, size 0x%016llx, "
				 "phy base 0x%016llx.\n",
				 (u64)hose->io_resource.start,
				 (u64)resource_size(&hose->io_resource),
				 (u64)hose->io_base_phys);
			out_be32(&pci->pow[j].potar, (hose->io_resource.start >> 12));
			out_be32(&pci->pow[j].potear, 0);
			out_be32(&pci->pow[j].powbar, (hose->io_base_phys >> 12));
			/* Enable, IO R/W */
			out_be32(&pci->pow[j].powar, 0x80088000
				| (ilog2(hose->io_resource.end
				- hose->io_resource.start + 1) - 1));
		}
	}

	/* convert to pci address space */
	paddr_hi -= offset;
	paddr_lo -= offset;

	if (paddr_hi == paddr_lo) {
		pr_err("%pOF: No outbound window space\n", hose->dn);
		return;
	}

	if (paddr_lo == 0) {
		pr_err("%pOF: No space for inbound window\n", hose->dn);
		return;
	}

	/* setup PCSRBAR/PEXCSRBAR */
	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, 0xffffffff);
	early_read_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, &pcicsrbar_sz);
	pcicsrbar_sz = ~pcicsrbar_sz + 1;

	if (paddr_hi < (0x100000000ull - pcicsrbar_sz) ||
		(paddr_lo > 0x100000000ull))
		pcicsrbar = 0x100000000ull - pcicsrbar_sz;
	else
		pcicsrbar = (paddr_lo - pcicsrbar_sz) & -pcicsrbar_sz;
	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, pcicsrbar);

	paddr_lo = min(paddr_lo, (u64)pcicsrbar);

	pr_info("%pOF: PCICSRBAR @ 0x%x\n", hose->dn, pcicsrbar);

	/* Setup inbound mem window */
	mem = memblock_end_of_DRAM();
	pr_info("%s: end of DRAM %llx\n", __func__, mem);

	/*
	 * The msi-address-64 property, if it exists, indicates the physical
	 * address of the MSIIR register.  Normally, this register is located
	 * inside CCSR, so the ATMU that covers all of CCSR is used. But if
	 * this property exists, then we normally need to create a new ATMU
	 * for it.  For now, however, we cheat.  The only entity that creates
	 * this property is the Freescale hypervisor, and the address is
	 * specified in the partition configuration.  Typically, the address
	 * is located in the page immediately after the end of DDR.  If so, we
	 * can avoid allocating a new ATMU by extending the DDR ATMU by one
	 * page.
	 */
	reg = of_get_property(hose->dn, "msi-address-64", &len);
	if (reg && (len == sizeof(u64))) {
		u64 address = be64_to_cpup(reg);

		if ((address >= mem) && (address < (mem + PAGE_SIZE))) {
			pr_info("%pOF: extending DDR ATMU to cover MSIIR", hose->dn);
			mem += PAGE_SIZE;
		} else {
			/* TODO: Create a new ATMU for MSIIR */
			pr_warn("%pOF: msi-address-64 address of %llx is "
				"unsupported\n", hose->dn, address);
		}
	}

	sz = min(mem, paddr_lo);
	mem_log = ilog2(sz);

	/* PCIe can overmap inbound & outbound since RX & TX are separated */
	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
		/* Size window to exact size if power-of-two or one size up */
		if ((1ull << mem_log) != mem) {
			mem_log++;
			if ((1ull << mem_log) > mem)
				pr_info("%pOF: Setting PCI inbound window "
					"greater than memory size\n", hose->dn);
		}

		piwar |= ((mem_log - 1) & PIWAR_SZ_MASK);

		if (setup_inbound) {
			/* Setup inbound memory window */
			out_be32(&pci->piw[win_idx].pitar,  0x00000000);
			out_be32(&pci->piw[win_idx].piwbar, 0x00000000);
			out_be32(&pci->piw[win_idx].piwar,  piwar);
		}

		win_idx--;
		hose->dma_window_base_cur = 0x00000000;
		hose->dma_window_size = (resource_size_t)sz;

		/*
		 * if we have >4G of memory setup second PCI inbound window to
		 * let devices that are 64-bit address capable to work w/o
		 * SWIOTLB and access the full range of memory
		 */
		if (sz != mem) {
			mem_log = ilog2(mem);

			/* Size window up if we dont fit in exact power-of-2 */
			if ((1ull << mem_log) != mem)
				mem_log++;

			piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1);
			pci64_dma_offset = 1ULL << mem_log;

			if (setup_inbound) {
				/* Setup inbound memory window */
				out_be32(&pci->piw[win_idx].pitar,  0x00000000);
				out_be32(&pci->piw[win_idx].piwbear,
						pci64_dma_offset >> 44);
				out_be32(&pci->piw[win_idx].piwbar,
						pci64_dma_offset >> 12);
				out_be32(&pci->piw[win_idx].piwar,  piwar);
			}

			/*
			 * install our own dma_set_mask handler to fixup dma_ops
			 * and dma_offset
			 */
			ppc_md.dma_set_mask = fsl_pci_dma_set_mask;

			pr_info("%pOF: Setup 64-bit PCI DMA window\n", hose->dn);
		}
	} else {
		u64 paddr = 0;

		if (setup_inbound) {
			/* Setup inbound memory window */
			out_be32(&pci->piw[win_idx].pitar,  paddr >> 12);
			out_be32(&pci->piw[win_idx].piwbar, paddr >> 12);
			out_be32(&pci->piw[win_idx].piwar,
				 (piwar | (mem_log - 1)));
		}

		win_idx--;
		paddr += 1ull << mem_log;
		sz -= 1ull << mem_log;

		if (sz) {
			mem_log = ilog2(sz);
			piwar |= (mem_log - 1);

			if (setup_inbound) {
				out_be32(&pci->piw[win_idx].pitar,
					 paddr >> 12);
				out_be32(&pci->piw[win_idx].piwbar,
					 paddr >> 12);
				out_be32(&pci->piw[win_idx].piwar, piwar);
			}

			win_idx--;
			paddr += 1ull << mem_log;
		}

		hose->dma_window_base_cur = 0x00000000;
		hose->dma_window_size = (resource_size_t)paddr;
	}

	if (hose->dma_window_size < mem) {
#ifdef CONFIG_SWIOTLB
		ppc_swiotlb_enable = 1;
#else
		pr_err("%pOF: ERROR: Memory size exceeds PCI ATMU ability to "
			"map - enable CONFIG_SWIOTLB to avoid dma errors.\n",
			hose->dn);
#endif
		/* adjusting outbound windows could reclaim space in mem map */
		if (paddr_hi < 0xffffffffull)
			pr_warning("%pOF: WARNING: Outbound window cfg leaves "
				"gaps in memory map. Adjusting the memory map "
				"could reduce unnecessary bounce buffering.\n",
				hose->dn);

		pr_info("%pOF: DMA window size is 0x%llx\n", hose->dn,
			(u64)hose->dma_window_size);
	}
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Kumar Gala              652     49.77%   4        16.67%
Roy Zang                179     13.66%   3        12.50%
Jon Loeliger            154     11.76%   1        4.17%
Timur Tabi              86      6.56%    1        4.17%
Trent Piepho            62      4.73%    1        4.17%
Scott Wood              55      4.20%    2        8.33%
Rob Herring             40      3.05%    1        4.17%
Harninder Rai           19      1.45%    1        4.17%
Benjamin Herrenschmidt  17      1.30%    1        4.17%
Prabhakar Kushwaha      15      1.15%    1        4.17%
Kevin Hao               13      0.99%    3        12.50%
Becky Bruce             11      0.84%    1        4.17%
Joe Perches             4       0.31%    1        4.17%
Yinghai Lu              1       0.08%    1        4.17%
Anton Vorontsov         1       0.08%    1        4.17%
Grant C. Likely         1       0.08%    1        4.17%
Total                   1310    100.00%  24       100.00%

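One detail of the function above worth spelling out is the PCSRBAR/PEXCSRBAR placement: it uses the standard BAR-sizing trick (write all ones, read back, two's-complement negate to get the size), then parks the register window either just below 4 GiB or just below the lowest outbound PCI address, aligned to its own size. The following is a standalone sketch of that arithmetic with assumed values (a 1 MiB CSR BAR and outbound PCI space at 0x80000000..0xc0000000).

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Assumed read-back after writing 0xffffffff to BAR0 of a 1 MiB BAR,
	 * plus an assumed outbound PCI address range. */
	uint32_t bar_readback = 0xfff00000;
	uint64_t paddr_lo = 0x80000000ull;
	uint64_t paddr_hi = 0xc0000000ull;

	uint32_t pcicsrbar_sz = ~bar_readback + 1;	/* BAR size: 0x00100000 */
	uint32_t pcicsrbar;

	if (paddr_hi < (0x100000000ull - pcicsrbar_sz) ||
	    (paddr_lo > 0x100000000ull))
		pcicsrbar = 0x100000000ull - pcicsrbar_sz;	/* just below 4 GiB */
	else
		pcicsrbar = (paddr_lo - pcicsrbar_sz) & -pcicsrbar_sz; /* just below outbound space */

	printf("CSR BAR size 0x%x, placed at 0x%08x\n", pcicsrbar_sz, pcicsrbar);
	return 0;
}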

static void __init setup_pci_cmd(struct pci_controller *hose)
{
	u16 cmd;
	int cap_x;

	early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd);
	cmd |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
		| PCI_COMMAND_IO;
	early_write_config_word(hose, 0, 0, PCI_COMMAND, cmd);

	cap_x = early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX);
	if (cap_x) {
		int pci_x_cmd = cap_x + PCI_X_CMD;
		cmd = PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ
			| PCI_X_CMD_ERO | PCI_X_CMD_DPERR_E;
		early_write_config_word(hose, 0, 0, pci_x_cmd, cmd);
	} else {
		early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
	}
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Jon Loeliger            62      51.67%   1        20.00%
Kumar Gala              56      46.67%   2        40.00%
Anton Vorontsov         1       0.83%    1        20.00%
Roy Zang                1       0.83%    1        20.00%
Total                   120     100.00%  5        100.00%


void fsl_pcibios_fixup_bus(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	int i, is_pcie = 0, no_link;

	/* The root complex bridge comes up with bogus resources,
	 * we copy the PHB ones in.
	 *
	 * With the current generic PCI code, the PHB bus no longer
	 * has bus->resource[0..4] set, so things are a bit more
	 * tricky.
	 */

	if (fsl_pcie_bus_fixup)
		is_pcie = early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
	no_link = !!(hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK);

	if (bus->parent == hose->bus && (is_pcie || no_link)) {
		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; ++i) {
			struct resource *res = bus->resource[i];
			struct resource *par;

			if (!res)
				continue;
			if (i == 0)
				par = &hose->io_resource;
			else if (i < 4)
				par = &hose->mem_resources[i-1];
			else
				par = NULL;

			res->start = par ? par->start : 0;
			res->end   = par ? par->end   : 0;
			res->flags = par ? par->flags : 0;
		}
	}
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Kumar Gala              113     60.11%   3        75.00%
Benjamin Herrenschmidt  75      39.89%   1        25.00%
Total                   188     100.00%  4        100.00%


int fsl_add_bridge(struct platform_device *pdev, int is_primary)
{
	int len;
	struct pci_controller *hose;
	struct resource rsrc;
	const int *bus_range;
	u8 hdr_type, progif;
	struct device_node *dev;
	struct ccsr_pci __iomem *pci;
	u16 temp;
	u32 svr = mfspr(SPRN_SVR);

	dev = pdev->dev.of_node;

	if (!of_device_is_available(dev)) {
		pr_warning("%pOF: disabled\n", dev);
		return -ENODEV;
	}

	pr_debug("Adding PCI host bridge %pOF\n", dev);

	/* Fetch host bridge registers address */
	if (of_address_to_resource(dev, 0, &rsrc)) {
		printk(KERN_WARNING "Can't get pci register base!");
		return -ENOMEM;
	}

	/* Get bus range if any */
	bus_range = of_get_property(dev, "bus-range", &len);
	if (bus_range == NULL || len < 2 * sizeof(int))
		printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
			" bus 0\n", dev);

	pci_add_flags(PCI_REASSIGN_ALL_BUS);
	hose = pcibios_alloc_controller(dev);
	if (!hose)
		return -ENOMEM;

	/* set platform device as the parent */
	hose->parent = &pdev->dev;
	hose->first_busno = bus_range ? bus_range[0] : 0x0;
	hose->last_busno = bus_range ? bus_range[1] : 0xff;

	pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
		 (u64)rsrc.start, (u64)resource_size(&rsrc));

	pci = hose->private_data = ioremap(rsrc.start, resource_size(&rsrc));
	if (!hose->private_data)
		goto no_bridge;

	setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
			   PPC_INDIRECT_TYPE_BIG_ENDIAN);

	if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
		hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;

	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
		/* use fsl_indirect_read_config for PCIe */
		hose->ops = &fsl_indirect_pcie_ops;
		/* For PCIE read HEADER_TYPE to identify controller mode */
		early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
		if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
			goto no_bridge;

	} else {
		/* For PCI read PROG to identify controller mode */
		early_read_config_byte(hose, 0, 0, PCI_CLASS_PROG, &progif);
		if ((progif & 1) &&
		    !of_property_read_bool(dev, "fsl,pci-agent-force-enum"))
			goto no_bridge;
	}

	setup_pci_cmd(hose);

	/* check PCI express link status */
	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
		hose->indirect_type |= PPC_INDIRECT_TYPE_EXT_REG |
			PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS;
		if (fsl_pcie_check_link(hose))
			hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
	} else {
		/*
		 * Set PBFR(PCI Bus Function Register)[10] = 1 to
		 * disable the combining of crossing cacheline
		 * boundary requests into one burst transaction.
		 * PCI-X operation is not affected.
		 * Fix erratum PCI 5 on MPC8548
		 */
#define PCI_BUS_FUNCTION 0x44
#define PCI_BUS_FUNCTION_MDS 0x400	/* Master disable streaming */
		if (((SVR_SOC_VER(svr) == SVR_8543) ||
		     (SVR_SOC_VER(svr) == SVR_8545) ||
		     (SVR_SOC_VER(svr) == SVR_8547) ||
		     (SVR_SOC_VER(svr) == SVR_8548)) &&
		    !early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX)) {
			early_read_config_word(hose, 0, 0, PCI_BUS_FUNCTION