Release 4.14 arch/ia64/hp/common/sba_iommu.c

/*
**  IA64 System Bus Adapter (SBA) I/O MMU manager
**
**      (c) Copyright 2002-2005 Alex Williamson
**      (c) Copyright 2002-2003 Grant Grundler
**      (c) Copyright 2002-2005 Hewlett-Packard Company
**
**      Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
**      Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
**
**      This program is free software; you can redistribute it and/or modify
**      it under the terms of the GNU General Public License as published by
**      the Free Software Foundation; either version 2 of the License, or
**      (at your option) any later version.
**
**
** This module initializes the IOC (I/O Controller) found on HP
** McKinley machines and their successors.
**
*/

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <linux/bitops.h>         /* hweight64() */
#include <linux/crash_dump.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>

#include <asm/delay.h>		/* ia64_get_itc() */
#include <asm/io.h>
#include <asm/page.h>		/* PAGE_OFFSET */
#include <asm/dma.h>

#include <asm/acpi-ext.h>

extern int swiotlb_late_init_with_default_size (size_t size);


#define PFX "IOC: "

/*
** Enables timing of the pdir resource map search.  Output in /proc.
** Disabled by default to optimize performance.
*/

#undef PDIR_SEARCH_TIMING

/*
** This option allows cards capable of 64bit DMA to bypass the IOMMU.  If
** not defined, all DMA will be 32bit and go through the TLB.
** There's potentially a conflict in the bio merge code with us
** advertising an iommu, but then bypassing it.  Since I/O MMU bypassing
** appears to give more performance than bio-level virtual merging, we'll
** do the former for now.  NOTE: BYPASS_SG also needs to be undef'd to
** completely restrict DMA to the IOMMU.
*/

#define ALLOW_IOV_BYPASS

/*
** This option specifically allows/disallows bypassing scatterlists with
** multiple entries.  Coalescing these entries can allow better DMA streaming
** and in some cases shows better performance than entirely bypassing the
** IOMMU.  Performance increase on the order of 1-2% sequential output/input
** using bonnie++ on a RAID0 MD device (sym2 & mpt).
*/

#undef ALLOW_IOV_BYPASS_SG

/*
** If a device prefetches beyond the end of a valid pdir entry, it will cause
** a hard failure, ie. MCA.  Version 3.0 and later of the zx1 LBA should
** disconnect on 4k boundaries and prevent such issues.  If the device is
** particularly aggressive, this option will keep the entire pdir valid such
** that prefetching will hit a valid address.  This could severely impact
** error containment, and is therefore off by default.  The page that is
** used for spill-over is poisoned, so that should help debugging somewhat.
*/

#undef FULL_VALID_PDIR


#define ENABLE_MARK_CLEAN

/*
** The number of debug flags is a clue - this code is fragile.  NOTE: since
** tightening the use of res_lock the resource bitmap and actual pdir are no
** longer guaranteed to stay in sync.  The sanity checking code isn't going to
** like that.
*/

#undef DEBUG_SBA_INIT

#undef DEBUG_SBA_RUN

#undef DEBUG_SBA_RUN_SG

#undef DEBUG_SBA_RESOURCE

#undef ASSERT_PDIR_SANITY

#undef DEBUG_LARGE_SG_ENTRIES

#undef DEBUG_BYPASS

#if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
#error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
#endif


#define SBA_INLINE	__inline__
/* #define SBA_INLINE */

#ifdef DEBUG_SBA_INIT

#define DBG_INIT(x...)	printk(x)
#else

#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN

#define DBG_RUN(x...)	printk(x)
#else

#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG

#define DBG_RUN_SG(x...)	printk(x)
#else

#define DBG_RUN_SG(x...)
#endif


#ifdef DEBUG_SBA_RESOURCE

#define DBG_RES(x...)	printk(x)
#else

#define DBG_RES(x...)
#endif

#ifdef DEBUG_BYPASS

#define DBG_BYPASS(x...)	printk(x)
#else

#define DBG_BYPASS(x...)
#endif

#ifdef ASSERT_PDIR_SANITY

#define ASSERT(expr) \
        if(!(expr)) { \
                printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \
                panic(#expr); \
        }
#else

#define ASSERT(expr)
#endif

/*
** The number of pdir entries to "free" before issuing
** a read to the PCOM register to flush out PCOM writes.
** Interacts with allocation granularity (ie 4 or 8 entries
** allocated and free'd/purged at a time might make this
** less interesting).
*/

#define DELAYED_RESOURCE_CNT	64


#define PCI_DEVICE_ID_HP_SX2000_IOC	0x12ec


#define ZX1_IOC_ID	((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)

#define ZX2_IOC_ID	((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP)

#define REO_IOC_ID	((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)

#define SX1000_IOC_ID	((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)

#define SX2000_IOC_ID	((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP)


#define ZX1_IOC_OFFSET	0x1000	/* ACPI reports SBA, we want IOC */

#define IOC_FUNC_ID	0x000
#define IOC_FCLASS	0x008	/* function class, bist, header, rev... */
#define IOC_IBASE	0x300	/* IO TLB */
#define IOC_IMASK	0x308
#define IOC_PCOM	0x310
#define IOC_TCNFG	0x318
#define IOC_PDIR_BASE	0x320

#define IOC_ROPE0_CFG	0x500
#define   IOC_ROPE_AO	  0x10	/* Allow "Relaxed Ordering" */


/* AGP GART driver looks for this */

#define ZX1_SBA_IOMMU_COOKIE	0x0000badbadc0ffeeUL

/*
** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register)
**
** Some IOCs (sx1000) can run at the above page sizes, but are
** really only supported using the IOC at a 4k page size.
**
** iovp_size could only be greater than PAGE_SIZE if we are
** confident the drivers really only touch the next physical
** page iff that driver instance owns it.
*/

static unsigned long iovp_size;

static unsigned long iovp_shift;

static unsigned long iovp_mask;


struct ioc {
	void __iomem	*ioc_hpa;	/* I/O MMU base address */
	char		*res_map;	/* resource map, bit == pdir entry */
	u64		*pdir_base;	/* physical base address */
	unsigned long	ibase;		/* pdir IOV Space base */
	unsigned long	imask;		/* pdir IOV Space mask */

	unsigned long	*res_hint;	/* next avail IOVP - circular search */
	unsigned long	dma_mask;
	spinlock_t	res_lock;	/* protects the resource bitmap, but must be held when */
					/* clearing pdir to prevent races with allocations. */
	unsigned int	res_bitshift;	/* from the RIGHT! */
	unsigned int	res_size;	/* size of resource map in bytes */
#ifdef CONFIG_NUMA
	unsigned int	node;		/* node where this IOC lives */
#endif
#if DELAYED_RESOURCE_CNT > 0
	spinlock_t	saved_lock;	/* may want to try to get this on a separate cacheline */
					/* than res_lock for bigger systems. */
	int		saved_cnt;
	struct sba_dma_pair {
		dma_addr_t	iova;
		size_t		size;
	} saved[DELAYED_RESOURCE_CNT];
#endif

#ifdef PDIR_SEARCH_TIMING
#define SBA_SEARCH_SAMPLE	0x100
	unsigned long avg_search[SBA_SEARCH_SAMPLE];
	unsigned long avg_idx;	/* current index into avg_search */
#endif

	/* Stuff we don't need in performance path */
	struct ioc	*next;		/* list of IOC's in system */
	acpi_handle	handle;		/* for multiple IOC's */
	const char 	*name;
	unsigned int	func_id;
	unsigned int	rev;		/* HW revision of chip */
	u32		iov_size;
	unsigned int	pdir_size;	/* in bytes, determined by IOV Space size */
	struct pci_dev	*sac_only_dev;
};



static struct ioc *ioc_list, *ioc_found;

static int reserve_sba_gart = 1;

static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);


#define sba_sg_address(sg)	sg_virt((sg))

#ifdef FULL_VALID_PDIR

static u64 prefetch_spill_page;
#endif

#ifdef CONFIG_PCI

# define GET_IOC(dev)	((dev_is_pci(dev))                                              \
                         ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
#else

# define GET_IOC(dev)	NULL
#endif

/*
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
** (or rather not merge) DMAs into manageable chunks.
** On parisc, this is more of a software/tuning constraint
** than a HW one.  I/O MMU allocation algorithms can be
** faster with smaller sizes (to some degree).
*/

#define DMA_CHUNK_SIZE  (BITS_PER_LONG*iovp_size)


#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
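
A quick worked example of the macro above (illustrative numbers only; y must be a power of two, which is how every user in this file calls it):

/*
 * ROUNDUP(0x2345, 0x1000):
 *   0x2345 + 0xFFF = 0x3344;   0x3344 & ~0xFFF = 0x3000
 * i.e. x is rounded up to the next multiple of y.
 */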

/************************************
** SBA register read and write support
**
** BE WARNED: register writes are posted.
**  (ie follow writes which must reach HW with a read)
**
*/

#define READ_REG(addr)       __raw_readq(addr)

#define WRITE_REG(val, addr) __raw_writeq(val, addr)
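
Since the warning above is easy to miss, here is a minimal sketch of the posted-write rule (hypothetical helper, not part of the driver; the real call sites below open-code this pattern):

static inline void sba_flush_posted_write_example(struct ioc *ioc, u64 val)
{
	WRITE_REG(val, ioc->ioc_hpa + IOC_PCOM);	/* write is posted... */
	READ_REG(ioc->ioc_hpa + IOC_PCOM);		/* ...read back forces it to HW */
}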

#ifdef DEBUG_SBA_INIT

/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Print the size/location of the IO MMU PDIR.
 */

static void
sba_dump_tlb(char *hpa)
{
	DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);
	DBG_INIT("IOC_IBASE    : %016lx\n", READ_REG(hpa+IOC_IBASE));
	DBG_INIT("IOC_IMASK    : %016lx\n", READ_REG(hpa+IOC_IMASK));
	DBG_INIT("IOC_TCNFG    : %016lx\n", READ_REG(hpa+IOC_TCNFG));
	DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));
	DBG_INIT("\n");
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    74      100.00%  1        100.00%
Total                   74      100.00%  1        100.00%

#endif

#ifdef ASSERT_PDIR_SANITY

/**
 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU PDIR in human readable form.
 */
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
	/* start printing from lowest pde in rval */
	u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)];
	unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)];
	uint rcnt;

	printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
		 msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);

	rcnt = 0;
	while (rcnt < BITS_PER_LONG) {
		printk(KERN_DEBUG "%s %2d %p %016Lx\n",
		       (rcnt == (pide & (BITS_PER_LONG - 1)))
				? "    -->" : "       ",
		       rcnt, ptr, (unsigned long long) *ptr );
		rcnt++;
		ptr++;
	}
	printk(KERN_DEBUG "%s", msg);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    153     100.00%  4        100.00%
Total                   153     100.00%  4        100.00%

/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Verify the resource map and pdir state is consistent
 */
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
	u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]);
	u64 *rptr = (u64 *) ioc->res_map;	/* resource map ptr */
	u64 *pptr = ioc->pdir_base;	/* pdir ptr */
	uint pide = 0;

	while (rptr < rptr_end) {
		u64 rval;
		int rcnt; /* number of bits we might check */

		rval = *rptr;
		rcnt = 64;

		while (rcnt) {
			/* Get last byte and highest bit from that */
			u32 pde = ((u32)((*pptr >> (63)) & 0x1));
			if ((rval & 0x1) ^ pde)
			{
				/*
				** BUMMER!  -- res_map != pdir --
				** Dump rval and matching pdir entries
				*/
				sba_dump_pdir_entry(ioc, msg, pide);
				return(1);
			}
			rcnt--;
			rval >>= 1;	/* try the next bit */
			pptr++;
			pide++;
		}
		rptr++;	/* look at next word of res_map */
	}
	/* It'd be nice if we always got here :^) */
	return 0;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    163     100.00%  1        100.00%
Total                   163     100.00%  1        100.00%

/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * print the SG list so we can verify it's correct by hand.
 */
static void
sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	while (nents-- > 0) {
		printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
		       startsg->dma_address, startsg->dma_length,
		       sba_sg_address(startsg));
		startsg = sg_next(startsg);
	}
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    49      87.50%   3        60.00%
Jens Axboe              5       8.93%    1        20.00%
Björn Helgaas           2       3.57%    1        20.00%
Total                   56      100.00%  5        100.00%


static void
sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	struct scatterlist *the_sg = startsg;
	int the_nents = nents;

	while (the_nents-- > 0) {
		if (sba_sg_address(the_sg) == 0x0UL)
			sba_dump_sg(NULL, startsg, nents);
		the_sg = sg_next(the_sg);
	}
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    59      90.77%   1        33.33%
Jens Axboe              5       7.69%    1        33.33%
Björn Helgaas           1       1.54%    1        33.33%
Total                   65      100.00%  3        100.00%

#endif /* ASSERT_PDIR_SANITY */


/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1	/* could increase this to 4 or 8 if needed */

/* Convert from IOVP to IOVA and vice versa. */
#define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase))

#define PDIR_ENTRY_SIZE	sizeof(u64)

#define PDIR_INDEX(iovp)   ((iovp)>>iovp_shift)

#define RESMAP_MASK(n)    ~(~0UL << (n))
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)


/**
 * For most cases the normal get_order is sufficient, however it limits us
 * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity.
 * It only incurs about 1 clock cycle to use this one with the static variable
 * and makes the code more intuitive.
 */
static SBA_INLINE int
get_iovp_order (unsigned long size)
{
	long double d = size - 1;
	long order;

	order = ia64_getf_exp(d);
	order = order - iovp_shift - 0xffff + 1;
	if (order < 0)
		order = 0;
	return order;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Alex Williamson         52      100.00%  1        100.00%
Total                   52      100.00%  1        100.00%
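
get_iovp_order() reads the exponent field of an ia64 long double to get ceil(log2(size)) - iovp_shift without a loop. A portable sketch of the same computation, shown for comparison only (illustrative helper; not the code this driver uses):

static int get_iovp_order_portable_example(unsigned long size)
{
	/* fls64(size - 1) == ceil(log2(size)) for size > 1 */
	int order = fls64(size - 1) - iovp_shift;

	return (order > 0) ? order : 0;
}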


static unsigned long
ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
	    unsigned int bitshiftcnt)
{
	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
		+ bitshiftcnt;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
FUJITA Tomonori         44      100.00%  1        100.00%
Total                   44      100.00%  1        100.00%
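
To make the address conversions defined above concrete, a worked example with hypothetical values (assuming ioc->ibase == 0x40000000 and iovp_shift == 12):

/*
 * pide 5 -> iovp = 5 << 12       = 0x5000
 * SBA_IOVA(ioc, 0x5000, 0x123)   = 0x40000000 | 0x5000 | 0x123 = 0x40005123
 * SBA_IOVP(ioc, 0x40005123)      = 0x5123  (ibase stripped, offset kept)
 * PDIR_INDEX(0x5123)             = 0x5123 >> 12 = 5
 */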

/**
 * sba_search_bitmap - find free space in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @bits_wanted: number of entries we need.
 * @use_hint: use res_hint to indicate where to start looking
 *
 * Find consecutive free bits in resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 * Cool perf optimization: search for log2(size) bits at a time.
 */
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, struct device *dev,
		  unsigned long bits_wanted, int use_hint)
{
	unsigned long *res_ptr;
	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
	unsigned long flags, pide = ~0UL, tpide;
	unsigned long boundary_size;
	unsigned long shift;
	int ret;

	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
	ASSERT(res_ptr < res_end);

	boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1;
	boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift;

	BUG_ON(ioc->ibase & ~iovp_mask);
	shift = ioc->ibase >> iovp_shift;

	spin_lock_irqsave(&ioc->res_lock, flags);

	/* Allow caller to force a search through the entire resource space */
	if (likely(use_hint)) {
		res_ptr = ioc->res_hint;
	} else {
		res_ptr = (ulong *)ioc->res_map;
		ioc->res_bitshift = 0;
	}

	/*
	 * N.B.  REO/Grande defect AR2305 can cause TLB fetch timeouts
	 * if a TLB entry is purged while in use.  sba_mark_invalid()
	 * purges IOTLB entries in power-of-two sizes, so we also
	 * allocate IOVA space in power-of-two sizes.
	 */
	bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift);

	if (likely(bits_wanted == 1)) {
		unsigned int bitshiftcnt;
		for(; res_ptr < res_end ; res_ptr++) {
			if (likely(*res_ptr != ~0UL)) {
				bitshiftcnt = ffz(*res_ptr);
				*res_ptr |= (1UL << bitshiftcnt);
				pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
				ioc->res_bitshift = bitshiftcnt + bits_wanted;
				goto found_it;
			}
		}
		goto not_found;
	}

	if (likely(bits_wanted <= BITS_PER_LONG/2)) {
		/*
		** Search the resource bit map on well-aligned values.
		** "o" is the alignment.
		** We need the alignment to invalidate I/O TLB using
		** SBA HW features in the unmap path.
		*/
		unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift);
		uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
		unsigned long mask, base_mask;

		base_mask = RESMAP_MASK(bits_wanted);
		mask = base_mask << bitshiftcnt;

		DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
		for(; res_ptr < res_end ; res_ptr++)
		{
			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
			ASSERT(0 != mask);
			for (; mask ; mask <<= o, bitshiftcnt += o) {
				tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
				ret = iommu_is_span_boundary(tpide, bits_wanted,
							     shift,
							     boundary_size);
				if ((0 == ((*res_ptr) & mask)) && !ret) {
					*res_ptr |= mask;     /* mark resources busy! */
					pide = tpide;
					ioc->res_bitshift = bitshiftcnt + bits_wanted;
					goto found_it;
				}
			}

			bitshiftcnt = 0;
			mask = base_mask;
		}

	} else {
		int qwords, bits, i;
		unsigned long *end;

		qwords = bits_wanted >> 6; /* /64 */
		bits = bits_wanted - (qwords * BITS_PER_LONG);

		end = res_end - qwords;

		for (; res_ptr < end; res_ptr++) {
			tpide = ptr_to_pide(ioc, res_ptr, 0);
			ret = iommu_is_span_boundary(tpide, bits_wanted,
						     shift, boundary_size);
			if (ret)
				goto next_ptr;
			for (i = 0 ; i < qwords ; i++) {
				if (res_ptr[i] != 0)
					goto next_ptr;
			}
			if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits))
				continue;

			/* Found it, mark it */
			for (i = 0 ; i < qwords ; i++)
				res_ptr[i] = ~0UL;
			res_ptr[i] |= RESMAP_MASK(bits);

			pide = tpide;
			res_ptr += qwords;
			ioc->res_bitshift = bits;
			goto found_it;
next_ptr:
			;
		}
	}

not_found:
	prefetch(ioc->res_map);
	ioc->res_hint = (unsigned long *) ioc->res_map;
	ioc->res_bitshift = 0;
	spin_unlock_irqrestore(&ioc->res_lock, flags);
	return (pide);

found_it:
	ioc->res_hint = res_ptr;
	spin_unlock_irqrestore(&ioc->res_lock, flags);
	return (pide);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Alex Williamson         343     47.25%   3        33.33%
David Mosberger-Tang    239     32.92%   2        22.22%
FUJITA Tomonori         131     18.04%   1        11.11%
Björn Helgaas           11      1.52%    1        11.11%
Harvey Harrison         1       0.14%    1        11.11%
Nigel Croxon            1       0.14%    1        11.11%
Total                   726     100.00%  9        100.00%

/**
 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked and then mark those bits in the
 * resource bit map.
 */
static int
sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> iovp_shift;
#ifdef PDIR_SEARCH_TIMING
	unsigned long itc_start;
#endif
	unsigned long pide;

	ASSERT(pages_needed);
	ASSERT(0 == (size & ~iovp_mask));

#ifdef PDIR_SEARCH_TIMING
	itc_start = ia64_get_itc();
#endif
	/*
	** "seek and ye shall find"...praying never hurts either...
	*/
	pide = sba_search_bitmap(ioc, dev, pages_needed, 1);
	if (unlikely(pide >= (ioc->res_size << 3))) {
		pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
		if (unlikely(pide >= (ioc->res_size << 3))) {
#if DELAYED_RESOURCE_CNT > 0
			unsigned long flags;

			/*
			** With delayed resource freeing, we can give this one more shot.  We're
			** getting close to being in trouble here, so do what we can to make this
			** one count.
			*/
			spin_lock_irqsave(&ioc->saved_lock, flags);
			if (ioc->saved_cnt > 0) {
				struct sba_dma_pair *d;
				int cnt = ioc->saved_cnt;

				d = &(ioc->saved[ioc->saved_cnt - 1]);

				spin_lock(&ioc->res_lock);
				while (cnt--) {
					sba_mark_invalid(ioc, d->iova, d->size);
					sba_free_range(ioc, d->iova, d->size);
					d--;
				}
				ioc->saved_cnt = 0;
				READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
				spin_unlock(&ioc->res_lock);
			}
			spin_unlock_irqrestore(&ioc->saved_lock, flags);

			pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
			if (unlikely(pide >= (ioc->res_size << 3))) {
				printk(KERN_WARNING "%s: I/O MMU @ %p is "
				       "out of mapping resources, %u %u %lx\n",
				       __func__, ioc->ioc_hpa, ioc->res_size,
				       pages_needed, dma_get_seg_boundary(dev));
				return -1;
			}
#else
			printk(KERN_WARNING "%s: I/O MMU @ %p is "
			       "out of mapping resources, %u %u %lx\n",
			       __func__, ioc->ioc_hpa, ioc->res_size,
			       pages_needed, dma_get_seg_boundary(dev));
			return -1;
#endif
		}
	}

#ifdef PDIR_SEARCH_TIMING
	ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed;
	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
#endif

	prefetchw(&(ioc->pdir_base[pide]));

#ifdef ASSERT_PDIR_SANITY
	/* verify the first enable bit is clear */
	if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) {
		sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
	}
#endif

	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
		__func__, size, pages_needed, pide,
		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
		ioc->res_bitshift );

	return (pide);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Alex Williamson         271     56.46%   3        42.86%
David Mosberger-Tang    153     31.88%   1        14.29%
FUJITA Tomonori         55      11.46%   2        28.57%
Harvey Harrison         1       0.21%    1        14.29%
Total                   480     100.00%  7        100.00%

/**
 * sba_free_range - unmark bits in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes to create a mapping for
 *
 * clear bits in the ioc's resource map
 */
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	unsigned long iovp = SBA_IOVP(ioc, iova);
	unsigned int pide = PDIR_INDEX(iovp);
	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
	int bits_not_wanted = size >> iovp_shift;
	unsigned long m;

	/* Round up to power-of-two size: see AR2305 note above */
	bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift);
	for (; bits_not_wanted > 0 ; res_ptr++) {

		if (unlikely(bits_not_wanted > BITS_PER_LONG)) {

			/* these mappings start 64bit aligned */
			*res_ptr = 0UL;
			bits_not_wanted -= BITS_PER_LONG;
			pide += BITS_PER_LONG;

		} else {

			/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
			m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));
			bits_not_wanted = 0;

			DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size,
			        bits_not_wanted, m, pide, res_ptr, *res_ptr);

			ASSERT(m != 0);
			ASSERT(bits_not_wanted);
			ASSERT((*res_ptr & m) == m); /* verify same bits are set */
			*res_ptr &= ~m;
		}
	}
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    150     70.42%   1        16.67%
Alex Williamson         50      23.47%   2        33.33%
Björn Helgaas           11      5.16%    1        16.67%
Harvey Harrison         1       0.47%    1        16.67%
Nigel Croxon            1       0.47%    1        16.67%
Total                   213     100.00%  6        100.00%

/**************************************************************
*
*   "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/

/**
 * sba_io_pdir_entry - fill in one IO PDIR entry
 * @pdir_ptr:  pointer to IO PDIR entry
 * @vba: Virtual CPU address of buffer to map
 *
 * SBA Mapping Routine
 *
 * Given a virtual address (vba, arg1) sba_io_pdir_entry()
 * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
 * Each IO Pdir entry consists of 8 bytes as shown below
 * (LSB == bit 0):
 *
 *  63                    40                                 11    7        0
 * +-+---------------------+----------------------------------+----+--------+
 * |V|        U            |            PPN[39:12]            | U  |   FF   |
 * +-+---------------------+----------------------------------+----+--------+
 *
 *  V  == Valid Bit
 *  U  == Unused
 * PPN == Physical Page Number
 *
 * The physical address fields are filled with the results of virt_to_phys()
 * on the vba.
 */

#if 1
#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL)	\
						      | 0x8000000000000000ULL)
#else
void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
{
	*pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    28      100.00%  2        100.00%
Total                   28      100.00%  2        100.00%
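
The layout above is why the rest of this file recovers a physical address from a pdir entry with the mask ~0xE000000000000FFFULL (see sba_mark_clean() below). A minimal illustrative helper, not part of the driver:

static inline u64 sba_pdir_entry_to_phys_example(u64 pdir_entry)
{
	/* drop the valid bit (63), the other high control bits and the low FF byte */
	return pdir_entry & ~0xE000000000000FFFULL;
}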

#endif

#ifdef ENABLE_MARK_CLEAN
/**
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
static void
mark_clean (void *addr, size_t size)
{
	unsigned long pg_addr, end;

	pg_addr = PAGE_ALIGN((unsigned long) addr);
	end = (unsigned long) addr + size;
	while (pg_addr + PAGE_SIZE <= end) {
		struct page *page = virt_to_page((void *)pg_addr);
		set_bit(PG_arch_1, &page->flags);
		pg_addr += PAGE_SIZE;
	}
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    78      100.00%  2        100.00%
Total                   78      100.00%  2        100.00%

#endif

/**
 * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova:  IO Virtual Address mapped earlier
 * @byte_cnt:  number of bytes this mapping covers.
 *
 * Marking the IO PDIR entry(ies) as Invalid and invalidate
 * corresponding IO TLB entry. The PCOM (Purge Command Register)
 * is to purge stale entries in the IO TLB when unmapping entries.
 *
 * The PCOM register supports purging of multiple pages, with a minimum
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
 * must be a power of 2. The "Cool perf optimization" in the
 * allocation routine helps keep that true.
 */
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32) SBA_IOVP(ioc,iova);

	int off = PDIR_INDEX(iovp);

	/* Must be non-zero and rounded up */
	ASSERT(byte_cnt > 0);
	ASSERT(0 == (byte_cnt & ~iovp_mask));

#ifdef ASSERT_PDIR_SANITY
	/* Assert first pdir entry is set */
	if (!(ioc->pdir_base[off] >> 60)) {
		sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
	}
#endif

	if (byte_cnt <= iovp_size)
	{
		ASSERT(off < ioc->pdir_size);

		iovp |= iovp_shift;     /* set "size" field for PCOM */

#ifndef FULL_VALID_PDIR
		/*
		** clear I/O PDIR entry "valid" bit
		** Do NOT clear the rest - save it for debugging.
		** We should only clear bits that have previously
		** been enabled.
		*/
		ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
		/*
		** If we want to maintain the PDIR as valid, put in
		** the spill page so devices prefetching won't
		** cause a hard fail.
		*/
		ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
	} else {
		u32 t = get_iovp_order(byte_cnt) + iovp_shift;

		iovp |= t;
		ASSERT(t <= 31);   /* 2GB! Max value of "size" field */

		do {
			/* verify this pdir entry is enabled */
			ASSERT(ioc->pdir_base[off]  >> 63);
#ifndef FULL_VALID_PDIR
			/* clear I/O Pdir entry "valid" bit first */
			ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
			ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
			off++;
			byte_cnt -= iovp_size;
		} while (byte_cnt > 0);
	}

	WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    246     97.62%   2        66.67%
Alex Williamson         6       2.38%    1        33.33%
Total                   252     100.00%  3        100.00%
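
sba_mark_invalid() encodes the purge size into the low bits of the value it writes to PCOM. A worked example of that encoding (assuming iovp_shift == 12; the values are illustrative):

/*
 * Purging 32KB (8 pages) starting at iovp 0x8000:
 *   t    = get_iovp_order(0x8000) + iovp_shift = 3 + 12 = 15
 *   iovp |= t   ->   0x8000 | 15 = 0x800F     (low bits say "2^15 bytes")
 *   WRITE_REG(0x800F | ioc->ibase, ioc->ioc_hpa + IOC_PCOM);
 * Note 0x8000 is already 32KB aligned, as the hardware requires.
 */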

/**
 * sba_map_single_attrs - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @addr:  driver buffer to map.
 * @size:  number of bytes to map in driver buffer.
 * @dir:  R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static dma_addr_t sba_map_page(struct device *dev, struct page *page,
			       unsigned long poff, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct ioc *ioc;
	void *addr = page_address(page) + poff;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	int pide;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS
	unsigned long pci_addr = virt_to_phys(addr);
#endif

#ifdef ALLOW_IOV_BYPASS
	ASSERT(to_pci_dev(dev)->dma_mask);
	/*
	** Check if the PCI device can DMA to ptr... if so, just return ptr
	*/
	if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
		/*
		** Device is bit capable of DMA'ing to the buffer...
		** just return the PCI address of ptr
		*/
		DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: "
			   "0x%lx/0x%lx\n",
		           to_pci_dev(dev)->dma_mask, pci_addr);
		return pci_addr;
	}
#endif
	ioc = GET_IOC(dev);
	ASSERT(ioc);

	prefetch(ioc->res_hint);

	ASSERT(size > 0);
	ASSERT(size <= DMA_CHUNK_SIZE);

	/* save offset bits */
	offset = ((dma_addr_t) (long) addr) & ~iovp_mask;

	/* round up to nearest iovp_size */
	size = (size + offset + ~iovp_mask) & iovp_mask;

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
		panic("Sanity check failed");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	pide = sba_alloc_range(ioc, dev, size);
	if (pide < 0)
		return 0;

	iovp = (dma_addr_t) pide << iovp_shift;

	DBG_RUN("%s() 0x%p -> 0x%lx\n", __func__, addr, (long) iovp | offset);

	pdir_start = &(ioc->pdir_base[pide]);

	while (size > 0) {
		ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
		sba_io_pdir_entry(pdir_start, (unsigned long) addr);

		DBG_RUN("     pdir 0x%p %lx\n", pdir_start, *pdir_start);

		addr += iovp_size;
		size -= iovp_size;
		pdir_start++;
	}
	/* force pdir update */
	wmb();

	/* form complete address */
#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
	return SBA_IOVA(ioc, iovp, offset);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    274     68.16%   3        25.00%
Alex Williamson         86      21.39%   2        16.67%
FUJITA Tomonori         33      8.21%    4        33.33%
Arthur Kepner           6       1.49%    1        8.33%
Krzysztof Kozlowski     2       0.50%    1        8.33%
Harvey Harrison         1       0.25%    1        8.33%
Total                   402     100.00%  12       100.00%


static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
				       size_t size, enum dma_data_direction dir,
				       unsigned long attrs)
{
	return sba_map_page(dev, virt_to_page(addr),
			    (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
FUJITA Tomonori         50      96.15%   1        50.00%
Krzysztof Kozlowski     2       3.85%    1        50.00%
Total                   52      100.00%  2        100.00%

#ifdef ENABLE_MARK_CLEAN
static SBA_INLINE void
sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	u32	iovp = (u32) SBA_IOVP(ioc,iova);
	int	off = PDIR_INDEX(iovp);
	void	*addr;

	if (size <= iovp_size) {
		addr = phys_to_virt(ioc->pdir_base[off] &
		                    ~0xE000000000000FFFULL);
		mark_clean(addr, size);
	} else {
		do {
			addr = phys_to_virt(ioc->pdir_base[off] &
			                    ~0xE000000000000FFFULL);
			mark_clean(addr, min(size, iovp_size));
			off++;
			size -= iovp_size;
		} while (size > 0);
	}
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Alex Williamson         120     100.00%  1        100.00%
Total                   120     100.00%  1        100.00%

#endif

/**
 * sba_unmap_single_attrs - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova:  IOVA of driver buffer previously mapped.
 * @size:  number of bytes mapped in driver buffer.
 * @dir:  R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
			   enum dma_data_direction dir, unsigned long attrs)
{
	struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
	struct sba_dma_pair *d;
#endif
	unsigned long flags;
	dma_addr_t offset;

	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef ALLOW_IOV_BYPASS
	if (likely((iova & ioc->imask) != ioc->ibase)) {
		/*
		** Address does not fall w/in IOVA, must be bypassing
		*/
		DBG_BYPASS("sba_unmap_single_attrs() bypass addr: 0x%lx\n",
			   iova);

#ifdef ENABLE_MARK_CLEAN
		if (dir == DMA_FROM_DEVICE) {
			mark_clean(phys_to_virt(iova), size);
		}
#endif
		return;
	}
#endif
	offset = iova & ~iovp_mask;

	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);

	iova ^= offset;        /* clear offset bits */
	size += offset;
	size = ROUNDUP(size, iovp_size);

#ifdef ENABLE_MARK_CLEAN
	if (dir == DMA_FROM_DEVICE)
		sba_mark_clean(ioc, iova, size);
#endif

#if DELAYED_RESOURCE_CNT > 0
	spin_lock_irqsave(&ioc->saved_lock, flags);
	d = &(ioc->saved[ioc->saved_cnt]);
	d->iova = iova;
	d->size = size;
	if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) {
		int cnt = ioc->saved_cnt;
		spin_lock(&ioc->res_lock);
		while (cnt--) {
			sba_mark_invalid(ioc, d->iova, d->size);
			sba_free_range(ioc, d->iova, d->size);
			d--;
		}
		ioc->saved_cnt = 0;
		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
		spin_unlock(&ioc->res_lock);
	}
	spin_unlock_irqrestore(&ioc->saved_lock, flags);
#else /* DELAYED_RESOURCE_CNT == 0 */
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_mark_invalid(ioc, iova, size);
	sba_free_range(ioc, iova, size);
	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif /* DELAYED_RESOURCE_CNT == 0 */
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    282     75.81%   3        25.00%
Alex Williamson         80      21.51%   3        25.00%
FUJITA Tomonori         4       1.08%    2        16.67%
Arthur Kepner           2       0.54%    1        8.33%
Krzysztof Kozlowski     2       0.54%    1        8.33%
Harvey Harrison         1       0.27%    1        8.33%
Justin P. Mattock       1       0.27%    1        8.33%
Total                   372     100.00%  12       100.00%


void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
			    enum dma_data_direction dir, unsigned long attrs)
{
	sba_unmap_page(dev, iova, size, dir, attrs);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
FUJITA Tomonori         35      94.59%   1        50.00%
Krzysztof Kozlowski     2       5.41%    1        50.00%
Total                   37      100.00%  2        100.00%

/**
 * sba_alloc_coherent - allocate/map shared mem for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @dma_handle:  IOVA of new buffer.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void *
sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t flags, unsigned long attrs)
{
	struct ioc *ioc;
	void *addr;

	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef CONFIG_NUMA
	{
		struct page *page;

		page = alloc_pages_node(ioc->node, flags, get_order(size));
		if (unlikely(!page))
			return NULL;

		addr = page_address(page);
	}
#else
	addr = (void *) __get_free_pages(flags, get_order(size));
#endif
	if (unlikely(!addr))
		return NULL;

	memset(addr, 0, size);
	*dma_handle = virt_to_phys(addr);

#ifdef ALLOW_IOV_BYPASS
	ASSERT(dev->coherent_dma_mask);
	/*
	** Check if the PCI device can DMA to ptr... if so, just return ptr
	*/
	if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) {
		DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",
		           dev->coherent_dma_mask, *dma_handle);

		return addr;
	}
#endif

	/*
	 * If device can't bypass or bypass is disabled, pass the 32bit fake
	 * device to map single to get an iova mapping.
	 */
	*dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
					   size, 0, 0);

	return addr;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Alex Williamson         119     56.40%   2        12.50%
David Mosberger-Tang    59      27.96%   4        25.00%
Björn Helgaas           21      9.95%    4        25.00%
Krzysztof Kozlowski     3       1.42%    1        6.25%
Vlastimil Babka         3       1.42%    1        6.25%
Arthur Kepner           2       0.95%    1        6.25%
Andrzej Pietrasiewicz   2       0.95%    1        6.25%
Al Viro                 1       0.47%    1        6.25%
FUJITA Tomonori         1       0.47%    1        6.25%
Total                   211     100.00%  16       100.00%

/**
 * sba_free_coherent - free/unmap shared mem for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @vaddr:  virtual address IOVA of "consistent" buffer.
 * @dma_handle:  IO virtual address of "consistent" buffer.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
			      dma_addr_t dma_handle, unsigned long attrs)
{
	sba_unmap_single_attrs(dev, dma_handle, size, 0, 0);
	free_pages((unsigned long) vaddr, get_order(size));
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    42      80.77%   2        28.57%
Krzysztof Kozlowski     3       5.77%    1        14.29%
Arthur Kepner           2       3.85%    1        14.29%
Björn Helgaas           2       3.85%    1        14.29%
Andrzej Pietrasiewicz   2       3.85%    1        14.29%
FUJITA Tomonori         1       1.92%    1        14.29%
Total                   52      100.00%  7        100.00%

/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x1UL

#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif


/**
 * sba_fill_pdir - write allocated SG entries into IO PDIR
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg:  list of IOVA/size pairs
 * @nents: number of entries in startsg list
 *
 * Take preprocessed SG list and write corresponding entries
 * in the IO PDIR.
 */
static SBA_INLINE int
sba_fill_pdir(
	struct ioc *ioc,
	struct scatterlist *startsg,
	int nents)
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	int n_mappings = 0;
	u64 *pdirp = NULL;
	unsigned long dma_offset = 0;

	while (nents-- > 0) {
		int     cnt = startsg->dma_length;
		startsg->dma_length = 0;

#ifdef DEBUG_LARGE_SG_ENTRIES
		if (dump_run_sg)
			printk(" %2d : %08lx/%05x %p\n",
				nents, startsg->dma_address, cnt,
				sba_sg_address(startsg));
#else
		DBG_RUN_SG(" %d : %08lx/%05x %p\n",
				nents, startsg->dma_address, cnt,
				sba_sg_address(startsg));
#endif

		/*
		** Look for the start of a new DMA stream
		*/
		if (startsg->dma_address & PIDE_FLAG) {
			u32 pide = startsg->dma_address & ~PIDE_FLAG;
			dma_offset = (unsigned long) pide & ~iovp_mask;
			startsg->dma_address = 0;
			if (n_mappings)
				dma_sg = sg_next(dma_sg);
			dma_sg->dma_address = pide | ioc->ibase;
			pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
			n_mappings++;
		}

		/*
		** Look for a VCONTIG chunk
		*/
		if (cnt) {
			unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
			ASSERT(pdirp);

			/* Since multiple Vcontig blocks could make up
			** one DMA stream, *add* cnt to dma_len.
			*/
			dma_sg->dma_length += cnt;
			cnt += dma_offset;
			dma_offset=0;	/* only want offset on first chunk */
			cnt = ROUNDUP(cnt, iovp_size);
			do {
				sba_io_pdir_entry(pdirp, vaddr);
				vaddr += iovp_size;
				cnt -= iovp_size;
				pdirp++;
			} while (cnt > 0);
		}
		startsg = sg_next(startsg);
	}
	/* force pdir update */
	wmb();

#ifdef DEBUG_LARGE_SG_ENTRIES
	dump_run_sg = 0;
#endif
	return(n_mappings);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    245     83.62%   3        42.86%
Björn Helgaas           29      9.90%    1        14.29%
Jens Axboe              10      3.41%    1        14.29%
Alex Williamson         5       1.71%    1        14.29%
FUJITA Tomonori         4       1.37%    1        14.29%
Total                   293     100.00%  7        100.00%

/*
** Two address ranges are DMA contiguous *iff* "end of prev" and
** "start of next" are both on an IOV page boundary.
**
** (shift left is a quick trick to mask off upper bits)
*/
#define DMA_CONTIG(__X, __Y) \
	(((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL)


/**
 * sba_coalesce_chunks - preprocess the SG list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg:  list of IOVA/size pairs
 * @nents: number of entries in startsg list
 *
 * First pass is to walk the SG list and determine where the breaks are
 * in the DMA stream. Allocates PDIR entries but does not fill them.
 * Returns the number of DMA chunks.
 *
 * Doing the fill separate from the coalescing/allocation keeps the
 * code simpler. Future enhancement could make one pass through
 * the sglist do both.
 */
static SBA_INLINE int
sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
	struct scatterlist *startsg,
	int nents)
{
	struct scatterlist *vcontig_sg;    /* VCONTIG chunk head */
	unsigned long vcontig_len;         /* len of VCONTIG chunk */
	unsigned long vcontig_end;
	struct scatterlist *dma_sg;        /* next DMA stream head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	int n_mappings = 0;
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
	int idx;

	while (nents > 0) {
		unsigned long vaddr = (unsigned long) sba_sg_address(startsg);

		/*
		** Prepare for first/next DMA stream
		*/
		dma_sg = vcontig_sg = startsg;
		dma_len = vcontig_len = vcontig_end = startsg->length;
		vcontig_end +=  vaddr;
		dma_offset = vaddr & ~iovp_mask;

		/* PARANOID: clear entries */
		startsg->dma_address = startsg->dma_length = 0;

		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while (--nents > 0) {
			unsigned long vaddr;	/* tmp */

			startsg = sg_next(startsg);

			/* PARANOID */
			startsg->dma_address = startsg->dma_length = 0;

			/* catch brokenness in SCSI layer */
			ASSERT(startsg->length <= DMA_CHUNK_SIZE);

			/*
			** First make sure current dma stream won't
			** exceed DMA_CHUNK_SIZE if we coalesce the
			** next entry.
			*/
			if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask)
			    > DMA_CHUNK_SIZE)
				break;

			if (dma_len + startsg->length > max_seg_size)
				break;

			/*
			** Then look for virtually contiguous blocks.
			**
			** append the next transaction?
			*/
			vaddr = (unsigned long) sba_sg_address(startsg);
			if  (vcontig_end == vaddr)
			{
				vcontig_len += startsg->length;
				vcontig_end += startsg->length;
				dma_len     += startsg->length;
				continue;
			}

#ifdef DEBUG_LARGE_SG_ENTRIES
			dump_run_sg = (vcontig_len > iovp_size);
#endif

			/*
			** Not virtually contiguous.
			** Terminate prev chunk.
			** Start a new chunk.
			**
			** Once we start a new VCONTIG chunk, dma_offset
			** can't change. And we need the offset from the first
			** chunk - not the last one. Ergo successive chunks
			** must start on page boundaries and dovetail
			** with their predecessor.
			*/
			vcontig_sg->dma_length = vcontig_len;

			vcontig_sg = startsg;
			vcontig_len = startsg->length;

			/*
			** 3) do the entries end/start on page boundaries?
			**    Don't update vcontig_end until we've checked.
			*/
			if (DMA_CONTIG(vcontig_end, vaddr))
			{
				vcontig_end = vcontig_len + vaddr;
				dma_len += vcontig_len;
				continue;
			} else {
				break;
			}
		}

		/*
		** End of DMA Stream
		** Terminate last VCONTIG block.
		** Allocate space for DMA stream.
		*/
		vcontig_sg->dma_length = vcontig_len;
		dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
		ASSERT(dma_len <= DMA_CHUNK_SIZE);
		idx = sba_alloc_range(ioc, dev, dma_len);
		if (idx < 0) {
			dma_sg->dma_length = 0;
			return -1;
		}
		dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift)
						   | dma_offset);
		n_mappings++;
	}

	return n_mappings;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    311     80.99%   2        25.00%
FUJITA Tomonori         58      15.10%   2        25.00%
Alex Williamson         7       1.82%    1        12.50%
Jens Axboe              5       1.30%    1        12.50%
Björn Helgaas           2       0.52%    1        12.50%
André Goddard Rosa      1       0.26%    1        12.50%
Total                   384     100.00%  8        100.00%
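
The DMA_CONTIG() test used by sba_coalesce_chunks() above is just an alignment check on both ends of the splice point. A worked example (assuming iovp_shift == 12 and BITS_PER_LONG == 64, so the shift is 52 bits):

/*
 * DMA_CONTIG(0x2000, 0x3000):
 *   (0x2000 | 0x3000) << 52 == 0   -> contiguous (both ends 4k aligned)
 * DMA_CONTIG(0x2fff, 0x3000):
 *   (0x2fff | 0x3000) << 52 != 0   -> not contiguous (prev end unaligned)
 */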

static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
			       int nents, enum dma_data_direction dir,
			       unsigned long attrs);
/**
 * sba_map_sg - map Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @dir:  R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
			    int nents, enum dma_data_direction dir,
			    unsigned long attrs)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS_SG
	struct scatterlist *sg;
#endif

	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef ALLOW_IOV_BYPASS_SG
	ASSERT(to_pci_dev(dev)->dma_mask);
	if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
		for_each_sg(sglist, sg, nents, filled) {
			sg->dma_length = sg->length;
			sg->dma_address = virt_to_phys(sba_sg_address(sg));
		}
		return filled;
	}
#endif
	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sglist->dma_length = sglist->length;
		sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist),
							   sglist->length, dir, attrs);
		return 1;
	}

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check before sba_map_sg_attrs()");
	}
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	prefetch(ioc->res_hint);

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
	coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
	if (coalesced < 0) {
		sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
		return 0;
	}

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	** o dma_address will contain the pdir index
	** o dma_len will contain the number of bytes to map
	** o address contains the virtual address.
	*/
	filled = sba_fill_pdir(ioc, sglist, nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check after sba_map_sg_attrs()\n");
	}
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	ASSERT(coalesced == filled);
	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

	return filled;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    251     67.84%   4        28.57%
Alex Williamson         54      14.59%   1        7.14%
FUJITA Tomonori         29      7.84%    4        28.57%
Björn Helgaas           12      3.24%    1        7.14%
Arthur Kepner           10      2.70%    1        7.14%
Jens Axboe              10      2.70%    1        7.14%
Harvey Harrison         2       0.54%    1        7.14%
Krzysztof Kozlowski     2       0.54%    1        7.14%
Total                   370     100.00%  14       100.00%

/**
 * sba_unmap_sg_attrs - unmap Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @dir:  R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
			       int nents, enum dma_data_direction dir,
			       unsigned long attrs)
{
#ifdef ASSERT_PDIR_SANITY
	struct ioc *ioc;
	unsigned long flags;
#endif

	DBG_RUN_SG("%s() START %d entries,  %p,%x\n",
		   __func__, nents, sba_sg_address(sglist), sglist->length);

#ifdef ASSERT_PDIR_SANITY
	ioc = GET_IOC(dev);
	ASSERT(ioc);

	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check before sba_unmap_sg_attrs()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	while (nents && sglist->dma_length) {

		sba_unmap_single_attrs(dev, sglist->dma_address,
				       sglist->dma_length, dir, attrs);
		sglist = sg_next(sglist);
		nents--;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__,  nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check after sba_unmap_sg_attrs()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    150     82.87%   3        27.27%
Arthur Kepner           8       4.42%    1        9.09%
Alex Williamson         6       3.31%    1        9.09%
Jens Axboe              5       2.76%    1        9.09%
Björn Helgaas           5       2.76%    1        9.09%
FUJITA Tomonori         3       1.66%    2        18.18%
Krzysztof Kozlowski     2       1.10%    1        9.09%
Harvey Harrison         2       1.10%    1        9.09%
Total                   181     100.00%  11       100.00%

/**************************************************************
*
*   Initialization and claim
*
***************************************************************/
static void
ioc_iova_init(struct ioc *ioc)
{
	int tcnfg;
	int agp_found = 0;
	struct pci_dev *device = NULL;
#ifdef FULL_VALID_PDIR
	unsigned long index;
#endif

	/*
	** Firmware programs the base and size of a "safe IOVA space"
	** (one that doesn't overlap memory or LMMIO space) in the
	** IBASE and IMASK registers.
	*/
	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
	ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL;

	ioc->iov_size = ~ioc->imask + 1;

	DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
		__func__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
		ioc->iov_size >> 20);

	switch (iovp_size) {
		case  4*1024: tcnfg = 0; break;
		case  8*1024: tcnfg = 1; break;
		case 16*1024: tcnfg = 2; break;
		case 64*1024: tcnfg = 3; break;
		default:
			panic(PFX "Unsupported IOTLB page size %ldK",
				iovp_size >> 10);
			break;
	}
	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

	ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE;
	ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
						   get_order(ioc->pdir_size));
	if (!ioc->pdir_base)
		panic(PFX "Couldn't allocate I/O Page Table\n");

	memset(ioc->pdir_base, 0, ioc->pdir_size);

	DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __func__,
		iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);

	ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/*
	** If an AGP device is present, only use half of the IOV space
	** for PCI DMA.  Unfortunately we can't know ahead of time
	** whether GART support will actually be used, for now we
	** can just key on an AGP device found in the system.
	** We program the next pdir index after we stop w/ a key for
	** the GART code to handshake on.
	*/
	for_each_pci_dev(device)
		agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);

	if (agp_found && reserve_sba_gart) {
		printk(KERN_INFO PFX "reserving %dMb of IOVA space at 0x%lx for agpgart\n",
		      ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2);
		ioc->pdir_size /= 2;
		((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
	}
#ifdef FULL_VALID_PDIR
	/*
	** Check to see if the spill page has been allocated, we don't need more than
	** one across multiple SBAs.
	*/
	if (!prefetch_spill_page) {
		char *spill_poison = "SBAIOMMU POISON";
		int poison_size = 16;
		void *poison_addr, *addr;

		addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size));
		if (!addr)
			panic(PFX "Couldn't allocate PDIR spill page\n");

		poison_addr = addr;
		for ( ; (u64) poison_addr < addr + iovp_size; poison_addr += poison_size)
			memcpy(poison_addr, spill_poison, poison_size);

		prefetch_spill_page = virt_to_phys(addr);

		DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __func__, prefetch_spill_page);
	}
	/*
	** Set all the PDIR entries valid w/ the spill page as the target
	*/
	for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++)
		((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
#endif

	/* Clear I/O TLB of any possible entries */
	WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM);
	READ_REG(ioc->ioc_hpa + IOC_PCOM);

	/* Enable IOVA translation */
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
	READ_REG(ioc->ioc_hpa + IOC_IBASE);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    365     65.41%   2        25.00%
Alex Williamson         174     31.18%   1        12.50%
Björn Helgaas           12      2.15%    2        25.00%
Harvey Harrison         3       0.54%    1        12.50%
Greg Kroah-Hartman      2       0.36%    1        12.50%
Hanna V. Linder         2       0.36%    1        12.50%
Total                   558     100.00%  8        100.00%


static void __init
ioc_resource_init(struct ioc *ioc)
{
	spin_lock_init(&ioc->res_lock);
#if DELAYED_RESOURCE_CNT > 0
	spin_lock_init(&ioc->saved_lock);
#endif

	/* resource map size dictated by pdir_size */
	ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
	ioc->res_size >>= 3;  /* convert bit count to byte count */
	DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);

	ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
						 get_order(ioc->res_size));
	if (!ioc->res_map)
		panic(PFX "Couldn't allocate resource map\n");

	memset(ioc->res_map, 0, ioc->res_size);
	/* next available IOVP - circular search */
	ioc->res_hint = (unsigned long *) ioc->res_map;

#ifdef ASSERT_PDIR_SANITY
	/* Mark first bit busy - ie no IOVA 0 */
	ioc->res_map[0] = 0x1;
	ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
#endif
#ifdef FULL_VALID_PDIR
	/* Mark the last resource used so we don't prefetch beyond IOVA space */
	ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */
	ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF
							      | prefetch_spill_page);
#endif

	DBG_INIT("%s() res_map %x %p\n", __func__,
		 ioc->res_size, (void *) ioc->res_map);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    169     80.09%   2        40.00%
Alex Williamson         40      18.96%   2        40.00%
Harvey Harrison         2       0.95%    1        20.00%
Total                   211     100.00%  5        100.00%


static void __init
ioc_sac_init(struct ioc *ioc)
{
	struct pci_dev *sac = NULL;
	struct pci_controller *controller = NULL;

	/*
	 * pci_alloc_coherent() must return a DMA address which is
	 * SAC (single address cycle) addressable, so allocate a
	 * pseudo-device to enforce that.
	 */
	sac = kzalloc(sizeof(*sac), GFP_KERNEL);
	if (!sac)
		panic(PFX "Couldn't allocate struct pci_dev");

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		panic(PFX "Couldn't allocate struct pci_controller");

	controller->iommu = ioc;
	sac->sysdata = controller;
	sac->dma_mask = 0xFFFFFFFFUL;
#ifdef CONFIG_PCI
	sac->dev.bus = &pci_bus_type;
#endif
	ioc->sac_only_dev = sac;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    111     98.23%   3        75.00%
Yan Burman              2       1.77%    1        25.00%
Total                   113     100.00%  4        100.00%


static void __init
ioc_zx1_init(struct ioc *ioc)
{
	unsigned long rope_config;
	unsigned int i;

	if (ioc->rev < 0x20)
		panic(PFX "IOC 2.0 or later required for IOMMU support\n");

	/* 38 bit memory controller + extra bit for range displaced by MMIO */
	ioc->dma_mask = (0x1UL << 39) - 1;

	/*
	** Clear ROPE(N)_CONFIG AO bit.
	** Disables "NT Ordering" (~= !"Relaxed Ordering")
	** Overrides bit 1 in DMA Hint Sets.
	** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701.
	*/
	for (i=0; i<(8*8); i+=8) {
		rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i);
		rope_config &= ~IOC_ROPE_AO;
		WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i);
	}
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Alex Williamson         69      69.70%   2        50.00%
David Mosberger-Tang    30      30.30%   2        50.00%
Total                   99      100.00%  4        100.00%

typedef void (initfunc)(struct ioc *);

struct ioc_iommu {
	u32 func_id;
	char *name;
	initfunc *init;
};

static struct ioc_iommu ioc_iommu_info[] __initdata = {
	{ ZX1_IOC_ID, "zx1", ioc_zx1_init },
	{ ZX2_IOC_ID, "zx2", NULL },
	{ SX1000_IOC_ID, "sx1000", NULL },
	{ SX2000_IOC_ID, "sx2000", NULL },
};
static void ioc_init(unsigned long hpa, struct ioc *ioc)
{
	struct ioc_iommu *info;

	ioc->next = ioc_list;
	ioc_list = ioc;

	ioc->ioc_hpa = ioremap(hpa, 0x1000);

	ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
	ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
	ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL;	/* conservative */

	for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
		if (ioc->func_id == info->func_id) {
			ioc->name = info->name;
			if (info->init)
				(info->init)(ioc);
		}
	}

	iovp_size = (1 << iovp_shift);
	iovp_mask = ~(iovp_size - 1);

	DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __func__,
		PAGE_SIZE >> 10, iovp_size >> 10);

	if (!ioc->name) {
		ioc->name = kmalloc(24, GFP_KERNEL);
		if (ioc->name)
			sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
				ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
		else
			ioc->name = "Unknown";
	}

	ioc_iova_init(ioc);
	ioc_resource_init(ioc);
	ioc_sac_init(ioc);

	if ((long) ~iovp_mask > (long) ia64_max_iommu_merge_mask)
		ia64_max_iommu_merge_mask = ~iovp_mask;

	printk(KERN_INFO PFX
		"%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
		ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
		hpa, ioc->iov_size >> 20, ioc->ibase);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    195     69.40%   4        36.36%
Björn Helgaas           46      16.37%   2        18.18%
Alex Williamson         33      11.74%   2        18.18%
Rafael J. Wysocki       4       1.42%    1        9.09%
Matthew Wilcox          2       0.71%    1        9.09%
Harvey Harrison         1       0.36%    1        9.09%
Total                   281     100.00%  11       100.00%

/**************************************************************************
**
**   SBA initialization code (HW and SW)
**
**   o identify SBA chip itself
**   o FIXME: initialize DMA hints for reasonable defaults
**
**************************************************************************/

#ifdef CONFIG_PROC_FS
static void *
ioc_start(struct seq_file *s, loff_t *pos)
{
	struct ioc *ioc;
	loff_t n = *pos;

	for (ioc = ioc_list; ioc; ioc = ioc->next)
		if (!n--)
			return ioc;

	return NULL;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    53      100.00%  2        100.00%
Total                   53      100.00%  2        100.00%


static void *
ioc_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct ioc *ioc = v;

	++*pos;
	return ioc->next;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    36      100.00%  3        100.00%
Total                   36      100.00%  3        100.00%


static void
ioc_stop(struct seq_file *s, void *v)
{
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    14      100.00%  1        100.00%
Total                   14      100.00%  1        100.00%


static int
ioc_show(struct seq_file *s, void *v)
{
	struct ioc *ioc = v;
	unsigned long *res_ptr = (unsigned long *)ioc->res_map;
	int i, used = 0;

	seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
		ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
#ifdef CONFIG_NUMA
	if (ioc->node != NUMA_NO_NODE)
		seq_printf(s, "NUMA node       : %d\n", ioc->node);
#endif
	seq_printf(s, "IOVA size       : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024));
	seq_printf(s, "IOVA page size  : %ld kb\n", iovp_size/1024);

	for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		used += hweight64(*res_ptr);

	seq_printf(s, "PDIR size       : %d entries\n", ioc->pdir_size >> 3);
	seq_printf(s, "PDIR used       : %d entries\n", used);

#ifdef PDIR_SEARCH_TIMING
	{
		unsigned long i = 0, avg = 0, min, max;
		min = max = ioc->avg_search[0];
		for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
			avg += ioc->avg_search[i];
			if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
			if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
		}
		avg /= SBA_SEARCH_SAMPLE;
		seq_printf(s, "Bitmap search   : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
		           min, avg, max);
	}
#endif
#ifndef ALLOW_IOV_BYPASS
	seq_printf(s, "IOVA bypass disabled\n");
#endif
	return 0;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Alex Williamson         203     63.84%   3        37.50%
David Mosberger-Tang    114     35.85%   4        50.00%
Björn Helgaas           1       0.31%    1        12.50%
Total                   318     100.00%  8        100.00%

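ioc_show() derives the "PDIR used" count by popcounting every word of the resource bitmap with hweight64(). A hypothetical stand-alone version of the same accounting, with the GCC builtin standing in for the kernel helper:

/* Hypothetical demo of the bitmap accounting in ioc_show();
 * __builtin_popcountll() stands in for the kernel's hweight64(). */
#include <stdio.h>

int main(void)
{
	unsigned long long res_map[] = { 0xF0ULL, 0x3ULL, 0x0ULL };
	int used = 0;

	for (int i = 0; i < 3; i++)
		used += __builtin_popcountll(res_map[i]);

	printf("PDIR used : %d entries\n", used);	/* 4 + 2 + 0 = 6 */
	return 0;
}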
static const struct seq_operations ioc_seq_ops = {
	.start = ioc_start,
	.next  = ioc_next,
	.stop  = ioc_stop,
	.show  = ioc_show
};
static int
ioc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &ioc_seq_ops);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    23      92.00%   2        66.67%
Alex Williamson         2       8.00%    1        33.33%
Total                   25      100.00%  3        100.00%

static const struct file_operations ioc_fops = {
	.open    = ioc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release
};
static void __init
ioc_proc_init(void)
{
	struct proc_dir_entry *dir;

	dir = proc_mkdir("bus/mckinley", NULL);
	if (!dir)
		return;

	proc_create(ioc_list->name, 0, dir, &ioc_fops);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    34      79.07%   4        66.67%
Björn Helgaas           6       13.95%   1        16.67%
Denis V. Lunev          3       6.98%    1        16.67%
Total                   43      100.00%  6        100.00%

#endif
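The four handlers above follow the standard seq_file iterator contract: start() positions the cursor at *pos, next() advances it, stop() releases any state, and show() formats one element. A minimal sketch of the same pattern over a hypothetical singly linked list (demo_node and demo_list are made-up names, not part of this driver):

/* Minimal sketch of the seq_file iterator pattern used above, applied
 * to a hypothetical singly linked list. */
#include <linux/seq_file.h>

struct demo_node {
	int value;
	struct demo_node *next;
};
static struct demo_node *demo_list;

static void *demo_start(struct seq_file *s, loff_t *pos)
{
	struct demo_node *n = demo_list;
	loff_t off = *pos;

	while (n && off--)	/* skip forward to the requested position */
		n = n->next;
	return n;		/* NULL terminates the sequence */
}

static void *demo_next(struct seq_file *s, void *v, loff_t *pos)
{
	++*pos;
	return ((struct demo_node *)v)->next;
}

static void demo_stop(struct seq_file *s, void *v)
{
	/* nothing to unlock in this sketch */
}

static int demo_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", ((struct demo_node *)v)->value);
	return 0;
}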
static void
sba_connect_bus(struct pci_bus *bus)
{
	acpi_handle handle, parent;
	acpi_status status;
	struct ioc *ioc;

	if (!PCI_CONTROLLER(bus))
		panic(PFX "no sysdata on bus %d!\n", bus->number);

	if (PCI_CONTROLLER(bus)->iommu)
		return;

	handle = acpi_device_handle(PCI_CONTROLLER(bus)->companion);
	if (!handle)
		return;

	/*
	 * The IOC scope encloses PCI root bridges in the ACPI
	 * namespace, so work our way out until we find an IOC we
	 * claimed previously.
	 */
	do {
		for (ioc = ioc_list; ioc; ioc = ioc->next)
			if (ioc->handle == handle) {
				PCI_CONTROLLER(bus)->iommu = ioc;
				return;
			}

		status = acpi_get_parent(handle, &parent);
		handle = parent;
	} while (ACPI_SUCCESS(status));

	printk(KERN_WARNING "No IOC for PCI Bus %04x:%02x in ACPI\n",
	       pci_domain_nr(bus), bus->number);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    133     91.72%   2        33.33%
Björn Helgaas           6       4.14%    2        33.33%
Rafael J. Wysocki       4       2.76%    1        16.67%
Matthew Wilcox          2       1.38%    1        16.67%
Total                   145     100.00%  6        100.00%


static void __init
sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
{
#ifdef CONFIG_NUMA
	unsigned int node;

	node = acpi_get_node(handle);
	if (node != NUMA_NO_NODE && !node_online(node))
		node = NUMA_NO_NODE;

	ioc->node = node;
#endif
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Alex Williamson         41      77.36%   2        50.00%
Björn Helgaas           12      22.64%   2        50.00%
Total                   53      100.00%  4        100.00%


static void acpi_sba_ioc_add(struct ioc *ioc)
{
	acpi_handle handle = ioc->handle;
	acpi_status status;
	u64 hpa, length;
	struct acpi_device_info *adi;

	ioc_found = ioc->next;
	status = hp_acpi_csr_space(handle, &hpa, &length);
	if (ACPI_FAILURE(status))
		goto err;

	status = acpi_get_object_info(handle, &adi);
	if (ACPI_FAILURE(status))
		goto err;

	/*
	 * For HWP0001, only SBA appears in ACPI namespace.  It encloses the PCI
	 * root bridges, and its CSR space includes the IOC function.
	 */
	if (strncmp("HWP0001", adi->hardware_id.string, 7) == 0) {
		hpa += ZX1_IOC_OFFSET;
		/* zx1 based systems default to kernel page size iommu pages */
		if (!iovp_shift)
			iovp_shift = min(PAGE_SHIFT, 16);
	}
	kfree(adi);

	/*
	 * default anything not caught above or specified on cmdline to 4k
	 * iommu page size
	 */
	if (!iovp_shift)
		iovp_shift = 12;

	ioc_init(hpa, ioc);
	/* setup NUMA node association */
	sba_map_ioc_to_node(ioc, handle);
	return;

 err:
	kfree(ioc);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    84      53.50%   3        27.27%
Alex Williamson         35      22.29%   2        18.18%
Rafael J. Wysocki       31      19.75%   2        18.18%
Joe Perches             4       2.55%    1        9.09%
Len Brown               1       0.64%    1        9.09%
Björn Helgaas           1       0.64%    1        9.09%
Robert Moore            1       0.64%    1        9.09%
Total                   157     100.00%  11       100.00%

static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
	{"HWP0001", 0},
	{"HWP0004", 0},
	{"", 0},
};
static int acpi_sba_ioc_attach(struct acpi_device *device,
			       const struct acpi_device_id *not_used)
{
	struct ioc *ioc;

	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc)
		return -ENOMEM;

	ioc->next = ioc_found;
	ioc_found = ioc;
	ioc->handle = device->handle;
	return 1;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki       65      100.00%  1        100.00%
Total                   65      100.00%  1        100.00%

static struct acpi_scan_handler acpi_sba_ioc_handler = {
	.ids	= hp_ioc_iommu_device_ids,
	.attach	= acpi_sba_ioc_attach,
};
static int __init acpi_sba_ioc_init_acpi(void)
{
	return acpi_scan_add_handler(&acpi_sba_ioc_handler);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki       16      100.00%  1        100.00%
Total                   16      100.00%  1        100.00%

/* This has to run before acpi_scan_init(). */
arch_initcall(acpi_sba_ioc_init_acpi);

extern const struct dma_map_ops swiotlb_dma_ops;
static int __init
sba_init(void)
{
	if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
		return 0;

#if defined(CONFIG_IA64_GENERIC)
	/* If we are booting a kdump kernel, the sba_iommu will
	 * cause devices that were not shutdown properly to MCA
	 * as soon as they are turned back on.  Our only option for
	 * a successful kdump kernel boot is to use the swiotlb.
	 */
	if (is_kdump_kernel()) {
		dma_ops = &swiotlb_dma_ops;
		if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
			panic("Unable to initialize software I/O TLB:"
			      " Try machvec=dig boot option");
		machvec_init("dig");
		return 0;
	}
#endif

	/*
	 * ioc_found should be populated by the acpi_sba_ioc_handler's .attach()
	 * routine, but that only happens if acpi_scan_init() has already run.
	 */
	while (ioc_found)
		acpi_sba_ioc_add(ioc_found);

	if (!ioc_list) {
#ifdef CONFIG_IA64_GENERIC
		/*
		 * If we didn't find something sba_iommu can claim, we
		 * need to setup the swiotlb and switch to the dig machvec.
		 */
		dma_ops = &swiotlb_dma_ops;
		if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
			panic("Unable to find SBA IOMMU or initialize "
			      "software I/O TLB: Try machvec=dig boot option");
		machvec_init("dig");
#else
		panic("Unable to find SBA IOMMU: Try a generic or DIG kernel");
#endif
		return 0;
	}

#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
	/*
	 * hpzx1_swiotlb needs to have a fairly small swiotlb bounce
	 * buffer setup to support devices with smaller DMA masks than
	 * sba_iommu can handle.
	 */
	if (ia64_platform_is("hpzx1_swiotlb")) {
		extern void hwsw_init(void);

		hwsw_init();
	}
#endif

#ifdef CONFIG_PCI
	{
		struct pci_bus *b = NULL;
		while ((b = pci_find_next_bus(b)) != NULL)
			sba_connect_bus(b);
	}
#endif

#ifdef CONFIG_PROC_FS
	ioc_proc_init();
#endif
	return 0;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Alex Williamson         94      43.52%   2        16.67%
Terry Loftin            43      19.91%   1        8.33%
David Mosberger-Tang    36      16.67%   4        33.33%
Greg Kroah-Hartman      13      6.02%    1        8.33%
Rafael J. Wysocki       10      4.63%    1        8.33%
FUJITA Tomonori         10      4.63%    1        8.33%
Björn Helgaas           8       3.70%    1        8.33%
Simon Horman            2       0.93%    1        8.33%
Total                   216     100.00%  12       100.00%

subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */
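The two initcall levels used here are what enforce the ordering constraints noted in the comments: arch_initcall() (level 3) registers the ACPI scan handler before the namespace is scanned, and subsys_initcall() (level 4) runs sba_init() after ACPI but before device drivers. A hypothetical sketch of that pattern in isolation (register_early/consume_late are made-up names):

/* Hypothetical sketch of ordering two setup steps via initcall levels:
 * arch_initcall() runs at level 3 and subsys_initcall() at level 4,
 * so register_early() is guaranteed to run before consume_late(). */
#include <linux/init.h>

static int __init register_early(void)
{
	/* e.g. register a scan handler before the bus scan happens */
	return 0;
}
arch_initcall(register_early);

static int __init consume_late(void)
{
	/* e.g. walk whatever register_early() set up */
	return 0;
}
subsys_initcall(consume_late);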
static int __init
nosbagart(char *str)
{
	reserve_sba_gart = 0;
	return 1;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    18      100.00%  1        100.00%
Total                   18      100.00%  1        100.00%


static int sba_dma_supported (struct device *dev, u64 mask)
{
	/* make sure it's at least 32bit capable */
	return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    24      92.31%   2        66.67%
FUJITA Tomonori         2       7.69%    1        33.33%
Total                   26      100.00%  3        100.00%


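sba_dma_supported() accepts any mask whose low 32 bits are all set, i.e. any device that can address at least the full 32-bit range. A hypothetical user-space demo of which masks pass that test:

/* Hypothetical demo of the sba_dma_supported() mask test. */
#include <stdio.h>

static int mask_ok(unsigned long long mask)
{
	/* make sure it's at least 32bit capable */
	return (mask & 0xFFFFFFFFULL) == 0xFFFFFFFFULL;
}

int main(void)
{
	printf("32-bit mask: %d\n", mask_ok(0xFFFFFFFFULL));	/* 1 */
	printf("64-bit mask: %d\n", mask_ok(~0ULL));		/* 1 */
	printf("24-bit mask: %d\n", mask_ok(0xFFFFFFULL));	/* 0 */
	return 0;
}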
static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Andrew Morton           10      58.82%   1        33.33%
FUJITA Tomonori         7       41.18%   2        66.67%
Total                   17      100.00%  3        100.00%

__setup("nosbagart", nosbagart);
static int __init
sba_page_override(char *str)
{
	unsigned long page_size;

	page_size = memparse(str, &str);
	switch (page_size) {
		case 4096:
		case 8192:
		case 16384:
		case 65536:
			iovp_shift = ffs(page_size) - 1;
			break;
		default:
			printk("%s: unknown/unsupported iommu page size %ld\n",
			       __func__, page_size);
	}

	return 1;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Alex Williamson         65      98.48%   1        50.00%
Harvey Harrison         1       1.52%    1        50.00%
Total                   66      100.00%  2        100.00%

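sba_page_override() turns the byte count parsed from the sbapagesize= option into a shift with ffs(page_size) - 1, which for an exact power of two yields its exponent. A hypothetical stand-alone check of that arithmetic, using the POSIX ffs() in place of the kernel's:

/* Hypothetical demo of the ffs()-based size-to-shift conversion in
 * sba_page_override(); only the supported page sizes are shown. */
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	int sizes[] = { 4096, 8192, 16384, 65536 };

	for (int i = 0; i < 4; i++) {
		int shift = ffs(sizes[i]) - 1;	/* power of two -> exponent */
		printf("sbapagesize=%d -> iovp_shift=%d\n", sizes[i], shift);
	}
	return 0;	/* prints shifts 12, 13, 14, 16 */
}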
__setup("sbapagesize=",sba_page_override);

const struct dma_map_ops sba_dma_ops = {
	.alloc			= sba_alloc_coherent,
	.free			= sba_free_coherent,
	.map_page		= sba_map_page,
	.unmap_page		= sba_unmap_page,
	.map_sg			= sba_map_sg_attrs,
	.unmap_sg		= sba_unmap_sg_attrs,
	.sync_single_for_cpu	= machvec_dma_sync_single,
	.sync_sg_for_cpu	= machvec_dma_sync_sg,
	.sync_single_for_device	= machvec_dma_sync_single,
	.sync_sg_for_device	= machvec_dma_sync_sg,
	.dma_supported		= sba_dma_supported,
	.mapping_error		= sba_dma_mapping_error,
};
void sba_dma_init(void)
{
	dma_ops = &sba_dma_ops;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
FUJITA Tomonori         12      100.00%  1        100.00%
Total                   12      100.00%  1        100.00%


Overall Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Mosberger-Tang    4809    56.76%   13       14.94%
Alex Williamson         2087    24.63%   9        10.34%
FUJITA Tomonori         578     6.82%    9        10.34%
Björn Helgaas           567     6.69%    13       14.94%
Rafael J. Wysocki       143     1.69%    3        3.45%
Terry Loftin            54      0.64%    1        1.15%
Jens Axboe              41      0.48%    2        2.30%
Arthur Kepner           30      0.35%    1        1.15%
Thomas Renninger        28      0.33%    1        1.15%
Krzysztof Kozlowski     20      0.24%    1        1.15%
Harvey Harrison         16      0.19%    1        1.15%
Greg Kroah-Hartman      15      0.18%    2        2.30%
Andrew Morton           14      0.17%    2        2.30%
Grant Grundler          9       0.11%    1        1.15%
Paul Bolle              6       0.07%    1        1.15%
Andrzej Pietrasiewicz   6       0.07%    1        1.15%
Adrian Bunk             4       0.05%    1        1.15%
Joe Perches             4       0.05%    1        1.15%
Matthew Wilcox          4       0.05%    2        2.30%
Linus Torvalds          3       0.04%    1        1.15%
Denis V. Lunev          3       0.04%    1        1.15%
Vlastimil Babka         3       0.04%    1        1.15%
Jesse Barnes            3       0.04%    1        1.15%
Matt Domsch             3       0.04%    1        1.15%
Yan Burman              2       0.02%    1        1.15%
Bart Van Assche         2       0.02%    1        1.15%
Matt LaPlante           2       0.02%    1        1.15%
Nigel Croxon            2       0.02%    1        1.15%
Hanna V. Linder         2       0.02%    1        1.15%
Simon Horman            2       0.02%    1        1.15%
Len Brown               1       0.01%    1        1.15%
Yijing Wang             1       0.01%    1        1.15%
Justin P. Mattock       1       0.01%    1        1.15%
Alan Cox                1       0.01%    1        1.15%
Robert Moore            1       0.01%    1        1.15%
Rohit Seth              1       0.01%    1        1.15%
Steven Cole             1       0.01%    1        1.15%
André Goddard Rosa      1       0.01%    1        1.15%
Jan Engelhardt          1       0.01%    1        1.15%
Arjan van de Ven        1       0.01%    1        1.15%
Al Viro                 1       0.01%    1        1.15%
Total                   8473    100.00%  87       100.00%