Release 4.12: include/xen/arm/page-coherent.h
#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
#define _ASM_ARM_XEN_PAGE_COHERENT_H

#include <asm/page.h>
#include <asm/dma-mapping.h>
#include <linux/dma-mapping.h>

/*
 * Return the device's private DMA ops if the arch code installed any,
 * otherwise fall back to the default architecture dma_map_ops.
 */
static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dev_dma_ops)
		return dev->archdata.dev_dma_ops;
	return get_arch_dma_ops(NULL);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Stefano Stabellini | 38 | 100.00% | 2 | 100.00% |
Total | 38 | 100.00% | 2 | 100.00% |
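For illustration, a caller could use the returned ops table directly, exactly as the wrappers below do internally. This is a hypothetical sketch, not part of this header; example_map_with_ops and the DMA_TO_DEVICE parameters are assumptions:
/* Hypothetical caller: look up the dma_map_ops for a device and invoke
 * one of its callbacks directly, as the wrappers below do internally. */
static dma_addr_t example_map_with_ops(struct device *dev, struct page *page)
{
	const struct dma_map_ops *ops = xen_get_dma_ops(dev);

	/* Map one whole page for CPU-to-device transfers. */
	return ops->map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE, 0);
}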
/*
 * Helpers for foreign (grant-mapped) pages, implemented in
 * arch/arm/xen/mm.c.
 */
void __xen_dma_map_page(struct device *hwdev, struct page *page,
		dma_addr_t dev_addr, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);
void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
	return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags,
					     attrs);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Marc Zyngier | 45 | 97.83% | 1 | 50.00% |
Stefano Stabellini | 1 | 2.17% | 1 | 50.00% |
Total | 46 | 100.00% | 2 | 100.00% |
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
	xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Marc Zyngier | 43 | 97.73% | 1 | 50.00% |
Stefano Stabellini | 1 | 2.27% | 1 | 50.00% |
Total | 44 | 100.00% | 2 | 100.00% |
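A minimal usage sketch pairing the two helpers above; example_coherent_alloc_free, the one-page size, and the GFP_KERNEL flags are illustrative assumptions, not part of this header:
/* Hypothetical sketch: allocate one DMA-coherent page through the
 * selected dma_map_ops, then release it the same way. */
static void example_coherent_alloc_free(struct device *hwdev)
{
	dma_addr_t dma_handle;
	void *vaddr;

	vaddr = xen_alloc_coherent_pages(hwdev, PAGE_SIZE, &dma_handle,
					 GFP_KERNEL, 0);
	if (!vaddr)
		return;
	/* ... program the device with dma_handle ... */
	xen_free_coherent_pages(hwdev, PAGE_SIZE, vaddr, dma_handle, 0);
}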
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
		dma_addr_t dev_addr, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long page_pfn = page_to_xen_pfn(page);
	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
	unsigned long compound_pages =
		(1 << compound_order(page)) * XEN_PFN_PER_PAGE;
	bool local = (page_pfn <= dev_pfn) &&
		(dev_pfn - page_pfn < compound_pages);

	/*
	 * Dom0 is mapped 1:1. A Linux page can span multiple Xen pages,
	 * but it cannot contain a mix of local and foreign Xen pages:
	 * if the first xen_pfn == mfn, the page is local; otherwise it
	 * is a foreign page grant-mapped into dom0. If the page is local
	 * we can safely call the native dma_ops function, otherwise we
	 * call the Xen-specific one.
	 */
	if (local)
		xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size,
						 dir, attrs);
	else
		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir,
				   attrs);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Marc Zyngier | 127 | 99.22% | 1 | 50.00% |
Stefano Stabellini | 1 | 0.78% | 1 | 50.00% |
Total | 128 | 100.00% | 2 | 100.00% |
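To make the locality arithmetic explicit, here is a hypothetical helper that merely restates the check above; xen_page_is_local is not part of this header:
/* Hypothetical restatement of the locality test: dev_addr is local
 * iff its Xen PFN falls inside the run of Xen PFNs covered by the
 * (possibly compound) Linux page. */
static inline bool xen_page_is_local(struct page *page, dma_addr_t dev_addr)
{
	unsigned long page_pfn = page_to_xen_pfn(page);
	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
	unsigned long nr = (1 << compound_order(page)) * XEN_PFN_PER_PAGE;

	return page_pfn <= dev_pfn && dev_pfn - page_pfn < nr;
}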
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long pfn = PFN_DOWN(handle);

	/*
	 * A Linux page can span multiple Xen pages, but it cannot contain
	 * a mix of local and foreign Xen pages. Dom0 is mapped 1:1, so
	 * calling pfn_valid on a foreign mfn will always return false.
	 * If the page is local we can safely call the native dma_ops
	 * function, otherwise we call the Xen-specific one.
	 */
	if (pfn_valid(pfn)) {
		if (xen_get_dma_ops(hwdev)->unmap_page)
			xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size,
							   dir, attrs);
	} else
		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Marc Zyngier | 84 | 97.67% | 1 | 50.00% |
Stefano Stabellini | 2 | 2.33% | 1 | 50.00% |
Total | 86 | 100.00% | 2 | 100.00% |
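A hedged round-trip sketch combining the two wrappers; the caller (in practice swiotlb-xen) is assumed to have already translated the page's physical address into the bus address dev_addr, and example_dma_round_trip is not part of this header:
/* Hypothetical round trip: map a page for bidirectional DMA, let the
 * device work on it, then unmap it. */
static void example_dma_round_trip(struct device *hwdev, struct page *page,
				   dma_addr_t dev_addr)
{
	xen_dma_map_page(hwdev, page, dev_addr, 0, PAGE_SIZE,
			 DMA_BIDIRECTIONAL, 0);
	/* ... device performs DMA to/from the page ... */
	xen_dma_unmap_page(hwdev, dev_addr, PAGE_SIZE,
			   DMA_BIDIRECTIONAL, 0);
}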
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);

	if (pfn_valid(pfn)) {
		if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
			xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev,
					handle, size, dir);
	} else
		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Marc Zyngier | 75 | 97.40% | 1 | 50.00% |
Stefano Stabellini | 2 | 2.60% | 1 | 50.00% |
Total | 77 | 100.00% | 2 | 100.00% |
static inline void xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);

	if (pfn_valid(pfn)) {
		if (xen_get_dma_ops(hwdev)->sync_single_for_device)
			xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev,
					handle, size, dir);
	} else
		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Marc Zyngier | 75 | 97.40% | 1 | 50.00% |
Stefano Stabellini | 2 | 2.60% | 1 | 50.00% |
Total | 77 | 100.00% | 2 | 100.00% |
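Finally, a sketch of the usual streaming-DMA ownership handoff built from the two sync helpers; example_streaming_sync and the DMA_FROM_DEVICE direction are illustrative assumptions:
/* Hypothetical ownership handoff for a device-write buffer: give the
 * buffer to the device, then reclaim it for the CPU before reading
 * the data the device wrote. */
static void example_streaming_sync(struct device *hwdev,
				   dma_addr_t handle, size_t size)
{
	xen_dma_sync_single_for_device(hwdev, handle, size, DMA_FROM_DEVICE);
	/* ... device DMA-writes into the buffer ... */
	xen_dma_sync_single_for_cpu(hwdev, handle, size, DMA_FROM_DEVICE);
}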
#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
Overall Contributors
Person | Tokens | Token % | Commits | Commit % |
Marc Zyngier | 557 | 91.76% | 1 | 33.33% |
Stefano Stabellini | 50 | 8.24% | 2 | 66.67% |
Total | 607 | 100.00% | 3 | 100.00% |