Contributors: 6

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
| --- | ---: | ---: | ---: | ---: |
| Brice Goglin | 87 | 38.16% | 1 | 16.67% |
| Bryan O'Sullivan | 73 | 32.02% | 1 | 16.67% |
| Stephen Boyd | 64 | 28.07% | 1 | 16.67% |
| Thomas Gleixner | 2 | 0.88% | 1 | 16.67% |
| Al Viro | 1 | 0.44% | 1 | 16.67% |
| Paul Gortmaker | 1 | 0.44% | 1 | 16.67% |
| Total | 228 | | 6 | |
```c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2006 PathScale, Inc. All Rights Reserved.
 */

#include <linux/export.h>
#include <linux/io.h>

/**
 * __iowrite32_copy - copy data to MMIO space, in 32-bit units
 * @to: destination, in MMIO space (must be 32-bit aligned)
 * @from: source (must be 32-bit aligned)
 * @count: number of 32-bit quantities to copy
 *
 * Copy data from kernel space to MMIO space, in units of 32 bits at a
 * time. Order of access is not guaranteed, nor is a memory barrier
 * performed afterwards.
 */
void __attribute__((weak)) __iowrite32_copy(void __iomem *to,
                                            const void *from,
                                            size_t count)
{
        u32 __iomem *dst = to;
        const u32 *src = from;
        const u32 *end = src + count;

        while (src < end)
                __raw_writel(*src++, dst++);
}
EXPORT_SYMBOL_GPL(__iowrite32_copy);
```
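As a usage sketch (not part of this file): a hypothetical driver might batch an eight-word command into a device FIFO with `__iowrite32_copy()`. The device, register offsets, and doorbell protocol below are invented for illustration; note that `@count` is in 32-bit words, not bytes.

```c
#include <linux/io.h>

/* made-up register layout for a hypothetical device */
#define DEMO_CMD_FIFO   0x40    /* eight-word command FIFO */
#define DEMO_DOORBELL   0x60    /* kick register */

static void demo_post_command(void __iomem *base, const u32 cmd[8])
{
        /* eight 32-bit MMIO stores; no ordering or barrier is implied */
        __iowrite32_copy(base + DEMO_CMD_FIFO, cmd, 8);

        /* writel() carries the ordering guarantees the raw copy does not */
        writel(1, base + DEMO_DOORBELL);
}
```

Because the copy itself promises no barrier, the kick is issued with `writel()`, which, unlike `__raw_writel()`, includes ordering guarantees.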
```c
/**
 * __ioread32_copy - copy data from MMIO space, in 32-bit units
 * @to: destination (must be 32-bit aligned)
 * @from: source, in MMIO space (must be 32-bit aligned)
 * @count: number of 32-bit quantities to copy
 *
 * Copy data from MMIO space to kernel space, in units of 32 bits at a
 * time. Order of access is not guaranteed, nor is a memory barrier
 * performed afterwards.
 */
void __ioread32_copy(void *to, const void __iomem *from, size_t count)
{
        u32 *dst = to;
        const u32 __iomem *src = from;
        const u32 __iomem *end = src + count;

        while (src < end)
                *dst++ = __raw_readl(src++);
}
EXPORT_SYMBOL_GPL(__ioread32_copy);
```
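The read direction mirrors the write path. A minimal sketch, again with a made-up offset, snapshotting a 64-byte device status window into an ordinary kernel buffer:

```c
#include <linux/io.h>

#define DEMO_STATUS_WIN 0x100   /* made-up 64-byte status window */

static void demo_read_status(void __iomem *base, u32 buf[16])
{
        /* @count is in 32-bit words: 16 words = 64 bytes */
        __ioread32_copy(buf, base + DEMO_STATUS_WIN, 16);
}
```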
```c
/**
 * __iowrite64_copy - copy data to MMIO space, in 64-bit or 32-bit units
 * @to: destination, in MMIO space (must be 64-bit aligned)
 * @from: source (must be 64-bit aligned)
 * @count: number of 64-bit quantities to copy
 *
 * Copy data from kernel space to MMIO space, in units of 32 or 64 bits at a
 * time. Order of access is not guaranteed, nor is a memory barrier
 * performed afterwards.
 */
void __attribute__((weak)) __iowrite64_copy(void __iomem *to,
                                            const void *from,
                                            size_t count)
{
#ifdef CONFIG_64BIT
        u64 __iomem *dst = to;
        const u64 *src = from;
        const u64 *end = src + count;

        while (src < end)
                __raw_writeq(*src++, dst++);
#else
        /* each 64-bit quantity is two 32-bit words, hence count * 2 */
        __iowrite32_copy(to, from, count * 2);
#endif
}
EXPORT_SYMBOL_GPL(__iowrite64_copy);
```
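A typical caller is a driver posting a fixed-size descriptor through a doorbell page; the sketch below invents the structure and mapping. `@count` is in 64-bit quantities, so a 64-byte entry is a count of 8, and on 32-bit kernels the same call degrades to sixteen 32-bit writes via the fallback above.

```c
#include <linux/kernel.h>
#include <linux/io.h>

/* hypothetical 64-byte work-queue entry */
struct demo_wqe {
        u64 words[8];
};

static void demo_post_wqe(void __iomem *db_page, const struct demo_wqe *wqe)
{
        /* @count is in u64 units; 32-bit kernels fall back to
         * __iowrite32_copy() with the count doubled */
        __iowrite64_copy(db_page, wqe->words, ARRAY_SIZE(wqe->words));
}
```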