cregit-Linux: how code gets into the kernel

Release 4.11 arch/arm/include/asm/atomic.h

/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>


#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */

#define atomic_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))
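
A minimal usage sketch of these accessors; the variable and function names below are hypothetical, not part of this header:

/* Usage sketch: `pending' and the helpers are illustrative only. */
static atomic_t pending = ATOMIC_INIT(0);

static void example_reset(void)
{
        atomic_set(&pending, 0);        /* plain WRITE_ONCE() store */
}

static int example_peek(void)
{
        return atomic_read(&pending);   /* plain READ_ONCE() load */
}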

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */


#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        unsigned long tmp;                                              \
        int result;                                                     \
                                                                        \
        prefetchw(&v->counter);                                         \
        __asm__ __volatile__("@ atomic_" #op "\n"                       \
"1:     ldrex   %0, [%3]\n"                                             \
"       " #asm_op "     %0, %0, %4\n"                                   \
"       strex   %1, %0, [%3]\n"                                         \
"       teq     %1, #0\n"                                               \
"       bne     1b"                                                     \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
        : "r" (&v->counter), "Ir" (i)                                   \
        : "cc");                                                        \
}                                                                       \

#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)      \
{                                                                       \
        unsigned long tmp;                                              \
        int result;                                                     \
                                                                        \
        prefetchw(&v->counter);                                         \
                                                                        \
        __asm__ __volatile__("@ atomic_" #op "_return\n"                \
"1:     ldrex   %0, [%3]\n"                                             \
"       " #asm_op "     %0, %0, %4\n"                                   \
"       strex   %1, %0, [%3]\n"                                         \
"       teq     %1, #0\n"                                               \
"       bne     1b"                                                     \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
        : "r" (&v->counter), "Ir" (i)                                   \
        : "cc");                                                        \
                                                                        \
        return result;                                                  \
}


#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)       \
{                                                                       \
        unsigned long tmp;                                              \
        int result, val;                                                \
                                                                        \
        prefetchw(&v->counter);                                         \
                                                                        \
        __asm__ __volatile__("@ atomic_fetch_" #op "\n"                 \
"1:     ldrex   %0, [%4]\n"                                             \
"       " #asm_op "     %1, %0, %5\n"                                   \
"       strex   %2, %1, [%4]\n"                                         \
"       teq     %2, #0\n"                                               \
"       bne     1b"                                                     \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)  \
        : "r" (&v->counter), "Ir" (i)                                   \
        : "cc");                                                        \
                                                                        \
        return result;                                                  \
}
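
To make the macro machinery concrete, expanding ATOMIC_OP(add, +=, add) (instantiated via ATOMIC_OPS further down) yields, mechanically, roughly the following; shown for illustration only:

/* Approximate expansion of ATOMIC_OP(add, +=, add), for illustration. */
static inline void atomic_add(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        prefetchw(&v->counter);
        __asm__ __volatile__("@ atomic_add\n"
"1:     ldrex   %0, [%3]\n"             /* load-exclusive the counter */
"       add     %0, %0, %4\n"           /* apply the operation */
"       strex   %1, %0, [%3]\n"         /* attempt the store-exclusive */
"       teq     %1, #0\n"               /* 0 on success, 1 if we lost the race */
"       bne     1b"                     /* retry until the update lands */
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
}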


#define atomic_add_return_relaxed	atomic_add_return_relaxed

#define atomic_sub_return_relaxed	atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed

#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed


#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed

#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed

#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed

#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed


static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
        int oldval;
        unsigned long res;

        prefetchw(&ptr->counter);

        do {
                __asm__ __volatile__("@ atomic_cmpxchg\n"
                "ldrex  %1, [%3]\n"
                "mov    %0, #0\n"
                "teq    %1, %4\n"
                "strexeq %0, %5, [%3]\n"
                    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                    : "r" (&ptr->counter), "Ir" (old), "r" (new)
                    : "cc");
        } while (res);

        return oldval;
}

Contributors

Person            Tokens  Prop     Commits  CommitProp
Nicholas Piggin   31      67.39%   1        16.67%
Will Deacon       10      21.74%   3        50.00%
Chen Gang S       3       6.52%    1        16.67%
Russell King      2       4.35%    1        16.67%
Total             46      100.00%  6        100.00%

#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
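
A hedged usage sketch of the compare-and-swap primitive; the names `state', `IDLE' and `BUSY' are illustrative, not part of this header:

/* Claim a one-shot flag exactly once: only the caller that observes
 * IDLE as the old value wins the transition to BUSY. */
#define IDLE 0
#define BUSY 1

static atomic_t state = ATOMIC_INIT(IDLE);

static bool example_try_claim(void)
{
        return atomic_cmpxchg_relaxed(&state, IDLE, BUSY) == IDLE;
}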
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int oldval, newval;
        unsigned long tmp;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__ ("@ atomic_add_unless\n"
"1:     ldrex   %0, [%4]\n"
"       teq     %0, %5\n"
"       beq     2f\n"
"       add     %1, %0, %6\n"
"       strex   %2, %1, [%4]\n"
"       teq     %2, #0\n"
"       bne     1b\n"
"2:"
        : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");

        if (oldval != u)
                smp_mb();

        return oldval;
}

Contributors

Person       Tokens  Prop     Commits  CommitProp
Will Deacon  52      100.00%  1        100.00%
Total        52      100.00%  1        100.00%
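
In generic kernel code, helpers such as atomic_add_unless() and atomic_inc_not_zero() are built on __atomic_add_unless(); a refcount-style sketch under that assumption, with illustrative names:

/* Take a reference only while the object is still live (refcount != 0).
 * __atomic_add_unless() returns the old value, so a nonzero result
 * means the increment actually happened. */
static bool example_get_ref(atomic_t *refcount)
{
        return __atomic_add_unless(refcount, 1, 0) != 0;
}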

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define ATOMIC_OP(op, c_op, asm_op)                                     \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        raw_local_irq_save(flags);                                      \
        v->counter c_op i;                                              \
        raw_local_irq_restore(flags);                                   \
}                                                                       \

#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int atomic_##op##_return(int i, atomic_t *v)              \
{                                                                       \
        unsigned long flags;                                            \
        int val;                                                        \
                                                                        \
        raw_local_irq_save(flags);                                      \
        v->counter c_op i;                                              \
        val = v->counter;                                               \
        raw_local_irq_restore(flags);                                   \
                                                                        \
        return val;                                                     \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)                               \
static inline int atomic_fetch_##op(int i, atomic_t *v)                 \
{                                                                       \
        unsigned long flags;                                            \
        int val;                                                        \
                                                                        \
        raw_local_irq_save(flags);                                      \
        val = v->counter;                                               \
        v->counter c_op i;                                              \
        raw_local_irq_restore(flags);                                   \
                                                                        \
        return val;                                                     \
}
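
For contrast with the ARMv6 path, the same ATOMIC_OP(add, +=, add) instantiation expands here, mechanically, to a plain interrupt-disabled critical section; shown for illustration only:

/* Approximate UP expansion of ATOMIC_OP(add, +=, add): with a single
 * CPU and interrupts masked, the read-modify-write cannot be observed
 * half-done. */
static inline void atomic_add(int i, atomic_t *v)
{
        unsigned long flags;

        raw_local_irq_save(flags);
        v->counter += i;
        raw_local_irq_restore(flags);
}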
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        unsigned long flags;

        raw_local_irq_save(flags);
        ret = v->counter;
        if (likely(ret == old))
                v->counter = new;
        raw_local_irq_restore(flags);

        return ret;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Nicholas Piggin    56      96.55%   1        50.00%
Lennert Buytenhek  2       3.45%    1        50.00%
Total              58      100.00%  2        100.00%


static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
                c = old;

        return c;
}

Contributors

Person           Tokens  Prop     Commits  CommitProp
Nicholas Piggin  60      98.36%   1        50.00%
Arun Sharma      1       1.64%    1        50.00%
Total            61      100.00%  2        100.00%
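
The cmpxchg retry idiom above generalizes to arbitrary read-modify-write operations; for example, a hypothetical atomic maximum (not part of this header, and assuming the fully-ordered atomic_cmpxchg() wrapper is available):

/* Hypothetical: raise *v to at least `new' using the retry idiom. */
static inline int example_atomic_max(atomic_t *v, int new)
{
        int c, old;

        c = atomic_read(v);
        while (c < new && (old = atomic_cmpxchg(v, c, new)) != c)
                c = old;
        return c;
}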

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op)                                    \
        ATOMIC_OP(op, c_op, asm_op)                                     \
        ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
        ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)                                    \
        ATOMIC_OP(op, c_op, asm_op)                                     \
        ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, orr)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_inc(v)           atomic_add(1, v)
#define atomic_dec(v)           atomic_sub(1, v)

#define atomic_inc_and_test(v)  (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)  (atomic_sub_return(1, v) == 0)
#define atomic_inc_return_relaxed(v)    (atomic_add_return_relaxed(1, v))
#define atomic_dec_return_relaxed(v)    (atomic_sub_return_relaxed(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
        long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
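
A hedged sketch of the derived helpers in use; the structure, function, and the kfree()-based lifetime scheme are illustrative assumptions, not part of this header:

/* Drop a reference and free the (hypothetically kmalloc()ed) object
 * when the count hits zero. */
struct example_obj {
        atomic_t refs;
};

static void example_put(struct example_obj *obj)
{
        /* atomic_dec_and_test(v) is atomic_sub_return(1, v) == 0 */
        if (atomic_dec_and_test(&obj->refs))
                kfree(obj);
}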
static inline long long atomic64_read(const atomic64_t *v)
{
        long long result;

        __asm__ __volatile__("@ atomic64_read\n"
"       ldrd    %0, %H0, [%1]"
        : "=&r" (result)
        : "r" (&v->counter), "Qo" (v->counter)
        );

        return result;
}

Contributors

Person       Tokens  Prop     Commits  CommitProp
Will Deacon  19      82.61%   1        50.00%
Chen Gang S  4       17.39%   1        50.00%
Total        23      100.00%  2        100.00%


static inline void atomic64_set(atomic64_t *v, long long i)
{
        __asm__ __volatile__("@ atomic64_set\n"
"       strd    %2, %H2, [%1]"
        : "=Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        );
}

Contributors

Person       Tokens  Prop     Commits  CommitProp
Will Deacon  16      88.89%   1        50.00%
Chen Gang S  2       11.11%   1        50.00%
Total        18      100.00%  2        100.00%

#else
static inline long long atomic64_read(const atomic64_t *v)
{
        long long result;

        __asm__ __volatile__("@ atomic64_read\n"
"       ldrexd  %0, %H0, [%1]"
        : "=&r" (result)
        : "r" (&v->counter), "Qo" (v->counter)
        );

        return result;
}

Contributors

Person        Tokens  Prop     Commits  CommitProp
Will Deacon   18      78.26%   2        50.00%
Chen Gang S   4       17.39%   1        25.00%
Russell King  1       4.35%    1        25.00%
Total         23      100.00%  4        100.00%


static inline void atomic64_set(atomic64_t *v, long long i)
{
        long long tmp;

        prefetchw(&v->counter);
        __asm__ __volatile__("@ atomic64_set\n"
"1:     ldrexd  %0, %H0, [%2]\n"
"       strexd  %0, %3, %H3, [%2]\n"
"       teq     %0, #0\n"
"       bne     1b"
        : "=&r" (tmp), "=Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
}

Contributors

Person       Tokens  Prop     Commits  CommitProp
Will Deacon  26      86.67%   3        75.00%
Chen Gang S  4       13.33%   1        25.00%
Total        30      100.00%  4        100.00%

#endif

#define ATOMIC64_OP(op, op1, op2)                                       \
static inline void atomic64_##op(long long i, atomic64_t *v)            \
{                                                                       \
        long long result;                                               \
        unsigned long tmp;                                              \
                                                                        \
        prefetchw(&v->counter);                                         \
        __asm__ __volatile__("@ atomic64_" #op "\n"                     \
"1:     ldrexd  %0, %H0, [%3]\n"                                        \
"       " #op1 " %Q0, %Q0, %Q4\n"                                       \
"       " #op2 " %R0, %R0, %R4\n"                                       \
"       strexd  %1, %0, %H0, [%3]\n"                                    \
"       teq     %1, #0\n"                                               \
"       bne     1b"                                                     \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
        : "r" (&v->counter), "r" (i)                                    \
        : "cc");                                                        \
}                                                                       \

#define ATOMIC64_OP_RETURN(op, op1, op2)                                \
static inline long long                                                 \
atomic64_##op##_return_relaxed(long long i, atomic64_t *v)              \
{                                                                       \
        long long result;                                               \
        unsigned long tmp;                                              \
                                                                        \
        prefetchw(&v->counter);                                         \
                                                                        \
        __asm__ __volatile__("@ atomic64_" #op "_return\n"              \
"1:     ldrexd  %0, %H0, [%3]\n"                                        \
"       " #op1 " %Q0, %Q0, %Q4\n"                                       \
"       " #op2 " %R0, %R0, %R4\n"                                       \
"       strexd  %1, %0, %H0, [%3]\n"                                    \
"       teq     %1, #0\n"                                               \
"       bne     1b"                                                     \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
        : "r" (&v->counter), "r" (i)                                    \
        : "cc");                                                        \
                                                                        \
        return result;                                                  \
}

#define ATOMIC64_FETCH_OP(op, op1, op2)                                 \
static inline long long                                                 \
atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v)               \
{                                                                       \
        long long result, val;                                          \
        unsigned long tmp;                                              \
                                                                        \
        prefetchw(&v->counter);                                         \
                                                                        \
        __asm__ __volatile__("@ atomic64_fetch_" #op "\n"               \
"1:     ldrexd  %0, %H0, [%4]\n"                                        \
"       " #op1 " %Q1, %Q0, %Q5\n"                                       \
"       " #op2 " %R1, %R0, %R5\n"                                       \
"       strexd  %2, %1, %H1, [%4]\n"                                    \
"       teq     %2, #0\n"                                               \
"       bne     1b"                                                     \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)  \
        : "r" (&v->counter), "r" (i)                                    \
        : "cc");                                                        \
                                                                        \
        return result;                                                  \
}

#define ATOMIC64_OPS(op, op1, op2)                                      \
        ATOMIC64_OP(op, op1, op2)                                       \
        ATOMIC64_OP_RETURN(op, op1, op2)                                \
        ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define atomic64_add_return_relaxed     atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed     atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed      atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed      atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2)                                      \
        ATOMIC64_OP(op, op1, op2)                                       \
        ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define atomic64_fetch_and_relaxed      atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed   atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed       atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed      atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
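
The 64-bit variants operate on a register pair (%Q0 is the low word, %R0 the high word), so ATOMIC64_OPS(add, adds, adc) carries between the halves. A mechanical expansion of ATOMIC64_OP(add, adds, adc), for illustration only:

/* Approximate expansion: `adds' sets the carry flag on the low word,
 * `adc' consumes it for the high word, making the 64-bit add correct. */
static inline void atomic64_add(long long i, atomic64_t *v)
{
        long long result;
        unsigned long tmp;

        prefetchw(&v->counter);
        __asm__ __volatile__("@ atomic64_add\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       adds    %Q0, %Q0, %Q4\n"
"       adc     %R0, %R0, %R4\n"
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
}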
static inline long long
atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
{
        long long oldval;
        unsigned long res;

        prefetchw(&ptr->counter);

        do {
                __asm__ __volatile__("@ atomic64_cmpxchg\n"
                "ldrexd         %1, %H1, [%3]\n"
                "mov            %0, #0\n"
                "teq            %1, %4\n"
                "teqeq          %H1, %H4\n"
                "strexdeq       %0, %5, %H5, [%3]"
                : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                : "r" (&ptr->counter), "r" (old), "r" (new)
                : "cc");
        } while (res);

        return oldval;
}

Contributors

Person       Tokens  Prop     Commits  CommitProp
Will Deacon  42      84.00%   4        80.00%
Chen Gang S  8       16.00%   1        20.00%
Total        50      100.00%  5        100.00%

#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
        long long result;
        unsigned long tmp;

        prefetchw(&ptr->counter);

        __asm__ __volatile__("@ atomic64_xchg\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       strexd  %1, %4, %H4, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
        : "r" (&ptr->counter), "r" (new)
        : "cc");

        return result;
}

Contributors

Person       Tokens  Prop     Commits  CommitProp
Will Deacon  32      84.21%   4        80.00%
Chen Gang S  6       15.79%   1        20.00%
Total        38      100.00%  5        100.00%

#define atomic64_xchg_relaxed atomic64_xchg_relaxed
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
        long long result;
        unsigned long tmp;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       subs    %Q0, %Q0, #1\n"
"       sbc     %R0, %R0, #0\n"
"       teq     %R0, #0\n"
"       bmi     2f\n"
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b\n"
"2:"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter)
        : "cc");

        smp_mb();

        return result;
}

Contributors

Person           Tokens  Prop     Commits  CommitProp
Will Deacon      35      87.50%   2        50.00%
Chen Gang S      4       10.00%   1        25.00%
Victor Kamensky  1       2.50%    1        25.00%
Total            40      100.00%  4        100.00%
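
atomic64_dec_if_positive() only commits the decrement when the result stays non-negative, which suits counting-semaphore-style budgets; a sketch with hypothetical names:

/* Consume one slot from a 64-bit resource budget. Returns true if a
 * slot was taken; a negative return from the primitive means the
 * budget was already empty and nothing was stored. */
static bool example_take_slot(atomic64_t *budget)
{
        return atomic64_dec_if_positive(budget) >= 0;
}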


static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
        long long val;
        unsigned long tmp;
        int ret = 1;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__("@ atomic64_add_unless\n"
"1:     ldrexd  %0, %H0, [%4]\n"
"       teq     %0, %5\n"
"       teqeq   %H0, %H5\n"
"       moveq   %1, #0\n"
"       beq     2f\n"
"       adds    %Q0, %Q0, %Q6\n"
"       adc     %R0, %R0, %R6\n"
"       strexd  %2, %0, %H0, [%4]\n"
"       teq     %2, #0\n"
"       bne     1b\n"
"2:"
        : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");

        if (ret)
                smp_mb();

        return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Will Deacon4987.50%250.00%
Chen Gang S610.71%125.00%
Victor Kamensky11.79%125.00%
Total56100.00%4100.00%
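
As in the 32-bit case, the usual consumer is atomic64_inc_not_zero() (defined just below); a lookup-and-ref sketch with hypothetical names:

/* Only hand out the object if someone else still holds a reference;
 * atomic64_add_unless() refuses the increment once the count is 0. */
struct example_obj64 {
        atomic64_t refs;
};

static struct example_obj64 *example_lookup(struct example_obj64 *obj)
{
        if (!atomic64_inc_not_zero(&obj->refs))
                return NULL;    /* object already on its way out */
        return obj;
}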

#define atomic64_add_negative(a, v)     (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)                 atomic64_add(1LL, (v))
#define atomic64_inc_return_relaxed(v)  atomic64_add_return_relaxed(1LL, (v))
#define atomic64_inc_and_test(v)        (atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)     (atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)                 atomic64_sub(1LL, (v))
#define atomic64_dec_return_relaxed(v)  atomic64_sub_return_relaxed(1LL, (v))
#define atomic64_dec_and_test(v)        (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)        atomic64_add_unless((v), 1LL, 0LL)

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif

Overall Contributors

Person                    Tokens  Prop     Commits  CommitProp
Will Deacon               453     42.46%   8        21.62%
Peter Zijlstra            281     26.34%   4        10.81%
Nicholas Piggin           147     13.78%   2        5.41%
Russell King              66      6.19%    7        18.92%
Chen Gang S               43      4.03%    2        5.41%
Linus Torvalds (pre-git)  30      2.81%    4        10.81%
KaiGai Kohei              10      0.94%    1        2.70%
Catalin Marinas           10      0.94%    1        2.70%
Alexander Zarochentzev    9       0.84%    1        2.70%
David Howells             7       0.66%    1        2.70%
Matthew Wilcox            3       0.28%    1        2.70%
Victor Kamensky           2       0.19%    1        2.70%
Arun Sharma               2       0.19%    2        5.41%
Mathieu Desnoyers         2       0.19%    1        2.70%
Lennert Buytenhek         2       0.19%    1        2.70%
Total                     1067    100.00%  37       100.00%
Created with cregit.