cregit-Linux: how code gets into the kernel

Release 4.14 arch/parisc/include/asm/atomic.h

/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */

#  define ATOMIC_HASH_SIZE 4

#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
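
To make the hashing concrete, here is a small stand-alone sketch of the same index computation. The L1_CACHE_BYTES value of 16 and the sample addresses are illustrative assumptions, not parisc's actual configuration; the real constants come from <asm/cache.h>.

#include <stdio.h>

/* Assumed values for illustration only; see <asm/cache.h> for the real ones. */
#define L1_CACHE_BYTES	 16
#define ATOMIC_HASH_SIZE 4
#define ATOMIC_HASH_IDX(a) \
	((((unsigned long)(a)) / L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE - 1))

int main(void)
{
	unsigned long addrs[] = { 0x1000, 0x100c, 0x1010, 0x1040 };

	for (int i = 0; i < 4; i++)
		printf("address 0x%lx -> lock index %lu\n",
		       addrs[i], ATOMIC_HASH_IDX(addrs[i]));
	/* 0x1000 and 0x100c share a cacheline and therefore a lock;
	 * 0x1010 is the next line (lock 1); 0x1040 wraps back to lock 0. */
	return 0;
}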

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */

#define _atomic_spin_lock_irqsave(l,f) do {    \
        arch_spinlock_t *s = ATOMIC_HASH(l);            \
        local_irq_save(f);                      \
        arch_spin_lock(s);                      \
} while(0)


#define _atomic_spin_unlock_irqrestore(l,f) do {    \
        arch_spinlock_t *s = ATOMIC_HASH(l);                    \
        arch_spin_unlock(s);                            \
        local_irq_restore(f);                           \
} while(0)


#else

#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)

#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */


static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);
	v->counter = i;
	_atomic_spin_unlock_irqrestore(v, flags);
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Linus Torvalds (pre-git)  35      92.11%   1        33.33%
Matthew Wilcox            3       7.89%    2        66.67%
Total                     38      100.00%  3        100.00%

#define atomic_set_release(v, i) atomic_set((v), (i))
static __inline__ int atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Linus Torvalds (pre-git)  15      68.18%   1        20.00%
Matthew Wilcox            2       9.09%    1        20.00%
Pranith Kumar             2       9.09%    1        20.00%
Anton Blanchard           2       9.09%    1        20.00%
Peter Zijlstra            1       4.55%    1        20.00%
Total                     22      100.00%  5        100.00%

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Mathieu Desnoyers         74      90.24%   1        33.33%
Nicholas Piggin           7       8.54%    1        33.33%
Arun Sharma               1       1.22%    1        33.33%
Total                     82      100.00%  3        100.00%
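
A hypothetical caller, to show why returning the old value is useful: the generic atomic_inc_not_zero() pattern takes a reference only while the count is non-zero, and can tell from the return value whether the add happened. example_get_ref() is an illustrative name, not part of this header.

/* Illustrative only: bump a refcount unless it has already hit zero.
 * __atomic_add_unless() returns the old value, so a result equal to
 * @u (here 0) means the add was skipped. */
static inline int example_get_ref(atomic_t *refcount)
{
	return __atomic_add_unless(refcount, 1, 0) != 0;
}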

#define ATOMIC_OP(op, c_op)						\
static __inline__ void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)					\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_inc(v)	(atomic_add(   1,(v)))
#define atomic_dec(v)	(atomic_add(  -1,(v)))

#define atomic_inc_return(v)	(atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	{ (i) }

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)						\
static __inline__ void atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC64_OP_RETURN(op, c_op)					\
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
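
To see what these generators produce, here is ATOMIC64_OPS(add, +=) expanded by hand through the three macros above (whitespace adjusted; the 32-bit ATOMIC_OPS generators expand the same way with int and atomic_t). This is a sketch of the expansion, not additional code in the header:

static __inline__ void atomic64_add(s64 i, atomic64_t *v)
{
	unsigned long flags;

	/* take the hashed per-cacheline lock with interrupts off */
	_atomic_spin_lock_irqsave(v, flags);
	v->counter += i;
	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64 atomic64_add_return(s64 i, atomic64_t *v)
{
	unsigned long flags;
	s64 ret;

	_atomic_spin_lock_irqsave(v, flags);
	ret = (v->counter += i);	/* returns the new value */
	_atomic_spin_unlock_irqrestore(v, flags);

	return ret;
}

static __inline__ s64 atomic64_fetch_add(s64 i, atomic64_t *v)
{
	unsigned long flags;
	s64 ret;

	_atomic_spin_lock_irqsave(v, flags);
	ret = v->counter;		/* returns the old value */
	v->counter += i;
	_atomic_spin_unlock_irqrestore(v, flags);

	return ret;
}
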
static __inline__ void atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);
	v->counter = i;
	_atomic_spin_unlock_irqrestore(v, flags);
}

Contributors

Person         Tokens  Prop     Commits  Commit Prop
Kyle McMartin  38      100.00%  1        100.00%
Total          38      100.00%  1        100.00%


static __inline__ s64 atomic64_read(const atomic64_t *v)
{
	return ACCESS_ONCE((v)->counter);
}

Contributors

Person           Tokens  Prop     Commits  Commit Prop
Kyle McMartin    17      77.27%   1        33.33%
Pranith Kumar    3       13.64%   1        33.33%
Anton Blanchard  2       9.09%    1        33.33%
Total            22      100.00%  3        100.00%

#define atomic64_inc(v)		(atomic64_add(   1,(v)))
#define atomic64_dec(v)		(atomic64_add(  -1,(v)))

#define atomic64_inc_return(v)		(atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

Contributors

Person             Tokens  Prop     Commits  Commit Prop
Mathieu Desnoyers  86      100.00%  2        100.00%
Total              86      100.00%  2        100.00%
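
Note that, unlike the 32-bit __atomic_add_unless() above, this helper returns a truth value rather than the old counter, which is what lets the atomic64_inc_not_zero() wrapper just below use it unmodified. A hypothetical caller (example_get_ref64 is an illustrative name, not part of this header):

/* Illustrative only: take a 64-bit reference while the count is non-zero. */
static inline int example_get_ref64(atomic64_t *refcount)
{
	return atomic64_add_unless(refcount, 1, 0);	/* non-zero on success */
}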

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;
	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

Contributors

Person        Tokens  Prop     Commits  Commit Prop
Helge Deller  78      100.00%  1        100.00%
Total         78      100.00%  1        100.00%
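
Because the return value is the old count minus one even when no decrement happened, a caller can test its sign to learn whether it actually consumed a unit. A hypothetical semaphore-style user (example_take_slot is an illustrative name, not part of this header):

/* Illustrative only: claim one slot if any remain; never goes below zero. */
static inline int example_take_slot(atomic64_t *slots)
{
	return atomic64_dec_if_positive(slots) >= 0;	/* 1 if a slot was taken */
}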

#endif /* CONFIG_64BIT */

#endif /* _ASM_PARISC_ATOMIC_H_ */

Overall Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Mathieu Desnoyers         190     22.84%   2        5.88%
Peter Zijlstra            172     20.67%   6        17.65%
Linus Torvalds (pre-git)  145     17.43%   1        2.94%
Kyle McMartin             143     17.19%   3        8.82%
Helge Deller              80      9.62%    2        5.88%
Matthew Wilcox            36      4.33%    5        14.71%
Nicholas Piggin           17      2.04%    2        5.88%
Andrew Morton             17      2.04%    2        5.88%
Ingo Molnar               10      1.20%    2        5.88%
Pranith Kumar             5       0.60%    1        2.94%
Anton Blanchard           4       0.48%    1        2.94%
Thomas Gleixner           3       0.36%    2        5.88%
Arun Sharma               3       0.36%    1        2.94%
Paul Gortmaker            3       0.36%    1        2.94%
Mel Gorman                2       0.24%    1        2.94%
Greg Kroah-Hartman        1       0.12%    1        2.94%
James Bottomley           1       0.12%    1        2.94%
Total                     832     100.00%  34       100.00%
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.