
Release 4.14 arch/ia64/include/asm/spinlock.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_SPINLOCK_H
#define _ASM_IA64_SPINLOCK_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * This file is used for SMP configurations only.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/bitops.h>

#include <linux/atomic.h>
#include <asm/intrinsics.h>
#include <asm/barrier.h>
#include <asm/processor.h>


#define arch_spin_lock_init(x)			((x)->lock = 0)

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 * The pad bits in the middle are used to prevent the next_ticket number
 * overflowing into the now_serving number.
 *
 *   31             17  16    15  14                    0
 *  +----------------------------------------------------+
 *  |  now_serving     | padding |   next_ticket         |
 *  +----------------------------------------------------+
 */


#define TICKET_SHIFT	17
#define TICKET_BITS	15
#define	TICKET_MASK	((1 << TICKET_BITS) - 1)
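
As an illustration of the layout above, the helpers below (hypothetical names, not part of this header) show how the two fields unpack from one 32-bit lock word, and why the xor-and-mask test used throughout this file is zero exactly when it is the caller's turn:

/* Illustrative only -- these helpers do not exist in the kernel source. */
static inline int demo_next_ticket(int word)
{
	return word & TICKET_MASK;			/* bits 0..14  */
}

static inline int demo_now_serving(int word)
{
	return (word >> TICKET_SHIFT) & TICKET_MASK;	/* bits 17..31 */
}

/*
 * The lock paths test (((w >> TICKET_SHIFT) ^ t) & TICKET_MASK) == 0:
 * the xor cancels in the low 15 bits exactly when the now_serving field
 * of w equals the ticket number held in the low bits of t.
 */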


static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	int *p = (int *)&lock->lock, ticket, serve;

	/* Atomically take the next ticket; the old lock word is returned. */
	ticket = ia64_fetchadd(1, p, acq);

	/* Fast path: now_serving already equals our ticket. */
	if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
		return;

	ia64_invala();

	for (;;) {
		/* ld4.c.nc: speculative re-load of the lock word. */
		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory");

		if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
			return;
		cpu_relax();
	}
}

Contributors

Person                    Tokens  Prop     Commits  CommitProp
Tony Luck                 81      89.01%   2        40.00%
David Mosberger-Tang      9       9.89%    2        40.00%
Thomas Gleixner           1       1.10%    1        20.00%
Total                     91      100.00%  5        100.00%
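
For readers unfamiliar with the ia64 intrinsics, here is a minimal portable sketch of the same ticket-lock idea in C11 atomics (illustrative only: it keeps two separate counters instead of ia64's packed word, so it needs no padding trick):

#include <stdatomic.h>

struct demo_ticket_lock {
	atomic_uint next_ticket;	/* tail: next ticket handed out  */
	atomic_uint now_serving;	/* head: ticket currently served */
};

static void demo_lock(struct demo_ticket_lock *l)
{
	/* Take a ticket atomically, like ia64_fetchadd(1, p, acq) above. */
	unsigned int t = atomic_fetch_add_explicit(&l->next_ticket, 1,
						   memory_order_relaxed);

	/* Spin until the head reaches our ticket. */
	while (atomic_load_explicit(&l->now_serving,
				    memory_order_acquire) != t)
		;	/* a cpu_relax() equivalent belongs here */
}

static void demo_unlock(struct demo_ticket_lock *l)
{
	/* Advance the head, handing the lock to the next waiter in line. */
	atomic_fetch_add_explicit(&l->now_serving, 1, memory_order_release);
}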


static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->lock);

	/* Only attempt the cmpxchg if the lock looks free right now. */
	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
		return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
	return 0;
}

Contributors

Person                    Tokens  Prop     Commits  CommitProp
Tony Luck                 56      86.15%   2        50.00%
David Mosberger-Tang      8       12.31%   1        25.00%
Thomas Gleixner           1       1.54%    1        25.00%
Total                     65      100.00%  4        100.00%


static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	/* The halfword at byte offset 2 holds pad (bit 0) and now_serving (bits 1..15). */
	unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;

	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
	/* +2 increments now_serving by one; the mask keeps the pad bit clear. */
	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
}

Contributors

Person                    Tokens  Prop     Commits  CommitProp
Tony Luck                 44      91.67%   2        50.00%
Andrew Morton             3       6.25%    1        25.00%
Thomas Gleixner           1       2.08%    1        25.00%
Total                     48      100.00%  4        100.00%
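
To see why the store above is (tmp + 2) & ~1: the halfword at byte offset 2 holds a padding bit in bit 0 and now_serving in bits 1..15 (ia64 Linux runs little-endian), so adding 2 bumps now_serving by one while the mask keeps the padding bit clear. The same update spelled out on the full 32-bit word, with a hypothetical helper name:

/* Illustrative only: equivalent now_serving increment on the full word. */
static inline unsigned int demo_serve_next(unsigned int word)
{
	unsigned int serving = (word >> TICKET_SHIFT) & TICKET_MASK;

	serving = (serving + 1) & TICKET_MASK;	/* wrap at 15 bits        */
	return (word & TICKET_MASK)		/* keep next_ticket       */
	     | (serving << TICKET_SHIFT);	/* new now_serving, pad 0 */
}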


static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	long tmp = ACCESS_ONCE(lock->lock);

	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}

Contributors

Person                    Tokens  Prop     Commits  CommitProp
Tony Luck                 30      78.95%   2        50.00%
Christoph Lameter         7       18.42%   1        25.00%
Thomas Gleixner           1       2.63%    1        25.00%
Total                     38      100.00%  4        100.00%


static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
	long tmp = ACCESS_ONCE(lock->lock);

	/* next_ticket - now_serving > 1 means at least one waiter is queued. */
	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}

Contributors

Person                    Tokens  Prop     Commits  CommitProp
Tony Luck                 37      97.37%   2        66.67%
Thomas Gleixner           1       2.63%    1        33.33%
Total                     38      100.00%  3        100.00%


static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return !(((lock.lock >> TICKET_SHIFT) ^ lock.lock) & TICKET_MASK);
}

Contributors

Person                    Tokens  Prop     Commits  CommitProp
Tony Luck                 30      100.00%  1        100.00%
Total                     30      100.00%  1        100.00%


static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

Contributors

Person                    Tokens  Prop     Commits  CommitProp
Tony Luck                 15      88.24%   1        33.33%
Thomas Gleixner           2       11.76%   2        66.67%
Total                     17      100.00%  3        100.00%


static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}

Contributors

Person                    Tokens  Prop     Commits  CommitProp
Tony Luck                 12      70.59%   1        25.00%
Andrew Morton             3       17.65%   1        25.00%
Thomas Gleixner           2       11.76%   2        50.00%
Total                     17      100.00%  4        100.00%

#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

Contributors

Person                    Tokens  Prop     Commits  CommitProp
Tony Luck                 12      75.00%   1        25.00%
Thomas Gleixner           2       12.50%   2        50.00%
David Mosberger-Tang      2       12.50%   1        25.00%
Total                     16      100.00%  4        100.00%


static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

Contributors

Person                    Tokens  Prop     Commits  CommitProp
Tony Luck                 13      76.47%   1        25.00%
Christoph Lameter         2       11.76%   1        25.00%
Thomas Gleixner           2       11.76%   2        50.00%
Total                     17      100.00%  4        100.00%


static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

Contributors

Person                    Tokens  Prop     Commits  CommitProp
Tony Luck                 12      75.00%   1        25.00%
Linus Torvalds (pre-git)  2       12.50%   1        25.00%
Thomas Gleixner           2       12.50%   2        50.00%
Total                     16      100.00%  4        100.00%


static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						 unsigned long flags)
{
	arch_spin_lock(lock);
}

Contributors

Person                    Tokens  Prop     Commits  CommitProp
Tony Luck                 15      75.00%   1        25.00%
Thomas Gleixner           3       15.00%   2        50.00%
Linus Torvalds (pre-git)  2       10.00%   1        25.00%
Total                     20      100.00%  4        100.00%

#define arch_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
#define arch_write_can_lock(rw)	(*(volatile int *)(rw) == 0)

#ifdef ASM_SUPPORTED
static __always_inline void
arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
	/*
	 * Spin as a reader.  While a writer holds the lock (word < 0),
	 * back out of the count and, if flags had PSR.I set, re-enable
	 * interrupts while waiting.
	 */
	__asm__ __volatile__ (
		"tbit.nz p6, p0 = %1,%2\n"
		"br.few 3f\n"
		"1:\n"
		"fetchadd4.rel r2 = [%0], -1;;\n"
		"(p6) ssm psr.i\n"
		"2:\n"
		"hint @pause\n"
		"ld4 r2 = [%0];;\n"
		"cmp4.lt p7,p0 = r2, r0\n"
		"(p7) br.cond.spnt.few 2b\n"
		"(p6) rsm psr.i\n"
		";;\n"
		"3:\n"
		"fetchadd4.acq r2 = [%0], 1;;\n"
		"cmp4.lt p7,p0 = r2, r0\n"
		"(p7) br.cond.spnt.few 1b\n"
		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
		: "p6", "p7", "r2", "memory");
}

Contributors

Person                    Tokens  Prop     Commits  CommitProp
Robin Holt                16      88.89%   1        33.33%
Thomas Gleixner           2       11.11%   2        66.67%
Total                     18      100.00%  3        100.00%

#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)

#else /* !ASM_SUPPORTED */

#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)

#define arch_read_lock(rw)							\
do {										\
	arch_rwlock_t *__read_lock_ptr = (rw);					\
										\
	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {	\
		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);		\
		while (*(volatile int *)__read_lock_ptr < 0)			\
			cpu_relax();						\
	}									\
} while (0)

#endif /* !ASM_SUPPORTED */

#define arch_read_unlock(rw)					\
do {								\
	arch_rwlock_t *__read_lock_ptr = (rw);			\
	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
} while (0)

#ifdef ASM_SUPPORTED
static __always_inline void
arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
	/*
	 * Spin as the writer: wait for the word to reach 0, then try to
	 * cmpxchg in the write_lock bit (bit 31, built by the dep
	 * instruction).  Interrupts are re-enabled (per flags) while
	 * waiting.
	 */
	__asm__ __volatile__ (
		"tbit.nz p6, p0 = %1, %2\n"
		"mov ar.ccv = r0\n"
		"dep r29 = -1, r0, 31, 1\n"
		"br.few 3f;;\n"
		"1:\n"
		"(p6) ssm psr.i\n"
		"2:\n"
		"hint @pause\n"
		"ld4 r2 = [%0];;\n"
		"cmp4.eq p0,p7 = r0, r2\n"
		"(p7) br.cond.spnt.few 2b\n"
		"(p6) rsm psr.i\n"
		";;\n"
		"3:\n"
		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"
		"cmp4.eq p0,p7 = r0, r2\n"
		"(p7) br.cond.spnt.few 1b;;\n"
		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
}

Contributors

Person                    Tokens  Prop     Commits  CommitProp
Robin Holt                16      88.89%   1        33.33%
Thomas Gleixner           2       11.11%   2        66.67%
Total                     18      100.00%  3        100.00%

#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)

#define arch_write_trylock(rw)						\
({									\
	register long result;						\
									\
	__asm__ __volatile__ (						\
		"mov ar.ccv = r0\n"					\
		"dep r29 = -1, r0, 31, 1;;\n"				\
		"cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"			\
		: "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");	\
	(result == 0);							\
})
static inline void arch_write_unlock(arch_rwlock_t *x)
{
	u8 *y = (u8 *)x;
	barrier();
	/* Release by clearing byte 3, which holds write_lock (bit 31). */
	asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory" );
}

Contributors

Person                    Tokens  Prop     Commits  CommitProp
Christoph Lameter         25      92.59%   1        33.33%
Thomas Gleixner           2       7.41%    2        66.67%
Total                     27      100.00%  3        100.00%

#else /* !ASM_SUPPORTED */

#define arch_write_lock_flags(l, flags) arch_write_lock(l)

#define arch_write_lock(l)							\
({										\
	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);		\
	__u32 *ia64_write_lock_ptr = (__u32 *) (l);				\
	do {									\
		while (*ia64_write_lock_ptr)					\
			ia64_barrier();						\
		ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0); \
	} while (ia64_val);							\
})

#define arch_write_trylock(rw)						\
({									\
	__u64 ia64_val;							\
	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
	ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);	\
	(ia64_val == 0);						\
})
static inline void arch_write_unlock(arch_rwlock_t *x)
{
	barrier();
	x->write_lock = 0;
}

Contributors

Person                    Tokens  Prop     Commits  CommitProp
Christoph Lameter         18      90.00%   1        33.33%
Thomas Gleixner           2       10.00%   2        66.67%
Total                     20      100.00%  3        100.00%

#endif /* !ASM_SUPPORTED */
static inline int arch_read_trylock(arch_rwlock_t *x)
{
	union {
		arch_rwlock_t lock;
		__u32 word;
	} old, new;
	/*
	 * Snapshot the lock with write_lock cleared; succeed only if the
	 * word is still writer-free and unchanged when we cmpxchg in the
	 * incremented read_counter.
	 */
	old.lock = new.lock = *x;
	old.lock.write_lock = new.lock.write_lock = 0;
	++new.lock.read_counter;
	return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
}

Contributors

Person                    Tokens  Prop     Commits  CommitProp
Keith Owens               78      93.98%   1        25.00%
Thomas Gleixner           3       3.61%    2        50.00%
Ingo Molnar               2       2.41%    1        25.00%
Total                     83      100.00%  4        100.00%
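
The union above depends on the rwlock layout declared in arch/ia64/include/asm/spinlock_types.h, reproduced here for reference:

/*
 *	typedef struct {
 *		volatile unsigned int read_counter : 31;	// bits 0..30
 *		volatile unsigned int write_lock   : 1;		// bit 31
 *	} arch_rwlock_t;
 *
 * write_lock occupies the sign bit of the 32-bit word, so a negative
 * word means "writer present".  That is why arch_read_can_lock() tests
 * the word >= 0 and the non-ASM read path backs off whenever
 * ia64_fetchadd() returns a value < 0.
 */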

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* _ASM_IA64_SPINLOCK_H */

Overall Contributors

Person                    Tokens  Prop     Commits  CommitProp
Tony Luck                 368     48.42%   3        10.00%
Keith Owens               78      10.26%   1        3.33%
Robin Holt                60      7.89%    1        3.33%
David Mosberger-Tang      56      7.37%    6        20.00%
Christoph Lameter         52      6.84%    1        3.33%
Thomas Gleixner           52      6.84%    4        13.33%
Linus Torvalds (pre-git)  44      5.79%    4        13.33%
Martin Schwidefsky        18      2.37%    1        3.33%
Linus Torvalds            12      1.58%    1        3.33%
Peter Zijlstra            6       0.79%    1        3.33%
Andrew Morton             6       0.79%    1        3.33%
Suresh B. Siddha          3       0.39%    2        6.67%
Ingo Molnar               2       0.26%    1        3.33%
Greg Kroah-Hartman        1       0.13%    1        3.33%
Arun Sharma               1       0.13%    1        3.33%
Jiri Slaby                1       0.13%    1        3.33%
Total                     760     100.00%  30       100.00%