cregit-Linux how code gets into the kernel

Release 4.11 arch/arm/include/asm/spinlock.h

#ifndef __ASM_SPINLOCK_H

#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <linux/prefetch.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#ifdef CONFIG_THUMB2_KERNEL
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).   By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */

#define WFE(cond)	__ALT_SMP_ASM(          \
        "it " cond "\n\t"                       \
        "wfe" cond ".n",                        \
                                                \
        "nop.w"                                 \
)
#else

#define WFE(cond)	__ALT_SMP_ASM("wfe" cond, "nop")
#endif


#define SEV		__ALT_SMP_ASM(WASM(sev), WASM(nop))


static inline void dsb_sev(void) { dsb(ishst); __asm__(SEV); }

Contributors

PersonTokensPropCommitsCommitProp
Rabin Vincent960.00%150.00%
Will Deacon640.00%150.00%
Total15100.00%2100.00%

/*
 * ARMv6 ticket-based spin-locking.
 *
 * A memory barrier is required after we get a lock, and before we
 * release it, because V6 CPUs are assumed to have weakly ordered
 * memory.
 */
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) { u16 owner = READ_ONCE(lock->tickets.owner); for (;;) { arch_spinlock_t tmp = READ_ONCE(*lock); if (tmp.tickets.owner == tmp.tickets.next || tmp.tickets.owner != owner) break; wfe(); } smp_acquire__after_ctrl_dep(); }

Contributors

PersonTokensPropCommitsCommitProp
Peter Zijlstra6494.12%125.00%
Russell King22.94%125.00%
Thomas Gleixner11.47%125.00%
Ingo Molnar11.47%125.00%
Total68100.00%4100.00%

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; u32 newval; arch_spinlock_t lockval; prefetchw(&lock->slock); __asm__ __volatile__( "1: ldrex %0, [%3]\n" " add %1, %0, %4\n" " strex %2, %1, [%3]\n" " teq %2, #0\n" " bne 1b" : "=&r" (lockval), "=&r" (newval), "=&r" (tmp) : "r" (&lock->slock), "I" (1 << TICKET_SHIFT) : "cc"); while (lockval.tickets.next != lockval.tickets.owner) { wfe(); lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner); } smp_mb(); }

Contributors

PersonTokensPropCommitsCommitProp
Will Deacon4971.01%233.33%
Russell King1826.09%233.33%
Thomas Gleixner22.90%233.33%
Total69100.00%6100.00%


static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned long contended, res; u32 slock; prefetchw(&lock->slock); do { __asm__ __volatile__( " ldrex %0, [%3]\n" " mov %2, #0\n" " subs %1, %0, %0, ror #16\n" " addeq %0, %0, %4\n" " strexeq %2, %0, [%3]" : "=&r" (slock), "=&r" (contended), "=&r" (res) : "r" (&lock->slock), "I" (1 << TICKET_SHIFT) : "cc"); } while (res); if (!contended) { smp_mb(); return 1; } else { return 0; } }

Contributors

PersonTokensPropCommitsCommitProp
Russell King3153.45%225.00%
Will Deacon2543.10%450.00%
Thomas Gleixner23.45%225.00%
Total58100.00%8100.00%


static inline void arch_spin_unlock(arch_spinlock_t *lock) { smp_mb(); lock->tickets.owner++; dsb_sev(); }

Contributors

PersonTokensPropCommitsCommitProp
Russell King1250.00%233.33%
Will Deacon729.17%116.67%
Rabin Vincent312.50%116.67%
Thomas Gleixner28.33%233.33%
Total24100.00%6100.00%


static inline int arch_spin_value_unlocked(arch_spinlock_t lock) { return lock.tickets.owner == lock.tickets.next; }

Contributors

PersonTokensPropCommitsCommitProp
Will Deacon23100.00%2100.00%
Total23100.00%2100.00%


static inline int arch_spin_is_locked(arch_spinlock_t *lock) { return !arch_spin_value_unlocked(READ_ONCE(*lock)); }

Contributors

PersonTokensPropCommitsCommitProp
Will Deacon2195.45%150.00%
Christian Bornträger14.55%150.00%
Total22100.00%2100.00%


static inline int arch_spin_is_contended(arch_spinlock_t *lock) { struct __raw_tickets tickets = READ_ONCE(lock->tickets); return (tickets.next - tickets.owner) > 1; }

Contributors

PersonTokensPropCommitsCommitProp
Will Deacon3497.14%150.00%
Christian Bornträger12.86%150.00%
Total35100.00%2100.00%

#define arch_spin_is_contended arch_spin_is_contended /* * RWLOCKS * * * Write locks are easy - we just set bit 31. When unlocking, we can * just write zero since the lock is exclusively held. */
static inline void arch_write_lock(arch_rwlock_t *rw) { unsigned long tmp; prefetchw(&rw->lock); __asm__ __volatile__( "1: ldrex %0, [%1]\n" " teq %0, #0\n" WFE("ne") " strexeq %0, %2, [%1]\n" " teq %0, #0\n" " bne 1b" : "=&r" (tmp) : "r" (&rw->lock), "r" (0x80000000) : "cc"); smp_mb(); }

Contributors

PersonTokensPropCommitsCommitProp
Russell King1965.52%350.00%
Will Deacon827.59%116.67%
Thomas Gleixner26.90%233.33%
Total29100.00%6100.00%


static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned long contended, res; prefetchw(&rw->lock); do { __asm__ __volatile__( " ldrex %0, [%2]\n" " mov %1, #0\n" " teq %0, #0\n" " strexeq %1, %3, [%2]" : "=&r" (contended), "=&r" (res) : "r" (&rw->lock), "r" (0x80000000) : "cc"); } while (res); if (!contended) { smp_mb(); return 1; } else { return 0; } }

Contributors

PersonTokensPropCommitsCommitProp
Russell King3156.36%233.33%
Will Deacon2240.00%233.33%
Thomas Gleixner23.64%233.33%
Total55100.00%6100.00%


static inline void arch_write_unlock(arch_rwlock_t *rw) { smp_mb(); __asm__ __volatile__( "str %1, [%0]\n" : : "r" (&rw->lock), "r" (0) : "cc"); dsb_sev(); }

Contributors

PersonTokensPropCommitsCommitProp
Russell King1470.00%240.00%
Rabin Vincent420.00%120.00%
Thomas Gleixner210.00%240.00%
Total20100.00%5100.00%

/* write_can_lock - would write_trylock() succeed? */ #define arch_write_can_lock(x) (ACCESS_ONCE((x)->lock) == 0) /* * Read locks are a bit more hairy: * - Exclusively load the lock value. * - Increment it. * - Store new lock value if positive, and we still own this location. * If the value is negative, we've already failed. * - If we failed to store the value, we want a negative result. * - If we failed, try again. * Unlocking is similarly hairy. We may have multiple read locks * currently active. However, we know we won't have any write * locks. */
static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned long tmp, tmp2; prefetchw(&rw->lock); __asm__ __volatile__( "1: ldrex %0, [%2]\n" " adds %0, %0, #1\n" " strexpl %1, %0, [%2]\n" WFE("mi") " rsbpls %0, %1, #0\n" " bmi 1b" : "=&r" (tmp), "=&r" (tmp2) : "r" (&rw->lock) : "cc"); smp_mb(); }

Contributors

PersonTokensPropCommitsCommitProp
Russell King2167.74%350.00%
Will Deacon825.81%116.67%
Thomas Gleixner26.45%233.33%
Total31100.00%6100.00%


static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned long tmp, tmp2; smp_mb(); prefetchw(&rw->lock); __asm__ __volatile__( "1: ldrex %0, [%2]\n" " sub %0, %0, #1\n" " strex %1, %0, [%2]\n" " teq %1, #0\n" " bne 1b" : "=&r" (tmp), "=&r" (tmp2) : "r" (&rw->lock) : "cc"); if (tmp == 0) dsb_sev(); }

Contributors

PersonTokensPropCommitsCommitProp
Russell King2050.00%342.86%
Rabin Vincent1025.00%114.29%
Will Deacon820.00%114.29%
Thomas Gleixner25.00%228.57%
Total40100.00%7100.00%


static inline int arch_read_trylock(arch_rwlock_t *rw) { unsigned long contended, res; prefetchw(&rw->lock); do { __asm__ __volatile__( " ldrex %0, [%2]\n" " mov %1, #0\n" " adds %0, %0, #1\n" " strexpl %1, %0, [%2]" : "=&r" (contended), "=&r" (res) : "r" (&rw->lock) : "cc"); } while (res); /* If the lock is negative, then it is already held for write. */ if (contended < 0x80000000) { smp_mb(); return 1; } else { return 0; } }

Contributors

PersonTokensPropCommitsCommitProp
Will Deacon3459.65%228.57%
Russell King1831.58%114.29%
Thomas Gleixner23.51%228.57%
Ingo Molnar23.51%114.29%
Catalin Marinas11.75%114.29%
Total57100.00%7100.00%

/* read_can_lock - would read_trylock() succeed? */ #define arch_read_can_lock(x) (ACCESS_ONCE((x)->lock) < 0x80000000) #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) #define arch_spin_relax(lock) cpu_relax() #define arch_read_relax(lock) cpu_relax() #define arch_write_relax(lock) cpu_relax() #endif /* __ASM_SPINLOCK_H */

Overall Contributors

PersonTokensPropCommitsCommitProp
Will Deacon25938.54%929.03%
Russell King21531.99%516.13%
Peter Zijlstra7010.42%13.23%
Thomas Gleixner304.46%412.90%
Rabin Vincent263.87%13.23%
Martin Schwidefsky182.68%13.23%
Robin Holt142.08%13.23%
Catalin Marinas131.93%26.45%
Linus Torvalds (pre-git)111.64%13.23%
Andrew Morton71.04%13.23%
Ingo Molnar40.60%26.45%
Marc Zyngier20.30%13.23%
Christian Bornträger20.30%13.23%
Dave P Martin10.15%13.23%
Total672100.00%31100.00%
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.