cregit-Linux: how code gets into the kernel

Release 4.7: arch/sh/include/asm/mutex-llsc.h

/*
 * arch/sh/include/asm/mutex-llsc.h
 *
 * SH-4A optimized mutex locking primitives
 *
 * Please look into asm-generic/mutex-xchg.h for a formal definition.
 */
#ifndef __ASM_SH_MUTEX_LLSC_H
#define __ASM_SH_MUTEX_LLSC_H

/*
 * Attempting to lock a mutex on SH-4A is done much as on ARMv6+ architectures,
 * with a bastardized atomic decrement (it is not a reliable atomic decrement,
 * but it satisfies the defined semantics for our purpose, while being
 * smaller and faster than a real atomic decrement or atomic swap).
 * The idea is to attempt decrementing the lock value only once. If, once
 * decremented, it isn't zero, or if its store-back fails due to a dispute
 * on the exclusive store, we simply bail out immediately through the slow
 * path, where the lock will be reattempted until it succeeds.
 */

static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	int __done, __res;

	__asm__ __volatile__ (
		"movli.l	@%2, %0	\n"
		"add		#-1, %0	\n"
		"movco.l	%0, @%2	\n"
		"movt		%1	\n"
		: "=&z" (__res), "=&r" (__done)
		: "r" (&(count)->counter)
		: "t");

	if (unlikely(!__done || __res != 0))
		fail_fn(count);
}
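As a rough illustration of the single-attempt scheme described in the comment above, here is a hypothetical C-level sketch (not the kernel's code) that stands in for the movli.l/movco.l pair with a single C11 compare-and-swap; fastpath_lock_sketch and its parameters are made-up names:

#include <stdatomic.h>

/* Hypothetical sketch only: one decrement attempt, then bail to the slow path. */
static inline void fastpath_lock_sketch(atomic_int *count,
					void (*fail_fn)(atomic_int *))
{
	int old = atomic_load(count);
	int newval = old - 1;

	/* A failed swap (someone raced us) or a non-zero result means "go slow". */
	if (!atomic_compare_exchange_strong(count, &old, newval) || newval != 0)
		fail_fn(count);
}

In the real helper above, the T bit saved by movt into __done plays the role of the compare-and-swap success flag.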

Contributors

Person              Tokens   Prop      Commits   CommitProp
michael trimarchi   40       86.96%    1         33.33%
takashi yoshii      5        10.87%    1         33.33%
paul mundt          1        2.17%     1         33.33%
Total               46       100.00%   3         100.00%


/*
 * Same single-attempt decrement as __mutex_fastpath_lock(), but report
 * failure by returning -1 instead of calling a fail function.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count)
{
	int __done, __res;

	__asm__ __volatile__ (
		"movli.l	@%2, %0	\n"
		"add		#-1, %0	\n"
		"movco.l	%0, @%2	\n"
		"movt		%1	\n"
		: "=&z" (__res), "=&r" (__done)
		: "r" (&(count)->counter)
		: "t");

	if (unlikely(!__done || __res != 0))
		__res = -1;

	return __res;
}
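For context, the retval variant returns 0 when the single decrement attempt takes the lock and -1 otherwise, so a caller dispatches to a slow path on a non-zero return. A minimal, hypothetical caller-side sketch, assuming the classic counter-based mutex scheme (my_mutex and my_mutex_lock_slowpath are made-up names, not the kernel's generic mutex code):

/* Hypothetical wrapper type, for illustration only. */
struct my_mutex {
	atomic_t count;		/* 1 = unlocked, 0 = locked, <0 = possibly contended */
};

void my_mutex_lock_slowpath(struct my_mutex *lock);

static inline void my_mutex_lock(struct my_mutex *lock)
{
	/* 0: the fastpath decrement took the lock; -1: fall back to the slow path. */
	if (__mutex_fastpath_lock_retval(&lock->count) != 0)
		my_mutex_lock_slowpath(lock);
}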

Contributors

Person              Tokens   Prop      Commits   CommitProp
michael trimarchi   31       79.49%    1         25.00%
takashi yoshii      5        12.82%    1         25.00%
maarten lankhorst   2        5.13%     1         25.00%
paul mundt          1        2.56%     1         25.00%
Total               39       100.00%   4         100.00%


/*
 * Increment the counter once; if the store-conditional fails or the result
 * is not strictly positive (waiters may be present), take the slow unlock path.
 */
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	int __done, __res;

	__asm__ __volatile__ (
		"movli.l	@%2, %0	\n\t"
		"add		#1, %0	\n\t"
		"movco.l	%0, @%2	\n\t"
		"movt		%1	\n\t"
		: "=&z" (__res), "=&r" (__done)
		: "r" (&(count)->counter)
		: "t");

	if (unlikely(!__done || __res <= 0))
		fail_fn(count);
}

Contributors

Person              Tokens   Prop      Commits   CommitProp
michael trimarchi   39       84.78%    1         25.00%
takashi yoshii      5        10.87%    1         25.00%
paul mundt          2        4.35%     2         50.00%
Total               46       100.00%   4         100.00%

/*
 * If the unlock was done on a contended lock, or if the unlock simply fails
 * then the mutex remains locked.
 */
#define __mutex_slowpath_needs_to_unlock()	1

/*
 * For __mutex_fastpath_trylock we do an atomic decrement and check the
 * result and put it in the __res variable.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	int __res, __orig;

	__asm__ __volatile__ (
		"1: movli.l	@%2, %0		\n\t"
		"dt		%0		\n\t"
		"movco.l	%0, @%2		\n\t"
		"bf		1b		\n\t"
		"cmp/eq		#0, %0		\n\t"
		"bt		2f		\n\t"
		"mov		#0, %1		\n\t"
		"bf		3f		\n\t"
		"2: mov		#1, %1		\n\t"
		"3:				"
		: "=&z" (__orig), "=&r" (__res)
		: "r" (&count->counter)
		: "t");

	return __res;
}
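Unlike the lock fastpath, the trylock above retries the load-linked/store-conditional sequence (the "bf 1b" branch) until the store commits, then reports 1 only if the decrement brought the counter to exactly zero. A hypothetical C11-atomics sketch of the same logic (fastpath_trylock_sketch is a made-up name):

#include <stdatomic.h>

/* Hypothetical sketch only: retry the decrement until it commits, then report. */
static inline int fastpath_trylock_sketch(atomic_int *count)
{
	int old, newval;

	do {
		old = atomic_load(count);
		newval = old - 1;
	} while (!atomic_compare_exchange_weak(count, &old, newval));

	/* 1 if the lock was taken (counter reached zero), 0 otherwise. */
	return newval == 0;
}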

Contributors

Person              Tokens   Prop      Commits   CommitProp
michael trimarchi   32       100.00%   1         100.00%
Total               32       100.00%   1         100.00%

#endif /* __ASM_SH_MUTEX_LLSC_H */

Overall Contributors

Person              Tokens   Prop      Commits   CommitProp
michael trimarchi   160      88.40%    1         20.00%
takashi yoshii      15       8.29%     1         20.00%
paul mundt          4        2.21%     2         40.00%
maarten lankhorst   2        1.10%     1         20.00%
Total               181      100.00%   5         100.00%