cregit-Linux: how code gets into the kernel

Release 4.14 arch/tile/lib/atomic_32.c

Directory: arch/tile/lib
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <arch/chip.h>

/* This page is remapped on startup to be hash-for-home. */

int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;


int *__atomic_hashed_lock(volatile void *v)
{
        /* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
        /*
         * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index.
         * Using mm works here because atomic_locks is page aligned.
         */
        unsigned long ptr = __insn_mm((unsigned long)v >> 1,
                                      (unsigned long)atomic_locks,
                                      2, (ATOMIC_HASH_SHIFT + 2) - 1);
        return (int *)ptr;
}

Contributors

Person            Tokens  Prop      Commits  CommitProp
Chris Metcalf     51      100.00%   2        100.00%
Total             51      100.00%   2        100.00%
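
The TILEPro "mm" (masked merge) intrinsic above folds bits [3, 3 + ATOMIC_HASH_SHIFT) of the address into the page-aligned base of atomic_locks[]. As a reading aid, here is a sketch of the equivalent lookup in portable C; it is illustrative only and assumes ATOMIC_HASH_SIZE == (1 << ATOMIC_HASH_SHIFT).

/*
 * Illustrative sketch only, not kernel code: the same hashed-lock
 * lookup written without the mm intrinsic, assuming
 * ATOMIC_HASH_SIZE == (1 << ATOMIC_HASH_SHIFT).
 */
static int *hashed_lock_sketch(volatile void *v)
{
        /* Bits [3, 3 + ATOMIC_HASH_SHIFT) of the address select the lock. */
        unsigned long idx = ((unsigned long)v >> 3) & (ATOMIC_HASH_SIZE - 1);

        return &atomic_locks[idx];
}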

#ifdef CONFIG_SMP

/* Return whether the passed pointer is a valid atomic lock pointer. */
static int is_atomic_lock(int *p)
{
        return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE];
}

Contributors

Person            Tokens  Prop      Commits  CommitProp
Chris Metcalf     27      100.00%   1        100.00%
Total             27      100.00%   1        100.00%


void __atomic_fault_unlock(int *irqlock_word)
{
        BUG_ON(!is_atomic_lock(irqlock_word));
        BUG_ON(*irqlock_word != 1);
        *irqlock_word = 0;
}

Contributors

Person            Tokens  Prop      Commits  CommitProp
Chris Metcalf     31      100.00%   1        100.00%
Total             31      100.00%   1        100.00%

#endif /* CONFIG_SMP */

static inline int *__atomic_setup(volatile void *v)
{
        /* Issue a load to the target to bring it into cache. */
        *(volatile int *)v;
        return __atomic_hashed_lock(v);
}

Contributors

Person            Tokens  Prop      Commits  CommitProp
Chris Metcalf     28      100.00%   1        100.00%
Total             28      100.00%   1        100.00%


int _atomic_xchg(int *v, int n)
{
        return __atomic32_xchg(v, __atomic_setup(v), n).val;
}

Contributors

Person            Tokens  Prop      Commits  CommitProp
Chris Metcalf     26      96.30%    2        66.67%
Peter Zijlstra    1       3.70%     1        33.33%
Total             27      100.00%   3        100.00%

EXPORT_SYMBOL(_atomic_xchg);
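
The wrappers in this file all follow the same pattern: __atomic_setup() touches the target word to bring it into cache and returns its hashed lock, and an assembly helper (in atomic_asm_32.S) performs the operation while holding that lock. The sketch below is conceptual only; the lock-word convention (0 = free, 1 = held) is the one __atomic_fault_unlock() asserts above, and the real helpers additionally handle interrupt masking and fault recovery.

/*
 * Conceptual sketch only, not the real assembly helper: an exchange
 * performed under the hashed lock word (0 = free, 1 = held).
 */
static int atomic32_xchg_sketch(int *v, int *lock, int n)
{
        int old;

        while (__sync_lock_test_and_set(lock, 1))       /* spin until the lock is ours */
                ;
        old = *v;
        *v = n;
        __sync_lock_release(lock);                      /* store 0 to release */
        return old;
}
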
int _atomic_xchg_add(int *v, int i)
{
        return __atomic32_xchg_add(v, __atomic_setup(v), i).val;
}

Contributors

Person            Tokens  Prop      Commits  CommitProp
Chris Metcalf     26      96.30%    2        66.67%
Peter Zijlstra    1       3.70%     1        33.33%
Total             27      100.00%   3        100.00%

EXPORT_SYMBOL(_atomic_xchg_add);
int _atomic_xchg_add_unless(int *v, int a, int u)
{
        /*
         * Note: argument order is switched here since it is easier
         * to use the first argument consistently as the "old value"
         * in the assembly, as is done for _atomic_cmpxchg().
         */
        return __atomic32_xchg_add_unless(v, __atomic_setup(v), u, a).val;
}

Contributors

Person            Tokens  Prop      Commits  CommitProp
Chris Metcalf     32      96.97%    2        66.67%
Peter Zijlstra    1       3.03%     1        33.33%
Total             33      100.00%   3        100.00%

EXPORT_SYMBOL(_atomic_xchg_add_unless);
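
As the comment in _atomic_xchg_add_unless() notes, only the argument order changes on the way into the assembly; the operation itself adds a to *v unless the old value equals u, and always returns the old value. A semantics-only sketch with a hypothetical name:

/*
 * Semantics sketch only (hypothetical helper, not kernel code): the
 * add-unless operation as performed once the hashed lock is held.
 */
static int xchg_add_unless_sketch(int *v, int a, int u)
{
        int old = *v;

        if (old != u)
                *v = old + a;
        return old;     /* caller compares against u to see if the add happened */
}
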
int _atomic_cmpxchg(int *v, int o, int n)
{
        return __atomic32_cmpxchg(v, __atomic_setup(v), o, n).val;
}

Contributors

Person            Tokens  Prop      Commits  CommitProp
Chris Metcalf     31      96.88%    2        66.67%
Peter Zijlstra    1       3.12%     1        33.33%
Total             32      100.00%   3        100.00%

EXPORT_SYMBOL(_atomic_cmpxchg);
unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask)
{
        return __atomic32_fetch_or((int *)p, __atomic_setup(p), mask).val;
}

Contributors

Person            Tokens  Prop      Commits  CommitProp
Chris Metcalf     33      94.29%    1        33.33%
Peter Zijlstra    2       5.71%     2        66.67%
Total             35      100.00%   3        100.00%

EXPORT_SYMBOL(_atomic_fetch_or);
unsigned long _atomic_fetch_and(volatile unsigned long *p, unsigned long mask)
{
        return __atomic32_fetch_and((int *)p, __atomic_setup(p), mask).val;
}

Contributors

Person            Tokens  Prop      Commits  CommitProp
Chris Metcalf     33      94.29%    1        33.33%
Peter Zijlstra    2       5.71%     2        66.67%
Total             35      100.00%   3        100.00%

EXPORT_SYMBOL(_atomic_fetch_and);
unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask)
{
        return __atomic32_fetch_andn((int *)p, __atomic_setup(p), mask).val;
}

Contributors

Person            Tokens  Prop      Commits  CommitProp
Chris Metcalf     33      94.29%    1        33.33%
Peter Zijlstra    2       5.71%     2        66.67%
Total             35      100.00%   3        100.00%

EXPORT_SYMBOL(_atomic_fetch_andn);
unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask)
{
        return __atomic32_fetch_xor((int *)p, __atomic_setup(p), mask).val;
}

Contributors

Person            Tokens  Prop      Commits  CommitProp
Chris Metcalf     33      94.29%    1        33.33%
Peter Zijlstra    2       5.71%     2        66.67%
Total             35      100.00%   3        100.00%

EXPORT_SYMBOL(_atomic_fetch_xor);
long long _atomic64_xchg(long long *v, long long n)
{
        return __atomic64_xchg(v, __atomic_setup(v), n);
}

Contributors

Person            Tokens  Prop      Commits  CommitProp
Chris Metcalf     22      78.57%    1        50.00%
Chen Gang S       6       21.43%    1        50.00%
Total             28      100.00%   2        100.00%

EXPORT_SYMBOL(_atomic64_xchg);
long long _atomic64_xchg_add(long long *v, long long i)
{
        return __atomic64_xchg_add(v, __atomic_setup(v), i);
}

Contributors

Person            Tokens  Prop      Commits  CommitProp
Chris Metcalf     22      78.57%    1        50.00%
Chen Gang S       6       21.43%    1        50.00%
Total             28      100.00%   2        100.00%

EXPORT_SYMBOL(_atomic64_xchg_add);
long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
{
        /*
         * Note: argument order is switched here since it is easier
         * to use the first argument consistently as the "old value"
         * in the assembly, as is done for _atomic_cmpxchg().
         */
        return __atomic64_xchg_add_unless(v, __atomic_setup(v), u, a);
}

Contributors

Person            Tokens  Prop      Commits  CommitProp
Chris Metcalf     27      77.14%    1        50.00%
Chen Gang S       8       22.86%    1        50.00%
Total             35      100.00%   2        100.00%

EXPORT_SYMBOL(_atomic64_xchg_add_unless);
long long _atomic64_cmpxchg(long long *v, long long o, long long n)
{
        return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
}

Contributors

Person            Tokens  Prop      Commits  CommitProp
Chris Metcalf     26      76.47%    1        50.00%
Chen Gang S       8       23.53%    1        50.00%
Total             34      100.00%   2        100.00%

EXPORT_SYMBOL(_atomic64_cmpxchg);
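
The 64-bit variants can reuse the hashed lock unchanged: the hash drops the low three address bits, so an 8-byte-aligned long long (as atomic64_t is declared to be) maps to a single lock word and both 32-bit halves are updated in one critical section. A conceptual sketch, not the real assembly:

/*
 * Conceptual sketch only: a 64-bit compare-and-exchange done under the
 * single hashed lock that covers the whole 8-byte value.
 */
static long long atomic64_cmpxchg_sketch(long long *v, int *lock,
                                         long long o, long long n)
{
        long long old;

        while (__sync_lock_test_and_set(lock, 1))
                ;
        old = *v;
        if (old == o)
                *v = n;
        __sync_lock_release(lock);
        return old;
}
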
long long _atomic64_fetch_and(long long *v, long long n)
{
        return __atomic64_fetch_and(v, __atomic_setup(v), n);
}

Contributors

Person            Tokens  Prop      Commits  CommitProp
Chris Metcalf     26      92.86%    1        50.00%
Peter Zijlstra    2       7.14%     1        50.00%
Total             28      100.00%   2        100.00%

EXPORT_SYMBOL(_atomic64_fetch_and);
long long _atomic64_fetch_or(long long *v, long long n)
{
        return __atomic64_fetch_or(v, __atomic_setup(v), n);
}

Contributors

Person            Tokens  Prop      Commits  CommitProp
Chris Metcalf     26      92.86%    1        50.00%
Peter Zijlstra    2       7.14%     1        50.00%
Total             28      100.00%   2        100.00%

EXPORT_SYMBOL(_atomic64_fetch_or);
long long _atomic64_fetch_xor(long long *v, long long n)
{
        return __atomic64_fetch_xor(v, __atomic_setup(v), n);
}

Contributors

Person            Tokens  Prop      Commits  CommitProp
Chris Metcalf     26      92.86%    1        50.00%
Peter Zijlstra    2       7.14%     1        50.00%
Total             28      100.00%   2        100.00%

EXPORT_SYMBOL(_atomic64_fetch_xor);

/*
 * If any of the atomic or futex routines hit a bad address (not in
 * the page tables at kernel PL) this routine is called. The futex
 * routines are never used on kernel space, and the normal atomics and
 * bitops are never used on user space. So a fault on kernel space
 * must be fatal, but a fault on userspace is a futex fault and we
 * need to return -EFAULT. Note that the context this routine is
 * invoked in is the context of the "_atomic_xxx()" routines called
 * by the functions in this file.
 */
struct __get_user __atomic_bad_address(int __user *addr)
{
        if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
                panic("Bad address used for kernel atomic op: %p\n", addr);
        return (struct __get_user) { .err = -EFAULT };
}

Contributors

Person            Tokens  Prop      Commits  CommitProp
Chris Metcalf     49      100.00%   2        100.00%
Total             49      100.00%   2        100.00%
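
A hypothetical caller-side sketch of how the struct __get_user return value is consumed: .err is checked before .val is trusted, so a user-space fault surfaces as -EFAULT (set by __atomic_bad_address() above), while a kernel-space fault has already panicked.

/*
 * Hypothetical caller sketch, not kernel code: check .err before
 * using .val, as the futex path must.
 */
static int checked_xchg_sketch(int *v, int n, int *old_out)
{
        struct __get_user ret = __atomic32_xchg(v, __atomic_setup(v), n);

        if (ret.err)
                return ret.err;         /* -EFAULT from __atomic_bad_address() */
        *old_out = ret.val;
        return 0;
}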


void __init __init_atomic_per_cpu(void)
{
        /* Validate power-of-two and "bigger than cpus" assumption */
        BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
        BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);

        /*
         * On TILEPro we prefer to use a single hash-for-home
         * page, since this means atomic operations are less
         * likely to encounter a TLB fault and thus should
         * in general perform faster. You may wish to disable
         * this in situations where few hash-for-home tiles
         * are configured.
         */
        BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);

        /* The locks must all fit on one page. */
        BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);

        /*
         * We use the page offset of the atomic value's address as
         * an index into atomic_locks, excluding the low 3 bits.
         * That should not produce more indices than ATOMIC_HASH_SIZE.
         */
        BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
}

Contributors

Person            Tokens  Prop      Commits  CommitProp
Chris Metcalf     63      95.45%    1        50.00%
Akinobu Mita      3       4.55%     1        50.00%
Total             66      100.00%   2        100.00%
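
For illustration only, the consistency checks above can be read as plain compile-time asserts; the 64 KB page size and 8192-entry lock table used here are assumed example values, not what every configuration uses.

/* Illustration only: the sizing constraints with assumed example values. */
#include <assert.h>

#define EX_PAGE_SIZE            65536UL /* assumed 64 KB page */
#define EX_ATOMIC_HASH_SIZE     8192UL  /* assumed number of lock words */

static_assert((EX_ATOMIC_HASH_SIZE & (EX_ATOMIC_HASH_SIZE - 1)) == 0,
              "hash size must be a power of two");
static_assert(EX_ATOMIC_HASH_SIZE * sizeof(int) <= EX_PAGE_SIZE,
              "all lock words must fit on one page");
static_assert((EX_PAGE_SIZE >> 3) <= EX_ATOMIC_HASH_SIZE,
              "every 8-byte-granular page offset maps to a valid lock");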


Overall Contributors

Person            Tokens  Prop      Commits  CommitProp
Chris Metcalf     781     93.20%    6        54.55%
Chen Gang S       28      3.34%     1        9.09%
Peter Zijlstra    25      2.98%     2        18.18%
Akinobu Mita      3       0.36%     1        9.09%
Arun Sharma       1       0.12%     1        9.09%
Total             838     100.00%   11       100.00%
Created with cregit.