Contributors: 33

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Jeremy Fitzhardinge | 254 | 26.10% | 1 | 1.92% |
| H. Peter Anvin | 77 | 7.91% | 5 | 9.62% |
| Andi Kleen | 73 | 7.50% | 2 | 3.85% |
| Nick Desaulniers | 58 | 5.96% | 2 | 3.85% |
| Linus Torvalds | 49 | 5.04% | 3 | 5.77% |
| Nicholas Piggin | 46 | 4.73% | 1 | 1.92% |
| Alexander van Heukelum | 45 | 4.62% | 3 | 5.77% |
| David Howells | 44 | 4.52% | 1 | 1.92% |
| Andrew Morton | 42 | 4.32% | 1 | 1.92% |
| Peter Zijlstra | 34 | 3.49% | 4 | 7.69% |
| Vincent Mailhol | 28 | 2.88% | 2 | 3.85% |
| Marco Elver | 22 | 2.26% | 1 | 1.92% |
| Alexander Lobakin | 21 | 2.16% | 1 | 1.92% |
| Alexander Potapenko | 21 | 2.16% | 1 | 1.92% |
| Denys Vlasenko | 19 | 1.95% | 1 | 1.92% |
| Uros Bizjak | 19 | 1.95% | 1 | 1.92% |
| Ingo Molnar | 17 | 1.75% | 1 | 1.92% |
| Mikulas Patocka | 16 | 1.64% | 1 | 1.92% |
| Borislav Petkov | 11 | 1.13% | 2 | 3.85% |
| Jan Beulich | 11 | 1.13% | 3 | 5.77% |
| Stephen Hemminger | 10 | 1.03% | 1 | 1.92% |
| Jiri Slaby | 8 | 0.82% | 1 | 1.92% |
| Daniel Axtens | 7 | 0.72% | 1 | 1.92% |
| Matthew Wilcox | 7 | 0.72% | 2 | 3.85% |
| Akinobu Mita | 6 | 0.62% | 2 | 3.85% |
| Randy Dunlap | 6 | 0.62% | 1 | 1.92% |
| Thomas Gleixner | 6 | 0.62% | 1 | 1.92% |
| Masahiro Yamada | 5 | 0.51% | 1 | 1.92% |
| Joe Perches | 3 | 0.31% | 1 | 1.92% |
| Jeff Garzik | 3 | 0.31% | 1 | 1.92% |
| Gerd Hoffmann | 3 | 0.31% | 1 | 1.92% |
| Greg Kroah-Hartman | 1 | 0.10% | 1 | 1.92% |
| Linus Torvalds (pre-git) | 1 | 0.10% | 1 | 1.92% |
| Total | 973 | | 52 | |

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H
/*
* Copyright 1992, Linus Torvalds.
*
* Note: inlines with more than a single statement should be marked
* __always_inline to avoid problems with older gcc's inlining heuristics.
*/
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>
#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif
#define BIT_64(n) (U64_C(1) << (n))
/*
* These have to be done with inline assembly: that way the bit-setting
* is guaranteed to be atomic. All bit operations return 0 if the bit
* was cleared before the operation and != 0 if it was not.
*
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/
#define RLONG_ADDR(x) "m" (*(volatile long *) (x))
#define WBYTE_ADDR(x) "+m" (*(volatile char *) (x))
#define ADDR RLONG_ADDR(addr)
/*
* We do the locked ops that don't return the old value as
* a mask operation on a byte.
*/
#define CONST_MASK_ADDR(nr, addr) WBYTE_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr) (1 << ((nr) & 7))
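/*
 * Worked example (illustrative, assuming a constant bit number): for
 * nr == 13, CONST_MASK_ADDR(13, addr) resolves to the byte at offset
 * 13 >> 3 == 1 from addr, and CONST_MASK(13) == 1 << (13 & 7) == 0x20,
 * so the locked operation below touches a single byte rather than a
 * whole long.
 */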
static __always_inline void
arch_set_bit(long nr, volatile unsigned long *addr)
{
if (__builtin_constant_p(nr)) {
asm volatile(LOCK_PREFIX "orb %b1,%0"
: CONST_MASK_ADDR(nr, addr)
: "iq" (CONST_MASK(nr))
: "memory");
} else {
asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
}
}
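/*
 * Usage sketch (hypothetical helper, not part of this header): with a
 * compile-time-constant bit number the constant path emits a single
 * "lock orb" on one byte, while a runtime bit number falls back to
 * "lock bts".
 */
static __always_inline void example_set_flags(volatile unsigned long *flags,
					      long runtime_nr)
{
	arch_set_bit(5, flags);			/* constant nr: lock orb $0x20,(flags) */
	arch_set_bit(runtime_nr, flags);	/* variable nr: lock bts */
}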
static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}
static __always_inline void
arch_clear_bit(long nr, volatile unsigned long *addr)
{
if (__builtin_constant_p(nr)) {
asm volatile(LOCK_PREFIX "andb %b1,%0"
: CONST_MASK_ADDR(nr, addr)
: "iq" (~CONST_MASK(nr)));
} else {
asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
}
}
static __always_inline void
arch_clear_bit_unlock(long nr, volatile unsigned long *addr)
{
barrier();
arch_clear_bit(nr, addr);
}
static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}
static __always_inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
volatile unsigned long *addr)
{
bool negative;
asm volatile(LOCK_PREFIX "xorb %2,%1"
CC_SET(s)
: CC_OUT(s) (negative), WBYTE_ADDR(addr)
: "iq" ((char)mask) : "memory");
return negative;
}
#define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte
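/*
 * Worked note (illustrative): the locked "xorb" above flips the bits in
 * mask within the addressed byte; CC_OUT(s) then captures the sign flag,
 * so the return value reports whether the resulting byte has bit 7 set,
 * i.e. whether it reads as negative when treated as a signed char.
 */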
static __always_inline void
arch___clear_bit_unlock(long nr, volatile unsigned long *addr)
{
arch___clear_bit(nr, addr);
}
static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}
static __always_inline void
arch_change_bit(long nr, volatile unsigned long *addr)
{
if (__builtin_constant_p(nr)) {
asm volatile(LOCK_PREFIX "xorb %b1,%0"
: CONST_MASK_ADDR(nr, addr)
: "iq" (CONST_MASK(nr)));
} else {
asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
}
}
static __always_inline bool
arch_test_and_set_bit(long nr, volatile unsigned long *addr)
{
return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr);
}
static __always_inline bool
arch_test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
return arch_test_and_set_bit(nr, addr);
}
static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
bool oldbit;
asm(__ASM_SIZE(bts) " %2,%1"
CC_SET(c)
: CC_OUT(c) (oldbit)
: ADDR, "Ir" (nr) : "memory");
return oldbit;
}
static __always_inline bool
arch_test_and_clear_bit(long nr, volatile unsigned long *addr)
{
return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr);
}
/*
* Note: the operation is performed atomically with respect to
* the local CPU, but not other CPUs. Portable code should not
* rely on this behaviour.
* KVM relies on this behaviour on x86 for modifying memory that is also
* accessed from a hypervisor on the same CPU if running in a VM: don't change
* this without also updating arch/x86/kernel/kvm.c
*/
static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
bool oldbit;
asm volatile(__ASM_SIZE(btr) " %2,%1"
CC_SET(c)
: CC_OUT(c) (oldbit)
: ADDR, "Ir" (nr) : "memory");
return oldbit;
}
static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
bool oldbit;
asm volatile(__ASM_SIZE(btc) " %2,%1"
CC_SET(c)
: CC_OUT(c) (oldbit)
: ADDR, "Ir" (nr) : "memory");
return oldbit;
}
static __always_inline bool
arch_test_and_change_bit(long nr, volatile unsigned long *addr)
{
return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
}
static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
return ((1UL << (nr & (BITS_PER_LONG-1))) &
(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}
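/*
 * Worked example (illustrative): on a 64-bit kernel _BITOPS_LONG_SHIFT
 * is 6, so constant_test_bit(70, addr) reads addr[70 >> 6] == addr[1]
 * and tests the mask 1UL << (70 & 63) == 1UL << 6, i.e. bit 6 of the
 * second long in the bitmap.
 */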
static __always_inline bool constant_test_bit_acquire(long nr, const volatile unsigned long *addr)
{
bool oldbit;
asm volatile("testb %2,%1"
CC_SET(nz)
: CC_OUT(nz) (oldbit)
: "m" (((unsigned char *)addr)[nr >> 3]),
"i" (1 << (nr & 7))
:"memory");
return oldbit;
}
static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
{
bool oldbit;
asm volatile(__ASM_SIZE(bt) " %2,%1"
CC_SET(c)
: CC_OUT(c) (oldbit)
: "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");
return oldbit;
}
static __always_inline bool
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
return __builtin_constant_p(nr) ? constant_test_bit(nr, addr) :
variable_test_bit(nr, addr);
}
static __always_inline bool
arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
{
return __builtin_constant_p(nr) ? constant_test_bit_acquire(nr, addr) :
variable_test_bit(nr, addr);
}
static __always_inline unsigned long variable__ffs(unsigned long word)
{
asm("rep; bsf %1,%0"
: "=r" (word)
: ASM_INPUT_RM (word));
return word;
}
/**
* __ffs - find first set bit in word
* @word: The word to search
*
* Undefined if no bit exists, so code should check against 0 first.
*/
#define __ffs(word) \
(__builtin_constant_p(word) ? \
(unsigned long)__builtin_ctzl(word) : \
variable__ffs(word))
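/*
 * Usage sketch (hypothetical helper, illustration only): __ffs() is
 * undefined for 0, so callers check the word first.  This returns the
 * index of the lowest set bit, or BITS_PER_LONG for an empty word.
 */
static __always_inline unsigned long example_lowest_bit(unsigned long word)
{
	if (!word)
		return BITS_PER_LONG;
	return __ffs(word);	/* e.g. __ffs(0x58) == 3 */
}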
static __always_inline unsigned long variable_ffz(unsigned long word)
{
asm("rep; bsf %1,%0"
: "=r" (word)
: "r" (~word));
return word;
}
/**
* ffz - find first zero bit in word
* @word: The word to search
*
* Undefined if no zero exists, so code should check against ~0UL first.
*/
#define ffz(word) \
(__builtin_constant_p(word) ? \
(unsigned long)__builtin_ctzl(~word) : \
variable_ffz(word))
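/*
 * Worked example (illustrative): ffz() inverts the word and reuses the
 * find-first-set logic, so ffz(0x000000ff) == 8: bits 0-7 are set and
 * bit 8 is the first clear one.  As with __ffs(), the result is
 * undefined when every bit is set, hence the ~0UL check mentioned above.
 */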
/*
* __fls: find last set bit in word
* @word: The word to search
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
static __always_inline unsigned long __fls(unsigned long word)
{
if (__builtin_constant_p(word))
return BITS_PER_LONG - 1 - __builtin_clzl(word);
asm("bsr %1,%0"
: "=r" (word)
: ASM_INPUT_RM (word));
return word;
}
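/*
 * Usage sketch (hypothetical helper, assumes word != 0): __fls() gives
 * the index of the most significant set bit, e.g. __fls(0x58) == 6, so
 * rounding a value up to a power-of-two order can be written as below.
 */
static __always_inline unsigned long example_order_of(unsigned long word)
{
	/* one more than the top bit index unless word is a power of two */
	return __fls(word) + (word & (word - 1) ? 1 : 0);
}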
#undef ADDR
#ifdef __KERNEL__
static __always_inline int variable_ffs(int x)
{
int r;
#ifdef CONFIG_X86_64
/*
* AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
* dest reg is undefined if x==0, but their CPU architect says its
* value is written to set it to the same as before, except that the
* top 32 bits will be cleared.
*
* We cannot do this on 32 bits because at the very least some
* 486 CPUs did not behave this way.
*/
asm("bsfl %1,%0"
: "=r" (r)
: ASM_INPUT_RM (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
asm("bsfl %1,%0\n\t"
"cmovzl %2,%0"
: "=&r" (r) : "rm" (x), "r" (-1));
#else
asm("bsfl %1,%0\n\t"
"jnz 1f\n\t"
"movl $-1,%0\n"
"1:" : "=r" (r) : "rm" (x));
#endif
return r + 1;
}
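/*
 * Worked example (illustrative): the "0" (-1) constraint preloads the
 * output register with -1 on the 64-bit path.  If x == 0, BSFL leaves
 * the destination unchanged (see the comment above), so r stays -1 and
 * the function returns 0.  For x == 0x10, BSFL writes 4 and the result
 * is 5, matching the 1-based ffs() convention documented below.
 */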
/**
* ffs - find first set bit in word
* @x: the word to search
*
* This is defined the same way as the libc and compiler builtin ffs
* routines, therefore differs in spirit from the other bitops.
*
* ffs(value) returns 0 if value is 0 or the position of the first
* set bit if value is nonzero. The first (least significant) bit
* is at position 1.
*/
#define ffs(x) (__builtin_constant_p(x) ? __builtin_ffs(x) : variable_ffs(x))
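/*
 * Usage sketch (hypothetical helper, illustration only): because ffs()
 * is 1-based and returns 0 for an empty word, a bit-scanning loop needs
 * no separate emptiness check.
 */
static __always_inline int example_count_set_bits(unsigned int x)
{
	int n = 0, pos;

	while ((pos = ffs(x)) != 0) {		/* ffs(0) == 0 ends the loop */
		n++;
		x &= ~(1U << (pos - 1));	/* clear the reported bit */
	}
	return n;
}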
/**
* fls - find last set bit in word
* @x: the word to search
*
* This is defined in a similar way as the libc and compiler builtin
* ffs, but returns the position of the most significant set bit.
*
* fls(value) returns 0 if value is 0 or the position of the last
* set bit if value is nonzero. The last (most significant) bit is
* at position 32.
*/
static __always_inline int fls(unsigned int x)
{
int r;
if (__builtin_constant_p(x))
return x ? 32 - __builtin_clz(x) : 0;
#ifdef CONFIG_X86_64
/*
* AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
* dest reg is undefined if x==0, but their CPU architect says its
* value is written to set it to the same as before, except that the
* top 32 bits will be cleared.
*
* We cannot do this on 32 bits because at the very least some
* 486 CPUs did not behave this way.
*/
asm("bsrl %1,%0"
: "=r" (r)
: ASM_INPUT_RM (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
asm("bsrl %1,%0\n\t"
"cmovzl %2,%0"
: "=&r" (r) : "rm" (x), "rm" (-1));
#else
asm("bsrl %1,%0\n\t"
"jnz 1f\n\t"
"movl $-1,%0\n"
"1:" : "=r" (r) : "rm" (x));
#endif
return r + 1;
}
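/*
 * Worked example (illustrative): fls() mirrors ffs() at the high end:
 * fls(0) == 0, fls(1) == 1 and fls(0x80000000) == 32, so the integer
 * log2 of a non-zero 32-bit value is fls(x) - 1.
 */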
/**
* fls64 - find last set bit in a 64-bit word
* @x: the word to search
*
* This is defined in a similar way as the libc and compiler builtin
* ffsll, but returns the position of the most significant set bit.
*
* fls64(value) returns 0 if value is 0 or the position of the last
* set bit if value is nonzero. The last (most significant) bit is
* at position 64.
*/
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
int bitpos = -1;
if (__builtin_constant_p(x))
return x ? 64 - __builtin_clzll(x) : 0;
/*
* AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
* dest reg is undefined if x==0, but their CPU architect says its
* value is written to set it to the same as before.
*/
asm("bsrq %1,%q0"
: "+r" (bitpos)
: ASM_INPUT_RM (x));
return bitpos + 1;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif
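/*
 * Usage sketch (hypothetical helper, illustration only): fls64() keeps
 * the same 1-based convention for 64-bit words, e.g. fls64(0) == 0 and
 * fls64(1ULL << 63) == 64, so it directly gives the number of bits
 * needed to represent a value.
 */
static __always_inline int example_bits_needed(__u64 x)
{
	return fls64(x);	/* 0 for x == 0, otherwise top bit index + 1 */
}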
#include <asm-generic/bitops/sched.h>
#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/instrumented-atomic.h>
#include <asm-generic/bitops/instrumented-non-atomic.h>
#include <asm-generic/bitops/instrumented-lock.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */