
Release 4.14: arch/x86/include/asm/bitops.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 *
 * Note: inlines with more than a single statement should be marked
 * __always_inline to avoid problems with older gcc's inlining heuristics.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif


#define BIT_64(n)			(U64_C(1) << (n))

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
#else
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#endif


#define ADDR				BITOP_ADDR(addr)

/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */

#define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))

#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))

#define CONST_MASK(nr)			(1 << ((nr) & 7))
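
For a constant bit number, the locked byte ops below need a byte offset and an 8-bit mask, and the arithmetic is worth seeing once. A minimal userspace sketch of the same computation (illustration only, not kernel code):

#include <stdio.h>

/* Userspace restatement of CONST_MASK_ADDR()'s offset math and of
 * CONST_MASK(), for illustration only. */
#define CONST_MASK_BYTE(nr)	((nr) >> 3)		/* which byte of the bitmap */
#define CONST_MASK(nr)		(1 << ((nr) & 7))	/* bit within that byte */

int main(void)
{
	/* Bit 13 lives in byte 1 of the bitmap, as mask 0x20 (bit 5). */
	printf("byte %d, mask 0x%02x\n",
	       CONST_MASK_BYTE(13), CONST_MASK(13));
	return 0;
}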

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */

static __always_inline void set_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "orb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr))
			: "memory");
	} else {
		asm volatile(LOCK_PREFIX "bts %1,%0"
			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
	}
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Jeremy Fitzhardinge          14   41.18%        1      14.29%
Linus Torvalds               10   29.41%        1      14.29%
Ingo Molnar                   6   17.65%        2      28.57%
Andrew Morton                 2    5.88%        1      14.29%
Andi Kleen                    1    2.94%        1      14.29%
H. Peter Anvin                1    2.94%        1      14.29%
Total                        34  100.00%        7     100.00%
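
As a usage sketch (hypothetical names, assuming a kernel context where this header is in effect), a typical caller hands set_bit() a bit index and a bitmap:

/* Hypothetical driver flag word; DECLARE_BITMAP() is from <linux/types.h>. */
static DECLARE_BITMAP(dev_flags, 64);

static void mark_ready(void)
{
	/* Atomic RMW: safe against concurrent set_bit()/clear_bit()
	 * callers on other CPUs touching the same word. */
	set_bit(0, dev_flags);
}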

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Jeremy Fitzhardinge          14   73.68%        1      20.00%
Andrew Morton                 2   10.53%        1      20.00%
Joe Perches                   1    5.26%        1      20.00%
H. Peter Anvin                1    5.26%        1      20.00%
Denys Vlasenko                1    5.26%        1      20.00%
Total                        19  100.00%        5     100.00%

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __always_inline void clear_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "andb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)~CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btr %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Jeremy Fitzhardinge          14   41.18%        1      14.29%
Linus Torvalds               10   29.41%        1      14.29%
Ingo Molnar                   6   17.65%        2      28.57%
Andrew Morton                 2    5.88%        1      14.29%
Andi Kleen                    1    2.94%        1      14.29%
H. Peter Anvin                1    2.94%        1      14.29%
Total                        34  100.00%        7     100.00%
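
Because clear_bit() carries no memory barrier, a flag that publishes earlier stores needs an explicit one, as the comment above says. A hedged sketch (hypothetical names, kernel context assumed):

/* Hypothetical completion path; BUSY_BIT and the flags word are
 * illustrative, not from this file. */
#define BUSY_BIT	0

static void complete_request(unsigned long *flags)
{
	/* Order earlier result stores before the flag clear becomes
	 * visible to other CPUs. */
	smp_mb__before_atomic();
	clear_bit(BUSY_BIT, flags);
}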

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and implies release semantics before the memory
 * operation. It can be used for an unlock.
 */
static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	clear_bit(nr, addr);
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Jeremy Fitzhardinge          22   84.62%        1      25.00%
Andrew Morton                 2    7.69%        1      25.00%
Denys Vlasenko                1    3.85%        1      25.00%
H. Peter Anvin                1    3.85%        1      25.00%
Total                        26  100.00%        4     100.00%


static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Jeremy Fitzhardinge          14   73.68%        1      20.00%
Andrew Morton                 2   10.53%        1      20.00%
Denys Vlasenko                1    5.26%        1      20.00%
Simon Holm Thögersen          1    5.26%        1      20.00%
H. Peter Anvin                1    5.26%        1      20.00%
Total                        19  100.00%        5     100.00%


static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
	bool negative;

	asm volatile(LOCK_PREFIX "andb %2,%1\n\t"
		CC_SET(s)
		: CC_OUT(s) (negative), ADDR
		: "ir" ((char) ~(1 << nr)) : "memory");
	return negative;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Linus Torvalds               25  100.00%        1     100.00%
Total                        25  100.00%        1     100.00%
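
The helper's contract is easier to see in portable C: clear bit @nr within its byte and report whether the byte's sign bit (bit 7) is still set afterwards. A minimal userspace restatement of the semantics, not of the atomicity (illustration only):

#include <stdbool.h>
#include <stdio.h>

/* Portable restatement: clear a bit in a byte, report sign bit. */
static bool clear_and_test_sign(unsigned char *byte, int nr)
{
	*byte &= (unsigned char)~(1u << (nr & 7));
	return (signed char)*byte < 0;	/* bit 7 still set? */
}

int main(void)
{
	unsigned char b = 0x81;				/* bits 0 and 7 set */
	printf("%d\n", clear_and_test_sign(&b, 0));	/* 1: bit 7 survives */
	printf("%d\n", clear_and_test_sign(&b, 7));	/* 0: byte now zero */
	return 0;
}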

// Let everybody know we have it
#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	__clear_bit(nr, addr);
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Jeremy Fitzhardinge          22   84.62%        1      25.00%
Andrew Morton                 2    7.69%        1      25.00%
Denys Vlasenko                1    3.85%        1      25.00%
H. Peter Anvin                1    3.85%        1      25.00%
Total                        26  100.00%        4     100.00%

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Jeremy Fitzhardinge          14   73.68%        1      20.00%
Andrew Morton                 2   10.53%        1      20.00%
H. Peter Anvin                1    5.26%        1      20.00%
Simon Holm Thögersen          1    5.26%        1      20.00%
Denys Vlasenko                1    5.26%        1      20.00%
Total                        19  100.00%        5     100.00%

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void change_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "xorb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btc %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Uros Bizjak                  16   47.06%        1      20.00%
Jeremy Fitzhardinge          14   41.18%        1      20.00%
Andrew Morton                 2    5.88%        1      20.00%
Denys Vlasenko                1    2.94%        1      20.00%
H. Peter Anvin                1    2.94%        1      20.00%
Total                        34  100.00%        5     100.00%

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c);
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Peter Zijlstra               14   42.42%        1      12.50%
Jeremy Fitzhardinge          11   33.33%        1      12.50%
H. Peter Anvin                5   15.15%        4      50.00%
Andrew Morton                 2    6.06%        1      12.50%
Denys Vlasenko                1    3.03%        1      12.50%
Total                        33  100.00%        8     100.00%

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
static __always_inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Jeremy Fitzhardinge          19   79.17%        1      20.00%
Andrew Morton                 2    8.33%        1      20.00%
H. Peter Anvin                2    8.33%        2      40.00%
Andi Kleen                    1    4.17%        1      20.00%
Total                        24  100.00%        5     100.00%
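
Since test_and_set_bit_lock() returns the old value, a one-bit trylock falls out directly. A hedged sketch (hypothetical names, kernel context assumed):

/* Hypothetical one-bit lock; bit 0 of *lockword is the lock. */
static bool my_trylock(unsigned long *lockword)
{
	/* Old value 0 means we acquired it; acquire semantics included. */
	return !test_and_set_bit_lock(0, lockword);
}

static void my_unlock(unsigned long *lockword)
{
	clear_bit_unlock(0, lockword);	/* release semantics */
}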

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm("bts %2,%1\n\t"
	    CC_SET(c)
	    : CC_OUT(c) (oldbit), ADDR
	    : "Ir" (nr));
	return oldbit;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Jeremy Fitzhardinge          16   69.57%        1      16.67%
H. Peter Anvin                4   17.39%        3      50.00%
Andrew Morton                 2    8.70%        1      16.67%
Denys Vlasenko                1    4.35%        1      16.67%
Total                        23  100.00%        6     100.00%

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c);
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Peter Zijlstra               14   42.42%        1      12.50%
Jeremy Fitzhardinge          11   33.33%        1      12.50%
H. Peter Anvin                5   15.15%        4      50.00%
Andrew Morton                 2    6.06%        1      12.50%
Denys Vlasenko                1    3.03%        1      12.50%
Total                        33  100.00%        8     100.00%
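
test_and_clear_bit() is the usual way to consume a pending-work flag exactly once, because only one CPU can observe the 1->0 transition. A hedged sketch (hypothetical names, kernel context assumed):

/* Hypothetical event loop; PENDING_BIT and do_work() are illustrative. */
#define PENDING_BIT	0

static void do_work(void);

static void poll_events(unsigned long *pending)
{
	/* Whichever CPU sees the old value as 1 owns this event. */
	if (test_and_clear_bit(PENDING_BIT, pending))
		do_work();
}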

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 *
 * Note: the operation is performed atomically with respect to
 * the local CPU, but not other CPUs.  Portable code should not
 * rely on this behaviour.
 * KVM relies on this behaviour on x86 for modifying memory that is also
 * accessed from a hypervisor on the same CPU if running in a VM: don't change
 * this without also updating arch/x86/kernel/kvm.c
 */
static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile("btr %2,%1\n\t"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit), ADDR
		     : "Ir" (nr));
	return oldbit;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Jeremy Fitzhardinge          18   72.00%        1      16.67%
H. Peter Anvin                4   16.00%        3      50.00%
Andrew Morton                 2    8.00%        1      16.67%
Denys Vlasenko                1    4.00%        1      16.67%
Total                        25  100.00%        6     100.00%

/* WARNING: non atomic and it can be reordered! */
static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile("btc %2,%1\n\t"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit), ADDR
		     : "Ir" (nr) : "memory");
	return oldbit;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Jeremy Fitzhardinge          18   72.00%        1      16.67%
H. Peter Anvin                4   16.00%        3      50.00%
Andrew Morton                 2    8.00%        1      16.67%
Denys Vlasenko                1    4.00%        1      16.67%
Total                        25  100.00%        6     100.00%

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c);
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Peter Zijlstra               14   42.42%        1      12.50%
Jeremy Fitzhardinge          11   33.33%        1      12.50%
H. Peter Anvin                5   15.15%        4      50.00%
Andrew Morton                 2    6.06%        1      12.50%
Denys Vlasenko                1    3.03%        1      12.50%
Total                        33  100.00%        8     100.00%


static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & (BITS_PER_LONG-1))) &
		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Jeremy Fitzhardinge          33   73.33%        1      20.00%
H. Peter Anvin                9   20.00%        2      40.00%
Andrew Morton                 2    4.44%        1      20.00%
Andi Kleen                    1    2.22%        1      20.00%
Total                        45  100.00%        5     100.00%
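
constant_test_bit() indexes the bitmap word by word: nr >> _BITOPS_LONG_SHIFT picks the word, and nr & (BITS_PER_LONG-1) the bit within it. A minimal userspace restatement (illustration only; assumes a 64-bit long):

#include <stdio.h>

#define BITS_PER_LONG		(8 * (int)sizeof(long))
#define _BITOPS_LONG_SHIFT	6	/* assumes 64-bit long */

static int my_test_bit(long nr, const unsigned long *addr)
{
	return (addr[nr >> _BITOPS_LONG_SHIFT]
		>> (nr & (BITS_PER_LONG - 1))) & 1;
}

int main(void)
{
	unsigned long map[2] = { 0, 1UL << 6 };	/* bit 70 set */
	printf("%d %d\n", my_test_bit(70, map),
	       my_test_bit(71, map));		/* prints: 1 0 */
	return 0;
}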


static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
{
	bool oldbit;

	asm volatile("bt %2,%1\n\t"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr));
	return oldbit;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Jeremy Fitzhardinge          19   73.08%        1      16.67%
H. Peter Anvin                4   15.38%        3      50.00%
Andrew Morton                 2    7.69%        1      16.67%
Denys Vlasenko                1    3.85%        1      16.67%
Total                        26  100.00%        6     100.00%

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static bool test_bit(int nr, const volatile unsigned long *addr);
#endif

#define test_bit(nr, addr)			\
	(__builtin_constant_p((nr))		\
	 ? constant_test_bit((nr), (addr))	\
	 : variable_test_bit((nr), (addr)))

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "rm" (word));
	return word;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Alexander van Heukelum       14   87.50%        1      33.33%
Denys Vlasenko                1    6.25%        1      33.33%
Jan Beulich                   1    6.25%        1      33.33%
Total                        16  100.00%        3     100.00%

/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __always_inline unsigned long ffz(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "r" (~word));
	return word;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Alexander van Heukelum       14   87.50%        1      33.33%
Denys Vlasenko                1    6.25%        1      33.33%
Jan Beulich                   1    6.25%        1      33.33%
Total                        16  100.00%        3     100.00%

/*
 * __fls: find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Alexander van Heukelum       14   87.50%        1      33.33%
Joe Perches                   1    6.25%        1      33.33%
Denys Vlasenko                1    6.25%        1      33.33%
Total                        16  100.00%        3     100.00%
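
The three word-scanning helpers above share one caveat: all are undefined when no qualifying bit exists. Their contracts can be checked against the GCC builtins in userspace (illustration only):

#include <stdio.h>

/* Userspace analogues of __ffs()/ffz()/__fls(); like the originals,
 * undefined for an all-zero (ffz: all-ones) word. */
static unsigned long u_ffs(unsigned long w) { return __builtin_ctzl(w); }
static unsigned long u_ffz(unsigned long w) { return __builtin_ctzl(~w); }
static unsigned long u_fls(unsigned long w)
{
	return 8 * sizeof(long) - 1 - __builtin_clzl(w);
}

int main(void)
{
	printf("%lu %lu %lu\n",
	       u_ffs(0x18),	/* 3: lowest set bit of 0b11000 */
	       u_ffz(0xff),	/* 8: lowest clear bit */
	       u_fls(0x18));	/* 4: highest set bit */
	return 0;
}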

#undef ADDR

#ifdef __KERNEL__
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static __always_inline int ffs(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsfl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsfl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "r" (-1));
#else
	asm("bsfl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Alexander van Heukelum       24   68.57%        1      20.00%
David Howells                 8   22.86%        1      20.00%
Joe Perches                   1    2.86%        1      20.00%
Denys Vlasenko                1    2.86%        1      20.00%
Jan Beulich                   1    2.86%        1      20.00%
Total                        35  100.00%        5     100.00%
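
The 1-based contract matches the libc routine of the same name, so the edge cases can be checked in userspace:

#include <stdio.h>
#include <strings.h>	/* libc ffs(), same contract as above */

int main(void)
{
	printf("%d %d %d\n", ffs(0), ffs(1), ffs(0x10));
	/* Prints: 0 1 5 -- position is 1-based; 0 means "no bit set" */
	return 0;
}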

/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static __always_inline int fls(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsrl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
#else
	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
Alexander van Heukelum       24   68.57%        1      20.00%
David Howells                 7   20.00%        1      20.00%
Joe Perches                   2    5.71%        1      20.00%
Jan Beulich                   1    2.86%        1      20.00%
Denys Vlasenko                1    2.86%        1      20.00%
Total                        35  100.00%        5     100.00%
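
fls(), and fls64() below, can be restated with a count-leading-zeros builtin. A userspace sketch of the contract (illustration only):

#include <stdio.h>

/* Userspace analogue of fls(): 1-based position of the MSB, 0 for 0. */
static int u_fls32(int x)
{
	return x ? 32 - __builtin_clz((unsigned int)x) : 0;
}

int main(void)
{
	printf("%d %d %d\n", u_fls32(0), u_fls32(1), u_fls32(0x10));
	/* Prints: 0 1 5 */
	return 0;
}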

/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
	int bitpos = -1;
	/*
	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before.
	 */
	asm("bsrq %1,%q0"
	    : "+r" (bitpos)
	    : "rm" (x));
	return bitpos + 1;
}

Contributors

Person                   Tokens     Prop  Commits  CommitProp
David Howells                21   91.30%        1      50.00%
Jan Beulich                   2    8.70%        1      50.00%
Total                        23  100.00%        2     100.00%

#else
#include <asm-generic/bitops/fls64.h>
#endif

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/sched.h>
#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */

Overall Contributors

Person                   Tokens     Prop  Commits  CommitProp
Jeremy Fitzhardinge         357   42.00%        1       2.94%
Alexander van Heukelum      102   12.00%        3       8.82%
H. Peter Anvin               82    9.65%        7      20.59%
Linus Torvalds               78    9.18%        2       5.88%
Peter Zijlstra               49    5.76%        2       5.88%
David Howells                47    5.53%        1       2.94%
Andrew Morton                34    4.00%        1       2.94%
Ingo Molnar                  22    2.59%        2       5.88%
Denys Vlasenko               18    2.12%        1       2.94%
Uros Bizjak                  16    1.88%        1       2.94%
Borislav Petkov              11    1.29%        2       5.88%
Akinobu Mita                  7    0.82%        3       8.82%
Jan Beulich                   6    0.71%        2       5.88%
Thomas Gleixner               6    0.71%        1       2.94%
Joe Perches                   6    0.71%        1       2.94%
Andi Kleen                    5    0.59%        1       2.94%
Simon Holm Thögersen          2    0.24%        1       2.94%
Michael S. Tsirkin            1    0.12%        1       2.94%
Greg Kroah-Hartman            1    0.12%        1       2.94%
Total                       850  100.00%       34     100.00%