Contributors: 47

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
| Alexander Lobakin | 114 | 11.66% | 7 | 10.14% |
| Harvey Harrison | 82 | 8.38% | 1 | 1.45% |
| Matt Mackall | 75 | 7.67% | 3 | 4.35% |
| Andrew Morton | 70 | 7.16% | 2 | 2.90% |
| Steven Whitehouse | 61 | 6.24% | 1 | 1.45% |
| Kumar Kartikeya Dwivedi | 61 | 6.24% | 1 | 1.45% |
| Rasmus Villemoes | 60 | 6.13% | 1 | 1.45% |
| Linus Torvalds (pre-git) | 58 | 5.93% | 6 | 8.70% |
| Alexey Dobriyan | 41 | 4.19% | 1 | 1.45% |
| Andreas Herrmann | 40 | 4.09% | 1 | 1.45% |
| zijun_hu | 31 | 3.17% | 1 | 1.45% |
| Pavel Machek | 29 | 2.97% | 1 | 1.45% |
| Yury Norov | 28 | 2.86% | 1 | 1.45% |
| Martin Kepplinger | 24 | 2.45% | 2 | 2.90% |
| Borislav Petkov | 21 | 2.15% | 1 | 1.45% |
| Akinobu Mita | 21 | 2.15% | 4 | 5.80% |
| Lukas Wunner | 17 | 1.74% | 1 | 1.45% |
| Guoqing Jiang | 14 | 1.43% | 1 | 1.45% |
| Theodore Y. Ts'o | 14 | 1.43% | 1 | 1.45% |
| David Ahern | 12 | 1.23% | 1 | 1.45% |
| Wei Yang | 11 | 1.12% | 2 | 2.90% |
| Kuan-Wei Chiu | 11 | 1.12% | 1 | 1.45% |
| Andy Shevchenko | 10 | 1.02% | 2 | 2.90% |
| Tal Gilboa | 7 | 0.72% | 1 | 1.45% |
| Alexander van Heukelum | 6 | 0.61% | 1 | 1.45% |
| Masami Hiramatsu | 6 | 0.61% | 1 | 1.45% |
| Ingo Molnar | 5 | 0.51% | 1 | 1.45% |
| Mikulas Patocka | 4 | 0.41% | 1 | 1.45% |
| David Howells | 4 | 0.41% | 2 | 2.90% |
| Miklos Szeredi | 4 | 0.41% | 2 | 2.90% |
| Eric Dumazet | 4 | 0.41% | 1 | 1.45% |
| Sasha Levin | 4 | 0.41% | 1 | 1.45% |
| Will Deacon | 3 | 0.31% | 1 | 1.45% |
| Thorsten Blum | 3 | 0.31% | 1 | 1.45% |
| Suresh B. Siddha | 3 | 0.31% | 1 | 1.45% |
| Denys Vlasenko | 3 | 0.31% | 1 | 1.45% |
| Arnd Bergmann | 3 | 0.31% | 1 | 1.45% |
| Josh Poimboeuf | 2 | 0.20% | 1 | 1.45% |
| Jiri Slaby | 2 | 0.20% | 1 | 1.45% |
| Uros Bizjak | 2 | 0.20% | 1 | 1.45% |
| Robert P. J. Day | 2 | 0.20% | 1 | 1.45% |
| Greg Kroah-Hartman | 1 | 0.10% | 1 | 1.45% |
| Dave Jones | 1 | 0.10% | 1 | 1.45% |
| Dan Carpenter | 1 | 0.10% | 1 | 1.45% |
| Geert Uytterhoeven | 1 | 0.10% | 1 | 1.45% |
| Yoshinori Sato | 1 | 0.10% | 1 | 1.45% |
| Thomas Gleixner | 1 | 0.10% | 1 | 1.45% |
| Total | 978 | | 69 | |
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>
#include <linux/bits.h>
#include <linux/typecheck.h>
#include <uapi/linux/kernel.h>
#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
#define BITS_TO_U64(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
#define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
#define BITS_TO_BYTES(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
#define BYTES_TO_BITS(nb) ((nb) * BITS_PER_BYTE)
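/*
 * Example values (illustrative, assuming BITS_PER_LONG == 64); all of the
 * BITS_TO_*() helpers round up:
 *
 *     BITS_TO_LONGS(1)  == 1       BITS_TO_LONGS(65) == 2
 *     BITS_TO_U32(33)   == 2       BITS_TO_BYTES(9)  == 2
 *     BYTES_TO_BITS(4)  == 32
 */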
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);
/*
* Defined here because those may be needed by architecture-specific static
* inlines.
*/
#include <asm-generic/bitops/generic-non-atomic.h>
/*
* Many architecture-specific non-atomic bitops contain inline asm code and due
* to that the compiler can't optimize them to compile-time expressions or
* constants. In contrast, generic_*() helpers are defined in pure C and
* compilers optimize them just well.
* Therefore, to make `unsigned long foo = 0; __set_bit(BAR, &foo)` effectively
* equal to `unsigned long foo = BIT(BAR)`, pick the generic C alternative when
* the arguments can be resolved at compile time. That expression itself is a
* constant and doesn't bring any functional changes to the rest of cases.
* The casts to `uintptr_t` are needed to mitigate `-Waddress` warnings when
* passing a bitmap from .bss or .data (-> `!!addr` is always true).
*/
#define bitop(op, nr, addr) \
((__builtin_constant_p(nr) && \
__builtin_constant_p((uintptr_t)(addr) != (uintptr_t)NULL) && \
(uintptr_t)(addr) != (uintptr_t)NULL && \
__builtin_constant_p(*(const unsigned long *)(addr))) ? \
const##op(nr, addr) : op(nr, addr))
/*
* The following macros are non-atomic versions of their non-underscored
* counterparts.
*/
#define __set_bit(nr, addr) bitop(___set_bit, nr, addr)
#define __clear_bit(nr, addr) bitop(___clear_bit, nr, addr)
#define __change_bit(nr, addr) bitop(___change_bit, nr, addr)
#define __test_and_set_bit(nr, addr) bitop(___test_and_set_bit, nr, addr)
#define __test_and_clear_bit(nr, addr) bitop(___test_and_clear_bit, nr, addr)
#define __test_and_change_bit(nr, addr) bitop(___test_and_change_bit, nr, addr)
#define test_bit(nr, addr) bitop(_test_bit, nr, addr)
#define test_bit_acquire(nr, addr) bitop(_test_bit_acquire, nr, addr)
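/*
 * Example of the compile-time selection described above (a sketch of intended
 * usage, not taken from this file): when both the bit number and the target
 * word are compile-time constants, the const_*() variant is picked and the
 * whole expression folds to a constant, so the two initialisations below end
 * up equivalent:
 *
 *     unsigned long foo = 0;
 *     __set_bit(5, &foo);          // folds to foo = BIT(5)
 *
 *     unsigned long bar = BIT(5);  // same generated code
 *
 * With a non-constant bit number or an external bitmap, the regular
 * ___set_bit() implementation is used instead.
 */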
/*
* Include this here because some architectures need generic_ffs/fls in
* scope
*/
#include <asm/bitops.h>
/* Check that the bitops prototypes are sane */
#define __check_bitop_pr(name) \
static_assert(__same_type(arch_##name, generic_##name) && \
__same_type(const_##name, generic_##name) && \
__same_type(_##name, generic_##name))
__check_bitop_pr(__set_bit);
__check_bitop_pr(__clear_bit);
__check_bitop_pr(__change_bit);
__check_bitop_pr(__test_and_set_bit);
__check_bitop_pr(__test_and_clear_bit);
__check_bitop_pr(__test_and_change_bit);
__check_bitop_pr(test_bit);
__check_bitop_pr(test_bit_acquire);
#undef __check_bitop_pr
static inline int get_bitmask_order(unsigned int count)
{
int order;
order = fls(count);
return order; /* We could be slightly more clever with -1 here... */
}
static __always_inline unsigned long hweight_long(unsigned long w)
{
return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w);
}
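/*
 * Examples (illustrative): get_bitmask_order() is simply fls(), i.e. one plus
 * the index of the most significant set bit, and hweight_long() is the
 * population count of a long:
 *
 *     get_bitmask_order(5)   == 3   // 5 is 0b101, highest set bit is bit 2
 *     get_bitmask_order(0)   == 0
 *     hweight_long(0xf0f0UL) == 8
 */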
/**
* rol64 - rotate a 64-bit value left
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u64 rol64(__u64 word, unsigned int shift)
{
return (word << (shift & 63)) | (word >> ((-shift) & 63));
}
/**
* ror64 - rotate a 64-bit value right
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u64 ror64(__u64 word, unsigned int shift)
{
return (word >> (shift & 63)) | (word << ((-shift) & 63));
}
/**
* rol32 - rotate a 32-bit value left
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u32 rol32(__u32 word, unsigned int shift)
{
return (word << (shift & 31)) | (word >> ((-shift) & 31));
}
/**
* ror32 - rotate a 32-bit value right
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u32 ror32(__u32 word, unsigned int shift)
{
return (word >> (shift & 31)) | (word << ((-shift) & 31));
}
/**
* rol16 - rotate a 16-bit value left
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u16 rol16(__u16 word, unsigned int shift)
{
return (word << (shift & 15)) | (word >> ((-shift) & 15));
}
/**
* ror16 - rotate a 16-bit value right
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u16 ror16(__u16 word, unsigned int shift)
{
return (word >> (shift & 15)) | (word << ((-shift) & 15));
}
/**
* rol8 - rotate an 8-bit value left
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u8 rol8(__u8 word, unsigned int shift)
{
return (word << (shift & 7)) | (word >> ((-shift) & 7));
}
/**
* ror8 - rotate an 8-bit value right
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u8 ror8(__u8 word, unsigned int shift)
{
return (word >> (shift & 7)) | (word << ((-shift) & 7));
}
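/*
 * Rotation example (illustrative): bits shifted out one end re-enter at the
 * other, so rol and ror are inverses for the same shift:
 *
 *     rol32(0x80000001, 1) == 0x00000003
 *     ror32(0x00000003, 1) == 0x80000001
 *     rol8(0x81, 4)        == 0x18
 */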
/**
* sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
* @value: value to sign extend
* @index: 0 based bit index (0<=index<32) to sign bit
*
* This is safe to use for 16- and 8-bit types as well.
*/
static __always_inline __s32 sign_extend32(__u32 value, int index)
{
__u8 shift = 31 - index;
return (__s32)(value << shift) >> shift;
}
/**
* sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
* @value: value to sign extend
* @index: 0 based bit index (0<=index<64) to sign bit
*/
static __always_inline __s64 sign_extend64(__u64 value, int index)
{
__u8 shift = 63 - index;
return (__s64)(value << shift) >> shift;
}
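/*
 * Sign-extension example (illustrative): treating bit @index as the sign bit
 * replicates it into all higher bits:
 *
 *     sign_extend32(0x80, 7)   == (__s32)0xffffff80   // i.e. -128
 *     sign_extend32(0x7f, 7)   == 0x7f                // positive, unchanged
 *     sign_extend64(0x800, 11) == -2048
 */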
static inline unsigned int fls_long(unsigned long l)
{
if (sizeof(l) == 4)
return fls(l);
return fls64(l);
}
static inline int get_count_order(unsigned int count)
{
if (count == 0)
return -1;
return fls(--count);
}
/**
* get_count_order_long - get order after rounding @l up to power of 2
* @l: parameter
*
* it is the same as get_count_order() but with a long type parameter
*/
static inline int get_count_order_long(unsigned long l)
{
if (l == 0UL)
return -1;
return (int)fls_long(--l);
}
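/*
 * Example (illustrative): get_count_order() returns the order of the smallest
 * power of two that is >= @count, i.e. log2 rounded up:
 *
 *     get_count_order(1)  == 0
 *     get_count_order(16) == 4
 *     get_count_order(17) == 5
 *     get_count_order(0)  == -1
 */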
/**
* __ffs64 - find first set bit in a 64 bit word
* @word: The 64 bit word
*
* On 64 bit arches this is a synonym for __ffs
* The result is not defined if no bits are set, so check that @word
* is non-zero before calling this.
*/
static inline unsigned int __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
if (((u32)word) == 0UL)
return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
return __ffs((unsigned long)word);
}
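/*
 * Example (illustrative): __ffs64() returns the index of the least
 * significant set bit:
 *
 *     __ffs64(0x0000001000000000ULL) == 36
 *     __ffs64(0x6ULL)                == 1
 *
 * Calling it with 0 is undefined, as noted above.
 */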
/**
* fns - find N'th set bit in a word
* @word: The word to search
* @n: Bit to find
*/
static inline unsigned int fns(unsigned long word, unsigned int n)
{
while (word && n--)
word &= word - 1;
return word ? __ffs(word) : BITS_PER_LONG;
}
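/*
 * Example (illustrative): @n is zero-based, and BITS_PER_LONG is returned
 * when fewer than @n + 1 bits are set:
 *
 *     fns(0b10110, 0) == 1
 *     fns(0b10110, 2) == 4
 *     fns(0b10110, 3) == BITS_PER_LONG
 */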
/**
* assign_bit - Assign value to a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
* @value: the value to assign
*/
#define assign_bit(nr, addr, value) \
((value) ? set_bit((nr), (addr)) : clear_bit((nr), (addr)))
#define __assign_bit(nr, addr, value) \
((value) ? __set_bit((nr), (addr)) : __clear_bit((nr), (addr)))
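/*
 * Usage sketch (illustrative, the flag name is made up): assign_bit() avoids
 * an explicit if/else when a flag bit mirrors a boolean:
 *
 *     assign_bit(MY_FLAG_BIT, &flags, enabled);    // atomic
 *     __assign_bit(MY_FLAG_BIT, &flags, enabled);  // non-atomic variant
 */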
/**
* __ptr_set_bit - Set bit in a pointer's value
* @nr: the bit to set
* @addr: the address of the pointer variable
*
* Example:
* void *p = foo();
* __ptr_set_bit(bit, &p);
*/
#define __ptr_set_bit(nr, addr) \
({ \
typecheck_pointer(*(addr)); \
__set_bit(nr, (unsigned long *)(addr)); \
})
/**
* __ptr_clear_bit - Clear bit in a pointer's value
* @nr: the bit to clear
* @addr: the address of the pointer variable
*
* Example:
* void *p = foo();
* __ptr_clear_bit(bit, &p);
*/
#define __ptr_clear_bit(nr, addr) \
({ \
typecheck_pointer(*(addr)); \
__clear_bit(nr, (unsigned long *)(addr)); \
})
/**
* __ptr_test_bit - Test bit in a pointer's value
* @nr: the bit to test
* @addr: the address of the pointer variable
*
* Example:
* void *p = foo();
* if (__ptr_test_bit(bit, &p)) {
* ...
* } else {
* ...
* }
*/
#define __ptr_test_bit(nr, addr) \
({ \
typecheck_pointer(*(addr)); \
test_bit(nr, (unsigned long *)(addr)); \
})
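/*
 * Combined sketch (illustrative) of the __ptr_*_bit() helpers: stashing a
 * flag in bit 0 of a pointer, which only works if the pointee alignment
 * guarantees that bit is otherwise zero:
 *
 *     void *p = foo();              // assumed at least 2-byte aligned
 *     __ptr_set_bit(0, &p);         // tag the pointer
 *     if (__ptr_test_bit(0, &p))
 *             __ptr_clear_bit(0, &p);  // untag before dereferencing
 */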
#ifdef __KERNEL__
#ifndef set_mask_bits
#define set_mask_bits(ptr, mask, bits) \
({ \
const typeof(*(ptr)) mask__ = (mask), bits__ = (bits); \
typeof(*(ptr)) old__, new__; \
\
old__ = READ_ONCE(*(ptr)); \
do { \
new__ = (old__ & ~mask__) | bits__; \
} while (!try_cmpxchg(ptr, &old__, new__)); \
\
old__; \
})
#endif
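/*
 * Example (illustrative): atomically replace an 8-bit field inside a word,
 * leaving the other bits untouched; the macro evaluates to the old value.
 * `state` here stands for any unsigned long that may be updated concurrently:
 *
 *     unsigned long old;
 *
 *     old = set_mask_bits(&state, 0xffUL, 0x12UL);
 */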
#ifndef bit_clear_unless
#define bit_clear_unless(ptr, clear, test) \
({ \
const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
typeof(*(ptr)) old__, new__; \
\
old__ = READ_ONCE(*(ptr)); \
do { \
if (old__ & test__) \
break; \
new__ = old__ & ~clear__; \
} while (!try_cmpxchg(ptr, &old__, new__)); \
\
!(old__ & test__); \
})
#endif
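/*
 * Example (illustrative, mask names made up): clear the LOCK bits only if
 * none of the WAITERS bits are set; the expression evaluates to true when
 * the clear actually happened:
 *
 *     bool released = bit_clear_unless(&word, LOCK_MASK, WAITERS_MASK);
 */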
#endif /* __KERNEL__ */
#endif