Release 4.17 lib/find_bit.c
/* bit search implementation
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* Copyright (C) 2008 IBM Corporation
* 'find_last_bit' is written by Rusty Russell <rusty@rustcorp.com.au>
* (Inspired by David Howell's find_next_bit implementation)
*
* Rewritten by Yury Norov <yury.norov@gmail.com> to decrease
* size and improve performance, 2015.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/bitops.h>
#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/kernel.h>
#if !defined(find_next_bit) || !defined(find_next_zero_bit) || \
!defined(find_next_and_bit)
/*
 * This is a common helper function for find_next_bit, find_next_zero_bit, and
 * find_next_and_bit. The differences are:
 *  - The "invert" argument, which is XORed with each fetched word before
 *    searching it for one bits (0UL to find set bits, ~0UL to find zero bits).
 *  - The optional "addr2", which is ANDed with "addr1" if present.
 *
 * Returns the number of the first matching bit at or after @start, or
 * @nbits if no such bit exists in the range [@start, @nbits).
 */
static inline unsigned long _find_next_bit(const unsigned long *addr1,
		const unsigned long *addr2, unsigned long nbits,
		unsigned long start, unsigned long invert)
{
	unsigned long tmp;

	if (unlikely(start >= nbits))
		return nbits;

	tmp = addr1[start / BITS_PER_LONG];
	if (addr2)
		tmp &= addr2[start / BITS_PER_LONG];
	/* After the XOR, a candidate bit is always a 1 bit in tmp. */
	tmp ^= invert;

	/* Handle 1st word: mask off the bits below "start". */
	tmp &= BITMAP_FIRST_WORD_MASK(start);
	start = round_down(start, BITS_PER_LONG);

	/* Scan one whole word at a time until a candidate bit appears. */
	while (!tmp) {
		start += BITS_PER_LONG;
		if (start >= nbits)
			return nbits;

		tmp = addr1[start / BITS_PER_LONG];
		if (addr2)
			tmp &= addr2[start / BITS_PER_LONG];
		tmp ^= invert;
	}

	/* min() clamps a hit in the unused tail of the last word to nbits. */
	return min(start + __ffs(tmp), nbits);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yury Norov | 50 | 33.33% | 1 | 20.00% |
Clement Courbet | 42 | 28.00% | 1 | 20.00% |
Akinobu Mita | 29 | 19.33% | 1 | 20.00% |
David Howells | 26 | 17.33% | 1 | 20.00% |
Matthew Wilcox | 3 | 2.00% | 1 | 20.00% |
Total | 150 | 100.00% | 5 | 100.00% |
#endif
#ifndef find_next_bit
/*
 * Find the next set bit in a memory region.
 *
 * Returns the number of the next set bit at or after @offset, or @size if
 * there is no such bit before the end of the region.
 */
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
			    unsigned long offset)
{
	return _find_next_bit(addr, NULL, size, offset, 0UL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yury Norov | 30 | 88.24% | 1 | 33.33% |
Akinobu Mita | 2 | 5.88% | 1 | 33.33% |
Clement Courbet | 2 | 5.88% | 1 | 33.33% |
Total | 34 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(find_next_bit);
#endif
#ifndef find_next_zero_bit
/*
 * Find the next cleared bit in a memory region.
 *
 * Returns the number of the next zero bit at or after @offset, or @size if
 * there is no such bit before the end of the region. The ~0UL invert makes
 * the common helper search for zero bits instead of set bits.
 */
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
				 unsigned long offset)
{
	return _find_next_bit(addr, NULL, size, offset, ~0UL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Akinobu Mita | 18 | 51.43% | 1 | 25.00% |
Yury Norov | 14 | 40.00% | 1 | 25.00% |
Clement Courbet | 2 | 5.71% | 1 | 25.00% |
Thomas Gleixner | 1 | 2.86% | 1 | 25.00% |
Total | 35 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(find_next_zero_bit);
#endif
#if !defined(find_next_and_bit)
/*
 * Find the next bit that is set in both of two memory regions.
 *
 * Returns the number of the next bit at or after @offset that is set in
 * @addr1 AND @addr2, or @size if there is no such bit.
 */
unsigned long find_next_and_bit(const unsigned long *addr1,
				const unsigned long *addr2, unsigned long size,
				unsigned long offset)
{
	return _find_next_bit(addr1, addr2, size, offset, 0UL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Clement Courbet | 40 | 100.00% | 1 | 100.00% |
Total | 40 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(find_next_and_bit);
#endif
#ifndef find_first_bit
/*
 * Find the first set bit in a memory region.
 *
 * Returns the number of the first set bit, or @size if the whole region
 * is zero.
 */
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
	unsigned long bit = 0;

	while (bit < size) {
		unsigned long word = addr[bit / BITS_PER_LONG];

		/* min() clamps a hit in the unused tail of the last word. */
		if (word)
			return min(bit + __ffs(word), size);
		bit += BITS_PER_LONG;
	}

	return size;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yury Norov | 38 | 58.46% | 1 | 33.33% |
Akinobu Mita | 23 | 35.38% | 1 | 33.33% |
David Howells | 4 | 6.15% | 1 | 33.33% |
Total | 65 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(find_first_bit);
#endif
#ifndef find_first_zero_bit
/*
 * Find the first cleared bit in a memory region.
 *
 * Returns the number of the first zero bit, or @size if every bit in the
 * region is set.
 */
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
	unsigned long bit = 0;

	while (bit < size) {
		unsigned long word = addr[bit / BITS_PER_LONG];

		/* A word with any zero bit is not all-ones. */
		if (word != ~0UL)
			return min(bit + ffz(word), size);
		bit += BITS_PER_LONG;
	}

	return size;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yury Norov | 35 | 51.47% | 1 | 50.00% |
Alexander van Heukelum | 33 | 48.53% | 1 | 50.00% |
Total | 68 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(find_first_zero_bit);
#endif
#ifndef find_last_bit
/*
 * Find the last (highest-numbered) set bit in a memory region.
 *
 * Returns the number of the last set bit, or @size if the region is
 * entirely zero.
 */
unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
{
	unsigned long mask, idx;

	if (!size)
		return size;

	/*
	 * The topmost word is searched under a partial mask so bits past
	 * @size are ignored; every earlier word is searched in full.
	 */
	mask = BITMAP_LAST_WORD_MASK(size);
	idx = (size - 1) / BITS_PER_LONG;

	for (;;) {
		unsigned long word = addr[idx] & mask;

		if (word)
			return idx * BITS_PER_LONG + __fls(word);
		if (!idx)
			return size;
		idx--;
		mask = ~0ul;
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yury Norov | 81 | 100.00% | 1 | 100.00% |
Total | 81 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(find_last_bit);
#endif
#ifdef __BIG_ENDIAN
/* include/linux/byteorder does not support "unsigned long" type */
/*
 * Byte-swap an unsigned long, dispatching at compile time to the swab
 * helper that matches the native word width.
 */
static inline unsigned long ext2_swab(const unsigned long y)
{
#if BITS_PER_LONG == 64
	return (unsigned long) __swab64((u64) y);
#elif BITS_PER_LONG == 32
	return (unsigned long) __swab32((u32) y);
#else
#error BITS_PER_LONG not defined
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Akinobu Mita | 26 | 46.43% | 1 | 33.33% |
Yury Norov | 17 | 30.36% | 1 | 33.33% |
Alexander van Heukelum | 13 | 23.21% | 1 | 33.33% |
Total | 56 | 100.00% | 3 | 100.00% |
#if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le)
/*
 * Big-endian counterpart of _find_next_bit for little-endian bitmaps:
 * identical word-at-a-time scan, except the per-word boundary mask and
 * the final bit search are byte-swapped via ext2_swab() so that bit
 * numbering follows little-endian layout on a big-endian host.
 *
 * Returns the number of the first matching bit at or after @start, or
 * @nbits if no such bit exists in the range [@start, @nbits).
 */
static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
		const unsigned long *addr2, unsigned long nbits,
		unsigned long start, unsigned long invert)
{
	unsigned long tmp;

	if (unlikely(start >= nbits))
		return nbits;

	tmp = addr1[start / BITS_PER_LONG];
	if (addr2)
		tmp &= addr2[start / BITS_PER_LONG];
	/* invert is 0UL to find set bits, ~0UL to find zero bits. */
	tmp ^= invert;

	/* Handle 1st word: the mask is swabbed to match LE bit layout. */
	tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start));
	start = round_down(start, BITS_PER_LONG);

	/* Scan one whole word at a time until a candidate bit appears. */
	while (!tmp) {
		start += BITS_PER_LONG;
		if (start >= nbits)
			return nbits;

		tmp = addr1[start / BITS_PER_LONG];
		if (addr2)
			tmp &= addr2[start / BITS_PER_LONG];
		tmp ^= invert;
	}

	/* Swab back before __ffs so the returned bit number is LE-ordered. */
	return min(start + __ffs(ext2_swab(tmp)), nbits);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yury Norov | 56 | 35.90% | 1 | 25.00% |
Akinobu Mita | 55 | 35.26% | 1 | 25.00% |
Clement Courbet | 42 | 26.92% | 1 | 25.00% |
Matthew Wilcox | 3 | 1.92% | 1 | 25.00% |
Total | 156 | 100.00% | 4 | 100.00% |
#endif
#ifndef find_next_zero_bit_le
/*
 * Find the next cleared bit in a little-endian bitmap on a big-endian host.
 *
 * Returns the number of the next zero bit at or after @offset, or @size if
 * there is no such bit.
 */
unsigned long find_next_zero_bit_le(const void *addr, unsigned
		long size, unsigned long offset)
{
	return _find_next_bit_le(addr, NULL, size, offset, ~0UL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yury Norov | 30 | 88.24% | 1 | 33.33% |
Clement Courbet | 2 | 5.88% | 1 | 33.33% |
Akinobu Mita | 2 | 5.88% | 1 | 33.33% |
Total | 34 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(find_next_zero_bit_le);
#endif
#ifndef find_next_bit_le
/*
 * Find the next set bit in a little-endian bitmap on a big-endian host.
 *
 * Returns the number of the next set bit at or after @offset, or @size if
 * there is no such bit.
 */
unsigned long find_next_bit_le(const void *addr, unsigned
		long size, unsigned long offset)
{
	return _find_next_bit_le(addr, NULL, size, offset, 0UL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Aneesh Kumar K.V | 23 | 69.70% | 1 | 20.00% |
Yury Norov | 5 | 15.15% | 1 | 20.00% |
Akinobu Mita | 3 | 9.09% | 2 | 40.00% |
Clement Courbet | 2 | 6.06% | 1 | 20.00% |
Total | 33 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(find_next_bit_le);
#endif
#endif /* __BIG_ENDIAN */
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yury Norov | 420 | 46.82% | 2 | 13.33% |
Akinobu Mita | 196 | 21.85% | 5 | 33.33% |
Clement Courbet | 154 | 17.17% | 1 | 6.67% |
Alexander van Heukelum | 52 | 5.80% | 1 | 6.67% |
David Howells | 39 | 4.35% | 2 | 13.33% |
Aneesh Kumar K.V | 27 | 3.01% | 1 | 6.67% |
Matthew Wilcox | 6 | 0.67% | 1 | 6.67% |
Thomas Gleixner | 2 | 0.22% | 1 | 6.67% |
Paul Gortmaker | 1 | 0.11% | 1 | 6.67% |
Total | 897 | 100.00% | 15 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.