cregit-Linux: how code gets into the kernel

Release 4.12: include/linux/math64.h

Directory: include/linux
#ifndef _LINUX_MATH64_H

#define _LINUX_MATH64_H

#include <linux/types.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64


#define div64_long(x, y) div64_s64((x), (y))

#define div64_ul(x, y)   div64_u64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 *
 * This is commonly overridden by 32bit archs to provide an optimized 64bit
 * divide.
 */

static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
        *remainder = dividend % divisor;
        return dividend / divisor;
}

Contributors

Person        Tokens  Prop     Commits  Commit Prop
Roman Zippel  29      100.00%  1        100.00%
Total         29      100.00%  1        100.00%
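
As a quick illustration of the quotient-plus-remainder interface, here is a minimal userspace sketch (not kernel code): the u64/u32 typedefs and the function body stand in for <linux/types.h> and the 64-bit branch above, since userspace cannot include kernel headers.

/* Userspace sketch: exercises the div_u64_rem() interface shown above. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef uint32_t u32;

static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
        *remainder = dividend % divisor;
        return dividend / divisor;
}

int main(void)
{
        u32 rem;
        /* Split a byte count into 4 KiB blocks plus a leftover. */
        u64 blocks = div_u64_rem(1000000ULL, 4096U, &rem);

        printf("%llu blocks + %u bytes\n",
               (unsigned long long)blocks, rem);        /* 244 blocks + 576 bytes */
        return 0;
}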

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
        *remainder = dividend % divisor;
        return dividend / divisor;
}

Contributors

Person        Tokens  Prop     Commits  Commit Prop
Roman Zippel  29      100.00%  1        100.00%
Total         29      100.00%  1        100.00%

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
        *remainder = dividend % divisor;
        return dividend / divisor;
}

Contributors

Person        Tokens  Prop     Commits  Commit Prop
Mike Snitzer  29      100.00%  1        100.00%
Total         29      100.00%  1        100.00%

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
        return dividend / divisor;
}

Contributors

Person        Tokens  Prop     Commits  Commit Prop
Roman Zippel  18      100.00%  1        100.00%
Total         18      100.00%  1        100.00%

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
        return dividend / divisor;
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
Brian Behlendorf  18      100.00%  1        100.00%
Total             18      100.00%  1        100.00%

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y)   div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
        *remainder = do_div(dividend, divisor);
        return dividend;
}

Contributors

Person        Tokens  Prop     Commits  Commit Prop
Roman Zippel  30      100.00%  1        100.00%
Total         30      100.00%  1        100.00%
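
This 32-bit fallback leans on do_div() from <asm/div64.h>, which has an easy-to-miss calling convention: it is a macro that divides its first argument *in place* and evaluates to the remainder, which is why the function above can simply return the modified dividend. Below is a userspace sketch of that convention; the stand-in macro only mimics the semantics of the real, architecture-specific implementation, and GCC statement expressions are assumed.

/* Userspace sketch of the do_div() calling convention (stand-in macro,
 * not the asm/div64.h implementation). */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef uint32_t u32;

/* Stand-in: divide n *in place*, evaluate to the remainder. */
#define do_div(n, base) ({                      \
        u32 __rem = (u32)((n) % (base));        \
        (n) /= (base);                          \
        __rem;                                  \
})

int main(void)
{
        u64 ns = 10000000123ULL;
        u32 rem = do_div(ns, 1000000000U);      /* ns is modified! */

        printf("%llu s + %u ns\n", (unsigned long long)ns, rem);
        return 0;
}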

#endif

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
        u32 remainder;
        return div_u64_rem(dividend, divisor, &remainder);
}

Contributors

Person        Tokens  Prop     Commits  Commit Prop
Roman Zippel  27      100.00%  1        100.00%
Total         27      100.00%  1        100.00%
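
The reason to reach for div_u64() instead of writing `dividend / divisor` directly is that a bare 64-by-32 division compiles to a libgcc helper call (such as __udivdi3) on 32-bit targets, which the kernel does not link against. A minimal userspace sketch of the preferred pattern, with the kernel typedefs and function bodies stubbed out:

/* Userspace sketch: prefer div_u64() over a bare "u64 / u32". */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef uint32_t u32;

static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
        *remainder = dividend % divisor;
        return dividend / divisor;
}

static inline u64 div_u64(u64 dividend, u32 divisor)
{
        u32 remainder;
        return div_u64_rem(dividend, divisor, &remainder);
}

int main(void)
{
        u64 bytes = 123456789012ULL;

        /* Average throughput over 3600 seconds. */
        printf("%llu bytes/s\n",
               (unsigned long long)div_u64(bytes, 3600U));
        return 0;
}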

#endif

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
        s32 remainder;
        return div_s64_rem(dividend, divisor, &remainder);
}

Contributors

Person        Tokens  Prop     Commits  Commit Prop
Roman Zippel  27      100.00%  1        100.00%
Total         27      100.00%  1        100.00%

#endif

u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
        u32 ret = 0;

        while (dividend >= divisor) {
                /* The following asm() prevents the compiler from
                   optimising this loop into a modulo operation. */
                asm("" : "+rm"(dividend));

                dividend -= divisor;
                ret++;
        }

        *remainder = dividend;

        return ret;
}

Contributors

Person               Tokens  Prop     Commits  Commit Prop
Jeremy Fitzhardinge  47      100.00%  1        100.00%
Total                47      100.00%  1        100.00%
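
__iter_div_u64_rem() replaces a hardware divide with repeated subtraction, so it is only sensible when the caller knows the quotient is tiny, e.g. normalizing a nanosecond count that overran the one-second boundary by a few units. A userspace sketch of that intended use (__always_inline relaxed to plain inline, GCC-style inline asm assumed):

/* Userspace sketch: normalizing an oversized nanosecond count, the
 * typical use of __iter_div_u64_rem() (quotient known to be tiny). */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef uint32_t u32;

static inline u32 __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
        u32 ret = 0;

        while (dividend >= divisor) {
                /* Keep the compiler from turning the loop into a divide. */
                asm("" : "+rm"(dividend));
                dividend -= divisor;
                ret++;
        }
        *remainder = dividend;
        return ret;
}

int main(void)
{
        u64 ns = 3 * 1000000000ULL + 250;       /* 3.00000025 s */
        u64 rem;
        u32 sec = __iter_div_u64_rem(ns, 1000000000U, &rem);

        printf("%u s + %llu ns\n", sec, (unsigned long long)rem);  /* 3 s + 250 ns */
        return 0;
}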

#ifndef mul_u32_u32
/*
 * Many a GCC version messes this up and generates a 64x64 mult :-(
 */
static inline u64 mul_u32_u32(u32 a, u32 b)
{
        return (u64)a * b;
}

Contributors

Person          Tokens  Prop     Commits  Commit Prop
Peter Zijlstra  21      100.00%  1        100.00%
Total           21      100.00%  1        100.00%

#endif

#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
        return (u64)(((unsigned __int128)a * mul) >> shift);
}

Contributors

Person          Tokens  Prop     Commits  Commit Prop
Peter Zijlstra  35      100.00%  1        100.00%
Total           35      100.00%  1        100.00%

#endif /* mul_u64_u32_shr */

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
        return (u64)(((unsigned __int128)a * mul) >> shift);
}

Contributors

Person          Tokens  Prop     Commits  Commit Prop
Haozhong Zhang  35      100.00%  1        100.00%
Total           35      100.00%  1        100.00%

#endif /* mul_u64_u64_shr */

#else

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
        u32 ah, al;
        u64 ret;

        al = a;
        ah = a >> 32;

        ret = mul_u32_u32(al, mul) >> shift;
        if (ah)
                ret += mul_u32_u32(ah, mul) << (32 - shift);

        return ret;
}

Contributors

Person          Tokens  Prop     Commits  Commit Prop
Peter Zijlstra  68      100.00%  2        100.00%
Total           68      100.00%  2        100.00%
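
This fallback works because splitting a = ah * 2^32 + al gives (a * mul) >> shift = ((al * mul) >> shift) + ((ah * mul) << (32 - shift)), an identity that only holds for shift values up to 32 (the `32 - shift` would go negative otherwise), so callers of the non-__int128 path are expected to stay within that range. A userspace sanity check of the identity against __int128 follows; the _portable name is this sketch's, not the kernel's, and a GCC/Clang host with __int128 is assumed.

/* Userspace check of the split-multiply identity behind the 32-bit
 * mul_u64_u32_shr() fallback; shift must be <= 32 here. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef uint32_t u32;

static inline u64 mul_u32_u32(u32 a, u32 b)
{
        return (u64)a * b;
}

static u64 mul_u64_u32_shr_portable(u64 a, u32 mul, unsigned int shift)
{
        u32 al = a, ah = a >> 32;
        u64 ret = mul_u32_u32(al, mul) >> shift;

        if (ah)
                ret += mul_u32_u32(ah, mul) << (32 - shift);
        return ret;
}

int main(void)
{
        u64 a = 0x123456789abcdef0ULL;
        u32 mul = 0xdeadbeefU;
        unsigned int shift = 7;

        u64 want = (u64)(((unsigned __int128)a * mul) >> shift);
        u64 got = mul_u64_u32_shr_portable(a, mul, shift);

        printf("want %llx, got %llx\n",
               (unsigned long long)want, (unsigned long long)got);
        return want != got;
}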

#endif /* mul_u64_u32_shr */

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
        union {
                u64 ll;
                struct {
#ifdef __BIG_ENDIAN
                        u32 high, low;
#else
                        u32 low, high;
#endif
                } l;
        } rl, rm, rn, rh, a0, b0;
        u64 c;

        a0.ll = a;
        b0.ll = b;

        rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
        rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
        rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
        rh.ll = mul_u32_u32(a0.l.high, b0.l.high);

        /*
         * Each of these lines computes a 64-bit intermediate result into "c",
         * starting at bits 32-95.  The low 32-bits go into the result of the
         * multiplication, the high 32-bits are carried into the next step.
         */
        rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
        rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
        rh.l.high = (c >> 32) + rh.l.high;

        /*
         * The 128-bit result of the multiplication is in rl.ll and rh.ll,
         * shift it right and throw away the high part of the result.
         */
        if (shift == 0)
                return rl.ll;
        if (shift < 64)
                return (rl.ll >> shift) | (rh.ll << (64 - shift));
        return rh.ll >> (shift & 63);
}

Contributors

Person          Tokens  Prop     Commits  Commit Prop
Haozhong Zhang  262     94.24%   1        50.00%
Peter Zijlstra  16      5.76%    1        50.00%
Total           278     100.00%  2        100.00%
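
The portable path is schoolbook long multiplication with base-2^32 digits: a * b = ah*bh*2^64 + (ah*bl + al*bh)*2^32 + al*bl, with the two middle partial products folded in through the carry variable c. A compact userspace check of that reconstruction against __int128 (GCC/Clang host assumed):

/* Sketch: the four 32x32 partial products reconstruct the 128-bit
 * product exactly, checked here with __int128. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t a = 0xfedcba9876543210ULL, b = 0x0f1e2d3c4b5a6978ULL;
        uint32_t al = a, ah = a >> 32, bl = b, bh = b >> 32;

        unsigned __int128 p = (unsigned __int128)a * b;
        unsigned __int128 q = ((unsigned __int128)ah * bh << 64)
                            + ((unsigned __int128)ah * bl << 32)
                            + ((unsigned __int128)al * bh << 32)
                            + (unsigned __int128)al * bl;

        printf("match: %d\n", p == q);  /* prints 1 */
        return p != q;
}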

#endif /* mul_u64_u64_shr */

#endif

#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
        union {
                u64 ll;
                struct {
#ifdef __BIG_ENDIAN
                        u32 high, low;
#else
                        u32 low, high;
#endif
                } l;
        } u, rl, rh;

        u.ll = a;
        rl.ll = mul_u32_u32(u.l.low, mul);
        rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;

        /* Bits 32-63 of the result will be in rh.l.low. */
        rl.l.high = do_div(rh.ll, divisor);

        /* Bits 0-31 of the result will be in rl.l.low. */
        do_div(rl.ll, divisor);

        rl.l.high = rh.l.low;
        return rl.ll;
}

Contributors

Person          Tokens  Prop     Commits  Commit Prop
Haozhong Zhang  127     94.07%   1        50.00%
Peter Zijlstra  8       5.93%    1        50.00%
Total           135     100.00%  2        100.00%
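
mul_u64_u32_div() avoids a full 128-bit division by doing long division one base-2^32 digit at a time: the first do_div() divides bits 32-95 of the product, and its remainder becomes the high half of the next digit for the second do_div(). The result is only meaningful when the true quotient fits in 64 bits (and the upper quotient in 32). A userspace sketch of the same digit-wise division, cross-checked against __int128 (GCC/Clang host assumed):

/* Sketch of the base-2^32 long division used by mul_u64_u32_div():
 * divide the high part first, feed the remainder into the low digit. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t a = 0x123456789abcdef0ULL;
        uint32_t mul = 1000003U, divisor = 999983U;

        /* 96-bit product, as a high 64-bit part and a low 32-bit digit. */
        uint64_t lo64 = (uint64_t)(uint32_t)a * mul;
        uint64_t hi64 = (a >> 32) * mul + (lo64 >> 32);

        /* Long division, one base-2^32 digit at a time. */
        uint64_t q_hi = hi64 / divisor;
        uint64_t rem = hi64 % divisor;
        uint64_t low_digit = (rem << 32) | (uint32_t)lo64;
        uint64_t q_lo = low_digit / divisor;

        uint64_t got = (q_hi << 32) | q_lo;     /* assumes q_hi < 2^32 */
        uint64_t want = (uint64_t)(((unsigned __int128)a * mul) / divisor);

        printf("got %llx, want %llx\n",
               (unsigned long long)got, (unsigned long long)want);
        return got != want;
}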

#endif /* mul_u64_u32_div */

#endif /* _LINUX_MATH64_H */

Overall Contributors

Person               Tokens  Prop     Commits  Commit Prop
Haozhong Zhang       442     42.14%   2        15.38%
Roman Zippel         242     23.07%   2        15.38%
Peter Zijlstra       181     17.25%   2        15.38%
Jeremy Fitzhardinge  61      5.82%    2        15.38%
Mike Snitzer         50      4.77%   1        7.69%
Brian Behlendorf     35      3.34%   1        7.69%
Alex Shi             20      1.91%   1        7.69%
Sasha Levin          16      1.53%   1        7.69%
Stanislaw Gruszka    2       0.19%   1        7.69%
Total                1049    100.00%  13       100.00%
Created with cregit.