/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_DIV64_H
#define _ASM_GENERIC_DIV64_H
/*
 * Copyright (C) 2003 Bernardo Innocenti <[email protected]>
 * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
 *
 * Optimization for constant divisors on 32-bit machines:
 * Copyright (C) 2006-2015 Nicolas Pitre
 *
 * The semantics of do_div() are, in C++ notation, observing that the name
 * is a function-like macro and the n parameter has the semantics of a C++
 * reference:
 *
 * uint32_t do_div(uint64_t &n, uint32_t base)
 * {
 *	uint32_t remainder = n % base;
 *	n = n / base;
 *	return remainder;
 * }
 *
 * NOTE: macro parameter n is evaluated multiple times,
 * beware of side effects!
 */

#include <linux/types.h>
#include <linux/compiler.h>

#if BITS_PER_LONG == 64

/**
 * do_div - update the 64-bit dividend with the quotient, return the remainder
 * @n: uint64_t dividend (will be updated)
 * @base: uint32_t divisor
 *
 * Summary:
 * ``uint32_t remainder = n % base;``
 * ``n = n / base;``
 *
 * Return: (uint32_t)remainder
 *
 * NOTE: macro parameter @n is evaluated multiple times,
 * beware of side effects!
 */
#define do_div(n,base) …

#elif BITS_PER_LONG == 32

#include <linux/log2.h>

/*
 * If the divisor happens to be constant, we determine the appropriate
 * inverse at compile time to turn the division into a few inline
 * multiplications which ought to be much faster.
 *
 * (It is unfortunate that gcc doesn't perform all this internally.)
 */
#define __div64_const32 …

#ifndef __arch_xprod_64
/*
 * Default C implementation for __arch_xprod_64()
 *
 * Prototype: uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
 * Semantic:  retval = ((bias ? m : 0) + m * n) >> 64
 *
 * The product is a 128-bit value, scaled down to 64 bits.
 * We rely on constant propagation to optimize away the unused conditional code.
 * Architectures may provide their own optimized assembly implementation.
 */
static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
{
	uint32_t m_lo = m;
	uint32_t m_hi = m >> 32;
	uint32_t n_lo = n;
	uint32_t n_hi = n >> 32;
	uint64_t res;
	uint32_t res_lo, res_hi, tmp;

	if (!bias) {
		res = ((uint64_t)m_lo * n_lo) >> 32;
	} else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
		/* there can't be any overflow here */
		res = (m + (uint64_t)m_lo * n_lo) >> 32;
	} else {
		res = m + (uint64_t)m_lo * n_lo;
		res_lo = res >> 32;
		res_hi = (res_lo < m_hi);
		res = res_lo | ((uint64_t)res_hi << 32);
	}

	if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
		/* there can't be any overflow here */
		res += (uint64_t)m_lo * n_hi;
		res += (uint64_t)m_hi * n_lo;
		res >>= 32;
	} else {
		res += (uint64_t)m_lo * n_hi;
		tmp = res >> 32;
		res += (uint64_t)m_hi * n_lo;
		res_lo = res >> 32;
		res_hi = (res_lo < tmp);
		res = res_lo | ((uint64_t)res_hi << 32);
	}

	res += (uint64_t)m_hi * n_hi;

	return res;
}
#endif

#ifndef __div64_32
extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
#endif

/*
 * The unnecessary pointer comparison is there
 * to check for type safety (n must be 64-bit)
 */
#define do_div …

#else /* BITS_PER_LONG == ?? */

# error do_div() does not yet support the C64

#endif /* BITS_PER_LONG */

#endif /* _ASM_GENERIC_DIV64_H */
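
/*
 * Illustrative note: on 64-bit machines the documented semantics of
 * do_div() amount to native division. A minimal statement-expression with
 * equivalent behaviour (a sketch only, not necessarily the elided macro
 * body above) would be:
 *
 *	#define do_div(n, base) ({				\
 *		uint32_t __base = (base);			\
 *		uint32_t __rem = ((uint64_t)(n)) % __base;	\
 *		(n) = ((uint64_t)(n)) / __base;			\
 *		__rem;						\
 *	})
 */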
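
/*
 * Example call site (hypothetical variable names; NSEC_PER_MSEC is the
 * usual 1000000L from <linux/time64.h>):
 *
 *	u64 ns = 1000000123;
 *	u32 rem = do_div(ns, NSEC_PER_MSEC);
 *
 * Afterwards ns == 1000 (the quotient) and rem == 123 (the remainder).
 * Because @n is both read and written, passing an expression with side
 * effects (e.g. do_div(*p++, base)) evaluates it more than once.
 */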
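
/*
 * The constant-divisor optimization behind __div64_const32 replaces the
 * division with a multiplication by a precomputed scaled inverse. As a
 * simplified 32-bit sketch of the same idea (div10 is a hypothetical
 * helper, not defined by this header):
 *
 *	static inline uint32_t div10(uint32_t n)
 *	{
 *		// 0xCCCCCCCD == ceil(2^35 / 10), so the scaled product
 *		// floors to the exact quotient for every 32-bit n.
 *		return ((uint64_t)n * 0xCCCCCCCD) >> 35;
 *	}
 *
 * __div64_const32 does the 64-bit equivalent, deriving the inverse and a
 * rounding bias at compile time and delegating the 128-bit product to
 * __arch_xprod_64().
 */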