10#ifndef EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H
11#define EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H
14#include "./InternalHeaderCheck.h"
22EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE std::enable_if_t<
sizeof(T) == 4,
int> count_leading_zeros(
const T val) {
23#ifdef EIGEN_GPU_COMPILE_PHASE
25#elif defined(SYCL_DEVICE_ONLY)
26 return cl::sycl::clz(val);
29 _BitScanReverse(&index, val);
32 EIGEN_STATIC_ASSERT(
sizeof(
unsigned long long) == 8, YOU_MADE_A_PROGRAMMING_MISTAKE);
33 return __builtin_clz(
static_cast<uint32_t
>(val));
38EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE std::enable_if_t<
sizeof(T) == 8,
int> count_leading_zeros(
const T val) {
39#ifdef EIGEN_GPU_COMPILE_PHASE
41#elif defined(SYCL_DEVICE_ONLY)
42 return static_cast<int>(cl::sycl::clz(val));
43#elif EIGEN_COMP_MSVC && EIGEN_ARCH_x86_64
45 _BitScanReverse64(&index, val);
49 unsigned int lo = (
unsigned int)(val & 0xffffffff);
50 unsigned int hi = (
unsigned int)((val >> 32) & 0xffffffff);
53 n = 32 + count_leading_zeros<unsigned int>(lo);
55 n = count_leading_zeros<unsigned int>(hi);
58 EIGEN_STATIC_ASSERT(
sizeof(
unsigned long long) == 8, YOU_MADE_A_PROGRAMMING_MISTAKE);
59 return __builtin_clzll(
static_cast<uint64_t
>(val));
// Maps any 4- or 8-byte integer type to the unsigned type of the same width.
template <typename T>
struct UnsignedTraits {
  typedef std::conditional_t<sizeof(T) == 8, uint64_t, uint32_t> type;
};
70 typedef typename UnsignedTraits<T>::type type;
71 static constexpr int N =
sizeof(T) * 8;
75EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t muluh(
const uint32_t a,
const T b) {
76#if defined(EIGEN_GPU_COMPILE_PHASE)
77 return __umulhi(a, b);
78#elif defined(SYCL_DEVICE_ONLY)
79 return cl::sycl::mul_hi(a,
static_cast<uint32_t
>(b));
81 return (
static_cast<uint64_t
>(a) * b) >> 32;
86EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t muluh(
const uint64_t a,
const T b) {
87#if defined(EIGEN_GPU_COMPILE_PHASE)
88 return __umul64hi(a, b);
89#elif defined(SYCL_DEVICE_ONLY)
90 return cl::sycl::mul_hi(a,
static_cast<uint64_t
>(b));
91#elif EIGEN_COMP_MSVC && (EIGEN_ARCH_x86_64 || EIGEN_ARCH_ARM64)
92 return __umulh(a,
static_cast<uint64_t
>(b));
93#elif EIGEN_HAS_BUILTIN_INT128
94 __uint128_t v =
static_cast<__uint128_t
>(a) *
static_cast<__uint128_t
>(b);
95 return static_cast<uint64_t
>(v >> 64);
97 return (TensorUInt128<static_val<0>, uint64_t>(a) * TensorUInt128<static_val<0>, uint64_t>(b)).upper();
101template <
int N,
typename T>
102struct DividerHelper {
103 static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t computeMultiplier(
const int log_div,
const T divider) {
104 EIGEN_STATIC_ASSERT(N == 32, YOU_MADE_A_PROGRAMMING_MISTAKE);
105 return static_cast<uint32_t
>((
static_cast<uint64_t
>(1) << (N + log_div)) / divider -
106 (
static_cast<uint64_t
>(1) << N) + 1);
111struct DividerHelper<64, T> {
112 static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t computeMultiplier(
const int log_div,
const T divider) {
113#if EIGEN_HAS_BUILTIN_INT128 && !defined(EIGEN_GPU_COMPILE_PHASE) && !defined(SYCL_DEVICE_ONLY)
114 return static_cast<uint64_t
>((
static_cast<__uint128_t
>(1) << (64 + log_div)) /
static_cast<__uint128_t
>(divider) -
115 (
static_cast<__uint128_t
>(1) << 64) + 1);
117 const uint64_t shift = 1ULL << log_div;
118 TensorUInt128<uint64_t, uint64_t> result =
119 TensorUInt128<uint64_t, static_val<0> >(shift, 0) / TensorUInt128<static_val<0>, uint64_t>(divider) -
120 TensorUInt128<static_val<1>, static_val<0> >(1, 0) + TensorUInt128<static_val<0>, static_val<1> >(1);
121 return static_cast<uint64_t
>(result);
137template <
typename T,
bool div_gt_one = false>
138struct TensorIntDivisor {
140 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor() {
149 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor(
const T divider) {
150 const int N = DividerTraits<T>::N;
151 eigen_assert(
static_cast<typename UnsignedTraits<T>::type
>(divider) < NumTraits<UnsignedType>::highest() / 2);
152 eigen_assert(divider > 0);
155 const int leading_zeros = count_leading_zeros(
static_cast<UnsignedType
>(divider));
156 int log_div = N - leading_zeros;
158 if ((
static_cast<typename UnsignedTraits<T>::type
>(1) << (log_div - 1)) ==
159 static_cast<typename UnsignedTraits<T>::type
>(divider))
162 multiplier = DividerHelper<N, T>::computeMultiplier(log_div, divider);
163 shift1 = log_div > 1 ? 1 : log_div;
164 shift2 = log_div > 1 ? log_div - 1 : 0;
169 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T divide(
const T numerator)
const {
170 eigen_assert(
static_cast<typename UnsignedTraits<T>::type
>(numerator) < NumTraits<UnsignedType>::highest() / 2);
173 UnsignedType t1 = muluh(multiplier, numerator);
174 UnsignedType t = (
static_cast<UnsignedType
>(numerator) - t1) >> shift1;
175 return (t1 + t) >> shift2;
179 typedef typename DividerTraits<T>::type UnsignedType;
180 UnsignedType multiplier;
189class TensorIntDivisor<int32_t, true> {
191 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor() {
196 EIGEN_DEVICE_FUNC TensorIntDivisor(int32_t divider) {
197 eigen_assert(divider >= 2);
201 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
int divide(
const int32_t n)
const {
202#ifdef EIGEN_GPU_COMPILE_PHASE
203 return (__umulhi(magic, n) >> shift);
204#elif defined(SYCL_DEVICE_ONLY)
205 return (cl::sycl::mul_hi(magic,
static_cast<uint32_t
>(n)) >> shift);
207 uint64_t v =
static_cast<uint64_t
>(magic) *
static_cast<uint64_t
>(n);
208 return (
static_cast<uint32_t
>(v >> 32) >> shift);
215 EIGEN_DEVICE_FUNC
void calcMagic(int32_t d) {
216 const unsigned two31 = 0x80000000;
218 unsigned t = two31 + (ad >> 31);
219 unsigned anc = t - 1 - t % ad;
221 unsigned q1 = two31 / anc;
222 unsigned r1 = two31 - q1 * anc;
223 unsigned q2 = two31 / ad;
224 unsigned r2 = two31 - q2 * ad;
241 }
while (q1 < delta || (q1 == delta && r1 == 0));
243 magic = (unsigned)(q2 + 1);
251template <
typename T,
bool div_gt_one>
252EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator/(
const T& numerator,
const TensorIntDivisor<T, div_gt_one>& divisor) {
253 return divisor.divide(numerator);
// Namespace containing all symbols from the Eigen library.