Eigen 5.0.1-dev+7c7d8473
 
PacketMath.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner (benoit.steiner.goog@gmail.com)
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PACKET_MATH_AVX_H
#define EIGEN_PACKET_MATH_AVX_H

// IWYU pragma: private
#include "../../InternalHeaderCheck.h"

namespace Eigen {

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#if !defined(EIGEN_VECTORIZE_AVX512) && !defined(EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS)
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16
#endif

#ifdef EIGEN_VECTORIZE_FMA
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif
#endif

typedef __m256 Packet8f;
typedef eigen_packet_wrapper<__m256i, 0> Packet8i;
typedef __m256d Packet4d;
#ifndef EIGEN_VECTORIZE_AVX512FP16
typedef eigen_packet_wrapper<__m128i, 2> Packet8h;
#endif
typedef eigen_packet_wrapper<__m128i, 3> Packet8bf;
typedef eigen_packet_wrapper<__m256i, 4> Packet8ui;

#ifdef EIGEN_VECTORIZE_AVX2
// Start from 3 to be compatible with AVX512
typedef eigen_packet_wrapper<__m256i, 3> Packet4l;
typedef eigen_packet_wrapper<__m256i, 5> Packet4ul;
#endif
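
// [Editorial note] The integer template argument of eigen_packet_wrapper is a
// disambiguating tag, not a size: it makes packets that share one raw register
// type (__m256i here, __m128i for Packet8h/Packet8bf) distinct C++ types, so
// that overloads such as padd<Packet8i> and padd<Packet8ui> can be resolved.
// Tags must be unique per raw type, which is why Packet4l starts at 3 to line
// up with the AVX512 definitions.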

template <>
struct is_arithmetic<__m256> {
  enum { value = true };
};
template <>
struct is_arithmetic<__m256i> {
  enum { value = true };
};
template <>
struct is_arithmetic<__m256d> {
  enum { value = true };
};
template <>
struct is_arithmetic<Packet8i> {
  enum { value = true };
};
// Note that `Packet8ui` uses the underlying type `__m256i`, which is
// interpreted as a vector of _signed_ `int32`s, which breaks some arithmetic
// operations used in `GenericPacketMath.h`.
template <>
struct is_arithmetic<Packet8ui> {
  enum { value = false };
};
#ifndef EIGEN_VECTORIZE_AVX512FP16
template <>
struct is_arithmetic<Packet8h> {
  enum { value = true };
};
#endif
template <>
struct is_arithmetic<Packet8bf> {
  enum { value = true };
};
#ifdef EIGEN_VECTORIZE_AVX2
template <>
struct is_arithmetic<Packet4l> {
  enum { value = true };
};
// Note that `Packet4ul` uses the underlying type `__m256i`, which is
// interpreted as a vector of _signed_ `int64`s, which breaks some arithmetic
// operations used in `GenericPacketMath.h`.
template <>
struct is_arithmetic<Packet4ul> {
  enum { value = false };
};
#endif

// Use the packet_traits defined in AVX512/PacketMath.h instead if we're going
// to leverage AVX512 instructions.
#ifndef EIGEN_VECTORIZE_AVX512
template <>
struct packet_traits<float> : default_packet_traits {
  typedef Packet8f type;
  typedef Packet4f half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 8,

    HasCmp = 1,
    HasDiv = 1,
    HasReciprocal = EIGEN_FAST_MATH,
    HasSin = EIGEN_FAST_MATH,
    HasCos = EIGEN_FAST_MATH,
    HasACos = 1,
    HasASin = 1,
    HasATan = 1,
    HasATanh = 1,
    HasLog = 1,
    HasExp = 1,
    HasLog1p = 1,
    HasExpm1 = 1,
    HasPow = 1,
    HasNdtri = 1,
    HasBessel = 1,
    HasSqrt = 1,
    HasRsqrt = 1,
    HasCbrt = 1,
    HasTanh = EIGEN_FAST_MATH,
    HasErf = EIGEN_FAST_MATH,
    HasErfc = EIGEN_FAST_MATH,
  };
};
template <>
struct packet_traits<double> : default_packet_traits {
  typedef Packet4d type;
  typedef Packet2d half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4,

    HasCmp = 1,
    HasDiv = 1,
#ifdef EIGEN_VECTORIZE_AVX2
    HasSin = EIGEN_FAST_MATH,
    HasCos = EIGEN_FAST_MATH,
#endif
    HasTanh = EIGEN_FAST_MATH,
    HasErf = 1,
    HasErfc = 1,
    HasLog = 1,
    HasExp = 1,
    HasLog1p = 1,
    HasExpm1 = 1,
    HasPow = 1,
    HasSqrt = 1,
    HasRsqrt = 1,
    HasCbrt = 1,
    HasATan = 1,
    HasATanh = 1,
  };
};

template <>
struct packet_traits<Eigen::half> : default_packet_traits {
  typedef Packet8h type;
  // There is no half-size packet for Packet8h.
  typedef Packet8h half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 8,

    HasCmp = 1,
    HasAdd = 1,
    HasSub = 1,
    HasMul = 1,
    HasDiv = 1,
    HasSin = EIGEN_FAST_MATH,
    HasCos = EIGEN_FAST_MATH,
    HasNegate = 1,
    HasAbs = 1,
    HasMin = 1,
    HasMax = 1,
    HasConj = 1,
    HasSetLinear = 0,
    HasLog = 1,
    HasLog1p = 1,
    HasExpm1 = 1,
    HasExp = 1,
    HasSqrt = 1,
    HasRsqrt = 1,
    HasTanh = EIGEN_FAST_MATH,
    HasErf = EIGEN_FAST_MATH,
    HasBessel = 1,
    HasNdtri = 1
  };
};

template <>
struct packet_traits<bfloat16> : default_packet_traits {
  typedef Packet8bf type;
  // There is no half-size packet for current Packet8bf.
  // TODO: support as SSE path.
  typedef Packet8bf half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 8,

    HasCmp = 1,
    HasAdd = 1,
    HasSub = 1,
    HasMul = 1,
    HasDiv = 1,
    HasSin = EIGEN_FAST_MATH,
    HasCos = EIGEN_FAST_MATH,
    HasNegate = 1,
    HasAbs = 1,
    HasMin = 1,
    HasMax = 1,
    HasConj = 1,
    HasSetLinear = 0,
    HasLog = 1,
    HasLog1p = 1,
    HasExpm1 = 1,
    HasExp = 1,
    HasSqrt = 1,
    HasRsqrt = 1,
    HasTanh = EIGEN_FAST_MATH,
    HasErf = EIGEN_FAST_MATH,
    HasBessel = 1,
    HasNdtri = 1
  };
};

template <>
struct packet_traits<int> : default_packet_traits {
  typedef Packet8i type;
  typedef Packet4i half;
  enum { Vectorizable = 1, AlignedOnScalar = 1, HasCmp = 1, HasDiv = 1, size = 8 };
};
template <>
struct packet_traits<uint32_t> : default_packet_traits {
  typedef Packet8ui type;
  typedef Packet4ui half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 8,

    HasDiv = 0,
    HasNegate = 0,
    HasSqrt = 0,

    HasCmp = 1,
    HasMin = 1,
    HasMax = 1,
    HasShift = 1
  };
};

#ifdef EIGEN_VECTORIZE_AVX2
template <>
struct packet_traits<int64_t> : default_packet_traits {
  typedef Packet4l type;
  typedef Packet2l half;
  enum { Vectorizable = 1, AlignedOnScalar = 1, HasCmp = 1, size = 4 };
};
template <>
struct packet_traits<uint64_t> : default_packet_traits {
  typedef Packet4ul type;
  // There is no half-size packet for current Packet4ul.
  // TODO: support as SSE path.
  typedef Packet4ul half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4,

    // HasMin = 0,
    // HasMax = 0,
    HasDiv = 0,
    HasTranspose = 0,
    HasNegate = 0,
    HasSqrt = 0,
    HasCmp = 1,
    HasShift = 1
  };
};
#endif

#endif

template <>
struct scalar_div_cost<float, true> {
  enum { value = 14 };
};
template <>
struct scalar_div_cost<double, true> {
  enum { value = 16 };
};

template <>
struct unpacket_traits<Packet8f> {
  typedef float type;
  typedef Packet4f half;
  typedef Packet8i integer_packet;
  typedef uint8_t mask_t;
  enum {
    size = 8,
    alignment = Aligned32,
    vectorizable = true,
    masked_load_available = true,
    masked_store_available = true
#ifdef EIGEN_VECTORIZE_AVX512
    ,
    masked_fpops_available = true
#endif
  };
};
template <>
struct unpacket_traits<Packet4d> {
  typedef double type;
  typedef Packet2d half;
#ifdef EIGEN_VECTORIZE_AVX2
  typedef Packet4l integer_packet;
#endif
  enum {
    size = 4,
    alignment = Aligned32,
    vectorizable = true,
    masked_load_available = false,
    masked_store_available = false
  };
};
template <>
struct unpacket_traits<Packet8i> {
  typedef int type;
  typedef Packet4i half;
  enum {
    size = 8,
    alignment = Aligned32,
    vectorizable = true,
    masked_load_available = false,
    masked_store_available = false
  };
};
template <>
struct unpacket_traits<Packet8ui> {
  typedef uint32_t type;
  typedef Packet4ui half;
  enum {
    size = 8,
    alignment = Aligned32,
    vectorizable = true,
    masked_load_available = false,
    masked_store_available = false
  };
};
#ifdef EIGEN_VECTORIZE_AVX2
template <>
struct unpacket_traits<Packet4l> {
  typedef int64_t type;
  typedef Packet2l half;
  enum {
    size = 4,
    alignment = Aligned32,
    vectorizable = true,
    masked_load_available = false,
    masked_store_available = false
  };
};
template <>
struct unpacket_traits<Packet4ul> {
  typedef uint64_t type;
  typedef Packet4ul half;
  enum {
    size = 4,
    alignment = Aligned32,
    vectorizable = true,
    masked_load_available = false,
    masked_store_available = false
  };
};
#endif
template <>
struct unpacket_traits<Packet8bf> {
  typedef bfloat16 type;
  typedef Packet8bf half;
  enum {
    size = 8,
    alignment = Aligned16,
    vectorizable = true,
    masked_load_available = false,
    masked_store_available = false
  };
};

// Helper for packing the result flags of a low-precision comparison: it
// narrows the eight 32-bit flags produced by an 8-float compare into eight
// 16-bit flags.
EIGEN_STRONG_INLINE __m128i Pack16To8(Packet8f rf) {
  return _mm_packs_epi32(_mm256_extractf128_si256(_mm256_castps_si256(rf), 0),
                         _mm256_extractf128_si256(_mm256_castps_si256(rf), 1));
}
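
// [Editorial sketch, not part of Eigen] Assuming each 32-bit lane of `rf`
// holds an all-ones or all-zeros comparison flag, _mm_packs_epi32 narrows
// with signed saturation, so 0x00000000 -> 0x0000 and 0xFFFFFFFF (-1) ->
// 0xFFFF (-1): the eight 32-bit flags become eight 16-bit flags.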

#ifdef EIGEN_VECTORIZE_AVX2
template <>
EIGEN_STRONG_INLINE Packet4l pset1<Packet4l>(const int64_t& from) {
  return _mm256_set1_epi64x(from);
}
template <>
EIGEN_STRONG_INLINE Packet4ul pset1<Packet4ul>(const uint64_t& from) {
  return _mm256_set1_epi64x(numext::bit_cast<int64_t>(from));
}
template <>
EIGEN_STRONG_INLINE Packet4l pzero(const Packet4l& /*a*/) {
  return _mm256_setzero_si256();
}
template <>
EIGEN_STRONG_INLINE Packet4ul pzero(const Packet4ul& /*a*/) {
  return _mm256_setzero_si256();
}
template <>
EIGEN_STRONG_INLINE Packet4l peven_mask(const Packet4l& /*a*/) {
  return _mm256_set_epi64x(0ll, -1ll, 0ll, -1ll);
}
template <>
EIGEN_STRONG_INLINE Packet4ul peven_mask(const Packet4ul& /*a*/) {
  return _mm256_set_epi64x(0ll, -1ll, 0ll, -1ll);
}
template <>
EIGEN_STRONG_INLINE Packet4l pload1<Packet4l>(const int64_t* from) {
  return _mm256_set1_epi64x(*from);
}
template <>
EIGEN_STRONG_INLINE Packet4ul pload1<Packet4ul>(const uint64_t* from) {
  return _mm256_set1_epi64x(*from);
}
template <>
EIGEN_STRONG_INLINE Packet4l padd<Packet4l>(const Packet4l& a, const Packet4l& b) {
  return _mm256_add_epi64(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4ul padd<Packet4ul>(const Packet4ul& a, const Packet4ul& b) {
  return _mm256_add_epi64(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4l plset<Packet4l>(const int64_t& a) {
  return padd(pset1<Packet4l>(a), Packet4l(_mm256_set_epi64x(3ll, 2ll, 1ll, 0ll)));
}
template <>
EIGEN_STRONG_INLINE Packet4ul plset<Packet4ul>(const uint64_t& a) {
  return padd(pset1<Packet4ul>(a), Packet4ul(_mm256_set_epi64x(3ll, 2ll, 1ll, 0ll)));
}
template <>
EIGEN_STRONG_INLINE Packet4l psub<Packet4l>(const Packet4l& a, const Packet4l& b) {
  return _mm256_sub_epi64(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4ul psub<Packet4ul>(const Packet4ul& a, const Packet4ul& b) {
  return _mm256_sub_epi64(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4l pnegate(const Packet4l& a) {
  return psub(pzero(a), a);
}
template <>
EIGEN_STRONG_INLINE Packet4l pconj(const Packet4l& a) {
  return a;
}
template <>
EIGEN_STRONG_INLINE Packet4l pcmp_le(const Packet4l& a, const Packet4l& b) {
  return _mm256_xor_si256(_mm256_cmpgt_epi64(a, b), _mm256_set1_epi32(-1));
}
template <>
EIGEN_STRONG_INLINE Packet4ul pcmp_le(const Packet4ul& a, const Packet4ul& b) {
  return (Packet4ul)pcmp_le((Packet4l)psub(a, pset1<Packet4ul>(0x8000000000000000UL)),
                            (Packet4l)psub(b, pset1<Packet4ul>(0x8000000000000000UL)));
}
template <>
EIGEN_STRONG_INLINE Packet4l pcmp_lt(const Packet4l& a, const Packet4l& b) {
  return _mm256_cmpgt_epi64(b, a);
}
template <>
EIGEN_STRONG_INLINE Packet4ul pcmp_lt(const Packet4ul& a, const Packet4ul& b) {
  return (Packet4ul)pcmp_lt((Packet4l)psub(a, pset1<Packet4ul>(0x8000000000000000UL)),
                            (Packet4l)psub(b, pset1<Packet4ul>(0x8000000000000000UL)));
}
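
// [Editorial sketch, not part of Eigen] AVX2 only provides a *signed* 64-bit
// compare (vpcmpgtq), so the unsigned variants above first subtract the bias
// 2^63 from both operands, which maps unsigned order onto signed order:
inline bool example_unsigned_lt_via_signed_compare(uint64_t a, uint64_t b) {
  const uint64_t kBias = 0x8000000000000000ULL;  // flips only the sign bit
  return numext::bit_cast<int64_t>(a - kBias) < numext::bit_cast<int64_t>(b - kBias);  // == (a < b) unsigned
}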
template <>
EIGEN_STRONG_INLINE Packet4l pcmp_eq(const Packet4l& a, const Packet4l& b) {
  return _mm256_cmpeq_epi64(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4ul pcmp_eq(const Packet4ul& a, const Packet4ul& b) {
  return _mm256_cmpeq_epi64(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4l ptrue<Packet4l>(const Packet4l& a) {
  return _mm256_cmpeq_epi64(a, a);
}
template <>
EIGEN_STRONG_INLINE Packet4ul ptrue<Packet4ul>(const Packet4ul& a) {
  return _mm256_cmpeq_epi64(a, a);
}
template <>
EIGEN_STRONG_INLINE Packet4l pand<Packet4l>(const Packet4l& a, const Packet4l& b) {
  return _mm256_and_si256(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4l por<Packet4l>(const Packet4l& a, const Packet4l& b) {
  return _mm256_or_si256(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4l pxor<Packet4l>(const Packet4l& a, const Packet4l& b) {
  return _mm256_xor_si256(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4ul pxor<Packet4ul>(const Packet4ul& a, const Packet4ul& b) {
  return _mm256_xor_si256(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4l pandnot<Packet4l>(const Packet4l& a, const Packet4l& b) {
  return _mm256_andnot_si256(b, a);
}
template <int N>
EIGEN_STRONG_INLINE Packet4l plogical_shift_right(Packet4l a) {
  return _mm256_srli_epi64(a, N);
}
template <int N>
EIGEN_STRONG_INLINE Packet4l plogical_shift_left(Packet4l a) {
  return _mm256_slli_epi64(a, N);
}
#ifdef EIGEN_VECTORIZE_AVX512FP16
template <int N>
EIGEN_STRONG_INLINE Packet4l parithmetic_shift_right(Packet4l a) {
  return _mm256_srai_epi64(a, N);
}
#else
template <int N>
EIGEN_STRONG_INLINE std::enable_if_t<(N == 0), Packet4l> parithmetic_shift_right(Packet4l a) {
  return a;
}
template <int N>
EIGEN_STRONG_INLINE std::enable_if_t<(N > 0) && (N < 32), Packet4l> parithmetic_shift_right(Packet4l a) {
  __m256i hi_word = _mm256_srai_epi32(a, N);
  __m256i lo_word = _mm256_srli_epi64(a, N);
  return _mm256_blend_epi32(hi_word, lo_word, 0b01010101);
}
template <int N>
EIGEN_STRONG_INLINE std::enable_if_t<(N >= 32) && (N < 63), Packet4l> parithmetic_shift_right(Packet4l a) {
  __m256i hi_word = _mm256_srai_epi32(a, 31);
  __m256i lo_word = _mm256_shuffle_epi32(_mm256_srai_epi32(a, N - 32), (shuffle_mask<1, 1, 3, 3>::mask));
  return _mm256_blend_epi32(hi_word, lo_word, 0b01010101);
}
template <int N>
EIGEN_STRONG_INLINE std::enable_if_t<(N == 63), Packet4l> parithmetic_shift_right(Packet4l a) {
  return _mm256_cmpgt_epi64(_mm256_setzero_si256(), a);
}
template <int N>
EIGEN_STRONG_INLINE std::enable_if_t<(N < 0) || (N > 63), Packet4l> parithmetic_shift_right(Packet4l a) {
  return parithmetic_shift_right<int(N & 63)>(a);
}
#endif
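
// [Editorial sketch, not part of Eigen] Scalar model of the 0 < N < 32
// emulation above: the low 32 bits of each lane come from the 64-bit
// *logical* shift, the high 32 bits from a 32-bit *arithmetic* shift of the
// upper word, and the blend mask 0b01010101 stitches the two halves together.
// (Assumes the usual arithmetic behavior of `>>` on negative values.)
inline int64_t example_sra64_emulated(int64_t x, int n) {  // requires 0 < n < 32
  uint64_t lo = static_cast<uint64_t>(x) >> n;  // correct low 32 bits
  int hi = static_cast<int>(x >> 32) >> n;      // sign-extending shift of the high word
  return numext::bit_cast<int64_t>((static_cast<uint64_t>(static_cast<uint32_t>(hi)) << 32) |
                                   (lo & 0xFFFFFFFFu));
}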
template <>
EIGEN_STRONG_INLINE Packet4l pload<Packet4l>(const int64_t* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet4ul pload<Packet4ul>(const uint64_t* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet4l ploadu<Packet4l>(const int64_t* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet4ul ploadu<Packet4ul>(const uint64_t* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
}
// Loads 2 int64_ts from memory and returns the packet {a0, a0, a1, a1}
template <>
EIGEN_STRONG_INLINE Packet4l ploaddup<Packet4l>(const int64_t* from) {
  const Packet4l a = _mm256_castsi128_si256(_mm_loadu_si128(reinterpret_cast<const __m128i*>(from)));
  return _mm256_permutevar8x32_epi32(a, _mm256_setr_epi32(0, 1, 0, 1, 2, 3, 2, 3));
}
// Loads 2 uint64_ts from memory and returns the packet {a0, a0, a1, a1}
template <>
EIGEN_STRONG_INLINE Packet4ul ploaddup<Packet4ul>(const uint64_t* from) {
  const Packet4ul a = _mm256_castsi128_si256(_mm_loadu_si128(reinterpret_cast<const __m128i*>(from)));
  return _mm256_permutevar8x32_epi32(a, _mm256_setr_epi32(0, 1, 0, 1, 2, 3, 2, 3));
}
template <>
EIGEN_STRONG_INLINE void pstore<int64_t>(int64_t* to, const Packet4l& from) {
  EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(reinterpret_cast<__m256i*>(to), from);
}
template <>
EIGEN_STRONG_INLINE void pstore<uint64_t>(uint64_t* to, const Packet4ul& from) {
  EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(reinterpret_cast<__m256i*>(to), from);
}
template <>
EIGEN_STRONG_INLINE void pstoreu<int64_t>(int64_t* to, const Packet4l& from) {
  EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from);
}
template <>
EIGEN_STRONG_INLINE void pstoreu<uint64_t>(uint64_t* to, const Packet4ul& from) {
  EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from);
}
template <>
EIGEN_DEVICE_FUNC inline Packet4l pgather<int64_t, Packet4l>(const int64_t* from, Index stride) {
  return _mm256_set_epi64x(from[3 * stride], from[2 * stride], from[1 * stride], from[0 * stride]);
}
template <>
EIGEN_DEVICE_FUNC inline Packet4ul pgather<uint64_t, Packet4ul>(const uint64_t* from, Index stride) {
  return _mm256_set_epi64x(from[3 * stride], from[2 * stride], from[1 * stride], from[0 * stride]);
}
template <>
EIGEN_DEVICE_FUNC inline void pscatter<int64_t, Packet4l>(int64_t* to, const Packet4l& from, Index stride) {
  __m128i low = _mm256_extractf128_si256(from, 0);
  to[stride * 0] = _mm_extract_epi64_0(low);
  to[stride * 1] = _mm_extract_epi64_1(low);

  __m128i high = _mm256_extractf128_si256(from, 1);
  to[stride * 2] = _mm_extract_epi64_0(high);
  to[stride * 3] = _mm_extract_epi64_1(high);
}
template <>
EIGEN_DEVICE_FUNC inline void pscatter<uint64_t, Packet4ul>(uint64_t* to, const Packet4ul& from, Index stride) {
  __m128i low = _mm256_extractf128_si256(from, 0);
  to[stride * 0] = _mm_extract_epi64_0(low);
  to[stride * 1] = _mm_extract_epi64_1(low);

  __m128i high = _mm256_extractf128_si256(from, 1);
  to[stride * 2] = _mm_extract_epi64_0(high);
  to[stride * 3] = _mm_extract_epi64_1(high);
}
template <>
EIGEN_STRONG_INLINE void pstore1<Packet4l>(int64_t* to, const int64_t& a) {
  Packet4l pa = pset1<Packet4l>(a);
  pstore(to, pa);
}
template <>
EIGEN_STRONG_INLINE void pstore1<Packet4ul>(uint64_t* to, const uint64_t& a) {
  Packet4ul pa = pset1<Packet4ul>(a);
  pstore(to, pa);
}
template <>
EIGEN_STRONG_INLINE int64_t pfirst<Packet4l>(const Packet4l& a) {
  return _mm_extract_epi64_0(_mm256_castsi256_si128(a));
}
template <>
EIGEN_STRONG_INLINE uint64_t pfirst<Packet4ul>(const Packet4ul& a) {
  return _mm_extract_epi64_0(_mm256_castsi256_si128(a));
}

#define MM256_SHUFFLE_EPI64(A, B, M) _mm256_shuffle_pd(_mm256_castsi256_pd(A), _mm256_castsi256_pd(B), M)
EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4l, 4>& kernel) {
  __m256d T0 = MM256_SHUFFLE_EPI64(kernel.packet[0], kernel.packet[1], 15);
  __m256d T1 = MM256_SHUFFLE_EPI64(kernel.packet[0], kernel.packet[1], 0);
  __m256d T2 = MM256_SHUFFLE_EPI64(kernel.packet[2], kernel.packet[3], 15);
  __m256d T3 = MM256_SHUFFLE_EPI64(kernel.packet[2], kernel.packet[3], 0);

  kernel.packet[1] = _mm256_castpd_si256(_mm256_permute2f128_pd(T0, T2, 32));
  kernel.packet[3] = _mm256_castpd_si256(_mm256_permute2f128_pd(T0, T2, 49));
  kernel.packet[0] = _mm256_castpd_si256(_mm256_permute2f128_pd(T1, T3, 32));
  kernel.packet[2] = _mm256_castpd_si256(_mm256_permute2f128_pd(T1, T3, 49));
}
EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4ul, 4>& kernel) {
  ptranspose((PacketBlock<Packet4l, 4>&)kernel);
}
template <>
EIGEN_STRONG_INLINE Packet4l pmin<Packet4l>(const Packet4l& a, const Packet4l& b) {
  __m256i cmp = _mm256_cmpgt_epi64(a, b);
  __m256i a_min = _mm256_andnot_si256(cmp, a);
  __m256i b_min = _mm256_and_si256(cmp, b);
  return Packet4l(_mm256_or_si256(a_min, b_min));
}
template <>
EIGEN_STRONG_INLINE Packet4ul pmin<Packet4ul>(const Packet4ul& a, const Packet4ul& b) {
  return padd((Packet4ul)pmin((Packet4l)psub(a, pset1<Packet4ul>(0x8000000000000000UL)),
                              (Packet4l)psub(b, pset1<Packet4ul>(0x8000000000000000UL))),
              pset1<Packet4ul>(0x8000000000000000UL));
}
template <>
EIGEN_STRONG_INLINE Packet4l pmax<Packet4l>(const Packet4l& a, const Packet4l& b) {
  __m256i cmp = _mm256_cmpgt_epi64(a, b);
  __m256i a_min = _mm256_and_si256(cmp, a);
  __m256i b_min = _mm256_andnot_si256(cmp, b);
  return Packet4l(_mm256_or_si256(a_min, b_min));
}
template <>
EIGEN_STRONG_INLINE Packet4ul pmax<Packet4ul>(const Packet4ul& a, const Packet4ul& b) {
  return padd((Packet4ul)pmax((Packet4l)psub(a, pset1<Packet4ul>(0x8000000000000000UL)),
                              (Packet4l)psub(b, pset1<Packet4ul>(0x8000000000000000UL))),
              pset1<Packet4ul>(0x8000000000000000UL));
}
template <>
EIGEN_STRONG_INLINE Packet4l pabs<Packet4l>(const Packet4l& a) {
  Packet4l pz = pzero<Packet4l>(a);
  Packet4l cmp = _mm256_cmpgt_epi64(a, pz);
  return psub(cmp, pxor(a, cmp));
}
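
// [Editorial sketch, not part of Eigen] The branchless |a| used by pabs above:
// with m = (a > 0) ? -1 : 0, the xor conditionally bit-flips a, and the
// subtraction supplies the +1 of two's-complement negation while cancelling m:
//   a > 0 : m - (a ^ m) = -1 - ~a = -1 - (-a - 1) =  a
//   a <= 0: m - (a ^ m) =  0 -  a                 = -a
inline int64_t example_abs_via_compare_mask(int64_t a) {
  int64_t m = (a > 0) ? int64_t(-1) : int64_t(0);  // the vpcmpgtq result for one lane
  return m - (a ^ m);
}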
template <>
EIGEN_STRONG_INLINE Packet4ul pabs<Packet4ul>(const Packet4ul& a) {
  return a;
}
template <>
EIGEN_STRONG_INLINE Packet4l pmul<Packet4l>(const Packet4l& a, const Packet4l& b) {
  // 64-bit mul requires avx512, so do this with 32-bit multiplication
  __m256i upper32_a = _mm256_srli_epi64(a, 32);
  __m256i upper32_b = _mm256_srli_epi64(b, 32);

  // upper * lower
  __m256i mul1 = _mm256_mul_epu32(upper32_a, b);
  __m256i mul2 = _mm256_mul_epu32(upper32_b, a);
  // lower * lower (the full 64-bit product of the low 32-bit halves)
  __m256i mul3 = _mm256_mul_epu32(a, b);

  __m256i high = _mm256_slli_epi64(_mm256_add_epi64(mul1, mul2), 32);
  return _mm256_add_epi64(high, mul3);
}
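
// [Editorial sketch, not part of Eigen] The 32-bit decomposition behind
// pmul<Packet4l>: writing a = ah*2^32 + al and b = bh*2^32 + bl,
//   a*b mod 2^64 = ((ah*bl + al*bh) << 32) + al*bl
// since the ah*bh term is shifted entirely out of the low 64 bits.
// _mm256_mul_epu32 multiplies the low 32 bits of each 64-bit lane, so above
// mul1 = ah*bl, mul2 = bh*al, and mul3 = al*bl.
inline uint64_t example_mul64_from_32bit_products(uint64_t a, uint64_t b) {
  uint64_t ah = a >> 32, al = a & 0xFFFFFFFFu;
  uint64_t bh = b >> 32, bl = b & 0xFFFFFFFFu;
  return ((ah * bl + al * bh) << 32) + al * bl;  // == a * b (mod 2^64)
}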
template <>
EIGEN_STRONG_INLINE Packet4ul pmul<Packet4ul>(const Packet4ul& a, const Packet4ul& b) {
  return (Packet4ul)pmul<Packet4l>((Packet4l)a, (Packet4l)b);
}
#endif

template <>
EIGEN_STRONG_INLINE Packet8f pset1<Packet8f>(const float& from) {
  return _mm256_set1_ps(from);
}
template <>
EIGEN_STRONG_INLINE Packet4d pset1<Packet4d>(const double& from) {
  return _mm256_set1_pd(from);
}
template <>
EIGEN_STRONG_INLINE Packet8i pset1<Packet8i>(const int& from) {
  return _mm256_set1_epi32(from);
}
template <>
EIGEN_STRONG_INLINE Packet8ui pset1<Packet8ui>(const uint32_t& from) {
  return _mm256_set1_epi32(from);
}

template <>
EIGEN_STRONG_INLINE Packet8f pset1frombits<Packet8f>(unsigned int from) {
  return _mm256_castsi256_ps(pset1<Packet8i>(from));
}
template <>
EIGEN_STRONG_INLINE Packet4d pset1frombits<Packet4d>(uint64_t from) {
  return _mm256_castsi256_pd(_mm256_set1_epi64x(from));
}

template <>
EIGEN_STRONG_INLINE Packet8f pzero(const Packet8f& /*a*/) {
  return _mm256_setzero_ps();
}
template <>
EIGEN_STRONG_INLINE Packet4d pzero(const Packet4d& /*a*/) {
  return _mm256_setzero_pd();
}
template <>
EIGEN_STRONG_INLINE Packet8i pzero(const Packet8i& /*a*/) {
  return _mm256_setzero_si256();
}
template <>
EIGEN_STRONG_INLINE Packet8ui pzero(const Packet8ui& /*a*/) {
  return _mm256_setzero_si256();
}

template <>
EIGEN_STRONG_INLINE Packet8f peven_mask(const Packet8f& /*a*/) {
  return _mm256_castsi256_ps(_mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1));
}
template <>
EIGEN_STRONG_INLINE Packet8i peven_mask(const Packet8i& /*a*/) {
  return _mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1);
}
template <>
EIGEN_STRONG_INLINE Packet8ui peven_mask(const Packet8ui& /*a*/) {
  return _mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1);
}
template <>
EIGEN_STRONG_INLINE Packet4d peven_mask(const Packet4d& /*a*/) {
  return _mm256_castsi256_pd(_mm256_set_epi32(0, 0, -1, -1, 0, 0, -1, -1));
}

template <>
EIGEN_STRONG_INLINE Packet8f pload1<Packet8f>(const float* from) {
  return _mm256_broadcast_ss(from);
}
template <>
EIGEN_STRONG_INLINE Packet4d pload1<Packet4d>(const double* from) {
  return _mm256_broadcast_sd(from);
}

template <>
EIGEN_STRONG_INLINE Packet8f padd<Packet8f>(const Packet8f& a, const Packet8f& b) {
  return _mm256_add_ps(a, b);
}
#ifdef EIGEN_VECTORIZE_AVX512
template <>
EIGEN_STRONG_INLINE Packet8f padd<Packet8f>(const Packet8f& a, const Packet8f& b, uint8_t umask) {
  __mmask16 mask = static_cast<__mmask16>(umask & 0x00FF);
  return _mm512_castps512_ps256(_mm512_maskz_add_ps(mask, _mm512_castps256_ps512(a), _mm512_castps256_ps512(b)));
}
#endif
template <>
EIGEN_STRONG_INLINE Packet4d padd<Packet4d>(const Packet4d& a, const Packet4d& b) {
  return _mm256_add_pd(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet8i padd<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_add_epi32(a, b);
#else
  __m128i lo = _mm_add_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_add_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8ui padd<Packet8ui>(const Packet8ui& a, const Packet8ui& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_add_epi32(a, b);
#else
  __m128i lo = _mm_add_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_add_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8f plset<Packet8f>(const float& a) {
  return padd(pset1<Packet8f>(a), _mm256_set_ps(7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0));
}
template <>
EIGEN_STRONG_INLINE Packet4d plset<Packet4d>(const double& a) {
  return padd(pset1<Packet4d>(a), _mm256_set_pd(3.0, 2.0, 1.0, 0.0));
}
template <>
EIGEN_STRONG_INLINE Packet8i plset<Packet8i>(const int& a) {
  return padd(pset1<Packet8i>(a), (Packet8i)_mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0));
}
template <>
EIGEN_STRONG_INLINE Packet8ui plset<Packet8ui>(const uint32_t& a) {
  return padd(pset1<Packet8ui>(a), (Packet8ui)_mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0));
}

template <>
EIGEN_STRONG_INLINE Packet8f psub<Packet8f>(const Packet8f& a, const Packet8f& b) {
  return _mm256_sub_ps(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4d psub<Packet4d>(const Packet4d& a, const Packet4d& b) {
  return _mm256_sub_pd(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet8i psub<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_sub_epi32(a, b);
#else
  __m128i lo = _mm_sub_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_sub_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8ui psub<Packet8ui>(const Packet8ui& a, const Packet8ui& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_sub_epi32(a, b);
#else
  __m128i lo = _mm_sub_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_sub_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8f pnegate(const Packet8f& a) {
  const Packet8f mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x80000000));
  return _mm256_xor_ps(a, mask);
}
template <>
EIGEN_STRONG_INLINE Packet4d pnegate(const Packet4d& a) {
  const Packet4d mask = _mm256_castsi256_pd(_mm256_set1_epi64x(0x8000000000000000ULL));
  return _mm256_xor_pd(a, mask);
}
template <>
EIGEN_STRONG_INLINE Packet8i pnegate(const Packet8i& a) {
  return psub(pzero(a), a);
}

template <>
EIGEN_STRONG_INLINE Packet8f pconj(const Packet8f& a) {
  return a;
}
template <>
EIGEN_STRONG_INLINE Packet4d pconj(const Packet4d& a) {
  return a;
}
template <>
EIGEN_STRONG_INLINE Packet8i pconj(const Packet8i& a) {
  return a;
}

template <>
EIGEN_STRONG_INLINE Packet8f pmul<Packet8f>(const Packet8f& a, const Packet8f& b) {
  return _mm256_mul_ps(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4d pmul<Packet4d>(const Packet4d& a, const Packet4d& b) {
  return _mm256_mul_pd(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet8i pmul<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_mullo_epi32(a, b);
#else
  const __m128i lo = _mm_mullo_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  const __m128i hi = _mm_mullo_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8ui pmul<Packet8ui>(const Packet8ui& a, const Packet8ui& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_mullo_epi32(a, b);
#else
  const __m128i lo = _mm_mullo_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  const __m128i hi = _mm_mullo_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8f pdiv<Packet8f>(const Packet8f& a, const Packet8f& b) {
  return _mm256_div_ps(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4d pdiv<Packet4d>(const Packet4d& a, const Packet4d& b) {
  return _mm256_div_pd(a, b);
}

template <>
EIGEN_STRONG_INLINE Packet8i pdiv<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX512
  return _mm512_cvttpd_epi32(_mm512_div_pd(_mm512_cvtepi32_pd(a), _mm512_cvtepi32_pd(b)));
#else
  Packet4i lo = pdiv<Packet4i>(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  Packet4i hi = pdiv<Packet4i>(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1);
#endif
}

#ifdef EIGEN_VECTORIZE_FMA
template <>
EIGEN_STRONG_INLINE Packet8f pmadd(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
  return _mm256_fmadd_ps(a, b, c);
}
template <>
EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
  return _mm256_fmadd_pd(a, b, c);
}

template <>
EIGEN_STRONG_INLINE Packet8f pmsub(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
  return _mm256_fmsub_ps(a, b, c);
}

template <>
EIGEN_STRONG_INLINE Packet4d pmsub(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
  return _mm256_fmsub_pd(a, b, c);
}

template <>
EIGEN_STRONG_INLINE Packet8f pnmadd(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
  return _mm256_fnmadd_ps(a, b, c);
}

template <>
EIGEN_STRONG_INLINE Packet4d pnmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
  return _mm256_fnmadd_pd(a, b, c);
}

template <>
EIGEN_STRONG_INLINE Packet8f pnmsub(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
  return _mm256_fnmsub_ps(a, b, c);
}

template <>
EIGEN_STRONG_INLINE Packet4d pnmsub(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
  return _mm256_fnmsub_pd(a, b, c);
}

#endif

template <>
EIGEN_STRONG_INLINE Packet8f pcmp_le(const Packet8f& a, const Packet8f& b) {
  return _mm256_cmp_ps(a, b, _CMP_LE_OQ);
}
template <>
EIGEN_STRONG_INLINE Packet8f pcmp_lt(const Packet8f& a, const Packet8f& b) {
  return _mm256_cmp_ps(a, b, _CMP_LT_OQ);
}
template <>
EIGEN_STRONG_INLINE Packet8f pcmp_lt_or_nan(const Packet8f& a, const Packet8f& b) {
  return _mm256_cmp_ps(a, b, _CMP_NGE_UQ);
}
template <>
EIGEN_STRONG_INLINE Packet8f pcmp_eq(const Packet8f& a, const Packet8f& b) {
  return _mm256_cmp_ps(a, b, _CMP_EQ_OQ);
}
template <>
EIGEN_STRONG_INLINE Packet8f pisnan(const Packet8f& a) {
  return _mm256_cmp_ps(a, a, _CMP_UNORD_Q);
}

template <>
EIGEN_STRONG_INLINE Packet4d pcmp_le(const Packet4d& a, const Packet4d& b) {
  return _mm256_cmp_pd(a, b, _CMP_LE_OQ);
}
template <>
EIGEN_STRONG_INLINE Packet4d pcmp_lt(const Packet4d& a, const Packet4d& b) {
  return _mm256_cmp_pd(a, b, _CMP_LT_OQ);
}
template <>
EIGEN_STRONG_INLINE Packet4d pcmp_lt_or_nan(const Packet4d& a, const Packet4d& b) {
  return _mm256_cmp_pd(a, b, _CMP_NGE_UQ);
}
template <>
EIGEN_STRONG_INLINE Packet4d pcmp_eq(const Packet4d& a, const Packet4d& b) {
  return _mm256_cmp_pd(a, b, _CMP_EQ_OQ);
}

template <>
EIGEN_STRONG_INLINE Packet8i pcmp_le(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_xor_si256(_mm256_cmpgt_epi32(a, b), _mm256_set1_epi32(-1));
#else
  __m128i lo = _mm_cmpgt_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  lo = _mm_xor_si128(lo, _mm_set1_epi32(-1));
  __m128i hi = _mm_cmpgt_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  hi = _mm_xor_si128(hi, _mm_set1_epi32(-1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8i pcmp_lt(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_cmpgt_epi32(b, a);
#else
  __m128i lo = _mm_cmpgt_epi32(_mm256_extractf128_si256(b, 0), _mm256_extractf128_si256(a, 0));
  __m128i hi = _mm_cmpgt_epi32(_mm256_extractf128_si256(b, 1), _mm256_extractf128_si256(a, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8i pcmp_eq(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_cmpeq_epi32(a, b);
#else
  __m128i lo = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8ui pcmp_eq(const Packet8ui& a, const Packet8ui& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_cmpeq_epi32(a, b);
#else
  __m128i lo = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8f pmin<Packet8f>(const Packet8f& a, const Packet8f& b) {
#if EIGEN_GNUC_STRICT_LESS_THAN(6, 3, 0)
  // There appears to be a bug in GCC, by which the optimizer may flip
  // the argument order in calls to _mm_min_ps/_mm_max_ps, so we have to
  // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
  // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
  Packet8f res;
  asm("vminps %[a], %[b], %[res]" : [res] "=x"(res) : [a] "x"(a), [b] "x"(b));
  return res;
#else
  // Arguments are swapped to match NaN propagation behavior of std::min.
  return _mm256_min_ps(b, a);
#endif
}
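
// [Editorial note] Why the swap works: the hardware rule is
// minps(x, y) = (x < y) ? x : y, returning the *second* operand when the
// comparison is unordered. Hence _mm256_min_ps(b, a) = (b < a) ? b : a, which
// is exactly the definition of std::min(a, b) and returns `a` when either
// input is NaN.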
template <>
EIGEN_STRONG_INLINE Packet4d pmin<Packet4d>(const Packet4d& a, const Packet4d& b) {
#if EIGEN_GNUC_STRICT_LESS_THAN(6, 3, 0)
  // See pmin above
  Packet4d res;
  asm("vminpd %[a], %[b], %[res]" : [res] "=x"(res) : [a] "x"(a), [b] "x"(b));
  return res;
#else
  // Arguments are swapped to match NaN propagation behavior of std::min.
  return _mm256_min_pd(b, a);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8i pmin<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_min_epi32(a, b);
#else
  __m128i lo = _mm_min_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_min_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8ui pmin<Packet8ui>(const Packet8ui& a, const Packet8ui& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_min_epu32(a, b);
#else
  __m128i lo = _mm_min_epu32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_min_epu32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8f pmax<Packet8f>(const Packet8f& a, const Packet8f& b) {
#if EIGEN_GNUC_STRICT_LESS_THAN(6, 3, 0)
  // See pmin above
  Packet8f res;
  asm("vmaxps %[a], %[b], %[res]" : [res] "=x"(res) : [a] "x"(a), [b] "x"(b));
  return res;
#else
  // Arguments are swapped to match NaN propagation behavior of std::max.
  return _mm256_max_ps(b, a);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet4d pmax<Packet4d>(const Packet4d& a, const Packet4d& b) {
#if EIGEN_GNUC_STRICT_LESS_THAN(6, 3, 0)
  // See pmin above
  Packet4d res;
  asm("vmaxpd %[a], %[b], %[res]" : [res] "=x"(res) : [a] "x"(a), [b] "x"(b));
  return res;
#else
  // Arguments are swapped to match NaN propagation behavior of std::max.
  return _mm256_max_pd(b, a);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8i pmax<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_max_epi32(a, b);
#else
  __m128i lo = _mm_max_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_max_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8ui pmax<Packet8ui>(const Packet8ui& a, const Packet8ui& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_max_epu32(a, b);
#else
  __m128i lo = _mm_max_epu32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_max_epu32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

#ifdef EIGEN_VECTORIZE_AVX2
template <>
EIGEN_STRONG_INLINE Packet8i psign(const Packet8i& a) {
  return _mm256_sign_epi32(_mm256_set1_epi32(1), a);
}
#endif

// Add specializations for min/max with prescribed NaN propagation.
template <>
EIGEN_STRONG_INLINE Packet8f pmin<PropagateNumbers, Packet8f>(const Packet8f& a, const Packet8f& b) {
  return pminmax_propagate_numbers(a, b, pmin<Packet8f>);
}
template <>
EIGEN_STRONG_INLINE Packet4d pmin<PropagateNumbers, Packet4d>(const Packet4d& a, const Packet4d& b) {
  return pminmax_propagate_numbers(a, b, pmin<Packet4d>);
}
template <>
EIGEN_STRONG_INLINE Packet8f pmax<PropagateNumbers, Packet8f>(const Packet8f& a, const Packet8f& b) {
  return pminmax_propagate_numbers(a, b, pmax<Packet8f>);
}
template <>
EIGEN_STRONG_INLINE Packet4d pmax<PropagateNumbers, Packet4d>(const Packet4d& a, const Packet4d& b) {
  return pminmax_propagate_numbers(a, b, pmax<Packet4d>);
}
template <>
EIGEN_STRONG_INLINE Packet8f pmin<PropagateNaN, Packet8f>(const Packet8f& a, const Packet8f& b) {
  return pminmax_propagate_nan(a, b, pmin<Packet8f>);
}
template <>
EIGEN_STRONG_INLINE Packet4d pmin<PropagateNaN, Packet4d>(const Packet4d& a, const Packet4d& b) {
  return pminmax_propagate_nan(a, b, pmin<Packet4d>);
}
template <>
EIGEN_STRONG_INLINE Packet8f pmax<PropagateNaN, Packet8f>(const Packet8f& a, const Packet8f& b) {
  return pminmax_propagate_nan(a, b, pmax<Packet8f>);
}
template <>
EIGEN_STRONG_INLINE Packet4d pmax<PropagateNaN, Packet4d>(const Packet4d& a, const Packet4d& b) {
  return pminmax_propagate_nan(a, b, pmax<Packet4d>);
}
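
// [Editorial note] Semantics of the helpers used above (defined in
// GenericPacketMath.h): the PropagateNumbers variants return the non-NaN
// operand when exactly one input is NaN, while the PropagateNaN variants
// return NaN whenever either input is NaN; for ordered inputs both reduce to
// the plain pmin/pmax passed in as the functor.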

template <>
EIGEN_STRONG_INLINE Packet8f print<Packet8f>(const Packet8f& a) {
  return _mm256_round_ps(a, _MM_FROUND_CUR_DIRECTION);
}
template <>
EIGEN_STRONG_INLINE Packet4d print<Packet4d>(const Packet4d& a) {
  return _mm256_round_pd(a, _MM_FROUND_CUR_DIRECTION);
}

template <>
EIGEN_STRONG_INLINE Packet8f pceil<Packet8f>(const Packet8f& a) {
  return _mm256_ceil_ps(a);
}
template <>
EIGEN_STRONG_INLINE Packet4d pceil<Packet4d>(const Packet4d& a) {
  return _mm256_ceil_pd(a);
}

template <>
EIGEN_STRONG_INLINE Packet8f pfloor<Packet8f>(const Packet8f& a) {
  return _mm256_floor_ps(a);
}
template <>
EIGEN_STRONG_INLINE Packet4d pfloor<Packet4d>(const Packet4d& a) {
  return _mm256_floor_pd(a);
}

template <>
EIGEN_STRONG_INLINE Packet8f ptrunc<Packet8f>(const Packet8f& a) {
  return _mm256_round_ps(a, _MM_FROUND_TRUNC);
}
template <>
EIGEN_STRONG_INLINE Packet4d ptrunc<Packet4d>(const Packet4d& a) {
  return _mm256_round_pd(a, _MM_FROUND_TRUNC);
}

template <>
EIGEN_STRONG_INLINE Packet8i ptrue<Packet8i>(const Packet8i& a) {
#ifdef EIGEN_VECTORIZE_AVX2
  // vpcmpeqd has lower latency than the more general vcmpps
  return _mm256_cmpeq_epi32(a, a);
#else
  const __m256 b = _mm256_castsi256_ps(a);
  return _mm256_castps_si256(_mm256_cmp_ps(b, b, _CMP_TRUE_UQ));
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8f ptrue<Packet8f>(const Packet8f& a) {
#ifdef EIGEN_VECTORIZE_AVX2
  // vpcmpeqd has lower latency than the more general vcmpps
  const __m256i b = _mm256_castps_si256(a);
  return _mm256_castsi256_ps(_mm256_cmpeq_epi32(b, b));
#else
  return _mm256_cmp_ps(a, a, _CMP_TRUE_UQ);
#endif
}

template <>
EIGEN_STRONG_INLINE Packet4d ptrue<Packet4d>(const Packet4d& a) {
#ifdef EIGEN_VECTORIZE_AVX2
  // vpcmpeqq has lower latency than the more general vcmppd
  const __m256i b = _mm256_castpd_si256(a);
  return _mm256_castsi256_pd(_mm256_cmpeq_epi64(b, b));
#else
  return _mm256_cmp_pd(a, a, _CMP_TRUE_UQ);
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8f pand<Packet8f>(const Packet8f& a, const Packet8f& b) {
  return _mm256_and_ps(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4d pand<Packet4d>(const Packet4d& a, const Packet4d& b) {
  return _mm256_and_pd(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet8i pand<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_and_si256(a, b);
#else
  return _mm256_castps_si256(_mm256_and_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)));
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8ui pand<Packet8ui>(const Packet8ui& a, const Packet8ui& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_and_si256(a, b);
#else
  return _mm256_castps_si256(_mm256_and_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)));
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8f por<Packet8f>(const Packet8f& a, const Packet8f& b) {
  return _mm256_or_ps(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4d por<Packet4d>(const Packet4d& a, const Packet4d& b) {
  return _mm256_or_pd(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet8i por<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_or_si256(a, b);
#else
  return _mm256_castps_si256(_mm256_or_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)));
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8ui por<Packet8ui>(const Packet8ui& a, const Packet8ui& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_or_si256(a, b);
#else
  return _mm256_castps_si256(_mm256_or_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)));
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8f pxor<Packet8f>(const Packet8f& a, const Packet8f& b) {
  return _mm256_xor_ps(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4d pxor<Packet4d>(const Packet4d& a, const Packet4d& b) {
  return _mm256_xor_pd(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet8i pxor<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_xor_si256(a, b);
#else
  return _mm256_castps_si256(_mm256_xor_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)));
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8ui pxor<Packet8ui>(const Packet8ui& a, const Packet8ui& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_xor_si256(a, b);
#else
  return _mm256_castps_si256(_mm256_xor_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)));
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8f pandnot<Packet8f>(const Packet8f& a, const Packet8f& b) {
  return _mm256_andnot_ps(b, a);
}
template <>
EIGEN_STRONG_INLINE Packet4d pandnot<Packet4d>(const Packet4d& a, const Packet4d& b) {
  return _mm256_andnot_pd(b, a);
}
template <>
EIGEN_STRONG_INLINE Packet8i pandnot<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_andnot_si256(b, a);
#else
  return _mm256_castps_si256(_mm256_andnot_ps(_mm256_castsi256_ps(b), _mm256_castsi256_ps(a)));
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8ui pandnot<Packet8ui>(const Packet8ui& a, const Packet8ui& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_andnot_si256(b, a);
#else
  return _mm256_castps_si256(_mm256_andnot_ps(_mm256_castsi256_ps(b), _mm256_castsi256_ps(a)));
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8ui pcmp_lt(const Packet8ui& a, const Packet8ui& b) {
  return pxor(pcmp_eq(a, pmax(a, b)), ptrue(a));
}
template <>
EIGEN_STRONG_INLINE Packet8ui pcmp_le(const Packet8ui& a, const Packet8ui& b) {
  return pcmp_eq(a, pmin(a, b));
}

template <>
EIGEN_STRONG_INLINE Packet8f pround<Packet8f>(const Packet8f& a) {
  const Packet8f mask = pset1frombits<Packet8f>(static_cast<numext::uint32_t>(0x80000000u));
  const Packet8f prev0dot5 = pset1frombits<Packet8f>(static_cast<numext::uint32_t>(0x3EFFFFFFu));
  return _mm256_round_ps(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
}
template <>
EIGEN_STRONG_INLINE Packet4d pround<Packet4d>(const Packet4d& a) {
  const Packet4d mask = pset1frombits<Packet4d>(static_cast<numext::uint64_t>(0x8000000000000000ull));
  const Packet4d prev0dot5 = pset1frombits<Packet4d>(static_cast<numext::uint64_t>(0x3FDFFFFFFFFFFFFFull));
  return _mm256_round_pd(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
}
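
// [Editorial note] pround implements round-half-away-from-zero, which the
// ties-to-even mode of _MM_FROUND_TO_NEAREST_INT cannot provide. Per lane it
// computes trunc(a + copysign(prev0dot5, a)), where prev0dot5 is the largest
// float strictly below 0.5 (bits 0x3EFFFFFF). E.g. for a = 2.5f the float sum
// 2.5f + 0.49999997f rounds to exactly 3.0f, which truncates to 3; adding
// exactly 0.5f instead would misround inputs such as 0.49999997f itself.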

template <>
EIGEN_STRONG_INLINE Packet8f pselect<Packet8f>(const Packet8f& mask, const Packet8f& a, const Packet8f& b) {
  return _mm256_blendv_ps(b, a, mask);
}
template <>
EIGEN_STRONG_INLINE Packet8i pselect<Packet8i>(const Packet8i& mask, const Packet8i& a, const Packet8i& b) {
  return _mm256_castps_si256(
      _mm256_blendv_ps(_mm256_castsi256_ps(b), _mm256_castsi256_ps(a), _mm256_castsi256_ps(mask)));
}
template <>
EIGEN_STRONG_INLINE Packet8ui pselect<Packet8ui>(const Packet8ui& mask, const Packet8ui& a, const Packet8ui& b) {
  return _mm256_castps_si256(
      _mm256_blendv_ps(_mm256_castsi256_ps(b), _mm256_castsi256_ps(a), _mm256_castsi256_ps(mask)));
}

template <>
EIGEN_STRONG_INLINE Packet4d pselect<Packet4d>(const Packet4d& mask, const Packet4d& a, const Packet4d& b) {
  return _mm256_blendv_pd(b, a, mask);
}

template <int N>
EIGEN_STRONG_INLINE Packet8i parithmetic_shift_right(Packet8i a) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_srai_epi32(a, N);
#else
  __m128i lo = _mm_srai_epi32(_mm256_extractf128_si256(a, 0), N);
  __m128i hi = _mm_srai_epi32(_mm256_extractf128_si256(a, 1), N);
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template <int N>
EIGEN_STRONG_INLINE Packet8i plogical_shift_right(Packet8i a) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_srli_epi32(a, N);
#else
  __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(a, 0), N);
  __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(a, 1), N);
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template <int N>
EIGEN_STRONG_INLINE Packet8i plogical_shift_left(Packet8i a) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_slli_epi32(a, N);
#else
  __m128i lo = _mm_slli_epi32(_mm256_extractf128_si256(a, 0), N);
  __m128i hi = _mm_slli_epi32(_mm256_extractf128_si256(a, 1), N);
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template <int N>
EIGEN_STRONG_INLINE Packet8ui parithmetic_shift_right(Packet8ui a) {
  return (Packet8ui)plogical_shift_right<N>((Packet8i)a);
}
template <int N>
EIGEN_STRONG_INLINE Packet8ui plogical_shift_right(Packet8ui a) {
  return (Packet8ui)plogical_shift_right<N>((Packet8i)a);
}
template <int N>
EIGEN_STRONG_INLINE Packet8ui plogical_shift_left(Packet8ui a) {
  return (Packet8ui)plogical_shift_left<N>((Packet8i)a);
}

template <>
EIGEN_STRONG_INLINE Packet8f pload<Packet8f>(const float* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_ps(from);
}
template <>
EIGEN_STRONG_INLINE Packet4d pload<Packet4d>(const double* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_pd(from);
}
template <>
EIGEN_STRONG_INLINE Packet8i pload<Packet8i>(const int* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet8ui pload<Packet8ui>(const uint32_t* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
}

template <>
EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(const float* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_ps(from);
}
template <>
EIGEN_STRONG_INLINE Packet4d ploadu<Packet4d>(const double* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_pd(from);
}
template <>
EIGEN_STRONG_INLINE Packet8i ploadu<Packet8i>(const int* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet8ui ploadu<Packet8ui>(const uint32_t* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
}

template <>
EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(const float* from, uint8_t umask) {
#ifdef EIGEN_VECTORIZE_AVX512
  __mmask16 mask = static_cast<__mmask16>(umask & 0x00FF);
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_castps512_ps256(_mm512_maskz_loadu_ps(mask, from));
#else
  Packet8i mask = _mm256_set1_epi8(static_cast<char>(umask));
  const Packet8i bit_mask =
      _mm256_set_epi32(0xffffff7f, 0xffffffbf, 0xffffffdf, 0xffffffef, 0xfffffff7, 0xfffffffb, 0xfffffffd, 0xfffffffe);
  mask = por<Packet8i>(mask, bit_mask);
  mask = pcmp_eq<Packet8i>(mask, _mm256_set1_epi32(0xffffffff));
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_maskload_ps(from, mask);
#endif
}
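
// [Editorial sketch, not part of Eigen] How the AVX fallback above turns the
// byte mask `umask` into a per-lane vector mask: umask is broadcast into
// every byte, OR-ed with a constant that has all bits set except bit i in
// lane i, and compared against all-ones. Lane i is therefore enabled exactly
// when bit i of umask is set:
inline bool example_masked_lane_enabled(uint8_t umask, int lane) {  // lane in [0, 8)
  uint32_t broadcast = 0x01010101u * umask;   // one 32-bit lane of _mm256_set1_epi8(umask)
  uint32_t or_mask = ~(uint32_t(1) << lane);  // the bit_mask entry for this lane
  return (broadcast | or_mask) == 0xFFFFFFFFu;  // the pcmp_eq step
}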

// Loads 4 floats from memory and returns the packet {a0, a0, a1, a1, a2, a2, a3, a3}
template <>
EIGEN_STRONG_INLINE Packet8f ploaddup<Packet8f>(const float* from) {
  // TODO try to find a way to avoid the need of a temporary register
  // Packet8f tmp = _mm256_castps128_ps256(_mm_loadu_ps(from));
  // tmp = _mm256_insertf128_ps(tmp, _mm_movehl_ps(_mm256_castps256_ps128(tmp),_mm256_castps256_ps128(tmp)), 1);
  // return _mm256_unpacklo_ps(tmp,tmp);

  // _mm256_insertf128_ps is very slow on Haswell, thus:
  Packet8f tmp = _mm256_broadcast_ps((const __m128*)(const void*)from);
  // mimic an "inplace" permutation of the lower 128bits using a blend
  tmp = _mm256_blend_ps(
      tmp, _mm256_castps128_ps256(_mm_permute_ps(_mm256_castps256_ps128(tmp), _MM_SHUFFLE(1, 0, 1, 0))), 15);
  // then we can perform a consistent permutation on the global register to get everything in shape:
  return _mm256_permute_ps(tmp, _MM_SHUFFLE(3, 3, 2, 2));
}
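
// [Editorial note] Lane bookkeeping for the ploaddup above: the broadcast
// yields {a0,a1,a2,a3 | a0,a1,a2,a3}; the blend replaces the low half with
// the _MM_SHUFFLE(1, 0, 1, 0) permutation {a0,a1,a0,a1}; the final in-lane
// _MM_SHUFFLE(3, 3, 2, 2) permute then picks elements {2,2,3,3} of each half,
// producing {a0,a0,a1,a1 | a2,a2,a3,a3}.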
// Loads 2 doubles from memory and returns the packet {a0, a0, a1, a1}
template <>
EIGEN_STRONG_INLINE Packet4d ploaddup<Packet4d>(const double* from) {
  Packet4d tmp = _mm256_broadcast_pd((const __m128d*)(const void*)from);
  return _mm256_permute_pd(tmp, 3 << 2);
}
// Loads 4 integers from memory and returns the packet {a0, a0, a1, a1, a2, a2, a3, a3}
template <>
EIGEN_STRONG_INLINE Packet8i ploaddup<Packet8i>(const int* from) {
#ifdef EIGEN_VECTORIZE_AVX2
  const Packet8i a = _mm256_castsi128_si256(ploadu<Packet4i>(from));
  return _mm256_permutevar8x32_epi32(a, _mm256_setr_epi32(0, 0, 1, 1, 2, 2, 3, 3));
#else
  __m256 tmp = _mm256_broadcast_ps((const __m128*)(const void*)from);
  // mimic an "inplace" permutation of the lower 128bits using a blend
  tmp = _mm256_blend_ps(
      tmp, _mm256_castps128_ps256(_mm_permute_ps(_mm256_castps256_ps128(tmp), _MM_SHUFFLE(1, 0, 1, 0))), 15);
  // then we can perform a consistent permutation on the global register to get everything in shape:
  return _mm256_castps_si256(_mm256_permute_ps(tmp, _MM_SHUFFLE(3, 3, 2, 2)));
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8ui ploaddup<Packet8ui>(const uint32_t* from) {
#ifdef EIGEN_VECTORIZE_AVX2
  const Packet8ui a = _mm256_castsi128_si256(ploadu<Packet4ui>(from));
  return _mm256_permutevar8x32_epi32(a, _mm256_setr_epi32(0, 0, 1, 1, 2, 2, 3, 3));
#else
  __m256 tmp = _mm256_broadcast_ps((const __m128*)(const void*)from);
  // mimic an "inplace" permutation of the lower 128bits using a blend
  tmp = _mm256_blend_ps(
      tmp, _mm256_castps128_ps256(_mm_permute_ps(_mm256_castps256_ps128(tmp), _MM_SHUFFLE(1, 0, 1, 0))), 15);
  // then we can perform a consistent permutation on the global register to get
  // everything in shape:
  return _mm256_castps_si256(_mm256_permute_ps(tmp, _MM_SHUFFLE(3, 3, 2, 2)));
#endif
}

// Loads 2 floats from memory and returns the packet {a0, a0, a0, a0, a1, a1, a1, a1}
1572template <>
1573EIGEN_STRONG_INLINE Packet8f ploadquad<Packet8f>(const float* from) {
1574 Packet8f tmp = _mm256_castps128_ps256(_mm_broadcast_ss(from));
1575 return _mm256_insertf128_ps(tmp, _mm_broadcast_ss(from + 1), 1);
1576}
1577template <>
1578EIGEN_STRONG_INLINE Packet8i ploadquad<Packet8i>(const int* from) {
1579 return _mm256_insertf128_si256(_mm256_set1_epi32(*from), _mm_set1_epi32(*(from + 1)), 1);
1580}
1581template <>
1582EIGEN_STRONG_INLINE Packet8ui ploadquad<Packet8ui>(const uint32_t* from) {
1583 return _mm256_insertf128_si256(_mm256_set1_epi32(*from), _mm_set1_epi32(*(from + 1)), 1);
1584}
1585
1586template <>
1587EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet8f& from) {
1588 EIGEN_DEBUG_ALIGNED_STORE _mm256_store_ps(to, from);
1589}
1590template <>
1591EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet4d& from) {
1592 EIGEN_DEBUG_ALIGNED_STORE _mm256_store_pd(to, from);
1593}
1594template <>
1595EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet8i& from) {
1596 EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(reinterpret_cast<__m256i*>(to), from);
1597}
1598template <>
1599EIGEN_STRONG_INLINE void pstore<uint32_t>(uint32_t* to, const Packet8ui& from) {
1600 EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(reinterpret_cast<__m256i*>(to), from);
1601}
1602
1603template <>
1604EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet8f& from) {
1605 EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_ps(to, from);
1606}
1607template <>
1608EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet4d& from) {
1609 EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_pd(to, from);
1610}
1611template <>
1612EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet8i& from) {
1613 EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from);
1614}
1615template <>
1616EIGEN_STRONG_INLINE void pstoreu<uint32_t>(uint32_t* to, const Packet8ui& from) {
1617 EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from);
1618}
1619
1620template <>
1621EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet8f& from, uint8_t umask) {
1622#ifdef EIGEN_VECTORIZE_AVX512
1623 __mmask16 mask = static_cast<__mmask16>(umask & 0x00FF);
1624 EIGEN_DEBUG_UNALIGNED_STORE _mm512_mask_storeu_ps(to, mask, _mm512_castps256_ps512(from));
1625#else
1626 Packet8i mask = _mm256_set1_epi8(static_cast<char>(umask));
1627 const Packet8i bit_mask =
1628 _mm256_set_epi32(0x7f7f7f7f, 0xbfbfbfbf, 0xdfdfdfdf, 0xefefefef, 0xf7f7f7f7, 0xfbfbfbfb, 0xfdfdfdfd, 0xfefefefe);
1629 mask = por<Packet8i>(mask, bit_mask);
1630 mask = pcmp_eq<Packet8i>(mask, _mm256_set1_epi32(0xffffffff));
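  // How the mask expansion works (sketch): 32-bit lane i of bit_mask holds the byte
  // ~(1 << i) replicated four times (lane 0 = 0xfe..., lane 7 = 0x7f...). OR-ing with
  // the broadcast umask byte sets every bit except bit i, plus bit i wherever umask
  // has it, so a lane equals 0xffffffff exactly when bit i of umask is set; pcmp_eq
  // then turns that into the per-lane store mask.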
1631#if EIGEN_COMP_MSVC
1632 // MSVC sometimes seems to use a bogus mask with maskstore.
1633 const __m256i ifrom = _mm256_castps_si256(from);
1634 EIGEN_DEBUG_UNALIGNED_STORE _mm_maskmoveu_si128(_mm256_extractf128_si256(ifrom, 0), _mm256_extractf128_si256(mask, 0),
1635 reinterpret_cast<char*>(to));
1636 EIGEN_DEBUG_UNALIGNED_STORE _mm_maskmoveu_si128(_mm256_extractf128_si256(ifrom, 1), _mm256_extractf128_si256(mask, 1),
1637 reinterpret_cast<char*>(to + 4));
1638#else
1639 EIGEN_DEBUG_UNALIGNED_STORE _mm256_maskstore_ps(to, mask, from);
1640#endif
1641#endif
1642}
1643
1644// NOTE: leverage _mm256_i32gather_ps and _mm256_i32gather_pd if AVX2 instructions are available
1645// NOTE: for the record the following seems to be slower:
1646//   return _mm256_i32gather_ps(from, _mm256_set1_epi32(stride), 4);
1647template <>
1648EIGEN_DEVICE_FUNC inline Packet8f pgather<float, Packet8f>(const float* from, Index stride) {
1649 return _mm256_set_ps(from[7 * stride], from[6 * stride], from[5 * stride], from[4 * stride], from[3 * stride],
1650 from[2 * stride], from[1 * stride], from[0 * stride]);
1651}
1652template <>
1653EIGEN_DEVICE_FUNC inline Packet4d pgather<double, Packet4d>(const double* from, Index stride) {
1654 return _mm256_set_pd(from[3 * stride], from[2 * stride], from[1 * stride], from[0 * stride]);
1655}
1656template <>
1657EIGEN_DEVICE_FUNC inline Packet8i pgather<int, Packet8i>(const int* from, Index stride) {
1658 return _mm256_set_epi32(from[7 * stride], from[6 * stride], from[5 * stride], from[4 * stride], from[3 * stride],
1659 from[2 * stride], from[1 * stride], from[0 * stride]);
1660}
1661template <>
1662EIGEN_DEVICE_FUNC inline Packet8ui pgather<uint32_t, Packet8ui>(const uint32_t* from, Index stride) {
1663 return (Packet8ui)pgather<int, Packet8i>((int*)from, stride);
1664}
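// Example (illustrative): pgather<float, Packet8f>(p, 3) returns
// {p[0], p[3], p[6], p[9], p[12], p[15], p[18], p[21]}.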
1665
1666template <>
1667EIGEN_DEVICE_FUNC inline void pscatter<float, Packet8f>(float* to, const Packet8f& from, Index stride) {
1668 __m128 low = _mm256_extractf128_ps(from, 0);
1669 to[stride * 0] = _mm_cvtss_f32(low);
1670 to[stride * 1] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 1));
1671 to[stride * 2] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 2));
1672 to[stride * 3] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 3));
1673
1674 __m128 high = _mm256_extractf128_ps(from, 1);
1675 to[stride * 4] = _mm_cvtss_f32(high);
1676 to[stride * 5] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 1));
1677 to[stride * 6] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 2));
1678 to[stride * 7] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3));
1679}
1680template <>
1681EIGEN_DEVICE_FUNC inline void pscatter<double, Packet4d>(double* to, const Packet4d& from, Index stride) {
1682 __m128d low = _mm256_extractf128_pd(from, 0);
1683 to[stride * 0] = _mm_cvtsd_f64(low);
1684 to[stride * 1] = _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1));
1685 __m128d high = _mm256_extractf128_pd(from, 1);
1686 to[stride * 2] = _mm_cvtsd_f64(high);
1687 to[stride * 3] = _mm_cvtsd_f64(_mm_shuffle_pd(high, high, 1));
1688}
1689template <>
1690EIGEN_DEVICE_FUNC inline void pscatter<int, Packet8i>(int* to, const Packet8i& from, Index stride) {
1691 __m128i low = _mm256_extractf128_si256(from, 0);
1692 to[stride * 0] = _mm_extract_epi32(low, 0);
1693 to[stride * 1] = _mm_extract_epi32(low, 1);
1694 to[stride * 2] = _mm_extract_epi32(low, 2);
1695 to[stride * 3] = _mm_extract_epi32(low, 3);
1696
1697 __m128i high = _mm256_extractf128_si256(from, 1);
1698 to[stride * 4] = _mm_extract_epi32(high, 0);
1699 to[stride * 5] = _mm_extract_epi32(high, 1);
1700 to[stride * 6] = _mm_extract_epi32(high, 2);
1701 to[stride * 7] = _mm_extract_epi32(high, 3);
1702}
1703template <>
1704EIGEN_DEVICE_FUNC inline void pscatter<uint32_t, Packet8ui>(uint32_t* to, const Packet8ui& from, Index stride) {
1705 pscatter<int, Packet8i>((int*)to, (Packet8i)from, stride);
1706}
1707
1708template <>
1709EIGEN_STRONG_INLINE void pstore1<Packet8f>(float* to, const float& a) {
1710 Packet8f pa = pset1<Packet8f>(a);
1711 pstore(to, pa);
1712}
1713template <>
1714EIGEN_STRONG_INLINE void pstore1<Packet4d>(double* to, const double& a) {
1715 Packet4d pa = pset1<Packet4d>(a);
1716 pstore(to, pa);
1717}
1718template <>
1719EIGEN_STRONG_INLINE void pstore1<Packet8i>(int* to, const int& a) {
1720 Packet8i pa = pset1<Packet8i>(a);
1721 pstore(to, pa);
1722}
1723
1724#ifndef EIGEN_VECTORIZE_AVX512
1725template <>
1726EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) {
1727 _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0);
1728}
1729template <>
1730EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) {
1731 _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0);
1732}
1733template <>
1734EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) {
1735 _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0);
1736}
1737template <>
1738EIGEN_STRONG_INLINE void prefetch<uint32_t>(const uint32_t* addr) {
1739 _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0);
1740}
1741#endif
1742
1743template <>
1744EIGEN_STRONG_INLINE float pfirst<Packet8f>(const Packet8f& a) {
1745 return _mm_cvtss_f32(_mm256_castps256_ps128(a));
1746}
1747template <>
1748EIGEN_STRONG_INLINE double pfirst<Packet4d>(const Packet4d& a) {
1749 return _mm_cvtsd_f64(_mm256_castpd256_pd128(a));
1750}
1751template <>
1752EIGEN_STRONG_INLINE int pfirst<Packet8i>(const Packet8i& a) {
1753 return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
1754}
1755template <>
1756EIGEN_STRONG_INLINE uint32_t pfirst<Packet8ui>(const Packet8ui& a) {
1757 return numext::bit_cast<uint32_t>(_mm_cvtsi128_si32(_mm256_castsi256_si128(a)));
1758}
1759
1760template <>
1761EIGEN_STRONG_INLINE Packet8f preverse(const Packet8f& a) {
1762 __m256 tmp = _mm256_shuffle_ps(a, a, 0x1b);
1763 return _mm256_permute2f128_ps(tmp, tmp, 1);
1764}
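// Sketch of the reversal above: 0x1b == _MM_SHUFFLE(0, 1, 2, 3) reverses the four
// floats within each 128-bit lane, and _mm256_permute2f128_ps with control 1 then
// swaps the two lanes, yielding the fully reversed packet.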
1765template <>
1766EIGEN_STRONG_INLINE Packet4d preverse(const Packet4d& a) {
1767 __m256d tmp = _mm256_shuffle_pd(a, a, 5);
1768 return _mm256_permute2f128_pd(tmp, tmp, 1);
1769#if 0
1770 // This version is unlikely to be faster as _mm256_shuffle_ps and _mm256_permute_pd
1771 // exhibit the same latency/throughput, but it is here for future reference/benchmarking...
1772 __m256d swap_halves = _mm256_permute2f128_pd(a,a,1);
1773 return _mm256_permute_pd(swap_halves,5);
1774#endif
1775}
1776template <>
1777EIGEN_STRONG_INLINE Packet8i preverse(const Packet8i& a) {
1778 return _mm256_castps_si256(preverse(_mm256_castsi256_ps(a)));
1779}
1780template <>
1781EIGEN_STRONG_INLINE Packet8ui preverse(const Packet8ui& a) {
1782 return _mm256_castps_si256(preverse(_mm256_castsi256_ps(a)));
1783}
1784
1785#ifdef EIGEN_VECTORIZE_AVX2
1786template <>
1787EIGEN_STRONG_INLINE Packet4l preverse(const Packet4l& a) {
1788 return _mm256_castpd_si256(preverse(_mm256_castsi256_pd(a)));
1789}
1790template <>
1791EIGEN_STRONG_INLINE Packet4ul preverse(const Packet4ul& a) {
1792 return _mm256_castpd_si256(preverse(_mm256_castsi256_pd(a)));
1793}
1794#endif
1795
1796// pabs: absolute value; clear the sign bit for floating-point types, use integer abs for signed ints
1797template <>
1798EIGEN_STRONG_INLINE Packet8f pabs(const Packet8f& a) {
1799 const Packet8f mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x7FFFFFFF));
1800 return _mm256_and_ps(a, mask);
1801}
1802template <>
1803EIGEN_STRONG_INLINE Packet4d pabs(const Packet4d& a) {
1804 const Packet4d mask = _mm256_castsi256_pd(_mm256_set1_epi64x(0x7FFFFFFFFFFFFFFF));
1805 return _mm256_and_pd(a, mask);
1806}
1807template <>
1808EIGEN_STRONG_INLINE Packet8i pabs(const Packet8i& a) {
1809#ifdef EIGEN_VECTORIZE_AVX2
1810 return _mm256_abs_epi32(a);
1811#else
1812 __m128i lo = _mm_abs_epi32(_mm256_extractf128_si256(a, 0));
1813 __m128i hi = _mm_abs_epi32(_mm256_extractf128_si256(a, 1));
1814 return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
1815#endif
1816}
1817template <>
1818EIGEN_STRONG_INLINE Packet8ui pabs(const Packet8ui& a) {
1819 return a;
1820}
1821
1822#ifndef EIGEN_VECTORIZE_AVX512FP16
1823template <>
1824EIGEN_STRONG_INLINE Packet8h psignbit(const Packet8h& a) {
1825 return _mm_cmpgt_epi16(_mm_setzero_si128(), a);
1826}
1827#endif // EIGEN_VECTORIZE_AVX512FP16
1828
1829template <>
1830EIGEN_STRONG_INLINE Packet8bf psignbit(const Packet8bf& a) {
1831 return _mm_cmpgt_epi16(_mm_setzero_si128(), a);
1832}
1833template <>
1834EIGEN_STRONG_INLINE Packet8f psignbit(const Packet8f& a) {
1835#ifdef EIGEN_VECTORIZE_AVX2
1836 return _mm256_castsi256_ps(_mm256_cmpgt_epi32(_mm256_setzero_si256(), _mm256_castps_si256(a)));
1837#else
1838 return _mm256_castsi256_ps(parithmetic_shift_right<31>(Packet8i(_mm256_castps_si256(a))));
1839#endif
1840}
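// Both branches above produce an all-ones lane exactly where the sign bit of `a` is
// set: 0 > x holds precisely for the sign-bit-set integers, and an arithmetic right
// shift by 31 broadcasts the sign bit across the lane.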
1841template <>
1842EIGEN_STRONG_INLINE Packet8ui psignbit(const Packet8ui& /*unused*/) {
1843 return _mm256_setzero_si256();
1844}
1845#ifdef EIGEN_VECTORIZE_AVX2
1846template <>
1847EIGEN_STRONG_INLINE Packet4d psignbit(const Packet4d& a) {
1848 return _mm256_castsi256_pd(_mm256_cmpgt_epi64(_mm256_setzero_si256(), _mm256_castpd_si256(a)));
1849}
1850template <>
1851EIGEN_STRONG_INLINE Packet4ul psignbit(const Packet4ul& /*unused*/) {
1852 return _mm256_setzero_si256();
1853}
1854#endif
1855
1856template <>
1857EIGEN_STRONG_INLINE Packet8f pfrexp<Packet8f>(const Packet8f& a, Packet8f& exponent) {
1858 return pfrexp_generic(a, exponent);
1859}
1860
1861// Extract the biased exponent without relying on Packet4l (64-bit integer packet) support.
1862template <>
1863EIGEN_STRONG_INLINE Packet4d pfrexp_generic_get_biased_exponent(const Packet4d& a) {
1864 const Packet4d cst_exp_mask = pset1frombits<Packet4d>(static_cast<uint64_t>(0x7ff0000000000000ull));
1865 __m256i a_expo = _mm256_castpd_si256(pand(a, cst_exp_mask));
1866#ifdef EIGEN_VECTORIZE_AVX2
1867 a_expo = _mm256_srli_epi64(a_expo, 52);
1868 __m128i lo = _mm256_extractf128_si256(a_expo, 0);
1869 __m128i hi = _mm256_extractf128_si256(a_expo, 1);
1870#else
1871 __m128i lo = _mm256_extractf128_si256(a_expo, 0);
1872 __m128i hi = _mm256_extractf128_si256(a_expo, 1);
1873 lo = _mm_srli_epi64(lo, 52);
1874 hi = _mm_srli_epi64(hi, 52);
1875#endif
1876 Packet2d exponent_lo = _mm_cvtepi32_pd(vec4i_swizzle1(lo, 0, 2, 1, 3));
1877 Packet2d exponent_hi = _mm_cvtepi32_pd(vec4i_swizzle1(hi, 0, 2, 1, 3));
1878 Packet4d exponent = _mm256_insertf128_pd(_mm256_setzero_pd(), exponent_lo, 0);
1879 exponent = _mm256_insertf128_pd(exponent, exponent_hi, 1);
1880 return exponent;
1881}
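// Worked example (illustrative): for a = 8.0 (bits 0x4020000000000000) the masked
// exponent field shifted right by 52 is 1026. The swizzle gathers the low 32 bits of
// each 64-bit lane into the lower half so _mm_cvtepi32_pd can return the biased
// exponents as doubles, here {1026.0, ...}.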
1882
1883template <>
1884EIGEN_STRONG_INLINE Packet4d pfrexp<Packet4d>(const Packet4d& a, Packet4d& exponent) {
1885 return pfrexp_generic(a, exponent);
1886}
1887
1888template <>
1889EIGEN_STRONG_INLINE Packet8f pldexp<Packet8f>(const Packet8f& a, const Packet8f& exponent) {
1890 return pldexp_generic(a, exponent);
1891}
1892
1893template <>
1894EIGEN_STRONG_INLINE Packet4d pldexp<Packet4d>(const Packet4d& a, const Packet4d& exponent) {
1895 // Clamp exponent to [-2099, 2099]
1896 const Packet4d max_exponent = pset1<Packet4d>(2099.0);
1897 const Packet4i e = _mm256_cvtpd_epi32(pmin(pmax(exponent, pnegate(max_exponent)), max_exponent));
1898
1899 // Split 2^e into four factors and multiply.
1900 const Packet4i bias = pset1<Packet4i>(1023);
1901 Packet4i b = parithmetic_shift_right<2>(e); // floor(e/4)
1902
1903 // 2^b
1904 Packet4i hi = vec4i_swizzle1(padd(b, bias), 0, 2, 1, 3);
1905 Packet4i lo = _mm_slli_epi64(hi, 52);
1906 hi = _mm_slli_epi64(_mm_srli_epi64(hi, 32), 52);
1907 Packet4d c = _mm256_castsi256_pd(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1));
1908 Packet4d out = pmul(pmul(pmul(a, c), c), c); // a * 2^(3b)
1909
1910 // 2^(e - 3b)
1911 b = psub(psub(psub(e, b), b), b); // e - 3b
1912 hi = vec4i_swizzle1(padd(b, bias), 0, 2, 1, 3);
1913 lo = _mm_slli_epi64(hi, 52);
1914 hi = _mm_slli_epi64(_mm_srli_epi64(hi, 32), 52);
1915 c = _mm256_castsi256_pd(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1));
1916 out = pmul(out, c); // a * 2^e
1917 return out;
1918}
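// Why the four-factor split above works (sketch): e can be as large as +/-2099, which
// no single double 2^e can represent. With b ~ e/4, each of the factors 2^b, 2^b, 2^b
// and 2^(e - 3b) stays within the normal double range, and
// a * 2^b * 2^b * 2^b * 2^(e - 3b) == a * 2^e. The swizzle plus the 52-bit shifts
// simply place (b + 1023) into the exponent field of each 64-bit lane to build 2^b.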
1919
1920template <>
1921EIGEN_STRONG_INLINE Packet4d pldexp_fast<Packet4d>(const Packet4d& a, const Packet4d& exponent) {
1922 // Clamp exponent to [-1023, 1024]
1923 const Packet4d min_exponent = pset1<Packet4d>(-1023.0);
1924 const Packet4d max_exponent = pset1<Packet4d>(1024.0);
1925 const Packet4i e = _mm256_cvtpd_epi32(pmin(pmax(exponent, min_exponent), max_exponent));
1926 const Packet4i bias = pset1<Packet4i>(1023);
1927
1928 // 2^e
1929 Packet4i hi = vec4i_swizzle1(padd(e, bias), 0, 2, 1, 3);
1930 const Packet4i lo = _mm_slli_epi64(hi, 52);
1931 hi = _mm_slli_epi64(_mm_srli_epi64(hi, 32), 52);
1932 const Packet4d c = _mm256_castsi256_pd(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1));
1933 return pmul(a, c); // a * 2^e
1934}
1935
1936template <>
1937EIGEN_STRONG_INLINE Packet4f predux_half_dowto4<Packet8f>(const Packet8f& a) {
1938 return _mm_add_ps(_mm256_castps256_ps128(a), _mm256_extractf128_ps(a, 1));
1939}
1940template <>
1941EIGEN_STRONG_INLINE Packet4i predux_half_dowto4<Packet8i>(const Packet8i& a) {
1942 return _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
1943}
1944template <>
1945EIGEN_STRONG_INLINE Packet4ui predux_half_dowto4<Packet8ui>(const Packet8ui& a) {
1946 return _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
1947}
1948
1949EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8f, 8>& kernel) {
1950 __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
1951 __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
1952 __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
1953 __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
1954 __m256 T4 = _mm256_unpacklo_ps(kernel.packet[4], kernel.packet[5]);
1955 __m256 T5 = _mm256_unpackhi_ps(kernel.packet[4], kernel.packet[5]);
1956 __m256 T6 = _mm256_unpacklo_ps(kernel.packet[6], kernel.packet[7]);
1957 __m256 T7 = _mm256_unpackhi_ps(kernel.packet[6], kernel.packet[7]);
1958 __m256 S0 = _mm256_shuffle_ps(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
1959 __m256 S1 = _mm256_shuffle_ps(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
1960 __m256 S2 = _mm256_shuffle_ps(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
1961 __m256 S3 = _mm256_shuffle_ps(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
1962 __m256 S4 = _mm256_shuffle_ps(T4, T6, _MM_SHUFFLE(1, 0, 1, 0));
1963 __m256 S5 = _mm256_shuffle_ps(T4, T6, _MM_SHUFFLE(3, 2, 3, 2));
1964 __m256 S6 = _mm256_shuffle_ps(T5, T7, _MM_SHUFFLE(1, 0, 1, 0));
1965 __m256 S7 = _mm256_shuffle_ps(T5, T7, _MM_SHUFFLE(3, 2, 3, 2));
1966 kernel.packet[0] = _mm256_permute2f128_ps(S0, S4, 0x20);
1967 kernel.packet[1] = _mm256_permute2f128_ps(S1, S5, 0x20);
1968 kernel.packet[2] = _mm256_permute2f128_ps(S2, S6, 0x20);
1969 kernel.packet[3] = _mm256_permute2f128_ps(S3, S7, 0x20);
1970 kernel.packet[4] = _mm256_permute2f128_ps(S0, S4, 0x31);
1971 kernel.packet[5] = _mm256_permute2f128_ps(S1, S5, 0x31);
1972 kernel.packet[6] = _mm256_permute2f128_ps(S2, S6, 0x31);
1973 kernel.packet[7] = _mm256_permute2f128_ps(S3, S7, 0x31);
1974}
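// The 8x8 transpose above proceeds in three stages: _mm256_unpack{lo,hi}_ps
// interleaves row pairs, _mm256_shuffle_ps merges those pairs into 4-element column
// groups within each 128-bit lane, and _mm256_permute2f128_ps finally exchanges the
// 128-bit halves so each packet holds one full column.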
1975
1976EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8f, 4>& kernel) {
1977 __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
1978 __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
1979 __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
1980 __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
1981
1982 __m256 S0 = _mm256_shuffle_ps(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
1983 __m256 S1 = _mm256_shuffle_ps(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
1984 __m256 S2 = _mm256_shuffle_ps(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
1985 __m256 S3 = _mm256_shuffle_ps(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
1986
1987 kernel.packet[0] = _mm256_permute2f128_ps(S0, S1, 0x20);
1988 kernel.packet[1] = _mm256_permute2f128_ps(S2, S3, 0x20);
1989 kernel.packet[2] = _mm256_permute2f128_ps(S0, S1, 0x31);
1990 kernel.packet[3] = _mm256_permute2f128_ps(S2, S3, 0x31);
1991}
1992
1993#define MM256_SHUFFLE_EPI32(A, B, M) \
1994 _mm256_castps_si256(_mm256_shuffle_ps(_mm256_castsi256_ps(A), _mm256_castsi256_ps(B), M))
1995
1996#ifndef EIGEN_VECTORIZE_AVX2
1997#define MM256_UNPACKLO_EPI32(A, B) \
1998 _mm256_castps_si256(_mm256_unpacklo_ps(_mm256_castsi256_ps(A), _mm256_castsi256_ps(B)))
1999#define MM256_UNPACKHI_EPI32(A, B) \
2000 _mm256_castps_si256(_mm256_unpackhi_ps(_mm256_castsi256_ps(A), _mm256_castsi256_ps(B)))
2001#else
2002#define MM256_UNPACKLO_EPI32(A, B) _mm256_unpacklo_epi32(A, B)
2003#define MM256_UNPACKHI_EPI32(A, B) _mm256_unpackhi_epi32(A, B)
2004#endif
2005
2006EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8i, 8>& kernel) {
2007 __m256i T0 = MM256_UNPACKLO_EPI32(kernel.packet[0], kernel.packet[1]);
2008 __m256i T1 = MM256_UNPACKHI_EPI32(kernel.packet[0], kernel.packet[1]);
2009 __m256i T2 = MM256_UNPACKLO_EPI32(kernel.packet[2], kernel.packet[3]);
2010 __m256i T3 = MM256_UNPACKHI_EPI32(kernel.packet[2], kernel.packet[3]);
2011 __m256i T4 = MM256_UNPACKLO_EPI32(kernel.packet[4], kernel.packet[5]);
2012 __m256i T5 = MM256_UNPACKHI_EPI32(kernel.packet[4], kernel.packet[5]);
2013 __m256i T6 = MM256_UNPACKLO_EPI32(kernel.packet[6], kernel.packet[7]);
2014 __m256i T7 = MM256_UNPACKHI_EPI32(kernel.packet[6], kernel.packet[7]);
2015 __m256i S0 = MM256_SHUFFLE_EPI32(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
2016 __m256i S1 = MM256_SHUFFLE_EPI32(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
2017 __m256i S2 = MM256_SHUFFLE_EPI32(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
2018 __m256i S3 = MM256_SHUFFLE_EPI32(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
2019 __m256i S4 = MM256_SHUFFLE_EPI32(T4, T6, _MM_SHUFFLE(1, 0, 1, 0));
2020 __m256i S5 = MM256_SHUFFLE_EPI32(T4, T6, _MM_SHUFFLE(3, 2, 3, 2));
2021 __m256i S6 = MM256_SHUFFLE_EPI32(T5, T7, _MM_SHUFFLE(1, 0, 1, 0));
2022 __m256i S7 = MM256_SHUFFLE_EPI32(T5, T7, _MM_SHUFFLE(3, 2, 3, 2));
2023 kernel.packet[0] = _mm256_permute2f128_si256(S0, S4, 0x20);
2024 kernel.packet[1] = _mm256_permute2f128_si256(S1, S5, 0x20);
2025 kernel.packet[2] = _mm256_permute2f128_si256(S2, S6, 0x20);
2026 kernel.packet[3] = _mm256_permute2f128_si256(S3, S7, 0x20);
2027 kernel.packet[4] = _mm256_permute2f128_si256(S0, S4, 0x31);
2028 kernel.packet[5] = _mm256_permute2f128_si256(S1, S5, 0x31);
2029 kernel.packet[6] = _mm256_permute2f128_si256(S2, S6, 0x31);
2030 kernel.packet[7] = _mm256_permute2f128_si256(S3, S7, 0x31);
2031}
2032EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8ui, 8>& kernel) {
2033 ptranspose((PacketBlock<Packet8i, 8>&)kernel);
2034}
2035
2036EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8i, 4>& kernel) {
2037 __m256i T0 = MM256_UNPACKLO_EPI32(kernel.packet[0], kernel.packet[1]);
2038 __m256i T1 = MM256_UNPACKHI_EPI32(kernel.packet[0], kernel.packet[1]);
2039 __m256i T2 = MM256_UNPACKLO_EPI32(kernel.packet[2], kernel.packet[3]);
2040 __m256i T3 = MM256_UNPACKHI_EPI32(kernel.packet[2], kernel.packet[3]);
2041
2042 __m256i S0 = MM256_SHUFFLE_EPI32(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
2043 __m256i S1 = MM256_SHUFFLE_EPI32(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
2044 __m256i S2 = MM256_SHUFFLE_EPI32(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
2045 __m256i S3 = MM256_SHUFFLE_EPI32(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
2046
2047 kernel.packet[0] = _mm256_permute2f128_si256(S0, S1, 0x20);
2048 kernel.packet[1] = _mm256_permute2f128_si256(S2, S3, 0x20);
2049 kernel.packet[2] = _mm256_permute2f128_si256(S0, S1, 0x31);
2050 kernel.packet[3] = _mm256_permute2f128_si256(S2, S3, 0x31);
2051}
2052EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8ui, 4>& kernel) {
2053 ptranspose((PacketBlock<Packet8i, 4>&)kernel);
2054}
2055
2056EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4d, 4>& kernel) {
2057 __m256d T0 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 15);
2058 __m256d T1 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);
2059 __m256d T2 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 15);
2060 __m256d T3 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);
2061
2062 kernel.packet[1] = _mm256_permute2f128_pd(T0, T2, 32);
2063 kernel.packet[3] = _mm256_permute2f128_pd(T0, T2, 49);
2064 kernel.packet[0] = _mm256_permute2f128_pd(T1, T3, 32);
2065 kernel.packet[2] = _mm256_permute2f128_pd(T1, T3, 49);
2066}
2067
2068// Packet math for Eigen::half
2069#ifndef EIGEN_VECTORIZE_AVX512FP16
2070template <>
2071struct unpacket_traits<Packet8h> {
2072 typedef Eigen::half type;
2073 enum {
2074 size = 8,
2075 alignment = Aligned16,
2076 vectorizable = true,
2077 masked_load_available = false,
2078 masked_store_available = false
2079 };
2080 typedef Packet8h half;
2081};
2082
2083template <>
2084EIGEN_STRONG_INLINE Packet8h pset1<Packet8h>(const Eigen::half& from) {
2085 return _mm_set1_epi16(numext::bit_cast<numext::uint16_t>(from));
2086}
2087
2088template <>
2089EIGEN_STRONG_INLINE Eigen::half pfirst<Packet8h>(const Packet8h& from) {
2090 return numext::bit_cast<Eigen::half>(static_cast<numext::uint16_t>(_mm_extract_epi16(from, 0)));
2091}
2092
2093template <>
2094EIGEN_STRONG_INLINE Packet8h pload<Packet8h>(const Eigen::half* from) {
2095 return _mm_load_si128(reinterpret_cast<const __m128i*>(from));
2096}
2097
2098template <>
2099EIGEN_STRONG_INLINE Packet8h ploadu<Packet8h>(const Eigen::half* from) {
2100 return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
2101}
2102
2103template <>
2104EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet8h& from) {
2105 _mm_store_si128(reinterpret_cast<__m128i*>(to), from);
2106}
2107
2108template <>
2109EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet8h& from) {
2110 _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from);
2111}
2112
2113template <>
2114EIGEN_STRONG_INLINE Packet8h ploaddup<Packet8h>(const Eigen::half* from) {
2115 const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
2116 const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
2117 const numext::uint16_t c = numext::bit_cast<numext::uint16_t>(from[2]);
2118 const numext::uint16_t d = numext::bit_cast<numext::uint16_t>(from[3]);
2119 return _mm_set_epi16(d, d, c, c, b, b, a, a);
2120}
2121
2122template <>
2123EIGEN_STRONG_INLINE Packet8h ploadquad<Packet8h>(const Eigen::half* from) {
2124 const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
2125 const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
2126 return _mm_set_epi16(b, b, b, b, a, a, a, a);
2127}
2128
2129template <>
2130EIGEN_STRONG_INLINE Packet8h ptrue(const Packet8h& a) {
2131 return _mm_cmpeq_epi32(a, a);
2132}
2133
2134template <>
2135EIGEN_STRONG_INLINE Packet8h pabs(const Packet8h& a) {
2136 const __m128i sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
2137 return _mm_andnot_si128(sign_mask, a);
2138}
2139
2140EIGEN_STRONG_INLINE Packet8f half2float(const Packet8h& a) {
2141#ifdef EIGEN_HAS_FP16_C
2142 return _mm256_cvtph_ps(a);
2143#else
2144 Eigen::internal::Packet8f pp = _mm256_castsi256_ps(
2145 _mm256_insertf128_si256(_mm256_castsi128_si256(half2floatsse(a)), half2floatsse(_mm_srli_si128(a, 8)), 1));
2146 return pp;
2147#endif
2148}
2149
2150EIGEN_STRONG_INLINE Packet8h float2half(const Packet8f& a) {
2151#ifdef EIGEN_HAS_FP16_C
2152 return _mm256_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT);
2153#else
2154 __m128i lo = float2half(_mm256_extractf128_ps(a, 0));
2155 __m128i hi = float2half(_mm256_extractf128_ps(a, 1));
2156 return _mm_packus_epi32(lo, hi);
2157#endif
2158}
2159
2160template <>
2161EIGEN_STRONG_INLINE Packet8h pmin<Packet8h>(const Packet8h& a, const Packet8h& b) {
2162 return float2half(pmin<Packet8f>(half2float(a), half2float(b)));
2163}
2164
2165template <>
2166EIGEN_STRONG_INLINE Packet8h pmax<Packet8h>(const Packet8h& a, const Packet8h& b) {
2167 return float2half(pmax<Packet8f>(half2float(a), half2float(b)));
2168}
2169
2170template <>
2171EIGEN_STRONG_INLINE Packet8h plset<Packet8h>(const half& a) {
2172 return float2half(plset<Packet8f>(static_cast<float>(a)));
2173}
2174
2175template <>
2176EIGEN_STRONG_INLINE Packet8h por(const Packet8h& a, const Packet8h& b) {
2177 // Packet8h is a thin wrapper around __m128i that converts to it implicitly,
2178 // so we can call the integer intrinsics directly:
2179 return _mm_or_si128(a, b);
2180}
2181template <>
2182EIGEN_STRONG_INLINE Packet8h pxor(const Packet8h& a, const Packet8h& b) {
2183 return _mm_xor_si128(a, b);
2184}
2185template <>
2186EIGEN_STRONG_INLINE Packet8h pand(const Packet8h& a, const Packet8h& b) {
2187 return _mm_and_si128(a, b);
2188}
2189template <>
2190EIGEN_STRONG_INLINE Packet8h pandnot(const Packet8h& a, const Packet8h& b) {
2191 return _mm_andnot_si128(b, a);
2192}
2193
2194template <>
2195EIGEN_STRONG_INLINE Packet8h pselect(const Packet8h& mask, const Packet8h& a, const Packet8h& b) {
2196 return _mm_blendv_epi8(b, a, mask);
2197}
2198
2199template <>
2200EIGEN_STRONG_INLINE Packet8h pround<Packet8h>(const Packet8h& a) {
2201 return float2half(pround<Packet8f>(half2float(a)));
2202}
2203
2204template <>
2205EIGEN_STRONG_INLINE Packet8h print<Packet8h>(const Packet8h& a) {
2206 return float2half(print<Packet8f>(half2float(a)));
2207}
2208
2209template <>
2210EIGEN_STRONG_INLINE Packet8h pceil<Packet8h>(const Packet8h& a) {
2211 return float2half(pceil<Packet8f>(half2float(a)));
2212}
2213
2214template <>
2215EIGEN_STRONG_INLINE Packet8h pfloor<Packet8h>(const Packet8h& a) {
2216 return float2half(pfloor<Packet8f>(half2float(a)));
2217}
2218
2219template <>
2220EIGEN_STRONG_INLINE Packet8h ptrunc<Packet8h>(const Packet8h& a) {
2221 return float2half(ptrunc<Packet8f>(half2float(a)));
2222}
2223
2224template <>
2225EIGEN_STRONG_INLINE Packet8h pisinf<Packet8h>(const Packet8h& a) {
2226 constexpr uint16_t kInf = ((1 << 5) - 1) << 10;
2227 constexpr uint16_t kAbsMask = (1 << 15) - 1;
2228 return _mm_cmpeq_epi16(_mm_and_si128(a.m_val, _mm_set1_epi16(kAbsMask)), _mm_set1_epi16(kInf));
2229}
2230
2231template <>
2232EIGEN_STRONG_INLINE Packet8h pisnan<Packet8h>(const Packet8h& a) {
2233 constexpr uint16_t kInf = ((1 << 5) - 1) << 10;
2234 constexpr uint16_t kAbsMask = (1 << 15) - 1;
2235 return _mm_cmpgt_epi16(_mm_and_si128(a.m_val, _mm_set1_epi16(kAbsMask)), _mm_set1_epi16(kInf));
2236}
2237
2238// Convert the sign-magnitude half representation to two's complement so that
2239// signed 16-bit integer comparisons order the values like the corresponding floats.
2239EIGEN_STRONG_INLINE __m128i pmaptosigned(const __m128i& a) {
2240 constexpr uint16_t kAbsMask = (1 << 15) - 1;
2241 // if 'a' has the sign bit set, clear the sign bit and negate the result as if it were an integer
2242 return _mm_sign_epi16(_mm_and_si128(a, _mm_set1_epi16(kAbsMask)), a);
2243}
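// Example (illustrative): a = 0x8001 (the smallest-magnitude negative subnormal half)
// maps to -1 = 0xffff, a = 0x8000 (-0.0) maps to 0 just like +0.0, and positive
// values are left unchanged, so signed comparisons below order the results correctly.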
2244
2245// Returns a mask that is all ones in each lane where neither `a` nor `b` is NaN.
2246EIGEN_STRONG_INLINE Packet8h pisordered(const Packet8h& a, const Packet8h& b) {
2247 constexpr uint16_t kInf = ((1 << 5) - 1) << 10;
2248 constexpr uint16_t kAbsMask = (1 << 15) - 1;
2249 __m128i abs_a = _mm_and_si128(a.m_val, _mm_set1_epi16(kAbsMask));
2250 __m128i abs_b = _mm_and_si128(b.m_val, _mm_set1_epi16(kAbsMask));
2251 // check that both `abs_a <= kInf` and `abs_b <= kInf` hold by testing max(abs_a, abs_b) <= kInf
2252 // SSE has no less-than-or-equal comparison for integers, but a less-than comparison against kInf + 1 accomplishes the same goal
2253 return _mm_cmplt_epi16(_mm_max_epu16(abs_a, abs_b), _mm_set1_epi16(kInf + 1));
2254}
2255
2256template <>
2257EIGEN_STRONG_INLINE Packet8h pcmp_eq(const Packet8h& a, const Packet8h& b) {
2258 __m128i isOrdered = pisordered(a, b);
2259 __m128i isEqual = _mm_cmpeq_epi16(pmaptosigned(a.m_val), pmaptosigned(b.m_val));
2260 return _mm_and_si128(isOrdered, isEqual);
2261}
2262
2263template <>
2264EIGEN_STRONG_INLINE Packet8h pcmp_le(const Packet8h& a, const Packet8h& b) {
2265 __m128i isOrdered = pisordered(a, b);
2266 __m128i isGreater = _mm_cmpgt_epi16(pmaptosigned(a.m_val), pmaptosigned(b.m_val));
2267 return _mm_andnot_si128(isGreater, isOrdered);
2268}
2269
2270template <>
2271EIGEN_STRONG_INLINE Packet8h pcmp_lt(const Packet8h& a, const Packet8h& b) {
2272 __m128i isOrdered = pisordered(a, b);
2273 __m128i isLess = _mm_cmplt_epi16(pmaptosigned(a.m_val), pmaptosigned(b.m_val));
2274 return _mm_and_si128(isOrdered, isLess);
2275}
2276
2277template <>
2278EIGEN_STRONG_INLINE Packet8h pcmp_lt_or_nan(const Packet8h& a, const Packet8h& b) {
2279 __m128i isUnordered = por(pisnan(a), pisnan(b));
2280 __m128i isLess = _mm_cmplt_epi16(pmaptosigned(a.m_val), pmaptosigned(b.m_val));
2281 return _mm_or_si128(isUnordered, isLess);
2282}
2283
2284template <>
2285EIGEN_STRONG_INLINE Packet8h pconj(const Packet8h& a) {
2286 return a;
2287}
2288
2289template <>
2290EIGEN_STRONG_INLINE Packet8h pnegate(const Packet8h& a) {
2291 Packet8h sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
2292 return _mm_xor_si128(a, sign_mask);
2293}
2294
2295#ifndef EIGEN_VECTORIZE_AVX512FP16
2296template <>
2297EIGEN_STRONG_INLINE Packet8h padd<Packet8h>(const Packet8h& a, const Packet8h& b) {
2298 Packet8f af = half2float(a);
2299 Packet8f bf = half2float(b);
2300 Packet8f rf = padd(af, bf);
2301 return float2half(rf);
2302}
2303
2304template <>
2305EIGEN_STRONG_INLINE Packet8h psub<Packet8h>(const Packet8h& a, const Packet8h& b) {
2306 Packet8f af = half2float(a);
2307 Packet8f bf = half2float(b);
2308 Packet8f rf = psub(af, bf);
2309 return float2half(rf);
2310}
2311
2312template <>
2313EIGEN_STRONG_INLINE Packet8h pmul<Packet8h>(const Packet8h& a, const Packet8h& b) {
2314 Packet8f af = half2float(a);
2315 Packet8f bf = half2float(b);
2316 Packet8f rf = pmul(af, bf);
2317 return float2half(rf);
2318}
2319
2320template <>
2321EIGEN_STRONG_INLINE Packet8h pmadd<Packet8h>(const Packet8h& a, const Packet8h& b, const Packet8h& c) {
2322 return float2half(pmadd(half2float(a), half2float(b), half2float(c)));
2323}
2324
2325template <>
2326EIGEN_STRONG_INLINE Packet8h pmsub<Packet8h>(const Packet8h& a, const Packet8h& b, const Packet8h& c) {
2327 return float2half(pmsub(half2float(a), half2float(b), half2float(c)));
2328}
2329
2330template <>
2331EIGEN_STRONG_INLINE Packet8h pnmadd<Packet8h>(const Packet8h& a, const Packet8h& b, const Packet8h& c) {
2332 return float2half(pnmadd(half2float(a), half2float(b), half2float(c)));
2333}
2334
2335template <>
2336EIGEN_STRONG_INLINE Packet8h pnmsub<Packet8h>(const Packet8h& a, const Packet8h& b, const Packet8h& c) {
2337 return float2half(pnmsub(half2float(a), half2float(b), half2float(c)));
2338}
2339
2340template <>
2341EIGEN_STRONG_INLINE Packet8h pdiv<Packet8h>(const Packet8h& a, const Packet8h& b) {
2342 Packet8f af = half2float(a);
2343 Packet8f bf = half2float(b);
2344 Packet8f rf = pdiv(af, bf);
2345 return float2half(rf);
2346}
2347#endif
2348
2349template <>
2350EIGEN_STRONG_INLINE Packet8h pgather<Eigen::half, Packet8h>(const Eigen::half* from, Index stride) {
2351 const numext::uint16_t s0 = numext::bit_cast<numext::uint16_t>(from[0 * stride]);
2352 const numext::uint16_t s1 = numext::bit_cast<numext::uint16_t>(from[1 * stride]);
2353 const numext::uint16_t s2 = numext::bit_cast<numext::uint16_t>(from[2 * stride]);
2354 const numext::uint16_t s3 = numext::bit_cast<numext::uint16_t>(from[3 * stride]);
2355 const numext::uint16_t s4 = numext::bit_cast<numext::uint16_t>(from[4 * stride]);
2356 const numext::uint16_t s5 = numext::bit_cast<numext::uint16_t>(from[5 * stride]);
2357 const numext::uint16_t s6 = numext::bit_cast<numext::uint16_t>(from[6 * stride]);
2358 const numext::uint16_t s7 = numext::bit_cast<numext::uint16_t>(from[7 * stride]);
2359 return _mm_set_epi16(s7, s6, s5, s4, s3, s2, s1, s0);
2360}
2361
2362template <>
2363EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet8h>(Eigen::half* to, const Packet8h& from, Index stride) {
2364 EIGEN_ALIGN32 Eigen::half aux[8];
2365 pstore(aux, from);
2366 to[stride * 0] = aux[0];
2367 to[stride * 1] = aux[1];
2368 to[stride * 2] = aux[2];
2369 to[stride * 3] = aux[3];
2370 to[stride * 4] = aux[4];
2371 to[stride * 5] = aux[5];
2372 to[stride * 6] = aux[6];
2373 to[stride * 7] = aux[7];
2374}
2375
2376template <>
2377EIGEN_STRONG_INLINE Packet8h preverse(const Packet8h& a) {
2378 __m128i m = _mm_setr_epi8(14, 15, 12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1);
2379 return _mm_shuffle_epi8(a, m);
2380}
2381
2382EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet8h, 8>& kernel) {
2383 __m128i a = kernel.packet[0];
2384 __m128i b = kernel.packet[1];
2385 __m128i c = kernel.packet[2];
2386 __m128i d = kernel.packet[3];
2387 __m128i e = kernel.packet[4];
2388 __m128i f = kernel.packet[5];
2389 __m128i g = kernel.packet[6];
2390 __m128i h = kernel.packet[7];
2391
2392 __m128i a03b03 = _mm_unpacklo_epi16(a, b);
2393 __m128i c03d03 = _mm_unpacklo_epi16(c, d);
2394 __m128i e03f03 = _mm_unpacklo_epi16(e, f);
2395 __m128i g03h03 = _mm_unpacklo_epi16(g, h);
2396 __m128i a47b47 = _mm_unpackhi_epi16(a, b);
2397 __m128i c47d47 = _mm_unpackhi_epi16(c, d);
2398 __m128i e47f47 = _mm_unpackhi_epi16(e, f);
2399 __m128i g47h47 = _mm_unpackhi_epi16(g, h);
2400
2401 __m128i a01b01c01d01 = _mm_unpacklo_epi32(a03b03, c03d03);
2402 __m128i a23b23c23d23 = _mm_unpackhi_epi32(a03b03, c03d03);
2403 __m128i e01f01g01h01 = _mm_unpacklo_epi32(e03f03, g03h03);
2404 __m128i e23f23g23h23 = _mm_unpackhi_epi32(e03f03, g03h03);
2405 __m128i a45b45c45d45 = _mm_unpacklo_epi32(a47b47, c47d47);
2406 __m128i a67b67c67d67 = _mm_unpackhi_epi32(a47b47, c47d47);
2407 __m128i e45f45g45h45 = _mm_unpacklo_epi32(e47f47, g47h47);
2408 __m128i e67f67g67h67 = _mm_unpackhi_epi32(e47f47, g47h47);
2409
2410 __m128i a0b0c0d0e0f0g0h0 = _mm_unpacklo_epi64(a01b01c01d01, e01f01g01h01);
2411 __m128i a1b1c1d1e1f1g1h1 = _mm_unpackhi_epi64(a01b01c01d01, e01f01g01h01);
2412 __m128i a2b2c2d2e2f2g2h2 = _mm_unpacklo_epi64(a23b23c23d23, e23f23g23h23);
2413 __m128i a3b3c3d3e3f3g3h3 = _mm_unpackhi_epi64(a23b23c23d23, e23f23g23h23);
2414 __m128i a4b4c4d4e4f4g4h4 = _mm_unpacklo_epi64(a45b45c45d45, e45f45g45h45);
2415 __m128i a5b5c5d5e5f5g5h5 = _mm_unpackhi_epi64(a45b45c45d45, e45f45g45h45);
2416 __m128i a6b6c6d6e6f6g6h6 = _mm_unpacklo_epi64(a67b67c67d67, e67f67g67h67);
2417 __m128i a7b7c7d7e7f7g7h7 = _mm_unpackhi_epi64(a67b67c67d67, e67f67g67h67);
2418
2419 kernel.packet[0] = a0b0c0d0e0f0g0h0;
2420 kernel.packet[1] = a1b1c1d1e1f1g1h1;
2421 kernel.packet[2] = a2b2c2d2e2f2g2h2;
2422 kernel.packet[3] = a3b3c3d3e3f3g3h3;
2423 kernel.packet[4] = a4b4c4d4e4f4g4h4;
2424 kernel.packet[5] = a5b5c5d5e5f5g5h5;
2425 kernel.packet[6] = a6b6c6d6e6f6g6h6;
2426 kernel.packet[7] = a7b7c7d7e7f7g7h7;
2427}
2428
2429EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet8h, 4>& kernel) {
2430 EIGEN_ALIGN32 Eigen::half in[4][8];
2431 pstore<Eigen::half>(in[0], kernel.packet[0]);
2432 pstore<Eigen::half>(in[1], kernel.packet[1]);
2433 pstore<Eigen::half>(in[2], kernel.packet[2]);
2434 pstore<Eigen::half>(in[3], kernel.packet[3]);
2435
2436 EIGEN_ALIGN32 Eigen::half out[4][8];
2437
2438 for (int i = 0; i < 4; ++i) {
2439 for (int j = 0; j < 4; ++j) {
2440 out[i][j] = in[j][2 * i];
2441 }
2442 for (int j = 0; j < 4; ++j) {
2443 out[i][j + 4] = in[j][2 * i + 1];
2444 }
2445 }
2446
2447 kernel.packet[0] = pload<Packet8h>(out[0]);
2448 kernel.packet[1] = pload<Packet8h>(out[1]);
2449 kernel.packet[2] = pload<Packet8h>(out[2]);
2450 kernel.packet[3] = pload<Packet8h>(out[3]);
2451}
2452
2453#endif
2454
2455// BFloat16 implementation.
2456
2457EIGEN_STRONG_INLINE Packet8f Bf16ToF32(const Packet8bf& a) {
2458#ifdef EIGEN_VECTORIZE_AVX2
2459 __m256i extend = _mm256_cvtepu16_epi32(a);
2460 return _mm256_castsi256_ps(_mm256_slli_epi32(extend, 16));
2461#else
2462 __m128i lo = _mm_cvtepu16_epi32(a);
2463 __m128i hi = _mm_cvtepu16_epi32(_mm_srli_si128(a, 8));
2464 __m128i lo_shift = _mm_slli_epi32(lo, 16);
2465 __m128i hi_shift = _mm_slli_epi32(hi, 16);
2466 return _mm256_castsi256_ps(_mm256_insertf128_si256(_mm256_castsi128_si256(lo_shift), hi_shift, 1));
2467#endif
2468}
2469
2470// Convert float to bfloat16 using round-to-nearest-even; NaN inputs are replaced by the quiet NaN 0x7fc0.
2471EIGEN_STRONG_INLINE Packet8bf F32ToBf16(const Packet8f& a) {
2472 __m256i input = _mm256_castps_si256(a);
2473
2474#ifdef EIGEN_VECTORIZE_AVX2
2475 // uint32_t lsb = (input >> 16);
2476 __m256i t = _mm256_srli_epi32(input, 16);
2477 // uint32_t lsb = lsb & 1;
2478 t = _mm256_and_si256(t, _mm256_set1_epi32(1));
2479 // uint32_t rounding_bias = 0x7fff + lsb;
2480 t = _mm256_add_epi32(t, _mm256_set1_epi32(0x7fff));
2481 // input += rounding_bias;
2482 t = _mm256_add_epi32(t, input);
2483 // input = input >> 16;
2484 t = _mm256_srli_epi32(t, 16);
2485 // Check NaN before converting back to bf16
2486 __m256 mask = _mm256_cmp_ps(a, a, _CMP_ORD_Q);
2487 __m256i nan = _mm256_set1_epi32(0x7fc0);
2488 t = _mm256_blendv_epi8(nan, t, _mm256_castps_si256(mask));
2489 // output = numext::bit_cast<uint16_t>(input);
2490 return _mm_packus_epi32(_mm256_extractf128_si256(t, 0), _mm256_extractf128_si256(t, 1));
2491#else
2492 // uint32_t lsb = (input >> 16);
2493 __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(input, 0), 16);
2494 __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(input, 1), 16);
2495 // uint32_t lsb = lsb & 1;
2496 lo = _mm_and_si128(lo, _mm_set1_epi32(1));
2497 hi = _mm_and_si128(hi, _mm_set1_epi32(1));
2498 // uint32_t rounding_bias = 0x7fff + lsb;
2499 lo = _mm_add_epi32(lo, _mm_set1_epi32(0x7fff));
2500 hi = _mm_add_epi32(hi, _mm_set1_epi32(0x7fff));
2501 // input += rounding_bias;
2502 lo = _mm_add_epi32(lo, _mm256_extractf128_si256(input, 0));
2503 hi = _mm_add_epi32(hi, _mm256_extractf128_si256(input, 1));
2504 // input = input >> 16;
2505 lo = _mm_srli_epi32(lo, 16);
2506 hi = _mm_srli_epi32(hi, 16);
2507 // Check NaN before converting back to bf16
2508 __m256 mask = _mm256_cmp_ps(a, a, _CMP_ORD_Q);
2509 __m128i nan = _mm_set1_epi32(0x7fc0);
2510 lo = _mm_blendv_epi8(nan, lo, _mm_castps_si128(_mm256_castps256_ps128(mask)));
2511 hi = _mm_blendv_epi8(nan, hi, _mm_castps_si128(_mm256_extractf128_ps(mask, 1)));
2512 // output = numext::bit_cast<uint16_t>(input);
2513 return _mm_packus_epi32(lo, hi);
2514#endif
2515}
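// Scalar model of the rounding above (illustrative, mirrors the inline comments):
//   uint32_t input = numext::bit_cast<uint32_t>(f);
//   uint32_t lsb = (input >> 16) & 1;
//   input += 0x7fff + lsb;  // round to nearest, ties to even
//   uint16_t output = std::isnan(f) ? 0x7fc0 : uint16_t(input >> 16);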
2516
2517template <>
2518EIGEN_STRONG_INLINE Packet8bf pset1<Packet8bf>(const bfloat16& from) {
2519 return _mm_set1_epi16(numext::bit_cast<numext::uint16_t>(from));
2520}
2521
2522template <>
2523EIGEN_STRONG_INLINE bfloat16 pfirst<Packet8bf>(const Packet8bf& from) {
2524 return numext::bit_cast<bfloat16>(static_cast<numext::uint16_t>(_mm_extract_epi16(from, 0)));
2525}
2526
2527template <>
2528EIGEN_STRONG_INLINE Packet8bf pload<Packet8bf>(const bfloat16* from) {
2529 return _mm_load_si128(reinterpret_cast<const __m128i*>(from));
2530}
2531
2532template <>
2533EIGEN_STRONG_INLINE Packet8bf ploadu<Packet8bf>(const bfloat16* from) {
2534 return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
2535}
2536
2537template <>
2538EIGEN_STRONG_INLINE void pstore<bfloat16>(bfloat16* to, const Packet8bf& from) {
2539 _mm_store_si128(reinterpret_cast<__m128i*>(to), from);
2540}
2541
2542template <>
2543EIGEN_STRONG_INLINE void pstoreu<bfloat16>(bfloat16* to, const Packet8bf& from) {
2544 _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from);
2545}
2546
2547template <>
2548EIGEN_STRONG_INLINE Packet8bf ploaddup<Packet8bf>(const bfloat16* from) {
2549 const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
2550 const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
2551 const numext::uint16_t c = numext::bit_cast<numext::uint16_t>(from[2]);
2552 const numext::uint16_t d = numext::bit_cast<numext::uint16_t>(from[3]);
2553 return _mm_set_epi16(d, d, c, c, b, b, a, a);
2554}
2555
2556template <>
2557EIGEN_STRONG_INLINE Packet8bf ploadquad<Packet8bf>(const bfloat16* from) {
2558 const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
2559 const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
2560 return _mm_set_epi16(b, b, b, b, a, a, a, a);
2561}
2562
2563template <>
2564EIGEN_STRONG_INLINE Packet8bf ptrue(const Packet8bf& a) {
2565 return _mm_cmpeq_epi32(a, a);
2566}
2567
2568template <>
2569EIGEN_STRONG_INLINE Packet8bf pabs(const Packet8bf& a) {
2570 const __m128i sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
2571 return _mm_andnot_si128(sign_mask, a);
2572}
2573
2574template <>
2575EIGEN_STRONG_INLINE Packet8bf pmin<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
2576 return F32ToBf16(pmin<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
2577}
2578
2579template <>
2580EIGEN_STRONG_INLINE Packet8bf pmax<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
2581 return F32ToBf16(pmax<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
2582}
2583
2584template <>
2585EIGEN_STRONG_INLINE Packet8bf plset<Packet8bf>(const bfloat16& a) {
2586 return F32ToBf16(plset<Packet8f>(static_cast<float>(a)));
2587}
2588
2589template <>
2590EIGEN_STRONG_INLINE Packet8bf por(const Packet8bf& a, const Packet8bf& b) {
2591 return _mm_or_si128(a, b);
2592}
2593template <>
2594EIGEN_STRONG_INLINE Packet8bf pxor(const Packet8bf& a, const Packet8bf& b) {
2595 return _mm_xor_si128(a, b);
2596}
2597template <>
2598EIGEN_STRONG_INLINE Packet8bf pand(const Packet8bf& a, const Packet8bf& b) {
2599 return _mm_and_si128(a, b);
2600}
2601template <>
2602EIGEN_STRONG_INLINE Packet8bf pandnot(const Packet8bf& a, const Packet8bf& b) {
2603 return _mm_andnot_si128(b, a);
2604}
2605
2606template <>
2607EIGEN_STRONG_INLINE Packet8bf pselect(const Packet8bf& mask, const Packet8bf& a, const Packet8bf& b) {
2608 return _mm_blendv_epi8(b, a, mask);
2609}
2610
2611template <>
2612EIGEN_STRONG_INLINE Packet8bf pround<Packet8bf>(const Packet8bf& a) {
2613 return F32ToBf16(pround<Packet8f>(Bf16ToF32(a)));
2614}
2615
2616template <>
2617EIGEN_STRONG_INLINE Packet8bf print<Packet8bf>(const Packet8bf& a) {
2618 return F32ToBf16(print<Packet8f>(Bf16ToF32(a)));
2619}
2620
2621template <>
2622EIGEN_STRONG_INLINE Packet8bf pceil<Packet8bf>(const Packet8bf& a) {
2623 return F32ToBf16(pceil<Packet8f>(Bf16ToF32(a)));
2624}
2625
2626template <>
2627EIGEN_STRONG_INLINE Packet8bf pfloor<Packet8bf>(const Packet8bf& a) {
2628 return F32ToBf16(pfloor<Packet8f>(Bf16ToF32(a)));
2629}
2630
2631template <>
2632EIGEN_STRONG_INLINE Packet8bf ptrunc<Packet8bf>(const Packet8bf& a) {
2633 return F32ToBf16(ptrunc<Packet8f>(Bf16ToF32(a)));
2634}
2635
2636template <>
2637EIGEN_STRONG_INLINE Packet8bf pcmp_eq(const Packet8bf& a, const Packet8bf& b) {
2638 return Pack16To8(pcmp_eq(Bf16ToF32(a), Bf16ToF32(b)));
2639}
2640
2641template <>
2642EIGEN_STRONG_INLINE Packet8bf pcmp_le(const Packet8bf& a, const Packet8bf& b) {
2643 return Pack16To8(pcmp_le(Bf16ToF32(a), Bf16ToF32(b)));
2644}
2645
2646template <>
2647EIGEN_STRONG_INLINE Packet8bf pcmp_lt(const Packet8bf& a, const Packet8bf& b) {
2648 return Pack16To8(pcmp_lt(Bf16ToF32(a), Bf16ToF32(b)));
2649}
2650
2651template <>
2652EIGEN_STRONG_INLINE Packet8bf pcmp_lt_or_nan(const Packet8bf& a, const Packet8bf& b) {
2653 return Pack16To8(pcmp_lt_or_nan(Bf16ToF32(a), Bf16ToF32(b)));
2654}
2655
2656template <>
2657EIGEN_STRONG_INLINE Packet8bf pconj(const Packet8bf& a) {
2658 return a;
2659}
2660
2661template <>
2662EIGEN_STRONG_INLINE Packet8bf pnegate(const Packet8bf& a) {
2663 Packet8bf sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
2664 return _mm_xor_si128(a, sign_mask);
2665}
2666
2667template <>
2668EIGEN_STRONG_INLINE Packet8bf padd<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
2669 return F32ToBf16(padd<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
2670}
2671
2672template <>
2673EIGEN_STRONG_INLINE Packet8bf psub<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
2674 return F32ToBf16(psub<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
2675}
2676
2677template <>
2678EIGEN_STRONG_INLINE Packet8bf pmul<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
2679 return F32ToBf16(pmul<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
2680}
2681
2682template <>
2683EIGEN_STRONG_INLINE Packet8bf pmadd<Packet8bf>(const Packet8bf& a, const Packet8bf& b, const Packet8bf& c) {
2684 return F32ToBf16(pmadd(Bf16ToF32(a), Bf16ToF32(b), Bf16ToF32(c)));
2685}
2686
2687template <>
2688EIGEN_STRONG_INLINE Packet8bf pmsub<Packet8bf>(const Packet8bf& a, const Packet8bf& b, const Packet8bf& c) {
2689 return F32ToBf16(pmsub(Bf16ToF32(a), Bf16ToF32(b), Bf16ToF32(c)));
2690}
2691
2692template <>
2693EIGEN_STRONG_INLINE Packet8bf pnmadd<Packet8bf>(const Packet8bf& a, const Packet8bf& b, const Packet8bf& c) {
2694 return F32ToBf16(pnmadd(Bf16ToF32(a), Bf16ToF32(b), Bf16ToF32(c)));
2695}
2696
2697template <>
2698EIGEN_STRONG_INLINE Packet8bf pnmsub<Packet8bf>(const Packet8bf& a, const Packet8bf& b, const Packet8bf& c) {
2699 return F32ToBf16(pnmsub(Bf16ToF32(a), Bf16ToF32(b), Bf16ToF32(c)));
2700}
2701
2702template <>
2703EIGEN_STRONG_INLINE Packet8bf pdiv<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
2704 return F32ToBf16(pdiv<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
2705}
2706
2707template <>
2708EIGEN_STRONG_INLINE Packet8bf pgather<bfloat16, Packet8bf>(const bfloat16* from, Index stride) {
2709 const numext::uint16_t s0 = numext::bit_cast<numext::uint16_t>(from[0 * stride]);
2710 const numext::uint16_t s1 = numext::bit_cast<numext::uint16_t>(from[1 * stride]);
2711 const numext::uint16_t s2 = numext::bit_cast<numext::uint16_t>(from[2 * stride]);
2712 const numext::uint16_t s3 = numext::bit_cast<numext::uint16_t>(from[3 * stride]);
2713 const numext::uint16_t s4 = numext::bit_cast<numext::uint16_t>(from[4 * stride]);
2714 const numext::uint16_t s5 = numext::bit_cast<numext::uint16_t>(from[5 * stride]);
2715 const numext::uint16_t s6 = numext::bit_cast<numext::uint16_t>(from[6 * stride]);
2716 const numext::uint16_t s7 = numext::bit_cast<numext::uint16_t>(from[7 * stride]);
2717 return _mm_set_epi16(s7, s6, s5, s4, s3, s2, s1, s0);
2718}
2719
2720template <>
2721EIGEN_STRONG_INLINE void pscatter<bfloat16, Packet8bf>(bfloat16* to, const Packet8bf& from, Index stride) {
2722 EIGEN_ALIGN32 bfloat16 aux[8];
2723 pstore(aux, from);
2724 to[stride * 0] = aux[0];
2725 to[stride * 1] = aux[1];
2726 to[stride * 2] = aux[2];
2727 to[stride * 3] = aux[3];
2728 to[stride * 4] = aux[4];
2729 to[stride * 5] = aux[5];
2730 to[stride * 6] = aux[6];
2731 to[stride * 7] = aux[7];
2732}
2733
2734template <>
2735EIGEN_STRONG_INLINE Packet8bf preverse(const Packet8bf& a) {
2736 __m128i m = _mm_setr_epi8(14, 15, 12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1);
2737 return _mm_shuffle_epi8(a, m);
2738}
2739
2740EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet8bf, 8>& kernel) {
2741 __m128i a = kernel.packet[0];
2742 __m128i b = kernel.packet[1];
2743 __m128i c = kernel.packet[2];
2744 __m128i d = kernel.packet[3];
2745 __m128i e = kernel.packet[4];
2746 __m128i f = kernel.packet[5];
2747 __m128i g = kernel.packet[6];
2748 __m128i h = kernel.packet[7];
2749
2750 __m128i a03b03 = _mm_unpacklo_epi16(a, b);
2751 __m128i c03d03 = _mm_unpacklo_epi16(c, d);
2752 __m128i e03f03 = _mm_unpacklo_epi16(e, f);
2753 __m128i g03h03 = _mm_unpacklo_epi16(g, h);
2754 __m128i a47b47 = _mm_unpackhi_epi16(a, b);
2755 __m128i c47d47 = _mm_unpackhi_epi16(c, d);
2756 __m128i e47f47 = _mm_unpackhi_epi16(e, f);
2757 __m128i g47h47 = _mm_unpackhi_epi16(g, h);
2758
2759 __m128i a01b01c01d01 = _mm_unpacklo_epi32(a03b03, c03d03);
2760 __m128i a23b23c23d23 = _mm_unpackhi_epi32(a03b03, c03d03);
2761 __m128i e01f01g01h01 = _mm_unpacklo_epi32(e03f03, g03h03);
2762 __m128i e23f23g23h23 = _mm_unpackhi_epi32(e03f03, g03h03);
2763 __m128i a45b45c45d45 = _mm_unpacklo_epi32(a47b47, c47d47);
2764 __m128i a67b67c67d67 = _mm_unpackhi_epi32(a47b47, c47d47);
2765 __m128i e45f45g45h45 = _mm_unpacklo_epi32(e47f47, g47h47);
2766 __m128i e67f67g67h67 = _mm_unpackhi_epi32(e47f47, g47h47);
2767
2768 kernel.packet[0] = _mm_unpacklo_epi64(a01b01c01d01, e01f01g01h01);
2769 kernel.packet[1] = _mm_unpackhi_epi64(a01b01c01d01, e01f01g01h01);
2770 kernel.packet[2] = _mm_unpacklo_epi64(a23b23c23d23, e23f23g23h23);
2771 kernel.packet[3] = _mm_unpackhi_epi64(a23b23c23d23, e23f23g23h23);
2772 kernel.packet[4] = _mm_unpacklo_epi64(a45b45c45d45, e45f45g45h45);
2773 kernel.packet[5] = _mm_unpackhi_epi64(a45b45c45d45, e45f45g45h45);
2774 kernel.packet[6] = _mm_unpacklo_epi64(a67b67c67d67, e67f67g67h67);
2775 kernel.packet[7] = _mm_unpackhi_epi64(a67b67c67d67, e67f67g67h67);
2776}
2777
2778EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet8bf, 4>& kernel) {
2779 __m128i a = kernel.packet[0];
2780 __m128i b = kernel.packet[1];
2781 __m128i c = kernel.packet[2];
2782 __m128i d = kernel.packet[3];
2783
2784 __m128i ab_03 = _mm_unpacklo_epi16(a, b);
2785 __m128i cd_03 = _mm_unpacklo_epi16(c, d);
2786 __m128i ab_47 = _mm_unpackhi_epi16(a, b);
2787 __m128i cd_47 = _mm_unpackhi_epi16(c, d);
2788
2789 kernel.packet[0] = _mm_unpacklo_epi32(ab_03, cd_03);
2790 kernel.packet[1] = _mm_unpackhi_epi32(ab_03, cd_03);
2791 kernel.packet[2] = _mm_unpacklo_epi32(ab_47, cd_47);
2792 kernel.packet[3] = _mm_unpackhi_epi32(ab_47, cd_47);
2793}
2794
2795/*---------------- load/store segment support ----------------*/
2796
2797// returns a mask of 8-bit elements (at most 4) that are all 1's in the range [begin, begin + count) and 0 elsewhere.
2798inline __m128i segment_mask_4x8(Index begin, Index count) {
2799 eigen_assert(begin >= 0 && begin + count <= 4);
2800 long long mask = 1;
2801 mask <<= CHAR_BIT * count;
2802 mask--;
2803 mask <<= CHAR_BIT * begin;
2804#if !EIGEN_ARCH_x86_64
2805 return _mm_loadl_epi64(reinterpret_cast<const __m128i*>(&mask));
2806#else
2807 return _mm_cvtsi64_si128(mask);
2808#endif
2809}
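// Example (illustrative): segment_mask_4x8(1, 2) computes ((1 << 16) - 1) << 8 =
// 0x00ffff00, i.e. bytes 1 and 2 set, which the helpers below widen to lanes 1 and 2
// of a 32-bit or 64-bit element mask.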
2810
2811// returns a mask of 8-bit elements (at most 8) that are all 1's in the range [begin, begin + count) and 0 elsewhere.
2812inline __m128i segment_mask_8x8(Index begin, Index count) {
2813 eigen_assert(begin >= 0 && begin + count <= 8);
2814 long long mask = 1;
2815 // shift in two steps to avoid UB (a 64-bit shift by 64) when count == 8
2816 mask <<= (CHAR_BIT / 2) * count;
2817 mask <<= (CHAR_BIT / 2) * count;
2818 mask--;
2819 mask <<= CHAR_BIT * begin;
2820#if !EIGEN_ARCH_x86_64
2821 return _mm_loadl_epi64(reinterpret_cast<const __m128i*>(&mask));
2822#else
2823 return _mm_cvtsi64_si128(mask);
2824#endif
2825}
2826
2827// returns a mask of 32-bit elements (at most 4) that are all 1's in the range [begin, begin + count) and 0 elsewhere.
2828inline __m128i segment_mask_4x32(Index begin, Index count) {
2829 eigen_assert(begin >= 0 && begin + count <= 4);
2830 return _mm_cvtepi8_epi32(segment_mask_4x8(begin, count));
2831}
2832
2833// returns a mask of 64-bit elements (at most 2) that are all 1's in the range [begin, begin + count) and 0 elsewhere.
2834inline __m128i segment_mask_2x64(Index begin, Index count) {
2835 eigen_assert(begin >= 0 && begin + count <= 2);
2836 return _mm_cvtepi8_epi64(segment_mask_4x8(begin, count));
2837}
2838
2839// returns a mask of 32-bit elements (at most 8) that are all 1's in the range [begin, begin + count) and 0 elsewhere.
2840inline __m256i segment_mask_8x32(Index begin, Index count) {
2841 __m128i mask_epi8 = segment_mask_8x8(begin, count);
2842#ifdef EIGEN_VECTORIZE_AVX2
2843 __m256i mask_epi32 = _mm256_cvtepi8_epi32(mask_epi8);
2844#else
2845 __m128i mask_epi32_lo = _mm_cvtepi8_epi32(mask_epi8);
2846 __m128i mask_epi32_hi = _mm_cvtepi8_epi32(_mm_srli_epi64(mask_epi8, 32));
2847 __m256i mask_epi32 = _mm256_insertf128_si256(_mm256_castsi128_si256(mask_epi32_lo), mask_epi32_hi, 1);
2848#endif
2849 return mask_epi32;
2850}
2851
2852// returns a mask of 64-bit elements (at most 4) that are all 1's in the range [begin, begin + count) and 0 elsewhere.
2853inline __m256i segment_mask_4x64(Index begin, Index count) {
2854 __m128i mask_epi8 = segment_mask_4x8(begin, count);
2855#ifdef EIGEN_VECTORIZE_AVX2
2856 __m256i mask_epi64 = _mm256_cvtepi8_epi64(mask_epi8);
2857#else
2858 __m128i mask_epi64_lo = _mm_cvtepi8_epi64(mask_epi8);
2859 __m128i mask_epi64_hi = _mm_cvtepi8_epi64(_mm_srli_epi64(mask_epi8, 16));
2860 __m256i mask_epi64 = _mm256_insertf128_si256(_mm256_castsi128_si256(mask_epi64_lo), mask_epi64_hi, 1);
2861#endif
2862 return mask_epi64;
2863}
2864
2865/*---------------- float ----------------*/
2866
2867template <>
2868struct has_packet_segment<Packet4f> : std::true_type {};
2869
2870template <>
2871struct has_packet_segment<Packet8f> : std::true_type {};
2872
2873template <>
2874inline Packet4f ploaduSegment<Packet4f>(const float* from, Index begin, Index count) {
2875 return _mm_maskload_ps(from, segment_mask_4x32(begin, count));
2876}
2877
2878template <>
2879inline void pstoreuSegment<float, Packet4f>(float* to, const Packet4f& from, Index begin, Index count) {
2880 _mm_maskstore_ps(to, segment_mask_4x32(begin, count), from);
2881}
2882
2883template <>
2884inline Packet8f ploaduSegment<Packet8f>(const float* from, Index begin, Index count) {
2885 return _mm256_maskload_ps(from, segment_mask_8x32(begin, count));
2886}
2887
2888template <>
2889inline void pstoreuSegment<float, Packet8f>(float* to, const Packet8f& from, Index begin, Index count) {
2890 _mm256_maskstore_ps(to, segment_mask_8x32(begin, count), from);
2891}
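// Usage sketch (illustrative): ploaduSegment<Packet8f>(p, 2, 3) loads p[2..4] into
// lanes 2..4 and zeroes the remaining lanes; pstoreuSegment writes back only those
// lanes, leaving the other elements behind `p` untouched.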
2892
2893/*---------------- int32 ----------------*/
2894
2895template <>
2896struct has_packet_segment<Packet4i> : std::true_type {};
2897
2898template <>
2899struct has_packet_segment<Packet8i> : std::true_type {};
2900
2901#ifdef EIGEN_VECTORIZE_AVX2
2902
2903template <>
2904inline Packet4i ploaduSegment<Packet4i>(const int* from, Index begin, Index count) {
2905 return _mm_maskload_epi32(from, segment_mask_4x32(begin, count));
2906}
2907
2908template <>
2909inline void pstoreuSegment<int, Packet4i>(int* to, const Packet4i& from, Index begin, Index count) {
2910 _mm_maskstore_epi32(to, segment_mask_4x32(begin, count), from);
2911}
2912
2913template <>
2914inline Packet8i ploaduSegment<Packet8i>(const int* from, Index begin, Index count) {
2915 return _mm256_maskload_epi32(from, segment_mask_8x32(begin, count));
2916}
2917
2918template <>
2919inline void pstoreuSegment<int, Packet8i>(int* to, const Packet8i& from, Index begin, Index count) {
2920 _mm256_maskstore_epi32(to, segment_mask_8x32(begin, count), from);
2921}
2922
2923#else
2924
2925template <>
2926inline Packet4i ploaduSegment<Packet4i>(const int* from, Index begin, Index count) {
2927 return _mm_castps_si128(ploaduSegment<Packet4f>(reinterpret_cast<const float*>(from), begin, count));
2928}
2929
2930template <>
2931inline void pstoreuSegment<int, Packet4i>(int* to, const Packet4i& from, Index begin, Index count) {
2932 pstoreuSegment<float, Packet4f>(reinterpret_cast<float*>(to), _mm_castsi128_ps(from), begin, count);
2933}
2934
2935template <>
2936inline Packet8i ploaduSegment<Packet8i>(const int* from, Index begin, Index count) {
2937 return _mm256_castps_si256(ploaduSegment<Packet8f>(reinterpret_cast<const float*>(from), begin, count));
2938}
2939
2940template <>
2941inline void pstoreuSegment<int, Packet8i>(int* to, const Packet8i& from, Index begin, Index count) {
2942 pstoreuSegment<float, Packet8f>(reinterpret_cast<float*>(to), _mm256_castsi256_ps(from), begin, count);
2943}
2944
2945#endif
2946
/*---------------- uint32 ----------------*/

template <>
struct has_packet_segment<Packet4ui> : std::true_type {};

template <>
struct has_packet_segment<Packet8ui> : std::true_type {};

template <>
inline Packet4ui ploaduSegment<Packet4ui>(const uint32_t* from, Index begin, Index count) {
  return Packet4ui(ploaduSegment<Packet4i>(reinterpret_cast<const int*>(from), begin, count));
}

template <>
inline void pstoreuSegment<uint32_t, Packet4ui>(uint32_t* to, const Packet4ui& from, Index begin, Index count) {
  pstoreuSegment<int, Packet4i>(reinterpret_cast<int*>(to), Packet4i(from), begin, count);
}

template <>
inline Packet8ui ploaduSegment<Packet8ui>(const uint32_t* from, Index begin, Index count) {
  return Packet8ui(ploaduSegment<Packet8i>(reinterpret_cast<const int*>(from), begin, count));
}

template <>
inline void pstoreuSegment<uint32_t, Packet8ui>(uint32_t* to, const Packet8ui& from, Index begin, Index count) {
  pstoreuSegment<int, Packet8i>(reinterpret_cast<int*>(to), Packet8i(from), begin, count);
}

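// Note: masked loads/stores are pure bit movement, so the unsigned
// specializations above simply reuse the signed int32 implementations via the
// eigen_packet_wrapper conversions (Packet4ui(...) / Packet4i(...));
// signedness never affects which bytes are transferred.
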
/*---------------- double ----------------*/

template <>
struct has_packet_segment<Packet2d> : std::true_type {};

template <>
struct has_packet_segment<Packet4d> : std::true_type {};

template <>
inline Packet2d ploaduSegment<Packet2d>(const double* from, Index begin, Index count) {
  return _mm_maskload_pd(from, segment_mask_2x64(begin, count));
}

template <>
inline void pstoreuSegment<double, Packet2d>(double* to, const Packet2d& from, Index begin, Index count) {
  _mm_maskstore_pd(to, segment_mask_2x64(begin, count), from);
}

template <>
inline Packet4d ploaduSegment<Packet4d>(const double* from, Index begin, Index count) {
  return _mm256_maskload_pd(from, segment_mask_4x64(begin, count));
}

template <>
inline void pstoreuSegment<double, Packet4d>(double* to, const Packet4d& from, Index begin, Index count) {
  _mm256_maskstore_pd(to, segment_mask_4x64(begin, count), from);
}

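// Usage sketch (hypothetical caller; `work` stands for any lane-wise packet
// computation, and `data`/`n` are assumed inputs): the canonical pattern is a
// full-packet main loop followed by a single masked tail, instead of a scalar
// cleanup loop:
//
//   Index i = 0;
//   for (; i + 4 <= n; i += 4) {
//     Packet4d p = ploadu<Packet4d>(data + i);
//     pstoreu(data + i, work(p));
//   }
//   if (i < n) {  // 1 to 3 doubles remain
//     Packet4d p = ploaduSegment<Packet4d>(data + i, 0, n - i);
//     pstoreuSegment<double, Packet4d>(data + i, work(p), 0, n - i);
//   }
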
#ifdef EIGEN_VECTORIZE_AVX2

/*---------------- int64_t ----------------*/

template <>
struct has_packet_segment<Packet2l> : std::true_type {};

template <>
struct has_packet_segment<Packet4l> : std::true_type {};

template <>
inline Packet2l ploaduSegment<Packet2l>(const int64_t* from, Index begin, Index count) {
  return _mm_maskload_epi64(reinterpret_cast<const long long*>(from), segment_mask_2x64(begin, count));
}

template <>
inline void pstoreuSegment<int64_t, Packet2l>(int64_t* to, const Packet2l& from, Index begin, Index count) {
  _mm_maskstore_epi64(reinterpret_cast<long long*>(to), segment_mask_2x64(begin, count), from);
}

template <>
inline Packet4l ploaduSegment<Packet4l>(const int64_t* from, Index begin, Index count) {
  return _mm256_maskload_epi64(reinterpret_cast<const long long*>(from), segment_mask_4x64(begin, count));
}

template <>
inline void pstoreuSegment<int64_t, Packet4l>(int64_t* to, const Packet4l& from, Index begin, Index count) {
  _mm256_maskstore_epi64(reinterpret_cast<long long*>(to), segment_mask_4x64(begin, count), from);
}

/*---------------- uint64_t ----------------*/

template <>
struct has_packet_segment<Packet4ul> : std::true_type {};

template <>
inline Packet4ul ploaduSegment<Packet4ul>(const uint64_t* from, Index begin, Index count) {
  return Packet4ul(ploaduSegment<Packet4l>(reinterpret_cast<const int64_t*>(from), begin, count));
}

template <>
inline void pstoreuSegment<uint64_t, Packet4ul>(uint64_t* to, const Packet4ul& from, Index begin, Index count) {
  pstoreuSegment<int64_t, Packet4l>(reinterpret_cast<int64_t*>(to), Packet4l(from), begin, count);
}

#endif
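// Note on the casts above: the Intel intrinsics take `long long*` operands,
// while int64_t may be a distinct type (`long` on LP64 platforms), so the
// pointers are reinterpret_cast rather than passed directly.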

/*---------------- end load/store segment support ----------------*/

}  // end namespace internal

}  // end namespace Eigen

#endif  // EIGEN_PACKET_MATH_AVX_H