10#ifndef EIGEN_PACKET_MATH_AVX_H
11#define EIGEN_PACKET_MATH_AVX_H
14#include "../../InternalHeaderCheck.h"
20#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
21#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
24#if !defined(EIGEN_VECTORIZE_AVX512) && !defined(EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS)
25#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16
28#ifdef EIGEN_VECTORIZE_FMA
29#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
30#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
34typedef __m256 Packet8f;
35typedef eigen_packet_wrapper<__m256i, 0> Packet8i;
36typedef __m256d Packet4d;
37#ifndef EIGEN_VECTORIZE_AVX512FP16
38typedef eigen_packet_wrapper<__m128i, 2> Packet8h;
40typedef eigen_packet_wrapper<__m128i, 3> Packet8bf;
41typedef eigen_packet_wrapper<__m256i, 4> Packet8ui;
43#ifdef EIGEN_VECTORIZE_AVX2
45typedef eigen_packet_wrapper<__m256i, 3> Packet4l;
46typedef eigen_packet_wrapper<__m256i, 5> Packet4ul;
50struct is_arithmetic<__m256> {
51 enum { value =
true };
54struct is_arithmetic<__m256i> {
55 enum { value =
true };
58struct is_arithmetic<__m256d> {
59 enum { value =
true };
62struct is_arithmetic<Packet8i> {
63 enum { value =
true };
69struct is_arithmetic<Packet8ui> {
70 enum { value =
false };
72#ifndef EIGEN_VECTORIZE_AVX512FP16
74struct is_arithmetic<Packet8h> {
75 enum { value =
true };
79struct is_arithmetic<Packet8bf> {
80 enum { value =
true };
82#ifdef EIGEN_VECTORIZE_AVX2
84struct is_arithmetic<Packet4l> {
85 enum { value =
true };
91struct is_arithmetic<Packet4ul> {
92 enum { value =
false };
98#ifndef EIGEN_VECTORIZE_AVX512
100struct packet_traits<float> : default_packet_traits {
101 typedef Packet8f type;
102 typedef Packet4f half;
110 HasReciprocal = EIGEN_FAST_MATH,
111 HasSin = EIGEN_FAST_MATH,
112 HasCos = EIGEN_FAST_MATH,
127 HasTanh = EIGEN_FAST_MATH,
128 HasErf = EIGEN_FAST_MATH,
129 HasErfc = EIGEN_FAST_MATH,
134struct packet_traits<double> : default_packet_traits {
135 typedef Packet4d type;
136 typedef Packet2d half;
144#ifdef EIGEN_VECTORIZE_AVX2
145 HasSin = EIGEN_FAST_MATH,
146 HasCos = EIGEN_FAST_MATH,
148 HasTanh = EIGEN_FAST_MATH,
164struct packet_traits<Eigen::half> : default_packet_traits {
165 typedef Packet8h type;
167 typedef Packet8h half;
178 HasSin = EIGEN_FAST_MATH,
179 HasCos = EIGEN_FAST_MATH,
193 HasTanh = EIGEN_FAST_MATH,
194 HasErf = EIGEN_FAST_MATH,
202struct packet_traits<bfloat16> : default_packet_traits {
203 typedef Packet8bf type;
206 typedef Packet8bf half;
217 HasSin = EIGEN_FAST_MATH,
218 HasCos = EIGEN_FAST_MATH,
232 HasTanh = EIGEN_FAST_MATH,
233 HasErf = EIGEN_FAST_MATH,
241struct packet_traits<int> : default_packet_traits {
242 typedef Packet8i type;
243 typedef Packet4i half;
244 enum { Vectorizable = 1, AlignedOnScalar = 1, HasCmp = 1, HasDiv = 1, size = 8 };
247struct packet_traits<uint32_t> : default_packet_traits {
248 typedef Packet8ui type;
249 typedef Packet4ui half;
266#ifdef EIGEN_VECTORIZE_AVX2
268struct packet_traits<int64_t> : default_packet_traits {
269 typedef Packet4l type;
270 typedef Packet2l half;
271 enum { Vectorizable = 1, AlignedOnScalar = 1, HasCmp = 1, size = 4 };
274struct packet_traits<uint64_t> : default_packet_traits {
275 typedef Packet4ul type;
278 typedef Packet4ul half;
300struct scalar_div_cost<float, true> {
304struct scalar_div_cost<double, true> {
309struct unpacket_traits<Packet8f> {
311 typedef Packet4f half;
312 typedef Packet8i integer_packet;
313 typedef uint8_t mask_t;
318 masked_load_available =
true,
319 masked_store_available =
true
320#ifdef EIGEN_VECTORIZE_AVX512
322 masked_fpops_available =
true
327struct unpacket_traits<Packet4d> {
329 typedef Packet2d half;
330#ifdef EIGEN_VECTORIZE_AVX2
331 typedef Packet4l integer_packet;
337 masked_load_available =
false,
338 masked_store_available =
false
342struct unpacket_traits<Packet8i> {
344 typedef Packet4i half;
349 masked_load_available =
false,
350 masked_store_available =
false
354struct unpacket_traits<Packet8ui> {
355 typedef uint32_t type;
356 typedef Packet4ui half;
361 masked_load_available =
false,
362 masked_store_available =
false
365#ifdef EIGEN_VECTORIZE_AVX2
367struct unpacket_traits<Packet4l> {
368 typedef int64_t type;
369 typedef Packet2l half;
374 masked_load_available =
false,
375 masked_store_available =
false
379struct unpacket_traits<Packet4ul> {
380 typedef uint64_t type;
381 typedef Packet4ul half;
386 masked_load_available =
false,
387 masked_store_available =
false
392struct unpacket_traits<Packet8bf> {
393 typedef bfloat16 type;
394 typedef Packet8bf half;
399 masked_load_available =
false,
400 masked_store_available =
false
406EIGEN_STRONG_INLINE __m128i Pack16To8(Packet8f rf) {
407 return _mm_packs_epi32(_mm256_extractf128_si256(_mm256_castps_si256(rf), 0),
408 _mm256_extractf128_si256(_mm256_castps_si256(rf), 1));
411#ifdef EIGEN_VECTORIZE_AVX2
413EIGEN_STRONG_INLINE Packet4l pset1<Packet4l>(
const int64_t& from) {
414 return _mm256_set1_epi64x(from);
417EIGEN_STRONG_INLINE Packet4ul pset1<Packet4ul>(
const uint64_t& from) {
418 return _mm256_set1_epi64x(numext::bit_cast<uint64_t>(from));
421EIGEN_STRONG_INLINE Packet4l pzero(
const Packet4l& ) {
422 return _mm256_setzero_si256();
425EIGEN_STRONG_INLINE Packet4ul pzero(
const Packet4ul& ) {
426 return _mm256_setzero_si256();
429EIGEN_STRONG_INLINE Packet4l peven_mask(
const Packet4l& ) {
430 return _mm256_set_epi64x(0ll, -1ll, 0ll, -1ll);
433EIGEN_STRONG_INLINE Packet4ul peven_mask(
const Packet4ul& ) {
434 return _mm256_set_epi64x(0ll, -1ll, 0ll, -1ll);
437EIGEN_STRONG_INLINE Packet4l pload1<Packet4l>(
const int64_t* from) {
438 return _mm256_set1_epi64x(*from);
441EIGEN_STRONG_INLINE Packet4ul pload1<Packet4ul>(
const uint64_t* from) {
442 return _mm256_set1_epi64x(*from);
445EIGEN_STRONG_INLINE Packet4l padd<Packet4l>(
const Packet4l& a,
const Packet4l& b) {
446 return _mm256_add_epi64(a, b);
449EIGEN_STRONG_INLINE Packet4ul padd<Packet4ul>(
const Packet4ul& a,
const Packet4ul& b) {
450 return _mm256_add_epi64(a, b);
453EIGEN_STRONG_INLINE Packet4l plset<Packet4l>(
const int64_t& a) {
454 return padd(pset1<Packet4l>(a), Packet4l(_mm256_set_epi64x(3ll, 2ll, 1ll, 0ll)));
457EIGEN_STRONG_INLINE Packet4ul plset<Packet4ul>(
const uint64_t& a) {
458 return padd(pset1<Packet4ul>(a), Packet4ul(_mm256_set_epi64x(3ll, 2ll, 1ll, 0ll)));
461EIGEN_STRONG_INLINE Packet4l psub<Packet4l>(
const Packet4l& a,
const Packet4l& b) {
462 return _mm256_sub_epi64(a, b);
465EIGEN_STRONG_INLINE Packet4ul psub<Packet4ul>(
const Packet4ul& a,
const Packet4ul& b) {
466 return _mm256_sub_epi64(a, b);
469EIGEN_STRONG_INLINE Packet4l pnegate(
const Packet4l& a) {
470 return psub(pzero(a), a);
473EIGEN_STRONG_INLINE Packet4l pconj(
const Packet4l& a) {
477EIGEN_STRONG_INLINE Packet4l pcmp_le(
const Packet4l& a,
const Packet4l& b) {
478 return _mm256_xor_si256(_mm256_cmpgt_epi64(a, b), _mm256_set1_epi32(-1));
481EIGEN_STRONG_INLINE Packet4ul pcmp_le(
const Packet4ul& a,
const Packet4ul& b) {
482 return (Packet4ul)pcmp_le((Packet4l)psub(a, pset1<Packet4ul>(0x8000000000000000UL)),
483 (Packet4l)psub(b, pset1<Packet4ul>(0x8000000000000000UL)));
486EIGEN_STRONG_INLINE Packet4l pcmp_lt(
const Packet4l& a,
const Packet4l& b) {
487 return _mm256_cmpgt_epi64(b, a);
490EIGEN_STRONG_INLINE Packet4ul pcmp_lt(
const Packet4ul& a,
const Packet4ul& b) {
491 return (Packet4ul)pcmp_lt((Packet4l)psub(a, pset1<Packet4ul>(0x8000000000000000UL)),
492 (Packet4l)psub(b, pset1<Packet4ul>(0x8000000000000000UL)));
495EIGEN_STRONG_INLINE Packet4l pcmp_eq(
const Packet4l& a,
const Packet4l& b) {
496 return _mm256_cmpeq_epi64(a, b);
499EIGEN_STRONG_INLINE Packet4ul pcmp_eq(
const Packet4ul& a,
const Packet4ul& b) {
500 return _mm256_cmpeq_epi64(a, b);
503EIGEN_STRONG_INLINE Packet4l ptrue<Packet4l>(
const Packet4l& a) {
504 return _mm256_cmpeq_epi64(a, a);
507EIGEN_STRONG_INLINE Packet4ul ptrue<Packet4ul>(
const Packet4ul& a) {
508 return _mm256_cmpeq_epi64(a, a);
511EIGEN_STRONG_INLINE Packet4l pand<Packet4l>(
const Packet4l& a,
const Packet4l& b) {
512 return _mm256_and_si256(a, b);
515EIGEN_STRONG_INLINE Packet4l por<Packet4l>(
const Packet4l& a,
const Packet4l& b) {
516 return _mm256_or_si256(a, b);
519EIGEN_STRONG_INLINE Packet4l pxor<Packet4l>(
const Packet4l& a,
const Packet4l& b) {
520 return _mm256_xor_si256(a, b);
523EIGEN_STRONG_INLINE Packet4ul pxor<Packet4ul>(
const Packet4ul& a,
const Packet4ul& b) {
524 return _mm256_xor_si256(a, b);
527EIGEN_STRONG_INLINE Packet4l pandnot<Packet4l>(
const Packet4l& a,
const Packet4l& b) {
528 return _mm256_andnot_si256(b, a);
531EIGEN_STRONG_INLINE Packet4l plogical_shift_right(Packet4l a) {
532 return _mm256_srli_epi64(a, N);
535EIGEN_STRONG_INLINE Packet4l plogical_shift_left(Packet4l a) {
536 return _mm256_slli_epi64(a, N);
538#ifdef EIGEN_VECTORIZE_AVX512FP16
540EIGEN_STRONG_INLINE Packet4l parithmetic_shift_right(Packet4l a) {
541 return _mm256_srai_epi64(a, N);
545EIGEN_STRONG_INLINE std::enable_if_t<(N == 0), Packet4l> parithmetic_shift_right(Packet4l a) {
549EIGEN_STRONG_INLINE std::enable_if_t<(N > 0) && (N < 32), Packet4l> parithmetic_shift_right(Packet4l a) {
550 __m256i hi_word = _mm256_srai_epi32(a, N);
551 __m256i lo_word = _mm256_srli_epi64(a, N);
552 return _mm256_blend_epi32(hi_word, lo_word, 0b01010101);
555EIGEN_STRONG_INLINE std::enable_if_t<(N >= 32) && (N < 63), Packet4l> parithmetic_shift_right(Packet4l a) {
556 __m256i hi_word = _mm256_srai_epi32(a, 31);
557 __m256i lo_word = _mm256_shuffle_epi32(_mm256_srai_epi32(a, N - 32), (shuffle_mask<1, 1, 3, 3>::mask));
558 return _mm256_blend_epi32(hi_word, lo_word, 0b01010101);
561EIGEN_STRONG_INLINE std::enable_if_t<(N == 63), Packet4l> parithmetic_shift_right(Packet4l a) {
562 return _mm256_cmpgt_epi64(_mm256_setzero_si256(), a);
565EIGEN_STRONG_INLINE std::enable_if_t<(N < 0) || (N > 63), Packet4l> parithmetic_shift_right(Packet4l a) {
566 return parithmetic_shift_right<int(N & 63)>(a);
570EIGEN_STRONG_INLINE Packet4l pload<Packet4l>(
const int64_t* from) {
571 EIGEN_DEBUG_ALIGNED_LOAD
return _mm256_load_si256(
reinterpret_cast<const __m256i*
>(from));
574EIGEN_STRONG_INLINE Packet4ul pload<Packet4ul>(
const uint64_t* from) {
575 EIGEN_DEBUG_ALIGNED_LOAD
return _mm256_load_si256(
reinterpret_cast<const __m256i*
>(from));
578EIGEN_STRONG_INLINE Packet4l ploadu<Packet4l>(
const int64_t* from) {
579 EIGEN_DEBUG_UNALIGNED_LOAD
return _mm256_loadu_si256(
reinterpret_cast<const __m256i*
>(from));
582EIGEN_STRONG_INLINE Packet4ul ploadu<Packet4ul>(
const uint64_t* from) {
583 EIGEN_DEBUG_UNALIGNED_LOAD
return _mm256_loadu_si256(
reinterpret_cast<const __m256i*
>(from));
587EIGEN_STRONG_INLINE Packet4l ploaddup<Packet4l>(
const int64_t* from) {
588 const Packet4l a = _mm256_castsi128_si256(_mm_loadu_si128(
reinterpret_cast<const __m128i*
>(from)));
589 return _mm256_permutevar8x32_epi32(a, _mm256_setr_epi32(0, 1, 0, 1, 2, 3, 2, 3));
593EIGEN_STRONG_INLINE Packet4ul ploaddup<Packet4ul>(
const uint64_t* from) {
594 const Packet4ul a = _mm256_castsi128_si256(_mm_loadu_si128(
reinterpret_cast<const __m128i*
>(from)));
595 return _mm256_permutevar8x32_epi32(a, _mm256_setr_epi32(0, 1, 0, 1, 2, 3, 2, 3));
598EIGEN_STRONG_INLINE
void pstore<int64_t>(int64_t* to,
const Packet4l& from) {
599 EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(
reinterpret_cast<__m256i*
>(to), from);
602EIGEN_STRONG_INLINE
void pstore<uint64_t>(uint64_t* to,
const Packet4ul& from) {
603 EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(
reinterpret_cast<__m256i*
>(to), from);
606EIGEN_STRONG_INLINE
void pstoreu<int64_t>(int64_t* to,
const Packet4l& from) {
607 EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(
reinterpret_cast<__m256i*
>(to), from);
610EIGEN_STRONG_INLINE
void pstoreu<uint64_t>(uint64_t* to,
const Packet4ul& from) {
611 EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(
reinterpret_cast<__m256i*
>(to), from);
614EIGEN_DEVICE_FUNC
inline Packet4l pgather<int64_t, Packet4l>(
const int64_t* from,
Index stride) {
615 return _mm256_set_epi64x(from[3 * stride], from[2 * stride], from[1 * stride], from[0 * stride]);
618EIGEN_DEVICE_FUNC
inline Packet4ul pgather<uint64_t, Packet4ul>(
const uint64_t* from,
Index stride) {
619 return _mm256_set_epi64x(from[3 * stride], from[2 * stride], from[1 * stride], from[0 * stride]);
622EIGEN_DEVICE_FUNC
inline void pscatter<int64_t, Packet4l>(int64_t* to,
const Packet4l& from,
Index stride) {
623 __m128i low = _mm256_extractf128_si256(from, 0);
624 to[stride * 0] = _mm_extract_epi64_0(low);
625 to[stride * 1] = _mm_extract_epi64_1(low);
627 __m128i high = _mm256_extractf128_si256(from, 1);
628 to[stride * 2] = _mm_extract_epi64_0(high);
629 to[stride * 3] = _mm_extract_epi64_1(high);
632EIGEN_DEVICE_FUNC
inline void pscatter<uint64_t, Packet4ul>(uint64_t* to,
const Packet4ul& from,
Index stride) {
633 __m128i low = _mm256_extractf128_si256(from, 0);
634 to[stride * 0] = _mm_extract_epi64_0(low);
635 to[stride * 1] = _mm_extract_epi64_1(low);
637 __m128i high = _mm256_extractf128_si256(from, 1);
638 to[stride * 2] = _mm_extract_epi64_0(high);
639 to[stride * 3] = _mm_extract_epi64_1(high);
642EIGEN_STRONG_INLINE
void pstore1<Packet4l>(int64_t* to,
const int64_t& a) {
643 Packet4l pa = pset1<Packet4l>(a);
647EIGEN_STRONG_INLINE
void pstore1<Packet4ul>(uint64_t* to,
const uint64_t& a) {
648 Packet4ul pa = pset1<Packet4ul>(a);
652EIGEN_STRONG_INLINE int64_t pfirst<Packet4l>(
const Packet4l& a) {
653 return _mm_extract_epi64_0(_mm256_castsi256_si128(a));
656EIGEN_STRONG_INLINE uint64_t pfirst<Packet4ul>(
const Packet4ul& a) {
657 return _mm_extract_epi64_0(_mm256_castsi256_si128(a));
660#define MM256_SHUFFLE_EPI64(A, B, M) _mm256_shuffle_pd(_mm256_castsi256_pd(A), _mm256_castsi256_pd(B), M)
661EIGEN_DEVICE_FUNC
inline void ptranspose(PacketBlock<Packet4l, 4>& kernel) {
662 __m256d T0 = MM256_SHUFFLE_EPI64(kernel.packet[0], kernel.packet[1], 15);
663 __m256d T1 = MM256_SHUFFLE_EPI64(kernel.packet[0], kernel.packet[1], 0);
664 __m256d T2 = MM256_SHUFFLE_EPI64(kernel.packet[2], kernel.packet[3], 15);
665 __m256d T3 = MM256_SHUFFLE_EPI64(kernel.packet[2], kernel.packet[3], 0);
667 kernel.packet[1] = _mm256_castpd_si256(_mm256_permute2f128_pd(T0, T2, 32));
668 kernel.packet[3] = _mm256_castpd_si256(_mm256_permute2f128_pd(T0, T2, 49));
669 kernel.packet[0] = _mm256_castpd_si256(_mm256_permute2f128_pd(T1, T3, 32));
670 kernel.packet[2] = _mm256_castpd_si256(_mm256_permute2f128_pd(T1, T3, 49));
672EIGEN_DEVICE_FUNC
inline void ptranspose(PacketBlock<Packet4ul, 4>& kernel) {
673 ptranspose((PacketBlock<Packet4l, 4>&)kernel);
676EIGEN_STRONG_INLINE Packet4l pmin<Packet4l>(
const Packet4l& a,
const Packet4l& b) {
677 __m256i cmp = _mm256_cmpgt_epi64(a, b);
678 __m256i a_min = _mm256_andnot_si256(cmp, a);
679 __m256i b_min = _mm256_and_si256(cmp, b);
680 return Packet4l(_mm256_or_si256(a_min, b_min));
683EIGEN_STRONG_INLINE Packet4ul pmin<Packet4ul>(
const Packet4ul& a,
const Packet4ul& b) {
684 return padd((Packet4ul)pmin((Packet4l)psub(a, pset1<Packet4ul>(0x8000000000000000UL)),
685 (Packet4l)psub(b, pset1<Packet4ul>(0x8000000000000000UL))),
686 pset1<Packet4ul>(0x8000000000000000UL));
689EIGEN_STRONG_INLINE Packet4l pmax<Packet4l>(
const Packet4l& a,
const Packet4l& b) {
690 __m256i cmp = _mm256_cmpgt_epi64(a, b);
691 __m256i a_min = _mm256_and_si256(cmp, a);
692 __m256i b_min = _mm256_andnot_si256(cmp, b);
693 return Packet4l(_mm256_or_si256(a_min, b_min));
696EIGEN_STRONG_INLINE Packet4ul pmax<Packet4ul>(
const Packet4ul& a,
const Packet4ul& b) {
697 return padd((Packet4ul)pmax((Packet4l)psub(a, pset1<Packet4ul>(0x8000000000000000UL)),
698 (Packet4l)psub(b, pset1<Packet4ul>(0x8000000000000000UL))),
699 pset1<Packet4ul>(0x8000000000000000UL));
702EIGEN_STRONG_INLINE Packet4l pabs<Packet4l>(
const Packet4l& a) {
703 Packet4l pz = pzero<Packet4l>(a);
704 Packet4l cmp = _mm256_cmpgt_epi64(a, pz);
705 return psub(cmp, pxor(a, cmp));
708EIGEN_STRONG_INLINE Packet4ul pabs<Packet4ul>(
const Packet4ul& a) {
712EIGEN_STRONG_INLINE Packet4l pmul<Packet4l>(
const Packet4l& a,
const Packet4l& b) {
714 __m256i upper32_a = _mm256_srli_epi64(a, 32);
715 __m256i upper32_b = _mm256_srli_epi64(b, 32);
718 __m256i mul1 = _mm256_mul_epu32(upper32_a, b);
719 __m256i mul2 = _mm256_mul_epu32(upper32_b, a);
721 __m256i mul3 = _mm256_mul_epu32(a, b);
723 __m256i high = _mm256_slli_epi64(_mm256_add_epi64(mul1, mul2), 32);
724 return _mm256_add_epi64(high, mul3);
727EIGEN_STRONG_INLINE Packet4ul pmul<Packet4ul>(
const Packet4ul& a,
const Packet4ul& b) {
728 return (Packet4ul)pmul<Packet4l>((Packet4l)a, (Packet4l)b);
733EIGEN_STRONG_INLINE Packet8f pset1<Packet8f>(
const float& from) {
734 return _mm256_set1_ps(from);
737EIGEN_STRONG_INLINE Packet4d pset1<Packet4d>(
const double& from) {
738 return _mm256_set1_pd(from);
741EIGEN_STRONG_INLINE Packet8i pset1<Packet8i>(
const int& from) {
742 return _mm256_set1_epi32(from);
745EIGEN_STRONG_INLINE Packet8ui pset1<Packet8ui>(
const uint32_t& from) {
746 return _mm256_set1_epi32(from);
750EIGEN_STRONG_INLINE Packet8f pset1frombits<Packet8f>(
unsigned int from) {
751 return _mm256_castsi256_ps(pset1<Packet8i>(from));
754EIGEN_STRONG_INLINE Packet4d pset1frombits<Packet4d>(uint64_t from) {
755 return _mm256_castsi256_pd(_mm256_set1_epi64x(from));
759EIGEN_STRONG_INLINE Packet8f pzero(
const Packet8f& ) {
760 return _mm256_setzero_ps();
763EIGEN_STRONG_INLINE Packet4d pzero(
const Packet4d& ) {
764 return _mm256_setzero_pd();
767EIGEN_STRONG_INLINE Packet8i pzero(
const Packet8i& ) {
768 return _mm256_setzero_si256();
771EIGEN_STRONG_INLINE Packet8ui pzero(
const Packet8ui& ) {
772 return _mm256_setzero_si256();
776EIGEN_STRONG_INLINE Packet8f peven_mask(
const Packet8f& ) {
777 return _mm256_castsi256_ps(_mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1));
780EIGEN_STRONG_INLINE Packet8i peven_mask(
const Packet8i& ) {
781 return _mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1);
784EIGEN_STRONG_INLINE Packet8ui peven_mask(
const Packet8ui& ) {
785 return _mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1);
788EIGEN_STRONG_INLINE Packet4d peven_mask(
const Packet4d& ) {
789 return _mm256_castsi256_pd(_mm256_set_epi32(0, 0, -1, -1, 0, 0, -1, -1));
793EIGEN_STRONG_INLINE Packet8f pload1<Packet8f>(
const float* from) {
794 return _mm256_broadcast_ss(from);
797EIGEN_STRONG_INLINE Packet4d pload1<Packet4d>(
const double* from) {
798 return _mm256_broadcast_sd(from);
802EIGEN_STRONG_INLINE Packet8f padd<Packet8f>(
const Packet8f& a,
const Packet8f& b) {
803 return _mm256_add_ps(a, b);
805#ifdef EIGEN_VECTORIZE_AVX512
807EIGEN_STRONG_INLINE Packet8f padd<Packet8f>(
const Packet8f& a,
const Packet8f& b, uint8_t umask) {
808 __mmask16 mask =
static_cast<__mmask16
>(umask & 0x00FF);
809 return _mm512_castps512_ps256(_mm512_maskz_add_ps(mask, _mm512_castps256_ps512(a), _mm512_castps256_ps512(b)));
813EIGEN_STRONG_INLINE Packet4d padd<Packet4d>(
const Packet4d& a,
const Packet4d& b) {
814 return _mm256_add_pd(a, b);
817EIGEN_STRONG_INLINE Packet8i padd<Packet8i>(
const Packet8i& a,
const Packet8i& b) {
818#ifdef EIGEN_VECTORIZE_AVX2
819 return _mm256_add_epi32(a, b);
821 __m128i lo = _mm_add_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
822 __m128i hi = _mm_add_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
823 return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
827EIGEN_STRONG_INLINE Packet8ui padd<Packet8ui>(
const Packet8ui& a,
const Packet8ui& b) {
828#ifdef EIGEN_VECTORIZE_AVX2
829 return _mm256_add_epi32(a, b);
831 __m128i lo = _mm_add_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
832 __m128i hi = _mm_add_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
833 return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
838EIGEN_STRONG_INLINE Packet8f plset<Packet8f>(
const float& a) {
839 return padd(pset1<Packet8f>(a), _mm256_set_ps(7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0));
842EIGEN_STRONG_INLINE Packet4d plset<Packet4d>(
const double& a) {
843 return padd(pset1<Packet4d>(a), _mm256_set_pd(3.0, 2.0, 1.0, 0.0));
846EIGEN_STRONG_INLINE Packet8i plset<Packet8i>(
const int& a) {
847 return padd(pset1<Packet8i>(a), (Packet8i)_mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0));
850EIGEN_STRONG_INLINE Packet8ui plset<Packet8ui>(
const uint32_t& a) {
851 return padd(pset1<Packet8ui>(a), (Packet8ui)_mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0));
855EIGEN_STRONG_INLINE Packet8f psub<Packet8f>(
const Packet8f& a,
const Packet8f& b) {
856 return _mm256_sub_ps(a, b);
859EIGEN_STRONG_INLINE Packet4d psub<Packet4d>(
const Packet4d& a,
const Packet4d& b) {
860 return _mm256_sub_pd(a, b);
863EIGEN_STRONG_INLINE Packet8i psub<Packet8i>(
const Packet8i& a,
const Packet8i& b) {
864#ifdef EIGEN_VECTORIZE_AVX2
865 return _mm256_sub_epi32(a, b);
867 __m128i lo = _mm_sub_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
868 __m128i hi = _mm_sub_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
869 return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
873EIGEN_STRONG_INLINE Packet8ui psub<Packet8ui>(
const Packet8ui& a,
const Packet8ui& b) {
874#ifdef EIGEN_VECTORIZE_AVX2
875 return _mm256_sub_epi32(a, b);
877 __m128i lo = _mm_sub_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
878 __m128i hi = _mm_sub_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
879 return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
884EIGEN_STRONG_INLINE Packet8f pnegate(
const Packet8f& a) {
885 const Packet8f mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x80000000));
886 return _mm256_xor_ps(a, mask);
889EIGEN_STRONG_INLINE Packet4d pnegate(
const Packet4d& a) {
890 const Packet4d mask = _mm256_castsi256_pd(_mm256_set1_epi64x(0x8000000000000000ULL));
891 return _mm256_xor_pd(a, mask);
894EIGEN_STRONG_INLINE Packet8i pnegate(
const Packet8i& a) {
895 return psub(pzero(a), a);
899EIGEN_STRONG_INLINE Packet8f pconj(
const Packet8f& a) {
903EIGEN_STRONG_INLINE Packet4d pconj(
const Packet4d& a) {
907EIGEN_STRONG_INLINE Packet8i pconj(
const Packet8i& a) {
912EIGEN_STRONG_INLINE Packet8f pmul<Packet8f>(
const Packet8f& a,
const Packet8f& b) {
913 return _mm256_mul_ps(a, b);
916EIGEN_STRONG_INLINE Packet4d pmul<Packet4d>(
const Packet4d& a,
const Packet4d& b) {
917 return _mm256_mul_pd(a, b);
920EIGEN_STRONG_INLINE Packet8i pmul<Packet8i>(
const Packet8i& a,
const Packet8i& b) {
921#ifdef EIGEN_VECTORIZE_AVX2
922 return _mm256_mullo_epi32(a, b);
924 const __m128i lo = _mm_mullo_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
925 const __m128i hi = _mm_mullo_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
926 return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
930EIGEN_STRONG_INLINE Packet8ui pmul<Packet8ui>(
const Packet8ui& a,
const Packet8ui& b) {
931#ifdef EIGEN_VECTORIZE_AVX2
932 return _mm256_mullo_epi32(a, b);
934 const __m128i lo = _mm_mullo_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
935 const __m128i hi = _mm_mullo_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
936 return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
941EIGEN_STRONG_INLINE Packet8f pdiv<Packet8f>(
const Packet8f& a,
const Packet8f& b) {
942 return _mm256_div_ps(a, b);
945EIGEN_STRONG_INLINE Packet4d pdiv<Packet4d>(
const Packet4d& a,
const Packet4d& b) {
946 return _mm256_div_pd(a, b);
950EIGEN_STRONG_INLINE Packet8i pdiv<Packet8i>(
const Packet8i& a,
const Packet8i& b) {
951#ifdef EIGEN_VECTORIZE_AVX512
952 return _mm512_cvttpd_epi32(_mm512_div_pd(_mm512_cvtepi32_pd(a), _mm512_cvtepi32_pd(b)));
954 Packet4i lo = pdiv<Packet4i>(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
955 Packet4i hi = pdiv<Packet4i>(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
956 return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1);
#ifdef EIGEN_VECTORIZE_FMA
// Fused multiply ops: a*b+c, a*b-c, -(a*b)+c, -(a*b)-c with one rounding.
template <>
EIGEN_STRONG_INLINE Packet8f pmadd(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
  return _mm256_fmadd_ps(a, b, c);
}
template <>
EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
  return _mm256_fmadd_pd(a, b, c);
}
template <>
EIGEN_STRONG_INLINE Packet8f pmsub(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
  return _mm256_fmsub_ps(a, b, c);
}
template <>
EIGEN_STRONG_INLINE Packet4d pmsub(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
  return _mm256_fmsub_pd(a, b, c);
}
template <>
EIGEN_STRONG_INLINE Packet8f pnmadd(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
  return _mm256_fnmadd_ps(a, b, c);
}
template <>
EIGEN_STRONG_INLINE Packet4d pnmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
  return _mm256_fnmadd_pd(a, b, c);
}
template <>
EIGEN_STRONG_INLINE Packet8f pnmsub(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
  return _mm256_fnmsub_ps(a, b, c);
}
template <>
EIGEN_STRONG_INLINE Packet4d pnmsub(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
  return _mm256_fnmsub_pd(a, b, c);
}
#endif
1003EIGEN_STRONG_INLINE Packet8f pcmp_le(
const Packet8f& a,
const Packet8f& b) {
1004 return _mm256_cmp_ps(a, b, _CMP_LE_OQ);
1007EIGEN_STRONG_INLINE Packet8f pcmp_lt(
const Packet8f& a,
const Packet8f& b) {
1008 return _mm256_cmp_ps(a, b, _CMP_LT_OQ);
1011EIGEN_STRONG_INLINE Packet8f pcmp_lt_or_nan(
const Packet8f& a,
const Packet8f& b) {
1012 return _mm256_cmp_ps(a, b, _CMP_NGE_UQ);
1015EIGEN_STRONG_INLINE Packet8f pcmp_eq(
const Packet8f& a,
const Packet8f& b) {
1016 return _mm256_cmp_ps(a, b, _CMP_EQ_OQ);
1019EIGEN_STRONG_INLINE Packet8f pisnan(
const Packet8f& a) {
1020 return _mm256_cmp_ps(a, a, _CMP_UNORD_Q);
1024EIGEN_STRONG_INLINE Packet4d pcmp_le(
const Packet4d& a,
const Packet4d& b) {
1025 return _mm256_cmp_pd(a, b, _CMP_LE_OQ);
1028EIGEN_STRONG_INLINE Packet4d pcmp_lt(
const Packet4d& a,
const Packet4d& b) {
1029 return _mm256_cmp_pd(a, b, _CMP_LT_OQ);
1032EIGEN_STRONG_INLINE Packet4d pcmp_lt_or_nan(
const Packet4d& a,
const Packet4d& b) {
1033 return _mm256_cmp_pd(a, b, _CMP_NGE_UQ);
1036EIGEN_STRONG_INLINE Packet4d pcmp_eq(
const Packet4d& a,
const Packet4d& b) {
1037 return _mm256_cmp_pd(a, b, _CMP_EQ_OQ);
1041EIGEN_STRONG_INLINE Packet8i pcmp_le(
const Packet8i& a,
const Packet8i& b) {
1042#ifdef EIGEN_VECTORIZE_AVX2
1043 return _mm256_xor_si256(_mm256_cmpgt_epi32(a, b), _mm256_set1_epi32(-1));
1045 __m128i lo = _mm_cmpgt_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
1046 lo = _mm_xor_si128(lo, _mm_set1_epi32(-1));
1047 __m128i hi = _mm_cmpgt_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
1048 hi = _mm_xor_si128(hi, _mm_set1_epi32(-1));
1049 return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
1053EIGEN_STRONG_INLINE Packet8i pcmp_lt(
const Packet8i& a,
const Packet8i& b) {
1054#ifdef EIGEN_VECTORIZE_AVX2
1055 return _mm256_cmpgt_epi32(b, a);
1057 __m128i lo = _mm_cmpgt_epi32(_mm256_extractf128_si256(b, 0), _mm256_extractf128_si256(a, 0));
1058 __m128i hi = _mm_cmpgt_epi32(_mm256_extractf128_si256(b, 1), _mm256_extractf128_si256(a, 1));
1059 return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
1063EIGEN_STRONG_INLINE Packet8i pcmp_eq(
const Packet8i& a,
const Packet8i& b) {
1064#ifdef EIGEN_VECTORIZE_AVX2
1065 return _mm256_cmpeq_epi32(a, b);
1067 __m128i lo = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
1068 __m128i hi = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
1069 return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
1073EIGEN_STRONG_INLINE Packet8ui pcmp_eq(
const Packet8ui& a,
const Packet8ui& b) {
1074#ifdef EIGEN_VECTORIZE_AVX2
1075 return _mm256_cmpeq_epi32(a, b);
1077 __m128i lo = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
1078 __m128i hi = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
1079 return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
1084EIGEN_STRONG_INLINE Packet8f pmin<Packet8f>(
const Packet8f& a,
const Packet8f& b) {
1085#if EIGEN_GNUC_STRICT_LESS_THAN(6, 3, 0)
1091 asm(
"vminps %[a], %[b], %[res]" : [res]
"=x"(res) : [a]
"x"(a), [b]
"x"(b));
1095 return _mm256_min_ps(b, a);
1099EIGEN_STRONG_INLINE Packet4d pmin<Packet4d>(
const Packet4d& a,
const Packet4d& b) {
1100#if EIGEN_GNUC_STRICT_LESS_THAN(6, 3, 0)
1103 asm(
"vminpd %[a], %[b], %[res]" : [res]
"=x"(res) : [a]
"x"(a), [b]
"x"(b));
1107 return _mm256_min_pd(b, a);
1111EIGEN_STRONG_INLINE Packet8i pmin<Packet8i>(
const Packet8i& a,
const Packet8i& b) {
1112#ifdef EIGEN_VECTORIZE_AVX2
1113 return _mm256_min_epi32(a, b);
1115 __m128i lo = _mm_min_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
1116 __m128i hi = _mm_min_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
1117 return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
1121EIGEN_STRONG_INLINE Packet8ui pmin<Packet8ui>(
const Packet8ui& a,
const Packet8ui& b) {
1122#ifdef EIGEN_VECTORIZE_AVX2
1123 return _mm256_min_epu32(a, b);
1125 __m128i lo = _mm_min_epu32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
1126 __m128i hi = _mm_min_epu32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
1127 return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
1132EIGEN_STRONG_INLINE Packet8f pmax<Packet8f>(
const Packet8f& a,
const Packet8f& b) {
1133#if EIGEN_GNUC_STRICT_LESS_THAN(6, 3, 0)
1136 asm(
"vmaxps %[a], %[b], %[res]" : [res]
"=x"(res) : [a]
"x"(a), [b]
"x"(b));
1140 return _mm256_max_ps(b, a);
1144EIGEN_STRONG_INLINE Packet4d pmax<Packet4d>(
const Packet4d& a,
const Packet4d& b) {
1145#if EIGEN_GNUC_STRICT_LESS_THAN(6, 3, 0)
1148 asm(
"vmaxpd %[a], %[b], %[res]" : [res]
"=x"(res) : [a]
"x"(a), [b]
"x"(b));
1152 return _mm256_max_pd(b, a);
1156EIGEN_STRONG_INLINE Packet8i pmax<Packet8i>(
const Packet8i& a,
const Packet8i& b) {
1157#ifdef EIGEN_VECTORIZE_AVX2
1158 return _mm256_max_epi32(a, b);
1160 __m128i lo = _mm_max_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
1161 __m128i hi = _mm_max_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
1162 return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
1166EIGEN_STRONG_INLINE Packet8ui pmax<Packet8ui>(
const Packet8ui& a,
const Packet8ui& b) {
1167#ifdef EIGEN_VECTORIZE_AVX2
1168 return _mm256_max_epu32(a, b);
1170 __m128i lo = _mm_max_epu32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
1171 __m128i hi = _mm_max_epu32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
1172 return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
1176#ifdef EIGEN_VECTORIZE_AVX2
1178EIGEN_STRONG_INLINE Packet8i psign(
const Packet8i& a) {
1179 return _mm256_sign_epi32(_mm256_set1_epi32(1), a);
1185EIGEN_STRONG_INLINE Packet8f pmin<PropagateNumbers, Packet8f>(
const Packet8f& a,
const Packet8f& b) {
1186 return pminmax_propagate_numbers(a, b, pmin<Packet8f>);
1189EIGEN_STRONG_INLINE Packet4d pmin<PropagateNumbers, Packet4d>(
const Packet4d& a,
const Packet4d& b) {
1190 return pminmax_propagate_numbers(a, b, pmin<Packet4d>);
1193EIGEN_STRONG_INLINE Packet8f pmax<PropagateNumbers, Packet8f>(
const Packet8f& a,
const Packet8f& b) {
1194 return pminmax_propagate_numbers(a, b, pmax<Packet8f>);
1197EIGEN_STRONG_INLINE Packet4d pmax<PropagateNumbers, Packet4d>(
const Packet4d& a,
const Packet4d& b) {
1198 return pminmax_propagate_numbers(a, b, pmax<Packet4d>);
1201EIGEN_STRONG_INLINE Packet8f pmin<PropagateNaN, Packet8f>(
const Packet8f& a,
const Packet8f& b) {
1202 return pminmax_propagate_nan(a, b, pmin<Packet8f>);
1205EIGEN_STRONG_INLINE Packet4d pmin<PropagateNaN, Packet4d>(
const Packet4d& a,
const Packet4d& b) {
1206 return pminmax_propagate_nan(a, b, pmin<Packet4d>);
1209EIGEN_STRONG_INLINE Packet8f pmax<PropagateNaN, Packet8f>(
const Packet8f& a,
const Packet8f& b) {
1210 return pminmax_propagate_nan(a, b, pmax<Packet8f>);
1213EIGEN_STRONG_INLINE Packet4d pmax<PropagateNaN, Packet4d>(
const Packet4d& a,
const Packet4d& b) {
1214 return pminmax_propagate_nan(a, b, pmax<Packet4d>);
1218EIGEN_STRONG_INLINE Packet8f print<Packet8f>(
const Packet8f& a) {
1219 return _mm256_round_ps(a, _MM_FROUND_CUR_DIRECTION);
1222EIGEN_STRONG_INLINE Packet4d print<Packet4d>(
const Packet4d& a) {
1223 return _mm256_round_pd(a, _MM_FROUND_CUR_DIRECTION);
1227EIGEN_STRONG_INLINE Packet8f pceil<Packet8f>(
const Packet8f& a) {
1228 return _mm256_ceil_ps(a);
1231EIGEN_STRONG_INLINE Packet4d pceil<Packet4d>(
const Packet4d& a) {
1232 return _mm256_ceil_pd(a);
1236EIGEN_STRONG_INLINE Packet8f pfloor<Packet8f>(
const Packet8f& a) {
1237 return _mm256_floor_ps(a);
1240EIGEN_STRONG_INLINE Packet4d pfloor<Packet4d>(
const Packet4d& a) {
1241 return _mm256_floor_pd(a);
1245EIGEN_STRONG_INLINE Packet8f ptrunc<Packet8f>(
const Packet8f& a) {
1246 return _mm256_round_ps(a, _MM_FROUND_TRUNC);
1249EIGEN_STRONG_INLINE Packet4d ptrunc<Packet4d>(
const Packet4d& a) {
1250 return _mm256_round_pd(a, _MM_FROUND_TRUNC);
1254EIGEN_STRONG_INLINE Packet8i ptrue<Packet8i>(
const Packet8i& a) {
1255#ifdef EIGEN_VECTORIZE_AVX2
1257 return _mm256_cmpeq_epi32(a, a);
1259 const __m256 b = _mm256_castsi256_ps(a);
1260 return _mm256_castps_si256(_mm256_cmp_ps(b, b, _CMP_TRUE_UQ));
1265EIGEN_STRONG_INLINE Packet8f ptrue<Packet8f>(
const Packet8f& a) {
1266#ifdef EIGEN_VECTORIZE_AVX2
1268 const __m256i b = _mm256_castps_si256(a);
1269 return _mm256_castsi256_ps(_mm256_cmpeq_epi32(b, b));
1271 return _mm256_cmp_ps(a, a, _CMP_TRUE_UQ);
1276EIGEN_STRONG_INLINE Packet4d ptrue<Packet4d>(
const Packet4d& a) {
1277#ifdef EIGEN_VECTORIZE_AVX2
1279 const __m256i b = _mm256_castpd_si256(a);
1280 return _mm256_castsi256_pd(_mm256_cmpeq_epi64(b, b));
1282 return _mm256_cmp_pd(a, a, _CMP_TRUE_UQ);
1287EIGEN_STRONG_INLINE Packet8f pand<Packet8f>(
const Packet8f& a,
const Packet8f& b) {
1288 return _mm256_and_ps(a, b);
1291EIGEN_STRONG_INLINE Packet4d pand<Packet4d>(
const Packet4d& a,
const Packet4d& b) {
1292 return _mm256_and_pd(a, b);
1295EIGEN_STRONG_INLINE Packet8i pand<Packet8i>(
const Packet8i& a,
const Packet8i& b) {
1296#ifdef EIGEN_VECTORIZE_AVX2
1297 return _mm256_and_si256(a, b);
1299 return _mm256_castps_si256(_mm256_and_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)));
1303EIGEN_STRONG_INLINE Packet8ui pand<Packet8ui>(
const Packet8ui& a,
const Packet8ui& b) {
1304#ifdef EIGEN_VECTORIZE_AVX2
1305 return _mm256_and_si256(a, b);
1307 return _mm256_castps_si256(_mm256_and_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)));
1312EIGEN_STRONG_INLINE Packet8f por<Packet8f>(
const Packet8f& a,
const Packet8f& b) {
1313 return _mm256_or_ps(a, b);
1316EIGEN_STRONG_INLINE Packet4d por<Packet4d>(
const Packet4d& a,
const Packet4d& b) {
1317 return _mm256_or_pd(a, b);
1320EIGEN_STRONG_INLINE Packet8i por<Packet8i>(
const Packet8i& a,
const Packet8i& b) {
1321#ifdef EIGEN_VECTORIZE_AVX2
1322 return _mm256_or_si256(a, b);
1324 return _mm256_castps_si256(_mm256_or_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)));
1328EIGEN_STRONG_INLINE Packet8ui por<Packet8ui>(
const Packet8ui& a,
const Packet8ui& b) {
1329#ifdef EIGEN_VECTORIZE_AVX2
1330 return _mm256_or_si256(a, b);
1332 return _mm256_castps_si256(_mm256_or_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)));
1337EIGEN_STRONG_INLINE Packet8f pxor<Packet8f>(
const Packet8f& a,
const Packet8f& b) {
1338 return _mm256_xor_ps(a, b);
1341EIGEN_STRONG_INLINE Packet4d pxor<Packet4d>(
const Packet4d& a,
const Packet4d& b) {
1342 return _mm256_xor_pd(a, b);
1345EIGEN_STRONG_INLINE Packet8i pxor<Packet8i>(
const Packet8i& a,
const Packet8i& b) {
1346#ifdef EIGEN_VECTORIZE_AVX2
1347 return _mm256_xor_si256(a, b);
1349 return _mm256_castps_si256(_mm256_xor_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)));
1353EIGEN_STRONG_INLINE Packet8ui pxor<Packet8ui>(
const Packet8ui& a,
const Packet8ui& b) {
1354#ifdef EIGEN_VECTORIZE_AVX2
1355 return _mm256_xor_si256(a, b);
1357 return _mm256_castps_si256(_mm256_xor_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)));
1362EIGEN_STRONG_INLINE Packet8f pandnot<Packet8f>(
const Packet8f& a,
const Packet8f& b) {
1363 return _mm256_andnot_ps(b, a);
1366EIGEN_STRONG_INLINE Packet4d pandnot<Packet4d>(
const Packet4d& a,
const Packet4d& b) {
1367 return _mm256_andnot_pd(b, a);
1370EIGEN_STRONG_INLINE Packet8i pandnot<Packet8i>(
const Packet8i& a,
const Packet8i& b) {
1371#ifdef EIGEN_VECTORIZE_AVX2
1372 return _mm256_andnot_si256(b, a);
1374 return _mm256_castps_si256(_mm256_andnot_ps(_mm256_castsi256_ps(b), _mm256_castsi256_ps(a)));
1378EIGEN_STRONG_INLINE Packet8ui pandnot<Packet8ui>(
const Packet8ui& a,
const Packet8ui& b) {
1379#ifdef EIGEN_VECTORIZE_AVX2
1380 return _mm256_andnot_si256(b, a);
1382 return _mm256_castps_si256(_mm256_andnot_ps(_mm256_castsi256_ps(b), _mm256_castsi256_ps(a)));
1387EIGEN_STRONG_INLINE Packet8ui pcmp_lt(
const Packet8ui& a,
const Packet8ui& b) {
1388 return pxor(pcmp_eq(a, pmax(a, b)), ptrue(a));
1391EIGEN_STRONG_INLINE Packet8ui pcmp_le(
const Packet8ui& a,
const Packet8ui& b) {
1392 return pcmp_eq(a, pmin(a, b));
1396EIGEN_STRONG_INLINE Packet8f pround<Packet8f>(
const Packet8f& a) {
1397 const Packet8f mask = pset1frombits<Packet8f>(
static_cast<numext::uint32_t
>(0x80000000u));
1398 const Packet8f prev0dot5 = pset1frombits<Packet8f>(
static_cast<numext::uint32_t
>(0x3EFFFFFFu));
1399 return _mm256_round_ps(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
1402EIGEN_STRONG_INLINE Packet4d pround<Packet4d>(
const Packet4d& a) {
1403 const Packet4d mask = pset1frombits<Packet4d>(
static_cast<numext::uint64_t
>(0x8000000000000000ull));
1404 const Packet4d prev0dot5 = pset1frombits<Packet4d>(
static_cast<numext::uint64_t
>(0x3FDFFFFFFFFFFFFFull));
1405 return _mm256_round_pd(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
1409EIGEN_STRONG_INLINE Packet8f pselect<Packet8f>(
const Packet8f& mask,
const Packet8f& a,
const Packet8f& b) {
1410 return _mm256_blendv_ps(b, a, mask);
1413EIGEN_STRONG_INLINE Packet8i pselect<Packet8i>(
const Packet8i& mask,
const Packet8i& a,
const Packet8i& b) {
1414 return _mm256_castps_si256(
1415 _mm256_blendv_ps(_mm256_castsi256_ps(b), _mm256_castsi256_ps(a), _mm256_castsi256_ps(mask)));
1418EIGEN_STRONG_INLINE Packet8ui pselect<Packet8ui>(
const Packet8ui& mask,
const Packet8ui& a,
const Packet8ui& b) {
1419 return _mm256_castps_si256(
1420 _mm256_blendv_ps(_mm256_castsi256_ps(b), _mm256_castsi256_ps(a), _mm256_castsi256_ps(mask)));
1424EIGEN_STRONG_INLINE Packet4d pselect<Packet4d>(
const Packet4d& mask,
const Packet4d& a,
const Packet4d& b) {
1425 return _mm256_blendv_pd(b, a, mask);
1429EIGEN_STRONG_INLINE Packet8i parithmetic_shift_right(Packet8i a) {
1430#ifdef EIGEN_VECTORIZE_AVX2
1431 return _mm256_srai_epi32(a, N);
1433 __m128i lo = _mm_srai_epi32(_mm256_extractf128_si256(a, 0), N);
1434 __m128i hi = _mm_srai_epi32(_mm256_extractf128_si256(a, 1), N);
1435 return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
1440EIGEN_STRONG_INLINE Packet8i plogical_shift_right(Packet8i a) {
1441#ifdef EIGEN_VECTORIZE_AVX2
1442 return _mm256_srli_epi32(a, N);
1444 __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(a, 0), N);
1445 __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(a, 1), N);
1446 return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
1451EIGEN_STRONG_INLINE Packet8i plogical_shift_left(Packet8i a) {
1452#ifdef EIGEN_VECTORIZE_AVX2
1453 return _mm256_slli_epi32(a, N);
1455 __m128i lo = _mm_slli_epi32(_mm256_extractf128_si256(a, 0), N);
1456 __m128i hi = _mm_slli_epi32(_mm256_extractf128_si256(a, 1), N);
1457 return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
1462EIGEN_STRONG_INLINE Packet8ui parithmetic_shift_right(Packet8ui a) {
1463 return (Packet8ui)plogical_shift_right<N>((Packet8i)a);
1466EIGEN_STRONG_INLINE Packet8ui plogical_shift_right(Packet8ui a) {
1467 return (Packet8ui)plogical_shift_right<N>((Packet8i)a);
1470EIGEN_STRONG_INLINE Packet8ui plogical_shift_left(Packet8ui a) {
1471 return (Packet8ui)plogical_shift_left<N>((Packet8i)a);
1475EIGEN_STRONG_INLINE Packet8f pload<Packet8f>(
const float* from) {
1476 EIGEN_DEBUG_ALIGNED_LOAD
return _mm256_load_ps(from);
1479EIGEN_STRONG_INLINE Packet4d pload<Packet4d>(
const double* from) {
1480 EIGEN_DEBUG_ALIGNED_LOAD
return _mm256_load_pd(from);
1483EIGEN_STRONG_INLINE Packet8i pload<Packet8i>(
const int* from) {
1484 EIGEN_DEBUG_ALIGNED_LOAD
return _mm256_load_si256(
reinterpret_cast<const __m256i*
>(from));
1487EIGEN_STRONG_INLINE Packet8ui pload<Packet8ui>(
const uint32_t* from) {
1488 EIGEN_DEBUG_ALIGNED_LOAD
return _mm256_load_si256(
reinterpret_cast<const __m256i*
>(from));
1492EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(
const float* from) {
1493 EIGEN_DEBUG_UNALIGNED_LOAD
return _mm256_loadu_ps(from);
1496EIGEN_STRONG_INLINE Packet4d ploadu<Packet4d>(
const double* from) {
1497 EIGEN_DEBUG_UNALIGNED_LOAD
return _mm256_loadu_pd(from);
1500EIGEN_STRONG_INLINE Packet8i ploadu<Packet8i>(
const int* from) {
1501 EIGEN_DEBUG_UNALIGNED_LOAD
return _mm256_loadu_si256(
reinterpret_cast<const __m256i*
>(from));
1504EIGEN_STRONG_INLINE Packet8ui ploadu<Packet8ui>(
const uint32_t* from) {
1505 EIGEN_DEBUG_UNALIGNED_LOAD
return _mm256_loadu_si256(
reinterpret_cast<const __m256i*
>(from));
1509EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(
const float* from, uint8_t umask) {
1510#ifdef EIGEN_VECTORIZE_AVX512
1511 __mmask16 mask =
static_cast<__mmask16
>(umask & 0x00FF);
1512 EIGEN_DEBUG_UNALIGNED_LOAD
return _mm512_castps512_ps256(_mm512_maskz_loadu_ps(mask, from));
1514 Packet8i mask = _mm256_set1_epi8(
static_cast<char>(umask));
1515 const Packet8i bit_mask =
1516 _mm256_set_epi32(0xffffff7f, 0xffffffbf, 0xffffffdf, 0xffffffef, 0xfffffff7, 0xfffffffb, 0xfffffffd, 0xfffffffe);
1517 mask = por<Packet8i>(mask, bit_mask);
1518 mask = pcmp_eq<Packet8i>(mask, _mm256_set1_epi32(0xffffffff));
1519 EIGEN_DEBUG_UNALIGNED_LOAD
return _mm256_maskload_ps(from, mask);
1525EIGEN_STRONG_INLINE Packet8f ploaddup<Packet8f>(
const float* from) {
1532 Packet8f tmp = _mm256_broadcast_ps((
const __m128*)(
const void*)from);
1534 tmp = _mm256_blend_ps(
1535 tmp, _mm256_castps128_ps256(_mm_permute_ps(_mm256_castps256_ps128(tmp), _MM_SHUFFLE(1, 0, 1, 0))), 15);
1537 return _mm256_permute_ps(tmp, _MM_SHUFFLE(3, 3, 2, 2));
1541EIGEN_STRONG_INLINE Packet4d ploaddup<Packet4d>(
const double* from) {
1542 Packet4d tmp = _mm256_broadcast_pd((
const __m128d*)(
const void*)from);
1543 return _mm256_permute_pd(tmp, 3 << 2);
1547EIGEN_STRONG_INLINE Packet8i ploaddup<Packet8i>(
const int* from) {
1548#ifdef EIGEN_VECTORIZE_AVX2
1549 const Packet8i a = _mm256_castsi128_si256(ploadu<Packet4i>(from));
1550 return _mm256_permutevar8x32_epi32(a, _mm256_setr_epi32(0, 0, 1, 1, 2, 2, 3, 3));
1552 __m256 tmp = _mm256_broadcast_ps((
const __m128*)(
const void*)from);
1554 tmp = _mm256_blend_ps(
1555 tmp, _mm256_castps128_ps256(_mm_permute_ps(_mm256_castps256_ps128(tmp), _MM_SHUFFLE(1, 0, 1, 0))), 15);
1557 return _mm256_castps_si256(_mm256_permute_ps(tmp, _MM_SHUFFLE(3, 3, 2, 2)));
1561EIGEN_STRONG_INLINE Packet8ui ploaddup<Packet8ui>(
const uint32_t* from) {
1562#ifdef EIGEN_VECTORIZE_AVX2
1563 const Packet8ui a = _mm256_castsi128_si256(ploadu<Packet4ui>(from));
1564 return _mm256_permutevar8x32_epi32(a, _mm256_setr_epi32(0, 0, 1, 1, 2, 2, 3, 3));
1566 __m256 tmp = _mm256_broadcast_ps((
const __m128*)(
const void*)from);
1568 tmp = _mm256_blend_ps(
1569 tmp, _mm256_castps128_ps256(_mm_permute_ps(_mm256_castps256_ps128(tmp), _MM_SHUFFLE(1, 0, 1, 0))), 15);
1572 return _mm256_castps_si256(_mm256_permute_ps(tmp, _MM_SHUFFLE(3, 3, 2, 2)));
1578EIGEN_STRONG_INLINE Packet8f ploadquad<Packet8f>(
const float* from) {
1579 Packet8f tmp = _mm256_castps128_ps256(_mm_broadcast_ss(from));
1580 return _mm256_insertf128_ps(tmp, _mm_broadcast_ss(from + 1), 1);
1583EIGEN_STRONG_INLINE Packet8i ploadquad<Packet8i>(
const int* from) {
1584 return _mm256_insertf128_si256(_mm256_set1_epi32(*from), _mm_set1_epi32(*(from + 1)), 1);
1587EIGEN_STRONG_INLINE Packet8ui ploadquad<Packet8ui>(
const uint32_t* from) {
1588 return _mm256_insertf128_si256(_mm256_set1_epi32(*from), _mm_set1_epi32(*(from + 1)), 1);
1592EIGEN_STRONG_INLINE
void pstore<float>(
float* to,
const Packet8f& from) {
1593 EIGEN_DEBUG_ALIGNED_STORE _mm256_store_ps(to, from);
1596EIGEN_STRONG_INLINE
void pstore<double>(
double* to,
const Packet4d& from) {
1597 EIGEN_DEBUG_ALIGNED_STORE _mm256_store_pd(to, from);
1600EIGEN_STRONG_INLINE
void pstore<int>(
int* to,
const Packet8i& from) {
1601 EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(
reinterpret_cast<__m256i*
>(to), from);
1604EIGEN_STRONG_INLINE
void pstore<uint32_t>(uint32_t* to,
const Packet8ui& from) {
1605 EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(
reinterpret_cast<__m256i*
>(to), from);
1609EIGEN_STRONG_INLINE
void pstoreu<float>(
float* to,
const Packet8f& from) {
1610 EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_ps(to, from);
1613EIGEN_STRONG_INLINE
void pstoreu<double>(
double* to,
const Packet4d& from) {
1614 EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_pd(to, from);
1617EIGEN_STRONG_INLINE
void pstoreu<int>(
int* to,
const Packet8i& from) {
1618 EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(
reinterpret_cast<__m256i*
>(to), from);
1621EIGEN_STRONG_INLINE
void pstoreu<uint32_t>(uint32_t* to,
const Packet8ui& from) {
1622 EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(
reinterpret_cast<__m256i*
>(to), from);
1626EIGEN_STRONG_INLINE
void pstoreu<float>(
float* to,
const Packet8f& from, uint8_t umask) {
1627#ifdef EIGEN_VECTORIZE_AVX512
1628 __mmask16 mask =
static_cast<__mmask16
>(umask & 0x00FF);
1629 EIGEN_DEBUG_UNALIGNED_STORE _mm512_mask_storeu_ps(to, mask, _mm512_castps256_ps512(from));
1631 Packet8i mask = _mm256_set1_epi8(
static_cast<char>(umask));
1632 const Packet8i bit_mask =
1633 _mm256_set_epi32(0x7f7f7f7f, 0xbfbfbfbf, 0xdfdfdfdf, 0xefefefef, 0xf7f7f7f7, 0xfbfbfbfb, 0xfdfdfdfd, 0xfefefefe);
1634 mask = por<Packet8i>(mask, bit_mask);
1635 mask = pcmp_eq<Packet8i>(mask, _mm256_set1_epi32(0xffffffff));
1638 const __m256i ifrom = _mm256_castps_si256(from);
1639 EIGEN_DEBUG_UNALIGNED_STORE _mm_maskmoveu_si128(_mm256_extractf128_si256(ifrom, 0), _mm256_extractf128_si256(mask, 0),
1640 reinterpret_cast<char*
>(to));
1641 EIGEN_DEBUG_UNALIGNED_STORE _mm_maskmoveu_si128(_mm256_extractf128_si256(ifrom, 1), _mm256_extractf128_si256(mask, 1),
1642 reinterpret_cast<char*
>(to + 4));
1644 EIGEN_DEBUG_UNALIGNED_STORE _mm256_maskstore_ps(to, mask, from);
1653EIGEN_DEVICE_FUNC
inline Packet8f pgather<float, Packet8f>(
const float* from,
Index stride) {
1654 return _mm256_set_ps(from[7 * stride], from[6 * stride], from[5 * stride], from[4 * stride], from[3 * stride],
1655 from[2 * stride], from[1 * stride], from[0 * stride]);
1658EIGEN_DEVICE_FUNC
inline Packet4d pgather<double, Packet4d>(
const double* from,
Index stride) {
1659 return _mm256_set_pd(from[3 * stride], from[2 * stride], from[1 * stride], from[0 * stride]);
1662EIGEN_DEVICE_FUNC
inline Packet8i pgather<int, Packet8i>(
const int* from,
Index stride) {
1663 return _mm256_set_epi32(from[7 * stride], from[6 * stride], from[5 * stride], from[4 * stride], from[3 * stride],
1664 from[2 * stride], from[1 * stride], from[0 * stride]);
1667EIGEN_DEVICE_FUNC
inline Packet8ui pgather<uint32_t, Packet8ui>(
const uint32_t* from,
Index stride) {
1668 return (Packet8ui)pgather<int, Packet8i>((
int*)from, stride);
1672EIGEN_DEVICE_FUNC
inline void pscatter<float, Packet8f>(
float* to,
const Packet8f& from,
Index stride) {
1673 __m128 low = _mm256_extractf128_ps(from, 0);
1674 to[stride * 0] = _mm_cvtss_f32(low);
1675 to[stride * 1] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 1));
1676 to[stride * 2] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 2));
1677 to[stride * 3] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 3));
1679 __m128 high = _mm256_extractf128_ps(from, 1);
1680 to[stride * 4] = _mm_cvtss_f32(high);
1681 to[stride * 5] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 1));
1682 to[stride * 6] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 2));
1683 to[stride * 7] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3));
1686EIGEN_DEVICE_FUNC
inline void pscatter<double, Packet4d>(
double* to,
const Packet4d& from,
Index stride) {
1687 __m128d low = _mm256_extractf128_pd(from, 0);
1688 to[stride * 0] = _mm_cvtsd_f64(low);
1689 to[stride * 1] = _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1));
1690 __m128d high = _mm256_extractf128_pd(from, 1);
1691 to[stride * 2] = _mm_cvtsd_f64(high);
1692 to[stride * 3] = _mm_cvtsd_f64(_mm_shuffle_pd(high, high, 1));
1695EIGEN_DEVICE_FUNC
inline void pscatter<int, Packet8i>(
int* to,
const Packet8i& from,
Index stride) {
1696 __m128i low = _mm256_extractf128_si256(from, 0);
1697 to[stride * 0] = _mm_extract_epi32(low, 0);
1698 to[stride * 1] = _mm_extract_epi32(low, 1);
1699 to[stride * 2] = _mm_extract_epi32(low, 2);
1700 to[stride * 3] = _mm_extract_epi32(low, 3);
1702 __m128i high = _mm256_extractf128_si256(from, 1);
1703 to[stride * 4] = _mm_extract_epi32(high, 0);
1704 to[stride * 5] = _mm_extract_epi32(high, 1);
1705 to[stride * 6] = _mm_extract_epi32(high, 2);
1706 to[stride * 7] = _mm_extract_epi32(high, 3);
1709EIGEN_DEVICE_FUNC
inline void pscatter<uint32_t, Packet8ui>(uint32_t* to,
const Packet8ui& from,
Index stride) {
1710 pscatter<int, Packet8i>((
int*)to, (Packet8i)from, stride);
1714EIGEN_STRONG_INLINE
void pstore1<Packet8f>(
float* to,
const float& a) {
1715 Packet8f pa = pset1<Packet8f>(a);
1719EIGEN_STRONG_INLINE
void pstore1<Packet4d>(
double* to,
const double& a) {
1720 Packet4d pa = pset1<Packet4d>(a);
1724EIGEN_STRONG_INLINE
void pstore1<Packet8i>(
int* to,
const int& a) {
1725 Packet8i pa = pset1<Packet8i>(a);
1729#ifndef EIGEN_VECTORIZE_AVX512
1731EIGEN_STRONG_INLINE
void prefetch<float>(
const float* addr) {
1732 _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0);
1735EIGEN_STRONG_INLINE
void prefetch<double>(
const double* addr) {
1736 _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0);
1739EIGEN_STRONG_INLINE
void prefetch<int>(
const int* addr) {
1740 _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0);
1743EIGEN_STRONG_INLINE
void prefetch<uint32_t>(
const uint32_t* addr) {
1744 _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0);
1749EIGEN_STRONG_INLINE
float pfirst<Packet8f>(
const Packet8f& a) {
1750 return _mm_cvtss_f32(_mm256_castps256_ps128(a));
1753EIGEN_STRONG_INLINE
double pfirst<Packet4d>(
const Packet4d& a) {
1754 return _mm_cvtsd_f64(_mm256_castpd256_pd128(a));
1757EIGEN_STRONG_INLINE
int pfirst<Packet8i>(
const Packet8i& a) {
1758 return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
1761EIGEN_STRONG_INLINE uint32_t pfirst<Packet8ui>(
const Packet8ui& a) {
1762 return numext::bit_cast<uint32_t>(_mm_cvtsi128_si32(_mm256_castsi256_si128(a)));
1766EIGEN_STRONG_INLINE Packet8f preverse(
const Packet8f& a) {
1767 __m256 tmp = _mm256_shuffle_ps(a, a, 0x1b);
1768 return _mm256_permute2f128_ps(tmp, tmp, 1);
1771EIGEN_STRONG_INLINE Packet4d preverse(
const Packet4d& a) {
1772 __m256d tmp = _mm256_shuffle_pd(a, a, 5);
1773 return _mm256_permute2f128_pd(tmp, tmp, 1);
1777 __m256d swap_halves = _mm256_permute2f128_pd(a,a,1);
1778 return _mm256_permute_pd(swap_halves,5);
1782EIGEN_STRONG_INLINE Packet8i preverse(
const Packet8i& a) {
1783 return _mm256_castps_si256(preverse(_mm256_castsi256_ps(a)));
1786EIGEN_STRONG_INLINE Packet8ui preverse(
const Packet8ui& a) {
1787 return _mm256_castps_si256(preverse(_mm256_castsi256_ps(a)));
1790#ifdef EIGEN_VECTORIZE_AVX2
1792EIGEN_STRONG_INLINE Packet4l preverse(
const Packet4l& a) {
1793 return _mm256_castpd_si256(preverse(_mm256_castsi256_pd(a)));
1796EIGEN_STRONG_INLINE Packet4ul preverse(
const Packet4ul& a) {
1797 return _mm256_castpd_si256(preverse(_mm256_castsi256_pd(a)));
1803EIGEN_STRONG_INLINE Packet8f pabs(
const Packet8f& a) {
1804 const Packet8f mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x7FFFFFFF));
1805 return _mm256_and_ps(a, mask);
1808EIGEN_STRONG_INLINE Packet4d pabs(
const Packet4d& a) {
1809 const Packet4d mask = _mm256_castsi256_pd(_mm256_set1_epi64x(0x7FFFFFFFFFFFFFFF));
1810 return _mm256_and_pd(a, mask);
1813EIGEN_STRONG_INLINE Packet8i pabs(
const Packet8i& a) {
1814#ifdef EIGEN_VECTORIZE_AVX2
1815 return _mm256_abs_epi32(a);
1817 __m128i lo = _mm_abs_epi32(_mm256_extractf128_si256(a, 0));
1818 __m128i hi = _mm_abs_epi32(_mm256_extractf128_si256(a, 1));
1819 return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
1823EIGEN_STRONG_INLINE Packet8ui pabs(
const Packet8ui& a) {
1827#ifndef EIGEN_VECTORIZE_AVX512FP16
1829EIGEN_STRONG_INLINE Packet8h psignbit(
const Packet8h& a) {
1830 return _mm_cmpgt_epi16(_mm_setzero_si128(), a);
1835EIGEN_STRONG_INLINE Packet8bf psignbit(
const Packet8bf& a) {
1836 return _mm_cmpgt_epi16(_mm_setzero_si128(), a);
1839EIGEN_STRONG_INLINE Packet8f psignbit(
const Packet8f& a) {
1840#ifdef EIGEN_VECTORIZE_AVX2
1841 return _mm256_castsi256_ps(_mm256_cmpgt_epi32(_mm256_setzero_si256(), _mm256_castps_si256(a)));
1843 return _mm256_castsi256_ps(parithmetic_shift_right<31>(Packet8i(_mm256_castps_si256(a))));
1847EIGEN_STRONG_INLINE Packet8ui psignbit(
const Packet8ui& ) {
1848 return _mm256_setzero_si256();
1850#ifdef EIGEN_VECTORIZE_AVX2
1852EIGEN_STRONG_INLINE Packet4d psignbit(
const Packet4d& a) {
1853 return _mm256_castsi256_pd(_mm256_cmpgt_epi64(_mm256_setzero_si256(), _mm256_castpd_si256(a)));
1856EIGEN_STRONG_INLINE Packet4ul psignbit(
const Packet4ul& ) {
1857 return _mm256_setzero_si256();
1862EIGEN_STRONG_INLINE Packet8f pfrexp<Packet8f>(
const Packet8f& a, Packet8f& exponent) {
1863 return pfrexp_generic(a, exponent);
1868EIGEN_STRONG_INLINE Packet4d pfrexp_generic_get_biased_exponent(
const Packet4d& a) {
1869 const Packet4d cst_exp_mask = pset1frombits<Packet4d>(
static_cast<uint64_t
>(0x7ff0000000000000ull));
1870 __m256i a_expo = _mm256_castpd_si256(pand(a, cst_exp_mask));
1871#ifdef EIGEN_VECTORIZE_AVX2
1872 a_expo = _mm256_srli_epi64(a_expo, 52);
1873 __m128i lo = _mm256_extractf128_si256(a_expo, 0);
1874 __m128i hi = _mm256_extractf128_si256(a_expo, 1);
1876 __m128i lo = _mm256_extractf128_si256(a_expo, 0);
1877 __m128i hi = _mm256_extractf128_si256(a_expo, 1);
1878 lo = _mm_srli_epi64(lo, 52);
1879 hi = _mm_srli_epi64(hi, 52);
1881 Packet2d exponent_lo = _mm_cvtepi32_pd(vec4i_swizzle1(lo, 0, 2, 1, 3));
1882 Packet2d exponent_hi = _mm_cvtepi32_pd(vec4i_swizzle1(hi, 0, 2, 1, 3));
1883 Packet4d exponent = _mm256_insertf128_pd(_mm256_setzero_pd(), exponent_lo, 0);
1884 exponent = _mm256_insertf128_pd(exponent, exponent_hi, 1);
1889EIGEN_STRONG_INLINE Packet4d pfrexp<Packet4d>(
const Packet4d& a, Packet4d& exponent) {
1890 return pfrexp_generic(a, exponent);
1894EIGEN_STRONG_INLINE Packet8f pldexp<Packet8f>(
const Packet8f& a,
const Packet8f& exponent) {
1895 return pldexp_generic(a, exponent);
1899EIGEN_STRONG_INLINE Packet4d pldexp<Packet4d>(
const Packet4d& a,
const Packet4d& exponent) {
1901 const Packet4d max_exponent = pset1<Packet4d>(2099.0);
1902 const Packet4i e = _mm256_cvtpd_epi32(pmin(pmax(exponent, pnegate(max_exponent)), max_exponent));
1905 const Packet4i bias = pset1<Packet4i>(1023);
1906 Packet4i b = parithmetic_shift_right<2>(e);
1909 Packet4i hi = vec4i_swizzle1(padd(b, bias), 0, 2, 1, 3);
1910 Packet4i lo = _mm_slli_epi64(hi, 52);
1911 hi = _mm_slli_epi64(_mm_srli_epi64(hi, 32), 52);
1912 Packet4d c = _mm256_castsi256_pd(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1));
1913 Packet4d out = pmul(pmul(pmul(a, c), c), c);
1916 b = psub(psub(psub(e, b), b), b);
1917 hi = vec4i_swizzle1(padd(b, bias), 0, 2, 1, 3);
1918 lo = _mm_slli_epi64(hi, 52);
1919 hi = _mm_slli_epi64(_mm_srli_epi64(hi, 32), 52);
1920 c = _mm256_castsi256_pd(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1));
1926EIGEN_STRONG_INLINE Packet4d pldexp_fast<Packet4d>(
const Packet4d& a,
const Packet4d& exponent) {
1928 const Packet4d min_exponent = pset1<Packet4d>(-1023.0);
1929 const Packet4d max_exponent = pset1<Packet4d>(1024.0);
1930 const Packet4i e = _mm256_cvtpd_epi32(pmin(pmax(exponent, min_exponent), max_exponent));
1931 const Packet4i bias = pset1<Packet4i>(1023);
1934 Packet4i hi = vec4i_swizzle1(padd(e, bias), 0, 2, 1, 3);
1935 const Packet4i lo = _mm_slli_epi64(hi, 52);
1936 hi = _mm_slli_epi64(_mm_srli_epi64(hi, 32), 52);
1937 const Packet4d c = _mm256_castsi256_pd(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1));
1942EIGEN_STRONG_INLINE Packet4f predux_half_dowto4<Packet8f>(
const Packet8f& a) {
1943 return _mm_add_ps(_mm256_castps256_ps128(a), _mm256_extractf128_ps(a, 1));
1946EIGEN_STRONG_INLINE Packet4i predux_half_dowto4<Packet8i>(
const Packet8i& a) {
1947 return _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
1950EIGEN_STRONG_INLINE Packet4ui predux_half_dowto4<Packet8ui>(
const Packet8ui& a) {
1951 return _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
1954EIGEN_DEVICE_FUNC
inline void ptranspose(PacketBlock<Packet8f, 8>& kernel) {
1955 __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
1956 __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
1957 __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
1958 __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
1959 __m256 T4 = _mm256_unpacklo_ps(kernel.packet[4], kernel.packet[5]);
1960 __m256 T5 = _mm256_unpackhi_ps(kernel.packet[4], kernel.packet[5]);
1961 __m256 T6 = _mm256_unpacklo_ps(kernel.packet[6], kernel.packet[7]);
1962 __m256 T7 = _mm256_unpackhi_ps(kernel.packet[6], kernel.packet[7]);
1963 __m256 S0 = _mm256_shuffle_ps(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
1964 __m256 S1 = _mm256_shuffle_ps(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
1965 __m256 S2 = _mm256_shuffle_ps(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
1966 __m256 S3 = _mm256_shuffle_ps(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
1967 __m256 S4 = _mm256_shuffle_ps(T4, T6, _MM_SHUFFLE(1, 0, 1, 0));
1968 __m256 S5 = _mm256_shuffle_ps(T4, T6, _MM_SHUFFLE(3, 2, 3, 2));
1969 __m256 S6 = _mm256_shuffle_ps(T5, T7, _MM_SHUFFLE(1, 0, 1, 0));
1970 __m256 S7 = _mm256_shuffle_ps(T5, T7, _MM_SHUFFLE(3, 2, 3, 2));
1971 kernel.packet[0] = _mm256_permute2f128_ps(S0, S4, 0x20);
1972 kernel.packet[1] = _mm256_permute2f128_ps(S1, S5, 0x20);
1973 kernel.packet[2] = _mm256_permute2f128_ps(S2, S6, 0x20);
1974 kernel.packet[3] = _mm256_permute2f128_ps(S3, S7, 0x20);
1975 kernel.packet[4] = _mm256_permute2f128_ps(S0, S4, 0x31);
1976 kernel.packet[5] = _mm256_permute2f128_ps(S1, S5, 0x31);
1977 kernel.packet[6] = _mm256_permute2f128_ps(S2, S6, 0x31);
1978 kernel.packet[7] = _mm256_permute2f128_ps(S3, S7, 0x31);
1981EIGEN_DEVICE_FUNC
inline void ptranspose(PacketBlock<Packet8f, 4>& kernel) {
1982 __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
1983 __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
1984 __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
1985 __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
1987 __m256 S0 = _mm256_shuffle_ps(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
1988 __m256 S1 = _mm256_shuffle_ps(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
1989 __m256 S2 = _mm256_shuffle_ps(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
1990 __m256 S3 = _mm256_shuffle_ps(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
1992 kernel.packet[0] = _mm256_permute2f128_ps(S0, S1, 0x20);
1993 kernel.packet[1] = _mm256_permute2f128_ps(S2, S3, 0x20);
1994 kernel.packet[2] = _mm256_permute2f128_ps(S0, S1, 0x31);
1995 kernel.packet[3] = _mm256_permute2f128_ps(S2, S3, 0x31);
// Integer shuffle/unpack helpers for the ptranspose kernels below. Without
// AVX2 the 256-bit integer ops do not exist, so the float-domain
// equivalents are used through bit casts.
#define MM256_SHUFFLE_EPI32(A, B, M) \
  _mm256_castps_si256(_mm256_shuffle_ps(_mm256_castsi256_ps(A), _mm256_castsi256_ps(B), M))

#ifndef EIGEN_VECTORIZE_AVX2
#define MM256_UNPACKLO_EPI32(A, B) \
  _mm256_castps_si256(_mm256_unpacklo_ps(_mm256_castsi256_ps(A), _mm256_castsi256_ps(B)))
#define MM256_UNPACKHI_EPI32(A, B) \
  _mm256_castps_si256(_mm256_unpackhi_ps(_mm256_castsi256_ps(A), _mm256_castsi256_ps(B)))
#else
#define MM256_UNPACKLO_EPI32(A, B) _mm256_unpacklo_epi32(A, B)
#define MM256_UNPACKHI_EPI32(A, B) _mm256_unpackhi_epi32(A, B)
#endif
2011EIGEN_DEVICE_FUNC
inline void ptranspose(PacketBlock<Packet8i, 8>& kernel) {
2012 __m256i T0 = MM256_UNPACKLO_EPI32(kernel.packet[0], kernel.packet[1]);
2013 __m256i T1 = MM256_UNPACKHI_EPI32(kernel.packet[0], kernel.packet[1]);
2014 __m256i T2 = MM256_UNPACKLO_EPI32(kernel.packet[2], kernel.packet[3]);
2015 __m256i T3 = MM256_UNPACKHI_EPI32(kernel.packet[2], kernel.packet[3]);
2016 __m256i T4 = MM256_UNPACKLO_EPI32(kernel.packet[4], kernel.packet[5]);
2017 __m256i T5 = MM256_UNPACKHI_EPI32(kernel.packet[4], kernel.packet[5]);
2018 __m256i T6 = MM256_UNPACKLO_EPI32(kernel.packet[6], kernel.packet[7]);
2019 __m256i T7 = MM256_UNPACKHI_EPI32(kernel.packet[6], kernel.packet[7]);
2020 __m256i S0 = MM256_SHUFFLE_EPI32(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
2021 __m256i S1 = MM256_SHUFFLE_EPI32(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
2022 __m256i S2 = MM256_SHUFFLE_EPI32(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
2023 __m256i S3 = MM256_SHUFFLE_EPI32(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
2024 __m256i S4 = MM256_SHUFFLE_EPI32(T4, T6, _MM_SHUFFLE(1, 0, 1, 0));
2025 __m256i S5 = MM256_SHUFFLE_EPI32(T4, T6, _MM_SHUFFLE(3, 2, 3, 2));
2026 __m256i S6 = MM256_SHUFFLE_EPI32(T5, T7, _MM_SHUFFLE(1, 0, 1, 0));
2027 __m256i S7 = MM256_SHUFFLE_EPI32(T5, T7, _MM_SHUFFLE(3, 2, 3, 2));
2028 kernel.packet[0] = _mm256_permute2f128_si256(S0, S4, 0x20);
2029 kernel.packet[1] = _mm256_permute2f128_si256(S1, S5, 0x20);
2030 kernel.packet[2] = _mm256_permute2f128_si256(S2, S6, 0x20);
2031 kernel.packet[3] = _mm256_permute2f128_si256(S3, S7, 0x20);
2032 kernel.packet[4] = _mm256_permute2f128_si256(S0, S4, 0x31);
2033 kernel.packet[5] = _mm256_permute2f128_si256(S1, S5, 0x31);
2034 kernel.packet[6] = _mm256_permute2f128_si256(S2, S6, 0x31);
2035 kernel.packet[7] = _mm256_permute2f128_si256(S3, S7, 0x31);
2037EIGEN_DEVICE_FUNC
inline void ptranspose(PacketBlock<Packet8ui, 8>& kernel) {
2038 ptranspose((PacketBlock<Packet8i, 8>&)kernel);
2041EIGEN_DEVICE_FUNC
inline void ptranspose(PacketBlock<Packet8i, 4>& kernel) {
2042 __m256i T0 = MM256_UNPACKLO_EPI32(kernel.packet[0], kernel.packet[1]);
2043 __m256i T1 = MM256_UNPACKHI_EPI32(kernel.packet[0], kernel.packet[1]);
2044 __m256i T2 = MM256_UNPACKLO_EPI32(kernel.packet[2], kernel.packet[3]);
2045 __m256i T3 = MM256_UNPACKHI_EPI32(kernel.packet[2], kernel.packet[3]);
2047 __m256i S0 = MM256_SHUFFLE_EPI32(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
2048 __m256i S1 = MM256_SHUFFLE_EPI32(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
2049 __m256i S2 = MM256_SHUFFLE_EPI32(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
2050 __m256i S3 = MM256_SHUFFLE_EPI32(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
2052 kernel.packet[0] = _mm256_permute2f128_si256(S0, S1, 0x20);
2053 kernel.packet[1] = _mm256_permute2f128_si256(S2, S3, 0x20);
2054 kernel.packet[2] = _mm256_permute2f128_si256(S0, S1, 0x31);
2055 kernel.packet[3] = _mm256_permute2f128_si256(S2, S3, 0x31);
2057EIGEN_DEVICE_FUNC
inline void ptranspose(PacketBlock<Packet8ui, 4>& kernel) {
2058 ptranspose((PacketBlock<Packet8i, 4>&)kernel);
2061EIGEN_DEVICE_FUNC
inline void ptranspose(PacketBlock<Packet4d, 4>& kernel) {
2062 __m256d T0 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 15);
2063 __m256d T1 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);
2064 __m256d T2 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 15);
2065 __m256d T3 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);
2067 kernel.packet[1] = _mm256_permute2f128_pd(T0, T2, 32);
2068 kernel.packet[3] = _mm256_permute2f128_pd(T0, T2, 49);
2069 kernel.packet[0] = _mm256_permute2f128_pd(T1, T3, 32);
2070 kernel.packet[2] = _mm256_permute2f128_pd(T1, T3, 49);
2073EIGEN_STRONG_INLINE __m256i avx_blend_mask(
const Selector<4>& ifPacket) {
2074 return _mm256_set_epi64x(0 - ifPacket.select[3], 0 - ifPacket.select[2], 0 - ifPacket.select[1],
2075 0 - ifPacket.select[0]);
2078EIGEN_STRONG_INLINE __m256i avx_blend_mask(
const Selector<8>& ifPacket) {
2079 return _mm256_set_epi32(0 - ifPacket.select[7], 0 - ifPacket.select[6], 0 - ifPacket.select[5],
2080 0 - ifPacket.select[4], 0 - ifPacket.select[3], 0 - ifPacket.select[2],
2081 0 - ifPacket.select[1], 0 - ifPacket.select[0]);
2085EIGEN_STRONG_INLINE Packet8f pblend(
const Selector<8>& ifPacket,
const Packet8f& thenPacket,
2086 const Packet8f& elsePacket) {
2087 const __m256 true_mask = _mm256_castsi256_ps(avx_blend_mask(ifPacket));
2088 return pselect<Packet8f>(true_mask, thenPacket, elsePacket);
2092EIGEN_STRONG_INLINE Packet4d pblend(
const Selector<4>& ifPacket,
const Packet4d& thenPacket,
2093 const Packet4d& elsePacket) {
2094 const __m256d true_mask = _mm256_castsi256_pd(avx_blend_mask(ifPacket));
2095 return pselect<Packet4d>(true_mask, thenPacket, elsePacket);
2099#ifndef EIGEN_VECTORIZE_AVX512FP16
2101struct unpacket_traits<Packet8h> {
2102 typedef Eigen::half type;
2106 vectorizable =
true,
2107 masked_load_available =
false,
2108 masked_store_available =
false
2110 typedef Packet8h half;
2114EIGEN_STRONG_INLINE Packet8h pset1<Packet8h>(
const Eigen::half& from) {
2115 return _mm_set1_epi16(numext::bit_cast<numext::uint16_t>(from));
2119EIGEN_STRONG_INLINE Eigen::half pfirst<Packet8h>(
const Packet8h& from) {
2120 return numext::bit_cast<Eigen::half>(
static_cast<numext::uint16_t
>(_mm_extract_epi16(from, 0)));
2124EIGEN_STRONG_INLINE Packet8h pload<Packet8h>(
const Eigen::half* from) {
2125 return _mm_load_si128(
reinterpret_cast<const __m128i*
>(from));
2129EIGEN_STRONG_INLINE Packet8h ploadu<Packet8h>(
const Eigen::half* from) {
2130 return _mm_loadu_si128(
reinterpret_cast<const __m128i*
>(from));
2134EIGEN_STRONG_INLINE
void pstore<Eigen::half>(Eigen::half* to,
const Packet8h& from) {
2135 _mm_store_si128(
reinterpret_cast<__m128i*
>(to), from);
2139EIGEN_STRONG_INLINE
void pstoreu<Eigen::half>(Eigen::half* to,
const Packet8h& from) {
2140 _mm_storeu_si128(
reinterpret_cast<__m128i*
>(to), from);
2144EIGEN_STRONG_INLINE Packet8h ploaddup<Packet8h>(
const Eigen::half* from) {
2145 const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
2146 const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
2147 const numext::uint16_t c = numext::bit_cast<numext::uint16_t>(from[2]);
2148 const numext::uint16_t d = numext::bit_cast<numext::uint16_t>(from[3]);
2149 return _mm_set_epi16(d, d, c, c, b, b, a, a);
2153EIGEN_STRONG_INLINE Packet8h ploadquad<Packet8h>(
const Eigen::half* from) {
2154 const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
2155 const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
2156 return _mm_set_epi16(b, b, b, b, a, a, a, a);
2160EIGEN_STRONG_INLINE Packet8h ptrue(
const Packet8h& a) {
2161 return _mm_cmpeq_epi32(a, a);
2165EIGEN_STRONG_INLINE Packet8h pabs(
const Packet8h& a) {
2166 const __m128i sign_mask = _mm_set1_epi16(
static_cast<numext::uint16_t
>(0x8000));
2167 return _mm_andnot_si128(sign_mask, a);
2170EIGEN_STRONG_INLINE Packet8f half2float(
const Packet8h& a) {
2171#ifdef EIGEN_HAS_FP16_C
2172 return _mm256_cvtph_ps(a);
2174 Eigen::internal::Packet8f pp = _mm256_castsi256_ps(
2175 _mm256_insertf128_si256(_mm256_castsi128_si256(half2floatsse(a)), half2floatsse(_mm_srli_si128(a, 8)), 1));
2180EIGEN_STRONG_INLINE Packet8h float2half(
const Packet8f& a) {
2181#ifdef EIGEN_HAS_FP16_C
2182 return _mm256_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT);
2184 __m128i lo = float2half(_mm256_extractf128_ps(a, 0));
2185 __m128i hi = float2half(_mm256_extractf128_ps(a, 1));
2186 return _mm_packus_epi32(lo, hi);
2191EIGEN_STRONG_INLINE Packet8h pmin<Packet8h>(
const Packet8h& a,
const Packet8h& b) {
2192 return float2half(pmin<Packet8f>(half2float(a), half2float(b)));
2196EIGEN_STRONG_INLINE Packet8h pmax<Packet8h>(
const Packet8h& a,
const Packet8h& b) {
2197 return float2half(pmax<Packet8f>(half2float(a), half2float(b)));
2201EIGEN_STRONG_INLINE Packet8h plset<Packet8h>(
const half& a) {
2202 return float2half(plset<Packet8f>(
static_cast<float>(a)));
2206EIGEN_STRONG_INLINE Packet8h por(
const Packet8h& a,
const Packet8h& b) {
2209 return _mm_or_si128(a, b);
2212EIGEN_STRONG_INLINE Packet8h pxor(
const Packet8h& a,
const Packet8h& b) {
2213 return _mm_xor_si128(a, b);
2216EIGEN_STRONG_INLINE Packet8h pand(
const Packet8h& a,
const Packet8h& b) {
2217 return _mm_and_si128(a, b);
2220EIGEN_STRONG_INLINE Packet8h pandnot(
const Packet8h& a,
const Packet8h& b) {
2221 return _mm_andnot_si128(b, a);
2225EIGEN_STRONG_INLINE Packet8h pselect(
const Packet8h& mask,
const Packet8h& a,
const Packet8h& b) {
2226 return _mm_blendv_epi8(b, a, mask);
2230EIGEN_STRONG_INLINE Packet8h pround<Packet8h>(
const Packet8h& a) {
2231 return float2half(pround<Packet8f>(half2float(a)));
2235EIGEN_STRONG_INLINE Packet8h print<Packet8h>(
const Packet8h& a) {
2236 return float2half(print<Packet8f>(half2float(a)));
2240EIGEN_STRONG_INLINE Packet8h pceil<Packet8h>(
const Packet8h& a) {
2241 return float2half(pceil<Packet8f>(half2float(a)));
2245EIGEN_STRONG_INLINE Packet8h pfloor<Packet8h>(
const Packet8h& a) {
2246 return float2half(pfloor<Packet8f>(half2float(a)));
2250EIGEN_STRONG_INLINE Packet8h ptrunc<Packet8h>(
const Packet8h& a) {
2251 return float2half(ptrunc<Packet8f>(half2float(a)));
2255EIGEN_STRONG_INLINE Packet8h pisinf<Packet8h>(
const Packet8h& a) {
2256 constexpr uint16_t kInf = ((1 << 5) - 1) << 10;
2257 constexpr uint16_t kAbsMask = (1 << 15) - 1;
2258 return _mm_cmpeq_epi16(_mm_and_si128(a.m_val, _mm_set1_epi16(kAbsMask)), _mm_set1_epi16(kInf));
2262EIGEN_STRONG_INLINE Packet8h pisnan<Packet8h>(
const Packet8h& a) {
2263 constexpr uint16_t kInf = ((1 << 5) - 1) << 10;
2264 constexpr uint16_t kAbsMask = (1 << 15) - 1;
2265 return _mm_cmpgt_epi16(_mm_and_si128(a.m_val, _mm_set1_epi16(kAbsMask)), _mm_set1_epi16(kInf));
2269EIGEN_STRONG_INLINE __m128i pmaptosigned(
const __m128i& a) {
2270 constexpr uint16_t kAbsMask = (1 << 15) - 1;
2272 return _mm_sign_epi16(_mm_and_si128(a, _mm_set1_epi16(kAbsMask)), a);
2276EIGEN_STRONG_INLINE Packet8h pisordered(
const Packet8h& a,
const Packet8h& b) {
2277 constexpr uint16_t kInf = ((1 << 5) - 1) << 10;
2278 constexpr uint16_t kAbsMask = (1 << 15) - 1;
2279 __m128i abs_a = _mm_and_si128(a.m_val, _mm_set1_epi16(kAbsMask));
2280 __m128i abs_b = _mm_and_si128(b.m_val, _mm_set1_epi16(kAbsMask));
2283 return _mm_cmplt_epi16(_mm_max_epu16(abs_a, abs_b), _mm_set1_epi16(kInf + 1));
2287EIGEN_STRONG_INLINE Packet8h pcmp_eq(
const Packet8h& a,
const Packet8h& b) {
2288 __m128i isOrdered = pisordered(a, b);
2289 __m128i isEqual = _mm_cmpeq_epi16(pmaptosigned(a.m_val), pmaptosigned(b.m_val));
2290 return _mm_and_si128(isOrdered, isEqual);
2294EIGEN_STRONG_INLINE Packet8h pcmp_le(
const Packet8h& a,
const Packet8h& b) {
2295 __m128i isOrdered = pisordered(a, b);
2296 __m128i isGreater = _mm_cmpgt_epi16(pmaptosigned(a.m_val), pmaptosigned(b.m_val));
2297 return _mm_andnot_si128(isGreater, isOrdered);
2301EIGEN_STRONG_INLINE Packet8h pcmp_lt(
const Packet8h& a,
const Packet8h& b) {
2302 __m128i isOrdered = pisordered(a, b);
2303 __m128i isLess = _mm_cmplt_epi16(pmaptosigned(a.m_val), pmaptosigned(b.m_val));
2304 return _mm_and_si128(isOrdered, isLess);
2308EIGEN_STRONG_INLINE Packet8h pcmp_lt_or_nan(
const Packet8h& a,
const Packet8h& b) {
2309 __m128i isUnordered = por(pisnan(a), pisnan(b));
2310 __m128i isLess = _mm_cmplt_epi16(pmaptosigned(a.m_val), pmaptosigned(b.m_val));
2311 return _mm_or_si128(isUnordered, isLess);
2315EIGEN_STRONG_INLINE Packet8h pconj(
const Packet8h& a) {
2320EIGEN_STRONG_INLINE Packet8h pnegate(
const Packet8h& a) {
2321 Packet8h sign_mask = _mm_set1_epi16(
static_cast<numext::uint16_t
>(0x8000));
2322 return _mm_xor_si128(a, sign_mask);
2325#ifndef EIGEN_VECTORIZE_AVX512FP16
2327EIGEN_STRONG_INLINE Packet8h padd<Packet8h>(
const Packet8h& a,
const Packet8h& b) {
2328 Packet8f af = half2float(a);
2329 Packet8f bf = half2float(b);
2330 Packet8f rf = padd(af, bf);
2331 return float2half(rf);
2335EIGEN_STRONG_INLINE Packet8h psub<Packet8h>(
const Packet8h& a,
const Packet8h& b) {
2336 Packet8f af = half2float(a);
2337 Packet8f bf = half2float(b);
2338 Packet8f rf = psub(af, bf);
2339 return float2half(rf);
2343EIGEN_STRONG_INLINE Packet8h pmul<Packet8h>(
const Packet8h& a,
const Packet8h& b) {
2344 Packet8f af = half2float(a);
2345 Packet8f bf = half2float(b);
2346 Packet8f rf = pmul(af, bf);
2347 return float2half(rf);
2351EIGEN_STRONG_INLINE Packet8h pmadd<Packet8h>(
const Packet8h& a,
const Packet8h& b,
const Packet8h& c) {
2352 return float2half(pmadd(half2float(a), half2float(b), half2float(c)));
2356EIGEN_STRONG_INLINE Packet8h pmsub<Packet8h>(
const Packet8h& a,
const Packet8h& b,
const Packet8h& c) {
2357 return float2half(pmsub(half2float(a), half2float(b), half2float(c)));
2361EIGEN_STRONG_INLINE Packet8h pnmadd<Packet8h>(
const Packet8h& a,
const Packet8h& b,
const Packet8h& c) {
2362 return float2half(pnmadd(half2float(a), half2float(b), half2float(c)));
2366EIGEN_STRONG_INLINE Packet8h pnmsub<Packet8h>(
const Packet8h& a,
const Packet8h& b,
const Packet8h& c) {
2367 return float2half(pnmsub(half2float(a), half2float(b), half2float(c)));
2371EIGEN_STRONG_INLINE Packet8h pdiv<Packet8h>(
const Packet8h& a,
const Packet8h& b) {
2372 Packet8f af = half2float(a);
2373 Packet8f bf = half2float(b);
2374 Packet8f rf = pdiv(af, bf);
2375 return float2half(rf);
2380EIGEN_STRONG_INLINE Packet8h pgather<Eigen::half, Packet8h>(
const Eigen::half* from,
Index stride) {
2381 const numext::uint16_t s0 = numext::bit_cast<numext::uint16_t>(from[0 * stride]);
2382 const numext::uint16_t s1 = numext::bit_cast<numext::uint16_t>(from[1 * stride]);
2383 const numext::uint16_t s2 = numext::bit_cast<numext::uint16_t>(from[2 * stride]);
2384 const numext::uint16_t s3 = numext::bit_cast<numext::uint16_t>(from[3 * stride]);
2385 const numext::uint16_t s4 = numext::bit_cast<numext::uint16_t>(from[4 * stride]);
2386 const numext::uint16_t s5 = numext::bit_cast<numext::uint16_t>(from[5 * stride]);
2387 const numext::uint16_t s6 = numext::bit_cast<numext::uint16_t>(from[6 * stride]);
2388 const numext::uint16_t s7 = numext::bit_cast<numext::uint16_t>(from[7 * stride]);
2389 return _mm_set_epi16(s7, s6, s5, s4, s3, s2, s1, s0);
2393EIGEN_STRONG_INLINE
void pscatter<Eigen::half, Packet8h>(Eigen::half* to,
const Packet8h& from,
Index stride) {
2394 EIGEN_ALIGN32 Eigen::half aux[8];
2396 to[stride * 0] = aux[0];
2397 to[stride * 1] = aux[1];
2398 to[stride * 2] = aux[2];
2399 to[stride * 3] = aux[3];
2400 to[stride * 4] = aux[4];
2401 to[stride * 5] = aux[5];
2402 to[stride * 6] = aux[6];
2403 to[stride * 7] = aux[7];
2407EIGEN_STRONG_INLINE Packet8h preverse(
const Packet8h& a) {
2408 __m128i m = _mm_setr_epi8(14, 15, 12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1);
2409 return _mm_shuffle_epi8(a, m);
2412EIGEN_STRONG_INLINE
void ptranspose(PacketBlock<Packet8h, 8>& kernel) {
2413 __m128i a = kernel.packet[0];
2414 __m128i b = kernel.packet[1];
2415 __m128i c = kernel.packet[2];
2416 __m128i d = kernel.packet[3];
2417 __m128i e = kernel.packet[4];
2418 __m128i f = kernel.packet[5];
2419 __m128i g = kernel.packet[6];
2420 __m128i h = kernel.packet[7];
2422 __m128i a03b03 = _mm_unpacklo_epi16(a, b);
2423 __m128i c03d03 = _mm_unpacklo_epi16(c, d);
2424 __m128i e03f03 = _mm_unpacklo_epi16(e, f);
2425 __m128i g03h03 = _mm_unpacklo_epi16(g, h);
2426 __m128i a47b47 = _mm_unpackhi_epi16(a, b);
2427 __m128i c47d47 = _mm_unpackhi_epi16(c, d);
2428 __m128i e47f47 = _mm_unpackhi_epi16(e, f);
2429 __m128i g47h47 = _mm_unpackhi_epi16(g, h);
2431 __m128i a01b01c01d01 = _mm_unpacklo_epi32(a03b03, c03d03);
2432 __m128i a23b23c23d23 = _mm_unpackhi_epi32(a03b03, c03d03);
2433 __m128i e01f01g01h01 = _mm_unpacklo_epi32(e03f03, g03h03);
2434 __m128i e23f23g23h23 = _mm_unpackhi_epi32(e03f03, g03h03);
2435 __m128i a45b45c45d45 = _mm_unpacklo_epi32(a47b47, c47d47);
2436 __m128i a67b67c67d67 = _mm_unpackhi_epi32(a47b47, c47d47);
2437 __m128i e45f45g45h45 = _mm_unpacklo_epi32(e47f47, g47h47);
2438 __m128i e67f67g67h67 = _mm_unpackhi_epi32(e47f47, g47h47);
2440 __m128i a0b0c0d0e0f0g0h0 = _mm_unpacklo_epi64(a01b01c01d01, e01f01g01h01);
2441 __m128i a1b1c1d1e1f1g1h1 = _mm_unpackhi_epi64(a01b01c01d01, e01f01g01h01);
2442 __m128i a2b2c2d2e2f2g2h2 = _mm_unpacklo_epi64(a23b23c23d23, e23f23g23h23);
2443 __m128i a3b3c3d3e3f3g3h3 = _mm_unpackhi_epi64(a23b23c23d23, e23f23g23h23);
2444 __m128i a4b4c4d4e4f4g4h4 = _mm_unpacklo_epi64(a45b45c45d45, e45f45g45h45);
2445 __m128i a5b5c5d5e5f5g5h5 = _mm_unpackhi_epi64(a45b45c45d45, e45f45g45h45);
2446 __m128i a6b6c6d6e6f6g6h6 = _mm_unpacklo_epi64(a67b67c67d67, e67f67g67h67);
2447 __m128i a7b7c7d7e7f7g7h7 = _mm_unpackhi_epi64(a67b67c67d67, e67f67g67h67);
2449 kernel.packet[0] = a0b0c0d0e0f0g0h0;
2450 kernel.packet[1] = a1b1c1d1e1f1g1h1;
2451 kernel.packet[2] = a2b2c2d2e2f2g2h2;
2452 kernel.packet[3] = a3b3c3d3e3f3g3h3;
2453 kernel.packet[4] = a4b4c4d4e4f4g4h4;
2454 kernel.packet[5] = a5b5c5d5e5f5g5h5;
2455 kernel.packet[6] = a6b6c6d6e6f6g6h6;
2456 kernel.packet[7] = a7b7c7d7e7f7g7h7;
2459EIGEN_STRONG_INLINE
void ptranspose(PacketBlock<Packet8h, 4>& kernel) {
2460 EIGEN_ALIGN32 Eigen::half in[4][8];
2461 pstore<Eigen::half>(in[0], kernel.packet[0]);
2462 pstore<Eigen::half>(in[1], kernel.packet[1]);
2463 pstore<Eigen::half>(in[2], kernel.packet[2]);
2464 pstore<Eigen::half>(in[3], kernel.packet[3]);
2466 EIGEN_ALIGN32 Eigen::half out[4][8];
2468 for (
int i = 0; i < 4; ++i) {
2469 for (
int j = 0; j < 4; ++j) {
2470 out[i][j] = in[j][2 * i];
2472 for (
int j = 0; j < 4; ++j) {
2473 out[i][j + 4] = in[j][2 * i + 1];
2477 kernel.packet[0] = pload<Packet8h>(out[0]);
2478 kernel.packet[1] = pload<Packet8h>(out[1]);
2479 kernel.packet[2] = pload<Packet8h>(out[2]);
2480 kernel.packet[3] = pload<Packet8h>(out[3]);
2487EIGEN_STRONG_INLINE Packet8f Bf16ToF32(
const Packet8bf& a) {
2488#ifdef EIGEN_VECTORIZE_AVX2
2489 __m256i extend = _mm256_cvtepu16_epi32(a);
2490 return _mm256_castsi256_ps(_mm256_slli_epi32(extend, 16));
2492 __m128i lo = _mm_cvtepu16_epi32(a);
2493 __m128i hi = _mm_cvtepu16_epi32(_mm_srli_si128(a, 8));
2494 __m128i lo_shift = _mm_slli_epi32(lo, 16);
2495 __m128i hi_shift = _mm_slli_epi32(hi, 16);
2496 return _mm256_castsi256_ps(_mm256_insertf128_si256(_mm256_castsi128_si256(lo_shift), hi_shift, 1));
2501EIGEN_STRONG_INLINE Packet8bf F32ToBf16(
const Packet8f& a) {
2502 __m256i input = _mm256_castps_si256(a);
2504#ifdef EIGEN_VECTORIZE_AVX2
2506 __m256i t = _mm256_srli_epi32(input, 16);
2508 t = _mm256_and_si256(t, _mm256_set1_epi32(1));
2510 t = _mm256_add_epi32(t, _mm256_set1_epi32(0x7fff));
2512 t = _mm256_add_epi32(t, input);
2514 t = _mm256_srli_epi32(t, 16);
2516 __m256 mask = _mm256_cmp_ps(a, a, _CMP_ORD_Q);
2517 __m256i nan = _mm256_set1_epi32(0x7fc0);
2518 t = _mm256_blendv_epi8(nan, t, _mm256_castps_si256(mask));
2520 return _mm_packus_epi32(_mm256_extractf128_si256(t, 0), _mm256_extractf128_si256(t, 1));
2523 __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(input, 0), 16);
2524 __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(input, 1), 16);
2526 lo = _mm_and_si128(lo, _mm_set1_epi32(1));
2527 hi = _mm_and_si128(hi, _mm_set1_epi32(1));
2529 lo = _mm_add_epi32(lo, _mm_set1_epi32(0x7fff));
2530 hi = _mm_add_epi32(hi, _mm_set1_epi32(0x7fff));
2532 lo = _mm_add_epi32(lo, _mm256_extractf128_si256(input, 0));
2533 hi = _mm_add_epi32(hi, _mm256_extractf128_si256(input, 1));
2535 lo = _mm_srli_epi32(lo, 16);
2536 hi = _mm_srli_epi32(hi, 16);
2538 __m256 mask = _mm256_cmp_ps(a, a, _CMP_ORD_Q);
2539 __m128i nan = _mm_set1_epi32(0x7fc0);
2540 lo = _mm_blendv_epi8(nan, lo, _mm_castps_si128(_mm256_castps256_ps128(mask)));
2541 hi = _mm_blendv_epi8(nan, hi, _mm_castps_si128(_mm256_extractf128_ps(mask, 1)));
2543 return _mm_packus_epi32(lo, hi);
2548EIGEN_STRONG_INLINE Packet8bf pset1<Packet8bf>(
const bfloat16& from) {
2549 return _mm_set1_epi16(numext::bit_cast<numext::uint16_t>(from));
2553EIGEN_STRONG_INLINE bfloat16 pfirst<Packet8bf>(
const Packet8bf& from) {
2554 return numext::bit_cast<bfloat16>(
static_cast<numext::uint16_t
>(_mm_extract_epi16(from, 0)));
2558EIGEN_STRONG_INLINE Packet8bf pload<Packet8bf>(
const bfloat16* from) {
2559 return _mm_load_si128(
reinterpret_cast<const __m128i*
>(from));
2563EIGEN_STRONG_INLINE Packet8bf ploadu<Packet8bf>(
const bfloat16* from) {
2564 return _mm_loadu_si128(
reinterpret_cast<const __m128i*
>(from));
2568EIGEN_STRONG_INLINE
void pstore<bfloat16>(bfloat16* to,
const Packet8bf& from) {
2569 _mm_store_si128(
reinterpret_cast<__m128i*
>(to), from);
2573EIGEN_STRONG_INLINE
void pstoreu<bfloat16>(bfloat16* to,
const Packet8bf& from) {
2574 _mm_storeu_si128(
reinterpret_cast<__m128i*
>(to), from);
2578EIGEN_STRONG_INLINE Packet8bf ploaddup<Packet8bf>(
const bfloat16* from) {
2579 const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
2580 const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
2581 const numext::uint16_t c = numext::bit_cast<numext::uint16_t>(from[2]);
2582 const numext::uint16_t d = numext::bit_cast<numext::uint16_t>(from[3]);
2583 return _mm_set_epi16(d, d, c, c, b, b, a, a);
2587EIGEN_STRONG_INLINE Packet8bf ploadquad<Packet8bf>(
const bfloat16* from) {
2588 const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
2589 const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
2590 return _mm_set_epi16(b, b, b, b, a, a, a, a);
2594EIGEN_STRONG_INLINE Packet8bf ptrue(
const Packet8bf& a) {
2595 return _mm_cmpeq_epi32(a, a);
2599EIGEN_STRONG_INLINE Packet8bf pabs(
const Packet8bf& a) {
2600 const __m128i sign_mask = _mm_set1_epi16(
static_cast<numext::uint16_t
>(0x8000));
2601 return _mm_andnot_si128(sign_mask, a);
2605EIGEN_STRONG_INLINE Packet8bf pmin<Packet8bf>(
const Packet8bf& a,
const Packet8bf& b) {
2606 return F32ToBf16(pmin<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
2610EIGEN_STRONG_INLINE Packet8bf pmax<Packet8bf>(
const Packet8bf& a,
const Packet8bf& b) {
2611 return F32ToBf16(pmax<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
2615EIGEN_STRONG_INLINE Packet8bf plset<Packet8bf>(
const bfloat16& a) {
2616 return F32ToBf16(plset<Packet8f>(
static_cast<float>(a)));
2620EIGEN_STRONG_INLINE Packet8bf por(
const Packet8bf& a,
const Packet8bf& b) {
2621 return _mm_or_si128(a, b);
2624EIGEN_STRONG_INLINE Packet8bf pxor(
const Packet8bf& a,
const Packet8bf& b) {
2625 return _mm_xor_si128(a, b);
2628EIGEN_STRONG_INLINE Packet8bf pand(
const Packet8bf& a,
const Packet8bf& b) {
2629 return _mm_and_si128(a, b);
2632EIGEN_STRONG_INLINE Packet8bf pandnot(
const Packet8bf& a,
const Packet8bf& b) {
2633 return _mm_andnot_si128(b, a);
2637EIGEN_STRONG_INLINE Packet8bf pselect(
const Packet8bf& mask,
const Packet8bf& a,
const Packet8bf& b) {
2638 return _mm_blendv_epi8(b, a, mask);
2642EIGEN_STRONG_INLINE Packet8bf pround<Packet8bf>(
const Packet8bf& a) {
2643 return F32ToBf16(pround<Packet8f>(Bf16ToF32(a)));
2647EIGEN_STRONG_INLINE Packet8bf print<Packet8bf>(
const Packet8bf& a) {
2648 return F32ToBf16(print<Packet8f>(Bf16ToF32(a)));
2652EIGEN_STRONG_INLINE Packet8bf pceil<Packet8bf>(
const Packet8bf& a) {
2653 return F32ToBf16(pceil<Packet8f>(Bf16ToF32(a)));
2657EIGEN_STRONG_INLINE Packet8bf pfloor<Packet8bf>(
const Packet8bf& a) {
2658 return F32ToBf16(pfloor<Packet8f>(Bf16ToF32(a)));
2662EIGEN_STRONG_INLINE Packet8bf ptrunc<Packet8bf>(
const Packet8bf& a) {
2663 return F32ToBf16(ptrunc<Packet8f>(Bf16ToF32(a)));
2667EIGEN_STRONG_INLINE Packet8bf pcmp_eq(
const Packet8bf& a,
const Packet8bf& b) {
2668 return Pack16To8(pcmp_eq(Bf16ToF32(a), Bf16ToF32(b)));
2672EIGEN_STRONG_INLINE Packet8bf pcmp_le(
const Packet8bf& a,
const Packet8bf& b) {
2673 return Pack16To8(pcmp_le(Bf16ToF32(a), Bf16ToF32(b)));
2677EIGEN_STRONG_INLINE Packet8bf pcmp_lt(
const Packet8bf& a,
const Packet8bf& b) {
2678 return Pack16To8(pcmp_lt(Bf16ToF32(a), Bf16ToF32(b)));
2682EIGEN_STRONG_INLINE Packet8bf pcmp_lt_or_nan(
const Packet8bf& a,
const Packet8bf& b) {
2683 return Pack16To8(pcmp_lt_or_nan(Bf16ToF32(a), Bf16ToF32(b)));
2687EIGEN_STRONG_INLINE Packet8bf pconj(
const Packet8bf& a) {
2692EIGEN_STRONG_INLINE Packet8bf pnegate(
const Packet8bf& a) {
2693 Packet8bf sign_mask = _mm_set1_epi16(
static_cast<numext::uint16_t
>(0x8000));
2694 return _mm_xor_si128(a, sign_mask);
2698EIGEN_STRONG_INLINE Packet8bf padd<Packet8bf>(
const Packet8bf& a,
const Packet8bf& b) {
2699 return F32ToBf16(padd<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
2703EIGEN_STRONG_INLINE Packet8bf psub<Packet8bf>(
const Packet8bf& a,
const Packet8bf& b) {
2704 return F32ToBf16(psub<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
2708EIGEN_STRONG_INLINE Packet8bf pmul<Packet8bf>(
const Packet8bf& a,
const Packet8bf& b) {
2709 return F32ToBf16(pmul<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
2713EIGEN_STRONG_INLINE Packet8bf pmadd<Packet8bf>(
const Packet8bf& a,
const Packet8bf& b,
const Packet8bf& c) {
2714 return F32ToBf16(pmadd(Bf16ToF32(a), Bf16ToF32(b), Bf16ToF32(c)));
2718EIGEN_STRONG_INLINE Packet8bf pmsub<Packet8bf>(
const Packet8bf& a,
const Packet8bf& b,
const Packet8bf& c) {
2719 return F32ToBf16(pmsub(Bf16ToF32(a), Bf16ToF32(b), Bf16ToF32(c)));
2723EIGEN_STRONG_INLINE Packet8bf pnmadd<Packet8bf>(
const Packet8bf& a,
const Packet8bf& b,
const Packet8bf& c) {
2724 return F32ToBf16(pnmadd(Bf16ToF32(a), Bf16ToF32(b), Bf16ToF32(c)));
2728EIGEN_STRONG_INLINE Packet8bf pnmsub<Packet8bf>(
const Packet8bf& a,
const Packet8bf& b,
const Packet8bf& c) {
2729 return F32ToBf16(pnmsub(Bf16ToF32(a), Bf16ToF32(b), Bf16ToF32(c)));
2733EIGEN_STRONG_INLINE Packet8bf pdiv<Packet8bf>(
const Packet8bf& a,
const Packet8bf& b) {
2734 return F32ToBf16(pdiv<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
2738EIGEN_STRONG_INLINE Packet8bf pgather<bfloat16, Packet8bf>(
const bfloat16* from,
Index stride) {
2739 const numext::uint16_t s0 = numext::bit_cast<numext::uint16_t>(from[0 * stride]);
2740 const numext::uint16_t s1 = numext::bit_cast<numext::uint16_t>(from[1 * stride]);
2741 const numext::uint16_t s2 = numext::bit_cast<numext::uint16_t>(from[2 * stride]);
2742 const numext::uint16_t s3 = numext::bit_cast<numext::uint16_t>(from[3 * stride]);
2743 const numext::uint16_t s4 = numext::bit_cast<numext::uint16_t>(from[4 * stride]);
2744 const numext::uint16_t s5 = numext::bit_cast<numext::uint16_t>(from[5 * stride]);
2745 const numext::uint16_t s6 = numext::bit_cast<numext::uint16_t>(from[6 * stride]);
2746 const numext::uint16_t s7 = numext::bit_cast<numext::uint16_t>(from[7 * stride]);
2747 return _mm_set_epi16(s7, s6, s5, s4, s3, s2, s1, s0);
2751EIGEN_STRONG_INLINE
void pscatter<bfloat16, Packet8bf>(bfloat16* to,
const Packet8bf& from,
Index stride) {
2752 EIGEN_ALIGN32 bfloat16 aux[8];
2754 to[stride * 0] = aux[0];
2755 to[stride * 1] = aux[1];
2756 to[stride * 2] = aux[2];
2757 to[stride * 3] = aux[3];
2758 to[stride * 4] = aux[4];
2759 to[stride * 5] = aux[5];
2760 to[stride * 6] = aux[6];
2761 to[stride * 7] = aux[7];
2765EIGEN_STRONG_INLINE Packet8bf preverse(
const Packet8bf& a) {
2766 __m128i m = _mm_setr_epi8(14, 15, 12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1);
2767 return _mm_shuffle_epi8(a, m);
2770EIGEN_STRONG_INLINE
void ptranspose(PacketBlock<Packet8bf, 8>& kernel) {
2771 __m128i a = kernel.packet[0];
2772 __m128i b = kernel.packet[1];
2773 __m128i c = kernel.packet[2];
2774 __m128i d = kernel.packet[3];
2775 __m128i e = kernel.packet[4];
2776 __m128i f = kernel.packet[5];
2777 __m128i g = kernel.packet[6];
2778 __m128i h = kernel.packet[7];
2780 __m128i a03b03 = _mm_unpacklo_epi16(a, b);
2781 __m128i c03d03 = _mm_unpacklo_epi16(c, d);
2782 __m128i e03f03 = _mm_unpacklo_epi16(e, f);
2783 __m128i g03h03 = _mm_unpacklo_epi16(g, h);
2784 __m128i a47b47 = _mm_unpackhi_epi16(a, b);
2785 __m128i c47d47 = _mm_unpackhi_epi16(c, d);
2786 __m128i e47f47 = _mm_unpackhi_epi16(e, f);
2787 __m128i g47h47 = _mm_unpackhi_epi16(g, h);
2789 __m128i a01b01c01d01 = _mm_unpacklo_epi32(a03b03, c03d03);
2790 __m128i a23b23c23d23 = _mm_unpackhi_epi32(a03b03, c03d03);
2791 __m128i e01f01g01h01 = _mm_unpacklo_epi32(e03f03, g03h03);
2792 __m128i e23f23g23h23 = _mm_unpackhi_epi32(e03f03, g03h03);
2793 __m128i a45b45c45d45 = _mm_unpacklo_epi32(a47b47, c47d47);
2794 __m128i a67b67c67d67 = _mm_unpackhi_epi32(a47b47, c47d47);
2795 __m128i e45f45g45h45 = _mm_unpacklo_epi32(e47f47, g47h47);
2796 __m128i e67f67g67h67 = _mm_unpackhi_epi32(e47f47, g47h47);
2798 kernel.packet[0] = _mm_unpacklo_epi64(a01b01c01d01, e01f01g01h01);
2799 kernel.packet[1] = _mm_unpackhi_epi64(a01b01c01d01, e01f01g01h01);
2800 kernel.packet[2] = _mm_unpacklo_epi64(a23b23c23d23, e23f23g23h23);
2801 kernel.packet[3] = _mm_unpackhi_epi64(a23b23c23d23, e23f23g23h23);
2802 kernel.packet[4] = _mm_unpacklo_epi64(a45b45c45d45, e45f45g45h45);
2803 kernel.packet[5] = _mm_unpackhi_epi64(a45b45c45d45, e45f45g45h45);
2804 kernel.packet[6] = _mm_unpacklo_epi64(a67b67c67d67, e67f67g67h67);
2805 kernel.packet[7] = _mm_unpackhi_epi64(a67b67c67d67, e67f67g67h67);
2808EIGEN_STRONG_INLINE
void ptranspose(PacketBlock<Packet8bf, 4>& kernel) {
2809 __m128i a = kernel.packet[0];
2810 __m128i b = kernel.packet[1];
2811 __m128i c = kernel.packet[2];
2812 __m128i d = kernel.packet[3];
2814 __m128i ab_03 = _mm_unpacklo_epi16(a, b);
2815 __m128i cd_03 = _mm_unpacklo_epi16(c, d);
2816 __m128i ab_47 = _mm_unpackhi_epi16(a, b);
2817 __m128i cd_47 = _mm_unpackhi_epi16(c, d);
2819 kernel.packet[0] = _mm_unpacklo_epi32(ab_03, cd_03);
2820 kernel.packet[1] = _mm_unpackhi_epi32(ab_03, cd_03);
2821 kernel.packet[2] = _mm_unpacklo_epi32(ab_47, cd_47);
2822 kernel.packet[3] = _mm_unpackhi_epi32(ab_47, cd_47);
2828inline __m128i segment_mask_4x8(
Index begin,
Index count) {
2829 eigen_assert(begin >= 0 && begin + count <= 4);
2831 mask <<= CHAR_BIT * count;
2833 mask <<= CHAR_BIT * begin;
2834#if !EIGEN_ARCH_x86_64
2835 return _mm_loadl_epi64(
reinterpret_cast<const __m128i*
>(&mask));
2837 return _mm_cvtsi64_si128(mask);
2842inline __m128i segment_mask_8x8(
Index begin,
Index count) {
2843 eigen_assert(begin >= 0 && begin + count <= 8);
2846 mask <<= (CHAR_BIT / 2) * count;
2847 mask <<= (CHAR_BIT / 2) * count;
2849 mask <<= CHAR_BIT * begin;
2850#if !EIGEN_ARCH_x86_64
2851 return _mm_loadl_epi64(
reinterpret_cast<const __m128i*
>(&mask));
2853 return _mm_cvtsi64_si128(mask);
2858inline __m128i segment_mask_4x32(
Index begin,
Index count) {
2859 eigen_assert(begin >= 0 && begin + count <= 4);
2860 return _mm_cvtepi8_epi32(segment_mask_4x8(begin, count));
2864inline __m128i segment_mask_2x64(
Index begin,
Index count) {
2865 eigen_assert(begin >= 0 && begin + count <= 2);
2866 return _mm_cvtepi8_epi64(segment_mask_4x8(begin, count));
2870inline __m256i segment_mask_8x32(
Index begin,
Index count) {
2871 __m128i mask_epi8 = segment_mask_8x8(begin, count);
2872#ifdef EIGEN_VECTORIZE_AVX2
2873 __m256i mask_epi32 = _mm256_cvtepi8_epi32(mask_epi8);
2875 __m128i mask_epi32_lo = _mm_cvtepi8_epi32(mask_epi8);
2876 __m128i mask_epi32_hi = _mm_cvtepi8_epi32(_mm_srli_epi64(mask_epi8, 32));
2877 __m256i mask_epi32 = _mm256_insertf128_si256(_mm256_castsi128_si256(mask_epi32_lo), mask_epi32_hi, 1);
2883inline __m256i segment_mask_4x64(
Index begin,
Index count) {
2884 __m128i mask_epi8 = segment_mask_4x8(begin, count);
2885#ifdef EIGEN_VECTORIZE_AVX2
2886 __m256i mask_epi64 = _mm256_cvtepi8_epi64(mask_epi8);
2888 __m128i mask_epi64_lo = _mm_cvtepi8_epi64(mask_epi8);
2889 __m128i mask_epi64_hi = _mm_cvtepi8_epi64(_mm_srli_epi64(mask_epi8, 16));
2890 __m256i mask_epi64 = _mm256_insertf128_si256(_mm256_castsi128_si256(mask_epi64_lo), mask_epi64_hi, 1);
2898struct has_packet_segment<Packet4f> : std::true_type {};
2901struct has_packet_segment<Packet8f> : std::true_type {};
2904inline Packet4f ploaduSegment<Packet4f>(
const float* from,
Index begin,
Index count) {
2905 return _mm_maskload_ps(from, segment_mask_4x32(begin, count));
2909inline void pstoreuSegment<float, Packet4f>(
float* to,
const Packet4f& from,
Index begin,
Index count) {
2910 _mm_maskstore_ps(to, segment_mask_4x32(begin, count), from);
2914inline Packet8f ploaduSegment<Packet8f>(
const float* from,
Index begin,
Index count) {
2915 return _mm256_maskload_ps(from, segment_mask_8x32(begin, count));
2919inline void pstoreuSegment<float, Packet8f>(
float* to,
const Packet8f& from,
Index begin,
Index count) {
2920 _mm256_maskstore_ps(to, segment_mask_8x32(begin, count), from);
2926struct has_packet_segment<Packet4i> : std::true_type {};
2929struct has_packet_segment<Packet8i> : std::true_type {};
2931#ifdef EIGEN_VECTORIZE_AVX2
2934inline Packet4i ploaduSegment<Packet4i>(
const int* from,
Index begin,
Index count) {
2935 return _mm_maskload_epi32(from, segment_mask_4x32(begin, count));
2939inline void pstoreuSegment<int, Packet4i>(
int* to,
const Packet4i& from,
Index begin,
Index count) {
2940 _mm_maskstore_epi32(to, segment_mask_4x32(begin, count), from);
2944inline Packet8i ploaduSegment<Packet8i>(
const int* from,
Index begin,
Index count) {
2945 return _mm256_maskload_epi32(from, segment_mask_8x32(begin, count));
2949inline void pstoreuSegment<int, Packet8i>(
int* to,
const Packet8i& from,
Index begin,
Index count) {
2950 _mm256_maskstore_epi32(to, segment_mask_8x32(begin, count), from);
2956inline Packet4i ploaduSegment<Packet4i>(
const int* from,
Index begin,
Index count) {
2957 return _mm_castps_si128(ploaduSegment<Packet4f>(
reinterpret_cast<const float*
>(from), begin, count));
2961inline void pstoreuSegment<int, Packet4i>(
int* to,
const Packet4i& from,
Index begin,
Index count) {
2962 pstoreuSegment<float, Packet4f>(
reinterpret_cast<float*
>(to), _mm_castsi128_ps(from), begin, count);
2966inline Packet8i ploaduSegment<Packet8i>(
const int* from,
Index begin,
Index count) {
2967 return _mm256_castps_si256(ploaduSegment<Packet8f>(
reinterpret_cast<const float*
>(from), begin, count));
2971inline void pstoreuSegment<int, Packet8i>(
int* to,
const Packet8i& from,
Index begin,
Index count) {
2972 pstoreuSegment<float, Packet8f>(
reinterpret_cast<float*
>(to), _mm256_castsi256_ps(from), begin, count);
2980struct has_packet_segment<Packet4ui> : std::true_type {};
2983struct has_packet_segment<Packet8ui> : std::true_type {};
2986inline Packet4ui ploaduSegment<Packet4ui>(
const uint32_t* from,
Index begin,
Index count) {
2987 return Packet4ui(ploaduSegment<Packet4i>(
reinterpret_cast<const int*
>(from), begin, count));
2991inline void pstoreuSegment<uint32_t, Packet4ui>(uint32_t* to,
const Packet4ui& from,
Index begin,
Index count) {
2992 pstoreuSegment<int, Packet4i>(
reinterpret_cast<int*
>(to), Packet4i(from), begin, count);
2996inline Packet8ui ploaduSegment<Packet8ui>(
const uint32_t* from,
Index begin,
Index count) {
2997 return Packet8ui(ploaduSegment<Packet8i>(
reinterpret_cast<const int*
>(from), begin, count));
3001inline void pstoreuSegment<uint32_t, Packet8ui>(uint32_t* to,
const Packet8ui& from,
Index begin,
Index count) {
3002 pstoreuSegment<int, Packet8i>(
reinterpret_cast<int*
>(to), Packet8i(from), begin, count);
3008struct has_packet_segment<Packet2d> : std::true_type {};
3011struct has_packet_segment<Packet4d> : std::true_type {};
3014inline Packet2d ploaduSegment<Packet2d>(
const double* from,
Index begin,
Index count) {
3015 return _mm_maskload_pd(from, segment_mask_2x64(begin, count));
3019inline void pstoreuSegment<double, Packet2d>(
double* to,
const Packet2d& from,
Index begin,
Index count) {
3020 _mm_maskstore_pd(to, segment_mask_2x64(begin, count), from);
3024inline Packet4d ploaduSegment<Packet4d>(
const double* from,
Index begin,
Index count) {
3025 return _mm256_maskload_pd(from, segment_mask_4x64(begin, count));
3029inline void pstoreuSegment<double, Packet4d>(
double* to,
const Packet4d& from,
Index begin,
Index count) {
3030 _mm256_maskstore_pd(to, segment_mask_4x64(begin, count), from);
3033#ifdef EIGEN_VECTORIZE_AVX2
3038struct has_packet_segment<Packet2l> : std::true_type {};
3041struct has_packet_segment<Packet4l> : std::true_type {};
3044inline Packet2l ploaduSegment<Packet2l>(
const int64_t* from,
Index begin,
Index count) {
3045 return _mm_maskload_epi64(
reinterpret_cast<const long long*
>(from), segment_mask_2x64(begin, count));
3048inline void pstoreuSegment<int64_t, Packet2l>(int64_t* to,
const Packet2l& from,
Index begin,
Index count) {
3049 _mm_maskstore_epi64(
reinterpret_cast<long long*
>(to), segment_mask_2x64(begin, count), from);
3052inline Packet4l ploaduSegment<Packet4l>(
const int64_t* from,
Index begin,
Index count) {
3053 return _mm256_maskload_epi64(
reinterpret_cast<const long long*
>(from), segment_mask_4x64(begin, count));
3056inline void pstoreuSegment<int64_t, Packet4l>(int64_t* to,
const Packet4l& from,
Index begin,
Index count) {
3057 _mm256_maskstore_epi64(
reinterpret_cast<long long*
>(to), segment_mask_4x64(begin, count), from);
3063struct has_packet_segment<Packet4ul> : std::true_type {};
3066inline Packet4ul ploaduSegment<Packet4ul>(
const uint64_t* from,
Index begin,
Index count) {
3067 return Packet4ul(ploaduSegment<Packet4l>(
reinterpret_cast<const int64_t*
>(from), begin, count));
3070inline void pstoreuSegment<uint64_t, Packet4ul>(uint64_t* to,
const Packet4ul& from,
Index begin,
Index count) {
3071 pstoreuSegment<int64_t, Packet4l>(
reinterpret_cast<int64_t*
>(to), Packet4l(from), begin, count);
@ Aligned32
Definition Constants.h:238
@ Aligned16
Definition Constants.h:237
Namespace containing all symbols from the Eigen library.
Definition B01_Experimental.dox:1
EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
The Index type as used for the API.
Definition Meta.h:82