#ifndef EIGEN_GENERAL_BLOCK_PANEL_H
#define EIGEN_GENERAL_BLOCK_PANEL_H

namespace Eigen {

namespace internal {
template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs=false, bool _ConjRhs=false>
class gebp_traits;
/** \internal \returns b if a<=0, and returns a otherwise. */
inline std::ptrdiff_t manage_caching_sizes_helper(std::ptrdiff_t a, std::ptrdiff_t b)
{
  return a<=0 ? b : a;
}
#if EIGEN_ARCH_i386_OR_x86_64
const std::ptrdiff_t defaultL1CacheSize = 32*1024;
const std::ptrdiff_t defaultL2CacheSize = 256*1024;
const std::ptrdiff_t defaultL3CacheSize = 2*1024*1024;
#else
const std::ptrdiff_t defaultL1CacheSize = 16*1024;
const std::ptrdiff_t defaultL2CacheSize = 512*1024;
const std::ptrdiff_t defaultL3CacheSize = 512*1024;
#endif
/** \internal */
struct CacheSizes {
  CacheSizes(): m_l1(-1),m_l2(-1),m_l3(-1) {
    int l1CacheSize, l2CacheSize, l3CacheSize;
    queryCacheSizes(l1CacheSize, l2CacheSize, l3CacheSize);
    m_l1 = manage_caching_sizes_helper(l1CacheSize, defaultL1CacheSize);
    m_l2 = manage_caching_sizes_helper(l2CacheSize, defaultL2CacheSize);
    m_l3 = manage_caching_sizes_helper(l3CacheSize, defaultL3CacheSize);
  }

  std::ptrdiff_t m_l1;
  std::ptrdiff_t m_l2;
  std::ptrdiff_t m_l3;
};
/** \internal */
inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1, std::ptrdiff_t* l2, std::ptrdiff_t* l3)
{
  static CacheSizes m_cacheSizes;

  if(action==SetAction)
  {
    // set the cpu cache sizes
    eigen_internal_assert(l1!=0 && l2!=0);
    m_cacheSizes.m_l1 = *l1;
    m_cacheSizes.m_l2 = *l2;
    m_cacheSizes.m_l3 = *l3;
  }
  else if(action==GetAction)
  {
    eigen_internal_assert(l1!=0 && l2!=0);
    *l1 = m_cacheSizes.m_l1;
    *l2 = m_cacheSizes.m_l2;
    *l3 = m_cacheSizes.m_l3;
  }
  else
  {
    eigen_internal_assert(false);
  }
}
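// Usage sketch (illustrative only): the public accessors defined at the end of this file
// forward here, so querying the cached L1 size amounts to:
//
//   std::ptrdiff_t l1, l2, l3;
//   internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);  // l1/l2/l3 now hold sizes in bytes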
/** \brief Computes the blocking parameters for a m x k times k x n matrix product. */
template<typename LhsScalar, typename RhsScalar, int KcFactor, typename Index>
void evaluateProductBlockingSizesHeuristic(Index& k, Index& m, Index& n, Index num_threads = 1)
{
  typedef gebp_traits<LhsScalar,RhsScalar> Traits;

  // Explanations:
  // Let's recall that the product algorithms form mc x kc vertical panels A' on the lhs and
  // kc x nc blocks B' on the rhs. B' has to fit into L2/L3 cache. Moreover, A' is processed
  // per mr x kc horizontal small panels where mr is the blocking size along the m dimension
  // at the register level. This small horizontal panel has to stay within L1 cache.
  std::ptrdiff_t l1, l2, l3;
  manage_caching_sizes(GetAction, &l1, &l2, &l3);
  if (num_threads > 1) {
    typedef typename Traits::ResScalar ResScalar;
    enum {
      kdiv = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)),
      ksub = Traits::mr * Traits::nr * sizeof(ResScalar),
      kr = 8,
      mr = Traits::mr,
      nr = Traits::nr
    };
    // Increasing k gives us more time to prefetch the content of the "C" registers.
    // However, once the latency is hidden there is no point in increasing the value
    // of k, so we cap it at 320 (value determined experimentally).
    const Index k_cache = numext::maxi<Index>(kr, (numext::mini<Index>)((l1-ksub)/kdiv, 320));
    if (k_cache < k) {
      k = k_cache - (k_cache % kr);
      eigen_internal_assert(k > 0);
    }
    const Index n_cache = (l2-l1) / (nr * sizeof(RhsScalar) * k);
    const Index n_per_thread = numext::div_ceil(n, num_threads);
    if (n_cache <= n_per_thread) {
      // Don't exceed the capacity of the l2 cache.
      eigen_internal_assert(n_cache >= static_cast<Index>(nr));
      n = n_cache - (n_cache % nr);
      eigen_internal_assert(n > 0);
    } else {
      n = (numext::mini<Index>)(n, (n_per_thread + nr - 1) - ((n_per_thread + nr - 1) % nr));
    }
    if (l3 > l2) {
      // l3 is shared between all cores, so we'll give each thread its own chunk of l3.
      const Index m_cache = (l3-l2) / (sizeof(LhsScalar) * k * num_threads);
      const Index m_per_thread = numext::div_ceil(m, num_threads);
      if(m_cache < m_per_thread && m_cache >= static_cast<Index>(mr)) {
        m = m_cache - (m_cache % mr);
        eigen_internal_assert(m > 0);
      } else {
        m = (numext::mini<Index>)(m, (m_per_thread + mr - 1) - ((m_per_thread + mr - 1) % mr));
      }
    }
  }
  else {
    // In unit tests we do not want to use extra large matrices,
    // so we reduce the cache size to check the blocking strategy is not flawed
#ifdef EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS
    l1 = 9*1024;
    l2 = 32*1024;
    l3 = 512*1024;
#endif

    // Early return for small problems because the computations below are time consuming
    // for small problems. Note that for very tiny problems, this function should be
    // bypassed anyway because we use the coefficient-based implementation for them.
    if((numext::maxi)(k,(numext::maxi)(m,n))<48)
      return;
    typedef typename Traits::ResScalar ResScalar;
    enum {
      k_peeling = 8,
      k_div = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)),
      k_sub = Traits::mr * Traits::nr * sizeof(ResScalar)
    };

    // ---- 1st level of blocking on L1, yields kc ----
    // The blocking on the third dimension (i.e., k) is chosen so that an mr x kc horizontal
    // panel of the lhs fits in L1 together with a kc x nr vertical panel of the rhs; kc is
    // also rounded down to a multiple of the register-level peeling factor k_peeling.
    const Index max_kc = numext::maxi<Index>(((l1-k_sub)/k_div) & (~(k_peeling-1)),1);
    const Index old_k = k;
    if(k>max_kc)
    {
      // We are really blocking on the third dimension:
      // -> reduce blocking size to make sure the last block is as large as possible
      //    while keeping the same number of sweeps over the result.
      k = (k%max_kc)==0 ? max_kc
                        : max_kc - k_peeling * ((max_kc-1-(k%max_kc))/(k_peeling*(k/max_kc+1)));

      eigen_internal_assert(((old_k/k) == (old_k/max_kc)) && "the number of sweeps has to remain the same");
    }
    // ---- 2nd level of blocking on max(L2,L3), yields nc ----
    #ifdef EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS
    const Index actual_l2 = l3;
    #else
    const Index actual_l2 = 1572864; // == 1.5 MB
    #endif
    // Here, nc is chosen so that a kc x nc block of the rhs fits within half of L2.
    // The second half is implicitly reserved to access the result and lhs coefficients.
    // When k<max_kc, nc could arbitrarily grow; in practice, it seems to be fruitful to
    // limit it to the L1 space that remains after the mr x k block of the lhs.
    Index max_nc;
    const Index lhs_bytes = m * k * sizeof(LhsScalar);
    const Index remaining_l1 = l1 - k_sub - lhs_bytes;
    if(remaining_l1 >= Index(Traits::nr*sizeof(RhsScalar))*k)
    {
      // L1 blocking
      max_nc = remaining_l1 / (k*sizeof(RhsScalar));
    }
    else
    {
      // L2 blocking
      max_nc = (3*actual_l2)/(2*2*max_kc*sizeof(RhsScalar));
    }
    // WARNING Below, we assume that Traits::nr is a power of two.
    Index nc = numext::mini<Index>(actual_l2/(2*k*sizeof(RhsScalar)), max_nc) & (~(Traits::nr-1));
    if(n>nc)
    {
      // We are really blocking over the columns:
      // -> reduce blocking size to make sure the last block is as large as possible
      //    while keeping the same number of sweeps over the packed lhs.
      n = (n%nc)==0 ? nc
                    : (nc - Traits::nr * ((nc-(n%nc))/(Traits::nr*(n/nc+1))));
    }
    else if(old_k==k)
    {
      // So far, no blocking at all, i.e., kc==k and nc==n.
      // In this case, let's perform a blocking over the rows such that the packed lhs data
      // is kept in cache L1/L2.
      Index problem_size = k*n*sizeof(LhsScalar);
      Index actual_lm = actual_l2;
      Index max_mc = m;
      if(problem_size<=1024)
      {
        // problem is small enough to keep in L1
        // Let's choose m such that the lhs block fits in 1/3 of L1
        actual_lm = l1;
      }
      else if(l3!=0 && problem_size<=32768)
      {
        // we have both L2 and L3, and the problem is small enough to be kept in L2
        // Let's choose m such that the lhs block fits in 1/3 of L2
        actual_lm = l2;
        max_mc = (numext::mini<Index>)(576,max_mc);
      }
      Index mc = (numext::mini<Index>)(actual_lm/(3*k*sizeof(LhsScalar)), max_mc);
      if (mc > Traits::mr) mc -= mc % Traits::mr;
      else if (mc==0) return;
      m = (m%mc)==0 ? mc
                    : (mc - Traits::mr * ((mc-(m%mc))/(Traits::mr*(m/mc+1))));
    }
  }
}
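// Illustrative numbers only (actual values depend on the target's packet sizes and caches):
// for float with Traits::mr==8, Traits::nr==4 and KcFactor==1 on a 32 KB L1 cache,
//   k_sub  = 8*4*4              = 128 bytes
//   k_div  = 1*(8*4 + 4*4)      = 48 bytes per unit of depth
//   max_kc = ((32768-128)/48) & ~7 = 680
// i.e., the depth is blocked in chunks of at most 680 so that one mr x kc lhs panel
// plus one kc x nr rhs panel stay resident in L1.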
template <typename Index>
inline bool useSpecificBlockingSizes(Index& k, Index& m, Index& n)
{
#ifdef EIGEN_TEST_SPECIFIC_BLOCKING_SIZES
  if (EIGEN_TEST_SPECIFIC_BLOCKING_SIZES) {
    k = numext::mini<Index>(k, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_K);
    m = numext::mini<Index>(m, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_M);
    n = numext::mini<Index>(n, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_N);
    return true;
  }
#else
  EIGEN_UNUSED_VARIABLE(k)
  EIGEN_UNUSED_VARIABLE(m)
  EIGEN_UNUSED_VARIABLE(n)
#endif
  return false;
}
/** \brief Computes the blocking parameters for a m x k times k x n matrix product.
  *
  * \param[in,out] k Input: the third dimension of the product. Output: the blocking size along the same dimension.
  * \param[in,out] m Input: the number of rows of the left hand side. Output: the blocking size along the same dimension.
  * \param[in,out] n Input: the number of columns of the right hand side. Output: the blocking size along the same dimension.
  *
  * The blocking sizes are selected such that a mr x kc horizontal panel of the lhs fits in L1
  * while a kc x nc block of the rhs fits within L2/L3, see evaluateProductBlockingSizesHeuristic().
  *
  * \sa setCpuCacheSizes */
template<typename LhsScalar, typename RhsScalar, int KcFactor, typename Index>
void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_threads = 1)
{
  if (!useSpecificBlockingSizes(k, m, n)) {
    evaluateProductBlockingSizesHeuristic<LhsScalar, RhsScalar, KcFactor, Index>(k, m, n, num_threads);
  }
}
template<typename LhsScalar, typename RhsScalar, typename Index>
inline void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_threads = 1)
{
  computeProductBlockingSizes<LhsScalar,RhsScalar,1,Index>(k, m, n, num_threads);
}
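// Usage sketch (illustrative only, with hypothetical local variable names):
//
//   Index kc = depth, mc = rows, nc = cols;
//   internal::computeProductBlockingSizes<float,float>(kc, mc, nc, Index(1));
//   // kc, mc and nc now hold the panel sizes consumed by gemm_pack_lhs,
//   // gemm_pack_rhs and gebp_kernel below.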
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
  #define CJMADD(CJ,A,B,C,T)  C = CJ.pmadd(A,B,C);
#else

  template<typename CJ, typename A, typename B, typename C, typename T> struct gebp_madd_selector {
    EIGEN_ALWAYS_INLINE static void run(const CJ& cj, A& a, B& b, C& c, T& /*t*/)
    {
      c = cj.pmadd(a,b,c);
    }
  };

  template<typename CJ, typename T> struct gebp_madd_selector<CJ,T,T,T,T> {
    EIGEN_ALWAYS_INLINE static void run(const CJ& cj, T& a, T& b, T& c, T& t)
    {
      t = b; t = cj.pmul(a,t); c = padd(c,t);
    }
  };

  template<typename CJ, typename A, typename B, typename C, typename T>
  EIGEN_STRONG_INLINE void gebp_madd(const CJ& cj, A& a, B& b, C& c, T& t)
  {
    gebp_madd_selector<CJ,A,B,C,T>::run(cj,a,b,c,t);
  }

  #define CJMADD(CJ,A,B,C,T)  gebp_madd(CJ,A,B,C,T);
#endif
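// For instance, CJMADD(cj,A0,B_0,C0,T0) performs C0 += A0*B_0 (with conjugation applied by cj
// where requested), using T0 as a scratch value on targets without a fused multiply-add.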
template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs, bool _ConjRhs>
class gebp_traits
{
public:
  typedef _LhsScalar LhsScalar;
  typedef _RhsScalar RhsScalar;
  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  enum {
    ConjLhs = _ConjLhs,
    ConjRhs = _ConjRhs,
    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,

    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,

    // register block size along the N direction must be 1 or 4
    nr = 4,

    // register block size along the M direction
    default_mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*LhsPacketSize,
#if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX)
    // we assume 16 registers
    mr = Vectorizable ? 3*LhsPacketSize : default_mr,
#else
    mr = default_mr,
#endif

    LhsProgress = LhsPacketSize,
    RhsProgress = 1
  };

  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
  typedef typename packet_traits<ResScalar>::type  _ResPacket;

  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  typedef ResPacket AccPacket;

  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
  {
    p = pset1<ResPacket>(ResScalar(0));
  }

  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    pbroadcast4(b, b0, b1, b2, b3);
  }

  template<typename RhsPacketType>
  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketType& dest) const
  {
    dest = pset1<RhsPacketType>(*b);
  }

  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = ploadquad<RhsPacket>(b);
  }

  template<typename LhsPacketType>
  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacketType& dest) const
  {
    dest = pload<LhsPacketType>(a);
  }

  template<typename LhsPacketType>
  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacketType& dest) const
  {
    dest = ploadu<LhsPacketType>(a);
  }

  template<typename LhsPacketType, typename RhsPacketType, typename AccPacketType>
  EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketType& b, AccPacketType& c, AccPacketType& tmp) const
  {
    conj_helper<LhsPacketType,RhsPacketType,ConjLhs,ConjRhs> cj;
    // It would be a lot cleaner to call pmadd all the time. Unfortunately if we
    // let gcc allocate the register in which to store the result of the pmul
    // (in the case there is no FMA), gcc fails to figure out how to avoid
    // spilling register.
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
    EIGEN_UNUSED_VARIABLE(tmp);
    c = cj.pmadd(a,b,c);
#else
    tmp = b; tmp = cj.pmul(a,tmp); c = padd(c,tmp);
#endif
  }

  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
  {
    r = pmadd(c,alpha,r);
  }

  template<typename ResPacketHalf>
  EIGEN_STRONG_INLINE void acc(const ResPacketHalf& c, const ResPacketHalf& alpha, ResPacketHalf& r) const
  {
    r = pmadd(c,alpha,r);
  }
};
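// Note: madd() computes c += a*b lane-wise. On targets with a fused multiply-add it maps to a
// single pmadd; otherwise the product is first formed in the caller-provided scratch packet,
// which is why every call site of madd() passes an explicit temporary.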
template<typename RealScalar, bool _ConjLhs>
class gebp_traits<std::complex<RealScalar>, RealScalar, _ConjLhs, false>
{
public:
  typedef std::complex<RealScalar> LhsScalar;
  typedef RealScalar RhsScalar;
  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  enum {
    ConjLhs = _ConjLhs,
    ConjRhs = false,
    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,

    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
    nr = 4,
#if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX)
    // we assume 16 registers
    mr = 3*LhsPacketSize,
#else
    mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*LhsPacketSize,
#endif

    LhsProgress = LhsPacketSize,
    RhsProgress = 1
  };

  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
  typedef typename packet_traits<ResScalar>::type  _ResPacket;

  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  typedef ResPacket AccPacket;

  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
  {
    p = pset1<ResPacket>(ResScalar(0));
  }

  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = pset1<RhsPacket>(*b);
  }

  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = pset1<RhsPacket>(*b);
  }

  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = pload<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploadu<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    pbroadcast4(b, b0, b1, b2, b3);
  }

  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
  {
    madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
  {
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
    EIGEN_UNUSED_VARIABLE(tmp);
    c.v = pmadd(a.v,b,c.v);
#else
    tmp = b; tmp = pmul(a.v,tmp); c.v = padd(c.v,tmp);
#endif
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
  {
    c += a * b;
  }

  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
  {
    r = cj.pmadd(c,alpha,r);
  }

protected:
  conj_helper<ResPacket,ResPacket,ConjLhs,false> cj;
};
template<typename Packet>
struct DoublePacket
{
  Packet first;
  Packet second;
};

template<typename Packet>
DoublePacket<Packet> padd(const DoublePacket<Packet> &a, const DoublePacket<Packet> &b)
{
  DoublePacket<Packet> res;
  res.first  = padd(a.first, b.first);
  res.second = padd(a.second,b.second);
  return res;
}

template<typename Packet>
const DoublePacket<Packet>& predux_downto4(const DoublePacket<Packet> &a)
{
  return a;
}

template<typename Packet> struct unpacket_traits<DoublePacket<Packet> > { typedef DoublePacket<Packet> half; };
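// A DoublePacket keeps the real parts of a set of complex coefficients in 'first' and the
// matching imaginary parts in 'second' (see the complex loadRhs() below, which splats
// numext::real(*b) and numext::imag(*b)). Keeping the halves separate lets madd() use plain
// real multiplies; acc() recombines them into a true complex packet only at the very end.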
template<typename RealScalar, bool _ConjLhs, bool _ConjRhs>
class gebp_traits<std::complex<RealScalar>, std::complex<RealScalar>, _ConjLhs, _ConjRhs >
{
public:
  typedef std::complex<RealScalar>  Scalar;
  typedef std::complex<RealScalar>  LhsScalar;
  typedef std::complex<RealScalar>  RhsScalar;
  typedef std::complex<RealScalar>  ResScalar;

  enum {
    ConjLhs = _ConjLhs,
    ConjRhs = _ConjRhs,
    Vectorizable = packet_traits<RealScalar>::Vectorizable
                && packet_traits<Scalar>::Vectorizable,
    RealPacketSize  = Vectorizable ? packet_traits<RealScalar>::size : 1,
    ResPacketSize   = Vectorizable ? packet_traits<ResScalar>::size : 1,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,

    nr = 4,
    mr = ResPacketSize,

    LhsProgress = ResPacketSize,
    RhsProgress = 1
  };

  typedef typename packet_traits<RealScalar>::type RealPacket;
  typedef typename packet_traits<Scalar>::type     ScalarPacket;
  typedef DoublePacket<RealPacket> DoublePacketType;

  typedef typename conditional<Vectorizable,RealPacket,      Scalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,DoublePacketType,Scalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,ScalarPacket,    Scalar>::type ResPacket;
  typedef typename conditional<Vectorizable,DoublePacketType,Scalar>::type AccPacket;

  EIGEN_STRONG_INLINE void initAcc(Scalar& p) { p = Scalar(0); }

  EIGEN_STRONG_INLINE void initAcc(DoublePacketType& p)
  {
    p.first  = pset1<RealPacket>(RealScalar(0));
    p.second = pset1<RealPacket>(RealScalar(0));
  }

  // Scalar path
  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, ResPacket& dest) const
  {
    dest = pset1<ResPacket>(*b);
  }

  // Vectorized path
  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, DoublePacketType& dest) const
  {
    dest.first  = pset1<RealPacket>(numext::real(*b));
    dest.second = pset1<RealPacket>(numext::imag(*b));
  }

  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, ResPacket& dest) const
  {
    loadRhs(b,dest);
  }
  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, DoublePacketType& dest) const
  {
    eigen_internal_assert(unpacket_traits<ScalarPacket>::size<=4);
    loadRhs(b,dest);
  }

  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    loadRhs(b+0, b0);
    loadRhs(b+1, b1);
    loadRhs(b+2, b2);
    loadRhs(b+3, b3);
  }

  // Vectorized path
  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, DoublePacketType& b0, DoublePacketType& b1)
  {
    loadRhs(b+0, b0);
    loadRhs(b+1, b1);
  }

  // Scalar path
  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsScalar& b0, RhsScalar& b1)
  {
    loadRhs(b+0, b0);
    loadRhs(b+1, b1);
  }

  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = pload<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a));
  }

  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploadu<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a));
  }

  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, DoublePacketType& c, RhsPacket& /*tmp*/) const
  {
    c.first  = padd(pmul(a,b.first), c.first);
    c.second = padd(pmul(a,b.second),c.second);
  }

  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, ResPacket& c, RhsPacket& /*tmp*/) const
  {
    c = cj.pmadd(a,b,c);
  }

  EIGEN_STRONG_INLINE void acc(const Scalar& c, const Scalar& alpha, Scalar& r) const { r += alpha * c; }

  EIGEN_STRONG_INLINE void acc(const DoublePacketType& c, const ResPacket& alpha, ResPacket& r) const
  {
    // assemble c
    ResPacket tmp;
    if((!ConjLhs)&&(!ConjRhs))
    {
      tmp = pcplxflip(pconj(ResPacket(c.second)));
      tmp = padd(ResPacket(c.first),tmp);
    }
    else if((!ConjLhs)&&(ConjRhs))
    {
      tmp = pconj(pcplxflip(ResPacket(c.second)));
      tmp = padd(ResPacket(c.first),tmp);
    }
    else if((ConjLhs)&&(!ConjRhs))
    {
      tmp = pcplxflip(ResPacket(c.second));
      tmp = padd(pconj(ResPacket(c.first)),tmp);
    }
    else if((ConjLhs)&&(ConjRhs))
    {
      tmp = pcplxflip(ResPacket(c.second));
      tmp = psub(pconj(ResPacket(c.first)),tmp);
    }

    r = pmadd(tmp,alpha,r);
  }

protected:
  conj_helper<LhsScalar,RhsScalar,ConjLhs,ConjRhs> cj;
};
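// Recombination math used by acc() above (no-conjugation case), per complex coefficient:
//   c.first  = (a_r*b_r, a_i*b_r)   -- lhs packet times the splatted real part of b
//   c.second = (a_r*b_i, a_i*b_i)   -- lhs packet times the splatted imaginary part of b
//   pcplxflip(pconj(c.second)) = pcplxflip(a_r*b_i, -a_i*b_i) = (-a_i*b_i, a_r*b_i)
// and adding c.first yields (a_r*b_r - a_i*b_i, a_i*b_r + a_r*b_i), i.e., exactly a*b.
// The three conjugated variants differ only in where the sign flips are applied.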
template<typename RealScalar, bool _ConjRhs>
class gebp_traits<RealScalar, std::complex<RealScalar>, false, _ConjRhs >
{
public:
  typedef std::complex<RealScalar>  Scalar;
  typedef RealScalar  LhsScalar;
  typedef Scalar      RhsScalar;
  typedef Scalar      ResScalar;

  enum {
    ConjLhs = false,
    ConjRhs = _ConjRhs,
    Vectorizable = packet_traits<RealScalar>::Vectorizable
                && packet_traits<Scalar>::Vectorizable,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,

    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
    nr = 4,
    mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*ResPacketSize,

    LhsProgress = ResPacketSize,
    RhsProgress = 1
  };

  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
  typedef typename packet_traits<ResScalar>::type  _ResPacket;

  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  typedef ResPacket AccPacket;

  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
  {
    p = pset1<ResPacket>(ResScalar(0));
  }

  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = pset1<RhsPacket>(*b);
  }

  void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    pbroadcast4(b, b0, b1, b2, b3);
  }

  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploaddup<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
  {
    eigen_internal_assert(unpacket_traits<RhsPacket>::size<=4);
    loadRhs(b,dest);
  }

  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploaddup<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
  {
    madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
  {
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
    EIGEN_UNUSED_VARIABLE(tmp);
    c.v = pmadd(a,b.v,c.v);
#else
    tmp = b; tmp.v = pmul(a,tmp.v); c = padd(c,tmp);
#endif
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
  {
    c += a * b;
  }

  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
  {
    r = cj.pmadd(alpha,c,r);
  }

protected:
  conj_helper<ResPacket,ResPacket,false,ConjRhs> cj;
};
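// In this real-lhs times complex-rhs case, ploaddup() loads the real lhs coefficients
// duplicated, e.g. (a0, a0, a1, a1), so that a single pmul against the interleaved (re,im)
// layout of the complex rhs packet yields (a0*b0_r, a0*b0_i, a1*b1_r, a1*b1_i),
// i.e., (a0*b0, a1*b1).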
/* optimized General packed Block * packed Panel product kernel */
template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
struct gebp_kernel
{
  typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> Traits;
  typedef typename Traits::ResScalar ResScalar;
  typedef typename Traits::LhsPacket LhsPacket;
  typedef typename Traits::RhsPacket RhsPacket;
  typedef typename Traits::ResPacket ResPacket;
  typedef typename Traits::AccPacket AccPacket;

  typedef gebp_traits<RhsScalar,LhsScalar,ConjugateRhs,ConjugateLhs> SwappedTraits;
  typedef typename SwappedTraits::ResScalar SResScalar;
  typedef typename SwappedTraits::LhsPacket SLhsPacket;
  typedef typename SwappedTraits::RhsPacket SRhsPacket;
  typedef typename SwappedTraits::ResPacket SResPacket;
  typedef typename SwappedTraits::AccPacket SAccPacket;

  typedef typename DataMapper::LinearMapper LinearMapper;

  enum {
    Vectorizable  = Traits::Vectorizable,
    LhsProgress   = Traits::LhsProgress,
    RhsProgress   = Traits::RhsProgress,
    ResPacketSize = Traits::ResPacketSize
  };

  EIGEN_DONT_INLINE
  void operator()(const DataMapper& res, const LhsScalar* blockA, const RhsScalar* blockB,
                  Index rows, Index depth, Index cols, ResScalar alpha,
                  Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0);
};
template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
EIGEN_DONT_INLINE
void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,ConjugateRhs>
  ::operator()(const DataMapper& res, const LhsScalar* blockA, const RhsScalar* blockB,
               Index rows, Index depth, Index cols, ResScalar alpha,
               Index strideA, Index strideB, Index offsetA, Index offsetB)
{
  Traits traits;
  SwappedTraits straits;

  if(strideA==-1) strideA = depth;
  if(strideB==-1) strideB = depth;
  conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
  Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
  const Index peeled_mc3 = mr>=3*Traits::LhsProgress ? (rows/(3*LhsProgress))*(3*LhsProgress) : 0;
  const Index peeled_mc2 = mr>=2*Traits::LhsProgress ? peeled_mc3+((rows-peeled_mc3)/(2*LhsProgress))*(2*LhsProgress) : 0;
  const Index peeled_mc1 = mr>=1*Traits::LhsProgress ? (rows/(1*LhsProgress))*(1*LhsProgress) : 0;
  enum { pk = 8 }; // peeling factor along the depth dimension
  const Index peeled_kc  = depth & ~(pk-1);
  const Index prefetch_res_offset = 32/sizeof(ResScalar);
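  // The kernel below proceeds through up to four row tiers: panels of 3*LhsProgress rows,
  // then 2*LhsProgress, then 1*LhsProgress, and finally the remaining rows one at a time.
  // Within each tier, the columns of the rhs are visited by groups of nr (=4) and then one
  // by one; the depth loop is peeled by steps of pk (=8).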
  //---------- Process 3 * LhsProgress rows at once ----------
  if(mr>=3*Traits::LhsProgress)
  {
    // The general idea is to loop on each largest micro horizontal panel of the lhs
    // (3*Traits::LhsProgress x depth) and on each largest micro vertical panel of the rhs
    // (depth x nr); grouping several such panels improves reuse of the packed rhs.
    const Index l1 = defaultL1CacheSize; // in bytes; TODO, l1 should be passed to this function.
    // The max(1, ...) here is needed because we may be using blocking params larger than
    // what our known l1 cache size suggests we should be using: either because our known l1
    // cache size is inaccurate (e.g., on Android, we can only guess), or because we are
    // testing specific blocking sizes.
    const Index actual_panel_rows = (3*LhsProgress) * std::max<Index>(1,( (l1 - sizeof(ResScalar)*mr*nr - depth*nr*sizeof(RhsScalar)) / (depth * sizeof(LhsScalar) * 3*LhsProgress) ));
    for(Index i1=0; i1<peeled_mc3; i1+=actual_panel_rows)
    {
      const Index actual_panel_end = (std::min)(i1+actual_panel_rows, peeled_mc3);
      for(Index j2=0; j2<packet_cols4; j2+=nr)
      {
        for(Index i=i1; i<actual_panel_end; i+=3*LhsProgress)
        {
          // We selected a 3*Traits::LhsProgress x nr micro block of res which is entirely
          // stored into 3 x nr registers.

          const LhsScalar* blA = &blockA[i*strideA+offsetA*(3*LhsProgress)];
          prefetch(&blA[0]);

          // gets res block as register
          AccPacket C0, C1, C2,  C3,
                    C4, C5, C6,  C7,
                    C8, C9, C10, C11;
          traits.initAcc(C0); traits.initAcc(C1); traits.initAcc(C2);  traits.initAcc(C3);
          traits.initAcc(C4); traits.initAcc(C5); traits.initAcc(C6);  traits.initAcc(C7);
          traits.initAcc(C8); traits.initAcc(C9); traits.initAcc(C10); traits.initAcc(C11);

          LinearMapper r0 = res.getLinearMapper(i, j2 + 0);
          LinearMapper r1 = res.getLinearMapper(i, j2 + 1);
          LinearMapper r2 = res.getLinearMapper(i, j2 + 2);
          LinearMapper r3 = res.getLinearMapper(i, j2 + 3);

          r0.prefetch(0);
          r1.prefetch(0);
          r2.prefetch(0);
          r3.prefetch(0);

          // performs "inner" products
          const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
          prefetch(&blB[0]);
          LhsPacket A0, A1;
          for(Index k=0; k<peeled_kc; k+=pk)
          {
            EIGEN_ASM_COMMENT("begin gebp micro kernel 3pX4");
            // 15 registers are taken (12 for acc, 2 for lhs).
            RhsPacket B_0, T0;
            LhsPacket A2;

#define EIGEN_GEBP_ONESTEP(K) \
            do { \
              EIGEN_ASM_COMMENT("begin step of gebp micro kernel 3pX4"); \
              EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
              internal::prefetch(blA+(3*K+16)*LhsProgress); \
              if (EIGEN_ARCH_ARM) { internal::prefetch(blB+(4*K+16)*RhsProgress); } \
              traits.loadLhs(&blA[(0+3*K)*LhsProgress], A0); \
              traits.loadLhs(&blA[(1+3*K)*LhsProgress], A1); \
              traits.loadLhs(&blA[(2+3*K)*LhsProgress], A2); \
              traits.loadRhs(blB + (0+4*K)*Traits::RhsProgress, B_0); \
              traits.madd(A0, B_0, C0, T0); \
              traits.madd(A1, B_0, C4, T0); \
              traits.madd(A2, B_0, C8, B_0); \
              traits.loadRhs(blB + (1+4*K)*Traits::RhsProgress, B_0); \
              traits.madd(A0, B_0, C1, T0); \
              traits.madd(A1, B_0, C5, T0); \
              traits.madd(A2, B_0, C9, B_0); \
              traits.loadRhs(blB + (2+4*K)*Traits::RhsProgress, B_0); \
              traits.madd(A0, B_0, C2,  T0); \
              traits.madd(A1, B_0, C6,  T0); \
              traits.madd(A2, B_0, C10, B_0); \
              traits.loadRhs(blB + (3+4*K)*Traits::RhsProgress, B_0); \
              traits.madd(A0, B_0, C3,  T0); \
              traits.madd(A1, B_0, C7,  T0); \
              traits.madd(A2, B_0, C11, B_0); \
              EIGEN_ASM_COMMENT("end step of gebp micro kernel 3pX4"); \
            } while(false)

            internal::prefetch(blB);
            EIGEN_GEBP_ONESTEP(0);
            EIGEN_GEBP_ONESTEP(1);
            EIGEN_GEBP_ONESTEP(2);
            EIGEN_GEBP_ONESTEP(3);
            EIGEN_GEBP_ONESTEP(4);
            EIGEN_GEBP_ONESTEP(5);
            EIGEN_GEBP_ONESTEP(6);
            EIGEN_GEBP_ONESTEP(7);

            blB += pk*4*RhsProgress;
            blA += pk*3*Traits::LhsProgress;

            EIGEN_ASM_COMMENT("end gebp micro kernel 3pX4");
          }
          // process remaining peeled loop
          for(Index k=peeled_kc; k<depth; k++)
          {
            RhsPacket B_0, T0;
            LhsPacket A2;
            EIGEN_GEBP_ONESTEP(0);
            blB += 4*RhsProgress;
            blA += 3*Traits::LhsProgress;
          }
#undef EIGEN_GEBP_ONESTEP
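          // Note on the macro above: the last madd of each rhs value passes B_0 itself as
          // the scratch argument because B_0 is reloaded immediately afterwards, so on
          // non-FMA targets it can be clobbered instead of occupying one more register.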
          ResPacket R0, R1, R2;
          ResPacket alphav = pset1<ResPacket>(alpha);

          R0 = r0.loadPacket(0 * Traits::ResPacketSize);
          R1 = r0.loadPacket(1 * Traits::ResPacketSize);
          R2 = r0.loadPacket(2 * Traits::ResPacketSize);
          traits.acc(C0, alphav, R0);
          traits.acc(C4, alphav, R1);
          traits.acc(C8, alphav, R2);
          r0.storePacket(0 * Traits::ResPacketSize, R0);
          r0.storePacket(1 * Traits::ResPacketSize, R1);
          r0.storePacket(2 * Traits::ResPacketSize, R2);

          R0 = r1.loadPacket(0 * Traits::ResPacketSize);
          R1 = r1.loadPacket(1 * Traits::ResPacketSize);
          R2 = r1.loadPacket(2 * Traits::ResPacketSize);
          traits.acc(C1, alphav, R0);
          traits.acc(C5, alphav, R1);
          traits.acc(C9, alphav, R2);
          r1.storePacket(0 * Traits::ResPacketSize, R0);
          r1.storePacket(1 * Traits::ResPacketSize, R1);
          r1.storePacket(2 * Traits::ResPacketSize, R2);

          R0 = r2.loadPacket(0 * Traits::ResPacketSize);
          R1 = r2.loadPacket(1 * Traits::ResPacketSize);
          R2 = r2.loadPacket(2 * Traits::ResPacketSize);
          traits.acc(C2,  alphav, R0);
          traits.acc(C6,  alphav, R1);
          traits.acc(C10, alphav, R2);
          r2.storePacket(0 * Traits::ResPacketSize, R0);
          r2.storePacket(1 * Traits::ResPacketSize, R1);
          r2.storePacket(2 * Traits::ResPacketSize, R2);

          R0 = r3.loadPacket(0 * Traits::ResPacketSize);
          R1 = r3.loadPacket(1 * Traits::ResPacketSize);
          R2 = r3.loadPacket(2 * Traits::ResPacketSize);
          traits.acc(C3,  alphav, R0);
          traits.acc(C7,  alphav, R1);
          traits.acc(C11, alphav, R2);
          r3.storePacket(0 * Traits::ResPacketSize, R0);
          r3.storePacket(1 * Traits::ResPacketSize, R1);
          r3.storePacket(2 * Traits::ResPacketSize, R2);
        }
      }

      // Deal with remaining columns of the rhs
      for(Index j2=packet_cols4; j2<cols; j2++)
      {
        for(Index i=i1; i<actual_panel_end; i+=3*LhsProgress)
        {
          // One column at a time
          const LhsScalar* blA = &blockA[i*strideA+offsetA*(3*Traits::LhsProgress)];
          prefetch(&blA[0]);

          // gets res block as register
          AccPacket C0, C4, C8;
          traits.initAcc(C0);
          traits.initAcc(C4);
          traits.initAcc(C8);

          LinearMapper r0 = res.getLinearMapper(i, j2);
          r0.prefetch(0);

          // performs "inner" products
          const RhsScalar* blB = &blockB[j2*strideB+offsetB];
          LhsPacket A0, A1, A2;

          for(Index k=0; k<peeled_kc; k+=pk)
          {
            EIGEN_ASM_COMMENT("begin gebp micro kernel 3pX1");
            RhsPacket B_0;
#define EIGEN_GEBGP_ONESTEP(K) \
            do { \
              EIGEN_ASM_COMMENT("begin step of gebp micro kernel 3pX1"); \
              EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
              traits.loadLhs(&blA[(0+3*K)*LhsProgress], A0); \
              traits.loadLhs(&blA[(1+3*K)*LhsProgress], A1); \
              traits.loadLhs(&blA[(2+3*K)*LhsProgress], A2); \
              traits.loadRhs(&blB[(0+K)*RhsProgress], B_0); \
              traits.madd(A0, B_0, C0, B_0); \
              traits.madd(A1, B_0, C4, B_0); \
              traits.madd(A2, B_0, C8, B_0); \
              EIGEN_ASM_COMMENT("end step of gebp micro kernel 3pX1"); \
            } while(false)

            EIGEN_GEBGP_ONESTEP(0);
            EIGEN_GEBGP_ONESTEP(1);
            EIGEN_GEBGP_ONESTEP(2);
            EIGEN_GEBGP_ONESTEP(3);
            EIGEN_GEBGP_ONESTEP(4);
            EIGEN_GEBGP_ONESTEP(5);
            EIGEN_GEBGP_ONESTEP(6);
            EIGEN_GEBGP_ONESTEP(7);

            blB += pk*RhsProgress;
            blA += pk*3*Traits::LhsProgress;

            EIGEN_ASM_COMMENT("end gebp micro kernel 3pX1");
          }

          // process remaining peeled loop
          for(Index k=peeled_kc; k<depth; k++)
          {
            RhsPacket B_0;
            EIGEN_GEBGP_ONESTEP(0);
            blB += RhsProgress;
            blA += 3*Traits::LhsProgress;
          }
#undef EIGEN_GEBGP_ONESTEP
          ResPacket R0, R1, R2;
          ResPacket alphav = pset1<ResPacket>(alpha);

          R0 = r0.loadPacket(0 * Traits::ResPacketSize);
          R1 = r0.loadPacket(1 * Traits::ResPacketSize);
          R2 = r0.loadPacket(2 * Traits::ResPacketSize);
          traits.acc(C0, alphav, R0);
          traits.acc(C4, alphav, R1);
          traits.acc(C8, alphav, R2);
          r0.storePacket(0 * Traits::ResPacketSize, R0);
          r0.storePacket(1 * Traits::ResPacketSize, R1);
          r0.storePacket(2 * Traits::ResPacketSize, R2);
        }
      }
    }
  }
  //---------- Process 2 * LhsProgress rows at once ----------
  if(mr>=2*Traits::LhsProgress)
  {
    const Index l1 = defaultL1CacheSize; // in bytes; TODO, l1 should be passed to this function.
    // The max(1, ...) here is needed because we may be using blocking params larger than
    // what our known l1 cache size suggests we should be using: either because our known l1
    // cache size is inaccurate (e.g., on Android, we can only guess), or because we are
    // testing specific blocking sizes.
    Index actual_panel_rows = (2*LhsProgress) * std::max<Index>(1,( (l1 - sizeof(ResScalar)*mr*nr - depth*nr*sizeof(RhsScalar)) / (depth * sizeof(LhsScalar) * 2*LhsProgress) ));
    for(Index i1=peeled_mc3; i1<peeled_mc2; i1+=actual_panel_rows)
    {
      Index actual_panel_end = (std::min)(i1+actual_panel_rows, peeled_mc2);
      for(Index j2=0; j2<packet_cols4; j2+=nr)
      {
        for(Index i=i1; i<actual_panel_end; i+=2*LhsProgress)
        {
          // We selected a 2*Traits::LhsProgress x nr micro block of res which is entirely
          // stored into 2 x nr registers.

          const LhsScalar* blA = &blockA[i*strideA+offsetA*(2*Traits::LhsProgress)];
          prefetch(&blA[0]);

          // gets res block as register
          AccPacket C0, C1, C2, C3,
                    C4, C5, C6, C7;
          traits.initAcc(C0); traits.initAcc(C1); traits.initAcc(C2); traits.initAcc(C3);
          traits.initAcc(C4); traits.initAcc(C5); traits.initAcc(C6); traits.initAcc(C7);

          LinearMapper r0 = res.getLinearMapper(i, j2 + 0);
          LinearMapper r1 = res.getLinearMapper(i, j2 + 1);
          LinearMapper r2 = res.getLinearMapper(i, j2 + 2);
          LinearMapper r3 = res.getLinearMapper(i, j2 + 3);

          r0.prefetch(prefetch_res_offset);
          r1.prefetch(prefetch_res_offset);
          r2.prefetch(prefetch_res_offset);
          r3.prefetch(prefetch_res_offset);

          // performs "inner" products
          const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
          prefetch(&blB[0]);
          LhsPacket A0, A1;

          for(Index k=0; k<peeled_kc; k+=pk)
          {
            EIGEN_ASM_COMMENT("begin gebp micro kernel 2pX4");
            RhsPacket B_0, B1, B2, B3, T0;
            // NOTE: the following workaround keeps A0 and A1 in registers on gcc 6+ with SSE,
            // avoiding a spilling regression.
   #if EIGEN_GNUC_AT_LEAST(6,0) && defined(EIGEN_VECTORIZE_SSE)
     #define EIGEN_GEBP_2PX4_SPILLING_WORKAROUND __asm__  ("" : [a0] "+x,m" (A0),[a1] "+x,m" (A1));
   #else
     #define EIGEN_GEBP_2PX4_SPILLING_WORKAROUND
   #endif
#define EIGEN_GEBGP_ONESTEP(K) \
            do { \
              EIGEN_ASM_COMMENT("begin step of gebp micro kernel 2pX4"); \
              traits.loadLhs(&blA[(0+2*K)*LhsProgress], A0); \
              traits.loadLhs(&blA[(1+2*K)*LhsProgress], A1); \
              traits.broadcastRhs(&blB[(0+4*K)*RhsProgress], B_0, B1, B2, B3); \
              traits.madd(A0, B_0, C0, T0); \
              traits.madd(A1, B_0, C4, B_0); \
              traits.madd(A0, B1,  C1, T0); \
              traits.madd(A1, B1,  C5, B1); \
              traits.madd(A0, B2,  C2, T0); \
              traits.madd(A1, B2,  C6, B2); \
              traits.madd(A0, B3,  C3, T0); \
              traits.madd(A1, B3,  C7, B3); \
              EIGEN_GEBP_2PX4_SPILLING_WORKAROUND \
              EIGEN_ASM_COMMENT("end step of gebp micro kernel 2pX4"); \
            } while(false)

            internal::prefetch(blB+(48+0));
            EIGEN_GEBGP_ONESTEP(0);
            EIGEN_GEBGP_ONESTEP(1);
            EIGEN_GEBGP_ONESTEP(2);
            EIGEN_GEBGP_ONESTEP(3);
            internal::prefetch(blB+(48+16));
            EIGEN_GEBGP_ONESTEP(4);
            EIGEN_GEBGP_ONESTEP(5);
            EIGEN_GEBGP_ONESTEP(6);
            EIGEN_GEBGP_ONESTEP(7);

            blB += pk*4*RhsProgress;
            blA += pk*(2*Traits::LhsProgress);

            EIGEN_ASM_COMMENT("end gebp micro kernel 2pX4");
          }
          // process remaining peeled loop
          for(Index k=peeled_kc; k<depth; k++)
          {
            RhsPacket B_0, B1, B2, B3, T0;
            EIGEN_GEBGP_ONESTEP(0);
            blB += 4*RhsProgress;
            blA += 2*Traits::LhsProgress;
          }
#undef EIGEN_GEBGP_ONESTEP
          ResPacket R0, R1, R2, R3;
          ResPacket alphav = pset1<ResPacket>(alpha);

          R0 = r0.loadPacket(0 * Traits::ResPacketSize);
          R1 = r0.loadPacket(1 * Traits::ResPacketSize);
          R2 = r1.loadPacket(0 * Traits::ResPacketSize);
          R3 = r1.loadPacket(1 * Traits::ResPacketSize);
          traits.acc(C0, alphav, R0);
          traits.acc(C4, alphav, R1);
          traits.acc(C1, alphav, R2);
          traits.acc(C5, alphav, R3);
          r0.storePacket(0 * Traits::ResPacketSize, R0);
          r0.storePacket(1 * Traits::ResPacketSize, R1);
          r1.storePacket(0 * Traits::ResPacketSize, R2);
          r1.storePacket(1 * Traits::ResPacketSize, R3);

          R0 = r2.loadPacket(0 * Traits::ResPacketSize);
          R1 = r2.loadPacket(1 * Traits::ResPacketSize);
          R2 = r3.loadPacket(0 * Traits::ResPacketSize);
          R3 = r3.loadPacket(1 * Traits::ResPacketSize);
          traits.acc(C2, alphav, R0);
          traits.acc(C6, alphav, R1);
          traits.acc(C3, alphav, R2);
          traits.acc(C7, alphav, R3);
          r2.storePacket(0 * Traits::ResPacketSize, R0);
          r2.storePacket(1 * Traits::ResPacketSize, R1);
          r3.storePacket(0 * Traits::ResPacketSize, R2);
          r3.storePacket(1 * Traits::ResPacketSize, R3);
        }
      }

      // Deal with remaining columns of the rhs
      for(Index j2=packet_cols4; j2<cols; j2++)
      {
        for(Index i=i1; i<actual_panel_end; i+=2*LhsProgress)
        {
          // One column at a time
          const LhsScalar* blA = &blockA[i*strideA+offsetA*(2*Traits::LhsProgress)];
          prefetch(&blA[0]);

          // gets res block as register
          AccPacket C0, C4;
          traits.initAcc(C0);
          traits.initAcc(C4);

          LinearMapper r0 = res.getLinearMapper(i, j2);
          r0.prefetch(prefetch_res_offset);

          // performs "inner" products
          const RhsScalar* blB = &blockB[j2*strideB+offsetB];
          LhsPacket A0, A1;

          for(Index k=0; k<peeled_kc; k+=pk)
          {
            EIGEN_ASM_COMMENT("begin gebp micro kernel 2pX1");
            RhsPacket B_0, B1;

#define EIGEN_GEBGP_ONESTEP(K) \
            do { \
              EIGEN_ASM_COMMENT("begin step of gebp micro kernel 2pX1"); \
              EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
              traits.loadLhs(&blA[(0+2*K)*LhsProgress], A0); \
              traits.loadLhs(&blA[(1+2*K)*LhsProgress], A1); \
              traits.loadRhs(&blB[(0+K)*RhsProgress], B_0); \
              traits.madd(A0, B_0, C0, B1); \
              traits.madd(A1, B_0, C4, B_0); \
              EIGEN_ASM_COMMENT("end step of gebp micro kernel 2pX1"); \
            } while(false)

            EIGEN_GEBGP_ONESTEP(0);
            EIGEN_GEBGP_ONESTEP(1);
            EIGEN_GEBGP_ONESTEP(2);
            EIGEN_GEBGP_ONESTEP(3);
            EIGEN_GEBGP_ONESTEP(4);
            EIGEN_GEBGP_ONESTEP(5);
            EIGEN_GEBGP_ONESTEP(6);
            EIGEN_GEBGP_ONESTEP(7);

            blB += pk*RhsProgress;
            blA += pk*2*Traits::LhsProgress;

            EIGEN_ASM_COMMENT("end gebp micro kernel 2pX1");
          }

          // process remaining peeled loop
          for(Index k=peeled_kc; k<depth; k++)
          {
            RhsPacket B_0, B1;
            EIGEN_GEBGP_ONESTEP(0);
            blB += RhsProgress;
            blA += 2*Traits::LhsProgress;
          }
#undef EIGEN_GEBGP_ONESTEP
          ResPacket R0, R1;
          ResPacket alphav = pset1<ResPacket>(alpha);

          R0 = r0.loadPacket(0 * Traits::ResPacketSize);
          R1 = r0.loadPacket(1 * Traits::ResPacketSize);
          traits.acc(C0, alphav, R0);
          traits.acc(C4, alphav, R1);
          r0.storePacket(0 * Traits::ResPacketSize, R0);
          r0.storePacket(1 * Traits::ResPacketSize, R1);
        }
      }
    }
  }
  //---------- Process 1 * LhsProgress rows at once ----------
  if(mr>=1*Traits::LhsProgress)
  {
    // loops on each largest micro horizontal panel of the lhs (1*LhsProgress x depth)
    for(Index i=peeled_mc2; i<peeled_mc1; i+=1*LhsProgress)
    {
      // loops on each largest micro vertical panel of the rhs (depth * nr)
      for(Index j2=0; j2<packet_cols4; j2+=nr)
      {
        // We select a 1*Traits::LhsProgress x nr micro block of res which is entirely
        // stored into 1 x nr registers.

        const LhsScalar* blA = &blockA[i*strideA+offsetA*(1*Traits::LhsProgress)];
        prefetch(&blA[0]);

        // gets res block as register
        AccPacket C0, C1, C2, C3;
        traits.initAcc(C0);
        traits.initAcc(C1);
        traits.initAcc(C2);
        traits.initAcc(C3);

        LinearMapper r0 = res.getLinearMapper(i, j2 + 0);
        LinearMapper r1 = res.getLinearMapper(i, j2 + 1);
        LinearMapper r2 = res.getLinearMapper(i, j2 + 2);
        LinearMapper r3 = res.getLinearMapper(i, j2 + 3);

        r0.prefetch(prefetch_res_offset);
        r1.prefetch(prefetch_res_offset);
        r2.prefetch(prefetch_res_offset);
        r3.prefetch(prefetch_res_offset);

        // performs "inner" products
        const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
        prefetch(&blB[0]);
        LhsPacket A0;

        for(Index k=0; k<peeled_kc; k+=pk)
        {
          EIGEN_ASM_COMMENT("begin gebp micro kernel 1pX4");
          RhsPacket B_0, B1, B2, B3;

#define EIGEN_GEBGP_ONESTEP(K) \
          do { \
            EIGEN_ASM_COMMENT("begin step of gebp micro kernel 1pX4"); \
            EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
            traits.loadLhs(&blA[(0+1*K)*LhsProgress], A0); \
            traits.broadcastRhs(&blB[(0+4*K)*RhsProgress], B_0, B1, B2, B3); \
            traits.madd(A0, B_0, C0, B_0); \
            traits.madd(A0, B1,  C1, B1); \
            traits.madd(A0, B2,  C2, B2); \
            traits.madd(A0, B3,  C3, B3); \
            EIGEN_ASM_COMMENT("end step of gebp micro kernel 1pX4"); \
          } while(false)

          internal::prefetch(blB+(48+0));
          EIGEN_GEBGP_ONESTEP(0);
          EIGEN_GEBGP_ONESTEP(1);
          EIGEN_GEBGP_ONESTEP(2);
          EIGEN_GEBGP_ONESTEP(3);
          internal::prefetch(blB+(48+16));
          EIGEN_GEBGP_ONESTEP(4);
          EIGEN_GEBGP_ONESTEP(5);
          EIGEN_GEBGP_ONESTEP(6);
          EIGEN_GEBGP_ONESTEP(7);

          blB += pk*4*RhsProgress;
          blA += pk*1*LhsProgress;

          EIGEN_ASM_COMMENT("end gebp micro kernel 1pX4");
        }
        // process remaining peeled loop
        for(Index k=peeled_kc; k<depth; k++)
        {
          RhsPacket B_0, B1, B2, B3;
          EIGEN_GEBGP_ONESTEP(0);
          blB += 4*RhsProgress;
          blA += 1*LhsProgress;
        }
#undef EIGEN_GEBGP_ONESTEP

        ResPacket R0, R1;
        ResPacket alphav = pset1<ResPacket>(alpha);

        R0 = r0.loadPacket(0 * Traits::ResPacketSize);
        R1 = r1.loadPacket(0 * Traits::ResPacketSize);
        traits.acc(C0, alphav, R0);
        traits.acc(C1, alphav, R1);
        r0.storePacket(0 * Traits::ResPacketSize, R0);
        r1.storePacket(0 * Traits::ResPacketSize, R1);

        R0 = r2.loadPacket(0 * Traits::ResPacketSize);
        R1 = r3.loadPacket(0 * Traits::ResPacketSize);
        traits.acc(C2, alphav, R0);
        traits.acc(C3, alphav, R1);
        r2.storePacket(0 * Traits::ResPacketSize, R0);
        r3.storePacket(0 * Traits::ResPacketSize, R1);
      }

      // Deal with remaining columns of the rhs
      for(Index j2=packet_cols4; j2<cols; j2++)
      {
        // One column at a time
        const LhsScalar* blA = &blockA[i*strideA+offsetA*(1*Traits::LhsProgress)];
        prefetch(&blA[0]);

        // gets res block as register
        AccPacket C0;
        traits.initAcc(C0);

        LinearMapper r0 = res.getLinearMapper(i, j2);

        // performs "inner" products
        const RhsScalar* blB = &blockB[j2*strideB+offsetB];
        LhsPacket A0;

        for(Index k=0; k<peeled_kc; k+=pk)
        {
          EIGEN_ASM_COMMENT("begin gebp micro kernel 1pX1");
          RhsPacket B_0;

#define EIGEN_GEBGP_ONESTEP(K) \
          do { \
            EIGEN_ASM_COMMENT("begin step of gebp micro kernel 1pX1"); \
            EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
            traits.loadLhs(&blA[(0+1*K)*LhsProgress], A0); \
            traits.loadRhs(&blB[(0+K)*RhsProgress], B_0); \
            traits.madd(A0, B_0, C0, B_0); \
            EIGEN_ASM_COMMENT("end step of gebp micro kernel 1pX1"); \
          } while(false)

          EIGEN_GEBGP_ONESTEP(0);
          EIGEN_GEBGP_ONESTEP(1);
          EIGEN_GEBGP_ONESTEP(2);
          EIGEN_GEBGP_ONESTEP(3);
          EIGEN_GEBGP_ONESTEP(4);
          EIGEN_GEBGP_ONESTEP(5);
          EIGEN_GEBGP_ONESTEP(6);
          EIGEN_GEBGP_ONESTEP(7);

          blB += pk*RhsProgress;
          blA += pk*1*Traits::LhsProgress;

          EIGEN_ASM_COMMENT("end gebp micro kernel 1pX1");
        }

        // process remaining peeled loop
        for(Index k=peeled_kc; k<depth; k++)
        {
          RhsPacket B_0;
          EIGEN_GEBGP_ONESTEP(0);
          blB += RhsProgress;
          blA += 1*Traits::LhsProgress;
        }
#undef EIGEN_GEBGP_ONESTEP
        ResPacket R0;
        ResPacket alphav = pset1<ResPacket>(alpha);
        R0 = r0.loadPacket(0 * Traits::ResPacketSize);
        traits.acc(C0, alphav, R0);
        r0.storePacket(0 * Traits::ResPacketSize, R0);
      }
    }
  }
  //---------- Process remaining rows, 1 row at a time ----------
  if(peeled_mc1<rows)
  {
    // loop on each panel of the rhs
    for(Index j2=0; j2<packet_cols4; j2+=nr)
    {
      // loop on each row of the lhs
      for(Index i=peeled_mc1; i<rows; i+=1)
      {
        const LhsScalar* blA = &blockA[i*strideA+offsetA];
        prefetch(&blA[0]);
        const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
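        // Here we compute one row of the result against an nr-column panel of the rhs. To
        // still vectorize along the depth dimension, the roles of lhs and rhs are swapped
        // (straits): the packed rhs panel is consumed as if it were a packed lhs, and the
        // lhs row as if it were an rhs, yielding a small column product that is gathered
        // from and scattered back to the result.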
        const int SResPacketHalfSize = unpacket_traits<typename unpacket_traits<SResPacket>::half>::size;
        if ((SwappedTraits::LhsProgress % 4) == 0 &&
            (SwappedTraits::LhsProgress <= 8) &&
            (SwappedTraits::LhsProgress!=8 || SResPacketHalfSize==nr))
        {
          SAccPacket C0, C1, C2, C3;
          straits.initAcc(C0);
          straits.initAcc(C1);
          straits.initAcc(C2);
          straits.initAcc(C3);

          const Index spk   = (std::max)(1,SwappedTraits::LhsProgress/4);
          const Index endk  = (depth/spk)*spk;
          const Index endk4 = (depth/(spk*4))*(spk*4);

          Index k=0;
          for(; k<endk4; k+=4*spk)
          {
            SLhsPacket A0,A1;
            SRhsPacket B_0,B_1;

            straits.loadLhsUnaligned(blB+0*SwappedTraits::LhsProgress, A0);
            straits.loadLhsUnaligned(blB+1*SwappedTraits::LhsProgress, A1);

            straits.loadRhsQuad(blA+0*spk, B_0);
            straits.loadRhsQuad(blA+1*spk, B_1);
            straits.madd(A0,B_0,C0,B_0);
            straits.madd(A1,B_1,C1,B_1);

            straits.loadLhsUnaligned(blB+2*SwappedTraits::LhsProgress, A0);
            straits.loadLhsUnaligned(blB+3*SwappedTraits::LhsProgress, A1);
            straits.loadRhsQuad(blA+2*spk, B_0);
            straits.loadRhsQuad(blA+3*spk, B_1);
            straits.madd(A0,B_0,C2,B_0);
            straits.madd(A1,B_1,C3,B_1);

            blB += 4*SwappedTraits::LhsProgress;
            blA += 4*spk;
          }
          C0 = padd(padd(C0,C1),padd(C2,C3));
          for(; k<endk; k+=spk)
          {
            SLhsPacket A0;
            SRhsPacket B_0;

            straits.loadLhsUnaligned(blB, A0);
            straits.loadRhsQuad(blA, B_0);
            straits.madd(A0,B_0,C0,B_0);

            blB += SwappedTraits::LhsProgress;
            blA += spk;
          }
          if(SwappedTraits::LhsProgress==8)
          {
            // Special case where we have to first reduce the accumulation register C0
            typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SResPacket>::half,SResPacket>::type SResPacketHalf;
            typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SLhsPacket>::half,SLhsPacket>::type SLhsPacketHalf;
            typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SRhsPacket>::half,SRhsPacket>::type SRhsPacketHalf;
            typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SAccPacket>::half,SAccPacket>::type SAccPacketHalf;

            SResPacketHalf R = res.template gatherPacket<SResPacketHalf>(i, j2);
            SResPacketHalf alphav = pset1<SResPacketHalf>(alpha);

            if(depth-endk>0)
            {
              // We have to handle the last row(s) of the rhs which correspond to a half-packet
              SLhsPacketHalf a0;
              SRhsPacketHalf b0;
              straits.loadLhsUnaligned(blB, a0);
              straits.loadRhs(blA, b0);
              SAccPacketHalf c0 = predux_downto4(C0);
              straits.madd(a0,b0,c0,b0);
              straits.acc(c0, alphav, R);
            }
            else
            {
              straits.acc(predux_downto4(C0), alphav, R);
            }
            res.scatterPacket(i, j2, R);
          }
          else
          {
            SResPacket R = res.template gatherPacket<SResPacket>(i, j2);
            SResPacket alphav = pset1<SResPacket>(alpha);
            straits.acc(C0, alphav, R);
            res.scatterPacket(i, j2, R);
          }
        }
        else // scalar path
        {
          // get a 1 x 4 res block as registers
          ResScalar C0(0), C1(0), C2(0), C3(0);

          for(Index k=0; k<depth; k++)
          {
            LhsScalar A0;
            RhsScalar B_0, B_1;

            A0  = blA[k];

            B_0 = blB[0];
            B_1 = blB[1];
            CJMADD(cj,A0,B_0,C0,  B_0);
            CJMADD(cj,A0,B_1,C1,  B_1);

            B_0 = blB[2];
            B_1 = blB[3];
            CJMADD(cj,A0,B_0,C2,  B_0);
            CJMADD(cj,A0,B_1,C3,  B_1);

            blB += 4;
          }
          res(i, j2 + 0) += alpha * C0;
          res(i, j2 + 1) += alpha * C1;
          res(i, j2 + 2) += alpha * C2;
          res(i, j2 + 3) += alpha * C3;
        }
      }
    }
    // remaining columns
    for(Index j2=packet_cols4; j2<cols; j2++)
    {
      // loop on each row of the lhs
      for(Index i=peeled_mc1; i<rows; i+=1)
      {
        const LhsScalar* blA = &blockA[i*strideA+offsetA];
        prefetch(&blA[0]);
        // gets a 1 x 1 res block as registers
        ResScalar C0(0);
        const RhsScalar* blB = &blockB[j2*strideB+offsetB];
        for(Index k=0; k<depth; k++)
        {
          LhsScalar A0  = blA[k];
          RhsScalar B_0 = blB[k];
          CJMADD(cj, A0, B_0, C0, B_0);
        }
        res(i, j2) += alpha * C0;
      }
    }
  }
}
// pack a block of the lhs (column major version)
template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
struct gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, ColMajor, Conjugate, PanelMode>
{
  typedef typename DataMapper::LinearMapper LinearMapper;
  EIGEN_DONT_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);
};
template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, ColMajor, Conjugate, PanelMode>
  ::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
{
  typedef typename packet_traits<Scalar>::type Packet;
  enum { PacketSize = packet_traits<Scalar>::size };

  EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
  EIGEN_UNUSED_VARIABLE(stride);
  EIGEN_UNUSED_VARIABLE(offset);
  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
  eigen_assert( ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) || (Pack1<=4) );
  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
  Index count = 0;

  const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;
  const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0;
  const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0;
  const Index peeled_mc0 = Pack2>=1*PacketSize ? peeled_mc1
                         : Pack2>1             ? (rows/Pack2)*Pack2 : 0;

  Index i=0;

  // Pack 3 packets
  if(Pack1>=3*PacketSize)
  {
    for(; i<peeled_mc3; i+=3*PacketSize)
    {
      if(PanelMode) count += (3*PacketSize) * offset;

      for(Index k=0; k<depth; k++)
      {
        Packet A, B, C;
        A = lhs.loadPacket(i+0*PacketSize, k);
        B = lhs.loadPacket(i+1*PacketSize, k);
        C = lhs.loadPacket(i+2*PacketSize, k);
        pstore(blockA+count, cj.pconj(A)); count+=PacketSize;
        pstore(blockA+count, cj.pconj(B)); count+=PacketSize;
        pstore(blockA+count, cj.pconj(C)); count+=PacketSize;
      }
      if(PanelMode) count += (3*PacketSize) * (stride-offset-depth);
    }
  }
  // Pack 2 packets
  if(Pack1>=2*PacketSize)
  {
    for(; i<peeled_mc2; i+=2*PacketSize)
    {
      if(PanelMode) count += (2*PacketSize) * offset;

      for(Index k=0; k<depth; k++)
      {
        Packet A, B;
        A = lhs.loadPacket(i+0*PacketSize, k);
        B = lhs.loadPacket(i+1*PacketSize, k);
        pstore(blockA+count, cj.pconj(A)); count+=PacketSize;
        pstore(blockA+count, cj.pconj(B)); count+=PacketSize;
      }
      if(PanelMode) count += (2*PacketSize) * (stride-offset-depth);
    }
  }
  // Pack 1 packets
  if(Pack1>=1*PacketSize)
  {
    for(; i<peeled_mc1; i+=1*PacketSize)
    {
      if(PanelMode) count += (1*PacketSize) * offset;

      for(Index k=0; k<depth; k++)
      {
        Packet A;
        A = lhs.loadPacket(i+0*PacketSize, k);
        pstore(blockA+count, cj.pconj(A));
        count+=PacketSize;
      }
      if(PanelMode) count += (1*PacketSize) * (stride-offset-depth);
    }
  }
  // Pack scalars
  if(Pack2<PacketSize && Pack2>1)
  {
    for(; i<peeled_mc0; i+=Pack2)
    {
      if(PanelMode) count += Pack2 * offset;

      for(Index k=0; k<depth; k++)
        for(Index w=0; w<Pack2; w++)
          blockA[count++] = cj(lhs(i+w, k));

      if(PanelMode) count += Pack2 * (stride-offset-depth);
    }
  }
  for(; i<rows; i++)
  {
    if(PanelMode) count += offset;
    for(Index k=0; k<depth; k++)
      blockA[count++] = cj(lhs(i, k));
    if(PanelMode) count += (stride-offset-depth);
  }
}
// pack a block of the lhs (row major version)
template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
struct gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, RowMajor, Conjugate, PanelMode>
{
  typedef typename DataMapper::LinearMapper LinearMapper;
  EIGEN_DONT_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);
};
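// For a row-major lhs, packing has to transpose: coefficients of one row are contiguous in
// memory but must end up strided by the pack width in blockA. In the definition below, the
// PacketSize x PacketSize tiles are transposed in registers via ptranspose().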
template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, RowMajor, Conjugate, PanelMode>
  ::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
{
  typedef typename packet_traits<Scalar>::type Packet;
  enum { PacketSize = packet_traits<Scalar>::size };

  EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
  EIGEN_UNUSED_VARIABLE(stride);
  EIGEN_UNUSED_VARIABLE(offset);
  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
  Index count = 0;

  int pack = Pack1;
  Index i = 0;
  while(pack>0)
  {
    Index remaining_rows = rows-i;
    Index peeled_mc = i+(remaining_rows/pack)*pack;
    for(; i<peeled_mc; i+=pack)
    {
      if(PanelMode) count += pack * offset;

      const Index peeled_k = (depth/PacketSize)*PacketSize;
      Index k=0;
      if(pack>=PacketSize)
      {
        for(; k<peeled_k; k+=PacketSize)
        {
          for (Index m = 0; m < pack; m += PacketSize)
          {
            PacketBlock<Packet> kernel;
            for (int p = 0; p < PacketSize; ++p) kernel.packet[p] = lhs.loadPacket(i+p+m, k);
            ptranspose(kernel);
            for (int p = 0; p < PacketSize; ++p) pstore(blockA+count+m+(pack)*p, cj.pconj(kernel.packet[p]));
          }
          count += PacketSize*pack;
        }
      }
      for(; k<depth; k++)
      {
        Index w=0;
        for(; w<pack-3; w+=4)
        {
          Scalar a(cj(lhs(i+w+0, k))),
                 b(cj(lhs(i+w+1, k))),
                 c(cj(lhs(i+w+2, k))),
                 d(cj(lhs(i+w+3, k)));
          blockA[count++] = a;
          blockA[count++] = b;
          blockA[count++] = c;
          blockA[count++] = d;
        }
        if(pack%4)
          for(; w<pack; ++w)
            blockA[count++] = cj(lhs(i+w, k));
      }
      if(PanelMode) count += pack * (stride-offset-depth);
    }

    pack -= PacketSize;
    if(pack<Pack2 && (pack+PacketSize)!=Pack2)
      pack = Pack2;
  }

  for(; i<rows; i++)
  {
    if(PanelMode) count += offset;
    for(Index k=0; k<depth; k++)
      blockA[count++] = cj(lhs(i, k));
    if(PanelMode) count += (stride-offset-depth);
  }
}
// copy a complete panel of the rhs
// this version is optimized for column major matrices
template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
struct gemm_pack_rhs<Scalar, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode>
{
  typedef typename packet_traits<Scalar>::type Packet;
  typedef typename DataMapper::LinearMapper LinearMapper;
  enum { PacketSize = packet_traits<Scalar>::size };
  EIGEN_DONT_INLINE void operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0);
};
template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode>
  ::operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)
{
  EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS COLMAJOR");
  EIGEN_UNUSED_VARIABLE(stride);
  EIGEN_UNUSED_VARIABLE(offset);
  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
  Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
  Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
  Index count = 0;
  const Index peeled_k = (depth/PacketSize)*PacketSize;

  if(nr>=4)
  {
    for(Index j2=packet_cols8; j2<packet_cols4; j2+=4)
    {
      // skip what we have before
      if(PanelMode) count += 4 * offset;
      const LinearMapper dm0 = rhs.getLinearMapper(0, j2 + 0);
      const LinearMapper dm1 = rhs.getLinearMapper(0, j2 + 1);
      const LinearMapper dm2 = rhs.getLinearMapper(0, j2 + 2);
      const LinearMapper dm3 = rhs.getLinearMapper(0, j2 + 3);

      Index k=0;
      if((PacketSize%4)==0) // TODO enable vectorized transposition for PacketSize==2 ??
      {
        for(; k<peeled_k; k+=PacketSize) {
          PacketBlock<Packet,(PacketSize%4)==0?4:PacketSize> kernel;
          kernel.packet[0           ] = dm0.loadPacket(k);
          kernel.packet[1%PacketSize] = dm1.loadPacket(k);
          kernel.packet[2%PacketSize] = dm2.loadPacket(k);
          kernel.packet[3%PacketSize] = dm3.loadPacket(k);
          ptranspose(kernel);
          pstoreu(blockB+count+0*PacketSize, cj.pconj(kernel.packet[0]));
          pstoreu(blockB+count+1*PacketSize, cj.pconj(kernel.packet[1%PacketSize]));
          pstoreu(blockB+count+2*PacketSize, cj.pconj(kernel.packet[2%PacketSize]));
          pstoreu(blockB+count+3*PacketSize, cj.pconj(kernel.packet[3%PacketSize]));
          count+=4*PacketSize;
        }
      }
      for(; k<depth; k++)
      {
        blockB[count+0] = cj(dm0(k));
        blockB[count+1] = cj(dm1(k));
        blockB[count+2] = cj(dm2(k));
        blockB[count+3] = cj(dm3(k));
        count += 4;
      }
      // skip what we have after
      if(PanelMode) count += 4 * (stride-offset-depth);
    }
  }

  // copy the remaining columns one at a time (nr==1)
  for(Index j2=packet_cols4; j2<cols; ++j2)
  {
    if(PanelMode) count += offset;
    const LinearMapper dm0 = rhs.getLinearMapper(0, j2);
    for(Index k=0; k<depth; k++)
    {
      blockB[count] = cj(dm0(k));
      count += 1;
    }
    if(PanelMode) count += (stride-offset-depth);
  }
}
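// Layout of blockB after the column-major packing above: within each group of 4 columns the
// coefficients are interleaved by row, i.e. b00 b01 b02 b03, then b10 b11 b12 b13, and so on,
// so the gebp kernel can broadcast or load 4 consecutive rhs values per depth step.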
// this version is optimized for row major matrices
template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
struct gemm_pack_rhs<Scalar, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode>
{
  typedef typename packet_traits<Scalar>::type Packet;
  typedef typename DataMapper::LinearMapper LinearMapper;
  enum { PacketSize = packet_traits<Scalar>::size };
  EIGEN_DONT_INLINE void operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0);
};
template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode>
  ::operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)
{
  EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS ROWMAJOR");
  EIGEN_UNUSED_VARIABLE(stride);
  EIGEN_UNUSED_VARIABLE(offset);
  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
  Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
  Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
  Index count = 0;

  if(nr>=4)
  {
    for(Index j2=packet_cols8; j2<packet_cols4; j2+=4)
    {
      // skip what we have before
      if(PanelMode) count += 4 * offset;
      for(Index k=0; k<depth; k++)
      {
        if (PacketSize==4) {
          Packet A = rhs.loadPacket(k, j2);
          pstoreu(blockB+count, cj.pconj(A));
          count += PacketSize;
        } else {
          const LinearMapper dm0 = rhs.getLinearMapper(k, j2);
          blockB[count+0] = cj(dm0(0));
          blockB[count+1] = cj(dm0(1));
          blockB[count+2] = cj(dm0(2));
          blockB[count+3] = cj(dm0(3));
          count += 4;
        }
      }
      // skip what we have after
      if(PanelMode) count += 4 * (stride-offset-depth);
    }
  }
  // copy the remaining columns one at a time (nr==1)
  for(Index j2=packet_cols4; j2<cols; ++j2)
  {
    if(PanelMode) count += offset;
    for(Index k=0; k<depth; k++)
    {
      blockB[count] = cj(rhs(k, j2));
      count += 1;
    }
    if(PanelMode) count += stride-offset-depth;
  }
}
} // end namespace internal

/** \returns the currently set level 1 cpu cache size (in bytes) used to estimate the ideal blocking size parameters.
  * \sa setCpuCacheSizes */
inline std::ptrdiff_t l1CacheSize()
{
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
  return l1;
}

/** \returns the currently set level 2 cpu cache size (in bytes) used to estimate the ideal blocking size parameters.
  * \sa setCpuCacheSizes */
inline std::ptrdiff_t l2CacheSize()
{
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
  return l2;
}

/** \returns the currently set level 3 cpu cache size (in bytes) used to estimate the ideal blocking size parameters.
  * \sa setCpuCacheSizes */
inline std::ptrdiff_t l3CacheSize()
{
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
  return l3;
}

/** Set the cpu L1, L2, and L3 cache sizes (in bytes).
  * These values are used to adjust the size of the blocks
  * for the algorithms working per blocks.
  * \sa computeProductBlockingSizes */
inline void setCpuCacheSizes(std::ptrdiff_t l1, std::ptrdiff_t l2, std::ptrdiff_t l3)
{
  internal::manage_caching_sizes(SetAction, &l1, &l2, &l3);
}

} // end namespace Eigen

#endif // EIGEN_GENERAL_BLOCK_PANEL_H