Eigen-unsupported 3.4.1 (git rev 28ded8800c26864e537852658428ab44c8399e87)
TensorContraction.h
1// This file is part of Eigen, a lightweight C++ template library
2// for linear algebra.
3//
4// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
5//
6// This Source Code Form is subject to the terms of the Mozilla
7// Public License v. 2.0. If a copy of the MPL was not distributed
8// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
10#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H
11#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H
12
13namespace Eigen {
14
15namespace internal {
16
17template<typename Dimensions, typename LhsXprType, typename RhsXprType, typename OutputKernelType>
18struct traits<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType> >
19{
20 // Type promotion to handle the case where the types of the lhs and the rhs are different.
21 typedef typename gebp_traits<typename remove_const<typename LhsXprType::Scalar>::type,
22 typename remove_const<typename RhsXprType::Scalar>::type>::ResScalar Scalar;
23
24 typedef typename promote_storage_type<typename traits<LhsXprType>::StorageKind,
25 typename traits<RhsXprType>::StorageKind>::ret StorageKind;
26 typedef typename promote_index_type<typename traits<LhsXprType>::Index,
27 typename traits<RhsXprType>::Index>::type Index;
28 typedef typename LhsXprType::Nested LhsNested;
29 typedef typename RhsXprType::Nested RhsNested;
30 typedef typename remove_reference<LhsNested>::type _LhsNested;
31 typedef typename remove_reference<RhsNested>::type _RhsNested;
32
33 // From NumDims below.
34 static const int NumDimensions = traits<LhsXprType>::NumDimensions + traits<RhsXprType>::NumDimensions - 2 * array_size<Dimensions>::value;
35 static const int Layout = traits<LhsXprType>::Layout;
36 typedef typename conditional<Pointer_type_promotion<typename LhsXprType::Scalar, Scalar>::val,
37 typename traits<LhsXprType>::PointerType,
38 typename traits<RhsXprType>::PointerType>::type
39 PointerType;
40
41 enum {
42 Flags = 0
43 };
44};
45
46template<typename Dimensions, typename LhsXprType, typename RhsXprType, typename OutputKernelType>
47struct eval<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>, Eigen::Dense>
48{
49 typedef const TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>& type;
50};
51
52template<typename Dimensions, typename LhsXprType, typename RhsXprType, typename OutputKernelType>
53struct nested<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>, 1, typename eval<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType> >::type>
54{
55 typedef TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType> type;
56};
57
58template<typename Indices_, typename LeftArgType_, typename RightArgType_, typename OutputKernelType_, typename Device_>
59struct traits<TensorEvaluator<const TensorContractionOp<Indices_, LeftArgType_, RightArgType_, OutputKernelType_>, Device_> > {
60 typedef Indices_ Indices;
61 typedef LeftArgType_ LeftArgType;
62 typedef RightArgType_ RightArgType;
63 typedef OutputKernelType_ OutputKernelType;
64 typedef Device_ Device;
65
66 // From NumDims below.
67 static const int NumDimensions = traits<LeftArgType_>::NumDimensions + traits<RightArgType_>::NumDimensions - 2 * array_size<Indices_>::value;
68};
69
70// Helper class to allocate and deallocate temporary memory for packed buffers.
71template <typename LhsScalar, typename RhsScalar>
72struct TensorContractionBlockMemAllocator {
73 typedef void* BlockMemHandle;
74
75 template <typename Device>
76 EIGEN_DEVICE_FUNC static BlockMemHandle allocate(Device& d, const Index bm,
77 const Index bk,
78 const Index bn,
79 LhsScalar** lhs_block,
80 RhsScalar** rhs_block) {
81 eigen_assert(lhs_block);
82 eigen_assert(rhs_block);
83 BlockSizes sz = ComputeLhsRhsBlockSizes(bm, bk, bn);
84 char* block_mem = static_cast<char*>(d.allocate(sz.lhs_size + sz.rhs_size));
85 *lhs_block = reinterpret_cast<LhsScalar*>(block_mem);
86 *rhs_block = reinterpret_cast<RhsScalar*>(block_mem + sz.lhs_size);
87 return block_mem;
88 }
89
90 template <typename Device>
91 EIGEN_DEVICE_FUNC static BlockMemHandle allocateSlices(
92 Device& d, const Index bm, const Index bk, const Index bn,
93 const Index num_lhs, const Index num_rhs, const Index num_slices,
94 std::vector<LhsScalar*>* lhs_blocks,
95 std::vector<RhsScalar*>* rhs_blocks) {
96 eigen_assert(num_slices > 0);
97 eigen_assert(num_lhs >= 0 && num_rhs >= 0);
98 eigen_assert(num_lhs == 0 || lhs_blocks);
99 eigen_assert(num_rhs == 0 || rhs_blocks);
100 BlockSizes sz = ComputeLhsRhsBlockSizes(bm, bk, bn);
101 void* block_mem = d.allocate(
102 (num_lhs * sz.lhs_size + num_rhs * sz.rhs_size) * num_slices);
103 eigen_assert(block_mem);
104 char* mem = static_cast<char*>(block_mem);
105
106 for (Index x = 0; x < num_slices; x++) {
107 if (num_lhs > 0) lhs_blocks[x].resize(num_lhs);
108 for (Index m = 0; m < num_lhs; m++) {
109 lhs_blocks[x][m] = reinterpret_cast<LhsScalar*>(mem);
110 mem += sz.lhs_size;
111 }
112 if (num_rhs > 0) rhs_blocks[x].resize(num_rhs);
113 for (Index n = 0; n < num_rhs; n++) {
114 rhs_blocks[x][n] = reinterpret_cast<RhsScalar*>(mem);
115 mem += sz.rhs_size;
116 }
117 }
118
119 return block_mem;
120 }
121
122 template <typename Device>
123 EIGEN_DEVICE_FUNC static void deallocate(Device& d, BlockMemHandle handle) {
124 d.deallocate(handle);
125 }
126
127 private:
128 struct BlockSizes {
129 Index lhs_size;
130 Index rhs_size;
131 };
132 EIGEN_DEVICE_FUNC static BlockSizes ComputeLhsRhsBlockSizes(const Index bm,
133 const Index bk,
134 const Index bn) {
135 Index align = numext::maxi(EIGEN_MAX_ALIGN_BYTES, 1);
136 BlockSizes sz;
137 sz.lhs_size = divup<Index>(bm * bk * sizeof(LhsScalar), align) * align;
138 sz.rhs_size = divup<Index>(bn * bk * sizeof(RhsScalar), align) * align;
139 return sz;
140 }
141};
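
// Example usage (an illustrative sketch, not taken from Eigen itself; the block
// sizes are arbitrary and Eigen::DefaultDevice stands in for any device type
// that provides allocate()/deallocate()):
//
//   Eigen::DefaultDevice device;
//   float* lhs_block = nullptr;
//   float* rhs_block = nullptr;
//   typedef TensorContractionBlockMemAllocator<float, float> Allocator;
//   Allocator::BlockMemHandle handle = Allocator::allocate(
//       device, /*bm=*/64, /*bk=*/32, /*bn=*/64, &lhs_block, &rhs_block);
//   // ... pack panels into lhs_block / rhs_block and run the GEBP kernel ...
//   Allocator::deallocate(device, handle);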
142
143// WARNING: In this code we assume that Lhs and Rhs tensor expressions are in
144// ColMajor storage order. This property is guaranteed by the
145// TensorContractionOp evaluator. TensorContractionKernel specifies how we pack
146// blocks of Lhs and Rhs tensor expressions, and how we invoke matrix
147// multiplication for these blocks. Default tensor contraction uses
148// gemm_pack_rhs, gemm_pack_lhs and gebp_kernel from Eigen Core (see
149// GeneralBlockPanelKernel.h for details).
150//
151// By specializing contraction kernels we can use other low-level libraries to
152// perform matrix multiplication, and still rely on the Eigen contraction evaluator.
153// This also includes full support in TensorContractionThreadPool, assuming that
154// the underlying gemm does not use its own threading.
155//
156// - ResScalar/LhsScalar/RhsScalar - scalar type for the result of
157// multiplication, lhs tensor and rhs tensor respectively.
158//
159// - StorageIndex - index type for the tensor expressions. In practice it is
160// almost always Eigen::Index.
161//
162// - OutputMapper provides access to the memory of the output matrix. In
163// practice it's always column major blas_data_mapper (it must be of ResScalar
164// type).
165//
166// - LhsMapper/RhsMapper, similarly to blas_data_mapper, provide a two-dimensional
167// view into the Lhs/Rhs tensor expressions. In practice it's
168// TensorContractionInputMapper, or some specialization of it based on the
169// type of tensor expression (e.g. TensorImagePatchOp has optimized input
170// mapper).
171template <typename ResScalar, typename LhsScalar, typename RhsScalar,
172 typename StorageIndex, typename OutputMapper, typename LhsMapper,
173 typename RhsMapper>
174struct TensorContractionKernel {
175 // True if `invoke()` supports `beta` in `C <- alpha * A * B + beta * C`
176 // (otherwise beta should always be equal to 1).
177 enum { HasBeta = false };
178
179 EIGEN_DEVICE_FUNC
180 TensorContractionKernel(StorageIndex m_, StorageIndex k_, StorageIndex n_,
181 StorageIndex bm_, StorageIndex bk_, StorageIndex bn_)
182 : m(m_), k(k_), n(n_), bm(bm_), bk(bk_), bn(bn_) {}
183
184 // Pack blocks of Lhs and Rhs into contiguous blocks in memory.
185 typedef LhsScalar* LhsBlock;
186 typedef RhsScalar* RhsBlock;
187
188 // Packed Lhs/Rhs block memory allocator.
189 typedef TensorContractionBlockMemAllocator<LhsScalar, RhsScalar>
190 BlockMemAllocator;
191 typedef typename BlockMemAllocator::BlockMemHandle BlockMemHandle;
192
193 typedef typename internal::gebp_traits<LhsScalar, RhsScalar> Traits;
194
195 typedef internal::gemm_pack_lhs<
196 LhsScalar, StorageIndex, typename LhsMapper::SubMapper, Traits::mr,
197 Traits::LhsProgress, typename Traits::LhsPacket4Packing, ColMajor>
198 LhsPacker;
199
200 typedef internal::gemm_pack_rhs<RhsScalar, StorageIndex,
201 typename RhsMapper::SubMapper, Traits::nr,
202 ColMajor>
203 RhsPacker;
204
205 typedef internal::gebp_kernel<LhsScalar, RhsScalar, StorageIndex,
206 OutputMapper, Traits::mr, Traits::nr,
207 /*ConjugateLhs*/ false, /*ConjugateRhs*/ false>
208 GebpKernel;
209
210 template <typename Device>
211 EIGEN_DEVICE_FUNC BlockMemHandle allocate(Device& d, LhsBlock* lhs_block,
212 RhsBlock* rhs_block) {
213 return BlockMemAllocator::allocate(d, bm, bk, bn, lhs_block, rhs_block);
214 }
215
216 template <typename Device>
217 EIGEN_DEVICE_FUNC BlockMemHandle allocateSlices(
218 Device& d, const StorageIndex num_lhs, const StorageIndex num_rhs,
219 const StorageIndex num_slices, std::vector<LhsBlock>* lhs_blocks,
220 std::vector<RhsBlock>* rhs_blocks) {
221 return BlockMemAllocator::allocateSlices(
222 d, bm, bk, bn, num_lhs, num_rhs, num_slices, lhs_blocks, rhs_blocks);
223 }
224
225 template <typename Device>
226 EIGEN_DEVICE_FUNC static void deallocate(Device& d, BlockMemHandle handle) {
227 BlockMemAllocator::deallocate(d, handle);
228 }
229
230 EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void packLhs(
231 LhsBlock* lhsBlock, const typename LhsMapper::SubMapper& data_mapper,
232 const StorageIndex depth, const StorageIndex rows) {
233 LhsPacker()(*lhsBlock, data_mapper, depth, rows, /*stride*/ 0,
234 /*offset*/ 0);
235 }
236
237 EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void packRhs(
238 RhsBlock* rhsBlock, const typename RhsMapper::SubMapper& data_mapper,
239 const StorageIndex depth, const StorageIndex cols) {
240 RhsPacker()(*rhsBlock, data_mapper, depth, cols);
241 }
242
243 EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void invoke(
244 const OutputMapper& output_mapper, const LhsBlock& lhsBlock,
245 const RhsBlock& rhsBlock, const StorageIndex rows,
246 const StorageIndex depth, const StorageIndex cols,
247 const ResScalar alpha, const ResScalar beta) {
248 // Default GEBP kernel does not support beta.
249 eigen_assert(beta == ResScalar(1));
250 static const int kComputeStrideFromBlockDimensions = -1;
251 GebpKernel()(output_mapper, lhsBlock, rhsBlock, rows, depth, cols, alpha,
252 /*strideA*/ kComputeStrideFromBlockDimensions,
253 /*strideB*/ kComputeStrideFromBlockDimensions,
254 /*offsetA*/ 0, /*offsetB*/ 0);
255 }
256
257 private:
258 // These are the dimensions of the original tensors and the selected block sizes.
259 // The actual block sizes passed to the functions above might be smaller because
260 // of partial blocks at the end.
261 const StorageIndex m;
262 const StorageIndex k;
263 const StorageIndex n;
264 const StorageIndex bm;
265 const StorageIndex bk;
266 const StorageIndex bn;
267};
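
// The contraction evaluator drives a TensorContractionKernel roughly as in the
// sketch below (simplified from evalGemmPartial further down in this file;
// i2/k2/j2 are block origins inside the blocking loops, and the mappers,
// alpha and beta are set up as shown there):
//
//   TensorContractionKernel kernel(m, k, n, bm, bk, bn);
//   LhsBlock blockA; RhsBlock blockB;
//   BlockMemHandle packed_mem = kernel.allocate(device, &blockA, &blockB);
//   kernel.packLhs(&blockA, lhs.getSubMapper(i2, k2), kc, mc);
//   kernel.packRhs(&blockB, rhs.getSubMapper(k2, j2), kc, nc);
//   kernel.invoke(output.getSubMapper(i2, j2), blockA, blockB, mc, kc, nc,
//                 /*alpha=*/alpha, /*beta=*/beta);
//   kernel.deallocate(device, packed_mem);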
268
269} // end namespace internal
270
271// Tensor contraction params that make it possible to map the 2-dimensional
272// coordinates of the output matrix to the output tensor dimensions.
273struct TensorContractionParams {
274 // The TensorContraction evaluator assumes that both tensors are in ColMajor
275 // layout; if the tensors are in RowMajor, the evaluator swaps lhs with rhs.
276 bool swapped_arguments;
277};
278
279// An output kernel allows fusing operations into the tensor contraction.
280//
281// Examples:
282// 1. Elementwise Relu transformation following Conv2D.
283// 2. AddBias to the Conv2D output channels dimension.
284//
285// The NoOpOutputKernel implements an output kernel that does absolutely nothing.
286struct NoOpOutputKernel {
302 template <typename Index, typename Scalar>
303 EIGEN_ALWAYS_INLINE void operator()(
304 const internal::blas_data_mapper<Scalar, Index, ColMajor>& output_mapper,
305 const TensorContractionParams& params, Index i,
306 Index j, Index num_rows, Index num_cols) const {
307 EIGEN_UNUSED_VARIABLE(output_mapper);
308 EIGEN_UNUSED_VARIABLE(params);
309 EIGEN_UNUSED_VARIABLE(i);
310 EIGEN_UNUSED_VARIABLE(j);
311 EIGEN_UNUSED_VARIABLE(num_rows);
312 EIGEN_UNUSED_VARIABLE(num_cols);
313 }
314};
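
// A hypothetical custom output kernel (a minimal sketch, not part of Eigen):
// an in-place ReLU applied to each output block right after it is computed.
// The mapper passed to the kernel is already offset to the top-left corner of
// the block, so (r, c) below are local block coordinates, while (i, j) are the
// block coordinates in the full output matrix.
//
//   struct ReluOutputKernel {
//     template <typename Index, typename Scalar>
//     EIGEN_ALWAYS_INLINE void operator()(
//         const internal::blas_data_mapper<Scalar, Index, ColMajor>& output_mapper,
//         const TensorContractionParams& params, Index i, Index j,
//         Index num_rows, Index num_cols) const {
//       EIGEN_UNUSED_VARIABLE(params);
//       EIGEN_UNUSED_VARIABLE(i);
//       EIGEN_UNUSED_VARIABLE(j);
//       for (Index c = 0; c < num_cols; ++c)
//         for (Index r = 0; r < num_rows; ++r)
//           output_mapper(r, c) = numext::maxi(output_mapper(r, c), Scalar(0));
//     }
//   };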
315
319template <typename Indices, typename LhsXprType, typename RhsXprType,
320 typename OutputKernelType = const NoOpOutputKernel>
321class TensorContractionOp
322 : public TensorBase<TensorContractionOp<Indices, LhsXprType, RhsXprType, OutputKernelType>, ReadOnlyAccessors> {
323 public:
324 typedef typename Eigen::internal::traits<TensorContractionOp>::Scalar Scalar;
325 typedef typename internal::gebp_traits<typename LhsXprType::CoeffReturnType,
326 typename RhsXprType::CoeffReturnType>::ResScalar CoeffReturnType;
327 typedef typename Eigen::internal::nested<TensorContractionOp>::type Nested;
328 typedef typename Eigen::internal::traits<TensorContractionOp>::StorageKind StorageKind;
329 typedef typename Eigen::internal::traits<TensorContractionOp>::Index Index;
330
331 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorContractionOp(
332 const LhsXprType& lhs, const RhsXprType& rhs, const Indices& dims,
333 const OutputKernelType& output_kernel = OutputKernelType())
334 : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_indices(dims),
335 m_output_kernel(output_kernel) {}
336
337 EIGEN_DEVICE_FUNC
338 const Indices& indices() const { return m_indices; }
339
341 EIGEN_DEVICE_FUNC
342 const typename internal::remove_all<typename LhsXprType::Nested>::type&
343 lhsExpression() const { return m_lhs_xpr; }
344
345 EIGEN_DEVICE_FUNC
346 const typename internal::remove_all<typename RhsXprType::Nested>::type&
347 rhsExpression() const { return m_rhs_xpr; }
348
349 EIGEN_DEVICE_FUNC
350 const OutputKernelType& outputKernel() const { return m_output_kernel; }
351
352 protected:
353 typename LhsXprType::Nested m_lhs_xpr;
354 typename RhsXprType::Nested m_rhs_xpr;
355 const Indices m_indices;
356 const OutputKernelType m_output_kernel;
357};
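
// Typical use through the public Tensor API (a minimal sketch; the sizes are
// arbitrary). Contracting the second dimension of A with the first dimension
// of B is an ordinary matrix product and yields a 30 x 11 tensor:
//
//   Eigen::Tensor<float, 2> A(30, 7);
//   Eigen::Tensor<float, 2> B(7, 11);
//   A.setRandom();
//   B.setRandom();
//   Eigen::array<Eigen::IndexPair<int>, 1> dims = {Eigen::IndexPair<int>(1, 0)};
//   Eigen::Tensor<float, 2> C = A.contract(B, dims);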
358
359
360template<typename Derived>
361struct TensorContractionEvaluatorBase : internal::no_assignment_operator
362{
363 typedef typename internal::traits<Derived>::Indices Indices;
364 typedef typename internal::traits<Derived>::LeftArgType LeftArgType;
365 typedef typename internal::traits<Derived>::RightArgType RightArgType;
366 typedef typename internal::traits<Derived>::OutputKernelType OutputKernelType;
367 typedef typename internal::traits<Derived>::Device Device;
368
369 typedef TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType> XprType;
370 typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
371 typedef typename XprType::Index Index;
372 typedef typename XprType::CoeffReturnType CoeffReturnType;
373 typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
374 typedef StorageMemory<Scalar, Device> Storage;
375 typedef typename Storage::Type EvaluatorPointerType;
376
377 enum {
378 IsAligned = true,
379 PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
380 BlockAccess = false,
381 PreferBlockAccess = false,
382 Layout = TensorEvaluator<LeftArgType, Device>::Layout,
383 CoordAccess = false, // to be implemented
384 RawAccess = true
385 };
386
387 //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
388 typedef internal::TensorBlockNotImplemented TensorBlock;
389 //===--------------------------------------------------------------------===//
390
391 // Most of the code is assuming that both input tensors are ColMajor. If the
392 // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS:
393 // If we want to compute A * B = C, where A is LHS and B is RHS, the code
394 // will pretend B is LHS and A is RHS.
395 typedef typename internal::conditional<
396 static_cast<int>(Layout) == static_cast<int>(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType;
397 typedef typename internal::conditional<
398 static_cast<int>(Layout) == static_cast<int>(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType;
399
400 typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluatorType;
401 typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluatorType;
402
403 static const int LDims =
404 internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
405 static const int RDims =
406 internal::array_size<typename TensorEvaluator<EvalRightArgType, Device>::Dimensions>::value;
407 static const int ContractDims = internal::array_size<Indices>::value;
408 static const int NumDims = LDims + RDims - 2 * ContractDims;
409
410 typedef array<Index, ContractDims> contract_t;
411 typedef array<Index, LDims - ContractDims> left_nocontract_t;
412 typedef array<Index, RDims - ContractDims> right_nocontract_t;
413
414 typedef DSizes<Index, NumDims> Dimensions;
415
416 EIGEN_STRONG_INLINE
417 TensorContractionEvaluatorBase(const XprType& op, const Device& device)
418 : m_leftImpl(choose(Cond<static_cast<int>(Layout) == static_cast<int>(ColMajor)>(),
419 op.lhsExpression(), op.rhsExpression()), device),
420 m_rightImpl(choose(Cond<static_cast<int>(Layout) == static_cast<int>(ColMajor)>(),
421 op.rhsExpression(), op.lhsExpression()), device),
422 m_device(device),
423 m_output_kernel(op.outputKernel()),
424 m_result(NULL) {
425 EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) ==
426 static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout)),
427 YOU_MADE_A_PROGRAMMING_MISTAKE);
428
429
430 DSizes<Index, LDims> eval_left_dims;
431 DSizes<Index, RDims> eval_right_dims;
432 array<IndexPair<Index>, ContractDims> eval_op_indices;
433 if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
434 // For ColMajor, we keep using the existing dimensions
435 for (int i = 0; i < LDims; i++) {
436 eval_left_dims[i] = m_leftImpl.dimensions()[i];
437 }
438 for (int i = 0; i < RDims; i++) {
439 eval_right_dims[i] = m_rightImpl.dimensions()[i];
440 }
441 // We keep the pairs of contracting indices.
442 for (int i = 0; i < ContractDims; i++) {
443 eval_op_indices[i].first = op.indices()[i].first;
444 eval_op_indices[i].second = op.indices()[i].second;
445 }
446 } else {
447 // For RowMajor, we need to reverse the existing dimensions
448 for (int i = 0; i < LDims; i++) {
449 eval_left_dims[i] = m_leftImpl.dimensions()[LDims - i - 1];
450 }
451 for (int i = 0; i < RDims; i++) {
452 eval_right_dims[i] = m_rightImpl.dimensions()[RDims - i - 1];
453 }
454 // We need to flip all the pairs of contracting indices as well as
455 // reversing the dimensions.
456 for (int i = 0; i < ContractDims; i++) {
457 eval_op_indices[i].first = LDims - 1 - op.indices()[ContractDims - 1 - i].second;
458 eval_op_indices[i].second = RDims - 1 - op.indices()[ContractDims - 1 - i].first;
459 }
460 }
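    // For example (hypothetical ranks): contracting a rank-3 RowMajor lhs with
    // a rank-2 RowMajor rhs over the single pair (2, 0) swaps the arguments, so
    // here LDims == 2 and RDims == 3, and the pair becomes
    // eval_op_indices[0] == (2 - 1 - 0, 3 - 1 - 2) == (1, 0), i.e. the
    // contracted axes of the reversed, swapped operands.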
461
462 // Check for duplicate axes and make sure the first index in eval_op_indices
463 // is increasing. Using O(n^2) sorting is OK since ContractDims is small
464 for (int i = 0; i < ContractDims; i++) {
465 for (int j = i + 1; j < ContractDims; j++) {
466 eigen_assert(eval_op_indices[j].first != eval_op_indices[i].first &&
467 eval_op_indices[j].second != eval_op_indices[i].second &&
468 "contraction axes should be unique");
469 if (eval_op_indices[j].first < eval_op_indices[i].first) {
470 numext::swap(eval_op_indices[j], eval_op_indices[i]);
471 }
472 }
473 }
474
475 array<Index, LDims> lhs_strides;
476 lhs_strides[0] = 1;
477 for (int i = 0; i < LDims-1; ++i) {
478 lhs_strides[i+1] = lhs_strides[i] * eval_left_dims[i];
479 }
480
481 array<Index, RDims> rhs_strides;
482 rhs_strides[0] = 1;
483 for (int i = 0; i < RDims-1; ++i) {
484 rhs_strides[i+1] = rhs_strides[i] * eval_right_dims[i];
485 }
486
487 if (m_i_strides.size() > 0) m_i_strides[0] = 1;
488 if (m_j_strides.size() > 0) m_j_strides[0] = 1;
489 if (m_k_strides.size() > 0) m_k_strides[0] = 1;
490
491 m_i_size = 1;
492 m_j_size = 1;
493 m_k_size = 1;
494
495 // To compute the output dimensions, we simply concatenate the non-contracting
496 // dimensions of the left and then the right tensor. Additionally, we also
497 // compute the strides corresponding to the left non-contracting
498 // dimensions and right non-contracting dimensions.
499 m_lhs_inner_dim_contiguous = true;
500 int dim_idx = 0;
501 Index nocontract_idx = 0;
502
503 for (int i = 0; i < LDims; i++) {
504 // find if we are contracting on index i of left tensor
505 bool contracting = false;
506 for (int j = 0; j < ContractDims; j++) {
507 if (eval_op_indices[j].first == i) {
508 contracting = true;
509 break;
510 }
511 }
512 if (!contracting) {
513 // add dimension size to output dimensions
514 m_dimensions[dim_idx] = eval_left_dims[i];
515 m_left_nocontract_strides[nocontract_idx] = lhs_strides[i];
516 if (dim_idx != i) {
517 m_lhs_inner_dim_contiguous = false;
518 }
519 if (nocontract_idx+1 < internal::array_size<left_nocontract_t>::value) {
520 m_i_strides[nocontract_idx+1] =
521 m_i_strides[nocontract_idx] * eval_left_dims[i];
522 } else {
523 m_i_size = m_i_strides[nocontract_idx] * eval_left_dims[i];
524 }
525 dim_idx++;
526 nocontract_idx++;
527 }
528 }
529
530 nocontract_idx = 0;
531 for (int i = 0; i < RDims; i++) {
532 bool contracting = false;
533 // find if we are contracting on index i of right tensor
534 for (int j = 0; j < ContractDims; j++) {
535 if (eval_op_indices[j].second == i) {
536 contracting = true;
537 break;
538 }
539 }
540 if (!contracting) {
541 m_dimensions[dim_idx] = eval_right_dims[i];
542 if (nocontract_idx+1 < internal::array_size<right_nocontract_t>::value) {
543 m_j_strides[nocontract_idx+1] =
544 m_j_strides[nocontract_idx] * eval_right_dims[i];
545 } else {
546 m_j_size = m_j_strides[nocontract_idx] * eval_right_dims[i];
547 }
548 m_right_nocontract_strides[nocontract_idx] = rhs_strides[i];
549 dim_idx++;
550 nocontract_idx++;
551 }
552 }
553
554 // Now compute the strides corresponding to the contracting dimensions. We
555 // assumed above that non-contracting axes are represented in the same order
556 // in the matrix as they are in the tensor. This is not the case for
557 // contracting axes. As the contracting axes must be of the same size in
558 // each tensor, we'll only look at the first tensor here.
559 m_rhs_inner_dim_contiguous = true;
560 m_rhs_inner_dim_reordered = false;
561 for (int i = 0; i < ContractDims; i++) {
562 Index left = eval_op_indices[i].first;
563 Index right = eval_op_indices[i].second;
564
565 Index size = eval_left_dims[left];
566 eigen_assert(size == eval_right_dims[right] &&
567 "Contraction axes must be same size");
568
569 if (i+1 < static_cast<int>(internal::array_size<contract_t>::value)) {
570 m_k_strides[i+1] = m_k_strides[i] * size;
571 } else {
572 m_k_size = m_k_strides[i] * size;
573 }
574 m_left_contracting_strides[i] = lhs_strides[left];
575 m_right_contracting_strides[i] = rhs_strides[right];
576
577 if (i > 0 && right < eval_op_indices[i-1].second) {
578 m_rhs_inner_dim_reordered = true;
579 }
580 if (right != i) {
581 m_rhs_inner_dim_contiguous = false;
582 }
583 }
584
585 // If the layout is RowMajor, we need to reverse the m_dimensions
586 if (static_cast<int>(Layout) == static_cast<int>(RowMajor)) {
587 for (int i = 0, j = NumDims - 1; i < j; i++, j--) {
588 numext::swap(m_dimensions[i], m_dimensions[j]);
589 }
590 }
591
592 // A set of parameters that will allow the output kernel to map the output
593 // matrix coordinates (i, j) back to the original output tensor dimensions.
594 // TODO(ezhulenev): Add parameters required to infer output tensor index for
595 // more complex contractions than 2x2 on internal dimension.
596 m_tensor_contraction_params.swapped_arguments = static_cast<int>(Layout) == RowMajor;
597 }
598
599 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
600
601 EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
602 m_leftImpl.evalSubExprsIfNeeded(NULL);
603 m_rightImpl.evalSubExprsIfNeeded(NULL);
604 if (data) {
605 evalTo(data);
606 return false;
607 } else {
608 m_result = static_cast<EvaluatorPointerType>(m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)));
609 evalTo(m_result);
610 return true;
611 }
612 }
613
614#ifdef EIGEN_USE_THREADS
615 template <typename EvalSubExprsCallback>
616 EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
617 EvaluatorPointerType dest, EvalSubExprsCallback done) {
618 m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
619 m_rightImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
620 if (dest) {
621 evalToAsync(dest, [done]() { done(false); });
622 } else {
623 m_result = static_cast<EvaluatorPointerType>(
624 m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)));
625 evalToAsync(m_result, [done]() { done(true); });
626 }
627 });
628 });
629 }
630#endif // EIGEN_USE_THREADS
631
632#ifndef TENSOR_CONTRACTION_DISPATCH
633#define TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \
634 if (this->m_lhs_inner_dim_contiguous) { \
635 if (this->m_rhs_inner_dim_contiguous) { \
636 if (this->m_rhs_inner_dim_reordered) { \
637 METHOD<true, true, true, ALIGNMENT> ARGS; \
638 } else { \
639 METHOD<true, true, false, ALIGNMENT> ARGS; \
640 } \
641 } else { \
642 if (this->m_rhs_inner_dim_reordered) { \
643 METHOD<true, false, true, ALIGNMENT> ARGS; \
644 } else { \
645 METHOD<true, false, false, ALIGNMENT> ARGS; \
646 } \
647 } \
648 } else { \
649 if (this->m_rhs_inner_dim_contiguous) { \
650 if (this->m_rhs_inner_dim_reordered) { \
651 METHOD<false, true, true, ALIGNMENT> ARGS; \
652 } else { \
653 METHOD<false, true, false, ALIGNMENT> ARGS; \
654 } \
655 } else { \
656 if (this->m_rhs_inner_dim_reordered) { \
657 METHOD<false, false, true, ALIGNMENT> ARGS; \
658 } else { \
659 METHOD<false, false, false, ALIGNMENT> ARGS; \
660 } \
661 } \
662 }
663#endif
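
// For example, the default-device evaluator at the bottom of this file calls
//   TENSOR_CONTRACTION_DISPATCH(this->template evalProductSequential,
//                               Alignment, (buffer));
// which selects at runtime, from the three boolean member flags tested above,
// one of the eight
//   this->template evalProductSequential<lhs_contig, rhs_contig, rhs_reordered,
//                                        Alignment>(buffer);
// instantiations (the flag names here are shorthand for those members).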
664
665#ifndef TENSOR_CONTRACTION_ASYNC_DISPATCH
666#define TENSOR_CONTRACTION_ASYNC_DISPATCH(METHOD, DONE, ALIGNMENT, ARGS, FN) \
667 if (this->m_lhs_inner_dim_contiguous) { \
668 if (this->m_rhs_inner_dim_contiguous) { \
669 if (this->m_rhs_inner_dim_reordered) { \
670 (new METHOD<DONE, true, true, true, ALIGNMENT> ARGS)->FN; \
671 } else { \
672 (new METHOD<DONE, true, true, false, ALIGNMENT> ARGS)->FN; \
673 } \
674 } else { \
675 if (this->m_rhs_inner_dim_reordered) { \
676 (new METHOD<DONE, true, false, true, ALIGNMENT> ARGS)->FN; \
677 } else { \
678 (new METHOD<DONE, true, false, false, ALIGNMENT> ARGS)->FN; \
679 } \
680 } \
681 } else { \
682 if (this->m_rhs_inner_dim_contiguous) { \
683 if (this->m_rhs_inner_dim_reordered) { \
684 (new METHOD<DONE, false, true, true, ALIGNMENT> ARGS)->FN; \
685 } else { \
686 (new METHOD<DONE, false, true, false, ALIGNMENT> ARGS)->FN; \
687 } \
688 } else { \
689 if (this->m_rhs_inner_dim_reordered) { \
690 (new METHOD<DONE, false, false, true, ALIGNMENT> ARGS)->FN; \
691 } else { \
692 (new METHOD<DONE, false, false, false, ALIGNMENT> ARGS)->FN; \
693 } \
694 } \
695 }
696#endif
697
698 EIGEN_DEVICE_FUNC void evalTo(Scalar* buffer) const {
699 static_cast<const Derived*>(this)->template evalProduct<Unaligned>(buffer);
700 }
701
702#ifdef EIGEN_USE_THREADS
703 template <typename EvalToCallback>
704 void evalToAsync(Scalar* buffer, EvalToCallback done) const {
705 static_cast<const Derived*>(this)
706 ->template evalProductAsync<EvalToCallback, Unaligned>(buffer,
707 std::move(done));
708 }
709#endif // EIGEN_USE_THREADS
710
711 template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous,
712 bool rhs_inner_dim_reordered, int Alignment>
713 void evalProductSequential(Scalar* buffer) const {
714 if (this->m_j_size == 1) {
715 this->template evalGemv<lhs_inner_dim_contiguous,
716 rhs_inner_dim_contiguous, rhs_inner_dim_reordered,
717 Alignment>(buffer);
718 } else {
719 this->template evalGemm<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous,
720 rhs_inner_dim_reordered, Alignment>(buffer);
721 }
722 }
723
724 template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
725 #if !defined(EIGEN_HIPCC)
726 EIGEN_DEVICE_FUNC
727 #endif
728 void evalGemv(Scalar* buffer) const {
729 const Index rows = m_i_size;
730 const Index cols = m_k_size;
731
732 typedef typename internal::remove_const<typename EvalLeftArgType::Scalar>::type LhsScalar;
733 typedef typename internal::remove_const<typename EvalRightArgType::Scalar>::type RhsScalar;
734 typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
735 typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
736 const Index lhs_packet_size = internal::unpacket_traits<typename LeftEvaluator::PacketReturnType>::size;
737 const Index rhs_packet_size = internal::unpacket_traits<typename RightEvaluator::PacketReturnType>::size;
738 const int lhs_alignment = LeftEvaluator::IsAligned ? Aligned : Unaligned;
739 const int rhs_alignment = RightEvaluator::IsAligned ? Aligned : Unaligned;
740 typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs,
741 LeftEvaluator, left_nocontract_t,
742 contract_t, lhs_packet_size,
743 lhs_inner_dim_contiguous,
744 false, lhs_alignment> LhsMapper;
745
746 typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs,
747 RightEvaluator, right_nocontract_t,
748 contract_t, rhs_packet_size,
749 rhs_inner_dim_contiguous,
750 rhs_inner_dim_reordered, rhs_alignment> RhsMapper;
751
752 LhsMapper lhs(m_leftImpl, m_left_nocontract_strides, m_i_strides,
753 m_left_contracting_strides, m_k_strides);
754 RhsMapper rhs(m_rightImpl, m_right_nocontract_strides, m_j_strides,
755 m_right_contracting_strides, m_k_strides);
756
757 const Scalar alpha(1);
758 const Index resIncr(1);
759
760 // Zero out the result buffer (which must be of size at least rows * sizeof(Scalar)).
761 m_device.memset(buffer, 0, rows * sizeof(Scalar));
762
763 internal::general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,false,RhsScalar,RhsMapper,false>::run(
764 rows, cols, lhs, rhs,
765 buffer, resIncr, alpha);
766
767 typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
768 m_output_kernel(OutputMapper(buffer, rows), m_tensor_contraction_params,
769 static_cast<Index>(0), static_cast<Index>(0), rows,
770 static_cast<Index>(1));
771 }
772
773 template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
774 #if !defined(EIGEN_HIPCC)
775 EIGEN_DEVICE_FUNC
776 #endif
777 void evalGemm(Scalar* buffer) const {
778 // columns in left side, rows in right side
779 const Index k = this->m_k_size;
780 this->template evalGemmPartial<lhs_inner_dim_contiguous,
781 rhs_inner_dim_contiguous,
782 rhs_inner_dim_reordered,
783 Alignment, true>(buffer, 0, k, 1);
784 }
785
786 template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous,
787 bool rhs_inner_dim_reordered, int Alignment>
788 EIGEN_DEVICE_FUNC void evalGemmPartialWithoutOutputKernel(
789 Scalar* buffer, Index k_start, Index k_end, int num_threads) const {
790 evalGemmPartial<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous,
791 rhs_inner_dim_reordered, Alignment,
792 /*use_output_kernel*/ false>(buffer, k_start, k_end,
793 num_threads);
794 }
795
796 template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment, bool use_output_kernel>
797 EIGEN_DEVICE_FUNC void evalGemmPartial(Scalar* buffer, Index k_start, Index k_end, int num_threads) const {
798 eigen_assert(k_end >= k_start && k_start >= 0 && k_end <= this->m_k_size);
799 // columns in slice on left side, rows on right side
800 const Index k_slice = k_end - k_start;
801
802 // rows in left side
803 const Index m = this->m_i_size;
804
805 // columns in right side
806 const Index n = this->m_j_size;
807
808 // define data mappers for Lhs and Rhs
809 typedef typename internal::remove_const<typename EvalLeftArgType::Scalar>::type LhsScalar;
810 typedef typename internal::remove_const<typename EvalRightArgType::Scalar>::type RhsScalar;
811
812 typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
813 typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
814
815 const Index lhs_packet_size = internal::unpacket_traits<typename LeftEvaluator::PacketReturnType>::size;
816 const Index rhs_packet_size = internal::unpacket_traits<typename RightEvaluator::PacketReturnType>::size;
817
818 typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs,
819 LeftEvaluator, left_nocontract_t,
820 contract_t, lhs_packet_size,
821 lhs_inner_dim_contiguous,
822 false, Unaligned> LhsMapper;
823
824 typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs,
825 RightEvaluator, right_nocontract_t,
826 contract_t, rhs_packet_size,
827 rhs_inner_dim_contiguous,
828 rhs_inner_dim_reordered, Unaligned> RhsMapper;
829
830 typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
831
832 typedef internal::TensorContractionKernel<
833 Scalar, LhsScalar, RhsScalar, Index, OutputMapper, LhsMapper, RhsMapper>
834 TensorContractionKernel;
835
836 // initialize data mappers
837 LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides,
838 this->m_left_contracting_strides, this->m_k_strides);
839
840 RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides,
841 this->m_right_contracting_strides, this->m_k_strides);
842
843 OutputMapper output(buffer, m);
844
845 // Sizes of the blocks to load in cache. See the Goto paper for details.
846 internal::TensorContractionBlocking<Scalar, LhsScalar, RhsScalar,
847 Index, internal::ShardByCol>
848 blocking(k_slice, m, n, num_threads);
849 const Index kc = blocking.kc();
850 const Index mc = numext::mini(m, blocking.mc());
851 const Index nc = numext::mini(n, blocking.nc());
852
853 typedef typename TensorContractionKernel::LhsBlock LhsBlock;
854 typedef typename TensorContractionKernel::RhsBlock RhsBlock;
855
856 LhsBlock blockA;
857 RhsBlock blockB;
858
859 TensorContractionKernel kernel(m, k_slice, n, mc, kc, nc);
860
861 typedef typename TensorContractionKernel::BlockMemHandle BlockMemHandle;
862 const BlockMemHandle packed_mem =
863 kernel.allocate(this->m_device, &blockA, &blockB);
864
865 // If a contraction kernel does not support beta, explicitly initialize
866 // output buffer with zeroes.
867 if (!TensorContractionKernel::HasBeta) {
868 this->m_device.memset(buffer, 0, m * n * sizeof(Scalar));
869 }
870
871 for(Index i2=0; i2<m; i2+=mc)
872 {
873 const Index actual_mc = numext::mini(i2+mc,m)-i2;
874 for (Index k2 = k_start; k2 < k_end; k2 += kc) {
875 // make sure we don't overshoot right edge of left matrix, then pack vertical panel
876 const Index actual_kc = numext::mini(k2 + kc, k_end) - k2;
877 kernel.packLhs(&blockA, lhs.getSubMapper(i2, k2), actual_kc, actual_mc);
878
879 // If kernel supports beta, there is no need to initialize output
880 // buffer with zeroes.
881 const Scalar alpha = Scalar(1);
882 const Scalar beta = (TensorContractionKernel::HasBeta && k2 == k_start)
883 ? Scalar(0)
884 : Scalar(1);
885
886 // series of horizontal blocks
887 for (Index j2 = 0; j2 < n; j2 += nc) {
888 // make sure we don't overshoot right edge of right matrix, then pack block
889 const Index actual_nc = numext::mini(j2 + nc, n) - j2;
890 kernel.packRhs(&blockB, rhs.getSubMapper(k2, j2), actual_kc,
891 actual_nc);
892
893 // call gebp (matrix kernel)
894 // The parameters here are copied from Eigen's GEMM implementation
895 const OutputMapper output_mapper = output.getSubMapper(i2, j2);
896 kernel.invoke(output_mapper, blockA, blockB, actual_mc, actual_kc,
897 actual_nc, alpha, beta);
898
899 // We are done with this [i2, j2] output block.
900 if (use_output_kernel && k2 + kc >= k_end) {
901 m_output_kernel(output_mapper, m_tensor_contraction_params, i2, j2,
902 actual_mc, actual_nc);
903 }
904 }
905 }
906 }
907
908 kernel.deallocate(this->m_device, packed_mem);
909 }
910
911 EIGEN_STRONG_INLINE void cleanup() {
912 m_leftImpl.cleanup();
913 m_rightImpl.cleanup();
914
915 if (m_result != NULL) {
916 m_device.deallocate(m_result);
917 m_result = NULL;
918 }
919 }
920
921 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
922 return m_result[index];
923 }
924
925 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const {
926 return TensorOpCost(sizeof(CoeffReturnType), 0, 0);
927 }
928
929 template<int LoadMode>
930 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
931 return internal::ploadt<PacketReturnType, LoadMode>(m_result + index);
932 }
933
934 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvaluatorPointerType data() const { return m_result; }
935
936protected:
937 Dimensions m_dimensions;
938
939 contract_t m_k_strides;
940 contract_t m_left_contracting_strides;
941 contract_t m_right_contracting_strides;
942
943 bool m_lhs_inner_dim_contiguous;
944 bool m_rhs_inner_dim_contiguous;
945 bool m_rhs_inner_dim_reordered;
946
947 left_nocontract_t m_i_strides;
948 right_nocontract_t m_j_strides;
949 left_nocontract_t m_left_nocontract_strides;
950 right_nocontract_t m_right_nocontract_strides;
951
952 Index m_i_size;
953 Index m_j_size;
954 Index m_k_size;
955
956 TensorContractionParams m_tensor_contraction_params;
957
958 TensorEvaluator<EvalLeftArgType, Device> m_leftImpl;
959 TensorEvaluator<EvalRightArgType, Device> m_rightImpl;
960 const Device EIGEN_DEVICE_REF m_device;
961 OutputKernelType m_output_kernel;
962 EvaluatorPointerType m_result;
963};
964
965
966// evaluator for default device
967template<typename Indices, typename LeftArgType, typename RightArgType, typename OutputKernelType, typename Device>
968struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device> :
969 public TensorContractionEvaluatorBase<
970 TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device> > {
971 typedef TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device> Self;
972 typedef TensorContractionEvaluatorBase<Self> Base;
973
974 typedef TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType> XprType;
975 typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
976 typedef typename XprType::Index Index;
977 typedef typename XprType::CoeffReturnType CoeffReturnType;
978 typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
979
980 enum {
981 Layout = TensorEvaluator<LeftArgType, Device>::Layout
982 };
983
984 // Most of the code is assuming that both input tensors are ColMajor. If the
985 // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS:
986 // If we want to compute A * B = C, where A is LHS and B is RHS, the code
987 // will pretend B is LHS and A is RHS.
988 typedef typename internal::conditional<
989 static_cast<int>(Layout) == static_cast<int>(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType;
990 typedef typename internal::conditional<
991 static_cast<int>(Layout) == static_cast<int>(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType;
992
993 static const int LDims =
994 internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
995 static const int RDims =
996 internal::array_size<typename TensorEvaluator<EvalRightArgType, Device>::Dimensions>::value;
997 static const int ContractDims = internal::array_size<Indices>::value;
998
999 typedef array<Index, ContractDims> contract_t;
1000 typedef array<Index, LDims - ContractDims> left_nocontract_t;
1001 typedef array<Index, RDims - ContractDims> right_nocontract_t;
1002
1003 static const int NumDims = LDims + RDims - 2 * ContractDims;
1004
1005 // Could we use NumDimensions here?
1006 typedef DSizes<Index, NumDims> Dimensions;
1007
1008 TensorEvaluator(const XprType& op, const Device& device) :
1009 Base(op, device) { }
1010
1011 template <int Alignment>
1012 void evalProduct(Scalar* buffer) const {
1013 TENSOR_CONTRACTION_DISPATCH(this->template evalProductSequential, Alignment, (buffer));
1014 }
1015};
1016
1017} // end namespace Eigen
1018
1019#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H