TensorContraction.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H
#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H

// IWYU pragma: private
#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {

template <typename Dimensions, typename LhsXprType, typename RhsXprType, typename OutputKernelType>
struct traits<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>> {
  // Type promotion to handle the case where the types of the lhs and the rhs are different.
  typedef typename gebp_traits<std::remove_const_t<typename LhsXprType::Scalar>,
                               std::remove_const_t<typename RhsXprType::Scalar>>::ResScalar Scalar;

  typedef typename promote_storage_type<typename traits<LhsXprType>::StorageKind,
                                        typename traits<RhsXprType>::StorageKind>::ret StorageKind;
  typedef
      typename promote_index_type<typename traits<LhsXprType>::Index, typename traits<RhsXprType>::Index>::type Index;
  typedef typename LhsXprType::Nested LhsNested;
  typedef typename RhsXprType::Nested RhsNested;
  typedef std::remove_reference_t<LhsNested> LhsNested_;
  typedef std::remove_reference_t<RhsNested> RhsNested_;

  // From NumDims below.
  static constexpr int NumDimensions =
      traits<LhsXprType>::NumDimensions + traits<RhsXprType>::NumDimensions - 2 * array_size<Dimensions>::value;
  static constexpr int Layout = traits<LhsXprType>::Layout;
  typedef std::conditional_t<Pointer_type_promotion<typename LhsXprType::Scalar, Scalar>::val,
                             typename traits<LhsXprType>::PointerType, typename traits<RhsXprType>::PointerType>
      PointerType;

  enum { Flags = 0 };
};
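// For illustration: contracting a rank-3 lhs with a rank-2 rhs over a single
// index pair gives NumDimensions = 3 + 2 - 2 * 1 = 3, i.e. the non-contracting
// dimensions of both operands concatenated.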

template <typename Dimensions, typename LhsXprType, typename RhsXprType, typename OutputKernelType>
struct eval<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>, Eigen::Dense> {
  typedef const TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>& type;
};

template <typename Dimensions, typename LhsXprType, typename RhsXprType, typename OutputKernelType>
struct nested<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>, 1,
              typename eval<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>>::type> {
  typedef TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType> type;
};

template <typename Indices_, typename LeftArgType_, typename RightArgType_, typename OutputKernelType_,
          typename Device_>
struct traits<
    TensorEvaluator<const TensorContractionOp<Indices_, LeftArgType_, RightArgType_, OutputKernelType_>, Device_>> {
  typedef Indices_ Indices;
  typedef LeftArgType_ LeftArgType;
  typedef RightArgType_ RightArgType;
  typedef OutputKernelType_ OutputKernelType;
  typedef Device_ Device;

  // From NumDims below.
  static constexpr int NumDimensions =
      traits<LeftArgType_>::NumDimensions + traits<RightArgType_>::NumDimensions - 2 * array_size<Indices_>::value;
};

// Helper class to allocate and deallocate temporary memory for packed buffers.
template <typename LhsScalar, typename RhsScalar>
struct TensorContractionBlockMemAllocator {
  typedef void* BlockMemHandle;

  template <typename Device>
  EIGEN_DEVICE_FUNC static BlockMemHandle allocate(Device& d, const Index bm, const Index bk, const Index bn,
                                                   LhsScalar** lhs_block, RhsScalar** rhs_block) {
    eigen_assert(lhs_block);
    eigen_assert(rhs_block);
    BlockSizes sz = ComputeLhsRhsBlockSizes(bm, bk, bn);
    char* block_mem = static_cast<char*>(d.allocate(sz.lhs_size + sz.rhs_size));
    *lhs_block = static_cast<LhsScalar*>(static_cast<void*>(block_mem));
    *rhs_block = static_cast<RhsScalar*>(static_cast<void*>(block_mem + sz.lhs_size));
    return block_mem;
  }

  template <typename Device>
  EIGEN_DEVICE_FUNC static BlockMemHandle allocateSlices(Device& d, const Index bm, const Index bk, const Index bn,
                                                         const Index num_lhs, const Index num_rhs,
                                                         const Index num_slices, std::vector<LhsScalar*>* lhs_blocks,
                                                         std::vector<RhsScalar*>* rhs_blocks) {
    eigen_assert(num_slices > 0);
    eigen_assert(num_lhs >= 0 && num_rhs >= 0);
    eigen_assert(num_lhs == 0 || lhs_blocks);
    eigen_assert(num_rhs == 0 || rhs_blocks);
    BlockSizes sz = ComputeLhsRhsBlockSizes(bm, bk, bn);
    void* block_mem = d.allocate((num_lhs * sz.lhs_size + num_rhs * sz.rhs_size) * num_slices);
    eigen_assert(block_mem);
    char* mem = static_cast<char*>(block_mem);

    for (Index x = 0; x < num_slices; x++) {
      if (num_lhs > 0) lhs_blocks[x].resize(num_lhs);
      for (Index m = 0; m < num_lhs; m++) {
        lhs_blocks[x][m] = static_cast<LhsScalar*>(static_cast<void*>(mem));
        mem += sz.lhs_size;
      }
      if (num_rhs > 0) rhs_blocks[x].resize(num_rhs);
      for (Index n = 0; n < num_rhs; n++) {
        rhs_blocks[x][n] = static_cast<RhsScalar*>(static_cast<void*>(mem));
        mem += sz.rhs_size;
      }
    }

    return block_mem;
  }

  template <typename Device>
  EIGEN_DEVICE_FUNC static void deallocate(Device& d, BlockMemHandle handle) {
    d.deallocate(handle);
  }

 private:
  struct BlockSizes {
    Index lhs_size;
    Index rhs_size;
  };
  EIGEN_DEVICE_FUNC static BlockSizes ComputeLhsRhsBlockSizes(const Index bm, const Index bk, const Index bn) {
    Index align = numext::maxi(EIGEN_MAX_ALIGN_BYTES, 1);
    BlockSizes sz;
    sz.lhs_size = numext::div_ceil<Index>(bm * bk * sizeof(LhsScalar), align) * align;
    sz.rhs_size = numext::div_ceil<Index>(bn * bk * sizeof(RhsScalar), align) * align;
    return sz;
  }
};
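// Illustrative sketch of how the allocator above is typically used (usage
// example only; `device` stands for any Eigen device, e.g. DefaultDevice, that
// provides allocate()/deallocate()): a single device allocation is carved into
// an aligned lhs block followed by an aligned rhs block, and both are released
// through the one handle.
//
//   float* lhs_block = nullptr;
//   float* rhs_block = nullptr;
//   TensorContractionBlockMemAllocator<float, float>::BlockMemHandle handle =
//       TensorContractionBlockMemAllocator<float, float>::allocate(
//           device, /*bm=*/128, /*bk=*/256, /*bn=*/64, &lhs_block, &rhs_block);
//   // ... pack panels into lhs_block / rhs_block and run the GEBP kernel ...
//   TensorContractionBlockMemAllocator<float, float>::deallocate(device, handle);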

// WARNING: In this code we assume that Lhs and Rhs tensor expressions are in
// ColMajor storage order. This property is guaranteed by the
// TensorContractionOp evaluator. TensorContractionKernel specifies how we pack
// blocks of the Lhs and Rhs tensor expressions, and how we invoke matrix
// multiplication for these blocks. The default tensor contraction uses
// gemm_pack_rhs, gemm_pack_lhs and gebp_kernel from Eigen Core (see
// GeneralBlockPanelKernel.h for details).
//
// By specializing contraction kernels we can use other low level libraries to
// perform matrix multiplication, and still rely on the Eigen contraction
// evaluator. This also includes full support in TensorContractionThreadPool,
// assuming that the underlying gemm does not use its own threading.
//
// - ResScalar/LhsScalar/RhsScalar - scalar types for the result of the
//   multiplication, the lhs tensor and the rhs tensor respectively.
//
// - StorageIndex - index type for the tensor expressions. In practice this is
//   almost always Eigen::Index.
//
// - OutputMapper provides access to the memory of the output matrix. In
//   practice it's always a column major blas_data_mapper (it must be of
//   ResScalar type).
//
// - LhsMapper/RhsMapper, similarly to blas_data_mapper, provide a two
//   dimensional view into the Lhs/Rhs tensor expressions. In practice it's
//   TensorContractionInputMapper, or some specialization of it based on the
//   type of tensor expression (e.g. TensorImagePatchOp has an optimized input
//   mapper).
template <typename ResScalar, typename LhsScalar, typename RhsScalar, typename StorageIndex, typename OutputMapper,
          typename LhsMapper, typename RhsMapper>
struct TensorContractionKernel {
  // True if `invoke()` supports `beta` in `C <- alpha * A * B + beta * C`
  // (otherwise beta should be always equal to 1).
  enum { HasBeta = false };

  EIGEN_DEVICE_FUNC TensorContractionKernel(StorageIndex m_, StorageIndex k_, StorageIndex n_, StorageIndex bm_,
                                            StorageIndex bk_, StorageIndex bn_)
      : m(m_), k(k_), n(n_), bm(bm_), bk(bk_), bn(bn_) {}

  // Pack blocks of Lhs and Rhs into contiguous blocks in memory.
  typedef LhsScalar* LhsBlock;
  typedef RhsScalar* RhsBlock;

  // Packed Lhs/Rhs block memory allocator.
  typedef TensorContractionBlockMemAllocator<LhsScalar, RhsScalar> BlockMemAllocator;
  typedef typename BlockMemAllocator::BlockMemHandle BlockMemHandle;

  typedef typename internal::gebp_traits<LhsScalar, RhsScalar> Traits;

  typedef internal::gemm_pack_lhs<LhsScalar, StorageIndex, typename LhsMapper::SubMapper, Traits::mr,
                                  Traits::LhsProgress, typename Traits::LhsPacket4Packing, ColMajor>
      LhsPacker;

  typedef internal::gemm_pack_rhs<RhsScalar, StorageIndex, typename RhsMapper::SubMapper, Traits::nr, ColMajor>
      RhsPacker;

  typedef internal::gebp_kernel<LhsScalar, RhsScalar, StorageIndex, OutputMapper, Traits::mr, Traits::nr,
                                /*ConjugateLhs*/ false, /*ConjugateRhs*/ false>
      GebpKernel;

  template <typename Device>
  EIGEN_DEVICE_FUNC BlockMemHandle allocate(Device& d, LhsBlock* lhs_block, RhsBlock* rhs_block) {
    return BlockMemAllocator::allocate(d, bm, bk, bn, lhs_block, rhs_block);
  }

  template <typename Device>
  EIGEN_DEVICE_FUNC BlockMemHandle allocateSlices(Device& d, const StorageIndex num_lhs, const StorageIndex num_rhs,
                                                  const StorageIndex num_slices, std::vector<LhsBlock>* lhs_blocks,
                                                  std::vector<RhsBlock>* rhs_blocks) {
    return BlockMemAllocator::allocateSlices(d, bm, bk, bn, num_lhs, num_rhs, num_slices, lhs_blocks, rhs_blocks);
  }

  template <typename Device>
  EIGEN_DEVICE_FUNC static void deallocate(Device& d, BlockMemHandle handle) {
    BlockMemAllocator::deallocate(d, handle);
  }

  EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void packLhs(LhsBlock* lhsBlock, const typename LhsMapper::SubMapper& data_mapper,
                                                   const StorageIndex depth, const StorageIndex rows) {
    LhsPacker()(*lhsBlock, data_mapper, depth, rows, /*stride*/ 0,
                /*offset*/ 0);
  }

  EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void packRhs(RhsBlock* rhsBlock, const typename RhsMapper::SubMapper& data_mapper,
                                                   const StorageIndex depth, const StorageIndex cols) {
    RhsPacker()(*rhsBlock, data_mapper, depth, cols);
  }

  EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void invoke(const OutputMapper& output_mapper, const LhsBlock& lhsBlock,
                                                  const RhsBlock& rhsBlock, const StorageIndex rows,
                                                  const StorageIndex depth, const StorageIndex cols,
                                                  const ResScalar alpha, const ResScalar beta) {
    // Default GEBP kernel does not support beta.
    eigen_assert(beta == ResScalar(1));
    static const int kComputeStrideFromBlockDimensions = -1;
    GebpKernel()(output_mapper, lhsBlock, rhsBlock, rows, depth, cols, alpha,
                 /*strideA*/ kComputeStrideFromBlockDimensions,
                 /*strideB*/ kComputeStrideFromBlockDimensions,
                 /*offsetA*/ 0, /*offsetB*/ 0);
  }

 private:
  // These are the dimensions of the original tensors, and the selected block
  // sizes. The actual block sizes passed to the functions above might be
  // smaller because of partial blocks at the end.
  const StorageIndex m;
  const StorageIndex k;
  const StorageIndex n;
  const StorageIndex bm;
  const StorageIndex bk;
  const StorageIndex bn;
};
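// Illustrative sketch of the calling convention the contraction evaluators
// below follow for a single (mc, kc, nc) block triple (usage example only;
// `device`, `lhs`, `rhs` and `output` stand for a device, the two input
// mappers and the output mapper):
//
//   TensorContractionKernel kernel(m, k, n, bm, bk, bn);
//   LhsBlock lhs_block;
//   RhsBlock rhs_block;
//   BlockMemHandle mem = kernel.allocate(device, &lhs_block, &rhs_block);
//   kernel.packLhs(&lhs_block, lhs.getSubMapper(i, k), kc, mc);
//   kernel.packRhs(&rhs_block, rhs.getSubMapper(k, j), kc, nc);
//   kernel.invoke(output.getSubMapper(i, j), lhs_block, rhs_block, mc, kc, nc,
//                 /*alpha=*/ResScalar(1), /*beta=*/ResScalar(1));
//   kernel.deallocate(device, mem);
//
// Custom kernels (e.g. backed by an external BLAS) can specialize this struct
// as long as they keep this interface.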

} // end namespace internal

// Tensor contraction params that make it possible to map the 2-dimensional
// coordinates of the output matrix back to the output tensor dimensions.
struct TensorContractionParams {
  // The TensorContraction evaluator assumes that both tensors are in ColMajor
  // layout; if the tensors are in RowMajor layout, the evaluator swaps lhs
  // with rhs.
  bool swapped_arguments;
};

// An output kernel allows fusing operations into the tensor contraction.
//
// Examples:
// 1. Elementwise Relu transformation following Conv2D.
// 2. AddBias to the Conv2D output channels dimension.
//
// The NoOpOutputKernel implements an output kernel that does absolutely nothing.
struct NoOpOutputKernel {
  template <typename Index, typename Scalar>
  EIGEN_ALWAYS_INLINE void operator()(const internal::blas_data_mapper<Scalar, Index, ColMajor>& output_mapper,
                                      const TensorContractionParams& params, Index i, Index j, Index num_rows,
                                      Index num_cols) const {
    EIGEN_UNUSED_VARIABLE(output_mapper);
    EIGEN_UNUSED_VARIABLE(params);
    EIGEN_UNUSED_VARIABLE(i);
    EIGEN_UNUSED_VARIABLE(j);
    EIGEN_UNUSED_VARIABLE(num_rows);
    EIGEN_UNUSED_VARIABLE(num_cols);
  }
};
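
// Illustrative sketch of a custom output kernel that applies an elementwise
// Relu to every finished output block, as in example 1 above (usage example
// only, not part of the library). The mapper provides column-major (row, col)
// access into the output block that starts at output coordinates (i, j):
//
//   struct ReluOutputKernel {
//     template <typename Index, typename Scalar>
//     EIGEN_ALWAYS_INLINE void operator()(
//         const internal::blas_data_mapper<Scalar, Index, ColMajor>& output_mapper,
//         const TensorContractionParams& /*params*/, Index /*i*/, Index /*j*/,
//         Index num_rows, Index num_cols) const {
//       for (Index col = 0; col < num_cols; ++col) {
//         for (Index row = 0; row < num_rows; ++row) {
//           Scalar& value = output_mapper(row, col);
//           value = numext::maxi(value, Scalar(0));
//         }
//       }
//     }
//   };
//
// Such a kernel can be passed as the extra argument of TensorBase::contract and
// becomes the OutputKernelType of the TensorContractionOp below.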

template <typename Indices, typename LhsXprType, typename RhsXprType,
          typename OutputKernelType = const NoOpOutputKernel>
class TensorContractionOp
    : public TensorBase<TensorContractionOp<Indices, LhsXprType, RhsXprType, OutputKernelType>, ReadOnlyAccessors> {
 public:
  typedef typename Eigen::internal::traits<TensorContractionOp>::Scalar Scalar;
  typedef typename internal::gebp_traits<typename LhsXprType::CoeffReturnType,
                                         typename RhsXprType::CoeffReturnType>::ResScalar CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorContractionOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorContractionOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorContractionOp>::Index Index;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorContractionOp(const LhsXprType& lhs, const RhsXprType& rhs,
                                                            const Indices& dims,
                                                            const OutputKernelType& output_kernel = OutputKernelType())
      : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_indices(dims), m_output_kernel(output_kernel) {}

  EIGEN_DEVICE_FUNC const Indices& indices() const { return m_indices; }

  EIGEN_DEVICE_FUNC const internal::remove_all_t<typename LhsXprType::Nested>& lhsExpression() const {
    return m_lhs_xpr;
  }

  EIGEN_DEVICE_FUNC const internal::remove_all_t<typename RhsXprType::Nested>& rhsExpression() const {
    return m_rhs_xpr;
  }

  EIGEN_DEVICE_FUNC const OutputKernelType& outputKernel() const { return m_output_kernel; }

 protected:
  typename LhsXprType::Nested m_lhs_xpr;
  typename RhsXprType::Nested m_rhs_xpr;
  const Indices m_indices;
  const OutputKernelType m_output_kernel;
};
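
// Illustrative usage (example only): TensorContractionOp is normally created
// through TensorBase::contract rather than constructed directly. Contracting
// dimension 1 of a 2x3 tensor with dimension 0 of a 3x4 tensor is an ordinary
// matrix product:
//
//   Eigen::Tensor<float, 2> a(2, 3), b(3, 4);
//   a.setRandom();
//   b.setRandom();
//   Eigen::array<Eigen::IndexPair<int>, 1> dims = {Eigen::IndexPair<int>(1, 0)};
//   Eigen::Tensor<float, 2> c = a.contract(b, dims);  // c has dimensions (2, 4)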

template <typename Derived>
struct TensorContractionEvaluatorBase {
  typedef typename internal::traits<Derived>::Indices Indices;
  typedef typename internal::traits<Derived>::LeftArgType LeftArgType;
  typedef typename internal::traits<Derived>::RightArgType RightArgType;
  typedef typename internal::traits<Derived>::OutputKernelType OutputKernelType;
  typedef typename internal::traits<Derived>::Device Device;

  typedef TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType> XprType;
  typedef std::remove_const_t<typename XprType::Scalar> Scalar;
  typedef typename XprType::Index Index;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef StorageMemory<Scalar, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  static constexpr int Layout = TensorEvaluator<LeftArgType, Device>::Layout;
  enum {
    IsAligned = true,
    PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
    BlockAccess = false,
    PreferBlockAccess = false,
    CoordAccess = false,  // to be implemented
    RawAccess = true
  };

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockNotImplemented TensorBlock;
  //===--------------------------------------------------------------------===//

  // Most of the code is assuming that both input tensors are ColMajor. If the
  // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS:
  // If we want to compute A * B = C, where A is LHS and B is RHS, the code
  // will pretend B is LHS and A is RHS.
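  // (Why this works: the contraction is evaluated as a 2-D matrix product, a
  // RowMajor matrix viewed as ColMajor is its transpose, and B^T * A^T =
  // (A * B)^T, so the swapped ColMajor product fills the output buffer with
  // exactly the bytes of the RowMajor result.)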
  typedef std::conditional_t<static_cast<int>(Layout) == static_cast<int>(ColMajor), LeftArgType, RightArgType>
      EvalLeftArgType;
  typedef std::conditional_t<static_cast<int>(Layout) == static_cast<int>(ColMajor), RightArgType, LeftArgType>
      EvalRightArgType;

  typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluatorType;
  typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluatorType;

  static constexpr int LDims =
      internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
  static constexpr int RDims =
      internal::array_size<typename TensorEvaluator<EvalRightArgType, Device>::Dimensions>::value;
  static constexpr int ContractDims = internal::array_size<Indices>::value;
  static constexpr int NumDims = LDims + RDims - 2 * ContractDims;

  typedef array<Index, ContractDims> contract_t;
  typedef array<Index, LDims - ContractDims> left_nocontract_t;
  typedef array<Index, RDims - ContractDims> right_nocontract_t;

  typedef DSizes<Index, NumDims> Dimensions;

  EIGEN_STRONG_INLINE TensorContractionEvaluatorBase(const XprType& op, const Device& device)
      : m_leftImpl(choose(Cond<static_cast<int>(Layout) == static_cast<int>(ColMajor)>(), op.lhsExpression(),
                          op.rhsExpression()),
                   device),
        m_rightImpl(choose(Cond<static_cast<int>(Layout) == static_cast<int>(ColMajor)>(), op.rhsExpression(),
                           op.lhsExpression()),
                    device),
        m_device(device),
        m_output_kernel(op.outputKernel()),
        m_result(NULL) {
    EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) ==
                         static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout)),
                        YOU_MADE_A_PROGRAMMING_MISTAKE);

    DSizes<Index, LDims> eval_left_dims;
    DSizes<Index, RDims> eval_right_dims;
    array<IndexPair<Index>, ContractDims> eval_op_indices;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      // For ColMajor, we keep using the existing dimensions
      for (int i = 0; i < LDims; i++) {
        eval_left_dims[i] = m_leftImpl.dimensions()[i];
      }
      for (int i = 0; i < RDims; i++) {
        eval_right_dims[i] = m_rightImpl.dimensions()[i];
      }
      // We keep the pairs of contracting indices.
      for (int i = 0; i < ContractDims; i++) {
        eval_op_indices[i].first = op.indices()[i].first;
        eval_op_indices[i].second = op.indices()[i].second;
      }
    } else {
      // For RowMajor, we need to reverse the existing dimensions
      for (int i = 0; i < LDims; i++) {
        eval_left_dims[i] = m_leftImpl.dimensions()[LDims - i - 1];
      }
      for (int i = 0; i < RDims; i++) {
        eval_right_dims[i] = m_rightImpl.dimensions()[RDims - i - 1];
      }
      // We need to flip all the pairs of contracting indices as well as
      // reversing the dimensions.
      for (int i = 0; i < ContractDims; i++) {
        eval_op_indices[i].first = LDims - 1 - op.indices()[ContractDims - 1 - i].second;
        eval_op_indices[i].second = RDims - 1 - op.indices()[ContractDims - 1 - i].first;
      }
    }

    // Check for duplicate axes and make sure the first index in eval_op_indices
    // is increasing. Using O(n^2) sorting is OK since ContractDims is small
    for (int i = 0; i < ContractDims; i++) {
      for (int j = i + 1; j < ContractDims; j++) {
        eigen_assert(eval_op_indices[j].first != eval_op_indices[i].first &&
                     eval_op_indices[j].second != eval_op_indices[i].second && "contraction axes should be unique");
        if (eval_op_indices[j].first < eval_op_indices[i].first) {
          numext::swap(eval_op_indices[j], eval_op_indices[i]);
        }
      }
    }

    array<Index, LDims> lhs_strides;
    lhs_strides[0] = 1;
    for (int i = 0; i < LDims - 1; ++i) {
      lhs_strides[i + 1] = lhs_strides[i] * eval_left_dims[i];
    }

    array<Index, RDims> rhs_strides;
    rhs_strides[0] = 1;
    for (int i = 0; i < RDims - 1; ++i) {
      rhs_strides[i + 1] = rhs_strides[i] * eval_right_dims[i];
    }

    if (m_i_strides.size() > 0) m_i_strides[0] = 1;
    if (m_j_strides.size() > 0) m_j_strides[0] = 1;
    if (m_k_strides.size() > 0) m_k_strides[0] = 1;

    m_i_size = 1;
    m_j_size = 1;
    m_k_size = 1;

    // To compute the dimension, we simply concatenate the non-contracting
    // dimensions of the left and then the right tensor. Additionally, we also
    // compute the strides corresponding to the left non-contracting
    // dimensions and right non-contracting dimensions.
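    // Worked example (illustration only): contracting a lhs of dimensions
    // (3, 4, 5) with a rhs of dimensions (4, 6) over the index pair {1, 0}
    // keeps the non-contracting lhs dimensions (3, 5) followed by the
    // non-contracting rhs dimension (6), so the output has dimensions
    // (3, 5, 6) with m_i_size = 15, m_j_size = 6 and m_k_size = 4.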
    m_lhs_inner_dim_contiguous = true;
    int dim_idx = 0;
    Index nocontract_idx = 0;

    for (int i = 0; i < LDims; i++) {
      // find if we are contracting on index i of left tensor
      bool contracting = false;
      for (int j = 0; j < ContractDims; j++) {
        if (eval_op_indices[j].first == i) {
          contracting = true;
          break;
        }
      }
      if (!contracting) {
        // add dimension size to output dimensions
        m_dimensions[dim_idx] = eval_left_dims[i];
        m_left_nocontract_strides[nocontract_idx] = lhs_strides[i];
        if (dim_idx != i) {
          m_lhs_inner_dim_contiguous = false;
        }
        if (nocontract_idx + 1 < internal::array_size<left_nocontract_t>::value) {
          m_i_strides[nocontract_idx + 1] = m_i_strides[nocontract_idx] * eval_left_dims[i];
        } else {
          m_i_size = m_i_strides[nocontract_idx] * eval_left_dims[i];
        }
        dim_idx++;
        nocontract_idx++;
      }
    }

    nocontract_idx = 0;
    for (int i = 0; i < RDims; i++) {
      bool contracting = false;
      // find if we are contracting on index i of right tensor
      for (int j = 0; j < ContractDims; j++) {
        if (eval_op_indices[j].second == i) {
          contracting = true;
          break;
        }
      }
      if (!contracting) {
        m_dimensions[dim_idx] = eval_right_dims[i];
        if (nocontract_idx + 1 < internal::array_size<right_nocontract_t>::value) {
          m_j_strides[nocontract_idx + 1] = m_j_strides[nocontract_idx] * eval_right_dims[i];
        } else {
          m_j_size = m_j_strides[nocontract_idx] * eval_right_dims[i];
        }
        m_right_nocontract_strides[nocontract_idx] = rhs_strides[i];
        dim_idx++;
        nocontract_idx++;
      }
    }

    // Now compute the strides corresponding to the contracting dimensions. We
    // assumed above that non-contracting axes are represented in the same order
    // in the matrix as they are in the tensor. This is not the case for
    // contracting axes. As the contracting axes must be of the same size in
    // each tensor, we'll only look at the first tensor here.
    m_rhs_inner_dim_contiguous = true;
    m_rhs_inner_dim_reordered = false;
    for (int i = 0; i < ContractDims; i++) {
      Index left = eval_op_indices[i].first;
      Index right = eval_op_indices[i].second;

      Index size = eval_left_dims[left];
      eigen_assert(size == eval_right_dims[right] && "Contraction axes must be same size");

      if (i + 1 < static_cast<int>(internal::array_size<contract_t>::value)) {
        m_k_strides[i + 1] = m_k_strides[i] * size;
      } else {
        m_k_size = m_k_strides[i] * size;
      }
      m_left_contracting_strides[i] = lhs_strides[left];
      m_right_contracting_strides[i] = rhs_strides[right];

      if (i > 0 && right < eval_op_indices[i - 1].second) {
        m_rhs_inner_dim_reordered = true;
      }
      if (right != i) {
        m_rhs_inner_dim_contiguous = false;
      }
    }

    // If the layout is RowMajor, we need to reverse the m_dimensions
    if (static_cast<int>(Layout) == static_cast<int>(RowMajor)) {
      for (int i = 0, j = NumDims - 1; i < j; i++, j--) {
        numext::swap(m_dimensions[i], m_dimensions[j]);
      }
    }

    // A set of parameters that will allow output kernel to get from output
    // tensor dimensions (i, j) into the original tensor dimensions.
    // TODO(ezhulenev): Add parameters required to infer output tensor index for
    // more complex contractions than 2x2 on internal dimension.
    m_tensor_contraction_params.swapped_arguments = static_cast<int>(Layout) == RowMajor;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
    m_leftImpl.evalSubExprsIfNeeded(NULL);
    m_rightImpl.evalSubExprsIfNeeded(NULL);
    if (data) {
      evalTo(data);
      return false;
    } else {
      m_result = static_cast<EvaluatorPointerType>(m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)));
      evalTo(m_result);
      return true;
    }
  }

#ifdef EIGEN_USE_THREADS
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(EvaluatorPointerType dest, EvalSubExprsCallback done) {
    m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
      m_rightImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
        if (dest) {
          evalToAsync(dest, [done]() { done(false); });
        } else {
          m_result = static_cast<EvaluatorPointerType>(m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)));
          evalToAsync(m_result, [done]() { done(true); });
        }
      });
    });
  }
#endif // EIGEN_USE_THREADS

#ifndef TENSOR_CONTRACTION_DISPATCH
#define TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \
  if (this->m_lhs_inner_dim_contiguous) {                    \
    if (this->m_rhs_inner_dim_contiguous) {                  \
      if (this->m_rhs_inner_dim_reordered) {                 \
        METHOD<true, true, true, ALIGNMENT> ARGS;            \
      } else {                                               \
        METHOD<true, true, false, ALIGNMENT> ARGS;           \
      }                                                      \
    } else {                                                 \
      if (this->m_rhs_inner_dim_reordered) {                 \
        METHOD<true, false, true, ALIGNMENT> ARGS;           \
      } else {                                               \
        METHOD<true, false, false, ALIGNMENT> ARGS;          \
      }                                                      \
    }                                                        \
  } else {                                                   \
    if (this->m_rhs_inner_dim_contiguous) {                  \
      if (this->m_rhs_inner_dim_reordered) {                 \
        METHOD<false, true, true, ALIGNMENT> ARGS;           \
      } else {                                               \
        METHOD<false, true, false, ALIGNMENT> ARGS;          \
      }                                                      \
    } else {                                                 \
      if (this->m_rhs_inner_dim_reordered) {                 \
        METHOD<false, false, true, ALIGNMENT> ARGS;          \
      } else {                                               \
        METHOD<false, false, false, ALIGNMENT> ARGS;         \
      }                                                      \
    }                                                        \
  }
#endif
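
// For example, the default evaluator at the bottom of this file calls
//   TENSOR_CONTRACTION_DISPATCH(this->template evalProductSequential, Alignment, (buffer));
// which expands to one of the eight instantiations, e.g.
//   this->template evalProductSequential<true, false, false, Alignment>(buffer);
// selected from the three runtime flags computed in the evaluator constructor.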

#ifndef TENSOR_CONTRACTION_ASYNC_DISPATCH
#define TENSOR_CONTRACTION_ASYNC_DISPATCH(METHOD, DONE, ALIGNMENT, ARGS, FN) \
  if (this->m_lhs_inner_dim_contiguous) {                                    \
    if (this->m_rhs_inner_dim_contiguous) {                                  \
      if (this->m_rhs_inner_dim_reordered) {                                 \
        (new METHOD<DONE, true, true, true, ALIGNMENT> ARGS)->FN;            \
      } else {                                                               \
        (new METHOD<DONE, true, true, false, ALIGNMENT> ARGS)->FN;           \
      }                                                                      \
    } else {                                                                 \
      if (this->m_rhs_inner_dim_reordered) {                                 \
        (new METHOD<DONE, true, false, true, ALIGNMENT> ARGS)->FN;           \
      } else {                                                               \
        (new METHOD<DONE, true, false, false, ALIGNMENT> ARGS)->FN;          \
      }                                                                      \
    }                                                                        \
  } else {                                                                   \
    if (this->m_rhs_inner_dim_contiguous) {                                  \
      if (this->m_rhs_inner_dim_reordered) {                                 \
        (new METHOD<DONE, false, true, true, ALIGNMENT> ARGS)->FN;           \
      } else {                                                               \
        (new METHOD<DONE, false, true, false, ALIGNMENT> ARGS)->FN;          \
      }                                                                      \
    } else {                                                                 \
      if (this->m_rhs_inner_dim_reordered) {                                 \
        (new METHOD<DONE, false, false, true, ALIGNMENT> ARGS)->FN;          \
      } else {                                                               \
        (new METHOD<DONE, false, false, false, ALIGNMENT> ARGS)->FN;         \
      }                                                                      \
    }                                                                        \
  }
#endif

  EIGEN_DEVICE_FUNC void evalTo(Scalar* buffer) const {
    static_cast<const Derived*>(this)->template evalProduct<Unaligned>(buffer);
  }

#ifdef EIGEN_USE_THREADS
  template <typename EvalToCallback>
  void evalToAsync(Scalar* buffer, EvalToCallback done) const {
    static_cast<const Derived*>(this)->template evalProductAsync<EvalToCallback, Unaligned>(buffer, std::move(done));
  }
#endif // EIGEN_USE_THREADS

  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
  void evalProductSequential(Scalar* buffer) const {
    if (this->m_j_size == 1) {
      this->template evalGemv<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(
          buffer);
    } else {
      this->template evalGemm<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(
          buffer);
    }
  }

  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
#if !defined(EIGEN_HIPCC)
  EIGEN_DEVICE_FUNC
#endif
      void
      evalGemv(Scalar* buffer) const {
    const Index rows = m_i_size;
    const Index cols = m_k_size;

    typedef std::remove_const_t<typename EvalLeftArgType::Scalar> LhsScalar;
    typedef std::remove_const_t<typename EvalRightArgType::Scalar> RhsScalar;
    typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
    typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
    const int lhs_packet_size = internal::unpacket_traits<typename LeftEvaluator::PacketReturnType>::size;
    const int rhs_packet_size = internal::unpacket_traits<typename RightEvaluator::PacketReturnType>::size;
    const int lhs_alignment = LeftEvaluator::IsAligned ? Aligned : Unaligned;
    const int rhs_alignment = RightEvaluator::IsAligned ? Aligned : Unaligned;
    typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs, LeftEvaluator, left_nocontract_t,
                                                   contract_t, lhs_packet_size, lhs_inner_dim_contiguous, false,
                                                   lhs_alignment>
        LhsMapper;

    typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs, RightEvaluator, right_nocontract_t,
                                                   contract_t, rhs_packet_size, rhs_inner_dim_contiguous,
                                                   rhs_inner_dim_reordered, rhs_alignment>
        RhsMapper;

    LhsMapper lhs(m_leftImpl, m_left_nocontract_strides, m_i_strides, m_left_contracting_strides, m_k_strides);
    RhsMapper rhs(m_rightImpl, m_right_nocontract_strides, m_j_strides, m_right_contracting_strides, m_k_strides);

    const Scalar alpha(1);
    const Index resIncr(1);

    // zero out the result buffer (which must be of size at least rows * sizeof(Scalar))
    m_device.fill(buffer, buffer + rows, Scalar(0));

    internal::general_matrix_vector_product<Index, LhsScalar, LhsMapper, ColMajor, false, RhsScalar, RhsMapper,
                                            false>::run(rows, cols, lhs, rhs, buffer, resIncr, alpha);

    typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
    m_output_kernel(OutputMapper(buffer, rows), m_tensor_contraction_params, static_cast<Index>(0),
                    static_cast<Index>(0), rows, static_cast<Index>(1));
  }

  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
#if !defined(EIGEN_HIPCC)
  EIGEN_DEVICE_FUNC
#endif
      void
      evalGemm(Scalar* buffer) const {
    // columns in left side, rows in right side
    const Index k = this->m_k_size;
    this->template evalGemmPartial<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered,
                                   Alignment, true>(buffer, 0, k, 1);
  }

  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
  EIGEN_DEVICE_FUNC void evalGemmPartialWithoutOutputKernel(Scalar* buffer, Index k_start, Index k_end,
                                                            int num_threads) const {
    evalGemmPartial<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment,
                    /*use_output_kernel*/ false>(buffer, k_start, k_end, num_threads);
  }

  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment,
            bool use_output_kernel>
  EIGEN_DEVICE_FUNC void evalGemmPartial(Scalar* buffer, Index k_start, Index k_end, int num_threads) const {
    eigen_assert(k_end >= k_start && k_start >= 0 && k_end <= this->m_k_size);
    // columns in slice on left side, rows on right side
    const Index k_slice = k_end - k_start;

    // rows in left side
    const Index m = this->m_i_size;

    // columns in right side
    const Index n = this->m_j_size;

    // define data mappers for Lhs and Rhs
    typedef std::remove_const_t<typename EvalLeftArgType::Scalar> LhsScalar;
    typedef std::remove_const_t<typename EvalRightArgType::Scalar> RhsScalar;

    typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
    typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;

    const int lhs_packet_size = internal::unpacket_traits<typename LeftEvaluator::PacketReturnType>::size;
    const int rhs_packet_size = internal::unpacket_traits<typename RightEvaluator::PacketReturnType>::size;

    typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs, LeftEvaluator, left_nocontract_t,
                                                   contract_t, lhs_packet_size, lhs_inner_dim_contiguous, false,
                                                   Unaligned>
        LhsMapper;

    typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs, RightEvaluator, right_nocontract_t,
                                                   contract_t, rhs_packet_size, rhs_inner_dim_contiguous,
                                                   rhs_inner_dim_reordered, Unaligned>
        RhsMapper;

    typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;

    typedef internal::TensorContractionKernel<Scalar, LhsScalar, RhsScalar, Index, OutputMapper, LhsMapper, RhsMapper>
        TensorContractionKernel;

    // initialize data mappers
    LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides,
                  this->m_left_contracting_strides, this->m_k_strides);

    RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides,
                  this->m_right_contracting_strides, this->m_k_strides);

    OutputMapper output(buffer, m);

    // Sizes of the blocks to load in cache. See the Goto paper for details.
    internal::TensorContractionBlocking<Scalar, LhsScalar, RhsScalar, Index, internal::ShardByCol> blocking(
        k_slice, m, n, num_threads);
    const Index kc = blocking.kc();
    const Index mc = numext::mini(m, blocking.mc());
    const Index nc = numext::mini(n, blocking.nc());

    typedef typename TensorContractionKernel::LhsBlock LhsBlock;
    typedef typename TensorContractionKernel::RhsBlock RhsBlock;

    LhsBlock blockA;
    RhsBlock blockB;

    TensorContractionKernel kernel(m, k_slice, n, mc, kc, nc);

    typedef typename TensorContractionKernel::BlockMemHandle BlockMemHandle;
    const BlockMemHandle packed_mem = kernel.allocate(this->m_device, &blockA, &blockB);

    // If a contraction kernel does not support beta, explicitly initialize
    // output buffer with zeroes.
    if (!TensorContractionKernel::HasBeta) {
      this->m_device.fill(buffer, buffer + m * n, Scalar(0));
    }

    for (Index i2 = 0; i2 < m; i2 += mc) {
      const Index actual_mc = numext::mini(i2 + mc, m) - i2;
      for (Index k2 = k_start; k2 < k_end; k2 += kc) {
        // make sure we don't overshoot right edge of left matrix, then pack vertical panel
        const Index actual_kc = numext::mini(k2 + kc, k_end) - k2;
        kernel.packLhs(&blockA, lhs.getSubMapper(i2, k2), actual_kc, actual_mc);

        // If kernel supports beta, there is no need to initialize output
        // buffer with zeroes.
        const Scalar alpha = Scalar(1);
        const Scalar beta = (TensorContractionKernel::HasBeta && k2 == k_start) ? Scalar(0) : Scalar(1);

        // series of horizontal blocks
        for (Index j2 = 0; j2 < n; j2 += nc) {
          // make sure we don't overshoot right edge of right matrix, then pack block
          const Index actual_nc = numext::mini(j2 + nc, n) - j2;
          kernel.packRhs(&blockB, rhs.getSubMapper(k2, j2), actual_kc, actual_nc);

          // call gebp (matrix kernel)
          // The parameters here are copied from Eigen's GEMM implementation
          const OutputMapper output_mapper = output.getSubMapper(i2, j2);
          kernel.invoke(output_mapper, blockA, blockB, actual_mc, actual_kc, actual_nc, alpha, beta);

          // We are done with this [i2, j2] output block.
          if (use_output_kernel && k2 + kc >= k_end) {
            m_output_kernel(output_mapper, m_tensor_contraction_params, i2, j2, actual_mc, actual_nc);
          }
        }
      }
    }

    kernel.deallocate(this->m_device, packed_mem);
  }

  EIGEN_STRONG_INLINE void cleanup() {
    m_leftImpl.cleanup();
    m_rightImpl.cleanup();

    if (m_result != NULL) {
      m_device.deallocate(m_result);
      m_result = NULL;
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_result[index]; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const {
    return TensorOpCost(sizeof(CoeffReturnType), 0, 0);
  }

  template <int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
    return internal::ploadt<PacketReturnType, LoadMode>(m_result + index);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvaluatorPointerType data() const { return m_result; }

 protected:
  Dimensions m_dimensions;

  contract_t m_k_strides;
  contract_t m_left_contracting_strides;
  contract_t m_right_contracting_strides;

  bool m_lhs_inner_dim_contiguous;
  bool m_rhs_inner_dim_contiguous;
  bool m_rhs_inner_dim_reordered;

  left_nocontract_t m_i_strides;
  right_nocontract_t m_j_strides;
  left_nocontract_t m_left_nocontract_strides;
  right_nocontract_t m_right_nocontract_strides;

  Index m_i_size;
  Index m_j_size;
  Index m_k_size;

  TensorContractionParams m_tensor_contraction_params;

  TensorEvaluator<EvalLeftArgType, Device> m_leftImpl;
  TensorEvaluator<EvalRightArgType, Device> m_rightImpl;
  const Device EIGEN_DEVICE_REF m_device;
  OutputKernelType m_output_kernel;
  EvaluatorPointerType m_result;
};

// evaluator for default device
template <typename Indices, typename LeftArgType, typename RightArgType, typename OutputKernelType, typename Device>
struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device>
    : public TensorContractionEvaluatorBase<
          TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device>> {
  typedef TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device> Self;
  typedef TensorContractionEvaluatorBase<Self> Base;

  typedef TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType> XprType;
  typedef std::remove_const_t<typename XprType::Scalar> Scalar;
  typedef typename XprType::Index Index;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;

  static constexpr int Layout = TensorEvaluator<LeftArgType, Device>::Layout;

  // Most of the code is assuming that both input tensors are ColMajor. If the
  // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS:
  // If we want to compute A * B = C, where A is LHS and B is RHS, the code
  // will pretend B is LHS and A is RHS.
  typedef std::conditional_t<Layout == static_cast<int>(ColMajor), LeftArgType, RightArgType> EvalLeftArgType;
  typedef std::conditional_t<Layout == static_cast<int>(ColMajor), RightArgType, LeftArgType> EvalRightArgType;

  static constexpr int LDims =
      internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
  static constexpr int RDims =
      internal::array_size<typename TensorEvaluator<EvalRightArgType, Device>::Dimensions>::value;
  static constexpr int ContractDims = internal::array_size<Indices>::value;

  typedef array<Index, ContractDims> contract_t;
  typedef array<Index, LDims - ContractDims> left_nocontract_t;
  typedef array<Index, RDims - ContractDims> right_nocontract_t;

  static constexpr int NumDims = LDims + RDims - 2 * ContractDims;

  // Could we use NumDimensions here?
  typedef DSizes<Index, NumDims> Dimensions;

  TensorEvaluator(const XprType& op, const Device& device) : Base(op, device) {}

  template <int Alignment>
  void evalProduct(Scalar* buffer) const {
    TENSOR_CONTRACTION_DISPATCH(this->template evalProductSequential, Alignment, (buffer));
  }
};

} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H