Eigen-unsupported 5.0.1-dev+284dcc12

TensorShuffling.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_SHUFFLING_H
#define EIGEN_CXX11_TENSOR_TENSOR_SHUFFLING_H

// IWYU pragma: private
#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {
template <typename Shuffle, typename XprType>
struct traits<TensorShufflingOp<Shuffle, XprType> > : public traits<XprType> {
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef std::remove_reference_t<Nested> Nested_;
  static constexpr int NumDimensions = XprTraits::NumDimensions;
  static constexpr int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};

template <typename Shuffle, typename XprType>
struct eval<TensorShufflingOp<Shuffle, XprType>, Eigen::Dense> {
  typedef const TensorShufflingOp<Shuffle, XprType>& type;
};

template <typename Shuffle, typename XprType>
struct nested<TensorShufflingOp<Shuffle, XprType>, 1, typename eval<TensorShufflingOp<Shuffle, XprType> >::type> {
  typedef TensorShufflingOp<Shuffle, XprType> type;
};

}  // end namespace internal

/** \class TensorShufflingOp
 * \ingroup CXX11_Tensor_Module
 *
 * \brief Tensor shuffling class.
 */
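// Usage sketch (illustrative, not part of the original header): shuffling is
// exposed through TensorBase::shuffle(), which returns a TensorShufflingOp.
//   Eigen::Tensor<float, 3> input(2, 3, 5);
//   Eigen::array<int, 3> shuffle{{2, 0, 1}};
//   Eigen::Tensor<float, 3> output = input.shuffle(shuffle);
//   // output.dimension(i) == input.dimension(shuffle[i]), i.e. (5, 2, 3),
//   // and output(k, i, j) == input(i, j, k).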
template <typename Shuffle, typename XprType>
class TensorShufflingOp : public TensorBase<TensorShufflingOp<Shuffle, XprType> > {
 public:
  typedef TensorBase<TensorShufflingOp<Shuffle, XprType> > Base;
  typedef typename Eigen::internal::traits<TensorShufflingOp>::Scalar Scalar;
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorShufflingOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorShufflingOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorShufflingOp>::Index Index;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorShufflingOp(const XprType& expr, const Shuffle& shfl)
      : m_xpr(expr), m_shuffle(shfl) {}

  EIGEN_DEVICE_FUNC const Shuffle& shufflePermutation() const { return m_shuffle; }

  EIGEN_DEVICE_FUNC const internal::remove_all_t<typename XprType::Nested>& expression() const { return m_xpr; }

  EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorShufflingOp)

 protected:
  typename XprType::Nested m_xpr;
  const Shuffle m_shuffle;
};

// Eval as rvalue
template <typename Shuffle, typename ArgType, typename Device>
struct TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device> {
  typedef TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device> Self;
  typedef TensorShufflingOp<Shuffle, ArgType> XprType;
  typedef typename XprType::Index Index;
  static constexpr int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static constexpr int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  static constexpr int Layout = TensorEvaluator<ArgType, Device>::Layout;
  enum {
    IsAligned = false,
    PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
    BlockAccess = TensorEvaluator<ArgType, Device>::RawAccess,
    PreferBlockAccess = true,
    CoordAccess = false,  // to be implemented
    RawAccess = false
  };

  typedef std::remove_const_t<Scalar> ScalarNoConst;

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;

  typedef typename internal::TensorMaterializedBlock<ScalarNoConst, NumDims, Layout, Index> TensorBlock;
  //===--------------------------------------------------------------------===//

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_device(device), m_impl(op.expression(), device) {
    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    const Shuffle& shuffle = op.shufflePermutation();
    m_is_identity = true;
    for (int i = 0; i < NumDims; ++i) {
      m_shuffle[i] = static_cast<int>(shuffle[i]);
      m_dimensions[i] = input_dims[shuffle[i]];
      m_inverseShuffle[shuffle[i]] = i;
      if (m_is_identity && shuffle[i] != i) {
        m_is_identity = false;
      }
    }

    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_unshuffledInputStrides[0] = 1;
      m_outputStrides[0] = 1;

      for (int i = 1; i < NumDims; ++i) {
        m_unshuffledInputStrides[i] = m_unshuffledInputStrides[i - 1] * input_dims[i - 1];
        m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1];
        m_fastOutputStrides[i] =
            internal::TensorIntDivisor<Index>(m_outputStrides[i] > 0 ? m_outputStrides[i] : Index(1));
      }
    } else {
      m_unshuffledInputStrides[NumDims - 1] = 1;
      m_outputStrides[NumDims - 1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_unshuffledInputStrides[i] = m_unshuffledInputStrides[i + 1] * input_dims[i + 1];
        m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1];
        m_fastOutputStrides[i] =
            internal::TensorIntDivisor<Index>(m_outputStrides[i] > 0 ? m_outputStrides[i] : Index(1));
      }
    }

    for (int i = 0; i < NumDims; ++i) {
      m_inputStrides[i] = m_unshuffledInputStrides[shuffle[i]];
    }
  }
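  // Worked example of the bookkeeping above (illustrative): for a ColMajor
  // input of dimensions (2, 3, 5) and shuffle (2, 0, 1):
  //   m_dimensions             = (5, 2, 3)   (input_dims[shuffle[i]])
  //   m_unshuffledInputStrides = (1, 2, 6)   (column-major strides of the input)
  //   m_inputStrides           = (6, 1, 2)   (m_unshuffledInputStrides[shuffle[i]])
  //   m_outputStrides          = (1, 5, 10)  (column-major strides of the output)
  //   m_inverseShuffle         = (1, 2, 0)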

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }

#ifdef EIGEN_USE_THREADS
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(EvaluatorPointerType, EvalSubExprsCallback done) {
    m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); });
  }
#endif  // EIGEN_USE_THREADS

  EIGEN_STRONG_INLINE void cleanup() { m_impl.cleanup(); }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    if (m_is_identity) {
      return m_impl.coeff(index);
    } else {
      return m_impl.coeff(srcCoeff(index));
    }
  }

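  // PacketLoader gathers PacketSize coefficients one at a time through
  // coeff() when the underlying evaluator lacks packet access; the
  // specialization below (ImplPacketAccess == true) forwards directly to the
  // underlying evaluator's packet() for an identity shuffle.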
  template <int LoadMode, typename Self, bool ImplPacketAccess>
  struct PacketLoader {
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static PacketReturnType Run(const Self& self, Index index) {
      EIGEN_ALIGN_MAX std::remove_const_t<CoeffReturnType> values[PacketSize];
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < PacketSize; ++i) {
        values[i] = self.coeff(index + i);
      }
      PacketReturnType rslt = internal::pload<PacketReturnType>(values);
      return rslt;
    }
  };

  template <int LoadMode, typename Self>
  struct PacketLoader<LoadMode, Self, true> {
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static PacketReturnType Run(const Self& self, Index index) {
      if (self.m_is_identity) {
        return self.m_impl.template packet<LoadMode>(index);
      } else {
        EIGEN_ALIGN_MAX std::remove_const_t<CoeffReturnType> values[PacketSize];
        EIGEN_UNROLL_LOOP
        for (int i = 0; i < PacketSize; ++i) {
          values[i] = self.coeff(index + i);
        }
        PacketReturnType rslt = internal::pload<PacketReturnType>(values);
        return rslt;
      }
    }
  };

  template <int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
    eigen_assert(index + PacketSize - 1 < dimensions().TotalSize());
    return PacketLoader<LoadMode, Self, TensorEvaluator<ArgType, Device>::PacketAccess>::Run(*this, index);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE internal::TensorBlockResourceRequirements getResourceRequirements() const {
    static const int inner_dim = Layout == static_cast<int>(ColMajor) ? 0 : NumDims - 1;

    const size_t target_size = m_device.firstLevelCacheSize();
    const bool inner_dim_shuffled = m_shuffle[inner_dim] != inner_dim;

    // A shuffled inner dimension leads to random memory access, which is not
    // captured by the default cost model's bytes loaded/stored, so we add
    // this cost explicitly. The number of cycles was picked based on
    // benchmarks.
    // TODO(ezhulenev): This number was picked based on very questionable
    // benchmarks; add benchmarks that are representative of real workloads.
    using BlockRequirements = internal::TensorBlockResourceRequirements;
    if (inner_dim_shuffled) {
      return BlockRequirements::uniform<Scalar>(target_size).addCostPerCoeff({0, 0, NumDims * 28});
    } else {
      return BlockRequirements::skewed<Scalar>(target_size);
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
                                                          bool root_of_expr_ast = false) const {
    eigen_assert(m_impl.data() != NULL);

    typedef internal::TensorBlockIO<ScalarNoConst, Index, NumDims, Layout> TensorBlockIO;
    typedef typename TensorBlockIO::Dst TensorBlockIODst;
    typedef typename TensorBlockIO::Src TensorBlockIOSrc;

    const typename TensorBlock::Storage block_storage =
        TensorBlock::prepareStorage(desc, scratch, /*allow_strided_storage=*/root_of_expr_ast);

    typename TensorBlockIO::Dimensions input_strides(m_unshuffledInputStrides);
    TensorBlockIOSrc src(input_strides, m_impl.data(), srcCoeff(desc.offset()));

    TensorBlockIODst dst(block_storage.dimensions(), block_storage.strides(), block_storage.data());

    typename TensorBlockIO::DimensionsMap dst_to_src_dim_map(m_shuffle);
    TensorBlockIO::Copy(dst, src, dst_to_src_dim_map);

    return block_storage.AsTensorMaterializedBlock();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    const double compute_cost = m_is_identity
                                    ? TensorOpCost::AddCost<Index>()
                                    : NumDims * (2 * TensorOpCost::AddCost<Index>() +
                                                 2 * TensorOpCost::MulCost<Index>() + TensorOpCost::DivCost<Index>());
    return m_impl.costPerCoeff(vectorized) +
           TensorOpCost(0, 0, compute_cost, m_is_identity /* vectorized */, PacketSize);
  }

  EIGEN_DEVICE_FUNC typename Storage::Type data() const { return NULL; }

 protected:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index
  GetBlockOutputIndex(Index input_index, const DSizes<Index, NumDims>& input_block_strides,
                      const DSizes<Index, NumDims>& output_block_strides,
                      const DSizes<internal::TensorIntDivisor<Index>, NumDims>& fast_input_block_strides) const {
    Index output_index = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx = input_index / fast_input_block_strides[i];
        output_index += idx * output_block_strides[m_inverseShuffle[i]];
        input_index -= idx * input_block_strides[i];
      }
      return output_index + input_index * output_block_strides[m_inverseShuffle[0]];
    } else {
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx = input_index / fast_input_block_strides[i];
        output_index += idx * output_block_strides[m_inverseShuffle[i]];
        input_index -= idx * input_block_strides[i];
      }
      return output_index + input_index * output_block_strides[m_inverseShuffle[NumDims - 1]];
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const {
    Index inputIndex = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx = index / m_fastOutputStrides[i];
        inputIndex += idx * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      return inputIndex + index * m_inputStrides[0];
    } else {
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx = index / m_fastOutputStrides[i];
        inputIndex += idx * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      return inputIndex + index * m_inputStrides[NumDims - 1];
    }
  }
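  // srcCoeff() peels off one output coordinate per iteration (highest stride
  // first) and rebuilds the input index with the permuted strides.
  // Continuing the illustrative (2, 3, 5) / shuffle (2, 0, 1) example from
  // the constructor: output index 7 is output coordinate (2, 1, 0), which
  // maps to input coordinate (1, 0, 2), i.e. input index 1*1 + 0*2 + 2*6 = 13.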

  Dimensions m_dimensions;
  bool m_is_identity;
  array<int, NumDims> m_shuffle;
  array<Index, NumDims> m_inverseShuffle;  // TODO(ezhulenev): Make it int type.
  array<Index, NumDims> m_outputStrides;
  array<internal::TensorIntDivisor<Index>, NumDims> m_fastOutputStrides;
  array<Index, NumDims> m_inputStrides;
  array<Index, NumDims> m_unshuffledInputStrides;

  const Device EIGEN_DEVICE_REF m_device;
  TensorEvaluator<ArgType, Device> m_impl;
};

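// The shuffle expression is also writable: `output.shuffle(s) = input;` is
// equivalent to `output = input.shuffle(inv_s);`, where inv_s is the inverse
// permutation of s (inv_s[s[i]] == i). The evaluator below provides the
// coeffRef/writePacket/writeBlock plumbing for that case.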
// Eval as lvalue
template <typename Shuffle, typename ArgType, typename Device>
struct TensorEvaluator<TensorShufflingOp<Shuffle, ArgType>, Device>
    : public TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device> {
  typedef TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device> Base;

  typedef TensorShufflingOp<Shuffle, ArgType> XprType;
  typedef typename XprType::Index Index;
  static constexpr int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static constexpr int PacketSize = PacketType<CoeffReturnType, Device>::size;
  static constexpr int Layout = TensorEvaluator<ArgType, Device>::Layout;

  enum {
    IsAligned = false,
    PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
    BlockAccess = TensorEvaluator<ArgType, Device>::RawAccess,
    PreferBlockAccess = true,
    RawAccess = false
  };

  typedef std::remove_const_t<Scalar> ScalarNoConst;

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  //===--------------------------------------------------------------------===//

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : Base(op, device) {}

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) const {
    return this->m_impl.coeffRef(this->srcCoeff(index));
  }

  template <int StoreMode>
  EIGEN_STRONG_INLINE void writePacket(Index index, const PacketReturnType& x) const {
    EIGEN_ALIGN_MAX std::remove_const_t<CoeffReturnType> values[PacketSize];
    internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
    EIGEN_UNROLL_LOOP
    for (int i = 0; i < PacketSize; ++i) {
      this->coeffRef(index + i) = values[i];
    }
  }

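  // Writes a materialized block back into the underlying tensor storage,
  // remapping dimensions through the inverse shuffle. If the incoming block
  // is not backed by a contiguous buffer, it is first evaluated into a
  // temporary allocation.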
  template <typename TensorBlock>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writeBlock(const TensorBlockDesc& desc, const TensorBlock& block) {
    eigen_assert(this->m_impl.data() != NULL);

    typedef internal::TensorBlockIO<ScalarNoConst, Index, NumDims, Layout> TensorBlockIO;
    typedef typename TensorBlockIO::Dst TensorBlockIODst;
    typedef typename TensorBlockIO::Src TensorBlockIOSrc;

    const Scalar* block_buffer = block.data();

    // TODO(ezhulenev): TensorBlockIO should be able to read from any Eigen
    // expression with coefficient and packet access as `src`.
    void* mem = NULL;
    if (block_buffer == NULL) {
      mem = this->m_device.allocate(desc.size() * sizeof(Scalar));
      ScalarNoConst* buf = static_cast<ScalarNoConst*>(mem);

      typedef internal::TensorBlockAssignment<ScalarNoConst, NumDims, typename TensorBlock::XprType, Index>
          TensorBlockAssignment;

      TensorBlockAssignment::Run(
          TensorBlockAssignment::target(desc.dimensions(), internal::strides<Layout>(desc.dimensions()), buf),
          block.expr());

      block_buffer = buf;
    }

    // Read from block.
    TensorBlockIOSrc src(internal::strides<Layout>(desc.dimensions()), block_buffer);

    // Write to the output buffer.
    typename TensorBlockIO::Dimensions output_strides(this->m_unshuffledInputStrides);
    typename TensorBlockIO::Dimensions output_dimensions;
    for (int i = 0; i < NumDims; ++i) {
      output_dimensions[this->m_shuffle[i]] = desc.dimension(i);
    }
    TensorBlockIODst dst(output_dimensions, output_strides, this->m_impl.data(), this->srcCoeff(desc.offset()));

    // Reorder dimensions according to the shuffle.
    typename TensorBlockIO::DimensionsMap dst_to_src_dim_map;
    for (int i = 0; i < NumDims; ++i) {
      dst_to_src_dim_map[i] = static_cast<int>(this->m_inverseShuffle[i]);
    }
    TensorBlockIO::Copy(dst, src, dst_to_src_dim_map);

    // Deallocate temporary buffer used for the block materialization.
    if (mem != NULL) this->m_device.deallocate(mem);
  }
};

}  // end namespace Eigen

#endif  // EIGEN_CXX11_TENSOR_TENSOR_SHUFFLING_H