#ifndef EIGEN_CXX11_TENSOR_TENSOR_SHUFFLING_H
#define EIGEN_CXX11_TENSOR_TENSOR_SHUFFLING_H

#include "./InternalHeaderCheck.h"

namespace Eigen {
namespace internal {
template <typename Shuffle, typename XprType>
struct traits<TensorShufflingOp<Shuffle, XprType> > : public traits<XprType> {
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef std::remove_reference_t<Nested> Nested_;
  static constexpr int NumDimensions = XprTraits::NumDimensions;
  static constexpr int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};
template <typename Shuffle, typename XprType>
struct eval<TensorShufflingOp<Shuffle, XprType>, Eigen::Dense> {
  typedef const TensorShufflingOp<Shuffle, XprType>& type;
};
template <typename Shuffle, typename XprType>
struct nested<TensorShufflingOp<Shuffle, XprType>, 1, typename eval<TensorShufflingOp<Shuffle, XprType> >::type> {
  typedef TensorShufflingOp<Shuffle, XprType> type;
};

}  // end namespace internal
/** \class TensorShufflingOp
 * \ingroup CXX11_Tensor_Module
 *
 * \brief Tensor shuffling class.
 */
template <typename Shuffle, typename XprType>
class TensorShufflingOp : public TensorBase<TensorShufflingOp<Shuffle, XprType> > {
 public:
  typedef typename Eigen::internal::traits<TensorShufflingOp>::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorShufflingOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorShufflingOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorShufflingOp>::Index Index;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorShufflingOp(const XprType& expr, const Shuffle& shfl)
      : m_xpr(expr), m_shuffle(shfl) {}

  EIGEN_DEVICE_FUNC const Shuffle& shufflePermutation() const { return m_shuffle; }

  EIGEN_DEVICE_FUNC const internal::remove_all_t<typename XprType::Nested>& expression() const { return m_xpr; }

  EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorShufflingOp)

 protected:
  typename XprType::Nested m_xpr;
  const Shuffle m_shuffle;
};
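
// A usage sketch (illustration only, not part of this header): Tensor::shuffle
// builds a TensorShufflingOp. The i-th dimension of the result is the
// shuffle[i]-th dimension of the input, so a {1, 2, 0} shuffle of a
// (20, 30, 50) tensor yields a (30, 50, 20) tensor with
// output(a, b, c) == input(c, a, b):
//
//   Eigen::Tensor<float, 3> input(20, 30, 50);
//   input.setRandom();
//   Eigen::array<int, 3> shuffling({1, 2, 0});
//   Eigen::Tensor<float, 3> output = input.shuffle(shuffling);
//   eigen_assert(output.dimension(0) == 30);
//   eigen_assert(output.dimension(1) == 50);
//   eigen_assert(output.dimension(2) == 20);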
// Eval as rvalue
template <typename Shuffle, typename ArgType, typename Device>
struct TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device> {
  typedef TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device> Self;
  typedef TensorShufflingOp<Shuffle, ArgType> XprType;
  typedef typename XprType::Index Index;
  static constexpr int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static constexpr int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  static constexpr int Layout = TensorEvaluator<ArgType, Device>::Layout;
  enum {
    IsAligned = false,
    PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
    BlockAccess = TensorEvaluator<ArgType, Device>::RawAccess,
    PreferBlockAccess = true,
    CoordAccess = false,  // to be implemented
    RawAccess = false
  };

  typedef std::remove_const_t<Scalar> ScalarNoConst;

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;

  typedef typename internal::TensorMaterializedBlock<ScalarNoConst, NumDims, Layout, Index> TensorBlock;
  //===--------------------------------------------------------------------===//
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_device(device), m_impl(op.expression(), device) {
    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    const Shuffle& shuffle = op.shufflePermutation();
    m_is_identity = true;
    for (int i = 0; i < NumDims; ++i) {
      m_shuffle[i] = static_cast<int>(shuffle[i]);
      m_dimensions[i] = input_dims[shuffle[i]];
      m_inverseShuffle[shuffle[i]] = i;
      if (m_is_identity && shuffle[i] != i) {
        m_is_identity = false;
      }
    }

    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_unshuffledInputStrides[0] = 1;
      m_outputStrides[0] = 1;

      for (int i = 1; i < NumDims; ++i) {
        m_unshuffledInputStrides[i] = m_unshuffledInputStrides[i - 1] * input_dims[i - 1];
        m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1];
        m_fastOutputStrides[i] =
            internal::TensorIntDivisor<Index>(m_outputStrides[i] > 0 ? m_outputStrides[i] : Index(1));
      }
    } else {
      m_unshuffledInputStrides[NumDims - 1] = 1;
      m_outputStrides[NumDims - 1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_unshuffledInputStrides[i] = m_unshuffledInputStrides[i + 1] * input_dims[i + 1];
        m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1];
        m_fastOutputStrides[i] =
            internal::TensorIntDivisor<Index>(m_outputStrides[i] > 0 ? m_outputStrides[i] : Index(1));
      }
    }

    for (int i = 0; i < NumDims; ++i) {
      m_inputStrides[i] = m_unshuffledInputStrides[shuffle[i]];
    }
  }
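
  // A worked example of the precomputation above (hypothetical sizes, for
  // illustration): a ColMajor (20, 30, 50) input with shuffle {1, 2, 0} has
  // m_unshuffledInputStrides = {1, 20, 600}. The output dimensions are
  // (30, 50, 20), so m_outputStrides = {1, 30, 1500}, and
  // m_inputStrides[i] = m_unshuffledInputStrides[shuffle[i]] = {20, 600, 1}:
  // stepping along output dimension i walks the input along dimension
  // shuffle[i].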
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }
#ifdef EIGEN_USE_THREADS
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(EvaluatorPointerType, EvalSubExprsCallback done) {
    m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); });
  }
#endif  // EIGEN_USE_THREADS

  EIGEN_STRONG_INLINE void cleanup() { m_impl.cleanup(); }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    if (m_is_identity) {
      return m_impl.coeff(index);
    }
    return m_impl.coeff(srcCoeff(index));
  }
  template <int LoadMode, typename Self, bool ImplPacketAccess>
  struct PacketLoader {
    // Generic case: gather the coefficients one by one into an aligned
    // buffer and load them as a packet.
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static PacketReturnType Run(const Self& self, Index index) {
      EIGEN_ALIGN_MAX std::remove_const_t<CoeffReturnType> values[PacketSize];
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < PacketSize; ++i) {
        values[i] = self.coeff(index + i);
      }
      PacketReturnType rslt = internal::pload<PacketReturnType>(values);
      return rslt;
    }
  };
  template <int LoadMode, typename Self>
  struct PacketLoader<LoadMode, Self, true> {
    // The input evaluator supports packet access: forward the load directly
    // for an identity shuffle, otherwise fall back to a coefficient gather.
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static PacketReturnType Run(const Self& self, Index index) {
      if (self.m_is_identity) {
        return self.m_impl.template packet<LoadMode>(index);
      } else {
        EIGEN_ALIGN_MAX std::remove_const_t<CoeffReturnType> values[PacketSize];
        EIGEN_UNROLL_LOOP
        for (int i = 0; i < PacketSize; ++i) {
          values[i] = self.coeff(index + i);
        }
        PacketReturnType rslt = internal::pload<PacketReturnType>(values);
        return rslt;
      }
    }
  };
  template <int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
    eigen_assert(index + PacketSize - 1 < dimensions().TotalSize());
    return PacketLoader<LoadMode, Self, TensorEvaluator<ArgType, Device>::PacketAccess>::Run(*this, index);
  }
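
  // Note: a shuffled packet load is a gather. For example (hypothetical), a
  // {1, 0} shuffle of a ColMajor (4, 4) float tensor maps the output packet
  // at linear index 0 to input indices {0, 4, 8, 12}, so PacketLoader must
  // read the coefficients individually and assemble them with
  // internal::pload.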
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE internal::TensorBlockResourceRequirements getResourceRequirements() const {
    static const int inner_dim = Layout == static_cast<int>(ColMajor) ? 0 : NumDims - 1;

    const size_t target_size = m_device.firstLevelCacheSize();
    const bool inner_dim_shuffled = m_shuffle[inner_dim] != inner_dim;

    // A shuffled inner dimension leads to random memory access, which is not
    // captured by the default cost model's bytes loaded/stored, so the extra
    // cost is added explicitly per coefficient.
    using BlockRequirements = internal::TensorBlockResourceRequirements;
    if (inner_dim_shuffled) {
      return BlockRequirements::uniform<Scalar>(target_size).addCostPerCoeff({0, 0, NumDims * 28});
    } else {
      return BlockRequirements::skewed<Scalar>(target_size);
    }
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
                                                          bool root_of_expr_ast = false) const {
    eigen_assert(m_impl.data() != NULL);

    typedef internal::TensorBlockIO<ScalarNoConst, Index, NumDims, Layout> TensorBlockIO;
    typedef typename TensorBlockIO::Dst TensorBlockIODst;
    typedef typename TensorBlockIO::Src TensorBlockIOSrc;

    const typename TensorBlock::Storage block_storage = TensorBlock::prepareStorage(desc, scratch, root_of_expr_ast);

    typename TensorBlockIO::Dimensions input_strides(m_unshuffledInputStrides);
    TensorBlockIOSrc src(input_strides, m_impl.data(), srcCoeff(desc.offset()));

    TensorBlockIODst dst(block_storage.dimensions(), block_storage.strides(), block_storage.data());

    typename TensorBlockIO::DimensionsMap dst_to_src_dim_map(m_shuffle);
    TensorBlockIO::Copy(dst, src, dst_to_src_dim_map);

    return block_storage.AsTensorMaterializedBlock();
  }
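
  // Note on the copy above: dst_to_src_dim_map(m_shuffle) tells TensorBlockIO
  // to fill destination dimension i from source dimension m_shuffle[i], so
  // the permutation is applied by the block copy loops themselves rather than
  // by per-coefficient index arithmetic; srcCoeff is only needed once, to
  // locate the block origin in the input.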
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    const double compute_cost = m_is_identity ? TensorOpCost::AddCost<Index>()
                                              : NumDims * (2 * TensorOpCost::AddCost<Index>() +
                                                           2 * TensorOpCost::MulCost<Index>() +
                                                           TensorOpCost::DivCost<Index>());
    return m_impl.costPerCoeff(vectorized) +
           TensorOpCost(0, 0, compute_cost, m_is_identity /*vectorized*/, PacketSize);
  }
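
  // Symbolically (assuming AddCost<Index> == MulCost<Index> == 1 and writing
  // d for DivCost<Index>): a non-identity shuffle adds NumDims * (4 + d)
  // compute cost per coefficient, matching the one div, two muls and two adds
  // that srcCoeff performs per dimension.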
  EIGEN_DEVICE_FUNC typename Storage::Type data() const { return NULL; }

 protected:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index GetBlockOutputIndex(
      Index input_index, const DSizes<Index, NumDims>& input_block_strides,
      const DSizes<Index, NumDims>& output_block_strides,
      const DSizes<internal::TensorIntDivisor<Index>, NumDims>& fast_input_block_strides) const {
    Index output_index = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx = input_index / fast_input_block_strides[i];
        output_index += idx * output_block_strides[m_inverseShuffle[i]];
        input_index -= idx * input_block_strides[i];
      }
      return output_index + input_index * output_block_strides[m_inverseShuffle[0]];
    } else {
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx = input_index / fast_input_block_strides[i];
        output_index += idx * output_block_strides[m_inverseShuffle[i]];
        input_index -= idx * input_block_strides[i];
      }
      return output_index + input_index * output_block_strides[m_inverseShuffle[NumDims - 1]];
    }
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const {
    Index inputIndex = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx = index / m_fastOutputStrides[i];
        inputIndex += idx * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      return inputIndex + index * m_inputStrides[0];
    } else {
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx = index / m_fastOutputStrides[i];
        inputIndex += idx * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      return inputIndex + index * m_inputStrides[NumDims - 1];
    }
  }
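
  // A worked example of srcCoeff (hypothetical values): for the ColMajor
  // (20, 30, 50) input with shuffle {1, 2, 0} above, m_outputStrides is
  // {1, 30, 1500} and m_inputStrides is {20, 600, 1}. Output index 1533
  // decomposes as 1533 = 1 * 1500 + 1 * 30 + 3, i.e. output coordinate
  // (3, 1, 1), and maps to input index 1 * 1 + 1 * 600 + 3 * 20 = 661,
  // i.e. input coordinate (1, 3, 1) == (c, a, b) for (a, b, c) = (3, 1, 1).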
  Dimensions m_dimensions;
  bool m_is_identity;
  array<int, NumDims> m_shuffle;
  array<Index, NumDims> m_inverseShuffle;
  array<Index, NumDims> m_outputStrides;
  array<internal::TensorIntDivisor<Index>, NumDims> m_fastOutputStrides;
  array<Index, NumDims> m_inputStrides;
  array<Index, NumDims> m_unshuffledInputStrides;

  const Device EIGEN_DEVICE_REF m_device;
  TensorEvaluator<ArgType, Device> m_impl;
};
// Eval as lvalue
template <typename Shuffle, typename ArgType, typename Device>
struct TensorEvaluator<TensorShufflingOp<Shuffle, ArgType>, Device>
    : public TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device> {
  typedef TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device> Base;

  typedef TensorShufflingOp<Shuffle, ArgType> XprType;
  typedef typename XprType::Index Index;
  static constexpr int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static constexpr int PacketSize = PacketType<CoeffReturnType, Device>::size;

  static constexpr int Layout = TensorEvaluator<ArgType, Device>::Layout;
  enum {
    IsAligned = false,
    PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
    BlockAccess = TensorEvaluator<ArgType, Device>::RawAccess,
    PreferBlockAccess = true,
    RawAccess = false
  };

  typedef std::remove_const_t<Scalar> ScalarNoConst;

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  //===--------------------------------------------------------------------===//
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : Base(op, device) {}

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) const {
    return this->m_impl.coeffRef(this->srcCoeff(index));
  }
  template <int StoreMode>
  EIGEN_STRONG_INLINE void writePacket(Index index, const PacketReturnType& x) const {
    EIGEN_ALIGN_MAX std::remove_const_t<CoeffReturnType> values[PacketSize];
    internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
    EIGEN_UNROLL_LOOP
    for (int i = 0; i < PacketSize; ++i) {
      this->coeffRef(index + i) = values[i];
    }
  }
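
  // Note: storing a packet through a shuffle is a scatter. The packet is
  // first written to an aligned temporary, then each coefficient goes through
  // coeffRef, which remaps the linear output index with srcCoeff.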
  template <typename TensorBlock>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writeBlock(const TensorBlockDesc& desc, const TensorBlock& block) {
    eigen_assert(this->m_impl.data() != NULL);

    typedef internal::TensorBlockIO<ScalarNoConst, Index, NumDims, Layout> TensorBlockIO;
    typedef typename TensorBlockIO::Dst TensorBlockIODst;
    typedef typename TensorBlockIO::Src TensorBlockIOSrc;

    const Scalar* block_buffer = block.data();

    // If the block does not expose a buffer directly, materialize its
    // expression into a temporary buffer first.
    void* mem = NULL;
    if (block_buffer == NULL) {
      mem = this->m_device.allocate(desc.size() * sizeof(Scalar));
      ScalarNoConst* buf = static_cast<ScalarNoConst*>(mem);

      typedef internal::TensorBlockAssignment<ScalarNoConst, NumDims, typename TensorBlock::XprType, Index>
          TensorBlockAssignment;

      TensorBlockAssignment::Run(
          TensorBlockAssignment::target(desc.dimensions(), internal::strides<Layout>(desc.dimensions()), buf),
          block.expr());

      block_buffer = buf;
    }

    // Read from block.
    TensorBlockIOSrc src(internal::strides<Layout>(desc.dimensions()), block_buffer);

    // Write to the output buffer.
    typename TensorBlockIO::Dimensions output_strides(this->m_unshuffledInputStrides);
    typename TensorBlockIO::Dimensions output_dimensions;
    for (int i = 0; i < NumDims; ++i) {
      output_dimensions[this->m_shuffle[i]] = desc.dimension(i);
    }
    TensorBlockIODst dst(output_dimensions, output_strides, this->m_impl.data(), this->srcCoeff(desc.offset()));

    // Map destination (output) dimensions to the block's source dimensions
    // via the inverse shuffle.
    typename TensorBlockIO::DimensionsMap dst_to_src_dim_map;
    for (int i = 0; i < NumDims; ++i) {
      dst_to_src_dim_map[i] = static_cast<int>(this->m_inverseShuffle[i]);
    }
    TensorBlockIO::Copy(dst, src, dst_to_src_dim_map);

    // Deallocate temporary buffer used for the block materialization.
    if (mem != NULL) this->m_device.deallocate(mem);
  }
};
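
// A usage sketch for the lvalue evaluator (illustration only, not part of
// this header): shuffle expressions can appear on the left-hand side of an
// assignment, in which case writes are scattered back through the
// permutation:
//
//   Eigen::Tensor<float, 2> input(20, 30);
//   input.setRandom();
//   Eigen::array<int, 2> shuffle({1, 0});
//   Eigen::Tensor<float, 2> output(30, 20);
//   output.shuffle(shuffle) = input;
//   // output(b, a) == input(a, b) for all valid (a, b)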

}  // end namespace Eigen

#endif  // EIGEN_CXX11_TENSOR_TENSOR_SHUFFLING_H