Eigen-unsupported  5.0.1-dev+284dcc12
 
Loading...
Searching...
No Matches
TensorStriding.h
1// This file is part of Eigen, a lightweight C++ template library
2// for linear algebra.
3//
4// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
5//
6// This Source Code Form is subject to the terms of the Mozilla
7// Public License v. 2.0. If a copy of the MPL was not distributed
8// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
10#ifndef EIGEN_CXX11_TENSOR_TENSOR_STRIDING_H
11#define EIGEN_CXX11_TENSOR_TENSOR_STRIDING_H
12
13// IWYU pragma: private
14#include "./InternalHeaderCheck.h"
15
16namespace Eigen {
17
18namespace internal {
19template <typename Strides, typename XprType>
20struct traits<TensorStridingOp<Strides, XprType> > : public traits<XprType> {
21 typedef typename XprType::Scalar Scalar;
22 typedef traits<XprType> XprTraits;
23 typedef typename XprTraits::StorageKind StorageKind;
24 typedef typename XprTraits::Index Index;
25 typedef typename XprType::Nested Nested;
26 typedef std::remove_reference_t<Nested> Nested_;
27 static constexpr int NumDimensions = XprTraits::NumDimensions;
28 static constexpr int Layout = XprTraits::Layout;
29 typedef typename XprTraits::PointerType PointerType;
30};
31
32template <typename Strides, typename XprType>
33struct eval<TensorStridingOp<Strides, XprType>, Eigen::Dense> {
34 typedef const TensorStridingOp<Strides, XprType> EIGEN_DEVICE_REF type;
35};
36
37template <typename Strides, typename XprType>
38struct nested<TensorStridingOp<Strides, XprType>, 1, typename eval<TensorStridingOp<Strides, XprType> >::type> {
39 typedef TensorStridingOp<Strides, XprType> type;
40};
41
42} // end namespace internal
43
49template <typename Strides, typename XprType>
50class TensorStridingOp : public TensorBase<TensorStridingOp<Strides, XprType> > {
51 public:
53 typedef typename Eigen::internal::traits<TensorStridingOp>::Scalar Scalar;
54 typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
55 typedef typename XprType::CoeffReturnType CoeffReturnType;
56 typedef typename Eigen::internal::nested<TensorStridingOp>::type Nested;
57 typedef typename Eigen::internal::traits<TensorStridingOp>::StorageKind StorageKind;
58 typedef typename Eigen::internal::traits<TensorStridingOp>::Index Index;
59
60 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorStridingOp(const XprType& expr, const Strides& dims)
61 : m_xpr(expr), m_dims(dims) {}
62
63 EIGEN_DEVICE_FUNC const Strides& strides() const { return m_dims; }
64
65 EIGEN_DEVICE_FUNC const internal::remove_all_t<typename XprType::Nested>& expression() const { return m_xpr; }
66
67 EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorStridingOp)
68
69 protected:
70 typename XprType::Nested m_xpr;
71 const Strides m_dims;
72};
73
74// Eval as rvalue
75template <typename Strides, typename ArgType, typename Device>
76struct TensorEvaluator<const TensorStridingOp<Strides, ArgType>, Device> {
78 typedef typename XprType::Index Index;
79 static constexpr int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
80 typedef DSizes<Index, NumDims> Dimensions;
81 typedef typename XprType::Scalar Scalar;
82 typedef typename XprType::CoeffReturnType CoeffReturnType;
83 typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
84 static constexpr int PacketSize = PacketType<CoeffReturnType, Device>::size;
85 typedef StorageMemory<CoeffReturnType, Device> Storage;
86 typedef typename Storage::Type EvaluatorPointerType;
87
88 static constexpr int Layout = TensorEvaluator<ArgType, Device>::Layout;
89 enum {
90 IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/ false,
91 PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
92 BlockAccess = false,
94 CoordAccess = false, // to be implemented
95 RawAccess = false
96 };
97
98 //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
99 typedef internal::TensorBlockNotImplemented TensorBlock;
100 //===--------------------------------------------------------------------===//
101
102 EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : m_impl(op.expression(), device) {
103 m_dimensions = m_impl.dimensions();
104 for (int i = 0; i < NumDims; ++i) {
105 m_dimensions[i] = Eigen::numext::ceil(static_cast<float>(m_dimensions[i]) / op.strides()[i]);
106 }
107
108 const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
109 if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
110 m_outputStrides[0] = 1;
111 m_inputStrides[0] = 1;
112 for (int i = 1; i < NumDims; ++i) {
113 m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1];
114 m_inputStrides[i] = m_inputStrides[i - 1] * input_dims[i - 1];
115 m_inputStrides[i - 1] *= op.strides()[i - 1];
116 }
117 m_inputStrides[NumDims - 1] *= op.strides()[NumDims - 1];
118 } else { // RowMajor
119 m_outputStrides[NumDims - 1] = 1;
120 m_inputStrides[NumDims - 1] = 1;
121 for (int i = NumDims - 2; i >= 0; --i) {
122 m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1];
123 m_inputStrides[i] = m_inputStrides[i + 1] * input_dims[i + 1];
124 m_inputStrides[i + 1] *= op.strides()[i + 1];
125 }
126 m_inputStrides[0] *= op.strides()[0];
127 }
128 }
129
130 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
131
132 EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) {
133 m_impl.evalSubExprsIfNeeded(NULL);
134 return true;
135 }
136 EIGEN_STRONG_INLINE void cleanup() { m_impl.cleanup(); }
137
138 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
139 return m_impl.coeff(srcCoeff(index));
140 }
141
142 template <int LoadMode>
143 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
144 EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
145 eigen_assert(index + PacketSize - 1 < dimensions().TotalSize());
146
147 Index inputIndices[] = {0, 0};
148 Index indices[] = {index, index + PacketSize - 1};
149 if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
150 EIGEN_UNROLL_LOOP
151 for (int i = NumDims - 1; i > 0; --i) {
152 const Index idx0 = indices[0] / m_outputStrides[i];
153 const Index idx1 = indices[1] / m_outputStrides[i];
154 inputIndices[0] += idx0 * m_inputStrides[i];
155 inputIndices[1] += idx1 * m_inputStrides[i];
156 indices[0] -= idx0 * m_outputStrides[i];
157 indices[1] -= idx1 * m_outputStrides[i];
158 }
159 inputIndices[0] += indices[0] * m_inputStrides[0];
160 inputIndices[1] += indices[1] * m_inputStrides[0];
161 } else { // RowMajor
162 EIGEN_UNROLL_LOOP
163 for (int i = 0; i < NumDims - 1; ++i) {
164 const Index idx0 = indices[0] / m_outputStrides[i];
165 const Index idx1 = indices[1] / m_outputStrides[i];
166 inputIndices[0] += idx0 * m_inputStrides[i];
167 inputIndices[1] += idx1 * m_inputStrides[i];
168 indices[0] -= idx0 * m_outputStrides[i];
169 indices[1] -= idx1 * m_outputStrides[i];
170 }
171 inputIndices[0] += indices[0] * m_inputStrides[NumDims - 1];
172 inputIndices[1] += indices[1] * m_inputStrides[NumDims - 1];
173 }
174 if (inputIndices[1] - inputIndices[0] == PacketSize - 1) {
175 PacketReturnType rslt = m_impl.template packet<Unaligned>(inputIndices[0]);
176 return rslt;
177 } else {
178 EIGEN_ALIGN_MAX std::remove_const_t<CoeffReturnType> values[PacketSize];
179 values[0] = m_impl.coeff(inputIndices[0]);
180 values[PacketSize - 1] = m_impl.coeff(inputIndices[1]);
181 EIGEN_UNROLL_LOOP
182 for (int i = 1; i < PacketSize - 1; ++i) {
183 values[i] = coeff(index + i);
184 }
185 PacketReturnType rslt = internal::pload<PacketReturnType>(values);
186 return rslt;
187 }
188 }
189
190 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
191 double compute_cost = (NumDims - 1) * (TensorOpCost::AddCost<Index>() + TensorOpCost::MulCost<Index>() +
192 TensorOpCost::DivCost<Index>()) +
193 TensorOpCost::MulCost<Index>();
194 if (vectorized) {
195 compute_cost *= 2; // packet() computes two indices
196 }
197 const int innerDim = (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? 0 : (NumDims - 1);
198 return m_impl.costPerCoeff(vectorized && m_inputStrides[innerDim] == 1) +
199 // Computation is not vectorized per se, but it is done once per packet.
200 TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
201 }
202
203 EIGEN_DEVICE_FUNC typename Storage::Type data() const { return NULL; }
204
205 protected:
206 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const {
207 Index inputIndex = 0;
208 if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
209 EIGEN_UNROLL_LOOP
210 for (int i = NumDims - 1; i > 0; --i) {
211 const Index idx = index / m_outputStrides[i];
212 inputIndex += idx * m_inputStrides[i];
213 index -= idx * m_outputStrides[i];
214 }
215 inputIndex += index * m_inputStrides[0];
216 } else { // RowMajor
217 EIGEN_UNROLL_LOOP
218 for (int i = 0; i < NumDims - 1; ++i) {
219 const Index idx = index / m_outputStrides[i];
220 inputIndex += idx * m_inputStrides[i];
221 index -= idx * m_outputStrides[i];
222 }
223 inputIndex += index * m_inputStrides[NumDims - 1];
224 }
225 return inputIndex;
226 }
227
228 Dimensions m_dimensions;
229 array<Index, NumDims> m_outputStrides;
230 array<Index, NumDims> m_inputStrides;
231 TensorEvaluator<ArgType, Device> m_impl;
232};
233
234// Eval as lvalue
// Lvalue evaluator: reuses all the read-side machinery (dimensions, stride
// tables, srcCoeff) from the const evaluator base and adds write access.
template <typename Strides, typename ArgType, typename Device>
struct TensorEvaluator<TensorStridingOp<Strides, ArgType>, Device>
    : public TensorEvaluator<const TensorStridingOp<Strides, ArgType>, Device> {
  typedef TensorStridingOp<Strides, ArgType> XprType;
  typedef TensorEvaluator<const XprType, Device> Base;
  // typedef typename XprType::Index Index;
  static constexpr int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  // typedef DSizes<Index, NumDims> Dimensions;

  static constexpr int Layout = TensorEvaluator<ArgType, Device>::Layout;
  enum {
    IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/ false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    PreferBlockAccess = false,
    CoordAccess = false,  // to be implemented
    RawAccess = false
  };

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : Base(op, device) {}

  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static constexpr int PacketSize = PacketType<CoeffReturnType, Device>::size;

  // Writable scalar access: remap the output index onto the wrapped
  // expression via the base class's srcCoeff().
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) const {
    return this->m_impl.coeffRef(this->srcCoeff(index));
  }

  template <int StoreMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index index, const PacketReturnType& x) const {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index + PacketSize - 1 < this->dimensions().TotalSize());

    // Map the first and last coefficient of the packet to input indices by
    // decomposing the output index along the precomputed stride tables
    // (mirrors the read-side packet() logic in the base evaluator).
    Index inputIndices[] = {0, 0};
    Index indices[] = {index, index + PacketSize - 1};
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      EIGEN_UNROLL_LOOP
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx0 = indices[0] / this->m_outputStrides[i];
        const Index idx1 = indices[1] / this->m_outputStrides[i];
        inputIndices[0] += idx0 * this->m_inputStrides[i];
        inputIndices[1] += idx1 * this->m_inputStrides[i];
        indices[0] -= idx0 * this->m_outputStrides[i];
        indices[1] -= idx1 * this->m_outputStrides[i];
      }
      inputIndices[0] += indices[0] * this->m_inputStrides[0];
      inputIndices[1] += indices[1] * this->m_inputStrides[0];
    } else {  // RowMajor
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx0 = indices[0] / this->m_outputStrides[i];
        const Index idx1 = indices[1] / this->m_outputStrides[i];
        inputIndices[0] += idx0 * this->m_inputStrides[i];
        inputIndices[1] += idx1 * this->m_inputStrides[i];
        indices[0] -= idx0 * this->m_outputStrides[i];
        indices[1] -= idx1 * this->m_outputStrides[i];
      }
      inputIndices[0] += indices[0] * this->m_inputStrides[NumDims - 1];
      inputIndices[1] += indices[1] * this->m_inputStrides[NumDims - 1];
    }
    // Contiguous destination: store the packet in one go. Otherwise scatter
    // the packet lanes coefficient by coefficient.
    if (inputIndices[1] - inputIndices[0] == PacketSize - 1) {
      this->m_impl.template writePacket<Unaligned>(inputIndices[0], x);
    } else {
      EIGEN_ALIGN_MAX Scalar values[PacketSize];
      internal::pstore<Scalar, PacketReturnType>(values, x);
      this->m_impl.coeffRef(inputIndices[0]) = values[0];
      this->m_impl.coeffRef(inputIndices[1]) = values[PacketSize - 1];
      EIGEN_UNROLL_LOOP
      for (int i = 1; i < PacketSize - 1; ++i) {
        // Middle lanes go through coeffRef(), which recomputes srcCoeff.
        this->coeffRef(index + i) = values[i];
      }
    }
  }
};
311
312} // end namespace Eigen
313
314#endif // EIGEN_CXX11_TENSOR_TENSOR_STRIDING_H
The tensor base class.
Definition TensorForwardDeclarations.h:68
Tensor striding class.
Definition TensorStriding.h:50
Namespace containing all symbols from the Eigen library.
EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
The tensor evaluator class.
Definition TensorEvaluator.h:30