Eigen-unsupported  3.4.1 (git rev 28ded8800c26864e537852658428ab44c8399e87)
 
TensorGenerator.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H
#define EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H

namespace Eigen {

namespace internal {
template<typename Generator, typename XprType>
struct traits<TensorGeneratorOp<Generator, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions;
  static const int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};

template<typename Generator, typename XprType>
struct eval<TensorGeneratorOp<Generator, XprType>, Eigen::Dense>
{
  typedef const TensorGeneratorOp<Generator, XprType>& type;
};

template<typename Generator, typename XprType>
struct nested<TensorGeneratorOp<Generator, XprType>, 1, typename eval<TensorGeneratorOp<Generator, XprType> >::type>
{
  typedef TensorGeneratorOp<Generator, XprType> type;
};

}  // end namespace internal

/** \class TensorGeneratorOp
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor generator class.
  */
template <typename Generator, typename XprType>
class TensorGeneratorOp : public TensorBase<TensorGeneratorOp<Generator, XprType>, ReadOnlyAccessors> {
 public:
  typedef typename Eigen::internal::traits<TensorGeneratorOp>::Scalar Scalar;
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorGeneratorOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorGeneratorOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorGeneratorOp>::Index Index;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorGeneratorOp(const XprType& expr, const Generator& generator)
      : m_xpr(expr), m_generator(generator) {}

  EIGEN_DEVICE_FUNC
  const Generator& generator() const { return m_generator; }

  EIGEN_DEVICE_FUNC
  const typename internal::remove_all<typename XprType::Nested>::type&
  expression() const { return m_xpr; }

 protected:
  typename XprType::Nested m_xpr;
  const Generator m_generator;
};
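
// Usage sketch (added for illustration; not part of the original header).
// A TensorGeneratorOp is normally created through TensorBase::generate(): the
// generator functor receives the multi-dimensional coordinates of each
// coefficient and returns its value. The functor name `CoordSum` and the
// tensor sizes below are arbitrary examples; only the dimensions of the input
// expression are used, not its values.
//
//   struct CoordSum {
//     float operator()(const Eigen::array<Eigen::DenseIndex, 2>& coords) const {
//       return static_cast<float>(coords[0] + coords[1]);
//     }
//   };
//
//   Eigen::Tensor<float, 2> input(3, 4);
//   Eigen::Tensor<float, 2> result = input.generate(CoordSum());
//   // result(i, j) == float(i + j) for all i in [0, 3), j in [0, 4).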

// Eval as rvalue
template<typename Generator, typename ArgType, typename Device>
struct TensorEvaluator<const TensorGeneratorOp<Generator, ArgType>, Device>
{
  typedef TensorGeneratorOp<Generator, ArgType> XprType;
  typedef typename XprType::Index Index;
  typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
  static const int NumDims = internal::array_size<Dimensions>::value;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;
  enum {
    IsAligned = false,
    PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
    BlockAccess = true,
    PreferBlockAccess = true,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,  // to be implemented
    RawAccess = false
  };
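
  // Note (added commentary, not part of the original header): PacketAccess is
  // only enabled when the device packet type holds more than one scalar.
  // BlockAccess and PreferBlockAccess are set because block() below generates
  // a whole output block at once, which amortizes the index-to-coordinate
  // conversion instead of repeating it for every coefficient.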

  typedef internal::TensorIntDivisor<Index> IndexDivisor;

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;

  typedef typename internal::TensorMaterializedBlock<CoeffReturnType, NumDims,
                                                     Layout, Index>
      TensorBlock;
  //===--------------------------------------------------------------------===//

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_device(device), m_generator(op.generator())
  {
    TensorEvaluator<ArgType, Device> argImpl(op.expression(), device);
    m_dimensions = argImpl.dimensions();

    // Precompute the strides of the input dimensions (and fast integer
    // divisors for them) so that linear indices can be converted back to
    // multi-dimensional coordinates in extract_coordinates().
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_strides[0] = 1;
      EIGEN_UNROLL_LOOP
      for (int i = 1; i < NumDims; ++i) {
        m_strides[i] = m_strides[i - 1] * m_dimensions[i - 1];
        if (m_strides[i] != 0) m_fast_strides[i] = IndexDivisor(m_strides[i]);
      }
    } else {
      m_strides[NumDims - 1] = 1;
      EIGEN_UNROLL_LOOP
      for (int i = NumDims - 2; i >= 0; --i) {
        m_strides[i] = m_strides[i + 1] * m_dimensions[i + 1];
        if (m_strides[i] != 0) m_fast_strides[i] = IndexDivisor(m_strides[i]);
      }
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) {
    return true;
  }
  EIGEN_STRONG_INLINE void cleanup() {
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    array<Index, NumDims> coords;
    extract_coordinates(index, coords);
    return m_generator(coords);
  }

  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    const int packetSize = PacketType<CoeffReturnType, Device>::size;
    EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index+packetSize-1 < dimensions().TotalSize());

    EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[packetSize];
    for (int i = 0; i < packetSize; ++i) {
      values[i] = coeff(index+i);
    }
    PacketReturnType rslt = internal::pload<PacketReturnType>(values);
    return rslt;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  internal::TensorBlockResourceRequirements getResourceRequirements() const {
    const size_t target_size = m_device.firstLevelCacheSize();
    // TODO(ezhulenev): Generator should have a cost.
    return internal::TensorBlockResourceRequirements::skewed<Scalar>(
        target_size);
  }

  // Per-dimension iterator state used by block(): `stride` is the step in the
  // output buffer for this dimension, `span` equals stride * (size - 1),
  // `size` is the block extent in this dimension, and `count` is the current
  // iteration position.
  struct BlockIteratorState {
    Index stride;
    Index span;
    Index size;
    Index count;
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
  block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
        bool /*root_of_expr_ast*/ = false) const {
    static const bool is_col_major =
        static_cast<int>(Layout) == static_cast<int>(ColMajor);

    // Compute spatial coordinates for the first block element.
    array<Index, NumDims> coords;
    extract_coordinates(desc.offset(), coords);
    array<Index, NumDims> initial_coords = coords;

    // Offset in the output block buffer.
    Index offset = 0;

    // Initialize the output block iterator state. Dimensions in this array are
    // always in inner-most -> outer-most order (column-major layout).
    array<BlockIteratorState, NumDims> it;
    for (int i = 0; i < NumDims; ++i) {
      const int dim = is_col_major ? i : NumDims - 1 - i;
      it[i].size = desc.dimension(dim);
      it[i].stride = i == 0 ? 1 : (it[i - 1].size * it[i - 1].stride);
      it[i].span = it[i].stride * (it[i].size - 1);
      it[i].count = 0;
    }
    eigen_assert(it[0].stride == 1);

    // Prepare storage for the materialized generator result.
    const typename TensorBlock::Storage block_storage =
        TensorBlock::prepareStorage(desc, scratch);

    CoeffReturnType* block_buffer = block_storage.data();

    static const int packet_size = PacketType<CoeffReturnType, Device>::size;

    static const int inner_dim = is_col_major ? 0 : NumDims - 1;
    const Index inner_dim_size = it[0].size;
    const Index inner_dim_vectorized = inner_dim_size - packet_size;

    while (it[NumDims - 1].count < it[NumDims - 1].size) {
      Index i = 0;
      // Generate data for the vectorized part of the inner-most dimension.
      for (; i <= inner_dim_vectorized; i += packet_size) {
        for (Index j = 0; j < packet_size; ++j) {
          array<Index, NumDims> j_coords = coords;  // Break loop dependence.
          j_coords[inner_dim] += j;
          *(block_buffer + offset + i + j) = m_generator(j_coords);
        }
        coords[inner_dim] += packet_size;
      }
      // Finalize the non-vectorized part of the inner-most dimension.
      for (; i < inner_dim_size; ++i) {
        *(block_buffer + offset + i) = m_generator(coords);
        coords[inner_dim]++;
      }
      coords[inner_dim] = initial_coords[inner_dim];

      // For a 1d tensor we need to generate only one inner-most dimension.
      if (NumDims == 1) break;

      // Advance the outer dimensions and update the output offset.
      for (i = 1; i < NumDims; ++i) {
        if (++it[i].count < it[i].size) {
          offset += it[i].stride;
          coords[is_col_major ? i : NumDims - 1 - i]++;
          break;
        }
        if (i != NumDims - 1) it[i].count = 0;
        coords[is_col_major ? i : NumDims - 1 - i] =
            initial_coords[is_col_major ? i : NumDims - 1 - i];
        offset -= it[i].span;
      }
    }

    return block_storage.AsTensorMaterializedBlock();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
  costPerCoeff(bool) const {
    // TODO(rmlarsen): This is just a placeholder. Define interface to make
    // generators return their cost.
    return TensorOpCost(0, 0, TensorOpCost::AddCost<Scalar>() +
                              TensorOpCost::MulCost<Scalar>());
  }

  EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; }

#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler&) const {}
#endif

 protected:
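  // Worked example (added commentary, not part of the original header): for a
  // column-major 3x4 tensor, m_strides = {1, 3}. extract_coordinates(7, coords)
  // computes idx = 7 / 3 = 2 for the outer dimension, leaving index = 7 - 2*3 = 1
  // for the inner dimension, so coords = {1, 2} -- i.e. linear index 7 maps to
  // element (1, 2), consistent with 1 + 2*3 = 7.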
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void extract_coordinates(Index index, array<Index, NumDims>& coords) const {
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx = index / m_fast_strides[i];
        index -= idx * m_strides[i];
        coords[i] = idx;
      }
      coords[0] = index;
    } else {
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx = index / m_fast_strides[i];
        index -= idx * m_strides[i];
        coords[i] = idx;
      }
      coords[NumDims-1] = index;
    }
  }

  const Device EIGEN_DEVICE_REF m_device;
  Dimensions m_dimensions;
  array<Index, NumDims> m_strides;
  array<IndexDivisor, NumDims> m_fast_strides;
  Generator m_generator;
};

}  // end namespace Eigen

#endif  // EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H