Eigen-unsupported 3.4.1 (git rev 28ded8800c26864e537852658428ab44c8399e87) — source listing of TensorFixedSize.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10#ifndef EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H
11#define EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H
12
13namespace Eigen {
14
25template <typename Scalar_, typename Dimensions_, int Options_, typename IndexType>
26class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> > {
27 public:
28 typedef TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> Self;
30 typedef typename Eigen::internal::nested<Self>::type Nested;
31 typedef typename internal::traits<Self>::StorageKind StorageKind;
32 typedef typename internal::traits<Self>::Index Index;
33 typedef Scalar_ Scalar;
34 typedef typename NumTraits<Scalar>::Real RealScalar;
35 typedef typename Base::CoeffReturnType CoeffReturnType;
36
37 static const int Options = Options_;
38
39 enum {
40 IsAligned = bool(EIGEN_MAX_ALIGN_BYTES>0),
41 PacketAccess = (internal::packet_traits<Scalar>::size > 1),
42 BlockAccess = false,
43 PreferBlockAccess = false,
44 Layout = Options_ & RowMajor ? RowMajor : ColMajor,
45 CoordAccess = true,
46 RawAccess = true
47 };
48
49 //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
50 typedef internal::TensorBlockNotImplemented TensorBlock;
51 //===--------------------------------------------------------------------===//
52
53 typedef Dimensions_ Dimensions;
54 static const std::size_t NumIndices = Dimensions::count;
55
56 protected:
57 TensorStorage<Scalar, Dimensions, Options> m_storage;
58
59 public:
60 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const { return NumIndices; }
61 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; }
62 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions dimensions() const { return m_storage.dimensions(); }
63 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_storage.size(); }
64 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); }
65 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); }
66
67 // This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
68 // work, because that uses base().coeffRef() - and we don't yet
69 // implement a similar class hierarchy
70 inline Self& base() { return *this; }
71 inline const Self& base() const { return *this; }
72
73#if EIGEN_HAS_VARIADIC_TEMPLATES
74 template<typename... IndexTypes>
75 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index firstIndex, IndexTypes... otherIndices) const
76 {
77 // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
78 EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
79 return coeff(array<Index, NumIndices>{{firstIndex, otherIndices...}});
80 }
81#endif
82
83 EIGEN_DEVICE_FUNC
84 EIGEN_STRONG_INLINE const Scalar& coeff(const array<Index, NumIndices>& indices) const
85 {
86 eigen_internal_assert(checkIndexRange(indices));
87 return m_storage.data()[linearizedIndex(indices)];
88 }
89
90 EIGEN_DEVICE_FUNC
91 EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
92 {
93 eigen_internal_assert(index >= 0 && index < size());
94 return m_storage.data()[index];
95 }
96
97 EIGEN_DEVICE_FUNC
98 EIGEN_STRONG_INLINE const Scalar& coeff() const
99 {
100 EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
101 return m_storage.data()[0];
102 }
103
104
105#if EIGEN_HAS_VARIADIC_TEMPLATES
106 template<typename... IndexTypes>
107 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index firstIndex, IndexTypes... otherIndices)
108 {
109 // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
110 EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
111 return coeffRef(array<Index, NumIndices>{{firstIndex, otherIndices...}});
112 }
113#endif
114
115 EIGEN_DEVICE_FUNC
116 EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
117 {
118 eigen_internal_assert(checkIndexRange(indices));
119 return m_storage.data()[linearizedIndex(indices)];
120 }
121
122 EIGEN_DEVICE_FUNC
123 EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
124 {
125 eigen_internal_assert(index >= 0 && index < size());
126 return m_storage.data()[index];
127 }
128
129 EIGEN_DEVICE_FUNC
130 EIGEN_STRONG_INLINE Scalar& coeffRef()
131 {
132 EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
133 return m_storage.data()[0];
134 }
135
136#if EIGEN_HAS_VARIADIC_TEMPLATES
137 template<typename... IndexTypes>
138 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) const
139 {
140 // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
141 EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
142 return this->operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
143 }
144#else
145 EIGEN_DEVICE_FUNC
146 EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
147 {
148 if (Options&RowMajor) {
149 const Index index = i1 + i0 * m_storage.dimensions()[1];
150 return m_storage.data()[index];
151 } else {
152 const Index index = i0 + i1 * m_storage.dimensions()[0];
153 return m_storage.data()[index];
154 }
155 }
156 EIGEN_DEVICE_FUNC
157 EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
158 {
159 if (Options&RowMajor) {
160 const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);
161 return m_storage.data()[index];
162 } else {
163 const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);
164 return m_storage.data()[index];
165 }
166 }
167 EIGEN_DEVICE_FUNC
168 EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const
169 {
170 if (Options&RowMajor) {
171 const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0));
172 return m_storage.data()[index];
173 } else {
174 const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3));
175 return m_storage.data()[index];
176 }
177 }
178 EIGEN_DEVICE_FUNC
179 EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
180 {
181 if (Options&RowMajor) {
182 const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)));
183 return m_storage.data()[index];
184 } else {
185 const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)));
186 return m_storage.data()[index];
187 }
188 }
189#endif
190
191
192 EIGEN_DEVICE_FUNC
193 EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
194 {
195 eigen_assert(checkIndexRange(indices));
196 return coeff(indices);
197 }
198
199 EIGEN_DEVICE_FUNC
200 EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const
201 {
202 eigen_internal_assert(index >= 0 && index < size());
203 return coeff(index);
204 }
205
206 EIGEN_DEVICE_FUNC
207 EIGEN_STRONG_INLINE const Scalar& operator()() const
208 {
209 EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
210 return coeff();
211 }
212
213 EIGEN_DEVICE_FUNC
214 EIGEN_STRONG_INLINE const Scalar& operator[](Index index) const
215 {
216 // The bracket operator is only for vectors, use the parenthesis operator instead.
217 EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE);
218 return coeff(index);
219 }
220
221#if EIGEN_HAS_VARIADIC_TEMPLATES
222 template<typename... IndexTypes>
223 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index firstIndex, IndexTypes... otherIndices)
224 {
225 // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
226 EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
227 return operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
228 }
229#else
230 EIGEN_DEVICE_FUNC
231 EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
232 {
233 if (Options&RowMajor) {
234 const Index index = i1 + i0 * m_storage.dimensions()[1];
235 return m_storage.data()[index];
236 } else {
237 const Index index = i0 + i1 * m_storage.dimensions()[0];
238 return m_storage.data()[index];
239 }
240 }
241 EIGEN_DEVICE_FUNC
242 EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
243 {
244 if (Options&RowMajor) {
245 const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);
246 return m_storage.data()[index];
247 } else {
248 const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);
249 return m_storage.data()[index];
250 }
251 }
252 EIGEN_DEVICE_FUNC
253 EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
254 {
255 if (Options&RowMajor) {
256 const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0));
257 return m_storage.data()[index];
258 } else {
259 const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3));
260 return m_storage.data()[index];
261 }
262 }
263 EIGEN_DEVICE_FUNC
264 EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
265 {
266 if (Options&RowMajor) {
267 const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)));
268 return m_storage.data()[index];
269 } else {
270 const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)));
271 return m_storage.data()[index];
272 }
273 }
274#endif
275
276 EIGEN_DEVICE_FUNC
277 EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)
278 {
279 eigen_assert(checkIndexRange(indices));
280 return coeffRef(indices);
281 }
282
283 EIGEN_DEVICE_FUNC
284 EIGEN_STRONG_INLINE Scalar& operator()(Index index)
285 {
286 eigen_assert(index >= 0 && index < size());
287 return coeffRef(index);
288 }
289
290 EIGEN_DEVICE_FUNC
291 EIGEN_STRONG_INLINE Scalar& operator()()
292 {
293 EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
294 return coeffRef();
295 }
296
297 EIGEN_DEVICE_FUNC
298 EIGEN_STRONG_INLINE Scalar& operator[](Index index)
299 {
300 // The bracket operator is only for vectors, use the parenthesis operator instead
301 EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
302 return coeffRef(index);
303 }
304
305 EIGEN_DEVICE_FUNC
306 EIGEN_STRONG_INLINE TensorFixedSize()
307 : m_storage()
308 {
309 }
310
311 EIGEN_DEVICE_FUNC
312 EIGEN_STRONG_INLINE TensorFixedSize(const Self& other)
313 : m_storage(other.m_storage)
314 {
315 }
316
317#if EIGEN_HAS_RVALUE_REFERENCES
318 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorFixedSize(Self&& other)
319 : m_storage(other.m_storage)
320 {
321 }
322#endif
323
324 template<typename OtherDerived>
325 EIGEN_DEVICE_FUNC
326 EIGEN_STRONG_INLINE TensorFixedSize(const TensorBase<OtherDerived, ReadOnlyAccessors>& other)
327 {
329 Assign assign(*this, other.derived());
330 internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
331 }
332 template<typename OtherDerived>
333 EIGEN_DEVICE_FUNC
334 EIGEN_STRONG_INLINE TensorFixedSize(const TensorBase<OtherDerived, WriteAccessors>& other)
335 {
337 Assign assign(*this, other.derived());
338 internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
339 }
340
341 // FIXME: check that the dimensions of other match the dimensions of *this.
342 // Unfortunately this isn't possible yet when the rhs is an expression.
343 EIGEN_TENSOR_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(TensorFixedSize)
344
345
346 protected:
347 EIGEN_DEVICE_FUNC
348 EIGEN_STRONG_INLINE bool checkIndexRange(const array<Index, NumIndices>& /*indices*/) const
349 {
350 using internal::array_apply_and_reduce;
351 using internal::array_zip_and_reduce;
352 using internal::greater_equal_zero_op;
353 using internal::logical_and_op;
354 using internal::lesser_op;
355
356 return true;
357 // check whether the indices are all >= 0
358 /* array_apply_and_reduce<logical_and_op, greater_equal_zero_op>(indices) &&
359 // check whether the indices fit in the dimensions
360 array_zip_and_reduce<logical_and_op, lesser_op>(indices, m_storage.dimensions());*/
361 }
362
363 EIGEN_DEVICE_FUNC
364 EIGEN_STRONG_INLINE Index linearizedIndex(const array<Index, NumIndices>& indices) const
365 {
366 if (Options&RowMajor) {
367 return m_storage.dimensions().IndexOfRowMajor(indices);
368 } else {
369 return m_storage.dimensions().IndexOfColMajor(indices);
370 }
371 }
372};
373
374
375} // end namespace Eigen
376
377#endif // EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H
Cross-references: TensorAssignOp is defined in TensorAssign.h (line 57); TensorBase, the tensor base class, is declared in TensorForwardDeclarations.h (line 56). All symbols belong to namespace Eigen, the namespace containing all symbols from the Eigen library.