// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H
#define EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H

namespace Eigen {

24template<typename Scalar_, typename Dimensions_, int Options_, typename IndexType>
25class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> >
26{
27 public:
28 typedef TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> Self;
30 typedef typename Eigen::internal::nested<Self>::type Nested;
31 typedef typename internal::traits<Self>::StorageKind StorageKind;
32 typedef typename internal::traits<Self>::Index Index;
33 typedef Scalar_ Scalar;
34 typedef typename NumTraits<Scalar>::Real RealScalar;
35 typedef typename Base::CoeffReturnType CoeffReturnType;
36
37 static const int Options = Options_;
38
39 enum {
40 IsAligned = bool(EIGEN_MAX_ALIGN_BYTES>0),
41 Layout = Options_ & RowMajor ? RowMajor : ColMajor,
42 CoordAccess = true,
43 RawAccess = true
44 };
45
46 typedef Dimensions_ Dimensions;
47 static const std::size_t NumIndices = Dimensions::count;
48
49 protected:
50 TensorStorage<Scalar, Dimensions, Options> m_storage;
51
52 public:
53 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const { return NumIndices; }
54 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; }
55 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_storage.dimensions(); }
56 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_storage.size(); }
57 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); }
58 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); }
59
60 // This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
61 // work, because that uses base().coeffRef() - and we don't yet
62 // implement a similar class hierarchy
63 inline Self& base() { return *this; }
64 inline const Self& base() const { return *this; }
65
66#if EIGEN_HAS_VARIADIC_TEMPLATES
67 template<typename... IndexTypes>
68 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index firstIndex, IndexTypes... otherIndices) const
69 {
70 // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
71 EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
72 return coeff(array<Index, NumIndices>{{firstIndex, otherIndices...}});
73 }
74#endif
75
76 EIGEN_DEVICE_FUNC
77 EIGEN_STRONG_INLINE const Scalar& coeff(const array<Index, NumIndices>& indices) const
78 {
79 eigen_internal_assert(checkIndexRange(indices));
80 return m_storage.data()[linearizedIndex(indices)];
81 }
82
83 EIGEN_DEVICE_FUNC
84 EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
85 {
86 eigen_internal_assert(index >= 0 && index < size());
87 return m_storage.data()[index];
88 }
89
90 EIGEN_DEVICE_FUNC
91 EIGEN_STRONG_INLINE const Scalar& coeff() const
92 {
93 EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
94 return m_storage.data()[0];
95 }
96
97
98#if EIGEN_HAS_VARIADIC_TEMPLATES
99 template<typename... IndexTypes>
100 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index firstIndex, IndexTypes... otherIndices)
101 {
102 // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
103 EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
104 return coeffRef(array<Index, NumIndices>{{firstIndex, otherIndices...}});
105 }
106#endif
107
108 EIGEN_DEVICE_FUNC
109 EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
110 {
111 eigen_internal_assert(checkIndexRange(indices));
112 return m_storage.data()[linearizedIndex(indices)];
113 }
114
115 EIGEN_DEVICE_FUNC
116 EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
117 {
118 eigen_internal_assert(index >= 0 && index < size());
119 return m_storage.data()[index];
120 }
121
122 EIGEN_DEVICE_FUNC
123 EIGEN_STRONG_INLINE Scalar& coeffRef()
124 {
125 EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
126 return m_storage.data()[0];
127 }
128
129#if EIGEN_HAS_VARIADIC_TEMPLATES
130 template<typename... IndexTypes>
131 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) const
132 {
133 // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
134 EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
135 return this->operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
136 }
137#else
138 EIGEN_DEVICE_FUNC
139 EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
140 {
141 if (Options&RowMajor) {
142 const Index index = i1 + i0 * m_storage.dimensions()[1];
143 return m_storage.data()[index];
144 } else {
145 const Index index = i0 + i1 * m_storage.dimensions()[0];
146 return m_storage.data()[index];
147 }
148 }
149 EIGEN_DEVICE_FUNC
150 EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
151 {
152 if (Options&RowMajor) {
153 const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);
154 return m_storage.data()[index];
155 } else {
156 const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);
157 return m_storage.data()[index];
158 }
159 }
160 EIGEN_DEVICE_FUNC
161 EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const
162 {
163 if (Options&RowMajor) {
164 const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0));
165 return m_storage.data()[index];
166 } else {
167 const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3));
168 return m_storage.data()[index];
169 }
170 }
171 EIGEN_DEVICE_FUNC
172 EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
173 {
174 if (Options&RowMajor) {
175 const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)));
176 return m_storage.data()[index];
177 } else {
178 const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)));
179 return m_storage.data()[index];
180 }
181 }
182#endif
183
184
185 EIGEN_DEVICE_FUNC
186 EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
187 {
188 eigen_assert(checkIndexRange(indices));
189 return coeff(indices);
190 }
191
192 EIGEN_DEVICE_FUNC
193 EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const
194 {
195 eigen_internal_assert(index >= 0 && index < size());
196 return coeff(index);
197 }
198
199 EIGEN_DEVICE_FUNC
200 EIGEN_STRONG_INLINE const Scalar& operator()() const
201 {
202 EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
203 return coeff();
204 }
205
206 EIGEN_DEVICE_FUNC
207 EIGEN_STRONG_INLINE const Scalar& operator[](Index index) const
208 {
209 // The bracket operator is only for vectors, use the parenthesis operator instead.
210 EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE);
211 return coeff(index);
212 }
213
214#if EIGEN_HAS_VARIADIC_TEMPLATES
215 template<typename... IndexTypes>
216 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index firstIndex, IndexTypes... otherIndices)
217 {
218 // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
219 EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
220 return operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
221 }
222#else
223 EIGEN_DEVICE_FUNC
224 EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
225 {
226 if (Options&RowMajor) {
227 const Index index = i1 + i0 * m_storage.dimensions()[1];
228 return m_storage.data()[index];
229 } else {
230 const Index index = i0 + i1 * m_storage.dimensions()[0];
231 return m_storage.data()[index];
232 }
233 }
234 EIGEN_DEVICE_FUNC
235 EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
236 {
237 if (Options&RowMajor) {
238 const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);
239 return m_storage.data()[index];
240 } else {
241 const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);
242 return m_storage.data()[index];
243 }
244 }
245 EIGEN_DEVICE_FUNC
246 EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
247 {
248 if (Options&RowMajor) {
249 const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0));
250 return m_storage.data()[index];
251 } else {
252 const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3));
253 return m_storage.data()[index];
254 }
255 }
256 EIGEN_DEVICE_FUNC
257 EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
258 {
259 if (Options&RowMajor) {
260 const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)));
261 return m_storage.data()[index];
262 } else {
263 const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)));
264 return m_storage.data()[index];
265 }
266 }
267#endif
268
269 EIGEN_DEVICE_FUNC
270 EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)
271 {
272 eigen_assert(checkIndexRange(indices));
273 return coeffRef(indices);
274 }
275
276 EIGEN_DEVICE_FUNC
277 EIGEN_STRONG_INLINE Scalar& operator()(Index index)
278 {
279 eigen_assert(index >= 0 && index < size());
280 return coeffRef(index);
281 }
282
283 EIGEN_DEVICE_FUNC
284 EIGEN_STRONG_INLINE Scalar& operator()()
285 {
286 EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
287 return coeffRef();
288 }
289
290 EIGEN_DEVICE_FUNC
291 EIGEN_STRONG_INLINE Scalar& operator[](Index index)
292 {
293 // The bracket operator is only for vectors, use the parenthesis operator instead
294 EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
295 return coeffRef(index);
296 }
297
298 EIGEN_DEVICE_FUNC
299 EIGEN_STRONG_INLINE TensorFixedSize()
300 : m_storage()
301 {
302 }
303
304 EIGEN_DEVICE_FUNC
305 EIGEN_STRONG_INLINE TensorFixedSize(const Self& other)
306 : m_storage(other.m_storage)
307 {
308 }
309
310#if EIGEN_HAS_RVALUE_REFERENCES
311 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorFixedSize(Self&& other)
312 : m_storage(other.m_storage)
313 {
314 }
315#endif
316
317 template<typename OtherDerived>
318 EIGEN_DEVICE_FUNC
319 EIGEN_STRONG_INLINE TensorFixedSize(const TensorBase<OtherDerived, ReadOnlyAccessors>& other)
320 {
322 Assign assign(*this, other.derived());
323 internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
324 }
325 template<typename OtherDerived>
326 EIGEN_DEVICE_FUNC
327 EIGEN_STRONG_INLINE TensorFixedSize(const TensorBase<OtherDerived, WriteAccessors>& other)
328 {
330 Assign assign(*this, other.derived());
331 internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
332 }
333
334 EIGEN_DEVICE_FUNC
335 EIGEN_STRONG_INLINE TensorFixedSize& operator=(const TensorFixedSize& other)
336 {
337 // FIXME: check that the dimensions of other match the dimensions of *this.
338 // Unfortunately this isn't possible yet when the rhs is an expression.
340 Assign assign(*this, other);
341 internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
342 return *this;
343 }
344 template<typename OtherDerived>
345 EIGEN_DEVICE_FUNC
346 EIGEN_STRONG_INLINE TensorFixedSize& operator=(const OtherDerived& other)
347 {
348 // FIXME: check that the dimensions of other match the dimensions of *this.
349 // Unfortunately this isn't possible yet when the rhs is an expression.
351 Assign assign(*this, other);
352 internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
353 return *this;
354 }
355
356 protected:
357 EIGEN_DEVICE_FUNC
358 EIGEN_STRONG_INLINE bool checkIndexRange(const array<Index, NumIndices>& /*indices*/) const
359 {
360 using internal::array_apply_and_reduce;
361 using internal::array_zip_and_reduce;
362 using internal::greater_equal_zero_op;
363 using internal::logical_and_op;
364 using internal::lesser_op;
365
366 return true;
367 // check whether the indices are all >= 0
368 /* array_apply_and_reduce<logical_and_op, greater_equal_zero_op>(indices) &&
369 // check whether the indices fit in the dimensions
370 array_zip_and_reduce<logical_and_op, lesser_op>(indices, m_storage.dimensions());*/
371 }
372
373 EIGEN_DEVICE_FUNC
374 EIGEN_STRONG_INLINE Index linearizedIndex(const array<Index, NumIndices>& indices) const
375 {
376 if (Options&RowMajor) {
377 return m_storage.dimensions().IndexOfRowMajor(indices);
378 } else {
379 return m_storage.dimensions().IndexOfColMajor(indices);
380 }
381 }
382};


} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H