Eigen-unsupported  3.4.1 (git rev 28ded8800c26864e537852658428ab44c8399e87)
 
Loading...
Searching...
No Matches
TensorConversion.h
1// This file is part of Eigen, a lightweight C++ template library
2// for linear algebra.
3//
4// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
5//
6// This Source Code Form is subject to the terms of the Mozilla
7// Public License v. 2.0. If a copy of the MPL was not distributed
8// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
10#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONVERSION_H
11#define EIGEN_CXX11_TENSOR_TENSOR_CONVERSION_H
12
13namespace Eigen {
14
15namespace internal {
16template<typename TargetType, typename XprType>
17struct traits<TensorConversionOp<TargetType, XprType> >
18{
19 // Type promotion to handle the case where the types of the lhs and the rhs are different.
20 typedef TargetType Scalar;
21 typedef typename traits<XprType>::StorageKind StorageKind;
22 typedef typename traits<XprType>::Index Index;
23 typedef typename XprType::Nested Nested;
24 typedef typename remove_reference<Nested>::type _Nested;
25 static const int NumDimensions = traits<XprType>::NumDimensions;
26 static const int Layout = traits<XprType>::Layout;
27 enum { Flags = 0 };
28 typedef typename TypeConversion<Scalar, typename traits<XprType>::PointerType>::type PointerType;
29};
30
31template<typename TargetType, typename XprType>
32struct eval<TensorConversionOp<TargetType, XprType>, Eigen::Dense>
33{
34 typedef const TensorConversionOp<TargetType, XprType>& type;
35};
36
37template<typename TargetType, typename XprType>
38struct nested<TensorConversionOp<TargetType, XprType>, 1, typename eval<TensorConversionOp<TargetType, XprType> >::type>
39{
40 typedef TensorConversionOp<TargetType, XprType> type;
41};
42
43} // end namespace internal
44
45
// Converts packets of SrcPacket coefficients into packets of TgtPacket
// coefficients. SrcCoeffRatio and TgtCoeffRatio describe how many source
// (resp. target) packets take part in one conversion; the specializations
// below implement the supported ratio combinations.
template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket, int SrcCoeffRatio, int TgtCoeffRatio>
struct PacketConverter;
48
49template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket>
50struct PacketConverter<TensorEvaluator, SrcPacket, TgtPacket, 1, 1> {
51 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
52 PacketConverter(const TensorEvaluator& impl)
53 : m_impl(impl) {}
54
55 template<int LoadMode, typename Index>
56 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const {
57 return internal::pcast<SrcPacket, TgtPacket>(m_impl.template packet<LoadMode>(index));
58 }
59
60 private:
61 const TensorEvaluator& m_impl;
62};
63
64
65template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket>
66struct PacketConverter<TensorEvaluator, SrcPacket, TgtPacket, 2, 1> {
67 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
68 PacketConverter(const TensorEvaluator& impl)
69 : m_impl(impl) {}
70
71 template<int LoadMode, typename Index>
72 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const {
73 const int SrcPacketSize = internal::unpacket_traits<SrcPacket>::size;
74
75 SrcPacket src1 = m_impl.template packet<LoadMode>(index);
76 SrcPacket src2 = m_impl.template packet<LoadMode>(index + SrcPacketSize);
77 TgtPacket result = internal::pcast<SrcPacket, TgtPacket>(src1, src2);
78 return result;
79 }
80
81 private:
82 const TensorEvaluator& m_impl;
83};
84
85template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket>
86struct PacketConverter<TensorEvaluator, SrcPacket, TgtPacket, 4, 1> {
87 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
88 PacketConverter(const TensorEvaluator& impl)
89 : m_impl(impl) {}
90
91 template<int LoadMode, typename Index>
92 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const {
93 const int SrcPacketSize = internal::unpacket_traits<SrcPacket>::size;
94
95 SrcPacket src1 = m_impl.template packet<LoadMode>(index);
96 SrcPacket src2 = m_impl.template packet<LoadMode>(index + SrcPacketSize);
97 SrcPacket src3 = m_impl.template packet<LoadMode>(index + 2 * SrcPacketSize);
98 SrcPacket src4 = m_impl.template packet<LoadMode>(index + 3 * SrcPacketSize);
99 TgtPacket result = internal::pcast<SrcPacket, TgtPacket>(src1, src2, src3, src4);
100 return result;
101 }
102
103 private:
104 const TensorEvaluator& m_impl;
105};
106
107template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket>
108struct PacketConverter<TensorEvaluator, SrcPacket, TgtPacket, 8, 1> {
109 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
110 PacketConverter(const TensorEvaluator& impl)
111 : m_impl(impl) {}
112
113 template<int LoadMode, typename Index>
114 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const {
115 const int SrcPacketSize = internal::unpacket_traits<SrcPacket>::size;
116
117 SrcPacket src1 = m_impl.template packet<LoadMode>(index);
118 SrcPacket src2 = m_impl.template packet<LoadMode>(index + 1 * SrcPacketSize);
119 SrcPacket src3 = m_impl.template packet<LoadMode>(index + 2 * SrcPacketSize);
120 SrcPacket src4 = m_impl.template packet<LoadMode>(index + 3 * SrcPacketSize);
121 SrcPacket src5 = m_impl.template packet<LoadMode>(index + 4 * SrcPacketSize);
122 SrcPacket src6 = m_impl.template packet<LoadMode>(index + 5 * SrcPacketSize);
123 SrcPacket src7 = m_impl.template packet<LoadMode>(index + 6 * SrcPacketSize);
124 SrcPacket src8 = m_impl.template packet<LoadMode>(index + 7 * SrcPacketSize);
125 TgtPacket result = internal::pcast<SrcPacket, TgtPacket>(src1, src2, src3, src4, src5, src6, src7, src8);
126 return result;
127 }
128
129 private:
130 const TensorEvaluator& m_impl;
131};
132
// 1:N ratio — one source packet expands into TgtCoeffRatio target packets
// (e.g. widening casts). Unlike the other specializations this one must
// guard against reading past the end of the operand's buffer.
template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket, int TgtCoeffRatio>
struct PacketConverter<TensorEvaluator, SrcPacket, TgtPacket, 1, TgtCoeffRatio> {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  PacketConverter(const TensorEvaluator& impl)
      : m_impl(impl), m_maxIndex(impl.dimensions().TotalSize()) {}

  template<int LoadMode, typename Index>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const {
    const int SrcPacketSize = internal::unpacket_traits<SrcPacket>::size;
    // Only call m_impl.packet() when we have direct access to the underlying data. This
    // ensures that we don't compute the subexpression twice. We may however load some
    // coefficients twice, but in practice this doesn't negatively impact performance.
    if (m_impl.data() && (index + SrcPacketSize < m_maxIndex)) {
      // Force unaligned memory loads since we can't ensure alignment anymore
      return internal::pcast<SrcPacket, TgtPacket>(m_impl.template packet<Unaligned>(index));
    } else {
      // Fallback near the end of the buffer (or when the subexpression has no
      // materialized data): convert coefficient by coefficient into an
      // aligned stack buffer and load that as one target packet.
      const int TgtPacketSize = internal::unpacket_traits<TgtPacket>::size;
      typedef typename internal::unpacket_traits<SrcPacket>::type SrcType;
      typedef typename internal::unpacket_traits<TgtPacket>::type TgtType;
      internal::scalar_cast_op<SrcType, TgtType> converter;
      EIGEN_ALIGN_MAX typename internal::unpacket_traits<TgtPacket>::type values[TgtPacketSize];
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < TgtPacketSize; ++i) {
        values[i] = converter(m_impl.coeff(index+i));
      }
      TgtPacket rslt = internal::pload<TgtPacket>(values);
      return rslt;
    }
  }

 private:
  const TensorEvaluator& m_impl;
  // Total number of coefficients in the operand; used to detect packet loads
  // that would run past the end of its buffer.
  const typename TensorEvaluator::Index m_maxIndex;
};
167
// Expression node representing the element-wise cast of XprType's
// coefficients to TargetType. Read-only: it only wraps its operand.
template <typename TargetType, typename XprType>
class TensorConversionOp : public TensorBase<TensorConversionOp<TargetType, XprType>, ReadOnlyAccessors> {
  public:
    typedef typename internal::traits<TensorConversionOp>::Scalar Scalar;
    typedef typename internal::traits<TensorConversionOp>::StorageKind StorageKind;
    typedef typename internal::traits<TensorConversionOp>::Index Index;
    typedef typename internal::nested<TensorConversionOp>::type Nested;
    typedef Scalar CoeffReturnType;
    typedef typename NumTraits<Scalar>::Real RealScalar;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorConversionOp(const XprType& xpr)
        : m_xpr(xpr) {}

    // Accessor for the wrapped operand expression.
    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }

  protected:
    typename XprType::Nested m_xpr;
};
195
// Evaluates the conversion's sub-expression. General case: the scalar types
// differ, so the operand cannot write into the destination buffer directly —
// it is evaluated on its own (NULL destination) and the cast is applied per
// coefficient later. Returns true, i.e. the caller still has work to do
// (mirrors the evalSubExprsIfNeeded convention — see TensorEvaluator.h).
template <bool SameType, typename Eval, typename EvalPointerType> struct ConversionSubExprEval {
  static EIGEN_STRONG_INLINE bool run(Eval& impl, EvalPointerType) {
    impl.evalSubExprsIfNeeded(NULL);
    return true;
  }
};
202
// Same scalar type: no cast is needed, so forward the destination buffer and
// let the operand decide whether it can evaluate directly into it.
template <typename Eval, typename EvalPointerType> struct ConversionSubExprEval<true, Eval, EvalPointerType> {
  static EIGEN_STRONG_INLINE bool run(Eval& impl, EvalPointerType data) {
    return impl.evalSubExprsIfNeeded(data);
  }
};
208
#ifdef EIGEN_USE_THREADS
// Asynchronous counterparts of ConversionSubExprEval used by the thread-pool
// device: completion is signaled through the `done` callback instead of a
// return value.
template <bool SameType, typename Eval, typename EvalPointerType,
          typename EvalSubExprsCallback>
struct ConversionSubExprEvalAsync {
  static EIGEN_STRONG_INLINE void run(Eval& impl, EvalPointerType, EvalSubExprsCallback done) {
    // Scalar types differ: evaluate the operand into its own buffer (nullptr).
    impl.evalSubExprsIfNeededAsync(nullptr, std::move(done));
  }
};

// Same scalar type: the operand may evaluate directly into `data`.
template <typename Eval, typename EvalPointerType,
          typename EvalSubExprsCallback>
struct ConversionSubExprEvalAsync<true, Eval, EvalPointerType,
                                  EvalSubExprsCallback> {
  static EIGEN_STRONG_INLINE void run(Eval& impl, EvalPointerType data, EvalSubExprsCallback done) {
    impl.evalSubExprsIfNeededAsync(data, std::move(done));
  }
};
#endif
227
228namespace internal {
229
230template <typename SrcType, typename TargetType, bool IsSameT>
231struct CoeffConv {
232 template <typename ArgType, typename Device>
233 static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetType run(const TensorEvaluator<ArgType, Device>& impl, Index index) {
234 internal::scalar_cast_op<SrcType, TargetType> converter;
235 return converter(impl.coeff(index));
236 }
237};
238
// Identical source and target scalar types: no conversion, just forward the
// coefficient.
template <typename SrcType, typename TargetType>
struct CoeffConv<SrcType, TargetType, true> {
  template <typename ArgType, typename Device>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetType run(const TensorEvaluator<ArgType, Device>& impl, Index index) {
    return impl.coeff(index);
  }
};
246
247template <typename SrcPacket, typename TargetPacket, int LoadMode, bool ActuallyVectorize, bool IsSameT>
248struct PacketConv {
249 typedef typename internal::unpacket_traits<SrcPacket>::type SrcType;
250 typedef typename internal::unpacket_traits<TargetPacket>::type TargetType;
251
252 static const int PacketSize = internal::unpacket_traits<TargetPacket>::size;
253
254 template <typename ArgType, typename Device>
255 static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetPacket run(const TensorEvaluator<ArgType, Device>& impl, Index index) {
256 internal::scalar_cast_op<SrcType, TargetType> converter;
257 EIGEN_ALIGN_MAX typename internal::remove_const<TargetType>::type values[PacketSize];
258 EIGEN_UNROLL_LOOP
259 for (int i = 0; i < PacketSize; ++i) {
260 values[i] = converter(impl.coeff(index+i));
261 }
262 TargetPacket rslt = internal::pload<TargetPacket>(values);
263 return rslt;
264 }
265};
266
267template <typename SrcPacket, typename TargetPacket, int LoadMode, bool IsSameT>
268struct PacketConv<SrcPacket, TargetPacket, LoadMode, true, IsSameT> {
269 typedef typename internal::unpacket_traits<SrcPacket>::type SrcType;
270 typedef typename internal::unpacket_traits<TargetPacket>::type TargetType;
271
272 template <typename ArgType, typename Device>
273 static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetPacket run(const TensorEvaluator<ArgType, Device>& impl, Index index) {
274 const int SrcCoeffRatio = internal::type_casting_traits<SrcType, TargetType>::SrcCoeffRatio;
275 const int TgtCoeffRatio = internal::type_casting_traits<SrcType, TargetType>::TgtCoeffRatio;
276 PacketConverter<TensorEvaluator<ArgType, Device>, SrcPacket, TargetPacket,
277 SrcCoeffRatio, TgtCoeffRatio> converter(impl);
278 return converter.template packet<LoadMode>(index);
279 }
280};
281
282template <typename SrcPacket, typename TargetPacket, int LoadMode>
283struct PacketConv<SrcPacket, TargetPacket, LoadMode, /*ActuallyVectorize=*/false, /*IsSameT=*/true> {
284 typedef typename internal::unpacket_traits<TargetPacket>::type TargetType;
285 static const int PacketSize = internal::unpacket_traits<TargetPacket>::size;
286
287 template <typename ArgType, typename Device>
288 static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetPacket run(const TensorEvaluator<ArgType, Device>& impl, Index index) {
289 EIGEN_ALIGN_MAX typename internal::remove_const<TargetType>::type values[PacketSize];
290 for (int i = 0; i < PacketSize; ++i) values[i] = impl.coeff(index+i);
291 return internal::pload<TargetPacket>(values);
292 }
293};
294
// Same scalar type and the operand supports packet access: forward the packet
// load untouched.
template <typename SrcPacket, typename TargetPacket, int LoadMode>
struct PacketConv<SrcPacket, TargetPacket, LoadMode, /*ActuallyVectorize=*/true, /*IsSameT=*/true> {
  template <typename ArgType, typename Device>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetPacket run(const TensorEvaluator<ArgType, Device>& impl, Index index) {
    return impl.template packet<LoadMode>(index);
  }
};
302
303} // namespace internal
304
// Eval as rvalue
template<typename TargetType, typename ArgType, typename Device>
struct TensorEvaluator<const TensorConversionOp<TargetType, ArgType>, Device>
{
  typedef TensorConversionOp<TargetType, ArgType> XprType;
  typedef typename XprType::Index Index;
  typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
  typedef TargetType Scalar;
  typedef TargetType CoeffReturnType;
  // Scalar type of the operand, i.e. the type we convert FROM.
  typedef typename internal::remove_all<typename internal::traits<ArgType>::Scalar>::type SrcType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef typename PacketType<SrcType, Device>::type PacketSourceType;
  static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
  // True when no actual cast is required (source and target types match).
  static const bool IsSameType = internal::is_same<TargetType, SrcType>::value;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  enum {
    IsAligned = false,
    PacketAccess =
    #ifndef EIGEN_USE_SYCL
      // Always true off-SYCL: packet() falls back to a scalar conversion loop
      // when no vectorized cast exists (see PacketConv above).
      true,
    #else
      TensorEvaluator<ArgType, Device>::PacketAccess &
      internal::type_casting_traits<SrcType, TargetType>::VectorizedCast,
    #endif
    BlockAccess = TensorEvaluator<ArgType, Device>::BlockAccess,
    PreferBlockAccess = TensorEvaluator<ArgType, Device>::PreferBlockAccess,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    RawAccess = false
  };

  static const int NumDims = internal::array_size<Dimensions>::value;

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;

  typedef typename TensorEvaluator<const ArgType, Device>::TensorBlock
      ArgTensorBlock;

  // Wraps an argument block expression into a conversion expression, so that
  // block evaluation of the operand can be reused for the converted tensor.
  struct TensorConversionOpBlockFactory {
    template <typename ArgXprType>
    struct XprType {
      typedef TensorConversionOp<TargetType, const ArgXprType> type;
    };

    template <typename ArgXprType>
    typename XprType<ArgXprType>::type expr(const ArgXprType& expr) const {
      return typename XprType<ArgXprType>::type(expr);
    }
  };

  typedef internal::TensorUnaryExprBlock<TensorConversionOpBlockFactory,
                                         ArgTensorBlock>
      TensorBlock;
  //===--------------------------------------------------------------------===//

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
    : m_impl(op.expression(), device)
  {
  }

  // Dimensions are those of the operand; the conversion is element-wise.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_impl.dimensions(); }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data)
  {
    // When no cast is needed the operand may evaluate directly into `data`;
    // otherwise it evaluates into its own buffer (see ConversionSubExprEval).
    return ConversionSubExprEval<IsSameType, TensorEvaluator<ArgType, Device>, EvaluatorPointerType>::run(m_impl, data);
  }

#ifdef EIGEN_USE_THREADS
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
      EvaluatorPointerType data, EvalSubExprsCallback done) {
    ConversionSubExprEvalAsync<IsSameType, TensorEvaluator<ArgType, Device>,
                               EvaluatorPointerType,
        EvalSubExprsCallback>::run(m_impl, data, std::move(done));
  }
#endif

  EIGEN_STRONG_INLINE void cleanup()
  {
    m_impl.cleanup();
  }

  // Returns coefficient `index` of the operand converted to TargetType.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    return internal::CoeffConv<SrcType, TargetType, IsSameType>::run(m_impl,index);
  }

  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType
  packet(Index index) const {
    // If we are not going to do the cast, we just need to check that base
    // TensorEvaluator has packet access. Otherwise we also need to make sure,
    // that we have an implementation of vectorized cast.
    const bool Vectorizable =
        IsSameType
        ? TensorEvaluator<ArgType, Device>::PacketAccess
        : int(TensorEvaluator<ArgType, Device>::PacketAccess) &
          int(internal::type_casting_traits<SrcType, TargetType>::VectorizedCast);

    return internal::PacketConv<PacketSourceType, PacketReturnType, LoadMode,
                                Vectorizable, IsSameType>::run(m_impl, index);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
  costPerCoeff(bool vectorized) const {
    const double cast_cost = TensorOpCost::CastCost<SrcType, TargetType>();
    if (vectorized) {
      // Scale the operand's cost by how many source packets feed one target
      // packet, and charge the per-coefficient cast on top.
      const double SrcCoeffRatio =
          internal::type_casting_traits<SrcType, TargetType>::SrcCoeffRatio;
      const double TgtCoeffRatio =
          internal::type_casting_traits<SrcType, TargetType>::TgtCoeffRatio;
      return m_impl.costPerCoeff(vectorized) * (SrcCoeffRatio / PacketSize) +
          TensorOpCost(0, 0, TgtCoeffRatio * (cast_cost / PacketSize));
    } else {
      return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, cast_cost);
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  internal::TensorBlockResourceRequirements getResourceRequirements() const {
    return m_impl.getResourceRequirements();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
  block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
        bool /*root_of_expr_ast*/ = false) const {
    // Delegate block evaluation to the operand and wrap the result in a
    // conversion expression via the block factory.
    return TensorBlock(m_impl.block(desc, scratch),
                       TensorConversionOpBlockFactory());
  }

  // The conversion node has no materialized output buffer of its own.
  EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; }

  const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
    m_impl.bind(cgh);
  }
#endif

 protected:
  TensorEvaluator<ArgType, Device> m_impl;
};
452
453} // end namespace Eigen
454
455#endif // EIGEN_CXX11_TENSOR_TENSOR_CONVERSION_H
The tensor base class.
Definition TensorForwardDeclarations.h:56
Tensor conversion class. This class makes it possible to vectorize type casting operations when the n...
Definition TensorConversion.h:176
Namespace containing all symbols from the Eigen library.
EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
The tensor evaluator class.
Definition TensorEvaluator.h:27