#ifndef EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H
#define EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H

#include "./InternalHeaderCheck.h"

namespace Eigen {
namespace internal {
template <typename Generator, typename XprType>
struct traits<TensorGeneratorOp<Generator, XprType> > : public traits<XprType> {
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef std::remove_reference_t<Nested> Nested_;
  static constexpr int NumDimensions = XprTraits::NumDimensions;
  static constexpr int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};
template <typename Generator, typename XprType>
struct eval<TensorGeneratorOp<Generator, XprType>, Eigen::Dense> {
  typedef const TensorGeneratorOp<Generator, XprType>& type;
};
template <typename Generator, typename XprType>
struct nested<TensorGeneratorOp<Generator, XprType>, 1, typename eval<TensorGeneratorOp<Generator, XprType> >::type> {
  typedef TensorGeneratorOp<Generator, XprType> type;
};

}  // end namespace internal
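/** \class TensorGeneratorOp
 * \brief Tensor generator class.
 */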
template <typename Generator, typename XprType>
class TensorGeneratorOp : public TensorBase<TensorGeneratorOp<Generator, XprType>, ReadOnlyAccessors> {
 public:
  typedef typename Eigen::internal::traits<TensorGeneratorOp>::Scalar Scalar;
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorGeneratorOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorGeneratorOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorGeneratorOp>::Index Index;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorGeneratorOp(const XprType& expr, const Generator& generator)
      : m_xpr(expr), m_generator(generator) {}

  EIGEN_DEVICE_FUNC const Generator& generator() const { return m_generator; }

  EIGEN_DEVICE_FUNC const internal::remove_all_t<typename XprType::Nested>& expression() const { return m_xpr; }

 protected:
  typename XprType::Nested m_xpr;
  const Generator m_generator;
};
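// Usage sketch: a generator is any functor callable with an array of
// per-dimension coordinates; it is typically wired up through
// TensorBase::generate(). `LinearIndexGenerator` below is a hypothetical
// example functor, not part of this header:
//
//   struct LinearIndexGenerator {
//     float operator()(const Eigen::array<Eigen::Index, 2>& coords) const {
//       return static_cast<float>(coords[0] + 10 * coords[1]);
//     }
//   };
//
//   Eigen::Tensor<float, 2> input(3, 4);
//   Eigen::Tensor<float, 2> generated = input.generate(LinearIndexGenerator());
//   // generated(i, j) == i + 10 * j; only input's dimensions are used.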
// Eval as rvalue
template <typename Generator, typename ArgType, typename Device>
struct TensorEvaluator<const TensorGeneratorOp<Generator, ArgType>, Device> {
  typedef TensorGeneratorOp<Generator, ArgType> XprType;
  typedef typename XprType::Index Index;
  typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
  static constexpr int NumDims = internal::array_size<Dimensions>::value;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;
  static constexpr int Layout = TensorEvaluator<ArgType, Device>::Layout;
  enum {
    IsAligned = false,
    PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
    BlockAccess = true,
    PreferBlockAccess = true,
    CoordAccess = false,
    RawAccess = false
  };
  typedef internal::TensorIntDivisor<Index> IndexDivisor;

  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;

  typedef typename internal::TensorMaterializedBlock<CoeffReturnType, NumDims, Layout, Index> TensorBlock;
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_device(device), m_generator(op.generator()) {
    TensorEvaluator<ArgType, Device> argImpl(op.expression(), device);
    m_dimensions = argImpl.dimensions();

    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_strides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_strides[i] = m_strides[i - 1] * m_dimensions[i - 1];
        if (m_strides[i] != 0) m_fast_strides[i] = IndexDivisor(m_strides[i]);
      }
    } else {
      m_strides[NumDims - 1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_strides[i] = m_strides[i + 1] * m_dimensions[i + 1];
        if (m_strides[i] != 0) m_fast_strides[i] = IndexDivisor(m_strides[i]);
      }
    }
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) { return true; }
  EIGEN_STRONG_INLINE void cleanup() {}
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    array<Index, NumDims> coords;
    extract_coordinates(index, coords);
    return m_generator(coords);
  }
  template <int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
    const int packetSize = PacketType<CoeffReturnType, Device>::size;
    eigen_assert(index + packetSize - 1 < dimensions().TotalSize());

    EIGEN_ALIGN_MAX std::remove_const_t<CoeffReturnType> values[packetSize];
    for (int i = 0; i < packetSize; ++i) {
      values[i] = coeff(index + i);
    }
    PacketReturnType rslt = internal::pload<PacketReturnType>(values);
    return rslt;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE internal::TensorBlockResourceRequirements getResourceRequirements() const {
    const size_t target_size = m_device.firstLevelCacheSize();
    return internal::TensorBlockResourceRequirements::skewed<Scalar>(target_size);
  }
  struct BlockIteratorState {
    Index stride;
    Index span;
    Index size;
    Index count;
  };
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
                                                          bool /*root_of_expr_ast*/ = false) const {
    static const bool is_col_major = static_cast<int>(Layout) == static_cast<int>(ColMajor);

    // Compute spatial coordinates for the first block element.
    array<Index, NumDims> coords;
    extract_coordinates(desc.offset(), coords);
    array<Index, NumDims> initial_coords = coords;

    // Offset in the output block buffer.
    Index offset = 0;

    // Initialize the output block iterator state. Dimensions in this array
    // are always in inner-most -> outer-most order (col major layout).
    array<BlockIteratorState, NumDims> it;
    for (int i = 0; i < NumDims; ++i) {
      const int dim = is_col_major ? i : NumDims - 1 - i;
      it[i].size = desc.dimension(dim);
      it[i].stride = i == 0 ? 1 : (it[i - 1].size * it[i - 1].stride);
      it[i].span = it[i].stride * (it[i].size - 1);
      it[i].count = 0;
    }
    eigen_assert(it[0].stride == 1);

    // Prepare storage for the materialized generator result.
    const typename TensorBlock::Storage block_storage = TensorBlock::prepareStorage(desc, scratch);

    CoeffReturnType* block_buffer = block_storage.data();

    static const int packet_size = PacketType<CoeffReturnType, Device>::size;

    static const int inner_dim = is_col_major ? 0 : NumDims - 1;
    const Index inner_dim_size = it[0].size;
    const Index inner_dim_vectorized = inner_dim_size - packet_size;

    while (it[NumDims - 1].count < it[NumDims - 1].size) {
      Index i = 0;
      // Generate data for the vectorized part of the inner-most dimension.
      for (; i <= inner_dim_vectorized; i += packet_size) {
        for (Index j = 0; j < packet_size; ++j) {
          array<Index, NumDims> j_coords = coords;  // Break loop dependence.
          j_coords[inner_dim] += j;
          *(block_buffer + offset + i + j) = m_generator(j_coords);
        }
        coords[inner_dim] += packet_size;
      }
      // Finalize the non-vectorized remainder of the inner-most dimension.
      for (; i < inner_dim_size; ++i) {
        *(block_buffer + offset + i) = m_generator(coords);
        coords[inner_dim]++;
      }
      coords[inner_dim] = initial_coords[inner_dim];

      // For a 1d tensor we need to generate only one inner-most dimension.
      if (NumDims == 1) break;

      // Update the output buffer offset and the coordinates.
      for (i = 1; i < NumDims; ++i) {
        if (++it[i].count < it[i].size) {
          offset += it[i].stride;
          coords[is_col_major ? i : NumDims - 1 - i]++;
          break;
        }
        if (i != NumDims - 1) it[i].count = 0;
        coords[is_col_major ? i : NumDims - 1 - i] = initial_coords[is_col_major ? i : NumDims - 1 - i];
        offset -= it[i].span;
      }
    }

    return block_storage.AsTensorMaterializedBlock();
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const {
    return TensorOpCost(0, 0, TensorOpCost::AddCost<Scalar>() + TensorOpCost::MulCost<Scalar>());
  }

  EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; }
 protected:
  // Converts a linear index into per-dimension coordinates using the
  // precomputed (fast) strides, e.g. for a col-major 3x4 tensor the strides
  // are {1, 3} and linear index 7 decomposes into coords {1, 2}.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void extract_coordinates(Index index, array<Index, NumDims>& coords) const {
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx = index / m_fast_strides[i];
        index -= idx * m_strides[i];
        coords[i] = idx;
      }
      coords[0] = index;
    } else {
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx = index / m_fast_strides[i];
        index -= idx * m_strides[i];
        coords[i] = idx;
      }
      coords[NumDims - 1] = index;
    }
  }
  const Device EIGEN_DEVICE_REF m_device;
  Dimensions m_dimensions;
  array<Index, NumDims> m_strides;
  array<IndexDivisor, NumDims> m_fast_strides;
  Generator m_generator;
};

}  // end namespace Eigen

#endif  // EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H