#ifndef EIGEN_CXX11_TENSOR_TENSOR_STRIDING_H
#define EIGEN_CXX11_TENSOR_TENSOR_STRIDING_H

#include "./InternalHeaderCheck.h"

namespace Eigen {
namespace internal {
template <typename Strides, typename XprType>
struct traits<TensorStridingOp<Strides, XprType> > : public traits<XprType> {
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef std::remove_reference_t<Nested> Nested_;
  static constexpr int NumDimensions = XprTraits::NumDimensions;
  static constexpr int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};
template <typename Strides, typename XprType>
struct eval<TensorStridingOp<Strides, XprType>, Eigen::Dense> {
  typedef const TensorStridingOp<Strides, XprType> EIGEN_DEVICE_REF type;
};
template <typename Strides, typename XprType>
struct nested<TensorStridingOp<Strides, XprType>, 1, typename eval<TensorStridingOp<Strides, XprType> >::type> {
  typedef TensorStridingOp<Strides, XprType> type;
};

}  // end namespace internal
/** \class TensorStriding
 * \ingroup CXX11_Tensor_Module
 *
 * \brief Tensor striding class.
 */
template <typename Strides, typename XprType>
class TensorStridingOp : public TensorBase<TensorStridingOp<Strides, XprType> > {
 public:
  typedef typename Eigen::internal::traits<TensorStridingOp>::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorStridingOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorStridingOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorStridingOp>::Index Index;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorStridingOp(const XprType& expr, const Strides& dims)
      : m_xpr(expr), m_dims(dims) {}
  EIGEN_DEVICE_FUNC const Strides& strides() const { return m_dims; }

  EIGEN_DEVICE_FUNC const internal::remove_all_t<typename XprType::Nested>& expression() const { return m_xpr; }

  EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorStridingOp)

 protected:
  typename XprType::Nested m_xpr;
  const Strides m_dims;
};
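// Usage sketch (illustrative, not part of the original header): a
// TensorStridingOp is normally created through TensorBase::stride(), which
// keeps every strides()[i]-th coefficient along dimension i, so output
// dimension i has size ceil(input_dim[i] / strides[i]). The shapes below are
// examples only, and the lvalue form relies on the assignment operators
// inherited above together with the lvalue evaluator defined further down.
//
//   Eigen::Tensor<float, 2> input(10, 8);
//   input.setRandom();
//   Eigen::array<Eigen::DenseIndex, 2> strides{{2, 3}};
//
//   // Read through a strided view: result has dimensions (5, 3).
//   Eigen::Tensor<float, 2> out = input.stride(strides);
//
//   // Write through a strided view:
//   Eigen::Tensor<float, 2> dst(10, 8);
//   dst.setZero();
//   Eigen::Tensor<float, 2> src(5, 3);
//   src.setRandom();
//   dst.stride(strides) = src;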
// Eval as rvalue
template <typename Strides, typename ArgType, typename Device>
struct TensorEvaluator<const TensorStridingOp<Strides, ArgType>, Device> {
  typedef TensorStridingOp<Strides, ArgType> XprType;
  typedef typename XprType::Index Index;
  static constexpr int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static constexpr int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  static constexpr int Layout = TensorEvaluator<ArgType, Device>::Layout;
  enum {
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess = false,
    PreferBlockAccess = TensorEvaluator<ArgType, Device>::PreferBlockAccess,
    CoordAccess = false,  // to be implemented
    RawAccess = false
  };

  typedef internal::TensorBlockNotImplemented TensorBlock;
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : m_impl(op.expression(), device) {
    // Each output dimension keeps every strides()[i]-th coefficient of the
    // corresponding input dimension, hence the ceil division.
    m_dimensions = m_impl.dimensions();
    for (int i = 0; i < NumDims; ++i) {
      m_dimensions[i] = Eigen::numext::ceil(static_cast<float>(m_dimensions[i]) / op.strides()[i]);
    }

    // Precompute the flat strides of the (dense) output and of the input,
    // where the input strides are scaled by the user-supplied striding factors.
    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_outputStrides[0] = 1;
      m_inputStrides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1];
        m_inputStrides[i] = m_inputStrides[i - 1] * input_dims[i - 1];
        m_inputStrides[i - 1] *= op.strides()[i - 1];
      }
      m_inputStrides[NumDims - 1] *= op.strides()[NumDims - 1];
    } else {  // RowMajor
      m_outputStrides[NumDims - 1] = 1;
      m_inputStrides[NumDims - 1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1];
        m_inputStrides[i] = m_inputStrides[i + 1] * input_dims[i + 1];
        m_inputStrides[i + 1] *= op.strides()[i + 1];
      }
      m_inputStrides[0] *= op.strides()[0];
    }
  }
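  // Worked example (illustrative numbers, not from the original source): with
  // ColMajor layout, input dimensions (10, 8) and strides (2, 3), the
  // constructor above yields
  //   m_dimensions    = (ceil(10/2), ceil(8/3)) = (5, 3)
  //   m_outputStrides = (1, 5)            // dense strides of the output
  //   m_inputStrides  = (1*2, 10*3) = (2, 30)
  // so output coordinate (i, j) maps to input index 2*i + 30*j, i.e. input
  // coordinate (2*i, 3*j).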
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }

  EIGEN_STRONG_INLINE void cleanup() { m_impl.cleanup(); }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    return m_impl.coeff(srcCoeff(index));
  }
  template <int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index + PacketSize - 1 < dimensions().TotalSize());

    // Map the first and last coefficient of the packet back to input indices.
    Index inputIndices[] = {0, 0};
    Index indices[] = {index, index + PacketSize - 1};
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx0 = indices[0] / m_outputStrides[i];
        const Index idx1 = indices[1] / m_outputStrides[i];
        inputIndices[0] += idx0 * m_inputStrides[i];
        inputIndices[1] += idx1 * m_inputStrides[i];
        indices[0] -= idx0 * m_outputStrides[i];
        indices[1] -= idx1 * m_outputStrides[i];
      }
      inputIndices[0] += indices[0] * m_inputStrides[0];
      inputIndices[1] += indices[1] * m_inputStrides[0];
    } else {  // RowMajor
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx0 = indices[0] / m_outputStrides[i];
        const Index idx1 = indices[1] / m_outputStrides[i];
        inputIndices[0] += idx0 * m_inputStrides[i];
        inputIndices[1] += idx1 * m_inputStrides[i];
        indices[0] -= idx0 * m_outputStrides[i];
        indices[1] -= idx1 * m_outputStrides[i];
      }
      inputIndices[0] += indices[0] * m_inputStrides[NumDims - 1];
      inputIndices[1] += indices[1] * m_inputStrides[NumDims - 1];
    }
    if (inputIndices[1] - inputIndices[0] == PacketSize - 1) {
      // The packet is contiguous in the input: load it directly.
      PacketReturnType rslt = m_impl.template packet<Unaligned>(inputIndices[0]);
      return rslt;
    } else {
      // Otherwise gather the coefficients one by one into a local buffer.
      EIGEN_ALIGN_MAX std::remove_const_t<CoeffReturnType> values[PacketSize];
      values[0] = m_impl.coeff(inputIndices[0]);
      values[PacketSize - 1] = m_impl.coeff(inputIndices[1]);
      for (int i = 1; i < PacketSize - 1; ++i) {
        values[i] = coeff(index + i);
      }
      PacketReturnType rslt = internal::pload<PacketReturnType>(values);
      return rslt;
    }
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    double compute_cost = (NumDims - 1) * (TensorOpCost::AddCost<Index>() + TensorOpCost::MulCost<Index>() +
                                           TensorOpCost::DivCost<Index>()) +
                          TensorOpCost::MulCost<Index>();
    if (vectorized) {
      compute_cost *= 2;  // packet() computes two indices instead of one
    }
    const int innerDim = (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? 0 : (NumDims - 1);
    return m_impl.costPerCoeff(vectorized && m_inputStrides[innerDim] == 1) +
           // The index computation is not vectorized, but it is done once per packet.
           TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
  }
  EIGEN_DEVICE_FUNC typename Storage::Type data() const { return NULL; }

 protected:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const {
    Index inputIndex = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx = index / m_outputStrides[i];
        inputIndex += idx * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      inputIndex += index * m_inputStrides[0];
    } else {  // RowMajor
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx = index / m_outputStrides[i];
        inputIndex += idx * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      inputIndex += index * m_inputStrides[NumDims - 1];
    }
    return inputIndex;
  }
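  // Worked example continuing the (10, 8) / strides (2, 3) ColMajor case above
  // (illustrative numbers): for output index 7, i.e. output coordinate (2, 1),
  //   idx        = 7 / m_outputStrides[1] = 7 / 5 = 1
  //   inputIndex = 1 * m_inputStrides[1]  = 30, and index becomes 7 - 5 = 2
  //   inputIndex += 2 * m_inputStrides[0] = 30 + 4 = 34
  // which is input coordinate (4, 3), i.e. (2*2, 3*1), as expected.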
  Dimensions m_dimensions;
  array<Index, NumDims> m_outputStrides;
  array<Index, NumDims> m_inputStrides;
  TensorEvaluator<ArgType, Device> m_impl;
};
// Eval as lvalue
template <typename Strides, typename ArgType, typename Device>
struct TensorEvaluator<TensorStridingOp<Strides, ArgType>, Device>
    : public TensorEvaluator<const TensorStridingOp<Strides, ArgType>, Device> {
  typedef TensorStridingOp<Strides, ArgType> XprType;
  typedef TensorEvaluator<const XprType, Device> Base;
  static constexpr int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;

  static constexpr int Layout = TensorEvaluator<ArgType, Device>::Layout;
  enum {
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    PreferBlockAccess = false,
    CoordAccess = false,  // to be implemented
    RawAccess = false
  };
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : Base(op, device) {}

  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static constexpr int PacketSize = PacketType<CoeffReturnType, Device>::size;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) const {
    return this->m_impl.coeffRef(this->srcCoeff(index));
  }
  template <int StoreMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index index, const PacketReturnType& x) const {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index + PacketSize - 1 < this->dimensions().TotalSize());

    // Map the first and last coefficient of the packet back to input indices,
    // mirroring packet() in the rvalue evaluator above.
    Index inputIndices[] = {0, 0};
    Index indices[] = {index, index + PacketSize - 1};
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx0 = indices[0] / this->m_outputStrides[i];
        const Index idx1 = indices[1] / this->m_outputStrides[i];
        inputIndices[0] += idx0 * this->m_inputStrides[i];
        inputIndices[1] += idx1 * this->m_inputStrides[i];
        indices[0] -= idx0 * this->m_outputStrides[i];
        indices[1] -= idx1 * this->m_outputStrides[i];
      }
      inputIndices[0] += indices[0] * this->m_inputStrides[0];
      inputIndices[1] += indices[1] * this->m_inputStrides[0];
    } else {  // RowMajor
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx0 = indices[0] / this->m_outputStrides[i];
        const Index idx1 = indices[1] / this->m_outputStrides[i];
        inputIndices[0] += idx0 * this->m_inputStrides[i];
        inputIndices[1] += idx1 * this->m_inputStrides[i];
        indices[0] -= idx0 * this->m_outputStrides[i];
        indices[1] -= idx1 * this->m_outputStrides[i];
      }
      inputIndices[0] += indices[0] * this->m_inputStrides[NumDims - 1];
      inputIndices[1] += indices[1] * this->m_inputStrides[NumDims - 1];
    }
    if (inputIndices[1] - inputIndices[0] == PacketSize - 1) {
      // The packet is contiguous in the input: store it directly.
      this->m_impl.template writePacket<Unaligned>(inputIndices[0], x);
    } else {
      // Otherwise scatter the coefficients one by one from a local buffer.
      EIGEN_ALIGN_MAX Scalar values[PacketSize];
      internal::pstore<Scalar, PacketReturnType>(values, x);
      this->m_impl.coeffRef(inputIndices[0]) = values[0];
      this->m_impl.coeffRef(inputIndices[1]) = values[PacketSize - 1];
      for (int i = 1; i < PacketSize - 1; ++i) {
        this->coeffRef(index + i) = values[i];
      }
    }
  }
};

}  // end namespace Eigen

#endif  // EIGEN_CXX11_TENSOR_TENSOR_STRIDING_H