#ifndef EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H
#define EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H

namespace Eigen {
namespace internal {
template<DenseIndex Rows, DenseIndex Cols, typename XprType>
struct traits<TensorImagePatchOp<Rows, Cols, XprType> > : public traits<XprType>
{
  typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions + 1;
  static const int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};
template<DenseIndex Rows, DenseIndex Cols, typename XprType>
struct eval<TensorImagePatchOp<Rows, Cols, XprType>, Eigen::Dense>
{
  typedef const TensorImagePatchOp<Rows, Cols, XprType>& type;
};
template<DenseIndex Rows, DenseIndex Cols, typename XprType>
struct nested<TensorImagePatchOp<Rows, Cols, XprType>, 1, typename eval<TensorImagePatchOp<Rows, Cols, XprType> >::type>
{
  typedef TensorImagePatchOp<Rows, Cols, XprType> type;
};
// Copies 'num_coeff_to_copy' coefficients from the evaluated input expression
// into the destination buffer, one coefficient at a time.
template <typename Self, bool Vectorizable>
struct ImagePatchCopyOp {
  typedef typename Self::Index Index;
  typedef typename Self::Scalar Scalar;
  typedef typename Self::Impl Impl;
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(
      const Self& self, const Index num_coeff_to_copy, const Index dst_index,
      Scalar* dst_data, const Index src_index) {
    const Impl& impl = self.impl();
    for (Index i = 0; i < num_coeff_to_copy; ++i) {
      dst_data[dst_index + i] = impl.coeff(src_index + i);
    }
  }
};
// Vectorized specialization: copies full packets where possible and falls
// back to scalar copies for the remainder.
template <typename Self>
struct ImagePatchCopyOp<Self, true> {
  typedef typename Self::Index Index;
  typedef typename Self::Scalar Scalar;
  typedef typename Self::Impl Impl;
  typedef typename packet_traits<Scalar>::type Packet;
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(
      const Self& self, const Index num_coeff_to_copy, const Index dst_index,
      Scalar* dst_data, const Index src_index) {
    const Impl& impl = self.impl();
    const Index packet_size = internal::unpacket_traits<Packet>::size;
    const Index vectorized_size =
        (num_coeff_to_copy / packet_size) * packet_size;
    for (Index i = 0; i < vectorized_size; i += packet_size) {
      Packet p = impl.template packet<Unaligned>(src_index + i);
      internal::pstoret<Scalar, Packet, Unaligned>(dst_data + dst_index + i, p);
    }
    for (Index i = vectorized_size; i < num_coeff_to_copy; ++i) {
      dst_data[dst_index + i] = impl.coeff(src_index + i);
    }
  }
};
// Fills 'num_coeff_to_pad' coefficients of the destination buffer with the
// padding value, using packet stores for the vectorizable prefix.
template <typename Self>
struct ImagePatchPaddingOp {
  typedef typename Self::Index Index;
  typedef typename Self::Scalar Scalar;
  typedef typename packet_traits<Scalar>::type Packet;
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(
      const Index num_coeff_to_pad, const Scalar padding_value,
      const Index dst_index, Scalar* dst_data) {
    const Index packet_size = internal::unpacket_traits<Packet>::size;
    const Packet padded_packet = internal::pset1<Packet>(padding_value);
    const Index vectorized_size =
        (num_coeff_to_pad / packet_size) * packet_size;
    for (Index i = 0; i < vectorized_size; i += packet_size) {
      internal::pstoret<Scalar, Packet, Unaligned>(dst_data + dst_index + i,
                                                   padded_packet);
    }
    for (Index i = vectorized_size; i < num_coeff_to_pad; ++i) {
      dst_data[dst_index + i] = padding_value;
    }
  }
};

}  // end namespace internal
/** \class TensorImagePatch
  * \brief Patch extraction specialized for image processing.
  * This assumes that the input has at least 3 dimensions.
  */
template <DenseIndex Rows, DenseIndex Cols, typename XprType>
class TensorImagePatchOp : public TensorBase<TensorImagePatchOp<Rows, Cols, XprType>, ReadOnlyAccessors>
{
  public:
  typedef typename Eigen::internal::traits<TensorImagePatchOp>::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorImagePatchOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorImagePatchOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorImagePatchOp>::Index Index;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorImagePatchOp(
      const XprType& expr, DenseIndex patch_rows, DenseIndex patch_cols,
      DenseIndex row_strides, DenseIndex col_strides,
      DenseIndex in_row_strides, DenseIndex in_col_strides,
      DenseIndex row_inflate_strides, DenseIndex col_inflate_strides,
      PaddingType padding_type, Scalar padding_value)
      : m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
        m_row_strides(row_strides), m_col_strides(col_strides),
        m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
        m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
        m_padding_explicit(false), m_padding_top(0), m_padding_bottom(0), m_padding_left(0), m_padding_right(0),
        m_padding_type(padding_type), m_padding_value(padding_value) {}
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorImagePatchOp(
      const XprType& expr, DenseIndex patch_rows, DenseIndex patch_cols,
      DenseIndex row_strides, DenseIndex col_strides,
      DenseIndex in_row_strides, DenseIndex in_col_strides,
      DenseIndex row_inflate_strides, DenseIndex col_inflate_strides,
      DenseIndex padding_top, DenseIndex padding_bottom,
      DenseIndex padding_left, DenseIndex padding_right,
      Scalar padding_value)
      : m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
        m_row_strides(row_strides), m_col_strides(col_strides),
        m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
        m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
        m_padding_explicit(true), m_padding_top(padding_top), m_padding_bottom(padding_bottom),
        m_padding_left(padding_left), m_padding_right(padding_right),
        m_padding_type(PADDING_VALID), m_padding_value(padding_value) {}
  DenseIndex patch_rows() const { return m_patch_rows; }
  DenseIndex patch_cols() const { return m_patch_cols; }
  DenseIndex row_strides() const { return m_row_strides; }
  DenseIndex col_strides() const { return m_col_strides; }
  DenseIndex in_row_strides() const { return m_in_row_strides; }
  DenseIndex in_col_strides() const { return m_in_col_strides; }
  DenseIndex row_inflate_strides() const { return m_row_inflate_strides; }
  DenseIndex col_inflate_strides() const { return m_col_inflate_strides; }
  bool padding_explicit() const { return m_padding_explicit; }
  DenseIndex padding_top() const { return m_padding_top; }
  DenseIndex padding_bottom() const { return m_padding_bottom; }
  DenseIndex padding_left() const { return m_padding_left; }
  DenseIndex padding_right() const { return m_padding_right; }
  PaddingType padding_type() const { return m_padding_type; }
  Scalar padding_value() const { return m_padding_value; }

  const typename internal::remove_all<typename XprType::Nested>::type&
  expression() const { return m_xpr; }
  protected:
  typename XprType::Nested m_xpr;
  const DenseIndex m_patch_rows;
  const DenseIndex m_patch_cols;
  const DenseIndex m_row_strides;
  const DenseIndex m_col_strides;
  const DenseIndex m_in_row_strides;
  const DenseIndex m_in_col_strides;
  const DenseIndex m_row_inflate_strides;
  const DenseIndex m_col_inflate_strides;
  const bool m_padding_explicit;
  const DenseIndex m_padding_top;
  const DenseIndex m_padding_bottom;
  const DenseIndex m_padding_left;
  const DenseIndex m_padding_right;
  const PaddingType m_padding_type;
  const Scalar m_padding_value;
};
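// Usage sketch (illustrative, not part of the header): this op is normally
// created through TensorBase::extract_image_patches(). Assuming a ColMajor
// input of shape (depth, rows, cols, batch), extracting 3x3 patches with the
// default unit strides and PADDING_SAME yields a 5-d tensor of shape
// (depth, patch_rows, patch_cols, outputRows * outputCols, batch):
//
//   Eigen::Tensor<float, 4> input(4, 32, 32, 8);
//   input.setRandom();
//   Eigen::Tensor<float, 5> patches =
//       input.extract_image_patches(/*patch_rows=*/3, /*patch_cols=*/3);
//   // patches.dimension(3) == 32 * 32 with PADDING_SAME and unit strides.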
// Eval as rvalue.
template<DenseIndex Rows, DenseIndex Cols, typename ArgType, typename Device>
struct TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device>
{
  typedef TensorImagePatchOp<Rows, Cols, ArgType> XprType;
  typedef typename XprType::Index Index;
  static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  static const int NumDims = NumInputDims + 1;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;
  enum {
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess = false,
    PreferBlockAccess = true,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,
    RawAccess = false
  };

  typedef internal::TensorBlockNotImplemented TensorBlock;
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_device(device), m_impl(op.expression(), device)
  {
    EIGEN_STATIC_ASSERT((NumDims >= 4), YOU_MADE_A_PROGRAMMING_MISTAKE);

    m_paddingValue = op.padding_value();

    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();

    // Cache the input sizes. The depth is the innermost dimension in ColMajor
    // order and the outermost one in RowMajor order.
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_inputDepth = input_dims[0];
      m_inputRows = input_dims[1];
      m_inputCols = input_dims[2];
    } else {
      m_inputDepth = input_dims[NumInputDims-1];
      m_inputRows = input_dims[NumInputDims-2];
      m_inputCols = input_dims[NumInputDims-3];
    }

    m_row_strides = op.row_strides();
    m_col_strides = op.col_strides();

    // Input strides and effective input/patch sizes.
    m_in_row_strides = op.in_row_strides();
    m_in_col_strides = op.in_col_strides();
    m_row_inflate_strides = op.row_inflate_strides();
    m_col_inflate_strides = op.col_inflate_strides();

    // The "effective" input rows/cols are the input sizes after inflating
    // them with zeros; the "effective" patch rows/cols account for the
    // in-patch (dilation) strides.
    m_input_rows_eff = (m_inputRows - 1) * m_row_inflate_strides + 1;
    m_input_cols_eff = (m_inputCols - 1) * m_col_inflate_strides + 1;
    m_patch_rows_eff = op.patch_rows() + (op.patch_rows() - 1) * (m_in_row_strides - 1);
    m_patch_cols_eff = op.patch_cols() + (op.patch_cols() - 1) * (m_in_col_strides - 1);
    if (op.padding_explicit()) {
      m_outputRows = numext::ceil((m_input_rows_eff + op.padding_top() + op.padding_bottom() - m_patch_rows_eff + 1.f) / static_cast<float>(m_row_strides));
      m_outputCols = numext::ceil((m_input_cols_eff + op.padding_left() + op.padding_right() - m_patch_cols_eff + 1.f) / static_cast<float>(m_col_strides));
      m_rowPaddingTop = op.padding_top();
      m_colPaddingLeft = op.padding_left();
    } else {
      // Compute the padding from the padding type.
      switch (op.padding_type()) {
        case PADDING_VALID:
          m_outputRows = numext::ceil((m_input_rows_eff - m_patch_rows_eff + 1.f) / static_cast<float>(m_row_strides));
          m_outputCols = numext::ceil((m_input_cols_eff - m_patch_cols_eff + 1.f) / static_cast<float>(m_col_strides));
          m_rowPaddingTop = numext::maxi<Index>(0, ((m_outputRows - 1) * m_row_strides + m_patch_rows_eff - m_input_rows_eff) / 2);
          m_colPaddingLeft = numext::maxi<Index>(0, ((m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff) / 2);
          break;
        case PADDING_SAME:
          m_outputRows = numext::ceil(m_input_rows_eff / static_cast<float>(m_row_strides));
          m_outputCols = numext::ceil(m_input_cols_eff / static_cast<float>(m_col_strides));
          m_rowPaddingTop = ((m_outputRows - 1) * m_row_strides + m_patch_rows_eff - m_input_rows_eff) / 2;
          m_colPaddingLeft = ((m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff) / 2;
          m_rowPaddingTop = numext::maxi<Index>(0, m_rowPaddingTop);
          m_colPaddingLeft = numext::maxi<Index>(0, m_colPaddingLeft);
          break;
        default:
          eigen_assert(false && "unexpected padding");
      }
    }
    eigen_assert(m_outputRows > 0);
    eigen_assert(m_outputCols > 0);
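    // Worked example (illustrative numbers, not from the source): for a 32x32
    // input, a 3x3 patch, unit in-patch/inflate strides, and row/col strides
    // of 2:
    //   PADDING_VALID: outputRows = ceil((32 - 3 + 1) / 2.f) = 15
    //   PADDING_SAME:  outputRows = ceil(32 / 2.f) = 16 and
    //                  rowPaddingTop = ((16 - 1) * 2 + 3 - 32) / 2 = 0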
    // Dimensions for the result of the extraction.
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      // ColMajor: 0: depth, 1: patch_rows, 2: patch_cols,
      // 3: number of patches, 4 and beyond: anything else (such as batch).
      m_dimensions[0] = input_dims[0];
      m_dimensions[1] = op.patch_rows();
      m_dimensions[2] = op.patch_cols();
      m_dimensions[3] = m_outputRows * m_outputCols;
      for (int i = 4; i < NumDims; ++i) {
        m_dimensions[i] = input_dims[i-1];
      }
    } else {
      // RowMajor: same dimensions in reverse order.
      m_dimensions[NumDims-1] = input_dims[NumInputDims-1];
      m_dimensions[NumDims-2] = op.patch_rows();
      m_dimensions[NumDims-3] = op.patch_cols();
      m_dimensions[NumDims-4] = m_outputRows * m_outputCols;
      for (int i = NumDims-5; i >= 0; --i) {
        m_dimensions[i] = input_dims[i];
      }
    }
    // Strides for moving the patch in various dimensions.
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_colStride = m_dimensions[1];
      m_patchStride = m_colStride * m_dimensions[2] * m_dimensions[0];
      m_otherStride = m_patchStride * m_dimensions[3];
    } else {
      m_colStride = m_dimensions[NumDims-2];
      m_patchStride = m_colStride * m_dimensions[NumDims-3] * m_dimensions[NumDims-1];
      m_otherStride = m_patchStride * m_dimensions[NumDims-4];
    }

    // Strides for navigating through the input tensor.
    m_rowInputStride = m_inputDepth;
    m_colInputStride = m_inputDepth * m_inputRows;
    m_patchInputStride = m_inputDepth * m_inputRows * m_inputCols;

    // Fast divisor representations of the different strides.
    m_fastOtherStride = internal::TensorIntDivisor<Index>(m_otherStride);
    m_fastPatchStride = internal::TensorIntDivisor<Index>(m_patchStride);
    m_fastColStride = internal::TensorIntDivisor<Index>(m_colStride);
    m_fastInflateRowStride = internal::TensorIntDivisor<Index>(m_row_inflate_strides);
    m_fastInflateColStride = internal::TensorIntDivisor<Index>(m_col_inflate_strides);
    m_fastInputColsEff = internal::TensorIntDivisor<Index>(m_input_cols_eff);

    // Number of patches in the row dimension.
    m_fastOutputRows = internal::TensorIntDivisor<Index>(m_outputRows);
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_fastOutputDepth = internal::TensorIntDivisor<Index>(m_dimensions[0]);
    } else {
      m_fastOutputDepth = internal::TensorIntDivisor<Index>(m_dimensions[NumDims-1]);
    }
  }
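  // Note on the "fast" strides above: internal::TensorIntDivisor precomputes
  // a multiply-and-shift form of its divisor, so expressions such as
  // `index / m_fastPatchStride` in coeff() and packet() below avoid hardware
  // integer division. A minimal sketch (hypothetical values):
  //   internal::TensorIntDivisor<Index> fast_div(7);
  //   Index q = Index(42) / fast_div;  // q == 6, computed without idiv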
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }
#ifdef EIGEN_USE_THREADS
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
      EvaluatorPointerType, EvalSubExprsCallback done) {
    m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); });
  }
#endif  // EIGEN_USE_THREADS

  EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    // Patch index corresponding to the passed in index.
    const Index patchIndex = index / m_fastPatchStride;
    // Find the offset of the element wrt the location of the first element.
    const Index patchOffset = (index - patchIndex * m_patchStride) / m_fastOutputDepth;

    // Other ways to index this element.
    const Index otherIndex = (NumDims == 4) ? 0 : index / m_fastOtherStride;
    const Index patch2DIndex = (NumDims == 4) ? patchIndex : (index - otherIndex * m_otherStride) / m_fastPatchStride;

    // Calculate the col index in the original input tensor.
    const Index colIndex = patch2DIndex / m_fastOutputRows;
    const Index colOffset = patchOffset / m_fastColStride;
    const Index inputCol = colIndex * m_col_strides + colOffset * m_in_col_strides - m_colPaddingLeft;
    const Index origInputCol = (m_col_inflate_strides == 1) ? inputCol : ((inputCol >= 0) ? (inputCol / m_fastInflateColStride) : 0);
    if (inputCol < 0 || inputCol >= m_input_cols_eff ||
        ((m_col_inflate_strides != 1) && (inputCol != origInputCol * m_col_inflate_strides))) {
      return Scalar(m_paddingValue);
    }

    // Calculate the row index in the original input tensor.
    const Index rowIndex = patch2DIndex - colIndex * m_outputRows;
    const Index rowOffset = patchOffset - colOffset * m_colStride;
    const Index inputRow = rowIndex * m_row_strides + rowOffset * m_in_row_strides - m_rowPaddingTop;
    const Index origInputRow = (m_row_inflate_strides == 1) ? inputRow : ((inputRow >= 0) ? (inputRow / m_fastInflateRowStride) : 0);
    if (inputRow < 0 || inputRow >= m_input_rows_eff ||
        ((m_row_inflate_strides != 1) && (inputRow != origInputRow * m_row_inflate_strides))) {
      return Scalar(m_paddingValue);
    }

    const int depth_index = static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : NumDims - 1;
    const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index];

    const Index inputIndex = depth + origInputRow * m_rowInputStride + origInputCol * m_colInputStride + otherIndex * m_patchInputStride;
    return m_impl.coeff(inputIndex);
  }
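  // Illustrative trace of the decomposition in coeff() (hypothetical sizes:
  // ColMajor, NumDims == 4, depth 2, 3x3 patches, a 4x4 patch grid, so
  // dimensions are (2, 3, 3, 16), m_colStride == 3, m_patchStride == 18):
  // for index == 100, patchIndex == 5, patchOffset == (100 - 90) / 2 == 5,
  // colOffset == 1, rowOffset == 2, depth == 0, i.e. the element at
  // within-patch position (row 2, col 1) of patch #5.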
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());

    if (m_in_row_strides != 1 || m_in_col_strides != 1 || m_row_inflate_strides != 1 || m_col_inflate_strides != 1) {
      return packetWithPossibleZero(index);
    }

    const Index indices[2] = {index, index + PacketSize - 1};
    const Index patchIndex = indices[0] / m_fastPatchStride;
    if (patchIndex != indices[1] / m_fastPatchStride) {
      return packetWithPossibleZero(index);
    }
    const Index otherIndex = (NumDims == 4) ? 0 : indices[0] / m_fastOtherStride;
    eigen_assert(otherIndex == indices[1] / m_fastOtherStride);

    // Find the offset of the element wrt the location of the first element.
    const Index patchOffsets[2] = {(indices[0] - patchIndex * m_patchStride) / m_fastOutputDepth,
                                   (indices[1] - patchIndex * m_patchStride) / m_fastOutputDepth};

    const Index patch2DIndex = (NumDims == 4) ? patchIndex : (indices[0] - otherIndex * m_otherStride) / m_fastPatchStride;
    eigen_assert(patch2DIndex == (indices[1] - otherIndex * m_otherStride) / m_fastPatchStride);

    const Index colIndex = patch2DIndex / m_fastOutputRows;
    const Index colOffsets[2] = {patchOffsets[0] / m_fastColStride, patchOffsets[1] / m_fastColStride};

    // Calculate the col indices in the original input tensor.
    const Index inputCols[2] = {colIndex * m_col_strides + colOffsets[0] - m_colPaddingLeft,
                                colIndex * m_col_strides + colOffsets[1] - m_colPaddingLeft};
    if (inputCols[1] < 0 || inputCols[0] >= m_inputCols) {
      return internal::pset1<PacketReturnType>(Scalar(m_paddingValue));
    }

    if (inputCols[0] == inputCols[1]) {
      const Index rowIndex = patch2DIndex - colIndex * m_outputRows;
      const Index rowOffsets[2] = {patchOffsets[0] - colOffsets[0]*m_colStride, patchOffsets[1] - colOffsets[1]*m_colStride};
      eigen_assert(rowOffsets[0] <= rowOffsets[1]);
      // Calculate the row indices in the original input tensor.
      const Index inputRows[2] = {rowIndex * m_row_strides + rowOffsets[0] - m_rowPaddingTop,
                                  rowIndex * m_row_strides + rowOffsets[1] - m_rowPaddingTop};

      if (inputRows[1] < 0 || inputRows[0] >= m_inputRows) {
        return internal::pset1<PacketReturnType>(Scalar(m_paddingValue));
      }

      if (inputRows[0] >= 0 && inputRows[1] < m_inputRows) {
        // No padding: the whole packet maps to contiguous input coefficients.
        const int depth_index = static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : NumDims - 1;
        const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index];
        const Index inputIndex = depth + inputRows[0] * m_rowInputStride + inputCols[0] * m_colInputStride + otherIndex * m_patchInputStride;
        return m_impl.template packet<Unaligned>(inputIndex);
      }
    }

    return packetWithPossibleZero(index);
  }
  EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }

#ifdef EIGEN_USE_SYCL
  // Binds placeholder accessors to a command group handler for SYCL.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
    m_impl.bind(cgh);
  }
#endif
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowPaddingTop() const { return m_rowPaddingTop; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colPaddingLeft() const { return m_colPaddingLeft; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index outputRows() const { return m_outputRows; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index outputCols() const { return m_outputCols; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userRowStride() const { return m_row_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userColStride() const { return m_col_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userInRowStride() const { return m_in_row_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userInColStride() const { return m_in_col_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowInflateStride() const { return m_row_inflate_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colInflateStride() const { return m_col_inflate_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
  costPerCoeff(bool vectorized) const {
    // Conservative estimate of the per-coefficient index arithmetic
    // performed in coeff().
    const double compute_cost = 3 * TensorOpCost::DivCost<Index>() +
                                6 * TensorOpCost::MulCost<Index>() +
                                8 * TensorOpCost::MulCost<Index>();
    return m_impl.costPerCoeff(vectorized) +
           TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
  }

 protected:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const
  {
    EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
    for (int i = 0; i < PacketSize; ++i) {
      values[i] = coeff(index+i);
    }
    PacketReturnType rslt = internal::pload<PacketReturnType>(values);
    return rslt;
  }
  Dimensions m_dimensions;

  Index m_otherStride;
  Index m_patchStride;
  Index m_colStride;
  Index m_row_strides;
  Index m_col_strides;

  Index m_in_row_strides;
  Index m_in_col_strides;
  Index m_row_inflate_strides;
  Index m_col_inflate_strides;

  Index m_input_rows_eff;
  Index m_input_cols_eff;
  Index m_patch_rows_eff;
  Index m_patch_cols_eff;

  internal::TensorIntDivisor<Index> m_fastOtherStride;
  internal::TensorIntDivisor<Index> m_fastPatchStride;
  internal::TensorIntDivisor<Index> m_fastColStride;
  internal::TensorIntDivisor<Index> m_fastInflateRowStride;
  internal::TensorIntDivisor<Index> m_fastInflateColStride;
  internal::TensorIntDivisor<Index> m_fastInputColsEff;

  Index m_rowInputStride;
  Index m_colInputStride;
  Index m_patchInputStride;

  Index m_inputDepth;
  Index m_inputRows;
  Index m_inputCols;

  Index m_outputRows;
  Index m_outputCols;

  Index m_rowPaddingTop;
  Index m_colPaddingLeft;

  internal::TensorIntDivisor<Index> m_fastOutputRows;
  internal::TensorIntDivisor<Index> m_fastOutputDepth;

  Scalar m_paddingValue;

  const Device EIGEN_DEVICE_REF m_device;
  TensorEvaluator<ArgType, Device> m_impl;
};

} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H