#ifndef EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H
#define EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {
template <DenseIndex Rows, DenseIndex Cols, typename XprType>
struct traits<TensorImagePatchOp<Rows, Cols, XprType> > : public traits<XprType> {
  typedef std::remove_const_t<typename XprType::Scalar> Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef std::remove_reference_t<Nested> Nested_;
  // Patch extraction adds one dimension (the patch count) to the input rank.
  static constexpr int NumDimensions = XprTraits::NumDimensions + 1;
  static constexpr int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};
template <DenseIndex Rows, DenseIndex Cols, typename XprType>
struct eval<TensorImagePatchOp<Rows, Cols, XprType>, Eigen::Dense> {
  typedef const TensorImagePatchOp<Rows, Cols, XprType>& type;
};
template <DenseIndex Rows, DenseIndex Cols, typename XprType>
struct nested<TensorImagePatchOp<Rows, Cols, XprType>, 1,
              typename eval<TensorImagePatchOp<Rows, Cols, XprType> >::type> {
  typedef TensorImagePatchOp<Rows, Cols, XprType> type;
};
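// Copies `num_coeff_to_copy` consecutive coefficients from the input
// evaluator into the destination buffer, one coefficient at a time.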
template <typename Self, bool Vectorizable>
struct ImagePatchCopyOp {
  typedef typename Self::Index Index;
  typedef typename Self::Scalar Scalar;
  typedef typename Self::Impl Impl;
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(const Self& self, const Index num_coeff_to_copy,
                                                        const Index dst_index, Scalar* dst_data,
                                                        const Index src_index) {
    const Impl& impl = self.impl();
    for (Index i = 0; i < num_coeff_to_copy; ++i) {
      dst_data[dst_index + i] = impl.coeff(src_index + i);
    }
  }
};
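// Vectorized specialization: copies whole packets where possible and falls
// back to scalar copies for the remaining tail.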
template <typename Self>
struct ImagePatchCopyOp<Self, true> {
  typedef typename Self::Index Index;
  typedef typename Self::Scalar Scalar;
  typedef typename Self::Impl Impl;
  typedef typename packet_traits<Scalar>::type Packet;
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(const Self& self, const Index num_coeff_to_copy,
                                                        const Index dst_index, Scalar* dst_data,
                                                        const Index src_index) {
    const Impl& impl = self.impl();
    const Index packet_size = internal::unpacket_traits<Packet>::size;
    const Index vectorized_size = (num_coeff_to_copy / packet_size) * packet_size;
    for (Index i = 0; i < vectorized_size; i += packet_size) {
      Packet p = impl.template packet<Unaligned>(src_index + i);
      internal::pstoret<Scalar, Packet, Unaligned>(dst_data + dst_index + i, p);
    }
    for (Index i = vectorized_size; i < num_coeff_to_copy; ++i) {
      dst_data[dst_index + i] = impl.coeff(src_index + i);
    }
  }
};
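// Fills `num_coeff_to_pad` coefficients of the destination buffer with the
// padding value, using packet stores for the bulk of the range.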
template <typename Self>
struct ImagePatchPaddingOp {
  typedef typename Self::Index Index;
  typedef typename Self::Scalar Scalar;
  typedef typename packet_traits<Scalar>::type Packet;
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(const Index num_coeff_to_pad, const Scalar padding_value,
                                                        const Index dst_index, Scalar* dst_data) {
    const Index packet_size = internal::unpacket_traits<Packet>::size;
    const Packet padded_packet = internal::pset1<Packet>(padding_value);
    const Index vectorized_size = (num_coeff_to_pad / packet_size) * packet_size;
    for (Index i = 0; i < vectorized_size; i += packet_size) {
      internal::pstoret<Scalar, Packet, Unaligned>(dst_data + dst_index + i, padded_packet);
    }
    for (Index i = vectorized_size; i < num_coeff_to_pad; ++i) {
      dst_data[dst_index + i] = padding_value;
    }
  }
};

}  // end namespace internal
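/** \class TensorImagePatch
 * \brief Patch extraction specialized for image processing.
 *
 * This assumes that the input has at least 3 dimensions, ordered as
 * (channels, rows, cols) for ColMajor (reversed for RowMajor), optionally
 * followed by further dimensions such as time or batch.
 *
 * A minimal usage sketch (assuming the extract_image_patches() entry point
 * on tensor expressions, which constructs this op): extracting 3x3 patches
 * from a ColMajor tensor of shape (depth, rows, cols, batch) with the
 * default stride of 1 and PADDING_SAME yields one patch per input position:
 *
 *   Eigen::Tensor<float, 4> input(2, 5, 5, 7);
 *   Eigen::Tensor<float, 5> patches = input.extract_image_patches(3, 3);
 *   // patches has dimensions (2, 3, 3, 5 * 5, 7):
 *   // depth, patch_rows, patch_cols, number of patches, batch.
 */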
template <DenseIndex Rows, DenseIndex Cols, typename XprType>
class TensorImagePatchOp : public TensorBase<TensorImagePatchOp<Rows, Cols, XprType>, ReadOnlyAccessors> {
 public:
  typedef typename Eigen::internal::traits<TensorImagePatchOp>::Scalar Scalar;
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorImagePatchOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorImagePatchOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorImagePatchOp>::Index Index;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorImagePatchOp(const XprType& expr, DenseIndex patch_rows,
                                                           DenseIndex patch_cols, DenseIndex row_strides,
                                                           DenseIndex col_strides, DenseIndex in_row_strides,
                                                           DenseIndex in_col_strides, DenseIndex row_inflate_strides,
                                                           DenseIndex col_inflate_strides, PaddingType padding_type,
                                                           Scalar padding_value)
      : m_xpr(expr),
        m_patch_rows(patch_rows),
        m_patch_cols(patch_cols),
        m_row_strides(row_strides),
        m_col_strides(col_strides),
        m_in_row_strides(in_row_strides),
        m_in_col_strides(in_col_strides),
        m_row_inflate_strides(row_inflate_strides),
        m_col_inflate_strides(col_inflate_strides),
        m_padding_explicit(false),
        m_padding_top(0),
        m_padding_bottom(0),
        m_padding_left(0),
        m_padding_right(0),
        m_padding_type(padding_type),
        m_padding_value(padding_value) {}
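  // Overload taking explicit per-side padding amounts; the padding type is
  // recorded as PADDING_VALID since the explicit values take over.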
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorImagePatchOp(const XprType& expr, DenseIndex patch_rows,
                                                           DenseIndex patch_cols, DenseIndex row_strides,
                                                           DenseIndex col_strides, DenseIndex in_row_strides,
                                                           DenseIndex in_col_strides, DenseIndex row_inflate_strides,
                                                           DenseIndex col_inflate_strides, DenseIndex padding_top,
                                                           DenseIndex padding_bottom, DenseIndex padding_left,
                                                           DenseIndex padding_right, Scalar padding_value)
      : m_xpr(expr),
        m_patch_rows(patch_rows),
        m_patch_cols(patch_cols),
        m_row_strides(row_strides),
        m_col_strides(col_strides),
        m_in_row_strides(in_row_strides),
        m_in_col_strides(in_col_strides),
        m_row_inflate_strides(row_inflate_strides),
        m_col_inflate_strides(col_inflate_strides),
        m_padding_explicit(true),
        m_padding_top(padding_top),
        m_padding_bottom(padding_bottom),
        m_padding_left(padding_left),
        m_padding_right(padding_right),
        m_padding_type(PADDING_VALID),
        m_padding_value(padding_value) {}
  EIGEN_DEVICE_FUNC DenseIndex patch_rows() const { return m_patch_rows; }
  EIGEN_DEVICE_FUNC DenseIndex patch_cols() const { return m_patch_cols; }
  EIGEN_DEVICE_FUNC DenseIndex row_strides() const { return m_row_strides; }
  EIGEN_DEVICE_FUNC DenseIndex col_strides() const { return m_col_strides; }
  EIGEN_DEVICE_FUNC DenseIndex in_row_strides() const { return m_in_row_strides; }
  EIGEN_DEVICE_FUNC DenseIndex in_col_strides() const { return m_in_col_strides; }
  EIGEN_DEVICE_FUNC DenseIndex row_inflate_strides() const { return m_row_inflate_strides; }
  EIGEN_DEVICE_FUNC DenseIndex col_inflate_strides() const { return m_col_inflate_strides; }
  EIGEN_DEVICE_FUNC bool padding_explicit() const { return m_padding_explicit; }
  EIGEN_DEVICE_FUNC DenseIndex padding_top() const { return m_padding_top; }
  EIGEN_DEVICE_FUNC DenseIndex padding_bottom() const { return m_padding_bottom; }
  EIGEN_DEVICE_FUNC DenseIndex padding_left() const { return m_padding_left; }
  EIGEN_DEVICE_FUNC DenseIndex padding_right() const { return m_padding_right; }
  EIGEN_DEVICE_FUNC PaddingType padding_type() const { return m_padding_type; }
  EIGEN_DEVICE_FUNC Scalar padding_value() const { return m_padding_value; }

  EIGEN_DEVICE_FUNC const internal::remove_all_t<typename XprType::Nested>& expression() const { return m_xpr; }
 protected:
  typename XprType::Nested m_xpr;
  const DenseIndex m_patch_rows;
  const DenseIndex m_patch_cols;
  const DenseIndex m_row_strides;
  const DenseIndex m_col_strides;
  const DenseIndex m_in_row_strides;
  const DenseIndex m_in_col_strides;
  const DenseIndex m_row_inflate_strides;
  const DenseIndex m_col_inflate_strides;
  const bool m_padding_explicit;
  const DenseIndex m_padding_top;
  const DenseIndex m_padding_bottom;
  const DenseIndex m_padding_left;
  const DenseIndex m_padding_right;
  const PaddingType m_padding_type;
  const Scalar m_padding_value;
};
// Eval as rvalue
template <DenseIndex Rows, DenseIndex Cols, typename ArgType, typename Device>
struct TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device> {
  typedef TensorImagePatchOp<Rows, Cols, ArgType> XprType;
  typedef typename XprType::Index Index;
  static constexpr int NumInputDims =
      internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  static constexpr int NumDims = NumInputDims + 1;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef std::remove_const_t<typename XprType::Scalar> Scalar;
  typedef TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device> Self;
  typedef TensorEvaluator<ArgType, Device> Impl;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static constexpr int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  static constexpr int Layout = TensorEvaluator<ArgType, Device>::Layout;

  enum {
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess = false,
    PreferBlockAccess = true,
    CoordAccess = false,
    RawAccess = false
  };

  typedef internal::TensorBlockNotImplemented TensorBlock;
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_device(device), m_impl(op.expression(), device) {
    EIGEN_STATIC_ASSERT((NumDims >= 4), YOU_MADE_A_PROGRAMMING_MISTAKE);

    m_paddingValue = op.padding_value();

    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();

    // Cache a few variables describing the input layout.
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_inputDepth = input_dims[0];
      m_inputRows = input_dims[1];
      m_inputCols = input_dims[2];
    } else {
      m_inputDepth = input_dims[NumInputDims - 1];
      m_inputRows = input_dims[NumInputDims - 2];
      m_inputCols = input_dims[NumInputDims - 3];
    }
    m_row_strides = op.row_strides();
    m_col_strides = op.col_strides();

    m_in_row_strides = op.in_row_strides();
    m_in_col_strides = op.in_col_strides();
    m_row_inflate_strides = op.row_inflate_strides();
    m_col_inflate_strides = op.col_inflate_strides();

    // Effective input size after inserting (inflate_stride - 1) zeros between
    // input elements, and effective patch size after dilating the patch by
    // the in-strides.
    m_input_rows_eff = (m_inputRows - 1) * m_row_inflate_strides + 1;
    m_input_cols_eff = (m_inputCols - 1) * m_col_inflate_strides + 1;
    m_patch_rows_eff = op.patch_rows() + (op.patch_rows() - 1) * (m_in_row_strides - 1);
    m_patch_cols_eff = op.patch_cols() + (op.patch_cols() - 1) * (m_in_col_strides - 1);
    if (op.padding_explicit()) {
      m_outputRows = numext::ceil((m_input_rows_eff + op.padding_top() + op.padding_bottom() - m_patch_rows_eff + 1.f) /
                                  static_cast<float>(m_row_strides));
      m_outputCols = numext::ceil((m_input_cols_eff + op.padding_left() + op.padding_right() - m_patch_cols_eff + 1.f) /
                                  static_cast<float>(m_col_strides));
      m_rowPaddingTop = op.padding_top();
      m_colPaddingLeft = op.padding_left();
    } else {
      // Compute the padding from the padding type.
      switch (op.padding_type()) {
        case PADDING_VALID:
          m_outputRows = numext::ceil((m_input_rows_eff - m_patch_rows_eff + 1.f) / static_cast<float>(m_row_strides));
          m_outputCols = numext::ceil((m_input_cols_eff - m_patch_cols_eff + 1.f) / static_cast<float>(m_col_strides));
          m_rowPaddingTop =
              numext::maxi<Index>(0, ((m_outputRows - 1) * m_row_strides + m_patch_rows_eff - m_input_rows_eff) / 2);
          m_colPaddingLeft =
              numext::maxi<Index>(0, ((m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff) / 2);
          break;
        case PADDING_SAME:
          m_outputRows = numext::ceil(m_input_rows_eff / static_cast<float>(m_row_strides));
          m_outputCols = numext::ceil(m_input_cols_eff / static_cast<float>(m_col_strides));
          m_rowPaddingTop = ((m_outputRows - 1) * m_row_strides + m_patch_rows_eff - m_input_rows_eff) / 2;
          m_colPaddingLeft = ((m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff) / 2;
          m_rowPaddingTop = numext::maxi<Index>(0, m_rowPaddingTop);
          m_colPaddingLeft = numext::maxi<Index>(0, m_colPaddingLeft);
          break;
        default:
          eigen_assert(false && "unexpected padding");
          // Silence uninitialized-variable warnings.
          m_outputCols = 0;
          m_outputRows = 0;
      }
    }
    eigen_assert(m_outputRows > 0);
    eigen_assert(m_outputCols > 0);
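    // For example, with input_rows_eff = 5, patch_rows_eff = 3 and
    // row_stride = 2: PADDING_VALID gives ceil((5 - 3 + 1) / 2) = 2 output
    // rows, while PADDING_SAME gives ceil(5 / 2) = 3 output rows with a top
    // padding of ((3 - 1) * 2 + 3 - 5) / 2 = 1.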
    // Dimensions of the result of the extraction.
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      // ColMajor: 0: depth, 1: patch_rows, 2: patch_cols, 3: number of
      // patches, 4 and beyond: anything else (such as batch).
      m_dimensions[0] = input_dims[0];
      m_dimensions[1] = op.patch_rows();
      m_dimensions[2] = op.patch_cols();
      m_dimensions[3] = m_outputRows * m_outputCols;
      for (int i = 4; i < NumDims; ++i) {
        m_dimensions[i] = input_dims[i - 1];
      }
    } else {
      // RowMajor: the same dimensions in reverse order.
      m_dimensions[NumDims - 1] = input_dims[NumInputDims - 1];
      m_dimensions[NumDims - 2] = op.patch_rows();
      m_dimensions[NumDims - 3] = op.patch_cols();
      m_dimensions[NumDims - 4] = m_outputRows * m_outputCols;
      for (int i = NumDims - 5; i >= 0; --i) {
        m_dimensions[i] = input_dims[i];
      }
    }
    // Strides for moving around within the output tensor.
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_colStride = m_dimensions[1];
      m_patchStride = m_colStride * m_dimensions[2] * m_dimensions[0];
      m_otherStride = m_patchStride * m_dimensions[3];
    } else {
      m_colStride = m_dimensions[NumDims - 2];
      m_patchStride = m_colStride * m_dimensions[NumDims - 3] * m_dimensions[NumDims - 1];
      m_otherStride = m_patchStride * m_dimensions[NumDims - 4];
    }

    // Strides for navigating through the input tensor.
    m_rowInputStride = m_inputDepth;
    m_colInputStride = m_inputDepth * m_inputRows;
    m_patchInputStride = m_inputDepth * m_inputRows * m_inputCols;
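    // The TensorIntDivisor objects cache a magic-number decomposition of each
    // divisor so that the repeated integer divisions in coeff()/packet()
    // avoid hardware division.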
    m_fastOtherStride = internal::TensorIntDivisor<Index>(m_otherStride);
    m_fastPatchStride = internal::TensorIntDivisor<Index>(m_patchStride);
    m_fastColStride = internal::TensorIntDivisor<Index>(m_colStride);
    m_fastInflateRowStride = internal::TensorIntDivisor<Index>(m_row_inflate_strides);
    m_fastInflateColStride = internal::TensorIntDivisor<Index>(m_col_inflate_strides);
    m_fastInputColsEff = internal::TensorIntDivisor<Index>(m_input_cols_eff);

    m_fastOutputRows = internal::TensorIntDivisor<Index>(m_outputRows);
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_fastOutputDepth = internal::TensorIntDivisor<Index>(m_dimensions[0]);
    } else {
      m_fastOutputDepth = internal::TensorIntDivisor<Index>(m_dimensions[NumDims - 1]);
    }
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }

#ifdef EIGEN_USE_THREADS
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(EvaluatorPointerType, EvalSubExprsCallback done) {
    m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); });
  }
#endif  // EIGEN_USE_THREADS

  EIGEN_STRONG_INLINE void cleanup() { m_impl.cleanup(); }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    // Patch index corresponding to the passed in index.
    const Index patchIndex = index / m_fastPatchStride;
    // Spatial offset of the element within the patch.
    const Index patchOffset = (index - patchIndex * m_patchStride) / m_fastOutputDepth;

    // Remaining (batch etc.) coordinates.
    const Index otherIndex = (NumDims == 4) ? 0 : index / m_fastOtherStride;
    const Index patch2DIndex = (NumDims == 4) ? patchIndex : (index - otherIndex * m_otherStride) / m_fastPatchStride;

    // Calculate the col index in the original input tensor.
    const Index colIndex = patch2DIndex / m_fastOutputRows;
    const Index colOffset = patchOffset / m_fastColStride;
    const Index inputCol = colIndex * m_col_strides + colOffset * m_in_col_strides - m_colPaddingLeft;
    const Index origInputCol =
        (m_col_inflate_strides == 1) ? inputCol : ((inputCol >= 0) ? (inputCol / m_fastInflateColStride) : 0);
    if (inputCol < 0 || inputCol >= m_input_cols_eff ||
        ((m_col_inflate_strides != 1) && (inputCol != origInputCol * m_col_inflate_strides))) {
      return Scalar(m_paddingValue);
    }

    // Calculate the row index in the original input tensor.
    const Index rowIndex = patch2DIndex - colIndex * m_outputRows;
    const Index rowOffset = patchOffset - colOffset * m_colStride;
    const Index inputRow = rowIndex * m_row_strides + rowOffset * m_in_row_strides - m_rowPaddingTop;
    const Index origInputRow =
        (m_row_inflate_strides == 1) ? inputRow : ((inputRow >= 0) ? (inputRow / m_fastInflateRowStride) : 0);
    if (inputRow < 0 || inputRow >= m_input_rows_eff ||
        ((m_row_inflate_strides != 1) && (inputRow != origInputRow * m_row_inflate_strides))) {
      return Scalar(m_paddingValue);
    }

    const int depth_index = static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : NumDims - 1;
    const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index];

    const Index inputIndex =
        depth + origInputRow * m_rowInputStride + origInputCol * m_colInputStride + otherIndex * m_patchInputStride;
    return m_impl.coeff(inputIndex);
  }
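  // Loads PacketSize consecutive coefficients at once. The fast path below
  // applies only when the whole packet lies within a single patch and never
  // touches the padding region; everything else falls back to the scalar
  // packetWithPossibleZero() path.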
  template <int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
    eigen_assert(index + PacketSize - 1 < dimensions().TotalSize());

    if (m_in_row_strides != 1 || m_in_col_strides != 1 || m_row_inflate_strides != 1 || m_col_inflate_strides != 1) {
      return packetWithPossibleZero(index);
    }

    const Index indices[2] = {index, index + PacketSize - 1};
    const Index patchIndex = indices[0] / m_fastPatchStride;
    if (patchIndex != indices[1] / m_fastPatchStride) {
      return packetWithPossibleZero(index);
    }
    const Index otherIndex = (NumDims == 4) ? 0 : indices[0] / m_fastOtherStride;
    eigen_assert(otherIndex == indices[1] / m_fastOtherStride);

    // Find the offset of the element wrt the location of the first element.
    const Index patchOffsets[2] = {(indices[0] - patchIndex * m_patchStride) / m_fastOutputDepth,
                                   (indices[1] - patchIndex * m_patchStride) / m_fastOutputDepth};

    const Index patch2DIndex =
        (NumDims == 4) ? patchIndex : (indices[0] - otherIndex * m_otherStride) / m_fastPatchStride;
    eigen_assert(patch2DIndex == (indices[1] - otherIndex * m_otherStride) / m_fastPatchStride);

    const Index colIndex = patch2DIndex / m_fastOutputRows;
    const Index colOffsets[2] = {patchOffsets[0] / m_fastColStride, patchOffsets[1] / m_fastColStride};

    // Calculate the col indices in the original input tensor.
    const Index inputCols[2] = {colIndex * m_col_strides + colOffsets[0] - m_colPaddingLeft,
                                colIndex * m_col_strides + colOffsets[1] - m_colPaddingLeft};
    if (inputCols[1] < 0 || inputCols[0] >= m_inputCols) {
      return internal::pset1<PacketReturnType>(Scalar(m_paddingValue));
    }

    if (inputCols[0] == inputCols[1]) {
      const Index rowIndex = patch2DIndex - colIndex * m_outputRows;
      const Index rowOffsets[2] = {patchOffsets[0] - colOffsets[0] * m_colStride,
                                   patchOffsets[1] - colOffsets[1] * m_colStride};
      eigen_assert(rowOffsets[0] <= rowOffsets[1]);
      // Calculate the row indices in the original input tensor.
      const Index inputRows[2] = {rowIndex * m_row_strides + rowOffsets[0] - m_rowPaddingTop,
                                  rowIndex * m_row_strides + rowOffsets[1] - m_rowPaddingTop};

      if (inputRows[1] < 0 || inputRows[0] >= m_inputRows) {
        return internal::pset1<PacketReturnType>(Scalar(m_paddingValue));
      }

      if (inputRows[0] >= 0 && inputRows[1] < m_inputRows) {
        // All the coefficients come from the input: no padding involved.
        const int depth_index = static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : NumDims - 1;
        const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index];
        const Index inputIndex =
            depth + inputRows[0] * m_rowInputStride + inputCols[0] * m_colInputStride + otherIndex * m_patchInputStride;
        return m_impl.template packet<Unaligned>(inputIndex);
      }
    }

    return packetWithPossibleZero(index);
  }
  EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowPaddingTop() const { return m_rowPaddingTop; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colPaddingLeft() const { return m_colPaddingLeft; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index outputRows() const { return m_outputRows; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index outputCols() const { return m_outputCols; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userRowStride() const { return m_row_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userColStride() const { return m_col_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userInRowStride() const { return m_in_row_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userInColStride() const { return m_in_col_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowInflateStride() const { return m_row_inflate_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colInflateStride() const { return m_col_inflate_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    // We conservatively estimate the cost for the code path where the
    // computed coefficient is the padding value.
    const double compute_cost =
        3 * TensorOpCost::DivCost<Index>() + 6 * TensorOpCost::MulCost<Index>() + 8 * TensorOpCost::MulCost<Index>();
    return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
  }
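  // Scalar fallback: gathers PacketSize coefficients one by one (each of
  // which may resolve to the padding value) into an aligned buffer, then
  // loads them as a single packet.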
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const {
    EIGEN_ALIGN_MAX std::remove_const_t<CoeffReturnType> values[PacketSize];
    EIGEN_UNROLL_LOOP
    for (int i = 0; i < PacketSize; ++i) {
      values[i] = coeff(index + i);
    }
    PacketReturnType rslt = internal::pload<PacketReturnType>(values);
    return rslt;
  }
  Dimensions m_dimensions;

  Index m_otherStride;
  Index m_patchStride;
  Index m_colStride;
  Index m_row_strides;
  Index m_col_strides;

  Index m_in_row_strides;
  Index m_in_col_strides;
  Index m_row_inflate_strides;
  Index m_col_inflate_strides;

  Index m_input_rows_eff;
  Index m_input_cols_eff;
  Index m_patch_rows_eff;
  Index m_patch_cols_eff;

  internal::TensorIntDivisor<Index> m_fastOtherStride;
  internal::TensorIntDivisor<Index> m_fastPatchStride;
  internal::TensorIntDivisor<Index> m_fastColStride;
  internal::TensorIntDivisor<Index> m_fastInflateRowStride;
  internal::TensorIntDivisor<Index> m_fastInflateColStride;
  internal::TensorIntDivisor<Index> m_fastInputColsEff;

  Index m_rowInputStride;
  Index m_colInputStride;
  Index m_patchInputStride;

  Index m_inputDepth;
  Index m_inputRows;
  Index m_inputCols;

  Index m_outputRows;
  Index m_outputCols;

  Index m_rowPaddingTop;
  Index m_colPaddingLeft;

  internal::TensorIntDivisor<Index> m_fastOutputRows;
  internal::TensorIntDivisor<Index> m_fastOutputDepth;

  Scalar m_paddingValue;

  const Device EIGEN_DEVICE_REF m_device;
  TensorEvaluator<ArgType, Device> m_impl;
};

}  // end namespace Eigen

#endif  // EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H