#ifndef EIGEN_SPARSEBLOCKMATRIX_H
#define EIGEN_SPARSEBLOCKMATRIX_H

#include "./InternalHeaderCheck.h"

namespace Eigen {
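/* A possible usage sketch, assuming fixed-size 3x3 blocks; the sparse matrix `sm` and the
 * sizes below are illustrative only, not part of the original header. A BlockSparseMatrix
 * can be built from an existing SparseMatrix with the same storage order, then multiplied
 * by a dense vector:
 *
 * \code
 * Eigen::SparseMatrix<double> sm(n, n);              // n assumed to be a multiple of 3
 * // ... fill sm ...
 * Eigen::BlockSparseMatrix<double, 3> bm(n / 3, n / 3);
 * bm = sm;                                           // group the entries into 3x3 blocks
 * Eigen::VectorXd x = Eigen::VectorXd::Ones(n);
 * Eigen::VectorXd y = bm * x;                        // block-sparse times dense vector
 * \endcode
 */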
template <typename Scalar_, int _BlockAtCompileTime = Dynamic, int Options_ = ColMajor, typename StorageIndex_ = int>
class BlockSparseMatrix;

template <typename BlockSparseMatrixT>
class BlockSparseMatrixView;

namespace internal {
template <typename Scalar_, int _BlockAtCompileTime, int Options_, typename Index_>
struct traits<BlockSparseMatrix<Scalar_, _BlockAtCompileTime, Options_, Index_> > {
  typedef Scalar_ Scalar;
  typedef Index_ Index;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = Dynamic,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = Dynamic,
    BlockSize = _BlockAtCompileTime,
    Flags = Options_ | NestByRefBit | LvalueBit,
    CoeffReadCost = NumTraits<Scalar>::ReadCost,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};
template <typename BlockSparseMatrixT>
struct traits<BlockSparseMatrixView<BlockSparseMatrixT> > {
  typedef Ref<Matrix<typename BlockSparseMatrixT::Scalar, BlockSparseMatrixT::BlockSize, BlockSparseMatrixT::BlockSize> > Scalar;
  typedef Ref<Matrix<typename BlockSparseMatrixT::RealScalar, BlockSparseMatrixT::BlockSize, BlockSparseMatrixT::BlockSize> > RealScalar;
};
// Function object to sort block triplets, by column (column-major) or by row (row-major)
template <typename Iterator, bool IsColMajor>
struct TripletComp {
  typedef typename Iterator::value_type Triplet;
  bool operator()(const Triplet& a, const Triplet& b) {
    if (IsColMajor)
      return ((a.col() == b.col() && a.row() < b.row()) || (a.col() < b.col()));
    else
      return ((a.row() == b.row() && a.col() < b.col()) || (a.row() < b.row()));
  }
};

}  // end namespace internal
/* Proxy to view the block sparse matrix as a regular sparse matrix */
template <typename BlockSparseMatrixT>
class BlockSparseMatrixView : public SparseMatrixBase<BlockSparseMatrixT> {
 public:
  typedef Ref<typename BlockSparseMatrixT::BlockScalar> Scalar;
  typedef Ref<typename BlockSparseMatrixT::BlockRealScalar> RealScalar;
  typedef typename BlockSparseMatrixT::Index Index;
  typedef BlockSparseMatrixT Nested;
  enum {
    Flags = BlockSparseMatrixT::Options,
    Options = BlockSparseMatrixT::Options,
    RowsAtCompileTime = BlockSparseMatrixT::RowsAtCompileTime,
    ColsAtCompileTime = BlockSparseMatrixT::ColsAtCompileTime,
    MaxColsAtCompileTime = BlockSparseMatrixT::MaxColsAtCompileTime,
    MaxRowsAtCompileTime = BlockSparseMatrixT::MaxRowsAtCompileTime
  };

  BlockSparseMatrixView(const BlockSparseMatrixT& spblockmat) : m_spblockmat(spblockmat) {}

  Index outerSize() const { return (Flags & RowMajorBit) == 1 ? this->rows() : this->cols(); }
  Index cols() const { return m_spblockmat.blockCols(); }
  Index rows() const { return m_spblockmat.blockRows(); }
  Scalar coeff(Index row, Index col) { return m_spblockmat.coeff(row, col); }
  Scalar coeffRef(Index row, Index col) { return m_spblockmat.coeffRef(row, col); }

  // Wrapper to iterate over all blocks
  class InnerIterator : public BlockSparseMatrixT::BlockInnerIterator {
   public:
    InnerIterator(const BlockSparseMatrixView& mat, Index outer)
        : BlockSparseMatrixT::BlockInnerIterator(mat.m_spblockmat, outer) {}
  };

 protected:
  const BlockSparseMatrixT& m_spblockmat;
};
/* Proxy to view a regular dense vector as a block vector */
template <typename BlockSparseMatrixT, typename VectorType>
class BlockVectorView {
 public:
  enum {
    BlockSize = BlockSparseMatrixT::BlockSize,
    ColsAtCompileTime = VectorType::ColsAtCompileTime,
    RowsAtCompileTime = VectorType::RowsAtCompileTime,
    Flags = VectorType::Flags
  };
  typedef Ref<const Matrix<typename BlockSparseMatrixT::Scalar, (RowsAtCompileTime == 1) ? 1 : BlockSize,
                           (ColsAtCompileTime == 1) ? 1 : BlockSize> >
      Scalar;
  typedef typename BlockSparseMatrixT::Index Index;

  BlockVectorView(const BlockSparseMatrixT& spblockmat, const VectorType& vec)
      : m_spblockmat(spblockmat), m_vec(vec) {}
  inline Index cols() const { return m_vec.cols(); }
  inline Index size() const { return m_spblockmat.blockRows(); }
  inline Scalar coeff(Index bi) const {
    Index startRow = m_spblockmat.blockRowsIndex(bi);
    Index rowSize = m_spblockmat.blockRowsIndex(bi + 1) - startRow;
    return m_vec.middleRows(startRow, rowSize);
  }
  inline Scalar coeff(Index bi, Index j) const {
    Index startRow = m_spblockmat.blockRowsIndex(bi);
    Index rowSize = m_spblockmat.blockRowsIndex(bi + 1) - startRow;
    return m_vec.block(startRow, j, rowSize, 1);
  }

 protected:
  const BlockSparseMatrixT& m_spblockmat;
  const VectorType& m_vec;
};
template <typename VectorType, typename Index>
class BlockVectorReturn;

/* Proxy to view a regular dense vector as a writable block vector */
template <typename BlockSparseMatrixT, typename VectorType>
class BlockVectorReturn {
 public:
  enum {
    ColsAtCompileTime = VectorType::ColsAtCompileTime,
    RowsAtCompileTime = VectorType::RowsAtCompileTime,
    Flags = VectorType::Flags
  };
  typedef Ref<Matrix<typename VectorType::Scalar, RowsAtCompileTime, ColsAtCompileTime> > Scalar;
  typedef typename BlockSparseMatrixT::Index Index;

  BlockVectorReturn(const BlockSparseMatrixT& spblockmat, VectorType& vec) : m_spblockmat(spblockmat), m_vec(vec) {}
  inline Index size() const { return m_spblockmat.blockRows(); }
  inline Scalar coeffRef(Index bi) {
    Index startRow = m_spblockmat.blockRowsIndex(bi);
    Index rowSize = m_spblockmat.blockRowsIndex(bi + 1) - startRow;
    return m_vec.middleRows(startRow, rowSize);
  }
  inline Scalar coeffRef(Index bi, Index j) {
    Index startRow = m_spblockmat.blockRowsIndex(bi);
    Index rowSize = m_spblockmat.blockRowsIndex(bi + 1) - startRow;
    return m_vec.block(startRow, j, rowSize, 1);
  }

 protected:
  const BlockSparseMatrixT& m_spblockmat;
  VectorType& m_vec;
};
// Block version of the sparse times dense product
template <typename Lhs, typename Rhs>
class BlockSparseTimeDenseProduct;

namespace internal {

template <typename BlockSparseMatrixT, typename VecType>
struct traits<BlockSparseTimeDenseProduct<BlockSparseMatrixT, VecType> > {
  typedef Dense StorageKind;
  typedef MatrixXpr XprKind;
  typedef typename BlockSparseMatrixT::Scalar Scalar;
  typedef typename BlockSparseMatrixT::Index Index;
  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = Dynamic,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = Dynamic,
    Flags = 0,
    CoeffReadCost = internal::traits<BlockSparseMatrixT>::CoeffReadCost
  };
};

}  // end namespace internal
template <typename Lhs, typename Rhs>
class BlockSparseTimeDenseProduct : public ProductBase<BlockSparseTimeDenseProduct<Lhs, Rhs>, Lhs, Rhs> {
 public:
  EIGEN_PRODUCT_PUBLIC_INTERFACE(BlockSparseTimeDenseProduct)

  BlockSparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs, rhs) {}

  template <typename Dest>
  void scaleAndAddTo(Dest& dest, const typename Rhs::Scalar& alpha) const {
    BlockVectorReturn<Lhs, Dest> tmpDest(m_lhs, dest);
    internal::sparse_time_dense_product(BlockSparseMatrixView<Lhs>(m_lhs), BlockVectorView<Lhs, Rhs>(m_lhs, m_rhs),
                                        tmpDest, alpha);
  }

 private:
  BlockSparseTimeDenseProduct& operator=(const BlockSparseTimeDenseProduct&);
};
template <typename Scalar_, int _BlockAtCompileTime, int Options_, typename StorageIndex_>
class BlockSparseMatrix
    : public SparseMatrixBase<BlockSparseMatrix<Scalar_, _BlockAtCompileTime, Options_, StorageIndex_> > {
 public:
  typedef Scalar_ Scalar;
  typedef typename NumTraits<Scalar>::Real RealScalar;
  typedef StorageIndex_ StorageIndex;
  typedef
      typename internal::ref_selector<BlockSparseMatrix<Scalar_, _BlockAtCompileTime, Options_, StorageIndex_> >::type
          Nested;

  enum {
    Options = Options_,
    Flags = Options,
    BlockSize = _BlockAtCompileTime,
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = Dynamic,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = Dynamic,
    IsVectorAtCompileTime = 0,
    IsColMajor = Flags & RowMajorBit ? 0 : 1
  };
  typedef Matrix<Scalar, _BlockAtCompileTime, _BlockAtCompileTime, IsColMajor ? ColMajor : RowMajor> BlockScalar;
  typedef Matrix<RealScalar, _BlockAtCompileTime, _BlockAtCompileTime, IsColMajor ? ColMajor : RowMajor> BlockRealScalar;
  typedef std::conditional_t<_BlockAtCompileTime == Dynamic, Scalar, BlockScalar> BlockScalarReturnType;
  typedef BlockSparseMatrix<Scalar, BlockSize, IsColMajor ? ColMajor : RowMajor, StorageIndex> PlainObject;

 public:
  // Default constructor
  BlockSparseMatrix()
      : m_innerBSize(0), m_outerBSize(0), m_innerOffset(0), m_outerOffset(0), m_nonzerosblocks(0),
        m_values(0), m_blockPtr(0), m_indices(0), m_outerIndex(0), m_blockSize(BlockSize) {}
  /** \brief Construct and resize */
  BlockSparseMatrix(Index brow, Index bcol)
      : m_innerBSize(IsColMajor ? brow : bcol), m_outerBSize(IsColMajor ? bcol : brow),
        m_innerOffset(0), m_outerOffset(0), m_nonzerosblocks(0), m_values(0), m_blockPtr(0),
        m_indices(0), m_outerIndex(0), m_blockSize(BlockSize) {}
  /** \brief Copy-constructor */
  BlockSparseMatrix(const BlockSparseMatrix& other)
      : m_innerBSize(other.m_innerBSize),
        m_outerBSize(other.m_outerBSize),
        m_nonzerosblocks(other.m_nonzerosblocks),
        m_nonzeros(other.m_nonzeros),
        m_blockPtr(0),
        m_blockSize(other.m_blockSize) {
    // Copying between variable-size and fixed-size blocks is not allowed
    eigen_assert(m_blockSize == BlockSize && " CAN NOT COPY BETWEEN FIXED-SIZE AND VARIABLE-SIZE BLOCKS");

    std::copy(other.m_innerOffset, other.m_innerOffset + m_innerBSize + 1, m_innerOffset);
    std::copy(other.m_outerOffset, other.m_outerOffset + m_outerBSize + 1, m_outerOffset);
    std::copy(other.m_values, other.m_values + m_nonzeros, m_values);

    if (m_blockSize != Dynamic) std::copy(other.m_blockPtr, other.m_blockPtr + m_nonzerosblocks, m_blockPtr);

    std::copy(other.m_indices, other.m_indices + m_nonzerosblocks, m_indices);
    std::copy(other.m_outerIndex, other.m_outerIndex + m_outerBSize, m_outerIndex);
  }
  friend void swap(BlockSparseMatrix& first, BlockSparseMatrix& second) {
    std::swap(first.m_innerBSize, second.m_innerBSize);
    std::swap(first.m_outerBSize, second.m_outerBSize);
    std::swap(first.m_innerOffset, second.m_innerOffset);
    std::swap(first.m_outerOffset, second.m_outerOffset);
    std::swap(first.m_nonzerosblocks, second.m_nonzerosblocks);
    std::swap(first.m_nonzeros, second.m_nonzeros);
    std::swap(first.m_values, second.m_values);
    std::swap(first.m_blockPtr, second.m_blockPtr);
    std::swap(first.m_indices, second.m_indices);
    std::swap(first.m_outerIndex, second.m_outerIndex);
    std::swap(first.m_blockSize, second.m_blockSize);
  }

  BlockSparseMatrix& operator=(BlockSparseMatrix other) {
    // Copy-and-swap: avoids leaking data if an exception is thrown
    swap(*this, other);
    return *this;
  }
  ~BlockSparseMatrix() {
    delete[] m_outerIndex;
    delete[] m_innerOffset;
    delete[] m_outerOffset;
    delete[] m_indices;
    delete[] m_blockPtr;
    delete[] m_values;
  }
  /** \brief Constructor from a sparse matrix */
  template <typename MatrixType>
  inline BlockSparseMatrix(const MatrixType& spmat) : m_blockSize(BlockSize) {
    EIGEN_STATIC_ASSERT((m_blockSize != Dynamic), THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE);
    *this = spmat;
  }
  /**
   * \brief Assignment from a sparse matrix with the same storage order
   *
   * The block structure and the block sizes must have been set up before calling this.
   */
  template <typename MatrixType>
  inline BlockSparseMatrix& operator=(const MatrixType& spmat) {
    eigen_assert((m_innerBSize != 0 && m_outerBSize != 0) &&
                 "Trying to assign to a zero-size matrix, call resize() first");
    eigen_assert(((MatrixType::Options & RowMajorBit) != IsColMajor) && "Wrong storage order");
    typedef SparseMatrix<bool, MatrixType::Options, typename MatrixType::Index> MatrixPatternType;
    MatrixPatternType blockPattern(blockRows(), blockCols());
    m_nonzeros = 0;

    // First, compute the number of nonzero blocks and their locations
    for (StorageIndex bj = 0; bj < m_outerBSize; ++bj) {
      // Browse each outer block and compute the structure
      std::vector<bool> nzblocksFlag(m_innerBSize, false);  // Record the existing blocks
      blockPattern.startVec(bj);
      for (StorageIndex j = blockOuterIndex(bj); j < blockOuterIndex(bj + 1); ++j) {
        typename MatrixType::InnerIterator it_spmat(spmat, j);
        for (; it_spmat; ++it_spmat) {
          StorageIndex bi = innerToBlock(it_spmat.index());  // Index of the current nonzero block
          if (!nzblocksFlag[bi]) {
            // Save the index of this nonzero block
            nzblocksFlag[bi] = true;
            blockPattern.insertBackByOuterInnerUnordered(bj, bi) = true;
            // Compute the total number of nonzeros (including explicit zeros in blocks)
            m_nonzeros += blockOuterSize(bj) * blockInnerSize(bi);
          }
        }
      }  // end current outer block
    }
    blockPattern.finalize();

    // Allocate the internal arrays
    setBlockStructure(blockPattern);

    for (StorageIndex nz = 0; nz < m_nonzeros; ++nz) m_values[nz] = Scalar(0);
    for (StorageIndex bj = 0; bj < m_outerBSize; ++bj) {
      // Now copy the values
      for (StorageIndex j = blockOuterIndex(bj); j < blockOuterIndex(bj + 1); ++j) {
        // Browse the outer block column by column (for column-major matrices)
        typename MatrixType::InnerIterator it_spmat(spmat, j);
        for (; it_spmat; ++it_spmat) {
          StorageIndex idx = 0;                              // Position of this block in the column block
          StorageIndex bi = innerToBlock(it_spmat.index());  // Index of the current nonzero block
          // Go to the inner block where this element belongs to
          while (bi > m_indices[m_outerIndex[bj] + idx]) ++idx;  // Not expensive for ordered blocks
          StorageIndex idxVal;  // Position in the array of values for this element
          if (m_blockSize == Dynamic) {
            // Offset of all blocks before ...
            idxVal = m_blockPtr[m_outerIndex[bj] + idx];
            // ... plus the offset inside the block
            idxVal += (j - blockOuterIndex(bj)) * blockOuterSize(bj) + it_spmat.index() - m_innerOffset[bi];
          } else {
            // All blocks before
            idxVal = (m_outerIndex[bj] + idx) * m_blockSize * m_blockSize;
            // Offset inside the block
            idxVal += (j - blockOuterIndex(bj)) * m_blockSize + (it_spmat.index() % m_blockSize);
          }
          // Insert the value
          m_values[idxVal] = it_spmat.value();
        }
      }
    }
    return *this;
  }
  /** \brief Set the nonzero block pattern of the matrix */
  template <typename MatrixType>
  void setBlockStructure(const MatrixType& blockPattern) {
    resize(blockPattern.rows(), blockPattern.cols());
    reserve(blockPattern.nonZeros());

    // Browse the block pattern and set up the various pointers
    m_outerIndex[0] = 0;
    if (m_blockSize == Dynamic) m_blockPtr[0] = 0;
    for (StorageIndex nz = 0; nz < m_nonzeros; ++nz) m_values[nz] = Scalar(0);
    for (StorageIndex bj = 0; bj < m_outerBSize; ++bj) {
      // First, copy and save the indices of nonzero blocks in this outer block
      std::vector<int> nzBlockIdx;
      typename MatrixType::InnerIterator it(blockPattern, bj);
      for (; it; ++it) nzBlockIdx.push_back(it.index());
      std::sort(nzBlockIdx.begin(), nzBlockIdx.end());

      // Now, fill the block indices and (eventually) pointers to blocks
      for (StorageIndex idx = 0; idx < nzBlockIdx.size(); ++idx) {
        StorageIndex offset = m_outerIndex[bj] + idx;  // offset in m_indices
        m_indices[offset] = nzBlockIdx[idx];
        if (m_blockSize == Dynamic)
          m_blockPtr[offset] = m_blockPtr[offset - 1] + blockInnerSize(nzBlockIdx[idx]) * blockOuterSize(bj);
      }
      // Save the pointer to the next outer block
      m_outerIndex[bj + 1] = m_outerIndex[bj] + nzBlockIdx.size();
    }
  }
  /** \brief Set the number of row and column blocks */
  inline void resize(Index brow, Index bcol) {
    m_innerBSize = IsColMajor ? brow : bcol;
    m_outerBSize = IsColMajor ? bcol : brow;
  }
  /**
   * \brief Set the row and column block layouts
   *
   * This function sets the size of each row and column block, so it should
   * be used only for blocks with variable size.
   * \param rowBlocks : Number of rows per row block
   * \param colBlocks : Number of columns per column block
   * \sa resize()
   */
  inline void setBlockLayout(const VectorXi& rowBlocks, const VectorXi& colBlocks) {
    const VectorXi& innerBlocks = IsColMajor ? rowBlocks : colBlocks;
    const VectorXi& outerBlocks = IsColMajor ? colBlocks : rowBlocks;
    eigen_assert(m_innerBSize == innerBlocks.size() && "CHECK THE NUMBER OF ROW OR COLUMN BLOCKS");
    eigen_assert(m_outerBSize == outerBlocks.size() && "CHECK THE NUMBER OF ROW OR COLUMN BLOCKS");
    m_outerBSize = outerBlocks.size();
    // Starting index of each block: cumulative sums
    m_innerOffset = new StorageIndex[m_innerBSize + 1];
    m_outerOffset = new StorageIndex[m_outerBSize + 1];
    m_innerOffset[0] = 0;
    m_outerOffset[0] = 0;
    std::partial_sum(&innerBlocks[0], &innerBlocks[m_innerBSize - 1] + 1, &m_innerOffset[1]);
    std::partial_sum(&outerBlocks[0], &outerBlocks[m_outerBSize - 1] + 1, &m_outerOffset[1]);

    // Compute the total number of nonzeros
    m_nonzeros = 0;
    for (StorageIndex bj = 0; bj < m_outerBSize; ++bj)
      for (StorageIndex bi = 0; bi < m_innerBSize; ++bi) m_nonzeros += outerBlocks[bj] * innerBlocks[bi];
  }
  /** \brief Allocate the internal arrays of block pointers and inner indices */
  inline void reserve(const Index nonzerosblocks) {
    eigen_assert((m_innerBSize != 0 && m_outerBSize != 0) &&
                 "TRYING TO RESERVE ZERO-SIZE MATRICES, CALL resize() first");

    // FIXME Should free if already allocated
    m_outerIndex = new StorageIndex[m_outerBSize + 1];

    m_nonzerosblocks = nonzerosblocks;
    if (m_blockSize != Dynamic) {
      m_nonzeros = nonzerosblocks * (m_blockSize * m_blockSize);
      m_blockPtr = 0;
    } else {
      // m_nonzeros is already computed in setBlockLayout()
      m_blockPtr = new StorageIndex[m_nonzerosblocks + 1];
    }
    m_indices = new StorageIndex[m_nonzerosblocks + 1];
    m_values = new Scalar[m_nonzeros];
  }
  /**
   * \brief Fill values in the matrix from a triplet list, where each triplet holds a block of values
   */
  template <typename InputIterator>
  void setFromTriplets(const InputIterator& begin, const InputIterator& end) {
    eigen_assert((m_innerBSize != 0 && m_outerBSize != 0) && "ZERO BLOCKS, PLEASE CALL resize() before");

    // First, sort the triplet list
    internal::TripletComp<InputIterator, IsColMajor> tripletcomp;
    std::sort(begin, end, tripletcomp);

    // Count the block sizes and the number of nonzero blocks per outer dimension
    VectorXi rowBlocks(m_innerBSize);  // Size of each block row
    VectorXi colBlocks(m_outerBSize);  // Size of each block column
    rowBlocks.setZero();
    colBlocks.setZero();
    VectorXi nzblock_outer(m_outerBSize);  // Number of nonzero blocks per outer vector
    VectorXi nz_outer(m_outerBSize);       // Number of nonzeros per outer vector (variable-size blocks)
    nzblock_outer.setZero();
    nz_outer.setZero();
    for (InputIterator it(begin); it != end; ++it) {
      eigen_assert(it->row() >= 0 && it->row() < this->blockRows() && it->col() >= 0 && it->col() < this->blockCols());
      eigen_assert((it->value().rows() == it->value().cols() && (it->value().rows() == m_blockSize)) ||
                   (m_blockSize == Dynamic));

      if (m_blockSize == Dynamic) {
        eigen_assert((rowBlocks[it->row()] == 0 || rowBlocks[it->row()] == it->value().rows()) &&
                     "NON CORRESPONDING SIZES FOR ROW BLOCKS");
        eigen_assert((colBlocks[it->col()] == 0 || colBlocks[it->col()] == it->value().cols()) &&
                     "NON CORRESPONDING SIZES FOR COLUMN BLOCKS");
        rowBlocks[it->row()] = it->value().rows();
        colBlocks[it->col()] = it->value().cols();
      }
      nz_outer(IsColMajor ? it->col() : it->row()) += it->value().rows() * it->value().cols();
      nzblock_outer(IsColMajor ? it->col() : it->row())++;
    }
    // Allocate member arrays
    if (m_blockSize == Dynamic) setBlockLayout(rowBlocks, colBlocks);
    StorageIndex nzblocks = nzblock_outer.sum();
    reserve(nzblocks);

    // Temporary markers
    VectorXi block_id(m_outerBSize);  // To be used as a block marker during insertion

    // Setup outer index pointers and markers
    m_outerIndex[0] = 0;
    if (m_blockSize == Dynamic) m_blockPtr[0] = 0;
    for (StorageIndex bj = 0; bj < m_outerBSize; ++bj) {
      m_outerIndex[bj + 1] = m_outerIndex[bj] + nzblock_outer(bj);
      block_id(bj) = m_outerIndex[bj];
      if (m_blockSize == Dynamic) {
        m_blockPtr[m_outerIndex[bj + 1]] = m_blockPtr[m_outerIndex[bj]] + nz_outer(bj);
      }
    }

    // Fill the matrix
    for (InputIterator it(begin); it != end; ++it) {
      StorageIndex outer = IsColMajor ? it->col() : it->row();
      StorageIndex inner = IsColMajor ? it->row() : it->col();
      m_indices[block_id(outer)] = inner;
      StorageIndex block_size = it->value().rows() * it->value().cols();
      StorageIndex nz_marker = blockPtr(block_id[outer]);
      memcpy(&(m_values[nz_marker]), it->value().data(), block_size * sizeof(Scalar));
      if (m_blockSize == Dynamic) {
        m_blockPtr[block_id(outer) + 1] = m_blockPtr[block_id(outer)] + block_size;
      }
      block_id(outer)++;
    }
  }
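  /* A hedged usage sketch for variable-size blocks (an illustration, not part of the original
   * header): with BlockSize == Dynamic, setFromTriplets() deduces the per-block row and column
   * sizes from the block values themselves and calls setBlockLayout() and reserve() internally.
   * The triplet value type below is an assumption; any type exposing row(), col() and a dense
   * block value() would do.
   *
   * \code
   * Eigen::BlockSparseMatrix<double> A;                     // Dynamic (variable-size) blocks
   * A.resize(nbBlockRows, nbBlockCols);                     // structure in number of blocks
   * std::vector<Eigen::Triplet<Eigen::MatrixXd> > blocks;   // (block-row, block-col, dense block)
   * // ... fill `blocks` ...
   * A.setFromTriplets(blocks.begin(), blocks.end());
   * \endcode
   */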
  /** \returns the number of rows */
  inline Index rows() const { return (IsColMajor ? innerSize() : outerSize()); }

  /** \returns the number of columns */
  inline Index cols() const { return (IsColMajor ? outerSize() : innerSize()); }

  inline Index innerSize() const {
    if (m_blockSize == Dynamic)
      return m_innerOffset[m_innerBSize];
    else
      return (m_innerBSize * m_blockSize);
  }

  inline Index outerSize() const {
    if (m_blockSize == Dynamic)
      return m_outerOffset[m_outerBSize];
    else
      return (m_outerBSize * m_blockSize);
  }

  /** \returns the number of rows grouped by blocks */
  inline Index blockRows() const { return (IsColMajor ? m_innerBSize : m_outerBSize); }

  /** \returns the number of columns grouped by blocks */
  inline Index blockCols() const { return (IsColMajor ? m_outerBSize : m_innerBSize); }

  inline Index outerBlocks() const { return m_outerBSize; }
  inline Index innerBlocks() const { return m_innerBSize; }
  /** \returns the block index where outer belongs to */
  inline Index outerToBlock(Index outer) const {
    eigen_assert(outer < outerSize() && "OUTER INDEX OUT OF BOUNDS");

    if (m_blockSize != Dynamic) return (outer / m_blockSize);  // Integer division

    StorageIndex b_outer = 0;
    while (m_outerOffset[b_outer] <= outer) ++b_outer;
    return b_outer - 1;
  }

  /** \returns the block index where inner belongs to */
  inline Index innerToBlock(Index inner) const {
    eigen_assert(inner < innerSize() && "INNER INDEX OUT OF BOUNDS");

    if (m_blockSize != Dynamic) return (inner / m_blockSize);  // Integer division

    StorageIndex b_inner = 0;
    while (m_innerOffset[b_inner] <= inner) ++b_inner;
    return b_inner - 1;
  }
  /** \returns a reference to the (brow, bcol) block as a dense matrix */
  Ref<BlockScalar> coeffRef(Index brow, Index bcol) {
    eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS");
    eigen_assert(bcol < blockCols() && "BLOCK COLUMN INDEX OUT OF BOUNDS");

    StorageIndex rsize = IsColMajor ? blockInnerSize(brow) : blockOuterSize(bcol);
    StorageIndex csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow);
    StorageIndex inner = IsColMajor ? brow : bcol;
    StorageIndex outer = IsColMajor ? bcol : brow;
    StorageIndex offset = m_outerIndex[outer];
    while (offset < m_outerIndex[outer + 1] && m_indices[offset] != inner) offset++;
    if (m_indices[offset] == inner) {
      return Map<BlockScalar>(&(m_values[blockPtr(offset)]), rsize, csize);
    } else {
      // FIXME the block does not exist; it should be inserted here
      eigen_assert(false && "DYNAMIC INSERTION IS NOT YET SUPPORTED");
    }
  }
  /** \returns the value of the (brow, bcol) block as a dense matrix */
  Map<const BlockScalar> coeff(Index brow, Index bcol) const {
    eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS");
    eigen_assert(bcol < blockCols() && "BLOCK COLUMN INDEX OUT OF BOUNDS");

    StorageIndex rsize = IsColMajor ? blockInnerSize(brow) : blockOuterSize(bcol);
    StorageIndex csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow);
    StorageIndex inner = IsColMajor ? brow : bcol;
    StorageIndex outer = IsColMajor ? bcol : brow;
    StorageIndex offset = m_outerIndex[outer];
    while (offset < m_outerIndex[outer + 1] && m_indices[offset] != inner) offset++;
    if (m_indices[offset] == inner) {
      return Map<const BlockScalar>(&(m_values[blockPtr(offset)]), rsize, csize);
    } else {
      eigen_assert(false && "NOT YET SUPPORTED");
    }
  }
  template <typename VecType>
  BlockSparseTimeDenseProduct<BlockSparseMatrix, VecType> operator*(const VecType& lhs) const {
    return BlockSparseTimeDenseProduct<BlockSparseMatrix, VecType>(*this, lhs);
  }
  /** \returns the number of nonzero blocks */
  inline Index nonZerosBlocks() const { return m_nonzerosblocks; }
  /** \returns the total number of nonzero elements, including possible explicit zeros in blocks */
  inline Index nonZeros() const { return m_nonzeros; }

  inline BlockScalarReturnType* valuePtr() { return static_cast<BlockScalarReturnType*>(m_values); }
  inline StorageIndex* innerIndexPtr() { return m_indices; }
  inline const StorageIndex* innerIndexPtr() const { return m_indices; }
  inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
  inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }

  /** \brief for compatibility purposes with the SparseMatrix class */
  inline bool isCompressed() const { return true; }

  /** \returns the starting index of the bi row block */
  inline Index blockRowsIndex(Index bi) const { return IsColMajor ? blockInnerIndex(bi) : blockOuterIndex(bi); }

  /** \returns the starting index of the bj col block */
  inline Index blockColsIndex(Index bj) const { return IsColMajor ? blockOuterIndex(bj) : blockInnerIndex(bj); }
  inline Index blockOuterIndex(Index bj) const {
    return (m_blockSize == Dynamic) ? m_outerOffset[bj] : (bj * m_blockSize);
  }
  inline Index blockInnerIndex(Index bi) const {
    return (m_blockSize == Dynamic) ? m_innerOffset[bi] : (bi * m_blockSize);
  }

  inline Index blockInnerSize(Index bi) const {
    return (m_blockSize == Dynamic) ? (m_innerOffset[bi + 1] - m_innerOffset[bi]) : m_blockSize;
  }
  inline Index blockOuterSize(Index bj) const {
    return (m_blockSize == Dynamic) ? (m_outerOffset[bj + 1] - m_outerOffset[bj]) : m_blockSize;
  }
  /** \brief Browse the matrix by outer index */
  class InnerIterator;  // Browse column by column

  /** \brief Browse the matrix by block outer index */
  class BlockInnerIterator;  // Browse block by block

  friend std::ostream& operator<<(std::ostream& s, const BlockSparseMatrix& m) {
    for (StorageIndex j = 0; j < m.outerBlocks(); ++j) {
      BlockInnerIterator itb(m, j);
      for (; itb; ++itb) {
        s << "(" << itb.row() << ", " << itb.col() << ")\n";
        s << itb.value() << "\n";
      }
    }
    s << std::endl;
    return s;
  }

  /** \returns the starting position of the block <id> in the array of values */
  Index blockPtr(Index id) const {
    if (m_blockSize == Dynamic)
      return m_blockPtr[id];
    else
      return id * m_blockSize * m_blockSize;
  }
 protected:
  Index m_innerBSize;           // Number of block rows
  Index m_outerBSize;           // Number of block columns
  StorageIndex* m_innerOffset;  // Starting index of each inner block (size m_innerBSize+1)
  StorageIndex* m_outerOffset;  // Starting index of each outer block (size m_outerBSize+1)
  Index m_nonzerosblocks;       // Total number of nonzero blocks
  Index m_nonzeros;             // Total number of nonzero elements
  Scalar* m_values;             // Values stored block after block (size m_nonzeros)
  StorageIndex* m_blockPtr;     // Pointer to the beginning of each block in m_values; null for fixed-size blocks
  StorageIndex* m_indices;      // Inner block indices (size m_nonzerosblocks)
  StorageIndex* m_outerIndex;   // Starting pointer of each block column in m_indices (size m_outerBSize)
  Index m_blockSize;            // Size of a block for fixed-size blocks, otherwise Dynamic
};
template <typename Scalar_, int _BlockAtCompileTime, int Options_, typename StorageIndex_>
class BlockSparseMatrix<Scalar_, _BlockAtCompileTime, Options_, StorageIndex_>::BlockInnerIterator {
 public:
  enum { Flags = Options_ };

  BlockInnerIterator(const BlockSparseMatrix& mat, const Index outer)
      : m_mat(mat), m_outer(outer), m_id(mat.m_outerIndex[outer]), m_end(mat.m_outerIndex[outer + 1]) {}

  inline BlockInnerIterator& operator++() {
    m_id++;
    return *this;
  }

  inline const Map<const BlockScalar> value() const {
    return Map<const BlockScalar>(&(m_mat.m_values[m_mat.blockPtr(m_id)]), rows(), cols());
  }
  inline Map<BlockScalar> valueRef() {
    return Map<BlockScalar>(&(m_mat.m_values[m_mat.blockPtr(m_id)]), rows(), cols());
  }
  // Block inner index
  inline Index index() const { return m_mat.m_indices[m_id]; }
  inline Index outer() const { return m_outer; }
  // Block row index
  inline Index row() const { return index(); }
  // Block column index
  inline Index col() const { return outer(); }
  // Number of rows in the current block
  inline Index rows() const {
    return (m_mat.m_blockSize == Dynamic) ? (m_mat.m_innerOffset[index() + 1] - m_mat.m_innerOffset[index()])
                                          : m_mat.m_blockSize;
  }
  // Number of columns in the current block
  inline Index cols() const {
    return (m_mat.m_blockSize == Dynamic) ? (m_mat.m_outerOffset[m_outer + 1] - m_mat.m_outerOffset[m_outer])
                                          : m_mat.m_blockSize;
  }
  inline operator bool() const { return (m_id < m_end); }

 protected:
  const BlockSparseMatrix<Scalar_, _BlockAtCompileTime, Options_, StorageIndex>& m_mat;
  const Index m_outer;
  Index m_id;
  Index m_end;
};
template <typename Scalar_, int _BlockAtCompileTime, int Options_, typename StorageIndex_>
class BlockSparseMatrix<Scalar_, _BlockAtCompileTime, Options_, StorageIndex_>::InnerIterator {
 public:
  InnerIterator(const BlockSparseMatrix& mat, Index outer)
      : m_mat(mat),
        m_outerB(mat.outerToBlock(outer)),
        m_outer(outer),
        itb(mat, mat.outerToBlock(outer)),
        m_offset(outer - mat.blockOuterIndex(m_outerB)) {
    if (itb) {
      m_id = m_mat.blockInnerIndex(itb.index());
      m_start = m_id;
      m_end = m_mat.blockInnerIndex(itb.index() + 1);
    }
  }
  inline InnerIterator& operator++() {
    m_id++;
    if (m_id >= m_end) {
      ++itb;
      if (itb) {
        m_id = m_mat.blockInnerIndex(itb.index());
        m_start = m_id;
        m_end = m_mat.blockInnerIndex(itb.index() + 1);
      }
    }
    return *this;
  }
  inline const Scalar& value() const { return itb.value().coeff(m_id - m_start, m_offset); }
  inline Scalar& valueRef() { return itb.valueRef().coeff(m_id - m_start, m_offset); }
  inline Index index() const { return m_id; }
  inline Index outer() const { return m_outer; }
  inline Index col() const { return outer(); }
  inline Index row() const { return index(); }
  inline operator bool() const { return itb; }

 protected:
  const BlockSparseMatrix& m_mat;
  const Index m_outer;
  const Index m_outerB;
  BlockInnerIterator itb;  // Iterator through the blocks
  const Index m_offset;    // Position of this column in the block
  Index m_start;           // Starting inner index of this block
  Index m_id;              // Current inner index in the block
  Index m_end;             // Starting inner index of the next block
};

}  // end namespace Eigen

#endif  // EIGEN_SPARSEBLOCKMATRIX_H