#ifndef EIGEN_SPARSEMATRIX_H
#define EIGEN_SPARSEMATRIX_H

#include "./InternalHeaderCheck.h"
template <typename Scalar_, int Options_, typename StorageIndex_>
struct traits<SparseMatrix<Scalar_, Options_, StorageIndex_>> {
  typedef Scalar_ Scalar;
  typedef StorageIndex_ StorageIndex;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
    // (compile-time size and flag constants elided)
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};
template <typename Scalar_, int Options_, typename StorageIndex_, int DiagIndex>
struct traits<Diagonal<SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex>> {
  typedef SparseMatrix<Scalar_, Options_, StorageIndex_> MatrixType;
  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
  typedef std::remove_reference_t<MatrixTypeNested> MatrixTypeNested_;

  typedef Scalar_ Scalar;
  typedef Dense StorageKind;
  typedef StorageIndex_ StorageIndex;
  typedef MatrixXpr XprKind;

  enum {
    // (other compile-time constants elided)
    ColsAtCompileTime = 1,
    MaxColsAtCompileTime = 1
  };
};
template <typename Scalar_, int Options_, typename StorageIndex_, int DiagIndex>
struct traits<Diagonal<const SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex>>
    : public traits<Diagonal<SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex>> {
  // (body elided)
};
template <typename StorageIndex>
struct sparse_reserve_op {
  EIGEN_DEVICE_FUNC sparse_reserve_op(Index begin, Index end, Index size) {
    Index range = numext::mini(end - begin, size);
    m_begin = begin;
    m_end = begin + range;
    m_val = StorageIndex(size / range);
    m_remainder = StorageIndex(size % range);
  }
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE StorageIndex operator()(IndexType i) const {
    if ((i >= m_begin) && (i < m_end))
      return m_val + ((i - m_begin) < m_remainder ? 1 : 0);
    else
      return 0;
  }
  StorageIndex m_val, m_remainder;
  Index m_begin, m_end;
};
template <typename Scalar>
struct functor_traits<sparse_reserve_op<Scalar>> {
  enum { Cost = 1, PacketAccess = false, IsRepeatable = true };
};
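
// Note: sparse_reserve_op is used below by insertUncompressedAtByOuterInner as a nullary
// functor that spreads a total reservation budget of `size` extra slots over the outer
// indices in [begin, begin + min(end - begin, size)): each of those vectors receives
// size / range additional slots, the first size % range of them receive one more, and
// indices outside that window receive none.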
template <typename Scalar_, int Options_, typename StorageIndex_>
class SparseMatrix : public SparseCompressedBase<SparseMatrix<Scalar_, Options_, StorageIndex_>> {
  typedef SparseCompressedBase<SparseMatrix> Base;
  using Base::convert_index;
  // internal::Assignment is a friend so that the assignment kernels can reach the protected helpers
  template <typename, typename, typename, typename, typename>
  friend struct internal::Assignment;

 public:
  EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
  using Base::operator+=;
  using Base::operator-=;

  typedef typename Base::InnerIterator InnerIterator;
  typedef typename Base::ReverseInnerIterator ReverseInnerIterator;

  using Base::IsRowMajor;
  typedef internal::CompressedStorage<Scalar, StorageIndex> Storage;
  enum { Options = Options_ };

  typedef typename Base::IndexVector IndexVector;
  typedef typename Base::ScalarVector ScalarVector;
  inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
  inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }

  constexpr Storage& data() { return m_data; }
  constexpr const Storage& data() const { return m_data; }

  inline Scalar coeff(Index row, Index col) const {
    const Index outer = IsRowMajor ? row : col;
    const Index inner = IsRowMajor ? col : row;
    Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer + 1];
    return m_data.atInRange(m_outerIndex[outer], end, inner);
  }
  inline Scalar& findOrInsertCoeff(Index row, Index col, bool* inserted) {
    const Index outer = IsRowMajor ? row : col;
    const Index inner = IsRowMajor ? col : row;
    Index start = m_outerIndex[outer];
    Index end = isCompressed() ? m_outerIndex[outer + 1] : m_outerIndex[outer] + m_innerNonZeros[outer];
    eigen_assert(end >= start && "you probably called coeffRef on a non finalized matrix");
    Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
    if (dst == end) {
      Index capacity = m_outerIndex[outer + 1] - end;
      if (capacity > 0) {
        // implies uncompressed mode: push to the back of the inner vector
        m_innerNonZeros[outer]++;
        m_data.index(end) = StorageIndex(inner);
        m_data.value(end) = Scalar(0);
        if (inserted != nullptr) {
          *inserted = true;
        }
        return m_data.value(end);
      }
    }
    if ((dst < end) && (m_data.index(dst) == inner)) {
      // the coefficient already exists: return a reference to it
      if (inserted != nullptr) {
        *inserted = false;
      }
      return m_data.value(dst);
    } else {
      if (inserted != nullptr) {
        *inserted = true;
      }
      return insertAtByOuterInner(outer, inner, dst);
    }
  }
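
  // Note: coeffRef(row, col) returns a reference to the coefficient and inserts a zero entry if
  // none exists yet; findOrInsertCoeff(row, col, inserted) does the same while reporting through
  // `inserted` whether a new entry was created; insert(row, col) instead asserts that the entry
  // does not already exist.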
  inline void setZero() {
    m_data.clear();
    std::fill_n(m_outerIndex, m_outerSize + 1, StorageIndex(0));
    if (m_innerNonZeros) {
      std::fill_n(m_innerNonZeros, m_outerSize, StorageIndex(0));
    }
  }

  inline void reserve(Index reserveSize) {
    eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
    m_data.reserve(reserveSize);
  }
#ifdef EIGEN_PARSED_BY_DOXYGEN
  template <class SizesType>
  inline void reserve(const SizesType& reserveSizes);
#else
  template <class SizesType>
  inline void reserve(const SizesType& reserveSizes,
                      const typename SizesType::value_type& enableif = typename SizesType::value_type()) {
    EIGEN_UNUSED_VARIABLE(enableif);
    reserveInnerVectors(reserveSizes);
  }
#endif  // EIGEN_PARSED_BY_DOXYGEN
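
  // Illustrative usage sketch (not part of the header), assuming a fresh compressed column-major
  // matrix; `rows`, `cols` and `nnzPerCol` are hypothetical names:
  //
  //   Eigen::SparseMatrix<double> A(rows, cols);
  //   A.reserve(Eigen::VectorXi::Constant(cols, nnzPerCol));  // room for ~nnzPerCol entries per column
  //   // ... A.insert(i, j) = v; for each non-duplicated (i, j) ...
  //   A.makeCompressed();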
  template <class SizesType>
  inline void reserveInnerVectors(const SizesType& reserveSizes) {
    if (isCompressed()) {
      Index totalReserveSize = 0;
      for (Index j = 0; j < m_outerSize; ++j) totalReserveSize += internal::convert_index<Index>(reserveSizes[j]);
      if (totalReserveSize == 0) return;

      // turn the matrix into non-compressed mode
      m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize);
      // temporarily use m_innerNonZeros to hold the new starting points
      StorageIndex* newOuterIndex = m_innerNonZeros;

      Index count = 0;
      for (Index j = 0; j < m_outerSize; ++j) {
        newOuterIndex[j] = internal::convert_index<StorageIndex>(count);
        Index reserveSize = internal::convert_index<Index>(reserveSizes[j]);
        count += reserveSize + internal::convert_index<Index>(m_outerIndex[j + 1] - m_outerIndex[j]);
      }

      m_data.reserve(totalReserveSize);
      StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
      for (Index j = m_outerSize - 1; j >= 0; --j) {
        StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
        StorageIndex begin = m_outerIndex[j];
        StorageIndex end = begin + innerNNZ;
        StorageIndex target = newOuterIndex[j];
        // ... (move the existing entries in [begin, end) to their new position at `target`)
        previousOuterIndex = m_outerIndex[j];
        m_outerIndex[j] = newOuterIndex[j];
        m_innerNonZeros[j] = innerNNZ;
      }
      m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize - 1] + m_innerNonZeros[m_outerSize - 1] +
                                  internal::convert_index<StorageIndex>(reserveSizes[m_outerSize - 1]);
      m_data.resize(m_outerIndex[m_outerSize]);
    } else {
      StorageIndex* newOuterIndex = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize + 1);

      Index count = 0;
      for (Index j = 0; j < m_outerSize; ++j) {
        newOuterIndex[j] = internal::convert_index<StorageIndex>(count);
        Index alreadyReserved =
            internal::convert_index<Index>(m_outerIndex[j + 1] - m_outerIndex[j] - m_innerNonZeros[j]);
        Index reserveSize = internal::convert_index<Index>(reserveSizes[j]);
        Index toReserve = numext::maxi(reserveSize, alreadyReserved);
        count += toReserve + internal::convert_index<Index>(m_innerNonZeros[j]);
      }
      newOuterIndex[m_outerSize] = internal::convert_index<StorageIndex>(count);

      m_data.resize(count);
      for (Index j = m_outerSize - 1; j >= 0; --j) {
        Index innerNNZ = m_innerNonZeros[j];
        StorageIndex begin = m_outerIndex[j];
        StorageIndex target = newOuterIndex[j];
        m_data.moveChunk(begin, target, innerNNZ);
      }

      std::swap(m_outerIndex, newOuterIndex);
      internal::conditional_aligned_delete_auto<StorageIndex, true>(newOuterIndex, m_outerSize + 1);
    }
  }
  inline Scalar& insertBack(Index row, Index col) {
    return insertBackByOuterInner(IsRowMajor ? row : col, IsRowMajor ? col : row);
  }

  inline Scalar& insertBackByOuterInner(Index outer, Index inner) {
    eigen_assert(Index(m_outerIndex[outer + 1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
    eigen_assert((m_outerIndex[outer + 1] - m_outerIndex[outer] == 0 || m_data.index(m_data.size() - 1) < inner) &&
                 "Invalid ordered insertion (invalid inner index)");
    Index p = m_outerIndex[outer + 1];
    ++m_outerIndex[outer + 1];
    m_data.append(Scalar(0), inner);
    return m_data.value(p);
  }

  inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner) {
    Index p = m_outerIndex[outer + 1];
    ++m_outerIndex[outer + 1];
    m_data.append(Scalar(0), inner);
    return m_data.value(p);
  }
  inline void startVec(Index outer) {
    eigen_assert(m_outerIndex[outer] == Index(m_data.size()) &&
                 "You must call startVec for each inner vector sequentially");
    eigen_assert(m_outerIndex[outer + 1] == 0 && "You must call startVec for each inner vector sequentially");
    m_outerIndex[outer + 1] = m_outerIndex[outer];
  }

  inline void finalize() {
    if (isCompressed()) {
      StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
      Index i = m_outerSize;
      // find the last filled inner vector
      while (i >= 0 && m_outerIndex[i] == 0) --i;
      ++i;
      while (i <= m_outerSize) {
        m_outerIndex[i] = size;
        ++i;
      }
    }
  }
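
  // Illustrative usage sketch (not part of the header): the low-level ordered-insertion API.
  // Inner vectors must be started in order and entries appended with strictly increasing inner
  // indices; setFromTriplets() or insert() is usually the more convenient route.
  //
  //   Eigen::SparseMatrix<double> A(rows, cols);   // column-major: one startVec per column
  //   A.reserve(estimatedNonZeros);
  //   for (int j = 0; j < cols; ++j) {
  //     A.startVec(j);
  //     // for each row i of column j, in increasing order:
  //     A.insertBack(i, j) = v;
  //   }
  //   A.finalize();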
  // Remove the outer vectors j, j+1, ..., j+num-1 and shrink the matrix accordingly.
  void removeOuterVectors(Index j, Index num = 1) {
    eigen_assert(num >= 0 && j >= 0 && j + num <= m_outerSize && "Invalid parameters");

    const Index newRows = IsRowMajor ? m_outerSize - num : rows();
    const Index newCols = IsRowMajor ? cols() : m_outerSize - num;

    const Index begin = j + num;
    const Index end = m_outerSize;
    const Index target = j;

    // if the removed vectors are not empty, uncompress the matrix
    if (m_outerIndex[j + num] > m_outerIndex[j]) uncompress();

    // shift the outer index (and, if uncompressed, the non-zero counts) to the left
    internal::smart_memmove(m_outerIndex + begin, m_outerIndex + end + 1, m_outerIndex + target);
    if (!isCompressed())
      internal::smart_memmove(m_innerNonZeros + begin, m_innerNonZeros + end, m_innerNonZeros + target);

    // rebase the data if the first kept vector no longer starts at position 0
    if (m_outerIndex[0] > StorageIndex(0)) {
      const Index from = internal::convert_index<Index>(m_outerIndex[0]);
      const Index to = Index(0);
      const Index chunkSize = internal::convert_index<Index>(m_innerNonZeros[0]);
      m_data.moveChunk(from, to, chunkSize);
      m_outerIndex[0] = StorageIndex(0);
    }

    conservativeResize(newRows, newCols);
  }
  // Insert `num` empty outer vectors starting at index `j`.
  void insertEmptyOuterVectors(Index j, Index num = 1) {
    EIGEN_USING_STD(fill_n)
    eigen_assert(num >= 0 && j >= 0 && j < m_outerSize && "Invalid parameters");

    const Index newRows = IsRowMajor ? m_outerSize + num : rows();
    const Index newCols = IsRowMajor ? cols() : m_outerSize + num;

    const Index begin = j;
    const Index end = m_outerSize;
    const Index target = j + num;

    // grow the matrix to the larger size first
    conservativeResize(newRows, newCols);

    // shift the outer index to the right and make the new vectors empty
    internal::smart_memmove(m_outerIndex + begin, m_outerIndex + end + 1, m_outerIndex + target);
    fill_n(m_outerIndex + begin, num, m_outerIndex[begin]);

    if (!isCompressed()) {
      internal::smart_memmove(m_innerNonZeros + begin, m_innerNonZeros + end, m_innerNonZeros + target);
      fill_n(m_innerNonZeros + begin, num, StorageIndex(0));
    }
  }
  template <typename InputIterators>
  void setFromTriplets(const InputIterators& begin, const InputIterators& end);

  template <typename InputIterators, typename DupFunctor>
  void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);

  template <typename Derived, typename DupFunctor>
  void collapseDuplicates(DenseBase<Derived>& wi, DupFunctor dup_func);

  template <typename InputIterators>
  void setFromSortedTriplets(const InputIterators& begin, const InputIterators& end);

  template <typename InputIterators, typename DupFunctor>
  void setFromSortedTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);

  template <typename InputIterators>
  void insertFromTriplets(const InputIterators& begin, const InputIterators& end);

  template <typename InputIterators, typename DupFunctor>
  void insertFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);

  template <typename InputIterators>
  void insertFromSortedTriplets(const InputIterators& begin, const InputIterators& end);

  template <typename InputIterators, typename DupFunctor>
  void insertFromSortedTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
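
  // Illustrative usage sketch (not part of the header): filling a matrix from a triplet list;
  // duplicated (i, j) pairs are combined with the functor (summed by default).
  //
  //   std::vector<Eigen::Triplet<double>> triplets;
  //   triplets.emplace_back(0, 0, 1.0);
  //   triplets.emplace_back(2, 1, 3.5);
  //   triplets.emplace_back(2, 1, 0.5);                       // duplicate of (2, 1)
  //   Eigen::SparseMatrix<double> A(3, 2);
  //   A.setFromTriplets(triplets.begin(), triplets.end());    // A.coeff(2, 1) == 4.0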
  Scalar& insertByOuterInner(Index j, Index i) {
    eigen_assert(j >= 0 && j < m_outerSize && "invalid outer index");
    eigen_assert(i >= 0 && i < m_innerSize && "invalid inner index");
    Index start = m_outerIndex[j];
    Index end = isCompressed() ? m_outerIndex[j + 1] : start + m_innerNonZeros[j];
    Index dst = start == end ? end : m_data.searchLowerIndex(start, end, i);
    if (dst == end) {
      Index capacity = m_outerIndex[j + 1] - end;
      if (capacity > 0) {
        // implies uncompressed mode: push to the back of the inner vector
        m_innerNonZeros[j]++;
        m_data.index(end) = StorageIndex(i);
        m_data.value(end) = Scalar(0);
        return m_data.value(end);
      }
    }
    eigen_assert((dst == end || m_data.index(dst) != i) &&
                 "you cannot insert an element that already exists, you must call coeffRef to this end");
    return insertAtByOuterInner(j, i, dst);
  }
  void makeCompressed() {
    if (isCompressed()) return;

    eigen_internal_assert(m_outerIndex != 0 && m_outerSize > 0);

    StorageIndex start = m_outerIndex[1];
    m_outerIndex[1] = m_innerNonZeros[0];
    // move the data in as few, large contiguous chunks as possible
    Index copyStart = start;
    Index copyTarget = m_innerNonZeros[0];
    for (Index j = 1; j < m_outerSize; j++) {
      StorageIndex end = start + m_innerNonZeros[j];
      StorageIndex nextStart = m_outerIndex[j + 1];
      // the chunk must also be flushed at the last vector
      bool breakUpCopy = (end != nextStart) || (j == m_outerSize - 1);
      if (breakUpCopy) {
        Index chunkSize = end - copyStart;
        if (chunkSize > 0) m_data.moveChunk(copyStart, copyTarget, chunkSize);
        copyStart = nextStart;
        copyTarget += chunkSize;
      }
      start = nextStart;
      m_outerIndex[j + 1] = m_outerIndex[j] + m_innerNonZeros[j];
    }
    m_data.resize(m_outerIndex[m_outerSize]);

    // release the now-unused per-vector non-zero counts
    internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
    m_innerNonZeros = 0;
  }
  void uncompress() {
    if (!isCompressed()) return;
    m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize);
    if (m_outerIndex[m_outerSize] == 0)
      std::fill_n(m_innerNonZeros, m_outerSize, StorageIndex(0));
    else
      for (Index j = 0; j < m_outerSize; j++) m_innerNonZeros[j] = m_outerIndex[j + 1] - m_outerIndex[j];
  }
  void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision()) {
    prune(default_prunning_func(reference, epsilon));
  }

  template <typename KeepFunc>
  void prune(const KeepFunc& keep = KeepFunc()) {
    StorageIndex k = 0;
    for (Index j = 0; j < m_outerSize; ++j) {
      // ... (scan the stored entries i of inner vector j, with k tracking the write position)
      bool keepEntry = keep(row, col, m_data.value(i));
      if (keepEntry) {
        m_data.value(k) = m_data.value(i);
        m_data.index(k) = m_data.index(i);
        ++k;
      } else if (!isCompressed()) {
        m_innerNonZeros[j]--;
      }
    }
    m_outerIndex[m_outerSize] = k;
    m_data.resize(k);
  }
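
  // Illustrative usage sketch (not part of the header): removing stored entries that are exact
  // zeros, or that fail a custom predicate.
  //
  //   A.prune(0.0);   // drop explicitly stored zeros
  //   A.prune([](Eigen::Index, Eigen::Index, double v) { return std::abs(v) > 1e-12; });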
  void conservativeResize(Index rows, Index cols) {
    // (preliminary checks elided)
    EIGEN_USING_STD(fill_n)
    const Index newInnerSize = IsRowMajor ? cols : rows;
    const Index newOuterSize = IsRowMajor ? rows : cols;

    Index innerChange = newInnerSize - m_innerSize;
    Index outerChange = newOuterSize - m_outerSize;

    if (outerChange != 0) {
      m_outerIndex = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_outerIndex, newOuterSize + 1,
                                                                                        m_outerSize + 1);
      if (!isCompressed())
        m_innerNonZeros = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_innerNonZeros,
                                                                                             newOuterSize, m_outerSize);
      if (outerChange > 0) {
        StorageIndex lastIdx = m_outerSize == 0 ? StorageIndex(0) : m_outerIndex[m_outerSize];
        fill_n(m_outerIndex + m_outerSize, outerChange + 1, lastIdx);
        if (!isCompressed()) fill_n(m_innerNonZeros + m_outerSize, outerChange, StorageIndex(0));
      }
    }
    m_outerSize = newOuterSize;

    if (innerChange < 0) {
      // the inner dimension shrinks: drop the entries whose inner index is now out of range
      for (Index j = 0; j < m_outerSize; j++) {
        Index start = m_outerIndex[j];
        Index end = isCompressed() ? m_outerIndex[j + 1] : start + m_innerNonZeros[j];
        Index lb = m_data.searchLowerIndex(start, end, newInnerSize);
        // ... (compact the kept entries and update the outer index / non-zero counts using lb)
      }
    }
    m_innerSize = newInnerSize;

    Index newSize = m_outerIndex[m_outerSize];
    eigen_assert(newSize <= m_data.size());
    m_data.resize(newSize);
  }
  void resize(Index rows, Index cols) {
    const Index outerSize = IsRowMajor ? rows : cols;
    m_innerSize = IsRowMajor ? cols : rows;
    m_data.clear();

    if ((m_outerIndex == 0) || (m_outerSize != outerSize)) {
      m_outerIndex = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_outerIndex, outerSize + 1,
                                                                                        m_outerSize + 1);
      m_outerSize = outerSize;
    }

    internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
    m_innerNonZeros = 0;

    std::fill_n(m_outerIndex, m_outerSize + 1, StorageIndex(0));
  }
  const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }

  DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }
  inline SparseMatrix() : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { resize(0, 0); }
  template <typename OtherDerived>
  inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
    EIGEN_STATIC_ASSERT(
        (internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
    EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
#endif
    internal::call_assignment_no_alias(*this, other.derived());
  }
  template <typename OtherDerived, unsigned int UpLo>
  inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
    Base::operator=(other);
  }

  inline SparseMatrix(SparseMatrix&& other) : SparseMatrix() { this->swap(other); }

  template <typename OtherDerived>
  inline SparseMatrix(SparseCompressedBase<OtherDerived>&& other) : SparseMatrix() {
    *this = other.derived().markAsRValue();
  }
  inline SparseMatrix(const SparseMatrix& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
    *this = other.derived();
  }

  /** Copy constructor with in-place evaluation */
  template <typename OtherDerived>
  SparseMatrix(const ReturnByValue<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
    initAssignment(other);
    other.evalTo(derived());
  }

  /** Copy constructor with in-place evaluation */
  template <typename OtherDerived>
  SparseMatrix(const DiagonalBase<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
    *this = other.derived();
  }
  inline void swap(SparseMatrix& other) {
    std::swap(m_outerIndex, other.m_outerIndex);
    std::swap(m_innerSize, other.m_innerSize);
    std::swap(m_outerSize, other.m_outerSize);
    std::swap(m_innerNonZeros, other.m_innerNonZeros);
    m_data.swap(other.m_data);
  }
  inline void setIdentity() {
    eigen_assert(m_outerSize == m_innerSize && "ONLY FOR SQUARED MATRICES");
    internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
    m_innerNonZeros = 0;
    m_data.resize(m_outerSize);
    std::iota(m_outerIndex, m_outerIndex + m_outerSize + 1, StorageIndex(0));
    std::iota(innerIndexPtr(), innerIndexPtr() + m_outerSize, StorageIndex(0));
    std::fill_n(valuePtr(), m_outerSize, Scalar(1));
  }
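
  // Illustrative usage sketch (not part of the header): setIdentity() turns a square sparse
  // matrix into the identity, storing exactly one entry per row/column.
  //
  //   Eigen::SparseMatrix<double> I(5, 5);
  //   I.setIdentity();   // I.nonZeros() == 5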
  inline SparseMatrix& operator=(const SparseMatrix& other) {
    if (other.isRValue()) {
      swap(other.const_cast_derived());
    } else if (this != &other) {
#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
      EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
#endif
      initAssignment(other);
      if (other.isCompressed()) {
        internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
        m_data = other.m_data;
      } else {
        Base::operator=(other);
      }
    }
    return *this;
  }
  template <typename OtherDerived>
  inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other) {
    return Base::operator=(other.derived());
  }

  template <typename Lhs, typename Rhs>
  inline SparseMatrix& operator=(const Product<Lhs, Rhs, AliasFreeProduct>& other);

  template <typename OtherDerived>
  EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);

  template <typename OtherDerived>
  SparseMatrix& operator=(SparseCompressedBase<OtherDerived>&& other) {
    *this = other.derived().markAsRValue();
    return *this;
  }
  friend std::ostream& operator<<(std::ostream& s, const SparseMatrix& m) {
    s << "Nonzero entries:\n";
    if (m.isCompressed()) {
      for (Index i = 0; i < m.nonZeros(); ++i) s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
    } else {
      for (Index i = 0; i < m.outerSize(); ++i) {
        Index p = m.m_outerIndex[i];
        Index pe = m.m_outerIndex[i] + m.m_innerNonZeros[i];
        Index k = p;
        for (; k < pe; ++k) {
          s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
        }
        for (; k < m.m_outerIndex[i + 1]; ++k) {
          s << "(_,_) ";
        }
      }
    }
    s << std::endl;
    s << "Outer pointers:\n";
    for (Index i = 0; i < m.outerSize(); ++i) {
      s << m.m_outerIndex[i] << " ";
    }
    s << " $" << std::endl;
    if (!m.isCompressed()) {
      s << "Inner non zeros:\n";
      for (Index i = 0; i < m.outerSize(); ++i) {
        s << m.m_innerNonZeros[i] << " ";
      }
      s << " $" << std::endl;
    }
    s << std::endl;
    s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
    return s;
  }
  ~SparseMatrix() {
    internal::conditional_aligned_delete_auto<StorageIndex, true>(m_outerIndex, m_outerSize + 1);
    internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
  }

#ifdef EIGEN_SPARSEMATRIX_PLUGIN
#include EIGEN_SPARSEMATRIX_PLUGIN
#endif
 protected:
  template <typename Other>
  void initAssignment(const Other& other) {
    resize(other.rows(), other.cols());
    internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
    m_innerNonZeros = 0;
  }

  EIGEN_DEPRECATED EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);

  class SingletonVector {
    StorageIndex m_index;
    StorageIndex m_value;

   public:
    typedef StorageIndex value_type;
    SingletonVector(Index i, Index v) : m_index(convert_index(i)), m_value(convert_index(v)) {}

    StorageIndex operator[](Index i) const { return i == m_index ? m_value : 0; }
  };

  EIGEN_DEPRECATED EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);

 public:
  EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col) {
    const Index outer = IsRowMajor ? row : col;
    const Index inner = IsRowMajor ? col : row;

    eigen_assert(!isCompressed());
    eigen_assert(m_innerNonZeros[outer] <= (m_outerIndex[outer + 1] - m_outerIndex[outer]));

    Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
    m_data.index(p) = StorageIndex(inner);
    m_data.value(p) = Scalar(0);
    return m_data.value(p);
  }

 protected:
  struct IndexPosPair {
    IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
    Index i;
    Index p;
  };
  template <typename DiagXpr, typename Func>
  void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc) {
    constexpr StorageIndex kEmptyIndexVal(-1);
    typedef typename ScalarVector::AlignedMapType ValueMap;

    Index n = diagXpr.size();

    const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar, Scalar>>::value;
    if (overwrite) {
      if ((m_outerSize != n) || (m_innerSize != n)) resize(n, n);
    }

    if (m_data.size() == 0 || overwrite) {
      // the matrix is empty or fully overwritten: build a fresh diagonal pattern
      internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
      m_innerNonZeros = 0;
      resizeNonZeros(n);
      ValueMap valueMap(valuePtr(), n);
      std::iota(m_outerIndex, m_outerIndex + n + 1, StorageIndex(0));
      std::iota(innerIndexPtr(), innerIndexPtr() + n, StorageIndex(0));
      valueMap.setZero();
      internal::call_assignment_no_alias(valueMap, diagXpr, assignFunc);
    } else {
      internal::evaluator<DiagXpr> diaEval(diagXpr);

      ei_declare_aligned_stack_constructed_variable(StorageIndex, tmp, n, 0);
      typename IndexVector::AlignedMapType insertionLocations(tmp, n);
      insertionLocations.setConstant(kEmptyIndexVal);

      Index deferredInsertions = 0;
      Index shift = 0;

      for (Index j = 0; j < n; j++) {
        Index begin = m_outerIndex[j];
        Index end = isCompressed() ? m_outerIndex[j + 1] : begin + m_innerNonZeros[j];
        Index capacity = m_outerIndex[j + 1] - end;
        Index dst = m_data.searchLowerIndex(begin, end, j);
        // the entry exists: update it now
        if (dst != end && m_data.index(dst) == StorageIndex(j))
          assignFunc.assignCoeff(m_data.value(dst), diaEval.coeff(j));
        // the entry belongs at the back of the vector and there is room: insert it now
        else if (dst == end && capacity > 0)
          assignFunc.assignCoeff(insertBackUncompressed(j, j), diaEval.coeff(j));
        // the insertion requires a data move: record the location and defer it to the second pass
        else {
          insertionLocations.coeffRef(j) = StorageIndex(dst);
          deferredInsertions++;
          if (capacity == 0) shift++;
        }
      }

      if (deferredInsertions > 0) {
        m_data.resize(m_data.size() + shift);
        Index copyEnd = isCompressed() ? m_outerIndex[m_outerSize]
                                       : m_outerIndex[m_outerSize - 1] + m_innerNonZeros[m_outerSize - 1];
        for (Index j = m_outerSize - 1; deferredInsertions > 0; j--) {
          Index begin = m_outerIndex[j];
          Index end = isCompressed() ? m_outerIndex[j + 1] : begin + m_innerNonZeros[j];
          Index capacity = m_outerIndex[j + 1] - end;

          bool doInsertion = insertionLocations(j) >= 0;
          bool breakUpCopy = doInsertion && (capacity > 0);
          if (breakUpCopy) {
            Index copyBegin = m_outerIndex[j + 1];
            Index to = copyBegin + shift;
            Index chunkSize = copyEnd - copyBegin;
            m_data.moveChunk(copyBegin, to, chunkSize);
            copyEnd = end;
          }

          m_outerIndex[j + 1] += shift;

          if (doInsertion) {
            if (capacity > 0) shift++;
            Index copyBegin = insertionLocations(j);
            Index to = copyBegin + shift;
            Index chunkSize = copyEnd - copyBegin;
            m_data.moveChunk(copyBegin, to, chunkSize);
            Index dst = to - 1;
            m_data.index(dst) = StorageIndex(j);
            m_data.value(dst) = Scalar(0);
            assignFunc.assignCoeff(m_data.value(dst), diaEval.coeff(j));
            if (!isCompressed()) m_innerNonZeros[j]++;
            shift--;
            deferredInsertions--;
            copyEnd = copyBegin;
          }
        }
      }
      eigen_assert((shift == 0) && (deferredInsertions == 0));
    }
  }
  EIGEN_STRONG_INLINE Scalar& insertAtByOuterInner(Index outer, Index inner, Index dst);
  Scalar& insertCompressedAtByOuterInner(Index outer, Index inner, Index dst);
  Scalar& insertUncompressedAtByOuterInner(Index outer, Index inner, Index dst);

 private:
  EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned, THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE)
  EIGEN_STATIC_ASSERT((Options & (ColMajor | RowMajor)) == Options, INVALID_MATRIX_TEMPLATE_PARAMETERS)

  struct default_prunning_func {
    default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
    inline bool operator()(const Index&, const Index&, const Scalar& value) const {
      return !internal::isMuchSmallerThan(value, reference, epsilon);
    }
    Scalar reference;
    RealScalar epsilon;
  };
};  // end of class SparseMatrix
template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
                       DupFunctor dup_func) {
  constexpr bool IsRowMajor = SparseMatrixType::IsRowMajor;
  using StorageIndex = typename SparseMatrixType::StorageIndex;
  using IndexMap = typename VectorX<StorageIndex>::AlignedMapType;
  using TransposedSparseMatrix =
      SparseMatrix<typename SparseMatrixType::Scalar, IsRowMajor ? ColMajor : RowMajor, StorageIndex>;

  if (begin == end) return;

  // build an intermediate matrix with the opposite storage order; the assignment back to `mat`
  // then sorts the entries implicitly
  TransposedSparseMatrix trmat(mat.rows(), mat.cols());

  // scan the triplets to determine the allocation size before constructing the matrix
  Index nonZeros = 0;
  for (InputIterator it(begin); it != end; ++it) {
    eigen_assert(it->row() >= 0 && it->row() < mat.rows() && it->col() >= 0 && it->col() < mat.cols());
    StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
    if (nonZeros == NumTraits<StorageIndex>::highest()) internal::throw_std_bad_alloc();
    trmat.outerIndexPtr()[j + 1]++;
    nonZeros++;
  }

  std::partial_sum(trmat.outerIndexPtr(), trmat.outerIndexPtr() + trmat.outerSize() + 1, trmat.outerIndexPtr());
  eigen_assert(nonZeros == trmat.outerIndexPtr()[trmat.outerSize()]);
  trmat.resizeNonZeros(nonZeros);

  // temporary array used to track insertions (outer size) and to collapse duplicates (inner size)
  ei_declare_aligned_stack_constructed_variable(StorageIndex, tmp, numext::maxi(mat.innerSize(), mat.outerSize()), 0);
  smart_copy(trmat.outerIndexPtr(), trmat.outerIndexPtr() + trmat.outerSize(), tmp);

  // push the triplets to the back of each inner vector
  for (InputIterator it(begin); it != end; ++it) {
    StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
    StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
    StorageIndex k = tmp[j];
    trmat.data().index(k) = i;
    trmat.data().value(k) = it->value();
    tmp[j]++;
  }

  IndexMap wi(tmp, trmat.innerSize());
  trmat.collapseDuplicates(wi, dup_func);
  mat = trmat;
}
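
// Note: assigning the transposed intermediate `trmat` back to `mat` flips the storage order;
// that transposing assignment visits the entries bucket by bucket in increasing index order,
// which is what leaves each inner vector of `mat` sorted without an explicit sort of the
// triplets.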
template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void set_from_triplets_sorted(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
                              DupFunctor dup_func) {
  constexpr bool IsRowMajor = SparseMatrixType::IsRowMajor;
  using StorageIndex = typename SparseMatrixType::StorageIndex;

  if (begin == end) return;

  constexpr StorageIndex kEmptyIndexValue(-1);
  // deallocate the inner non-zero counts (if present) and zero the outer index
  mat.resize(mat.rows(), mat.cols());
  StorageIndex previous_j = kEmptyIndexValue;
  StorageIndex previous_i = kEmptyIndexValue;
  // scan the triplets to determine the allocation size, skipping duplicates
  Index nonZeros = 0;
  for (InputIterator it(begin); it != end; ++it) {
    eigen_assert(it->row() >= 0 && it->row() < mat.rows() && it->col() >= 0 && it->col() < mat.cols());
    StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
    StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
    eigen_assert(j > previous_j || (j == previous_j && i >= previous_i));
    // identify duplicates by comparing with the previous location
    bool duplicate = (previous_j == j) && (previous_i == i);
    if (!duplicate) {
      if (nonZeros == NumTraits<StorageIndex>::highest()) internal::throw_std_bad_alloc();
      nonZeros++;
      mat.outerIndexPtr()[j + 1]++;
      previous_j = j;
      previous_i = i;
    }
  }

  // finalize the outer indices and allocate the storage
  std::partial_sum(mat.outerIndexPtr(), mat.outerIndexPtr() + mat.outerSize() + 1, mat.outerIndexPtr());
  eigen_assert(nonZeros == mat.outerIndexPtr()[mat.outerSize()]);
  mat.resizeNonZeros(nonZeros);

  previous_i = kEmptyIndexValue;
  previous_j = kEmptyIndexValue;
  Index back = 0;
  for (InputIterator it(begin); it != end; ++it) {
    StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
    StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
    bool duplicate = (previous_j == j) && (previous_i == i);
    if (duplicate) {
      mat.data().value(back - 1) = dup_func(mat.data().value(back - 1), it->value());
    } else {
      // push the triplet to the back of its inner vector
      mat.data().index(back) = i;
      mat.data().value(back) = it->value();
      previous_j = j;
      previous_i = i;
      back++;
    }
  }
  eigen_assert(back == nonZeros);
}
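
// Note: unlike set_from_triplets, the *_sorted variant requires the triplets to already be
// ordered by outer index and then by inner index (as asserted above), so it can write the
// entries directly into `mat` in two linear passes without the transposed temporary.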
template <typename DupFunctor, typename LhsScalar, typename RhsScalar = LhsScalar>
struct scalar_disjunction_op {
  using result_type = typename result_of<DupFunctor(LhsScalar, RhsScalar)>::type;
  scalar_disjunction_op(const DupFunctor& op) : m_functor(op) {}
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator()(const LhsScalar& a, const RhsScalar& b) const {
    return m_functor(a, b);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const DupFunctor& functor() const { return m_functor; }
  const DupFunctor& m_functor;
};

template <typename DupFunctor, typename LhsScalar, typename RhsScalar>
struct functor_traits<scalar_disjunction_op<DupFunctor, LhsScalar, RhsScalar>> : public functor_traits<DupFunctor> {};
template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void insert_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
                          DupFunctor dup_func) {
  using Scalar = typename SparseMatrixType::Scalar;
  using SrcXprType =
      CwiseBinaryOp<scalar_disjunction_op<DupFunctor, Scalar>, const SparseMatrixType, const SparseMatrixType>;

  // set_from_triplets sorts the inner indices and removes the duplicate entries
  SparseMatrixType trips(mat.rows(), mat.cols());
  set_from_triplets(begin, end, trips, dup_func);

  SrcXprType src = mat.binaryExpr(trips, scalar_disjunction_op<DupFunctor, Scalar>(dup_func));
  // the sparse assignment procedure creates a temporary matrix and swaps in the final result
  assign_sparse_to_sparse<SparseMatrixType, SrcXprType>(mat, src);
}
template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void insert_from_triplets_sorted(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
                                 DupFunctor dup_func) {
  using Scalar = typename SparseMatrixType::Scalar;
  using SrcXprType =
      CwiseBinaryOp<scalar_disjunction_op<DupFunctor, Scalar>, const SparseMatrixType, const SparseMatrixType>;

  SparseMatrixType trips(mat.rows(), mat.cols());
  set_from_triplets_sorted(begin, end, trips, dup_func);

  SrcXprType src = mat.binaryExpr(trips, scalar_disjunction_op<DupFunctor, Scalar>(dup_func));
  // the sparse assignment procedure creates a temporary matrix and swaps in the final result
  assign_sparse_to_sparse<SparseMatrixType, SrcXprType>(mat, src);
}
template <typename Scalar, int Options_, typename StorageIndex_>
template <typename InputIterators>
void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromTriplets(const InputIterators& begin,
                                                                    const InputIterators& end) {
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
      begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
}
template <typename Scalar, int Options_, typename StorageIndex_>
template <typename InputIterators, typename DupFunctor>
void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromTriplets(const InputIterators& begin,
                                                                    const InputIterators& end, DupFunctor dup_func) {
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
      begin, end, *this, dup_func);
}
template <typename Scalar, int Options_, typename StorageIndex_>
template <typename InputIterators>
void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromSortedTriplets(const InputIterators& begin,
                                                                          const InputIterators& end) {
  internal::set_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
      begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
}
template <typename Scalar, int Options_, typename StorageIndex_>
template <typename InputIterators, typename DupFunctor>
void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromSortedTriplets(const InputIterators& begin,
                                                                          const InputIterators& end,
                                                                          DupFunctor dup_func) {
  internal::set_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
      begin, end, *this, dup_func);
}
template <typename Scalar, int Options_, typename StorageIndex_>
template <typename InputIterators>
void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromTriplets(const InputIterators& begin,
                                                                       const InputIterators& end) {
  internal::insert_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
      begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
}
template <typename Scalar, int Options_, typename StorageIndex_>
template <typename InputIterators, typename DupFunctor>
void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromTriplets(const InputIterators& begin,
                                                                       const InputIterators& end,
                                                                       DupFunctor dup_func) {
  internal::insert_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
      begin, end, *this, dup_func);
}
template <typename Scalar, int Options_, typename StorageIndex_>
template <typename InputIterators>
void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromSortedTriplets(const InputIterators& begin,
                                                                             const InputIterators& end) {
  internal::insert_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
      begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
}
template <typename Scalar, int Options_, typename StorageIndex_>
template <typename InputIterators, typename DupFunctor>
void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromSortedTriplets(const InputIterators& begin,
                                                                             const InputIterators& end,
                                                                             DupFunctor dup_func) {
  internal::insert_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
      begin, end, *this, dup_func);
}
template <typename Scalar_, int Options_, typename StorageIndex_>
template <typename Derived, typename DupFunctor>
void SparseMatrix<Scalar_, Options_, StorageIndex_>::collapseDuplicates(DenseBase<Derived>& wi, DupFunctor dup_func) {
  eigen_assert(wi.size() == m_innerSize);
  constexpr StorageIndex kEmptyIndexValue(-1);
  wi.setConstant(kEmptyIndexValue);
  StorageIndex count = 0;
  const bool is_compressed = isCompressed();
  // for each inner vector, wi[i] holds the write position of the first entry with inner index i
  for (Index j = 0; j < m_outerSize; ++j) {
    const StorageIndex newBegin = count;
    const StorageIndex end = is_compressed ? m_outerIndex[j + 1] : m_outerIndex[j] + m_innerNonZeros[j];
    for (StorageIndex k = m_outerIndex[j]; k < end; ++k) {
      StorageIndex i = m_data.index(k);
      if (wi(i) >= newBegin) {
        // entry k is a duplicate within this vector: accumulate it into the first occurrence
        m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
      } else {
        // first occurrence of inner index i in this vector: keep it
        m_data.index(count) = i;
        m_data.value(count) = m_data.value(k);
        wi(i) = count;
        ++count;
      }
    }
    m_outerIndex[j] = newBegin;
  }
  m_outerIndex[m_outerSize] = count;
  m_data.resize(count);

  // the matrix is now in compressed form
  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
  m_innerNonZeros = 0;
}
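
// Note: collapseDuplicates expects `wi` to provide one slot per inner index. It combines
// duplicate entries with `dup_func`, compacts the storage in place, and leaves the matrix in
// compressed mode; it does not sort the inner indices.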
template <typename Scalar, int Options_, typename StorageIndex_>
template <typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix<Scalar, Options_, StorageIndex_>&
SparseMatrix<Scalar, Options_, StorageIndex_>::operator=(const SparseMatrixBase<OtherDerived>& other) {
  EIGEN_STATIC_ASSERT(
      (internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
      YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
#endif

  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
  if (needToTranspose) {
#ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
    EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
#endif
    // two-pass algorithm: first count the entries per destination inner vector, then copy;
    // each coefficient of the rhs is evaluated twice, so evaluate it into a temporary if needed
    typedef
        typename internal::nested_eval<OtherDerived, 2, typename internal::plain_matrix_type<OtherDerived>::type>::type
            OtherCopy;
    typedef internal::remove_all_t<OtherCopy> OtherCopy_;
    typedef internal::evaluator<OtherCopy_> OtherCopyEval;
    OtherCopy otherCopy(other.derived());
    OtherCopyEval otherCopyEval(otherCopy);

    SparseMatrix dest(other.rows(), other.cols());

    // pass 1: count the number of coefficients per destination inner vector
    for (Index j = 0; j < otherCopy.outerSize(); ++j)
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it) ++dest.m_outerIndex[it.index()];

    // prefix sum to build the destination outer index
    StorageIndex count = 0;
    IndexVector positions(dest.outerSize());
    for (Index j = 0; j < dest.outerSize(); ++j) {
      StorageIndex tmp = dest.m_outerIndex[j];
      dest.m_outerIndex[j] = count;
      positions[j] = count;
      count += tmp;
    }
    dest.m_outerIndex[dest.outerSize()] = count;
    dest.m_data.resize(count);

    // pass 2: copy the coefficients to their final position
    for (StorageIndex j = 0; j < otherCopy.outerSize(); ++j) {
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it) {
        Index pos = positions[it.index()]++;
        dest.m_data.index(pos) = j;
        dest.m_data.value(pos) = it.value();
      }
    }
    this->swap(dest);
    return *this;
  } else {
    if (other.isRValue()) {
      initAssignment(other.derived());
    }
    // no transposition needed: delegate to the generic sparse assignment
    return Base::operator=(other.derived());
  }
}
template <typename Scalar_, int Options_, typename StorageIndex_>
inline typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
SparseMatrix<Scalar_, Options_, StorageIndex_>::insert(Index row, Index col) {
  return insertByOuterInner(IsRowMajor ? row : col, IsRowMajor ? col : row);
}
template <typename Scalar_, int Options_, typename StorageIndex_>
EIGEN_STRONG_INLINE typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
SparseMatrix<Scalar_, Options_, StorageIndex_>::insertAtByOuterInner(Index outer, Index inner, Index dst) {
  // random insertion into a compressed matrix is very slow, so switch to uncompressed mode first
  uncompress();
  return insertUncompressedAtByOuterInner(outer, inner, dst);
}
template <typename Scalar_, int Options_, typename StorageIndex_>
EIGEN_DEPRECATED EIGEN_DONT_INLINE typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
SparseMatrix<Scalar_, Options_, StorageIndex_>::insertUncompressed(Index row, Index col) {
  eigen_assert(!isCompressed());
  Index outer = IsRowMajor ? row : col;
  Index inner = IsRowMajor ? col : row;
  Index start = m_outerIndex[outer];
  Index end = start + m_innerNonZeros[outer];
  Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
  if (dst == end) {
    Index capacity = m_outerIndex[outer + 1] - end;
    if (capacity > 0) {
      // push to the back of the inner vector
      m_innerNonZeros[outer]++;
      m_data.index(end) = StorageIndex(inner);
      m_data.value(end) = Scalar(0);
      return m_data.value(end);
    }
  }
  eigen_assert((dst == end || m_data.index(dst) != inner) &&
               "you cannot insert an element that already exists, you must call coeffRef to this end");
  return insertUncompressedAtByOuterInner(outer, inner, dst);
}
template <typename Scalar_, int Options_, typename StorageIndex_>
EIGEN_DEPRECATED EIGEN_DONT_INLINE typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
SparseMatrix<Scalar_, Options_, StorageIndex_>::insertCompressed(Index row, Index col) {
  eigen_assert(isCompressed());
  Index outer = IsRowMajor ? row : col;
  Index inner = IsRowMajor ? col : row;
  Index start = m_outerIndex[outer];
  Index end = m_outerIndex[outer + 1];
  Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
  eigen_assert((dst == end || m_data.index(dst) != inner) &&
               "you cannot insert an element that already exists, you must call coeffRef to this end");
  return insertCompressedAtByOuterInner(outer, inner, dst);
}
template <typename Scalar_, int Options_, typename StorageIndex_>
typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
SparseMatrix<Scalar_, Options_, StorageIndex_>::insertCompressedAtByOuterInner(Index outer, Index inner, Index dst) {
  eigen_assert(isCompressed());
  // a compressed insertion always requires growing the buffer; check the allocated capacity first
  if (m_data.allocatedSize() <= m_data.size()) {
    // grow the capacity, by at least 32 entries
    Index minReserve = 32;
    Index reserveSize = numext::maxi(minReserve, m_data.allocatedSize());
    m_data.reserve(reserveSize);
  }
  m_data.resize(m_data.size() + 1);
  Index chunkSize = m_outerIndex[m_outerSize] - dst;
  // shift the existing data to the right
  m_data.moveChunk(dst, dst + 1, chunkSize);
  // update the outer index (potentially an O(outerSize) bottleneck)
  for (Index j = outer; j < m_outerSize; j++) m_outerIndex[j + 1]++;
  // initialize the new coefficient and return a reference to it
  m_data.index(dst) = StorageIndex(inner);
  m_data.value(dst) = Scalar(0);
  return m_data.value(dst);
}
template <typename Scalar_, int Options_, typename StorageIndex_>
typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
SparseMatrix<Scalar_, Options_, StorageIndex_>::insertUncompressedAtByOuterInner(Index outer, Index inner, Index dst) {
  eigen_assert(!isCompressed());
  // look for spare capacity in a neighbouring inner vector, scanning outward to the left and right
  for (Index leftTarget = outer - 1, rightTarget = outer; (leftTarget >= 0) || (rightTarget < m_outerSize);) {
    if (rightTarget < m_outerSize) {
      Index start = m_outerIndex[rightTarget];
      Index end = start + m_innerNonZeros[rightTarget];
      Index nextStart = m_outerIndex[rightTarget + 1];
      Index capacity = nextStart - end;
      if (capacity > 0) {
        // shift [dst, end) one slot to the right and insert at dst
        Index chunkSize = end - dst;
        if (chunkSize > 0) m_data.moveChunk(dst, dst + 1, chunkSize);
        m_innerNonZeros[outer]++;
        for (Index j = outer; j < rightTarget; j++) m_outerIndex[j + 1]++;
        m_data.index(dst) = StorageIndex(inner);
        m_data.value(dst) = Scalar(0);
        return m_data.value(dst);
      }
      rightTarget++;
    }
    if (leftTarget >= 0) {
      Index start = m_outerIndex[leftTarget];
      Index end = start + m_innerNonZeros[leftTarget];
      Index nextStart = m_outerIndex[leftTarget + 1];
      Index capacity = nextStart - end;
      if (capacity > 0) {
        // shift [nextStart, dst) one slot to the left and insert at dst - 1
        Index chunkSize = dst - nextStart;
        if (chunkSize > 0) m_data.moveChunk(nextStart, nextStart - 1, chunkSize);
        m_innerNonZeros[outer]++;
        for (Index j = leftTarget; j < outer; j++) m_outerIndex[j + 1]--;
        m_data.index(dst - 1) = StorageIndex(inner);
        m_data.value(dst - 1) = Scalar(0);
        return m_data.value(dst - 1);
      }
      leftTarget--;
    }
  }

  // no room for an interior insertion: grow the storage
  // record the offset, as the outer index is about to change
  Index dst_offset = dst - m_outerIndex[outer];
  if (m_data.allocatedSize() == 0) {
    // fast path: allocate space for one element per vector in an empty matrix
    m_data.resize(m_outerSize);
    std::iota(m_outerIndex, m_outerIndex + m_outerSize + 1, StorageIndex(0));
  } else {
    // check for integer overflow of StorageIndex
    Index maxReserveSize = static_cast<Index>(NumTraits<StorageIndex>::highest()) - m_data.allocatedSize();
    eigen_assert(maxReserveSize > 0);
    if (m_outerSize <= maxReserveSize) {
      // allocate space for one additional element per vector
      reserveInnerVectors(IndexVector::Constant(m_outerSize, 1));
    } else {
      // StorageIndex cannot hold outerSize extra elements: spread the remaining budget instead
      typedef internal::sparse_reserve_op<StorageIndex> ReserveSizesOp;
      typedef CwiseNullaryOp<ReserveSizesOp, IndexVector> ReserveSizesXpr;
      ReserveSizesXpr reserveSizesXpr(m_outerSize, 1, ReserveSizesOp(outer, m_outerSize, maxReserveSize));
      reserveInnerVectors(reserveSizesXpr);
    }
  }

  // insert the element at the (possibly shifted) destination
  Index start = m_outerIndex[outer];
  Index end = start + m_innerNonZeros[outer];
  Index new_dst = start + dst_offset;
  Index chunkSize = end - new_dst;
  if (chunkSize > 0) m_data.moveChunk(new_dst, new_dst + 1, chunkSize);
  m_innerNonZeros[outer]++;
  m_data.index(new_dst) = StorageIndex(inner);
  m_data.value(new_dst) = Scalar(0);
  return m_data.value(new_dst);
}
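
// Note: insertUncompressedAtByOuterInner first searches the neighbouring inner vectors (scanning
// outward to the left and right of `outer`) for spare capacity, so that only a small chunk of
// data needs to be shifted. Only when the whole buffer is full does it grow the storage,
// reserving roughly one extra slot per inner vector via reserveInnerVectors / sparse_reserve_op.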
template <typename Scalar_, int Options_, typename StorageIndex_>
struct evaluator<SparseMatrix<Scalar_, Options_, StorageIndex_>>
    : evaluator<SparseCompressedBase<SparseMatrix<Scalar_, Options_, StorageIndex_>>> {
  typedef evaluator<SparseCompressedBase<SparseMatrix<Scalar_, Options_, StorageIndex_>>> Base;
  typedef SparseMatrix<Scalar_, Options_, StorageIndex_> SparseMatrixType;
  evaluator() : Base() {}
  explicit evaluator(const SparseMatrixType& mat) : Base(mat) {}
};
template <typename Scalar, int Options, typename StorageIndex>
class Serializer<SparseMatrix<Scalar, Options, StorageIndex>, void> {
 public:
  typedef SparseMatrix<Scalar, Options, StorageIndex> SparseMat;

  struct Header {
    typename SparseMat::Index rows;
    typename SparseMat::Index cols;
    bool compressed;
    Index outer_size;
    Index inner_buffer_size;
  };

  EIGEN_DEVICE_FUNC size_t size(const SparseMat& value) const {
    // inner non-zero counts (only stored in uncompressed mode)
    std::size_t num_storage_indices = value.isCompressed() ? 0 : value.outerSize();
    // outer indices
    num_storage_indices += value.outerSize() + 1;
    // inner indices
    const StorageIndex inner_buffer_size = value.outerIndexPtr()[value.outerSize()];
    num_storage_indices += inner_buffer_size;
    // values
    std::size_t num_values = inner_buffer_size;
    return sizeof(Header) + sizeof(Scalar) * num_values + sizeof(StorageIndex) * num_storage_indices;
  }

  EIGEN_DEVICE_FUNC uint8_t* serialize(uint8_t* dest, uint8_t* end, const SparseMat& value) {
    if (EIGEN_PREDICT_FALSE(dest == nullptr)) return nullptr;
    if (EIGEN_PREDICT_FALSE(dest + size(value) > end)) return nullptr;

    const size_t header_bytes = sizeof(Header);
    Header header = {value.rows(), value.cols(), value.isCompressed(), value.outerSize(),
                     value.outerIndexPtr()[value.outerSize()]};
    EIGEN_USING_STD(memcpy)
    memcpy(dest, &header, header_bytes);
    dest += header_bytes;

    // inner non-zero counts
    if (!header.compressed) {
      std::size_t data_bytes = sizeof(StorageIndex) * header.outer_size;
      memcpy(dest, value.innerNonZeroPtr(), data_bytes);
      dest += data_bytes;
    }

    // outer indices
    std::size_t data_bytes = sizeof(StorageIndex) * (header.outer_size + 1);
    memcpy(dest, value.outerIndexPtr(), data_bytes);
    dest += data_bytes;

    // inner indices
    data_bytes = sizeof(StorageIndex) * header.inner_buffer_size;
    memcpy(dest, value.innerIndexPtr(), data_bytes);
    dest += data_bytes;

    // values
    data_bytes = sizeof(Scalar) * header.inner_buffer_size;
    memcpy(dest, value.valuePtr(), data_bytes);
    dest += data_bytes;

    return dest;
  }

  EIGEN_DEVICE_FUNC const uint8_t* deserialize(const uint8_t* src, const uint8_t* end, SparseMat& value) const {
    if (EIGEN_PREDICT_FALSE(src == nullptr)) return nullptr;
    if (EIGEN_PREDICT_FALSE(src + sizeof(Header) > end)) return nullptr;

    const size_t header_bytes = sizeof(Header);
    Header header;
    EIGEN_USING_STD(memcpy)
    memcpy(&header, src, header_bytes);
    src += header_bytes;

    // set up the matrix with the stored dimensions and compression mode
    value.resize(header.rows, header.cols);
    if (header.compressed) {
      value.makeCompressed();
    } else {
      value.uncompress();
    }
    value.data().resize(header.inner_buffer_size);

    // inner non-zero counts
    if (!header.compressed) {
      std::size_t data_bytes = sizeof(StorageIndex) * header.outer_size;
      if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
      memcpy(value.innerNonZeroPtr(), src, data_bytes);
      src += data_bytes;
    }

    // outer indices
    std::size_t data_bytes = sizeof(StorageIndex) * (header.outer_size + 1);
    if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
    memcpy(value.outerIndexPtr(), src, data_bytes);
    src += data_bytes;

    // inner indices
    data_bytes = sizeof(StorageIndex) * header.inner_buffer_size;
    if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
    memcpy(value.innerIndexPtr(), src, data_bytes);
    src += data_bytes;

    // values
    data_bytes = sizeof(Scalar) * header.inner_buffer_size;
    if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
    memcpy(value.valuePtr(), src, data_bytes);
    src += data_bytes;

    return src;
  }
};
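
// Illustrative usage sketch (not part of the header): round-tripping a sparse matrix through a
// byte buffer with the Serializer specialization above.
//
//   Eigen::SparseMatrix<double> A = ...;
//   Eigen::Serializer<Eigen::SparseMatrix<double>> ser;
//   std::vector<uint8_t> buf(ser.size(A));
//   uint8_t* write_end = ser.serialize(buf.data(), buf.data() + buf.size(), A);
//   Eigen::SparseMatrix<double> B;
//   const uint8_t* read_end = ser.deserialize(buf.data(), buf.data() + buf.size(), B);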