#ifndef EIGEN_SPARSEMATRIX_H
#define EIGEN_SPARSEMATRIX_H

namespace Eigen {

namespace internal {

template<typename _Scalar, int _Options, typename _StorageIndex>
struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
{
  typedef _Scalar Scalar;
  typedef _StorageIndex StorageIndex;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = Dynamic,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = Dynamic,
    Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};
template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
{
  typedef SparseMatrix<_Scalar, _Options, _StorageIndex> MatrixType;
  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;

  typedef _Scalar Scalar;
  typedef Dense StorageKind;
  typedef _StorageIndex StorageIndex;
  typedef MatrixXpr XprKind;

  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = 1,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = 1,
    Flags = LvalueBit
  };
};
template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
 : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
{
  enum { Flags = 0 };
};

} // end namespace internal
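
/** A versatile sparse matrix representation.
  *
  * This class implements a flexible variant of the Compressed Column (resp. Row) Storage
  * scheme: values and inner indices are stored contiguously per inner vector, an outer-index
  * array gives the start of each inner vector, and an optional per-vector count
  * (m_innerNonZeros) permits free room between inner vectors ("uncompressed" mode), which
  * makes random insertions cheaper.
  */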
template<typename _Scalar, int _Options, typename _StorageIndex>
class SparseMatrix
  : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >
{
    typedef SparseCompressedBase<SparseMatrix> Base;
    using Base::convert_index;

  public:
    using Base::isCompressed;
    using Base::nonZeros;
    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
    using Base::operator+=;
    using Base::operator-=;

    typedef Diagonal<SparseMatrix> DiagonalReturnType;
    typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
    typedef typename Base::InnerIterator InnerIterator;
    typedef typename Base::ReverseInnerIterator ReverseInnerIterator;

    using Base::IsRowMajor;
    typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
    enum { Options = _Options };

    typedef typename Base::IndexVector IndexVector;
    typedef typename Base::ScalarVector ScalarVector;

  protected:
    Index m_outerSize;
    Index m_innerSize;
    StorageIndex* m_outerIndex;
    StorageIndex* m_innerNonZeros;     // optional, if null then the data is compressed
    Storage m_data;

  public:
    inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
    inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
    inline Storage& data() { return m_data; }
    inline const Storage& data() const { return m_data; }
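
    // coeff(row,col): read-only access performing a binary search within the inner vector
    // `outer`; it returns Scalar(0) when the coefficient is not stored explicitly.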
    inline Scalar coeff(Index row, Index col) const
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
    }
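
    // coeffRef(row,col): read-write access. If the entry does not exist yet it is created
    // via insert(row,col), which can be expensive; prefer insert() or setFromTriplets()
    // when filling a matrix from scratch.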
    inline Scalar& coeffRef(Index row, Index col)
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;
      Index start = m_outerIndex[outer];
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
      if(end<=start)
        return insert(row,col);
      const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
      if((p<end) && (m_data.index(p)==inner))
        return m_data.value(p);
      else
        return insert(row,col);
    }
    inline void setZero()
    {
      m_data.clear();
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
      if(m_innerNonZeros)
        memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
    }
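
    // reserve(reserveSize): preallocates room for reserveSize nonzeros (compressed mode
    // only), avoiding reallocations during a subsequent ordered-insertion pass.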
    inline void reserve(Index reserveSize)
    {
      eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
      m_data.reserve(reserveSize);
    }
    #ifdef EIGEN_PARSED_BY_DOXYGEN
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes);
    #else
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
    #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
        typename
    #endif
        SizesType::value_type())
    {
      EIGEN_UNUSED_VARIABLE(enableif);
      reserveInnerVectors(reserveSizes);
    }
    #endif // EIGEN_PARSED_BY_DOXYGEN

  protected:
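    // reserveInnerVectors(reserveSizes): per-inner-vector preallocation. A compressed matrix
    // is switched to uncompressed mode, leaving reserveSizes[j] free slots after the existing
    // nonzeros of each inner vector j.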
    template<class SizesType>
    inline void reserveInnerVectors(const SizesType& reserveSizes)
    {
      if(isCompressed())
      {
        Index totalReserveSize = 0;
        // turn the matrix into non-compressed mode
        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();

        // temporarily use m_innerNonZeros to hold the new starting points
        StorageIndex* newOuterIndex = m_innerNonZeros;

        StorageIndex count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
          totalReserveSize += reserveSizes[j];
        }
        m_data.reserve(totalReserveSize);
        StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
        for(Index j=m_outerSize-1; j>=0; --j)
        {
          StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
          for(Index i=innerNNZ-1; i>=0; --i)
          {
            m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
            m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
          }
          previousOuterIndex = m_outerIndex[j];
          m_outerIndex[j] = newOuterIndex[j];
          m_innerNonZeros[j] = innerNNZ;
        }
        if(m_outerSize>0)
          m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];

        m_data.resize(m_outerIndex[m_outerSize]);
      }
      else
      {
        StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
        if (!newOuterIndex) internal::throw_std_bad_alloc();

        StorageIndex count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
          StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
          count += toReserve + m_innerNonZeros[j];
        }
        newOuterIndex[m_outerSize] = count;

        m_data.resize(count);
        for(Index j=m_outerSize-1; j>=0; --j)
        {
          Index offset = newOuterIndex[j] - m_outerIndex[j];
          if(offset>0)
          {
            StorageIndex innerNNZ = m_innerNonZeros[j];
            for(Index i=innerNNZ-1; i>=0; --i)
            {
              m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
              m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
            }
          }
        }

        std::swap(m_outerIndex, newOuterIndex);
        std::free(newOuterIndex);
      }
    }
  public:

    //--- low level purely coherent filling ---

    inline Scalar& insertBack(Index row, Index col)
    {
      return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
    }
    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
    {
      eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
      eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(Scalar(0), inner);
      return m_data.value(p);
    }
    inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
    {
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(Scalar(0), inner);
      return m_data.value(p);
    }
    inline void startVec(Index outer)
    {
      eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
      eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
      m_outerIndex[outer+1] = m_outerIndex[outer];
    }
    inline void finalize()
    {
      if(isCompressed())
      {
        StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
        Index i = m_outerSize;
        // find the last filled inner vector
        while (i>=0 && m_outerIndex[i]==0)
          --i;
        ++i;
        while (i<=m_outerSize)
        {
          m_outerIndex[i] = size;
          ++i;
        }
      }
    }
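
    // A sketch of the low-level ordered filling protocol built on the API above (assuming
    // enough space was reserved up front and entries arrive in increasing inner order;
    // nnz and value are placeholders):
    //
    //   SparseMatrix<double> m(rows,cols);
    //   m.reserve(nnz);
    //   for(Index j=0; j<m.outerSize(); ++j) {
    //     m.startVec(j);                            // once per outer vector, in order
    //     m.insertBackByOuterInner(j,i) = value;    // for each inner index i, sorted
    //   }
    //   m.finalize();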
    template<typename InputIterators>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end);

    template<typename InputIterators, typename DupFunctor>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
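
    // Typical setFromTriplets() usage (a sketch; estimated_nnz, i, j and v_ij are placeholders):
    //
    //   typedef Eigen::Triplet<double> T;
    //   std::vector<T> triplets;
    //   triplets.reserve(estimated_nnz);
    //   triplets.push_back(T(i, j, v_ij));   // repeat for every entry; duplicates are summed
    //   SparseMatrix<double> m(rows, cols);
    //   m.setFromTriplets(triplets.begin(), triplets.end());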
    void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }

    template<typename DupFunctor>
    void collapseDuplicates(DupFunctor dup_func = DupFunctor());
    /** \internal
      * same as insert(Index,Index) except that the indices are given relative to the storage order */
    Scalar& insertByOuterInner(Index j, Index i)
    {
      return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
    }
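
    // makeCompressed() squeezes the free room left between inner vectors in uncompressed
    // mode, frees m_innerNonZeros, and restores the plain compressed (CSC/CSR) layout.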
    void makeCompressed()
    {
      if(isCompressed())
        return;

      eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);

      Index oldStart = m_outerIndex[1];
      m_outerIndex[1] = m_innerNonZeros[0];
      for(Index j=1; j<m_outerSize; ++j)
      {
        Index nextOldStart = m_outerIndex[j+1];
        Index offset = oldStart - m_outerIndex[j];
        if(offset>0)
        {
          for(Index k=0; k<m_innerNonZeros[j]; ++k)
          {
            m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
            m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
          }
        }
        m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
        oldStart = nextOldStart;
      }
      std::free(m_innerNonZeros);
      m_innerNonZeros = 0;
      m_data.resize(m_outerIndex[m_outerSize]);
      m_data.squeeze();
    }
    void uncompress()
    {
      if(m_innerNonZeros != 0)
        return;
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      for (Index i = 0; i < m_outerSize; i++)
        m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
    }
    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
    {
      prune(default_prunning_func(reference,epsilon));
    }
    template<typename KeepFunc>
    void prune(const KeepFunc& keep = KeepFunc())
    {
      makeCompressed();

      StorageIndex k = 0;
      for(Index j=0; j<m_outerSize; ++j)
      {
        Index previousStart = m_outerIndex[j];
        m_outerIndex[j] = k;
        Index end = m_outerIndex[j+1];
        for(Index i=previousStart; i<end; ++i)
        {
          if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
          {
            m_data.value(k) = m_data.value(i);
            m_data.index(k) = m_data.index(i);
            ++k;
          }
        }
      }
      m_outerIndex[m_outerSize] = k;
      m_data.resize(k,0);
    }
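
    // Example of predicate-based pruning (a sketch; tol is a placeholder threshold, and the
    // functor is called as keep(row, col, value)):
    //
    //   m.prune([tol](Index, Index, const double& v) { return std::abs(v) > tol; });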
    void conservativeResize(Index rows, Index cols)
    {
      // if the matrix sizes do not change, nothing needs to be done
      if (this->rows() == rows && this->cols() == cols) return;

      // if one dimension is null, then there is nothing to be preserved
      if(rows==0 || cols==0) return resize(rows,cols);

      Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
      Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
      StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);

      // deal with the inner-nonzero counts
      if (m_innerNonZeros)
      {
        StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
        if (!newInnerNonZeros) internal::throw_std_bad_alloc();
        m_innerNonZeros = newInnerNonZeros;

        for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
          m_innerNonZeros[i] = 0;
      }
      else if (innerChange < 0)
      {
        // inner size decreased: allocate a new m_innerNonZeros
        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();
        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
          m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
        for(Index i = m_outerSize; i < m_outerSize + outerChange; i++)
          m_innerNonZeros[i] = 0;
      }

      // drop stored entries that fall outside the new inner size
      if (m_innerNonZeros && innerChange < 0)
      {
        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
        {
          StorageIndex &n = m_innerNonZeros[i];
          StorageIndex start = m_outerIndex[i];
          while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
        }
      }

      m_innerSize = newInnerSize;

      // re-allocate the outer index structure if the outer size changed
      if (outerChange == 0)
        return;

      StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
      if (!newOuterIndex) internal::throw_std_bad_alloc();
      m_outerIndex = newOuterIndex;
      if (outerChange > 0)
      {
        StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
        for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
          m_outerIndex[i] = last;
      }
      m_outerSize += outerChange;
    }
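
    // In contrast with conservativeResize(), resize() discards all stored values and resets
    // the matrix to an empty rows x cols matrix.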
    void resize(Index rows, Index cols)
    {
      const Index outerSize = IsRowMajor ? rows : cols;
      m_innerSize = IsRowMajor ? cols : rows;
      m_data.clear();
      if (m_outerSize != outerSize || m_outerSize==0)
      {
        std::free(m_outerIndex);
        m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
        if (!m_outerIndex) internal::throw_std_bad_alloc();
        m_outerSize = outerSize;
      }
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
    }
    const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }

    DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }
    /** Default constructor yielding an empty 0 x 0 matrix */
    inline SparseMatrix()
      : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      resize(0, 0);
    }

    /** Constructs a rows x cols empty matrix */
    inline SparseMatrix(Index rows, Index cols)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      resize(rows, cols);
    }
    /** Constructs a sparse matrix from the sparse expression \a other */
    template<typename OtherDerived>
    inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
      check_template_parameters();
      const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
      if (needToTranspose)
        *this = other.derived();
      else
      {
        #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
          EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        #endif
        internal::call_assignment_no_alias(*this, other.derived());
      }
    }
    /** Constructs a sparse matrix from the sparse selfadjoint view \a other */
    template<typename OtherDerived, unsigned int UpLo>
    inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      Base::operator=(other);
    }

    /** Copy constructor (it performs a deep copy) */
    inline SparseMatrix(const SparseMatrix& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other.derived();
    }
    /** \brief Copy constructor with in-place evaluation */
    template<typename OtherDerived>
    SparseMatrix(const ReturnByValue<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      initAssignment(other);
      other.evalTo(*this);
    }
    /** \brief Copy constructor with in-place evaluation */
    template<typename OtherDerived>
    explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other.derived();
    }
    /** Swaps the content of two sparse matrices of the same type; this is a fast pointer swap. */
    inline void swap(SparseMatrix& other)
    {
      std::swap(m_outerIndex, other.m_outerIndex);
      std::swap(m_innerSize, other.m_innerSize);
      std::swap(m_outerSize, other.m_outerSize);
      std::swap(m_innerNonZeros, other.m_innerNonZeros);
      m_data.swap(other.m_data);
    }
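
    // setIdentity(): only valid for square matrices; produces one unit entry per inner vector
    // and leaves the matrix in compressed mode.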
    inline void setIdentity()
    {
      eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
      this->m_data.resize(rows());
      Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
      Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
      Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
      std::free(m_innerNonZeros);
      m_innerNonZeros = 0;
    }
    inline SparseMatrix& operator=(const SparseMatrix& other)
    {
      if (other.isRValue())
      {
        swap(other.const_cast_derived());
      }
      else if(this!=&other)
      {
        #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
          EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        #endif
        initAssignment(other);
        if(other.isCompressed())
        {
          internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
          m_data = other.m_data;
        }
        else
        {
          Base::operator=(other);
        }
      }
      return *this;
    }
#ifndef EIGEN_PARSED_BY_DOXYGEN
    template<typename OtherDerived>
    inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
    { return Base::operator=(other.derived()); }
#endif // EIGEN_PARSED_BY_DOXYGEN
    template<typename OtherDerived>
    EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
    friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
    {
      EIGEN_DBG_SPARSE(
        s << "Nonzero entries:\n";
        if(m.isCompressed())
        {
          for (Index i=0; i<m.nonZeros(); ++i)
            s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
        }
        else
        {
          for (Index i=0; i<m.outerSize(); ++i)
          {
            Index p = m.m_outerIndex[i];
            Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
            Index k=p;
            for (; k<pe; ++k) {
              s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
            }
            for (; k<m.m_outerIndex[i+1]; ++k) {
              s << "(_,_) ";
            }
          }
        }
        s << std::endl;
        s << std::endl;
        s << "Outer pointers:\n";
        for (Index i=0; i<m.outerSize(); ++i) {
          s << m.m_outerIndex[i] << " ";
        }
        s << " $" << std::endl;
        if(!m.isCompressed())
        {
          s << "Inner non zeros:\n";
          for (Index i=0; i<m.outerSize(); ++i) {
            s << m.m_innerNonZeros[i] << " ";
          }
          s << " $" << std::endl;
        }
        s << std::endl;
      );
      s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
      return s;
    }
    /** Destructor */
    inline ~SparseMatrix()
    {
      std::free(m_outerIndex);
      std::free(m_innerNonZeros);
    }

    /** Overloaded for performance */
    Scalar sum() const;

#   ifdef EIGEN_SPARSEMATRIX_PLUGIN
#     include EIGEN_SPARSEMATRIX_PLUGIN
#   endif

  protected:
    template<typename Other>
    void initAssignment(const Other& other)
    {
      resize(other.rows(), other.cols());
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
    }
    EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
    /** \internal a vector-like object equal to 0 everywhere except for value v at position i */
    class SingletonVector
    {
        StorageIndex m_index;
        StorageIndex m_value;
      public:
        typedef StorageIndex value_type;
        SingletonVector(Index i, Index v)
          : m_index(convert_index(i)), m_value(convert_index(v))
        {}

        StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
    };
    EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);

  public:
    EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      eigen_assert(!isCompressed());
      eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));

      Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
      m_data.index(p) = convert_index(inner);
      return (m_data.value(p) = Scalar(0));
    }

  private:
    static void check_template_parameters()
    {
      EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
      EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
    }
    struct default_prunning_func {
      default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
      inline bool operator() (const Index&, const Index&, const Scalar& value) const
      {
        return !internal::isMuchSmallerThan(value, reference, epsilon);
      }
      Scalar reference;
      RealScalar epsilon;
    };
};

namespace internal {
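
// set_from_triplets works in passes: (1) count the nonzeros of each inner vector of the
// transposed matrix, (2) reserve and append every triplet unsorted, (3) collapse duplicates
// with dup_func, and (4) assign to the destination; this last transposed copy implicitly
// sorts the inner indices of each inner vector.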
template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
{
  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
  typedef typename SparseMatrixType::Scalar Scalar;
  typedef typename SparseMatrixType::StorageIndex StorageIndex;
  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());

  if(begin!=end)
  {
    // pass 1: count the number of nonzeros per inner vector
    typename SparseMatrixType::IndexVector wi(trMat.outerSize());
    wi.setZero();
    for(InputIterator it(begin); it!=end; ++it)
    {
      eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
      wi(IsRowMajor ? it->col() : it->row())++;
    }

    // pass 2: insert all the elements into trMat
    trMat.reserve(wi);
    for(InputIterator it(begin); it!=end; ++it)
      trMat.insertBackUncompressed(it->row(),it->col()) = it->value();

    // pass 3: collapse duplicates
    trMat.collapseDuplicates(dup_func);
  }

  // pass 4: transposed copy -> implicit sorting
  mat = trMat;
}

} // end namespace internal
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators>
void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
}
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators, typename DupFunctor>
void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
}
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename DupFunctor>
void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)
{
  eigen_assert(!isCompressed());
  // for each inner vector, wi(inner_index) holds the position of the first occurrence
  // of inner_index in the index/value buffers
  IndexVector wi(innerSize());
  wi.fill(-1);
  StorageIndex count = 0;
  for(Index j=0; j<outerSize(); ++j)
  {
    StorageIndex start = count;
    Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
    for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
    {
      Index i = m_data.index(k);
      if(wi(i)>=start)
      {
        // entry already seen in this inner vector => accumulate it
        m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
      }
      else
      {
        m_data.value(count) = m_data.value(k);
        m_data.index(count) = m_data.index(k);
        wi(i) = count;
        ++count;
      }
    }
    m_outerIndex[j] = start;
  }
  m_outerIndex[m_outerSize] = count;

  // turn the matrix into compressed form
  std::free(m_innerNonZeros);
  m_innerNonZeros = 0;
  m_data.resize(m_outerIndex[m_outerSize]);
}
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
    EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
  #endif

  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
  if (needToTranspose)
  {
    #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
      EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
    #endif
    // two-pass algorithm:
    //  1 - compute the number of coeffs per destination inner vector
    //  2 - do the actual copy/eval
    // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
    typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
    typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
    typedef internal::evaluator<_OtherCopy> OtherCopyEval;
    OtherCopy otherCopy(other.derived());
    OtherCopyEval otherCopyEval(otherCopy);

    SparseMatrix dest(other.rows(),other.cols());
    Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();

    // pass 1: count the nonzeros per destination inner vector
    for (Index j=0; j<otherCopy.outerSize(); ++j)
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
        ++dest.m_outerIndex[it.index()];

    // prefix sum to build the outer index and the per-vector write positions
    StorageIndex count = 0;
    IndexVector positions(dest.outerSize());
    for (Index j=0; j<dest.outerSize(); ++j)
    {
      StorageIndex tmp = dest.m_outerIndex[j];
      dest.m_outerIndex[j] = count;
      positions[j] = count;
      count += tmp;
    }
    dest.m_outerIndex[dest.outerSize()] = count;
    dest.m_data.resize(count);

    // pass 2: copy the coefficients
    for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
    {
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
      {
        Index pos = positions[it.index()]++;
        dest.m_data.index(pos) = j;
        dest.m_data.value(pos) = it.value();
      }
    }
    this->swap(dest);
    return *this;
  }
  else
  {
    if(other.isRValue())
    {
      initAssignment(other.derived());
    }
    // there is no special optimization
    return Base::operator=(other.derived());
  }
}
template<typename _Scalar, int _Options, typename _StorageIndex>
typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)
{
  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  if(isCompressed())
  {
    if(nonZeros()==0)
    {
      // reserve space if not already done
      if(m_data.allocatedSize()==0)
        m_data.reserve(2*m_innerSize);

      // turn the matrix into non-compressed mode
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if(!m_innerNonZeros) internal::throw_std_bad_alloc();

      memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));

      // pack all inner vectors to the end of the preallocated space
      // and allocate the entire free space to the first inner vector
      StorageIndex end = convert_index(m_data.allocatedSize());
      for(Index j=1; j<=m_outerSize; ++j)
        m_outerIndex[j] = end;
    }
    else
    {
      // turn the matrix into non-compressed mode
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if(!m_innerNonZeros) internal::throw_std_bad_alloc();
      for(Index j=0; j<m_outerSize; ++j)
        m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
    }
  }

  // check whether we can do a fast "push back" insertion
  Index data_end = m_data.allocatedSize();

  // first case: we are filling a new inner vector which is packed at the end
  if(m_outerIndex[outer]==data_end)
  {
    eigen_internal_assert(m_innerNonZeros[outer]==0);

    // pack previous empty inner vectors to the end of the used space
    StorageIndex p = convert_index(m_data.size());
    Index j = outer;
    while(j>=0 && m_innerNonZeros[j]==0)
      m_outerIndex[j--] = p;

    // push back the new element
    ++m_innerNonZeros[outer];
    m_data.append(Scalar(0), inner);

    // check for reallocation
    if(data_end != m_data.allocatedSize())
    {
      // m_data has been reallocated: move the remaining inner vectors
      // back to the end of the free space
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }
    return m_data.value(p);
  }

  // second case: the next inner vector is packed to the end
  // and the current inner vector ends at the used space
  if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
  {
    eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);

    // add space for the new element
    ++m_innerNonZeros[outer];
    m_data.resize(m_data.size()+1);

    // check for reallocation, as above
    if(data_end != m_data.allocatedSize())
    {
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }

    // and insert it at the right position (sorted insertion)
    Index startId = m_outerIndex[outer];
    Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
    while ( (p > startId) && (m_data.index(p-1) > inner) )
    {
      m_data.index(p) = m_data.index(p-1);
      m_data.value(p) = m_data.value(p-1);
      --p;
    }

    m_data.index(p) = convert_index(inner);
    return (m_data.value(p) = Scalar(0));
  }

  if(m_data.size() != m_data.allocatedSize())
  {
    // make sure the matrix is compatible with random uncompressed insertion
    m_data.resize(m_data.allocatedSize());
    this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
  }

  return insertUncompressed(row,col);
}
template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)
{
  eigen_assert(!isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const StorageIndex inner = convert_index(IsRowMajor ? col : row);

  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
  StorageIndex innerNNZ = m_innerNonZeros[outer];
  if(innerNNZ>=room)
  {
    // this inner vector is full: reallocate the whole buffer
    reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
  }

  Index startId = m_outerIndex[outer];
  Index p = startId + m_innerNonZeros[outer];
  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }
  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");

  m_innerNonZeros[outer]++;

  m_data.index(p) = inner;
  return (m_data.value(p) = Scalar(0));
}
template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
{
  eigen_assert(isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  Index previousOuter = outer;
  if (m_outerIndex[outer+1]==0)
  {
    // we start a new inner vector
    while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
    {
      m_outerIndex[previousOuter] = convert_index(m_data.size());
      --previousOuter;
    }
    m_outerIndex[outer+1] = m_outerIndex[outer];
  }

  // here we have to handle the tricky case where the outerIndex array
  // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g., the 2nd inner vector
  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
                && (std::size_t(m_outerIndex[outer+1]) == m_data.size());

  std::size_t startId = m_outerIndex[outer];
  std::size_t p = m_outerIndex[outer+1];
  ++m_outerIndex[outer+1];

  double reallocRatio = 1;
  if (m_data.allocatedSize()<=m_data.size())
  {
    // if there is no preallocated memory, let's reserve a minimum of 32 elements
    if (m_data.size()==0)
    {
      m_data.reserve(32);
    }
    else
    {
      // we need to reallocate the data; to reduce repeated reallocations we use a
      // resize ratio estimated from the current filling ratio (in double to avoid overflow)
      double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
      reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
      // bound the ratio to avoid both many small reallocs and over-allocation
      reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
    }
  }
  m_data.resize(m_data.size()+1,reallocRatio);

  if (!isLastVec)
  {
    if (previousOuter==-1)
    {
      // oops, wrong guess: correct the outer offsets
      for (Index k=0; k<=(outer+1); ++k)
        m_outerIndex[k] = 0;
      Index k=outer+1;
      while(m_outerIndex[k]==0)
        m_outerIndex[k++] = 1;
      while (k<=m_outerSize && m_outerIndex[k]!=0)
        m_outerIndex[k++]++;
      p = 0;
      --k;
      k = m_outerIndex[k]-1;
      while (k>0)
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
    else
    {
      // we are not inserting into the last inner vector: update the outer indices
      Index j = outer+2;
      while (j<=m_outerSize && m_outerIndex[j]!=0)
        m_outerIndex[j++]++;
      --j;
      // and shift the data of the following inner vectors
      Index k = m_outerIndex[j]-1;
      while (k>=Index(p))
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
  }

  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }

  m_data.index(p) = inner;
  return (m_data.value(p) = Scalar(0));
}
namespace internal {

template<typename _Scalar, int _Options, typename _StorageIndex>
struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
  : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
{
  typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;
  typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
  evaluator() : Base() {}
  explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSEMATRIX_H