Eigen  3.3.9
 
SparseMatrix.h
1// This file is part of Eigen, a lightweight C++ template library
2// for linear algebra.
3//
4// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5//
6// This Source Code Form is subject to the terms of the Mozilla
7// Public License v. 2.0. If a copy of the MPL was not distributed
8// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
10#ifndef EIGEN_SPARSEMATRIX_H
11#define EIGEN_SPARSEMATRIX_H
12
13namespace Eigen {
14
44
45namespace internal {
46template<typename _Scalar, int _Options, typename _StorageIndex>
47struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
48{
49 typedef _Scalar Scalar;
50 typedef _StorageIndex StorageIndex;
51 typedef Sparse StorageKind;
52 typedef MatrixXpr XprKind;
53 enum {
54 RowsAtCompileTime = Dynamic,
55 ColsAtCompileTime = Dynamic,
56 MaxRowsAtCompileTime = Dynamic,
57 MaxColsAtCompileTime = Dynamic,
58 Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
59 SupportedAccessPatterns = InnerRandomAccessPattern
60 };
61};
62
63template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
64struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
65{
66 typedef SparseMatrix<_Scalar, _Options, _StorageIndex> MatrixType;
67 typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
68 typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
69
70 typedef _Scalar Scalar;
71 typedef Dense StorageKind;
72 typedef _StorageIndex StorageIndex;
73 typedef MatrixXpr XprKind;
74
75 enum {
76 RowsAtCompileTime = Dynamic,
77 ColsAtCompileTime = 1,
78 MaxRowsAtCompileTime = Dynamic,
79 MaxColsAtCompileTime = 1,
80 Flags = LvalueBit
81 };
82};
83
84template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
85struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
86 : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
87{
88 enum {
89 Flags = 0
90 };
91};
92
93} // end namespace internal
94
95template<typename _Scalar, int _Options, typename _StorageIndex>
96class SparseMatrix
97 : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >
98{
99 typedef SparseCompressedBase<SparseMatrix> Base;
100 using Base::convert_index;
101 friend class SparseVector<_Scalar,0,_StorageIndex>;
102 public:
103 using Base::isCompressed;
104 using Base::nonZeros;
105 EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
106 using Base::operator+=;
107 using Base::operator-=;
108
109 typedef MappedSparseMatrix<Scalar,Flags> Map;
110 typedef Diagonal<SparseMatrix> DiagonalReturnType;
111 typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
112 typedef typename Base::InnerIterator InnerIterator;
113 typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
114
115
116 using Base::IsRowMajor;
117 typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
118 enum {
119 Options = _Options
120 };
121
122 typedef typename Base::IndexVector IndexVector;
123 typedef typename Base::ScalarVector ScalarVector;
124 protected:
125 typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?ColMajor:RowMajor),StorageIndex> TransposedSparseMatrix;
126
127 Index m_outerSize;
128 Index m_innerSize;
129 StorageIndex* m_outerIndex;
130 StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
131 Storage m_data;
132
133 public:
134
136 inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
138 inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
139
141 inline Index innerSize() const { return m_innerSize; }
143 inline Index outerSize() const { return m_outerSize; }
144
148 inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
152 inline Scalar* valuePtr() { return m_data.valuePtr(); }
153
157 inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
161 inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
162
166 inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
170 inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
171
175 inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
179 inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
180
182 inline Storage& data() { return m_data; }
184 inline const Storage& data() const { return m_data; }
185
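 /* Usage sketch (illustrative, not part of the Eigen sources): the usual way to visit the
  * non-zeros is through InnerIterator rather than the raw pointers above. Assuming a
  * column-major SparseMatrix<double> A:
  *
  *   for(int k=0; k<A.outerSize(); ++k)
  *     for(Eigen::SparseMatrix<double>::InnerIterator it(A,k); it; ++it)
  *       std::cout << it.row() << " " << it.col() << " " << it.value() << "\n";
  *
  * In compressed mode, valuePtr() and innerIndexPtr() hold nonZeros() entries and
  * outerIndexPtr() holds outerSize()+1 entries, i.e. the standard CSC/CSR arrays. */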
188 inline Scalar coeff(Index row, Index col) const
189 {
190 eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
191
192 const Index outer = IsRowMajor ? row : col;
193 const Index inner = IsRowMajor ? col : row;
194 Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
195 return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
196 }
197
206 inline Scalar& coeffRef(Index row, Index col)
207 {
208 eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
209
210 const Index outer = IsRowMajor ? row : col;
211 const Index inner = IsRowMajor ? col : row;
212
213 Index start = m_outerIndex[outer];
214 Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
215 eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
216 if(end<=start)
217 return insert(row,col);
218 const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
219 if((p<end) && (m_data.index(p)==inner))
220 return m_data.value(p);
221 else
222 return insert(row,col);
223 }
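 /* Usage sketch (illustrative only): how coeff(), coeffRef() and insert() differ.
  *
  *   Eigen::SparseMatrix<double> A(10,10);   // empty, compressed, column-major
  *   A.coeffRef(3,2) += 1.0;                 // inserts (3,2) if it does not exist yet
  *   A.insert(4,2) = 2.0;                    // faster, but (4,2) must not already exist
  *   double x = A.coeff(5,2);                // read-only lookup, returns 0 for an absent entry
  *
  * Both coeffRef() and insert() search/shift within one inner vector, so for bulk assembly
  * prefer setFromTriplets() or reserve() followed by insert(). */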
224
240 Scalar& insert(Index row, Index col);
241
242 public:
243
251 inline void setZero()
252 {
253 m_data.clear();
254 memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
255 if(m_innerNonZeros)
256 memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
257 }
258
262 inline void reserve(Index reserveSize)
263 {
264 eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
265 m_data.reserve(reserveSize);
266 }
267
268 #ifdef EIGEN_PARSED_BY_DOXYGEN
281 template<class SizesType>
282 inline void reserve(const SizesType& reserveSizes);
283 #else
284 template<class SizesType>
285 inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
286 #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
287 typename
288 #endif
289 SizesType::value_type())
290 {
291 EIGEN_UNUSED_VARIABLE(enableif);
292 reserveInnerVectors(reserveSizes);
293 }
294 #endif // EIGEN_PARSED_BY_DOXYGEN
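 /* Sketch of the recommended random-insertion pattern with the per-inner-vector reserve()
  * overload (the sizes and fill pattern below are made up for illustration):
  *
  *   const int rows = 12, cols = 12;
  *   Eigen::SparseMatrix<double> A(rows, cols);          // column-major by default
  *   A.reserve(Eigen::VectorXi::Constant(cols, 6));      // roughly 6 non-zeros per column
  *   for(int j=0; j<cols; ++j)
  *     for(int i=j; i<rows; i+=3)
  *       A.insert(i,j) = 1.0;                            // O(1) while reserved room remains
  *   A.makeCompressed();                                 // optional: squeeze the free space */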
295 protected:
296 template<class SizesType>
297 inline void reserveInnerVectors(const SizesType& reserveSizes)
298 {
299 if(isCompressed())
300 {
301 Index totalReserveSize = 0;
302 // turn the matrix into non-compressed mode
303 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
304 if (!m_innerNonZeros) internal::throw_std_bad_alloc();
305
306 // temporarily use m_innerNonZeros to hold the new starting points.
307 StorageIndex* newOuterIndex = m_innerNonZeros;
308
309 StorageIndex count = 0;
310 for(Index j=0; j<m_outerSize; ++j)
311 {
312 newOuterIndex[j] = count;
313 count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
314 totalReserveSize += reserveSizes[j];
315 }
316 m_data.reserve(totalReserveSize);
317 StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
318 for(Index j=m_outerSize-1; j>=0; --j)
319 {
320 StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
321 for(Index i=innerNNZ-1; i>=0; --i)
322 {
323 m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
324 m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
325 }
326 previousOuterIndex = m_outerIndex[j];
327 m_outerIndex[j] = newOuterIndex[j];
328 m_innerNonZeros[j] = innerNNZ;
329 }
330 if(m_outerSize>0)
331 m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
332
333 m_data.resize(m_outerIndex[m_outerSize]);
334 }
335 else
336 {
337 StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
338 if (!newOuterIndex) internal::throw_std_bad_alloc();
339
340 StorageIndex count = 0;
341 for(Index j=0; j<m_outerSize; ++j)
342 {
343 newOuterIndex[j] = count;
344 StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
345 StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
346 count += toReserve + m_innerNonZeros[j];
347 }
348 newOuterIndex[m_outerSize] = count;
349
350 m_data.resize(count);
351 for(Index j=m_outerSize-1; j>=0; --j)
352 {
353 Index offset = newOuterIndex[j] - m_outerIndex[j];
354 if(offset>0)
355 {
356 StorageIndex innerNNZ = m_innerNonZeros[j];
357 for(Index i=innerNNZ-1; i>=0; --i)
358 {
359 m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
360 m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
361 }
362 }
363 }
364
365 std::swap(m_outerIndex, newOuterIndex);
366 std::free(newOuterIndex);
367 }
368
369 }
370 public:
371
372 //--- low level purely coherent filling ---
373
384 inline Scalar& insertBack(Index row, Index col)
385 {
386 return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
387 }
388
391 inline Scalar& insertBackByOuterInner(Index outer, Index inner)
392 {
393 eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
394 eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
395 Index p = m_outerIndex[outer+1];
396 ++m_outerIndex[outer+1];
397 m_data.append(Scalar(0), inner);
398 return m_data.value(p);
399 }
400
403 inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
404 {
405 Index p = m_outerIndex[outer+1];
406 ++m_outerIndex[outer+1];
407 m_data.append(Scalar(0), inner);
408 return m_data.value(p);
409 }
410
413 inline void startVec(Index outer)
414 {
415 eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
416 eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
417 m_outerIndex[outer+1] = m_outerIndex[outer];
418 }
419
423 inline void finalize()
424 {
425 if(isCompressed())
426 {
427 StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
428 Index i = m_outerSize;
429 // find the last filled column
430 while (i>=0 && m_outerIndex[i]==0)
431 --i;
432 ++i;
433 while (i<=m_outerSize)
434 {
435 m_outerIndex[i] = size;
436 ++i;
437 }
438 }
439 }
440
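 /* Sketch of the low-level coherent filling API (inner indices must be appended in strictly
  * increasing order within each inner vector, and outer vectors visited in order):
  *
  *   Eigen::SparseMatrix<double> A(4,4);   // column-major: outer = column, inner = row
  *   A.reserve(8);
  *   for(int j=0; j<4; ++j)
  *   {
  *     A.startVec(j);
  *     A.insertBack(j,j) = 1.0;            // one diagonal entry per column
  *   }
  *   A.finalize(); */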
441 //---
442
443 template<typename InputIterators>
444 void setFromTriplets(const InputIterators& begin, const InputIterators& end);
445
446 template<typename InputIterators,typename DupFunctor>
447 void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
448
449 void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }
450
451 template<typename DupFunctor>
452 void collapseDuplicates(DupFunctor dup_func = DupFunctor());
453
454 //---
455
458 Scalar& insertByOuterInner(Index j, Index i)
459 {
460 return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
461 }
462
465 void makeCompressed()
466 {
467 if(isCompressed())
468 return;
469
470 eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);
471
472 Index oldStart = m_outerIndex[1];
473 m_outerIndex[1] = m_innerNonZeros[0];
474 for(Index j=1; j<m_outerSize; ++j)
475 {
476 Index nextOldStart = m_outerIndex[j+1];
477 Index offset = oldStart - m_outerIndex[j];
478 if(offset>0)
479 {
480 for(Index k=0; k<m_innerNonZeros[j]; ++k)
481 {
482 m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
483 m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
484 }
485 }
486 m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
487 oldStart = nextOldStart;
488 }
489 std::free(m_innerNonZeros);
490 m_innerNonZeros = 0;
491 m_data.resize(m_outerIndex[m_outerSize]);
492 m_data.squeeze();
493 }
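 /* Sketch: any insert()/coeffRef() call that creates an entry switches the matrix to
  * uncompressed mode; call makeCompressed() once assembly is done, in particular before
  * exporting the raw valuePtr()/innerIndexPtr()/outerIndexPtr() arrays as plain CSC/CSR:
  *
  *   Eigen::SparseMatrix<double> A(100,100);
  *   A.insert(2,3) = 1.5;        // A is now in uncompressed mode
  *   A.makeCompressed();         // back to compact CSC storage */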
494
496 void uncompress()
497 {
498 if(m_innerNonZeros != 0)
499 return;
500 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
501 for (Index i = 0; i < m_outerSize; i++)
502 {
503 m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
504 }
505 }
506
508 void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
509 {
510 prune(default_prunning_func(reference,epsilon));
511 }
512
520 template<typename KeepFunc>
521 void prune(const KeepFunc& keep = KeepFunc())
522 {
523 // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
524 makeCompressed();
525
526 StorageIndex k = 0;
527 for(Index j=0; j<m_outerSize; ++j)
528 {
529 Index previousStart = m_outerIndex[j];
530 m_outerIndex[j] = k;
531 Index end = m_outerIndex[j+1];
532 for(Index i=previousStart; i<end; ++i)
533 {
534 if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
535 {
536 m_data.value(k) = m_data.value(i);
537 m_data.index(k) = m_data.index(i);
538 ++k;
539 }
540 }
541 }
542 m_outerIndex[m_outerSize] = k;
543 m_data.resize(k,0);
544 }
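 /* Pruning sketch (the thresholds are illustrative): given a SparseMatrix<double> A,
  *
  *   A.prune(1.0, 1e-12);                     // drop entries much smaller than 1.0
  *   A.prune([](const Eigen::Index&, const Eigen::Index&, const double& v)
  *           { return std::abs(v) > 1e-9; }); // keep-functor overload (C++11 lambda)
  *
  * The functor receives (row, col, value) and returns true for the entries to keep. */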
545
554 void conservativeResize(Index rows, Index cols)
555 {
556 // No change
557 if (this->rows() == rows && this->cols() == cols) return;
558
559 // If one dimension is null, then there is nothing to be preserved
560 if(rows==0 || cols==0) return resize(rows,cols);
561
562 Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
563 Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
564 StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);
565
566 // Deals with inner non zeros
567 if (m_innerNonZeros)
568 {
569 // Resize m_innerNonZeros
570 StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
571 if (!newInnerNonZeros) internal::throw_std_bad_alloc();
572 m_innerNonZeros = newInnerNonZeros;
573
574 for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
575 m_innerNonZeros[i] = 0;
576 }
577 else if (innerChange < 0)
578 {
579 // Inner size decreased: allocate a new m_innerNonZeros
580 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize + outerChange) * sizeof(StorageIndex)));
581 if (!m_innerNonZeros) internal::throw_std_bad_alloc();
582 for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
583 m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
584 for(Index i = m_outerSize; i < m_outerSize + outerChange; i++)
585 m_innerNonZeros[i] = 0;
586 }
587
588 // Change the m_innerNonZeros in case of a decrease of inner size
589 if (m_innerNonZeros && innerChange < 0)
590 {
591 for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
592 {
593 StorageIndex &n = m_innerNonZeros[i];
594 StorageIndex start = m_outerIndex[i];
595 while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
596 }
597 }
598
599 m_innerSize = newInnerSize;
600
601 // Re-allocate outer index structure if necessary
602 if (outerChange == 0)
603 return;
604
605 StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
606 if (!newOuterIndex) internal::throw_std_bad_alloc();
607 m_outerIndex = newOuterIndex;
608 if (outerChange > 0)
609 {
610 StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
611 for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
612 m_outerIndex[i] = last;
613 }
614 m_outerSize += outerChange;
615 }
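 /* Sketch: resize() discards all entries while conservativeResize() keeps the overlapping
  * block (sizes are illustrative):
  *
  *   Eigen::SparseMatrix<double> A(5,5);
  *   A.insert(1,1) = 2.0;
  *   A.conservativeResize(8,8);   // the (1,1) entry is preserved, new rows/cols are empty
  *   A.resize(3,3);               // wipes everything: A is now an empty 3x3 matrix */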
616
624 void resize(Index rows, Index cols)
625 {
626 const Index outerSize = IsRowMajor ? rows : cols;
627 m_innerSize = IsRowMajor ? cols : rows;
628 m_data.clear();
629 if (m_outerSize != outerSize || m_outerSize==0)
630 {
631 std::free(m_outerIndex);
632 m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
633 if (!m_outerIndex) internal::throw_std_bad_alloc();
634
635 m_outerSize = outerSize;
636 }
637 if(m_innerNonZeros)
638 {
639 std::free(m_innerNonZeros);
640 m_innerNonZeros = 0;
641 }
642 memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
643 }
644
647 void resizeNonZeros(Index size)
648 {
649 m_data.resize(size);
650 }
651
653 const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }
654
659 DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }
660
662 inline SparseMatrix()
663 : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
664 {
665 check_template_parameters();
666 resize(0, 0);
667 }
668
670 inline SparseMatrix(Index rows, Index cols)
671 : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
672 {
673 check_template_parameters();
674 resize(rows, cols);
675 }
676
678 template<typename OtherDerived>
679 inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
680 : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
681 {
682 EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
683 YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
684 check_template_parameters();
685 const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
686 if (needToTranspose)
687 *this = other.derived();
688 else
689 {
690 #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
691 EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
692 #endif
693 internal::call_assignment_no_alias(*this, other.derived());
694 }
695 }
696
698 template<typename OtherDerived, unsigned int UpLo>
699 inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
700 : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
701 {
702 check_template_parameters();
703 Base::operator=(other);
704 }
705
707 inline SparseMatrix(const SparseMatrix& other)
708 : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
709 {
710 check_template_parameters();
711 *this = other.derived();
712 }
713
715 template<typename OtherDerived>
716 SparseMatrix(const ReturnByValue<OtherDerived>& other)
717 : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
718 {
719 check_template_parameters();
720 initAssignment(other);
721 other.evalTo(*this);
722 }
723
725 template<typename OtherDerived>
726 explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
727 : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
728 {
729 check_template_parameters();
730 *this = other.derived();
731 }
732
735 inline void swap(SparseMatrix& other)
736 {
737 //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
738 std::swap(m_outerIndex, other.m_outerIndex);
739 std::swap(m_innerSize, other.m_innerSize);
740 std::swap(m_outerSize, other.m_outerSize);
741 std::swap(m_innerNonZeros, other.m_innerNonZeros);
742 m_data.swap(other.m_data);
743 }
744
747 inline void setIdentity()
748 {
749 eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
750 this->m_data.resize(rows());
751 Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
752 Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
753 Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
754 std::free(m_innerNonZeros);
755 m_innerNonZeros = 0;
756 }
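 /* Sketch:
  *
  *   Eigen::SparseMatrix<double> I(5,5);
  *   I.setIdentity();             // 5 stored non-zeros, one per column, all equal to 1 */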
757 inline SparseMatrix& operator=(const SparseMatrix& other)
758 {
759 if (other.isRValue())
760 {
761 swap(other.const_cast_derived());
762 }
763 else if(this!=&other)
764 {
765 #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
766 EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
767 #endif
768 initAssignment(other);
769 if(other.isCompressed())
770 {
771 internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
772 m_data = other.m_data;
773 }
774 else
775 {
776 Base::operator=(other);
777 }
778 }
779 return *this;
780 }
781
782#ifndef EIGEN_PARSED_BY_DOXYGEN
783 template<typename OtherDerived>
784 inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
785 { return Base::operator=(other.derived()); }
786#endif // EIGEN_PARSED_BY_DOXYGEN
787
788 template<typename OtherDerived>
789 EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
790
791 friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
792 {
793 EIGEN_DBG_SPARSE(
794 s << "Nonzero entries:\n";
795 if(m.isCompressed())
796 {
797 for (Index i=0; i<m.nonZeros(); ++i)
798 s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
799 }
800 else
801 {
802 for (Index i=0; i<m.outerSize(); ++i)
803 {
804 Index p = m.m_outerIndex[i];
805 Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
806 Index k=p;
807 for (; k<pe; ++k) {
808 s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
809 }
810 for (; k<m.m_outerIndex[i+1]; ++k) {
811 s << "(_,_) ";
812 }
813 }
814 }
815 s << std::endl;
816 s << std::endl;
817 s << "Outer pointers:\n";
818 for (Index i=0; i<m.outerSize(); ++i) {
819 s << m.m_outerIndex[i] << " ";
820 }
821 s << " $" << std::endl;
822 if(!m.isCompressed())
823 {
824 s << "Inner non zeros:\n";
825 for (Index i=0; i<m.outerSize(); ++i) {
826 s << m.m_innerNonZeros[i] << " ";
827 }
828 s << " $" << std::endl;
829 }
830 s << std::endl;
831 );
832 s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
833 return s;
834 }
835
837 inline ~SparseMatrix()
838 {
839 std::free(m_outerIndex);
840 std::free(m_innerNonZeros);
841 }
842
844 Scalar sum() const;
845
846# ifdef EIGEN_SPARSEMATRIX_PLUGIN
847# include EIGEN_SPARSEMATRIX_PLUGIN
848# endif
849
850protected:
851
852 template<typename Other>
853 void initAssignment(const Other& other)
854 {
855 resize(other.rows(), other.cols());
856 if(m_innerNonZeros)
857 {
858 std::free(m_innerNonZeros);
859 m_innerNonZeros = 0;
860 }
861 }
862
865 EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
866
869 class SingletonVector
870 {
871 StorageIndex m_index;
872 StorageIndex m_value;
873 public:
874 typedef StorageIndex value_type;
875 SingletonVector(Index i, Index v)
876 : m_index(convert_index(i)), m_value(convert_index(v))
877 {}
878
879 StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
880 };
881
884 EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
885
886public:
889 EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
890 {
891 const Index outer = IsRowMajor ? row : col;
892 const Index inner = IsRowMajor ? col : row;
893
894 eigen_assert(!isCompressed());
895 eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
896
897 Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
898 m_data.index(p) = convert_index(inner);
899 return (m_data.value(p) = Scalar(0));
900 }
901
902private:
903 static void check_template_parameters()
904 {
905 EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
906 EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
907 }
908
909 struct default_prunning_func {
910 default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
911 inline bool operator() (const Index&, const Index&, const Scalar& value) const
912 {
913 return !internal::isMuchSmallerThan(value, reference, epsilon);
914 }
915 Scalar reference;
916 RealScalar epsilon;
917 };
918};
919
920namespace internal {
921
922template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
923void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
924{
925 enum { IsRowMajor = SparseMatrixType::IsRowMajor };
926 typedef typename SparseMatrixType::Scalar Scalar;
927 typedef typename SparseMatrixType::StorageIndex StorageIndex;
928 SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());
929
930 if(begin!=end)
931 {
932 // pass 1: count the nnz per inner-vector
933 typename SparseMatrixType::IndexVector wi(trMat.outerSize());
934 wi.setZero();
935 for(InputIterator it(begin); it!=end; ++it)
936 {
937 eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
938 wi(IsRowMajor ? it->col() : it->row())++;
939 }
940
941 // pass 2: insert all the elements into trMat
942 trMat.reserve(wi);
943 for(InputIterator it(begin); it!=end; ++it)
944 trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
945
946 // pass 3:
947 trMat.collapseDuplicates(dup_func);
948 }
949
950 // pass 4: transposed copy -> implicit sorting
951 mat = trMat;
952}
953
954}
955
956
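/* Typical triplet-based assembly, sketched here with made-up values (this is the most robust
 * way to build a SparseMatrix; duplicates are summed by the default overload below):
 *
 *   #include <Eigen/Sparse>
 *   #include <vector>
 *
 *   std::vector<Eigen::Triplet<double> > triplets;
 *   triplets.push_back(Eigen::Triplet<double>(0, 1, 3.0));
 *   triplets.push_back(Eigen::Triplet<double>(2, 2, -1.0));
 *   triplets.push_back(Eigen::Triplet<double>(0, 1, 2.0));   // duplicate of (0,1)
 *
 *   Eigen::SparseMatrix<double> A(3, 3);
 *   A.setFromTriplets(triplets.begin(), triplets.end());     // A.coeff(0,1) == 5, A.coeff(2,2) == -1
 */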
994template<typename Scalar, int _Options, typename _StorageIndex>
995template<typename InputIterators>
996void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
997{
998 internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
999}
1000
1010template<typename Scalar, int _Options, typename _StorageIndex>
1011template<typename InputIterators,typename DupFunctor>
1012void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
1013{
1014 internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
1015}
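/* The DupFunctor overload combines duplicate triplets with an arbitrary binary functor instead
 * of summing them, e.g. keeping the largest contribution (sketch, requires C++11 and <algorithm>):
 *
 *   A.setFromTriplets(triplets.begin(), triplets.end(),
 *                     [](const double& a, const double& b) { return std::max(a, b); });
 */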
1016
1018template<typename Scalar, int _Options, typename _StorageIndex>
1019template<typename DupFunctor>
1020void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)
1021{
1022 eigen_assert(!isCompressed());
1023 // TODO, in practice we should be able to use m_innerNonZeros for that task
1024 IndexVector wi(innerSize());
1025 wi.fill(-1);
1026 StorageIndex count = 0;
1027 // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
1028 for(Index j=0; j<outerSize(); ++j)
1029 {
1030 StorageIndex start = count;
1031 Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
1032 for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
1033 {
1034 Index i = m_data.index(k);
1035 if(wi(i)>=start)
1036 {
1037 // we have already met this entry => accumulate it
1038 m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
1039 }
1040 else
1041 {
1042 m_data.value(count) = m_data.value(k);
1043 m_data.index(count) = m_data.index(k);
1044 wi(i) = count;
1045 ++count;
1046 }
1047 }
1048 m_outerIndex[j] = start;
1049 }
1050 m_outerIndex[m_outerSize] = count;
1051
1052 // turn the matrix into compressed form
1053 std::free(m_innerNonZeros);
1054 m_innerNonZeros = 0;
1055 m_data.resize(m_outerIndex[m_outerSize]);
1056}
1057
1058template<typename Scalar, int _Options, typename _StorageIndex>
1059template<typename OtherDerived>
1060EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
1061{
1062 EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
1063 YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
1064
1065 #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1066 EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1067 #endif
1068
1069 const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
1070 if (needToTranspose)
1071 {
1072 #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1073 EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1074 #endif
1075 // two passes algorithm:
1076 // 1 - compute the number of coeffs per dest inner vector
1077 // 2 - do the actual copy/eval
1078 // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
1079 typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
1080 typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
1081 typedef internal::evaluator<_OtherCopy> OtherCopyEval;
1082 OtherCopy otherCopy(other.derived());
1083 OtherCopyEval otherCopyEval(otherCopy);
1084
1085 SparseMatrix dest(other.rows(),other.cols());
1086 Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();
1087
1088 // pass 1
1089 // FIXME the above copy could be merged with that pass
1090 for (Index j=0; j<otherCopy.outerSize(); ++j)
1091 for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1092 ++dest.m_outerIndex[it.index()];
1093
1094 // prefix sum
1095 StorageIndex count = 0;
1096 IndexVector positions(dest.outerSize());
1097 for (Index j=0; j<dest.outerSize(); ++j)
1098 {
1099 StorageIndex tmp = dest.m_outerIndex[j];
1100 dest.m_outerIndex[j] = count;
1101 positions[j] = count;
1102 count += tmp;
1103 }
1104 dest.m_outerIndex[dest.outerSize()] = count;
1105 // alloc
1106 dest.m_data.resize(count);
1107 // pass 2
1108 for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
1109 {
1110 for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1111 {
1112 Index pos = positions[it.index()]++;
1113 dest.m_data.index(pos) = j;
1114 dest.m_data.value(pos) = it.value();
1115 }
1116 }
1117 this->swap(dest);
1118 return *this;
1119 }
1120 else
1121 {
1122 if(other.isRValue())
1123 {
1124 initAssignment(other.derived());
1125 }
1126 // there is no special optimization
1127 return Base::operator=(other.derived());
1128 }
1129}
1130
1131template<typename _Scalar, int _Options, typename _StorageIndex>
1132typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)
1133{
1134 eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
1135
1136 const Index outer = IsRowMajor ? row : col;
1137 const Index inner = IsRowMajor ? col : row;
1138
1139 if(isCompressed())
1140 {
1141 if(nonZeros()==0)
1142 {
1143 // reserve space if not already done
1144 if(m_data.allocatedSize()==0)
1145 m_data.reserve(2*m_innerSize);
1146
1147 // turn the matrix into non-compressed mode
1148 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1149 if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1150
1151 memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
1152
1153 // pack all inner-vectors to the end of the pre-allocated space
1154 // and allocate the entire free-space to the first inner-vector
1155 StorageIndex end = convert_index(m_data.allocatedSize());
1156 for(Index j=1; j<=m_outerSize; ++j)
1157 m_outerIndex[j] = end;
1158 }
1159 else
1160 {
1161 // turn the matrix into non-compressed mode
1162 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1163 if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1164 for(Index j=0; j<m_outerSize; ++j)
1165 m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
1166 }
1167 }
1168
1169 // check whether we can do a fast "push back" insertion
1170 Index data_end = m_data.allocatedSize();
1171
1172 // First case: we are filling a new inner vector which is packed at the end.
1173 // We assume that all remaining inner-vectors are also empty and packed to the end.
1174 if(m_outerIndex[outer]==data_end)
1175 {
1176 eigen_internal_assert(m_innerNonZeros[outer]==0);
1177
1178 // pack previous empty inner-vectors to end of the used-space
1179 // and allocate the entire free-space to the current inner-vector.
1180 StorageIndex p = convert_index(m_data.size());
1181 Index j = outer;
1182 while(j>=0 && m_innerNonZeros[j]==0)
1183 m_outerIndex[j--] = p;
1184
1185 // push back the new element
1186 ++m_innerNonZeros[outer];
1187 m_data.append(Scalar(0), inner);
1188
1189 // check for reallocation
1190 if(data_end != m_data.allocatedSize())
1191 {
1192 // m_data has been reallocated
1193 // -> move remaining inner-vectors back to the end of the free-space
1194 // so that the entire free-space is allocated to the current inner-vector.
1195 eigen_internal_assert(data_end < m_data.allocatedSize());
1196 StorageIndex new_end = convert_index(m_data.allocatedSize());
1197 for(Index k=outer+1; k<=m_outerSize; ++k)
1198 if(m_outerIndex[k]==data_end)
1199 m_outerIndex[k] = new_end;
1200 }
1201 return m_data.value(p);
1202 }
1203
1204 // Second case: the next inner-vector is packed to the end
1205 // and the current inner-vector's end matches the used space.
1206 if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
1207 {
1208 eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);
1209
1210 // add space for the new element
1211 ++m_innerNonZeros[outer];
1212 m_data.resize(m_data.size()+1);
1213
1214 // check for reallocation
1215 if(data_end != m_data.allocatedSize())
1216 {
1217 // m_data has been reallocated
1218 // -> move remaining inner-vectors back to the end of the free-space
1219 // so that the entire free-space is allocated to the current inner-vector.
1220 eigen_internal_assert(data_end < m_data.allocatedSize());
1221 StorageIndex new_end = convert_index(m_data.allocatedSize());
1222 for(Index k=outer+1; k<=m_outerSize; ++k)
1223 if(m_outerIndex[k]==data_end)
1224 m_outerIndex[k] = new_end;
1225 }
1226
1227 // and insert it at the right position (sorted insertion)
1228 Index startId = m_outerIndex[outer];
1229 Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
1230 while ( (p > startId) && (m_data.index(p-1) > inner) )
1231 {
1232 m_data.index(p) = m_data.index(p-1);
1233 m_data.value(p) = m_data.value(p-1);
1234 --p;
1235 }
1236
1237 m_data.index(p) = convert_index(inner);
1238 return (m_data.value(p) = Scalar(0));
1239 }
1240
1241 if(m_data.size() != m_data.allocatedSize())
1242 {
1243 // make sure the matrix is compatible with random un-compressed insertion:
1244 m_data.resize(m_data.allocatedSize());
1245 this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
1246 }
1247
1248 return insertUncompressed(row,col);
1249}
1250
1251template<typename _Scalar, int _Options, typename _StorageIndex>
1252EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)
1253{
1254 eigen_assert(!isCompressed());
1255
1256 const Index outer = IsRowMajor ? row : col;
1257 const StorageIndex inner = convert_index(IsRowMajor ? col : row);
1258
1259 Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
1260 StorageIndex innerNNZ = m_innerNonZeros[outer];
1261 if(innerNNZ>=room)
1262 {
1263 // this inner vector is full, we need to reallocate the whole buffer :(
1264 reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
1265 }
1266
1267 Index startId = m_outerIndex[outer];
1268 Index p = startId + m_innerNonZeros[outer];
1269 while ( (p > startId) && (m_data.index(p-1) > inner) )
1270 {
1271 m_data.index(p) = m_data.index(p-1);
1272 m_data.value(p) = m_data.value(p-1);
1273 --p;
1274 }
1275 eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
1276
1277 m_innerNonZeros[outer]++;
1278
1279 m_data.index(p) = inner;
1280 return (m_data.value(p) = Scalar(0));
1281}
1282
1283template<typename _Scalar, int _Options, typename _StorageIndex>
1284EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
1285{
1286 eigen_assert(isCompressed());
1287
1288 const Index outer = IsRowMajor ? row : col;
1289 const Index inner = IsRowMajor ? col : row;
1290
1291 Index previousOuter = outer;
1292 if (m_outerIndex[outer+1]==0)
1293 {
1294 // we start a new inner vector
1295 while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
1296 {
1297 m_outerIndex[previousOuter] = convert_index(m_data.size());
1298 --previousOuter;
1299 }
1300 m_outerIndex[outer+1] = m_outerIndex[outer];
1301 }
1302
1303 // here we have to handle the tricky case where the outerIndex array
1304 // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
1305 // the 2nd inner vector...
1306 bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
1307 && (std::size_t(m_outerIndex[outer+1]) == m_data.size());
1308
1309 std::size_t startId = m_outerIndex[outer];
1310 // FIXME let's make sure sizeof(long int) == sizeof(std::size_t)
1311 std::size_t p = m_outerIndex[outer+1];
1312 ++m_outerIndex[outer+1];
1313
1314 double reallocRatio = 1;
1315 if (m_data.allocatedSize()<=m_data.size())
1316 {
1317 // if there is no preallocated memory, let's reserve a minimum of 32 elements
1318 if (m_data.size()==0)
1319 {
1320 m_data.reserve(32);
1321 }
1322 else
1323 {
1324 // we need to reallocate the data, to reduce multiple reallocations
1325 // we use a smart resize algorithm based on the current filling ratio
1326 // in addition, we use double to avoid integer overflows
1327 double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
1328 reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
1329 // furthermore we bound the realloc ratio to:
1330 // 1) reduce multiple minor realloc when the matrix is almost filled
1331 // 2) avoid allocating too much memory when the matrix is almost empty
1332 reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
1333 }
1334 }
1335 m_data.resize(m_data.size()+1,reallocRatio);
1336
1337 if (!isLastVec)
1338 {
1339 if (previousOuter==-1)
1340 {
1341 // oops wrong guess.
1342 // let's correct the outer offsets
1343 for (Index k=0; k<=(outer+1); ++k)
1344 m_outerIndex[k] = 0;
1345 Index k=outer+1;
1346 while(m_outerIndex[k]==0)
1347 m_outerIndex[k++] = 1;
1348 while (k<=m_outerSize && m_outerIndex[k]!=0)
1349 m_outerIndex[k++]++;
1350 p = 0;
1351 --k;
1352 k = m_outerIndex[k]-1;
1353 while (k>0)
1354 {
1355 m_data.index(k) = m_data.index(k-1);
1356 m_data.value(k) = m_data.value(k-1);
1357 k--;
1358 }
1359 }
1360 else
1361 {
1362 // we are not inserting into the last inner vec
1363 // update outer indices:
1364 Index j = outer+2;
1365 while (j<=m_outerSize && m_outerIndex[j]!=0)
1366 m_outerIndex[j++]++;
1367 --j;
1368 // shift data of last vecs:
1369 Index k = m_outerIndex[j]-1;
1370 while (k>=Index(p))
1371 {
1372 m_data.index(k) = m_data.index(k-1);
1373 m_data.value(k) = m_data.value(k-1);
1374 k--;
1375 }
1376 }
1377 }
1378
1379 while ( (p > startId) && (m_data.index(p-1) > inner) )
1380 {
1381 m_data.index(p) = m_data.index(p-1);
1382 m_data.value(p) = m_data.value(p-1);
1383 --p;
1384 }
1385
1386 m_data.index(p) = inner;
1387 return (m_data.value(p) = Scalar(0));
1388}
1389
1390namespace internal {
1391
1392template<typename _Scalar, int _Options, typename _StorageIndex>
1393struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
1394 : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
1395{
1396 typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;
1397 typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
1398 evaluator() : Base() {}
1399 explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
1400};
1401
1402}
1403
1404} // end namespace Eigen
1405
1406#endif // EIGEN_SPARSEMATRIX_H