// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEMATRIX_H
#define EIGEN_SPARSEMATRIX_H

namespace Eigen {

namespace internal {

template<typename _Scalar, int _Options, typename _Index>
struct traits<SparseMatrix<_Scalar, _Options, _Index> >
{
  typedef _Scalar Scalar;
  typedef _Index Index;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = Dynamic,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = Dynamic,
    Flags = _Options | NestByRefBit | LvalueBit,
    CoeffReadCost = NumTraits<Scalar>::ReadCost,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};

template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
{
  typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
  typedef typename nested<MatrixType>::type MatrixTypeNested;
  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;

  typedef _Scalar Scalar;
  typedef Dense StorageKind;
  typedef _Index Index;
  typedef MatrixXpr XprKind;

  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = 1,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = 1,
    Flags = 0,
    CoeffReadCost = _MatrixTypeNested::CoeffReadCost*10
  };
};

} // end namespace internal

// A versatile sparse matrix representation: a more flexible variant of the
// widely-used Compressed Column (or Row) Storage scheme. Each inner vector may
// keep reserved free space so that insertions are cheap (uncompressed mode),
// and the matrix can be packed back into standard CCS/CRS form (compressed mode).
template<typename _Scalar, int _Options, typename _Index>
class SparseMatrix
  : public SparseMatrixBase<SparseMatrix<_Scalar, _Options, _Index> >
{
  public:
    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)

    typedef MappedSparseMatrix<Scalar,Flags> Map;
    using Base::IsRowMajor;
    typedef internal::CompressedStorage<Scalar,Index> Storage;
    enum {
      Options = _Options
    };

  protected:

    typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;

    Index m_outerSize;
    Index m_innerSize;
    Index* m_outerIndex;
    Index* m_innerNonZeros;   // optional, if null then the data is compressed
    Storage m_data;

    Eigen::Map<Matrix<Index,Dynamic,1> > innerNonZeros() { return Eigen::Map<Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
    const Eigen::Map<const Matrix<Index,Dynamic,1> > innerNonZeros() const { return Eigen::Map<const Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }

  public:

    // Returns whether *this is in compressed form.
    inline bool isCompressed() const { return m_innerNonZeros==0; }

    // Returns the number of rows of the matrix.
    inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
    // Returns the number of columns of the matrix.
    inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }

    // Returns the number of rows (resp. columns) for row-major (resp. column-major) storage.
    inline Index innerSize() const { return m_innerSize; }
    // Returns the number of columns (resp. rows) for row-major (resp. column-major) storage.
    inline Index outerSize() const { return m_outerSize; }

    // Pointer to the array of values.
    inline const Scalar* valuePtr() const { return &m_data.value(0); }
    inline Scalar* valuePtr() { return &m_data.value(0); }

    // Pointer to the array of inner indices.
    inline const Index* innerIndexPtr() const { return &m_data.index(0); }
    inline Index* innerIndexPtr() { return &m_data.index(0); }

    // Pointer to the array of starting positions of the inner vectors.
    inline const Index* outerIndexPtr() const { return m_outerIndex; }
    inline Index* outerIndexPtr() { return m_outerIndex; }

    // Pointer to the array of per-inner-vector nonzero counts (null in compressed mode).
    inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; }
    inline Index* innerNonZeroPtr() { return m_innerNonZeros; }

    // \internal
    inline Storage& data() { return m_data; }
    // \internal
    inline const Storage& data() const { return m_data; }

    // Returns the value at (row,col), or Scalar(0) if the element is not stored.
    inline Scalar coeff(Index row, Index col) const
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      return m_data.atInRange(m_outerIndex[outer], end, inner);
    }

    // Returns a non-const reference to the value at (row,col). If the element
    // does not exist yet, it is inserted via insert(Index,Index), which turns
    // the matrix into uncompressed mode if it was not already.
    inline Scalar& coeffRef(Index row, Index col)
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      Index start = m_outerIndex[outer];
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
      if(end<=start)
        return insert(row,col);
      const Index p = m_data.searchLowerIndex(start,end-1,inner);
      if((p<end) && (m_data.index(p)==inner))
        return m_data.value(p);
      else
        return insert(row,col);
    }

    // Inserts a new element which is assumed not to exist yet. If the matrix is
    // in compressed form, it is first turned into uncompressed mode by reserving
    // room for a couple of extra elements per inner vector.
    EIGEN_DONT_INLINE Scalar& insert(Index row, Index col)
    {
      if(isCompressed())
      {
        reserve(VectorXi::Constant(outerSize(), 2));
      }
      return insertUncompressed(row,col);
    }
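
    // Illustrative sketch (not part of the original source): random-order
    // filling typically combines reserve(), insert()/coeffRef() and
    // makeCompressed(); i, j, v, w below are placeholders.
    //
    //   SparseMatrix<double> m(rows,cols);
    //   m.reserve(VectorXi::Constant(cols, 6)); // ~6 nonzeros per column
    //   m.insert(i,j) = v;                      // (i,j) must not be stored yet
    //   m.coeffRef(i,j) += w;                   // updates, inserting if needed
    //   m.makeCompressed();                     // pack back into compressed form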
224
225 public:
226
227 class InnerIterator;
228 class ReverseInnerIterator;
229
231 inline void setZero()
232 {
233 m_data.clear();
234 memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
235 if(m_innerNonZeros)
236 memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(Index));
237 }
238
240 inline Index nonZeros() const
241 {
242 if(m_innerNonZeros)
243 return innerNonZeros().sum();
244 return static_cast<Index>(m_data.size());
245 }
246
250 inline void reserve(Index reserveSize)
251 {
252 eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
253 m_data.reserve(reserveSize);
254 }
255

    // Preallocates reserveSizes[j] nonzeros for each inner vector j.
    #ifdef EIGEN_PARSED_BY_DOXYGEN
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes);
    #else
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif = typename SizesType::value_type())
    {
      EIGEN_UNUSED_VARIABLE(enableif);
      reserveInnerVectors(reserveSizes);
    }
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes, const typename SizesType::Scalar& enableif =
    #if (!defined(_MSC_VER)) || (_MSC_VER>=1500) // MSVC 2005 fails to compile with this typename
      typename
    #endif
      SizesType::Scalar())
    {
      EIGEN_UNUSED_VARIABLE(enableif);
      reserveInnerVectors(reserveSizes);
    }
    #endif // EIGEN_PARSED_BY_DOXYGEN
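
    // Illustrative sketch (assumption, not from the original source): the
    // SizesType overload accepts any vector-like object indexable by operator[],
    // holding one reserve size per inner vector, e.g.
    //
    //   VectorXi sizes = VectorXi::Constant(m.outerSize(), 4);
    //   m.reserve(sizes); // room for 4 nonzeros per inner vector; this switches
    //                     // the matrix to uncompressed mode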
  protected:
    template<class SizesType>
    inline void reserveInnerVectors(const SizesType& reserveSizes)
    {
      if(isCompressed())
      {
        std::size_t totalReserveSize = 0;
        // turn the matrix into non-compressed mode
        m_innerNonZeros = new Index[m_outerSize];

        // temporarily use m_innerNonZeros to hold the new starting points.
        Index* newOuterIndex = m_innerNonZeros;

        Index count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
          totalReserveSize += reserveSizes[j];
        }
        m_data.reserve(totalReserveSize);
        std::ptrdiff_t previousOuterIndex = m_outerIndex[m_outerSize];
        for(std::ptrdiff_t j=m_outerSize-1; j>=0; --j)
        {
          std::ptrdiff_t innerNNZ = previousOuterIndex - m_outerIndex[j];
          for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
          {
            m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
            m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
          }
          previousOuterIndex = m_outerIndex[j];
          m_outerIndex[j] = newOuterIndex[j];
          m_innerNonZeros[j] = innerNNZ;
        }
        m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];

        m_data.resize(m_outerIndex[m_outerSize]);
      }
      else
      {
        Index* newOuterIndex = new Index[m_outerSize+1];
        Index count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          Index alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
          Index toReserve = std::max<std::ptrdiff_t>(reserveSizes[j], alreadyReserved);
          count += toReserve + m_innerNonZeros[j];
        }
        newOuterIndex[m_outerSize] = count;

        m_data.resize(count);
        for(std::ptrdiff_t j=m_outerSize-1; j>=0; --j)
        {
          std::ptrdiff_t offset = newOuterIndex[j] - m_outerIndex[j];
          if(offset>0)
          {
            std::ptrdiff_t innerNNZ = m_innerNonZeros[j];
            for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
            {
              m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
              m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
            }
          }
        }

        std::swap(m_outerIndex, newOuterIndex);
        delete[] newOuterIndex;
      }
    }

  public:

    //--- low level purely coherent filling ---

    // Appends (row,col) at the end of the storage. The element must be the last
    // one of its inner vector, and its inner index the largest inserted so far.
    inline Scalar& insertBack(Index row, Index col)
    {
      return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
    }

    // Same as insertBack, but with outer/inner coordinates.
    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
    {
      eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
      eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(0, inner);
      return m_data.value(p);
    }

    // Like insertBackByOuterInner, but does not require sorted inner indices.
    inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
    {
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(0, inner);
      return m_data.value(p);
    }

    // Starts a new inner vector; must be called for each inner vector in order.
    inline void startVec(Index outer)
    {
      eigen_assert(m_outerIndex[outer]==int(m_data.size()) && "You must call startVec for each inner vector sequentially");
      eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
      m_outerIndex[outer+1] = m_outerIndex[outer];
    }

    // Must be called after low-level coherent filling: fills the remaining outer
    // indices so that the matrix ends up in a consistent compressed state.
    inline void finalize()
    {
      if(isCompressed())
      {
        Index size = static_cast<Index>(m_data.size());
        Index i = m_outerSize;
        // find the last filled column
        while (i>=0 && m_outerIndex[i]==0)
          --i;
        ++i;
        while (i<=m_outerSize)
        {
          m_outerIndex[i] = size;
          ++i;
        }
      }
    }
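
    // Illustrative sketch (assumption, not from the original source): the
    // coherent low-level filling protocol for a column-major matrix; rows,
    // cols, estimated_nnz and value_ij are placeholders.
    //
    //   SparseMatrix<double> m(rows,cols);
    //   m.reserve(estimated_nnz);
    //   for(Index j=0; j<cols; ++j)
    //   {
    //     m.startVec(j);
    //     // for each structurally nonzero row i of column j, in increasing order:
    //     m.insertBack(i,j) = value_ij;
    //   }
    //   m.finalize();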

    //---

    template<typename InputIterators>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end);

    void sumupDuplicates();

    //---

    // Same as insert(Index,Index), but with outer/inner coordinates.
    EIGEN_DONT_INLINE Scalar& insertByOuterInner(Index j, Index i)
    {
      return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
    }

    // Turns the matrix into compressed format: discards the free space kept by
    // uncompressed mode and releases m_innerNonZeros.
    void makeCompressed()
    {
      if(isCompressed())
        return;

      Index oldStart = m_outerIndex[1];
      m_outerIndex[1] = m_innerNonZeros[0];
      for(Index j=1; j<m_outerSize; ++j)
      {
        Index nextOldStart = m_outerIndex[j+1];
        std::ptrdiff_t offset = oldStart - m_outerIndex[j];
        if(offset>0)
        {
          for(Index k=0; k<m_innerNonZeros[j]; ++k)
          {
            m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
            m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
          }
        }
        m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
        oldStart = nextOldStart;
      }
      delete[] m_innerNonZeros;
      m_innerNonZeros = 0;
      m_data.resize(m_outerIndex[m_outerSize]);
      m_data.squeeze();
    }
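
    // Worked example (illustrative, not from the original source): in compressed
    // column-major form the 3x3 matrix [1 0 2; 0 0 3; 4 5 6] is stored as
    //
    //   values:        1 4 5 2 3 6
    //   inner indices: 0 2 2 0 1 2
    //   outer index:   0 2 3 6
    //
    // Uncompressed mode keeps the same layout plus per-column free space, with
    // m_innerNonZeros[j] giving the number of used entries of column j.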

    // Suppresses all nonzeros which are much smaller than reference under the tolerance epsilon.
    void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
    {
      prune(default_prunning_func(reference,epsilon));
    }

    // Suppresses all nonzeros for which keep(row, col, value) returns false;
    // the result is in compressed form.
    template<typename KeepFunc>
    void prune(const KeepFunc& keep = KeepFunc())
    {
      // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
      // TODO also implement a unit test

      Index k = 0;
      for(Index j=0; j<m_outerSize; ++j)
      {
        Index previousStart = m_outerIndex[j];
        m_outerIndex[j] = k;
        Index end = m_outerIndex[j+1];
        for(Index i=previousStart; i<end; ++i)
        {
          if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
          {
            m_data.value(k) = m_data.value(i);
            m_data.index(k) = m_data.index(i);
            ++k;
          }
        }
      }
      m_outerIndex[m_outerSize] = k;
      m_data.resize(k,0);
    }
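
    // Illustrative sketch (assumption, not from the original source): pruning
    // with a custom functor; the functor is called as keep(row, col, value).
    //
    //   struct KeepUpper {
    //     bool operator()(SparseMatrix<double>::Index row,
    //                     SparseMatrix<double>::Index col,
    //                     const double&) const { return row < col; }
    //   };
    //   m.prune(KeepUpper()); // keeps only the strictly upper triangular part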

    // Resizes the matrix to rows x cols, dropping all existing coefficients.
    void resize(Index rows, Index cols)
    {
      const Index outerSize = IsRowMajor ? rows : cols;
      m_innerSize = IsRowMajor ? cols : rows;
      m_data.clear();
      if (m_outerSize != outerSize || m_outerSize==0)
      {
        delete[] m_outerIndex;
        m_outerIndex = new Index [outerSize+1];
        m_outerSize = outerSize;
      }
      if(m_innerNonZeros)
      {
        delete[] m_innerNonZeros;
        m_innerNonZeros = 0;
      }
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
    }

    // Low-level API: resizes the index/value buffers to hold size nonzeros.
    void resizeNonZeros(Index size)
    {
      // TODO remove this function
      m_data.resize(size);
    }

    // Returns a const expression of the diagonal coefficients.
    const Diagonal<const SparseMatrix> diagonal() const { return *this; }

    // Default constructor yielding an empty 0x0 matrix.
    inline SparseMatrix()
      : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      resize(0, 0);
    }

    // Constructs a rows x cols empty matrix.
    inline SparseMatrix(Index rows, Index cols)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      resize(rows, cols);
    }

    // Constructs a sparse matrix from the sparse expression other.
    template<typename OtherDerived>
    inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other.derived();
    }

    // Copy constructor (performs a deep copy).
    inline SparseMatrix(const SparseMatrix& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other.derived();
    }

    // Copy constructor with in-place evaluation.
    template<typename OtherDerived>
    SparseMatrix(const ReturnByValue<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      initAssignment(other);
      other.evalTo(*this);
    }

    // Swaps the content of two sparse matrices of the same type (optimal performance).
    inline void swap(SparseMatrix& other)
    {
      //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
      std::swap(m_outerIndex, other.m_outerIndex);
      std::swap(m_innerSize, other.m_innerSize);
      std::swap(m_outerSize, other.m_outerSize);
      std::swap(m_innerNonZeros, other.m_innerNonZeros);
      m_data.swap(other.m_data);
    }

    inline SparseMatrix& operator=(const SparseMatrix& other)
    {
      if (other.isRValue())
      {
        swap(other.const_cast_derived());
      }
      else
      {
        initAssignment(other);
        if(other.isCompressed())
        {
          memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index));
          m_data = other.m_data;
        }
        else
        {
          Base::operator=(other);
        }
      }
      return *this;
    }

    #ifndef EIGEN_PARSED_BY_DOXYGEN
    template<typename Lhs, typename Rhs>
    inline SparseMatrix& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
    { return Base::operator=(product); }

    template<typename OtherDerived>
    inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
    {
      initAssignment(other);
      return Base::operator=(other.derived());
    }

    template<typename OtherDerived>
    inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
    { return Base::operator=(other.derived()); }
    #endif

    template<typename OtherDerived>
    EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other)
    {
      const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
      if (needToTranspose)
      {
        // two-pass algorithm:
        //  1 - compute the number of coeffs per dest inner vector
        //  2 - do the actual copy/eval
        // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
        typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
        typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
        OtherCopy otherCopy(other.derived());

        SparseMatrix dest(other.rows(),other.cols());
        Eigen::Map<Matrix<Index, Dynamic, 1> > (dest.m_outerIndex,dest.outerSize()).setZero();

        // pass 1
        // FIXME the above copy could be merged with that pass
        for (Index j=0; j<otherCopy.outerSize(); ++j)
          for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
            ++dest.m_outerIndex[it.index()];

        // prefix sum
        Index count = 0;
        VectorXi positions(dest.outerSize());
        for (Index j=0; j<dest.outerSize(); ++j)
        {
          Index tmp = dest.m_outerIndex[j];
          dest.m_outerIndex[j] = count;
          positions[j] = count;
          count += tmp;
        }
        dest.m_outerIndex[dest.outerSize()] = count;
        // alloc
        dest.m_data.resize(count);
        // pass 2
        for (Index j=0; j<otherCopy.outerSize(); ++j)
        {
          for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
          {
            Index pos = positions[it.index()]++;
            dest.m_data.index(pos) = j;
            dest.m_data.value(pos) = it.value();
          }
        }
        this->swap(dest);
        return *this;
      }
      else
      {
        if(other.isRValue())
          initAssignment(other.derived());
        // there is no special optimization
        return Base::operator=(other.derived());
      }
    }

    friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
    {
      EIGEN_DBG_SPARSE(
        s << "Nonzero entries:\n";
        if(m.isCompressed())
          for (Index i=0; i<m.nonZeros(); ++i)
            s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
        else
          for (Index i=0; i<m.outerSize(); ++i)
          {
            int p = m.m_outerIndex[i];
            int pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
            Index k=p;
            for (; k<pe; ++k)
              s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
            for (; k<m.m_outerIndex[i+1]; ++k)
              s << "(_,_) ";
          }
        s << std::endl;
        s << std::endl;
        s << "Outer pointers:\n";
        for (Index i=0; i<m.outerSize(); ++i)
          s << m.m_outerIndex[i] << " ";
        s << " $" << std::endl;
        if(!m.isCompressed())
        {
          s << "Inner non zeros:\n";
          for (Index i=0; i<m.outerSize(); ++i)
            s << m.m_innerNonZeros[i] << " ";
          s << " $" << std::endl;
        }
        s << std::endl;
      );
      s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
      return s;
    }

    // Destructor: releases the index buffers.
    inline ~SparseMatrix()
    {
      delete[] m_outerIndex;
      delete[] m_innerNonZeros;
    }

#ifndef EIGEN_PARSED_BY_DOXYGEN
    // Overloaded for performance.
    Scalar sum() const;
#endif

#   ifdef EIGEN_SPARSEMATRIX_PLUGIN
#     include EIGEN_SPARSEMATRIX_PLUGIN
#   endif

  protected:

    template<typename Other>
    void initAssignment(const Other& other)
    {
      resize(other.rows(), other.cols());
      if(m_innerNonZeros)
      {
        delete[] m_innerNonZeros;
        m_innerNonZeros = 0;
      }
    }

    // \internal Inserts a new element assuming the matrix is in compressed mode.
    EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col)
    {
      eigen_assert(isCompressed());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      Index previousOuter = outer;
      if (m_outerIndex[outer+1]==0)
      {
        // we start a new inner vector
        while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
        {
          m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
          --previousOuter;
        }
        m_outerIndex[outer+1] = m_outerIndex[outer];
      }

      // here we have to handle the tricky case where the outerIndex array
      // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
      // the 2nd inner vector...
      bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
                    && (size_t(m_outerIndex[outer+1]) == m_data.size());

      size_t startId = m_outerIndex[outer];
      // FIXME let's make sure sizeof(long int) == sizeof(size_t)
      size_t p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];

      float reallocRatio = 1;
      if (m_data.allocatedSize()<=m_data.size())
      {
        // if there is no preallocated memory, let's reserve a minimum of 32 elements
        if (m_data.size()==0)
        {
          m_data.reserve(32);
        }
        else
        {
          // we need to reallocate the data; to reduce repeated reallocations
          // we use a smart resize algorithm based on the current filling ratio.
          // In addition, we use float to avoid integer overflow.
          float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1);
          reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());
          // furthermore we bound the realloc ratio to:
          //   1) reduce multiple minor reallocs when the matrix is almost filled
          //   2) avoid allocating too much memory when the matrix is almost empty
          reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f);
        }
      }
      m_data.resize(m_data.size()+1,reallocRatio);

      if (!isLastVec)
      {
        if (previousOuter==-1)
        {
          // oops, wrong guess.
          // let's correct the outer offsets
          for (Index k=0; k<=(outer+1); ++k)
            m_outerIndex[k] = 0;
          Index k=outer+1;
          while(m_outerIndex[k]==0)
            m_outerIndex[k++] = 1;
          while (k<=m_outerSize && m_outerIndex[k]!=0)
            m_outerIndex[k++]++;
          p = 0;
          --k;
          k = m_outerIndex[k]-1;
          while (k>0)
          {
            m_data.index(k) = m_data.index(k-1);
            m_data.value(k) = m_data.value(k-1);
            k--;
          }
        }
        else
        {
          // we are not inserting into the last inner vec
          // update outer indices:
          Index j = outer+2;
          while (j<=m_outerSize && m_outerIndex[j]!=0)
            m_outerIndex[j++]++;
          --j;
          // shift data of last vecs:
          Index k = m_outerIndex[j]-1;
          while (k>=Index(p))
          {
            m_data.index(k) = m_data.index(k-1);
            m_data.value(k) = m_data.value(k-1);
            k--;
          }
        }
      }

      while ( (p > startId) && (m_data.index(p-1) > inner) )
      {
        m_data.index(p) = m_data.index(p-1);
        m_data.value(p) = m_data.value(p-1);
        --p;
      }

      m_data.index(p) = inner;
      return (m_data.value(p) = 0);
    }

    // \internal A vector object equal to 0 everywhere except at index m_index,
    // where it equals m_value; used by insertUncompressed to grow a single inner vector.
    class SingletonVector
    {
        Index m_index;
        Index m_value;
      public:
        typedef Index value_type;
        SingletonVector(Index i, Index v)
          : m_index(i), m_value(v)
        {}

        Index operator[](Index i) const { return i==m_index ? m_value : 0; }
    };

    // \internal Inserts a new element assuming the matrix is in uncompressed mode.
    EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col)
    {
      eigen_assert(!isCompressed());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      std::ptrdiff_t room = m_outerIndex[outer+1] - m_outerIndex[outer];
      std::ptrdiff_t innerNNZ = m_innerNonZeros[outer];
      if(innerNNZ>=room)
      {
        // this inner vector is full, we need to reallocate the whole buffer :(
        reserve(SingletonVector(outer,std::max<std::ptrdiff_t>(2,innerNNZ)));
      }

      Index startId = m_outerIndex[outer];
      Index p = startId + m_innerNonZeros[outer];
      while ( (p > startId) && (m_data.index(p-1) > inner) )
      {
        m_data.index(p) = m_data.index(p-1);
        m_data.value(p) = m_data.value(p-1);
        --p;
      }
      eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists; you must call coeffRef to this end");

      m_innerNonZeros[outer]++;

      m_data.index(p) = inner;
      return (m_data.value(p) = 0);
    }

  public:
    // \internal Appends (row,col) assuming uncompressed mode and that room is
    // already available in inner vector outer.
    inline Scalar& insertBackUncompressed(Index row, Index col)
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      eigen_assert(!isCompressed());
      eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));

      Index p = m_outerIndex[outer] + m_innerNonZeros[outer];
      m_innerNonZeros[outer]++;
      m_data.index(p) = inner;
      return (m_data.value(p) = 0);
    }

  private:
    static void check_template_parameters()
    {
      EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
    }

    struct default_prunning_func {
      default_prunning_func(Scalar ref, RealScalar eps) : reference(ref), epsilon(eps) {}
      inline bool operator() (const Index&, const Index&, const Scalar& value) const
      {
        return !internal::isMuchSmallerThan(value, reference, epsilon);
      }
      Scalar reference;
      RealScalar epsilon;
    };
};

template<typename Scalar, int _Options, typename _Index>
class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
{
  public:
    InnerIterator(const SparseMatrix& mat, Index outer)
      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer])
    {
      if(mat.isCompressed())
        m_end = mat.m_outerIndex[outer+1];
      else
        m_end = m_id + mat.m_innerNonZeros[outer];
    }

    inline InnerIterator& operator++() { m_id++; return *this; }

    inline const Scalar& value() const { return m_values[m_id]; }
    inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }

    inline Index index() const { return m_indices[m_id]; }
    inline Index outer() const { return m_outer; }
    inline Index row() const { return IsRowMajor ? m_outer : index(); }
    inline Index col() const { return IsRowMajor ? index() : m_outer; }

    inline operator bool() const { return (m_id < m_end); }

  protected:
    const Scalar* m_values;
    const Index* m_indices;
    const Index m_outer;
    Index m_id;
    Index m_end;
};
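
// Illustrative sketch (assumption, not from the original source): iterating
// over the nonzeros of a SparseMatrix<double> m, one inner vector at a time;
// visit() is a placeholder.
//
//   for (int j = 0; j < m.outerSize(); ++j)
//     for (SparseMatrix<double>::InnerIterator it(m, j); it; ++it)
//       visit(it.row(), it.col(), it.value());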

template<typename Scalar, int _Options, typename _Index>
class SparseMatrix<Scalar,_Options,_Index>::ReverseInnerIterator
{
  public:
    ReverseInnerIterator(const SparseMatrix& mat, Index outer)
      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_start(mat.m_outerIndex[outer])
    {
      if(mat.isCompressed())
        m_id = mat.m_outerIndex[outer+1];
      else
        m_id = m_start + mat.m_innerNonZeros[outer];
    }

    inline ReverseInnerIterator& operator--() { --m_id; return *this; }

    inline const Scalar& value() const { return m_values[m_id-1]; }
    inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }

    inline Index index() const { return m_indices[m_id-1]; }
    inline Index outer() const { return m_outer; }
    inline Index row() const { return IsRowMajor ? m_outer : index(); }
    inline Index col() const { return IsRowMajor ? index() : m_outer; }

    inline operator bool() const { return (m_id > m_start); }

  protected:
    const Scalar* m_values;
    const Index* m_indices;
    const Index m_outer;
    Index m_id;
    const Index m_start;
};

namespace internal {

template<typename InputIterator, typename SparseMatrixType>
void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, int Options = 0)
{
  EIGEN_UNUSED_VARIABLE(Options);
  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
  typedef typename SparseMatrixType::Scalar Scalar;
  typedef typename SparseMatrixType::Index Index;
  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor> trMat(mat.rows(),mat.cols());

  // pass 1: count the nnz per inner-vector
  VectorXi wi(trMat.outerSize());
  wi.setZero();
  for(InputIterator it(begin); it!=end; ++it)
    wi(IsRowMajor ? it->col() : it->row())++;

  // pass 2: insert all the elements into trMat
  trMat.reserve(wi);
  for(InputIterator it(begin); it!=end; ++it)
    trMat.insertBackUncompressed(it->row(),it->col()) = it->value();

  // pass 3: sum up duplicate entries
  trMat.sumupDuplicates();

  // pass 4: transposed copy -> implicit sorting
  mat = trMat;
}

} // end namespace internal

// Fills *this with the list of triplets defined by the iterator range
// begin - end, where a triplet is an object exposing row(), col() and value()
// (see Eigen::Triplet). Duplicated entries are summed up and the result is a
// compressed matrix. The initial content of *this is destroyed.
template<typename Scalar, int _Options, typename _Index>
template<typename InputIterators>
void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
{
  internal::set_from_triplets(begin, end, *this);
}
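
// Illustrative usage sketch (assumption, not from the original source); i, j
// and v_ij are placeholders:
//
//   typedef Triplet<double> T;
//   std::vector<T> triplets;
//   triplets.push_back(T(i, j, v_ij));  // repeat for each nonzero
//   SparseMatrix<double> m(rows, cols);
//   m.setFromTriplets(triplets.begin(), triplets.end());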

// \internal Sums up duplicate entries and turns the matrix into compressed form.
template<typename Scalar, int _Options, typename _Index>
void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
{
  eigen_assert(!isCompressed());
  // TODO, in practice we should be able to use m_innerNonZeros for that task
  VectorXi wi(innerSize());
  wi.fill(-1);
  Index count = 0;
  // for each inner-vector, wi[inner_index] will hold the position of the first element into the index/value buffers
  for(int j=0; j<outerSize(); ++j)
  {
    Index start = count;
    Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
    for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
    {
      Index i = m_data.index(k);
      if(wi(i)>=start)
      {
        // we have already met this entry => accumulate it
        m_data.value(wi(i)) += m_data.value(k);
      }
      else
      {
        m_data.value(count) = m_data.value(k);
        m_data.index(count) = m_data.index(k);
        wi(i) = count;
        ++count;
      }
    }
    m_outerIndex[j] = start;
  }
  m_outerIndex[m_outerSize] = count;

  // turn the matrix into compressed form
  delete[] m_innerNonZeros;
  m_innerNonZeros = 0;
  m_data.resize(m_outerIndex[m_outerSize]);
}

} // end namespace Eigen

#endif // EIGEN_SPARSEMATRIX_H