Eigen  5.0.1-dev+60122df6
SparseMatrix.h
1// This file is part of Eigen, a lightweight C++ template library
2// for linear algebra.
3//
4// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5//
6// This Source Code Form is subject to the terms of the Mozilla
7// Public License v. 2.0. If a copy of the MPL was not distributed
8// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
10#ifndef EIGEN_SPARSEMATRIX_H
11#define EIGEN_SPARSEMATRIX_H
12
13// IWYU pragma: private
14#include "./InternalHeaderCheck.h"
15
16namespace Eigen {
17
48
49namespace internal {
50template <typename Scalar_, int Options_, typename StorageIndex_>
51struct traits<SparseMatrix<Scalar_, Options_, StorageIndex_>> {
52 typedef Scalar_ Scalar;
53 typedef StorageIndex_ StorageIndex;
54 typedef Sparse StorageKind;
55 typedef MatrixXpr XprKind;
56 enum {
57 RowsAtCompileTime = Dynamic,
58 ColsAtCompileTime = Dynamic,
59 MaxRowsAtCompileTime = Dynamic,
60 MaxColsAtCompileTime = Dynamic,
61 Options = Options_,
62 Flags = Options_ | NestByRefBit | LvalueBit | CompressedAccessBit,
63 SupportedAccessPatterns = InnerRandomAccessPattern
64 };
65};
66
67template <typename Scalar_, int Options_, typename StorageIndex_, int DiagIndex>
68struct traits<Diagonal<SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex>> {
69 typedef SparseMatrix<Scalar_, Options_, StorageIndex_> MatrixType;
70 typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
71 typedef std::remove_reference_t<MatrixTypeNested> MatrixTypeNested_;
72
73 typedef Scalar_ Scalar;
74 typedef Dense StorageKind;
75 typedef StorageIndex_ StorageIndex;
76 typedef MatrixXpr XprKind;
77
78 enum {
79 RowsAtCompileTime = Dynamic,
80 ColsAtCompileTime = 1,
81 MaxRowsAtCompileTime = Dynamic,
82 MaxColsAtCompileTime = 1,
83 Flags = LvalueBit
84 };
85};
86
87template <typename Scalar_, int Options_, typename StorageIndex_, int DiagIndex>
88struct traits<Diagonal<const SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex>>
89 : public traits<Diagonal<SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex>> {
90 enum { Flags = 0 };
91};
92
93template <typename StorageIndex>
94struct sparse_reserve_op {
95 EIGEN_DEVICE_FUNC sparse_reserve_op(Index begin, Index end, Index size) {
96 Index range = numext::mini(end - begin, size);
97 m_begin = begin;
98 m_end = begin + range;
99 m_val = StorageIndex(size / range);
100 m_remainder = StorageIndex(size % range);
101 }
102 template <typename IndexType>
103 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE StorageIndex operator()(IndexType i) const {
104 if ((i >= m_begin) && (i < m_end))
105 return m_val + ((i - m_begin) < m_remainder ? 1 : 0);
106 else
107 return 0;
108 }
109 StorageIndex m_val, m_remainder;
110 Index m_begin, m_end;
111};
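// Worked example of the functor above (illustrative): with begin = 2, end = 5 and size = 7,
// range = min(5 - 2, 7) = 3, so m_val = 7 / 3 = 2 and m_remainder = 7 % 3 = 1; the operator
// then returns 3, 2, 2 for i = 2, 3, 4 and 0 elsewhere, i.e. the requested size is spread as
// evenly as possible over the inner vectors in [begin, end).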
112
113template <typename Scalar>
114struct functor_traits<sparse_reserve_op<Scalar>> {
115 enum { Cost = 1, PacketAccess = false, IsRepeatable = true };
116};
117
118} // end namespace internal
119
120template <typename Scalar_, int Options_, typename StorageIndex_>
121class SparseMatrix : public SparseCompressedBase<SparseMatrix<Scalar_, Options_, StorageIndex_>> {
122 typedef SparseCompressedBase<SparseMatrix> Base;
123 using Base::convert_index;
124 friend class SparseVector<Scalar_, 0, StorageIndex_>;
125 template <typename, typename, typename, typename, typename>
126 friend struct internal::Assignment;
127
128 public:
129 using Base::isCompressed;
130 using Base::nonZeros;
131 EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
132 using Base::operator+=;
133 using Base::operator-=;
134
136 typedef Diagonal<SparseMatrix> DiagonalReturnType;
137 typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
138 typedef typename Base::InnerIterator InnerIterator;
139 typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
140
141 using Base::IsRowMajor;
142 typedef internal::CompressedStorage<Scalar, StorageIndex> Storage;
143 enum { Options = Options_ };
144
145 typedef typename Base::IndexVector IndexVector;
146 typedef typename Base::ScalarVector ScalarVector;
147
148 protected:
150
151 Index m_outerSize;
152 Index m_innerSize;
153 StorageIndex* m_outerIndex;
154 StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
155 Storage m_data;
156
157 public:
159 inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
161 inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
162
164 inline Index innerSize() const { return m_innerSize; }
166 inline Index outerSize() const { return m_outerSize; }
167
171 inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
175 inline Scalar* valuePtr() { return m_data.valuePtr(); }
176
180 inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
184 inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
185
189 inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
193 inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
194
198 inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
202 inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
203
205 constexpr Storage& data() { return m_data; }
207 constexpr const Storage& data() const { return m_data; }
208
211 inline Scalar coeff(Index row, Index col) const {
212 eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
213
214 const Index outer = IsRowMajor ? row : col;
215 const Index inner = IsRowMajor ? col : row;
216 Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer + 1];
217 return m_data.atInRange(m_outerIndex[outer], end, inner);
218 }
219
231 inline Scalar& findOrInsertCoeff(Index row, Index col, bool* inserted) {
232 eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
233 const Index outer = IsRowMajor ? row : col;
234 const Index inner = IsRowMajor ? col : row;
235 Index start = m_outerIndex[outer];
236 Index end = isCompressed() ? m_outerIndex[outer + 1] : m_outerIndex[outer] + m_innerNonZeros[outer];
237 eigen_assert(end >= start && "you probably called coeffRef on a non finalized matrix");
238 Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
239 if (dst == end) {
240 Index capacity = m_outerIndex[outer + 1] - end;
241 if (capacity > 0) {
242 // implies uncompressed: push to back of vector
243 m_innerNonZeros[outer]++;
244 m_data.index(end) = StorageIndex(inner);
245 m_data.value(end) = Scalar(0);
246 if (inserted != nullptr) {
247 *inserted = true;
248 }
249 return m_data.value(end);
250 }
251 }
252 if ((dst < end) && (m_data.index(dst) == inner)) {
253 // this coefficient exists, return a reference to it
254 if (inserted != nullptr) {
255 *inserted = false;
256 }
257 return m_data.value(dst);
258 } else {
259 if (inserted != nullptr) {
260 *inserted = true;
261 }
262 // insertion will require reconfiguring the buffer
263 return insertAtByOuterInner(outer, inner, dst);
264 }
265 }
266
275 inline Scalar& coeffRef(Index row, Index col) { return findOrInsertCoeff(row, col, nullptr); }
276
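  // Usage note (illustrative, assuming a column-major SparseMatrix<double> A):
  //   A.coeff(i, j);            // read-only lookup, returns 0 if (i, j) is not stored
  //   A.coeffRef(i, j) += 2.0;  // inserts an explicit zero first if (i, j) is missing
  // coeff() never allocates; coeffRef() may trigger an insertion and thus a data move.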
294
295 public:
303 inline void setZero() {
304 m_data.clear();
305 using std::fill_n;
306 fill_n(m_outerIndex, m_outerSize + 1, StorageIndex(0));
307 if (m_innerNonZeros) {
308 fill_n(m_innerNonZeros, m_outerSize, StorageIndex(0));
309 }
310 }
311
315 inline void reserve(Index reserveSize) {
316 eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
317 m_data.reserve(reserveSize);
318 }
319
320#ifdef EIGEN_PARSED_BY_DOXYGEN
333 template <class SizesType>
334 inline void reserve(const SizesType& reserveSizes);
335#else
336 template <class SizesType>
337 inline void reserve(const SizesType& reserveSizes,
338 const typename SizesType::value_type& enableif = typename SizesType::value_type()) {
339 EIGEN_UNUSED_VARIABLE(enableif);
340 reserveInnerVectors(reserveSizes);
341 }
342#endif // EIGEN_PARSED_BY_DOXYGEN
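  // Typical use of the per-vector reserve (illustrative sketch, column-major A with an
  // estimated nnzPerCol entries per column):
  //   A.reserve(Eigen::VectorXi::Constant(A.cols(), nnzPerCol));
  //   // ... A.insert(i, j) = value; for each entry ...
  //   A.makeCompressed();
  // Reserving up front keeps random insertions local to the target inner vector instead of
  // forcing repeated reallocations and large element shifts.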
343 protected:
344 template <class SizesType>
345 inline void reserveInnerVectors(const SizesType& reserveSizes) {
346 if (isCompressed()) {
347 Index totalReserveSize = 0;
348 for (Index j = 0; j < m_outerSize; ++j) totalReserveSize += internal::convert_index<Index>(reserveSizes[j]);
349
350 // if reserveSizes is empty, don't do anything!
351 if (totalReserveSize == 0) return;
352
353 // turn the matrix into non-compressed mode
354 m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize);
355
356 // temporarily use m_innerNonZeros to hold the new starting points.
357 StorageIndex* newOuterIndex = m_innerNonZeros;
358
359 Index count = 0;
360 for (Index j = 0; j < m_outerSize; ++j) {
361 newOuterIndex[j] = internal::convert_index<StorageIndex>(count);
362 Index reserveSize = internal::convert_index<Index>(reserveSizes[j]);
363 count += reserveSize + internal::convert_index<Index>(m_outerIndex[j + 1] - m_outerIndex[j]);
364 }
365
366 m_data.reserve(totalReserveSize);
367 StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
368 for (Index j = m_outerSize - 1; j >= 0; --j) {
369 StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
370 StorageIndex begin = m_outerIndex[j];
371 StorageIndex end = begin + innerNNZ;
372 StorageIndex target = newOuterIndex[j];
373 internal::smart_memmove(innerIndexPtr() + begin, innerIndexPtr() + end, innerIndexPtr() + target);
374 internal::smart_memmove(valuePtr() + begin, valuePtr() + end, valuePtr() + target);
375 previousOuterIndex = m_outerIndex[j];
376 m_outerIndex[j] = newOuterIndex[j];
377 m_innerNonZeros[j] = innerNNZ;
378 }
379 if (m_outerSize > 0)
380 m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize - 1] + m_innerNonZeros[m_outerSize - 1] +
381 internal::convert_index<StorageIndex>(reserveSizes[m_outerSize - 1]);
382
383 m_data.resize(m_outerIndex[m_outerSize]);
384 } else {
385 StorageIndex* newOuterIndex = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize + 1);
386
387 Index count = 0;
388 for (Index j = 0; j < m_outerSize; ++j) {
389 newOuterIndex[j] = internal::convert_index<StorageIndex>(count);
390 Index alreadyReserved =
391 internal::convert_index<Index>(m_outerIndex[j + 1] - m_outerIndex[j] - m_innerNonZeros[j]);
392 Index reserveSize = internal::convert_index<Index>(reserveSizes[j]);
393 Index toReserve = numext::maxi(reserveSize, alreadyReserved);
394 count += toReserve + internal::convert_index<Index>(m_innerNonZeros[j]);
395 }
396 newOuterIndex[m_outerSize] = internal::convert_index<StorageIndex>(count);
397
398 m_data.resize(count);
399 for (Index j = m_outerSize - 1; j >= 0; --j) {
400 StorageIndex innerNNZ = m_innerNonZeros[j];
401 StorageIndex begin = m_outerIndex[j];
402 StorageIndex target = newOuterIndex[j];
403 m_data.moveChunk(begin, target, innerNNZ);
404 }
405
406 std::swap(m_outerIndex, newOuterIndex);
407 internal::conditional_aligned_delete_auto<StorageIndex, true>(newOuterIndex, m_outerSize + 1);
408 }
409 }
410
411 public:
412 //--- low level purely coherent filling ---
413
424 inline Scalar& insertBack(Index row, Index col) {
425 return insertBackByOuterInner(IsRowMajor ? row : col, IsRowMajor ? col : row);
426 }
427
430 inline Scalar& insertBackByOuterInner(Index outer, Index inner) {
431 eigen_assert(Index(m_outerIndex[outer + 1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
432 eigen_assert((m_outerIndex[outer + 1] - m_outerIndex[outer] == 0 || m_data.index(m_data.size() - 1) < inner) &&
433 "Invalid ordered insertion (invalid inner index)");
434 StorageIndex p = m_outerIndex[outer + 1];
435 ++m_outerIndex[outer + 1];
436 m_data.append(Scalar(0), inner);
437 return m_data.value(p);
438 }
439
442 inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner) {
443 StorageIndex p = m_outerIndex[outer + 1];
444 ++m_outerIndex[outer + 1];
445 m_data.append(Scalar(0), inner);
446 return m_data.value(p);
447 }
448
451 inline void startVec(Index outer) {
452 eigen_assert(m_outerIndex[outer] == Index(m_data.size()) &&
453 "You must call startVec for each inner vector sequentially");
454 eigen_assert(m_outerIndex[outer + 1] == 0 && "You must call startVec for each inner vector sequentially");
455 m_outerIndex[outer + 1] = m_outerIndex[outer];
456 }
457
461 inline void finalize() {
462 if (isCompressed()) {
463 StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
464 Index i = m_outerSize;
465 // find the last filled column
466 while (i >= 0 && m_outerIndex[i] == 0) --i;
467 ++i;
468 while (i <= m_outerSize) {
469 m_outerIndex[i] = size;
470 ++i;
471 }
472 }
473 }
474
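  // The low-level filling API above (insertBack / startVec / finalize) assumes entries are
  // appended in strictly increasing outer order and, within each inner vector, increasing
  // inner order. Illustrative sketch for a column-major matrix A:
  //   for (Index j = 0; j < A.cols(); ++j) {
  //     A.startVec(j);
  //     // for each row i of column j, visited in increasing order:
  //     A.insertBack(i, j) = value;
  //   }
  //   A.finalize();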
475 // remove outer vectors j, j+1 ... j+num-1 and resize the matrix
476 void removeOuterVectors(Index j, Index num = 1) {
477 eigen_assert(num >= 0 && j >= 0 && j + num <= m_outerSize && "Invalid parameters");
478
479 const Index newRows = IsRowMajor ? m_outerSize - num : rows();
480 const Index newCols = IsRowMajor ? cols() : m_outerSize - num;
481
482 const Index begin = j + num;
483 const Index end = m_outerSize;
484 const Index target = j;
485
486 // if the removed vectors are not empty, uncompress the matrix
487 if (m_outerIndex[j + num] > m_outerIndex[j]) uncompress();
488
489 // shift m_outerIndex and m_innerNonZeros [num] to the left
490 internal::smart_memmove(m_outerIndex + begin, m_outerIndex + end + 1, m_outerIndex + target);
491 if (!isCompressed())
492 internal::smart_memmove(m_innerNonZeros + begin, m_innerNonZeros + end, m_innerNonZeros + target);
493
494 // if m_outerIndex[0] > 0, shift the data within the first vector while it is easy to do so
495 if (m_outerIndex[0] > StorageIndex(0)) {
496 uncompress();
497 const Index from = internal::convert_index<Index>(m_outerIndex[0]);
498 const Index to = Index(0);
499 const Index chunkSize = internal::convert_index<Index>(m_innerNonZeros[0]);
500 m_data.moveChunk(from, to, chunkSize);
501 m_outerIndex[0] = StorageIndex(0);
502 }
503
504 // truncate the matrix to the smaller size
505 conservativeResize(newRows, newCols);
506 }
507
508 // insert empty outer vectors at indices j, j+1 ... j+num-1 and resize the matrix
509 void insertEmptyOuterVectors(Index j, Index num = 1) {
510 using std::fill_n;
511 eigen_assert(num >= 0 && j >= 0 && j < m_outerSize && "Invalid parameters");
512
513 const Index newRows = IsRowMajor ? m_outerSize + num : rows();
514 const Index newCols = IsRowMajor ? cols() : m_outerSize + num;
515
516 const Index begin = j;
517 const Index end = m_outerSize;
518 const Index target = j + num;
519
520 // expand the matrix to the larger size
521 conservativeResize(newRows, newCols);
522
523 // shift m_outerIndex and m_innerNonZeros [num] to the right
524 internal::smart_memmove(m_outerIndex + begin, m_outerIndex + end + 1, m_outerIndex + target);
525 // m_outerIndex[begin] == m_outerIndex[target], set all indices in this range to same value
526 fill_n(m_outerIndex + begin, num, m_outerIndex[begin]);
527
528 if (!isCompressed()) {
529 internal::smart_memmove(m_innerNonZeros + begin, m_innerNonZeros + end, m_innerNonZeros + target);
530 // set the nonzeros of the newly inserted vectors to 0
531 fill_n(m_innerNonZeros + begin, num, StorageIndex(0));
532 }
533 }
534
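  // For a column-major matrix the outer vectors are columns, so e.g. removeOuterVectors(1, 2)
  // erases columns 1 and 2 (shrinking the matrix by two columns), while
  // insertEmptyOuterVectors(1, 2) shifts columns 1 onwards to the right and inserts two empty
  // columns at positions 1 and 2.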
535 template <typename InputIterators>
536 void setFromTriplets(const InputIterators& begin, const InputIterators& end);
537
538 template <typename InputIterators, typename DupFunctor>
539 void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
540
541 template <typename Derived, typename DupFunctor>
542 void collapseDuplicates(DenseBase<Derived>& wi, DupFunctor dup_func = DupFunctor());
543
544 template <typename InputIterators>
545 void setFromSortedTriplets(const InputIterators& begin, const InputIterators& end);
546
547 template <typename InputIterators, typename DupFunctor>
548 void setFromSortedTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
549
550 template <typename InputIterators>
551 void insertFromTriplets(const InputIterators& begin, const InputIterators& end);
552
553 template <typename InputIterators, typename DupFunctor>
554 void insertFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
555
556 template <typename InputIterators>
557 void insertFromSortedTriplets(const InputIterators& begin, const InputIterators& end);
558
559 template <typename InputIterators, typename DupFunctor>
560 void insertFromSortedTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
561
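  // Typical triplet-based assembly (illustrative sketch):
  //   std::vector<Eigen::Triplet<double>> triplets;
  //   triplets.emplace_back(i, j, value);  // duplicates are allowed
  //   A.setFromTriplets(triplets.begin(), triplets.end());       // duplicates are summed
  //   A.setFromTriplets(triplets.begin(), triplets.end(),
  //                     [](double a, double b) { return b; });   // or keep the last value
  // The sorted variants skip the sorting pass but require the triplets to already be ordered
  // by outer index, then inner index.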
562 //---
563
566 Scalar& insertByOuterInner(Index j, Index i) {
567 eigen_assert(j >= 0 && j < m_outerSize && "invalid outer index");
568 eigen_assert(i >= 0 && i < m_innerSize && "invalid inner index");
569 Index start = m_outerIndex[j];
570 Index end = isCompressed() ? m_outerIndex[j + 1] : start + m_innerNonZeros[j];
571 Index dst = start == end ? end : m_data.searchLowerIndex(start, end, i);
572 if (dst == end) {
573 Index capacity = m_outerIndex[j + 1] - end;
574 if (capacity > 0) {
575 // implies uncompressed: push to back of vector
576 m_innerNonZeros[j]++;
577 m_data.index(end) = StorageIndex(i);
578 m_data.value(end) = Scalar(0);
579 return m_data.value(end);
580 }
581 }
582 eigen_assert((dst == end || m_data.index(dst) != i) &&
583 "you cannot insert an element that already exists, you must call coeffRef to this end");
584 return insertAtByOuterInner(j, i, dst);
585 }
586
589 void makeCompressed() {
590 if (isCompressed()) return;
591
592 eigen_internal_assert(m_outerIndex != 0 && m_outerSize > 0);
593
594 StorageIndex start = m_outerIndex[1];
595 m_outerIndex[1] = m_innerNonZeros[0];
596 // try to move fewer, larger contiguous chunks
597 Index copyStart = start;
598 Index copyTarget = m_innerNonZeros[0];
599 for (Index j = 1; j < m_outerSize; j++) {
600 StorageIndex end = start + m_innerNonZeros[j];
601 StorageIndex nextStart = m_outerIndex[j + 1];
602 // don't forget to move the last chunk!
603 bool breakUpCopy = (end != nextStart) || (j == m_outerSize - 1);
604 if (breakUpCopy) {
605 Index chunkSize = end - copyStart;
606 if (chunkSize > 0) m_data.moveChunk(copyStart, copyTarget, chunkSize);
607 copyStart = nextStart;
608 copyTarget += chunkSize;
609 }
610 start = nextStart;
611 m_outerIndex[j + 1] = m_outerIndex[j] + m_innerNonZeros[j];
612 }
613 m_data.resize(m_outerIndex[m_outerSize]);
614
615 // release as much memory as possible
616 internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
617 m_innerNonZeros = 0;
618 m_data.squeeze();
619 }
620
622 void uncompress() {
623 if (!isCompressed()) return;
624 m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize);
625 if (m_outerIndex[m_outerSize] == 0) {
626 using std::fill_n;
627 fill_n(m_innerNonZeros, m_outerSize, StorageIndex(0));
628 } else {
629 for (Index j = 0; j < m_outerSize; j++) m_innerNonZeros[j] = m_outerIndex[j + 1] - m_outerIndex[j];
630 }
631 }
632
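  // Compressed vs. uncompressed mode (illustrative): random insertions typically switch the
  // matrix to uncompressed mode, where m_innerNonZeros lets each inner vector keep spare
  // capacity. Call makeCompressed() once assembly is finished to return to the plain CSC/CSR
  // layout expected by most sparse solvers:
  //   A.insert(i, j) = value;  // may leave A uncompressed
  //   A.makeCompressed();      // squeezes out the holes and frees m_innerNonZeros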
634 void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision()) {
635 prune(default_prunning_func(reference, epsilon));
636 }
637
645 template <typename KeepFunc>
646 void prune(const KeepFunc& keep = KeepFunc()) {
647 StorageIndex k = 0;
648 for (Index j = 0; j < m_outerSize; ++j) {
649 StorageIndex previousStart = m_outerIndex[j];
650 if (isCompressed())
651 m_outerIndex[j] = k;
652 else
653 k = m_outerIndex[j];
654 StorageIndex end = isCompressed() ? m_outerIndex[j + 1] : previousStart + m_innerNonZeros[j];
655 for (StorageIndex i = previousStart; i < end; ++i) {
656 StorageIndex row = IsRowMajor ? StorageIndex(j) : m_data.index(i);
657 StorageIndex col = IsRowMajor ? m_data.index(i) : StorageIndex(j);
658 bool keepEntry = keep(row, col, m_data.value(i));
659 if (keepEntry) {
660 m_data.value(k) = m_data.value(i);
661 m_data.index(k) = m_data.index(i);
662 ++k;
663 } else if (!isCompressed())
664 m_innerNonZeros[j]--;
665 }
666 }
667 if (isCompressed()) {
668 m_outerIndex[m_outerSize] = k;
669 m_data.resize(k, 0);
670 }
671 }
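  // Examples (illustrative):
  //   A.prune(Scalar(1), 1e-3);  // drop entries much smaller than 1 (precision 1e-3)
  //   A.prune([](Index, Index, const Scalar& v) { return numext::abs(v) > Scalar(1e-12); });
  // The predicate overload keeps exactly the entries for which keep(row, col, value) is true.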
672
681 void conservativeResize(Index rows, Index cols) {
682 // If one dimension is null, then there is nothing to be preserved
683 if (rows == 0 || cols == 0) return resize(rows, cols);
684
685 Index newOuterSize = IsRowMajor ? rows : cols;
686 Index newInnerSize = IsRowMajor ? cols : rows;
687
688 Index innerChange = newInnerSize - m_innerSize;
689 Index outerChange = newOuterSize - m_outerSize;
690
691 if (outerChange != 0) {
692 m_outerIndex = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_outerIndex, newOuterSize + 1,
693 m_outerSize + 1);
694
695 if (!isCompressed())
696 m_innerNonZeros = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_innerNonZeros,
697 newOuterSize, m_outerSize);
698
699 if (outerChange > 0) {
700 StorageIndex lastIdx = m_outerSize == 0 ? StorageIndex(0) : m_outerIndex[m_outerSize];
701 using std::fill_n;
702 fill_n(m_outerIndex + m_outerSize, outerChange + 1, lastIdx);
703
704 if (!isCompressed()) fill_n(m_innerNonZeros + m_outerSize, outerChange, StorageIndex(0));
705 }
706 }
707 m_outerSize = newOuterSize;
708
709 if (innerChange < 0) {
710 for (Index j = 0; j < m_outerSize; j++) {
711 Index start = m_outerIndex[j];
712 Index end = isCompressed() ? m_outerIndex[j + 1] : start + m_innerNonZeros[j];
713 Index lb = m_data.searchLowerIndex(start, end, newInnerSize);
714 if (lb != end) {
715 uncompress();
716 m_innerNonZeros[j] = StorageIndex(lb - start);
717 }
718 }
719 }
720 m_innerSize = newInnerSize;
721
722 Index newSize = m_outerIndex[m_outerSize];
723 eigen_assert(newSize <= m_data.size());
724 m_data.resize(newSize);
725 }
726
734 void resize(Index rows, Index cols) {
735 const Index outerSize = IsRowMajor ? rows : cols;
736 m_innerSize = IsRowMajor ? cols : rows;
737 m_data.clear();
738
739 if ((m_outerIndex == 0) || (m_outerSize != outerSize)) {
740 m_outerIndex = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_outerIndex, outerSize + 1,
741 m_outerSize + 1);
742 m_outerSize = outerSize;
743 }
744
745 internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
746 m_innerNonZeros = 0;
747
748 using std::fill_n;
749 fill_n(m_outerIndex, m_outerSize + 1, StorageIndex(0));
750 }
751
754 void resizeNonZeros(Index size) { m_data.resize(size); }
755
757 const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }
758
763 DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }
764
766 inline SparseMatrix() : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { resize(0, 0); }
767
769 inline SparseMatrix(Index rows, Index cols) : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
770 resize(rows, cols);
771 }
772
774 template <typename OtherDerived>
775 SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
776 : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
777 EIGEN_STATIC_ASSERT(
778 (internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
779 YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
780 const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
781 if (needToTranspose)
782 *this = other.derived();
783 else {
784#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
785 EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
786#endif
787 internal::call_assignment_no_alias(*this, other.derived());
788 }
789 }
790
792 template <typename OtherDerived, unsigned int UpLo>
793 inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
794 : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
795 Base::operator=(other);
796 }
797
799 inline SparseMatrix(SparseMatrix&& other) : SparseMatrix() { this->swap(other); }
800
801 template <typename OtherDerived>
802 inline SparseMatrix(SparseCompressedBase<OtherDerived>&& other) : SparseMatrix() {
803 *this = other.derived().markAsRValue();
804 }
805
807 inline SparseMatrix(const SparseMatrix& other)
808 : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
809 *this = other.derived();
810 }
811
813 template <typename OtherDerived>
814 SparseMatrix(const ReturnByValue<OtherDerived>& other)
815 : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
816 initAssignment(other);
817 other.evalTo(*this);
818 }
819
821 template <typename OtherDerived>
822 explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
823 : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
824 *this = other.derived();
825 }
826
829 inline void swap(SparseMatrix& other) {
830 // EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
831 std::swap(m_outerIndex, other.m_outerIndex);
832 std::swap(m_innerSize, other.m_innerSize);
833 std::swap(m_outerSize, other.m_outerSize);
834 std::swap(m_innerNonZeros, other.m_innerNonZeros);
835 m_data.swap(other.m_data);
836 }
837
838 friend EIGEN_DEVICE_FUNC void swap(SparseMatrix& a, SparseMatrix& b) { a.swap(b); }
839
842 inline void setIdentity() {
843 eigen_assert(m_outerSize == m_innerSize && "ONLY FOR SQUARED MATRICES");
844 internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
845 m_innerNonZeros = 0;
846 m_data.resize(m_outerSize);
847 // is it necessary to squeeze?
848 m_data.squeeze();
849 std::iota(m_outerIndex, m_outerIndex + m_outerSize + 1, StorageIndex(0));
850 std::iota(innerIndexPtr(), innerIndexPtr() + m_outerSize, StorageIndex(0));
851 using std::fill_n;
852 fill_n(valuePtr(), m_outerSize, Scalar(1));
853 }
854
855 inline SparseMatrix& operator=(const SparseMatrix& other) {
856 if (other.isRValue()) {
857 swap(other.const_cast_derived());
858 } else if (this != &other) {
859#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
860 EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
861#endif
862 initAssignment(other);
863 if (other.isCompressed()) {
864 internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
865 m_data = other.m_data;
866 } else {
867 Base::operator=(other);
868 }
869 }
870 return *this;
871 }
872
873 inline SparseMatrix& operator=(SparseMatrix&& other) {
874 this->swap(other);
875 return *this;
876 }
877
878 template <typename OtherDerived>
879 inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other) {
880 return Base::operator=(other.derived());
881 }
882
883 template <typename Lhs, typename Rhs>
884 inline SparseMatrix& operator=(const Product<Lhs, Rhs, AliasFreeProduct>& other);
885
886 template <typename OtherDerived>
887 EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
888
889 template <typename OtherDerived>
890 inline SparseMatrix& operator=(SparseCompressedBase<OtherDerived>&& other) {
891 *this = other.derived().markAsRValue();
892 return *this;
893 }
894
895#ifndef EIGEN_NO_IO
896 friend std::ostream& operator<<(std::ostream& s, const SparseMatrix& m) {
897 EIGEN_DBG_SPARSE(
898 s << "Nonzero entries:\n"; if (m.isCompressed()) {
899 for (Index i = 0; i < m.nonZeros(); ++i) s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
900 } else {
901 for (Index i = 0; i < m.outerSize(); ++i) {
902 Index p = m.m_outerIndex[i];
903 Index pe = m.m_outerIndex[i] + m.m_innerNonZeros[i];
904 Index k = p;
905 for (; k < pe; ++k) {
906 s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
907 }
908 for (; k < m.m_outerIndex[i + 1]; ++k) {
909 s << "(_,_) ";
910 }
911 }
912 } s << std::endl;
913 s << std::endl; s << "Outer pointers:\n";
914 for (Index i = 0; i < m.outerSize(); ++i) { s << m.m_outerIndex[i] << " "; } s << " $" << std::endl;
915 if (!m.isCompressed()) {
916 s << "Inner non zeros:\n";
917 for (Index i = 0; i < m.outerSize(); ++i) {
918 s << m.m_innerNonZeros[i] << " ";
919 }
920 s << " $" << std::endl;
921 } s
922 << std::endl;);
923 s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
924 return s;
925 }
926#endif
927
929 inline ~SparseMatrix() {
930 internal::conditional_aligned_delete_auto<StorageIndex, true>(m_outerIndex, m_outerSize + 1);
931 internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
932 }
933
935 Scalar sum() const;
936
937#ifdef EIGEN_SPARSEMATRIX_PLUGIN
938#include EIGEN_SPARSEMATRIX_PLUGIN
939#endif
940
941 protected:
942 template <typename Other>
943 void initAssignment(const Other& other) {
944 resize(other.rows(), other.cols());
945 internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
946 m_innerNonZeros = 0;
947 }
948
951 EIGEN_DEPRECATED EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
952
955 class SingletonVector {
956 StorageIndex m_index;
957 StorageIndex m_value;
958
959 public:
960 typedef StorageIndex value_type;
961 SingletonVector(Index i, Index v) : m_index(convert_index(i)), m_value(convert_index(v)) {}
962
963 StorageIndex operator[](Index i) const { return i == m_index ? m_value : 0; }
964 };
965
968 EIGEN_DEPRECATED EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
969
970 public:
973 EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col) {
974 const Index outer = IsRowMajor ? row : col;
975 const Index inner = IsRowMajor ? col : row;
976
977 eigen_assert(!isCompressed());
978 eigen_assert(m_innerNonZeros[outer] <= (m_outerIndex[outer + 1] - m_outerIndex[outer]));
979
980 Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
981 m_data.index(p) = StorageIndex(inner);
982 m_data.value(p) = Scalar(0);
983 return m_data.value(p);
984 }
985
986 protected:
987 struct IndexPosPair {
988 IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
989 Index i;
990 Index p;
991 };
992
1002 template <typename DiagXpr, typename Func>
1003 void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc) {
1004 constexpr StorageIndex kEmptyIndexVal(-1);
1005 typedef typename ScalarVector::AlignedMapType ValueMap;
1006
1007 Index n = diagXpr.size();
1008
1009 const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar, Scalar>>::value;
1010 if (overwrite) {
1011 if ((m_outerSize != n) || (m_innerSize != n)) resize(n, n);
1012 }
1013
1014 if (m_data.size() == 0 || overwrite) {
1015 internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
1016 m_innerNonZeros = 0;
1017 resizeNonZeros(n);
1018 ValueMap valueMap(valuePtr(), n);
1019 std::iota(m_outerIndex, m_outerIndex + n + 1, StorageIndex(0));
1020 std::iota(innerIndexPtr(), innerIndexPtr() + n, StorageIndex(0));
1021 valueMap.setZero();
1022 internal::call_assignment_no_alias(valueMap, diagXpr, assignFunc);
1023 } else {
1024 internal::evaluator<DiagXpr> diaEval(diagXpr);
1025
1026 ei_declare_aligned_stack_constructed_variable(StorageIndex, tmp, n, 0);
1027 typename IndexVector::AlignedMapType insertionLocations(tmp, n);
1028 insertionLocations.setConstant(kEmptyIndexVal);
1029
1030 Index deferredInsertions = 0;
1031 Index shift = 0;
1032
1033 for (Index j = 0; j < n; j++) {
1034 Index begin = m_outerIndex[j];
1035 Index end = isCompressed() ? m_outerIndex[j + 1] : begin + m_innerNonZeros[j];
1036 Index capacity = m_outerIndex[j + 1] - end;
1037 Index dst = m_data.searchLowerIndex(begin, end, j);
1038 // the entry exists: update it now
1039 if (dst != end && m_data.index(dst) == StorageIndex(j))
1040 assignFunc.assignCoeff(m_data.value(dst), diaEval.coeff(j));
1041 // the entry belongs at the back of the vector: push to back
1042 else if (dst == end && capacity > 0)
1043 assignFunc.assignCoeff(insertBackUncompressed(j, j), diaEval.coeff(j));
1044 // the insertion requires a data move, record insertion location and handle in second pass
1045 else {
1046 insertionLocations.coeffRef(j) = StorageIndex(dst);
1047 deferredInsertions++;
1048 // if there is no capacity, all vectors to the right of this are shifted
1049 if (capacity == 0) shift++;
1050 }
1051 }
1052
1053 if (deferredInsertions > 0) {
1054 m_data.resize(m_data.size() + shift);
1055 Index copyEnd = isCompressed() ? m_outerIndex[m_outerSize]
1056 : m_outerIndex[m_outerSize - 1] + m_innerNonZeros[m_outerSize - 1];
1057 for (Index j = m_outerSize - 1; deferredInsertions > 0; j--) {
1058 Index begin = m_outerIndex[j];
1059 Index end = isCompressed() ? m_outerIndex[j + 1] : begin + m_innerNonZeros[j];
1060 Index capacity = m_outerIndex[j + 1] - end;
1061
1062 bool doInsertion = insertionLocations(j) >= 0;
1063 bool breakUpCopy = doInsertion && (capacity > 0);
1064 // break up copy for sorted insertion into inactive nonzeros
1065 // optionally, add another criterion, i.e. 'breakUpCopy || (capacity > threshold)'
1066 // where `threshold >= 0` to skip inactive nonzeros in each vector
1067 // this reduces the total number of copied elements, but requires more moveChunk calls
1068 if (breakUpCopy) {
1069 Index copyBegin = m_outerIndex[j + 1];
1070 Index to = copyBegin + shift;
1071 Index chunkSize = copyEnd - copyBegin;
1072 m_data.moveChunk(copyBegin, to, chunkSize);
1073 copyEnd = end;
1074 }
1075
1076 m_outerIndex[j + 1] += shift;
1077
1078 if (doInsertion) {
1079 // if there is capacity, shift into the inactive nonzeros
1080 if (capacity > 0) shift++;
1081 Index copyBegin = insertionLocations(j);
1082 Index to = copyBegin + shift;
1083 Index chunkSize = copyEnd - copyBegin;
1084 m_data.moveChunk(copyBegin, to, chunkSize);
1085 Index dst = to - 1;
1086 m_data.index(dst) = StorageIndex(j);
1087 m_data.value(dst) = Scalar(0);
1088 assignFunc.assignCoeff(m_data.value(dst), diaEval.coeff(j));
1089 if (!isCompressed()) m_innerNonZeros[j]++;
1090 shift--;
1091 deferredInsertions--;
1092 copyEnd = copyBegin;
1093 }
1094 }
1095 }
1096 eigen_assert((shift == 0) && (deferredInsertions == 0));
1097 }
1098 }
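  // This helper backs assignments to the diagonal of a sparse matrix, e.g. (illustrative)
  // A.diagonal() = vec; or A.diagonal() += vec;. Existing diagonal entries are updated in
  // place, entries with spare capacity at the end of their inner vector are appended, and the
  // remaining ones are inserted in a single right-to-left pass to minimize data movement.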
1099
1100 /* These functions are used to avoid a redundant binary search operation in functions such as coeffRef() and assume
1101 * `dst` is the appropriate sorted insertion point */
1102 EIGEN_STRONG_INLINE Scalar& insertAtByOuterInner(Index outer, Index inner, Index dst);
1103 Scalar& insertCompressedAtByOuterInner(Index outer, Index inner, Index dst);
1104 Scalar& insertUncompressedAtByOuterInner(Index outer, Index inner, Index dst);
1105
1106 private:
1107 EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned, THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE)
1108 EIGEN_STATIC_ASSERT((Options & (ColMajor | RowMajor)) == Options, INVALID_MATRIX_TEMPLATE_PARAMETERS)
1109
1110 struct default_prunning_func {
1111 default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
1112 inline bool operator()(const Index&, const Index&, const Scalar& value) const {
1113 return !internal::isMuchSmallerThan(value, reference, epsilon);
1114 }
1115 Scalar reference;
1116 RealScalar epsilon;
1117 };
1118};
1119
1120namespace internal {
1121
1122// Creates a compressed sparse matrix from a range of unsorted triplets
1123// Requires temporary storage to handle duplicate entries
1124template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
1125void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
1126 DupFunctor dup_func) {
1127 constexpr bool IsRowMajor = SparseMatrixType::IsRowMajor;
1128 using StorageIndex = typename SparseMatrixType::StorageIndex;
1129 using IndexMap = typename VectorX<StorageIndex>::AlignedMapType;
1130 using TransposedSparseMatrix =
1131 SparseMatrix<typename SparseMatrixType::Scalar, IsRowMajor ? ColMajor : RowMajor, StorageIndex>;
1132
1133 if (begin == end) {
1134 // Clear out existing data (if any).
1135 mat.setZero();
1136 return;
1137 }
1138
1139 // There are two strategies to consider for constructing a matrix from unordered triplets:
1140 // A) construct the 'mat' in its native storage order and sort in-place (less memory); or,
1141 // B) construct the transposed matrix and use an implicit sort upon assignment to `mat` (less time).
1142 // This routine uses B) for faster execution time.
1143 TransposedSparseMatrix trmat(mat.rows(), mat.cols());
1144
1145 // scan triplets to determine allocation size before constructing matrix
1146 Index nonZeros = 0;
1147 for (InputIterator it(begin); it != end; ++it) {
1148 eigen_assert(it->row() >= 0 && it->row() < mat.rows() && it->col() >= 0 && it->col() < mat.cols());
1149 StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
1150 if (nonZeros == NumTraits<StorageIndex>::highest()) internal::throw_std_bad_alloc();
1151 trmat.outerIndexPtr()[j + 1]++;
1152 nonZeros++;
1153 }
1154
1155 std::partial_sum(trmat.outerIndexPtr(), trmat.outerIndexPtr() + trmat.outerSize() + 1, trmat.outerIndexPtr());
1156 eigen_assert(nonZeros == trmat.outerIndexPtr()[trmat.outerSize()]);
1157 trmat.resizeNonZeros(nonZeros);
1158
1159 // construct temporary array to track insertions (outersize) and collapse duplicates (innersize)
1160 ei_declare_aligned_stack_constructed_variable(StorageIndex, tmp, numext::maxi(mat.innerSize(), mat.outerSize()), 0);
1161 smart_copy(trmat.outerIndexPtr(), trmat.outerIndexPtr() + trmat.outerSize(), tmp);
1162
1163 // push triplets to back of each vector
1164 for (InputIterator it(begin); it != end; ++it) {
1165 StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
1166 StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
1167 StorageIndex k = tmp[j];
1168 trmat.data().index(k) = i;
1169 trmat.data().value(k) = it->value();
1170 tmp[j]++;
1171 }
1172
1173 IndexMap wi(tmp, trmat.innerSize());
1174 trmat.collapseDuplicates(wi, dup_func);
1175 // implicit sorting
1176 mat = trmat;
1177}
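// Illustrative walk-through of the passes above for a 2x2 column-major target and triplets
// {(0,0,a), (1,1,b), (0,0,c)}: the row-major intermediate first counts entries per outer
// (row) vector and prefix-sums those counts into outer indices, then scatters the triplets
// to the back of their vectors; collapseDuplicates combines a and c via dup_func (a sum by
// default), and the final assignment `mat = trmat` transposes back, which sorts the inner
// indices implicitly.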
1178
1179// Creates a compressed sparse matrix from a sorted range of triplets
1180template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
1181void set_from_triplets_sorted(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
1182 DupFunctor dup_func) {
1183 constexpr bool IsRowMajor = SparseMatrixType::IsRowMajor;
1184 using StorageIndex = typename SparseMatrixType::StorageIndex;
1185
1186 if (begin == end) return;
1187
1188 constexpr StorageIndex kEmptyIndexValue(-1);
1189 // deallocate inner nonzeros if present and zero outerIndexPtr
1190 mat.resize(mat.rows(), mat.cols());
1191 // use outer indices to count non zero entries (excluding duplicate entries)
1192 StorageIndex previous_j = kEmptyIndexValue;
1193 StorageIndex previous_i = kEmptyIndexValue;
1194 // scan triplets to determine allocation size before constructing matrix
1195 Index nonZeros = 0;
1196 for (InputIterator it(begin); it != end; ++it) {
1197 eigen_assert(it->row() >= 0 && it->row() < mat.rows() && it->col() >= 0 && it->col() < mat.cols());
1198 StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
1199 StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
1200 eigen_assert(j > previous_j || (j == previous_j && i >= previous_i));
1201 // identify duplicates by examining previous location
1202 bool duplicate = (previous_j == j) && (previous_i == i);
1203 if (!duplicate) {
1204 if (nonZeros == NumTraits<StorageIndex>::highest()) internal::throw_std_bad_alloc();
1205 nonZeros++;
1206 mat.outerIndexPtr()[j + 1]++;
1207 previous_j = j;
1208 previous_i = i;
1209 }
1210 }
1211
1212 // finalize outer indices and allocate memory
1213 std::partial_sum(mat.outerIndexPtr(), mat.outerIndexPtr() + mat.outerSize() + 1, mat.outerIndexPtr());
1214 eigen_assert(nonZeros == mat.outerIndexPtr()[mat.outerSize()]);
1215 mat.resizeNonZeros(nonZeros);
1216
1217 previous_i = kEmptyIndexValue;
1218 previous_j = kEmptyIndexValue;
1219 Index back = 0;
1220 for (InputIterator it(begin); it != end; ++it) {
1221 StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
1222 StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
1223 bool duplicate = (previous_j == j) && (previous_i == i);
1224 if (duplicate) {
1225 mat.data().value(back - 1) = dup_func(mat.data().value(back - 1), it->value());
1226 } else {
1227 // push triplets to back
1228 mat.data().index(back) = i;
1229 mat.data().value(back) = it->value();
1230 previous_j = j;
1231 previous_i = i;
1232 back++;
1233 }
1234 }
1235 eigen_assert(back == nonZeros);
1236 // matrix is finalized
1237}
1238
1239// thin wrapper around a generic binary functor to use the sparse disjunction evaluator instead of the default
1240// "arithmetic" evaluator
1241template <typename DupFunctor, typename LhsScalar, typename RhsScalar = LhsScalar>
1242struct scalar_disjunction_op {
1243 using result_type = typename result_of<DupFunctor(LhsScalar, RhsScalar)>::type;
1244 scalar_disjunction_op(const DupFunctor& op) : m_functor(op) {}
1245 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator()(const LhsScalar& a, const RhsScalar& b) const {
1246 return m_functor(a, b);
1247 }
1248 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const DupFunctor& functor() const { return m_functor; }
1249 const DupFunctor& m_functor;
1250};
1251
1252template <typename DupFunctor, typename LhsScalar, typename RhsScalar>
1253struct functor_traits<scalar_disjunction_op<DupFunctor, LhsScalar, RhsScalar>> : public functor_traits<DupFunctor> {};
1254
1255// Creates a compressed sparse matrix from its existing entries and those from an unsorted range of triplets
1256template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
1257void insert_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
1258 DupFunctor dup_func) {
1259 using Scalar = typename SparseMatrixType::Scalar;
1260 using SrcXprType =
1261 CwiseBinaryOp<scalar_disjunction_op<DupFunctor, Scalar>, const SparseMatrixType, const SparseMatrixType>;
1262
1263 // set_from_triplets is necessary to sort the inner indices and remove the duplicate entries
1264 SparseMatrixType trips(mat.rows(), mat.cols());
1265 set_from_triplets(begin, end, trips, dup_func);
1266
1267 SrcXprType src = mat.binaryExpr(trips, scalar_disjunction_op<DupFunctor, Scalar>(dup_func));
1268 // the sparse assignment procedure creates a temporary matrix and swaps the final result
1269 assign_sparse_to_sparse<SparseMatrixType, SrcXprType>(mat, src);
1270}
1271
1272// Creates a compressed sparse matrix from its existing entries and those from a sorted range of triplets
1273template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
1274void insert_from_triplets_sorted(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
1275 DupFunctor dup_func) {
1276 using Scalar = typename SparseMatrixType::Scalar;
1277 using SrcXprType =
1278 CwiseBinaryOp<scalar_disjunction_op<DupFunctor, Scalar>, const SparseMatrixType, const SparseMatrixType>;
1279
1280 // TODO: process triplets without making a copy
1281 SparseMatrixType trips(mat.rows(), mat.cols());
1282 set_from_triplets_sorted(begin, end, trips, dup_func);
1283
1284 SrcXprType src = mat.binaryExpr(trips, scalar_disjunction_op<DupFunctor, Scalar>(dup_func));
1285 // the sparse assignment procedure creates a temporary matrix and swaps the final result
1286 assign_sparse_to_sparse<SparseMatrixType, SrcXprType>(mat, src);
1287}
1288
1289} // namespace internal
1290
1328template <typename Scalar, int Options_, typename StorageIndex_>
1329template <typename InputIterators>
1330void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromTriplets(const InputIterators& begin,
1331 const InputIterators& end) {
1332 internal::set_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
1333 begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
1334}
1335
1345template <typename Scalar, int Options_, typename StorageIndex_>
1346template <typename InputIterators, typename DupFunctor>
1347void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromTriplets(const InputIterators& begin,
1348 const InputIterators& end, DupFunctor dup_func) {
1349 internal::set_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
1350 begin, end, *this, dup_func);
1351}
1352
1357template <typename Scalar, int Options_, typename StorageIndex_>
1358template <typename InputIterators>
1359void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromSortedTriplets(const InputIterators& begin,
1360 const InputIterators& end) {
1361 internal::set_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
1362 begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
1363}
1364
1374template <typename Scalar, int Options_, typename StorageIndex_>
1375template <typename InputIterators, typename DupFunctor>
1376void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromSortedTriplets(const InputIterators& begin,
1377 const InputIterators& end,
1378 DupFunctor dup_func) {
1379 internal::set_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
1380 begin, end, *this, dup_func);
1381}
1382
1421template <typename Scalar, int Options_, typename StorageIndex_>
1422template <typename InputIterators>
1423void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromTriplets(const InputIterators& begin,
1424 const InputIterators& end) {
1425 internal::insert_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
1426 begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
1427}
1428
1438template <typename Scalar, int Options_, typename StorageIndex_>
1439template <typename InputIterators, typename DupFunctor>
1440void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromTriplets(const InputIterators& begin,
1441 const InputIterators& end, DupFunctor dup_func) {
1442 internal::insert_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
1443 begin, end, *this, dup_func);
1444}
1445
1450template <typename Scalar, int Options_, typename StorageIndex_>
1451template <typename InputIterators>
1452void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromSortedTriplets(const InputIterators& begin,
1453 const InputIterators& end) {
1454 internal::insert_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
1455 begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
1456}
1457
1467template <typename Scalar, int Options_, typename StorageIndex_>
1468template <typename InputIterators, typename DupFunctor>
1469void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromSortedTriplets(const InputIterators& begin,
1470 const InputIterators& end,
1471 DupFunctor dup_func) {
1472 internal::insert_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
1473 begin, end, *this, dup_func);
1474}
1475
1477template <typename Scalar_, int Options_, typename StorageIndex_>
1478template <typename Derived, typename DupFunctor>
1479void SparseMatrix<Scalar_, Options_, StorageIndex_>::collapseDuplicates(DenseBase<Derived>& wi, DupFunctor dup_func) {
1480 // removes duplicate entries and compresses the matrix
1481 // the excess allocated memory is not released
1482 // the inner indices do not need to be sorted, nor is the matrix returned in a sorted state
1483 eigen_assert(wi.size() == m_innerSize);
1484 constexpr StorageIndex kEmptyIndexValue(-1);
1485 wi.setConstant(kEmptyIndexValue);
1486 StorageIndex count = 0;
1487 const bool is_compressed = isCompressed();
1488 // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
1489 for (Index j = 0; j < m_outerSize; ++j) {
1490 const StorageIndex newBegin = count;
1491 const StorageIndex end = is_compressed ? m_outerIndex[j + 1] : m_outerIndex[j] + m_innerNonZeros[j];
1492 for (StorageIndex k = m_outerIndex[j]; k < end; ++k) {
1493 StorageIndex i = m_data.index(k);
1494 if (wi(i) >= newBegin) {
1495 // entry at k is a duplicate
1496 // accumulate it into the primary entry located at wi(i)
1497 m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
1498 } else {
1499 // k is the primary entry in j with inner index i
1500 // shift it to the left and record its location at wi(i)
1501 m_data.index(count) = i;
1502 m_data.value(count) = m_data.value(k);
1503 wi(i) = count;
1504 ++count;
1505 }
1506 }
1507 m_outerIndex[j] = newBegin;
1508 }
1509 m_outerIndex[m_outerSize] = count;
1510 m_data.resize(count);
1511
1512 // turn the matrix into compressed form (if it is not already)
1513 internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
1514 m_innerNonZeros = 0;
1515}
1516
1518template <typename Scalar, int Options_, typename StorageIndex_>
1519template <typename OtherDerived>
1520EIGEN_DONT_INLINE SparseMatrix<Scalar, Options_, StorageIndex_>&
1521SparseMatrix<Scalar, Options_, StorageIndex_>::operator=(const SparseMatrixBase<OtherDerived>& other) {
1522 EIGEN_STATIC_ASSERT(
1523 (internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
1524 YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
1525
1526#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1527 EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1528#endif
1529
1530 const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
1531 if (needToTranspose) {
1532#ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1533 EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1534#endif
1535 // two passes algorithm:
1536 // 1 - compute the number of coeffs per dest inner vector
1537 // 2 - do the actual copy/eval
1538 // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
1539 typedef
1540 typename internal::nested_eval<OtherDerived, 2, typename internal::plain_matrix_type<OtherDerived>::type>::type
1541 OtherCopy;
1542 typedef internal::remove_all_t<OtherCopy> OtherCopy_;
1543 typedef internal::evaluator<OtherCopy_> OtherCopyEval;
1544 OtherCopy otherCopy(other.derived());
1545 OtherCopyEval otherCopyEval(otherCopy);
1546
1547 SparseMatrix dest(other.rows(), other.cols());
1548 Eigen::Map<IndexVector>(dest.m_outerIndex, dest.outerSize()).setZero();
1549
1550 // pass 1
1551 // FIXME the above copy could be merged with that pass
1552 for (Index j = 0; j < otherCopy.outerSize(); ++j)
1553 for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it) ++dest.m_outerIndex[it.index()];
1554
1555 // prefix sum
1556 StorageIndex count = 0;
1557 IndexVector positions(dest.outerSize());
1558 for (Index j = 0; j < dest.outerSize(); ++j) {
1559 StorageIndex tmp = dest.m_outerIndex[j];
1560 dest.m_outerIndex[j] = count;
1561 positions[j] = count;
1562 count += tmp;
1563 }
1564 dest.m_outerIndex[dest.outerSize()] = count;
1565 // alloc
1566 dest.m_data.resize(count);
1567 // pass 2
1568 for (StorageIndex j = 0; j < otherCopy.outerSize(); ++j) {
1569 for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it) {
1570 Index pos = positions[it.index()]++;
1571 dest.m_data.index(pos) = j;
1572 dest.m_data.value(pos) = it.value();
1573 }
1574 }
1575 this->swap(dest);
1576 return *this;
1577 } else {
1578 if (other.isRValue()) {
1579 initAssignment(other.derived());
1580 }
1581 // there is no special optimization
1582 return Base::operator=(other.derived());
1583 }
1584}
1585
1586template <typename Scalar_, int Options_, typename StorageIndex_>
1587inline typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
1588SparseMatrix<Scalar_, Options_, StorageIndex_>::insert(Index row, Index col) {
1589 return insertByOuterInner(IsRowMajor ? row : col, IsRowMajor ? col : row);
1590}
1591
1592template <typename Scalar_, int Options_, typename StorageIndex_>
1593EIGEN_STRONG_INLINE typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
1594SparseMatrix<Scalar_, Options_, StorageIndex_>::insertAtByOuterInner(Index outer, Index inner, Index dst) {
1595 // random insertion into compressed matrix is very slow
1596 uncompress();
1597 return insertUncompressedAtByOuterInner(outer, inner, dst);
1598}
1599
1600template <typename Scalar_, int Options_, typename StorageIndex_>
1601EIGEN_DEPRECATED EIGEN_DONT_INLINE typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
1602SparseMatrix<Scalar_, Options_, StorageIndex_>::insertUncompressed(Index row, Index col) {
1603 eigen_assert(!isCompressed());
1604 Index outer = IsRowMajor ? row : col;
1605 Index inner = IsRowMajor ? col : row;
1606 Index start = m_outerIndex[outer];
1607 Index end = start + m_innerNonZeros[outer];
1608 Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
1609 if (dst == end) {
1610 Index capacity = m_outerIndex[outer + 1] - end;
1611 if (capacity > 0) {
1612 // implies uncompressed: push to back of vector
1613 m_innerNonZeros[outer]++;
1614 m_data.index(end) = StorageIndex(inner);
1615 m_data.value(end) = Scalar(0);
1616 return m_data.value(end);
1617 }
1618 }
1619 eigen_assert((dst == end || m_data.index(dst) != inner) &&
1620 "you cannot insert an element that already exists, you must call coeffRef to this end");
1621 return insertUncompressedAtByOuterInner(outer, inner, dst);
1622}
1623
1624template <typename Scalar_, int Options_, typename StorageIndex_>
1625EIGEN_DEPRECATED EIGEN_DONT_INLINE typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
1626SparseMatrix<Scalar_, Options_, StorageIndex_>::insertCompressed(Index row, Index col) {
1627 eigen_assert(isCompressed());
1628 Index outer = IsRowMajor ? row : col;
1629 Index inner = IsRowMajor ? col : row;
1630 Index start = m_outerIndex[outer];
1631 Index end = m_outerIndex[outer + 1];
1632 Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
1633 eigen_assert((dst == end || m_data.index(dst) != inner) &&
1634 "you cannot insert an element that already exists, you must call coeffRef to this end");
1635 return insertCompressedAtByOuterInner(outer, inner, dst);
1636}
1637
1638template <typename Scalar_, int Options_, typename StorageIndex_>
1639typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
1640SparseMatrix<Scalar_, Options_, StorageIndex_>::insertCompressedAtByOuterInner(Index outer, Index inner, Index dst) {
1641 eigen_assert(isCompressed());
1642 // compressed insertion always requires expanding the buffer
1643 // first, check if there is adequate allocated memory
1644 if (m_data.allocatedSize() <= m_data.size()) {
1645 // if there is no capacity for a single insertion, double the capacity
1646 // increase capacity by a minimum of 32
1647 Index minReserve = 32;
1648 Index reserveSize = numext::maxi(minReserve, m_data.allocatedSize());
1649 m_data.reserve(reserveSize);
1650 }
1651 m_data.resize(m_data.size() + 1);
1652 Index chunkSize = m_outerIndex[m_outerSize] - dst;
1653 // shift the existing data to the right if necessary
1654 m_data.moveChunk(dst, dst + 1, chunkSize);
1655 // update nonzero counts
1656 // potentially O(outerSize) bottleneck!
1657 for (Index j = outer; j < m_outerSize; j++) m_outerIndex[j + 1]++;
1658 // initialize the coefficient
1659 m_data.index(dst) = StorageIndex(inner);
1660 m_data.value(dst) = Scalar(0);
1661 // return a reference to the coefficient
1662 return m_data.value(dst);
1663}
1664
1665template <typename Scalar_, int Options_, typename StorageIndex_>
1666typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
1667SparseMatrix<Scalar_, Options_, StorageIndex_>::insertUncompressedAtByOuterInner(Index outer, Index inner, Index dst) {
1668 eigen_assert(!isCompressed());
1669 // find a vector with capacity, starting at `outer` and searching to the left and right
1670 for (Index leftTarget = outer - 1, rightTarget = outer; (leftTarget >= 0) || (rightTarget < m_outerSize);) {
1671 if (rightTarget < m_outerSize) {
1672 Index start = m_outerIndex[rightTarget];
1673 Index end = start + m_innerNonZeros[rightTarget];
1674 Index nextStart = m_outerIndex[rightTarget + 1];
1675 Index capacity = nextStart - end;
1676 if (capacity > 0) {
1677 // move [dst, end) to dst+1 and insert at dst
1678 Index chunkSize = end - dst;
1679 if (chunkSize > 0) m_data.moveChunk(dst, dst + 1, chunkSize);
1680 m_innerNonZeros[outer]++;
1681 for (Index j = outer; j < rightTarget; j++) m_outerIndex[j + 1]++;
1682 m_data.index(dst) = StorageIndex(inner);
1683 m_data.value(dst) = Scalar(0);
1684 return m_data.value(dst);
1685 }
1686 rightTarget++;
1687 }
1688 if (leftTarget >= 0) {
1689 Index start = m_outerIndex[leftTarget];
1690 Index end = start + m_innerNonZeros[leftTarget];
1691 Index nextStart = m_outerIndex[leftTarget + 1];
1692 Index capacity = nextStart - end;
1693 if (capacity > 0) {
1694 // tricky: dst is a lower bound, so we must insert at dst-1 when shifting left
1695 // move [nextStart, dst) to nextStart-1 and insert at dst-1
1696 Index chunkSize = dst - nextStart;
1697 if (chunkSize > 0) m_data.moveChunk(nextStart, nextStart - 1, chunkSize);
1698 m_innerNonZeros[outer]++;
1699 for (Index j = leftTarget; j < outer; j++) m_outerIndex[j + 1]--;
1700 m_data.index(dst - 1) = StorageIndex(inner);
1701 m_data.value(dst - 1) = Scalar(0);
1702 return m_data.value(dst - 1);
1703 }
1704 leftTarget--;
1705 }
1706 }
1707
1708 // no room for interior insertion
1709 // nonZeros() == m_data.size()
1710 // record offset as outerIndexPtr will change
1711 Index dst_offset = dst - m_outerIndex[outer];
1712 // allocate space for random insertion
1713 if (m_data.allocatedSize() == 0) {
1714 // fast method to allocate space for one element per vector in empty matrix
1715 m_data.resize(m_outerSize);
1716 std::iota(m_outerIndex, m_outerIndex + m_outerSize + 1, StorageIndex(0));
1717 } else {
1718 // check for integer overflow: if maxReserveSize == 0, insertion is not possible
1719 Index maxReserveSize = static_cast<Index>(NumTraits<StorageIndex>::highest()) - m_data.allocatedSize();
1720 eigen_assert(maxReserveSize > 0);
1721 if (m_outerSize <= maxReserveSize) {
1722 // allocate space for one additional element per vector
1723 reserveInnerVectors(IndexVector::Constant(m_outerSize, 1));
1724 } else {
1725 // handle the edge case where StorageIndex is insufficient to reserve outerSize additional elements
1726 // allocate maxReserveSize additional elements in total, distributed over the vectors at and after `outer`
1727 typedef internal::sparse_reserve_op<StorageIndex> ReserveSizesOp;
1728 typedef CwiseNullaryOp<ReserveSizesOp, IndexVector> ReserveSizesXpr;
1729 ReserveSizesXpr reserveSizesXpr(m_outerSize, 1, ReserveSizesOp(outer, m_outerSize, maxReserveSize));
1730 reserveInnerVectors(reserveSizesXpr);
1731 }
1732 }
1733 // insert element at `dst` with new outer indices
1734 Index start = m_outerIndex[outer];
1735 Index end = start + m_innerNonZeros[outer];
1736 Index new_dst = start + dst_offset;
1737 Index chunkSize = end - new_dst;
1738 if (chunkSize > 0) m_data.moveChunk(new_dst, new_dst + 1, chunkSize);
1739 m_innerNonZeros[outer]++;
1740 m_data.index(new_dst) = StorageIndex(inner);
1741 m_data.value(new_dst) = Scalar(0);
1742 return m_data.value(new_dst);
1743}
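// Editor's note (illustrative sketch, not part of the Eigen sources): reserving per-vector
// capacity puts the matrix into uncompressed mode, so later random insertions typically land in
// the function above and can reuse the reserved slack instead of reallocating or shifting the
// whole buffer. The function name below is hypothetical; only the Eigen calls are real.
//
//   #include <Eigen/SparseCore>
//
//   void uncompressedInsertionExample() {
//     Eigen::SparseMatrix<double> A(1000, 1000);
//     A.reserve(Eigen::VectorXi::Constant(1000, 6));   // ~6 nonzeros per column; switches to uncompressed mode
//     for (int j = 0; j < 1000; ++j)
//       for (int k = 0; k < 6; ++k)
//         A.insert((7 * j + 13 * k) % 1000, j) = 1.0;  // distinct rows per column, fills the reserved slack
//     A.makeCompressed();                              // drop unused slack and return to compressed storage
//   }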
1744
1745namespace internal {
1746
1747template <typename Scalar_, int Options_, typename StorageIndex_>
1748struct evaluator<SparseMatrix<Scalar_, Options_, StorageIndex_>>
1749 : evaluator<SparseCompressedBase<SparseMatrix<Scalar_, Options_, StorageIndex_>>> {
1750 typedef evaluator<SparseCompressedBase<SparseMatrix<Scalar_, Options_, StorageIndex_>>> Base;
1751 typedef SparseMatrix<Scalar_, Options_, StorageIndex_> SparseMatrixType;
1752 evaluator() : Base() {}
1753 explicit evaluator(const SparseMatrixType& mat) : Base(mat) {}
1754};
1755
1756} // namespace internal
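// Editor's note (illustrative sketch, not part of the Eigen sources): internal::evaluator is never
// used directly in user code; Eigen's assignment machinery instantiates it whenever a sparse
// expression involving a SparseMatrix is evaluated, for example:
//
//   #include <Eigen/SparseCore>
//
//   Eigen::SparseMatrix<double> addExample(const Eigen::SparseMatrix<double>& A,
//                                          const Eigen::SparseMatrix<double>& B) {
//     return A + B;  // evaluators for A and B drive the sparse-sparse sum and the assignment
//   }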
1757
1758// Specialization for SparseMatrix.
1759// Serializes [rows, cols, isCompressed, outerSize, innerBufferSize,
1760// innerNonZeros, outerIndices, innerIndices, values].
1761template <typename Scalar, int Options, typename StorageIndex>
1762class Serializer<SparseMatrix<Scalar, Options, StorageIndex>, void> {
1763 public:
1764 typedef SparseMatrix<Scalar, Options, StorageIndex> SparseMat;
1765
1766 struct Header {
1767 typename SparseMat::Index rows;
1768 typename SparseMat::Index cols;
1769 bool compressed;
1770 Index outer_size;
1771 Index inner_buffer_size;
1772 };
1773
1774 EIGEN_DEVICE_FUNC size_t size(const SparseMat& value) const {
1775 // innerNonZeros.
1776 std::size_t num_storage_indices = value.isCompressed() ? 0 : value.outerSize();
1777 // Outer indices.
1778 num_storage_indices += value.outerSize() + 1;
1779 // Inner indices.
1780 const StorageIndex inner_buffer_size = value.outerIndexPtr()[value.outerSize()];
1781 num_storage_indices += inner_buffer_size;
1782 // Values.
1783 std::size_t num_values = inner_buffer_size;
1784 return sizeof(Header) + sizeof(Scalar) * num_values + sizeof(StorageIndex) * num_storage_indices;
1785 }
1786
1787 EIGEN_DEVICE_FUNC uint8_t* serialize(uint8_t* dest, uint8_t* end, const SparseMat& value) {
1788 if (EIGEN_PREDICT_FALSE(dest == nullptr)) return nullptr;
1789 if (EIGEN_PREDICT_FALSE(dest + size(value) > end)) return nullptr;
1790
1791 const size_t header_bytes = sizeof(Header);
1792 Header header = {value.rows(), value.cols(), value.isCompressed(), value.outerSize(),
1793 value.outerIndexPtr()[value.outerSize()]};
1794 EIGEN_USING_STD(memcpy)
1795 memcpy(dest, &header, header_bytes);
1796 dest += header_bytes;
1797
1798 // innerNonZeros.
1799 if (!header.compressed) {
1800 std::size_t data_bytes = sizeof(StorageIndex) * header.outer_size;
1801 memcpy(dest, value.innerNonZeroPtr(), data_bytes);
1802 dest += data_bytes;
1803 }
1804
1805 // Outer indices.
1806 std::size_t data_bytes = sizeof(StorageIndex) * (header.outer_size + 1);
1807 memcpy(dest, value.outerIndexPtr(), data_bytes);
1808 dest += data_bytes;
1809
1810 // Inner indices.
1811 data_bytes = sizeof(StorageIndex) * header.inner_buffer_size;
1812 memcpy(dest, value.innerIndexPtr(), data_bytes);
1813 dest += data_bytes;
1814
1815 // Values.
1816 data_bytes = sizeof(Scalar) * header.inner_buffer_size;
1817 memcpy(dest, value.valuePtr(), data_bytes);
1818 dest += data_bytes;
1819
1820 return dest;
1821 }
1822
1823 EIGEN_DEVICE_FUNC const uint8_t* deserialize(const uint8_t* src, const uint8_t* end, SparseMat& value) const {
1824 if (EIGEN_PREDICT_FALSE(src == nullptr)) return nullptr;
1825 if (EIGEN_PREDICT_FALSE(src + sizeof(Header) > end)) return nullptr;
1826
1827 const size_t header_bytes = sizeof(Header);
1828 Header header;
1829 EIGEN_USING_STD(memcpy)
1830 memcpy(&header, src, header_bytes);
1831 src += header_bytes;
1832
1833 value.setZero();
1834 value.resize(header.rows, header.cols);
1835 if (header.compressed) {
1836 value.makeCompressed();
1837 } else {
1838 value.uncompress();
1839 }
1840
1841 // Adjust value ptr size.
1842 value.data().resize(header.inner_buffer_size);
1843
1844 // Initialize compressed state and inner non-zeros.
1845 if (!header.compressed) {
1846 // Inner non-zero counts.
1847 std::size_t data_bytes = sizeof(StorageIndex) * header.outer_size;
1848 if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
1849 memcpy(value.innerNonZeroPtr(), src, data_bytes);
1850 src += data_bytes;
1851 }
1852
1853 // Outer indices.
1854 std::size_t data_bytes = sizeof(StorageIndex) * (header.outer_size + 1);
1855 if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
1856 memcpy(value.outerIndexPtr(), src, data_bytes);
1857 src += data_bytes;
1858
1859 // Inner indices.
1860 data_bytes = sizeof(StorageIndex) * header.inner_buffer_size;
1861 if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
1862 memcpy(value.innerIndexPtr(), src, data_bytes);
1863 src += data_bytes;
1864
1865 // Values.
1866 data_bytes = sizeof(Scalar) * header.inner_buffer_size;
1867 if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
1868 memcpy(value.valuePtr(), src, data_bytes);
1869 src += data_bytes;
1870 return src;
1871 }
1872};
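// Editor's note (illustrative sketch, not part of the Eigen sources): the specialization above is
// used through a Serializer instance (or Eigen's serialize()/deserialize() helpers). size() gives
// the exact byte count of the layout documented above; serialize()/deserialize() return nullptr
// when the buffer is too small. The include set and function name below are assumptions for
// illustration only.
//
//   #include <Eigen/SparseCore>
//   #include <cstdint>
//   #include <vector>
//
//   void roundTripExample(const Eigen::SparseMatrix<double>& in, Eigen::SparseMatrix<double>& out) {
//     Eigen::Serializer<Eigen::SparseMatrix<double>> s;
//     std::vector<std::uint8_t> buffer(s.size(in));  // header + inner-nonzero counts + indices + values
//     std::uint8_t* write_end = s.serialize(buffer.data(), buffer.data() + buffer.size(), in);
//     const std::uint8_t* read_end = s.deserialize(buffer.data(), buffer.data() + buffer.size(), out);
//     // write_end/read_end would be nullptr only if the buffer were too small; here `out` now matches `in`.
//     (void)write_end; (void)read_end;
//   }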
1873
1874} // end namespace Eigen
1875
1876#endif // EIGEN_SPARSEMATRIX_H