  /** \internal Sorts the inner indices of each inner vector in the range [begin,end) according to \c Comp. */
  template <class Comp = std::less<>>
  inline void sortInnerIndices(Index begin, Index end) {
    internal::inner_sort_impl<Derived, Comp, IsVectorAtCompileTime>::run(*this, begin, end);
  }

  /** \internal Checks, via internal::inner_sort_impl::check, whether the inner indices of the inner vectors
   * in the range [begin,end) are sorted according to \c Comp. */
  template <class Comp = std::less<>>
  inline Index innerIndicesAreSorted(Index begin, Index end) const {
    return internal::inner_sort_impl<Derived, Comp, IsVectorAtCompileTime>::check(*this, begin, end);
  }

  /** \internal Sorts the inner indices of every inner vector according to \c Comp. */
  template <class Comp = std::less<>>
  inline void sortInnerIndices() {
    Index begin = 0;
    Index end = derived().outerSize();
    internal::inner_sort_impl<Derived, Comp, IsVectorAtCompileTime>::run(*this, begin, end);
  }

  /** \internal Checks, via internal::inner_sort_impl::check, whether the inner indices of every inner vector
   * are sorted according to \c Comp. */
  template <class Comp = std::less<>>
  inline Index innerIndicesAreSorted() const {
    Index begin = 0;
    Index end = derived().outerSize();
    return internal::inner_sort_impl<Derived, Comp, IsVectorAtCompileTime>::check(*this, begin, end);
  }
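  // Typical usage (illustrative): sort the indices of every inner vector of a row-major matrix with a
  // custom comparator, then verify the result.
  //
  //   Eigen::SparseMatrix<double, Eigen::RowMajor> A = ...;
  //   A.sortInnerIndices<std::greater<>>();          // sort each row's column indices in descending order
  //   A.innerIndicesAreSorted<std::greater<>>();     // reports whether/where that ordering holds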
  /** \internal Binary search for the entry (row,col) inside its (sorted) inner vector. */
  internal::LowerBoundIndex lower_bound(Index row, Index col) const {
    const Index outer = Derived::IsRowMajor ? row : col;
    const Index inner = Derived::IsRowMajor ? col : row;

    const Index start = this->outerIndexPtr()[outer];
    const Index end = this->isCompressed() ? this->outerIndexPtr()[outer + 1]
                                           : this->outerIndexPtr()[outer] + this->innerNonZeroPtr()[outer];
    eigen_assert(end >= start && "you are using a non finalized sparse matrix or written coefficient does not exist");
    internal::LowerBoundIndex p;
    p.value = std::lower_bound(this->innerIndexPtr() + start, this->innerIndexPtr() + end, inner) - this->innerIndexPtr();
    p.found = (p.value < end) && (this->innerIndexPtr()[p.value] == inner);
    return p;
  }
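  // Illustrative example: for a compressed column-major matrix whose column j stores inner indices
  // {1, 4, 7} starting at offset start, lower_bound(4, j) yields {value: start + 1, found: true},
  // while lower_bound(5, j) yields the insertion offset {value: start + 2, found: false}.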
  template <typename OtherDerived>
  explicit SparseCompressedBase(const SparseCompressedBase<OtherDerived>&);
};

template <typename Derived>
class SparseCompressedBase<Derived>::InnerIterator {
 public:
  InnerIterator() : m_values(0), m_indices(0), m_outer(0), m_id(0), m_end(0) {}

  InnerIterator(const InnerIterator& other)
      : m_values(other.m_values),
        m_indices(other.m_indices),
        m_outer(other.m_outer),
        m_id(other.m_id),
        m_end(other.m_end) {}

  InnerIterator& operator=(const InnerIterator& other) {
    m_values = other.m_values;
    m_indices = other.m_indices;
    const_cast<OuterType&>(m_outer).setValue(other.m_outer.value());
    m_id = other.m_id;
    m_end = other.m_end;
    return *this;
  }

  InnerIterator(const SparseCompressedBase& mat, Index outer)
      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer) {
    if (Derived::IsVectorAtCompileTime && mat.outerIndexPtr() == 0) {
      m_id = 0;
      m_end = mat.nonZeros();
    } else {
      m_id = mat.outerIndexPtr()[outer];
      if (mat.isCompressed())
        m_end = mat.outerIndexPtr()[outer + 1];
      else
        m_end = m_id + mat.innerNonZeroPtr()[outer];
    }
  }

  explicit InnerIterator(const SparseCompressedBase& mat) : InnerIterator(mat, Index(0)) {
    EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
  }

  explicit InnerIterator(const internal::CompressedStorage<Scalar, StorageIndex>& data)
      : m_values(data.valuePtr()), m_indices(data.indexPtr()), m_outer(0), m_id(0), m_end(data.size()) {
    EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
  }

  inline InnerIterator& operator++() {
    m_id++;
    return *this;
  }
  inline InnerIterator& operator+=(Index i) {
    m_id += i;
    return *this;
  }

  inline InnerIterator operator+(Index i) {
    InnerIterator result = *this;
    result += i;
    return result;
  }

  inline const Scalar& value() const { return m_values[m_id]; }
  inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }

  inline StorageIndex index() const { return m_indices[m_id]; }
  inline Index outer() const { return m_outer.value(); }
  inline Index row() const { return IsRowMajor ? m_outer.value() : index(); }
  inline Index col() const { return IsRowMajor ? index() : m_outer.value(); }

  inline operator bool() const { return (m_id < m_end); }

 protected:
  const Scalar* m_values;
  const StorageIndex* m_indices;
  typedef internal::variable_if_dynamic<Index, Derived::IsVectorAtCompileTime ? 0 : Dynamic> OuterType;
  const OuterType m_outer;
  Index m_id;
  Index m_end;

 private:
  // Declared private and left undefined so that constructing this iterator from an unrelated
  // sparse expression fails to compile.
  template <typename T>
  InnerIterator(const SparseMatrixBase<T>&, Index outer);
};
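// Usage sketch: the canonical loop over the stored entries of one outer vector
// (a column of the default column-major SparseMatrix).
//
//   Eigen::SparseMatrix<double> A = ...;
//   for (Eigen::SparseMatrix<double>::InnerIterator it(A, j); it; ++it) {
//     // it.row(), it.col(), it.index() and it.value() describe the current nonzero
//   }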
template <typename Derived>
class SparseCompressedBase<Derived>::ReverseInnerIterator {
 public:
  ReverseInnerIterator(const SparseCompressedBase& mat, Index outer)
      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer) {
    if (Derived::IsVectorAtCompileTime && mat.outerIndexPtr() == 0) {
      m_start = 0;
      m_id = mat.nonZeros();
    } else {
      m_start = mat.outerIndexPtr()[outer];
      if (mat.isCompressed())
        m_id = mat.outerIndexPtr()[outer + 1];
      else
        m_id = m_start + mat.innerNonZeroPtr()[outer];
    }
  }

  explicit ReverseInnerIterator(const SparseCompressedBase& mat) : ReverseInnerIterator(mat, Index(0)) {
    EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
  }

  explicit ReverseInnerIterator(const internal::CompressedStorage<Scalar, StorageIndex>& data)
      : m_values(data.valuePtr()), m_indices(data.indexPtr()), m_outer(0), m_start(0), m_id(data.size()) {
    EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
  }

  inline ReverseInnerIterator& operator--() {
    --m_id;
    return *this;
  }
  inline ReverseInnerIterator& operator-=(Index i) {
    m_id -= i;
    return *this;
  }

  inline ReverseInnerIterator operator-(Index i) {
    ReverseInnerIterator result = *this;
    result -= i;
    return result;
  }

  inline const Scalar& value() const { return m_values[m_id - 1]; }
  inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id - 1]); }

  inline StorageIndex index() const { return m_indices[m_id - 1]; }
  inline Index outer() const { return m_outer.value(); }
  inline Index row() const { return IsRowMajor ? m_outer.value() : index(); }
  inline Index col() const { return IsRowMajor ? index() : m_outer.value(); }

  inline operator bool() const { return (m_id > m_start); }

 protected:
  const Scalar* m_values;
  const StorageIndex* m_indices;
  typedef internal::variable_if_dynamic<Index, Derived::IsVectorAtCompileTime ? 0 : Dynamic> OuterType;
  const OuterType m_outer;
  Index m_start;
  Index m_id;
};
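// ReverseInnerIterator walks the same compressed range backwards: m_id starts one past the last
// stored entry and value()/index() read position m_id - 1, so the loop shape mirrors the forward one:
//
//   for (Eigen::SparseMatrix<double>::ReverseInnerIterator it(A, j); it; --it) { /* ... */ }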
// helper classes used to sort the inner indices and values of compressed storage in place
template <typename Scalar, typename StorageIndex>
class StorageVal;
template <typename Scalar, typename StorageIndex>
class StorageRef;
template <typename Scalar, typename StorageIndex>
class CompressedStorageIterator;
// by-value (StorageIndex, Scalar) pair, used as the value_type of CompressedStorageIterator
template <typename Scalar, typename StorageIndex>
class StorageVal {
 public:
  StorageVal(const StorageIndex& innerIndex, const Scalar& value) : m_innerIndex(innerIndex), m_value(value) {}
  StorageVal(const StorageVal& other) : m_innerIndex(other.m_innerIndex), m_value(other.m_value) {}
  StorageVal(StorageVal&& other) = default;

  inline const StorageIndex& key() const { return m_innerIndex; }
  inline StorageIndex& key() { return m_innerIndex; }
  inline const Scalar& value() const { return m_value; }
  inline Scalar& value() { return m_value; }

  // implicit conversion to the inner index, so that comparators such as std::less<> see the key
  inline operator StorageIndex() const { return m_innerIndex; }

 protected:
  StorageIndex m_innerIndex;
  Scalar m_value;

  // the inner index and value must always be provided together
  StorageVal() = delete;
};
// non-owning reference to a (StorageIndex, Scalar) pair stored in two parallel arrays;
// this is the proxy type returned when a CompressedStorageIterator is dereferenced
template <typename Scalar, typename StorageIndex>
class StorageRef {
 public:
  using value_type = StorageVal<Scalar, StorageIndex>;

  StorageRef(StorageRef&& other) = default;

  inline StorageRef& operator=(const StorageRef& other) {
    key() = other.key();
    value() = other.value();
    return *this;
  }
  inline StorageRef& operator=(const value_type& other) {
    key() = other.key();
    value() = other.value();
    return *this;
  }
  inline operator value_type() const { return value_type(key(), value()); }
  inline friend void swap(const StorageRef& a, const StorageRef& b) {
    std::iter_swap(a.keyPtr(), b.keyPtr());
    std::iter_swap(a.valuePtr(), b.valuePtr());
  }

  inline const StorageIndex& key() const { return *m_innerIndexIterator; }
  inline StorageIndex& key() { return *m_innerIndexIterator; }
  inline const Scalar& value() const { return *m_valueIterator; }
  inline Scalar& value() { return *m_valueIterator; }
  inline StorageIndex* keyPtr() const { return m_innerIndexIterator; }
  inline Scalar* valuePtr() const { return m_valueIterator; }

  // implicit conversion to the inner index, so that comparators such as std::less<> see the key
  inline operator StorageIndex() const { return *m_innerIndexIterator; }

 protected:
  StorageIndex* m_innerIndexIterator;
  Scalar* m_valueIterator;

 private:
  StorageRef() = delete;
  // these constructors are reserved for CompressedStorageIterator
  StorageRef(StorageIndex* innerIndexIterator, Scalar* valueIterator)
      : m_innerIndexIterator(innerIndexIterator), m_valueIterator(valueIterator) {}
  StorageRef(const StorageRef& other)
      : m_innerIndexIterator(other.m_innerIndexIterator), m_valueIterator(other.m_valueIterator) {}

  friend class CompressedStorageIterator<Scalar, StorageIndex>;
};
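// Design note: std::sort cannot swap "elements" that live in two separate arrays, so the sort is
// driven through this proxy instead. swap(StorageRef, StorageRef) exchanges the inner index and the
// value in lock-step, operator=(value_type) writes a by-value StorageVal snapshot back into both
// arrays, and the conversions to StorageIndex let an ordinary comparator order entries by index.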
// random-access iterator over the paired inner-index / value arrays of compressed storage
template <typename Scalar, typename StorageIndex>
class CompressedStorageIterator {
 public:
  using iterator_category = std::random_access_iterator_tag;
  using reference = StorageRef<Scalar, StorageIndex>;
  using difference_type = Index;
  using value_type = typename reference::value_type;
  using pointer = value_type*;

  CompressedStorageIterator() = delete;
  CompressedStorageIterator(difference_type index, StorageIndex* innerIndexPtr, Scalar* valuePtr)
      : m_index(index), m_data(innerIndexPtr, valuePtr) {}
  CompressedStorageIterator(difference_type index, reference data) : m_index(index), m_data(data) {}
  CompressedStorageIterator(const CompressedStorageIterator& other) : m_index(other.m_index), m_data(other.m_data) {}
  CompressedStorageIterator(CompressedStorageIterator&& other) = default;
  inline CompressedStorageIterator& operator=(const CompressedStorageIterator& other) {
    m_index = other.m_index;
    m_data = other.m_data;
    return *this;
  }

  inline CompressedStorageIterator operator+(difference_type offset) const {
    return CompressedStorageIterator(m_index + offset, m_data);
  }
  inline CompressedStorageIterator operator-(difference_type offset) const {
    return CompressedStorageIterator(m_index - offset, m_data);
  }
  inline difference_type operator-(const CompressedStorageIterator& other) const { return m_index - other.m_index; }
  inline CompressedStorageIterator& operator++() {
    ++m_index;
    return *this;
  }
  inline CompressedStorageIterator& operator--() {
    --m_index;
    return *this;
  }
  inline CompressedStorageIterator& operator+=(difference_type offset) {
    m_index += offset;
    return *this;
  }
  inline CompressedStorageIterator& operator-=(difference_type offset) {
    m_index -= offset;
    return *this;
  }
  inline reference operator*() const { return reference(m_data.keyPtr() + m_index, m_data.valuePtr() + m_index); }
  inline reference operator[](int index) { return *(*this + index); }

#define MAKE_COMP(OP) \
  inline bool operator OP(const CompressedStorageIterator& other) const { return m_index OP other.m_index; }
  MAKE_COMP(<)
  MAKE_COMP(>)
  MAKE_COMP(>=)
  MAKE_COMP(<=)
  MAKE_COMP(!=)
  MAKE_COMP(==)
#undef MAKE_COMP

 protected:
  difference_type m_index;
  reference m_data;
};
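// Minimal standalone sketch (hypothetical use outside of the sort helpers below): sorting a raw
// index/value pair of arrays by index.
//
//   int    idx[] = {3, 0, 2};
//   double val[] = {30., 0., 20.};
//   CompressedStorageIterator<double, int> first(0, idx, val), last(3, idx, val);
//   std::sort(first, last, std::less<>());   // idx -> {0, 2, 3}, val -> {0., 20., 30.}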
// sorts the inner indices (and the corresponding values) of the outer vectors in [begin,end)
template <typename Derived, class Comp, bool IsVector>
struct inner_sort_impl {
  typedef typename Derived::Scalar Scalar;
  typedef typename Derived::StorageIndex StorageIndex;
  static inline void run(SparseCompressedBase<Derived>& obj, Index begin, Index end) {
    const bool is_compressed = obj.isCompressed();
    for (Index outer = begin; outer < end; outer++) {
      Index begin_offset = obj.outerIndexPtr()[outer];
      Index end_offset = is_compressed ? obj.outerIndexPtr()[outer + 1] : (begin_offset + obj.innerNonZeroPtr()[outer]);
      CompressedStorageIterator<Scalar, StorageIndex> begin_it(begin_offset, obj.innerIndexPtr(), obj.valuePtr());
      CompressedStorageIterator<Scalar, StorageIndex> end_it(end_offset, obj.innerIndexPtr(), obj.valuePtr());
      std::sort(begin_it, end_it, Comp());
    }
  }
  static inline Index check(const SparseCompressedBase<Derived>& obj, Index begin, Index end) {
    const bool is_compressed = obj.isCompressed();
    for (Index outer = begin; outer < end; outer++) {
      Index begin_offset = obj.outerIndexPtr()[outer];
      Index end_offset = is_compressed ? obj.outerIndexPtr()[outer + 1] : (begin_offset + obj.innerNonZeroPtr()[outer]);
      const StorageIndex* begin_it = obj.innerIndexPtr() + begin_offset;
      const StorageIndex* end_it = obj.innerIndexPtr() + end_offset;
      bool is_sorted = std::is_sorted(begin_it, end_it, Comp());
      if (!is_sorted) return outer;
    }
    return end;
  }
};
// for compile-time vectors the whole storage is a single inner vector, so begin/end are ignored
template <typename Derived, class Comp>
struct inner_sort_impl<Derived, Comp, true> {
  typedef typename Derived::Scalar Scalar;
  typedef typename Derived::StorageIndex StorageIndex;
  static inline void run(SparseCompressedBase<Derived>& obj, Index, Index) {
    Index begin_offset = 0;
    Index end_offset = obj.nonZeros();
    CompressedStorageIterator<Scalar, StorageIndex> begin_it(begin_offset, obj.innerIndexPtr(), obj.valuePtr());
    CompressedStorageIterator<Scalar, StorageIndex> end_it(end_offset, obj.innerIndexPtr(), obj.valuePtr());
    std::sort(begin_it, end_it, Comp());
  }
  static inline Index check(const SparseCompressedBase<Derived>& obj, Index, Index) {
    Index begin_offset = 0;
    Index end_offset = obj.nonZeros();
    const StorageIndex* begin_it = obj.innerIndexPtr() + begin_offset;
    const StorageIndex* end_it = obj.innerIndexPtr() + end_offset;
    return std::is_sorted(begin_it, end_it, Comp()) ? 1 : 0;
  }
};
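// Note: in this vector specialization check() returns 1 when the single inner vector is sorted and
// 0 otherwise, whereas the general case above returns the outer index of the first unsorted vector
// (or `end` when every vector in the range is sorted).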
template <typename Derived>
struct evaluator<SparseCompressedBase<Derived>> : evaluator_base<Derived> {
  typedef typename Derived::Scalar Scalar;
  typedef typename Derived::InnerIterator InnerIterator;

  enum { CoeffReadCost = NumTraits<Scalar>::ReadCost, Flags = Derived::Flags };

  evaluator() : m_matrix(0), m_zero(0) { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); }
  explicit evaluator(const Derived& mat) : m_matrix(&mat), m_zero(0) { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); }

  inline Index nonZerosEstimate() const { return m_matrix->nonZeros(); }

  operator Derived&() { return m_matrix->const_cast_derived(); }
  operator const Derived&() const { return *m_matrix; }

  typedef typename DenseCoeffsBase<Derived, ReadOnlyAccessors>::CoeffReturnType CoeffReturnType;
  const Scalar& coeff(Index row, Index col) const {
    Index p = find(row, col);
    if (p == Dynamic)
      return m_zero;
    else
      return m_matrix->const_cast_derived().valuePtr()[p];
  }

  Scalar& coeffRef(Index row, Index col) {
    Index p = find(row, col);
    eigen_assert(p != Dynamic && "written coefficient does not exist");
    return m_matrix->const_cast_derived().valuePtr()[p];
  }

 protected:
  Index find(Index row, Index col) const {
    internal::LowerBoundIndex p = m_matrix->lower_bound(row, col);
    return p.found ? p.value : Dynamic;
  }

  const Derived* m_matrix;
  const Scalar m_zero;
};
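// Note on the two accessors above: coeff() falls back to the stored m_zero when (row,col) is not an
// explicit nonzero, while coeffRef() asserts instead, since there is no storage to write into; both
// rely on find(), i.e. one binary search within the corresponding inner vector.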