Eigen  3.4.90 (git rev 5a9f66fb35d03a4da9ef8976e67a61b30aa16dcf)
 
SparseMatrix.h
1// This file is part of Eigen, a lightweight C++ template library
2// for linear algebra.
3//
4// Copyright (C) 2008-2014 Gael Guennebaud <[email protected]>
5//
6// This Source Code Form is subject to the terms of the Mozilla
7// Public License v. 2.0. If a copy of the MPL was not distributed
8// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
10#ifndef EIGEN_SPARSEMATRIX_H
11#define EIGEN_SPARSEMATRIX_H
12
13// IWYU pragma: private
14#include "./InternalHeaderCheck.h"
15
16namespace Eigen {
17
49namespace internal {
50template <typename Scalar_, int Options_, typename StorageIndex_>
51struct traits<SparseMatrix<Scalar_, Options_, StorageIndex_>> {
52 typedef Scalar_ Scalar;
53 typedef StorageIndex_ StorageIndex;
54 typedef Sparse StorageKind;
55 typedef MatrixXpr XprKind;
56 enum {
57 RowsAtCompileTime = Dynamic,
58 ColsAtCompileTime = Dynamic,
59 MaxRowsAtCompileTime = Dynamic,
60 MaxColsAtCompileTime = Dynamic,
61 Options = Options_,
62 Flags = Options_ | NestByRefBit | LvalueBit | CompressedAccessBit,
63 SupportedAccessPatterns = InnerRandomAccessPattern
64 };
65};
66
67template <typename Scalar_, int Options_, typename StorageIndex_, int DiagIndex>
68struct traits<Diagonal<SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex>> {
69 typedef SparseMatrix<Scalar_, Options_, StorageIndex_> MatrixType;
70 typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
71 typedef std::remove_reference_t<MatrixTypeNested> MatrixTypeNested_;
72
73 typedef Scalar_ Scalar;
74 typedef Dense StorageKind;
75 typedef StorageIndex_ StorageIndex;
76 typedef MatrixXpr XprKind;
77
78 enum {
79 RowsAtCompileTime = Dynamic,
80 ColsAtCompileTime = 1,
81 MaxRowsAtCompileTime = Dynamic,
82 MaxColsAtCompileTime = 1,
83 Flags = LvalueBit
84 };
85};
86
87template <typename Scalar_, int Options_, typename StorageIndex_, int DiagIndex>
88struct traits<Diagonal<const SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex>>
89 : public traits<Diagonal<SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex>> {
90 enum { Flags = 0 };
91};
92
93template <typename StorageIndex>
94struct sparse_reserve_op {
95 EIGEN_DEVICE_FUNC sparse_reserve_op(Index begin, Index end, Index size) {
96 Index range = numext::mini(end - begin, size);
97 m_begin = begin;
98 m_end = begin + range;
99 m_val = StorageIndex(size / range);
100 m_remainder = StorageIndex(size % range);
101 }
102 template <typename IndexType>
103 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE StorageIndex operator()(IndexType i) const {
104 if ((i >= m_begin) && (i < m_end))
105 return m_val + ((i - m_begin) < m_remainder ? 1 : 0);
106 else
107 return 0;
108 }
109 StorageIndex m_val, m_remainder;
110 Index m_begin, m_end;
111};
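// Worked example (added for illustration; not part of the original source): with begin = 2,
// end = 5 and size = 7, the clamped range holds 3 outer vectors, so m_val = 7 / 3 = 2 and
// m_remainder = 7 % 3 = 1. operator()(i) then returns 3, 2, 2 for i = 2, 3, 4 and 0 elsewhere,
// i.e. exactly `size` reserved slots spread as evenly as possible over [begin, end).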
112
113template <typename Scalar>
114struct functor_traits<sparse_reserve_op<Scalar>> {
115 enum { Cost = 1, PacketAccess = false, IsRepeatable = true };
116};
117
118} // end namespace internal
119
120template <typename Scalar_, int Options_, typename StorageIndex_>
121class SparseMatrix : public SparseCompressedBase<SparseMatrix<Scalar_, Options_, StorageIndex_>> {
122 typedef SparseCompressedBase<SparseMatrix> Base;
123 using Base::convert_index;
124 friend class SparseVector<Scalar_, 0, StorageIndex_>;
125 template <typename, typename, typename, typename, typename>
126 friend struct internal::Assignment;
127
128 public:
129 using Base::isCompressed;
130 using Base::nonZeros;
131 EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
132 using Base::operator+=;
133 using Base::operator-=;
134
136 typedef Diagonal<SparseMatrix> DiagonalReturnType;
137 typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
138 typedef typename Base::InnerIterator InnerIterator;
139 typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
140
141 using Base::IsRowMajor;
142 typedef internal::CompressedStorage<Scalar, StorageIndex> Storage;
143 enum { Options = Options_ };
144
145 typedef typename Base::IndexVector IndexVector;
146 typedef typename Base::ScalarVector ScalarVector;
147
148 protected:
150
151 Index m_outerSize;
152 Index m_innerSize;
153 StorageIndex* m_outerIndex;
154 StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
155 Storage m_data;
156
157 public:
159 inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
161 inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
162
164 inline Index innerSize() const { return m_innerSize; }
166 inline Index outerSize() const { return m_outerSize; }
167
171 inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
175 inline Scalar* valuePtr() { return m_data.valuePtr(); }
176
180 inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
184 inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
185
189 inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
193 inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
194
198 inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
202 inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
203
205 inline Storage& data() { return m_data; }
207 inline const Storage& data() const { return m_data; }
208
211 inline Scalar coeff(Index row, Index col) const {
212 eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
213
214 const Index outer = IsRowMajor ? row : col;
215 const Index inner = IsRowMajor ? col : row;
216 Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer + 1];
217 return m_data.atInRange(m_outerIndex[outer], end, inner);
218 }
219
231 inline Scalar& findOrInsertCoeff(Index row, Index col, bool* inserted) {
232 eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
233 const Index outer = IsRowMajor ? row : col;
234 const Index inner = IsRowMajor ? col : row;
235 Index start = m_outerIndex[outer];
236 Index end = isCompressed() ? m_outerIndex[outer + 1] : m_outerIndex[outer] + m_innerNonZeros[outer];
237 eigen_assert(end >= start && "you probably called coeffRef on a non finalized matrix");
238 Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
239 if (dst == end) {
240 Index capacity = m_outerIndex[outer + 1] - end;
241 if (capacity > 0) {
242 // implies uncompressed: push to back of vector
243 m_innerNonZeros[outer]++;
244 m_data.index(end) = StorageIndex(inner);
245 m_data.value(end) = Scalar(0);
246 if (inserted != nullptr) {
247 *inserted = true;
248 }
249 return m_data.value(end);
250 }
251 }
252 if ((dst < end) && (m_data.index(dst) == inner)) {
253 // this coefficient exists, return a reference to it
254 if (inserted != nullptr) {
255 *inserted = false;
256 }
257 return m_data.value(dst);
258 } else {
259 if (inserted != nullptr) {
260 *inserted = true;
261 }
262 // insertion will require reconfiguring the buffer
263 return insertAtByOuterInner(outer, inner, dst);
264 }
265 }
266
275 inline Scalar& coeffRef(Index row, Index col) { return findOrInsertCoeff(row, col, nullptr); }
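  // Usage sketch (illustrative only, not part of the original header): coeffRef() silently
  // inserts a zero entry when (row, col) is not stored yet, while findOrInsertCoeff() also
  // reports whether an insertion happened.
  //   Eigen::SparseMatrix<double> A(100, 100);           // hypothetical matrix
  //   A.coeffRef(3, 7) += 2.5;                           // inserts (3,7) on first use, then updates it
  //   bool inserted;
  //   double& v = A.findOrInsertCoeff(4, 8, &inserted);  // v refers to the stored coefficient
  //   if (inserted) v = 1.0;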
276
293 inline Scalar& insert(Index row, Index col);
294
295 public:
303 inline void setZero() {
304 m_data.clear();
305 std::fill_n(m_outerIndex, m_outerSize + 1, StorageIndex(0));
306 if (m_innerNonZeros) {
307 std::fill_n(m_innerNonZeros, m_outerSize, StorageIndex(0));
308 }
309 }
310
314 inline void reserve(Index reserveSize) {
315 eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
316 m_data.reserve(reserveSize);
317 }
318
319#ifdef EIGEN_PARSED_BY_DOXYGEN
332 template <class SizesType>
333 inline void reserve(const SizesType& reserveSizes);
334#else
335 template <class SizesType>
336 inline void reserve(const SizesType& reserveSizes,
337 const typename SizesType::value_type& enableif = typename SizesType::value_type()) {
338 EIGEN_UNUSED_VARIABLE(enableif);
339 reserveInnerVectors(reserveSizes);
340 }
341#endif // EIGEN_PARSED_BY_DOXYGEN
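  // Usage sketch (illustrative only, not part of the original header): reserving a per-vector
  // budget before random insertion avoids repeated buffer reshuffles; the sizes are indexed by
  // the outer dimension (columns in the default column-major layout).
  //   Eigen::SparseMatrix<double> A(rows, cols);       // rows/cols: hypothetical dimensions
  //   A.reserve(Eigen::VectorXi::Constant(cols, 6));   // expect roughly 6 nonzeros per column
  //   A.insert(i, j) = v;                              // repeated for each (i, j, v) to fill
  //   A.makeCompressed();                              // return to compressed storage when done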
342 protected:
343 template <class SizesType>
344 inline void reserveInnerVectors(const SizesType& reserveSizes) {
345 if (isCompressed()) {
346 Index totalReserveSize = 0;
347 for (Index j = 0; j < m_outerSize; ++j) totalReserveSize += internal::convert_index<Index>(reserveSizes[j]);
348
349 // if reserveSizes is empty, don't do anything!
350 if (totalReserveSize == 0) return;
351
352 // turn the matrix into non-compressed mode
353 m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize);
354
355 // temporarily use m_innerNonZeros to hold the new starting points.
356 StorageIndex* newOuterIndex = m_innerNonZeros;
357
358 Index count = 0;
359 for (Index j = 0; j < m_outerSize; ++j) {
360 newOuterIndex[j] = internal::convert_index<StorageIndex>(count);
361 Index reserveSize = internal::convert_index<Index>(reserveSizes[j]);
362 count += reserveSize + internal::convert_index<Index>(m_outerIndex[j + 1] - m_outerIndex[j]);
363 }
364
365 m_data.reserve(totalReserveSize);
366 StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
367 for (Index j = m_outerSize - 1; j >= 0; --j) {
368 StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
369 StorageIndex begin = m_outerIndex[j];
370 StorageIndex end = begin + innerNNZ;
371 StorageIndex target = newOuterIndex[j];
372 internal::smart_memmove(innerIndexPtr() + begin, innerIndexPtr() + end, innerIndexPtr() + target);
373 internal::smart_memmove(valuePtr() + begin, valuePtr() + end, valuePtr() + target);
374 previousOuterIndex = m_outerIndex[j];
375 m_outerIndex[j] = newOuterIndex[j];
376 m_innerNonZeros[j] = innerNNZ;
377 }
378 if (m_outerSize > 0)
379 m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize - 1] + m_innerNonZeros[m_outerSize - 1] +
380 internal::convert_index<StorageIndex>(reserveSizes[m_outerSize - 1]);
381
382 m_data.resize(m_outerIndex[m_outerSize]);
383 } else {
384 StorageIndex* newOuterIndex = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize + 1);
385
386 Index count = 0;
387 for (Index j = 0; j < m_outerSize; ++j) {
388 newOuterIndex[j] = internal::convert_index<StorageIndex>(count);
389 Index alreadyReserved =
390 internal::convert_index<Index>(m_outerIndex[j + 1] - m_outerIndex[j] - m_innerNonZeros[j]);
391 Index reserveSize = internal::convert_index<Index>(reserveSizes[j]);
392 Index toReserve = numext::maxi(reserveSize, alreadyReserved);
393 count += toReserve + internal::convert_index<Index>(m_innerNonZeros[j]);
394 }
395 newOuterIndex[m_outerSize] = internal::convert_index<StorageIndex>(count);
396
397 m_data.resize(count);
398 for (Index j = m_outerSize - 1; j >= 0; --j) {
399 StorageIndex innerNNZ = m_innerNonZeros[j];
400 StorageIndex begin = m_outerIndex[j];
401 StorageIndex target = newOuterIndex[j];
402 m_data.moveChunk(begin, target, innerNNZ);
403 }
404
405 std::swap(m_outerIndex, newOuterIndex);
406 internal::conditional_aligned_delete_auto<StorageIndex, true>(newOuterIndex, m_outerSize + 1);
407 }
408 }
409
410 public:
411 //--- low level purely coherent filling ---
412
423 inline Scalar& insertBack(Index row, Index col) {
424 return insertBackByOuterInner(IsRowMajor ? row : col, IsRowMajor ? col : row);
425 }
426
429 inline Scalar& insertBackByOuterInner(Index outer, Index inner) {
430 eigen_assert(Index(m_outerIndex[outer + 1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
431 eigen_assert((m_outerIndex[outer + 1] - m_outerIndex[outer] == 0 || m_data.index(m_data.size() - 1) < inner) &&
432 "Invalid ordered insertion (invalid inner index)");
433 StorageIndex p = m_outerIndex[outer + 1];
434 ++m_outerIndex[outer + 1];
435 m_data.append(Scalar(0), inner);
436 return m_data.value(p);
437 }
438
441 inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner) {
442 StorageIndex p = m_outerIndex[outer + 1];
443 ++m_outerIndex[outer + 1];
444 m_data.append(Scalar(0), inner);
445 return m_data.value(p);
446 }
447
450 inline void startVec(Index outer) {
451 eigen_assert(m_outerIndex[outer] == Index(m_data.size()) &&
452 "You must call startVec for each inner vector sequentially");
453 eigen_assert(m_outerIndex[outer + 1] == 0 && "You must call startVec for each inner vector sequentially");
454 m_outerIndex[outer + 1] = m_outerIndex[outer];
455 }
456
460 inline void finalize() {
461 if (isCompressed()) {
462 StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
463 Index i = m_outerSize;
464 // find the last filled column
465 while (i >= 0 && m_outerIndex[i] == 0) --i;
466 ++i;
467 while (i <= m_outerSize) {
468 m_outerIndex[i] = size;
469 ++i;
470 }
471 }
472 }
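  // Usage sketch of the low-level filling API above (illustrative only, not part of the
  // original header): inner indices must be appended in increasing order within each outer
  // vector, and the outer vectors must be visited sequentially.
  //   Eigen::SparseMatrix<double> A(rows, cols);   // rows/cols: hypothetical dimensions
  //   A.reserve(nnzEstimate);                      // nnzEstimate: hypothetical upper bound
  //   for (Eigen::Index j = 0; j < A.outerSize(); ++j) {
  //     A.startVec(j);
  //     A.insertBackByOuterInner(j, i) = v;        // for each (i, v) of vector j, i increasing
  //   }
  //   A.finalize();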
473
474 // remove outer vectors j, j+1 ... j+num-1 and resize the matrix
475 void removeOuterVectors(Index j, Index num = 1) {
476 eigen_assert(num >= 0 && j >= 0 && j + num <= m_outerSize && "Invalid parameters");
477
478 const Index newRows = IsRowMajor ? m_outerSize - num : rows();
479 const Index newCols = IsRowMajor ? cols() : m_outerSize - num;
480
481 const Index begin = j + num;
482 const Index end = m_outerSize;
483 const Index target = j;
484
485 // if the removed vectors are not empty, uncompress the matrix
486 if (m_outerIndex[j + num] > m_outerIndex[j]) uncompress();
487
488 // shift m_outerIndex and m_innerNonZeros [num] to the left
489 internal::smart_memmove(m_outerIndex + begin, m_outerIndex + end + 1, m_outerIndex + target);
490 if (!isCompressed())
491 internal::smart_memmove(m_innerNonZeros + begin, m_innerNonZeros + end, m_innerNonZeros + target);
492
493 // if m_outerIndex[0] > 0, shift the data within the first vector while it is easy to do so
494 if (m_outerIndex[0] > StorageIndex(0)) {
495 uncompress();
496 const Index from = internal::convert_index<Index>(m_outerIndex[0]);
497 const Index to = Index(0);
498 const Index chunkSize = internal::convert_index<Index>(m_innerNonZeros[0]);
499 m_data.moveChunk(from, to, chunkSize);
500 m_outerIndex[0] = StorageIndex(0);
501 }
502
503 // truncate the matrix to the smaller size
504 conservativeResize(newRows, newCols);
505 }
506
507 // insert empty outer vectors at indices j, j+1 ... j+num-1 and resize the matrix
508 void insertEmptyOuterVectors(Index j, Index num = 1) {
509 EIGEN_USING_STD(fill_n);
510 eigen_assert(num >= 0 && j >= 0 && j < m_outerSize && "Invalid parameters");
511
512 const Index newRows = IsRowMajor ? m_outerSize + num : rows();
513 const Index newCols = IsRowMajor ? cols() : m_outerSize + num;
514
515 const Index begin = j;
516 const Index end = m_outerSize;
517 const Index target = j + num;
518
519 // expand the matrix to the larger size
520 conservativeResize(newRows, newCols);
521
522 // shift m_outerIndex and m_innerNonZeros [num] to the right
523 internal::smart_memmove(m_outerIndex + begin, m_outerIndex + end + 1, m_outerIndex + target);
524 // m_outerIndex[begin] == m_outerIndex[target], set all indices in this range to same value
525 fill_n(m_outerIndex + begin, num, m_outerIndex[begin]);
526
527 if (!isCompressed()) {
528 internal::smart_memmove(m_innerNonZeros + begin, m_innerNonZeros + end, m_innerNonZeros + target);
529 // set the nonzeros of the newly inserted vectors to 0
530 fill_n(m_innerNonZeros + begin, num, StorageIndex(0));
531 }
532 }
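  // Usage sketch (illustrative only, not part of the original header): for a column-major
  // matrix the two helpers above delete or splice in whole columns in place.
  //   A.removeOuterVectors(2, 3);       // drops columns 2..4 and shrinks the matrix
  //   A.insertEmptyOuterVectors(2, 3);  // re-inserts three empty columns starting at index 2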
533
534 template <typename InputIterators>
535 void setFromTriplets(const InputIterators& begin, const InputIterators& end);
536
537 template <typename InputIterators, typename DupFunctor>
538 void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
539
540 template <typename Derived, typename DupFunctor>
541 void collapseDuplicates(DenseBase<Derived>& wi, DupFunctor dup_func = DupFunctor());
542
543 template <typename InputIterators>
544 void setFromSortedTriplets(const InputIterators& begin, const InputIterators& end);
545
546 template <typename InputIterators, typename DupFunctor>
547 void setFromSortedTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
548
549 template <typename InputIterators>
550 void insertFromTriplets(const InputIterators& begin, const InputIterators& end);
551
552 template <typename InputIterators, typename DupFunctor>
553 void insertFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
554
555 template <typename InputIterators>
556 void insertFromSortedTriplets(const InputIterators& begin, const InputIterators& end);
557
558 template <typename InputIterators, typename DupFunctor>
559 void insertFromSortedTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
560
561 //---
562
565 Scalar& insertByOuterInner(Index j, Index i) {
566 eigen_assert(j >= 0 && j < m_outerSize && "invalid outer index");
567 eigen_assert(i >= 0 && i < m_innerSize && "invalid inner index");
568 Index start = m_outerIndex[j];
569 Index end = isCompressed() ? m_outerIndex[j + 1] : start + m_innerNonZeros[j];
570 Index dst = start == end ? end : m_data.searchLowerIndex(start, end, i);
571 if (dst == end) {
572 Index capacity = m_outerIndex[j + 1] - end;
573 if (capacity > 0) {
574 // implies uncompressed: push to back of vector
575 m_innerNonZeros[j]++;
576 m_data.index(end) = StorageIndex(i);
577 m_data.value(end) = Scalar(0);
578 return m_data.value(end);
579 }
580 }
581 eigen_assert((dst == end || m_data.index(dst) != i) &&
582 "you cannot insert an element that already exists, you must call coeffRef to this end");
583 return insertAtByOuterInner(j, i, dst);
584 }
585
588 void makeCompressed() {
589 if (isCompressed()) return;
590
591 eigen_internal_assert(m_outerIndex != 0 && m_outerSize > 0);
592
593 StorageIndex start = m_outerIndex[1];
594 m_outerIndex[1] = m_innerNonZeros[0];
595 // try to move fewer, larger contiguous chunks
596 Index copyStart = start;
597 Index copyTarget = m_innerNonZeros[0];
598 for (Index j = 1; j < m_outerSize; j++) {
599 StorageIndex end = start + m_innerNonZeros[j];
600 StorageIndex nextStart = m_outerIndex[j + 1];
601 // don't forget to move the last chunk!
602 bool breakUpCopy = (end != nextStart) || (j == m_outerSize - 1);
603 if (breakUpCopy) {
604 Index chunkSize = end - copyStart;
605 if (chunkSize > 0) m_data.moveChunk(copyStart, copyTarget, chunkSize);
606 copyStart = nextStart;
607 copyTarget += chunkSize;
608 }
609 start = nextStart;
610 m_outerIndex[j + 1] = m_outerIndex[j] + m_innerNonZeros[j];
611 }
612 m_data.resize(m_outerIndex[m_outerSize]);
613
614 // release as much memory as possible
615 internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
616 m_innerNonZeros = 0;
617 m_data.squeeze();
618 }
619
621 void uncompress() {
622 if (!isCompressed()) return;
623 m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize);
624 if (m_outerIndex[m_outerSize] == 0)
625 std::fill_n(m_innerNonZeros, m_outerSize, StorageIndex(0));
626 else
627 for (Index j = 0; j < m_outerSize; j++) m_innerNonZeros[j] = m_outerIndex[j + 1] - m_outerIndex[j];
628 }
629
631 void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision()) {
632 prune(default_prunning_func(reference, epsilon));
633 }
634
642 template <typename KeepFunc>
643 void prune(const KeepFunc& keep = KeepFunc()) {
644 StorageIndex k = 0;
645 for (Index j = 0; j < m_outerSize; ++j) {
646 StorageIndex previousStart = m_outerIndex[j];
647 if (isCompressed())
648 m_outerIndex[j] = k;
649 else
650 k = m_outerIndex[j];
651 StorageIndex end = isCompressed() ? m_outerIndex[j + 1] : previousStart + m_innerNonZeros[j];
652 for (StorageIndex i = previousStart; i < end; ++i) {
653 StorageIndex row = IsRowMajor ? StorageIndex(j) : m_data.index(i);
654 StorageIndex col = IsRowMajor ? m_data.index(i) : StorageIndex(j);
655 bool keepEntry = keep(row, col, m_data.value(i));
656 if (keepEntry) {
657 m_data.value(k) = m_data.value(i);
658 m_data.index(k) = m_data.index(i);
659 ++k;
660 } else if (!isCompressed())
661 m_innerNonZeros[j]--;
662 }
663 }
664 if (isCompressed()) {
665 m_outerIndex[m_outerSize] = k;
666 m_data.resize(k, 0);
667 }
668 }
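  // Usage sketch (illustrative only, not part of the original header): prune() keeps an entry
  // (row, col, value) only when the predicate returns true, e.g. dropping tiny values.
  //   A.prune([](const Eigen::Index&, const Eigen::Index&, const double& v) {
  //     return std::abs(v) > 1e-12;   // keep entries with magnitude above a chosen tolerance
  //   });
  //   A.prune(1.0);                   // or the reference/epsilon overload declared above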
669
678 void conservativeResize(Index rows, Index cols) {
679 // If one dimension is null, then there is nothing to be preserved
680 if (rows == 0 || cols == 0) return resize(rows, cols);
681
682 Index newOuterSize = IsRowMajor ? rows : cols;
683 Index newInnerSize = IsRowMajor ? cols : rows;
684
685 Index innerChange = newInnerSize - m_innerSize;
686 Index outerChange = newOuterSize - m_outerSize;
687
688 if (outerChange != 0) {
689 m_outerIndex = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_outerIndex, newOuterSize + 1,
690 m_outerSize + 1);
691
692 if (!isCompressed())
693 m_innerNonZeros = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_innerNonZeros,
694 newOuterSize, m_outerSize);
695
696 if (outerChange > 0) {
697 StorageIndex lastIdx = m_outerSize == 0 ? StorageIndex(0) : m_outerIndex[m_outerSize];
698 std::fill_n(m_outerIndex + m_outerSize, outerChange + 1, lastIdx);
699
700 if (!isCompressed()) std::fill_n(m_innerNonZeros + m_outerSize, outerChange, StorageIndex(0));
701 }
702 }
703 m_outerSize = newOuterSize;
704
705 if (innerChange < 0) {
706 for (Index j = 0; j < m_outerSize; j++) {
707 Index start = m_outerIndex[j];
708 Index end = isCompressed() ? m_outerIndex[j + 1] : start + m_innerNonZeros[j];
709 Index lb = m_data.searchLowerIndex(start, end, newInnerSize);
710 if (lb != end) {
711 uncompress();
712 m_innerNonZeros[j] = StorageIndex(lb - start);
713 }
714 }
715 }
716 m_innerSize = newInnerSize;
717
718 Index newSize = m_outerIndex[m_outerSize];
719 eigen_assert(newSize <= m_data.size());
720 m_data.resize(newSize);
721 }
722
730 void resize(Index rows, Index cols) {
731 const Index outerSize = IsRowMajor ? rows : cols;
732 m_innerSize = IsRowMajor ? cols : rows;
733 m_data.clear();
734
735 if ((m_outerIndex == 0) || (m_outerSize != outerSize)) {
736 m_outerIndex = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_outerIndex, outerSize + 1,
737 m_outerSize + 1);
738 m_outerSize = outerSize;
739 }
740
741 internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
742 m_innerNonZeros = 0;
743
744 std::fill_n(m_outerIndex, m_outerSize + 1, StorageIndex(0));
745 }
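  // Usage sketch (illustrative only, not part of the original header): resize() discards all
  // stored entries, while conservativeResize() keeps the entries that still fit the new shape.
  //   A.conservativeResize(A.rows() + 1, A.cols());  // grow by one row, existing values kept
  //   A.resize(5, 5);                                // A is now an empty 5x5 matrix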
746
749 void resizeNonZeros(Index size) { m_data.resize(size); }
750
753
759
761 inline SparseMatrix() : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { resize(0, 0); }
762
764 inline SparseMatrix(Index rows, Index cols) : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
765 resize(rows, cols);
766 }
767
769 template <typename OtherDerived>
770 SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
771 : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
772 EIGEN_STATIC_ASSERT(
773 (internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
774 YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
775 const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
776 if (needToTranspose)
777 *this = other.derived();
778 else {
779#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
780 EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
781#endif
782 internal::call_assignment_no_alias(*this, other.derived());
783 }
784 }
785
787 template <typename OtherDerived, unsigned int UpLo>
788 SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
789 : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
790 Base::operator=(other);
791 }
792
794 inline SparseMatrix(SparseMatrix&& other) : SparseMatrix() { this->swap(other); }
795
796 template <typename OtherDerived>
797 inline SparseMatrix(SparseCompressedBase<OtherDerived>&& other) : SparseMatrix() {
798 *this = other.derived().markAsRValue();
799 }
800
802 inline SparseMatrix(const SparseMatrix& other)
803 : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
804 *this = other.derived();
805 }
806
808 template <typename OtherDerived>
809 SparseMatrix(const ReturnByValue<OtherDerived>& other)
810 : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
811 initAssignment(other);
812 other.evalTo(*this);
813 }
814
816 template <typename OtherDerived>
817 explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
818 : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
819 *this = other.derived();
820 }
821
824 inline void swap(SparseMatrix& other) {
825 // EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
826 std::swap(m_outerIndex, other.m_outerIndex);
827 std::swap(m_innerSize, other.m_innerSize);
828 std::swap(m_outerSize, other.m_outerSize);
829 std::swap(m_innerNonZeros, other.m_innerNonZeros);
830 m_data.swap(other.m_data);
831 }
832
835 inline void setIdentity() {
836 eigen_assert(m_outerSize == m_innerSize && "ONLY FOR SQUARED MATRICES");
837 internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
838 m_innerNonZeros = 0;
839 m_data.resize(m_outerSize);
840 // is it necessary to squeeze?
841 m_data.squeeze();
842 std::iota(m_outerIndex, m_outerIndex + m_outerSize + 1, StorageIndex(0));
843 std::iota(innerIndexPtr(), innerIndexPtr() + m_outerSize, StorageIndex(0));
844 std::fill_n(valuePtr(), m_outerSize, Scalar(1));
845 }
846
847 inline SparseMatrix& operator=(const SparseMatrix& other) {
848 if (other.isRValue()) {
849 swap(other.const_cast_derived());
850 } else if (this != &other) {
851#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
852 EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
853#endif
854 initAssignment(other);
855 if (other.isCompressed()) {
856 internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
857 m_data = other.m_data;
858 } else {
859 Base::operator=(other);
860 }
861 }
862 return *this;
863 }
864
865 inline SparseMatrix& operator=(SparseMatrix&& other) {
866 this->swap(other);
867 return *this;
868 }
869
870#ifndef EIGEN_PARSED_BY_DOXYGEN
871 template <typename OtherDerived>
872 inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other) {
873 return Base::operator=(other.derived());
874 }
875
876 template <typename Lhs, typename Rhs>
877 inline SparseMatrix& operator=(const Product<Lhs, Rhs, AliasFreeProduct>& other);
878#endif // EIGEN_PARSED_BY_DOXYGEN
879
880 template <typename OtherDerived>
881 EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
882
883 template <typename OtherDerived>
884 inline SparseMatrix& operator=(SparseCompressedBase<OtherDerived>&& other) {
885 *this = other.derived().markAsRValue();
886 return *this;
887 }
888
889#ifndef EIGEN_NO_IO
890 friend std::ostream& operator<<(std::ostream& s, const SparseMatrix& m) {
891 EIGEN_DBG_SPARSE(
892 s << "Nonzero entries:\n"; if (m.isCompressed()) {
893 for (Index i = 0; i < m.nonZeros(); ++i) s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
894 } else {
895 for (Index i = 0; i < m.outerSize(); ++i) {
896 Index p = m.m_outerIndex[i];
897 Index pe = m.m_outerIndex[i] + m.m_innerNonZeros[i];
898 Index k = p;
899 for (; k < pe; ++k) {
900 s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
901 }
902 for (; k < m.m_outerIndex[i + 1]; ++k) {
903 s << "(_,_) ";
904 }
905 }
906 } s << std::endl;
907 s << std::endl; s << "Outer pointers:\n";
908 for (Index i = 0; i < m.outerSize(); ++i) { s << m.m_outerIndex[i] << " "; } s << " $" << std::endl;
909 if (!m.isCompressed()) {
910 s << "Inner non zeros:\n";
911 for (Index i = 0; i < m.outerSize(); ++i) {
912 s << m.m_innerNonZeros[i] << " ";
913 }
914 s << " $" << std::endl;
915 } s
916 << std::endl;);
917 s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
918 return s;
919 }
920#endif
921
923 inline ~SparseMatrix() {
924 internal::conditional_aligned_delete_auto<StorageIndex, true>(m_outerIndex, m_outerSize + 1);
925 internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
926 }
927
929 Scalar sum() const;
930
931#ifdef EIGEN_SPARSEMATRIX_PLUGIN
932#include EIGEN_SPARSEMATRIX_PLUGIN
933#endif
934
935 protected:
936 template <typename Other>
937 void initAssignment(const Other& other) {
938 resize(other.rows(), other.cols());
939 internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
940 m_innerNonZeros = 0;
941 }
942
945 EIGEN_DEPRECATED EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
946
949 class SingletonVector {
950 StorageIndex m_index;
951 StorageIndex m_value;
952
953 public:
954 typedef StorageIndex value_type;
955 SingletonVector(Index i, Index v) : m_index(convert_index(i)), m_value(convert_index(v)) {}
956
957 StorageIndex operator[](Index i) const { return i == m_index ? m_value : 0; }
958 };
959
962 EIGEN_DEPRECATED EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
963
964 public:
967 EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col) {
968 const Index outer = IsRowMajor ? row : col;
969 const Index inner = IsRowMajor ? col : row;
970
971 eigen_assert(!isCompressed());
972 eigen_assert(m_innerNonZeros[outer] <= (m_outerIndex[outer + 1] - m_outerIndex[outer]));
973
974 Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
975 m_data.index(p) = StorageIndex(inner);
976 m_data.value(p) = Scalar(0);
977 return m_data.value(p);
978 }
979
980 protected:
981 struct IndexPosPair {
982 IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
983 Index i;
984 Index p;
985 };
986
996 template <typename DiagXpr, typename Func>
997 void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc) {
998 constexpr StorageIndex kEmptyIndexVal(-1);
999 typedef typename ScalarVector::AlignedMapType ValueMap;
1000
1001 Index n = diagXpr.size();
1002
1003 const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar, Scalar>>::value;
1004 if (overwrite) {
1005 if ((m_outerSize != n) || (m_innerSize != n)) resize(n, n);
1006 }
1007
1008 if (m_data.size() == 0 || overwrite) {
1009 internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
1010 m_innerNonZeros = 0;
1011 resizeNonZeros(n);
1012 ValueMap valueMap(valuePtr(), n);
1013 std::iota(m_outerIndex, m_outerIndex + n + 1, StorageIndex(0));
1014 std::iota(innerIndexPtr(), innerIndexPtr() + n, StorageIndex(0));
1015 valueMap.setZero();
1016 internal::call_assignment_no_alias(valueMap, diagXpr, assignFunc);
1017 } else {
1018 internal::evaluator<DiagXpr> diaEval(diagXpr);
1019
1020 ei_declare_aligned_stack_constructed_variable(StorageIndex, tmp, n, 0);
1021 typename IndexVector::AlignedMapType insertionLocations(tmp, n);
1022 insertionLocations.setConstant(kEmptyIndexVal);
1023
1024 Index deferredInsertions = 0;
1025 Index shift = 0;
1026
1027 for (Index j = 0; j < n; j++) {
1028 Index begin = m_outerIndex[j];
1029 Index end = isCompressed() ? m_outerIndex[j + 1] : begin + m_innerNonZeros[j];
1030 Index capacity = m_outerIndex[j + 1] - end;
1031 Index dst = m_data.searchLowerIndex(begin, end, j);
1032 // the entry exists: update it now
1033 if (dst != end && m_data.index(dst) == StorageIndex(j))
1034 assignFunc.assignCoeff(m_data.value(dst), diaEval.coeff(j));
1035 // the entry belongs at the back of the vector: push to back
1036 else if (dst == end && capacity > 0)
1037 assignFunc.assignCoeff(insertBackUncompressed(j, j), diaEval.coeff(j));
1038 // the insertion requires a data move, record insertion location and handle in second pass
1039 else {
1040 insertionLocations.coeffRef(j) = StorageIndex(dst);
1041 deferredInsertions++;
1042 // if there is no capacity, all vectors to the right of this are shifted
1043 if (capacity == 0) shift++;
1044 }
1045 }
1046
1047 if (deferredInsertions > 0) {
1048 m_data.resize(m_data.size() + shift);
1049 Index copyEnd = isCompressed() ? m_outerIndex[m_outerSize]
1050 : m_outerIndex[m_outerSize - 1] + m_innerNonZeros[m_outerSize - 1];
1051 for (Index j = m_outerSize - 1; deferredInsertions > 0; j--) {
1052 Index begin = m_outerIndex[j];
1053 Index end = isCompressed() ? m_outerIndex[j + 1] : begin + m_innerNonZeros[j];
1054 Index capacity = m_outerIndex[j + 1] - end;
1055
1056 bool doInsertion = insertionLocations(j) >= 0;
1057 bool breakUpCopy = doInsertion && (capacity > 0);
1058 // break up copy for sorted insertion into inactive nonzeros
1059 // optionally, add another criterion, e.g. 'breakUpCopy || (capacity > threshold)'
1060 // where `threshold >= 0` to skip inactive nonzeros in each vector
1061 // this reduces the total number of copied elements, but requires more moveChunk calls
1062 if (breakUpCopy) {
1063 Index copyBegin = m_outerIndex[j + 1];
1064 Index to = copyBegin + shift;
1065 Index chunkSize = copyEnd - copyBegin;
1066 m_data.moveChunk(copyBegin, to, chunkSize);
1067 copyEnd = end;
1068 }
1069
1070 m_outerIndex[j + 1] += shift;
1071
1072 if (doInsertion) {
1073 // if there is capacity, shift into the inactive nonzeros
1074 if (capacity > 0) shift++;
1075 Index copyBegin = insertionLocations(j);
1076 Index to = copyBegin + shift;
1077 Index chunkSize = copyEnd - copyBegin;
1078 m_data.moveChunk(copyBegin, to, chunkSize);
1079 Index dst = to - 1;
1080 m_data.index(dst) = StorageIndex(j);
1081 m_data.value(dst) = Scalar(0);
1082 assignFunc.assignCoeff(m_data.value(dst), diaEval.coeff(j));
1083 if (!isCompressed()) m_innerNonZeros[j]++;
1084 shift--;
1085 deferredInsertions--;
1086 copyEnd = copyBegin;
1087 }
1088 }
1089 }
1090 eigen_assert((shift == 0) && (deferredInsertions == 0));
1091 }
1092 }
1093
1094 /* These functions are used to avoid a redundant binary search operation in functions such as coeffRef() and assume
1095 * `dst` is the appropriate sorted insertion point */
1096 EIGEN_STRONG_INLINE Scalar& insertAtByOuterInner(Index outer, Index inner, Index dst);
1097 Scalar& insertCompressedAtByOuterInner(Index outer, Index inner, Index dst);
1098 Scalar& insertUncompressedAtByOuterInner(Index outer, Index inner, Index dst);
1099
1100 private:
1101 EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned, THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE)
1102 EIGEN_STATIC_ASSERT((Options & (ColMajor | RowMajor)) == Options, INVALID_MATRIX_TEMPLATE_PARAMETERS)
1103
1104 struct default_prunning_func {
1105 default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
1106 inline bool operator()(const Index&, const Index&, const Scalar& value) const {
1107 return !internal::isMuchSmallerThan(value, reference, epsilon);
1108 }
1109 Scalar reference;
1110 RealScalar epsilon;
1111 };
1112};
1113
1114namespace internal {
1115
1116// Creates a compressed sparse matrix from a range of unsorted triplets
1117// Requires temporary storage to handle duplicate entries
1118template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
1119void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
1120 DupFunctor dup_func) {
1121 constexpr bool IsRowMajor = SparseMatrixType::IsRowMajor;
1122 using StorageIndex = typename SparseMatrixType::StorageIndex;
1123 using IndexMap = typename VectorX<StorageIndex>::AlignedMapType;
1124 using TransposedSparseMatrix =
1125 SparseMatrix<typename SparseMatrixType::Scalar, IsRowMajor ? ColMajor : RowMajor, StorageIndex>;
1126
1127 if (begin == end) return;
1128
1129 // There are two strategies to consider for constructing a matrix from unordered triplets:
1130 // A) construct the 'mat' in its native storage order and sort in-place (less memory); or,
1131 // B) construct the transposed matrix and use an implicit sort upon assignment to `mat` (less time).
1132 // This routine uses B) for faster execution time.
1133 TransposedSparseMatrix trmat(mat.rows(), mat.cols());
1134
1135 // scan triplets to determine allocation size before constructing matrix
1136 Index nonZeros = 0;
1137 for (InputIterator it(begin); it != end; ++it) {
1138 eigen_assert(it->row() >= 0 && it->row() < mat.rows() && it->col() >= 0 && it->col() < mat.cols());
1139 StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
1140 if (nonZeros == NumTraits<StorageIndex>::highest()) internal::throw_std_bad_alloc();
1141 trmat.outerIndexPtr()[j + 1]++;
1142 nonZeros++;
1143 }
1144
1145 std::partial_sum(trmat.outerIndexPtr(), trmat.outerIndexPtr() + trmat.outerSize() + 1, trmat.outerIndexPtr());
1146 eigen_assert(nonZeros == trmat.outerIndexPtr()[trmat.outerSize()]);
1147 trmat.resizeNonZeros(nonZeros);
1148
1149 // construct temporary array to track insertions (outersize) and collapse duplicates (innersize)
1150 ei_declare_aligned_stack_constructed_variable(StorageIndex, tmp, numext::maxi(mat.innerSize(), mat.outerSize()), 0);
1151 smart_copy(trmat.outerIndexPtr(), trmat.outerIndexPtr() + trmat.outerSize(), tmp);
1152
1153 // push triplets to back of each vector
1154 for (InputIterator it(begin); it != end; ++it) {
1155 StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
1156 StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
1157 StorageIndex k = tmp[j];
1158 trmat.data().index(k) = i;
1159 trmat.data().value(k) = it->value();
1160 tmp[j]++;
1161 }
1162
1163 IndexMap wi(tmp, trmat.innerSize());
1164 trmat.collapseDuplicates(wi, dup_func);
1165 // implicit sorting
1166 mat = trmat;
1167}
1168
1169// Creates a compressed sparse matrix from a sorted range of triplets
1170template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
1171void set_from_triplets_sorted(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
1172 DupFunctor dup_func) {
1173 constexpr bool IsRowMajor = SparseMatrixType::IsRowMajor;
1174 using StorageIndex = typename SparseMatrixType::StorageIndex;
1175
1176 if (begin == end) return;
1177
1178 constexpr StorageIndex kEmptyIndexValue(-1);
1179 // deallocate inner nonzeros if present and zero outerIndexPtr
1180 mat.resize(mat.rows(), mat.cols());
1181 // use outer indices to count non zero entries (excluding duplicate entries)
1182 StorageIndex previous_j = kEmptyIndexValue;
1183 StorageIndex previous_i = kEmptyIndexValue;
1184 // scan triplets to determine allocation size before constructing matrix
1185 Index nonZeros = 0;
1186 for (InputIterator it(begin); it != end; ++it) {
1187 eigen_assert(it->row() >= 0 && it->row() < mat.rows() && it->col() >= 0 && it->col() < mat.cols());
1188 StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
1189 StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
1190 eigen_assert(j > previous_j || (j == previous_j && i >= previous_i));
1191 // identify duplicates by examining previous location
1192 bool duplicate = (previous_j == j) && (previous_i == i);
1193 if (!duplicate) {
1194 if (nonZeros == NumTraits<StorageIndex>::highest()) internal::throw_std_bad_alloc();
1195 nonZeros++;
1196 mat.outerIndexPtr()[j + 1]++;
1197 previous_j = j;
1198 previous_i = i;
1199 }
1200 }
1201
1202 // finalize outer indices and allocate memory
1203 std::partial_sum(mat.outerIndexPtr(), mat.outerIndexPtr() + mat.outerSize() + 1, mat.outerIndexPtr());
1204 eigen_assert(nonZeros == mat.outerIndexPtr()[mat.outerSize()]);
1205 mat.resizeNonZeros(nonZeros);
1206
1207 previous_i = kEmptyIndexValue;
1208 previous_j = kEmptyIndexValue;
1209 Index back = 0;
1210 for (InputIterator it(begin); it != end; ++it) {
1211 StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
1212 StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
1213 bool duplicate = (previous_j == j) && (previous_i == i);
1214 if (duplicate) {
1215 mat.data().value(back - 1) = dup_func(mat.data().value(back - 1), it->value());
1216 } else {
1217 // push triplets to back
1218 mat.data().index(back) = i;
1219 mat.data().value(back) = it->value();
1220 previous_j = j;
1221 previous_i = i;
1222 back++;
1223 }
1224 }
1225 eigen_assert(back == nonZeros);
1226 // matrix is finalized
1227}
1228
1229 // thin wrapper around a generic binary functor to use the sparse disjunction evaluator instead of the default
1230 // "arithmetic" evaluator
1231template <typename DupFunctor, typename LhsScalar, typename RhsScalar = LhsScalar>
1232struct scalar_disjunction_op {
1233 using result_type = typename result_of<DupFunctor(LhsScalar, RhsScalar)>::type;
1234 scalar_disjunction_op(const DupFunctor& op) : m_functor(op) {}
1235 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator()(const LhsScalar& a, const RhsScalar& b) const {
1236 return m_functor(a, b);
1237 }
1238 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const DupFunctor& functor() const { return m_functor; }
1239 const DupFunctor& m_functor;
1240};
1241
1242template <typename DupFunctor, typename LhsScalar, typename RhsScalar>
1243struct functor_traits<scalar_disjunction_op<DupFunctor, LhsScalar, RhsScalar>> : public functor_traits<DupFunctor> {};
1244
1245// Creates a compressed sparse matrix from its existing entries and those from an unsorted range of triplets
1246template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
1247void insert_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
1248 DupFunctor dup_func) {
1249 using Scalar = typename SparseMatrixType::Scalar;
1250 using SrcXprType =
1251 CwiseBinaryOp<scalar_disjunction_op<DupFunctor, Scalar>, const SparseMatrixType, const SparseMatrixType>;
1252
1253 // set_from_triplets is necessary to sort the inner indices and remove the duplicate entries
1254 SparseMatrixType trips(mat.rows(), mat.cols());
1255 set_from_triplets(begin, end, trips, dup_func);
1256
1257 SrcXprType src = mat.binaryExpr(trips, scalar_disjunction_op<DupFunctor, Scalar>(dup_func));
1258 // the sparse assignment procedure creates a temporary matrix and swaps the final result
1259 assign_sparse_to_sparse<SparseMatrixType, SrcXprType>(mat, src);
1260}
1261
1262 // Creates a compressed sparse matrix from its existing entries and those from a sorted range of triplets
1263template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
1264void insert_from_triplets_sorted(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
1265 DupFunctor dup_func) {
1266 using Scalar = typename SparseMatrixType::Scalar;
1267 using SrcXprType =
1268 CwiseBinaryOp<scalar_disjunction_op<DupFunctor, Scalar>, const SparseMatrixType, const SparseMatrixType>;
1269
1270 // TODO: process triplets without making a copy
1271 SparseMatrixType trips(mat.rows(), mat.cols());
1272 set_from_triplets_sorted(begin, end, trips, dup_func);
1273
1274 SrcXprType src = mat.binaryExpr(trips, scalar_disjunction_op<DupFunctor, Scalar>(dup_func));
1275 // the sparse assignment procedure creates a temporary matrix and swaps the final result
1276 assign_sparse_to_sparse<SparseMatrixType, SrcXprType>(mat, src);
1277}
1278
1279} // namespace internal
1280
1318template <typename Scalar, int Options_, typename StorageIndex_>
1319template <typename InputIterators>
1320 void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromTriplets(const InputIterators& begin,
1321 const InputIterators& end) {
1322 internal::set_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
1323 begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
1324}
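// Usage sketch (illustrative only, not part of the original header): setFromTriplets() builds
// the compressed matrix in one shot from an unsorted triplet range, summing duplicates by default.
//   std::vector<Eigen::Triplet<double>> triplets;
//   triplets.emplace_back(0, 0, 1.0);
//   triplets.emplace_back(2, 1, -3.5);
//   triplets.emplace_back(0, 0, 2.0);                     // duplicate of (0,0): values are summed
//   Eigen::SparseMatrix<double> A(3, 3);
//   A.setFromTriplets(triplets.begin(), triplets.end());  // A.coeff(0, 0) == 3.0 afterwards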
1325
1335template <typename Scalar, int Options_, typename StorageIndex_>
1336template <typename InputIterators, typename DupFunctor>
1337 void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromTriplets(const InputIterators& begin,
1338 const InputIterators& end, DupFunctor dup_func) {
1339 internal::set_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
1340 begin, end, *this, dup_func);
1341}
1342
1347template <typename Scalar, int Options_, typename StorageIndex_>
1348template <typename InputIterators>
1349 void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromSortedTriplets(const InputIterators& begin,
1350 const InputIterators& end) {
1351 internal::set_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
1352 begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
1353}
1354
1364template <typename Scalar, int Options_, typename StorageIndex_>
1365template <typename InputIterators, typename DupFunctor>
1366 void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromSortedTriplets(const InputIterators& begin,
1367 const InputIterators& end,
1368 DupFunctor dup_func) {
1369 internal::set_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
1370 begin, end, *this, dup_func);
1371}
1372
1411template <typename Scalar, int Options_, typename StorageIndex_>
1412template <typename InputIterators>
1413 void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromTriplets(const InputIterators& begin,
1414 const InputIterators& end) {
1415 internal::insert_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
1416 begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
1417}
1418
1428template <typename Scalar, int Options_, typename StorageIndex_>
1429template <typename InputIterators, typename DupFunctor>
1430 void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromTriplets(const InputIterators& begin,
1431 const InputIterators& end, DupFunctor dup_func) {
1432 internal::insert_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
1433 begin, end, *this, dup_func);
1434}
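// Usage sketch (illustrative only, not part of the original header): unlike setFromTriplets(),
// insertFromTriplets() merges new triplets into the entries the matrix already holds, combining
// collisions with the duplicate functor (summation by default).
//   A.insertFromTriplets(extra.begin(), extra.end());   // extra: hypothetical triplet container
//   A.insertFromTriplets(extra.begin(), extra.end(),
//                        [](double a, double b) { return std::max(a, b); });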
1435
1440template <typename Scalar, int Options_, typename StorageIndex_>
1441template <typename InputIterators>
1442 void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromSortedTriplets(const InputIterators& begin,
1443 const InputIterators& end) {
1444 internal::insert_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
1445 begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
1446}
1447
1457template <typename Scalar, int Options_, typename StorageIndex_>
1458template <typename InputIterators, typename DupFunctor>
1459 void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromSortedTriplets(const InputIterators& begin,
1460 const InputIterators& end,
1461 DupFunctor dup_func) {
1462 internal::insert_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
1463 begin, end, *this, dup_func);
1464}
1465
1467template <typename Scalar_, int Options_, typename StorageIndex_>
1468template <typename Derived, typename DupFunctor>
1469 void SparseMatrix<Scalar_, Options_, StorageIndex_>::collapseDuplicates(DenseBase<Derived>& wi, DupFunctor dup_func) {
1470 // removes duplicate entries and compresses the matrix
1471 // the excess allocated memory is not released
1472 // the inner indices do not need to be sorted, nor is the matrix returned in a sorted state
1473 eigen_assert(wi.size() == m_innerSize);
1474 constexpr StorageIndex kEmptyIndexValue(-1);
1475 wi.setConstant(kEmptyIndexValue);
1476 StorageIndex count = 0;
1477 const bool is_compressed = isCompressed();
1478 // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
1479 for (Index j = 0; j < m_outerSize; ++j) {
1480 const StorageIndex newBegin = count;
1481 const StorageIndex end = is_compressed ? m_outerIndex[j + 1] : m_outerIndex[j] + m_innerNonZeros[j];
1482 for (StorageIndex k = m_outerIndex[j]; k < end; ++k) {
1483 StorageIndex i = m_data.index(k);
1484 if (wi(i) >= newBegin) {
1485 // entry at k is a duplicate
1486 // accumulate it into the primary entry located at wi(i)
1487 m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
1488 } else {
1489 // k is the primary entry in j with inner index i
1490 // shift it to the left and record its location at wi(i)
1491 m_data.index(count) = i;
1492 m_data.value(count) = m_data.value(k);
1493 wi(i) = count;
1494 ++count;
1495 }
1496 }
1497 m_outerIndex[j] = newBegin;
1498 }
1499 m_outerIndex[m_outerSize] = count;
1500 m_data.resize(count);
1501
1502 // turn the matrix into compressed form (if it is not already)
1503 internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
1504 m_innerNonZeros = 0;
1505}
1506
1508template <typename Scalar, int Options_, typename StorageIndex_>
1509template <typename OtherDerived>
1510EIGEN_DONT_INLINE SparseMatrix<Scalar, Options_, StorageIndex_>&
1511SparseMatrix<Scalar, Options_, StorageIndex_>::operator=(const SparseMatrixBase<OtherDerived>& other) {
1512 EIGEN_STATIC_ASSERT(
1513 (internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
1514 YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
1515
1516#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1517 EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1518#endif
1519
1520 const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
1521 if (needToTranspose) {
1522#ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1523 EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1524#endif
1525 // two-pass algorithm:
1526 // 1 - compute the number of coeffs per dest inner vector
1527 // 2 - do the actual copy/eval
1528 // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
1529 typedef
1530 typename internal::nested_eval<OtherDerived, 2, typename internal::plain_matrix_type<OtherDerived>::type>::type
1531 OtherCopy;
1532 typedef internal::remove_all_t<OtherCopy> OtherCopy_;
1533 typedef internal::evaluator<OtherCopy_> OtherCopyEval;
1534 OtherCopy otherCopy(other.derived());
1535 OtherCopyEval otherCopyEval(otherCopy);
1536
1537 SparseMatrix dest(other.rows(), other.cols());
1538 Eigen::Map<IndexVector>(dest.m_outerIndex, dest.outerSize()).setZero();
1539
1540 // pass 1
1541 // FIXME the above copy could be merged with that pass
1542 for (Index j = 0; j < otherCopy.outerSize(); ++j)
1543 for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it) ++dest.m_outerIndex[it.index()];
1544
1545 // prefix sum
1546 StorageIndex count = 0;
1547 IndexVector positions(dest.outerSize());
1548 for (Index j = 0; j < dest.outerSize(); ++j) {
1549 StorageIndex tmp = dest.m_outerIndex[j];
1550 dest.m_outerIndex[j] = count;
1551 positions[j] = count;
1552 count += tmp;
1553 }
1554 dest.m_outerIndex[dest.outerSize()] = count;
1555 // alloc
1556 dest.m_data.resize(count);
1557 // pass 2
1558 for (StorageIndex j = 0; j < otherCopy.outerSize(); ++j) {
1559 for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it) {
1560 Index pos = positions[it.index()]++;
1561 dest.m_data.index(pos) = j;
1562 dest.m_data.value(pos) = it.value();
1563 }
1564 }
1565 this->swap(dest);
1566 return *this;
1567 } else {
1568 if (other.isRValue()) {
1569 initAssignment(other.derived());
1570 }
1571 // there is no special optimization
1572 return Base::operator=(other.derived());
1573 }
1574}
1575
1576template <typename Scalar_, int Options_, typename StorageIndex_>
1577inline typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
1578 SparseMatrix<Scalar_, Options_, StorageIndex_>::insert(Index row, Index col) {
1579 return insertByOuterInner(IsRowMajor ? row : col, IsRowMajor ? col : row);
1580}
1581
1582template <typename Scalar_, int Options_, typename StorageIndex_>
1583EIGEN_STRONG_INLINE typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
1584 SparseMatrix<Scalar_, Options_, StorageIndex_>::insertAtByOuterInner(Index outer, Index inner, Index dst) {
1585 // random insertion into a compressed matrix is very slow
1586 uncompress();
1587 return insertUncompressedAtByOuterInner(outer, inner, dst);
1588}
1589
1590template <typename Scalar_, int Options_, typename StorageIndex_>
1591EIGEN_DEPRECATED EIGEN_DONT_INLINE typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
1592SparseMatrix<Scalar_, Options_, StorageIndex_>::insertUncompressed(Index row, Index col) {
1593 eigen_assert(!isCompressed());
1594 Index outer = IsRowMajor ? row : col;
1595 Index inner = IsRowMajor ? col : row;
1596 Index start = m_outerIndex[outer];
1597 Index end = start + m_innerNonZeros[outer];
1598 Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
1599 if (dst == end) {
1600 Index capacity = m_outerIndex[outer + 1] - end;
1601 if (capacity > 0) {
1602 // implies uncompressed: push to back of vector
1603 m_innerNonZeros[outer]++;
1604 m_data.index(end) = StorageIndex(inner);
1605 m_data.value(end) = Scalar(0);
1606 return m_data.value(end);
1607 }
1608 }
1609 eigen_assert((dst == end || m_data.index(dst) != inner) &&
1610 "you cannot insert an element that already exists, you must call coeffRef to this end");
1611 return insertUncompressedAtByOuterInner(outer, inner, dst);
1612}
1613
1614template <typename Scalar_, int Options_, typename StorageIndex_>
1615EIGEN_DEPRECATED EIGEN_DONT_INLINE typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
1616SparseMatrix<Scalar_, Options_, StorageIndex_>::insertCompressed(Index row, Index col) {
1617 eigen_assert(isCompressed());
1618 Index outer = IsRowMajor ? row : col;
1619 Index inner = IsRowMajor ? col : row;
1620 Index start = m_outerIndex[outer];
1621 Index end = m_outerIndex[outer + 1];
1622 Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
1623 eigen_assert((dst == end || m_data.index(dst) != inner) &&
1624 "you cannot insert an element that already exists, you must call coeffRef to this end");
1625 return insertCompressedAtByOuterInner(outer, inner, dst);
1626}
1627
1628template <typename Scalar_, int Options_, typename StorageIndex_>
1629typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
1630SparseMatrix<Scalar_, Options_, StorageIndex_>::insertCompressedAtByOuterInner(Index outer, Index inner, Index dst) {
1631 eigen_assert(isCompressed());
1632 // compressed insertion always requires expanding the buffer
1633 // first, check if there is adequate allocated memory
1634 if (m_data.allocatedSize() <= m_data.size()) {
1635 // if there is no capacity for a single insertion, double the capacity
1636 // increase capacity by a minimum of 32
1637 Index minReserve = 32;
1638 Index reserveSize = numext::maxi(minReserve, m_data.allocatedSize());
1639 m_data.reserve(reserveSize);
1640 }
1641 m_data.resize(m_data.size() + 1);
1642 Index chunkSize = m_outerIndex[m_outerSize] - dst;
1643 // shift the existing data to the right if necessary
1644 m_data.moveChunk(dst, dst + 1, chunkSize);
1645 // update nonzero counts
1646 // potentially O(outerSize) bottleneck!
1647 for (Index j = outer; j < m_outerSize; j++) m_outerIndex[j + 1]++;
1648 // initialize the coefficient
1649 m_data.index(dst) = StorageIndex(inner);
1650 m_data.value(dst) = Scalar(0);
1651 // return a reference to the coefficient
1652 return m_data.value(dst);
1653}
1654
1655template <typename Scalar_, int Options_, typename StorageIndex_>
1656typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
1657SparseMatrix<Scalar_, Options_, StorageIndex_>::insertUncompressedAtByOuterInner(Index outer, Index inner, Index dst) {
1658 eigen_assert(!isCompressed());
1659 // find a vector with capacity, starting at `outer` and searching to the left and right
1660 for (Index leftTarget = outer - 1, rightTarget = outer; (leftTarget >= 0) || (rightTarget < m_outerSize);) {
1661 if (rightTarget < m_outerSize) {
1662 Index start = m_outerIndex[rightTarget];
1663 Index end = start + m_innerNonZeros[rightTarget];
1664 Index nextStart = m_outerIndex[rightTarget + 1];
1665 Index capacity = nextStart - end;
1666 if (capacity > 0) {
1667 // move [dst, end) to dst+1 and insert at dst
1668 Index chunkSize = end - dst;
1669 if (chunkSize > 0) m_data.moveChunk(dst, dst + 1, chunkSize);
1670 m_innerNonZeros[outer]++;
1671 for (Index j = outer; j < rightTarget; j++) m_outerIndex[j + 1]++;
1672 m_data.index(dst) = StorageIndex(inner);
1673 m_data.value(dst) = Scalar(0);
1674 return m_data.value(dst);
1675 }
1676 rightTarget++;
1677 }
1678 if (leftTarget >= 0) {
1679 Index start = m_outerIndex[leftTarget];
1680 Index end = start + m_innerNonZeros[leftTarget];
1681 Index nextStart = m_outerIndex[leftTarget + 1];
1682 Index capacity = nextStart - end;
1683 if (capacity > 0) {
1684 // tricky: dst is a lower bound, so we must insert at dst-1 when shifting left
1685 // move [nextStart, dst) to nextStart-1 and insert at dst-1
1686 Index chunkSize = dst - nextStart;
1687 if (chunkSize > 0) m_data.moveChunk(nextStart, nextStart - 1, chunkSize);
1688 m_innerNonZeros[outer]++;
1689 for (Index j = leftTarget; j < outer; j++) m_outerIndex[j + 1]--;
1690 m_data.index(dst - 1) = StorageIndex(inner);
1691 m_data.value(dst - 1) = Scalar(0);
1692 return m_data.value(dst - 1);
1693 }
1694 leftTarget--;
1695 }
1696 }
1697
1698 // no room for interior insertion
1699 // nonZeros() == m_data.size()
1700 // record offset as outerIndexPtr will change
1701 Index dst_offset = dst - m_outerIndex[outer];
1702 // allocate space for random insertion
1703 if (m_data.allocatedSize() == 0) {
1704 // fast method to allocate space for one element per vector in empty matrix
1705 m_data.resize(m_outerSize);
1706 std::iota(m_outerIndex, m_outerIndex + m_outerSize + 1, StorageIndex(0));
1707 } else {
1708 // check for integer overflow: if maxReserveSize == 0, insertion is not possible
1709 Index maxReserveSize = static_cast<Index>(NumTraits<StorageIndex>::highest()) - m_data.allocatedSize();
1710 eigen_assert(maxReserveSize > 0);
1711 if (m_outerSize <= maxReserveSize) {
1712 // allocate space for one additional element per vector
1713 reserveInnerVectors(IndexVector::Constant(m_outerSize, 1));
1714 } else {
1715 // handle the edge case where StorageIndex is insufficient to reserve outerSize additional elements
1716 // allocate space for one additional element in the interval [outer,maxReserveSize)
1717 typedef internal::sparse_reserve_op<StorageIndex> ReserveSizesOp;
1718 typedef CwiseNullaryOp<ReserveSizesOp, IndexVector> ReserveSizesXpr;
1719 ReserveSizesXpr reserveSizesXpr(m_outerSize, 1, ReserveSizesOp(outer, m_outerSize, maxReserveSize));
1720 reserveInnerVectors(reserveSizesXpr);
1721 }
1722 }
1723 // insert element at `dst` with new outer indices
1724 Index start = m_outerIndex[outer];
1725 Index end = start + m_innerNonZeros[outer];
1726 Index new_dst = start + dst_offset;
1727 Index chunkSize = end - new_dst;
1728 if (chunkSize > 0) m_data.moveChunk(new_dst, new_dst + 1, chunkSize);
1729 m_innerNonZeros[outer]++;
1730 m_data.index(new_dst) = StorageIndex(inner);
1731 m_data.value(new_dst) = Scalar(0);
1732 return m_data.value(new_dst);
1733}
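// Illustrative usage sketch (editorial addition, not part of the library):
// random-order insertion into an uncompressed matrix, the case handled by
// insertUncompressedAtByOuterInner() above. Reserving per-vector capacities
// switches the matrix to uncompressed storage, so a new entry can usually be
// placed in local slack (or slack borrowed from a neighbouring vector) instead
// of shifting the whole buffer. Sizes and coordinates are arbitrary example values.
//
//   #include <Eigen/SparseCore>
//
//   Eigen::SparseMatrix<double> A(100, 100);
//   A.reserve(Eigen::VectorXi::Constant(100, 6));  // uncompressed, ~6 slots per column
//   A.insert(42, 3) = 1.0;                         // fills local capacity of column 3
//   A.insert(7, 3)  = 2.0;                         // same column, still local
//   A.makeCompressed();                            // squeeze out unused capacity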
1734
1735namespace internal {
1736
1737template <typename Scalar_, int Options_, typename StorageIndex_>
1738struct evaluator<SparseMatrix<Scalar_, Options_, StorageIndex_>>
1739 : evaluator<SparseCompressedBase<SparseMatrix<Scalar_, Options_, StorageIndex_>>> {
1740 typedef evaluator<SparseCompressedBase<SparseMatrix<Scalar_, Options_, StorageIndex_>>> Base;
1741 typedef SparseMatrix<Scalar_, Options_, StorageIndex_> SparseMatrixType;
1742 evaluator() : Base() {}
1743 explicit evaluator(const SparseMatrixType& mat) : Base(mat) {}
1744};
1745
1746} // namespace internal
1747
1748// Specialization for SparseMatrix.
1749// Serializes [rows, cols, isCompressed, outerSize, innerBufferSize,
1750// innerNonZeros, outerIndices, innerIndices, values].
1751template <typename Scalar, int Options, typename StorageIndex>
1752class Serializer<SparseMatrix<Scalar, Options, StorageIndex>, void> {
1753 public:
1754 typedef SparseMatrix<Scalar, Options, StorageIndex> SparseMat;
1755
1756 struct Header {
1757 typename SparseMat::Index rows;
1758 typename SparseMat::Index cols;
1759 bool compressed;
1760 Index outer_size;
1761 Index inner_buffer_size;
1762 };
1763
1764 EIGEN_DEVICE_FUNC size_t size(const SparseMat& value) const {
1765 // innerNonZeros.
1766 std::size_t num_storage_indices = value.isCompressed() ? 0 : value.outerSize();
1767 // Outer indices.
1768 num_storage_indices += value.outerSize() + 1;
1769 // Inner indices.
1770 const StorageIndex inner_buffer_size = value.outerIndexPtr()[value.outerSize()];
1771 num_storage_indices += inner_buffer_size;
1772 // Values.
1773 std::size_t num_values = inner_buffer_size;
1774 return sizeof(Header) + sizeof(Scalar) * num_values + sizeof(StorageIndex) * num_storage_indices;
1775 }
1776
1777 EIGEN_DEVICE_FUNC uint8_t* serialize(uint8_t* dest, uint8_t* end, const SparseMat& value) {
1778 if (EIGEN_PREDICT_FALSE(dest == nullptr)) return nullptr;
1779 if (EIGEN_PREDICT_FALSE(dest + size(value) > end)) return nullptr;
1780
1781 const size_t header_bytes = sizeof(Header);
1782 Header header = {value.rows(), value.cols(), value.isCompressed(), value.outerSize(),
1783 value.outerIndexPtr()[value.outerSize()]};
1784 EIGEN_USING_STD(memcpy)
1785 memcpy(dest, &header, header_bytes);
1786 dest += header_bytes;
1787
1788 // innerNonZeros.
1789 if (!header.compressed) {
1790 std::size_t data_bytes = sizeof(StorageIndex) * header.outer_size;
1791 memcpy(dest, value.innerNonZeroPtr(), data_bytes);
1792 dest += data_bytes;
1793 }
1794
1795 // Outer indices.
1796 std::size_t data_bytes = sizeof(StorageIndex) * (header.outer_size + 1);
1797 memcpy(dest, value.outerIndexPtr(), data_bytes);
1798 dest += data_bytes;
1799
1800 // Inner indices.
1801 data_bytes = sizeof(StorageIndex) * header.inner_buffer_size;
1802 memcpy(dest, value.innerIndexPtr(), data_bytes);
1803 dest += data_bytes;
1804
1805 // Values.
1806 data_bytes = sizeof(Scalar) * header.inner_buffer_size;
1807 memcpy(dest, value.valuePtr(), data_bytes);
1808 dest += data_bytes;
1809
1810 return dest;
1811 }
1812
1813 EIGEN_DEVICE_FUNC const uint8_t* deserialize(const uint8_t* src, const uint8_t* end, SparseMat& value) const {
1814 if (EIGEN_PREDICT_FALSE(src == nullptr)) return nullptr;
1815 if (EIGEN_PREDICT_FALSE(src + sizeof(Header) > end)) return nullptr;
1816
1817 const size_t header_bytes = sizeof(Header);
1818 Header header;
1819 EIGEN_USING_STD(memcpy)
1820 memcpy(&header, src, header_bytes);
1821 src += header_bytes;
1822
1823 value.setZero();
1824 value.resize(header.rows, header.cols);
1825 if (header.compressed) {
1826 value.makeCompressed();
1827 } else {
1828 value.uncompress();
1829 }
1830
1831  // Resize the inner storage (indices and values) to hold inner_buffer_size entries.
1832 value.data().resize(header.inner_buffer_size);
1833
1834 // Initialize compressed state and inner non-zeros.
1835 if (!header.compressed) {
1836 // Inner non-zero counts.
1837 std::size_t data_bytes = sizeof(StorageIndex) * header.outer_size;
1838 if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
1839 memcpy(value.innerNonZeroPtr(), src, data_bytes);
1840 src += data_bytes;
1841 }
1842
1843 // Outer indices.
1844 std::size_t data_bytes = sizeof(StorageIndex) * (header.outer_size + 1);
1845 if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
1846 memcpy(value.outerIndexPtr(), src, data_bytes);
1847 src += data_bytes;
1848
1849 // Inner indices.
1850 data_bytes = sizeof(StorageIndex) * header.inner_buffer_size;
1851 if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
1852 memcpy(value.innerIndexPtr(), src, data_bytes);
1853 src += data_bytes;
1854
1855 // Values.
1856 data_bytes = sizeof(Scalar) * header.inner_buffer_size;
1857 if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
1858 memcpy(value.valuePtr(), src, data_bytes);
1859 src += data_bytes;
1860 return src;
1861 }
1862};
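// Illustrative usage sketch (editorial addition, not part of the library):
// round-tripping a SparseMatrix through the Serializer specialization above,
// assuming the Serializer primary template is available (it is when this header
// is reached through Eigen/SparseCore). Buffer handling and matrix contents are
// arbitrary example values; null-pointer error handling is elided for brevity.
//
//   #include <Eigen/SparseCore>
//   #include <cstdint>
//   #include <vector>
//
//   Eigen::SparseMatrix<double> A(10, 10), B;
//   A.insert(1, 2) = 3.0;
//   A.makeCompressed();
//
//   Eigen::Serializer<Eigen::SparseMatrix<double>> s;
//   std::vector<std::uint8_t> buffer(s.size(A));
//   std::uint8_t* write_end = s.serialize(buffer.data(), buffer.data() + buffer.size(), A);
//   const std::uint8_t* read_end = s.deserialize(buffer.data(), buffer.data() + buffer.size(), B);
//   // On success both returned pointers are non-null and B holds the same pattern and values as A.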
1863
1864} // end namespace Eigen
1865
1866#endif // EIGEN_SPARSEMATRIX_H