  template <typename InputIterators, typename DupFunctor>
  void insertFromSortedTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);

  /** \internal
   * Same as insert(Index,Index), except that the outer/inner indices are given directly in storage order. */
  Scalar& insertByOuterInner(Index j, Index i) {
    eigen_assert(j >= 0 && j < m_outerSize && "invalid outer index");
    eigen_assert(i >= 0 && i < m_innerSize && "invalid inner index");
    Index start = m_outerIndex[j];
    Index end = isCompressed() ? m_outerIndex[j + 1] : start + m_innerNonZeros[j];
    Index dst = start == end ? end : m_data.searchLowerIndex(start, end, i);
    if (dst == end) {
      Index capacity = m_outerIndex[j + 1] - end;
      if (capacity > 0) {
        // implies uncompressed: push to the back of the inner vector
        m_innerNonZeros[j]++;
        m_data.index(end) = StorageIndex(i);
        m_data.value(end) = Scalar(0);
        return m_data.value(end);
      }
    }
    eigen_assert((dst == end || m_data.index(dst) != i) &&
                 "you cannot insert an element that already exists, you must call coeffRef to this end");
    return insertAtByOuterInner(j, i, dst);
  }
  /** Turns the matrix into the \em compressed format. */
  void makeCompressed() {
    if (isCompressed()) return;
    eigen_internal_assert(m_outerIndex != 0 && m_outerSize > 0);
    StorageIndex start = m_outerIndex[1];
    m_outerIndex[1] = m_innerNonZeros[0];
    // try to move fewer, larger contiguous chunks
    Index copyStart = start;
    Index copyTarget = m_innerNonZeros[0];
    for (Index j = 1; j < m_outerSize; j++) {
      StorageIndex end = start + m_innerNonZeros[j];
      StorageIndex nextStart = m_outerIndex[j + 1];
      // do not forget to move the last chunk
      bool breakUpCopy = (end != nextStart) || (j == m_outerSize - 1);
      if (breakUpCopy) {
        Index chunkSize = end - copyStart;
        if (chunkSize > 0) m_data.moveChunk(copyStart, copyTarget, chunkSize);
        copyStart = nextStart;
        copyTarget += chunkSize;
      }
      start = nextStart;
      m_outerIndex[j + 1] = m_outerIndex[j] + m_innerNonZeros[j];
    }
    m_data.resize(m_outerIndex[m_outerSize]);
    // release as much memory as possible
    internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
    m_innerNonZeros = 0;
    m_data.squeeze();
  }
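  // Usage sketch (illustrative only; the matrix name and indices below are made up):
  //
  //   Eigen::SparseMatrix<double> A(1000, 1000);  // starts empty and compressed
  //   A.insert(3, 7) = 1.5;                       // switches to uncompressed mode on demand
  //   A.coeffRef(3, 7) += 0.5;                    // updates the existing coefficient
  //   A.makeCompressed();                         // back to compact CSC/CSR storage, which most
  //                                               // solvers and sparse products expect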
  /** Turns the matrix into the uncompressed mode. */
  void uncompress() {
    if (!isCompressed()) return;
    m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize);
    if (m_outerIndex[m_outerSize] == 0)
      std::fill_n(m_innerNonZeros, m_outerSize, StorageIndex(0));
    else
      for (Index j = 0; j < m_outerSize; j++) m_innerNonZeros[j] = m_outerIndex[j + 1] - m_outerIndex[j];
  }
  /** Suppresses all nonzeros which are \em much \em smaller than \a reference under the tolerance \a epsilon. */
  void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision()) {
    prune(default_prunning_func(reference, epsilon));
  }

  /** Suppresses all nonzeros which do not satisfy the predicate \a keep.
   * The functor \a keep must implement bool operator()(const Index& row, const Index& col, const Scalar& value) const. */
  template <typename KeepFunc>
  void prune(const KeepFunc& keep = KeepFunc()) {
    StorageIndex k = 0;
    for (Index j = 0; j < m_outerSize; ++j) {
      StorageIndex previousStart = m_outerIndex[j];
      StorageIndex end = isCompressed() ? m_outerIndex[j + 1] : previousStart + m_innerNonZeros[j];
      m_outerIndex[j] = k;
      for (StorageIndex i = previousStart; i < end; ++i) {
        StorageIndex row = IsRowMajor ? StorageIndex(j) : m_data.index(i);
        StorageIndex col = IsRowMajor ? m_data.index(i) : StorageIndex(j);
        bool keepEntry = keep(row, col, m_data.value(i));
        if (keepEntry) {
          m_data.value(k) = m_data.value(i);
          m_data.index(k) = m_data.index(i);
          ++k;
        } else if (!isCompressed()) {
          m_innerNonZeros[j]--;
        }
      }
    }
    m_outerIndex[m_outerSize] = k;
    m_data.resize(k, 0);
  }
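  // Usage sketch (illustrative only; the 1e-12 threshold is an arbitrary example):
  //
  //   A.prune(1.0, 1e-12);  // drop entries that are much smaller than 1.0 * 1e-12
  //   A.prune([](Eigen::Index, Eigen::Index, const double& v) {
  //     return std::abs(v) > 1e-12;  // keep only entries strictly above the threshold
  //   });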
  /** Resizes the matrix to \a rows x \a cols while keeping the existing coefficients that still fit. */
  void conservativeResize(Index rows, Index cols) {
    // if one dimension is null, there is nothing to preserve
    if (rows == 0 || cols == 0) return resize(rows, cols);
    Index newOuterSize = IsRowMajor ? rows : cols;
    Index newInnerSize = IsRowMajor ? cols : rows;
    Index innerChange = newInnerSize - m_innerSize;
    Index outerChange = newOuterSize - m_outerSize;
    if (outerChange != 0) {
      m_outerIndex = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_outerIndex, newOuterSize + 1,
                                                                                        m_outerSize + 1);
      if (!isCompressed())
        m_innerNonZeros = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_innerNonZeros,
                                                                                             newOuterSize, m_outerSize);
      if (outerChange > 0) {
        StorageIndex lastIdx = m_outerSize == 0 ? StorageIndex(0) : m_outerIndex[m_outerSize];
        std::fill_n(m_outerIndex + m_outerSize, outerChange + 1, lastIdx);
        if (!isCompressed()) std::fill_n(m_innerNonZeros + m_outerSize, outerChange, StorageIndex(0));
      }
    }
    m_outerSize = newOuterSize;

    if (innerChange < 0) {
      // the inner size shrank: drop all coefficients that no longer fit
      for (Index j = 0; j < m_outerSize; j++) {
        Index start = m_outerIndex[j];
        Index end = isCompressed() ? m_outerIndex[j + 1] : start + m_innerNonZeros[j];
        Index lb = m_data.searchLowerIndex(start, end, newInnerSize);
        if (lb != end) {
          uncompress();
          m_innerNonZeros[j] = StorageIndex(lb - start);
        }
      }
    }
    m_innerSize = newInnerSize;

    Index newSize = m_outerIndex[m_outerSize];
    eigen_assert(newSize <= m_data.size());
    m_data.resize(newSize);
  }
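  // Usage sketch (illustrative only): conservativeResize() keeps the nonzeros that still fit in
  // the new dimensions, whereas resize() below discards all of them.
  //
  //   A.conservativeResize(A.rows() + 10, A.cols());  // grow while keeping the current entries
  //   A.resize(50, 50);                               // start over with an empty 50x50 matrix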
  /** Resizes the matrix to \a rows x \a cols and initializes it to zero. */
  void resize(Index rows, Index cols) {
    const Index outerSize = IsRowMajor ? rows : cols;
    m_innerSize = IsRowMajor ? cols : rows;
    m_data.clear();

    if ((m_outerIndex == 0) || (m_outerSize != outerSize)) {
      m_outerIndex = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_outerIndex, outerSize + 1,
                                                                                        m_outerSize + 1);
      m_outerSize = outerSize;
    }

    internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
    m_innerNonZeros = 0;

    std::fill_n(m_outerIndex, m_outerSize + 1, StorageIndex(0));
  }
  /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
  inline SparseMatrix() : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { resize(0, 0); }
  /** Constructs a sparse matrix from the sparse expression \a other */
  template <typename OtherDerived>
  inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
    EIGEN_STATIC_ASSERT(
        (internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
    const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
    if (needToTranspose) {
      *this = other.derived();
    } else {
#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
      EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
#endif
      internal::call_assignment_no_alias(*this, other.derived());
    }
  }
  /** Constructs a sparse matrix from the sparse selfadjoint view \a other */
  template <typename OtherDerived, unsigned int UpLo>
  inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
    Base::operator=(other);
  }
  /** Move constructor from another sparse expression */
  template <typename OtherDerived>
  inline SparseMatrix(SparseCompressedBase<OtherDerived>&& other) : SparseMatrix() {
    *this = other.derived().markAsRValue();
  }
  /** Copy constructor (it performs a deep copy) */
  inline SparseMatrix(const SparseMatrix& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
    *this = other.derived();
  }
  /** \brief Copy constructor with in-place evaluation */
  template <typename OtherDerived>
  SparseMatrix(const ReturnByValue<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
    initAssignment(other);
    other.evalTo(*this);
  }
  /** \brief Copy constructor with in-place evaluation */
  template <typename OtherDerived>
  explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
    *this = other.derived();
  }
  /** Swaps the content of two sparse matrices of the same type.
   * This is a fast operation that simply swaps the underlying pointers and parameters. */
  inline void swap(SparseMatrix& other) {
    std::swap(m_outerIndex, other.m_outerIndex);
    std::swap(m_innerSize, other.m_innerSize);
    std::swap(m_outerSize, other.m_outerSize);
    std::swap(m_innerNonZeros, other.m_innerNonZeros);
    m_data.swap(other.m_data);
  }
  /** Sets \c *this to the identity matrix.
   * This function also turns the matrix into compressed mode, and drops any reserved memory. */
  inline void setIdentity() {
    eigen_assert(m_outerSize == m_innerSize && "ONLY FOR SQUARED MATRICES");
    internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
    m_innerNonZeros = 0;
    m_data.resize(m_outerSize);
    m_data.squeeze();
    std::iota(m_outerIndex, m_outerIndex + m_outerSize + 1, StorageIndex(0));
    std::iota(innerIndexPtr(), innerIndexPtr() + m_outerSize, StorageIndex(0));
    std::fill_n(valuePtr(), m_outerSize, Scalar(1));
  }
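  // Usage sketch (illustrative only):
  //
  //   Eigen::SparseMatrix<double> I(100, 100);
  //   I.setIdentity();   // 100 stored ones on the diagonal, compressed storage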
  /** Copy assignment (it performs a deep copy) */
  inline SparseMatrix& operator=(const SparseMatrix& other) {
    if (other.isRValue()) {
      swap(other.const_cast_derived());
    } else if (this != &other) {
#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
      EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
#endif
      initAssignment(other);
      if (other.isCompressed()) {
        internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
        m_data = other.m_data;
      } else {
        Base::operator=(other);
      }
    }
    return *this;
  }
#ifndef EIGEN_PARSED_BY_DOXYGEN
  template <typename OtherDerived>
  inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other) {
    return Base::operator=(other.derived());
  }

  template <typename Lhs, typename Rhs>
  inline SparseMatrix& operator=(const Product<Lhs, Rhs, AliasFreeProduct>& other);
#endif  // EIGEN_PARSED_BY_DOXYGEN

  template <typename OtherDerived>
  EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);

  /** Move assignment from another sparse expression */
  template <typename OtherDerived>
  inline SparseMatrix& operator=(SparseCompressedBase<OtherDerived>&& other) {
    *this = other.derived().markAsRValue();
    return *this;
  }
  friend std::ostream& operator<<(std::ostream& s, const SparseMatrix& m) {
    s << "Nonzero entries:\n";
    if (m.isCompressed()) {
      for (Index i = 0; i < m.nonZeros(); ++i) s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
    } else {
      for (Index i = 0; i < m.outerSize(); ++i) {
        Index p = m.m_outerIndex[i];
        Index pe = m.m_outerIndex[i] + m.m_innerNonZeros[i];
        Index k = p;
        for (; k < pe; ++k) {
          s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
        }
        for (; k < m.m_outerIndex[i + 1]; ++k) {
          s << "(_,_) ";
        }
      }
    }
    s << std::endl;
    s << std::endl << "Outer pointers:\n";
    for (Index i = 0; i < m.outerSize(); ++i) {
      s << m.m_outerIndex[i] << " ";
    }
    s << " $" << std::endl;
    if (!m.isCompressed()) {
      s << "Inner non zeros:\n";
      for (Index i = 0; i < m.outerSize(); ++i) {
        s << m.m_innerNonZeros[i] << " ";
      }
      s << " $" << std::endl;
    }
    s << std::endl;
    s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
    return s;
  }
  /** Destructor */
  inline ~SparseMatrix() {
    internal::conditional_aligned_delete_auto<StorageIndex, true>(m_outerIndex, m_outerSize + 1);
    internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
  }
#ifdef EIGEN_SPARSEMATRIX_PLUGIN
#include EIGEN_SPARSEMATRIX_PLUGIN
#endif

 protected:
  template <typename Other>
  void initAssignment(const Other& other) {
    resize(other.rows(), other.cols());
    internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
    m_innerNonZeros = 0;
  }
  /** \internal \deprecated */
  EIGEN_DEPRECATED EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);

  /** \internal a vector object that is equal to 0 everywhere but \a v at the position \a i */
  class SingletonVector {
    StorageIndex m_index;
    StorageIndex m_value;

   public:
    typedef StorageIndex value_type;
    SingletonVector(Index i, Index v) : m_index(convert_index(i)), m_value(convert_index(v)) {}
    StorageIndex operator[](Index i) const { return i == m_index ? m_value : 0; }
  };

  /** \internal \deprecated */
  EIGEN_DEPRECATED EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);

 public:
  /** \internal \sa insert(Index,Index) */
  EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col) {
    const Index outer = IsRowMajor ? row : col;
    const Index inner = IsRowMajor ? col : row;
    eigen_assert(!isCompressed());
    eigen_assert(m_innerNonZeros[outer] <= (m_outerIndex[outer + 1] - m_outerIndex[outer]));
    Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
    m_data.index(p) = StorageIndex(inner);
    m_data.value(p) = Scalar(0);
    return m_data.value(p);
  }
  /** \internal */
  struct IndexPosPair {
    IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
    Index i;
    Index p;
  };
  /** \internal
   * Assigns \a diagXpr to the diagonal of \c *this using the functor \a assignFunc.
   * If the matrix is overwritten (or empty), the diagonal is built directly through a dense vector map;
   * otherwise existing entries are updated in place and missing entries are inserted in a second pass. */
  template <typename DiagXpr, typename Func>
  void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc) {
    constexpr StorageIndex kEmptyIndexVal(-1);
    typedef typename ScalarVector::AlignedMapType ValueMap;

    Index n = diagXpr.size();

    const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar, Scalar>>::value;
    if (overwrite) {
      if ((m_outerSize != n) || (m_innerSize != n)) resize(n, n);
    }

    if (m_data.size() == 0 || overwrite) {
      internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
      m_innerNonZeros = 0;
      resizeNonZeros(n);
      ValueMap valueMap(valuePtr(), n);
      std::iota(m_outerIndex, m_outerIndex + n + 1, StorageIndex(0));
      std::iota(innerIndexPtr(), innerIndexPtr() + n, StorageIndex(0));
      valueMap.setZero();
      internal::call_assignment_no_alias(valueMap, diagXpr, assignFunc);
    } else {
      internal::evaluator<DiagXpr> diaEval(diagXpr);

      ei_declare_aligned_stack_constructed_variable(StorageIndex, tmp, n, 0);
      typename IndexVector::AlignedMapType insertionLocations(tmp, n);
      insertionLocations.setConstant(kEmptyIndexVal);

      Index deferredInsertions = 0;
      Index shift = 0;

      for (Index j = 0; j < n; j++) {
        Index begin = m_outerIndex[j];
        Index end = isCompressed() ? m_outerIndex[j + 1] : begin + m_innerNonZeros[j];
        Index capacity = m_outerIndex[j + 1] - end;
        Index dst = m_data.searchLowerIndex(begin, end, j);
        // the entry already exists: update it now
        if (dst != end && m_data.index(dst) == StorageIndex(j))
          assignFunc.assignCoeff(m_data.value(dst), diaEval.coeff(j));
        // the entry belongs at the back of the vector and there is room: push to back
        else if (dst == end && capacity > 0)
          assignFunc.assignCoeff(insertBackUncompressed(j, j), diaEval.coeff(j));
        // the insertion requires a data move: record the location and handle it in a second pass
        else {
          insertionLocations.coeffRef(j) = StorageIndex(dst);
          deferredInsertions++;
          // if there is no capacity, all vectors to the right of this one are shifted
          if (capacity == 0) shift++;
        }
      }

      // if any deferred insertions were queued, insert them now by shifting the data to the right
      if (deferredInsertions > 0) {
        m_data.resize(m_data.size() + shift);
        Index copyEnd = isCompressed() ? m_outerIndex[m_outerSize]
                                       : m_outerIndex[m_outerSize - 1] + m_innerNonZeros[m_outerSize - 1];
        for (Index j = m_outerSize - 1; deferredInsertions > 0; j--) {
          Index begin = m_outerIndex[j];
          Index end = isCompressed() ? m_outerIndex[j + 1] : begin + m_innerNonZeros[j];
          Index capacity = m_outerIndex[j + 1] - end;

          bool doInsertion = insertionLocations(j) >= 0;
          bool breakUpCopy = doInsertion && (capacity > 0);
          // break up the copy so the insertion goes into the inactive nonzeros of this vector
          if (breakUpCopy) {
            Index copyBegin = m_outerIndex[j + 1];
            Index to = copyBegin + shift;
            Index chunkSize = copyEnd - copyBegin;
            m_data.moveChunk(copyBegin, to, chunkSize);
            copyEnd = end;
          }

          m_outerIndex[j + 1] += shift;

          if (doInsertion) {
            // if there is capacity, shift into the inactive nonzeros
            if (capacity > 0) shift++;
            Index copyBegin = insertionLocations(j);
            Index to = copyBegin + shift;
            Index chunkSize = copyEnd - copyBegin;
            m_data.moveChunk(copyBegin, to, chunkSize);
            Index dst = to - 1;
            m_data.index(dst) = StorageIndex(j);
            m_data.value(dst) = Scalar(0);
            assignFunc.assignCoeff(m_data.value(dst), diaEval.coeff(j));
            if (!isCompressed()) m_innerNonZeros[j]++;
            shift--;
            deferredInsertions--;
            copyEnd = copyBegin;
          }
        }
      }
      eigen_assert((shift == 0) && (deferredInsertions == 0));
    }
  }
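  // Usage sketch (illustrative only): assignDiagonal() is the internal kernel behind writing to
  // SparseMatrix::diagonal(). Assuming a square double matrix A and a dense VectorXd d of matching size:
  //
  //   A.diagonal() = d;         // overwrites the diagonal, inserting missing entries as needed
  //   A.diagonal() += 2.0 * d;  // updates existing diagonal entries and inserts the missing ones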
  EIGEN_STRONG_INLINE Scalar& insertAtByOuterInner(Index outer, Index inner, Index dst);
  Scalar& insertCompressedAtByOuterInner(Index outer, Index inner, Index dst);
  Scalar& insertUncompressedAtByOuterInner(Index outer, Index inner, Index dst);

 private:
  EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned, THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE)
  EIGEN_STATIC_ASSERT((Options & (ColMajor | RowMajor)) == Options, INVALID_MATRIX_TEMPLATE_PARAMETERS)

  struct default_prunning_func {
    default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
    inline bool operator()(const Index&, const Index&, const Scalar& value) const {
      return !internal::isMuchSmallerThan(value, reference, epsilon);
    }
    Scalar reference;
    RealScalar epsilon;
  };
};
namespace internal {

// Creates a compressed sparse matrix from a range of unsorted triplets.
template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
                       DupFunctor dup_func) {
  constexpr bool IsRowMajor = SparseMatrixType::IsRowMajor;
  using StorageIndex = typename SparseMatrixType::StorageIndex;
  using IndexMap = typename VectorX<StorageIndex>::AlignedMapType;
  using TransposedSparseMatrix =
      SparseMatrix<typename SparseMatrixType::Scalar, IsRowMajor ? ColMajor : RowMajor, StorageIndex>;

  if (begin == end) return;

  // Construct the transposed matrix first, then rely on the implicit sort performed by the
  // transposed assignment `mat = trmat` to obtain sorted inner indices.
  TransposedSparseMatrix trmat(mat.rows(), mat.cols());

  // scan the triplets to determine the allocation size before constructing the matrix
  Index nonZeros = 0;
  for (InputIterator it(begin); it != end; ++it) {
    eigen_assert(it->row() >= 0 && it->row() < mat.rows() && it->col() >= 0 && it->col() < mat.cols());
    StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
    if (nonZeros == NumTraits<StorageIndex>::highest()) internal::throw_std_bad_alloc();
    trmat.outerIndexPtr()[j + 1]++;
    nonZeros++;
  }

  std::partial_sum(trmat.outerIndexPtr(), trmat.outerIndexPtr() + trmat.outerSize() + 1, trmat.outerIndexPtr());
  eigen_assert(nonZeros == trmat.outerIndexPtr()[trmat.outerSize()]);
  trmat.resizeNonZeros(nonZeros);

  // temporary array tracking the next insertion position of each inner vector,
  // reused afterwards to collapse duplicates (hence the max of innerSize and outerSize)
  ei_declare_aligned_stack_constructed_variable(StorageIndex, tmp, numext::maxi(mat.innerSize(), mat.outerSize()), 0);
  smart_copy(trmat.outerIndexPtr(), trmat.outerIndexPtr() + trmat.outerSize(), tmp);

  // push the triplets to the back of each inner vector
  for (InputIterator it(begin); it != end; ++it) {
    StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
    StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
    StorageIndex k = tmp[j];
    trmat.data().index(k) = i;
    trmat.data().value(k) = it->value();
    tmp[j]++;
  }

  IndexMap wi(tmp, trmat.innerSize());
  trmat.collapseDuplicates(wi, dup_func);
  // implicit sorting
  mat = trmat;
}
// Creates a compressed sparse matrix from a range of triplets that is already sorted in storage order.
template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void set_from_triplets_sorted(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
                              DupFunctor dup_func) {
  constexpr bool IsRowMajor = SparseMatrixType::IsRowMajor;
  using StorageIndex = typename SparseMatrixType::StorageIndex;

  if (begin == end) return;

  constexpr StorageIndex kEmptyIndexValue(-1);
  // deallocate inner nonzeros if present and zero outerIndexPtr
  mat.resize(mat.rows(), mat.cols());
  // use the outer indices to count non-zero entries (excluding duplicate entries)
  StorageIndex previous_j = kEmptyIndexValue;
  StorageIndex previous_i = kEmptyIndexValue;
  // scan the triplets to determine the allocation size before constructing the matrix
  Index nonZeros = 0;
  for (InputIterator it(begin); it != end; ++it) {
    eigen_assert(it->row() >= 0 && it->row() < mat.rows() && it->col() >= 0 && it->col() < mat.cols());
    StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
    StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
    eigen_assert(j > previous_j || (j == previous_j && i >= previous_i));
    // identify duplicates by comparing against the previous location
    bool duplicate = (previous_j == j) && (previous_i == i);
    if (!duplicate) {
      if (nonZeros == NumTraits<StorageIndex>::highest()) internal::throw_std_bad_alloc();
      nonZeros++;
      mat.outerIndexPtr()[j + 1]++;
      previous_j = j;
      previous_i = i;
    }
  }

  // finalize the outer indices and allocate memory
  std::partial_sum(mat.outerIndexPtr(), mat.outerIndexPtr() + mat.outerSize() + 1, mat.outerIndexPtr());
  eigen_assert(nonZeros == mat.outerIndexPtr()[mat.outerSize()]);
  mat.resizeNonZeros(nonZeros);

  previous_i = kEmptyIndexValue;
  previous_j = kEmptyIndexValue;
  Index back = 0;
  for (InputIterator it(begin); it != end; ++it) {
    StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
    StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
    bool duplicate = (previous_j == j) && (previous_i == i);
    if (duplicate) {
      mat.data().value(back - 1) = dup_func(mat.data().value(back - 1), it->value());
    } else {
      // push the triplet to the back of the matrix
      mat.data().index(back) = i;
      mat.data().value(back) = it->value();
      previous_j = j;
      previous_i = i;
      back++;
    }
  }
  eigen_assert(back == nonZeros);
  // the matrix is finalized and already compressed
}
// thin wrapper around a generic binary functor, used as the duplicate-combining operator
template <typename DupFunctor, typename LhsScalar, typename RhsScalar = LhsScalar>
struct scalar_disjunction_op {
  using result_type = typename result_of<DupFunctor(LhsScalar, RhsScalar)>::type;
  scalar_disjunction_op(const DupFunctor& op) : m_functor(op) {}
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator()(const LhsScalar& a, const RhsScalar& b) const {
    return m_functor(a, b);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const DupFunctor& functor() const { return m_functor; }
  const DupFunctor& m_functor;
};

template <typename DupFunctor, typename LhsScalar, typename RhsScalar>
struct functor_traits<scalar_disjunction_op<DupFunctor, LhsScalar, RhsScalar>> : public functor_traits<DupFunctor> {};
// Performs an in-place "union" of `mat` and the matrix built from the triplet range, combining
// colliding coefficients with `dup_func`.
template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void insert_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
                          DupFunctor dup_func) {
  using Scalar = typename SparseMatrixType::Scalar;
  using SrcXprType =
      CwiseBinaryOp<scalar_disjunction_op<DupFunctor, Scalar>, const SparseMatrixType, const SparseMatrixType>;

  // set_from_triplets is necessary to sort the inner indices and remove the duplicate entries
  SparseMatrixType trips(mat.rows(), mat.cols());
  set_from_triplets(begin, end, trips, dup_func);

  SrcXprType src = mat.binaryExpr(trips, scalar_disjunction_op<DupFunctor, Scalar>(dup_func));
  // the sparse assignment procedure creates a temporary matrix and swaps in the final result
  assign_sparse_to_sparse<SparseMatrixType, SrcXprType>(mat, src);
}
// Same as insert_from_triplets, but the triplet range is required to be sorted in storage order.
template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void insert_from_triplets_sorted(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
                                 DupFunctor dup_func) {
  using Scalar = typename SparseMatrixType::Scalar;
  using SrcXprType =
      CwiseBinaryOp<scalar_disjunction_op<DupFunctor, Scalar>, const SparseMatrixType, const SparseMatrixType>;

  // set_from_triplets_sorted is necessary to remove the duplicate entries
  SparseMatrixType trips(mat.rows(), mat.cols());
  set_from_triplets_sorted(begin, end, trips, dup_func);

  SrcXprType src = mat.binaryExpr(trips, scalar_disjunction_op<DupFunctor, Scalar>(dup_func));
  // the sparse assignment procedure creates a temporary matrix and swaps in the final result
  assign_sparse_to_sparse<SparseMatrixType, SrcXprType>(mat, src);
}

}  // namespace internal
/** Fills \c *this with the list of triplets defined in the range \a begin - \a end, summing duplicates.
 * The initial contents of \c *this are destroyed. */
template <typename Scalar, int Options_, typename StorageIndex_>
template <typename InputIterators>
void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromTriplets(const InputIterators& begin,
                                                                    const InputIterators& end) {
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
      begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
}
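// Usage sketch (illustrative only; the dimensions and the estimated number of entries are made up):
//
//   typedef Eigen::Triplet<double> T;
//   std::vector<T> tripletList;
//   tripletList.reserve(estimated_number_of_entries);
//   tripletList.push_back(T(0, 0, 4.0));   // repeated for every nonzero (i, j, value)
//   tripletList.push_back(T(1, 0, -1.0));
//   Eigen::SparseMatrix<double> A(rows, cols);
//   A.setFromTriplets(tripletList.begin(), tripletList.end());
//   // duplicates with the same (i, j) have been summed; A is now compressed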
/** The same as setFromTriplets but when duplicates are met the functor \a dup_func is applied as
 * value = dup_func(OldValue, NewValue). */
template <typename Scalar, int Options_, typename StorageIndex_>
template <typename InputIterators, typename DupFunctor>
void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromTriplets(const InputIterators& begin,
                                                                    const InputIterators& end, DupFunctor dup_func) {
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
      begin, end, *this, dup_func);
}
/** The same as setFromTriplets but the triplets are assumed to be pre-sorted in storage order.
 * This is faster and requires less temporary storage. Duplicates are summed. */
template <typename Scalar, int Options_, typename StorageIndex_>
template <typename InputIterators>
void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromSortedTriplets(const InputIterators& begin,
                                                                          const InputIterators& end) {
  internal::set_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
      begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
}
/** The same as setFromSortedTriplets but when duplicates are met the functor \a dup_func is applied as
 * value = dup_func(OldValue, NewValue). */
template <typename Scalar, int Options_, typename StorageIndex_>
template <typename InputIterators, typename DupFunctor>
void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromSortedTriplets(const InputIterators& begin,
                                                                          const InputIterators& end,
                                                                          DupFunctor dup_func) {
  internal::set_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
      begin, end, *this, dup_func);
}
/** Inserts a batch of elements defined by the triplet range \a begin - \a end into the matrix \c *this.
 * The current contents of \c *this are preserved; duplicates (including collisions with existing entries) are summed. */
template <typename Scalar, int Options_, typename StorageIndex_>
template <typename InputIterators>
void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromTriplets(const InputIterators& begin,
                                                                       const InputIterators& end) {
  internal::insert_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
      begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
}
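// Usage sketch (illustrative only): insertFromTriplets() merges a new batch of triplets into a
// matrix that already holds data, whereas setFromTriplets() discards the previous contents.
//
//   std::vector<Eigen::Triplet<double>> batch;          // additional entries, filled elsewhere
//   A.insertFromTriplets(batch.begin(), batch.end());   // existing nonzeros are kept;
//                                                       // collisions are summed by default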
/** The same as insertFromTriplets but when duplicates are met the functor \a dup_func is applied as
 * value = dup_func(OldValue, NewValue). */
template <typename Scalar, int Options_, typename StorageIndex_>
template <typename InputIterators, typename DupFunctor>
void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromTriplets(const InputIterators& begin,
                                                                       const InputIterators& end, DupFunctor dup_func) {
  internal::insert_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
      begin, end, *this, dup_func);
}
/** The same as insertFromTriplets but the triplets are assumed to be pre-sorted in storage order. Duplicates are summed. */
template <typename Scalar, int Options_, typename StorageIndex_>
template <typename InputIterators>
void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromSortedTriplets(const InputIterators& begin,
                                                                             const InputIterators& end) {
  internal::insert_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
      begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
}
/** The same as insertFromSortedTriplets but when duplicates are met the functor \a dup_func is applied as
 * value = dup_func(OldValue, NewValue). */
template <typename Scalar, int Options_, typename StorageIndex_>
template <typename InputIterators, typename DupFunctor>
void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromSortedTriplets(const InputIterators& begin,
                                                                             const InputIterators& end,
                                                                             DupFunctor dup_func) {
  internal::insert_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
      begin, end, *this, dup_func);
}
/** \internal
 * Removes duplicate entries by accumulating them with \a dup_func and turns the matrix into the compressed format.
 * The workspace \a wi must have size equal to the inner size; excess allocated memory is not released. */
template <typename Scalar_, int Options_, typename StorageIndex_>
template <typename Derived, typename DupFunctor>
void SparseMatrix<Scalar_, Options_, StorageIndex_>::collapseDuplicates(DenseBase<Derived>& wi, DupFunctor dup_func) {
  eigen_assert(wi.size() == m_innerSize);
  constexpr StorageIndex kEmptyIndexValue(-1);
  // wi(i) caches the position of the last entry written for inner index i
  wi.setConstant(kEmptyIndexValue);
  StorageIndex count = 0;
  const bool is_compressed = isCompressed();
  // for each inner vector, collapse duplicates into their first occurrence
  for (Index j = 0; j < m_outerSize; ++j) {
    const StorageIndex newBegin = count;
    const StorageIndex end = is_compressed ? m_outerIndex[j + 1] : m_outerIndex[j] + m_innerNonZeros[j];
    for (StorageIndex k = m_outerIndex[j]; k < end; ++k) {
      StorageIndex i = m_data.index(k);
      if (wi(i) >= newBegin) {
        // the entry at k is a duplicate within this inner vector: accumulate it
        m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
      } else {
        // first occurrence of inner index i in this vector: keep it
        m_data.index(count) = i;
        m_data.value(count) = m_data.value(k);
        wi(i) = count;
        ++count;
      }
    }
    m_outerIndex[j] = newBegin;
  }
  m_outerIndex[m_outerSize] = count;
  m_data.resize(count);

  // turn the matrix into compressed form (if it was not already)
  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
  m_innerNonZeros = 0;
}
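// Usage sketch (illustrative only): the duplicate functor used by collapseDuplicates (and exposed
// through the setFromTriplets/insertFromTriplets overloads) can be any binary functor Scalar(Scalar, Scalar).
//
//   A.setFromTriplets(list.begin(), list.end());   // colliding triplets are summed
//   A.setFromTriplets(list.begin(), list.end(),
//                     [](const double& a, const double& b) { return std::max(a, b); });  // keep the max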
template <typename Scalar, int Options_, typename StorageIndex_>
template <typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix<Scalar, Options_, StorageIndex_>&
SparseMatrix<Scalar, Options_, StorageIndex_>::operator=(const SparseMatrixBase<OtherDerived>& other) {
  EIGEN_STATIC_ASSERT(
      (internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
      YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
#endif

  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
  if (needToTranspose) {
#ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
    EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
#endif
    // two-pass algorithm:
    //  1 - compute the number of coeffs per dest inner vector
    //  2 - do the actual copy/eval
    // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
    typedef
        typename internal::nested_eval<OtherDerived, 2, typename internal::plain_matrix_type<OtherDerived>::type>::type
            OtherCopy;
    typedef internal::remove_all_t<OtherCopy> OtherCopy_;
    typedef internal::evaluator<OtherCopy_> OtherCopyEval;
    OtherCopy otherCopy(other.derived());
    OtherCopyEval otherCopyEval(otherCopy);

    SparseMatrix dest(other.rows(), other.cols());

    // pass 1: count the number of coeffs per dest inner vector
    for (Index j = 0; j < otherCopy.outerSize(); ++j)
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it) ++dest.m_outerIndex[it.index()];

    // prefix sum
    StorageIndex count = 0;
    IndexVector positions(dest.outerSize());
    for (Index j = 0; j < dest.outerSize(); ++j) {
      StorageIndex tmp = dest.m_outerIndex[j];
      dest.m_outerIndex[j] = count;
      positions[j] = count;
      count += tmp;
    }
    dest.m_outerIndex[dest.outerSize()] = count;
    // alloc
    dest.m_data.resize(count);
    // pass 2: fill the destination
    for (StorageIndex j = 0; j < otherCopy.outerSize(); ++j) {
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it) {
        Index pos = positions[it.index()]++;
        dest.m_data.index(pos) = j;
        dest.m_data.value(pos) = it.value();
      }
    }
    this->swap(dest);
    return *this;
  } else {
    if (other.isRValue()) {
      initAssignment(other.derived());
    }
    // there is no special optimization
    return Base::operator=(other.derived());
  }
}
template <typename Scalar_, int Options_, typename StorageIndex_>
inline typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
SparseMatrix<Scalar_, Options_, StorageIndex_>::insert(Index row, Index col) {
  return insertByOuterInner(IsRowMajor ? row : col, IsRowMajor ? col : row);
}
template <typename Scalar_, int Options_, typename StorageIndex_>
EIGEN_STRONG_INLINE typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
SparseMatrix<Scalar_, Options_, StorageIndex_>::insertAtByOuterInner(Index outer, Index inner, Index dst) {
  // random insertion into a compressed matrix is very slow
  uncompress();
  return insertUncompressedAtByOuterInner(outer, inner, dst);
}
template <typename Scalar_, int Options_, typename StorageIndex_>
EIGEN_DEPRECATED EIGEN_DONT_INLINE typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
SparseMatrix<Scalar_, Options_, StorageIndex_>::insertUncompressed(Index row, Index col) {
  eigen_assert(!isCompressed());
  Index outer = IsRowMajor ? row : col;
  Index inner = IsRowMajor ? col : row;
  Index start = m_outerIndex[outer];
  Index end = start + m_innerNonZeros[outer];
  Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
  if (dst == end) {
    Index capacity = m_outerIndex[outer + 1] - end;
    if (capacity > 0) {
      // there is room at the back of the inner vector: push the new element there
      m_innerNonZeros[outer]++;
      m_data.index(end) = StorageIndex(inner);
      m_data.value(end) = Scalar(0);
      return m_data.value(end);
    }
  }
  eigen_assert((dst == end || m_data.index(dst) != inner) &&
               "you cannot insert an element that already exists, you must call coeffRef to this end");
  return insertUncompressedAtByOuterInner(outer, inner, dst);
}
template <typename Scalar_, int Options_, typename StorageIndex_>
EIGEN_DEPRECATED EIGEN_DONT_INLINE typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
SparseMatrix<Scalar_, Options_, StorageIndex_>::insertCompressed(Index row, Index col) {
  eigen_assert(isCompressed());
  Index outer = IsRowMajor ? row : col;
  Index inner = IsRowMajor ? col : row;
  Index start = m_outerIndex[outer];
  Index end = m_outerIndex[outer + 1];
  Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
  eigen_assert((dst == end || m_data.index(dst) != inner) &&
               "you cannot insert an element that already exists, you must call coeffRef to this end");
  return insertCompressedAtByOuterInner(outer, inner, dst);
}
template <typename Scalar_, int Options_, typename StorageIndex_>
typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
SparseMatrix<Scalar_, Options_, StorageIndex_>::insertCompressedAtByOuterInner(Index outer, Index inner, Index dst) {
  eigen_assert(isCompressed());
  // compressed insertion always requires expanding the buffer: check for adequate allocated memory
  if (m_data.allocatedSize() <= m_data.size()) {
    // double the capacity, with a minimum increase of 32 elements
    Index minReserve = 32;
    Index reserveSize = numext::maxi(minReserve, m_data.allocatedSize());
    m_data.reserve(reserveSize);
  }
  m_data.resize(m_data.size() + 1);
  Index chunkSize = m_outerIndex[m_outerSize] - dst;
  // shift the existing data to the right if necessary
  if (chunkSize > 0) m_data.moveChunk(dst, dst + 1, chunkSize);
  // update the outer indices (potentially an O(outerSize) bottleneck)
  for (Index j = outer; j < m_outerSize; j++) m_outerIndex[j + 1]++;
  // initialize the coefficient and return a reference to it
  m_data.index(dst) = StorageIndex(inner);
  m_data.value(dst) = Scalar(0);
  return m_data.value(dst);
}
template <typename Scalar_, int Options_, typename StorageIndex_>
typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
SparseMatrix<Scalar_, Options_, StorageIndex_>::insertUncompressedAtByOuterInner(Index outer, Index inner, Index dst) {
  eigen_assert(!isCompressed());
  // find a vector with spare capacity, starting at `outer` and searching to the left and right
  for (Index leftTarget = outer - 1, rightTarget = outer; (leftTarget >= 0) || (rightTarget < m_outerSize);) {
    if (rightTarget < m_outerSize) {
      Index start = m_outerIndex[rightTarget];
      Index end = start + m_innerNonZeros[rightTarget];
      Index nextStart = m_outerIndex[rightTarget + 1];
      Index capacity = nextStart - end;
      if (capacity > 0) {
        // move [dst, end) one slot to the right and insert at dst
        Index chunkSize = end - dst;
        if (chunkSize > 0) m_data.moveChunk(dst, dst + 1, chunkSize);
        m_innerNonZeros[outer]++;
        for (Index j = outer; j < rightTarget; j++) m_outerIndex[j + 1]++;
        m_data.index(dst) = StorageIndex(inner);
        m_data.value(dst) = Scalar(0);
        return m_data.value(dst);
      }
      rightTarget++;
    }
    if (leftTarget >= 0) {
      Index start = m_outerIndex[leftTarget];
      Index end = start + m_innerNonZeros[leftTarget];
      Index nextStart = m_outerIndex[leftTarget + 1];
      Index capacity = nextStart - end;
      if (capacity > 0) {
        // move [nextStart, dst) one slot to the left and insert at dst - 1
        Index chunkSize = dst - nextStart;
        if (chunkSize > 0) m_data.moveChunk(nextStart, nextStart - 1, chunkSize);
        m_innerNonZeros[outer]++;
        for (Index j = leftTarget; j < outer; j++) m_outerIndex[j + 1]--;
        m_data.index(dst - 1) = StorageIndex(inner);
        m_data.value(dst - 1) = Scalar(0);
        return m_data.value(dst - 1);
      }
      leftTarget--;
    }
  }

  // no room for an interior insertion: the outer indices will change, so record the offset
  Index dst_offset = dst - m_outerIndex[outer];
  // allocate space for random insertion
  if (m_data.allocatedSize() == 0) {
    // fast path: allocate space for one element per vector in an empty matrix
    m_data.resize(m_outerSize);
    std::iota(m_outerIndex, m_outerIndex + m_outerSize + 1, StorageIndex(0));
  } else {
    // check for integer overflow: the maximum number of elements that can still be allocated
    Index maxReserveSize = static_cast<Index>(NumTraits<StorageIndex>::highest()) - m_data.allocatedSize();
    eigen_assert(maxReserveSize > 0);
    if (m_outerSize <= maxReserveSize) {
      // allocate space for one additional element per vector
      reserveInnerVectors(IndexVector::Constant(m_outerSize, 1));
    } else {
      // edge case where StorageIndex cannot reserve outerSize additional elements:
      // allocate one additional element per vector only in the interval [outer, maxReserveSize)
      typedef internal::sparse_reserve_op<StorageIndex> ReserveSizesOp;
      typedef CwiseNullaryOp<ReserveSizesOp, IndexVector> ReserveSizesXpr;
      ReserveSizesXpr reserveSizesXpr(m_outerSize, 1, ReserveSizesOp(outer, m_outerSize, maxReserveSize));
      reserveInnerVectors(reserveSizesXpr);
    }
  }
  // insert the element at `dst` with the updated outer indices
  Index start = m_outerIndex[outer];
  Index end = start + m_innerNonZeros[outer];
  Index new_dst = start + dst_offset;
  Index chunkSize = end - new_dst;
  if (chunkSize > 0) m_data.moveChunk(new_dst, new_dst + 1, chunkSize);
  m_innerNonZeros[outer]++;
  m_data.index(new_dst) = StorageIndex(inner);
  m_data.value(new_dst) = Scalar(0);
  return m_data.value(new_dst);
}
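// Usage sketch (illustrative only): the data moves implemented above only happen when an inner
// vector runs out of free space, so reserving a per-vector estimate up front keeps random
// insertion cheap. The estimate of 6 nonzeros per column is an arbitrary example.
//
//   Eigen::SparseMatrix<double> A(rows, cols);
//   A.reserve(Eigen::VectorXi::Constant(cols, 6));
//   A.insert(i, j) = v_ij;   // repeated for each new (i, j); each must not already exist
//   A.makeCompressed();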
namespace internal {

template <typename Scalar_, int Options_, typename StorageIndex_>
struct evaluator<SparseMatrix<Scalar_, Options_, StorageIndex_>>
    : evaluator<SparseCompressedBase<SparseMatrix<Scalar_, Options_, StorageIndex_>>> {
  typedef evaluator<SparseCompressedBase<SparseMatrix<Scalar_, Options_, StorageIndex_>>> Base;
  typedef SparseMatrix<Scalar_, Options_, StorageIndex_> SparseMatrixType;
  evaluator() : Base() {}
  explicit evaluator(const SparseMatrixType& mat) : Base(mat) {}
};

}  // namespace internal
// Serializer specialization for SparseMatrix.
// Serializes [rows, cols, isCompressed, outerSize, innerBufferSize, innerNonZeros, outerIndices, innerIndices, values].
template <typename Scalar, int Options, typename StorageIndex>
class Serializer<SparseMatrix<Scalar, Options, StorageIndex>, void> {
 public:
  typedef SparseMatrix<Scalar, Options, StorageIndex> SparseMat;

  struct Header {
    typename SparseMat::Index rows;
    typename SparseMat::Index cols;
    bool compressed;
    Index outer_size;
    Index inner_buffer_size;
  };

  EIGEN_DEVICE_FUNC size_t size(const SparseMat& value) const {
    // innerNonZeros.
    std::size_t num_storage_indices = value.isCompressed() ? 0 : value.outerSize();
    // Outer indices.
    num_storage_indices += value.outerSize() + 1;
    // Inner indices.
    const StorageIndex inner_buffer_size = value.outerIndexPtr()[value.outerSize()];
    num_storage_indices += inner_buffer_size;
    // Values.
    std::size_t num_values = inner_buffer_size;
    return sizeof(Header) + sizeof(Scalar) * num_values + sizeof(StorageIndex) * num_storage_indices;
  }
  EIGEN_DEVICE_FUNC uint8_t* serialize(uint8_t* dest, uint8_t* end, const SparseMat& value) {
    if (EIGEN_PREDICT_FALSE(dest == nullptr)) return nullptr;
    if (EIGEN_PREDICT_FALSE(dest + size(value) > end)) return nullptr;

    const size_t header_bytes = sizeof(Header);
    Header header = {value.rows(), value.cols(), value.isCompressed(), value.outerSize(),
                     value.outerIndexPtr()[value.outerSize()]};
    EIGEN_USING_STD(memcpy)
    memcpy(dest, &header, header_bytes);
    dest += header_bytes;

    // innerNonZeros.
    if (!header.compressed) {
      std::size_t data_bytes = sizeof(StorageIndex) * header.outer_size;
      memcpy(dest, value.innerNonZeroPtr(), data_bytes);
      dest += data_bytes;
    }

    // Outer indices.
    std::size_t data_bytes = sizeof(StorageIndex) * (header.outer_size + 1);
    memcpy(dest, value.outerIndexPtr(), data_bytes);
    dest += data_bytes;

    // Inner indices.
    data_bytes = sizeof(StorageIndex) * header.inner_buffer_size;
    memcpy(dest, value.innerIndexPtr(), data_bytes);
    dest += data_bytes;

    // Values.
    data_bytes = sizeof(Scalar) * header.inner_buffer_size;
    memcpy(dest, value.valuePtr(), data_bytes);
    dest += data_bytes;

    return dest;
  }
  EIGEN_DEVICE_FUNC const uint8_t* deserialize(const uint8_t* src, const uint8_t* end, SparseMat& value) const {
    if (EIGEN_PREDICT_FALSE(src == nullptr)) return nullptr;
    if (EIGEN_PREDICT_FALSE(src + sizeof(Header) > end)) return nullptr;

    const size_t header_bytes = sizeof(Header);
    Header header;
    EIGEN_USING_STD(memcpy)
    memcpy(&header, src, header_bytes);
    src += header_bytes;

    // Initialize the matrix with the requested storage mode.
    value.resize(header.rows, header.cols);
    if (header.compressed) {
      value.makeCompressed();
    } else {
      value.uncompress();
    }
    // Allocate storage for the inner indices and values.
    value.data().resize(header.inner_buffer_size);

    // innerNonZeros.
    if (!header.compressed) {
      std::size_t data_bytes = sizeof(StorageIndex) * header.outer_size;
      if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
      memcpy(value.innerNonZeroPtr(), src, data_bytes);
      src += data_bytes;
    }

    // Outer indices.
    std::size_t data_bytes = sizeof(StorageIndex) * (header.outer_size + 1);
    if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
    memcpy(value.outerIndexPtr(), src, data_bytes);
    src += data_bytes;

    // Inner indices.
    data_bytes = sizeof(StorageIndex) * header.inner_buffer_size;
    if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
    memcpy(value.innerIndexPtr(), src, data_bytes);
    src += data_bytes;

    // Values.
    data_bytes = sizeof(Scalar) * header.inner_buffer_size;
    if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
    memcpy(value.valuePtr(), src, data_bytes);
    src += data_bytes;

    return src;
  }
};
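// Usage sketch (illustrative only), assuming the generic Eigen::Serializer interface specialized above:
//
//   Eigen::SparseMatrix<float> A(rows, cols);                 // filled elsewhere
//   Eigen::Serializer<Eigen::SparseMatrix<float>> serializer;
//   std::vector<uint8_t> buffer(serializer.size(A));
//   serializer.serialize(buffer.data(), buffer.data() + buffer.size(), A);
//   Eigen::SparseMatrix<float> B;
//   serializer.deserialize(buffer.data(), buffer.data() + buffer.size(), B);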