Eigen  3.4.90 (git rev 5a9f66fb35d03a4da9ef8976e67a61b30aa16dcf)
 
Loading...
Searching...
No Matches
SparseVector.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEVECTOR_H
#define EIGEN_SPARSEVECTOR_H

// IWYU pragma: private
#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {

// Traits for SparseVector: a dynamic-size 1D sparse array stored as a single
// compressed list of (index, value) pairs.
template <typename Scalar_, int Options_, typename StorageIndex_>
struct traits<SparseVector<Scalar_, Options_, StorageIndex_> > {
  typedef Scalar_ Scalar;
  typedef StorageIndex_ StorageIndex;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
    // The RowMajor option turns this into a 1 x n row vector; the default is an n x 1 column vector.
    IsColVector = (Options_ & RowMajorBit) ? 0 : 1,

    RowsAtCompileTime = IsColVector ? Dynamic : 1,
    ColsAtCompileTime = IsColVector ? 1 : Dynamic,
    MaxRowsAtCompileTime = RowsAtCompileTime,
    MaxColsAtCompileTime = ColsAtCompileTime,
    // A sparse vector is always stored compressed (no innerNonZero array).
    Flags = Options_ | NestByRefBit | LvalueBit | (IsColVector ? 0 : RowMajorBit) | CompressedAccessBit,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};

// Sparse-Vector-Assignment kinds:
enum { SVA_RuntimeSwitch, SVA_Inner, SVA_Outer };

// Selects at compile time how a Src expression is copied into a SparseVector:
// along the inner dimension (SVA_Inner), across outer slots (SVA_Outer), or
// decided at runtime when Src is not known to be a vector (SVA_RuntimeSwitch).
template <typename Dest, typename Src,
          int AssignmentKind = !bool(Src::IsVectorAtCompileTime) ? SVA_RuntimeSwitch
                               : Src::InnerSizeAtCompileTime == 1 ? SVA_Outer
                                                                  : SVA_Inner>
struct sparse_vector_assign_selector;

}  // namespace internal
60
61template <typename Scalar_, int Options_, typename StorageIndex_>
62class SparseVector : public SparseCompressedBase<SparseVector<Scalar_, Options_, StorageIndex_> > {
64 using Base::convert_index;
65
66 public:
67 EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector)
68 EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
69 EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)
70
71 typedef internal::CompressedStorage<Scalar, StorageIndex> Storage;
72 enum { IsColVector = internal::traits<SparseVector>::IsColVector };
73
74 enum { Options = Options_ };
75
76 EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; }
77 EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; }
78 EIGEN_STRONG_INLINE Index innerSize() const { return m_size; }
79 EIGEN_STRONG_INLINE Index outerSize() const { return 1; }
80
81 EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return m_data.valuePtr(); }
82 EIGEN_STRONG_INLINE Scalar* valuePtr() { return m_data.valuePtr(); }
83
84 EIGEN_STRONG_INLINE const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
85 EIGEN_STRONG_INLINE StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
86
87 inline const StorageIndex* outerIndexPtr() const { return 0; }
88 inline StorageIndex* outerIndexPtr() { return 0; }
89 inline const StorageIndex* innerNonZeroPtr() const { return 0; }
90 inline StorageIndex* innerNonZeroPtr() { return 0; }
91
93 inline Storage& data() { return m_data; }
95 inline const Storage& data() const { return m_data; }
96
97 inline Scalar coeff(Index row, Index col) const {
98 eigen_assert(IsColVector ? (col == 0 && row >= 0 && row < m_size) : (row == 0 && col >= 0 && col < m_size));
99 return coeff(IsColVector ? row : col);
100 }
101 inline Scalar coeff(Index i) const {
102 eigen_assert(i >= 0 && i < m_size);
103 return m_data.at(StorageIndex(i));
104 }
105
106 inline Scalar& coeffRef(Index row, Index col) {
107 eigen_assert(IsColVector ? (col == 0 && row >= 0 && row < m_size) : (row == 0 && col >= 0 && col < m_size));
108 return coeffRef(IsColVector ? row : col);
109 }
110
117 inline Scalar& coeffRef(Index i) {
118 eigen_assert(i >= 0 && i < m_size);
119
120 return m_data.atWithInsertion(StorageIndex(i));
121 }
122
123 public:
124 typedef typename Base::InnerIterator InnerIterator;
125 typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
126
127 inline void setZero() { m_data.clear(); }
128
130 inline Index nonZeros() const { return m_data.size(); }
131
132 inline void startVec(Index outer) {
133 EIGEN_UNUSED_VARIABLE(outer);
134 eigen_assert(outer == 0);
135 }
136
137 inline Scalar& insertBackByOuterInner(Index outer, Index inner) {
138 EIGEN_UNUSED_VARIABLE(outer);
139 eigen_assert(outer == 0);
140 return insertBack(inner);
141 }
142 inline Scalar& insertBack(Index i) {
143 m_data.append(0, i);
144 return m_data.value(m_data.size() - 1);
145 }
146
147 Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner) {
148 EIGEN_UNUSED_VARIABLE(outer);
149 eigen_assert(outer == 0);
150 return insertBackUnordered(inner);
151 }
152 inline Scalar& insertBackUnordered(Index i) {
153 m_data.append(0, i);
154 return m_data.value(m_data.size() - 1);
155 }
156
157 inline Scalar& insert(Index row, Index col) {
158 eigen_assert(IsColVector ? (col == 0 && row >= 0 && row < m_size) : (row == 0 && col >= 0 && col < m_size));
159
160 Index inner = IsColVector ? row : col;
161 Index outer = IsColVector ? col : row;
162 EIGEN_ONLY_USED_FOR_DEBUG(outer);
163 eigen_assert(outer == 0);
164 return insert(inner);
165 }
166 Scalar& insert(Index i) {
167 eigen_assert(i >= 0 && i < m_size);
168
169 Index startId = 0;
170 Index p = Index(m_data.size()) - 1;
171 // TODO smart realloc
172 m_data.resize(p + 2, 1);
173
174 while ((p >= startId) && (m_data.index(p) > i)) {
175 m_data.index(p + 1) = m_data.index(p);
176 m_data.value(p + 1) = m_data.value(p);
177 --p;
178 }
179 m_data.index(p + 1) = convert_index(i);
180 m_data.value(p + 1) = 0;
181 return m_data.value(p + 1);
182 }
183
186 inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); }
187
188 inline void finalize() {}
189
191 Index prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision()) {
192 return prune([&](const Scalar& val) { return !internal::isMuchSmallerThan(val, reference, epsilon); });
193 }
194
202 template <class F>
203 Index prune(F&& keep_predicate) {
204 Index k = 0;
205 Index n = m_data.size();
206 for (Index i = 0; i < n; ++i) {
207 if (keep_predicate(m_data.value(i))) {
208 m_data.value(k) = std::move(m_data.value(i));
209 m_data.index(k) = m_data.index(i);
210 ++k;
211 }
212 }
213 m_data.resize(k);
214 return k;
215 }
216
225 void resize(Index rows, Index cols) {
226 eigen_assert((IsColVector ? cols : rows) == 1 && "Outer dimension must equal 1");
227 resize(IsColVector ? rows : cols);
228 }
229
234 void resize(Index newSize) {
235 m_size = newSize;
236 m_data.clear();
237 }
238
246 void conservativeResize(Index newSize) {
247 if (newSize < m_size) {
248 Index i = 0;
249 while (i < m_data.size() && m_data.index(i) < newSize) ++i;
250 m_data.resize(i);
251 }
252 m_size = newSize;
253 }
254
255 void resizeNonZeros(Index size) { m_data.resize(size); }
256
257 inline SparseVector() : m_size(0) { resize(0); }
258
259 explicit inline SparseVector(Index size) : m_size(0) { resize(size); }
260
261 inline SparseVector(Index rows, Index cols) : m_size(0) { resize(rows, cols); }
262
263 template <typename OtherDerived>
264 inline SparseVector(const SparseMatrixBase<OtherDerived>& other) : m_size(0) {
265#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
266 EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
267#endif
268 *this = other.derived();
269 }
270
271 inline SparseVector(const SparseVector& other) : Base(other), m_size(0) { *this = other.derived(); }
272
277 inline void swap(SparseVector& other) {
278 std::swap(m_size, other.m_size);
279 m_data.swap(other.m_data);
280 }
281
282 template <int OtherOptions>
283 inline void swap(SparseMatrix<Scalar, OtherOptions, StorageIndex>& other) {
284 eigen_assert(other.outerSize() == 1);
285 std::swap(m_size, other.m_innerSize);
286 m_data.swap(other.m_data);
287 }
288
289 inline SparseVector& operator=(const SparseVector& other) {
290 if (other.isRValue()) {
291 swap(other.const_cast_derived());
292 } else {
293 resize(other.size());
294 m_data = other.m_data;
295 }
296 return *this;
297 }
298
299 template <typename OtherDerived>
300 inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other) {
301 SparseVector tmp(other.size());
302 internal::sparse_vector_assign_selector<SparseVector, OtherDerived>::run(tmp, other.derived());
303 this->swap(tmp);
304 return *this;
305 }
306
307 inline SparseVector(SparseVector&& other) : SparseVector() { this->swap(other); }
308
309 template <typename OtherDerived>
310 inline SparseVector(SparseCompressedBase<OtherDerived>&& other) : SparseVector() {
311 *this = other.derived().markAsRValue();
312 }
313
314 inline SparseVector& operator=(SparseVector&& other) {
315 this->swap(other);
316 return *this;
317 }
318
319 template <typename OtherDerived>
320 inline SparseVector& operator=(SparseCompressedBase<OtherDerived>&& other) {
321 *this = other.derived().markAsRValue();
322 return *this;
323 }
324
325#ifndef EIGEN_PARSED_BY_DOXYGEN
326 template <typename Lhs, typename Rhs>
327 inline SparseVector& operator=(const SparseSparseProduct<Lhs, Rhs>& product) {
328 return Base::operator=(product);
329 }
330#endif
331
332#ifndef EIGEN_NO_IO
333 friend std::ostream& operator<<(std::ostream& s, const SparseVector& m) {
334 for (Index i = 0; i < m.nonZeros(); ++i) s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
335 s << std::endl;
336 return s;
337 }
338#endif
339
341 inline ~SparseVector() {}
342
344 Scalar sum() const;
345
346 public:
348 EIGEN_DEPRECATED void startFill(Index reserve) {
349 setZero();
350 m_data.reserve(reserve);
351 }
352
354 EIGEN_DEPRECATED Scalar& fill(Index r, Index c) {
355 eigen_assert(r == 0 || c == 0);
356 return fill(IsColVector ? r : c);
357 }
358
360 EIGEN_DEPRECATED Scalar& fill(Index i) {
361 m_data.append(0, i);
362 return m_data.value(m_data.size() - 1);
363 }
364
366 EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c) {
367 eigen_assert(r == 0 || c == 0);
368 return fillrand(IsColVector ? r : c);
369 }
370
372 EIGEN_DEPRECATED Scalar& fillrand(Index i) { return insert(i); }
373
375 EIGEN_DEPRECATED void endFill() {}
376
377 // These two functions were here in the 3.1 release, so let's keep them in case some code rely on them.
379 EIGEN_DEPRECATED Storage& _data() { return m_data; }
381 EIGEN_DEPRECATED const Storage& _data() const { return m_data; }
382
383#ifdef EIGEN_SPARSEVECTOR_PLUGIN
384#include EIGEN_SPARSEVECTOR_PLUGIN
385#endif
386
387 protected:
388 EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned, THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE)
389 EIGEN_STATIC_ASSERT((Options_ & (ColMajor | RowMajor)) == Options, INVALID_MATRIX_TEMPLATE_PARAMETERS)
390
391 Storage m_data;
392 Index m_size;
393};
394
namespace internal {

// Evaluator for SparseVector: stores only a pointer to the vector and forwards
// its iterators; reading a coefficient costs a single scalar read.
template <typename Scalar_, int Options_, typename Index_>
struct evaluator<SparseVector<Scalar_, Options_, Index_> > : evaluator_base<SparseVector<Scalar_, Options_, Index_> > {
  typedef SparseVector<Scalar_, Options_, Index_> SparseVectorType;
  typedef evaluator_base<SparseVectorType> Base;
  typedef typename SparseVectorType::InnerIterator InnerIterator;
  typedef typename SparseVectorType::ReverseInnerIterator ReverseInnerIterator;

  enum { CoeffReadCost = NumTraits<Scalar_>::ReadCost, Flags = SparseVectorType::Flags };

  evaluator() : Base() {}

  explicit evaluator(const SparseVectorType& mat) : m_matrix(&mat) { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); }

  inline Index nonZerosEstimate() const { return m_matrix->nonZeros(); }

  // Implicit conversions back to the wrapped vector (used by the assignment machinery).
  operator SparseVectorType&() { return m_matrix->const_cast_derived(); }
  operator const SparseVectorType&() const { return *m_matrix; }

  const SparseVectorType* m_matrix;
};

// Source is a vector along its inner dimension: iterate its single outer slot.
template <typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest, Src, SVA_Inner> {
  static void run(Dest& dst, const Src& src) {
    eigen_internal_assert(src.innerSize() == src.size());
    typedef internal::evaluator<Src> SrcEvaluatorType;
    SrcEvaluatorType srcEval(src);
    for (typename SrcEvaluatorType::InnerIterator it(srcEval, 0); it; ++it) dst.insert(it.index()) = it.value();
  }
};

// Source is a vector across its outer dimension: each outer slot holds at most
// one coefficient, so probe each slot's iterator once.
template <typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest, Src, SVA_Outer> {
  static void run(Dest& dst, const Src& src) {
    eigen_internal_assert(src.outerSize() == src.size());
    typedef internal::evaluator<Src> SrcEvaluatorType;
    SrcEvaluatorType srcEval(src);
    for (Index i = 0; i < src.size(); ++i) {
      typename SrcEvaluatorType::InnerIterator it(srcEval, i);
      if (it) dst.insert(i) = it.value();
    }
  }
};

// Orientation not known at compile time: dispatch on the runtime outer size.
template <typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest, Src, SVA_RuntimeSwitch> {
  static void run(Dest& dst, const Src& src) {
    if (src.outerSize() == 1)
      sparse_vector_assign_selector<Dest, Src, SVA_Inner>::run(dst, src);
    else
      sparse_vector_assign_selector<Dest, Src, SVA_Outer>::run(dst, src);
  }
};

}  // namespace internal
452
453// Specialization for SparseVector.
454// Serializes [size, numNonZeros, innerIndices, values].
455template <typename Scalar, int Options, typename StorageIndex>
456class Serializer<SparseVector<Scalar, Options, StorageIndex>, void> {
457 public:
458 typedef SparseVector<Scalar, Options, StorageIndex> SparseMat;
459
460 struct Header {
461 typename SparseMat::Index size;
462 Index num_non_zeros;
463 };
464
465 EIGEN_DEVICE_FUNC size_t size(const SparseMat& value) const {
466 return sizeof(Header) + (sizeof(Scalar) + sizeof(StorageIndex)) * value.nonZeros();
467 }
468
469 EIGEN_DEVICE_FUNC uint8_t* serialize(uint8_t* dest, uint8_t* end, const SparseMat& value) {
470 if (EIGEN_PREDICT_FALSE(dest == nullptr)) return nullptr;
471 if (EIGEN_PREDICT_FALSE(dest + size(value) > end)) return nullptr;
472
473 const size_t header_bytes = sizeof(Header);
474 Header header = {value.innerSize(), value.nonZeros()};
475 EIGEN_USING_STD(memcpy)
476 memcpy(dest, &header, header_bytes);
477 dest += header_bytes;
478
479 // Inner indices.
480 std::size_t data_bytes = sizeof(StorageIndex) * header.num_non_zeros;
481 memcpy(dest, value.innerIndexPtr(), data_bytes);
482 dest += data_bytes;
483
484 // Values.
485 data_bytes = sizeof(Scalar) * header.num_non_zeros;
486 memcpy(dest, value.valuePtr(), data_bytes);
487 dest += data_bytes;
488
489 return dest;
490 }
491
492 EIGEN_DEVICE_FUNC const uint8_t* deserialize(const uint8_t* src, const uint8_t* end, SparseMat& value) const {
493 if (EIGEN_PREDICT_FALSE(src == nullptr)) return nullptr;
494 if (EIGEN_PREDICT_FALSE(src + sizeof(Header) > end)) return nullptr;
495
496 const size_t header_bytes = sizeof(Header);
497 Header header;
498 EIGEN_USING_STD(memcpy)
499 memcpy(&header, src, header_bytes);
500 src += header_bytes;
501
502 value.setZero();
503 value.resize(header.size);
504 value.resizeNonZeros(header.num_non_zeros);
505
506 // Inner indices.
507 std::size_t data_bytes = sizeof(StorageIndex) * header.num_non_zeros;
508 if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
509 memcpy(value.innerIndexPtr(), src, data_bytes);
510 src += data_bytes;
511
512 // Values.
513 data_bytes = sizeof(Scalar) * header.num_non_zeros;
514 if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
515 memcpy(value.valuePtr(), src, data_bytes);
516 src += data_bytes;
517 return src;
518 }
519};
520
}  // end namespace Eigen

#endif  // EIGEN_SPARSEVECTOR_H
Common base class for sparse [compressed]-{row|column}-storage format.
Definition SparseCompressedBase.h:43
internal::traits< SparseVector< Scalar_, Options_, StorageIndex_ > >::StorageIndex StorageIndex
Definition SparseMatrixBase.h:44
A versatible sparse matrix representation.
Definition SparseUtil.h:47
Index outerSize() const
Definition SparseMatrix.h:166
a sparse vector class
Definition SparseVector.h:62
void conservativeResize(Index newSize)
Definition SparseVector.h:246
Scalar & coeffRef(Index i)
Definition SparseVector.h:117
Index prune(const Scalar &reference, const RealScalar &epsilon=NumTraits< RealScalar >::dummy_precision())
Definition SparseVector.h:191
void resize(Index rows, Index cols)
Definition SparseVector.h:225
~SparseVector()
Definition SparseVector.h:341
void swap(SparseVector &other)
Definition SparseVector.h:277
Index prune(F &&keep_predicate)
Prunes the entries of the vector based on a predicate
Definition SparseVector.h:203
Index nonZeros() const
Definition SparseVector.h:130
void resize(Index newSize)
Definition SparseVector.h:234
const unsigned int LvalueBit
Definition Constants.h:148
const unsigned int RowMajorBit
Definition Constants.h:70
const unsigned int CompressedAccessBit
Definition Constants.h:195
Namespace containing all symbols from the Eigen library.
Definition Core:137
EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
The Index type as used for the API.
Definition Meta.h:83
uint8_t * serialize(uint8_t *dest, uint8_t *end, const Args &... args)
Definition Serializer.h:188
const int Dynamic
Definition Constants.h:25
const uint8_t * deserialize(const uint8_t *src, const uint8_t *end, Args &... args)
Definition Serializer.h:201
Holds information about the various numeric (i.e. scalar) types allowed by Eigen.
Definition Meta.h:523