Eigen 3.4.90 (git rev 5a9f66fb35d03a4da9ef8976e67a61b30aa16dcf)
 
AmbiVector.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_AMBIVECTOR_H
#define EIGEN_AMBIVECTOR_H

// IWYU pragma: private
#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {

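// AmbiVector is an internal work vector used by Eigen's sparse algorithms. It stores the
// coefficients of a single vector either densely or sparsely, in the same raw buffer:
//  - dense mode: m_buffer is addressed as a plain Scalar array indexed by coordinate;
//  - sparse mode: m_buffer is reinterpreted as an array of ListEl nodes forming a
//    singly-linked list sorted by index.
// init() selects the mode from an estimated density, coeffRef() performs lookup and
// insertion, and the nested Iterator enumerates the nonzero coefficients.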
template <typename Scalar_, typename StorageIndex_>
class AmbiVector {
 public:
  typedef Scalar_ Scalar;
  typedef StorageIndex_ StorageIndex;
  typedef typename NumTraits<Scalar>::Real RealScalar;

  explicit AmbiVector(Index size)
      : m_buffer(0), m_zero(0), m_size(0), m_end(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1) {
    resize(size);
  }

  void init(double estimatedDensity);
  void init(int mode);

  Index nonZeros() const;

  // restrict subsequent operations to the [start, end) coordinate window (mainly meaningful in dense mode)
  void setBounds(Index start, Index end) {
    m_start = convert_index(start);
    m_end = convert_index(end);
  }

  void setZero();

  void restart();
  Scalar& coeffRef(Index i);
  Scalar& coeff(Index i);

  class Iterator;

  ~AmbiVector() { delete[] m_buffer; }

  void resize(Index size) {
    if (m_allocatedSize < size) reallocate(size);
    m_size = convert_index(size);
  }

  StorageIndex size() const { return m_size; }

 protected:
  StorageIndex convert_index(Index idx) { return internal::convert_index<StorageIndex>(idx); }

  void reallocate(Index size) {
    // if the size of the matrix is not too large, let's allocate a bit more than needed such
    // that we can handle dense vectors even in sparse mode.
    delete[] m_buffer;
    if (size < 1000) {
      Index allocSize = (size * sizeof(ListEl) + sizeof(Scalar) - 1) / sizeof(Scalar);
      m_allocatedElements = convert_index((allocSize * sizeof(Scalar)) / sizeof(ListEl));
      m_buffer = new Scalar[allocSize];
    } else {
      m_allocatedElements = convert_index((size * sizeof(Scalar)) / sizeof(ListEl));
      m_buffer = new Scalar[size];
    }
    m_size = convert_index(size);
    m_start = 0;
    m_end = m_size;
  }

  void reallocateSparse() {
    Index copyElements = m_allocatedElements;
    m_allocatedElements = (std::min)(StorageIndex(m_allocatedElements * 1.5), m_size);
    Index allocSize = m_allocatedElements * sizeof(ListEl);
    allocSize = (allocSize + sizeof(Scalar) - 1) / sizeof(Scalar);
    Scalar* newBuffer = new Scalar[allocSize];
    std::memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl));
    delete[] m_buffer;
    m_buffer = newBuffer;
  }

 protected:
  // element type of the linked list
  struct ListEl {
    StorageIndex next;
    StorageIndex index;
    Scalar value;
  };

  // used to store data in both modes
  Scalar* m_buffer;
  Scalar m_zero;
  StorageIndex m_size;
  StorageIndex m_start;
  StorageIndex m_end;
  StorageIndex m_allocatedSize;
  StorageIndex m_allocatedElements;
  StorageIndex m_mode;

  // linked list mode
  StorageIndex m_llStart;
  StorageIndex m_llCurrent;
  StorageIndex m_llSize;
};

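// Returns the number of coefficients currently stored: the linked-list length in
// sparse mode, or the size of the [m_start, m_end) window in dense mode.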
template <typename Scalar_, typename StorageIndex_>
Index AmbiVector<Scalar_, StorageIndex_>::nonZeros() const {
  if (m_mode == IsSparse)
    return m_llSize;
  else
    return m_end - m_start;
}

template <typename Scalar_, typename StorageIndex_>
void AmbiVector<Scalar_, StorageIndex_>::init(double estimatedDensity) {
  if (estimatedDensity > 0.1)
    init(IsDense);
  else
    init(IsSparse);
}

template <typename Scalar_, typename StorageIndex_>
void AmbiVector<Scalar_, StorageIndex_>::init(int mode) {
  m_mode = mode;
  // This is only necessary in sparse mode, but we set these unconditionally to avoid some maybe-uninitialized warnings
  // if (m_mode==IsSparse)
  {
    m_llSize = 0;
    m_llStart = -1;
  }
}

template <typename Scalar_, typename StorageIndex_>
void AmbiVector<Scalar_, StorageIndex_>::restart() {
  m_llCurrent = m_llStart;
}

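// Sets the active sub-vector to zero: in dense mode the [m_start, m_end) range of the
// buffer is cleared, in sparse mode the linked list is simply emptied.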
template <typename Scalar_, typename StorageIndex_>
void AmbiVector<Scalar_, StorageIndex_>::setZero() {
  if (m_mode == IsDense) {
    for (Index i = m_start; i < m_end; ++i) m_buffer[i] = Scalar(0);
  } else {
    eigen_assert(m_mode == IsSparse);
    m_llSize = 0;
    m_llStart = -1;
  }
}

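// Returns a writable reference to the coefficient at index i, inserting it (as zero)
// if it is not stored yet. In sparse mode the search starts from the current cursor,
// so indices must be visited in increasing order between two calls to restart().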
template <typename Scalar_, typename StorageIndex_>
Scalar_& AmbiVector<Scalar_, StorageIndex_>::coeffRef(Index i) {
  if (m_mode == IsDense)
    return m_buffer[i];
  else {
    ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
    // TODO factorize the following code to reduce code generation
    eigen_assert(m_mode == IsSparse);
    if (m_llSize == 0) {
      // this is the first element
      m_llStart = 0;
      m_llCurrent = 0;
      ++m_llSize;
      llElements[0].value = Scalar(0);
      llElements[0].index = convert_index(i);
      llElements[0].next = -1;
      return llElements[0].value;
    } else if (i < llElements[m_llStart].index) {
      // this is going to be the new first element of the list
      ListEl& el = llElements[m_llSize];
      el.value = Scalar(0);
      el.index = convert_index(i);
      el.next = m_llStart;
      m_llStart = m_llSize;
      ++m_llSize;
      m_llCurrent = m_llStart;
      return el.value;
    } else {
      StorageIndex nextel = llElements[m_llCurrent].next;
      eigen_assert(i >= llElements[m_llCurrent].index &&
                   "you must call restart() before inserting an element with lower or equal index");
      while (nextel >= 0 && llElements[nextel].index <= i) {
        m_llCurrent = nextel;
        nextel = llElements[nextel].next;
      }

      if (llElements[m_llCurrent].index == i) {
        // the coefficient already exists and we found it!
        return llElements[m_llCurrent].value;
      } else {
        if (m_llSize >= m_allocatedElements) {
          reallocateSparse();
          llElements = reinterpret_cast<ListEl*>(m_buffer);
        }
        eigen_internal_assert(m_llSize < m_allocatedElements && "internal error: overflow in sparse mode");
        // let's insert a new coefficient
        ListEl& el = llElements[m_llSize];
        el.value = Scalar(0);
        el.index = convert_index(i);
        el.next = llElements[m_llCurrent].next;
        llElements[m_llCurrent].next = m_llSize;
        ++m_llSize;
        return el.value;
      }
    }
  }
}

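// Read-only lookup of the coefficient at index i; returns a reference to the shared
// zero member (m_zero) when the coefficient is not stored.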
template <typename Scalar_, typename StorageIndex_>
Scalar_& AmbiVector<Scalar_, StorageIndex_>::coeff(Index i) {
  if (m_mode == IsDense)
    return m_buffer[i];
  else {
    ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
    eigen_assert(m_mode == IsSparse);
    if ((m_llSize == 0) || (i < llElements[m_llStart].index)) {
      return m_zero;
    } else {
      Index elid = m_llStart;
      while (elid >= 0 && llElements[elid].index < i) elid = llElements[elid].next;

      if (elid >= 0 && llElements[elid].index == i)
        return llElements[elid].value;
      else
        return m_zero;
    }
  }
}

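// Iterator over the nonzero coefficients of an AmbiVector, valid in both modes.
// Coefficients whose magnitude is smaller than or equal to the given epsilon are skipped.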
template <typename Scalar_, typename StorageIndex_>
class AmbiVector<Scalar_, StorageIndex_>::Iterator {
 public:
  typedef Scalar_ Scalar;
  typedef typename NumTraits<Scalar>::Real RealScalar;

  explicit Iterator(const AmbiVector& vec, const RealScalar& epsilon = 0) : m_vector(vec) {
    using std::abs;
    m_epsilon = epsilon;
    m_isDense = m_vector.m_mode == IsDense;
    if (m_isDense) {
      m_currentEl = 0;    // this is to avoid a compilation warning
      m_cachedValue = 0;  // this is to avoid a compilation warning
      m_cachedIndex = m_vector.m_start - 1;
      ++(*this);
    } else {
      ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
      m_currentEl = m_vector.m_llStart;
      while (m_currentEl >= 0 && abs(llElements[m_currentEl].value) <= m_epsilon)
        m_currentEl = llElements[m_currentEl].next;
      if (m_currentEl < 0) {
        m_cachedValue = 0;  // this is to avoid a compilation warning
        m_cachedIndex = -1;
      } else {
        m_cachedIndex = llElements[m_currentEl].index;
        m_cachedValue = llElements[m_currentEl].value;
      }
    }
  }

  StorageIndex index() const { return m_cachedIndex; }
  Scalar value() const { return m_cachedValue; }

  operator bool() const { return m_cachedIndex >= 0; }

  Iterator& operator++() {
    using std::abs;
    if (m_isDense) {
      do {
        ++m_cachedIndex;
      } while (m_cachedIndex < m_vector.m_end && abs(m_vector.m_buffer[m_cachedIndex]) <= m_epsilon);
      if (m_cachedIndex < m_vector.m_end)
        m_cachedValue = m_vector.m_buffer[m_cachedIndex];
      else
        m_cachedIndex = -1;
    } else {
      ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
      do {
        m_currentEl = llElements[m_currentEl].next;
      } while (m_currentEl >= 0 && abs(llElements[m_currentEl].value) <= m_epsilon);
      if (m_currentEl < 0) {
        m_cachedIndex = -1;
      } else {
        m_cachedIndex = llElements[m_currentEl].index;
        m_cachedValue = llElements[m_currentEl].value;
      }
    }
    return *this;
  }

 protected:
  const AmbiVector& m_vector;  // the target vector
  StorageIndex m_currentEl;    // the current element in sparse/linked-list mode
  RealScalar m_epsilon;        // epsilon used to prune zero coefficients
  StorageIndex m_cachedIndex;  // current coordinate
  Scalar m_cachedValue;        // current value
  bool m_isDense;              // mode of the vector
};

}  // end namespace internal

}  // end namespace Eigen

#endif  // EIGEN_AMBIVECTOR_H
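
For illustration only, here is a minimal usage sketch of the API listed above. AmbiVector is an internal type, not part of Eigen's public API, so this is not an officially supported usage; it assumes that including Eigen/SparseCore makes Eigen::internal::AmbiVector visible. The vector is initialized with an estimated density (here low enough to select sparse/linked-list mode), filled through coeffRef() in increasing index order, and read back through its Iterator, which skips values whose magnitude is at most epsilon (0 by default).

// Sketch only: drives the internal AmbiVector API shown in the listing above.
#include <Eigen/SparseCore>
#include <iostream>

int main() {
  using AV = Eigen::internal::AmbiVector<double, int>;

  AV vec(8);       // workspace for a vector of size 8
  vec.init(0.05);  // estimated density of 5% selects sparse (linked-list) mode
  vec.setBounds(0, vec.size());
  vec.setZero();

  // Insert coefficients in increasing index order (required between restart() calls).
  vec.restart();
  vec.coeffRef(2) += 1.5;
  vec.coeffRef(5) = -3.0;
  vec.coeffRef(5) += 0.5;  // same index again is fine: the existing node is reused

  std::cout << "nonZeros: " << vec.nonZeros() << "\n";
  for (AV::Iterator it(vec); it; ++it)  // skips entries with |value| <= epsilon (default 0)
    std::cout << "  [" << it.index() << "] = " << it.value() << "\n";
  return 0;
}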