Eigen  3.4.90 (git rev 5a9f66fb35d03a4da9ef8976e67a61b30aa16dcf)
 
Loading...
Searching...
No Matches
Redux.h
1// This file is part of Eigen, a lightweight C++ template library
2// for linear algebra.
3//
4// Copyright (C) 2008 Gael Guennebaud <[email protected]>
5// Copyright (C) 2006-2008 Benoit Jacob <[email protected]>
6//
7// This Source Code Form is subject to the terms of the Mozilla
8// Public License v. 2.0. If a copy of the MPL was not distributed
9// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
11#ifndef EIGEN_REDUX_H
12#define EIGEN_REDUX_H
13
14// IWYU pragma: private
15#include "./InternalHeaderCheck.h"
16
17namespace Eigen {
18
19namespace internal {
20
21// TODO
22// * implement other kind of vectorization
23// * factorize code
24
25/***************************************************************************
26 * Part 1 : the logic deciding a strategy for vectorization and unrolling
27 ***************************************************************************/
28
// Compile-time strategy selection for a reduction: picks the packet type,
// the traversal kind (linear/sliced/vectorized/default) and whether to
// fully unroll, based on the evaluator's flags, sizes and the functor's cost.
template <typename Func, typename Evaluator>
struct redux_traits {
 public:
  // Widest packet type usable for this evaluator's scalar and compile-time size.
  typedef typename find_best_packet<typename Evaluator::Scalar, Evaluator::SizeAtCompileTime>::type PacketType;
  enum {
    PacketSize = unpacket_traits<PacketType>::size,
    // Inner/outer dimensions expressed in storage order.
    InnerMaxSize = int(Evaluator::IsRowMajor) ? Evaluator::MaxColsAtCompileTime : Evaluator::MaxRowsAtCompileTime,
    OuterMaxSize = int(Evaluator::IsRowMajor) ? Evaluator::MaxRowsAtCompileTime : Evaluator::MaxColsAtCompileTime,
    // Estimated number of packet operations a sliced (per-inner-vector)
    // vectorization would perform: Dynamic when not known at compile time,
    // 0 when the inner dimension cannot even hold one packet.
    SliceVectorizedWork = int(InnerMaxSize) == Dynamic ? Dynamic
                          : int(OuterMaxSize) == Dynamic ? (int(InnerMaxSize) >= int(PacketSize) ? Dynamic : 0)
                          : (int(InnerMaxSize) / int(PacketSize)) * int(OuterMaxSize)
  };

  enum {
    MayLinearize = (int(Evaluator::Flags) & LinearAccessBit),
    MightVectorize = (int(Evaluator::Flags) & ActualPacketAccessBit) && (functor_traits<Func>::PacketAccess),
    MayLinearVectorize = bool(MightVectorize) && bool(MayLinearize),
    // Slice-vectorize only when the expected packet work is unknown (Dynamic)
    // or large enough (>= 3 packet ops) to pay off.
    MaySliceVectorize = bool(MightVectorize) && (int(SliceVectorizedWork) == Dynamic || int(SliceVectorizedWork) >= 3)
  };

 public:
  enum {
    // Preference order: linear-vectorized, then sliced-vectorized, then
    // linear scalar, and finally the generic default traversal.
    Traversal = int(MayLinearVectorize) ? int(LinearVectorizedTraversal)
                : int(MaySliceVectorize) ? int(SliceVectorizedTraversal)
                : int(MayLinearize) ? int(LinearTraversal)
                : int(DefaultTraversal)
  };

 public:
  enum {
    // Rough cost model driving the unrolling decision; dynamic sizes are
    // treated as prohibitively expensive (HugeCost).
    Cost = Evaluator::SizeAtCompileTime == Dynamic
               ? HugeCost
               : int(Evaluator::SizeAtCompileTime) * int(Evaluator::CoeffReadCost) +
                     (Evaluator::SizeAtCompileTime - 1) * functor_traits<Func>::Cost,
    // Vectorized traversals get a limit scaled by the packet size.
    UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Traversal) == int(DefaultTraversal) ? 1 : int(PacketSize))
  };

 public:
  enum { Unrolling = Cost <= UnrollingLimit ? CompleteUnrolling : NoUnrolling };

#ifdef EIGEN_DEBUG_ASSIGN
  // Dumps every decision above to stderr; compiled only with EIGEN_DEBUG_ASSIGN.
  static void debug() {
    std::cerr << "Xpr: " << typeid(typename Evaluator::XprType).name() << std::endl;
    std::cerr.setf(std::ios::hex, std::ios::basefield);
    EIGEN_DEBUG_VAR(Evaluator::Flags)
    std::cerr.unsetf(std::ios::hex);
    EIGEN_DEBUG_VAR(InnerMaxSize)
    EIGEN_DEBUG_VAR(OuterMaxSize)
    EIGEN_DEBUG_VAR(SliceVectorizedWork)
    EIGEN_DEBUG_VAR(PacketSize)
    EIGEN_DEBUG_VAR(MightVectorize)
    EIGEN_DEBUG_VAR(MayLinearVectorize)
    EIGEN_DEBUG_VAR(MaySliceVectorize)
    std::cerr << "Traversal"
              << " = " << Traversal << " (" << demangle_traversal(Traversal) << ")" << std::endl;
    EIGEN_DEBUG_VAR(UnrollingLimit)
    std::cerr << "Unrolling"
              << " = " << Unrolling << " (" << demangle_unrolling(Unrolling) << ")" << std::endl;
    std::cerr << std::endl;
  }
#endif
};
91
92/***************************************************************************
93 * Part 2 : unrollers
94 ***************************************************************************/
95
96/*** no vectorization ***/
97
98template <typename Func, typename Evaluator, Index Start, Index Length>
99struct redux_novec_unroller {
100 static constexpr Index HalfLength = Length / 2;
101
102 typedef typename Evaluator::Scalar Scalar;
103
104 EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func) {
105 return func(redux_novec_unroller<Func, Evaluator, Start, HalfLength>::run(eval, func),
106 redux_novec_unroller<Func, Evaluator, Start + HalfLength, Length - HalfLength>::run(eval, func));
107 }
108};
109
110template <typename Func, typename Evaluator, Index Start>
111struct redux_novec_unroller<Func, Evaluator, Start, 1> {
112 static constexpr Index outer = Start / Evaluator::InnerSizeAtCompileTime;
113 static constexpr Index inner = Start % Evaluator::InnerSizeAtCompileTime;
114
115 typedef typename Evaluator::Scalar Scalar;
116
117 EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func&) {
118 return eval.coeffByOuterInner(outer, inner);
119 }
120};
121
122// This is actually dead code and will never be called. It is required
123// to prevent false warnings regarding failed inlining though
124// for 0 length run() will never be called at all.
125template <typename Func, typename Evaluator, Index Start>
126struct redux_novec_unroller<Func, Evaluator, Start, 0> {
127 typedef typename Evaluator::Scalar Scalar;
128 EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator&, const Func&) { return Scalar(); }
129};
130
131template <typename Func, typename Evaluator, Index Start, Index Length>
132struct redux_novec_linear_unroller {
133 static constexpr Index HalfLength = Length / 2;
134
135 typedef typename Evaluator::Scalar Scalar;
136
137 EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func) {
138 return func(redux_novec_linear_unroller<Func, Evaluator, Start, HalfLength>::run(eval, func),
139 redux_novec_linear_unroller<Func, Evaluator, Start + HalfLength, Length - HalfLength>::run(eval, func));
140 }
141};
142
143template <typename Func, typename Evaluator, Index Start>
144struct redux_novec_linear_unroller<Func, Evaluator, Start, 1> {
145 typedef typename Evaluator::Scalar Scalar;
146
147 EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func&) {
148 return eval.coeff(Start);
149 }
150};
151
152// This is actually dead code and will never be called. It is required
153// to prevent false warnings regarding failed inlining though
154// for 0 length run() will never be called at all.
155template <typename Func, typename Evaluator, Index Start>
156struct redux_novec_linear_unroller<Func, Evaluator, Start, 0> {
157 typedef typename Evaluator::Scalar Scalar;
158 EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator&, const Func&) { return Scalar(); }
159};
160
161/*** vectorization ***/
162
163template <typename Func, typename Evaluator, Index Start, Index Length>
164struct redux_vec_unroller {
165 template <typename PacketType>
166 EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator& eval, const Func& func) {
167 constexpr Index HalfLength = Length / 2;
168
169 return func.packetOp(
170 redux_vec_unroller<Func, Evaluator, Start, HalfLength>::template run<PacketType>(eval, func),
171 redux_vec_unroller<Func, Evaluator, Start + HalfLength, Length - HalfLength>::template run<PacketType>(eval,
172 func));
173 }
174};
175
176template <typename Func, typename Evaluator, Index Start>
177struct redux_vec_unroller<Func, Evaluator, Start, 1> {
178 template <typename PacketType>
179 EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator& eval, const Func&) {
180 constexpr Index PacketSize = unpacket_traits<PacketType>::size;
181 constexpr Index index = Start * PacketSize;
182 constexpr Index outer = index / int(Evaluator::InnerSizeAtCompileTime);
183 constexpr Index inner = index % int(Evaluator::InnerSizeAtCompileTime);
184 constexpr int alignment = Evaluator::Alignment;
185
186 return eval.template packetByOuterInner<alignment, PacketType>(outer, inner);
187 }
188};
189
190template <typename Func, typename Evaluator, Index Start, Index Length>
191struct redux_vec_linear_unroller {
192 template <typename PacketType>
193 EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator& eval, const Func& func) {
194 constexpr Index HalfLength = Length / 2;
195
196 return func.packetOp(
197 redux_vec_linear_unroller<Func, Evaluator, Start, HalfLength>::template run<PacketType>(eval, func),
198 redux_vec_linear_unroller<Func, Evaluator, Start + HalfLength, Length - HalfLength>::template run<PacketType>(
199 eval, func));
200 }
201};
202
203template <typename Func, typename Evaluator, Index Start>
204struct redux_vec_linear_unroller<Func, Evaluator, Start, 1> {
205 template <typename PacketType>
206 EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator& eval, const Func&) {
207 constexpr Index PacketSize = unpacket_traits<PacketType>::size;
208 constexpr Index index = (Start * PacketSize);
209 constexpr int alignment = Evaluator::Alignment;
210 return eval.template packet<alignment, PacketType>(index);
211 }
212};
213
214/***************************************************************************
215 * Part 3 : implementation of all cases
216 ***************************************************************************/
217
// Reduction dispatcher: specialized below on the (Traversal, Unrolling) pair
// selected at compile time by redux_traits.
template <typename Func, typename Evaluator, int Traversal = redux_traits<Func, Evaluator>::Traversal,
          int Unrolling = redux_traits<Func, Evaluator>::Unrolling>
struct redux_impl;
221
222template <typename Func, typename Evaluator>
223struct redux_impl<Func, Evaluator, DefaultTraversal, NoUnrolling> {
224 typedef typename Evaluator::Scalar Scalar;
225
226 template <typename XprType>
227 EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func, const XprType& xpr) {
228 eigen_assert(xpr.rows() > 0 && xpr.cols() > 0 && "you are using an empty matrix");
229 Scalar res = eval.coeffByOuterInner(0, 0);
230 for (Index i = 1; i < xpr.innerSize(); ++i) res = func(res, eval.coeffByOuterInner(0, i));
231 for (Index i = 1; i < xpr.outerSize(); ++i)
232 for (Index j = 0; j < xpr.innerSize(); ++j) res = func(res, eval.coeffByOuterInner(i, j));
233 return res;
234 }
235};
236
237template <typename Func, typename Evaluator>
238struct redux_impl<Func, Evaluator, LinearTraversal, NoUnrolling> {
239 typedef typename Evaluator::Scalar Scalar;
240
241 template <typename XprType>
242 EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func, const XprType& xpr) {
243 eigen_assert(xpr.size() > 0 && "you are using an empty matrix");
244 Scalar res = eval.coeff(0);
245 for (Index k = 1; k < xpr.size(); ++k) res = func(res, eval.coeff(k));
246 return res;
247 }
248};
249
250template <typename Func, typename Evaluator>
251struct redux_impl<Func, Evaluator, DefaultTraversal, CompleteUnrolling>
252 : redux_novec_unroller<Func, Evaluator, 0, Evaluator::SizeAtCompileTime> {
253 typedef redux_novec_unroller<Func, Evaluator, 0, Evaluator::SizeAtCompileTime> Base;
254 typedef typename Evaluator::Scalar Scalar;
255 template <typename XprType>
256 EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func,
257 const XprType& /*xpr*/) {
258 return Base::run(eval, func);
259 }
260};
261
262template <typename Func, typename Evaluator>
263struct redux_impl<Func, Evaluator, LinearTraversal, CompleteUnrolling>
264 : redux_novec_linear_unroller<Func, Evaluator, 0, Evaluator::SizeAtCompileTime> {
265 typedef redux_novec_linear_unroller<Func, Evaluator, 0, Evaluator::SizeAtCompileTime> Base;
266 typedef typename Evaluator::Scalar Scalar;
267 template <typename XprType>
268 EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func,
269 const XprType& /*xpr*/) {
270 return Base::run(eval, func);
271 }
272};
273
// Linear vectorized reduction without unrolling:
//  - scalar prologue up to the first default-aligned coefficient,
//  - main loop consuming two packets per iteration with two independent
//    accumulators (breaks the dependency chain),
//  - at most one extra single-packet step, then a scalar epilogue.
template <typename Func, typename Evaluator>
struct redux_impl<Func, Evaluator, LinearVectorizedTraversal, NoUnrolling> {
  typedef typename Evaluator::Scalar Scalar;
  typedef typename redux_traits<Func, Evaluator>::PacketType PacketScalar;

  template <typename XprType>
  static Scalar run(const Evaluator& eval, const Func& func, const XprType& xpr) {
    const Index size = xpr.size();

    constexpr Index packetSize = redux_traits<Func, Evaluator>::PacketSize;
    constexpr int packetAlignment = unpacket_traits<PacketScalar>::alignment;
    // Aligned loads require direct memory access and scalar-alignable packets;
    // otherwise fall back to Unaligned.
    constexpr int alignment0 =
        (bool(Evaluator::Flags & DirectAccessBit) && bool(packet_traits<Scalar>::AlignedOnScalar))
            ? int(packetAlignment)
            : int(Unaligned);
    constexpr int alignment = plain_enum_max(alignment0, Evaluator::Alignment);
    const Index alignedStart = internal::first_default_aligned(xpr);
    // alignedSize2: coefficients consumed by the two-packet main loop;
    // alignedSize: all packet-covered coefficients (multiple of packetSize).
    const Index alignedSize2 = ((size - alignedStart) / (2 * packetSize)) * (2 * packetSize);
    const Index alignedSize = ((size - alignedStart) / (packetSize)) * (packetSize);
    const Index alignedEnd2 = alignedStart + alignedSize2;
    const Index alignedEnd = alignedStart + alignedSize;
    Scalar res;
    if (alignedSize) {
      PacketScalar packet_res0 = eval.template packet<alignment, PacketScalar>(alignedStart);
      if (alignedSize > packetSize) // we have at least two packets to partly unroll the loop
      {
        PacketScalar packet_res1 = eval.template packet<alignment, PacketScalar>(alignedStart + packetSize);
        for (Index index = alignedStart + 2 * packetSize; index < alignedEnd2; index += 2 * packetSize) {
          packet_res0 = func.packetOp(packet_res0, eval.template packet<alignment, PacketScalar>(index));
          packet_res1 = func.packetOp(packet_res1, eval.template packet<alignment, PacketScalar>(index + packetSize));
        }

        // Merge the two accumulators, then fold in a possible last lone packet.
        packet_res0 = func.packetOp(packet_res0, packet_res1);
        if (alignedEnd > alignedEnd2)
          packet_res0 = func.packetOp(packet_res0, eval.template packet<alignment, PacketScalar>(alignedEnd2));
      }
      res = func.predux(packet_res0);

      // Scalar prologue: coefficients before the first aligned one.
      for (Index index = 0; index < alignedStart; ++index) res = func(res, eval.coeff(index));

      // Scalar epilogue: leftover coefficients that do not fill a packet.
      for (Index index = alignedEnd; index < size; ++index) res = func(res, eval.coeff(index));
    } else // too small to vectorize anything.
    // since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize.
    {
      res = eval.coeff(0);
      for (Index index = 1; index < size; ++index) res = func(res, eval.coeff(index));
    }

    return res;
  }
};
325
// NOTE: for SliceVectorizedTraversal we simply bypass unrolling
// Sliced vectorization: each inner vector is reduced with unaligned packet
// loads, then the scalar tails of every inner vector are folded in.
template <typename Func, typename Evaluator, int Unrolling>
struct redux_impl<Func, Evaluator, SliceVectorizedTraversal, Unrolling> {
  typedef typename Evaluator::Scalar Scalar;
  typedef typename redux_traits<Func, Evaluator>::PacketType PacketType;

  template <typename XprType>
  EIGEN_DEVICE_FUNC static Scalar run(const Evaluator& eval, const Func& func, const XprType& xpr) {
    eigen_assert(xpr.rows() > 0 && xpr.cols() > 0 && "you are using an empty matrix");
    constexpr Index packetSize = redux_traits<Func, Evaluator>::PacketSize;
    const Index innerSize = xpr.innerSize();
    const Index outerSize = xpr.outerSize();
    // Portion of each inner vector coverable by whole packets.
    const Index packetedInnerSize = ((innerSize) / packetSize) * packetSize;
    Scalar res;
    if (packetedInnerSize) {
      // Seed with the first packet; the first inner vector then starts at its
      // second packet, all other inner vectors start at their first.
      PacketType packet_res = eval.template packet<Unaligned, PacketType>(0, 0);
      for (Index j = 0; j < outerSize; ++j)
        for (Index i = (j == 0 ? packetSize : 0); i < packetedInnerSize; i += Index(packetSize))
          packet_res = func.packetOp(packet_res, eval.template packetByOuterInner<Unaligned, PacketType>(j, i));

      res = func.predux(packet_res);
      // Scalar remainder of every inner vector.
      for (Index j = 0; j < outerSize; ++j)
        for (Index i = packetedInnerSize; i < innerSize; ++i) res = func(res, eval.coeffByOuterInner(j, i));
    } else // too small to vectorize anything.
    // since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize.
    {
      res = redux_impl<Func, Evaluator, DefaultTraversal, NoUnrolling>::run(eval, func, xpr);
    }

    return res;
  }
};
358
// Fully unrolled vectorized reduction: the packet-covered part is handled by
// redux_vec_linear_unroller, the scalar remainder by redux_novec_linear_unroller.
template <typename Func, typename Evaluator>
struct redux_impl<Func, Evaluator, LinearVectorizedTraversal, CompleteUnrolling> {
  typedef typename Evaluator::Scalar Scalar;

  typedef typename redux_traits<Func, Evaluator>::PacketType PacketType;
  static constexpr Index PacketSize = redux_traits<Func, Evaluator>::PacketSize;
  static constexpr Index Size = Evaluator::SizeAtCompileTime;
  // Number of coefficients covered by whole packets (the rest stays scalar).
  static constexpr Index VectorizedSize = (int(Size) / int(PacketSize)) * int(PacketSize);

  template <typename XprType>
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func, const XprType& xpr) {
    EIGEN_ONLY_USED_FOR_DEBUG(xpr)
    eigen_assert(xpr.rows() > 0 && xpr.cols() > 0 && "you are using an empty matrix");
    if (VectorizedSize > 0) {
      Scalar res = func.predux(
          redux_vec_linear_unroller<Func, Evaluator, 0, Size / PacketSize>::template run<PacketType>(eval, func));
      // Fold in the scalar tail when Size is not a multiple of PacketSize.
      if (VectorizedSize != Size)
        res = func(
            res, redux_novec_linear_unroller<Func, Evaluator, VectorizedSize, Size - VectorizedSize>::run(eval, func));
      return res;
    } else {
      // Too small for even one packet: fall back to the scalar unroller.
      return redux_novec_linear_unroller<Func, Evaluator, 0, Size>::run(eval, func);
    }
  }
};
384
// evaluator adaptor
// Thin wrapper around internal::evaluator<XprType> adding the (outer, inner)
// indexed accessors used by the reduction implementations, and stripping
// DirectAccessBit from the flags (see TODO below).
template <typename XprType_>
class redux_evaluator : public internal::evaluator<XprType_> {
  typedef internal::evaluator<XprType_> Base;

 public:
  typedef XprType_ XprType;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit redux_evaluator(const XprType& xpr) : Base(xpr) {}

  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename XprType::PacketScalar PacketScalar;

  enum {
    MaxRowsAtCompileTime = XprType::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = XprType::MaxColsAtCompileTime,
    // TODO we should not remove DirectAccessBit and rather find an elegant way to query the alignment offset at runtime
    // from the evaluator
    Flags = Base::Flags & ~DirectAccessBit,
    IsRowMajor = XprType::IsRowMajor,
    SizeAtCompileTime = XprType::SizeAtCompileTime,
    InnerSizeAtCompileTime = XprType::InnerSizeAtCompileTime
  };

  // Map an (outer, inner) pair to (row, col) according to the storage order.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffByOuterInner(Index outer, Index inner) const {
    return Base::coeff(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer);
  }

  // Packet load addressed by an (outer, inner) pair; same mapping as above.
  template <int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetByOuterInner(Index outer, Index inner) const {
    return Base::template packet<LoadMode, PacketType>(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer);
  }
};
419} // end namespace internal
420
421/***************************************************************************
422 * Part 4 : public API
423 ***************************************************************************/
424
434template <typename Derived>
435template <typename Func>
436EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::redux(
437 const Func& func) const {
438 eigen_assert(this->rows() > 0 && this->cols() > 0 && "you are using an empty matrix");
439
440 typedef typename internal::redux_evaluator<Derived> ThisEvaluator;
441 ThisEvaluator thisEval(derived());
442
443 // The initial expression is passed to the reducer as an additional argument instead of
444 // passing it as a member of redux_evaluator to help
445 return internal::redux_impl<Func, ThisEvaluator>::run(thisEval, func, derived());
446}
447
455template <typename Derived>
456template <int NaNPropagation>
457EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::minCoeff() const {
458 return derived().redux(Eigen::internal::scalar_min_op<Scalar, Scalar, NaNPropagation>());
459}
460
468template <typename Derived>
469template <int NaNPropagation>
470EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::maxCoeff() const {
471 return derived().redux(Eigen::internal::scalar_max_op<Scalar, Scalar, NaNPropagation>());
472}
473
480template <typename Derived>
481EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::sum() const {
482 if (SizeAtCompileTime == 0 || (SizeAtCompileTime == Dynamic && size() == 0)) return Scalar(0);
483 return derived().redux(Eigen::internal::scalar_sum_op<Scalar, Scalar>());
484}
485
/** \returns the arithmetic mean: the sum of all coefficients divided by size(). */
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::mean() const {
#ifdef __INTEL_COMPILER
// Silence ICC remark 2259 (non-pointer conversion) for the division below.
#pragma warning push
#pragma warning(disable : 2259)
#endif
  return Scalar(derived().redux(Eigen::internal::scalar_sum_op<Scalar, Scalar>())) / Scalar(this->size());
#ifdef __INTEL_COMPILER
#pragma warning pop
#endif
}
501
509template <typename Derived>
510EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::prod() const {
511 if (SizeAtCompileTime == 0 || (SizeAtCompileTime == Dynamic && size() == 0)) return Scalar(1);
512 return derived().redux(Eigen::internal::scalar_product_op<Scalar>());
513}
514
/** \returns the trace, i.e. the sum of the diagonal coefficients. */
template <typename Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar MatrixBase<Derived>::trace() const {
  return derived().diagonal().sum();
}
525
526} // end namespace Eigen
527
528#endif // EIGEN_REDUX_H
Base class for all dense matrices, vectors, and arrays.
Definition DenseBase.h:44
internal::traits< Derived >::Scalar minCoeff() const
Definition Redux.h:457
Scalar mean() const
Definition Redux.h:491
internal::traits< Derived >::Scalar Scalar
Definition DenseBase.h:62
internal::traits< Derived >::Scalar maxCoeff() const
Definition Redux.h:470
Scalar sum() const
Definition Redux.h:481
Scalar prod() const
Definition Redux.h:510
Scalar trace() const
Definition Redux.h:522
const unsigned int ActualPacketAccessBit
Definition Constants.h:108
const unsigned int LinearAccessBit
Definition Constants.h:133
Namespace containing all symbols from the Eigen library.
Definition Core:137
const int HugeCost
Definition Constants.h:48
EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
The Index type as used for the API.
Definition Meta.h:83
const int Dynamic
Definition Constants.h:25