Eigen 3.4.90 (git rev 5a9f66fb35d03a4da9ef8976e67a61b30aa16dcf)
 
SparseSelfAdjointView.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
#define EIGEN_SPARSE_SELFADJOINTVIEW_H

// IWYU pragma: private
#include "./InternalHeaderCheck.h"

namespace Eigen {

/** \ingroup SparseCore_Module
 * \class SparseSelfAdjointView
 *
 * \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
 *
 * \param MatrixType the type of the dense matrix storing the coefficients
 * \param Mode can be either \c #Lower or \c #Upper
 *
 * This class is an expression of a selfadjoint matrix from a triangular part of a matrix
 * with given dense storage of the coefficients. It is the return type of
 * SparseMatrixBase::selfadjointView() and most of the time this is the only way that it is used.
 *
 * \sa SparseMatrixBase::selfadjointView()
 */

namespace internal {

template <typename MatrixType, unsigned int Mode>
struct traits<SparseSelfAdjointView<MatrixType, Mode> > : traits<MatrixType> {};

template <int SrcMode, int DstMode, bool NonHermitian, typename MatrixType, int DestOrder>
void permute_symm_to_symm(
    const MatrixType& mat,
    SparseMatrix<typename MatrixType::Scalar, DestOrder, typename MatrixType::StorageIndex>& _dest,
    const typename MatrixType::StorageIndex* perm = 0);

template <int Mode, bool NonHermitian, typename MatrixType, int DestOrder>
void permute_symm_to_fullsymm(
    const MatrixType& mat,
    SparseMatrix<typename MatrixType::Scalar, DestOrder, typename MatrixType::StorageIndex>& _dest,
    const typename MatrixType::StorageIndex* perm = 0);

}  // namespace internal

template <typename MatrixType, unsigned int Mode_>
class SparseSelfAdjointView : public EigenBase<SparseSelfAdjointView<MatrixType, Mode_> > {
 public:
  enum {
    Mode = Mode_,
    // the transposed view references the same stored triangle, with Upper and Lower swapped
    TransposeMode = ((int(Mode) & int(Upper)) ? Lower : 0) | ((int(Mode) & int(Lower)) ? Upper : 0),
    RowsAtCompileTime = internal::traits<SparseSelfAdjointView>::RowsAtCompileTime,
    ColsAtCompileTime = internal::traits<SparseSelfAdjointView>::ColsAtCompileTime
  };

  typedef EigenBase<SparseSelfAdjointView> Base;
  typedef typename MatrixType::Scalar Scalar;
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef Matrix<StorageIndex, Dynamic, 1> VectorI;
  typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;
  typedef internal::remove_all_t<MatrixTypeNested> MatrixTypeNested_;

  explicit inline SparseSelfAdjointView(MatrixType& matrix) : m_matrix(matrix) {
    eigen_assert(rows() == cols() && "SelfAdjointView is only for square matrices");
  }

  inline Index rows() const { return m_matrix.rows(); }
  inline Index cols() const { return m_matrix.cols(); }

  /** \internal \returns a reference to the nested matrix */
  const MatrixTypeNested_& matrix() const { return m_matrix; }
  std::remove_reference_t<MatrixTypeNested>& matrix() { return m_matrix; }

  /** \returns an expression of the matrix product between a sparse self-adjoint matrix \c *this and a sparse
   * matrix \a rhs. Note that there is no algorithmic advantage over a general sparse-sparse product: the
   * self-adjoint operand is first copied into a temporary SparseMatrix before the product is computed. */
  template <typename OtherDerived>
  Product<SparseSelfAdjointView, OtherDerived> operator*(const SparseMatrixBase<OtherDerived>& rhs) const {
    return Product<SparseSelfAdjointView, OtherDerived>(*this, rhs.derived());
  }

  /** \returns an expression of the matrix product between a sparse matrix \a lhs and a sparse self-adjoint
   * matrix \a rhs. */
  template <typename OtherDerived>
  friend Product<OtherDerived, SparseSelfAdjointView> operator*(const SparseMatrixBase<OtherDerived>& lhs,
                                                                const SparseSelfAdjointView& rhs) {
    return Product<OtherDerived, SparseSelfAdjointView>(lhs.derived(), rhs);
  }

  /** Efficient sparse self-adjoint matrix times dense vector/matrix product */
  template <typename OtherDerived>
  Product<SparseSelfAdjointView, OtherDerived> operator*(const MatrixBase<OtherDerived>& rhs) const {
    return Product<SparseSelfAdjointView, OtherDerived>(*this, rhs.derived());
  }

  /** Efficient dense vector/matrix times sparse self-adjoint matrix product */
  template <typename OtherDerived>
  friend Product<OtherDerived, SparseSelfAdjointView> operator*(const MatrixBase<OtherDerived>& lhs,
                                                                const SparseSelfAdjointView& rhs) {
    return Product<OtherDerived, SparseSelfAdjointView>(lhs.derived(), rhs);
  }

  /** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
   * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
   *
   * \returns a reference to \c *this
   *
   * To perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply
   * call this function with u.adjoint(). */
  template <typename DerivedU>
  SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));

  /** \returns an expression of P H P^-1 */
  // TODO implement twists in a more evaluator friendly fashion
  SparseSymmetricPermutationProduct<MatrixTypeNested_, Mode> twistedBy(
      const PermutationMatrix<Dynamic, Dynamic, StorageIndex>& perm) const {
    return SparseSymmetricPermutationProduct<MatrixTypeNested_, Mode>(m_matrix, perm);
  }
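
  // Editorial usage sketch (not part of the original header); A, P, PAPt are hypothetical names:
  //   Eigen::SparseMatrix<double> A(n, n);                           // only one triangle is referenced
  //   Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> P(n);
  //   Eigen::SparseMatrix<double> PAPt;
  //   PAPt = A.selfadjointView<Eigen::Lower>().twistedBy(P);         // full symmetric P * A * P^-1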

  template <typename SrcMatrixType, int SrcMode>
  SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType, SrcMode>& permutedMatrix) {
    internal::call_assignment_no_alias_no_transpose(*this, permutedMatrix);
    return *this;
  }

  SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src) {
    PermutationMatrix<Dynamic, Dynamic, StorageIndex> pnull;
    return *this = src.twistedBy(pnull);
  }

  // Since we override the copy-assignment operator, we need to explicitly re-declare the copy-constructor
  EIGEN_DEFAULT_COPY_CONSTRUCTOR(SparseSelfAdjointView)

  template <typename SrcMatrixType, unsigned int SrcMode>
  SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType, SrcMode>& src) {
    PermutationMatrix<Dynamic, Dynamic, StorageIndex> pnull;
    return *this = src.twistedBy(pnull);
  }

  void resize(Index rows, Index cols) {
    EIGEN_ONLY_USED_FOR_DEBUG(rows);
    EIGEN_ONLY_USED_FOR_DEBUG(cols);
    eigen_assert(rows == this->rows() && cols == this->cols() &&
                 "SparseSelfadjointView::resize() does not actually allow resizing.");
  }

 protected:
  MatrixTypeNested m_matrix;
  // mutable VectorI m_countPerRow;
  // mutable VectorI m_countPerCol;
 private:
  template <typename Dest>
  void evalTo(Dest&) const;
};

/***************************************************************************
 * Implementation of SparseMatrixBase methods
 ***************************************************************************/

template <typename Derived>
template <unsigned int UpLo>
typename SparseMatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type
SparseMatrixBase<Derived>::selfadjointView() const {
  return SparseSelfAdjointView<const Derived, UpLo>(derived());
}

template <typename Derived>
template <unsigned int UpLo>
typename SparseMatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type
SparseMatrixBase<Derived>::selfadjointView() {
  return SparseSelfAdjointView<Derived, UpLo>(derived());
}
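
// Editorial usage sketch (not part of the original header); A, x, y are hypothetical:
//   Eigen::SparseMatrix<double> A(n, n);                         // fill only the lower triangle
//   Eigen::VectorXd y = A.selfadjointView<Eigen::Lower>() * x;   // both halves are used implicitly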

/***************************************************************************
 * Implementation of SparseSelfAdjointView methods
 ***************************************************************************/

template <typename MatrixType, unsigned int Mode>
template <typename DerivedU>
SparseSelfAdjointView<MatrixType, Mode>& SparseSelfAdjointView<MatrixType, Mode>::rankUpdate(
    const SparseMatrixBase<DerivedU>& u, const Scalar& alpha) {
  SparseMatrix<Scalar, (MatrixType::Flags & RowMajorBit) ? RowMajor : ColMajor> tmp = u * u.adjoint();
  if (alpha == Scalar(0))
    m_matrix = tmp.template triangularView<Mode>();
  else
    m_matrix += alpha * tmp.template triangularView<Mode>();

  return *this;
}
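
// Editorial usage sketch (not part of the original header); A and u are hypothetical. The update
// only writes the stored triangle of A:
//   Eigen::SparseMatrix<double> A(n, n), u(n, k);
//   A.selfadjointView<Eigen::Lower>().rankUpdate(u, 2.0);  // A += 2 * u * u^T, lower part only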

namespace internal {

// TODO: currently a selfadjoint expression has the form SelfAdjointView<.,.>; in the future,
// selfadjoint-ness should be defined by the expression traits so that Transpose<SelfAdjointView<.,.> >
// is valid (currently TriangularBase::transpose() is overloaded to make it work)
template <typename MatrixType, unsigned int Mode>
struct evaluator_traits<SparseSelfAdjointView<MatrixType, Mode> > {
  typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;
  typedef SparseSelfAdjointShape Shape;
};

struct SparseSelfAdjoint2Sparse {};

template <>
struct AssignmentKind<SparseShape, SparseSelfAdjointShape> {
  typedef SparseSelfAdjoint2Sparse Kind;
};
template <>
struct AssignmentKind<SparseSelfAdjointShape, SparseShape> {
  typedef Sparse2Sparse Kind;
};

template <typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse> {
  typedef typename DstXprType::StorageIndex StorageIndex;
  typedef internal::assign_op<typename DstXprType::Scalar, typename SrcXprType::Scalar> AssignOpType;

  template <typename DestScalar, int StorageOrder>
  static void run(SparseMatrix<DestScalar, StorageOrder, StorageIndex>& dst, const SrcXprType& src,
                  const AssignOpType& /*func*/) {
    internal::permute_symm_to_fullsymm<SrcXprType::Mode, false>(src.matrix(), dst);
  }

  // FIXME: the handling of += and -= in sparse matrices should be cleaned up so that the two
  // overloads below could be folded into this generic one:
  template <typename DestScalar, int StorageOrder, typename AssignFunc>
  static void run(SparseMatrix<DestScalar, StorageOrder, StorageIndex>& dst, const SrcXprType& src,
                  const AssignFunc& func) {
    SparseMatrix<DestScalar, StorageOrder, StorageIndex> tmp(src.rows(), src.cols());
    run(tmp, src, AssignOpType());
    call_assignment_no_alias_no_transpose(dst, tmp, func);
  }

  template <typename DestScalar, int StorageOrder>
  static void run(SparseMatrix<DestScalar, StorageOrder, StorageIndex>& dst, const SrcXprType& src,
                  const internal::add_assign_op<typename DstXprType::Scalar, typename SrcXprType::Scalar>& /* func */) {
    SparseMatrix<DestScalar, StorageOrder, StorageIndex> tmp(src.rows(), src.cols());
    run(tmp, src, AssignOpType());
    dst += tmp;
  }

  template <typename DestScalar, int StorageOrder>
  static void run(SparseMatrix<DestScalar, StorageOrder, StorageIndex>& dst, const SrcXprType& src,
                  const internal::sub_assign_op<typename DstXprType::Scalar, typename SrcXprType::Scalar>& /* func */) {
    SparseMatrix<DestScalar, StorageOrder, StorageIndex> tmp(src.rows(), src.cols());
    run(tmp, src, AssignOpType());
    dst -= tmp;
  }
};
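
// Editorial note: expressions such as the following (S, A hypothetical) are routed through the
// overloads above:
//   S = A.selfadjointView<Eigen::Lower>();   // direct expansion via permute_symm_to_fullsymm
//   S += A.selfadjointView<Eigen::Lower>();  // expanded into a temporary, then added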

} // end namespace internal

/***************************************************************************
 * Implementation of sparse self-adjoint times dense matrix
 ***************************************************************************/

namespace internal {

template <int Mode, typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res,
                                                  const AlphaType& alpha) {
  EIGEN_ONLY_USED_FOR_DEBUG(alpha);

  typedef typename internal::nested_eval<SparseLhsType, DenseRhsType::MaxColsAtCompileTime>::type SparseLhsTypeNested;
  typedef internal::remove_all_t<SparseLhsTypeNested> SparseLhsTypeNestedCleaned;
  typedef evaluator<SparseLhsTypeNestedCleaned> LhsEval;
  typedef typename LhsEval::InnerIterator LhsIterator;
  typedef typename SparseLhsType::Scalar LhsScalar;
  enum {
    LhsIsRowMajor = (LhsEval::Flags & RowMajorBit) == RowMajorBit,
    ProcessFirstHalf = ((Mode & (Upper | Lower)) == (Upper | Lower)) || ((Mode & Upper) && !LhsIsRowMajor) ||
                       ((Mode & Lower) && LhsIsRowMajor),
    ProcessSecondHalf = !ProcessFirstHalf
  };

  SparseLhsTypeNested lhs_nested(lhs);
  LhsEval lhsEval(lhs_nested);

  // work on one column at a time
  for (Index k = 0; k < rhs.cols(); ++k) {
    for (Index j = 0; j < lhs.outerSize(); ++j) {
      LhsIterator i(lhsEval, j);
      // handle diagonal coeff
      if (ProcessSecondHalf) {
        while (i && i.index() < j) ++i;
        if (i && i.index() == j) {
          res.coeffRef(j, k) += alpha * i.value() * rhs.coeff(j, k);
          ++i;
        }
      }

      // premultiplied rhs for scatters
      typename ScalarBinaryOpTraits<AlphaType, typename DenseRhsType::Scalar>::ReturnType rhs_j(alpha * rhs(j, k));
      // accumulator for partial scalar product
      typename DenseResType::Scalar res_j(0);
      for (; (ProcessFirstHalf ? i && i.index() < j : i); ++i) {
        LhsScalar lhs_ij = i.value();
        if (!LhsIsRowMajor) lhs_ij = numext::conj(lhs_ij);
        res_j += lhs_ij * rhs.coeff(i.index(), k);
        res(i.index(), k) += numext::conj(lhs_ij) * rhs_j;
      }
      res.coeffRef(j, k) += alpha * res_j;

      // handle diagonal coeff
      if (ProcessFirstHalf && i && (i.index() == j)) res.coeffRef(j, k) += alpha * i.value() * rhs.coeff(j, k);
    }
  }
}
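
// Editorial note: the routine above accumulates res += alpha * S * rhs, where S is the selfadjoint
// matrix represented by the stored triangle. Each stored off-diagonal coefficient contributes
// twice, once directly and once conjugate-transposed, so the missing half is never materialized.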

template <typename LhsView, typename Rhs, int ProductType>
struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType>
    : generic_product_impl_base<LhsView, Rhs,
                                generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType> > {
  template <typename Dest>
  static void scaleAndAddTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs, const typename Dest::Scalar& alpha) {
    typedef typename LhsView::MatrixTypeNested_ Lhs;
    typedef typename nested_eval<Lhs, Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs, Dynamic>::type RhsNested;
    LhsNested lhsNested(lhsView.matrix());
    RhsNested rhsNested(rhs);

    internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, alpha);
  }
};

template <typename Lhs, typename RhsView, int ProductType>
struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType>
    : generic_product_impl_base<Lhs, RhsView,
                                generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType> > {
  template <typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView, const typename Dest::Scalar& alpha) {
    typedef typename RhsView::MatrixTypeNested_ Rhs;
    typedef typename nested_eval<Lhs, Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs, Dynamic>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhsView.matrix());

    // transpose everything
    Transpose<Dest> dstT(dst);
    internal::sparse_selfadjoint_time_dense_product<RhsView::TransposeMode>(rhsNested.transpose(),
                                                                            lhsNested.transpose(), dstT, alpha);
  }
};

// NOTE: these two overloads are needed to evaluate the sparse selfadjoint view into a full sparse matrix
// TODO: maybe the copy could be handled by generic_product_impl so that these overloads would not be needed anymore
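// For example (A and B hypothetical), the following evaluates the view into a temporary full
// sparse matrix before the ordinary sparse*sparse product kernel runs:
//   Eigen::SparseMatrix<double> C = A.selfadjointView<Eigen::Lower>() * B;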

template <typename LhsView, typename Rhs, int ProductTag>
struct product_evaluator<Product<LhsView, Rhs, DefaultProduct>, ProductTag, SparseSelfAdjointShape, SparseShape>
    : public evaluator<typename Product<typename Rhs::PlainObject, Rhs, DefaultProduct>::PlainObject> {
  typedef Product<LhsView, Rhs, DefaultProduct> XprType;
  typedef typename XprType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;

  product_evaluator(const XprType& xpr) : m_lhs(xpr.lhs()), m_result(xpr.rows(), xpr.cols()) {
    internal::construct_at<Base>(this, m_result);
    generic_product_impl<typename Rhs::PlainObject, Rhs, SparseShape, SparseShape, ProductTag>::evalTo(m_result, m_lhs,
                                                                                                       xpr.rhs());
  }

 protected:
  typename Rhs::PlainObject m_lhs;
  PlainObject m_result;
};

template <typename Lhs, typename RhsView, int ProductTag>
struct product_evaluator<Product<Lhs, RhsView, DefaultProduct>, ProductTag, SparseShape, SparseSelfAdjointShape>
    : public evaluator<typename Product<Lhs, typename Lhs::PlainObject, DefaultProduct>::PlainObject> {
  typedef Product<Lhs, RhsView, DefaultProduct> XprType;
  typedef typename XprType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;

  product_evaluator(const XprType& xpr) : m_rhs(xpr.rhs()), m_result(xpr.rows(), xpr.cols()) {
    ::new (static_cast<Base*>(this)) Base(m_result);
    generic_product_impl<Lhs, typename Lhs::PlainObject, SparseShape, SparseShape, ProductTag>::evalTo(
        m_result, xpr.lhs(), m_rhs);
  }

 protected:
  typename Lhs::PlainObject m_rhs;
  PlainObject m_result;
};

} // namespace internal

/***************************************************************************
 * Implementation of symmetric copies and permutations
 ***************************************************************************/
namespace internal {

template <int Mode, bool NonHermitian, typename MatrixType, int DestOrder>
void permute_symm_to_fullsymm(
    const MatrixType& mat,
    SparseMatrix<typename MatrixType::Scalar, DestOrder, typename MatrixType::StorageIndex>& _dest,
    const typename MatrixType::StorageIndex* perm) {
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef typename MatrixType::Scalar Scalar;
  typedef SparseMatrix<Scalar, DestOrder, StorageIndex> Dest;
  typedef Matrix<StorageIndex, Dynamic, 1> VectorI;
  typedef evaluator<MatrixType> MatEval;
  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;

  MatEval matEval(mat);
  Dest& dest(_dest.derived());
  enum { StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor) };

  Index size = mat.rows();
  VectorI count;
  count.resize(size);
  count.setZero();
  dest.resize(size, size);
  // first pass: count the entries of each destination outer vector
  for (Index j = 0; j < size; ++j) {
    Index jp = perm ? perm[j] : j;
    for (MatIterator it(matEval, j); it; ++it) {
      Index i = it.index();
      Index r = it.row();
      Index c = it.col();
      Index ip = perm ? perm[i] : i;
      if (Mode == int(Upper | Lower))
        count[StorageOrderMatch ? jp : ip]++;
      else if (r == c)
        count[ip]++;
      else if ((Mode == Lower && r > c) || (Mode == Upper && r < c)) {
        count[ip]++;
        count[jp]++;
      }
    }
  }
  Index nnz = count.sum();

  // reserve space
  dest.resizeNonZeros(nnz);
  dest.outerIndexPtr()[0] = 0;
  for (Index j = 0; j < size; ++j) dest.outerIndexPtr()[j + 1] = dest.outerIndexPtr()[j] + count[j];
  for (Index j = 0; j < size; ++j) count[j] = dest.outerIndexPtr()[j];

  // second pass: copy the data
  for (StorageIndex j = 0; j < size; ++j) {
    for (MatIterator it(matEval, j); it; ++it) {
      StorageIndex i = internal::convert_index<StorageIndex>(it.index());
      Index r = it.row();
      Index c = it.col();

      StorageIndex jp = perm ? perm[j] : j;
      StorageIndex ip = perm ? perm[i] : i;

      if (Mode == int(Upper | Lower)) {
        Index k = count[StorageOrderMatch ? jp : ip]++;
        dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
        dest.valuePtr()[k] = it.value();
      } else if (r == c) {
        Index k = count[ip]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
      } else if (((Mode & Lower) == Lower && r > c) || ((Mode & Upper) == Upper && r < c)) {
        if (!StorageOrderMatch) std::swap(ip, jp);
        Index k = count[jp]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
        k = count[ip]++;
        dest.innerIndexPtr()[k] = jp;
        dest.valuePtr()[k] = (NonHermitian ? it.value() : numext::conj(it.value()));
      }
    }
  }
}
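
// Editorial note: permute_symm_to_fullsymm expands the single stored triangle of a selfadjoint
// matrix into a full sparse matrix, applying the optional index permutation `perm` on the fly
// (a null pointer means identity). It works in two passes: one to count entries per outer
// vector, one to fill them.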

template <int SrcMode_, int DstMode_, bool NonHermitian, typename MatrixType, int DstOrder>
void permute_symm_to_symm(const MatrixType& mat,
                          SparseMatrix<typename MatrixType::Scalar, DstOrder, typename MatrixType::StorageIndex>& _dest,
                          const typename MatrixType::StorageIndex* perm) {
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef typename MatrixType::Scalar Scalar;
  SparseMatrix<Scalar, DstOrder, StorageIndex>& dest(_dest.derived());
  typedef Matrix<StorageIndex, Dynamic, 1> VectorI;
  typedef evaluator<MatrixType> MatEval;
  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;

  enum {
    SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
    StorageOrderMatch = int(SrcOrder) == int(DstOrder),
    DstMode = DstOrder == RowMajor ? (DstMode_ == Upper ? Lower : Upper) : DstMode_,
    SrcMode = SrcOrder == RowMajor ? (SrcMode_ == Upper ? Lower : Upper) : SrcMode_
  };

  MatEval matEval(mat);

  Index size = mat.rows();
  VectorI count(size);
  count.setZero();
  dest.resize(size, size);
  for (StorageIndex j = 0; j < size; ++j) {
    StorageIndex jp = perm ? perm[j] : j;
    for (MatIterator it(matEval, j); it; ++it) {
      StorageIndex i = it.index();
      if ((int(SrcMode) == int(Lower) && i < j) || (int(SrcMode) == int(Upper) && i > j)) continue;

      StorageIndex ip = perm ? perm[i] : i;
      count[int(DstMode) == int(Lower) ? (std::min)(ip, jp) : (std::max)(ip, jp)]++;
    }
  }
  dest.outerIndexPtr()[0] = 0;
  for (Index j = 0; j < size; ++j) dest.outerIndexPtr()[j + 1] = dest.outerIndexPtr()[j] + count[j];
  dest.resizeNonZeros(dest.outerIndexPtr()[size]);
  for (Index j = 0; j < size; ++j) count[j] = dest.outerIndexPtr()[j];

  for (StorageIndex j = 0; j < size; ++j) {
    for (MatIterator it(matEval, j); it; ++it) {
      StorageIndex i = it.index();
      if ((int(SrcMode) == int(Lower) && i < j) || (int(SrcMode) == int(Upper) && i > j)) continue;

      StorageIndex jp = perm ? perm[j] : j;
      StorageIndex ip = perm ? perm[i] : i;

      Index k = count[int(DstMode) == int(Lower) ? (std::min)(ip, jp) : (std::max)(ip, jp)]++;
      dest.innerIndexPtr()[k] = int(DstMode) == int(Lower) ? (std::max)(ip, jp) : (std::min)(ip, jp);

      if (!StorageOrderMatch) std::swap(ip, jp);
      if (((int(DstMode) == int(Lower) && ip < jp) || (int(DstMode) == int(Upper) && ip > jp)))
        dest.valuePtr()[k] = (NonHermitian ? it.value() : numext::conj(it.value()));
      else
        dest.valuePtr()[k] = it.value();
    }
  }
}
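
// Editorial note: unlike permute_symm_to_fullsymm, permute_symm_to_symm keeps a single triangle,
// mapping the SrcMode triangle of the source to the DstMode triangle of the destination while
// permuting indices, again with a counting pass followed by a fill pass.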

} // namespace internal

// TODO implement twists in a more evaluator friendly fashion

namespace internal {

template <typename MatrixType, int Mode>
struct traits<SparseSymmetricPermutationProduct<MatrixType, Mode> > : traits<MatrixType> {};

} // namespace internal

template <typename MatrixType, int Mode>
class SparseSymmetricPermutationProduct : public EigenBase<SparseSymmetricPermutationProduct<MatrixType, Mode> > {
 public:
  typedef typename MatrixType::Scalar Scalar;
  typedef typename MatrixType::StorageIndex StorageIndex;
  enum {
    RowsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::RowsAtCompileTime,
    ColsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::ColsAtCompileTime
  };

 protected:
  typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> Perm;

 public:
  typedef Matrix<StorageIndex, Dynamic, 1> VectorI;
  typedef typename MatrixType::Nested MatrixTypeNested;
  typedef internal::remove_all_t<MatrixTypeNested> NestedExpression;

  SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm) : m_matrix(mat), m_perm(perm) {}

  inline Index rows() const { return m_matrix.rows(); }
  inline Index cols() const { return m_matrix.cols(); }

  const NestedExpression& matrix() const { return m_matrix; }
  const Perm& perm() const { return m_perm; }

 protected:
  MatrixTypeNested m_matrix;
  const Perm& m_perm;
};
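
// Editorial note: assigning this pseudo expression to a SparseMatrix produces the full permuted
// matrix via permute_symm_to_fullsymm, while assigning it to a SparseSelfAdjointView keeps one
// triangle via permute_symm_to_symm; both paths are dispatched by the Assignment specialization
// below.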

namespace internal {

template <typename DstXprType, typename MatrixType, int Mode, typename Scalar>
struct Assignment<DstXprType, SparseSymmetricPermutationProduct<MatrixType, Mode>,
                  internal::assign_op<Scalar, typename MatrixType::Scalar>, Sparse2Sparse> {
  typedef SparseSymmetricPermutationProduct<MatrixType, Mode> SrcXprType;
  typedef typename DstXprType::StorageIndex DstIndex;
  template <int Options>
  static void run(SparseMatrix<Scalar, Options, DstIndex>& dst, const SrcXprType& src,
                  const internal::assign_op<Scalar, typename MatrixType::Scalar>&) {
    // internal::permute_symm_to_fullsymm<Mode>(m_matrix,_dest,m_perm.indices().data());
    SparseMatrix<Scalar, (Options & RowMajor) == RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
    internal::permute_symm_to_fullsymm<Mode, false>(src.matrix(), tmp, src.perm().indices().data());
    dst = tmp;
  }

  template <typename DestType, unsigned int DestMode>
  static void run(SparseSelfAdjointView<DestType, DestMode>& dst, const SrcXprType& src,
                  const internal::assign_op<Scalar, typename MatrixType::Scalar>&) {
    internal::permute_symm_to_symm<Mode, DestMode, false>(src.matrix(), dst.matrix(), src.perm().indices().data());
  }
};

} // end namespace internal

} // end namespace Eigen

#endif  // EIGEN_SPARSE_SELFADJOINTVIEW_H