ruby-eigen 0.0.10 → 0.0.11.pre1

New file added in this release: Eigen's IncompleteLUT.h (IterativeLinearSolvers module).
@@ -0,0 +1,478 @@
+ // This file is part of Eigen, a lightweight C++ template library
+ // for linear algebra.
+ //
+ // Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+ //
+ // This Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ #ifndef EIGEN_INCOMPLETE_LUT_H
+ #define EIGEN_INCOMPLETE_LUT_H
+
+
+ namespace Eigen {
+
+ namespace internal {
+
+ /** \internal
+  * Compute a quick-sort split of a vector.
+  * On output, the vector row is permuted such that its elements satisfy
+  * abs(row(i)) >= abs(row(ncut)) if i<ncut
+  * abs(row(i)) <= abs(row(ncut)) if i>ncut
+  * \param row The vector of values
+  * \param ind The array of indices for the elements in @p row
+  * \param ncut The number of largest elements to keep
+  **/
+ template <typename VectorV, typename VectorI, typename Index>
+ Index QuickSplit(VectorV &row, VectorI &ind, Index ncut)
+ {
+   typedef typename VectorV::RealScalar RealScalar;
+   using std::swap;
+   using std::abs;
+   Index mid;
+   Index n = row.size(); /* length of the vector */
+   Index first, last;
+
+   ncut--; /* to fit the zero-based indices */
+   first = 0;
+   last = n-1;
+   if (ncut < first || ncut > last) return 0;
+
+   do {
+     mid = first;
+     RealScalar abskey = abs(row(mid));
+     for (Index j = first + 1; j <= last; j++) {
+       if (abs(row(j)) > abskey) {
+         ++mid;
+         swap(row(mid), row(j));
+         swap(ind(mid), ind(j));
+       }
+     }
+     /* Interchange for the pivot element */
+     swap(row(mid), row(first));
+     swap(ind(mid), ind(first));
+
+     if (mid > ncut) last = mid - 1;
+     else if (mid < ncut) first = mid + 1;
+   } while (mid != ncut);
+
+   return 0; /* mid is equal to ncut */
+ }
+
+ }// end namespace internal
+
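For illustration, a minimal sketch of what QuickSplit does (the values are made up, and the snippet assumes this header is reachable through <Eigen/IterativeLinearSolvers>): with ncut = 2, the two entries of largest magnitude are moved to the front of the vector, and the index array is permuted alongside.

    Eigen::VectorXd row(4);
    row << 3.0, -7.0, 1.0, 5.0;
    Eigen::VectorXi ind(4);
    ind << 0, 1, 2, 3;
    Eigen::internal::QuickSplit(row, ind, 2);
    // row now starts with the two largest-magnitude values (-7 and 5, in some order);
    // ind has been permuted the same way, so ind(0) and ind(1) are 1 and 3.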
+ /** \ingroup IterativeLinearSolvers_Module
+  * \class IncompleteLUT
+  * \brief Incomplete LU factorization with dual-threshold strategy
+  *
+  * During the numerical factorization, two dropping rules are used:
+  *   1) any element whose magnitude is less than some tolerance is dropped.
+  *      This tolerance is obtained by multiplying the input tolerance @p droptol
+  *      by the average magnitude of all the original elements in the current row.
+  *   2) After the elimination of the row, only the @p fill largest elements in
+  *      the L part and the @p fill largest elements in the U part are kept
+  *      (in addition to the diagonal element). Note that @p fill is computed from
+  *      the input parameter @p fillfactor, which is used as the ratio to control the fill-in
+  *      relative to the initial number of nonzero elements.
+  *
+  * The two extreme cases are when @p droptol=0 (to keep all the @p fill*2 largest elements)
+  * and when @p fill=n/2 with @p droptol being different from zero.
+  *
+  * Reference: Yousef Saad, ILUT: A dual threshold incomplete LU factorization,
+  *            Numerical Linear Algebra with Applications, 1(4), pp 387-402, 1994.
+  *
+  * NOTE: The following implementation is derived from the ILUT implementation
+  * in the SPARSKIT package, Copyright (C) 2005, the Regents of the University of Minnesota,
+  * released under the terms of the GNU LGPL:
+  *   http://www-users.cs.umn.edu/~saad/software/SPARSKIT/README
+  * However, Yousef Saad gave us permission to relicense his ILUT code to MPL2.
+  * See the Eigen mailing list archive, thread: ILUT, date: July 8, 2012:
+  *   http://listengine.tuxfamily.org/lists.tuxfamily.org/eigen/2012/07/msg00064.html
+  * alternatively, on GMANE:
+  *   http://comments.gmane.org/gmane.comp.lib.eigen/3302
+  */
+ template <typename _Scalar>
+ class IncompleteLUT : internal::noncopyable
+ {
+     typedef _Scalar Scalar;
+     typedef typename NumTraits<Scalar>::Real RealScalar;
+     typedef Matrix<Scalar,Dynamic,1> Vector;
+     typedef SparseMatrix<Scalar,RowMajor> FactorType;
+     typedef SparseMatrix<Scalar,ColMajor> PermutType;
+     typedef typename FactorType::Index Index;
+
+   public:
+     typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;
+
+     IncompleteLUT()
+       : m_droptol(NumTraits<Scalar>::dummy_precision()), m_fillfactor(10),
+         m_analysisIsOk(false), m_factorizationIsOk(false), m_isInitialized(false)
+     {}
+
+     template<typename MatrixType>
+     IncompleteLUT(const MatrixType& mat, const RealScalar& droptol=NumTraits<Scalar>::dummy_precision(), int fillfactor = 10)
+       : m_droptol(droptol),m_fillfactor(fillfactor),
+         m_analysisIsOk(false),m_factorizationIsOk(false),m_isInitialized(false)
+     {
+       eigen_assert(fillfactor != 0);
+       compute(mat);
+     }
+
+     Index rows() const { return m_lu.rows(); }
+
+     Index cols() const { return m_lu.cols(); }
+
+     /** \brief Reports whether previous computation was successful.
+       *
+       * \returns \c Success if computation was successful,
+       *          \c NumericalIssue if the matrix appears to be negative.
+       */
+     ComputationInfo info() const
+     {
+       eigen_assert(m_isInitialized && "IncompleteLUT is not initialized.");
+       return m_info;
+     }
+
+     template<typename MatrixType>
+     void analyzePattern(const MatrixType& amat);
+
+     template<typename MatrixType>
+     void factorize(const MatrixType& amat);
+
+     /**
+       * Compute an incomplete LU factorization with dual threshold on the matrix mat.
+       * No pivoting is done in this version.
+       *
+       **/
+     template<typename MatrixType>
+     IncompleteLUT<Scalar>& compute(const MatrixType& amat)
+     {
+       analyzePattern(amat);
+       factorize(amat);
+       return *this;
+     }
+
+     void setDroptol(const RealScalar& droptol);
+     void setFillfactor(int fillfactor);
+
+     template<typename Rhs, typename Dest>
+     void _solve(const Rhs& b, Dest& x) const
+     {
+       x = m_Pinv * b;
+       x = m_lu.template triangularView<UnitLower>().solve(x);
+       x = m_lu.template triangularView<Upper>().solve(x);
+       x = m_P * x;
+     }
+
+     template<typename Rhs> inline const internal::solve_retval<IncompleteLUT, Rhs>
+     solve(const MatrixBase<Rhs>& b) const
+     {
+       eigen_assert(m_isInitialized && "IncompleteLUT is not initialized.");
+       eigen_assert(cols()==b.rows()
+                 && "IncompleteLUT::solve(): invalid number of rows of the right hand side matrix b");
+       return internal::solve_retval<IncompleteLUT, Rhs>(*this, b.derived());
+     }
+
+   protected:
+
+     /** keeps off-diagonal entries; drops diagonal entries */
+     struct keep_diag {
+       inline bool operator() (const Index& row, const Index& col, const Scalar&) const
+       {
+         return row!=col;
+       }
+     };
+
+   protected:
+
+     FactorType m_lu;
+     RealScalar m_droptol;
+     int m_fillfactor;
+     bool m_analysisIsOk;
+     bool m_factorizationIsOk;
+     bool m_isInitialized;
+     ComputationInfo m_info;
+     PermutationMatrix<Dynamic,Dynamic,Index> m_P;     // Fill-reducing permutation
+     PermutationMatrix<Dynamic,Dynamic,Index> m_Pinv;  // Inverse permutation
+ };
+
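For context, a small self-contained usage sketch (not part of this diff) showing the class plugged into BiCGSTAB as a preconditioner, which is how it is normally consumed; the test matrix and right-hand side below are arbitrary:

    #include <Eigen/Sparse>
    #include <Eigen/IterativeLinearSolvers>
    #include <iostream>
    #include <vector>

    int main()
    {
      // Arbitrary small sparse system, just to have something to factorize.
      typedef Eigen::Triplet<double> T;
      std::vector<T> coeffs;
      coeffs.push_back(T(0,0,4.0)); coeffs.push_back(T(0,1,-1.0));
      coeffs.push_back(T(1,0,-1.0)); coeffs.push_back(T(1,1,4.0)); coeffs.push_back(T(1,2,-1.0));
      coeffs.push_back(T(2,1,-1.0)); coeffs.push_back(T(2,2,4.0));
      Eigen::SparseMatrix<double> A(3,3);
      A.setFromTriplets(coeffs.begin(), coeffs.end());
      Eigen::VectorXd b = Eigen::VectorXd::Ones(3);

      // BiCGSTAB preconditioned by the incomplete LU factorization defined above.
      Eigen::BiCGSTAB<Eigen::SparseMatrix<double>, Eigen::IncompleteLUT<double> > solver;
      solver.compute(A);                    // runs analyzePattern() + factorize() on the preconditioner
      Eigen::VectorXd x = solver.solve(b);
      std::cout << "estimated error: " << solver.error() << std::endl;
      return 0;
    }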
+ /**
+  * Set control parameter droptol
+  * \param droptol Drop any element whose magnitude is less than this tolerance
+  **/
+ template<typename Scalar>
+ void IncompleteLUT<Scalar>::setDroptol(const RealScalar& droptol)
+ {
+   this->m_droptol = droptol;
+ }
+
+ /**
+  * Set control parameter fillfactor
+  * \param fillfactor This is used to compute the number @p fill_in of largest elements to keep on each row.
+  **/
+ template<typename Scalar>
+ void IncompleteLUT<Scalar>::setFillfactor(int fillfactor)
+ {
+   this->m_fillfactor = fillfactor;
+ }
+
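A sketch of how these two setters are usually reached in practice, through the preconditioner() accessor of an iterative solver; the helper name and the numeric values are ours, purely illustrative:

    #include <Eigen/Sparse>
    #include <Eigen/IterativeLinearSolvers>

    // Illustrative helper: solve A x = b with a hand-tuned ILUT preconditioner.
    Eigen::VectorXd solveWithTunedILUT(const Eigen::SparseMatrix<double>& A,
                                       const Eigen::VectorXd& b)
    {
      Eigen::BiCGSTAB<Eigen::SparseMatrix<double>, Eigen::IncompleteLUT<double> > solver;
      solver.preconditioner().setDroptol(1e-4);    // drop more aggressively than the default dummy_precision()
      solver.preconditioner().setFillfactor(20);   // allow roughly 20x the average nonzeros per row in each factor
      solver.compute(A);
      return solver.solve(b);
    }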
+ template <typename Scalar>
+ template<typename _MatrixType>
+ void IncompleteLUT<Scalar>::analyzePattern(const _MatrixType& amat)
+ {
+   // Compute the fill-reducing permutation.
+   // Since ILUT does not perform any numerical pivoting,
+   // it is highly preferable to keep the diagonal through symmetric permutations.
+ #ifndef EIGEN_MPL2_ONLY
+   // To this end, let's symmetrize the pattern and perform AMD on it.
+   SparseMatrix<Scalar,ColMajor, Index> mat1 = amat;
+   SparseMatrix<Scalar,ColMajor, Index> mat2 = amat.transpose();
+   // FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice.
+   // On the other hand, for a really non-symmetric pattern, mat2*mat1 should be preferred...
+   SparseMatrix<Scalar,ColMajor, Index> AtA = mat2 + mat1;
+   AMDOrdering<Index> ordering;
+   ordering(AtA,m_P);
+   m_Pinv = m_P.inverse(); // cache the inverse permutation
+ #else
+   // If AMD is not available (MPL2-only), then let's use the slower COLAMD routine.
+   SparseMatrix<Scalar,ColMajor, Index> mat1 = amat;
+   COLAMDOrdering<Index> ordering;
+   ordering(mat1,m_Pinv);
+   m_P = m_Pinv.inverse();
+ #endif
+
+   m_analysisIsOk = true;
+   m_factorizationIsOk = false;
+   m_isInitialized = false;
+ }
+
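The branch taken above is selected at compile time: building with EIGEN_MPL2_ONLY defined excludes the non-MPL2 AMD code, so analyzePattern() falls back to the COLAMD-based ordering. For example:

    // Keep only MPL2-licensed parts of Eigen; ILUT then orders with COLAMDOrdering.
    #define EIGEN_MPL2_ONLY
    #include <Eigen/Sparse>
    #include <Eigen/IterativeLinearSolvers>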
+ template <typename Scalar>
+ template<typename _MatrixType>
+ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
+ {
+   using std::sqrt;
+   using std::swap;
+   using std::abs;
+
+   eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix");
+   Index n = amat.cols();  // Size of the matrix
+   m_lu.resize(n,n);
+   // Declare working vectors and variables
+   Vector u(n);     // real values of the row -- maximum size is n --
+   VectorXi ju(n);  // column position of the values in u -- maximum size is n
+   VectorXi jr(n);  // Indicate the position of the nonzero elements in the vector u -- A zero location is indicated by -1
+
+   // Apply the fill-reducing permutation
+   eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+   SparseMatrix<Scalar,RowMajor, Index> mat;
+   mat = amat.twistedBy(m_Pinv);
+
+   // Initialization
+   jr.fill(-1);
+   ju.fill(0);
+   u.fill(0);
+
+   // number of largest elements to keep in each row:
+   Index fill_in = static_cast<Index> (amat.nonZeros()*m_fillfactor)/n+1;
+   if (fill_in > n) fill_in = n;
+
+   // number of largest nonzero elements to keep in the L and the U part of the current row:
+   Index nnzL = fill_in/2;
+   Index nnzU = nnzL;
+   m_lu.reserve(n * (nnzL + nnzU + 1));
+
+   // global loop over the rows of the sparse matrix
+   for (Index ii = 0; ii < n; ii++)
+   {
+     // 1 - copy the lower and the upper part of the row i of mat in the working vector u
+
+     Index sizeu = 1;  // number of nonzero elements in the upper part of the current row
+     Index sizel = 0;  // number of nonzero elements in the lower part of the current row
+     ju(ii) = ii;
+     u(ii) = 0;
+     jr(ii) = ii;
+     RealScalar rownorm = 0;
+
+     typename FactorType::InnerIterator j_it(mat, ii);  // Iterate through the current row ii
+     for (; j_it; ++j_it)
+     {
+       Index k = j_it.index();
+       if (k < ii)
+       {
+         // copy the lower part
+         ju(sizel) = k;
+         u(sizel) = j_it.value();
+         jr(k) = sizel;
+         ++sizel;
+       }
+       else if (k == ii)
+       {
+         u(ii) = j_it.value();
+       }
+       else
+       {
+         // copy the upper part
+         Index jpos = ii + sizeu;
+         ju(jpos) = k;
+         u(jpos) = j_it.value();
+         jr(k) = jpos;
+         ++sizeu;
+       }
+       rownorm += numext::abs2(j_it.value());
+     }
+
+     // 2 - detect possible zero row
+     if(rownorm==0)
+     {
+       m_info = NumericalIssue;
+       return;
+     }
+     // Take the 2-norm of the current row as a relative tolerance
+     rownorm = sqrt(rownorm);
+
+     // 3 - eliminate the previous nonzero rows
+     Index jj = 0;
+     Index len = 0;
+     while (jj < sizel)
+     {
+       // In order to eliminate in the correct order,
+       // we must select first the smallest column index among ju(jj:sizel)
+       Index k;
+       Index minrow = ju.segment(jj,sizel-jj).minCoeff(&k);  // k is relative to the segment
+       k += jj;
+       if (minrow != ju(jj))
+       {
+         // swap the two locations
+         Index j = ju(jj);
+         swap(ju(jj), ju(k));
+         jr(minrow) = jj;   jr(j) = k;
+         swap(u(jj), u(k));
+       }
+       // Reset this location
+       jr(minrow) = -1;
+
+       // Start elimination
+       typename FactorType::InnerIterator ki_it(m_lu, minrow);
+       while (ki_it && ki_it.index() < minrow) ++ki_it;
+       eigen_internal_assert(ki_it && ki_it.col()==minrow);
+       Scalar fact = u(jj) / ki_it.value();
+
+       // drop too small elements
+       if(abs(fact) <= m_droptol)
+       {
+         jj++;
+         continue;
+       }
+
+       // linear combination of the current row ii and the row minrow
+       ++ki_it;
+       for (; ki_it; ++ki_it)
+       {
+         Scalar prod = fact * ki_it.value();
+         Index j = ki_it.index();
+         Index jpos = jr(j);
+         if (jpos == -1)  // fill-in element
+         {
+           Index newpos;
+           if (j >= ii)  // dealing with the upper part
+           {
+             newpos = ii + sizeu;
+             sizeu++;
+             eigen_internal_assert(sizeu<=n);
+           }
+           else  // dealing with the lower part
+           {
+             newpos = sizel;
+             sizel++;
+             eigen_internal_assert(sizel<=ii);
+           }
+           ju(newpos) = j;
+           u(newpos) = -prod;
+           jr(j) = newpos;
+         }
+         else
+           u(jpos) -= prod;
+       }
+       // store the pivot element
+       u(len) = fact;
+       ju(len) = minrow;
+       ++len;
+
+       jj++;
+     } // end of the elimination on the row ii
+
+     // reset the upper part of the pointer jr to zero
+     for(Index k = 0; k < sizeu; k++) jr(ju(ii+k)) = -1;
+
+     // 4 - partially sort and insert the elements in the m_lu matrix
+
+     // sort the L-part of the row
+     sizel = len;
+     len = (std::min)(sizel, nnzL);
+     typename Vector::SegmentReturnType ul(u.segment(0, sizel));
+     typename VectorXi::SegmentReturnType jul(ju.segment(0, sizel));
+     internal::QuickSplit(ul, jul, len);
+
+     // store the largest m_fill elements of the L part
+     m_lu.startVec(ii);
+     for(Index k = 0; k < len; k++)
+       m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);
+
+     // store the diagonal element
+     // apply a shifting rule to avoid zero pivots (we are doing an incomplete factorization)
+     if (u(ii) == Scalar(0))
+       u(ii) = sqrt(m_droptol) * rownorm;
+     m_lu.insertBackByOuterInnerUnordered(ii, ii) = u(ii);
+
+     // sort the U-part of the row
+     // apply the dropping rule first
+     len = 0;
+     for(Index k = 1; k < sizeu; k++)
+     {
+       if(abs(u(ii+k)) > m_droptol * rownorm)
+       {
+         ++len;
+         u(ii + len) = u(ii + k);
+         ju(ii + len) = ju(ii + k);
+       }
+     }
+     sizeu = len + 1;  // +1 to take into account the diagonal element
+     len = (std::min)(sizeu, nnzU);
+     typename Vector::SegmentReturnType uu(u.segment(ii+1, sizeu-1));
+     typename VectorXi::SegmentReturnType juu(ju.segment(ii+1, sizeu-1));
+     internal::QuickSplit(uu, juu, len);
+
+     // store the largest elements of the U part
+     for(Index k = ii + 1; k < ii + len; k++)
+       m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);
+   }
+
+   m_lu.finalize();
+   m_lu.makeCompressed();
+
+   m_factorizationIsOk = true;
+   m_isInitialized = m_factorizationIsOk;
+   m_info = Success;
+ }
+
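To make the dual thresholds concrete with made-up numbers: for an n = 1000 matrix with 5000 nonzeros and the default fillfactor of 10, fill_in = 5000*10/1000 + 1 = 51, so nnzL = nnzU = 25 and at most 25 off-diagonal entries are kept in the L part and 25 in the U part of each row, plus the diagonal. Independently of that size cap, U-part entries with magnitude at most droptol times the row's 2-norm are discarded, and multipliers in the L part with magnitude at most droptol are skipped during elimination.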
+ namespace internal {
+
+ template<typename _MatrixType, typename Rhs>
+ struct solve_retval<IncompleteLUT<_MatrixType>, Rhs>
+   : solve_retval_base<IncompleteLUT<_MatrixType>, Rhs>
+ {
+   typedef IncompleteLUT<_MatrixType> Dec;
+   EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+   template<typename Dest> void evalTo(Dest& dst) const
+   {
+     dec()._solve(rhs(),dst);
+   }
+ };
+
+ } // end namespace internal
+
+ } // end namespace Eigen
+
+ #endif // EIGEN_INCOMPLETE_LUT_H
New file added in this release: Eigen's Ordering.h (OrderingMethods module).
@@ -0,0 +1,154 @@
+
+ // This file is part of Eigen, a lightweight C++ template library
+ // for linear algebra.
+ //
+ // Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+ //
+ // This Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ #ifndef EIGEN_ORDERING_H
+ #define EIGEN_ORDERING_H
+
+ namespace Eigen {
+
+ #include "Eigen_Colamd.h"
+
+ namespace internal {
+
+ /** \internal
+  * \ingroup OrderingMethods_Module
+  * \returns the symmetric pattern A^T+A from the input matrix A.
+  * FIXME: The values should not be considered here
+  */
+ template<typename MatrixType>
+ void ordering_helper_at_plus_a(const MatrixType& mat, MatrixType& symmat)
+ {
+   MatrixType C;
+   C = mat.transpose();  // NOTE: Could be costly
+   for (int i = 0; i < C.rows(); i++)
+   {
+     for (typename MatrixType::InnerIterator it(C, i); it; ++it)
+       it.valueRef() = 0.0;
+   }
+   symmat = C + mat;
+ }
+
+ }
+
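A minimal sketch of the helper in isolation (the wrapper function is ours; it assumes a square input and that this header is reachable via <Eigen/Sparse>): the result has the structurally symmetric pattern of A^T + A, keeping the values of A where A has entries and explicit zeros elsewhere.

    #include <Eigen/Sparse>

    // Illustrative wrapper: structurally symmetrize the pattern of a square sparse matrix.
    Eigen::SparseMatrix<double> symmetrizedPattern(const Eigen::SparseMatrix<double>& A)
    {
      Eigen::SparseMatrix<double> S;
      Eigen::internal::ordering_helper_at_plus_a(A, S);  // S has the pattern of A^T + A
      return S;
    }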
+ #ifndef EIGEN_MPL2_ONLY
+
+ /** \ingroup OrderingMethods_Module
+  * \class AMDOrdering
+  *
+  * Functor computing the \em approximate \em minimum \em degree ordering.
+  * If the matrix is not structurally symmetric, an ordering of A^T+A is computed.
+  * \tparam Index The type of indices of the matrix
+  * \sa COLAMDOrdering
+  */
+ template <typename Index>
+ class AMDOrdering
+ {
+   public:
+     typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
+
+     /** Compute the permutation vector from a sparse matrix.
+      * This routine is much faster if the input matrix is column-major.
+      */
+     template <typename MatrixType>
+     void operator()(const MatrixType& mat, PermutationType& perm)
+     {
+       // Compute the symmetric pattern
+       SparseMatrix<typename MatrixType::Scalar, ColMajor, Index> symm;
+       internal::ordering_helper_at_plus_a(mat,symm);
+
+       // Call the AMD routine
+       //m_mat.prune(keep_diag());
+       internal::minimum_degree_ordering(symm, perm);
+     }
+
+     /** Compute the permutation with a selfadjoint matrix */
+     template <typename SrcType, unsigned int SrcUpLo>
+     void operator()(const SparseSelfAdjointView<SrcType, SrcUpLo>& mat, PermutationType& perm)
+     {
+       SparseMatrix<typename SrcType::Scalar, ColMajor, Index> C; C = mat;
+
+       // Call the AMD routine
+       // m_mat.prune(keep_diag()); //Remove the diagonal elements
+       internal::minimum_degree_ordering(C, perm);
+     }
+ };
+
+ #endif // EIGEN_MPL2_ONLY
+
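A usage sketch (our own wrapper, not from this file): compute an AMD permutation and apply it symmetrically with twistedBy(), much as IncompleteLUT::factorize() above does with its inverse permutation. This path requires building without EIGEN_MPL2_ONLY.

    #include <Eigen/Sparse>

    // Illustrative helper: reorder a square sparse matrix with AMD.
    Eigen::SparseMatrix<double> amdPermuted(const Eigen::SparseMatrix<double>& A)
    {
      Eigen::AMDOrdering<int> ordering;
      Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> perm;
      ordering(A, perm);           // orders the pattern of A^T + A when A is not symmetric
      return A.twistedBy(perm);    // symmetric permutation of A
    }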
+ /** \ingroup OrderingMethods_Module
+  * \class NaturalOrdering
+  *
+  * Functor computing the natural ordering (identity)
+  *
+  * \note Returns an empty permutation matrix
+  * \tparam Index The type of indices of the matrix
+  */
+ template <typename Index>
+ class NaturalOrdering
+ {
+   public:
+     typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
+
+     /** Compute the permutation vector from a column-major sparse matrix */
+     template <typename MatrixType>
+     void operator()(const MatrixType& /*mat*/, PermutationType& perm)
+     {
+       perm.resize(0);
+     }
+
+ };
+
+ /** \ingroup OrderingMethods_Module
+  * \class COLAMDOrdering
+  *
+  * Functor computing the \em column \em approximate \em minimum \em degree ordering.
+  * The matrix should be in column-major and \b compressed format (see SparseMatrix::makeCompressed()).
+  */
+ template<typename Index>
+ class COLAMDOrdering
+ {
+   public:
+     typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
+     typedef Matrix<Index, Dynamic, 1> IndexVector;
+
+     /** Compute the permutation vector \a perm from the sparse matrix \a mat
+      * \warning The input sparse matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
+      */
+     template <typename MatrixType>
+     void operator() (const MatrixType& mat, PermutationType& perm)
+     {
+       eigen_assert(mat.isCompressed() && "COLAMDOrdering requires a sparse matrix in compressed mode. Call .makeCompressed() before passing it to COLAMDOrdering");
+
+       Index m = mat.rows();
+       Index n = mat.cols();
+       Index nnz = mat.nonZeros();
+       // Get the recommended value of Alen to be used by colamd
+       Index Alen = internal::colamd_recommended(nnz, m, n);
+       // Set the default parameters
+       double knobs [COLAMD_KNOBS];
+       Index stats [COLAMD_STATS];
+       internal::colamd_set_defaults(knobs);
+
+       IndexVector p(n+1), A(Alen);
+       for(Index i=0; i <= n; i++)  p(i) = mat.outerIndexPtr()[i];
+       for(Index i=0; i < nnz; i++) A(i) = mat.innerIndexPtr()[i];
+       // Call the Colamd routine to compute the ordering
+       Index info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats);
+       EIGEN_UNUSED_VARIABLE(info);
+       eigen_assert( info && "COLAMD failed " );
+
+       perm.resize(n);
+       for (Index i = 0; i < n; i++) perm.indices()(p(i)) = i;
+     }
+ };
+
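And a matching sketch for COLAMD (again our own wrapper): the input must be compressed, and the resulting permutation is typically applied to the columns, e.g. as A * perm.

    #include <Eigen/Sparse>

    // Illustrative helper: compute a COLAMD column ordering for a (possibly rectangular) sparse matrix.
    Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int>
    colamdPermutation(Eigen::SparseMatrix<double>& A)
    {
      A.makeCompressed();                 // COLAMDOrdering asserts isCompressed()
      Eigen::COLAMDOrdering<int> ordering;
      Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> perm;
      ordering(A, perm);
      return perm;
    }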
+ } // end namespace Eigen
+
+ #endif