nmatrix-lapacke 0.2.0
- checksums.yaml +7 -0
- data/ext/nmatrix/data/complex.h +364 -0
- data/ext/nmatrix/data/data.h +638 -0
- data/ext/nmatrix/data/meta.h +64 -0
- data/ext/nmatrix/data/ruby_object.h +389 -0
- data/ext/nmatrix/math/asum.h +120 -0
- data/ext/nmatrix/math/cblas_enums.h +36 -0
- data/ext/nmatrix/math/cblas_templates_core.h +507 -0
- data/ext/nmatrix/math/gemm.h +241 -0
- data/ext/nmatrix/math/gemv.h +178 -0
- data/ext/nmatrix/math/getrf.h +255 -0
- data/ext/nmatrix/math/getrs.h +121 -0
- data/ext/nmatrix/math/imax.h +79 -0
- data/ext/nmatrix/math/laswp.h +165 -0
- data/ext/nmatrix/math/long_dtype.h +49 -0
- data/ext/nmatrix/math/math.h +744 -0
- data/ext/nmatrix/math/nrm2.h +160 -0
- data/ext/nmatrix/math/rot.h +117 -0
- data/ext/nmatrix/math/rotg.h +106 -0
- data/ext/nmatrix/math/scal.h +71 -0
- data/ext/nmatrix/math/trsm.h +332 -0
- data/ext/nmatrix/math/util.h +148 -0
- data/ext/nmatrix/nm_memory.h +60 -0
- data/ext/nmatrix/nmatrix.h +408 -0
- data/ext/nmatrix/ruby_constants.h +106 -0
- data/ext/nmatrix/storage/common.h +176 -0
- data/ext/nmatrix/storage/dense/dense.h +128 -0
- data/ext/nmatrix/storage/list/list.h +137 -0
- data/ext/nmatrix/storage/storage.h +98 -0
- data/ext/nmatrix/storage/yale/class.h +1139 -0
- data/ext/nmatrix/storage/yale/iterators/base.h +142 -0
- data/ext/nmatrix/storage/yale/iterators/iterator.h +130 -0
- data/ext/nmatrix/storage/yale/iterators/row.h +449 -0
- data/ext/nmatrix/storage/yale/iterators/row_stored.h +139 -0
- data/ext/nmatrix/storage/yale/iterators/row_stored_nd.h +168 -0
- data/ext/nmatrix/storage/yale/iterators/stored_diagonal.h +123 -0
- data/ext/nmatrix/storage/yale/math/transpose.h +110 -0
- data/ext/nmatrix/storage/yale/yale.h +202 -0
- data/ext/nmatrix/types.h +54 -0
- data/ext/nmatrix/util/io.h +115 -0
- data/ext/nmatrix/util/sl_list.h +143 -0
- data/ext/nmatrix/util/util.h +78 -0
- data/ext/nmatrix_lapacke/extconf.rb +200 -0
- data/ext/nmatrix_lapacke/lapacke.cpp +100 -0
- data/ext/nmatrix_lapacke/lapacke/include/lapacke.h +16445 -0
- data/ext/nmatrix_lapacke/lapacke/include/lapacke_config.h +119 -0
- data/ext/nmatrix_lapacke/lapacke/include/lapacke_mangling.h +17 -0
- data/ext/nmatrix_lapacke/lapacke/include/lapacke_mangling_with_flags.h +17 -0
- data/ext/nmatrix_lapacke/lapacke/include/lapacke_utils.h +579 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_cgeev.c +89 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_cgeev_work.c +141 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_cgesdd.c +106 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_cgesdd_work.c +158 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_cgesvd.c +94 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_cgesvd_work.c +149 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_cgetrf.c +51 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_cgetrf_work.c +83 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_cgetri.c +77 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_cgetri_work.c +89 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_cgetrs.c +56 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_cgetrs_work.c +102 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_cpotrf.c +50 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_cpotrf_work.c +82 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_cpotri.c +50 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_cpotri_work.c +82 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_cpotrs.c +55 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_cpotrs_work.c +101 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_dgeev.c +78 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_dgeev_work.c +136 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_dgesdd.c +88 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_dgesdd_work.c +153 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_dgesvd.c +83 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_dgesvd_work.c +144 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_dgetrf.c +50 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_dgetrf_work.c +81 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_dgetri.c +75 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_dgetri_work.c +87 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_dgetrs.c +55 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_dgetrs_work.c +99 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_dpotrf.c +50 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_dpotrf_work.c +81 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_dpotri.c +50 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_dpotri_work.c +81 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_dpotrs.c +54 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_dpotrs_work.c +97 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_sgeev.c +78 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_sgeev_work.c +134 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_sgesdd.c +88 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_sgesdd_work.c +152 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_sgesvd.c +83 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_sgesvd_work.c +143 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_sgetrf.c +50 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_sgetrf_work.c +81 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_sgetri.c +75 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_sgetri_work.c +87 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_sgetrs.c +55 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_sgetrs_work.c +99 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_spotrf.c +50 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_spotrf_work.c +81 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_spotri.c +50 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_spotri_work.c +81 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_spotrs.c +54 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_spotrs_work.c +97 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_zgeev.c +89 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_zgeev_work.c +141 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_zgesdd.c +106 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_zgesdd_work.c +158 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_zgesvd.c +94 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_zgesvd_work.c +149 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_zgetrf.c +51 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_zgetrf_work.c +83 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_zgetri.c +77 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_zgetri_work.c +89 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_zgetrs.c +56 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_zgetrs_work.c +102 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_zpotrf.c +50 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_zpotrf_work.c +82 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_zpotri.c +50 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_zpotri_work.c +82 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_zpotrs.c +55 -0
- data/ext/nmatrix_lapacke/lapacke/src/lapacke_zpotrs_work.c +101 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_cge_nancheck.c +62 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_cge_trans.c +65 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_cpo_nancheck.c +43 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_cpo_trans.c +45 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_ctr_nancheck.c +85 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_ctr_trans.c +85 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_dge_nancheck.c +62 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_dge_trans.c +65 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_dpo_nancheck.c +43 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_dpo_trans.c +45 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_dtr_nancheck.c +85 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_dtr_trans.c +85 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_lsame.c +41 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_sge_nancheck.c +62 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_sge_trans.c +65 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_spo_nancheck.c +43 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_spo_trans.c +45 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_str_nancheck.c +85 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_str_trans.c +85 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_xerbla.c +46 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_zge_nancheck.c +62 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_zge_trans.c +65 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_zpo_nancheck.c +43 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_zpo_trans.c +45 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_ztr_nancheck.c +85 -0
- data/ext/nmatrix_lapacke/lapacke/utils/lapacke_ztr_trans.c +85 -0
- data/ext/nmatrix_lapacke/lapacke_nmatrix.h +16 -0
- data/ext/nmatrix_lapacke/make_lapacke_cpp.rb +9 -0
- data/ext/nmatrix_lapacke/math_lapacke.cpp +967 -0
- data/ext/nmatrix_lapacke/math_lapacke/cblas_local.h +576 -0
- data/ext/nmatrix_lapacke/math_lapacke/cblas_templates_lapacke.h +51 -0
- data/ext/nmatrix_lapacke/math_lapacke/lapacke_templates.h +356 -0
- data/ext/nmatrix_lapacke/nmatrix_lapacke.cpp +42 -0
- data/lib/nmatrix/lapack_ext_common.rb +69 -0
- data/lib/nmatrix/lapacke.rb +213 -0
- data/spec/00_nmatrix_spec.rb +730 -0
- data/spec/01_enum_spec.rb +190 -0
- data/spec/02_slice_spec.rb +389 -0
- data/spec/03_nmatrix_monkeys_spec.rb +78 -0
- data/spec/2x2_dense_double.mat +0 -0
- data/spec/4x4_sparse.mat +0 -0
- data/spec/4x5_dense.mat +0 -0
- data/spec/blas_spec.rb +193 -0
- data/spec/elementwise_spec.rb +303 -0
- data/spec/homogeneous_spec.rb +99 -0
- data/spec/io/fortran_format_spec.rb +88 -0
- data/spec/io/harwell_boeing_spec.rb +98 -0
- data/spec/io/test.rua +9 -0
- data/spec/io_spec.rb +149 -0
- data/spec/lapack_core_spec.rb +482 -0
- data/spec/leakcheck.rb +16 -0
- data/spec/math_spec.rb +730 -0
- data/spec/nmatrix_yale_resize_test_associations.yaml +2802 -0
- data/spec/nmatrix_yale_spec.rb +286 -0
- data/spec/plugins/lapacke/lapacke_spec.rb +303 -0
- data/spec/rspec_monkeys.rb +56 -0
- data/spec/rspec_spec.rb +34 -0
- data/spec/shortcuts_spec.rb +310 -0
- data/spec/slice_set_spec.rb +157 -0
- data/spec/spec_helper.rb +140 -0
- data/spec/stat_spec.rb +203 -0
- data/spec/test.pcd +20 -0
- data/spec/utm5940.mtx +83844 -0
- metadata +262 -0
data/ext/nmatrix/storage/yale/iterators/row_stored.h
@@ -0,0 +1,139 @@
+/////////////////////////////////////////////////////////////////////
+// = NMatrix
+//
+// A linear algebra library for scientific computation in Ruby.
+// NMatrix is part of SciRuby.
+//
+// NMatrix was originally inspired by and derived from NArray, by
+// Masahiro Tanaka: http://narray.rubyforge.org
+//
+// == Copyright Information
+//
+// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
+// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
+//
+// Please see LICENSE.txt for additional copyright notices.
+//
+// == Contributing
+//
+// By contributing source code to SciRuby, you agree to be bound by
+// our Contributor Agreement:
+//
+// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
+//
+// == row_stored.h
+//
+// Iterator for traversing a single stored row of a matrix (needed
+// for row.h). FIXME: This is not as efficient as it could be; it uses
+// two binary searches to find the beginning and end of each slice.
+// The end search shouldn't be necessary, but I couldn't make it
+// work without it, and eventually decided my dissertation should
+// be a priority.
+//
+
+#ifndef YALE_ITERATORS_ROW_STORED_H
+# define YALE_ITERATORS_ROW_STORED_H
+
+#include <stdexcept>
+
+namespace nm { namespace yale_storage {
+
+
+/*
+ * Iterator for visiting each stored element in a row, including diagonals.
+ */
+template <typename D,
+          typename RefType,
+          typename YaleRef = typename std::conditional<
+            std::is_const<RefType>::value,
+            const nm::YaleStorage<D>,
+            nm::YaleStorage<D>
+          >::type,
+          typename RowRef = typename std::conditional<
+            std::is_const<RefType>::value,
+            const row_iterator_T<D,RefType,YaleRef>,
+            row_iterator_T<D,RefType,YaleRef>
+          >::type>
+class row_stored_iterator_T : public row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> {
+protected:
+  using row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>::r;
+  using row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>::p_;
+  bool d_visited, d;
+
+public:
+
+  // end_ is necessary for the logic when a row is empty other than the diagonal. If we just
+  // relied on pp == last_p+1, it'd look like these empty rows were actually end() iterators.
+  // So we have to actually mark end_ by telling it to ignore that diagonal visitation.
+  row_stored_iterator_T(RowRef& row, size_t pp, bool end_ = false)
+    : row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>(row, pp),
+      d_visited(!row.has_diag()), // if the row has no diagonal, just marked it as visited.
+      d(r.is_diag_first() && !end_) // do we start at the diagonal?
+  {
+  }
+
+  /* Diagonal constructor. Puts us on the diagonal (unless end is true) */
+  /*row_stored_iterator_T(RowRef& row, bool end_, size_t j)
+    : row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>(row.ndfind(j)),
+      d_visited(false),
+      d(!end_ && j + row.offset(1) == row.real_i())
+  { }*/
+
+  virtual bool diag() const {
+    return d;
+  }
+
+  virtual bool end() const {
+    return !d && p_ > r.p_last;
+  }
+
+  row_stored_iterator_T<D,RefType,YaleRef,RowRef>& operator++() {
+    if (end()) throw std::out_of_range("cannot increment row stored iterator past end of stored row");
+    if (d) {
+      d_visited = true;
+      d = false;
+    } else {
+      ++p_;
+      // Are we at a diagonal?
+      // If we hit the end or reach a point where j > diag_j, and still
+      // haven't visited the diagonal, we should do so before continuing.
+      if (!d_visited && (end() || j() > r.diag_j())) {
+        d = true;
+      }
+    }
+
+    return *this;
+  }
+
+  row_stored_iterator_T<D,RefType,YaleRef,RowRef> operator++(int dummy) const {
+    row_stored_iterator_T<D,RefType,YaleRef,RowRef> r(*this);
+    return ++r;
+  }
+
+  size_t j() const {
+    if (end()) throw std::out_of_range("cannot dereference an end pointer");
+    return (d ? r.p_diag() : r.ija(p_)) - r.offset(1);
+  }
+
+  // Need to declare all row_stored_iterator_T friends of each other.
+  template <typename E, typename ERefType, typename EYaleRef, typename ERowRef> friend class row_stored_iterator_T;
+
+  // De-reference the iterator
+  RefType& operator*() {
+    return d ? r.a(r.p_diag()) : r.a(p_);
+  }
+
+  RefType& operator*() const {
+    return d ? r.a(r.p_diag()) : r.a(p_);
+  }
+
+  // Ruby VALUE de-reference
+  VALUE operator~() const {
+    return nm_rb_dereference<D>(**this);
+  }
+
+};
+
+}} // end of namespace nm::yale_storage
+
+#endif // YALE_ITERATORS_ROW_STORED_H
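A reading aid, not part of this gem's diff: the iterator above walks NMatrix's "new Yale" storage, so a minimal standalone C++ sketch of that layout may help when reading it. It assumes the usual new-Yale conventions (the first n slots of A hold the diagonal, slot n is an unused default slot, the first n+1 slots of IJA are row pointers into the shared tail of IJA/A, and the tail holds each off-diagonal entry's column index). The tiny 4x4 matrix is hand-built for illustration, and the traversal only approximates what row_stored_iterator_T does.

#include <cstddef>
#include <cstdio>

int main() {
  // Matrix being stored:
  //   [ 1 0 2 0 ]
  //   [ 0 3 0 0 ]
  //   [ 0 0 4 5 ]
  //   [ 0 6 0 7 ]
  const std::size_t n = 4;

  // A: diagonal first, then one unused "default" slot, then the off-diagonal
  //    non-zeros in row order.
  double a[] = { 1, 3, 4, 7, /* default */ 0, /* off-diagonal */ 2, 5, 6 };

  // IJA: n+1 row pointers into the shared tail, then the column index of each
  //      off-diagonal non-zero (positions line up with the tail of A).
  std::size_t ija[] = { 5, 6, 6, 7, 8, /* columns */ 2, 3, 1 };

  // Print each row's stored entries, splicing the diagonal in at its column,
  // which is roughly the job row_stored_iterator_T performs.
  for (std::size_t i = 0; i < n; ++i) {
    bool diag_done = false;
    std::printf("row %zu:", i);
    for (std::size_t p = ija[i]; p < ija[i + 1]; ++p) {
      if (!diag_done && i < ija[p]) {          // diagonal precedes this column
        std::printf("  (%zu,%zu)=%g", i, i, a[i]);
        diag_done = true;
      }
      std::printf("  (%zu,%zu)=%g", i, ija[p], a[p]);
    }
    if (!diag_done) std::printf("  (%zu,%zu)=%g", i, i, a[i]);
    std::printf("\n");
  }
  return 0;
}
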
data/ext/nmatrix/storage/yale/iterators/row_stored_nd.h
@@ -0,0 +1,168 @@
+/////////////////////////////////////////////////////////////////////
+// = NMatrix
+//
+// A linear algebra library for scientific computation in Ruby.
+// NMatrix is part of SciRuby.
+//
+// NMatrix was originally inspired by and derived from NArray, by
+// Masahiro Tanaka: http://narray.rubyforge.org
+//
+// == Copyright Information
+//
+// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
+// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
+//
+// Please see LICENSE.txt for additional copyright notices.
+//
+// == Contributing
+//
+// By contributing source code to SciRuby, you agree to be bound by
+// our Contributor Agreement:
+//
+// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
+//
+// == row_stored_nd.h
+//
+// Yale storage row-by-row nondiagonal-storage iterator
+//
+
+#ifndef YALE_ITERATORS_ROW_STORED_ND_H
+# define YALE_ITERATORS_ROW_STORED_ND_H
+
+#include <type_traits>
+#include <typeinfo>
+#include <stdexcept>
+
+namespace nm { namespace yale_storage {
+
+/*
+ * Constants
+ */
+const float GROWTH_CONSTANT = 1.5;
+
+
+/*
+ * Forward declarations
+ */
+template <typename D, typename RefType, typename YaleRef> class row_iterator_T;
+
+/*
+ * Iterator for visiting each stored element in a row, including diagonals.
+ */
+template <typename D,
+          typename RefType,
+          typename YaleRef = typename std::conditional<
+            std::is_const<RefType>::value,
+            const nm::YaleStorage<D>,
+            nm::YaleStorage<D>
+          >::type,
+          typename RowRef = typename std::conditional<
+            std::is_const<RefType>::value,
+            const row_iterator_T<D,RefType,YaleRef>,
+            row_iterator_T<D,RefType,YaleRef>
+          >::type>
+class row_stored_nd_iterator_T {
+protected:
+  RowRef& r;
+  size_t p_;
+
+public:
+
+  row_stored_nd_iterator_T(RowRef& row, size_t pp)
+    : r(row),
+      p_(pp) // do we start at the diagonal?
+  {
+  }
+
+  // DO NOT IMPLEMENT THESE FUNCTIONS. They prevent C++ virtual slicing
+  //template <typename T> row_stored_nd_iterator_T(T const& rhs);
+  //template <typename T> row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> const& operator=(T const& rhs);
+
+  // Next two functions are to ensure we can still cast between nd iterators.
+  row_stored_nd_iterator_T(row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> const& rhs)
+    : r(rhs.r), p_(rhs.p_)
+  { }
+
+  row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> const& operator=(row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> const& rhs) {
+    if (&r != &(rhs.r))
+      throw std::logic_error("can't assign iterator from another row iterator");
+    p_ = rhs.p_;
+    return *this;
+  }
+
+  virtual size_t p() const { return p_; }
+
+  virtual bool end() const {
+    return p_ > r.p_last;
+  }
+
+  row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>& operator++() {
+    if (end()) throw std::out_of_range("cannot increment row stored iterator past end of stored row");
+    ++p_;
+
+    return *this;
+  }
+
+  row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> operator++(int dummy) const {
+    row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> r(*this);
+    return ++r;
+  }
+
+  virtual size_t j() const {
+    if (end()) throw std::out_of_range("cannot dereference (get j()) for an end pointer");
+    return r.ija(p_) - r.offset(1);
+  }
+
+  // Need to declare all row_stored_nd_iterator_T friends of each other.
+  template <typename E, typename ERefType, typename EYaleRef, typename ERowRef> friend class row_stored_nd_iterator_T;
+
+
+  virtual bool operator==(const row_stored_nd_iterator_T<D,RefType>& rhs) const {
+    if (r.i() != rhs.r.i()) return false;
+    if (end()) return rhs.end();
+    else if (rhs.end()) return false;
+    return j() == rhs.j();
+  }
+
+  // There is something wrong with this function.
+  virtual bool operator!=(const row_stored_nd_iterator_T<D,RefType>& rhs) const {
+    if (r.i() != rhs.r.i()) return true;
+    if (end()) return !rhs.end();
+    else if (rhs.end()) return true;
+    return j() != rhs.j();
+  }
+
+  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
+  bool operator<(const row_stored_nd_iterator_T<E,ERefType>& rhs) const {
+    if (r < rhs.r) return true;
+    if (r > rhs.r) return false;
+
+    // r == rhs.r
+    if (end()) return false;
+    if (rhs.end()) return true;
+    return j() < rhs.j();
+  }
+
+  // De-reference the iterator
+  RefType& operator*() {
+    return r.a(p_);
+  }
+
+  RefType& operator*() const {
+    return r.a(p_);
+  }
+
+  // Ruby VALUE de-reference
+  VALUE operator~() const {
+    return nm_rb_dereference<D>(**this);
+  }
+
+  inline virtual VALUE rb_j() const { return LONG2NUM(j()); }
+
+};
+
+
+
+} } // end of namespace nm::yale_storage
+
+#endif // YALE_ITERATORS_ROW_STORED_ND_H
data/ext/nmatrix/storage/yale/iterators/stored_diagonal.h
@@ -0,0 +1,123 @@
+/////////////////////////////////////////////////////////////////////
+// = NMatrix
+//
+// A linear algebra library for scientific computation in Ruby.
+// NMatrix is part of SciRuby.
+//
+// NMatrix was originally inspired by and derived from NArray, by
+// Masahiro Tanaka: http://narray.rubyforge.org
+//
+// == Copyright Information
+//
+// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
+// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
+//
+// Please see LICENSE.txt for additional copyright notices.
+//
+// == Contributing
+//
+// By contributing source code to SciRuby, you agree to be bound by
+// our Contributor Agreement:
+//
+// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
+//
+// == stored_diagonal_iterator.h
+//
+// Yale storage diagonal-storage iterator
+//
+
+#ifndef YALE_ITERATORS_STORED_DIAGONAL_H
+# define YALE_ITERATORS_STORED_DIAGONAL_H
+
+#include <type_traits>
+#include <typeinfo>
+
+namespace nm { namespace yale_storage {
+
+/*
+ * Iterate across the stored diagonal.
+ */
+template <typename D,
+          typename RefType,
+          typename YaleRef = typename std::conditional<
+            std::is_const<RefType>::value,
+            const nm::YaleStorage<D>,
+            nm::YaleStorage<D>
+          >::type>
+class stored_diagonal_iterator_T : public basic_iterator_T<D,RefType,YaleRef> {
+  using basic_iterator_T<D,RefType,YaleRef>::p_;
+  using basic_iterator_T<D,RefType,YaleRef>::y;
+  using basic_iterator_T<D,RefType,YaleRef>::offset;
+  using basic_iterator_T<D,RefType,YaleRef>::shape;
+public:
+  stored_diagonal_iterator_T(YaleRef& obj, size_t d = 0)
+    : basic_iterator_T<D,RefType,YaleRef>(obj,                                                         // y
+                                          std::max(obj.offset(0), obj.offset(1)) + d - obj.offset(0),  // i_
+                                          std::max(obj.offset(0), obj.offset(1)) + d)                  // p_
+  {
+    // std::cerr << "sdbegin: d=" << d << ", p_=" << p_ << ", i()=" << i() << ", j()=" << j() << std::endl;
+    // p_ can range from max(y.offset(0), y.offset(1)) to min(y.real_shape(0), y.real_shape(1))
+  }
+
+
+  size_t d() const {
+    return p_ - std::max(offset(0), offset(1));
+  }
+
+  stored_diagonal_iterator_T<D,RefType,YaleRef>& operator++() {
+    if (i() < shape(0)) ++p_;
+    return *this;
+  }
+
+  stored_diagonal_iterator_T<D,RefType,YaleRef> operator++(int dummy) const {
+    stored_diagonal_iterator_T<D,RefType,YaleRef> iter(*this);
+    return ++iter;
+  }
+
+  // Indicates if we're at the end of the iteration.
+  bool end() const {
+    return p_ >= std::min( shape(0) + offset(0), shape(1) + offset(1) );
+  }
+
+  // i() and j() are how we know if we're past-the-end. i will be shape(0) and j will be 0.
+  size_t i() const {
+    return p_ - offset(0);
+  }
+
+  size_t j() const {
+    return p_ - offset(1);
+  }
+
+
+  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
+  bool operator!=(const stored_diagonal_iterator_T<E,ERefType>& rhs) const { return d() != rhs.d(); }
+
+  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
+  bool operator==(const stored_diagonal_iterator_T<E,ERefType>& rhs) const { return !(*this != rhs); }
+
+  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
+  bool operator<(const stored_diagonal_iterator_T<E,ERefType>& rhs) const { return d() < rhs.d(); }
+
+  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
+  bool operator<=(const stored_diagonal_iterator_T<E,ERefType>& rhs) const {
+    return d() <= rhs.d();
+  }
+
+  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
+  bool operator>(const stored_diagonal_iterator_T<E,ERefType>& rhs) const {
+    return d() > rhs.d();
+  }
+
+  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
+  bool operator>=(const stored_diagonal_iterator_T<E,ERefType>& rhs) const {
+    return d() >= rhs.d();
+  }
+
+  RefType& operator*() { return y.a(p_); }
+  RefType& operator*() const { return y.a(p_); }
+
+};
+
+} } // end of namespace nm::yale_storage
+
+#endif // YALE_ITERATORS_STORED_DIAGONAL_H
data/ext/nmatrix/storage/yale/math/transpose.h
@@ -0,0 +1,110 @@
+/////////////////////////////////////////////////////////////////////
+// = NMatrix
+//
+// A linear algebra library for scientific computation in Ruby.
+// NMatrix is part of SciRuby.
+//
+// NMatrix was originally inspired by and derived from NArray, by
+// Masahiro Tanaka: http://narray.rubyforge.org
+//
+// == Copyright Information
+//
+// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
+// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
+//
+// Please see LICENSE.txt for additional copyright notices.
+//
+// == Contributing
+//
+// By contributing source code to SciRuby, you agree to be bound by
+// our Contributor Agreement:
+//
+// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
+//
+// == transpose.h
+//
+// Functions for Yale math: transposing
+//
+
+#ifndef YALE_MATH_TRANSPOSE_H
+# define YALE_MATH_TRANSPOSE_H
+
+namespace nm { namespace yale_storage {
+
+/*
+ * Transposes a generic Yale matrix (old or new). Specify new by setting RDiag = true.
+ *
+ * Based on transp from SMMP (same as symbmm and numbmm).
+ *
+ * This is not named in the same way as most yale_storage functions because it does not act on a YALE_STORAGE
+ * object.
+ */
+
+template <typename AD, typename BD, bool DiagA, bool Move>
+void transpose_yale(const size_t n, const size_t m,
+                    const size_t* ia, const size_t* ja, const AD* a, const AD& a_default,
+                    size_t* ib, size_t* jb, BD* b, const BD& b_default) {
+
+  size_t index;
+
+  // Clear B
+  for (size_t i = 0; i < m+1; ++i) ib[i] = 0;
+
+  if (Move)
+    for (size_t i = 0; i < m+1; ++i) b[i] = b_default;
+
+  if (DiagA) ib[0] = m + 1;
+  else       ib[0] = 0;
+
+  /* count indices for each column */
+
+  for (size_t i = 0; i < n; ++i) {
+    for (size_t j = ia[i]; j < ia[i+1]; ++j) {
+      ++(ib[ja[j]+1]);
+    }
+  }
+
+  for (size_t i = 0; i < m; ++i) {
+    ib[i+1] = ib[i] + ib[i+1];
+  }
+
+  /* now make jb */
+
+  for (size_t i = 0; i < n; ++i) {
+
+    for (size_t j = ia[i]; j < ia[i+1]; ++j) {
+      index = ja[j];
+      jb[ib[index]] = i;
+
+      if (Move && a[j] != a_default)
+        b[ib[index]] = a[j];
+
+      ++(ib[index]);
+    }
+  }
+
+  /* now fixup ib */
+
+  for (size_t i = m; i >= 1; --i) {
+    ib[i] = ib[i-1];
+  }
+
+
+  if (DiagA) {
+    if (Move) {
+      size_t j = std::min(n,m);
+
+      for (size_t i = 0; i < j; ++i) {
+        b[i] = a[i];
+      }
+    }
+    ib[0] = m + 1;
+
+  } else {
+    ib[0] = 0;
+  }
+}
+
+} } // end of namespace nm::yale_storage
+
+#endif
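A reading aid, not part of this gem's diff: transpose_yale above follows the transp routine from SMMP, counting the entries bound for each transposed row, prefix-summing those counts into row pointers, scattering the entries, and finally shifting the pointers back down. The standalone sketch below applies the same counting/scatter idea to plain CSR arrays (no separate diagonal storage and no Move/DiagA handling), using a made-up 2x3 matrix.

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  // 2x3 matrix in plain CSR:  [ 1 0 2 ]
  //                           [ 0 3 0 ]
  const std::size_t n = 2, m = 3;
  const std::size_t ia[] = { 0, 2, 3 };   // row pointers
  const std::size_t ja[] = { 0, 2, 1 };   // column indices
  const double      a[]  = { 1, 2, 3 };   // values

  std::vector<std::size_t> ib(m + 1, 0), jb(ia[n]);
  std::vector<double>      b(ia[n]);

  // 1. Count how many entries land in each row of the transpose.
  for (std::size_t i = 0; i < n; ++i)
    for (std::size_t p = ia[i]; p < ia[i + 1]; ++p)
      ++ib[ja[p] + 1];

  // 2. Prefix-sum the counts into row pointers.
  for (std::size_t i = 0; i < m; ++i) ib[i + 1] += ib[i];

  // 3. Scatter each entry into its transposed row, advancing ib as we go.
  for (std::size_t i = 0; i < n; ++i)
    for (std::size_t p = ia[i]; p < ia[i + 1]; ++p) {
      const std::size_t q = ib[ja[p]]++;
      jb[q] = i;
      b[q]  = a[p];
    }

  // 4. Shift the advanced row pointers back down.
  for (std::size_t i = m; i >= 1; --i) ib[i] = ib[i - 1];
  ib[0] = 0;

  for (std::size_t i = 0; i <= m; ++i) std::printf("ib[%zu]=%zu\n", i, ib[i]);
  return 0;
}

For this input the transpose is the 3x2 matrix [1 0; 0 3; 2 0], so the program prints ib[0]=0 through ib[3]=3, with jb = {0, 1, 0} and b = {1, 3, 2} filled in along the way.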