nmatrix-fftw 0.2.1
- checksums.yaml +7 -0
- data/ext/nmatrix/data/complex.h +388 -0
- data/ext/nmatrix/data/data.h +652 -0
- data/ext/nmatrix/data/meta.h +64 -0
- data/ext/nmatrix/data/ruby_object.h +389 -0
- data/ext/nmatrix/math/asum.h +120 -0
- data/ext/nmatrix/math/cblas_enums.h +36 -0
- data/ext/nmatrix/math/cblas_templates_core.h +507 -0
- data/ext/nmatrix/math/gemm.h +241 -0
- data/ext/nmatrix/math/gemv.h +178 -0
- data/ext/nmatrix/math/getrf.h +255 -0
- data/ext/nmatrix/math/getrs.h +121 -0
- data/ext/nmatrix/math/imax.h +79 -0
- data/ext/nmatrix/math/laswp.h +165 -0
- data/ext/nmatrix/math/long_dtype.h +49 -0
- data/ext/nmatrix/math/math.h +745 -0
- data/ext/nmatrix/math/nrm2.h +160 -0
- data/ext/nmatrix/math/rot.h +117 -0
- data/ext/nmatrix/math/rotg.h +106 -0
- data/ext/nmatrix/math/scal.h +71 -0
- data/ext/nmatrix/math/trsm.h +332 -0
- data/ext/nmatrix/math/util.h +148 -0
- data/ext/nmatrix/nm_memory.h +60 -0
- data/ext/nmatrix/nmatrix.h +438 -0
- data/ext/nmatrix/ruby_constants.h +106 -0
- data/ext/nmatrix/storage/common.h +177 -0
- data/ext/nmatrix/storage/dense/dense.h +129 -0
- data/ext/nmatrix/storage/list/list.h +138 -0
- data/ext/nmatrix/storage/storage.h +99 -0
- data/ext/nmatrix/storage/yale/class.h +1139 -0
- data/ext/nmatrix/storage/yale/iterators/base.h +143 -0
- data/ext/nmatrix/storage/yale/iterators/iterator.h +131 -0
- data/ext/nmatrix/storage/yale/iterators/row.h +450 -0
- data/ext/nmatrix/storage/yale/iterators/row_stored.h +140 -0
- data/ext/nmatrix/storage/yale/iterators/row_stored_nd.h +169 -0
- data/ext/nmatrix/storage/yale/iterators/stored_diagonal.h +124 -0
- data/ext/nmatrix/storage/yale/math/transpose.h +110 -0
- data/ext/nmatrix/storage/yale/yale.h +203 -0
- data/ext/nmatrix/types.h +55 -0
- data/ext/nmatrix/util/io.h +115 -0
- data/ext/nmatrix/util/sl_list.h +144 -0
- data/ext/nmatrix/util/util.h +78 -0
- data/ext/nmatrix_fftw/extconf.rb +122 -0
- data/ext/nmatrix_fftw/nmatrix_fftw.cpp +274 -0
- data/lib/nmatrix/fftw.rb +343 -0
- data/spec/00_nmatrix_spec.rb +736 -0
- data/spec/01_enum_spec.rb +190 -0
- data/spec/02_slice_spec.rb +389 -0
- data/spec/03_nmatrix_monkeys_spec.rb +78 -0
- data/spec/2x2_dense_double.mat +0 -0
- data/spec/4x4_sparse.mat +0 -0
- data/spec/4x5_dense.mat +0 -0
- data/spec/blas_spec.rb +193 -0
- data/spec/elementwise_spec.rb +303 -0
- data/spec/homogeneous_spec.rb +99 -0
- data/spec/io/fortran_format_spec.rb +88 -0
- data/spec/io/harwell_boeing_spec.rb +98 -0
- data/spec/io/test.rua +9 -0
- data/spec/io_spec.rb +149 -0
- data/spec/lapack_core_spec.rb +482 -0
- data/spec/leakcheck.rb +16 -0
- data/spec/math_spec.rb +807 -0
- data/spec/nmatrix_yale_resize_test_associations.yaml +2802 -0
- data/spec/nmatrix_yale_spec.rb +286 -0
- data/spec/plugins/fftw/fftw_spec.rb +348 -0
- data/spec/rspec_monkeys.rb +56 -0
- data/spec/rspec_spec.rb +34 -0
- data/spec/shortcuts_spec.rb +310 -0
- data/spec/slice_set_spec.rb +157 -0
- data/spec/spec_helper.rb +149 -0
- data/spec/stat_spec.rb +203 -0
- data/spec/test.pcd +20 -0
- data/spec/utm5940.mtx +83844 -0
- metadata +151 -0

data/ext/nmatrix/storage/yale/iterators/row_stored.h
@@ -0,0 +1,140 @@
/////////////////////////////////////////////////////////////////////
// = NMatrix
//
// A linear algebra library for scientific computation in Ruby.
// NMatrix is part of SciRuby.
//
// NMatrix was originally inspired by and derived from NArray, by
// Masahiro Tanaka: http://narray.rubyforge.org
//
// == Copyright Information
//
// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
//
// Please see LICENSE.txt for additional copyright notices.
//
// == Contributing
//
// By contributing source code to SciRuby, you agree to be bound by
// our Contributor Agreement:
//
// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
//
// == row_stored.h
//
// Iterator for traversing a single stored row of a matrix (needed
// for row.h). FIXME: This is not as efficient as it could be; it uses
// two binary searches to find the beginning and end of each slice.
// The end search shouldn't be necessary, but I couldn't make it
// work without it, and eventually decided my dissertation should
// be a priority.
//

#ifndef YALE_ITERATORS_ROW_STORED_H
# define YALE_ITERATORS_ROW_STORED_H

#include <ruby.h>
#include <type_traits> // for std::conditional and std::is_const, used below
#include <stdexcept>

namespace nm { namespace yale_storage {


/*
 * Iterator for visiting each stored element in a row, including diagonals.
 */
template <typename D,
          typename RefType,
          typename YaleRef = typename std::conditional<
            std::is_const<RefType>::value,
            const nm::YaleStorage<D>,
            nm::YaleStorage<D>
          >::type,
          typename RowRef = typename std::conditional<
            std::is_const<RefType>::value,
            const row_iterator_T<D,RefType,YaleRef>,
            row_iterator_T<D,RefType,YaleRef>
          >::type>
class row_stored_iterator_T : public row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> {
protected:
  using row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>::r;
  using row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>::p_;
  bool d_visited, d;

public:

  // end_ is necessary for the logic when a row is empty other than the diagonal. If we just
  // relied on pp == last_p+1, it'd look like these empty rows were actually end() iterators.
  // So we have to actually mark end_ by telling it to ignore that diagonal visitation.
  row_stored_iterator_T(RowRef& row, size_t pp, bool end_ = false)
  : row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>(row, pp),
    d_visited(!row.has_diag()),   // if the row has no diagonal, just mark it as visited.
    d(r.is_diag_first() && !end_) // do we start at the diagonal?
  { }

  /* Diagonal constructor. Puts us on the diagonal (unless end is true) */
  /*row_stored_iterator_T(RowRef& row, bool end_, size_t j)
  : row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>(row.ndfind(j)),
    d_visited(false),
    d(!end_ && j + row.offset(1) == row.real_i())
  { }*/

  virtual bool diag() const {
    return d;
  }

  virtual bool end() const {
    return !d && p_ > r.p_last;
  }

  row_stored_iterator_T<D,RefType,YaleRef,RowRef>& operator++() {
    if (end()) throw std::out_of_range("cannot increment row stored iterator past end of stored row");
    if (d) {
      d_visited = true;
      d         = false;
    } else {
      ++p_;
      // Are we at a diagonal?
      // If we hit the end or reach a point where j > diag_j, and still
      // haven't visited the diagonal, we should do so before continuing.
      if (!d_visited && (end() || j() > r.diag_j())) {
        d = true;
      }
    }

    return *this;
  }

  row_stored_iterator_T<D,RefType,YaleRef,RowRef> operator++(int dummy) const {
    row_stored_iterator_T<D,RefType,YaleRef,RowRef> r(*this);
    return ++r;
  }

  size_t j() const {
    if (end()) throw std::out_of_range("cannot dereference an end pointer");
    return (d ? r.p_diag() : r.ija(p_)) - r.offset(1);
  }

  // Need to declare all row_stored_iterator_T friends of each other.
  template <typename E, typename ERefType, typename EYaleRef, typename ERowRef> friend class row_stored_iterator_T;

  // De-reference the iterator
  RefType& operator*() {
    return d ? r.a(r.p_diag()) : r.a(p_);
  }

  RefType& operator*() const {
    return d ? r.a(r.p_diag()) : r.a(p_);
  }

  // Ruby VALUE de-reference
  VALUE operator~() const {
    return nm_rb_dereference<D>(**this);
  }

};

}} // end of namespace nm::yale_storage

#endif // YALE_ITERATORS_ROW_STORED_H
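
For orientation, here is a minimal standalone model of the visitation order row_stored_iterator_T implements: walk the sorted nondiagonal columns of one Yale row and splice the separately stored diagonal entry into its correct sorted position, mirroring the d/d_visited bookkeeping above. This is a sketch only; SketchRow and visit_stored are hypothetical names, not NMatrix's API.

// Standalone model of stored-row visitation order (hypothetical types).
#include <cstdio>
#include <cstddef>
#include <vector>

struct SketchRow {
  size_t              diag_j;    // column of the diagonal entry (== row index)
  bool                has_diag;  // is the diagonal actually stored?
  double              diag_val;
  std::vector<size_t> nd_j;      // sorted nondiagonal column indices (the ija slice)
  std::vector<double> nd_val;    // corresponding values (the a slice)
};

// Visit every stored element of the row in ascending column order.
void visit_stored(const SketchRow& row) {
  bool d_visited = !row.has_diag; // no stored diagonal => pretend it was already visited
  for (size_t p = 0; p <= row.nd_j.size(); ++p) {
    // Before moving past the diagonal's column (or off the end), emit the diagonal.
    if (!d_visited && (p == row.nd_j.size() || row.nd_j[p] > row.diag_j)) {
      std::printf("j=%zu v=%g (diag)\n", row.diag_j, row.diag_val);
      d_visited = true;
    }
    if (p < row.nd_j.size())
      std::printf("j=%zu v=%g\n", row.nd_j[p], row.nd_val[p]);
  }
}

int main() {
  SketchRow row{2, true, 9.0, {0, 4, 7}, {1.0, 2.0, 3.0}};
  visit_stored(row); // prints columns 0, 2 (diag), 4, 7 in ascending order
}

Because the diagonal lives outside the ija/a slice, the iterator cannot simply advance a pointer; it has to remember whether the diagonal has been emitted yet, which is exactly what the d_visited flag does.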

data/ext/nmatrix/storage/yale/iterators/row_stored_nd.h
@@ -0,0 +1,169 @@
/////////////////////////////////////////////////////////////////////
// = NMatrix
//
// A linear algebra library for scientific computation in Ruby.
// NMatrix is part of SciRuby.
//
// NMatrix was originally inspired by and derived from NArray, by
// Masahiro Tanaka: http://narray.rubyforge.org
//
// == Copyright Information
//
// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
//
// Please see LICENSE.txt for additional copyright notices.
//
// == Contributing
//
// By contributing source code to SciRuby, you agree to be bound by
// our Contributor Agreement:
//
// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
//
// == row_stored_nd.h
//
// Yale storage row-by-row nondiagonal-storage iterator
//

#ifndef YALE_ITERATORS_ROW_STORED_ND_H
# define YALE_ITERATORS_ROW_STORED_ND_H

#include <ruby.h>
#include <type_traits>
#include <typeinfo>
#include <stdexcept>

namespace nm { namespace yale_storage {

/*
 * Constants
 */
const float GROWTH_CONSTANT = 1.5;


/*
 * Forward declarations
 */
template <typename D, typename RefType, typename YaleRef> class row_iterator_T;

/*
 * Iterator for visiting each stored nondiagonal element in a row.
 */
template <typename D,
          typename RefType,
          typename YaleRef = typename std::conditional<
            std::is_const<RefType>::value,
            const nm::YaleStorage<D>,
            nm::YaleStorage<D>
          >::type,
          typename RowRef = typename std::conditional<
            std::is_const<RefType>::value,
            const row_iterator_T<D,RefType,YaleRef>,
            row_iterator_T<D,RefType,YaleRef>
          >::type>
class row_stored_nd_iterator_T {
protected:
  RowRef& r;
  size_t p_;

public:

  row_stored_nd_iterator_T(RowRef& row, size_t pp)
  : r(row),
    p_(pp)
  { }

  // DO NOT IMPLEMENT THESE FUNCTIONS. They prevent C++ virtual slicing.
  //template <typename T> row_stored_nd_iterator_T(T const& rhs);
  //template <typename T> row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> const& operator=(T const& rhs);

  // Next two functions are to ensure we can still cast between nd iterators.
  row_stored_nd_iterator_T(row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> const& rhs)
  : r(rhs.r), p_(rhs.p_)
  { }

  row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> const& operator=(row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> const& rhs) {
    if (&r != &(rhs.r))
      throw std::logic_error("can't assign iterator from another row iterator");
    p_ = rhs.p_;
    return *this;
  }

  virtual size_t p() const { return p_; }

  virtual bool end() const {
    return p_ > r.p_last;
  }

  row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>& operator++() {
    if (end()) throw std::out_of_range("cannot increment row stored iterator past end of stored row");
    ++p_;

    return *this;
  }

  row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> operator++(int dummy) const {
    row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> r(*this);
    return ++r;
  }

  virtual size_t j() const {
    if (end()) throw std::out_of_range("cannot dereference (get j()) for an end pointer");
    return r.ija(p_) - r.offset(1);
  }

  // Need to declare all row_stored_nd_iterator_T friends of each other.
  template <typename E, typename ERefType, typename EYaleRef, typename ERowRef> friend class row_stored_nd_iterator_T;


  virtual bool operator==(const row_stored_nd_iterator_T<D,RefType>& rhs) const {
    if (r.i() != rhs.r.i()) return false;
    if (end()) return rhs.end();
    else if (rhs.end()) return false;
    return j() == rhs.j();
  }

  // There is something wrong with this function.
  virtual bool operator!=(const row_stored_nd_iterator_T<D,RefType>& rhs) const {
    if (r.i() != rhs.r.i()) return true;
    if (end()) return !rhs.end();
    else if (rhs.end()) return true;
    return j() != rhs.j();
  }

  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
  bool operator<(const row_stored_nd_iterator_T<E,ERefType>& rhs) const {
    if (r < rhs.r) return true;
    if (r > rhs.r) return false;

    // r == rhs.r
    if (end()) return false;
    if (rhs.end()) return true;
    return j() < rhs.j();
  }

  // De-reference the iterator
  RefType& operator*() {
    return r.a(p_);
  }

  RefType& operator*() const {
    return r.a(p_);
  }

  // Ruby VALUE de-reference
  VALUE operator~() const {
    return nm_rb_dereference<D>(**this);
  }

  inline virtual VALUE rb_j() const { return LONG2NUM(j()); }

};



} } // end of namespace nm::yale_storage

#endif // YALE_ITERATORS_ROW_STORED_ND_H
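
The std::conditional defaults in these templates are doing real work: they propagate the constness of RefType into the storage and row reference types, so a single template instantiates as both the mutable iterator and the const_iterator. Here is a minimal sketch of the idiom, using a stand-in Storage type rather than NMatrix's YaleStorage:

// Const-propagation via std::conditional (stand-in types, not NMatrix's).
#include <cstddef>
#include <type_traits>

template <typename T> struct Storage { T* data; };

template <typename D,
          typename RefType, // D for a mutable iterator, const D for a const one
          typename StorageRef = typename std::conditional<
            std::is_const<RefType>::value,
            const Storage<D>,   // const RefType  => iterate a const container
            Storage<D>          // mutable RefType => iterate a mutable container
          >::type>
struct iterator_T {
  typedef StorageRef storage_type;
  StorageRef& s;
  RefType& deref(std::size_t p) const { return s.data[p]; }
};

// One template, two variants, selected purely by RefType's constness:
static_assert(std::is_same<iterator_T<double, double>::storage_type,
                           Storage<double> >::value, "mutable variant");
static_assert(std::is_same<iterator_T<double, const double>::storage_type,
                           const Storage<double> >::value, "const variant");

int main() { return 0; }

The payoff is that every iterator method is written once, and the compiler rejects mutation through the const instantiation at type-check time.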

data/ext/nmatrix/storage/yale/iterators/stored_diagonal.h
@@ -0,0 +1,124 @@
/////////////////////////////////////////////////////////////////////
// = NMatrix
//
// A linear algebra library for scientific computation in Ruby.
// NMatrix is part of SciRuby.
//
// NMatrix was originally inspired by and derived from NArray, by
// Masahiro Tanaka: http://narray.rubyforge.org
//
// == Copyright Information
//
// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
//
// Please see LICENSE.txt for additional copyright notices.
//
// == Contributing
//
// By contributing source code to SciRuby, you agree to be bound by
// our Contributor Agreement:
//
// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
//
// == stored_diagonal.h
//
// Yale storage diagonal-storage iterator
//

#ifndef YALE_ITERATORS_STORED_DIAGONAL_H
# define YALE_ITERATORS_STORED_DIAGONAL_H

#include <ruby.h>
#include <algorithm> // for std::min and std::max, used below
#include <type_traits>
#include <typeinfo>

namespace nm { namespace yale_storage {

/*
 * Iterate across the stored diagonal.
 */
template <typename D,
          typename RefType,
          typename YaleRef = typename std::conditional<
            std::is_const<RefType>::value,
            const nm::YaleStorage<D>,
            nm::YaleStorage<D>
          >::type>
class stored_diagonal_iterator_T : public basic_iterator_T<D,RefType,YaleRef> {
  using basic_iterator_T<D,RefType,YaleRef>::p_;
  using basic_iterator_T<D,RefType,YaleRef>::y;
  using basic_iterator_T<D,RefType,YaleRef>::offset;
  using basic_iterator_T<D,RefType,YaleRef>::shape;
public:
  stored_diagonal_iterator_T(YaleRef& obj, size_t d = 0)
  : basic_iterator_T<D,RefType,YaleRef>(obj,                                                        // y
                                        std::max(obj.offset(0), obj.offset(1)) + d - obj.offset(0), // i_
                                        std::max(obj.offset(0), obj.offset(1)) + d)                 // p_
  {
//  std::cerr << "sdbegin: d=" << d << ", p_=" << p_ << ", i()=" << i() << ", j()=" << j() << std::endl;
    // p_ can range from max(y.offset(0), y.offset(1)) to min(y.real_shape(0), y.real_shape(1))
  }


  size_t d() const {
    return p_ - std::max(offset(0), offset(1));
  }

  stored_diagonal_iterator_T<D,RefType,YaleRef>& operator++() {
    if (i() < shape(0)) ++p_;
    return *this;
  }

  stored_diagonal_iterator_T<D,RefType,YaleRef> operator++(int dummy) const {
    stored_diagonal_iterator_T<D,RefType,YaleRef> iter(*this);
    return ++iter;
  }

  // Indicates if we're at the end of the iteration.
  bool end() const {
    return p_ >= std::min( shape(0) + offset(0), shape(1) + offset(1) );
  }

  // i() and j() are how we know if we're past-the-end. i will be shape(0) and j will be 0.
  size_t i() const {
    return p_ - offset(0);
  }

  size_t j() const {
    return p_ - offset(1);
  }


  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
  bool operator!=(const stored_diagonal_iterator_T<E,ERefType>& rhs) const { return d() != rhs.d(); }

  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
  bool operator==(const stored_diagonal_iterator_T<E,ERefType>& rhs) const { return !(*this != rhs); }

  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
  bool operator<(const stored_diagonal_iterator_T<E,ERefType>& rhs) const { return d() < rhs.d(); }

  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
  bool operator<=(const stored_diagonal_iterator_T<E,ERefType>& rhs) const {
    return d() <= rhs.d();
  }

  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
  bool operator>(const stored_diagonal_iterator_T<E,ERefType>& rhs) const {
    return d() > rhs.d();
  }

  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
  bool operator>=(const stored_diagonal_iterator_T<E,ERefType>& rhs) const {
    return d() >= rhs.d();
  }

  RefType& operator*()       { return y.a(p_); }
  RefType& operator*() const { return y.a(p_); }

};

} } // end of namespace nm::yale_storage

#endif // YALE_ITERATORS_STORED_DIAGONAL_H
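
This iterator leans on the "new Yale" layout, in which the diagonal occupies a fixed prefix of the a array, so iterating the stored diagonal amounts to walking that prefix. A toy example under that assumption (the arrays below are hand-built for illustration, not produced by NMatrix):

// Sketch of the new-Yale layout: a = [diagonal | one unused slot | nondiagonal
// values], ija = [row pointers into the nondiagonal region | column indices].
#include <cstdio>
#include <cstddef>
#include <vector>
#include <algorithm>

int main() {
  const size_t n = 3, m = 3;
  // Matrix: [[4 0 1],
  //          [0 5 0],
  //          [2 0 6]]
  std::vector<double> a   = {4, 5, 6,  0,  1, 2}; // diag, unused slot, nondiag values
  std::vector<size_t> ija = {4, 5, 5, 6,  2, 0};  // ija[0..n] row ptrs, then columns

  // The stored diagonal is simply a[0 .. min(n,m)-1]:
  for (size_t d = 0; d < std::min(n, m); ++d)
    std::printf("(%zu,%zu) = %g\n", d, d, a[d]);
}

Given that layout, the only subtlety the real iterator handles is slicing: the offset(0)/offset(1) arithmetic above maps a slice's local diagonal back onto the parent matrix's stored diagonal.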

data/ext/nmatrix/storage/yale/math/transpose.h
@@ -0,0 +1,110 @@
/////////////////////////////////////////////////////////////////////
// = NMatrix
//
// A linear algebra library for scientific computation in Ruby.
// NMatrix is part of SciRuby.
//
// NMatrix was originally inspired by and derived from NArray, by
// Masahiro Tanaka: http://narray.rubyforge.org
//
// == Copyright Information
//
// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
//
// Please see LICENSE.txt for additional copyright notices.
//
// == Contributing
//
// By contributing source code to SciRuby, you agree to be bound by
// our Contributor Agreement:
//
// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
//
// == transpose.h
//
// Functions for Yale math: transposing
//

#ifndef YALE_MATH_TRANSPOSE_H
# define YALE_MATH_TRANSPOSE_H

#include <algorithm> // for std::min, used below

namespace nm { namespace yale_storage {

/*
 * Transposes a generic Yale matrix (old or new). Specify new by setting DiagA = true.
 *
 * Based on transp from SMMP (same as symbmm and numbmm).
 *
 * This is not named in the same way as most yale_storage functions because it does not act on a YALE_STORAGE
 * object.
 */

template <typename AD, typename BD, bool DiagA, bool Move>
void transpose_yale(const size_t n, const size_t m,
                    const size_t* ia, const size_t* ja, const AD* a, const AD& a_default,
                    size_t* ib, size_t* jb, BD* b, const BD& b_default) {

  size_t index;

  // Clear B
  for (size_t i = 0; i < m+1; ++i) ib[i] = 0;

  if (Move)
    for (size_t i = 0; i < m+1; ++i) b[i] = b_default;

  if (DiagA) ib[0] = m + 1;
  else       ib[0] = 0;

  /* count indices for each column */

  for (size_t i = 0; i < n; ++i) {
    for (size_t j = ia[i]; j < ia[i+1]; ++j) {
      ++(ib[ja[j]+1]);
    }
  }

  for (size_t i = 0; i < m; ++i) {
    ib[i+1] = ib[i] + ib[i+1];
  }

  /* now make jb */

  for (size_t i = 0; i < n; ++i) {

    for (size_t j = ia[i]; j < ia[i+1]; ++j) {
      index = ja[j];
      jb[ib[index]] = i;

      if (Move && a[j] != a_default)
        b[ib[index]] = a[j];

      ++(ib[index]);
    }
  }

  /* now fixup ib */

  for (size_t i = m; i >= 1; --i) {
    ib[i] = ib[i-1];
  }


  if (DiagA) {
    if (Move) {
      size_t j = std::min(n,m);

      for (size_t i = 0; i < j; ++i) {
        b[i] = a[i];
      }
    }
    ib[0] = m + 1;

  } else {
    ib[0] = 0;
  }
}

} } // end of namespace nm::yale_storage

#endif // YALE_MATH_TRANSPOSE_H
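
For reference, the same SMMP-style transpose as a self-contained sketch, specialized to plain CSR (no separately stored diagonal, values always moved): count entries per column, prefix-sum the counts into row pointers for B, scatter while advancing those pointers as write cursors, then shift the pointers back down by one row. csr_transpose and its parameter names are illustrative, not NMatrix's API.

// Standalone CSR transpose, mirroring the counting/scatter/fixup phases above.
#include <cstdio>
#include <cstddef>
#include <vector>

void csr_transpose(size_t n, size_t m,
                   const std::vector<size_t>& ia, const std::vector<size_t>& ja,
                   const std::vector<double>& a,
                   std::vector<size_t>& ib, std::vector<size_t>& jb,
                   std::vector<double>& b) {
  ib.assign(m + 1, 0);
  jb.resize(ja.size());
  b.resize(a.size());

  // Count the entries destined for each row of B (i.e. each column of A),
  // stored one slot to the right so the prefix sum lands on row starts.
  for (size_t i = 0; i < n; ++i)
    for (size_t p = ia[i]; p < ia[i+1]; ++p)
      ++ib[ja[p] + 1];

  // Prefix sums turn counts into row start pointers.
  for (size_t i = 0; i < m; ++i) ib[i+1] += ib[i];

  // Scatter: ib[c] serves as a moving write cursor for row c of B. Visiting
  // A row by row keeps each output row's column indices sorted.
  for (size_t i = 0; i < n; ++i)
    for (size_t p = ia[i]; p < ia[i+1]; ++p) {
      size_t c = ja[p];
      jb[ib[c]] = i;
      b[ib[c]]  = a[p];
      ++ib[c];
    }

  // Each cursor ended at the *next* row's start; shift down to repair ib.
  for (size_t i = m; i >= 1; --i) ib[i] = ib[i-1];
  ib[0] = 0;
}

int main() {
  // A = [[1 0 2],
  //      [0 0 3]]  (2x3)
  std::vector<size_t> ia = {0, 2, 3}, ja = {0, 2, 2};
  std::vector<double> a  = {1, 2, 3};
  std::vector<size_t> ib, jb;
  std::vector<double> b;
  csr_transpose(2, 3, ia, ja, a, ib, jb, b);
  for (size_t i = 0; i < 3; ++i)
    for (size_t p = ib[i]; p < ib[i+1]; ++p)
      std::printf("B(%zu,%zu) = %g\n", i, jb[p], b[p]); // 3x2 transpose of A
}

The whole transpose is two passes over the nonzeros plus two passes over the row-pointer array, so it runs in O(nnz + n + m) time; transpose_yale adds only the DiagA bookkeeping (the m+1 offset and the diagonal copy) on top of this skeleton.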