pnmatrix 1.2.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/ext/nmatrix/binary_format.txt +53 -0
- data/ext/nmatrix/data/complex.h +388 -0
- data/ext/nmatrix/data/data.cpp +274 -0
- data/ext/nmatrix/data/data.h +651 -0
- data/ext/nmatrix/data/meta.h +64 -0
- data/ext/nmatrix/data/ruby_object.h +386 -0
- data/ext/nmatrix/extconf.rb +70 -0
- data/ext/nmatrix/math/asum.h +99 -0
- data/ext/nmatrix/math/cblas_enums.h +36 -0
- data/ext/nmatrix/math/cblas_templates_core.h +507 -0
- data/ext/nmatrix/math/gemm.h +241 -0
- data/ext/nmatrix/math/gemv.h +178 -0
- data/ext/nmatrix/math/getrf.h +255 -0
- data/ext/nmatrix/math/getrs.h +121 -0
- data/ext/nmatrix/math/imax.h +82 -0
- data/ext/nmatrix/math/laswp.h +165 -0
- data/ext/nmatrix/math/long_dtype.h +62 -0
- data/ext/nmatrix/math/magnitude.h +54 -0
- data/ext/nmatrix/math/math.h +751 -0
- data/ext/nmatrix/math/nrm2.h +165 -0
- data/ext/nmatrix/math/rot.h +117 -0
- data/ext/nmatrix/math/rotg.h +106 -0
- data/ext/nmatrix/math/scal.h +71 -0
- data/ext/nmatrix/math/trsm.h +336 -0
- data/ext/nmatrix/math/util.h +162 -0
- data/ext/nmatrix/math.cpp +1368 -0
- data/ext/nmatrix/nm_memory.h +60 -0
- data/ext/nmatrix/nmatrix.cpp +285 -0
- data/ext/nmatrix/nmatrix.h +476 -0
- data/ext/nmatrix/ruby_constants.cpp +151 -0
- data/ext/nmatrix/ruby_constants.h +106 -0
- data/ext/nmatrix/ruby_nmatrix.c +3130 -0
- data/ext/nmatrix/storage/common.cpp +77 -0
- data/ext/nmatrix/storage/common.h +183 -0
- data/ext/nmatrix/storage/dense/dense.cpp +1096 -0
- data/ext/nmatrix/storage/dense/dense.h +129 -0
- data/ext/nmatrix/storage/list/list.cpp +1628 -0
- data/ext/nmatrix/storage/list/list.h +138 -0
- data/ext/nmatrix/storage/storage.cpp +730 -0
- data/ext/nmatrix/storage/storage.h +99 -0
- data/ext/nmatrix/storage/yale/class.h +1139 -0
- data/ext/nmatrix/storage/yale/iterators/base.h +143 -0
- data/ext/nmatrix/storage/yale/iterators/iterator.h +131 -0
- data/ext/nmatrix/storage/yale/iterators/row.h +450 -0
- data/ext/nmatrix/storage/yale/iterators/row_stored.h +140 -0
- data/ext/nmatrix/storage/yale/iterators/row_stored_nd.h +169 -0
- data/ext/nmatrix/storage/yale/iterators/stored_diagonal.h +124 -0
- data/ext/nmatrix/storage/yale/math/transpose.h +110 -0
- data/ext/nmatrix/storage/yale/yale.cpp +2074 -0
- data/ext/nmatrix/storage/yale/yale.h +203 -0
- data/ext/nmatrix/types.h +55 -0
- data/ext/nmatrix/util/io.cpp +279 -0
- data/ext/nmatrix/util/io.h +115 -0
- data/ext/nmatrix/util/sl_list.cpp +627 -0
- data/ext/nmatrix/util/sl_list.h +144 -0
- data/ext/nmatrix/util/util.h +78 -0
- data/lib/nmatrix/blas.rb +378 -0
- data/lib/nmatrix/cruby/math.rb +744 -0
- data/lib/nmatrix/enumerate.rb +253 -0
- data/lib/nmatrix/homogeneous.rb +241 -0
- data/lib/nmatrix/io/fortran_format.rb +138 -0
- data/lib/nmatrix/io/harwell_boeing.rb +221 -0
- data/lib/nmatrix/io/market.rb +263 -0
- data/lib/nmatrix/io/point_cloud.rb +189 -0
- data/lib/nmatrix/jruby/decomposition.rb +24 -0
- data/lib/nmatrix/jruby/enumerable.rb +13 -0
- data/lib/nmatrix/jruby/error.rb +4 -0
- data/lib/nmatrix/jruby/math.rb +501 -0
- data/lib/nmatrix/jruby/nmatrix_java.rb +840 -0
- data/lib/nmatrix/jruby/operators.rb +283 -0
- data/lib/nmatrix/jruby/slice.rb +264 -0
- data/lib/nmatrix/lapack_core.rb +181 -0
- data/lib/nmatrix/lapack_plugin.rb +44 -0
- data/lib/nmatrix/math.rb +953 -0
- data/lib/nmatrix/mkmf.rb +100 -0
- data/lib/nmatrix/monkeys.rb +137 -0
- data/lib/nmatrix/nmatrix.rb +1172 -0
- data/lib/nmatrix/rspec.rb +75 -0
- data/lib/nmatrix/shortcuts.rb +1163 -0
- data/lib/nmatrix/version.rb +39 -0
- data/lib/nmatrix/yale_functions.rb +118 -0
- data/lib/nmatrix.rb +28 -0
- data/spec/00_nmatrix_spec.rb +892 -0
- data/spec/01_enum_spec.rb +196 -0
- data/spec/02_slice_spec.rb +407 -0
- data/spec/03_nmatrix_monkeys_spec.rb +80 -0
- data/spec/2x2_dense_double.mat +0 -0
- data/spec/4x4_sparse.mat +0 -0
- data/spec/4x5_dense.mat +0 -0
- data/spec/blas_spec.rb +215 -0
- data/spec/elementwise_spec.rb +311 -0
- data/spec/homogeneous_spec.rb +100 -0
- data/spec/io/fortran_format_spec.rb +88 -0
- data/spec/io/harwell_boeing_spec.rb +98 -0
- data/spec/io/test.rua +9 -0
- data/spec/io_spec.rb +159 -0
- data/spec/lapack_core_spec.rb +482 -0
- data/spec/leakcheck.rb +16 -0
- data/spec/math_spec.rb +1363 -0
- data/spec/nmatrix_yale_resize_test_associations.yaml +2802 -0
- data/spec/nmatrix_yale_spec.rb +286 -0
- data/spec/rspec_monkeys.rb +56 -0
- data/spec/rspec_spec.rb +35 -0
- data/spec/shortcuts_spec.rb +474 -0
- data/spec/slice_set_spec.rb +162 -0
- data/spec/spec_helper.rb +172 -0
- data/spec/stat_spec.rb +214 -0
- data/spec/test.pcd +20 -0
- data/spec/utm5940.mtx +83844 -0
- metadata +295 -0
|
@@ -0,0 +1,450 @@
|
|
|
1
|
+
/////////////////////////////////////////////////////////////////////
|
|
2
|
+
// = NMatrix
|
|
3
|
+
//
|
|
4
|
+
// A linear algebra library for scientific computation in Ruby.
|
|
5
|
+
// NMatrix is part of SciRuby.
|
|
6
|
+
//
|
|
7
|
+
// NMatrix was originally inspired by and derived from NArray, by
|
|
8
|
+
// Masahiro Tanaka: http://narray.rubyforge.org
|
|
9
|
+
//
|
|
10
|
+
// == Copyright Information
|
|
11
|
+
//
|
|
12
|
+
// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
|
|
13
|
+
// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
|
|
14
|
+
//
|
|
15
|
+
// Please see LICENSE.txt for additional copyright notices.
|
|
16
|
+
//
|
|
17
|
+
// == Contributing
|
|
18
|
+
//
|
|
19
|
+
// By contributing source code to SciRuby, you agree to be bound by
|
|
20
|
+
// our Contributor Agreement:
|
|
21
|
+
//
|
|
22
|
+
// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
|
|
23
|
+
//
|
|
24
|
+
// == row.h
|
|
25
|
+
//
|
|
26
|
+
// Iterator for traversing a matrix row by row. Includes an
|
|
27
|
+
// orthogonal iterator for visiting each stored entry in a row.
|
|
28
|
+
// This one cannot be de-referenced; you have to de-reference
|
|
29
|
+
// the column.
|
|
30
|
+
|
|
31
|
+
#ifndef YALE_ITERATORS_ROW_H
|
|
32
|
+
# define YALE_ITERATORS_ROW_H
|
|
33
|
+
|
|
34
|
+
#include <ruby.h>
|
|
35
|
+
#include <stdexcept>
|
|
36
|
+
|
|
37
|
+
namespace nm { namespace yale_storage {
|
|
38
|
+
|
|
39
|
+
template <typename D,
|
|
40
|
+
typename RefType,
|
|
41
|
+
typename YaleRef = typename std::conditional<
|
|
42
|
+
std::is_const<RefType>::value,
|
|
43
|
+
const nm::YaleStorage<D>,
|
|
44
|
+
nm::YaleStorage<D>
|
|
45
|
+
>::type>
|
|
46
|
+
class row_iterator_T {
|
|
47
|
+
|
|
48
|
+
protected:
|
|
49
|
+
YaleRef& y;
|
|
50
|
+
size_t i_;
|
|
51
|
+
size_t p_first, p_last; // first and last IJA positions in the row
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
/*
|
|
55
|
+
* Update the row positions -- use to ensure a row stays valid after an insert operation. Also
|
|
56
|
+
* used to initialize a row iterator at a different row index.
|
|
57
|
+
*/
|
|
58
|
+
void update() {
|
|
59
|
+
if (i_ < y.shape(0)) {
|
|
60
|
+
p_first = p_real_first();
|
|
61
|
+
p_last = p_real_last();
|
|
62
|
+
if (!nd_empty()) {
|
|
63
|
+
// try to find new p_first
|
|
64
|
+
p_first = y.real_find_left_boundary_pos(p_first, p_last, y.offset(1));
|
|
65
|
+
if (!nd_empty()) {
|
|
66
|
+
// also try to find new p_last
|
|
67
|
+
p_last = y.real_find_left_boundary_pos(p_first, p_last, y.offset(1) + y.shape(1) - 1);
|
|
68
|
+
if (y.ija(p_last) - y.offset(1) >= shape(1)) --p_last; // searched too far.
|
|
69
|
+
}
|
|
70
|
+
}
|
|
71
|
+
} else { // invalid row -- this is an end iterator.
|
|
72
|
+
p_first = y.ija(y.real_shape(0));
|
|
73
|
+
p_last = y.ija(y.real_shape(0))-1; // mark as empty
|
|
74
|
+
}
|
|
75
|
+
}
|
|
76
|
+
|
|
77
|
+
/*
|
|
78
|
+
* Indicate to the row iterator that p_first and p_last have moved by some amount. Only
|
|
79
|
+
* defined for row_iterator, not const_row_iterator. This is a lightweight form of update().
|
|
80
|
+
*/
|
|
81
|
+
//template <typename = typename std::enable_if<!std::is_const<RefType>::value>::type>
|
|
82
|
+
void shift(int amount) {
|
|
83
|
+
p_first += amount;
|
|
84
|
+
p_last += amount;
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
/*
|
|
89
|
+
* Enlarge the row by amount by moving p_last over. This is a lightweight form of update().
|
|
90
|
+
*/
|
|
91
|
+
//template <typename = typename std::enable_if<!std::is_const<RefType>::value>::type>
|
|
92
|
+
void adjust_length(int amount) {
|
|
93
|
+
p_last += amount;
|
|
94
|
+
}
|
|
95
|
+
|
|
96
|
+
public:
|
|
97
|
+
/* typedef row_stored_iterator_T<D,RefType,YaleRef> row_stored_iterator;
|
|
98
|
+
typedef row_stored_nd_iterator_T<D,RefType,YaleRef> row_stored_nd_iterator;
|
|
99
|
+
typedef row_stored_iterator_T<D,const RefType,const YaleRef> const_row_stored_iterator;
|
|
100
|
+
typedef row_stored_nd_iterator_T<D,const RefType,const YaleRef> const_row_stored_nd_iterator;*/
|
|
101
|
+
typedef row_stored_iterator_T<D,RefType,YaleRef, row_iterator_T<D,RefType,YaleRef> > row_stored_iterator;
|
|
102
|
+
typedef row_stored_nd_iterator_T<D,RefType,YaleRef, row_iterator_T<D,RefType,YaleRef> > row_stored_nd_iterator;
|
|
103
|
+
template <typename E, typename ERefType, typename EYaleRef> friend class row_iterator_T;
|
|
104
|
+
friend class row_stored_iterator_T<D,RefType,YaleRef, row_iterator_T<D,RefType,YaleRef> >;
|
|
105
|
+
friend class row_stored_nd_iterator_T<D,RefType,YaleRef, row_iterator_T<D,RefType,YaleRef> >;//row_stored_iterator;
|
|
106
|
+
friend class row_stored_iterator_T<D,RefType,YaleRef, const row_iterator_T<D,RefType,YaleRef> >;
|
|
107
|
+
friend class row_stored_nd_iterator_T<D,RefType,YaleRef, const row_iterator_T<D,RefType,YaleRef> >;//row_stored_iterator;
|
|
108
|
+
friend class nm::YaleStorage<D>;
|
|
109
|
+
|
|
110
|
+
//friend row_stored_nd_iterator;
|
|
111
|
+
|
|
112
|
+
inline size_t ija(size_t pp) const { return y.ija(pp); }
|
|
113
|
+
inline size_t& ija(size_t pp) { return y.ija(pp); }
|
|
114
|
+
inline RefType& a(size_t p) const { return y.a_p()[p]; }
|
|
115
|
+
inline RefType& a(size_t p) { return y.a_p()[p]; }
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
row_iterator_T(YaleRef& obj, size_t ii = 0)
|
|
120
|
+
: y(obj), i_(ii)
|
|
121
|
+
{
|
|
122
|
+
update();
|
|
123
|
+
}
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
|
|
127
|
+
bool operator!=(const row_iterator_T<E,ERefType>& rhs) const {
|
|
128
|
+
return i_ != rhs.i_;
|
|
129
|
+
}
|
|
130
|
+
|
|
131
|
+
template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
|
|
132
|
+
bool operator==(const row_iterator_T<E,ERefType>& rhs) const {
|
|
133
|
+
return i_ == rhs.i_;
|
|
134
|
+
}
|
|
135
|
+
|
|
136
|
+
template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
|
|
137
|
+
bool operator<(const row_iterator_T<E,ERefType>& rhs) const {
|
|
138
|
+
return i_ < rhs.i_;
|
|
139
|
+
}
|
|
140
|
+
|
|
141
|
+
template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
|
|
142
|
+
bool operator>(const row_iterator_T<E,ERefType>& rhs) const {
|
|
143
|
+
return i_ > rhs.i_;
|
|
144
|
+
}
|
|
145
|
+
|
|
146
|
+
row_iterator_T<D,RefType,YaleRef>& operator++() {
|
|
147
|
+
if (is_end()) throw std::out_of_range("attempted to iterate past end of slice (vertically)");
|
|
148
|
+
++i_;
|
|
149
|
+
update();
|
|
150
|
+
return *this;
|
|
151
|
+
}
|
|
152
|
+
|
|
153
|
+
row_iterator_T<D,RefType,YaleRef> operator++(int dummy) const {
|
|
154
|
+
row_iterator_T<D,RefType,YaleRef> next(*this);
|
|
155
|
+
return ++next;
|
|
156
|
+
}
|
|
157
|
+
|
|
158
|
+
bool is_end() const {
|
|
159
|
+
return i_ == y.shape(0) && p_first == y.ija(y.real_shape(0));
|
|
160
|
+
}
|
|
161
|
+
|
|
162
|
+
size_t real_i() const {
|
|
163
|
+
return i_ + y.offset(0);
|
|
164
|
+
}
|
|
165
|
+
|
|
166
|
+
size_t i() const {
|
|
167
|
+
return i_;
|
|
168
|
+
}
|
|
169
|
+
|
|
170
|
+
// last element of the real row
|
|
171
|
+
size_t p_real_last() const {
|
|
172
|
+
return y.ija(real_i()+1)-1;
|
|
173
|
+
}
|
|
174
|
+
|
|
175
|
+
// first element of the real row
|
|
176
|
+
size_t p_real_first() const {
|
|
177
|
+
return y.ija(real_i());
|
|
178
|
+
}
|
|
179
|
+
|
|
180
|
+
// Is the real row of the original matrix totally empty of NDs?
|
|
181
|
+
bool real_nd_empty() const {
|
|
182
|
+
return p_real_last() < p_real_first();
|
|
183
|
+
}
|
|
184
|
+
|
|
185
|
+
bool nd_empty() const {
|
|
186
|
+
return p_last < p_first;
|
|
187
|
+
}
|
|
188
|
+
|
|
189
|
+
// slice j coord of the diag.
|
|
190
|
+
size_t diag_j() const {
|
|
191
|
+
if (!has_diag())
|
|
192
|
+
throw std::out_of_range("don't call diag_j unless you've checked for one");
|
|
193
|
+
return real_i() - y.offset(1);
|
|
194
|
+
}
|
|
195
|
+
|
|
196
|
+
// return the actual position of the diagonal element for this real row, regardless of whether
|
|
197
|
+
// it's in range or not.
|
|
198
|
+
size_t p_diag() const {
|
|
199
|
+
return real_i();
|
|
200
|
+
}
|
|
201
|
+
|
|
202
|
+
// Checks to see if there is a diagonal within the slice
|
|
203
|
+
bool has_diag() const {
|
|
204
|
+
// real position of diag is real_i == real_j. Is it in range?
|
|
205
|
+
return (p_diag() >= y.offset(1) && p_diag() - y.offset(1) < y.shape(1));
|
|
206
|
+
}
|
|
207
|
+
|
|
208
|
+
// Checks to see if the diagonal is the first entry in the slice.
|
|
209
|
+
bool is_diag_first() const {
|
|
210
|
+
if (!has_diag()) return false;
|
|
211
|
+
if (nd_empty()) return true;
|
|
212
|
+
return diag_j() < y.ija(p_first) - y.offset(1);
|
|
213
|
+
}
|
|
214
|
+
|
|
215
|
+
// Checks to see if the diagonal is the last entry in the slice.
|
|
216
|
+
bool is_diag_last() const {
|
|
217
|
+
if (!has_diag()) return false;
|
|
218
|
+
if (nd_empty()) return true;
|
|
219
|
+
return diag_j() > y.ija(p_last);
|
|
220
|
+
}
|
|
221
|
+
|
|
222
|
+
// Is the row of the slice totally empty of NDs and Ds?
|
|
223
|
+
// We can only determine that it's empty of Ds if the diagonal
|
|
224
|
+
// is not a part of the sliced portion of the row.
|
|
225
|
+
bool empty() const {
|
|
226
|
+
return nd_empty() && has_diag();
|
|
227
|
+
}
|
|
228
|
+
|
|
229
|
+
|
|
230
|
+
size_t shape(size_t pp) const {
|
|
231
|
+
return y.shape(pp);
|
|
232
|
+
}
|
|
233
|
+
|
|
234
|
+
size_t offset(size_t pp) const {
|
|
235
|
+
return y.offset(pp);
|
|
236
|
+
}
|
|
237
|
+
|
|
238
|
+
inline VALUE rb_i() const { return LONG2NUM(i()); }
|
|
239
|
+
|
|
240
|
+
row_stored_iterator_T<D,RefType,YaleRef> begin() { return row_stored_iterator_T<D,RefType,YaleRef>(*this, p_first); }
|
|
241
|
+
row_stored_nd_iterator_T<D,RefType,YaleRef> ndbegin() { return row_stored_nd_iterator_T<D,RefType,YaleRef>(*this, p_first); }
|
|
242
|
+
row_stored_iterator_T<D,RefType,YaleRef> end() { return row_stored_iterator_T<D,RefType,YaleRef>(*this, p_last+1, true); }
|
|
243
|
+
row_stored_nd_iterator_T<D,RefType,YaleRef> ndend() { return row_stored_nd_iterator_T<D,RefType,YaleRef>(*this, p_last+1); }
|
|
244
|
+
|
|
245
|
+
row_stored_iterator_T<D,RefType,YaleRef> begin() const { return row_stored_iterator_T<D,RefType,YaleRef>(*this, p_first); }
|
|
246
|
+
row_stored_nd_iterator_T<D,RefType,YaleRef> ndbegin() const { return row_stored_nd_iterator_T<D,RefType,YaleRef>(*this, p_first); }
|
|
247
|
+
row_stored_iterator_T<D,RefType,YaleRef> end() const { return row_stored_iterator_T<D,RefType,YaleRef>(*this, p_last+1, true); }
|
|
248
|
+
row_stored_nd_iterator_T<D,RefType,YaleRef> ndend() const { return row_stored_nd_iterator_T<D,RefType,YaleRef>(*this, p_last+1); }
|
|
249
|
+
|
|
250
|
+
|
|
251
|
+
row_stored_nd_iterator_T<D,RefType,YaleRef> lower_bound(const size_t& j) const {
|
|
252
|
+
row_stored_nd_iterator_T<D,RefType,YaleRef>(*this, y.real_find_left_boundary_pos(p_first, p_last, y.offset(1)));
|
|
253
|
+
}
|
|
254
|
+
|
|
255
|
+
row_stored_nd_iterator_T<D,RefType,YaleRef> ndfind(size_t j) {
|
|
256
|
+
if (j == 0) return ndbegin();
|
|
257
|
+
size_t p = p_first > p_last ? p_first : y.real_find_left_boundary_pos(p_first, p_last, j + y.offset(1));
|
|
258
|
+
row_stored_nd_iterator iter = row_stored_nd_iterator_T<D,RefType,YaleRef>(*this, p);
|
|
259
|
+
return iter;
|
|
260
|
+
}
|
|
261
|
+
|
|
262
|
+
row_stored_iterator_T<D,RefType,YaleRef> find(size_t j) {
|
|
263
|
+
if (j == 0) return begin(); // may or may not be on the diagonal
|
|
264
|
+
else return row_stored_iterator_T<D,RefType,YaleRef>(*this, ndfind(j).p(), false); // is on the diagonal, definitely
|
|
265
|
+
}
|
|
266
|
+
|
|
267
|
+
/*
|
|
268
|
+
* Remove an entry from an already found non-diagonal position. Adjust this row appropriately so we can continue to
|
|
269
|
+
* use it.
|
|
270
|
+
*/
|
|
271
|
+
//template <typename = typename std::enable_if<!std::is_const<RefType>::value>::type>
|
|
272
|
+
row_stored_nd_iterator erase(row_stored_nd_iterator position) {
|
|
273
|
+
size_t sz = y.size();
|
|
274
|
+
if (sz - 1 <= y.capacity() / nm::yale_storage::GROWTH_CONSTANT) {
|
|
275
|
+
y.update_resize_move(position, real_i(), -1);
|
|
276
|
+
} else {
|
|
277
|
+
y.move_left(position, 1);
|
|
278
|
+
y.update_real_row_sizes_from(real_i(), -1);
|
|
279
|
+
}
|
|
280
|
+
adjust_length(-1);
|
|
281
|
+
return row_stored_nd_iterator(*this, position.p()-1);
|
|
282
|
+
}
|
|
283
|
+
|
|
284
|
+
/*
|
|
285
|
+
* Remove an entry from the matrix at the already-located position. If diagonal, just sets to default; otherwise,
|
|
286
|
+
* actually removes the entry.
|
|
287
|
+
*/
|
|
288
|
+
//template <typename = typename std::enable_if<!std::is_const<RefType>::value>::type>
|
|
289
|
+
row_stored_nd_iterator erase(const row_stored_iterator& jt) {
|
|
290
|
+
if (jt.diag()) {
|
|
291
|
+
*jt = y.const_default_obj(); // diagonal is the easy case -- no movement.
|
|
292
|
+
return row_stored_nd_iterator(*this, jt.p());
|
|
293
|
+
} else {
|
|
294
|
+
return erase(row_stored_nd_iterator(*this, jt.p()));
|
|
295
|
+
}
|
|
296
|
+
}
|
|
297
|
+
|
|
298
|
+
|
|
299
|
+
|
|
300
|
+
//template <typename = typename std::enable_if<!std::is_const<RefType>::value>::type>
|
|
301
|
+
row_stored_nd_iterator insert(row_stored_nd_iterator position, size_t jj, const D& val) {
|
|
302
|
+
size_t sz = y.size();
|
|
303
|
+
while (!position.end() && position.j() < jj) ++position; // position is just a hint. (This loop ideally only has to happen once.)
|
|
304
|
+
|
|
305
|
+
if (!position.end() && position.j() == jj) {
|
|
306
|
+
*position = val; // replace existing
|
|
307
|
+
} else {
|
|
308
|
+
|
|
309
|
+
if (sz + 1 > y.capacity()) {
|
|
310
|
+
y.update_resize_move(position, real_i(), 1);
|
|
311
|
+
} else {
|
|
312
|
+
y.move_right(position, 1);
|
|
313
|
+
y.update_real_row_sizes_from(real_i(), 1);
|
|
314
|
+
}
|
|
315
|
+
ija(position.p()) = jj + y.offset(1); // set column ID
|
|
316
|
+
a(position.p()) = val;
|
|
317
|
+
adjust_length(1);
|
|
318
|
+
}
|
|
319
|
+
|
|
320
|
+
return position++;
|
|
321
|
+
}
|
|
322
|
+
|
|
323
|
+
|
|
324
|
+
/*
|
|
325
|
+
* This version of insert doesn't return anything. Why, when the others do?
|
|
326
|
+
*
|
|
327
|
+
* Well, mainly because j here can be a diagonal entry. Most of the inserters return the *next* element following
|
|
328
|
+
* the insertion, but to do that, we have to create a row_stored_nd_iterator, which requires at least one binary
|
|
329
|
+
* search for the location following the diagonal (and as of the writing of this, two binary searches). There's no
|
|
330
|
+
* reason to do that when we never actually *use* the return value. So instead we just have void.
|
|
331
|
+
*/
|
|
332
|
+
//template <typename = typename std::enable_if<!std::is_const<RefType>::value>::type>
|
|
333
|
+
void insert(size_t j, const D& val) {
|
|
334
|
+
if (j + y.offset(1) == real_i()) a(real_i()) = val;
|
|
335
|
+
else {
|
|
336
|
+
row_stored_nd_iterator jt = ndfind(j);
|
|
337
|
+
if (!jt.end() && jt.j() == j) {
|
|
338
|
+
if (val == y.const_default_obj()) erase(jt); // erase
|
|
339
|
+
else insert(jt, j, val); // replace
|
|
340
|
+
} else { // only insert if it's not the default
|
|
341
|
+
if (val != y.const_default_obj()) insert(jt, j, val);
|
|
342
|
+
}
|
|
343
|
+
}
|
|
344
|
+
}
|
|
345
|
+
|
|
346
|
+
|
|
347
|
+
/*
|
|
348
|
+
* Determines a plan for inserting a single row. Returns an integer giving the amount of the row change.
|
|
349
|
+
*/
|
|
350
|
+
int single_row_insertion_plan(row_stored_nd_iterator position, size_t jj, size_t length, D const* v, size_t v_size, size_t& v_offset) {
|
|
351
|
+
int nd_change = 0;
|
|
352
|
+
|
|
353
|
+
for (size_t jc = jj; jc < jj + length; ++jc, ++v_offset) {
|
|
354
|
+
if (v_offset >= v_size) v_offset %= v_size; // reset v position.
|
|
355
|
+
|
|
356
|
+
if (jc + y.offset(1) != real_i()) { // diagonal -- no nd_change here
|
|
357
|
+
if (position.end()) {
|
|
358
|
+
if (v[v_offset] != y.const_default_obj()) nd_change++; // insert
|
|
359
|
+
} else if (position.j() != jc) { // not present -- do we need to add it?
|
|
360
|
+
if (v[v_offset] != y.const_default_obj()) nd_change++;
|
|
361
|
+
} else { // position.j() == jc
|
|
362
|
+
if (v[v_offset] == y.const_default_obj()) nd_change--;
|
|
363
|
+
++position; // move iterator forward.
|
|
364
|
+
}
|
|
365
|
+
}
|
|
366
|
+
}
|
|
367
|
+
return nd_change;
|
|
368
|
+
}
|
|
369
|
+
|
|
370
|
+
/*
|
|
371
|
+
* Determine a plan for inserting a single row -- finds the position first. Returns the position and
|
|
372
|
+
* the change amount. Don't use this one if you can help it because it requires a binary search of
|
|
373
|
+
* the row.
|
|
374
|
+
*/
|
|
375
|
+
std::pair<int,size_t> single_row_insertion_plan(size_t jj, size_t length, D const* v, size_t v_size, size_t& v_offset) {
|
|
376
|
+
std::pair<int,size_t> result;
|
|
377
|
+
row_stored_nd_iterator pos = ndfind(jj);
|
|
378
|
+
result.first = single_row_insertion_plan(pos, jj, length, v, v_size, v_offset);
|
|
379
|
+
result.second = pos.p();
|
|
380
|
+
return result;
|
|
381
|
+
}
|
|
382
|
+
|
|
383
|
+
/*
|
|
384
|
+
* Insert elements into a single row. Returns an iterator to the end of the insertion range.
|
|
385
|
+
*/
|
|
386
|
+
row_stored_nd_iterator insert(row_stored_nd_iterator position, size_t jj, size_t length, D const* v, size_t v_size, size_t& v_offset) {
|
|
387
|
+
size_t tmp_v_offset = v_offset;
|
|
388
|
+
int nd_change = single_row_insertion_plan(position, jj, length, v, v_size, tmp_v_offset);
|
|
389
|
+
|
|
390
|
+
// First record the position, just in case our iterator becomes invalid.
|
|
391
|
+
size_t pp = position.p();
|
|
392
|
+
|
|
393
|
+
// Resize the array as necessary, or move entries after the insertion point to make room.
|
|
394
|
+
size_t sz = y.size();
|
|
395
|
+
if (sz + nd_change > y.capacity() || sz + nd_change <= y.capacity() / nm::yale_storage::GROWTH_CONSTANT)
|
|
396
|
+
y.update_resize_move(position, real_i(), nd_change);
|
|
397
|
+
else if (nd_change != 0) {
|
|
398
|
+
if (nd_change < 0) y.move_left(position, -nd_change);
|
|
399
|
+
else if (nd_change > 0) y.move_right(position, nd_change);
|
|
400
|
+
y.update_real_row_sizes_from(real_i(), nd_change);
|
|
401
|
+
}
|
|
402
|
+
|
|
403
|
+
for (size_t jc = jj; jc < jj + length; ++jc, ++v_offset) {
|
|
404
|
+
if (v_offset >= v_size) v_offset %= v_size; // reset v position.
|
|
405
|
+
|
|
406
|
+
if (jc + y.offset(1) == real_i()) {
|
|
407
|
+
y.a(real_i()) = v[v_offset]; // modify diagonal
|
|
408
|
+
} else if (v[v_offset] != y.const_default_obj()) {
|
|
409
|
+
y.ija(pp) = jc; // modify non-diagonal
|
|
410
|
+
y.a(pp) = v[v_offset];
|
|
411
|
+
++pp;
|
|
412
|
+
}
|
|
413
|
+
}
|
|
414
|
+
|
|
415
|
+
// Update this row.
|
|
416
|
+
adjust_length(nd_change);
|
|
417
|
+
|
|
418
|
+
return row_stored_nd_iterator(*this, pp);
|
|
419
|
+
}
|
|
420
|
+
|
|
421
|
+
/*
|
|
422
|
+
* For when we don't need to worry about the offset, does the same thing as the insert above.
|
|
423
|
+
*/
|
|
424
|
+
row_stored_nd_iterator insert(const row_stored_nd_iterator& position, size_t jj, size_t length, D const* v, size_t v_size) {
|
|
425
|
+
size_t v_offset = 0;
|
|
426
|
+
return insert(position, jj, length, v, v_size, v_offset);
|
|
427
|
+
}
|
|
428
|
+
|
|
429
|
+
|
|
430
|
+
/*
|
|
431
|
+
* Merges elements offered for insertion with existing elements in the row.
|
|
432
|
+
*/
|
|
433
|
+
row_stored_nd_iterator insert(size_t jj, size_t length, D const* v, size_t v_size, size_t& v_offset) {
|
|
434
|
+
return insert(ndfind(jj), jj, length, v, v_size, v_offset);
|
|
435
|
+
}
|
|
436
|
+
|
|
437
|
+
/*
|
|
438
|
+
* Merges elements offered for insertion with existing elements in the row.
|
|
439
|
+
*/
|
|
440
|
+
row_stored_nd_iterator insert(size_t jj, size_t length, D const* v, size_t v_size) {
|
|
441
|
+
size_t v_offset = 0;
|
|
442
|
+
return insert(ndfind(jj), jj, length, v, v_size, v_offset);
|
|
443
|
+
}
|
|
444
|
+
|
|
445
|
+
|
|
446
|
+
};
|
|
447
|
+
|
|
448
|
+
} } // end of nm::yale_storage namespace
|
|
449
|
+
|
|
450
|
+
#endif // YALE_ITERATORS_ROW_H
|
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
/////////////////////////////////////////////////////////////////////
|
|
2
|
+
// = NMatrix
|
|
3
|
+
//
|
|
4
|
+
// A linear algebra library for scientific computation in Ruby.
|
|
5
|
+
// NMatrix is part of SciRuby.
|
|
6
|
+
//
|
|
7
|
+
// NMatrix was originally inspired by and derived from NArray, by
|
|
8
|
+
// Masahiro Tanaka: http://narray.rubyforge.org
|
|
9
|
+
//
|
|
10
|
+
// == Copyright Information
|
|
11
|
+
//
|
|
12
|
+
// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
|
|
13
|
+
// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
|
|
14
|
+
//
|
|
15
|
+
// Please see LICENSE.txt for additional copyright notices.
|
|
16
|
+
//
|
|
17
|
+
// == Contributing
|
|
18
|
+
//
|
|
19
|
+
// By contributing source code to SciRuby, you agree to be bound by
|
|
20
|
+
// our Contributor Agreement:
|
|
21
|
+
//
|
|
22
|
+
// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
|
|
23
|
+
//
|
|
24
|
+
// == row_stored.h
|
|
25
|
+
//
|
|
26
|
+
// Iterator for traversing a single stored row of a matrix (needed
|
|
27
|
+
// for row.h). FIXME: This is not as efficient as it could be; it uses
|
|
28
|
+
// two binary searches to find the beginning and end of each slice.
|
|
29
|
+
// The end search shouldn't be necessary, but I couldn't make it
|
|
30
|
+
// work without it, and eventually decided my dissertation should
|
|
31
|
+
// be a priority.
|
|
32
|
+
//
|
|
33
|
+
|
|
34
|
+
#ifndef YALE_ITERATORS_ROW_STORED_H
|
|
35
|
+
# define YALE_ITERATORS_ROW_STORED_H
|
|
36
|
+
|
|
37
|
+
#include <ruby.h>
|
|
38
|
+
#include <stdexcept>
|
|
39
|
+
|
|
40
|
+
namespace nm { namespace yale_storage {
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
/*
 * Iterator for visiting each stored element in a row, including diagonals.
 *
 * Yale storage keeps the diagonal entry of each row out-of-band, so this
 * iterator weaves it back into column order while walking the row's stored
 * non-diagonal (ND) positions. Extends the ND-only iterator (the base class)
 * with two flags: d (currently parked on the diagonal) and d_visited (the
 * diagonal has already been yielded, or the row has none in the slice).
 */
template <typename D,
          typename RefType,
          typename YaleRef = typename std::conditional<
            std::is_const<RefType>::value,
            const nm::YaleStorage<D>,
            nm::YaleStorage<D>
          >::type,
          typename RowRef = typename std::conditional<
            std::is_const<RefType>::value,
            const row_iterator_T<D,RefType,YaleRef>,
            row_iterator_T<D,RefType,YaleRef>
          >::type>
class row_stored_iterator_T : public row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> {
protected:
  using row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>::r;  // row being traversed (inherited)
  using row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>::p_; // current IJA position (inherited)
  bool d_visited, d; // d_visited: diagonal already yielded (or absent); d: currently on the diagonal

public:

  // end_ is necessary for the logic when a row is empty other than the diagonal. If we just
  // relied on pp == last_p+1, it'd look like these empty rows were actually end() iterators.
  // So we have to actually mark end_ by telling it to ignore that diagonal visitation.
  row_stored_iterator_T(RowRef& row, size_t pp, bool end_ = false)
  : row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>(row, pp),
    d_visited(!row.has_diag()), // if the row has no diagonal, just marked it as visited.
    d(r.is_diag_first() && !end_) // do we start at the diagonal?
  {
  }

  /* Diagonal constructor. Puts us on the diagonal (unless end is true) */
  /*row_stored_iterator_T(RowRef& row, bool end_, size_t j)
  : row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>(row.ndfind(j)),
    d_visited(false),
    d(!end_ && j + row.offset(1) == row.real_i())
  { }*/

  // True while positioned on the row's diagonal entry.
  virtual bool diag() const {
    return d;
  }

  // Past-the-end: beyond the last stored ND position and not parked on the diagonal.
  virtual bool end() const {
    return !d && p_ > r.p_last;
  }

  // Advance one stored entry, yielding the diagonal at its in-column-order slot.
  row_stored_iterator_T<D,RefType,YaleRef,RowRef>& operator++() {
    if (end()) throw std::out_of_range("cannot increment row stored iterator past end of stored row");
    if (d) {
      // Leaving the diagonal: mark it visited, fall back to the ND position p_
      // (which was never advanced while we sat on the diagonal).
      d_visited = true;
      d = false;
    } else {
      ++p_;
      // Are we at a diagonal?
      // If we hit the end or reach a point where j > diag_j, and still
      // haven't visited the diagonal, we should do so before continuing.
      if (!d_visited && (end() || j() > r.diag_j())) {
        d = true;
      }
    }

    return *this;
  }

  // NOTE(review): post-increment is const and returns the *incremented* copy
  // (pre-increment semantics); *this is never advanced. The local `r` also
  // shadows the inherited row reference `r` within this function. Callers may
  // rely on both quirks -- confirm before changing.
  row_stored_iterator_T<D,RefType,YaleRef,RowRef> operator++(int dummy) const {
    row_stored_iterator_T<D,RefType,YaleRef,RowRef> r(*this);
    return ++r;
  }

  // Slice-relative column of the current entry (diagonal or ND).
  size_t j() const {
    if (end()) throw std::out_of_range("cannot dereference an end pointer");
    return (d ? r.p_diag() : r.ija(p_)) - r.offset(1);
  }

  // Need to declare all row_stored_iterator_T friends of each other.
  template <typename E, typename ERefType, typename EYaleRef, typename ERowRef> friend class row_stored_iterator_T;

  // De-reference the iterator
  RefType& operator*() {
    return d ? r.a(r.p_diag()) : r.a(p_);
  }

  RefType& operator*() const {
    return d ? r.a(r.p_diag()) : r.a(p_);
  }

  // Ruby VALUE de-reference
  VALUE operator~() const {
    return nm_rb_dereference<D>(**this);
  }

};
|
|
137
|
+
|
|
138
|
+
}} // end of namespace nm::yale_storage
|
|
139
|
+
|
|
140
|
+
#endif // YALE_ITERATORS_ROW_STORED_H
|