nmatrix-fftw 0.2.1

Files changed (74)
  1. checksums.yaml +7 -0
  2. data/ext/nmatrix/data/complex.h +388 -0
  3. data/ext/nmatrix/data/data.h +652 -0
  4. data/ext/nmatrix/data/meta.h +64 -0
  5. data/ext/nmatrix/data/ruby_object.h +389 -0
  6. data/ext/nmatrix/math/asum.h +120 -0
  7. data/ext/nmatrix/math/cblas_enums.h +36 -0
  8. data/ext/nmatrix/math/cblas_templates_core.h +507 -0
  9. data/ext/nmatrix/math/gemm.h +241 -0
  10. data/ext/nmatrix/math/gemv.h +178 -0
  11. data/ext/nmatrix/math/getrf.h +255 -0
  12. data/ext/nmatrix/math/getrs.h +121 -0
  13. data/ext/nmatrix/math/imax.h +79 -0
  14. data/ext/nmatrix/math/laswp.h +165 -0
  15. data/ext/nmatrix/math/long_dtype.h +49 -0
  16. data/ext/nmatrix/math/math.h +745 -0
  17. data/ext/nmatrix/math/nrm2.h +160 -0
  18. data/ext/nmatrix/math/rot.h +117 -0
  19. data/ext/nmatrix/math/rotg.h +106 -0
  20. data/ext/nmatrix/math/scal.h +71 -0
  21. data/ext/nmatrix/math/trsm.h +332 -0
  22. data/ext/nmatrix/math/util.h +148 -0
  23. data/ext/nmatrix/nm_memory.h +60 -0
  24. data/ext/nmatrix/nmatrix.h +438 -0
  25. data/ext/nmatrix/ruby_constants.h +106 -0
  26. data/ext/nmatrix/storage/common.h +177 -0
  27. data/ext/nmatrix/storage/dense/dense.h +129 -0
  28. data/ext/nmatrix/storage/list/list.h +138 -0
  29. data/ext/nmatrix/storage/storage.h +99 -0
  30. data/ext/nmatrix/storage/yale/class.h +1139 -0
  31. data/ext/nmatrix/storage/yale/iterators/base.h +143 -0
  32. data/ext/nmatrix/storage/yale/iterators/iterator.h +131 -0
  33. data/ext/nmatrix/storage/yale/iterators/row.h +450 -0
  34. data/ext/nmatrix/storage/yale/iterators/row_stored.h +140 -0
  35. data/ext/nmatrix/storage/yale/iterators/row_stored_nd.h +169 -0
  36. data/ext/nmatrix/storage/yale/iterators/stored_diagonal.h +124 -0
  37. data/ext/nmatrix/storage/yale/math/transpose.h +110 -0
  38. data/ext/nmatrix/storage/yale/yale.h +203 -0
  39. data/ext/nmatrix/types.h +55 -0
  40. data/ext/nmatrix/util/io.h +115 -0
  41. data/ext/nmatrix/util/sl_list.h +144 -0
  42. data/ext/nmatrix/util/util.h +78 -0
  43. data/ext/nmatrix_fftw/extconf.rb +122 -0
  44. data/ext/nmatrix_fftw/nmatrix_fftw.cpp +274 -0
  45. data/lib/nmatrix/fftw.rb +343 -0
  46. data/spec/00_nmatrix_spec.rb +736 -0
  47. data/spec/01_enum_spec.rb +190 -0
  48. data/spec/02_slice_spec.rb +389 -0
  49. data/spec/03_nmatrix_monkeys_spec.rb +78 -0
  50. data/spec/2x2_dense_double.mat +0 -0
  51. data/spec/4x4_sparse.mat +0 -0
  52. data/spec/4x5_dense.mat +0 -0
  53. data/spec/blas_spec.rb +193 -0
  54. data/spec/elementwise_spec.rb +303 -0
  55. data/spec/homogeneous_spec.rb +99 -0
  56. data/spec/io/fortran_format_spec.rb +88 -0
  57. data/spec/io/harwell_boeing_spec.rb +98 -0
  58. data/spec/io/test.rua +9 -0
  59. data/spec/io_spec.rb +149 -0
  60. data/spec/lapack_core_spec.rb +482 -0
  61. data/spec/leakcheck.rb +16 -0
  62. data/spec/math_spec.rb +807 -0
  63. data/spec/nmatrix_yale_resize_test_associations.yaml +2802 -0
  64. data/spec/nmatrix_yale_spec.rb +286 -0
  65. data/spec/plugins/fftw/fftw_spec.rb +348 -0
  66. data/spec/rspec_monkeys.rb +56 -0
  67. data/spec/rspec_spec.rb +34 -0
  68. data/spec/shortcuts_spec.rb +310 -0
  69. data/spec/slice_set_spec.rb +157 -0
  70. data/spec/spec_helper.rb +149 -0
  71. data/spec/stat_spec.rb +203 -0
  72. data/spec/test.pcd +20 -0
  73. data/spec/utm5940.mtx +83844 -0
  74. metadata +151 -0
@@ -0,0 +1,143 @@ data/ext/nmatrix/storage/yale/iterators/base.h
/////////////////////////////////////////////////////////////////////
// = NMatrix
//
// A linear algebra library for scientific computation in Ruby.
// NMatrix is part of SciRuby.
//
// NMatrix was originally inspired by and derived from NArray, by
// Masahiro Tanaka: http://narray.rubyforge.org
//
// == Copyright Information
//
// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
//
// Please see LICENSE.txt for additional copyright notices.
//
// == Contributing
//
// By contributing source code to SciRuby, you agree to be bound by
// our Contributor Agreement:
//
// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
//
// == base.h
//
// Yale storage pure virtual basic_iterator class.
//

#ifndef YALE_ITERATORS_BASE_H
# define YALE_ITERATORS_BASE_H

#include <ruby.h>
#include <algorithm>   // std::min
#include <type_traits>
#include <typeinfo>
#include <utility>     // std::pair
#include <stdexcept>

namespace nm {

template <typename D> class YaleStorage;

namespace yale_storage {

template <typename D>
inline VALUE nm_rb_dereference(D const& v) {
  return nm::RubyObject(v).rval;
}

// A full specialization defined in a header must be inline, or every
// translation unit that includes this file gets its own definition.
template <>
inline VALUE nm_rb_dereference<nm::RubyObject>(nm::RubyObject const& v) {
  return v.rval;
}

/*
 * Iterator base class (pure virtual).
 */
template <typename D,
          typename RefType,
          typename YaleRef = typename std::conditional<
            std::is_const<RefType>::value,
            const nm::YaleStorage<D>,
            nm::YaleStorage<D>
          >::type>
class basic_iterator_T {

protected:
  YaleRef& y;
  size_t i_;
  size_t p_;

public:
  size_t offset(size_t d) const { return y.offset(d); }
  size_t shape(size_t d) const { return y.shape(d); }
  size_t real_shape(size_t d) const { return y.real_shape(d); }

  size_t dense_location() const {
    return i()*shape(1) + j();
  }

  template <typename T = typename std::conditional<std::is_const<RefType>::value, const size_t, size_t>::type>
  T& ija(size_t pp) const { return y.ija(pp); }

  template <typename T = typename std::conditional<std::is_const<RefType>::value, const size_t, size_t>::type>
  T& ija(size_t pp) { return y.ija(pp); }

  virtual bool diag() const {
    return p_ < std::min(y.real_shape(0), y.real_shape(1));
  }
  virtual bool done_with_diag() const {
    return p_ == std::min(y.real_shape(0), y.real_shape(1));
  }
  virtual bool nondiag() const {
    return p_ > std::min(y.real_shape(0), y.real_shape(1));
  }

  basic_iterator_T(YaleRef& obj, size_t ii = 0, size_t pp = 0) : y(obj), i_(ii), p_(pp) { }

  basic_iterator_T<D,RefType,YaleRef>& operator=(const basic_iterator_T<D,RefType,YaleRef>& rhs) {
    if (&y != &(rhs.y)) throw std::logic_error("can only be used on iterators with the same matrix");
    i_ = rhs.i_;
    p_ = rhs.p_;
    return *this;
  }

  virtual inline size_t i() const { return i_; }
  virtual size_t j() const = 0;

  virtual inline VALUE rb_i() const { return LONG2NUM(i()); }
  virtual inline VALUE rb_j() const { return LONG2NUM(j()); }

  virtual size_t real_i() const { return offset(0) + i(); }
  virtual size_t real_j() const { return offset(1) + j(); }
  virtual size_t p() const { return p_; }
  virtual bool real_ndnz_exists() const { return !y.real_row_empty(real_i()) && ija(p_) == real_j(); }

  virtual RefType& operator*() = 0;
  virtual RefType& operator*() const = 0;

  // Ruby VALUE de-reference
  inline VALUE operator~() const {
    return nm_rb_dereference<D>(**this);
  }

  virtual bool operator==(const std::pair<size_t,size_t>& ij) {
    if (p() >= ija(real_shape(0))) return false;
    else return i() == ij.first && j() == ij.second;
  }

  virtual bool operator==(const basic_iterator_T<D,RefType,YaleRef>& rhs) const {
    return i() == rhs.i() && j() == rhs.j();
  }
  virtual bool operator!=(const basic_iterator_T<D,RefType,YaleRef>& rhs) const {
    return i() != rhs.i() || j() != rhs.j();
  }
};

} } // end of namespace nm::yale_storage

#endif // YALE_ITERATORS_BASE_H
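For readers unfamiliar with the layout these iterators traverse: NMatrix's Yale storage keeps the diagonal of an n-row matrix in the first n slots of the A array, with a default-value slot at A[n] and non-diagonal ("ND") entries after that; the IJA array's first n+1 slots are row pointers into the ND region, and its remaining slots are the column indices paired with A's ND entries. Below is a minimal, self-contained sketch of that layout using plain std::vector stand-ins; the toy data and array shapes are illustrative assumptions, not the gem's actual structs.

// Toy stand-in for the new-Yale layout that basic_iterator_T walks.
#include <cstdio>
#include <cstddef>
#include <vector>

int main() {
  const std::size_t n = 4; // real_shape(0)
  // A: diagonal first (positions 0..n-1), one default slot at n,
  // then non-diagonal ("ND") entries in row order.
  std::vector<double> a = {1, 2, 3, 4, /*default*/0, /*ND*/9, 8};
  // IJA: positions 0..n are row pointers into the ND region;
  // positions n+1 onward are the column indices of the ND entries.
  std::vector<std::size_t> ija = {5, 6, 6, 7, 7, /*cols*/3, 0};

  for (std::size_t i = 0; i < n; ++i) {
    std::printf("D(%zu,%zu) = %g\n", i, i, a[i]);      // stored diagonal
    for (std::size_t p = ija[i]; p < ija[i+1]; ++p)    // stored NDs of row i
      std::printf("ND(%zu,%zu) = %g\n", i, ija[p], a[p]);
  }
}

This encodes the matrix with diagonal (1,2,3,4) plus stored entries (0,3)=9 and (2,0)=8; everything else is the default. Note how the p_ < n test in basic_iterator_T::diag() falls out of this layout: positions below n are diagonal slots.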
@@ -0,0 +1,131 @@ data/ext/nmatrix/storage/yale/iterators/iterator.h
/////////////////////////////////////////////////////////////////////
// = NMatrix
//
// A linear algebra library for scientific computation in Ruby.
// NMatrix is part of SciRuby.
//
// NMatrix was originally inspired by and derived from NArray, by
// Masahiro Tanaka: http://narray.rubyforge.org
//
// == Copyright Information
//
// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
//
// Please see LICENSE.txt for additional copyright notices.
//
// == Contributing
//
// By contributing source code to SciRuby, you agree to be bound by
// our Contributor Agreement:
//
// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
//
// == iterator.h
//
// Iterate over Yale storage as if it were dense.
//

#ifndef YALE_ITERATORS_ITERATOR_H
# define YALE_ITERATORS_ITERATOR_H

#include <ruby.h>
#include <type_traits>
#include <typeinfo>

namespace nm { namespace yale_storage {

/*
 * Iterator for traversing the matrix as if it were dense (visits each entry in order).
 */
template <typename D,
          typename RefType,
          typename YaleRef = typename std::conditional<
            std::is_const<RefType>::value,
            const nm::YaleStorage<D>,
            nm::YaleStorage<D>
          >::type>
class iterator_T : public basic_iterator_T<D,RefType,YaleRef> {
  using basic_iterator_T<D,RefType,YaleRef>::i_;
  using basic_iterator_T<D,RefType,YaleRef>::p_;
  using basic_iterator_T<D,RefType,YaleRef>::y;
  using basic_iterator_T<D,RefType,YaleRef>::offset;
  using basic_iterator_T<D,RefType,YaleRef>::shape;
  using basic_iterator_T<D,RefType,YaleRef>::ija;

protected:
  size_t j_; // column position, relative to the slice

public:
  // Create an iterator. May select the row since this is O(1).
  iterator_T(YaleRef& obj, size_t ii = 0)
  : basic_iterator_T<D,RefType,YaleRef>(obj, ii, obj.ija(ii + obj.offset(0))), j_(0)
  {
    // advance to the beginning of the row
    if (obj.offset(1) > 0)
      p_ = y.find_pos_for_insertion(i_,j_);
  }

  // Prefix ++
  iterator_T<D,RefType,YaleRef>& operator++() {
    size_t prev_j = j_++;
    if (j_ >= shape(1)) {
      j_ = 0;
      ++i_;

      // Do a binary search to find the beginning of the next row of the slice.
      // Mirrors the constructor: either offset being nonzero requires a search.
      p_ = (offset(0) > 0 || offset(1) > 0) ? y.find_pos_for_insertion(i_,j_) : ija(i_);
    } else {
      // If the last j was actually stored in this row of the matrix, we need to advance p.
      // This test is the same as real_ndnz_exists.
      if (!y.real_row_empty(i_ + offset(0)) && ija(p_) <= prev_j + offset(1)) ++p_;
    }

    return *this;
  }

  iterator_T<D,RefType,YaleRef> operator++(int dummy) const {
    iterator_T<D,RefType,YaleRef> iter(*this);
    return ++iter;
  }

  virtual bool operator!=(const iterator_T<D,RefType,YaleRef>& rhs) const {
    return this->dense_location() != rhs.dense_location();
  }

  virtual bool operator==(const iterator_T<D,RefType,YaleRef>& rhs) const {
    return this->dense_location() == rhs.dense_location();
  }

  bool operator<(const iterator_T<D,RefType,YaleRef>& rhs) const {
    return this->dense_location() < rhs.dense_location();
  }

  bool operator>(const iterator_T<D,RefType,YaleRef>& rhs) const {
    return this->dense_location() > rhs.dense_location();
  }

  virtual bool diag() const { return i_ + offset(0) == j_ + offset(1); }

  // De-reference
  RefType& operator*() {
    if (diag()) return y.a( i_ + offset(0) );
    else if (p_ >= ija(i_+offset(0)+1)) return y.const_default_obj();
    else if (!y.real_row_empty(i_ + offset(0)) && ija(p_) == j_ + offset(1)) return y.a( p_ );
    else return y.const_default_obj();
  }

  RefType& operator*() const {
    if (diag()) return y.a( i_ + offset(0) );
    else if (p_ >= ija(i_+offset(0)+1)) return y.const_default_obj();
    else if (!y.real_row_empty(i_ + offset(0)) && ija(p_) == j_ + offset(1)) return y.a( p_ );
    else return y.const_default_obj();
  }

  virtual size_t j() const { return j_; }
};

} } // end of namespace nm::yale_storage

#endif // YALE_ITERATORS_ITERATOR_H
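The dereference above reduces to a three-way choice: the stored diagonal slot when i == j, a stored ND entry when the running IJA cursor matches the current column, and the default object otherwise. Here is a self-contained sketch of that dense-order walk over the same toy arrays as the previous example (an unsliced matrix, so all offsets are zero; the data is illustrative only).

// Dense-order traversal of toy new-Yale arrays, mirroring iterator_T::operator*.
#include <cstdio>
#include <cstddef>
#include <vector>

int main() {
  const std::size_t n = 4;
  std::vector<double> a = {1, 2, 3, 4, /*default*/0, /*ND*/9, 8};
  std::vector<std::size_t> ija = {5, 6, 6, 7, 7, /*cols*/3, 0};

  for (std::size_t i = 0; i < n; ++i) {
    std::size_t p = ija[i];                   // like ija(i_): row's first stored ND
    for (std::size_t j = 0; j < n; ++j) {
      double v;
      if (i == j)             v = a[i];       // diagonal is always stored
      else if (p >= ija[i+1]) v = a[n];       // past the row's NDs: default
      else if (ija[p] == j)   v = a[p++];     // stored ND at this column; advance p
      else                    v = a[n];       // not stored: default
      std::printf("%g%c", v, j + 1 == n ? '\n' : ' ');
    }
  }
}

Running this prints the full 4x4 matrix, advancing the ND cursor only when a stored column is consumed, which is exactly the role of the ija(p_) comparison in operator++ and operator*.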
@@ -0,0 +1,450 @@ data/ext/nmatrix/storage/yale/iterators/row.h
/////////////////////////////////////////////////////////////////////
// = NMatrix
//
// A linear algebra library for scientific computation in Ruby.
// NMatrix is part of SciRuby.
//
// NMatrix was originally inspired by and derived from NArray, by
// Masahiro Tanaka: http://narray.rubyforge.org
//
// == Copyright Information
//
// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
//
// Please see LICENSE.txt for additional copyright notices.
//
// == Contributing
//
// By contributing source code to SciRuby, you agree to be bound by
// our Contributor Agreement:
//
// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
//
// == row.h
//
// Iterator for traversing a matrix row by row. Includes an
// orthogonal iterator for visiting each stored entry in a row.
// This one cannot be de-referenced; you have to de-reference
// the column.

#ifndef YALE_ITERATORS_ROW_H
# define YALE_ITERATORS_ROW_H

#include <ruby.h>
#include <type_traits>  // std::conditional, std::is_const
#include <utility>      // std::pair
#include <stdexcept>

namespace nm { namespace yale_storage {

template <typename D,
          typename RefType,
          typename YaleRef = typename std::conditional<
            std::is_const<RefType>::value,
            const nm::YaleStorage<D>,
            nm::YaleStorage<D>
          >::type>
class row_iterator_T {

protected:
  YaleRef& y;
  size_t i_;
  size_t p_first, p_last; // first and last IJA positions in the row

  /*
   * Update the row positions -- used to ensure a row stays valid after an insert operation. Also
   * used to initialize a row iterator at a different row index.
   */
  void update() {
    if (i_ < y.shape(0)) {
      p_first = p_real_first();
      p_last  = p_real_last();
      if (!nd_empty()) {
        // try to find new p_first
        p_first = y.real_find_left_boundary_pos(p_first, p_last, y.offset(1));
        if (!nd_empty()) {
          // also try to find new p_last
          p_last = y.real_find_left_boundary_pos(p_first, p_last, y.offset(1) + y.shape(1) - 1);
          if (y.ija(p_last) - y.offset(1) >= shape(1)) --p_last; // searched too far.
        }
      }
    } else { // invalid row -- this is an end iterator.
      p_first = y.ija(y.real_shape(0));
      p_last  = y.ija(y.real_shape(0))-1; // mark as empty
    }
  }

  /*
   * Indicate to the row iterator that p_first and p_last have moved by some amount. Only
   * meaningful for row_iterator, not const_row_iterator. This is a lightweight form of update().
   */
  void shift(int amount) {
    p_first += amount;
    p_last  += amount;
  }

  /*
   * Enlarge the row by amount by moving p_last over. This is a lightweight form of update().
   */
  void adjust_length(int amount) {
    p_last += amount;
  }

public:
  typedef row_stored_iterator_T<D,RefType,YaleRef, row_iterator_T<D,RefType,YaleRef> >    row_stored_iterator;
  typedef row_stored_nd_iterator_T<D,RefType,YaleRef, row_iterator_T<D,RefType,YaleRef> > row_stored_nd_iterator;
  template <typename E, typename ERefType, typename EYaleRef> friend class row_iterator_T;
  friend class row_stored_iterator_T<D,RefType,YaleRef, row_iterator_T<D,RefType,YaleRef> >;
  friend class row_stored_nd_iterator_T<D,RefType,YaleRef, row_iterator_T<D,RefType,YaleRef> >;
  friend class row_stored_iterator_T<D,RefType,YaleRef, const row_iterator_T<D,RefType,YaleRef> >;
  friend class row_stored_nd_iterator_T<D,RefType,YaleRef, const row_iterator_T<D,RefType,YaleRef> >;
  friend class nm::YaleStorage<D>;

  inline size_t  ija(size_t pp) const { return y.ija(pp); }
  inline size_t& ija(size_t pp)       { return y.ija(pp); }
  inline RefType& a(size_t p) const { return y.a_p()[p]; }
  inline RefType& a(size_t p)       { return y.a_p()[p]; }

  row_iterator_T(YaleRef& obj, size_t ii = 0)
  : y(obj), i_(ii)
  {
    update();
  }

  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
  bool operator!=(const row_iterator_T<E,ERefType>& rhs) const {
    return i_ != rhs.i_;
  }

  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
  bool operator==(const row_iterator_T<E,ERefType>& rhs) const {
    return i_ == rhs.i_;
  }

  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
  bool operator<(const row_iterator_T<E,ERefType>& rhs) const {
    return i_ < rhs.i_;
  }

  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
  bool operator>(const row_iterator_T<E,ERefType>& rhs) const {
    return i_ > rhs.i_;
  }

  row_iterator_T<D,RefType,YaleRef>& operator++() {
    if (is_end()) throw std::out_of_range("attempted to iterate past end of slice (vertically)");
    ++i_;
    update();
    return *this;
  }

  row_iterator_T<D,RefType,YaleRef> operator++(int dummy) const {
    row_iterator_T<D,RefType,YaleRef> next(*this);
    return ++next;
  }

  bool is_end() const {
    return i_ == y.shape(0) && p_first == y.ija(y.real_shape(0));
  }

  size_t real_i() const {
    return i_ + y.offset(0);
  }

  size_t i() const {
    return i_;
  }

  // IJA position of the last entry stored in the real row
  size_t p_real_last() const {
    return y.ija(real_i()+1)-1;
  }

  // IJA position of the first entry stored in the real row
  size_t p_real_first() const {
    return y.ija(real_i());
  }

  // Is the real row of the original matrix totally empty of NDs?
  bool real_nd_empty() const {
    return p_real_last() < p_real_first();
  }

  bool nd_empty() const {
    return p_last < p_first;
  }

  // slice j coordinate of the diagonal
  size_t diag_j() const {
    if (!has_diag())
      throw std::out_of_range("don't call diag_j unless you've checked for one");
    return real_i() - y.offset(1);
  }

  // Return the actual position of the diagonal element for this real row, regardless of whether
  // it's in range or not.
  size_t p_diag() const {
    return real_i();
  }

  // Checks to see if there is a diagonal within the slice
  bool has_diag() const {
    // real position of diag is real_i == real_j. Is it in range?
    return (p_diag() >= y.offset(1) && p_diag() - y.offset(1) < y.shape(1));
  }

  // Checks to see if the diagonal is the first entry in the slice.
  bool is_diag_first() const {
    if (!has_diag()) return false;
    if (nd_empty()) return true;
    return diag_j() < y.ija(p_first) - y.offset(1);
  }

  // Checks to see if the diagonal is the last entry in the slice.
  bool is_diag_last() const {
    if (!has_diag()) return false;
    if (nd_empty()) return true;
    return diag_j() > y.ija(p_last) - y.offset(1);
  }

  // Is the row of the slice totally empty of NDs and Ds?
  // We can only determine that it's empty of Ds if the diagonal
  // is not a part of the sliced portion of the row.
  bool empty() const {
    return nd_empty() && !has_diag();
  }

  size_t shape(size_t pp) const {
    return y.shape(pp);
  }

  size_t offset(size_t pp) const {
    return y.offset(pp);
  }

  inline VALUE rb_i() const { return LONG2NUM(i()); }

  row_stored_iterator_T<D,RefType,YaleRef> begin()      { return row_stored_iterator_T<D,RefType,YaleRef>(*this, p_first); }
  row_stored_nd_iterator_T<D,RefType,YaleRef> ndbegin() { return row_stored_nd_iterator_T<D,RefType,YaleRef>(*this, p_first); }
  row_stored_iterator_T<D,RefType,YaleRef> end()        { return row_stored_iterator_T<D,RefType,YaleRef>(*this, p_last+1, true); }
  row_stored_nd_iterator_T<D,RefType,YaleRef> ndend()   { return row_stored_nd_iterator_T<D,RefType,YaleRef>(*this, p_last+1); }

  row_stored_iterator_T<D,RefType,YaleRef> begin() const      { return row_stored_iterator_T<D,RefType,YaleRef>(*this, p_first); }
  row_stored_nd_iterator_T<D,RefType,YaleRef> ndbegin() const { return row_stored_nd_iterator_T<D,RefType,YaleRef>(*this, p_first); }
  row_stored_iterator_T<D,RefType,YaleRef> end() const        { return row_stored_iterator_T<D,RefType,YaleRef>(*this, p_last+1, true); }
  row_stored_nd_iterator_T<D,RefType,YaleRef> ndend() const   { return row_stored_nd_iterator_T<D,RefType,YaleRef>(*this, p_last+1); }

  row_stored_nd_iterator_T<D,RefType,YaleRef> lower_bound(const size_t& j) const {
    return row_stored_nd_iterator_T<D,RefType,YaleRef>(*this, y.real_find_left_boundary_pos(p_first, p_last, j + y.offset(1)));
  }

  row_stored_nd_iterator_T<D,RefType,YaleRef> ndfind(size_t j) {
    if (j == 0) return ndbegin();
    size_t p = p_first > p_last ? p_first : y.real_find_left_boundary_pos(p_first, p_last, j + y.offset(1));
    row_stored_nd_iterator iter = row_stored_nd_iterator_T<D,RefType,YaleRef>(*this, p);
    return iter;
  }

  row_stored_iterator_T<D,RefType,YaleRef> find(size_t j) {
    if (j == 0) return begin(); // may or may not be on the diagonal
    else return row_stored_iterator_T<D,RefType,YaleRef>(*this, ndfind(j).p(), false); // is on the diagonal, definitely
  }

  /*
   * Remove an entry from an already-found non-diagonal position. Adjusts this row appropriately so we can continue to
   * use it.
   */
  row_stored_nd_iterator erase(row_stored_nd_iterator position) {
    size_t sz = y.size();
    if (sz - 1 <= y.capacity() / nm::yale_storage::GROWTH_CONSTANT) {
      y.update_resize_move(position, real_i(), -1);
    } else {
      y.move_left(position, 1);
      y.update_real_row_sizes_from(real_i(), -1);
    }
    adjust_length(-1);
    return row_stored_nd_iterator(*this, position.p()-1);
  }

  /*
   * Remove an entry from the matrix at the already-located position. If diagonal, just sets to default; otherwise,
   * actually removes the entry.
   */
  row_stored_nd_iterator erase(const row_stored_iterator& jt) {
    if (jt.diag()) {
      *jt = y.const_default_obj(); // diagonal is the easy case -- no movement.
      return row_stored_nd_iterator(*this, jt.p());
    } else {
      return erase(row_stored_nd_iterator(*this, jt.p()));
    }
  }

  row_stored_nd_iterator insert(row_stored_nd_iterator position, size_t jj, const D& val) {
    size_t sz = y.size();
    while (!position.end() && position.j() < jj) ++position; // position is just a hint. (This loop ideally only has to happen once.)

    if (!position.end() && position.j() == jj) {
      *position = val; // replace existing
    } else {
      if (sz + 1 > y.capacity()) {
        y.update_resize_move(position, real_i(), 1);
      } else {
        y.move_right(position, 1);
        y.update_real_row_sizes_from(real_i(), 1);
      }
      ija(position.p()) = jj + y.offset(1); // set column ID
      a(position.p())   = val;
      adjust_length(1);
    }

    return position++;
  }

  /*
   * This version of insert doesn't return anything. Why, when the others do?
   *
   * Well, mainly because j here can be a diagonal entry. Most of the inserters return the *next* element following
   * the insertion, but to do that, we have to create a row_stored_nd_iterator, which requires at least one binary
   * search for the location following the diagonal (and as of the writing of this, two binary searches). There's no
   * reason to do that when we never actually *use* the return value. So instead we just return void.
   */
  void insert(size_t j, const D& val) {
    if (j + y.offset(1) == real_i()) a(real_i()) = val;
    else {
      row_stored_nd_iterator jt = ndfind(j);
      if (!jt.end() && jt.j() == j) {
        if (val == y.const_default_obj()) erase(jt);  // erase
        else insert(jt, j, val);                      // replace
      } else { // only insert if it's not the default
        if (val != y.const_default_obj()) insert(jt, j, val);
      }
    }
  }

  /*
   * Determines a plan for inserting a single row. Returns an integer giving the amount of the row change.
   */
  int single_row_insertion_plan(row_stored_nd_iterator position, size_t jj, size_t length, D const* v, size_t v_size, size_t& v_offset) {
    int nd_change = 0;

    for (size_t jc = jj; jc < jj + length; ++jc, ++v_offset) {
      if (v_offset >= v_size) v_offset %= v_size; // reset v position.

      if (jc + y.offset(1) != real_i()) { // skip the diagonal -- it contributes no nd_change
        if (position.end()) {
          if (v[v_offset] != y.const_default_obj()) nd_change++; // insert
        } else if (position.j() != jc) { // not present -- do we need to add it?
          if (v[v_offset] != y.const_default_obj()) nd_change++;
        } else { // position.j() == jc
          if (v[v_offset] == y.const_default_obj()) nd_change--;
          ++position; // move iterator forward.
        }
      }
    }
    return nd_change;
  }

  /*
   * Determine a plan for inserting a single row -- finds the position first. Returns the position and
   * the change amount. Don't use this one if you can help it, because it requires a binary search of
   * the row.
   */
  std::pair<int,size_t> single_row_insertion_plan(size_t jj, size_t length, D const* v, size_t v_size, size_t& v_offset) {
    std::pair<int,size_t> result;
    row_stored_nd_iterator pos = ndfind(jj);
    result.first  = single_row_insertion_plan(pos, jj, length, v, v_size, v_offset);
    result.second = pos.p();
    return result;
  }

  /*
   * Insert elements into a single row. Returns an iterator to the end of the insertion range.
   */
  row_stored_nd_iterator insert(row_stored_nd_iterator position, size_t jj, size_t length, D const* v, size_t v_size, size_t& v_offset) {
    size_t tmp_v_offset = v_offset;
    int nd_change = single_row_insertion_plan(position, jj, length, v, v_size, tmp_v_offset);

    // First record the position, just in case our iterator becomes invalid.
    size_t pp = position.p();

    // Resize the array as necessary, or move entries after the insertion point to make room.
    size_t sz = y.size();
    if (sz + nd_change > y.capacity() || sz + nd_change <= y.capacity() / nm::yale_storage::GROWTH_CONSTANT)
      y.update_resize_move(position, real_i(), nd_change);
    else if (nd_change != 0) {
      if (nd_change < 0)      y.move_left(position, -nd_change);
      else if (nd_change > 0) y.move_right(position, nd_change);
      y.update_real_row_sizes_from(real_i(), nd_change);
    }

    for (size_t jc = jj; jc < jj + length; ++jc, ++v_offset) {
      if (v_offset >= v_size) v_offset %= v_size; // reset v position.

      if (jc + y.offset(1) == real_i()) {
        y.a(real_i()) = v[v_offset]; // modify diagonal
      } else if (v[v_offset] != y.const_default_obj()) {
        y.ija(pp) = jc + y.offset(1); // modify non-diagonal: store the real column index
        y.a(pp)   = v[v_offset];
        ++pp;
      }
    }

    // Update this row.
    adjust_length(nd_change);

    return row_stored_nd_iterator(*this, pp);
  }

  /*
   * For when we don't need to worry about the offset; does the same thing as the insert above.
   */
  row_stored_nd_iterator insert(const row_stored_nd_iterator& position, size_t jj, size_t length, D const* v, size_t v_size) {
    size_t v_offset = 0;
    return insert(position, jj, length, v, v_size, v_offset);
  }

  /*
   * Merges elements offered for insertion with existing elements in the row.
   */
  row_stored_nd_iterator insert(size_t jj, size_t length, D const* v, size_t v_size, size_t& v_offset) {
    return insert(ndfind(jj), jj, length, v, v_size, v_offset);
  }

  /*
   * Merges elements offered for insertion with existing elements in the row.
   */
  row_stored_nd_iterator insert(size_t jj, size_t length, D const* v, size_t v_size) {
    size_t v_offset = 0;
    return insert(ndfind(jj), jj, length, v, v_size, v_offset);
  }

};

} } // end of nm::yale_storage namespace

#endif // YALE_ITERATORS_ROW_H
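Both update() and ndfind() above lean on real_find_left_boundary_pos, which this sketch assumes behaves like std::lower_bound over a row's sorted column indices: given the stored-ND range [p_first, p_last], return the first position whose column is at least the target real column. The helper name, signature, and toy data below are illustrative assumptions, not the NMatrix internals.

// Sketch of the "left boundary" binary search used when slicing a row.
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

// Columns of one row's stored NDs live in ija[p_first..p_last], sorted ascending.
std::size_t find_left_boundary_pos(const std::vector<std::size_t>& ija,
                                   std::size_t p_first, std::size_t p_last,
                                   std::size_t real_j) {
  const std::size_t* lo = ija.data() + p_first;
  const std::size_t* hi = ija.data() + p_last + 1; // one past the last stored ND
  return std::lower_bound(lo, hi, real_j) - ija.data();
}

int main() {
  // Hypothetical IJA for a 4-row matrix whose row 0 stores columns 2, 5, 7
  // at positions 5..7; rows 1-3 are empty of NDs.
  std::vector<std::size_t> ija = {/*row pointers*/5, 8, 8, 8, 8, /*cols*/2, 5, 7};
  std::printf("%zu\n", find_left_boundary_pos(ija, 5, 7, 5)); // 6: column 5 is stored there
  std::printf("%zu\n", find_left_boundary_pos(ija, 5, 7, 6)); // 7: first stored column >= 6
}

This is why row_iterator_T::update() can narrow p_first and p_last to the sliced column window in logarithmic time, and why the comment on the plan-finding single_row_insertion_plan overload warns that it costs a binary search of the row.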