nmatrix 0.0.8 → 0.0.9

Files changed (68)
  1. checksums.yaml +4 -4
  2. data/.gitignore +3 -8
  3. data/.rspec +1 -1
  4. data/.travis.yml +12 -0
  5. data/CONTRIBUTING.md +27 -12
  6. data/Gemfile +1 -0
  7. data/History.txt +38 -0
  8. data/Manifest.txt +15 -15
  9. data/README.rdoc +7 -6
  10. data/Rakefile +40 -5
  11. data/ext/nmatrix/data/data.cpp +2 -37
  12. data/ext/nmatrix/data/data.h +19 -121
  13. data/ext/nmatrix/data/meta.h +70 -0
  14. data/ext/nmatrix/extconf.rb +40 -12
  15. data/ext/nmatrix/math/math.h +13 -103
  16. data/ext/nmatrix/nmatrix.cpp +10 -2018
  17. data/ext/nmatrix/nmatrix.h +16 -13
  18. data/ext/nmatrix/ruby_constants.cpp +12 -1
  19. data/ext/nmatrix/ruby_constants.h +7 -1
  20. data/ext/nmatrix/ruby_nmatrix.c +2169 -0
  21. data/ext/nmatrix/storage/dense.cpp +123 -14
  22. data/ext/nmatrix/storage/dense.h +10 -4
  23. data/ext/nmatrix/storage/list.cpp +265 -48
  24. data/ext/nmatrix/storage/list.h +6 -9
  25. data/ext/nmatrix/storage/storage.cpp +44 -54
  26. data/ext/nmatrix/storage/storage.h +2 -2
  27. data/ext/nmatrix/storage/yale/class.h +1070 -0
  28. data/ext/nmatrix/storage/yale/iterators/base.h +142 -0
  29. data/ext/nmatrix/storage/yale/iterators/iterator.h +130 -0
  30. data/ext/nmatrix/storage/yale/iterators/row.h +449 -0
  31. data/ext/nmatrix/storage/yale/iterators/row_stored.h +139 -0
  32. data/ext/nmatrix/storage/yale/iterators/row_stored_nd.h +167 -0
  33. data/ext/nmatrix/storage/yale/iterators/stored_diagonal.h +123 -0
  34. data/ext/nmatrix/storage/yale/math/transpose.h +110 -0
  35. data/ext/nmatrix/storage/yale/yale.cpp +1785 -0
  36. data/ext/nmatrix/storage/{yale.h → yale/yale.h} +23 -55
  37. data/ext/nmatrix/types.h +2 -0
  38. data/ext/nmatrix/util/io.cpp +27 -45
  39. data/ext/nmatrix/util/io.h +0 -2
  40. data/ext/nmatrix/util/sl_list.cpp +169 -28
  41. data/ext/nmatrix/util/sl_list.h +9 -3
  42. data/lib/nmatrix/blas.rb +20 -20
  43. data/lib/nmatrix/enumerate.rb +1 -1
  44. data/lib/nmatrix/io/mat5_reader.rb +8 -14
  45. data/lib/nmatrix/lapack.rb +3 -3
  46. data/lib/nmatrix/math.rb +3 -3
  47. data/lib/nmatrix/nmatrix.rb +19 -5
  48. data/lib/nmatrix/nvector.rb +2 -0
  49. data/lib/nmatrix/shortcuts.rb +90 -125
  50. data/lib/nmatrix/version.rb +1 -1
  51. data/nmatrix.gemspec +7 -8
  52. data/spec/{nmatrix_spec.rb → 00_nmatrix_spec.rb} +45 -208
  53. data/spec/01_enum_spec.rb +184 -0
  54. data/spec/{slice_spec.rb → 02_slice_spec.rb} +55 -39
  55. data/spec/blas_spec.rb +22 -54
  56. data/spec/elementwise_spec.rb +9 -8
  57. data/spec/io_spec.rb +6 -4
  58. data/spec/lapack_spec.rb +26 -26
  59. data/spec/math_spec.rb +9 -5
  60. data/spec/nmatrix_yale_spec.rb +29 -61
  61. data/spec/shortcuts_spec.rb +34 -22
  62. data/spec/slice_set_spec.rb +157 -0
  63. data/spec/spec_helper.rb +42 -2
  64. data/spec/stat_spec.rb +192 -0
  65. metadata +52 -55
  66. data/ext/nmatrix/storage/yale.cpp +0 -2284
  67. data/spec/nmatrix_list_spec.rb +0 -113
  68. data/spec/nvector_spec.rb +0 -112
data/ext/nmatrix/storage/yale/iterators/base.h
@@ -0,0 +1,142 @@
+ /////////////////////////////////////////////////////////////////////
+ // = NMatrix
+ //
+ // A linear algebra library for scientific computation in Ruby.
+ // NMatrix is part of SciRuby.
+ //
+ // NMatrix was originally inspired by and derived from NArray, by
+ // Masahiro Tanaka: http://narray.rubyforge.org
+ //
+ // == Copyright Information
+ //
+ // SciRuby is Copyright (c) 2010 - 2013, Ruby Science Foundation
+ // NMatrix is Copyright (c) 2013, Ruby Science Foundation
+ //
+ // Please see LICENSE.txt for additional copyright notices.
+ //
+ // == Contributing
+ //
+ // By contributing source code to SciRuby, you agree to be bound by
+ // our Contributor Agreement:
+ //
+ // * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
+ //
+ // == base.h
+ //
+ // Yale storage pure virtual basic_iterator class.
+ //
+
+ #ifndef YALE_ITERATORS_BASE_H
+ # define YALE_ITERATORS_BASE_H
+
+ #include <type_traits>
+ #include <typeinfo>
+ #include <stdexcept>
+
+ namespace nm {
+
+ template <typename D> class YaleStorage;
+
+ namespace yale_storage {
+
+ template <typename D>
+ VALUE nm_rb_dereference(D const& v) {
+   return nm::RubyObject(v).rval;
+ }
+
+ template <>
+ VALUE nm_rb_dereference<nm::RubyObject>(nm::RubyObject const& v) {
+   return v.rval;
+ }
+
+ /*
+  * Iterator base class (pure virtual).
+  */
+ template <typename D,
+           typename RefType,
+           typename YaleRef = typename std::conditional<
+             std::is_const<RefType>::value,
+             const nm::YaleStorage<D>,
+             nm::YaleStorage<D>
+           >::type>
+ class basic_iterator_T {
+
+ protected:
+   YaleRef& y;
+   size_t i_;
+   size_t p_;
+
+ public:
+   size_t offset(size_t d) const { return y.offset(d); }
+   size_t shape(size_t d) const { return y.shape(d); }
+   size_t real_shape(size_t d) const { return y.real_shape(d); }
+
+   size_t dense_location() const {
+     return i()*shape(1) + j();
+   }
+
+   template <typename T = typename std::conditional<std::is_const<RefType>::value, const size_t, size_t>::type>
+   T& ija(size_t pp) const { return y.ija(pp); }
+
+   template <typename T = typename std::conditional<std::is_const<RefType>::value, const size_t, size_t>::type>
+   T& ija(size_t pp) { return y.ija(pp); }
+
+   virtual bool diag() const {
+     return p_ < std::min(y.real_shape(0), y.real_shape(1));
+   }
+   virtual bool done_with_diag() const {
+     return p_ == std::min(y.real_shape(0), y.real_shape(1));
+   }
+   virtual bool nondiag() const {
+     return p_ > std::min(y.real_shape(0), y.real_shape(1));
+   }
+
+   basic_iterator_T(YaleRef& obj, size_t ii = 0, size_t pp = 0) : y(obj), i_(ii), p_(pp) { }
+
+   basic_iterator_T<D,RefType,YaleRef>& operator=(const basic_iterator_T<D,RefType,YaleRef>& rhs) {
+     if (&y != &(rhs.y)) throw std::logic_error("can only be used on iterators with the same matrix");
+     i_ = rhs.i_;
+     p_ = rhs.p_;
+     return *this;
+   }
+
+   virtual inline size_t i() const { return i_; }
+   virtual size_t j() const = 0;
+
+   virtual inline VALUE rb_i() const { return LONG2NUM(i()); }
+   virtual inline VALUE rb_j() const { return LONG2NUM(j()); }
+
+   virtual size_t real_i() const { return offset(0) + i(); }
+   virtual size_t real_j() const { return offset(1) + j(); }
+   virtual size_t p() const { return p_; }
+   virtual bool real_ndnz_exists() const { return !y.real_row_empty(real_i()) && ija(p_) == real_j(); }
+
+   virtual RefType& operator*() = 0;
+   virtual RefType& operator*() const = 0;
+
+
+   // Ruby VALUE de-reference
+   inline VALUE operator~() const {
+     return nm_rb_dereference<D>(**this);
+   //virtual VALUE operator~() const {
+   //  if (typeid(D) == typeid(RubyObject)) return (**this); // FIXME: return rval instead, faster;
+   //  else return RubyObject(*(*this)).rval;
+   }
+
+   virtual bool operator==(const std::pair<size_t,size_t>& ij) {
+     if (p() >= ija(real_shape(0))) return false;
+     else return i() == ij.first && j() == ij.second;
+   }
+
+   virtual bool operator==(const basic_iterator_T<D,RefType,YaleRef>& rhs) const {
+     return i() == rhs.i() && j() == rhs.j();
+   }
+   virtual bool operator!=(const basic_iterator_T<D,RefType,YaleRef>& rhs) const {
+     return i() != rhs.i() || j() != rhs.j();
+   }
+ };
+
+
+ } } // end of namespace nm::yale_storage
+
+ #endif // YALE_ITERATORS_BASE_H
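
The basic_iterator_T template above gets its const-correctness from a std::conditional default template argument: when RefType is const-qualified, YaleRef resolves to const nm::YaleStorage<D>, so one class body serves as both the mutable iterator and the const_iterator. Below is a minimal standalone sketch of that pattern; Storage and iter are hypothetical stand-ins for illustration, not nmatrix classes.

#include <cstddef>
#include <iostream>
#include <type_traits>

// Toy container standing in for YaleStorage<D>.
template <typename D>
struct Storage {
  D data[4] = {1, 2, 3, 4};
};

// Same trick as basic_iterator_T: a const RefType selects a const
// Storage<D> reference type; otherwise the mutable one is used.
template <typename D,
          typename RefType,
          typename StoreRef = typename std::conditional<
            std::is_const<RefType>::value,
            const Storage<D>,
            Storage<D>
          >::type>
class iter {
  StoreRef& s;
  std::size_t p_;
public:
  iter(StoreRef& obj, std::size_t pp = 0) : s(obj), p_(pp) { }
  RefType& operator*() const { return s.data[p_]; }
  iter& operator++() { ++p_; return *this; }
};

int main() {
  Storage<int> st;
  iter<int, int>       it(st);   // mutable iterator
  iter<int, const int> cit(st);  // const iterator from the same template
  *it = 42;                      // fine: RefType is int
  std::cout << *cit << "\n";     // prints 42; *cit = 0 would not compile
}
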
data/ext/nmatrix/storage/yale/iterators/iterator.h
@@ -0,0 +1,130 @@
+ /////////////////////////////////////////////////////////////////////
+ // = NMatrix
+ //
+ // A linear algebra library for scientific computation in Ruby.
+ // NMatrix is part of SciRuby.
+ //
+ // NMatrix was originally inspired by and derived from NArray, by
+ // Masahiro Tanaka: http://narray.rubyforge.org
+ //
+ // == Copyright Information
+ //
+ // SciRuby is Copyright (c) 2010 - 2013, Ruby Science Foundation
+ // NMatrix is Copyright (c) 2013, Ruby Science Foundation
+ //
+ // Please see LICENSE.txt for additional copyright notices.
+ //
+ // == Contributing
+ //
+ // By contributing source code to SciRuby, you agree to be bound by
+ // our Contributor Agreement:
+ //
+ // * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
+ //
+ // == iterator.h
+ //
+ // Iterate over yale as if dense
+ //
+
+ #ifndef YALE_ITERATORS_ITERATOR_H
+ # define YALE_ITERATORS_ITERATOR_H
+
+ #include <type_traits>
+ #include <typeinfo>
+
+ namespace nm { namespace yale_storage {
+
+ /*
+  * Iterator for traversing matrix class as if it were dense (visits each entry in order).
+  */
+ template <typename D,
+           typename RefType,
+           typename YaleRef = typename std::conditional<
+             std::is_const<RefType>::value,
+             const nm::YaleStorage<D>,
+             nm::YaleStorage<D>
+           >::type>
+ class iterator_T : public basic_iterator_T<D,RefType,YaleRef> {
+   using basic_iterator_T<D,RefType,YaleRef>::i_;
+   using basic_iterator_T<D,RefType,YaleRef>::p_;
+   using basic_iterator_T<D,RefType,YaleRef>::y;
+   using basic_iterator_T<D,RefType,YaleRef>::offset;
+   using basic_iterator_T<D,RefType,YaleRef>::shape;
+   using basic_iterator_T<D,RefType,YaleRef>::ija;
+
+ protected:
+   size_t j_; // These are relative to the slice.
+
+ public:
+   // Create an iterator. May select the row since this is O(1).
+   iterator_T(YaleRef& obj, size_t ii = 0)
+     : basic_iterator_T<D,RefType,YaleRef>(obj, ii, obj.ija(ii + obj.offset(0))), j_(0)
+   {
+     // advance to the beginning of the row
+     if (obj.offset(1) > 0)
+       p_ = y.find_pos_for_insertion(i_,j_);
+   }
+
+   // Prefix ++
+   iterator_T<D,RefType,YaleRef>& operator++() {
+     size_t prev_j = j_++;
+     if (j_ >= shape(1)) {
+       j_ = 0;
+       ++i_;
+
+       // Do a binary search to find the beginning of the slice
+       p_ = offset(0) > 0 ? y.find_pos_for_insertion(i_,j_) : ija(i_);
+     } else {
+       // If the last j was actually stored in this row of the matrix, need to advance p.
+
+       if (!y.real_row_empty(i_ + offset(0)) && ija(p_) <= prev_j + offset(1)) ++p_; // this test is the same as real_ndnz_exists
+     }
+
+     return *this;
+   }
+
+   iterator_T<D,RefType,YaleRef> operator++(int dummy) const {
+     iterator_T<D,RefType,YaleRef> iter(*this);
+     return ++iter;
+   }
+
+   virtual bool operator!=(const iterator_T<D,RefType,YaleRef>& rhs) const {
+     return this->dense_location() != rhs.dense_location();
+   }
+
+   virtual bool operator==(const iterator_T<D,RefType,YaleRef>& rhs) const {
+     return this->dense_location() == rhs.dense_location();
+   }
+
+   bool operator<(const iterator_T<D,RefType,YaleRef>& rhs) const {
+     return this->dense_location() < rhs.dense_location();
+   }
+
+   bool operator>(const iterator_T<D,RefType,YaleRef>& rhs) const {
+     return this->dense_location() > rhs.dense_location();
+   }
+
+   virtual bool diag() const { return i_ + offset(0) == j_ + offset(1); }
+
+   // De-reference
+   RefType& operator*() {
+     if (diag()) return y.a( i_ + offset(0) );
+     else if (p_ >= ija(i_+offset(0)+1)) return y.const_default_obj();
+     else if (!y.real_row_empty(i_ + offset(0)) && ija(p_) == j_ + offset(1)) return y.a( p_ );
+     else return y.const_default_obj();
+   }
+
+   RefType& operator*() const {
+     if (diag()) return y.a( i_ + offset(0) );
+     else if (p_ >= ija(i_+offset(0)+1)) return y.const_default_obj();
+     else if (!y.real_row_empty(i_ + offset(0)) && ija(p_) == j_ + offset(1)) return y.a( p_ );
+     else return y.const_default_obj();
+   }
+
+   virtual size_t j() const { return j_; }
+ };
+
+
+ } } // end of namespace nm::yale_storage
+
+ #endif // YALE_ITERATORS_ITERATOR_H
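
The iterator_T above visits every (i,j) of a slice in dense order, carrying a cursor p_ through the stored entries and yielding the default object for unstored positions. Here is a rough standalone sketch of the same traversal over a plain CSR layout; it omits Yale's separately stored diagonal, and Csr/each_dense are illustrative names, not nmatrix's API.

#include <cstddef>
#include <iostream>
#include <vector>

// Plain CSR stand-in. row_ptr has rows+1 entries; columns are sorted per row.
struct Csr {
  std::size_t rows, cols;
  std::vector<std::size_t> row_ptr, col;
  std::vector<double> val;
  double dflt = 0.0;                 // the "default object"
};

// Visit every (i,j) in dense order, advancing the stored-entry cursor p
// past each stored column we pass -- the same bookkeeping as iterator_T.
void each_dense(const Csr& m) {
  for (std::size_t i = 0; i < m.rows; ++i) {
    std::size_t p = m.row_ptr[i];
    for (std::size_t j = 0; j < m.cols; ++j) {
      double v = (p < m.row_ptr[i+1] && m.col[p] == j) ? m.val[p++] : m.dflt;
      std::cout << v << (j + 1 == m.cols ? '\n' : ' ');
    }
  }
}

int main() {
  // 2x3 matrix with stored entries (0,1)=5 and (1,2)=7.
  Csr m{2, 3, {0, 1, 2}, {1, 2}, {5.0, 7.0}};
  each_dense(m);                     // prints "0 5 0" then "0 0 7"
}
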
data/ext/nmatrix/storage/yale/iterators/row.h
@@ -0,0 +1,449 @@
+ /////////////////////////////////////////////////////////////////////
+ // = NMatrix
+ //
+ // A linear algebra library for scientific computation in Ruby.
+ // NMatrix is part of SciRuby.
+ //
+ // NMatrix was originally inspired by and derived from NArray, by
+ // Masahiro Tanaka: http://narray.rubyforge.org
+ //
+ // == Copyright Information
+ //
+ // SciRuby is Copyright (c) 2010 - 2013, Ruby Science Foundation
+ // NMatrix is Copyright (c) 2013, Ruby Science Foundation
+ //
+ // Please see LICENSE.txt for additional copyright notices.
+ //
+ // == Contributing
+ //
+ // By contributing source code to SciRuby, you agree to be bound by
+ // our Contributor Agreement:
+ //
+ // * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
+ //
+ // == row.h
+ //
+ // Iterator for traversing a matrix row by row. Includes an
+ // orthogonal iterator for visiting each stored entry in a row.
+ // This one cannot be de-referenced; you have to de-reference
+ // the column.
+
+ #ifndef YALE_ITERATORS_ROW_H
+ # define YALE_ITERATORS_ROW_H
+
+ #include <stdexcept>
+
+ namespace nm { namespace yale_storage {
+
+ template <typename D,
+           typename RefType,
+           typename YaleRef = typename std::conditional<
+             std::is_const<RefType>::value,
+             const nm::YaleStorage<D>,
+             nm::YaleStorage<D>
+           >::type>
+ class row_iterator_T {
+
+ protected:
+   YaleRef& y;
+   size_t i_;
+   size_t p_first, p_last; // first and last IJA positions in the row
+
+
+   /*
+    * Update the row positions -- use to ensure a row stays valid after an insert operation. Also
+    * used to initialize a row iterator at a different row index.
+    */
+   void update() {
+     if (i_ < y.shape(0)) {
+       p_first = p_real_first();
+       p_last = p_real_last();
+       if (!nd_empty()) {
+         // try to find new p_first
+         p_first = y.real_find_left_boundary_pos(p_first, p_last, y.offset(1));
+         if (!nd_empty()) {
+           // also try to find new p_last
+           p_last = y.real_find_left_boundary_pos(p_first, p_last, y.offset(1) + y.shape(1) - 1);
+           if (y.ija(p_last) - y.offset(1) >= shape(1)) --p_last; // searched too far.
+         }
+       }
+     } else { // invalid row -- this is an end iterator.
+       p_first = y.ija(y.real_shape(0));
+       p_last = y.ija(y.real_shape(0))-1; // mark as empty
+     }
+   }
+
+   /*
+    * Indicate to the row iterator that p_first and p_last have moved by some amount. Only
+    * defined for row_iterator, not const_row_iterator. This is a lightweight form of update().
+    */
+   //template <typename = typename std::enable_if<!std::is_const<RefType>::value>::type>
+   void shift(int amount) {
+     p_first += amount;
+     p_last += amount;
+   }
+
+
+   /*
+    * Enlarge the row by amount by moving p_last over. This is a lightweight form of update().
+    */
+   //template <typename = typename std::enable_if<!std::is_const<RefType>::value>::type>
+   void adjust_length(int amount) {
+     p_last += amount;
+   }
+
+ public:
+   /* typedef row_stored_iterator_T<D,RefType,YaleRef> row_stored_iterator;
+   typedef row_stored_nd_iterator_T<D,RefType,YaleRef> row_stored_nd_iterator;
+   typedef row_stored_iterator_T<D,const RefType,const YaleRef> const_row_stored_iterator;
+   typedef row_stored_nd_iterator_T<D,const RefType,const YaleRef> const_row_stored_nd_iterator;*/
+   typedef row_stored_iterator_T<D,RefType,YaleRef, row_iterator_T<D,RefType,YaleRef> > row_stored_iterator;
+   typedef row_stored_nd_iterator_T<D,RefType,YaleRef, row_iterator_T<D,RefType,YaleRef> > row_stored_nd_iterator;
+   template <typename E, typename ERefType, typename EYaleRef> friend class row_iterator_T;
+   friend class row_stored_iterator_T<D,RefType,YaleRef, row_iterator_T<D,RefType,YaleRef> >;
+   friend class row_stored_nd_iterator_T<D,RefType,YaleRef, row_iterator_T<D,RefType,YaleRef> >;//row_stored_iterator;
+   friend class row_stored_iterator_T<D,RefType,YaleRef, const row_iterator_T<D,RefType,YaleRef> >;
+   friend class row_stored_nd_iterator_T<D,RefType,YaleRef, const row_iterator_T<D,RefType,YaleRef> >;//row_stored_iterator;
+   friend class nm::YaleStorage<D>;
+
+   //friend row_stored_nd_iterator;
+
+   inline size_t ija(size_t pp) const { return y.ija(pp); }
+   inline size_t& ija(size_t pp) { return y.ija(pp); }
+   inline RefType& a(size_t p) const { return y.a_p()[p]; }
+   inline RefType& a(size_t p) { return y.a_p()[p]; }
+
+
+
+   row_iterator_T(YaleRef& obj, size_t ii = 0)
+     : y(obj), i_(ii)
+   {
+     update();
+   }
+
+
+   template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
+   bool operator!=(const row_iterator_T<E,ERefType>& rhs) const {
+     return i_ != rhs.i_;
+   }
+
+   template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
+   bool operator==(const row_iterator_T<E,ERefType>& rhs) const {
+     return i_ == rhs.i_;
+   }
+
+   template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
+   bool operator<(const row_iterator_T<E,ERefType>& rhs) const {
+     return i_ < rhs.i_;
+   }
+
+   template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
+   bool operator>(const row_iterator_T<E,ERefType>& rhs) const {
+     return i_ > rhs.i_;
+   }
+
+   row_iterator_T<D,RefType,YaleRef>& operator++() {
+     if (is_end()) throw std::out_of_range("attempted to iterate past end of slice (vertically)");
+     ++i_;
+     update();
+     return *this;
+   }
+
+   row_iterator_T<D,RefType,YaleRef> operator++(int dummy) const {
+     row_iterator_T<D,RefType,YaleRef> next(*this);
+     return ++next;
+   }
+
+   bool is_end() const {
+     return i_ == y.shape(0) && p_first == y.ija(y.real_shape(0));
+   }
+
+   size_t real_i() const {
+     return i_ + y.offset(0);
+   }
+
+   size_t i() const {
+     return i_;
+   }
+
+   // last element of the real row
+   size_t p_real_last() const {
+     return y.ija(real_i()+1)-1;
+   }
+
+   // first element of the real row
+   size_t p_real_first() const {
+     return y.ija(real_i());
+   }
+
+   // Is the real row of the original matrix totally empty of NDs?
+   bool real_nd_empty() const {
+     return p_real_last() < p_real_first();
+   }
+
+   bool nd_empty() const {
+     return p_last < p_first;
+   }
+
+   // slice j coord of the diag.
+   size_t diag_j() const {
+     if (!has_diag())
+       throw std::out_of_range("don't call diag_j unless you've checked for one");
+     return real_i() - y.offset(1);
+   }
+
+   // return the actual position of the diagonal element for this real row, regardless of whether
+   // it's in range or not.
+   size_t p_diag() const {
+     return real_i();
+   }
+
+   // Checks to see if there is a diagonal within the slice
+   bool has_diag() const {
+     // real position of diag is real_i == real_j. Is it in range?
+     return (p_diag() >= y.offset(1) && p_diag() - y.offset(1) < y.shape(1));
+   }
+
+   // Checks to see if the diagonal is the first entry in the slice.
+   bool is_diag_first() const {
+     if (!has_diag()) return false;
+     if (nd_empty()) return true;
+     return diag_j() < y.ija(p_first) - y.offset(1);
+   }
+
+   // Checks to see if the diagonal is the last entry in the slice.
+   bool is_diag_last() const {
+     if (!has_diag()) return false;
+     if (nd_empty()) return true;
+     return diag_j() > y.ija(p_last);
+   }
+
+   // Is the row of the slice totally empty of NDs and Ds?
+   // We can only determine that it's empty of Ds if the diagonal
+   // is not a part of the sliced portion of the row.
+   bool empty() const {
+     return nd_empty() && has_diag();
+   }
+
+
+   size_t shape(size_t pp) const {
+     return y.shape(pp);
+   }
+
+   size_t offset(size_t pp) const {
+     return y.offset(pp);
+   }
+
+   inline VALUE rb_i() const { return LONG2NUM(i()); }
+
+   row_stored_iterator_T<D,RefType,YaleRef> begin() { return row_stored_iterator_T<D,RefType,YaleRef>(*this, p_first); }
+   row_stored_nd_iterator_T<D,RefType,YaleRef> ndbegin() { return row_stored_nd_iterator_T<D,RefType,YaleRef>(*this, p_first); }
+   row_stored_iterator_T<D,RefType,YaleRef> end() { return row_stored_iterator_T<D,RefType,YaleRef>(*this, p_last+1, true); }
+   row_stored_nd_iterator_T<D,RefType,YaleRef> ndend() { return row_stored_nd_iterator_T<D,RefType,YaleRef>(*this, p_last+1); }
+
+   row_stored_iterator_T<D,RefType,YaleRef> begin() const { return row_stored_iterator_T<D,RefType,YaleRef>(*this, p_first); }
+   row_stored_nd_iterator_T<D,RefType,YaleRef> ndbegin() const { return row_stored_nd_iterator_T<D,RefType,YaleRef>(*this, p_first); }
+   row_stored_iterator_T<D,RefType,YaleRef> end() const { return row_stored_iterator_T<D,RefType,YaleRef>(*this, p_last+1, true); }
+   row_stored_nd_iterator_T<D,RefType,YaleRef> ndend() const { return row_stored_nd_iterator_T<D,RefType,YaleRef>(*this, p_last+1); }
+
+
+   row_stored_nd_iterator_T<D,RefType,YaleRef> lower_bound(const size_t& j) const {
+     row_stored_nd_iterator_T<D,RefType,YaleRef>(*this, y.real_find_left_boundary_pos(p_first, p_last, y.offset(1)));
+   }
+
+   row_stored_nd_iterator_T<D,RefType,YaleRef> ndfind(size_t j) {
+     if (j == 0) return ndbegin();
+     size_t p = p_first > p_last ? p_first : y.real_find_left_boundary_pos(p_first, p_last, j + y.offset(1));
+     row_stored_nd_iterator iter = row_stored_nd_iterator_T<D,RefType,YaleRef>(*this, p);
+     return iter;
+   }
+
+   row_stored_iterator_T<D,RefType,YaleRef> find(size_t j) {
+     if (j == 0) return begin(); // may or may not be on the diagonal
+     else return row_stored_iterator_T<D,RefType,YaleRef>(*this, ndfind(j).p(), false); // is on the diagonal, definitely
+   }
+
+
+   /*
+    * Remove an entry from an already found non-diagonal position. Adjust this row appropriately so we can continue to
+    * use it.
+    */
+   //template <typename = typename std::enable_if<!std::is_const<RefType>::value>::type>
+   row_stored_nd_iterator erase(row_stored_nd_iterator position) {
+     size_t sz = y.size();
+     if (sz - 1 <= y.capacity() / nm::yale_storage::GROWTH_CONSTANT) {
+       y.update_resize_move(position, real_i(), -1);
+     } else {
+       y.move_left(position, 1);
+       y.update_real_row_sizes_from(real_i(), -1);
+     }
+     adjust_length(-1);
+     return row_stored_nd_iterator(*this, position.p()-1);
+   }
+
+   /*
+    * Remove an entry from the matrix at the already-located position. If diagonal, just sets to default; otherwise,
+    * actually removes the entry.
+    */
+   //template <typename = typename std::enable_if<!std::is_const<RefType>::value>::type>
+   row_stored_nd_iterator erase(const row_stored_iterator& jt) {
+     if (jt.diag()) {
+       *jt = y.const_default_obj(); // diagonal is the easy case -- no movement.
+       return row_stored_nd_iterator(*this, jt.p());
+     } else {
+       return erase(row_stored_nd_iterator(*this, jt.p()));
+     }
+   }
+
+
+
+   //template <typename = typename std::enable_if<!std::is_const<RefType>::value>::type>
+   row_stored_nd_iterator insert(row_stored_nd_iterator position, size_t jj, const D& val) {
+     size_t sz = y.size();
+     while (!position.end() && position.j() < jj) ++position; // position is just a hint. (This loop ideally only has to happen once.)
+
+     if (!position.end() && position.j() == jj) {
+       *position = val; // replace existing
+     } else {
+
+       if (sz + 1 > y.capacity()) {
+         y.update_resize_move(position, real_i(), 1);
+       } else {
+         y.move_right(position, 1);
+         y.update_real_row_sizes_from(real_i(), 1);
+       }
+       ija(position.p()) = jj + y.offset(1); // set column ID
+       a(position.p()) = val;
+       adjust_length(1);
+     }
+
+     return position++;
+   }
+
+
+   /*
+    * This version of insert doesn't return anything. Why, when the others do?
+    *
+    * Well, mainly because j here can be a diagonal entry. Most of the inserters return the *next* element following
+    * the insertion, but to do that, we have to create a row_stored_nd_iterator, which requires at least one binary
+    * search for the location following the diagonal (and as of the writing of this, two binary searches). There's no
+    * reason to do that when we never actually *use* the return value. So instead we just have void.
+    */
+   //template <typename = typename std::enable_if<!std::is_const<RefType>::value>::type>
+   void insert(size_t j, const D& val) {
+     if (j + y.offset(1) == real_i()) a(real_i()) = val;
+     else {
+       row_stored_nd_iterator jt = ndfind(j);
+       if (!jt.end() && jt.j() == j) {
+         if (val == y.const_default_obj()) erase(jt); // erase
+         else insert(jt, j, val); // replace
+       } else { // only insert if it's not the default
+         if (val != y.const_default_obj()) insert(jt, j, val);
+       }
+     }
+   }
+
+
+   /*
+    * Determines a plan for inserting a single row. Returns an integer giving the amount of the row change.
+    */
+   int single_row_insertion_plan(row_stored_nd_iterator position, size_t jj, size_t length, D const* v, size_t v_size, size_t& v_offset) {
+     int nd_change = 0;
+
+     for (size_t jc = jj; jc < jj + length; ++jc, ++v_offset) {
+       if (v_offset >= v_size) v_offset %= v_size; // reset v position.
+
+       if (jc + y.offset(1) != real_i()) { // diagonal -- no nd_change here
+         if (position.end()) {
+           if (v[v_offset] != y.const_default_obj()) nd_change++; // insert
+         } else if (position.j() != jc) { // not present -- do we need to add it?
+           if (v[v_offset] != y.const_default_obj()) nd_change++;
+         } else { // position.j() == jc
+           if (v[v_offset] == y.const_default_obj()) nd_change--;
+           ++position; // move iterator forward.
+         }
+       }
+     }
+     return nd_change;
+   }
+
+   /*
+    * Determine a plan for inserting a single row -- finds the position first. Returns the position and
+    * the change amount. Don't use this one if you can help it because it requires a binary search of
+    * the row.
+    */
+   std::pair<int,size_t> single_row_insertion_plan(size_t jj, size_t length, D const* v, size_t v_size, size_t& v_offset) {
+     std::pair<int,size_t> result;
+     row_stored_nd_iterator pos = ndfind(jj);
+     result.first = single_row_insertion_plan(pos, jj, length, v, v_size, v_offset);
+     result.second = pos.p();
+     return result;
+   }
+
+   /*
+    * Insert elements into a single row. Returns an iterator to the end of the insertion range.
+    */
+   row_stored_nd_iterator insert(row_stored_nd_iterator position, size_t jj, size_t length, D const* v, size_t v_size, size_t& v_offset) {
+     size_t tmp_v_offset = v_offset;
+     int nd_change = single_row_insertion_plan(position, jj, length, v, v_size, tmp_v_offset);
+
+     // First record the position, just in case our iterator becomes invalid.
+     size_t pp = position.p();
+
+     // Resize the array as necessary, or move entries after the insertion point to make room.
+     size_t sz = y.size();
+     if (sz + nd_change > y.capacity() || sz + nd_change <= y.capacity() / nm::yale_storage::GROWTH_CONSTANT)
+       y.update_resize_move(position, real_i(), nd_change);
+     else if (nd_change != 0) {
+       if (nd_change < 0) y.move_left(position, -nd_change);
+       else if (nd_change > 0) y.move_right(position, nd_change);
+       y.update_real_row_sizes_from(real_i(), nd_change);
+     }
+
+     for (size_t jc = jj; jc < jj + length; ++jc, ++v_offset) {
+       if (v_offset >= v_size) v_offset %= v_size; // reset v position.
+
+       if (jc + y.offset(1) == real_i()) {
+         y.a(real_i()) = v[v_offset]; // modify diagonal
+       } else if (v[v_offset] != y.const_default_obj()) {
+         y.ija(pp) = jc; // modify non-diagonal
+         y.a(pp) = v[v_offset];
+         ++pp;
+       }
+     }
+
+     // Update this row.
+     adjust_length(nd_change);
+
+     return row_stored_nd_iterator(*this, pp);
+   }
+
+   /*
+    * For when we don't need to worry about the offset, does the same thing as the insert above.
+    */
+   row_stored_nd_iterator insert(const row_stored_nd_iterator& position, size_t jj, size_t length, D const* v, size_t v_size) {
+     size_t v_offset = 0;
+     return insert(position, jj, length, v, v_size, v_offset);
+   }
+
+
+   /*
+    * Merges elements offered for insertion with existing elements in the row.
+    */
+   row_stored_nd_iterator insert(size_t jj, size_t length, D const* v, size_t v_size, size_t& v_offset) {
+     return insert(ndfind(jj), jj, length, v, v_size, v_offset);
+   }
+
+   /*
+    * Merges elements offered for insertion with existing elements in the row.
+    */
+   row_stored_nd_iterator insert(size_t jj, size_t length, D const* v, size_t v_size) {
+     size_t v_offset = 0;
+     return insert(ndfind(jj), jj, length, v, v_size, v_offset);
+   }
+
+
+ };
+
+ } } // end of nm::yale_storage namespace
+
+ #endif // YALE_ITERATORS_ROW_H
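
The bulk insert in row.h above is two-pass: single_row_insertion_plan first counts how many stored non-diagonal entries the write will add or remove (nd_change), so the backing arrays are resized or shifted exactly once before any values are written. The sketch below reproduces just that counting pass over an illustrative sorted (column, value) row representation; it is an assumption-laden analogy, not nmatrix's IJA/A layout.

#include <cstddef>
#include <utility>
#include <vector>

// Count the net change in stored entries if columns [jj, jj + v.size())
// of a row are overwritten with v, where dflt marks "not stored".
int insertion_plan(const std::vector<std::pair<std::size_t, double>>& row,
                   std::size_t jj, const std::vector<double>& v, double dflt) {
  int nd_change = 0;
  std::size_t p = 0;
  while (p < row.size() && row[p].first < jj) ++p;  // left boundary of the write
  for (std::size_t k = 0; k < v.size(); ++k) {
    std::size_t jc = jj + k;
    if (p < row.size() && row[p].first == jc) {     // column already stored
      if (v[k] == dflt) --nd_change;                // it becomes default: erase
      ++p;
    } else if (v[k] != dflt) {
      ++nd_change;                                  // unstored column gains a value
    }
  }
  return nd_change;
}

int main() {
  std::vector<std::pair<std::size_t, double>> row{{1, 5.0}, {4, 7.0}};
  // Overwrite columns 1..3 with {0, 9, 9}: col 1 is erased, cols 2 and 3
  // are added, col 4 is untouched -- a net change of +1 stored entry.
  return insertion_plan(row, 1, {0.0, 9.0, 9.0}, 0.0) == 1 ? 0 : 1;
}
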