pnmatrix 1.2.4

Files changed (111)
  1. checksums.yaml +7 -0
  2. data/ext/nmatrix/binary_format.txt +53 -0
  3. data/ext/nmatrix/data/complex.h +388 -0
  4. data/ext/nmatrix/data/data.cpp +274 -0
  5. data/ext/nmatrix/data/data.h +651 -0
  6. data/ext/nmatrix/data/meta.h +64 -0
  7. data/ext/nmatrix/data/ruby_object.h +386 -0
  8. data/ext/nmatrix/extconf.rb +70 -0
  9. data/ext/nmatrix/math/asum.h +99 -0
  10. data/ext/nmatrix/math/cblas_enums.h +36 -0
  11. data/ext/nmatrix/math/cblas_templates_core.h +507 -0
  12. data/ext/nmatrix/math/gemm.h +241 -0
  13. data/ext/nmatrix/math/gemv.h +178 -0
  14. data/ext/nmatrix/math/getrf.h +255 -0
  15. data/ext/nmatrix/math/getrs.h +121 -0
  16. data/ext/nmatrix/math/imax.h +82 -0
  17. data/ext/nmatrix/math/laswp.h +165 -0
  18. data/ext/nmatrix/math/long_dtype.h +62 -0
  19. data/ext/nmatrix/math/magnitude.h +54 -0
  20. data/ext/nmatrix/math/math.h +751 -0
  21. data/ext/nmatrix/math/nrm2.h +165 -0
  22. data/ext/nmatrix/math/rot.h +117 -0
  23. data/ext/nmatrix/math/rotg.h +106 -0
  24. data/ext/nmatrix/math/scal.h +71 -0
  25. data/ext/nmatrix/math/trsm.h +336 -0
  26. data/ext/nmatrix/math/util.h +162 -0
  27. data/ext/nmatrix/math.cpp +1368 -0
  28. data/ext/nmatrix/nm_memory.h +60 -0
  29. data/ext/nmatrix/nmatrix.cpp +285 -0
  30. data/ext/nmatrix/nmatrix.h +476 -0
  31. data/ext/nmatrix/ruby_constants.cpp +151 -0
  32. data/ext/nmatrix/ruby_constants.h +106 -0
  33. data/ext/nmatrix/ruby_nmatrix.c +3130 -0
  34. data/ext/nmatrix/storage/common.cpp +77 -0
  35. data/ext/nmatrix/storage/common.h +183 -0
  36. data/ext/nmatrix/storage/dense/dense.cpp +1096 -0
  37. data/ext/nmatrix/storage/dense/dense.h +129 -0
  38. data/ext/nmatrix/storage/list/list.cpp +1628 -0
  39. data/ext/nmatrix/storage/list/list.h +138 -0
  40. data/ext/nmatrix/storage/storage.cpp +730 -0
  41. data/ext/nmatrix/storage/storage.h +99 -0
  42. data/ext/nmatrix/storage/yale/class.h +1139 -0
  43. data/ext/nmatrix/storage/yale/iterators/base.h +143 -0
  44. data/ext/nmatrix/storage/yale/iterators/iterator.h +131 -0
  45. data/ext/nmatrix/storage/yale/iterators/row.h +450 -0
  46. data/ext/nmatrix/storage/yale/iterators/row_stored.h +140 -0
  47. data/ext/nmatrix/storage/yale/iterators/row_stored_nd.h +169 -0
  48. data/ext/nmatrix/storage/yale/iterators/stored_diagonal.h +124 -0
  49. data/ext/nmatrix/storage/yale/math/transpose.h +110 -0
  50. data/ext/nmatrix/storage/yale/yale.cpp +2074 -0
  51. data/ext/nmatrix/storage/yale/yale.h +203 -0
  52. data/ext/nmatrix/types.h +55 -0
  53. data/ext/nmatrix/util/io.cpp +279 -0
  54. data/ext/nmatrix/util/io.h +115 -0
  55. data/ext/nmatrix/util/sl_list.cpp +627 -0
  56. data/ext/nmatrix/util/sl_list.h +144 -0
  57. data/ext/nmatrix/util/util.h +78 -0
  58. data/lib/nmatrix/blas.rb +378 -0
  59. data/lib/nmatrix/cruby/math.rb +744 -0
  60. data/lib/nmatrix/enumerate.rb +253 -0
  61. data/lib/nmatrix/homogeneous.rb +241 -0
  62. data/lib/nmatrix/io/fortran_format.rb +138 -0
  63. data/lib/nmatrix/io/harwell_boeing.rb +221 -0
  64. data/lib/nmatrix/io/market.rb +263 -0
  65. data/lib/nmatrix/io/point_cloud.rb +189 -0
  66. data/lib/nmatrix/jruby/decomposition.rb +24 -0
  67. data/lib/nmatrix/jruby/enumerable.rb +13 -0
  68. data/lib/nmatrix/jruby/error.rb +4 -0
  69. data/lib/nmatrix/jruby/math.rb +501 -0
  70. data/lib/nmatrix/jruby/nmatrix_java.rb +840 -0
  71. data/lib/nmatrix/jruby/operators.rb +283 -0
  72. data/lib/nmatrix/jruby/slice.rb +264 -0
  73. data/lib/nmatrix/lapack_core.rb +181 -0
  74. data/lib/nmatrix/lapack_plugin.rb +44 -0
  75. data/lib/nmatrix/math.rb +953 -0
  76. data/lib/nmatrix/mkmf.rb +100 -0
  77. data/lib/nmatrix/monkeys.rb +137 -0
  78. data/lib/nmatrix/nmatrix.rb +1172 -0
  79. data/lib/nmatrix/rspec.rb +75 -0
  80. data/lib/nmatrix/shortcuts.rb +1163 -0
  81. data/lib/nmatrix/version.rb +39 -0
  82. data/lib/nmatrix/yale_functions.rb +118 -0
  83. data/lib/nmatrix.rb +28 -0
  84. data/spec/00_nmatrix_spec.rb +892 -0
  85. data/spec/01_enum_spec.rb +196 -0
  86. data/spec/02_slice_spec.rb +407 -0
  87. data/spec/03_nmatrix_monkeys_spec.rb +80 -0
  88. data/spec/2x2_dense_double.mat +0 -0
  89. data/spec/4x4_sparse.mat +0 -0
  90. data/spec/4x5_dense.mat +0 -0
  91. data/spec/blas_spec.rb +215 -0
  92. data/spec/elementwise_spec.rb +311 -0
  93. data/spec/homogeneous_spec.rb +100 -0
  94. data/spec/io/fortran_format_spec.rb +88 -0
  95. data/spec/io/harwell_boeing_spec.rb +98 -0
  96. data/spec/io/test.rua +9 -0
  97. data/spec/io_spec.rb +159 -0
  98. data/spec/lapack_core_spec.rb +482 -0
  99. data/spec/leakcheck.rb +16 -0
  100. data/spec/math_spec.rb +1363 -0
  101. data/spec/nmatrix_yale_resize_test_associations.yaml +2802 -0
  102. data/spec/nmatrix_yale_spec.rb +286 -0
  103. data/spec/rspec_monkeys.rb +56 -0
  104. data/spec/rspec_spec.rb +35 -0
  105. data/spec/shortcuts_spec.rb +474 -0
  106. data/spec/slice_set_spec.rb +162 -0
  107. data/spec/spec_helper.rb +172 -0
  108. data/spec/stat_spec.rb +214 -0
  109. data/spec/test.pcd +20 -0
  110. data/spec/utm5940.mtx +83844 -0
  111. metadata +295 -0
data/ext/nmatrix/storage/yale/class.h
@@ -0,0 +1,1139 @@
+ /////////////////////////////////////////////////////////////////////
+ // = NMatrix
+ //
+ // A linear algebra library for scientific computation in Ruby.
+ // NMatrix is part of SciRuby.
+ //
+ // NMatrix was originally inspired by and derived from NArray, by
+ // Masahiro Tanaka: http://narray.rubyforge.org
+ //
+ // == Copyright Information
+ //
+ // SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
+ // NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
+ //
+ // Please see LICENSE.txt for additional copyright notices.
+ //
+ // == Contributing
+ //
+ // By contributing source code to SciRuby, you agree to be bound by
+ // our Contributor Agreement:
+ //
+ // * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
+ //
+ // == class.h
+ //
+ // Object-oriented interface for Yale.
+ //
+
+ #ifndef YALE_CLASS_H
+ # define YALE_CLASS_H
+
+ #include "../dense/dense.h"
+ #include "math/transpose.h"
+ #include "yale.h"
+
+ namespace nm {
+
+
+ /*
+  * This class is basically an intermediary for YALE_STORAGE objects which enables us to treat them like C++ objects. It
+  * keeps the src pointer as its s, along with other relevant slice information.
+  *
+  * It's useful for creating iterators and such. It isn't responsible for allocating or freeing its YALE_STORAGE* pointers.
+  */
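// An illustrative sketch (editor's annotation, not part of the gem source) of the
// "new Yale" layout this class wraps. For the 4x4 matrix
//
//   [ 1 0 2 0 ]
//   [ 0 3 0 0 ]
//   [ 0 0 4 5 ]
//   [ 0 6 0 7 ]
//
// with default value 0, the two parallel arrays hold:
//
//   pos:   0  1  2  3  4    5  6  7
//   IJA = [5, 6, 6, 7, 8,   2, 3, 1]   // row pointers, then column indices
//   A   = [1, 3, 4, 7, 0,   2, 5, 6]   // diagonal and default, then non-diagonal values
//
// IJA[i]..IJA[i+1] brackets row i's non-diagonal entries, so ija(4) == 8 is size(),
// a(4) is default_obj(), and ndnz == 3.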
+
+ template <typename D>
+ class YaleStorage {
+ public:
+   YaleStorage(const YALE_STORAGE* storage)
+     : s(reinterpret_cast<YALE_STORAGE*>(storage->src)),
+       slice(storage != storage->src),
+       slice_shape(storage->shape),
+       slice_offset(storage->offset)
+   {
+     nm_yale_storage_register(storage->src);
+   }
+
+   YaleStorage(const STORAGE* storage)
+     : s(reinterpret_cast<YALE_STORAGE*>(storage->src)),
+       slice(storage != storage->src),
+       slice_shape(storage->shape),
+       slice_offset(storage->offset)
+   {
+     nm_yale_storage_register(reinterpret_cast<STORAGE*>(storage->src));
+   }
+
+   ~YaleStorage() {
+     nm_yale_storage_unregister(s);
+   }
+
+   /* Allows us to do YaleStorage<uint8>::dtype() to get an nm::dtype_t */
+   static nm::dtype_t dtype() {
+     return nm::ctype_to_dtype_enum<D>::value_type;
+   }
+
+
+   bool is_ref() const { return slice; }
+
+   inline D* default_obj_ptr() { return &(a(s->shape[0])); }
+   inline D& default_obj() { return a(s->shape[0]); }
+   inline const D& default_obj() const { return a(s->shape[0]); }
+   inline const D& const_default_obj() const { return a(s->shape[0]); }
+
+
+   /*
+    * Return a Ruby VALUE representation of default_obj()
+    */
+   VALUE const_default_value() const {
+     return nm::yale_storage::nm_rb_dereference(a(s->shape[0]));
+   }
+
+   inline size_t* ija_p() const { return reinterpret_cast<size_t*>(s->ija); }
+   inline const size_t& ija(size_t p) const { return ija_p()[p]; }
+   inline size_t& ija(size_t p) { return ija_p()[p]; }
+   inline D* a_p() const { return reinterpret_cast<D*>(s->a); }
+   inline const D& a(size_t p) const { return a_p()[p]; }
+   inline D& a(size_t p) { return a_p()[p]; }
+
+   bool real_row_empty(size_t i) const { return ija(i+1) == ija(i); }
+
+   inline size_t* shape_p() const { return slice_shape; }
+   inline size_t shape(uint8_t d) const { return slice_shape[d]; }
+   inline size_t* real_shape_p() const { return s->shape; }
+   inline size_t real_shape(uint8_t d) const { return s->shape[d]; }
+   inline size_t* offset_p() const { return slice_offset; }
+   inline size_t offset(uint8_t d) const { return slice_offset[d]; }
+   inline size_t capacity() const { return s->capacity; }
+   inline size_t size() const { return ija(real_shape(0)); }
+
+
+   /*
+    * Returns true if the value at apos is the default value.
+    * Mainly used for determining if the diagonal contains zeros.
+    */
+   bool is_pos_default_value(size_t apos) const {
+     return (a(apos) == const_default_obj());
+   }
+
+   /*
+    * Given a size-2 array of size_t, representing the shape, determine
+    * the maximum size of YaleStorage arrays.
+    */
+   static size_t max_size(const size_t* shape) {
+     size_t result = shape[0] * shape[1] + 1;
+     if (shape[0] > shape[1])
+       result += shape[0] - shape[1];
+     return result;
+   }
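// Why this bound (editor's annotation, consistent with the code above): a fully
// dense n-by-m matrix stores n diagonal slots, one default slot, and
// n*m - min(n,m) non-diagonal entries, which totals n*m + 1 when n <= m and
// n*m + 1 + (n - m) when n > m. E.g. shape {4,3}: 4*3 + 1 + (4-3) = 14.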
+
+   /*
+    * Minimum size of Yale Storage arrays given some shape.
+    */
+   static size_t min_size(const size_t* shape) {
+     return shape[0]*2 + 1;
+   }
+
+   /*
+    * This is the guaranteed maximum size of the IJA/A arrays of the matrix given its shape.
+    */
+   inline size_t real_max_size() const {
+     return YaleStorage<D>::max_size(real_shape_p());
+   }
+
+   // Binary search between left and right in IJA for column ID real_j. Returns left if not found.
+   size_t real_find_pos(size_t left, size_t right, size_t real_j, bool& found) const {
+     if (left > right) {
+       found = false;
+       return left;
+     }
+
+     size_t mid = (left + right) / 2;
+     size_t mid_j = ija(mid);
+
+     if (mid_j == real_j) {
+       found = true;
+       return mid;
+     } else if (mid_j > real_j) return real_find_pos(left, mid - 1, real_j, found);
+     else return real_find_pos(mid + 1, right, real_j, found);
+   }
+
+   // Binary search between left and right in IJA for column ID real_j. Essentially finds where the slice should begin,
+   // with no guarantee that there's anything in there.
+   size_t real_find_left_boundary_pos(size_t left, size_t right, size_t real_j) const {
+     if (left > right) return right;
+     if (ija(left) >= real_j) return left;
+
+     size_t mid = (left + right) / 2;
+     size_t mid_j = ija(mid);
+
+     if (mid_j == real_j) return mid;
+     else if (mid_j > real_j) return real_find_left_boundary_pos(left, mid, real_j);
+     else return real_find_left_boundary_pos(mid + 1, right, real_j);
+   }
+
+   // Binary search between left and right in IJA for column ID real_j. Essentially finds where the slice should end,
+   // with no guarantee that there's anything in there.
+   size_t real_find_right_boundary_pos(size_t left, size_t right, size_t real_j) const {
+     if (left > right) return right;
+     if (ija(right) <= real_j) return right;
+
+     size_t mid = (left + right) / 2;
+     size_t mid_j = ija(mid);
+
+     if (mid_j == real_j) return mid;
+     else if (mid_j > real_j) return real_find_right_boundary_pos(left, mid, real_j);
+     else return real_find_right_boundary_pos(mid + 1, right, real_j);
+   }
+
+
+   // Binary search for coordinates i,j in the slice. If not found, the bool in the returned pair is false.
+   std::pair<size_t,bool> find_pos(const std::pair<size_t,size_t>& ij) const {
+     size_t left = ija(ij.first + offset(0));
+     size_t right = ija(ij.first + offset(0) + 1) - 1;
+
+     std::pair<size_t, bool> result;
+     result.first = real_find_pos(left, right, ij.second + offset(1), result.second);
+     return result;
+   }
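// Worked example (editor's annotation, using the layout sketch above with offsets
// {0,0}): find_pos({2,3}) searches IJA positions ija(2) == 6 through ija(3)-1 == 6
// for column 3; position 6 holds column 3, so the result is {6, true}.
// find_pos({1,2}) gets the empty range 6..5, so left > right immediately and the
// returned pair's bool is false.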
+
+   // Binary search for coordinates i,j in the slice, and return the first position >= j in row i.
+   size_t find_pos_for_insertion(size_t i, size_t j) const {
+     size_t left = ija(i + offset(0));
+     size_t right = ija(i + offset(0) + 1) - 1;
+
+     // Check that the right search point is valid. rflbp will check to make sure the left is valid relative to right.
+     if (right > ija(real_shape(0))) {
+       right = ija(real_shape(0))-1;
+     }
+     size_t result = real_find_left_boundary_pos(left, right, j + offset(1));
+     return result;
+   }
+
+   typedef yale_storage::basic_iterator_T<D,D,YaleStorage<D> > basic_iterator;
+   typedef yale_storage::basic_iterator_T<D,const D,const YaleStorage<D> > const_basic_iterator;
+
+   typedef yale_storage::stored_diagonal_iterator_T<D,D,YaleStorage<D> > stored_diagonal_iterator;
+   typedef yale_storage::stored_diagonal_iterator_T<D,const D,const YaleStorage<D> > const_stored_diagonal_iterator;
+
+   typedef yale_storage::iterator_T<D,D,YaleStorage<D> > iterator;
+   typedef yale_storage::iterator_T<D,const D,const YaleStorage<D> > const_iterator;
+
+
+   friend class yale_storage::row_iterator_T<D,D,YaleStorage<D> >;
+   typedef yale_storage::row_iterator_T<D,D,YaleStorage<D> > row_iterator;
+   typedef yale_storage::row_iterator_T<D,const D,const YaleStorage<D> > const_row_iterator;
+
+   typedef yale_storage::row_stored_iterator_T<D,D,YaleStorage<D>,row_iterator> row_stored_iterator;
+   typedef yale_storage::row_stored_nd_iterator_T<D,D,YaleStorage<D>,row_iterator> row_stored_nd_iterator;
+   typedef yale_storage::row_stored_iterator_T<D,const D,const YaleStorage<D>,const_row_iterator> const_row_stored_iterator;
+   typedef yale_storage::row_stored_nd_iterator_T<D,const D,const YaleStorage<D>,const_row_iterator> const_row_stored_nd_iterator;
+   typedef std::pair<row_iterator,row_stored_nd_iterator> row_nd_iter_pair;
+
+   // Variety of iterator begin and end functions.
+   iterator begin(size_t row = 0) { return iterator(*this, row); }
+   iterator row_end(size_t row) { return begin(row+1); }
+   iterator end() { return iterator(*this, shape(0)); }
+   const_iterator cbegin(size_t row = 0) const { return const_iterator(*this, row); }
+   const_iterator crow_end(size_t row) const { return cbegin(row+1); }
+   const_iterator cend() const { return const_iterator(*this, shape(0)); }
+
+   stored_diagonal_iterator sdbegin(size_t d = 0) { return stored_diagonal_iterator(*this, d); }
+   stored_diagonal_iterator sdend() {
+     return stored_diagonal_iterator(*this, std::min( shape(0) + offset(0), shape(1) + offset(1) ) - std::max(offset(0), offset(1)) );
+   }
+   const_stored_diagonal_iterator csdbegin(size_t d = 0) const { return const_stored_diagonal_iterator(*this, d); }
+   const_stored_diagonal_iterator csdend() const {
+     return const_stored_diagonal_iterator(*this, std::min( shape(0) + offset(0), shape(1) + offset(1) ) - std::max(offset(0), offset(1)) );
+   }
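// The end bound above (editor's annotation): diagonal entry k is visible in a
// slice iff offset(d) <= k < offset(d) + shape(d) in both dimensions, so the
// visible diagonal segment has length
// min(shape(0) + offset(0), shape(1) + offset(1)) - max(offset(0), offset(1)).
// E.g. offsets {1,2} and shape {3,3}: min(4,5) - max(1,2) = 2 stored diagonal entries.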
+   row_iterator ribegin(size_t row = 0) { return row_iterator(*this, row); }
+   row_iterator riend() { return row_iterator(*this, shape(0)); }
+   const_row_iterator cribegin(size_t row = 0) const { return const_row_iterator(*this, row); }
+   const_row_iterator criend() const { return const_row_iterator(*this, shape(0)); }
+
+
+   /*
+    * Get a count of the ndnz in the slice as if it were its own matrix.
+    */
+   size_t count_copy_ndnz() const {
+     if (!slice) return s->ndnz; // easy way -- not a slice.
+     size_t count = 0;
+
+     // Visit all stored entries.
+     for (const_row_iterator it = cribegin(); it != criend(); ++it) {
+       for (auto jt = it.begin(); jt != it.end(); ++jt) {
+         if (it.i() != jt.j() && *jt != const_default_obj()) ++count;
+       }
+     }
+
+     return count;
+   }
+
+   /*
+    * Returns the iterator for i,j or snd_end() if not found.
+    */
+   /* stored_nondiagonal_iterator find(const std::pair<size_t,size_t>& ij) {
+     std::pair<size_t,bool> find_pos_result = find_pos(ij);
+     if (!find_pos_result.second) return sndend();
+     else return stored_nondiagonal_iterator(*this, ij.first, find_pos_result.first);
+   } */
+
+   /*
+    * Returns a stored_nondiagonal_iterator pointing to the location where some coords i,j should go, or returns their
+    * location if present.
+    */
+   /*std::pair<row_iterator, row_stored_nd_iterator> lower_bound(const std::pair<size_t,size_t>& ij) {
+     row_iterator it = ribegin(ij.first);
+     row_stored_nd_iterator jt = it.lower_bound(ij.second);
+     return std::make_pair(it,jt);
+   } */
+
+   class multi_row_insertion_plan {
+   public:
+     std::vector<size_t> pos;
+     std::vector<int> change;
+     int total_change; // the net change occurring
+     size_t num_changes; // the total number of rows that need to change size
+     multi_row_insertion_plan(size_t rows_in_slice) : pos(rows_in_slice), change(rows_in_slice), total_change(0), num_changes(0) { }
+
+     void add(size_t i, const std::pair<int,size_t>& change_and_pos) {
+       pos[i] = change_and_pos.second;
+       change[i] = change_and_pos.first;
+       total_change += change_and_pos.first;
+       if (change_and_pos.first != 0) num_changes++;
+     }
+   };
+
+
+   /*
+    * Find all the information we need in order to modify multiple rows.
+    */
+   multi_row_insertion_plan insertion_plan(row_iterator i, size_t j, size_t* lengths, D* const v, size_t v_size) const {
+     multi_row_insertion_plan p(lengths[0]);
+
+     // v_offset is our offset in the array v. If the user wants to change two elements in each of three rows,
+     // but passes an array of size 3, we need to know that the second insertion plan must start at position
+     // 2 instead of 0; and then the third must start at 1.
+     size_t v_offset = 0;
+     for (size_t m = 0; m < lengths[0]; ++m, ++i) {
+       p.add(m, i.single_row_insertion_plan(j, lengths[1], v, v_size, v_offset));
+     }
+
+     return p;
+   }
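// Worked example of the v_offset recycling described in the comment above
// (editor's annotation, inferred from that comment): with lengths {3,2} and
// v_size == 3, row 0 consumes v[0],v[1]; row 1 starts at v_offset 2 and wraps
// to consume v[2],v[0]; row 2 then starts at v_offset 1.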
+
+
+
+   /*
+    * Insert entries in multiple rows. Slice-setting.
+    */
+   void insert(row_iterator i, size_t j, size_t* lengths, D* const v, size_t v_size) {
+     // Expensive pre-processing step: find all the information we need in order to do insertions.
+     multi_row_insertion_plan p = insertion_plan(i, j, lengths, v, v_size);
+
+     // There are more efficient ways to do this, but this is the low hanging fruit version of the algorithm.
+     // Here's the full problem: http://stackoverflow.com/questions/18753375/algorithm-for-merging-short-lists-into-a-long-vector
+     // --JW
+
+     bool resize = false;
+     size_t sz = size();
+     if (p.num_changes > 1) resize = true; // TODO: There are surely better ways to do this, but I've gone for the low-hanging fruit
+     else if (sz + p.total_change > capacity() || sz + p.total_change <= capacity() / nm::yale_storage::GROWTH_CONSTANT) resize = true;
+
+     if (resize) {
+       update_resize_move_insert(i.i() + offset(0), j + offset(1), lengths, v, v_size, p);
+     } else {
+
+       // Make the necessary modifications, which hopefully can be done in-place.
+       size_t v_offset = 0;
+       //int accum = 0;
+       for (size_t ii = 0; ii < lengths[0]; ++ii, ++i) {
+         i.insert(row_stored_nd_iterator(i, p.pos[ii]), j, lengths[1], v, v_size, v_offset);
+       }
+     }
+   }
+
+
+   /*
+    * Most Ruby-centric insert function. Accepts coordinate information in slice,
+    * and value information of various types in +right+. This function must evaluate
+    * +right+ and determine what other functions to call in order to properly handle
+    * it.
+    */
+   void insert(SLICE* slice, VALUE right) {
+
+     NM_CONSERVATIVE(nm_register_value(&right));
+
+     std::pair<NMATRIX*,bool> nm_and_free =
+       interpret_arg_as_dense_nmatrix(right, dtype());
+
+     // Map the data onto D* v.
+     D* v;
+     size_t v_size = 1;
+
+     if (nm_and_free.first) {
+       DENSE_STORAGE* s = reinterpret_cast<DENSE_STORAGE*>(nm_and_free.first->storage);
+       v = reinterpret_cast<D*>(s->elements);
+       v_size = nm_storage_count_max_elements(s);
+
+     } else if (RB_TYPE_P(right, T_ARRAY)) {
+       v_size = RARRAY_LEN(right);
+       v = NM_ALLOC_N(D, v_size);
+       if (dtype() == nm::RUBYOBJ) {
+         nm_register_values(reinterpret_cast<VALUE*>(v), v_size);
+       }
+       for (size_t m = 0; m < v_size; ++m) {
+         rubyval_to_cval(rb_ary_entry(right, m), s->dtype, &(v[m]));
+       }
+       if (dtype() == nm::RUBYOBJ) {
+         nm_unregister_values(reinterpret_cast<VALUE*>(v), v_size);
+       }
+
+     } else {
+       v = reinterpret_cast<D*>(rubyobj_to_cval(right, dtype()));
+     }
+
+     row_iterator i = ribegin(slice->coords[0]);
+
+     if (slice->single || (slice->lengths[0] == 1 && slice->lengths[1] == 1)) { // single entry
+       i.insert(slice->coords[1], *v);
+     } else if (slice->lengths[0] == 1) { // single row, multiple entries
+       i.insert(slice->coords[1], slice->lengths[1], v, v_size);
+     } else { // multiple rows, unknown number of entries
+       insert(i, slice->coords[1], slice->lengths, v, v_size);
+     }
+
+     // Only free v if it was allocated in this function.
+     if (nm_and_free.first) {
+       if (nm_and_free.second) {
+         nm_delete(nm_and_free.first);
+       }
+     } else NM_FREE(v);
+
+     NM_CONSERVATIVE(nm_unregister_value(&right));
+   }
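// Dispatch summary for the insert above (editor's annotation): a slice flagged
// single or with lengths {1,1} takes the single-entry path; lengths {1,k} the
// single-row path; anything spanning multiple rows falls through to the
// multi-row insert(row_iterator, ...) overload defined earlier.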
+
+
+   /*
+    * Remove an entry from an already found non-diagonal position.
+    */
+   row_iterator erase(row_iterator it, const row_stored_nd_iterator& position) {
+     it.erase(position);
+     return it;
+   }
+
+
+   /*
+    * Remove an entry from the matrix at the already-located position. If diagonal, just sets to default; otherwise,
+    * actually removes the entry.
+    */
+   row_iterator erase(row_iterator it, const row_stored_iterator& jt) {
+     it.erase((const row_stored_nd_iterator&)jt);
+     return it;
+   }
+
+
+   row_iterator insert(row_iterator it, row_stored_iterator position, size_t j, const D& val) {
+     it.insert(position, j, val);
+     return it;
+   }
+
+
+   /*
+    * Insert an element in column j, using position's p() as the location to insert the new column. i and j will be the
+    * coordinates. This also does a replace if column j is already present.
+    *
+    * Returns true if a new entry was added and false if an entry was replaced.
+    *
+    * Pre-conditions:
+    * - position.p() must be between ija(real_i) and ija(real_i+1), inclusive, where real_i = i + offset(0)
+    * - real_i and real_j must not be equal
+    */
+   row_iterator insert(row_iterator it, row_stored_nd_iterator position, size_t j, const D& val) {
+     it.insert(position, j, val);
+     return it;
+   }
+
+
+   /*
+    * Insert n elements v in columns j, using position as a guide. i gives the starting row. If at any time a value in j
+    * decreases,
+    */
+   /*bool insert(stored_iterator position, size_t n, size_t i, size_t* j, DType* v) {
+
+   } */
+
+   /*
+    * A pseudo-insert operation, since the diagonal portion of the A array is constant size.
+    */
+   stored_diagonal_iterator insert(stored_diagonal_iterator position, const D& val) {
+     *position = val;
+     return position;
+   }
+
+
+   /* iterator insert(iterator position, size_t j, const D& val) {
+     if (position.real_i() == position.real_j()) {
+       s->a(position.real_i()) = val;
+       return position;
+     } else {
+       row_iterator it = ribegin(position.i());
+       row_stored_nd_iterator position = it.ndbegin(j);
+       return insert(it, position, j, val);
+     }
+   }*/
+
+
+
+
+   /*
+    * Returns a pointer to the location of some entry in the matrix.
+    *
+    * This is needed for backwards compatibility. We don't really want anyone
+    * to modify the contents of that pointer, because it might be the ZERO location.
+    *
+    * TODO: Change all storage_get functions to return a VALUE once we've put list and
+    * dense in OO mode. ???
+    */
+   inline D* get_single_p(SLICE* slice) {
+     size_t real_i = offset(0) + slice->coords[0],
+            real_j = offset(1) + slice->coords[1];
+
+     if (real_i == real_j)
+       return &(a(real_i));
+
+     if (ija(real_i) == ija(real_i+1))
+       return default_obj_ptr(); // zero pointer
+
+     // binary search for a column's location
+     std::pair<size_t,bool> p = find_pos(std::make_pair(slice->coords[0], slice->coords[1]));
+     if (p.second)
+       return &(a(p.first));
+     // not found: return default
+     return default_obj_ptr(); // zero pointer
+   }
+
+
+   /*
+    * Allocate a reference pointing to s. Note that even if +this+ is a reference,
+    * we can create a reference within it.
+    *
+    * Note: Make sure you NM_FREE() the result of this call. You can't just cast it
+    * directly into a YaleStorage<D> class.
+    */
+   YALE_STORAGE* alloc_ref(SLICE* slice) {
+     YALE_STORAGE* ns = NM_ALLOC( YALE_STORAGE );
+
+     ns->dim = s->dim;
+     ns->offset = NM_ALLOC_N(size_t, ns->dim);
+     ns->shape = NM_ALLOC_N(size_t, ns->dim);
+
+     for (size_t d = 0; d < ns->dim; ++d) {
+       ns->offset[d] = slice->coords[d] + offset(d);
+       ns->shape[d] = slice->lengths[d];
+     }
+
+     ns->dtype = s->dtype;
+     ns->a = a_p();
+     ns->ija = ija_p();
+
+     ns->src = s;
+     s->count++;
+
+     ns->ndnz = 0;
+     ns->capacity = 0;
+
+     return ns;
+   }
+
+
+   /*
+    * Allocates and initializes the basic struct (but not IJA or A vectors).
+    */
+   static YALE_STORAGE* alloc(size_t* shape, size_t dim = 2) {
+     YALE_STORAGE* s = NM_ALLOC( YALE_STORAGE );
+
+     s->ndnz = 0;
+     s->dtype = dtype();
+     s->shape = shape;
+     s->offset = NM_ALLOC_N(size_t, dim);
+     for (size_t d = 0; d < dim; ++d)
+       s->offset[d] = 0;
+     s->dim = dim;
+     s->src = reinterpret_cast<STORAGE*>(s);
+     s->count = 1;
+
+     return s;
+   }
+
+
+   /*
+    * Create basic storage of same dtype as YaleStorage<D>. Allocates it,
+    * reserves necessary space, but doesn't fill structure at all.
+    */
+   static YALE_STORAGE* create(size_t* shape, size_t reserve) {
+
+     YALE_STORAGE* s = alloc( shape, 2 );
+     size_t max_sz = YaleStorage<D>::max_size(shape),
+            min_sz = YaleStorage<D>::min_size(shape);
+
+     if (reserve < min_sz) {
+       s->capacity = min_sz;
+     } else if (reserve > max_sz) {
+       s->capacity = max_sz;
+     } else {
+       s->capacity = reserve;
+     }
+
+     s->ija = NM_ALLOC_N( size_t, s->capacity );
+     s->a = NM_ALLOC_N( D, s->capacity );
+
+     return s;
+   }
+
+
+   /*
+    * Clear out the D portion of the A vector (clearing the diagonal and setting
+    * the zero value).
+    */
+   static void clear_diagonal_and_zero(YALE_STORAGE& s, D* init_val = NULL) {
+     D* a = reinterpret_cast<D*>(s.a);
+
+     // Clear out the diagonal + one extra entry
+     if (init_val) {
+       for (size_t i = 0; i <= s.shape[0]; ++i)
+         a[i] = *init_val;
+     } else {
+       for (size_t i = 0; i <= s.shape[0]; ++i)
+         a[i] = 0;
+     }
+   }
+
+
+   /*
+    * Empty the matrix by initializing the IJA vector and setting the diagonal to 0.
+    *
+    * Called when most YALE_STORAGE objects are created.
+    *
+    * Can't go inside of class YaleStorage because YaleStorage creation requires that
+    * IJA already be initialized.
+    */
+   static void init(YALE_STORAGE& s, D* init_val) {
+     size_t IA_INIT = s.shape[0] + 1;
+     for (size_t m = 0; m < IA_INIT; ++m) {
+       s.ija[m] = IA_INIT;
+     }
+
+     clear_diagonal_and_zero(s, init_val);
+   }
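// After init on an empty 3x3 (IA_INIT == 4), both arrays hold only the fixed part
// (editor's annotation): IJA = [4, 4, 4, 4], so every row is empty and size() == 4,
// and A = [z, z, z, z] where z is the default value; compare the layout sketch near
// the top of the class.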
+
+
+   /*
+    * Make a very basic allocation. No structure or copy or anything. It'll be shaped like this
+    * matrix.
+    *
+    * TODO: Combine this with ::create()'s ::alloc(). These are redundant.
+    */
+   template <typename E>
+   YALE_STORAGE* alloc_basic_copy(size_t new_capacity, size_t new_ndnz) const {
+     nm::dtype_t new_dtype = nm::ctype_to_dtype_enum<E>::value_type;
+     YALE_STORAGE* lhs = NM_ALLOC( YALE_STORAGE );
+     lhs->dim = s->dim;
+     lhs->shape = NM_ALLOC_N( size_t, lhs->dim );
+
+     lhs->shape[0] = shape(0);
+     lhs->shape[1] = shape(1);
+
+     lhs->offset = NM_ALLOC_N( size_t, lhs->dim );
+
+     lhs->offset[0] = 0;
+     lhs->offset[1] = 0;
+
+     lhs->capacity = new_capacity;
+     lhs->dtype = new_dtype;
+     lhs->ndnz = new_ndnz;
+     lhs->ija = NM_ALLOC_N( size_t, new_capacity );
+     lhs->a = NM_ALLOC_N( E, new_capacity );
+     lhs->src = lhs;
+     lhs->count = 1;
+
+     return lhs;
+   }
+
+
+   /*
+    * Make a full matrix structure copy (entries remain uninitialized). Remember to NM_FREE()!
+    */
+   template <typename E>
+   YALE_STORAGE* alloc_struct_copy(size_t new_capacity) const {
+     YALE_STORAGE* lhs = alloc_basic_copy<E>(new_capacity, count_copy_ndnz());
+     // Now copy the IJA contents
+     if (slice) {
+       rb_raise(rb_eNotImpError, "cannot copy struct due to different offsets");
+     } else {
+       for (size_t m = 0; m < size(); ++m) {
+         lhs->ija[m] = ija(m); // copy indices
+       }
+     }
+     return lhs;
+   }
+
+
+   /*
+    * Copy this slice (or the full matrix if it isn't a slice) into a new matrix which is already allocated, ns.
+    */
+   template <typename E, bool Yield=false>
+   void copy(YALE_STORAGE& ns) const {
+     //nm::dtype_t new_dtype = nm::ctype_to_dtype_enum<E>::value_type;
+     // get the default value for initialization (we'll re-use val for other copies after this)
+     E val = static_cast<E>(const_default_obj());
+
+     // initialize the matrix structure and clear the diagonal so we don't have to
+     // keep track of unwritten entries.
+     YaleStorage<E>::init(ns, &val);
+
+     E* ns_a = reinterpret_cast<E*>(ns.a);
+     size_t sz = shape(0) + 1; // current used size of ns
+     nm_yale_storage_register(&ns);
+
+     // FIXME: If diagonals line up, it's probably faster to do this with stored diagonal and stored non-diagonal iterators
+     for (const_row_iterator it = cribegin(); it != criend(); ++it) {
+       for (auto jt = it.begin(); !jt.end(); ++jt) {
+         if (it.i() == jt.j()) {
+           if (Yield) ns_a[it.i()] = rb_yield(~jt);
+           else ns_a[it.i()] = static_cast<E>(*jt);
+         } else if (*jt != const_default_obj()) {
+           if (Yield) ns_a[sz] = rb_yield(~jt);
+           else ns_a[sz] = static_cast<E>(*jt);
+           ns.ija[sz] = jt.j();
+           ++sz;
+         }
+       }
+       ns.ija[it.i()+1] = sz;
+     }
+     nm_yale_storage_unregister(&ns);
+
+     //ns.ija[shape(0)] = sz; // indicate end of last row
+     ns.ndnz = sz - shape(0) - 1; // update ndnz count
+   }
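// Sanity check against the layout sketch (editor's annotation): copying the full
// 4x4 example writes sz == 8 entries, so ns.ndnz = 8 - 4 - 1 = 3, which is exactly
// its three stored non-diagonal values.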
+
+
+   /*
+    * Allocate a casted copy of this matrix/reference. Remember to NM_FREE() the result!
+    *
+    * If Yield is true, E must be nm::RubyObject, and it will call an rb_yield upon the stored value.
+    */
+   template <typename E, bool Yield = false>
+   YALE_STORAGE* alloc_copy() const {
+     //nm::dtype_t new_dtype = nm::ctype_to_dtype_enum<E>::value_type;
+
+     YALE_STORAGE* lhs;
+     if (slice) {
+       size_t* xshape = NM_ALLOC_N(size_t, 2);
+       xshape[0] = shape(0);
+       xshape[1] = shape(1);
+       size_t ndnz = count_copy_ndnz();
+       size_t reserve = shape(0) + ndnz + 1;
+
+       // std::cerr << "reserve = " << reserve << std::endl;
+
+       lhs = YaleStorage<E>::create(xshape, reserve);
+
+       // FIXME: This should probably be a throw which gets caught outside of the object.
+       if (lhs->capacity < reserve)
+         rb_raise(nm_eStorageTypeError, "conversion failed; capacity of %lu requested, max allowable is %lu", reserve, lhs->capacity);
+
+       // Fill lhs with what's in our current matrix.
+       copy<E, Yield>(*lhs);
+     } else {
+       // Copy the structure and setup the IJA structure.
+       lhs = alloc_struct_copy<E>(s->capacity);
+
+       E* la = reinterpret_cast<E*>(lhs->a);
+
+       nm_yale_storage_register(lhs);
+       for (size_t m = 0; m < size(); ++m) {
+         if (Yield) {
+           la[m] = rb_yield(nm::yale_storage::nm_rb_dereference(a(m)));
+         }
+         else la[m] = static_cast<E>(a(m));
+       }
+       nm_yale_storage_unregister(lhs);
+
+     }
+
+     return lhs;
+   }
+
+   /*
+    * Allocate a transposed, casted copy of this matrix/reference. Remember to NM_FREE() the result!
+    *
+    * If Yield is true, E must be nm::RubyObject, and it will call an rb_yield upon the stored value.
+    */
+   template <typename E, bool Yield = false>
+   YALE_STORAGE* alloc_copy_transposed() const {
+
+     if (slice) {
+       rb_raise(rb_eNotImpError, "please make a copy before transposing");
+     } else {
+       // Copy the structure and setup the IJA structure.
+       size_t* xshape = NM_ALLOC_N(size_t, 2);
+       xshape[0] = shape(1);
+       xshape[1] = shape(0);
+
+       // Take a stab at the number of non-diagonal stored entries we'll have.
+       size_t reserve = size() - xshape[1] + xshape[0];
+       YALE_STORAGE* lhs = YaleStorage<E>::create(xshape, reserve);
+       E r_init = static_cast<E>(const_default_obj());
+       YaleStorage<E>::init(*lhs, &r_init);
+
+       nm::yale_storage::transpose_yale<D,E,true,true>(shape(0), shape(1), ija_p(), ija_p(), a_p(), const_default_obj(),
+                                                       lhs->ija, lhs->ija, reinterpret_cast<E*>(lhs->a), r_init);
+       return lhs;
+     }
+
+     return NULL;
+   }
+
+
+   /*
+    * Comparison between two matrices. Does not check size and such -- assumption is that they are the same shape.
+    */
+   template <typename E>
+   bool operator==(const YaleStorage<E>& rhs) const {
+     for (size_t i = 0; i < shape(0); ++i) {
+       typename YaleStorage<D>::const_row_iterator li = cribegin(i);
+       typename YaleStorage<E>::const_row_iterator ri = rhs.cribegin(i);
+
+       size_t j = 0; // keep track of j so we can compare different defaults
+
+       auto lj = li.begin();
+       auto rj = ri.begin();
+       while (!lj.end() || !rj.end()) {
+         if (lj < rj) {
+           if (*lj != rhs.const_default_obj()) return false;
+           ++lj;
+         } else if (rj < lj) {
+           if (const_default_obj() != *rj) return false;
+           ++rj;
+         } else { // rj == lj
+           if (*lj != *rj) return false;
+           ++lj;
+           ++rj;
+         }
+         ++j;
+       }
+
+       // if we skip an entry (because it's an ndnz in BOTH matrices), we need to compare defaults.
+       // (We know we skipped if lj and rj hit end before j does.)
+       if (j < shape(1) && const_default_obj() != rhs.const_default_obj()) return false;
+
+       ++li;
+       ++ri;
+     }
+
+     return true;
+   }
+
+   /*
+    * Necessary for element-wise operations. The return dtype will be nm::RUBYOBJ.
+    */
+   template <typename E>
+   VALUE map_merged_stored(VALUE klass, nm::YaleStorage<E>& t, VALUE r_init) const {
+     nm_register_value(&r_init);
+     VALUE s_init = const_default_value(),
+           t_init = t.const_default_value();
+     nm_register_value(&s_init);
+     nm_register_value(&t_init);
+
+     // Make a reasonable approximation of the resulting capacity
+     size_t s_ndnz = count_copy_ndnz(),
+            t_ndnz = t.count_copy_ndnz();
+     size_t reserve = shape(0) + std::max(s_ndnz, t_ndnz) + 1;
+
+     size_t* xshape = NM_ALLOC_N(size_t, 2);
+     xshape[0] = shape(0);
+     xshape[1] = shape(1);
+
+     YALE_STORAGE* rs = YaleStorage<nm::RubyObject>::create(xshape, reserve);
+
+     if (r_init == Qnil) {
+       nm_unregister_value(&r_init);
+       r_init = rb_yield_values(2, s_init, t_init);
+       nm_register_value(&r_init);
+     }
+
+     nm::RubyObject r_init_obj(r_init);
+
+     // Prepare the matrix structure
+     YaleStorage<nm::RubyObject>::init(*rs, &r_init_obj);
+     NMATRIX* m = nm_create(nm::YALE_STORE, reinterpret_cast<STORAGE*>(rs));
+     nm_register_nmatrix(m);
+     VALUE result = Data_Wrap_Struct(klass, nm_mark, nm_delete, m);
+     nm_unregister_nmatrix(m);
+     nm_register_value(&result);
+     nm_unregister_value(&r_init);
+
+     RETURN_SIZED_ENUMERATOR_PRE
+     nm_unregister_value(&result);
+     nm_unregister_value(&t_init);
+     nm_unregister_value(&s_init);
+     // No obvious, efficient way to pass a length function as the fourth argument here:
+     RETURN_SIZED_ENUMERATOR(result, 0, 0, 0);
+
+     // Create an object for us to iterate over.
+     YaleStorage<nm::RubyObject> r(rs);
+
+     // Walk down our new matrix, inserting values as we go.
+     for (size_t i = 0; i < xshape[0]; ++i) {
+       YaleStorage<nm::RubyObject>::row_iterator ri = r.ribegin(i);
+       typename YaleStorage<D>::const_row_iterator si = cribegin(i);
+       typename YaleStorage<E>::const_row_iterator ti = t.cribegin(i);
+
+       auto sj = si.begin();
+       auto tj = ti.begin();
+       auto rj = ri.ndbegin();
+
+       while (sj != si.end() || tj != ti.end()) {
+         VALUE v;
+         size_t j;
+
+         if (sj < tj) {
+           v = rb_yield_values(2, ~sj, t_init);
+           j = sj.j();
+           ++sj;
+         } else if (tj < sj) {
+           v = rb_yield_values(2, s_init, ~tj);
+           j = tj.j();
+           ++tj;
+         } else {
+           v = rb_yield_values(2, ~sj, ~tj);
+           j = sj.j();
+           ++sj;
+           ++tj;
+         }
+
+         // FIXME: This can be sped up by inserting all at the same time
+         // since it's a new matrix. But that function isn't quite ready
+         // yet.
+         if (j == i) r.a(i) = v;
+         else rj = ri.insert(rj, j, v);
+         //RB_P(rb_funcall(result, rb_intern("yale_ija"), 0));
+       }
+     }
+     nm_unregister_value(&result);
+     nm_unregister_value(&t_init);
+     nm_unregister_value(&s_init);
+
+     return result;
+   }
+
+ protected:
+   /*
+    * Update row sizes starting with row i
+    */
+   void update_real_row_sizes_from(size_t real_i, int change) {
+     ++real_i;
+     for (; real_i <= real_shape(0); ++real_i) {
+       ija(real_i) += change;
+     }
+   }
+
+
+   /*
+    * Like move_right, but also involving a resize. This updates row sizes as well. This version also takes a plan for
+    * multiple rows, and tries to do them all in one copy. It's used for multi-row slice-setting.
+    *
+    * This also differs from update_resize_move in that it resizes to the exact requested size instead of reserving space.
+    */
+   void update_resize_move_insert(size_t real_i, size_t real_j, size_t* lengths, D* const v, size_t v_size, multi_row_insertion_plan p) {
+     size_t sz = size(); // current size of the storage vectors
+     size_t new_cap = sz + p.total_change;
+
+     if (new_cap > real_max_size()) {
+       NM_FREE(v);
+       rb_raise(rb_eStandardError, "resize caused by insertion of size %d (on top of current size %lu) would have caused yale matrix size to exceed its maximum (%lu)", p.total_change, sz, real_max_size());
+     }
+
+     if (s->dtype == nm::RUBYOBJ) {
+       nm_register_values(reinterpret_cast<VALUE*>(v), v_size);
+     }
+
+     size_t* new_ija = NM_ALLOC_N( size_t, new_cap );
+     D* new_a = NM_ALLOC_N( D, new_cap );
+
+     // Copy unchanged row pointers first.
+     size_t m = 0;
+     for (; m <= real_i; ++m) {
+       new_ija[m] = ija(m);
+       new_a[m] = a(m);
+     }
+
+     // Now copy unchanged locations in IJA and A.
+     size_t q = real_shape(0)+1; // q is the copy-to position.
+     size_t r = real_shape(0)+1; // r is the copy-from position.
+     for (; r < p.pos[0]; ++r, ++q) {
+       new_ija[q] = ija(r);
+       new_a[q] = a(r);
+     }
+
+     // For each pos and change in the slice, copy the information prior to the insertion point. Then insert the necessary
+     // information.
+     size_t v_offset = 0;
+     int accum = 0; // keep track of the total change as we go so we can update row information.
+     for (size_t i = 0; i < lengths[0]; ++i, ++m) {
+       for (; r < p.pos[i]; ++r, ++q) {
+         new_ija[q] = ija(r);
+         new_a[q] = a(r);
+       }
+
+       // Insert slice data for a single row.
+       for (size_t j = 0; j < lengths[1]; ++j, ++v_offset) {
+         if (v_offset >= v_size) v_offset %= v_size;
+
+         if (j + real_j == i + real_i) { // modify diagonal
+           new_a[real_i + i] = v[v_offset];
+         } else if (v[v_offset] != const_default_obj()) {
+           new_ija[q] = j + real_j;
+           new_a[q] = v[v_offset];
+           ++q; // move on to next q location
+         }
+
+         if (r < ija(real_shape(0)) && ija(r) == j + real_j) ++r; // move r forward if the column matches.
+       }
+
+       // Update the row pointer for the current row.
+       accum += p.change[i];
+       new_ija[m] = ija(m) + accum;
+       new_a[m] = a(m); // copy diagonal for this row
+     }
+
+     // Now copy everything subsequent to the last insertion point.
+     for (; r < size(); ++r, ++q) {
+       new_ija[q] = ija(r);
+       new_a[q] = a(r);
+     }
+
+     // Update the remaining row pointers and copy remaining diagonals
+     for (; m <= real_shape(0); ++m) {
+       new_ija[m] = ija(m) + accum;
+       new_a[m] = a(m);
+     }
+
+     s->capacity = new_cap;
+
+     NM_FREE(s->ija);
+     NM_FREE(s->a);
+
+     if (s->dtype == nm::RUBYOBJ) {
+       nm_unregister_values(reinterpret_cast<VALUE*>(v), v_size);
+     }
+
+     s->ija = new_ija;
+     s->a = reinterpret_cast<void*>(new_a);
+   }
+
+
+
+
+   /*
+    * Like move_right, but also involving a resize. This updates row sizes as well.
+    */
+   void update_resize_move(row_stored_nd_iterator position, size_t real_i, int n) {
+     size_t sz = size(); // current size of the storage vectors
+     size_t new_cap = n > 0 ? capacity() * nm::yale_storage::GROWTH_CONSTANT
+                            : capacity() / nm::yale_storage::GROWTH_CONSTANT;
+     size_t max_cap = real_max_size();
+
+     if (new_cap > max_cap) {
+       new_cap = max_cap;
+       if (sz + n > max_cap)
+         rb_raise(rb_eStandardError, "resize caused by insertion/deletion of size %d (on top of current size %lu) would have caused yale matrix size to exceed its maximum (%lu)", n, sz, real_max_size());
+     }
+
+     if (new_cap < sz + n) new_cap = sz + n;
+
+     size_t* new_ija = NM_ALLOC_N( size_t, new_cap );
+     D* new_a = NM_ALLOC_N( D, new_cap );
+
+     // Copy unchanged row pointers first.
+     for (size_t m = 0; m <= real_i; ++m) {
+       new_ija[m] = ija(m);
+       new_a[m] = a(m);
+     }
+
+     // Now update row pointers following the changed row as we copy the additional values.
+     for (size_t m = real_i + 1; m <= real_shape(0); ++m) {
+       new_ija[m] = ija(m) + n;
+       new_a[m] = a(m);
+     }
+
+     // Copy all remaining prior to insertion/removal site
+     for (size_t m = real_shape(0) + 1; m < position.p(); ++m) {
+       new_ija[m] = ija(m);
+       new_a[m] = a(m);
+     }
+
+     // Copy all subsequent to insertion/removal site
+     size_t m = position.p();
+     if (n < 0) m -= n;
+
+     for (; m < sz; ++m) {
+       new_ija[m+n] = ija(m);
+       new_a[m+n] = a(m);
+     }
+
+     if (s->dtype == nm::RUBYOBJ) {
+       nm_yale_storage_register_a(new_a, new_cap);
+     }
+
+     s->capacity = new_cap;
+
+     NM_FREE(s->ija);
+     NM_FREE(s->a);
+
+     if (s->dtype == nm::RUBYOBJ) {
+       nm_yale_storage_unregister_a(new_a, new_cap);
+     }
+
+     s->ija = new_ija;
+     s->a = reinterpret_cast<void*>(new_a);
+   }
+
+
+   /*
+    * Move elements in the IJA and A arrays by n (to the right).
+    * Does not update row sizes.
+    */
+   void move_right(row_stored_nd_iterator position, size_t n) {
+     size_t sz = size();
+     for (size_t m = 0; m < sz - position.p(); ++m) {
+       ija(sz+n-1-m) = ija(sz-1-m);
+       a(sz+n-1-m) = a(sz-1-m);
+     }
+   }
+
+   /*
+    * Move elements in the IJA and A arrays by n (to the left). Here position gives
+    * the location to move to, and they should come from n to the right.
+    */
+   void move_left(row_stored_nd_iterator position, size_t n) {
+     size_t sz = size();
+     for (size_t m = position.p() + n; m < sz; ++m) { // walk forward, shifting each element left by n
+       ija(m-n) = ija(m);
+       a(m-n) = a(m);
+     }
+   }
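// Worked example on the layout sketch (editor's annotation, size() == 8):
// move_right at p() == 6 with n == 1 copies positions 6..7 up to 7..8, opening
// one slot at position 6; move_left at p() == 6 with n == 1 copies position 7
// down to 6, closing it. Row pointers are then fixed up separately (see
// update_real_row_sizes_from above).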
+
+   YALE_STORAGE* s;
+   bool slice;
+   size_t* slice_shape;
+   size_t* slice_offset;
+ };
+
+ } // end of nm namespace
+
+ #endif // YALE_CLASS_H