pnmatrix 1.2.4

Files changed (111)
  1. checksums.yaml +7 -0
  2. data/ext/nmatrix/binary_format.txt +53 -0
  3. data/ext/nmatrix/data/complex.h +388 -0
  4. data/ext/nmatrix/data/data.cpp +274 -0
  5. data/ext/nmatrix/data/data.h +651 -0
  6. data/ext/nmatrix/data/meta.h +64 -0
  7. data/ext/nmatrix/data/ruby_object.h +386 -0
  8. data/ext/nmatrix/extconf.rb +70 -0
  9. data/ext/nmatrix/math/asum.h +99 -0
  10. data/ext/nmatrix/math/cblas_enums.h +36 -0
  11. data/ext/nmatrix/math/cblas_templates_core.h +507 -0
  12. data/ext/nmatrix/math/gemm.h +241 -0
  13. data/ext/nmatrix/math/gemv.h +178 -0
  14. data/ext/nmatrix/math/getrf.h +255 -0
  15. data/ext/nmatrix/math/getrs.h +121 -0
  16. data/ext/nmatrix/math/imax.h +82 -0
  17. data/ext/nmatrix/math/laswp.h +165 -0
  18. data/ext/nmatrix/math/long_dtype.h +62 -0
  19. data/ext/nmatrix/math/magnitude.h +54 -0
  20. data/ext/nmatrix/math/math.h +751 -0
  21. data/ext/nmatrix/math/nrm2.h +165 -0
  22. data/ext/nmatrix/math/rot.h +117 -0
  23. data/ext/nmatrix/math/rotg.h +106 -0
  24. data/ext/nmatrix/math/scal.h +71 -0
  25. data/ext/nmatrix/math/trsm.h +336 -0
  26. data/ext/nmatrix/math/util.h +162 -0
  27. data/ext/nmatrix/math.cpp +1368 -0
  28. data/ext/nmatrix/nm_memory.h +60 -0
  29. data/ext/nmatrix/nmatrix.cpp +285 -0
  30. data/ext/nmatrix/nmatrix.h +476 -0
  31. data/ext/nmatrix/ruby_constants.cpp +151 -0
  32. data/ext/nmatrix/ruby_constants.h +106 -0
  33. data/ext/nmatrix/ruby_nmatrix.c +3130 -0
  34. data/ext/nmatrix/storage/common.cpp +77 -0
  35. data/ext/nmatrix/storage/common.h +183 -0
  36. data/ext/nmatrix/storage/dense/dense.cpp +1096 -0
  37. data/ext/nmatrix/storage/dense/dense.h +129 -0
  38. data/ext/nmatrix/storage/list/list.cpp +1628 -0
  39. data/ext/nmatrix/storage/list/list.h +138 -0
  40. data/ext/nmatrix/storage/storage.cpp +730 -0
  41. data/ext/nmatrix/storage/storage.h +99 -0
  42. data/ext/nmatrix/storage/yale/class.h +1139 -0
  43. data/ext/nmatrix/storage/yale/iterators/base.h +143 -0
  44. data/ext/nmatrix/storage/yale/iterators/iterator.h +131 -0
  45. data/ext/nmatrix/storage/yale/iterators/row.h +450 -0
  46. data/ext/nmatrix/storage/yale/iterators/row_stored.h +140 -0
  47. data/ext/nmatrix/storage/yale/iterators/row_stored_nd.h +169 -0
  48. data/ext/nmatrix/storage/yale/iterators/stored_diagonal.h +124 -0
  49. data/ext/nmatrix/storage/yale/math/transpose.h +110 -0
  50. data/ext/nmatrix/storage/yale/yale.cpp +2074 -0
  51. data/ext/nmatrix/storage/yale/yale.h +203 -0
  52. data/ext/nmatrix/types.h +55 -0
  53. data/ext/nmatrix/util/io.cpp +279 -0
  54. data/ext/nmatrix/util/io.h +115 -0
  55. data/ext/nmatrix/util/sl_list.cpp +627 -0
  56. data/ext/nmatrix/util/sl_list.h +144 -0
  57. data/ext/nmatrix/util/util.h +78 -0
  58. data/lib/nmatrix/blas.rb +378 -0
  59. data/lib/nmatrix/cruby/math.rb +744 -0
  60. data/lib/nmatrix/enumerate.rb +253 -0
  61. data/lib/nmatrix/homogeneous.rb +241 -0
  62. data/lib/nmatrix/io/fortran_format.rb +138 -0
  63. data/lib/nmatrix/io/harwell_boeing.rb +221 -0
  64. data/lib/nmatrix/io/market.rb +263 -0
  65. data/lib/nmatrix/io/point_cloud.rb +189 -0
  66. data/lib/nmatrix/jruby/decomposition.rb +24 -0
  67. data/lib/nmatrix/jruby/enumerable.rb +13 -0
  68. data/lib/nmatrix/jruby/error.rb +4 -0
  69. data/lib/nmatrix/jruby/math.rb +501 -0
  70. data/lib/nmatrix/jruby/nmatrix_java.rb +840 -0
  71. data/lib/nmatrix/jruby/operators.rb +283 -0
  72. data/lib/nmatrix/jruby/slice.rb +264 -0
  73. data/lib/nmatrix/lapack_core.rb +181 -0
  74. data/lib/nmatrix/lapack_plugin.rb +44 -0
  75. data/lib/nmatrix/math.rb +953 -0
  76. data/lib/nmatrix/mkmf.rb +100 -0
  77. data/lib/nmatrix/monkeys.rb +137 -0
  78. data/lib/nmatrix/nmatrix.rb +1172 -0
  79. data/lib/nmatrix/rspec.rb +75 -0
  80. data/lib/nmatrix/shortcuts.rb +1163 -0
  81. data/lib/nmatrix/version.rb +39 -0
  82. data/lib/nmatrix/yale_functions.rb +118 -0
  83. data/lib/nmatrix.rb +28 -0
  84. data/spec/00_nmatrix_spec.rb +892 -0
  85. data/spec/01_enum_spec.rb +196 -0
  86. data/spec/02_slice_spec.rb +407 -0
  87. data/spec/03_nmatrix_monkeys_spec.rb +80 -0
  88. data/spec/2x2_dense_double.mat +0 -0
  89. data/spec/4x4_sparse.mat +0 -0
  90. data/spec/4x5_dense.mat +0 -0
  91. data/spec/blas_spec.rb +215 -0
  92. data/spec/elementwise_spec.rb +311 -0
  93. data/spec/homogeneous_spec.rb +100 -0
  94. data/spec/io/fortran_format_spec.rb +88 -0
  95. data/spec/io/harwell_boeing_spec.rb +98 -0
  96. data/spec/io/test.rua +9 -0
  97. data/spec/io_spec.rb +159 -0
  98. data/spec/lapack_core_spec.rb +482 -0
  99. data/spec/leakcheck.rb +16 -0
  100. data/spec/math_spec.rb +1363 -0
  101. data/spec/nmatrix_yale_resize_test_associations.yaml +2802 -0
  102. data/spec/nmatrix_yale_spec.rb +286 -0
  103. data/spec/rspec_monkeys.rb +56 -0
  104. data/spec/rspec_spec.rb +35 -0
  105. data/spec/shortcuts_spec.rb +474 -0
  106. data/spec/slice_set_spec.rb +162 -0
  107. data/spec/spec_helper.rb +172 -0
  108. data/spec/stat_spec.rb +214 -0
  109. data/spec/test.pcd +20 -0
  110. data/spec/utm5940.mtx +83844 -0
  111. metadata +295 -0
@@ -0,0 +1,1628 @@
+ /////////////////////////////////////////////////////////////////////
+ // = NMatrix
+ //
+ // A linear algebra library for scientific computation in Ruby.
+ // NMatrix is part of SciRuby.
+ //
+ // NMatrix was originally inspired by and derived from NArray, by
+ // Masahiro Tanaka: http://narray.rubyforge.org
+ //
+ // == Copyright Information
+ //
+ // SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
+ // NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
+ //
+ // Please see LICENSE.txt for additional copyright notices.
+ //
+ // == Contributing
+ //
+ // By contributing source code to SciRuby, you agree to be bound by
+ // our Contributor Agreement:
+ //
+ // * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
+ //
+ // == list.cpp
+ //
+ // List-of-lists n-dimensional matrix storage. Uses singly-linked
+ // lists.
+ /*
+ * Standard Includes
+ */
+
+ #include <ruby.h>
+ #include <algorithm> // std::min
+ #include <iostream>
+ #include <vector>
+ #include <list>
+
+ /*
+ * Project Includes
+ */
+
+ #include "../../types.h"
+
+ #include "../../data/data.h"
+
+ #include "../dense/dense.h"
+ #include "../common.h"
+ #include "list.h"
+
+ #include "../../math/math.h"
+ #include "../../util/sl_list.h"
+
+ /*
+ * Macros
+ */
+
+ /*
+ * Global Variables
+ */
+
+
+ extern "C" {
+ static void slice_set_single(LIST_STORAGE* dest, LIST* l, void* val, size_t* coords, size_t* lengths, size_t n);
+ static void __nm_list_storage_unregister_temp_value_list(std::list<VALUE*>& temp_vals);
+ static void __nm_list_storage_unregister_temp_list_list(std::list<LIST*>& temp_vals, size_t recursions);
+ }
+
+ namespace nm { namespace list_storage {
+
+ /*
+ * Forward Declarations
+ */
+
+ class RecurseData {
+ public:
+ // Note that providing init_obj argument does not override init.
+ RecurseData(const LIST_STORAGE* s, VALUE init_obj__ = Qnil) : ref(s), actual(s), shape_(s->shape), offsets(s->dim, 0), init_(s->default_val), init_obj_(init_obj__) {
+ while (actual->src != actual) {
+ for (size_t i = 0; i < s->dim; ++i) // update offsets as we recurse
+ offsets[i] += actual->offset[i];
+ actual = reinterpret_cast<LIST_STORAGE*>(actual->src);
+ }
+ nm_list_storage_register(actual);
+ nm_list_storage_register(ref);
+ actual_shape_ = actual->shape;
+
+ if (init_obj_ == Qnil) {
+ init_obj_ = s->dtype == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(s->default_val) : nm::rubyobj_from_cval(s->default_val, s->dtype).rval;
+ }
+ nm_register_value(&init_obj_);
+ }
+
+ ~RecurseData() {
+ nm_unregister_value(&init_obj_);
+ nm_list_storage_unregister(ref);
+ nm_list_storage_unregister(actual);
+ }
+
+ dtype_t dtype() const { return ref->dtype; }
+
+
+ size_t dim() const { return ref->dim; }
+
+ size_t ref_shape(size_t rec) const {
+ return shape_[ref->dim - rec - 1];
+ }
+
+ size_t* copy_alloc_shape() const {
+ size_t* new_shape = NM_ALLOC_N(size_t, ref->dim);
+ memcpy(new_shape, shape_, sizeof(size_t)*ref->dim);
+ return new_shape;
+ }
+
+ size_t actual_shape(size_t rec) const {
+ return actual_shape_[actual->dim - rec - 1];
+ }
+
+ size_t offset(size_t rec) const {
+ return offsets[ref->dim - rec - 1];
+ }
+
+ void* init() const {
+ return init_;
+ }
+
+ VALUE init_obj() const { return init_obj_; }
+
+ LIST* top_level_list() const {
+ return reinterpret_cast<LIST*>(actual->rows);
+ }
+
+ const LIST_STORAGE* ref;
+ const LIST_STORAGE* actual;
+
+ size_t* shape_; // of ref
+ size_t* actual_shape_;
+ protected:
+ std::vector<size_t> offsets; // relative to actual
+ void* init_;
+ VALUE init_obj_;
+
+ };
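+ // A note on the `rec` convention used throughout this file: recursion depth
+ // counts down from dim-1 (the outermost rows) to 0 (the innermost values),
+ // while the shape/offset arrays are indexed outermost-first. Hence the
+ // accessors above flip the index: for a 3-dimensional reference,
+ // ref_shape(2) == shape_[0] (top level) and ref_shape(0) == shape_[2]
+ // (innermost level).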
+
+
+ template <typename LDType, typename RDType>
+ static LIST_STORAGE* cast_copy(const LIST_STORAGE* rhs, nm::dtype_t new_dtype);
+
+ template <typename LDType, typename RDType>
+ static bool eqeq_r(RecurseData& left, RecurseData& right, const LIST* l, const LIST* r, size_t rec);
+
+ template <typename SDType, typename TDType>
+ static bool eqeq_empty_r(RecurseData& s, const LIST* l, size_t rec, const TDType* t_init);
+
+ /*
+ * Recursive helper for map_merged_stored_r which handles the case where one list is empty and the other is not.
+ */
+ static void map_empty_stored_r(RecurseData& result, RecurseData& s, LIST* x, const LIST* l, size_t rec, bool rev, const VALUE& t_init) {
+ if (s.dtype() == nm::RUBYOBJ) {
+ nm_list_storage_register_list(l, rec);
+ }
+ if (result.dtype() == nm::RUBYOBJ) {
+ nm_list_storage_register_list(x, rec);
+ }
+
+ NODE *curr = l->first,
+ *xcurr = NULL;
+
+ // For reference matrices, make sure we start in the correct place.
+ size_t offset = s.offset(rec);
+ size_t x_shape = s.ref_shape(rec);
+
+ while (curr && curr->key < offset) { curr = curr->next; }
+ if (curr && curr->key - offset >= x_shape) curr = NULL;
+
+ if (rec) {
+ std::list<LIST*> temp_vals;
+ while (curr) {
+ LIST* val = nm::list::create();
+ map_empty_stored_r(result, s, val, reinterpret_cast<const LIST*>(curr->val), rec-1, rev, t_init);
+
+ if (!val->first) nm::list::del(val, 0);
+ else {
+ nm_list_storage_register_list(val, rec-1);
+ temp_vals.push_front(val);
+ nm::list::insert_helper(x, xcurr, curr->key - offset, val);
+ }
+ curr = curr->next;
+ if (curr && curr->key - offset >= x_shape) curr = NULL;
+ }
+ __nm_list_storage_unregister_temp_list_list(temp_vals, rec-1);
+ } else {
+ std::list<VALUE*> temp_vals;
+ while (curr) {
+ VALUE val, s_val;
+ if (s.dtype() == nm::RUBYOBJ) s_val = (*reinterpret_cast<nm::RubyObject*>(curr->val)).rval;
+ else s_val = nm::rubyobj_from_cval(curr->val, s.dtype()).rval;
+
+ if (rev) val = rb_yield_values(2, t_init, s_val);
+ else val = rb_yield_values(2, s_val, t_init);
+
+ nm_register_value(&val);
+
+ if (rb_funcall(val, rb_intern("!="), 1, result.init_obj()) == Qtrue) {
+ xcurr = nm::list::insert_helper(x, xcurr, curr->key - offset, val);
+ temp_vals.push_front(reinterpret_cast<VALUE*>(xcurr->val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(xcurr->val));
+ }
+ nm_unregister_value(&val);
+
+ curr = curr->next;
+ if (curr && curr->key - offset >= x_shape) curr = NULL;
+ }
+ __nm_list_storage_unregister_temp_value_list(temp_vals);
+ }
+
+ if (s.dtype() == nm::RUBYOBJ) {
+ nm_list_storage_unregister_list(l, rec);
+ }
+ if (result.dtype() == nm::RUBYOBJ) {
+ nm_list_storage_unregister_list(x, rec);
+ }
+
+ }
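+ // The `rev` flag above exists because this helper is invoked with the stored
+ // and empty operands in either order: when rev is false the stored value is
+ // yielded first, as (stored, t_init); when rev is true the order flips, so
+ // the user's block always receives its arguments as (left, right).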
+
+
+ /*
+ * Recursive helper function for nm_list_map_stored
+ */
+ static void map_stored_r(RecurseData& result, RecurseData& left, LIST* x, const LIST* l, size_t rec) {
+ if (left.dtype() == nm::RUBYOBJ) {
+ nm_list_storage_register_list(l, rec);
+ }
+ if (result.dtype() == nm::RUBYOBJ) {
+ nm_list_storage_register_list(x, rec);
+ }
+ NODE *lcurr = l->first,
+ *xcurr = x->first;
+
+ // For reference matrices, make sure we start in the correct place.
+ while (lcurr && lcurr->key < left.offset(rec)) { lcurr = lcurr->next; }
+
+ if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec)) lcurr = NULL;
+
+ if (rec) {
+ std::list<LIST*> temp_vals;
+ while (lcurr) {
+ size_t key;
+ LIST* val = nm::list::create();
+ map_stored_r(result, left, val, reinterpret_cast<const LIST*>(lcurr->val), rec-1);
+ key = lcurr->key - left.offset(rec);
+ lcurr = lcurr->next;
+
+ if (!val->first) nm::list::del(val, 0); // empty list -- don't insert
+ else {
+ nm_list_storage_register_list(val, rec-1);
+ temp_vals.push_front(val);
+ xcurr = nm::list::insert_helper(x, xcurr, key, val);
+ }
+ if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec)) lcurr = NULL;
+ }
+ __nm_list_storage_unregister_temp_list_list(temp_vals, rec-1);
+ } else {
+ std::list<VALUE*> temp_vals;
+ while (lcurr) {
+ size_t key;
+ VALUE val;
+
+ val = rb_yield_values(1, left.dtype() == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(lcurr->val) : nm::rubyobj_from_cval(lcurr->val, left.dtype()).rval);
+ key = lcurr->key - left.offset(rec);
+ lcurr = lcurr->next;
+
+ if (!rb_equal(val, result.init_obj())) {
+ xcurr = nm::list::insert_helper(x, xcurr, key, val);
+ temp_vals.push_front(reinterpret_cast<VALUE*>(xcurr->val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(xcurr->val));
+ }
+
+ if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec)) lcurr = NULL;
+ }
+ __nm_list_storage_unregister_temp_value_list(temp_vals);
+ }
+
+ if (left.dtype() == nm::RUBYOBJ) {
+ nm_list_storage_unregister_list(l, rec);
+ }
+ if (result.dtype() == nm::RUBYOBJ) {
+ nm_list_storage_unregister_list(x, rec);
+ }
+ }
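+ // Note that map_stored_r only inserts a yielded value when it differs from
+ // the result's default (the rb_equal check above); results equal to the
+ // default stay unstored, which is what keeps the output matrix sparse.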
+
+
+
+ /*
+ * Recursive helper function for nm_list_map_merged_stored
+ */
+ static void map_merged_stored_r(RecurseData& result, RecurseData& left, RecurseData& right, LIST* x, const LIST* l, const LIST* r, size_t rec) {
+ if (left.dtype() == nm::RUBYOBJ) {
+ nm_list_storage_register_list(l, rec);
+ }
+ if (right.dtype() == nm::RUBYOBJ) {
+ nm_list_storage_register_list(r, rec);
+ }
+ if (result.dtype() == nm::RUBYOBJ) {
+ nm_list_storage_register_list(x, rec);
+ }
+
+
+ NODE *lcurr = l->first,
+ *rcurr = r->first,
+ *xcurr = x->first;
+
+ // For reference matrices, make sure we start in the correct place.
+ while (lcurr && lcurr->key < left.offset(rec)) { lcurr = lcurr->next; }
+ while (rcurr && rcurr->key < right.offset(rec)) { rcurr = rcurr->next; }
+
+ if (rcurr && rcurr->key - right.offset(rec) >= result.ref_shape(rec)) rcurr = NULL;
+ if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec)) lcurr = NULL;
+
+ if (rec) {
+ std::list<LIST*> temp_vals;
+ while (lcurr || rcurr) {
+ size_t key;
+ LIST* val = nm::list::create();
+
+ if (!rcurr || (lcurr && (lcurr->key - left.offset(rec) < rcurr->key - right.offset(rec)))) {
+ map_empty_stored_r(result, left, val, reinterpret_cast<const LIST*>(lcurr->val), rec-1, false, right.init_obj());
+ key = lcurr->key - left.offset(rec);
+ lcurr = lcurr->next;
+ } else if (!lcurr || (rcurr && (rcurr->key - right.offset(rec) < lcurr->key - left.offset(rec)))) {
+ map_empty_stored_r(result, right, val, reinterpret_cast<const LIST*>(rcurr->val), rec-1, true, left.init_obj());
+ key = rcurr->key - right.offset(rec);
+ rcurr = rcurr->next;
+ } else { // == and both present
+ map_merged_stored_r(result, left, right, val, reinterpret_cast<const LIST*>(lcurr->val), reinterpret_cast<const LIST*>(rcurr->val), rec-1);
+ key = lcurr->key - left.offset(rec);
+ lcurr = lcurr->next;
+ rcurr = rcurr->next;
+ }
+
+
+ if (!val->first) nm::list::del(val, 0); // empty list -- don't insert
+ else {
+ nm_list_storage_register_list(val, rec-1);
+ temp_vals.push_front(val);
+ xcurr = nm::list::insert_helper(x, xcurr, key, val);
+ }
+ if (rcurr && rcurr->key - right.offset(rec) >= result.ref_shape(rec)) rcurr = NULL;
+ if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec)) lcurr = NULL;
+ }
+ __nm_list_storage_unregister_temp_list_list(temp_vals, rec-1);
+ } else {
+ std::list<VALUE*> temp_vals;
+ while (lcurr || rcurr) {
+ size_t key;
+ VALUE val;
+
+ if (!rcurr || (lcurr && (lcurr->key - left.offset(rec) < rcurr->key - right.offset(rec)))) {
+ val = rb_yield_values(2, nm::rubyobj_from_cval(lcurr->val, left.dtype()).rval, right.init_obj());
+ key = lcurr->key - left.offset(rec);
+ lcurr = lcurr->next;
+ } else if (!lcurr || (rcurr && (rcurr->key - right.offset(rec) < lcurr->key - left.offset(rec)))) {
+ val = rb_yield_values(2, left.init_obj(), nm::rubyobj_from_cval(rcurr->val, right.dtype()).rval);
+ key = rcurr->key - right.offset(rec);
+ rcurr = rcurr->next;
+ } else { // == and both present
+ val = rb_yield_values(2, nm::rubyobj_from_cval(lcurr->val, left.dtype()).rval, nm::rubyobj_from_cval(rcurr->val, right.dtype()).rval);
+ key = lcurr->key - left.offset(rec);
+ lcurr = lcurr->next;
+ rcurr = rcurr->next;
+ }
+
+ nm_register_value(&val);
+
+ if (rb_funcall(val, rb_intern("!="), 1, result.init_obj()) == Qtrue) {
+ xcurr = nm::list::insert_helper(x, xcurr, key, val);
+ temp_vals.push_front(reinterpret_cast<VALUE*>(xcurr->val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(xcurr->val));
+ }
+
+ nm_unregister_value(&val);
+
+ if (rcurr && rcurr->key - right.offset(rec) >= result.ref_shape(rec)) rcurr = NULL;
+ if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec)) lcurr = NULL;
+ }
+ __nm_list_storage_unregister_temp_value_list(temp_vals);
+ }
+
+ if (left.dtype() == nm::RUBYOBJ) {
+ nm_list_storage_unregister_list(l, rec);
+ }
+ if (right.dtype() == nm::RUBYOBJ) {
+ nm_list_storage_unregister_list(r, rec);
+ }
+ if (result.dtype() == nm::RUBYOBJ) {
+ nm_list_storage_unregister_list(x, rec);
+ }
+ }
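+ // map_merged_stored_r walks the two key-sorted node lists the way merge sort
+ // merges runs: whichever side has the smaller (offset-adjusted) key is
+ // consumed alone against the other matrix's default value, equal keys are
+ // consumed together, and each branch advances only the pointers it used.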
+
+
+ /*
+ * Recursive function, sets multiple values in a matrix from multiple source values. Also handles removal; returns true
+ * if the recursion results in an empty list at that level (which signals that the current parent should be removed).
+ */
+ template <typename D>
+ static bool slice_set(LIST_STORAGE* dest, LIST* l, size_t* coords, size_t* lengths, size_t n, D* v, size_t v_size, size_t& v_offset) {
+ using nm::list::node_is_within_slice;
+ using nm::list::remove_by_node;
+ using nm::list::find_preceding_from_list;
+ using nm::list::insert_first_list;
+ using nm::list::insert_first_node;
+ using nm::list::insert_after;
+ size_t* offsets = dest->offset;
+
+ nm_list_storage_register(dest);
+ if (dest->dtype == nm::RUBYOBJ) {
+ nm_register_values(reinterpret_cast<VALUE*>(v), v_size);
+ nm_list_storage_register_list(l, dest->dim - n - 1);
+ }
+
+ // drill down into the structure
+ NODE* prev = find_preceding_from_list(l, coords[n] + offsets[n]);
+ NODE* node = NULL;
+ if (prev) node = prev->next && node_is_within_slice(prev->next, coords[n] + offsets[n], lengths[n]) ? prev->next : NULL;
+ else node = node_is_within_slice(l->first, coords[n] + offsets[n], lengths[n]) ? l->first : NULL;
+
+ if (dest->dim - n > 1) {
+ size_t i = 0;
+ size_t key = i + offsets[n] + coords[n];
+
+ // Make sure we have an element to work with
+ if (!node) {
+ if (!prev) {
+ node = insert_first_list(l, key, nm::list::create());
+ } else {
+ node = insert_after(prev, key, nm::list::create());
+ }
+ }
+
+ // At this point, it's guaranteed that there is a list here matching key.
+ std::list<LIST*> temp_lists;
+ while (node) {
+ // Recurse down into the list. If it returns true, it's empty, so we need to delete it.
+ bool remove_parent = slice_set(dest, reinterpret_cast<LIST*>(node->val), coords, lengths, n+1, v, v_size, v_offset);
+ if (dest->dtype == nm::RUBYOBJ) {
+ temp_lists.push_front(reinterpret_cast<LIST*>(node->val));
+ nm_list_storage_register_list(reinterpret_cast<LIST*>(node->val), dest->dim - n - 2);
+ }
+ if (remove_parent) {
+ NM_FREE(remove_by_node(l, prev, node));
+ if (prev) node = prev->next ? prev->next : NULL;
+ else node = l->first ? l->first : NULL;
+ } else { // move forward
+ prev = node;
+ node = node_is_within_slice(prev->next, key-i, lengths[n]) ? prev->next : NULL;
+ }
+
+ ++i; ++key;
+
+ if (i >= lengths[n]) break;
+
+ // Now do we need to insert another node here? Or is there already one?
+ if (!node) {
+ if (!prev) {
+ node = insert_first_list(l, key, nm::list::create());
+ } else {
+ node = insert_after(prev, key, nm::list::create());
+ }
+ }
+ }
+ __nm_list_storage_unregister_temp_list_list(temp_lists, dest->dim - n - 2);
+
+ } else {
+
+ size_t i = 0;
+ size_t key = i + offsets[n] + coords[n];
+ std::list<VALUE*> temp_vals;
+ while (i < lengths[n]) {
+ // Make sure we have an element to work with
+ if (v_offset >= v_size) v_offset %= v_size;
+
+ if (node) {
+ if (node->key == key) {
+ if (v[v_offset] == *reinterpret_cast<D*>(dest->default_val)) { // remove zero value
+
+ NM_FREE(remove_by_node(l, (prev ? prev : l->first), node));
+
+ if (prev) node = prev->next ? prev->next : NULL;
+ else node = l->first ? l->first : NULL;
+
+ } else { // edit directly
+ *reinterpret_cast<D*>(node->val) = v[v_offset];
+ prev = node;
+ node = node->next ? node->next : NULL;
+ }
+ } else if (node->key > key) {
+ D* nv = NM_ALLOC(D); *nv = v[v_offset++];
+ if (dest->dtype == nm::RUBYOBJ) {
+ nm_register_value(&*reinterpret_cast<VALUE*>(nv));
+ temp_vals.push_front(reinterpret_cast<VALUE*>(nv));
+ }
+
+ if (prev) node = insert_after(prev, key, nv);
+ else node = insert_first_node(l, key, nv, sizeof(D));
+
+ prev = node;
+ node = prev->next ? prev->next : NULL;
+ }
+ } else { // no node -- insert a new one
+ D* nv = NM_ALLOC(D); *nv = v[v_offset++];
+ if (dest->dtype == nm::RUBYOBJ) {
+ nm_register_value(&*reinterpret_cast<VALUE*>(nv));
+ temp_vals.push_front(reinterpret_cast<VALUE*>(nv));
+ }
+ if (prev) node = insert_after(prev, key, nv);
+ else node = insert_first_node(l, key, nv, sizeof(D));
+
+ prev = node;
+ node = prev->next ? prev->next : NULL;
+ }
+
+ ++i; ++key;
+ }
+ __nm_list_storage_unregister_temp_value_list(temp_vals);
+ }
+
+ if (dest->dtype == nm::RUBYOBJ) {
+ nm_unregister_values(reinterpret_cast<VALUE*>(v), v_size);
+ nm_list_storage_unregister_list(l, dest->dim - n - 1);
+ }
+ nm_list_storage_unregister(dest);
+
+ return (l->first) ? false : true;
+ }
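+ // The `v_offset %= v_size` wraparound above means the source values are
+ // recycled whenever the slice has more cells than v supplies -- e.g.
+ // assigning a two-element array to a 2x2 slice writes the pair twice.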
+
+
+ template <typename D>
+ void set(VALUE left, SLICE* slice, VALUE right) {
+ NM_CONSERVATIVE(nm_register_value(&left));
+ NM_CONSERVATIVE(nm_register_value(&right));
+ LIST_STORAGE* s = NM_STORAGE_LIST(left);
+
+ std::pair<NMATRIX*,bool> nm_and_free =
+ interpret_arg_as_dense_nmatrix(right, NM_DTYPE(left));
+
+ // Map the data onto D* v.
+ D* v;
+ size_t v_size = 1;
+
+ if (nm_and_free.first) {
+ DENSE_STORAGE* t = reinterpret_cast<DENSE_STORAGE*>(nm_and_free.first->storage);
+ v = reinterpret_cast<D*>(t->elements);
+ v_size = nm_storage_count_max_elements(t);
+
+ } else if (RB_TYPE_P(right, T_ARRAY)) {
+ nm_register_nmatrix(nm_and_free.first);
+ v_size = RARRAY_LEN(right);
+ v = NM_ALLOC_N(D, v_size);
+ if (NM_DTYPE(left) == nm::RUBYOBJ)
+ nm_register_values(reinterpret_cast<VALUE*>(v), v_size);
+
+ for (size_t m = 0; m < v_size; ++m) {
+ rubyval_to_cval(rb_ary_entry(right, m), s->dtype, &(v[m]));
+ }
+ if (NM_DTYPE(left) == nm::RUBYOBJ)
+ nm_unregister_values(reinterpret_cast<VALUE*>(v), v_size);
+
+ } else {
+ nm_register_nmatrix(nm_and_free.first);
+ v = reinterpret_cast<D*>(rubyobj_to_cval(right, NM_DTYPE(left)));
+ }
+
+ if (v_size == 1 && *v == *reinterpret_cast<D*>(s->default_val)) {
+ if (*reinterpret_cast<D*>(nm_list_storage_get(s, slice)) != *reinterpret_cast<D*>(s->default_val)) {
+ nm::list::remove_recursive(s->rows, slice->coords, s->offset, slice->lengths, 0, s->dim);
+ }
+ } else if (slice->single) {
+ slice_set_single(s, s->rows, reinterpret_cast<void*>(v), slice->coords, slice->lengths, 0);
+ } else {
+ size_t v_offset = 0;
+ slice_set<D>(s, s->rows, slice->coords, slice->lengths, 0, v, v_size, v_offset);
+ }
+
+
+ // Only free v if it was allocated in this function.
+ if (nm_and_free.first) {
+ if (nm_and_free.second) {
+ nm_delete(nm_and_free.first);
+ }
+ } else {
+ NM_FREE(v);
+ nm_unregister_nmatrix(nm_and_free.first);
+ }
+ NM_CONSERVATIVE(nm_unregister_value(&left));
+ NM_CONSERVATIVE(nm_unregister_value(&right));
+ }
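+ // From Ruby, this template backs []= for list matrices. A minimal usage
+ // sketch (illustrative values; constructor arguments per the gem's docs):
+ //
+ //   m = NMatrix.new([3, 3], 0, stype: :list, dtype: :int64)
+ //   m[0, 0] = 5                   # single cell: slice_set_single
+ //   m[0..1, 0..1] = [1, 2, 3, 4]  # multi-cell slice: slice_set<D>
+ //   m[0, 0] = 0                   # writing the default removes the stored node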
+
+ /*
+ * Used only to set a default initial value.
+ */
+ template <typename D>
+ void init_default(LIST_STORAGE* s) {
+ s->default_val = NM_ALLOC(D);
+ *reinterpret_cast<D*>(s->default_val) = 0;
+ }
+
+
+ }} // end of namespace nm::list_storage
+
+ extern "C" {
+
+ /*
+ * Functions
+ */
+
+
+ ///////////////
+ // Lifecycle //
+ ///////////////
+
+
+ /*
+ * Creates a list-of-lists(-of-lists-of-lists-etc.) storage framework for a
+ * matrix.
+ *
+ * Note: The pointers you pass in for shape and init_val become the property of
+ * the new storage. Don't free them, and don't re-use them.
+ */
+ LIST_STORAGE* nm_list_storage_create(nm::dtype_t dtype, size_t* shape, size_t dim, void* init_val) {
+ LIST_STORAGE* s = NM_ALLOC( LIST_STORAGE );
+
+ s->dim = dim;
+ s->shape = shape;
+ s->dtype = dtype;
+
+ s->offset = NM_ALLOC_N(size_t, s->dim);
+ memset(s->offset, 0, s->dim * sizeof(size_t));
+
+ s->rows = nm::list::create();
+ if (init_val)
+ s->default_val = init_val;
+ else {
+ DTYPE_TEMPLATE_TABLE(nm::list_storage::init_default, void, LIST_STORAGE*)
+ ttable[dtype](s);
+ }
+ s->count = 1;
+ s->src = s;
+
+ return s;
+ }
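+ // Minimal C-side sketch under this file's own conventions (the shape array
+ // becomes the property of the storage; a NULL init_val selects a zero
+ // default through init_default):
+ //
+ //   size_t* shape = NM_ALLOC_N(size_t, 2);
+ //   shape[0] = 2; shape[1] = 3;
+ //   LIST_STORAGE* s = nm_list_storage_create(nm::INT64, shape, 2, NULL);
+ //   // ... use s ...
+ //   nm_list_storage_delete(reinterpret_cast<STORAGE*>(s));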
+
+ /*
+ * Destructor for list storage.
+ */
+ void nm_list_storage_delete(STORAGE* s) {
+ if (s) {
+ LIST_STORAGE* storage = (LIST_STORAGE*)s;
+ if (storage->count-- == 1) {
+ nm::list::del( storage->rows, storage->dim - 1 );
+
+ NM_FREE(storage->shape);
+ NM_FREE(storage->offset);
+ NM_FREE(storage->default_val);
+ NM_FREE(s);
+ }
+ }
+ }
+
+ /*
+ * Destructor for a list storage reference slice.
+ */
+ void nm_list_storage_delete_ref(STORAGE* s) {
+ if (s) {
+ LIST_STORAGE* storage = (LIST_STORAGE*)s;
+
+ nm_list_storage_delete( reinterpret_cast<STORAGE*>(storage->src ) );
+ NM_FREE(storage->shape);
+ NM_FREE(storage->offset);
+ NM_FREE(s);
+ }
+ }
+
+ /*
+ * GC mark function for list storage.
+ */
+ void nm_list_storage_mark(STORAGE* storage_base) {
+ LIST_STORAGE* storage = (LIST_STORAGE*)storage_base;
+
+ if (storage && storage->dtype == nm::RUBYOBJ) {
+ rb_gc_mark(*((VALUE*)(storage->default_val)));
+ nm::list::mark(storage->rows, storage->dim - 1);
+ }
+ }
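+ // Note on the register/unregister pairs below: list storage holds RUBYOBJ
+ // elements in malloc'd nodes that Ruby's GC cannot see through, so VALUEs in
+ // flight are pinned in nmatrix's global registry (nm_register_value and
+ // friends) for the duration of a C-side operation, then released.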
+
+ static void __nm_list_storage_unregister_temp_value_list(std::list<VALUE*>& temp_vals) {
+ for (std::list<VALUE*>::iterator it = temp_vals.begin(); it != temp_vals.end(); ++it) {
+ nm_unregister_value(&**it);
+ }
+ }
+
+ static void __nm_list_storage_unregister_temp_list_list(std::list<LIST*>& temp_vals, size_t recursions) {
+ for (std::list<LIST*>::iterator it = temp_vals.begin(); it != temp_vals.end(); ++it) {
+ nm_list_storage_unregister_list(*it, recursions);
+ }
+ }
+
+ void nm_list_storage_register_node(const NODE* curr) {
+ nm_register_value(&*reinterpret_cast<VALUE*>(curr->val));
+ }
+
+ void nm_list_storage_unregister_node(const NODE* curr) {
+ nm_unregister_value(&*reinterpret_cast<VALUE*>(curr->val));
+ }
+
+ /**
+ * Gets rid of all instances of a given node in the registration list.
+ * Sometimes a node will get deleted and replaced deep in a recursion, but
+ * further up it will still get registered. This leads to a potential read
+ * after free during the GC marking. This function completely clears out a
+ * node so that this won't happen.
+ */
+ void nm_list_storage_completely_unregister_node(const NODE* curr) {
+ nm_completely_unregister_value(&*reinterpret_cast<VALUE*>(curr->val));
+ }
+
+ void nm_list_storage_register_list(const LIST* list, size_t recursions) {
+ NODE* next;
+ if (!list) return;
+ NODE* curr = list->first;
+
+ while (curr != NULL) {
+ next = curr->next;
+ if (recursions == 0) {
+ nm_list_storage_register_node(curr);
+ } else {
+ nm_list_storage_register_list(reinterpret_cast<LIST*>(curr->val), recursions - 1);
+ }
+ curr = next;
+ }
+ }
+
+ void nm_list_storage_unregister_list(const LIST* list, size_t recursions) {
+ NODE* next;
+ if (!list) return;
+ NODE* curr = list->first;
+
+ while (curr != NULL) {
+ next = curr->next;
+ if (recursions == 0) {
+ nm_list_storage_unregister_node(curr);
+ } else {
+ nm_list_storage_unregister_list(reinterpret_cast<LIST*>(curr->val), recursions - 1);
+ }
+ curr = next;
+ }
+ }
+
+ void nm_list_storage_register(const STORAGE* s) {
+ const LIST_STORAGE* storage = reinterpret_cast<const LIST_STORAGE*>(s);
+ if (storage && storage->dtype == nm::RUBYOBJ) {
+ nm_register_value(&*reinterpret_cast<VALUE*>(storage->default_val));
+ nm_list_storage_register_list(storage->rows, storage->dim - 1);
+ }
+ }
+
+ void nm_list_storage_unregister(const STORAGE* s) {
+ const LIST_STORAGE* storage = reinterpret_cast<const LIST_STORAGE*>(s);
+ if (storage && storage->dtype == nm::RUBYOBJ) {
+ nm_unregister_value(&*reinterpret_cast<VALUE*>(storage->default_val));
+ nm_list_storage_unregister_list(storage->rows, storage->dim - 1);
+ }
+ }
+
+ ///////////////
+ // Accessors //
+ ///////////////
+
+ /*
+ * Find the node at a single set of coordinates, or return NULL if nothing is
+ * stored there.
+ */
+ static NODE* list_storage_get_single_node(LIST_STORAGE* s, SLICE* slice) {
+ LIST* l = s->rows;
+ NODE* n;
+
+ for (size_t r = 0; r < s->dim; r++) {
+ n = nm::list::find(l, s->offset[r] + slice->coords[r]);
+
+ if (n) l = reinterpret_cast<LIST*>(n->val);
+ else return NULL;
+ }
+
+ return n;
+ }
+
+
+ /*
+ * Recursive helper function for each_with_indices, based on nm_list_storage_count_elements_r.
+ * Handles empty/non-existent sublists.
+ */
+ static void each_empty_with_indices_r(nm::list_storage::RecurseData& s, size_t rec, VALUE& stack) {
+ VALUE empty = s.dtype() == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(s.init()) : s.init_obj();
+ NM_CONSERVATIVE(nm_register_value(&stack));
+
+ if (rec) {
+ for (unsigned long index = 0; index < s.ref_shape(rec); ++index) {
+ // Don't do an unshift/shift here -- we'll let that be handled in the lowest-level iteration (recursions == 0)
+ rb_ary_push(stack, LONG2NUM(index));
+ each_empty_with_indices_r(s, rec-1, stack);
+ rb_ary_pop(stack);
+ }
+ } else {
+ rb_ary_unshift(stack, empty);
+ for (unsigned long index = 0; index < s.ref_shape(rec); ++index) {
+ rb_ary_push(stack, LONG2NUM(index));
+ rb_yield_splat(stack);
+ rb_ary_pop(stack);
+ }
+ rb_ary_shift(stack);
+ }
+ NM_CONSERVATIVE(nm_unregister_value(&stack));
+ }
+
+ /*
+ * Recursive helper function for each_with_indices, based on nm_list_storage_count_elements_r.
+ */
+ static void each_with_indices_r(nm::list_storage::RecurseData& s, const LIST* l, size_t rec, VALUE& stack) {
+ if (s.dtype() == nm::RUBYOBJ)
+ nm_list_storage_register_list(l, rec);
+ NM_CONSERVATIVE(nm_register_value(&stack));
+ NODE* curr = l->first;
+
+ size_t offset = s.offset(rec);
+ size_t shape = s.ref_shape(rec);
+
+ while (curr && curr->key < offset) curr = curr->next;
+ if (curr && curr->key - offset >= shape) curr = NULL;
+
+
+ if (rec) {
+ for (unsigned long index = 0; index < shape; ++index) { // index in reference
+ rb_ary_push(stack, LONG2NUM(index));
+ if (!curr || index < curr->key - offset) {
+ each_empty_with_indices_r(s, rec-1, stack);
+ } else { // index == curr->key - offset
+ each_with_indices_r(s, reinterpret_cast<const LIST*>(curr->val), rec-1, stack);
+ curr = curr->next;
+ }
+ rb_ary_pop(stack);
+ }
+ } else {
+ for (unsigned long index = 0; index < shape; ++index) {
+
+ rb_ary_push(stack, LONG2NUM(index));
+
+ if (!curr || index < curr->key - offset) {
+ rb_ary_unshift(stack, s.dtype() == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(s.init()) : s.init_obj());
+
+ } else { // index == curr->key - offset
+ rb_ary_unshift(stack, s.dtype() == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(curr->val) : nm::rubyobj_from_cval(curr->val, s.dtype()).rval);
+
+ curr = curr->next;
+ }
+ rb_yield_splat(stack);
+
+ rb_ary_shift(stack);
+ rb_ary_pop(stack);
+ }
+ }
+ NM_CONSERVATIVE(nm_unregister_value(&stack));
+ if (s.dtype() == nm::RUBYOBJ)
+ nm_list_storage_unregister_list(l, rec);
+ }
+
+
+ /*
+ * Recursive helper function for each_stored_with_indices, based on nm_list_storage_count_elements_r.
+ */
+ static void each_stored_with_indices_r(nm::list_storage::RecurseData& s, const LIST* l, size_t rec, VALUE& stack) {
+ if (s.dtype() == nm::RUBYOBJ)
+ nm_list_storage_register_list(l, rec);
+ NM_CONSERVATIVE(nm_register_value(&stack));
+
+ NODE* curr = l->first;
+
+ size_t offset = s.offset(rec);
+ size_t shape = s.ref_shape(rec);
+
+ while (curr && curr->key < offset) { curr = curr->next; }
+ if (curr && curr->key - offset >= shape) curr = NULL;
+
+ if (rec) {
+ while (curr) {
+
+ rb_ary_push(stack, LONG2NUM(static_cast<long>(curr->key - offset)));
+ each_stored_with_indices_r(s, reinterpret_cast<const LIST*>(curr->val), rec-1, stack);
+ rb_ary_pop(stack);
+
+ curr = curr->next;
+ if (curr && curr->key - offset >= shape) curr = NULL;
+ }
+ } else {
+ while (curr) {
+ rb_ary_push(stack, LONG2NUM(static_cast<long>(curr->key - offset))); // add index to end
+
+ // add value to beginning
+ rb_ary_unshift(stack, s.dtype() == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(curr->val) : nm::rubyobj_from_cval(curr->val, s.dtype()).rval);
+ // yield to the whole stack (value, i, j, k, ...)
+ rb_yield_splat(stack);
+
+ // remove the value
+ rb_ary_shift(stack);
+
+ // remove the index from the end
+ rb_ary_pop(stack);
+
+ curr = curr->next;
+ if (curr && curr->key - offset >= shape) curr = NULL;
+ }
+ }
+ NM_CONSERVATIVE(nm_unregister_value(&stack));
+ if (s.dtype() == nm::RUBYOBJ)
+ nm_list_storage_unregister_list(l, rec);
+ }
+
+
+ /*
+ * Each/each-stored iterator, brings along the indices.
+ */
+ VALUE nm_list_each_with_indices(VALUE nmatrix, bool stored) {
+
+ NM_CONSERVATIVE(nm_register_value(&nmatrix));
+
+ // If we don't have a block, return an enumerator.
+ RETURN_SIZED_ENUMERATOR_PRE
+ NM_CONSERVATIVE(nm_unregister_value(&nmatrix));
+ RETURN_SIZED_ENUMERATOR(nmatrix, 0, 0, 0);
+
+ nm::list_storage::RecurseData sdata(NM_STORAGE_LIST(nmatrix));
+
+ VALUE stack = rb_ary_new();
+
+ if (stored) each_stored_with_indices_r(sdata, sdata.top_level_list(), sdata.dim() - 1, stack);
+ else each_with_indices_r(sdata, sdata.top_level_list(), sdata.dim() - 1, stack);
+
+ NM_CONSERVATIVE(nm_unregister_value(&nmatrix));
+ return nmatrix;
+ }
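+ // Ruby-side sketch (these are the gem's public iterators; `stored` selects
+ // the stored-only variant):
+ //
+ //   m.each_with_indices        { |val, i, j| ... }  # every cell, defaults included
+ //   m.each_stored_with_indices { |val, i, j| ... }  # only stored, non-default cells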
+
+
+ /*
+ * Map stored iterator. Always returns a matrix containing RubyObjects,
+ * which the caller will generally need to cast down to the desired dtype.
+ */
+ VALUE nm_list_map_stored(VALUE left, VALUE init) {
+ NM_CONSERVATIVE(nm_register_value(&left));
+ NM_CONSERVATIVE(nm_register_value(&init));
+
+ LIST_STORAGE *s = NM_STORAGE_LIST(left);
+
+ // For each matrix, if it's a reference, we want to deal directly with the
+ // original (with appropriate offsetting)
+ nm::list_storage::RecurseData sdata(s);
+
+ //if (!rb_block_given_p()) {
+ // rb_raise(rb_eNotImpError, "RETURN_SIZED_ENUMERATOR probably won't work for a map_merged since no merged object is created");
+ //}
+ // If we don't have a block, return an enumerator.
+ RETURN_SIZED_ENUMERATOR_PRE
+ NM_CONSERVATIVE(nm_unregister_value(&left));
+ NM_CONSERVATIVE(nm_unregister_value(&init));
+ RETURN_SIZED_ENUMERATOR(left, 0, 0, 0); // FIXME: Test this. Probably won't work. Enable above code instead.
+
+ // Figure out default value if none provided by the user
+ if (init == Qnil) {
+ nm_unregister_value(&init);
+ init = rb_yield_values(1, sdata.init_obj());
+ nm_register_value(&init);
+ }
+ // Allocate a default value for the resulting matrix.
+ void* init_val = NM_ALLOC(VALUE);
+ memcpy(init_val, &init, sizeof(VALUE));
+ nm_register_value(&*reinterpret_cast<VALUE*>(init_val));
+
+ NMATRIX* result = nm_create(nm::LIST_STORE, nm_list_storage_create(nm::RUBYOBJ, sdata.copy_alloc_shape(), s->dim, init_val));
+ LIST_STORAGE* r = reinterpret_cast<LIST_STORAGE*>(result->storage);
+ nm::list_storage::RecurseData rdata(r, init);
+ nm_register_nmatrix(result);
+ map_stored_r(rdata, sdata, rdata.top_level_list(), sdata.top_level_list(), sdata.dim() - 1);
+
+ VALUE to_return = Data_Wrap_Struct(CLASS_OF(left), nm_mark, nm_delete, result);
+
+ nm_unregister_nmatrix(result);
+ nm_unregister_value(&*reinterpret_cast<VALUE*>(init_val));
+ NM_CONSERVATIVE(nm_unregister_value(&init));
+ NM_CONSERVATIVE(nm_unregister_value(&left));
+
+ return to_return;
+ }
+
+
+ /*
+ * Map merged stored iterator. Always returns a matrix containing RubyObjects,
+ * which the caller will generally need to cast down to the desired dtype.
+ */
+ VALUE nm_list_map_merged_stored(VALUE left, VALUE right, VALUE init) {
+ NM_CONSERVATIVE(nm_register_value(&left));
+ NM_CONSERVATIVE(nm_register_value(&right));
+ NM_CONSERVATIVE(nm_register_value(&init));
+
+ bool scalar = false;
+
+ LIST_STORAGE *s = NM_STORAGE_LIST(left),
+ *t;
+
+ // For each matrix, if it's a reference, we want to deal directly with the original (with appropriate offsetting)
+ nm::list_storage::RecurseData sdata(s);
+
+ void* scalar_init = NULL;
+
+ // right might be a scalar, in which case this is a scalar operation.
+ if (!IsNMatrixType(right)) {
+ nm::dtype_t r_dtype = Upcast[NM_DTYPE(left)][nm_dtype_min(right)];
+ scalar_init = rubyobj_to_cval(right, r_dtype); // make a copy of right
+
+ t = reinterpret_cast<LIST_STORAGE*>(nm_list_storage_create(r_dtype, sdata.copy_alloc_shape(), s->dim, scalar_init));
+ scalar = true;
+ } else {
+ t = NM_STORAGE_LIST(right); // element-wise, not scalar.
+ }
+
+ //if (!rb_block_given_p()) {
+ // rb_raise(rb_eNotImpError, "RETURN_SIZED_ENUMERATOR probably won't work for a map_merged since no merged object is created");
+ //}
+ // If we don't have a block, return an enumerator.
+ RETURN_SIZED_ENUMERATOR_PRE
+ NM_CONSERVATIVE(nm_unregister_value(&left));
+ NM_CONSERVATIVE(nm_unregister_value(&right));
+ NM_CONSERVATIVE(nm_unregister_value(&init));
+ RETURN_SIZED_ENUMERATOR(left, 0, 0, 0); // FIXME: Test this. Probably won't work. Enable above code instead.
+
+ // Figure out default value if none provided by the user
+ nm::list_storage::RecurseData& tdata = *(new nm::list_storage::RecurseData(t)); // FIXME: this is a hack to make sure that we can run the destructor before nm_list_storage_delete(t) below.
+ if (init == Qnil) {
+ nm_unregister_value(&init);
+ init = rb_yield_values(2, sdata.init_obj(), tdata.init_obj());
+ nm_register_value(&init);
+ }
+
+ // Allocate a default value for the resulting matrix.
+ void* init_val = NM_ALLOC(VALUE);
+ memcpy(init_val, &init, sizeof(VALUE));
+ nm_register_value(&*reinterpret_cast<VALUE*>(init_val));
+
+ NMATRIX* result = nm_create(nm::LIST_STORE, nm_list_storage_create(nm::RUBYOBJ, sdata.copy_alloc_shape(), s->dim, init_val));
+ LIST_STORAGE* r = reinterpret_cast<LIST_STORAGE*>(result->storage);
+ nm::list_storage::RecurseData rdata(r, init);
+ map_merged_stored_r(rdata, sdata, tdata, rdata.top_level_list(), sdata.top_level_list(), tdata.top_level_list(), sdata.dim() - 1);
+
+ delete &tdata;
+ // If we are working with a scalar operation
+ if (scalar) nm_list_storage_delete(t);
+
+ VALUE to_return = Data_Wrap_Struct(CLASS_OF(left), nm_mark, nm_delete, result);
+
+ nm_unregister_value(&*reinterpret_cast<VALUE*>(init_val));
+
+ NM_CONSERVATIVE(nm_unregister_value(&init));
+ NM_CONSERVATIVE(nm_unregister_value(&right));
+ NM_CONSERVATIVE(nm_unregister_value(&left));
+
+ return to_return;
+ }
+
+
+ /*
+ * Copy a slice of a list matrix into a regular list matrix.
+ */
+ static LIST* slice_copy(const LIST_STORAGE* src, LIST* src_rows, size_t* coords, size_t* lengths, size_t n) {
+ nm_list_storage_register(src);
+ void *val = NULL;
+ int key;
+
+ LIST* dst_rows = nm::list::create();
+ NODE* src_node = src_rows->first;
+ std::list<VALUE*> temp_vals;
+ std::list<LIST*> temp_lists;
+ while (src_node) {
+ key = src_node->key - (src->offset[n] + coords[n]);
+
+ if (key >= 0 && (size_t)key < lengths[n]) {
+ if (src->dim - n > 1) {
+ val = slice_copy( src,
+ reinterpret_cast<LIST*>(src_node->val),
+ coords,
+ lengths,
+ n + 1 );
+ if (val) {
+ if (src->dtype == nm::RUBYOBJ) {
+ nm_list_storage_register_list(reinterpret_cast<LIST*>(val), src->dim - n - 2);
+ temp_lists.push_front(reinterpret_cast<LIST*>(val));
+ }
+ nm::list::insert_copy(dst_rows, false, key, val, sizeof(LIST));
+ }
+ } else { // matches src->dim - n > 1
+ if (src->dtype == nm::RUBYOBJ) {
+ nm_register_value(&*reinterpret_cast<VALUE*>(src_node->val));
+ temp_vals.push_front(reinterpret_cast<VALUE*>(src_node->val));
+ }
+ nm::list::insert_copy(dst_rows, false, key, src_node->val, DTYPE_SIZES[src->dtype]);
+ }
+ }
+ src_node = src_node->next;
+ }
+ if (src->dtype == nm::RUBYOBJ) {
+ __nm_list_storage_unregister_temp_list_list(temp_lists, src->dim - n - 2);
+ __nm_list_storage_unregister_temp_value_list(temp_vals);
+ }
+ nm_list_storage_unregister(src);
+ return dst_rows;
+ }
+
+ /*
+ * Get a single element (returning a pointer into the storage) or copy a slice
+ * into a newly allocated LIST_STORAGE.
+ */
+ void* nm_list_storage_get(const STORAGE* storage, SLICE* slice) {
+ LIST_STORAGE* s = (LIST_STORAGE*)storage;
+ LIST_STORAGE* ns = NULL;
+
+ nm_list_storage_register(s);
+
+ if (slice->single) {
+ NODE* n = list_storage_get_single_node(s, slice);
+ nm_list_storage_unregister(s);
+ return (n ? n->val : s->default_val);
+ } else {
+ void *init_val = NM_ALLOC_N(char, DTYPE_SIZES[s->dtype]);
+ memcpy(init_val, s->default_val, DTYPE_SIZES[s->dtype]);
+ if (s->dtype == nm::RUBYOBJ)
+ nm_register_value(&*reinterpret_cast<VALUE*>(init_val));
+
+ size_t *shape = NM_ALLOC_N(size_t, s->dim);
+ memcpy(shape, slice->lengths, sizeof(size_t) * s->dim);
+
+ ns = nm_list_storage_create(s->dtype, shape, s->dim, init_val);
+
+ ns->rows = slice_copy(s, s->rows, slice->coords, slice->lengths, 0);
+
+ if (s->dtype == nm::RUBYOBJ) {
+ nm_unregister_value(&*reinterpret_cast<VALUE*>(init_val));
+ }
+
+ nm_list_storage_unregister(s);
+
+ return ns;
+ }
+ }
+
+ /*
+ * Get the contents of some set of coordinates. Note: Does not make a copy!
+ * Don't free!
+ */
+ void* nm_list_storage_ref(const STORAGE* storage, SLICE* slice) {
+ LIST_STORAGE* s = (LIST_STORAGE*)storage;
+ LIST_STORAGE* ns = NULL;
+ nm_list_storage_register(s);
+
+ // TODO: This needs refactoring.
+ if (slice->single) {
+ NODE* n = list_storage_get_single_node(s, slice);
+ nm_list_storage_unregister(s);
+ return (n ? n->val : s->default_val);
+ } else {
+ ns = NM_ALLOC( LIST_STORAGE );
+
+ ns->dim = s->dim;
+ ns->dtype = s->dtype;
+ ns->offset = NM_ALLOC_N(size_t, ns->dim);
+ ns->shape = NM_ALLOC_N(size_t, ns->dim);
+
+ for (size_t i = 0; i < ns->dim; ++i) {
+ ns->offset[i] = slice->coords[i] + s->offset[i];
+ ns->shape[i] = slice->lengths[i];
+ }
+
+ ns->rows = s->rows;
+ ns->default_val = s->default_val;
+
+ s->src->count++;
+ ns->src = s->src;
+ nm_list_storage_unregister(s);
+ return ns;
+ }
+ }
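+ // nm_list_storage_get and nm_list_storage_ref differ in ownership: get
+ // copies the slice into fresh storage (safe to mutate independently), while
+ // ref builds a view whose src points at the original and bumps its refcount,
+ // so writes through the reference land in the parent matrix.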
+
+
+ /*
+ * Recursive function, sets multiple values in a matrix from a single source value.
+ */
+ static void slice_set_single(LIST_STORAGE* dest, LIST* l, void* val, size_t* coords, size_t* lengths, size_t n) {
+ nm_list_storage_register(dest);
+ if (dest->dtype == nm::RUBYOBJ) {
+ nm_register_value(&*reinterpret_cast<VALUE*>(val));
+ nm_list_storage_register_list(l, dest->dim - n - 1);
+ }
+
+ // drill down into the structure
+ NODE* node = NULL;
+ if (dest->dim - n > 1) {
+ std::list<LIST*> temp_nodes;
+ for (size_t i = 0; i < lengths[n]; ++i) {
+
+ size_t key = i + dest->offset[n] + coords[n];
+
+ if (!node) {
+ // try to insert list
+ node = nm::list::insert(l, false, key, nm::list::create());
+ } else if (!node->next || (node->next && node->next->key > key)) {
+ node = nm::list::insert_after(node, key, nm::list::create());
+ } else {
+ node = node->next; // correct rank already exists.
+ }
+
+ if (dest->dtype == nm::RUBYOBJ) {
+ temp_nodes.push_front(reinterpret_cast<LIST*>(node->val));
+ nm_list_storage_register_list(reinterpret_cast<LIST*>(node->val), dest->dim - n - 2);
+ }
+
+ // cast it to a list and recurse
+ slice_set_single(dest, reinterpret_cast<LIST*>(node->val), val, coords, lengths, n + 1);
+ }
+ __nm_list_storage_unregister_temp_list_list(temp_nodes, dest->dim - n - 2);
+ } else {
+ std::list<VALUE*> temp_vals;
+ for (size_t i = 0; i < lengths[n]; ++i) {
+
+ size_t key = i + dest->offset[n] + coords[n];
+
+ if (!node) {
+ node = nm::list::insert_copy(l, true, key, val, DTYPE_SIZES[dest->dtype]);
+ } else {
+ node = nm::list::replace_insert_after(node, key, val, true, DTYPE_SIZES[dest->dtype]);
+ }
+ if (dest->dtype == nm::RUBYOBJ) {
+ temp_vals.push_front(reinterpret_cast<VALUE*>(node->val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(node->val));
+ }
+ }
+ __nm_list_storage_unregister_temp_value_list(temp_vals);
+ }
+
+ nm_list_storage_unregister(dest);
+ if (dest->dtype == nm::RUBYOBJ) {
+ nm_unregister_value(&*reinterpret_cast<VALUE*>(val));
+ nm_list_storage_unregister_list(l, dest->dim - n - 1);
+ }
+ }
+
+
+
+ /*
+ * Set a value or values in a list matrix.
+ */
+ void nm_list_storage_set(VALUE left, SLICE* slice, VALUE right) {
+ NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::list_storage::set, void, VALUE, SLICE*, VALUE)
+ ttable[NM_DTYPE(left)](left, slice, right);
+ }
+
+
+ /*
+ * Insert an entry directly in a row (not using copy! don't free after).
+ *
+ * Returns a pointer to the insertion location.
+ *
+ * TODO: Allow this function to accept an entire row and not just one value -- for slicing
+ */
+ NODE* nm_list_storage_insert(STORAGE* storage, SLICE* slice, void* val) {
+ LIST_STORAGE* s = (LIST_STORAGE*)storage;
+ nm_list_storage_register(s);
+ if (s->dtype == nm::RUBYOBJ)
+ nm_register_value(&*reinterpret_cast<VALUE*>(val));
+ // Pretend dims = 2
+ // Then coords is going to be size 2
+ // So we need to find out if some key already exists
+ size_t r;
+ NODE* n;
+ LIST* l = s->rows;
+
+ // drill down into the structure
+ for (r = 0; r < s->dim - 1; ++r) {
+ n = nm::list::insert(l, false, s->offset[r] + slice->coords[r], nm::list::create());
+ l = reinterpret_cast<LIST*>(n->val);
+ }
+
+ nm_list_storage_unregister(s);
+ if (s->dtype == nm::RUBYOBJ)
+ nm_unregister_value(&*reinterpret_cast<VALUE*>(val));
+
+ return nm::list::insert(l, true, s->offset[r] + slice->coords[r], val);
+ }
+
+ /*
+ * Remove an item or slice from list storage.
+ */
+ void nm_list_storage_remove(STORAGE* storage, SLICE* slice) {
+ LIST_STORAGE* s = (LIST_STORAGE*)storage;
+
+ // This returns a boolean, which will indicate whether s->rows is empty.
+ // We can safely ignore it, since we never want to delete s->rows until
+ // it's time to destroy the LIST_STORAGE object.
+ nm::list::remove_recursive(s->rows, slice->coords, s->offset, slice->lengths, 0, s->dim);
+ }
+
+ ///////////
+ // Tests //
+ ///////////
+
+ /*
+ * Comparison of contents for list storage.
+ */
+ bool nm_list_storage_eqeq(const STORAGE* left, const STORAGE* right) {
+ NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::list_storage::eqeq_r, bool, nm::list_storage::RecurseData& left, nm::list_storage::RecurseData& right, const LIST* l, const LIST* r, size_t rec)
+
+ nm::list_storage::RecurseData ldata(reinterpret_cast<const LIST_STORAGE*>(left)),
+ rdata(reinterpret_cast<const LIST_STORAGE*>(right));
+
+ return ttable[left->dtype][right->dtype](ldata, rdata, ldata.top_level_list(), rdata.top_level_list(), ldata.dim()-1);
+ }
+
+ //////////
+ // Math //
+ //////////
+
+
+ /*
+ * List storage matrix multiplication.
+ */
+ STORAGE* nm_list_storage_matrix_multiply(const STORAGE_PAIR& casted_storage, size_t* resulting_shape, bool vector) {
+ free(resulting_shape);
+ rb_raise(rb_eNotImpError, "multiplication not implemented for list-of-list matrices");
+ return NULL;
+ //DTYPE_TEMPLATE_TABLE(dense_storage::matrix_multiply, NMATRIX*, STORAGE_PAIR, size_t*, bool);
+
+ //return ttable[reinterpret_cast<DENSE_STORAGE*>(casted_storage.left)->dtype](casted_storage, resulting_shape, vector);
+ }
+
+
+ /*
+ * List storage to Hash conversion. Uses Hashes with default values, so you can continue to pretend
+ * it's a sparse matrix.
+ */
+ VALUE nm_list_storage_to_hash(const LIST_STORAGE* s, const nm::dtype_t dtype) {
+ nm_list_storage_register(s);
+ // Get the default value for the list storage.
+ VALUE default_value = nm::rubyobj_from_cval(s->default_val, dtype).rval;
+ nm_list_storage_unregister(s);
+ // Recursively copy each dimension of the matrix into a nested hash.
+ return nm_list_copy_to_hash(s->rows, dtype, s->dim - 1, default_value);
+ }
+
+ /////////////
+ // Utility //
+ /////////////
+
+ /*
+ * Recursively count the non-zero elements in a list storage object.
+ */
+ size_t nm_list_storage_count_elements_r(const LIST* l, size_t recursions) {
+ size_t count = 0;
+ NODE* curr = l->first;
+
+ if (recursions) {
+ while (curr) {
+ count += nm_list_storage_count_elements_r(reinterpret_cast<const LIST*>(curr->val), recursions - 1);
+ curr = curr->next;
+ }
+
+ } else {
+ while (curr) {
+ ++count;
+ curr = curr->next;
+ }
+ }
+
+ return count;
+ }
+
+ /*
+ * Count non-diagonal non-zero elements.
+ */
+ size_t nm_list_storage_count_nd_elements(const LIST_STORAGE* s) {
+ NODE *i_curr, *j_curr;
+ size_t count = 0;
+
+ if (s->dim != 2) {
+ rb_raise(rb_eNotImpError, "non-diagonal element counting only defined for dim = 2");
+ }
+
+ for (i_curr = s->rows->first; i_curr; i_curr = i_curr->next) {
+ int i = i_curr->key - s->offset[0];
+ if (i < 0 || i >= (int)s->shape[0]) continue;
+
+ for (j_curr = ((LIST*)(i_curr->val))->first; j_curr; j_curr = j_curr->next) {
+ int j = j_curr->key - s->offset[1];
+ if (j < 0 || j >= (int)s->shape[1]) continue;
+
+ if (i != j) ++count;
+ }
+ }
+
+ return count;
+ }
+
+ /////////////////////////
+ // Copying and Casting //
+ /////////////////////////
+
+ /*
+ * List storage copy constructor C access.
+ */
+
+ LIST_STORAGE* nm_list_storage_copy(const LIST_STORAGE* rhs) {
+ nm_list_storage_register(rhs);
+ size_t *shape = NM_ALLOC_N(size_t, rhs->dim);
+ memcpy(shape, rhs->shape, sizeof(size_t) * rhs->dim);
+
+ void *init_val = NM_ALLOC_N(char, DTYPE_SIZES[rhs->dtype]);
+ memcpy(init_val, rhs->default_val, DTYPE_SIZES[rhs->dtype]);
+
+ LIST_STORAGE* lhs = nm_list_storage_create(rhs->dtype, shape, rhs->dim, init_val);
+ nm_list_storage_register(lhs);
+
+ lhs->rows = slice_copy(rhs, rhs->rows, lhs->offset, lhs->shape, 0);
+
+ nm_list_storage_unregister(rhs);
+ nm_list_storage_unregister(lhs);
+ return lhs;
+ }
+
+ /*
+ * List storage copy constructor C access with casting.
+ */
+ STORAGE* nm_list_storage_cast_copy(const STORAGE* rhs, nm::dtype_t new_dtype, void* dummy) {
+ NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::list_storage::cast_copy, LIST_STORAGE*, const LIST_STORAGE* rhs, nm::dtype_t new_dtype);
+
+ return (STORAGE*)ttable[new_dtype][rhs->dtype]((LIST_STORAGE*)rhs, new_dtype);
+ }
+
+
+ /*
+ * List storage copy constructor for transposing.
+ */
+ STORAGE* nm_list_storage_copy_transposed(const STORAGE* rhs_base) {
+ rb_raise(rb_eNotImpError, "list storage transpose not yet implemented");
+ return NULL;
+ }
+
+
+ } // end of extern "C" block
+
1458
+
1459
+ /////////////////////////
+ // Templated Functions //
+ /////////////////////////
+
+ namespace nm {
+ namespace list_storage {
+
+ /*
+  * List storage copy constructor for changing dtypes.
+  */
+ template <typename LDType, typename RDType>
+ static LIST_STORAGE* cast_copy(const LIST_STORAGE* rhs, dtype_t new_dtype) {
+   nm_list_storage_register(rhs);
+   // Allocate and copy the shape.
+   size_t* shape = NM_ALLOC_N(size_t, rhs->dim);
+   memcpy(shape, rhs->shape, rhs->dim * sizeof(size_t));
+
+   // Copy the default value, converting it to the new dtype.
+   LDType* default_val = NM_ALLOC_N(LDType, 1);
+   *default_val = *reinterpret_cast<RDType*>(rhs->default_val);
+
+   LIST_STORAGE* lhs = nm_list_storage_create(new_dtype, shape, rhs->dim, default_val);
+   //lhs->rows = nm::list::create();
+
+   nm_list_storage_register(lhs);
+   // TODO: Needs optimization. When the matrix is a reference it is copied twice.
+   if (rhs->src == rhs)
+     nm::list::cast_copy_contents<LDType, RDType>(lhs->rows, rhs->rows, rhs->dim - 1);
+   else {
+     LIST_STORAGE* tmp = nm_list_storage_copy(rhs);
+     nm_list_storage_register(tmp);
+     nm::list::cast_copy_contents<LDType, RDType>(lhs->rows, tmp->rows, rhs->dim - 1);
+     nm_list_storage_unregister(tmp);
+     nm_list_storage_delete(tmp);
+   }
+   nm_list_storage_unregister(lhs);
+   nm_list_storage_unregister(rhs);
+   return lhs;
+ }
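+
+ // Instantiation sketch (hypothetical dtype pair): casting an int64 list
+ // matrix to float64 dispatches to the <float64, int64> instantiation above,
+ // which converts the default value first and then each stored element via
+ // cast_copy_contents.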
+
+
+ /*
+  * Recursive helper function for eqeq. We use SDType and TDType rather than L and R
+  * because this function can be called with its arguments in either order (L,R or
+  * R,L); S and T simply denote the first and second types passed in.
+  */
+ template <typename SDType, typename TDType>
+ static bool eqeq_empty_r(RecurseData& s, const LIST* l, size_t rec, const TDType* t_init) {
+   NODE* curr = l->first;
+
+   // For reference matrices, make sure we start in the correct place.
+   while (curr && curr->key < s.offset(rec)) { curr = curr->next; }
+   if (curr && curr->key - s.offset(rec) >= s.ref_shape(rec)) curr = NULL;
+
+   if (rec) {
+     while (curr) {
+       if (!eqeq_empty_r<SDType,TDType>(s, reinterpret_cast<const LIST*>(curr->val), rec-1, t_init)) return false;
+       curr = curr->next;
+       if (curr && curr->key - s.offset(rec) >= s.ref_shape(rec)) curr = NULL;
+     }
+   } else {
+     while (curr) {
+       if (*reinterpret_cast<SDType*>(curr->val) != *t_init) return false;
+       curr = curr->next;
+       if (curr && curr->key - s.offset(rec) >= s.ref_shape(rec)) curr = NULL;
+     }
+   }
+   return true;
+ }
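+
+ // Semantics sketch: eqeq_empty_r answers "does this sublist, present on only
+ // one side of the comparison, equal the other side's default value
+ // everywhere?" E.g. a row stored on the left but absent on the right is equal
+ // only if every stored left value matches the right matrix's default.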
+
+
+ /*
+  * Do these two list matrices of the same dtype have exactly the same contents
+  * (accounting for default_vals)?
+  *
+  * This function is recursive.
+  */
+ template <typename LDType, typename RDType>
+ static bool eqeq_r(RecurseData& left, RecurseData& right, const LIST* l, const LIST* r, size_t rec) {
+   NODE *lcurr = l->first,
+        *rcurr = r->first;
+
+   // For reference matrices, make sure we start in the correct place.
+   while (lcurr && lcurr->key < left.offset(rec))  { lcurr = lcurr->next; }
+   while (rcurr && rcurr->key < right.offset(rec)) { rcurr = rcurr->next; }
+   if (rcurr && rcurr->key - right.offset(rec) >= left.ref_shape(rec)) rcurr = NULL;
+   if (lcurr && lcurr->key - left.offset(rec)  >= left.ref_shape(rec)) lcurr = NULL;
+
+   bool compared = false;
+
+   if (rec) {
+     while (lcurr || rcurr) {
+       if (!rcurr || (lcurr && (lcurr->key - left.offset(rec) < rcurr->key - right.offset(rec)))) {
+         // Key present only on the left: its sublist must equal right's default.
+         if (!eqeq_empty_r<LDType,RDType>(left, reinterpret_cast<const LIST*>(lcurr->val), rec-1, reinterpret_cast<const RDType*>(right.init()))) return false;
+         lcurr = lcurr->next;
+       } else if (!lcurr || (rcurr && (rcurr->key - right.offset(rec) < lcurr->key - left.offset(rec)))) {
+         // Key present only on the right: its sublist must equal left's default.
+         if (!eqeq_empty_r<RDType,LDType>(right, reinterpret_cast<const LIST*>(rcurr->val), rec-1, reinterpret_cast<const LDType*>(left.init()))) return false;
+         rcurr = rcurr->next;
+       } else { // Keys are == and both nodes are present.
+         if (!eqeq_r<LDType,RDType>(left, right, reinterpret_cast<const LIST*>(lcurr->val), reinterpret_cast<const LIST*>(rcurr->val), rec-1)) return false;
+         lcurr = lcurr->next;
+         rcurr = rcurr->next;
+       }
+       if (rcurr && rcurr->key - right.offset(rec) >= right.ref_shape(rec)) rcurr = NULL;
+       if (lcurr && lcurr->key - left.offset(rec)  >= left.ref_shape(rec))  lcurr = NULL;
+       compared = true;
+     }
+   } else {
+     while (lcurr || rcurr) {
+       if (rcurr && rcurr->key - right.offset(rec) >= left.ref_shape(rec)) rcurr = NULL;
+       if (lcurr && lcurr->key - left.offset(rec)  >= left.ref_shape(rec)) lcurr = NULL;
+
+       if (!rcurr || (lcurr && (lcurr->key - left.offset(rec) < rcurr->key - right.offset(rec)))) {
+         // Value present only on the left: compare against right's default.
+         if (*reinterpret_cast<LDType*>(lcurr->val) != *reinterpret_cast<const RDType*>(right.init())) return false;
+         lcurr = lcurr->next;
+       } else if (!lcurr || (rcurr && (rcurr->key - right.offset(rec) < lcurr->key - left.offset(rec)))) {
+         // Value present only on the right: compare against left's default.
+         if (*reinterpret_cast<RDType*>(rcurr->val) != *reinterpret_cast<const LDType*>(left.init())) return false;
+         rcurr = rcurr->next;
+       } else { // Keys are == and both nodes are present.
+         if (*reinterpret_cast<LDType*>(lcurr->val) != *reinterpret_cast<RDType*>(rcurr->val)) return false;
+         lcurr = lcurr->next;
+         rcurr = rcurr->next;
+       }
+       if (rcurr && rcurr->key - right.offset(rec) >= right.ref_shape(rec)) rcurr = NULL;
+       if (lcurr && lcurr->key - left.offset(rec)  >= left.ref_shape(rec))  lcurr = NULL;
+       compared = true;
+     }
+   }
+
+   // Final condition: if nothing was compared (both lists empty within the
+   // slice), equality reduces to whether the two default values match.
+   if (!compared && !lcurr && !rcurr) return *reinterpret_cast<const LDType*>(left.init()) == *reinterpret_cast<const RDType*>(right.init());
+   return true;
+ }
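+
+ // Worked example (hypothetical 1-D case): left stores {0: 5, 2: 7}, right
+ // stores {2: 7}. The merge-style walk compares left's 5 at key 0 against
+ // right's default value, then 7 == 7 at key 2. Had neither side stored
+ // anything, the final check would compare the two defaults directly.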
+
+ }} // end of namespace nm::list_storage
+
+ extern "C" {
+ /*
+  * call-seq:
+  *     __list_to_hash__ -> Hash
+  *
+  * Create a Ruby Hash from a list NMatrix.
+  *
+  * This is an internal C function which handles only the list stype.
+  */
+ VALUE nm_to_hash(VALUE self) {
+   return nm_list_storage_to_hash(NM_STORAGE_LIST(self), NM_DTYPE(self));
+ }
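+
+ // From Ruby (sketch): for a list-stype matrix n, calling n.__list_to_hash__
+ // returns the nested-Hash form described above.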
+
+ /*
+  * call-seq:
+  *     __list_default_value__ -> ...
+  *
+  * Get the default_value property from a list matrix.
+  */
+ VALUE nm_list_default_value(VALUE self) {
+   NM_CONSERVATIVE(nm_register_value(&self));
+   VALUE to_return = (NM_DTYPE(self) == nm::RUBYOBJ)
+                   ? *reinterpret_cast<VALUE*>(NM_DEFAULT_VAL(self))
+                   : nm::rubyobj_from_cval(NM_DEFAULT_VAL(self), NM_DTYPE(self)).rval;
+   NM_CONSERVATIVE(nm_unregister_value(&self));
+   return to_return;
+ }
+ } // end of extern "C" block