nmatrix 0.0.9 → 0.1.0.rc1
This diff compares the contents of two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Gemfile +1 -0
- data/History.txt +95 -1
- data/LICENSE.txt +2 -2
- data/README.rdoc +24 -26
- data/Rakefile +32 -16
- data/ext/nmatrix/data/complex.h +2 -2
- data/ext/nmatrix/data/data.cpp +27 -51
- data/ext/nmatrix/data/data.h +92 -4
- data/ext/nmatrix/data/meta.h +2 -2
- data/ext/nmatrix/data/rational.h +2 -2
- data/ext/nmatrix/data/ruby_object.h +2 -2
- data/ext/nmatrix/extconf.rb +87 -86
- data/ext/nmatrix/math.cpp +45 -40
- data/ext/nmatrix/math/asum.h +3 -3
- data/ext/nmatrix/math/geev.h +2 -2
- data/ext/nmatrix/math/gemm.h +6 -2
- data/ext/nmatrix/math/gemv.h +6 -2
- data/ext/nmatrix/math/ger.h +2 -2
- data/ext/nmatrix/math/gesdd.h +2 -2
- data/ext/nmatrix/math/gesvd.h +2 -2
- data/ext/nmatrix/math/getf2.h +2 -2
- data/ext/nmatrix/math/getrf.h +2 -2
- data/ext/nmatrix/math/getri.h +2 -2
- data/ext/nmatrix/math/getrs.h +7 -3
- data/ext/nmatrix/math/idamax.h +2 -2
- data/ext/nmatrix/math/inc.h +12 -6
- data/ext/nmatrix/math/laswp.h +2 -2
- data/ext/nmatrix/math/long_dtype.h +2 -2
- data/ext/nmatrix/math/math.h +16 -10
- data/ext/nmatrix/math/nrm2.h +3 -3
- data/ext/nmatrix/math/potrs.h +7 -3
- data/ext/nmatrix/math/rot.h +2 -2
- data/ext/nmatrix/math/rotg.h +2 -2
- data/ext/nmatrix/math/scal.h +2 -2
- data/ext/nmatrix/math/swap.h +2 -2
- data/ext/nmatrix/math/trsm.h +7 -3
- data/ext/nmatrix/nm_memory.h +60 -0
- data/ext/nmatrix/nmatrix.cpp +13 -47
- data/ext/nmatrix/nmatrix.h +37 -12
- data/ext/nmatrix/ruby_constants.cpp +4 -2
- data/ext/nmatrix/ruby_constants.h +4 -2
- data/ext/nmatrix/ruby_nmatrix.c +937 -170
- data/ext/nmatrix/storage/common.cpp +2 -2
- data/ext/nmatrix/storage/common.h +2 -2
- data/ext/nmatrix/storage/{dense.cpp → dense/dense.cpp} +253 -100
- data/ext/nmatrix/storage/{dense.h → dense/dense.h} +6 -5
- data/ext/nmatrix/storage/{list.cpp → list/list.cpp} +517 -98
- data/ext/nmatrix/storage/{list.h → list/list.h} +13 -6
- data/ext/nmatrix/storage/storage.cpp +48 -19
- data/ext/nmatrix/storage/storage.h +4 -4
- data/ext/nmatrix/storage/yale/class.h +112 -43
- data/ext/nmatrix/storage/yale/iterators/base.h +2 -2
- data/ext/nmatrix/storage/yale/iterators/iterator.h +2 -2
- data/ext/nmatrix/storage/yale/iterators/row.h +2 -2
- data/ext/nmatrix/storage/yale/iterators/row_stored.h +2 -2
- data/ext/nmatrix/storage/yale/iterators/row_stored_nd.h +4 -3
- data/ext/nmatrix/storage/yale/iterators/stored_diagonal.h +2 -2
- data/ext/nmatrix/storage/yale/math/transpose.h +2 -2
- data/ext/nmatrix/storage/yale/yale.cpp +343 -52
- data/ext/nmatrix/storage/yale/yale.h +7 -3
- data/ext/nmatrix/types.h +2 -2
- data/ext/nmatrix/util/io.cpp +5 -5
- data/ext/nmatrix/util/io.h +2 -2
- data/ext/nmatrix/util/sl_list.cpp +40 -27
- data/ext/nmatrix/util/sl_list.h +3 -3
- data/ext/nmatrix/util/util.h +2 -2
- data/lib/nmatrix.rb +2 -2
- data/lib/nmatrix/blas.rb +2 -2
- data/lib/nmatrix/enumerate.rb +17 -6
- data/lib/nmatrix/io/market.rb +2 -3
- data/lib/nmatrix/io/mat5_reader.rb +2 -2
- data/lib/nmatrix/io/mat_reader.rb +2 -2
- data/lib/nmatrix/lapack.rb +46 -46
- data/lib/nmatrix/math.rb +213 -20
- data/lib/nmatrix/monkeys.rb +24 -2
- data/lib/nmatrix/nmatrix.rb +394 -9
- data/lib/nmatrix/nvector.rb +2 -64
- data/lib/nmatrix/rspec.rb +2 -2
- data/lib/nmatrix/shortcuts.rb +14 -61
- data/lib/nmatrix/version.rb +11 -3
- data/lib/nmatrix/yale_functions.rb +4 -4
- data/nmatrix.gemspec +2 -7
- data/scripts/mac-brew-gcc.sh +11 -8
- data/scripts/mac-mavericks-brew-gcc.sh +22 -0
- data/spec/00_nmatrix_spec.rb +116 -7
- data/spec/01_enum_spec.rb +17 -3
- data/spec/02_slice_spec.rb +11 -3
- data/spec/blas_spec.rb +5 -2
- data/spec/elementwise_spec.rb +5 -2
- data/spec/io_spec.rb +27 -17
- data/spec/lapack_spec.rb +157 -9
- data/spec/math_spec.rb +95 -4
- data/spec/nmatrix_yale_spec.rb +21 -26
- data/spec/rspec_monkeys.rb +27 -0
- data/spec/rspec_spec.rb +2 -2
- data/spec/shortcuts_spec.rb +5 -10
- data/spec/slice_set_spec.rb +6 -2
- data/spec/spec_helper.rb +3 -2
- data/spec/stat_spec.rb +174 -158
- metadata +15 -15
data/ext/nmatrix/storage/{dense.h → dense/dense.h}:

```diff
@@ -9,8 +9,8 @@
 //
 // == Copyright Information
 //
-// SciRuby is Copyright (c) 2010 -
-// NMatrix is Copyright (c)
+// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
+// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
 //
 // Please see LICENSE.txt for additional copyright notices.
 //
@@ -43,7 +43,7 @@
 
 #include "data/data.h"
 
-#include "common.h"
+#include "../common.h"
 
 #include "nmatrix.h"
 
@@ -73,8 +73,9 @@ DENSE_STORAGE* nm_dense_storage_create(nm::dtype_t dtype, size_t* shape, size_t
 void nm_dense_storage_delete(STORAGE* s);
 void nm_dense_storage_delete_ref(STORAGE* s);
 void nm_dense_storage_mark(STORAGE*);
-void
-void
+void nm_dense_storage_register(const STORAGE* s);
+void nm_dense_storage_unregister(const STORAGE* s);
+
 
 ///////////////
 // Accessors //
```
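The only functional change to `dense.h` besides the directory move is the new `nm_dense_storage_register`/`nm_dense_storage_unregister` pair, the visible surface of this release's main theme: keeping Ruby `VALUE`s alive while long-running C++ code can trigger the garbage collector. A minimal sketch of the intended call pattern, assuming (as the list-storage implementation below makes explicit) that registration only does real work for `nm::RUBYOBJ`-dtype storage:

```cpp
// Illustrative pattern only: `do_ruby_allocating_work` is a hypothetical
// stand-in for any code that may allocate Ruby objects and so trigger GC.
void do_ruby_allocating_work(STORAGE* s);  // assumed helper, not part of nmatrix

void process_storage(STORAGE* s) {
  nm_dense_storage_register(s);    // pin contained VALUEs (presumably a no-op for plain dtypes)
  do_ruby_allocating_work(s);      // pinned VALUEs now survive a GC pass
  nm_dense_storage_unregister(s);  // balanced release
}
```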
data/ext/nmatrix/storage/{list.cpp → list/list.cpp}:

```diff
@@ -9,8 +9,8 @@
 //
 // == Copyright Information
 //
-// SciRuby is Copyright (c) 2010 -
-// NMatrix is Copyright (c)
+// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
+// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
 //
 // Please see LICENSE.txt for additional copyright notices.
 //
@@ -25,7 +25,6 @@
 //
 // List-of-lists n-dimensional matrix storage. Uses singly-linked
 // lists.
-
 /*
  * Standard Includes
  */
@@ -34,21 +33,22 @@
 #include <algorithm> // std::min
 #include <iostream>
 #include <vector>
+#include <list>
 
 /*
  * Project Includes
 */
 
-#include "types.h"
+#include "../../types.h"
 
-#include "data/data.h"
+#include "../../data/data.h"
 
-#include "dense.h"
-#include "common.h"
+#include "../dense/dense.h"
+#include "../common.h"
 #include "list.h"
 
-#include "math/math.h"
-#include "util/sl_list.h"
+#include "../../math/math.h"
+#include "../../util/sl_list.h"
 
 /*
  * Macros
@@ -61,6 +61,8 @@
 
 extern "C" {
  static void slice_set_single(LIST_STORAGE* dest, LIST* l, void* val, size_t* coords, size_t* lengths, size_t n);
+ static void __nm_list_storage_unregister_temp_value_list(std::list<VALUE*>& temp_vals);
+ static void __nm_list_storage_unregister_temp_list_list(std::list<LIST*>& temp_vals, size_t recursions);
 }
 
 namespace nm { namespace list_storage {
```
```diff
@@ -78,11 +80,20 @@ public:
       offsets[i] += actual->offset[i];
       actual = reinterpret_cast<LIST_STORAGE*>(actual->src);
     }
+    nm_list_storage_register(actual);
+    nm_list_storage_register(ref);
     actual_shape_ = actual->shape;
 
     if (init_obj_ == Qnil) {
       init_obj_ = s->dtype == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(s->default_val) : rubyobj_from_cval(s->default_val, s->dtype).rval;
     }
+    nm_register_value(init_obj_);
+  }
+
+  ~RecurseData() {
+    nm_unregister_value(init_obj_);
+    nm_list_storage_unregister(ref);
+    nm_list_storage_unregister(actual);
   }
 
   dtype_t dtype() const { return ref->dtype; }
@@ -95,13 +106,13 @@ public:
   }
 
   size_t* copy_alloc_shape() const {
-    size_t* new_shape =
+    size_t* new_shape = NM_ALLOC_N(size_t, ref->dim);
     memcpy(new_shape, shape_, sizeof(size_t)*ref->dim);
     return new_shape;
   }
 
   size_t actual_shape(size_t rec) const {
-    return actual_shape_[
+    return actual_shape_[actual->dim - rec - 1];
   }
 
   size_t offset(size_t rec) const {
```
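`RecurseData` now registers the storages and the initial object it holds in its constructor and releases them in a new destructor, so GC protection tracks C++ object lifetime automatically. The idiom reduces to an RAII guard; a simplified sketch with a hypothetical class name (the real constructor also normalizes shapes and offsets):

```cpp
// RAII reduction of the RecurseData change: registration in the constructor,
// guaranteed unregistration when the guard leaves scope.
struct ListStorageGuard {
  const STORAGE* s_;
  explicit ListStorageGuard(const STORAGE* s) : s_(s) { nm_list_storage_register(s_); }
  ~ListStorageGuard() { nm_list_storage_unregister(s_); }
};
```

This lifetime coupling is also why `nm_list_map_merged_stored` (further down) heap-allocates its `tdata` and calls `delete &tdata` explicitly: the destructor must run before `nm_list_storage_delete(t)` frees the storage it would otherwise try to unregister.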
```diff
@@ -140,53 +151,157 @@ static bool eqeq_r(RecurseData& left, RecurseData& right, const LIST* l, const L
 template <typename SDType, typename TDType>
 static bool eqeq_empty_r(RecurseData& s, const LIST* l, size_t rec, const TDType* t_init);
 
-
 /*
  * Recursive helper for map_merged_stored_r which handles the case where one list is empty and the other is not.
  */
 static void map_empty_stored_r(RecurseData& result, RecurseData& s, LIST* x, const LIST* l, size_t rec, bool rev, const VALUE& t_init) {
+  if (s.dtype() == nm::RUBYOBJ) {
+    nm_list_storage_register_list(l, rec);
+  }
+  if (result.dtype() == nm::RUBYOBJ) {
+    nm_list_storage_register_list(x, rec);
+  }
+
   NODE *curr = l->first,
        *xcurr = NULL;
 
   // For reference matrices, make sure we start in the correct place.
-  size_t offset =
-  size_t x_shape =
+  size_t offset = s.offset(rec);
+  size_t x_shape = s.ref_shape(rec);
 
   while (curr && curr->key < offset) { curr = curr->next; }
   if (curr && curr->key - offset >= x_shape) curr = NULL;
 
   if (rec) {
+    std::list<LIST*> temp_vals;
     while (curr) {
       LIST* val = nm::list::create();
       map_empty_stored_r(result, s, val, reinterpret_cast<const LIST*>(curr->val), rec-1, rev, t_init);
 
       if (!val->first) nm::list::del(val, 0);
-      else
-
+      else {
+        nm_list_storage_register_list(val, rec-1);
+        temp_vals.push_front(val);
+        nm::list::insert_helper(x, xcurr, curr->key - offset, val);
+      }
       curr = curr->next;
       if (curr && curr->key - offset >= x_shape) curr = NULL;
     }
+    __nm_list_storage_unregister_temp_list_list(temp_vals, rec-1);
   } else {
+    std::list<VALUE*> temp_vals;
     while (curr) {
       VALUE val, s_val = rubyobj_from_cval(curr->val, s.dtype()).rval;
       if (rev) val = rb_yield_values(2, t_init, s_val);
       else     val = rb_yield_values(2, s_val, t_init);
 
-
+      nm_register_value(val);
+
+      if (rb_funcall(val, rb_intern("!="), 1, result.init_obj()) == Qtrue) {
       xcurr = nm::list::insert_helper(x, xcurr, curr->key - offset, val);
+        temp_vals.push_front(reinterpret_cast<VALUE*>(xcurr->val));
+        nm_register_value(*reinterpret_cast<VALUE*>(xcurr->val));
+      }
+      nm_unregister_value(val);
 
       curr = curr->next;
       if (curr && curr->key - offset >= x_shape) curr = NULL;
     }
+    __nm_list_storage_unregister_temp_value_list(temp_vals);
+  }
+
+  if (s.dtype() == nm::RUBYOBJ){
+    nm_list_storage_unregister_list(l, rec);
+  }
+  if (result.dtype() == nm::RUBYOBJ) {
+    nm_list_storage_unregister_list(x, rec);
   }
 
 }
 
 
+/*
+ * Recursive helper function for nm_list_map_stored
+ */
+static void map_stored_r(RecurseData& result, RecurseData& left, LIST* x, const LIST* l, size_t rec) {
+  if (left.dtype() == nm::RUBYOBJ) {
+    nm_list_storage_register_list(l, rec);
+  }
+  if (result.dtype() == nm::RUBYOBJ) {
+    nm_list_storage_register_list(x, rec);
+  }
+  NODE *lcurr = l->first,
+       *xcurr = x->first;
+
+  // For reference matrices, make sure we start in the correct place.
+  while (lcurr && lcurr->key < left.offset(rec)) { lcurr = lcurr->next; }
+
+  if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec)) lcurr = NULL;
+
+  if (rec) {
+    std::list<LIST*> temp_vals;
+    while (lcurr) {
+      size_t key;
+      LIST* val = nm::list::create();
+      map_stored_r(result, left, val, reinterpret_cast<const LIST*>(lcurr->val), rec-1);
+      key = lcurr->key - left.offset(rec);
+      lcurr = lcurr->next;
+
+      if (!val->first) nm::list::del(val, 0); // empty list -- don't insert
+      else {
+        nm_list_storage_register_list(val, rec-1);
+        temp_vals.push_front(val);
+        xcurr = nm::list::insert_helper(x, xcurr, key, val);
+      }
+      if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec)) lcurr = NULL;
+    }
+    __nm_list_storage_unregister_temp_list_list(temp_vals, rec-1);
+  } else {
+    std::list<VALUE*> temp_vals;
+    while (lcurr) {
+      size_t key;
+      VALUE val;
+
+      val = rb_yield_values(1, rubyobj_from_cval(lcurr->val, left.dtype()).rval);
+      key = lcurr->key - left.offset(rec);
+      lcurr = lcurr->next;
+
+      if (!rb_equal(val, result.init_obj())) {
+        xcurr = nm::list::insert_helper(x, xcurr, key, val);
+        temp_vals.push_front(reinterpret_cast<VALUE*>(xcurr->val));
+        nm_register_value(*reinterpret_cast<VALUE*>(xcurr->val));
+      }
+
+      if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec)) lcurr = NULL;
+    }
+    __nm_list_storage_unregister_temp_value_list(temp_vals);
+  }
+
+  if (left.dtype() == nm::RUBYOBJ) {
+    nm_list_storage_unregister_list(l, rec);
+  }
+  if (result.dtype() == nm::RUBYOBJ) {
+    nm_list_storage_unregister_list(x, rec);
+  }
+}
+
+
+
 /*
  * Recursive helper function for nm_list_map_merged_stored
  */
 static void map_merged_stored_r(RecurseData& result, RecurseData& left, RecurseData& right, LIST* x, const LIST* l, const LIST* r, size_t rec) {
+  if (left.dtype() == nm::RUBYOBJ) {
+    nm_list_storage_register_list(l, rec);
+  }
+  if (right.dtype() == nm::RUBYOBJ) {
+    nm_list_storage_register_list(r, rec);
+  }
+  if (result.dtype() == nm::RUBYOBJ) {
+    nm_list_storage_register_list(x, rec);
+  }
+
+
   NODE *lcurr = l->first,
        *rcurr = r->first,
        *xcurr = x->first;
@@ -199,6 +314,7 @@ static void map_merged_stored_r(RecurseData& result, RecurseData& left, RecurseD
   if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec)) lcurr = NULL;
 
   if (rec) {
+    std::list<LIST*> temp_vals;
     while (lcurr || rcurr) {
       size_t key;
       LIST* val = nm::list::create();
@@ -218,13 +334,19 @@ static void map_merged_stored_r(RecurseData& result, RecurseData& left, RecurseD
         rcurr = rcurr->next;
       }
 
-      if (!val->first) nm::list::del(val, 0); // empty list -- don't insert
-      else xcurr = nm::list::insert_helper(x, xcurr, key, val);
 
+      if (!val->first) nm::list::del(val, 0); // empty list -- don't insert
+      else {
+        nm_list_storage_register_list(val, rec-1);
+        temp_vals.push_front(val);
+        xcurr = nm::list::insert_helper(x, xcurr, key, val);
+      }
       if (rcurr && rcurr->key - right.offset(rec) >= result.ref_shape(rec)) rcurr = NULL;
       if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec)) lcurr = NULL;
     }
+    __nm_list_storage_unregister_temp_list_list(temp_vals, rec-1);
   } else {
+    std::list<VALUE*> temp_vals;
     while (lcurr || rcurr) {
       size_t key;
       VALUE val;
@@ -234,7 +356,7 @@ static void map_merged_stored_r(RecurseData& result, RecurseData& left, RecurseD
         key = lcurr->key - left.offset(rec);
         lcurr = lcurr->next;
       } else if (!lcurr || (rcurr && (rcurr->key - right.offset(rec) < lcurr->key - left.offset(rec)))) {
-
+        val = rb_yield_values(2, left.init_obj(), rubyobj_from_cval(rcurr->val, right.dtype()).rval);
         key = rcurr->key - right.offset(rec);
         rcurr = rcurr->next;
       } else { // == and both present
@@ -243,15 +365,35 @@ static void map_merged_stored_r(RecurseData& result, RecurseData& left, RecurseD
         lcurr = lcurr->next;
         rcurr = rcurr->next;
       }
-
+
+      nm_register_value(val);
+
+      if (rb_funcall(val, rb_intern("!="), 1, result.init_obj()) == Qtrue) {
       xcurr = nm::list::insert_helper(x, xcurr, key, val);
+        temp_vals.push_front(reinterpret_cast<VALUE*>(xcurr->val));
+        nm_register_value(*reinterpret_cast<VALUE*>(xcurr->val));
+      }
+
+      nm_unregister_value(val);
 
       if (rcurr && rcurr->key - right.offset(rec) >= result.ref_shape(rec)) rcurr = NULL;
       if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec)) lcurr = NULL;
     }
+    __nm_list_storage_unregister_temp_value_list(temp_vals);
+  }
+
+  if (left.dtype() == nm::RUBYOBJ) {
+    nm_list_storage_unregister_list(l, rec);
+  }
+  if (right.dtype() == nm::RUBYOBJ) {
+    nm_list_storage_unregister_list(r, rec);
+  }
+  if (result.dtype() == nm::RUBYOBJ) {
+    nm_list_storage_unregister_list(x, rec);
   }
 }
 
+
 /*
  * Recursive function, sets multiple values in a matrix from multiple source values. Also handles removal; returns true
  * if the recursion results in an empty list at that level (which signals that the current parent should be removed).
```
```diff
@@ -266,6 +408,12 @@ static bool slice_set(LIST_STORAGE* dest, LIST* l, size_t* coords, size_t* lengt
   using nm::list::insert_after;
   size_t* offsets = dest->offset;
 
+  nm_list_storage_register(dest);
+  if (dest->dtype == nm::RUBYOBJ) {
+    nm_register_values(reinterpret_cast<VALUE*>(v), v_size);
+    nm_list_storage_register_list(l, dest->dim - n - 1);
+  }
+
   // drill down into the structure
   NODE* prev = find_preceding_from_list(l, coords[n] + offsets[n]);
   NODE* node = NULL;
@@ -286,13 +434,16 @@ static bool slice_set(LIST_STORAGE* dest, LIST* l, size_t* coords, size_t* lengt
     }
 
     // At this point, it's guaranteed that there is a list here matching key.
-
+    std::list<LIST*> temp_lists;
     while (node) {
       // Recurse down into the list. If it returns true, it's empty, so we need to delete it.
       bool remove_parent = slice_set(dest, reinterpret_cast<LIST*>(node->val), coords, lengths, n+1, v, v_size, v_offset);
-
+      if (dest->dtype == nm::RUBYOBJ) {
+        temp_lists.push_front(reinterpret_cast<LIST*>(node->val));
+        nm_list_storage_register_list(reinterpret_cast<LIST*>(node->val), dest->dim - n - 2);
+      }
       if (remove_parent) {
-
+        NM_FREE(remove_by_node(l, prev, node));
         if (prev) node = prev->next ? prev->next : NULL;
         else      node = l->first ? l->first : NULL;
       } else { // move forward
@@ -313,12 +464,13 @@ static bool slice_set(LIST_STORAGE* dest, LIST* l, size_t* coords, size_t* lengt
         }
       }
     }
+    __nm_list_storage_unregister_temp_list_list(temp_lists, dest->dim - n - 2);
 
   } else {
 
     size_t i = 0;
     size_t key = i + offsets[n] + coords[n];
-
+    std::list<VALUE*> temp_vals;
     while (i < lengths[n]) {
       // Make sure we have an element to work with
       if (v_offset >= v_size) v_offset %= v_size;
@@ -327,7 +479,7 @@ static bool slice_set(LIST_STORAGE* dest, LIST* l, size_t* coords, size_t* lengt
       if (node->key == key) {
         if (v[v_offset] == *reinterpret_cast<D*>(dest->default_val)) { // remove zero value
 
-
+          NM_FREE(remove_by_node(l, (prev ? prev : l->first), node));
 
           if (prev) node = prev->next ? prev->next : NULL;
           else      node = l->first ? l->first : NULL;
@@ -338,7 +490,12 @@ static bool slice_set(LIST_STORAGE* dest, LIST* l, size_t* coords, size_t* lengt
           node = node->next ? node->next : NULL;
         }
       } else if (node->key > key) {
-        D* nv =
+        D* nv = NM_ALLOC(D); *nv = v[v_offset++];
+        if (dest->dtype == nm::RUBYOBJ) {
+          nm_register_value(*reinterpret_cast<VALUE*>(nv));
+          temp_vals.push_front(reinterpret_cast<VALUE*>(nv));
+        }
+
         if (prev) node = insert_after(prev, key, nv);
         else      node = insert_first_node(l, key, nv, sizeof(D));
 
@@ -346,7 +503,11 @@ static bool slice_set(LIST_STORAGE* dest, LIST* l, size_t* coords, size_t* lengt
           node = prev->next ? prev->next : NULL;
         }
       } else { // no node -- insert a new one
-        D* nv =
+        D* nv = NM_ALLOC(D); *nv = v[v_offset++];
+        if (dest->dtype == nm::RUBYOBJ) {
+          nm_register_value(*reinterpret_cast<VALUE*>(nv));
+          temp_vals.push_front(reinterpret_cast<VALUE*>(nv));
+        }
         if (prev) node = insert_after(prev, key, nv);
         else      node = insert_first_node(l, key, nv, sizeof(D));
 
@@ -356,7 +517,14 @@ static bool slice_set(LIST_STORAGE* dest, LIST* l, size_t* coords, size_t* lengt
 
       ++i; ++key;
     }
+    __nm_list_storage_unregister_temp_value_list(temp_vals);
+  }
+
+  if (dest->dtype == nm::RUBYOBJ) {
+    nm_unregister_values(reinterpret_cast<VALUE*>(v), v_size);
+    nm_list_storage_unregister_list(l, dest->dim - n - 1);
   }
+  nm_list_storage_unregister(dest);
 
   return (l->first) ? false : true;
 }
```
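Throughout these hunks, bare allocation and free calls are also swapped for `NM_ALLOC`, `NM_ALLOC_N`, and `NM_FREE`, which come from the new `data/ext/nmatrix/nm_memory.h` (+60 lines in the file list above). The header itself is not shown in this excerpt; a plausible reduction, assuming the macros simply add one indirection layer over Ruby's GC-aware allocators so native allocations count toward GC pressure:

```cpp
// Hypothetical reduction of nm_memory.h -- an assumption, not the shipped header.
#include <ruby.h>
#define NM_ALLOC(type)       ALLOC(type)          // one object, via Ruby's xmalloc
#define NM_ALLOC_N(type, n)  ALLOC_N(type, (n))   // n contiguous objects
#define NM_FREE(ptr)         xfree((void*)(ptr))  // release xmalloc'd memory
```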
```diff
@@ -364,8 +532,10 @@ static bool slice_set(LIST_STORAGE* dest, LIST* l, size_t* coords, size_t* lengt
 
 template <typename D>
 void set(VALUE left, SLICE* slice, VALUE right) {
+  NM_CONSERVATIVE(nm_register_value(left));
+  NM_CONSERVATIVE(nm_register_value(right));
   LIST_STORAGE* s = NM_STORAGE_LIST(left);
-
+
   std::pair<NMATRIX*,bool> nm_and_free =
     interpret_arg_as_dense_nmatrix(right, NM_DTYPE(left));
 
@@ -379,17 +549,27 @@ void set(VALUE left, SLICE* slice, VALUE right) {
     v_size = nm_storage_count_max_elements(t);
 
   } else if (TYPE(right) == T_ARRAY) {
+    nm_register_nmatrix(nm_and_free.first);
     v_size = RARRAY_LEN(right);
-    v =
+    v = NM_ALLOC_N(D, v_size);
+    if (NM_DTYPE(left) == nm::RUBYOBJ)
+      nm_register_values(reinterpret_cast<VALUE*>(v), v_size);
+
     for (size_t m = 0; m < v_size; ++m) {
       rubyval_to_cval(rb_ary_entry(right, m), s->dtype, &(v[m]));
     }
+    if (NM_DTYPE(left) == nm::RUBYOBJ)
+      nm_unregister_values(reinterpret_cast<VALUE*>(v), v_size);
+
   } else {
+    nm_register_nmatrix(nm_and_free.first);
     v = reinterpret_cast<D*>(rubyobj_to_cval(right, NM_DTYPE(left)));
   }
 
   if (v_size == 1 && *v == *reinterpret_cast<D*>(s->default_val)) {
-
+    if (*reinterpret_cast<D*>(nm_list_storage_get(s, slice)) != *reinterpret_cast<D*>(s->default_val)) {
+      nm::list::remove_recursive(s->rows, slice->coords, s->offset, slice->lengths, 0, s->dim);
+    }
   } else if (slice->single) {
     slice_set_single(s, s->rows, reinterpret_cast<void*>(v), slice->coords, slice->lengths, 0);
   } else {
@@ -403,7 +583,12 @@ void set(VALUE left, SLICE* slice, VALUE right) {
     if (nm_and_free.second) {
       nm_delete(nm_and_free.first);
     }
-  } else
+  } else {
+    NM_FREE(v);
+    nm_unregister_nmatrix(nm_and_free.first);
+  }
+  NM_CONSERVATIVE(nm_unregister_value(left));
+  NM_CONSERVATIVE(nm_unregister_value(right));
 }
 
 /*
@@ -411,7 +596,7 @@ void set(VALUE left, SLICE* slice, VALUE right) {
  */
 template <typename D>
 void init_default(LIST_STORAGE* s) {
-  s->default_val =
+  s->default_val = NM_ALLOC(D);
   *reinterpret_cast<D*>(s->default_val) = 0;
 }
 
@@ -438,13 +623,13 @@ extern "C" {
  * new storage. You don't need to free them, and you shouldn't re-use them.
  */
 LIST_STORAGE* nm_list_storage_create(nm::dtype_t dtype, size_t* shape, size_t dim, void* init_val) {
-  LIST_STORAGE* s =
+  LIST_STORAGE* s = NM_ALLOC( LIST_STORAGE );
 
   s->dim   = dim;
   s->shape = shape;
   s->dtype = dtype;
 
-  s->offset =
+  s->offset = NM_ALLOC_N(size_t, s->dim);
   memset(s->offset, 0, s->dim * sizeof(size_t));
 
   s->rows = nm::list::create();
@@ -461,7 +646,7 @@ LIST_STORAGE* nm_list_storage_create(nm::dtype_t dtype, size_t* shape, size_t di
 }
 
 /*
- *
+ * Destructor for list storage.
 */
 void nm_list_storage_delete(STORAGE* s) {
   if (s) {
@@ -469,30 +654,30 @@ void nm_list_storage_delete(STORAGE* s) {
     if (storage->count-- == 1) {
       nm::list::del( storage->rows, storage->dim - 1 );
 
-
-
-
-
+      NM_FREE(storage->shape);
+      NM_FREE(storage->offset);
+      NM_FREE(storage->default_val);
+      NM_FREE(s);
     }
   }
 }
 
 /*
- *
+ * Destructor for a list storage reference slice.
 */
 void nm_list_storage_delete_ref(STORAGE* s) {
   if (s) {
     LIST_STORAGE* storage = (LIST_STORAGE*)s;
 
     nm_list_storage_delete( reinterpret_cast<STORAGE*>(storage->src ) );
-
-
-
+    NM_FREE(storage->shape);
+    NM_FREE(storage->offset);
+    NM_FREE(s);
   }
 }
 
 /*
- *
+ * GC mark function for list storage.
 */
 void nm_list_storage_mark(STORAGE* storage_base) {
   LIST_STORAGE* storage = (LIST_STORAGE*)storage_base;
@@ -503,6 +688,85 @@ void nm_list_storage_mark(STORAGE* storage_base) {
   }
 }
 
+static void __nm_list_storage_unregister_temp_value_list(std::list<VALUE*>& temp_vals) {
+  for (std::list<VALUE*>::iterator it = temp_vals.begin(); it != temp_vals.end(); ++it) {
+    nm_unregister_value(**it);
+  }
+}
+
+static void __nm_list_storage_unregister_temp_list_list(std::list<LIST*>& temp_vals, size_t recursions) {
+  for (std::list<LIST*>::iterator it = temp_vals.begin(); it != temp_vals.end(); ++it) {
+    nm_list_storage_unregister_list(*it, recursions);
+  }
+}
+
+void nm_list_storage_register_node(const NODE* curr) {
+  nm_register_value(*reinterpret_cast<VALUE*>(curr->val));
+}
+
+void nm_list_storage_unregister_node(const NODE* curr) {
+  nm_unregister_value(*reinterpret_cast<VALUE*>(curr->val));
+}
+
+/**
+ * Gets rid of all instances of a given node in the registration list.
+ * Sometimes a node will get deleted and replaced deep in a recursion, but
+ * further up it will still get registered. This leads to a potential read
+ * after free during the GC marking. This function completely clears out a
+ * node so that this won't happen.
+ */
+void nm_list_storage_completely_unregister_node(const NODE* curr) {
+  nm_completely_unregister_value(*reinterpret_cast<VALUE*>(curr->val));
+}
+
+void nm_list_storage_register_list(const LIST* list, size_t recursions) {
+  NODE* next;
+  if (!list) return;
+  NODE* curr = list->first;
+
+  while (curr != NULL) {
+    next = curr->next;
+    if (recursions == 0) {
+      nm_list_storage_register_node(curr);
+    } else {
+      nm_list_storage_register_list(reinterpret_cast<LIST*>(curr->val), recursions - 1);
+    }
+    curr = next;
+  }
+}
+
+void nm_list_storage_unregister_list(const LIST* list, size_t recursions) {
+  NODE* next;
+  if (!list) return;
+  NODE* curr = list->first;
+
+  while (curr != NULL) {
+    next = curr->next;
+    if (recursions == 0) {
+      nm_list_storage_unregister_node(curr);
+    } else {
+      nm_list_storage_unregister_list(reinterpret_cast<LIST*>(curr->val), recursions - 1);
+    }
+    curr = next;
+  }
+}
+
+void nm_list_storage_register(const STORAGE* s) {
+  const LIST_STORAGE* storage = reinterpret_cast<const LIST_STORAGE*>(s);
+  if (storage && storage->dtype == nm::RUBYOBJ) {
+    nm_register_value(*reinterpret_cast<VALUE*>(storage->default_val));
+    nm_list_storage_register_list(storage->rows, storage->dim - 1);
+  }
+}
+
+void nm_list_storage_unregister(const STORAGE* s) {
+  const LIST_STORAGE* storage = reinterpret_cast<const LIST_STORAGE*>(s);
+  if (storage && storage->dtype == nm::RUBYOBJ) {
+    nm_unregister_value(*reinterpret_cast<VALUE*>(storage->default_val));
+    nm_list_storage_unregister_list(storage->rows, storage->dim - 1);
+  }
+}
+
 ///////////////
 // Accessors //
 ///////////////
```
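The registration walkers interpret `recursions` as the number of list levels between the node being visited and the leaf `VALUE`s, which is why callers consistently pass `dim - 1` for a whole matrix and `dest->dim - n - 2` one level down a slice recursion. A small usage sketch under that reading:

```cpp
// Illustrative only: pin every leaf VALUE of a RUBYOBJ list storage across
// a GC-unsafe region, then release. Leaves sit (dim - 1) levels below rows.
static void with_pinned_leaves(const LIST_STORAGE* s) {
  nm_list_storage_register_list(s->rows, s->dim - 1);   // descend to the VALUEs
  /* ... anything here may allocate Ruby objects safely ... */
  nm_list_storage_unregister_list(s->rows, s->dim - 1); // balanced release
}
```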
```diff
@@ -510,8 +774,7 @@ void nm_list_storage_mark(STORAGE* storage_base) {
 /*
  * Documentation goes here.
  */
-static NODE* list_storage_get_single_node(LIST_STORAGE* s, SLICE* slice)
-{
+static NODE* list_storage_get_single_node(LIST_STORAGE* s, SLICE* slice) {
   size_t r;
   LIST*  l = s->rows;
   NODE*  n;
@@ -532,6 +795,7 @@ static NODE* list_storage_get_single_node(LIST_STORAGE* s, SLICE* slice)
  */
 static void each_empty_with_indices_r(nm::list_storage::RecurseData& s, size_t rec, VALUE& stack) {
   VALUE empty = s.dtype() == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(s.init()) : s.init_obj();
+  NM_CONSERVATIVE(nm_register_value(stack));
 
   if (rec) {
     for (long index = 0; index < s.ref_shape(rec); ++index) {
@@ -549,12 +813,16 @@ static void each_empty_with_indices_r(nm::list_storage::RecurseData& s, size_t r
     }
     rb_ary_shift(stack);
   }
+  NM_CONSERVATIVE(nm_unregister_value(stack));
 }
 
 /*
  * Recursive helper function for each_with_indices, based on nm_list_storage_count_elements_r.
 */
 static void each_with_indices_r(nm::list_storage::RecurseData& s, const LIST* l, size_t rec, VALUE& stack) {
+  if (s.dtype() == nm::RUBYOBJ)
+    nm_list_storage_register_list(l, rec);
+  NM_CONSERVATIVE(nm_register_value(stack));
   NODE* curr = l->first;
 
   size_t offset = s.offset(rec);
@@ -594,7 +862,9 @@ static void each_with_indices_r(nm::list_storage::RecurseData& s, const LIST* l,
       rb_ary_pop(stack);
     }
   }
-
+  NM_CONSERVATIVE(nm_unregister_value(stack));
+  if (s.dtype() == nm::RUBYOBJ)
+    nm_list_storage_unregister_list(l, rec);
 }
 
 
@@ -602,6 +872,10 @@ static void each_with_indices_r(nm::list_storage::RecurseData& s, const LIST* l,
  * Recursive helper function for each_stored_with_indices, based on nm_list_storage_count_elements_r.
 */
 static void each_stored_with_indices_r(nm::list_storage::RecurseData& s, const LIST* l, size_t rec, VALUE& stack) {
+  if (s.dtype() == nm::RUBYOBJ)
+    nm_list_storage_register_list(l, rec);
+  NM_CONSERVATIVE(nm_register_value(stack));
+
   NODE* curr = l->first;
 
   size_t offset = s.offset(rec);
@@ -639,6 +913,9 @@ static void each_stored_with_indices_r(nm::list_storage::RecurseData& s, const L
       if (curr && curr->key - offset >= shape) curr = NULL;
     }
   }
+  NM_CONSERVATIVE(nm_unregister_value(stack));
+  if (s.dtype() == nm::RUBYOBJ)
+    nm_list_storage_unregister_list(l, rec);
 }
 
 
@@ -648,7 +925,11 @@ static void each_stored_with_indices_r(nm::list_storage::RecurseData& s, const L
 */
 VALUE nm_list_each_with_indices(VALUE nmatrix, bool stored) {
 
+  NM_CONSERVATIVE(nm_register_value(nmatrix));
+
   // If we don't have a block, return an enumerator.
+  RETURN_SIZED_ENUMERATOR_PRE
+  NM_CONSERVATIVE(nm_unregister_value(nmatrix));
   RETURN_SIZED_ENUMERATOR(nmatrix, 0, 0, 0);
 
   nm::list_storage::RecurseData sdata(NM_STORAGE_LIST(nmatrix));
```
```diff
@@ -658,14 +939,71 @@ VALUE nm_list_each_with_indices(VALUE nmatrix, bool stored) {
   if (stored) each_stored_with_indices_r(sdata, sdata.top_level_list(), sdata.dim() - 1, stack);
   else        each_with_indices_r(sdata, sdata.top_level_list(), sdata.dim() - 1, stack);
 
+  NM_CONSERVATIVE(nm_unregister_value(nmatrix));
   return nmatrix;
 }
 
 
+/*
+ * map merged stored iterator. Always returns a matrix containing RubyObjects which probably needs to be casted.
+ */
+VALUE nm_list_map_stored(VALUE left, VALUE init) {
+  NM_CONSERVATIVE(nm_register_value(left));
+  NM_CONSERVATIVE(nm_register_value(init));
+
+  bool scalar = false;
+
+  LIST_STORAGE *s = NM_STORAGE_LIST(left);
+
+  // For each matrix, if it's a reference, we want to deal directly with the original (with appropriate offsetting)
+  nm::list_storage::RecurseData sdata(s);
+
+  void* scalar_init = NULL;
+
+  //if (!rb_block_given_p()) {
+  //  rb_raise(rb_eNotImpError, "RETURN_SIZED_ENUMERATOR probably won't work for a map_merged since no merged object is created");
+  //}
+  // If we don't have a block, return an enumerator.
+  RETURN_SIZED_ENUMERATOR_PRE
+  NM_CONSERVATIVE(nm_unregister_value(left));
+  NM_CONSERVATIVE(nm_unregister_value(init));
+  RETURN_SIZED_ENUMERATOR(left, 0, 0, 0); // FIXME: Test this. Probably won't work. Enable above code instead.
+
+  // Figure out default value if none provided by the user
+  if (init == Qnil) {
+    nm_unregister_value(init);
+    init = rb_yield_values(1, sdata.init_obj());
+    nm_register_value(init);
+  }
+  // Allocate a new shape array for the resulting matrix.
+  void* init_val = NM_ALLOC(VALUE);
+  memcpy(init_val, &init, sizeof(VALUE));
+  nm_register_value(*reinterpret_cast<VALUE*>(init_val));
+
+  NMATRIX* result = nm_create(nm::LIST_STORE, nm_list_storage_create(nm::RUBYOBJ, sdata.copy_alloc_shape(), s->dim, init_val));
+  LIST_STORAGE* r = reinterpret_cast<LIST_STORAGE*>(result->storage);
+  nm::list_storage::RecurseData rdata(r, init);
+  nm_register_nmatrix(result);
+  map_stored_r(rdata, sdata, rdata.top_level_list(), sdata.top_level_list(), sdata.dim() - 1);
+
+  VALUE to_return = Data_Wrap_Struct(CLASS_OF(left), nm_mark, nm_delete, result);
+
+  nm_unregister_nmatrix(result);
+  nm_unregister_value(*reinterpret_cast<VALUE*>(init_val));
+  NM_CONSERVATIVE(nm_unregister_value(init));
+  NM_CONSERVATIVE(nm_unregister_value(left));
+
+  return to_return;
+}
+
+
 /*
  * map merged stored iterator. Always returns a matrix containing RubyObjects which probably needs to be casted.
 */
 VALUE nm_list_map_merged_stored(VALUE left, VALUE right, VALUE init) {
+  NM_CONSERVATIVE(nm_register_value(left));
+  NM_CONSERVATIVE(nm_register_value(right));
+  NM_CONSERVATIVE(nm_register_value(init));
 
   bool scalar = false;
 
```
```diff
@@ -679,8 +1017,7 @@ VALUE nm_list_map_merged_stored(VALUE left, VALUE right, VALUE init) {
 
   // right might be a scalar, in which case this is a scalar operation.
   if (TYPE(right) != T_DATA || (RDATA(right)->dfree != (RUBY_DATA_FUNC)nm_delete && RDATA(right)->dfree != (RUBY_DATA_FUNC)nm_delete_ref)) {
-    nm::dtype_t r_dtype = nm_dtype_min(right);
-
+    nm::dtype_t r_dtype = Upcast[NM_DTYPE(left)][nm_dtype_min(right)];
     scalar_init = rubyobj_to_cval(right, r_dtype); // make a copy of right
 
     t = reinterpret_cast<LIST_STORAGE*>(nm_list_storage_create(r_dtype, sdata.copy_alloc_shape(), s->dim, scalar_init));
@@ -693,26 +1030,43 @@ VALUE nm_list_map_merged_stored(VALUE left, VALUE right, VALUE init) {
   //  rb_raise(rb_eNotImpError, "RETURN_SIZED_ENUMERATOR probably won't work for a map_merged since no merged object is created");
   //}
   // If we don't have a block, return an enumerator.
+  RETURN_SIZED_ENUMERATOR_PRE
+  NM_CONSERVATIVE(nm_unregister_value(left));
+  NM_CONSERVATIVE(nm_unregister_value(right));
+  NM_CONSERVATIVE(nm_unregister_value(init));
   RETURN_SIZED_ENUMERATOR(left, 0, 0, 0); // FIXME: Test this. Probably won't work. Enable above code instead.
 
   // Figure out default value if none provided by the user
-  nm::list_storage::RecurseData tdata(t);
-  if (init == Qnil)
+  nm::list_storage::RecurseData& tdata = *(new nm::list_storage::RecurseData(t)); //FIXME: this is a hack to make sure that we can run the destructor before nm_list_storage_delete(t) below.
+  if (init == Qnil) {
+    nm_unregister_value(init);
+    init = rb_yield_values(2, sdata.init_obj(), tdata.init_obj());
+    nm_register_value(init);
+  }
 
-
-  void* init_val =
+  // Allocate a new shape array for the resulting matrix.
+  void* init_val = NM_ALLOC(VALUE);
   memcpy(init_val, &init, sizeof(VALUE));
+  nm_register_value(*reinterpret_cast<VALUE*>(init_val));
 
   NMATRIX* result = nm_create(nm::LIST_STORE, nm_list_storage_create(nm::RUBYOBJ, sdata.copy_alloc_shape(), s->dim, init_val));
   LIST_STORAGE* r = reinterpret_cast<LIST_STORAGE*>(result->storage);
   nm::list_storage::RecurseData rdata(r, init);
-
   map_merged_stored_r(rdata, sdata, tdata, rdata.top_level_list(), sdata.top_level_list(), tdata.top_level_list(), sdata.dim() - 1);
 
+  delete &tdata;
   // If we are working with a scalar operation
   if (scalar) nm_list_storage_delete(t);
 
-
+  VALUE to_return = Data_Wrap_Struct(CLASS_OF(left), nm_mark, nm_delete, result);
+
+  nm_unregister_value(*reinterpret_cast<VALUE*>(init_val));
+
+  NM_CONSERVATIVE(nm_unregister_value(init));
+  NM_CONSERVATIVE(nm_unregister_value(right));
+  NM_CONSERVATIVE(nm_unregister_value(left));
+
+  return to_return;
 }
 
 
```
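Note the ordering around the enumerator early return in both map functions: `RETURN_SIZED_ENUMERATOR` can leave the function before the normal cleanup runs, so every conservatively registered `VALUE` is unregistered first, with `RETURN_SIZED_ENUMERATOR_PRE` (defined in `nmatrix.h`, not shown in this excerpt) apparently setting up that early-exit path. The same balance applies to any early return; a minimal sketch with a hypothetical iterator:

```cpp
// Sketch: balance conservative registrations on every exit path.
static VALUE some_iterator(VALUE self) {
  NM_CONSERVATIVE(nm_register_value(self));
  if (!rb_block_given_p()) {                     // early-return path
    NM_CONSERVATIVE(nm_unregister_value(self));  // must balance before leaving
    return rb_funcall(self, rb_intern("to_enum"), 0);
  }
  /* ... body that may allocate Ruby objects ... */
  NM_CONSERVATIVE(nm_unregister_value(self));
  return self;
}
```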
```diff
@@ -720,13 +1074,14 @@ VALUE nm_list_map_merged_stored(VALUE left, VALUE right, VALUE init) {
  * Copy a slice of a list matrix into a regular list matrix.
 */
 static LIST* slice_copy(const LIST_STORAGE* src, LIST* src_rows, size_t* coords, size_t* lengths, size_t n) {
-
+  nm_list_storage_register(src);
   void *val = NULL;
   int key;
 
   LIST* dst_rows = nm::list::create();
   NODE* src_node = src_rows->first;
-
+  std::list<VALUE*> temp_vals;
+  std::list<LIST*> temp_lists;
   while (src_node) {
     key = src_node->key - (src->offset[n] + coords[n]);
 
@@ -737,16 +1092,28 @@ static LIST* slice_copy(const LIST_STORAGE* src, LIST* src_rows, size_t* coords,
                          coords,
                          lengths,
                          n + 1 );
-
-
+      if (val) {
+        if (src->dtype == nm::RUBYOBJ) {
+          nm_list_storage_register_list(reinterpret_cast<LIST*>(val), src->dim - n - 2);
+          temp_lists.push_front(reinterpret_cast<LIST*>(val));
+        }
+        nm::list::insert_copy(dst_rows, false, key, val, sizeof(LIST));
+      }
+    } else { // matches src->dim - n > 1
+      if (src->dtype == nm::RUBYOBJ) {
+        nm_register_value(*reinterpret_cast<VALUE*>(src_node->val));
+        temp_vals.push_front(reinterpret_cast<VALUE*>(src_node->val));
+      }
+      nm::list::insert_copy(dst_rows, false, key, src_node->val, DTYPE_SIZES[src->dtype]);
     }
-
-    else nm::list::insert_copy(dst_rows, false, key, src_node->val, DTYPE_SIZES[src->dtype]);
   }
-
   src_node = src_node->next;
+  }
+  if (src->dtype == nm::RUBYOBJ) {
+    __nm_list_storage_unregister_temp_list_list(temp_lists, src->dim - n - 2);
+    __nm_list_storage_unregister_temp_value_list(temp_vals);
   }
-
+  nm_list_storage_unregister(src);
   return dst_rows;
 }
 
@@ -756,21 +1123,31 @@ static LIST* slice_copy(const LIST_STORAGE* src, LIST* src_rows, size_t* coords,
 void* nm_list_storage_get(const STORAGE* storage, SLICE* slice) {
   LIST_STORAGE* s = (LIST_STORAGE*)storage;
   LIST_STORAGE* ns = NULL;
-
+
+  nm_list_storage_register(s);
 
   if (slice->single) {
-    n = list_storage_get_single_node(s, slice);
+    NODE* n = list_storage_get_single_node(s, slice);
+    nm_list_storage_unregister(s);
     return (n ? n->val : s->default_val);
+
   } else {
-    void *init_val =
+    void *init_val = NM_ALLOC_N(char, DTYPE_SIZES[s->dtype]);
     memcpy(init_val, s->default_val, DTYPE_SIZES[s->dtype]);
+    if (s->dtype == nm::RUBYOBJ)
+      nm_register_value(*reinterpret_cast<VALUE*>(init_val));
 
-    size_t *shape =
+    size_t *shape = NM_ALLOC_N(size_t, s->dim);
    memcpy(shape, slice->lengths, sizeof(size_t) * s->dim);
 
    ns = nm_list_storage_create(s->dtype, shape, s->dim, init_val);
-
+
    ns->rows = slice_copy(s, s->rows, slice->coords, slice->lengths, 0);
+
+    if (s->dtype == nm::RUBYOBJ)
+      nm_unregister_value(*reinterpret_cast<VALUE*>(init_val));
+    nm_list_storage_unregister(s);
+
    return ns;
   }
 }
@@ -782,20 +1159,21 @@ void* nm_list_storage_get(const STORAGE* storage, SLICE* slice) {
 void* nm_list_storage_ref(const STORAGE* storage, SLICE* slice) {
   LIST_STORAGE* s = (LIST_STORAGE*)storage;
   LIST_STORAGE* ns = NULL;
-
+  nm_list_storage_register(s);
 
   //TODO: It needs a refactoring.
   if (slice->single) {
-    n = list_storage_get_single_node(s, slice);
+    NODE* n = list_storage_get_single_node(s, slice);
+    nm_list_storage_unregister(s);
    return (n ? n->val : s->default_val);
   }
   else {
-    ns =
+    ns = NM_ALLOC( LIST_STORAGE );
 
    ns->dim = s->dim;
    ns->dtype = s->dtype;
-    ns->offset =
-    ns->shape =
+    ns->offset = NM_ALLOC_N(size_t, ns->dim);
+    ns->shape  = NM_ALLOC_N(size_t, ns->dim);
 
    for (size_t i = 0; i < ns->dim; ++i) {
      ns->offset[i] = slice->coords[i] + s->offset[i];
@@ -807,7 +1185,7 @@ void* nm_list_storage_ref(const STORAGE* storage, SLICE* slice) {
 
    s->src->count++;
    ns->src = s->src;
-
+    nm_list_storage_unregister(s);
    return ns;
   }
 }
@@ -817,10 +1195,16 @@ void* nm_list_storage_ref(const STORAGE* storage, SLICE* slice) {
  * Recursive function, sets multiple values in a matrix from a single source value.
 */
 static void slice_set_single(LIST_STORAGE* dest, LIST* l, void* val, size_t* coords, size_t* lengths, size_t n) {
+  nm_list_storage_register(dest);
+  if (dest->dtype == nm::RUBYOBJ) {
+    nm_register_value(*reinterpret_cast<VALUE*>(val));
+    nm_list_storage_register_list(l, dest->dim - n - 1);
+  }
 
   // drill down into the structure
   NODE* node = NULL;
   if (dest->dim - n > 1) {
+    std::list<LIST*> temp_nodes;
    for (size_t i = 0; i < lengths[n]; ++i) {
 
      size_t key = i + dest->offset[n] + coords[n];
@@ -833,10 +1217,17 @@ static void slice_set_single(LIST_STORAGE* dest, LIST* l, void* val, size_t* coo
        node = node->next; // correct rank already exists.
      }
 
+      if (dest->dtype == nm::RUBYOBJ) {
+        temp_nodes.push_front(reinterpret_cast<LIST*>(node->val));
+        nm_list_storage_register_list(reinterpret_cast<LIST*>(node->val), dest->dim - n - 2);
+      }
+
      // cast it to a list and recurse
      slice_set_single(dest, reinterpret_cast<LIST*>(node->val), val, coords, lengths, n + 1);
    }
+    __nm_list_storage_unregister_temp_list_list(temp_nodes, dest->dim - n - 2);
   } else {
+    std::list<VALUE*> temp_vals;
    for (size_t i = 0; i < lengths[n]; ++i) {
 
      size_t key = i + dest->offset[n] + coords[n];
@@ -846,7 +1237,18 @@ static void slice_set_single(LIST_STORAGE* dest, LIST* l, void* val, size_t* coo
      } else {
        node = nm::list::replace_insert_after(node, key, val, true, DTYPE_SIZES[dest->dtype]);
      }
+      if (dest->dtype == nm::RUBYOBJ) {
+        temp_vals.push_front(reinterpret_cast<VALUE*>(node->val));
+        nm_register_value(*reinterpret_cast<VALUE*>(node->val));
+      }
    }
+    __nm_list_storage_unregister_temp_value_list(temp_vals);
+  }
+
+  nm_list_storage_unregister(dest);
+  if (dest->dtype == nm::RUBYOBJ) {
+    nm_unregister_value(*reinterpret_cast<VALUE*>(val));
+    nm_list_storage_unregister_list(l, dest->dim - n - 1);
   }
 }
 
@@ -870,6 +1272,9 @@ void nm_list_storage_set(VALUE left, SLICE* slice, VALUE right) {
 */
 NODE* nm_list_storage_insert(STORAGE* storage, SLICE* slice, void* val) {
   LIST_STORAGE* s = (LIST_STORAGE*)storage;
+  nm_list_storage_register(s);
+  if (s->dtype == nm::RUBYOBJ)
+    nm_register_value(*reinterpret_cast<VALUE*>(val));
   // Pretend dims = 2
   // Then coords is going to be size 2
   // So we need to find out if some key already exists
@@ -878,12 +1283,16 @@ NODE* nm_list_storage_insert(STORAGE* storage, SLICE* slice, void* val) {
   LIST* l = s->rows;
 
   // drill down into the structure
-  for (r =
-    n = nm::list::insert(l, false, s->offset[
+  for (r = 0; r < s->dim -1; ++r) {
+    n = nm::list::insert(l, false, s->offset[r] + slice->coords[s->dim - r], nm::list::create());
    l = reinterpret_cast<LIST*>(n->val);
   }
 
-
+  nm_list_storage_unregister(s);
+  if (s->dtype == nm::RUBYOBJ)
+    nm_unregister_value(*reinterpret_cast<VALUE*>(val));
+
+  return nm::list::insert(l, true, s->offset[r] + slice->coords[r], val);
 }
 
 /*
@@ -937,10 +1346,10 @@ STORAGE* nm_list_storage_matrix_multiply(const STORAGE_PAIR& casted_storage, siz
  * it's a sparse matrix.
 */
 VALUE nm_list_storage_to_hash(const LIST_STORAGE* s, const nm::dtype_t dtype) {
-
+  nm_list_storage_register(s);
   // Get the default value for the list storage.
   VALUE default_value = rubyobj_from_cval(s->default_val, dtype).rval;
-
+  nm_list_storage_unregister(s);
   // Recursively copy each dimension of the matrix into a nested hash.
   return nm_list_copy_to_hash(s->rows, dtype, s->dim - 1, default_value);
 }
@@ -1006,18 +1415,21 @@ size_t nm_list_storage_count_nd_elements(const LIST_STORAGE* s)
  * List storage copy constructor C access.
 */
 
-LIST_STORAGE* nm_list_storage_copy(const LIST_STORAGE* rhs)
-
-  size_t *shape =
+LIST_STORAGE* nm_list_storage_copy(const LIST_STORAGE* rhs) {
+  nm_list_storage_register(rhs);
+  size_t *shape = NM_ALLOC_N(size_t, rhs->dim);
   memcpy(shape, rhs->shape, sizeof(size_t) * rhs->dim);
 
-  void *init_val =
+  void *init_val = NM_ALLOC_N(char, DTYPE_SIZES[rhs->dtype]);
   memcpy(init_val, rhs->default_val, DTYPE_SIZES[rhs->dtype]);
 
   LIST_STORAGE* lhs = nm_list_storage_create(rhs->dtype, shape, rhs->dim, init_val);
-
+  nm_list_storage_register(lhs);
+
   lhs->rows = slice_copy(rhs, rhs->rows, lhs->offset, lhs->shape, 0);
 
+  nm_list_storage_unregister(rhs);
+  nm_list_storage_unregister(lhs);
   return lhs;
 }
 
@@ -1057,27 +1469,31 @@ namespace list_storage {
 */
 template <typename LDType, typename RDType>
 static LIST_STORAGE* cast_copy(const LIST_STORAGE* rhs, dtype_t new_dtype) {
-
+  nm_list_storage_register(rhs);
   // allocate and copy shape
-  size_t* shape =
+  size_t* shape = NM_ALLOC_N(size_t, rhs->dim);
   memcpy(shape, rhs->shape, rhs->dim * sizeof(size_t));
 
   // copy default value
-  LDType* default_val =
+  LDType* default_val = NM_ALLOC_N(LDType, 1);
   *default_val = *reinterpret_cast<RDType*>(rhs->default_val);
 
   LIST_STORAGE* lhs = nm_list_storage_create(new_dtype, shape, rhs->dim, default_val);
   //lhs->rows = nm::list::create();
 
+  nm_list_storage_register(lhs);
   // TODO: Needs optimization. When matrix is reference it is copped twice.
   if (rhs->src == rhs)
    nm::list::cast_copy_contents<LDType, RDType>(lhs->rows, rhs->rows, rhs->dim - 1);
   else {
    LIST_STORAGE *tmp = nm_list_storage_copy(rhs);
+    nm_list_storage_register(tmp);
    nm::list::cast_copy_contents<LDType, RDType>(lhs->rows, tmp->rows, rhs->dim - 1);
+    nm_list_storage_unregister(tmp);
    nm_list_storage_delete(tmp);
   }
-
+  nm_list_storage_unregister(lhs);
+  nm_list_storage_unregister(rhs);
   return lhs;
 }
 
@@ -1196,13 +1612,16 @@ extern "C" {
   return nm_list_storage_to_hash(NM_STORAGE_LIST(self), NM_DTYPE(self));
 }
 
-
-
-
-
-
-
-
-
-
+/*
+ * call-seq:
+ *     __list_default_value__ -> ...
+ *
+ * Get the default_value property from a list matrix.
+ */
+VALUE nm_list_default_value(VALUE self) {
+  NM_CONSERVATIVE(nm_register_value(self));
+  VALUE to_return = (NM_DTYPE(self) == nm::RUBYOBJ) ? *reinterpret_cast<VALUE*>(NM_DEFAULT_VAL(self)) : rubyobj_from_cval(NM_DEFAULT_VAL(self), NM_DTYPE(self)).rval;
+  NM_CONSERVATIVE(nm_unregister_value(self));
+  return to_return;
+}
 } // end of extern "C" block
```
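The previously empty block at the end of the file becomes `nm_list_default_value`, the C implementation behind a `__list_default_value__` Ruby method. The binding itself would live in `ruby_nmatrix.c` (which grows substantially in this release, per the file list); a hypothetical sketch of what such a registration typically looks like:

```cpp
// Hypothetical binding sketch -- the real call site is in ruby_nmatrix.c,
// which is not part of the excerpt shown above.
extern "C" VALUE nm_list_default_value(VALUE self);

extern "C" void bind_list_default_value(VALUE cNMatrix) {
  rb_define_method(cNMatrix, "__list_default_value__",
                   RUBY_METHOD_FUNC(nm_list_default_value), 0);
}
```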