nmatrix 0.0.5 → 0.0.6
- checksums.yaml +4 -4
- data/History.txt +102 -10
- data/README.rdoc +24 -32
- data/Rakefile +1 -1
- data/ext/nmatrix/data/complex.h +9 -0
- data/ext/nmatrix/data/data.cpp +78 -4
- data/ext/nmatrix/data/data.h +86 -54
- data/ext/nmatrix/data/rational.h +2 -0
- data/ext/nmatrix/data/ruby_object.h +38 -8
- data/ext/nmatrix/extconf.rb +13 -7
- data/ext/nmatrix/nmatrix.cpp +262 -139
- data/ext/nmatrix/nmatrix.h +11 -4
- data/ext/nmatrix/storage/common.cpp +20 -13
- data/ext/nmatrix/storage/common.h +18 -12
- data/ext/nmatrix/storage/dense.cpp +122 -192
- data/ext/nmatrix/storage/dense.h +4 -2
- data/ext/nmatrix/storage/list.cpp +467 -636
- data/ext/nmatrix/storage/list.h +6 -3
- data/ext/nmatrix/storage/storage.cpp +83 -46
- data/ext/nmatrix/storage/storage.h +7 -7
- data/ext/nmatrix/storage/yale.cpp +621 -361
- data/ext/nmatrix/storage/yale.h +21 -9
- data/ext/nmatrix/ttable_helper.rb +27 -31
- data/ext/nmatrix/types.h +1 -1
- data/ext/nmatrix/util/math.cpp +9 -10
- data/ext/nmatrix/util/sl_list.cpp +1 -7
- data/ext/nmatrix/util/sl_list.h +0 -118
- data/lib/nmatrix/blas.rb +59 -18
- data/lib/nmatrix/monkeys.rb +0 -52
- data/lib/nmatrix/nmatrix.rb +136 -9
- data/lib/nmatrix/nvector.rb +33 -0
- data/lib/nmatrix/shortcuts.rb +95 -16
- data/lib/nmatrix/version.rb +1 -1
- data/lib/nmatrix/yale_functions.rb +25 -19
- data/spec/blas_spec.rb +1 -19
- data/spec/elementwise_spec.rb +132 -17
- data/spec/lapack_spec.rb +0 -3
- data/spec/nmatrix_list_spec.rb +18 -0
- data/spec/nmatrix_spec.rb +44 -18
- data/spec/nmatrix_yale_spec.rb +1 -3
- data/spec/shortcuts_spec.rb +26 -36
- data/spec/slice_spec.rb +2 -4
- metadata +2 -2
data/ext/nmatrix/storage/dense.h
CHANGED
@@ -78,6 +78,9 @@ void nm_dense_storage_mark(void*);
 // Accessors //
 ///////////////
 
+
+VALUE nm_dense_map_pair(VALUE self, VALUE right);
+VALUE nm_dense_map(VALUE self);
 VALUE nm_dense_each(VALUE nmatrix);
 VALUE nm_dense_each_with_indices(VALUE nmatrix);
 void* nm_dense_storage_get(STORAGE* s, SLICE* slice);
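The two declarations added here back block-based mapping for dense storage: nm_dense_map yields each entry of one matrix to a Ruby block, while nm_dense_map_pair yields corresponding entries of two matrices. A minimal Ruby sketch of the behavior they support, under the assumption that the 0.0.x constructor form NMatrix.new(:dense, shape, values, dtype) is available and that NMatrix#map and the element-wise operators are routed through these entry points:

    require 'nmatrix'

    a = NMatrix.new(:dense, [2, 2], [1, 2, 3, 4],     :int64)
    b = NMatrix.new(:dense, [2, 2], [10, 20, 30, 40], :int64)

    # Single-matrix map: each value is yielded to the block (nm_dense_map).
    doubled = a.map { |v| v * 2 }

    # Pairwise map: values from both operands are yielded together
    # (nm_dense_map_pair); element-wise arithmetic like a + b builds on this.
    sum = a + b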
@@ -96,7 +99,6 @@ bool nm_dense_storage_is_hermitian(const DENSE_STORAGE* mat, int lda);
 // Math //
 //////////
 
-STORAGE* nm_dense_storage_ew_op(nm::ewop_t op, const STORAGE* left, const STORAGE* right, VALUE scalar);
 STORAGE* nm_dense_storage_matrix_multiply(const STORAGE_PAIR& casted_storage, size_t* resulting_shape, bool vector);
 
 /////////////
@@ -112,7 +114,7 @@ void nm_dense_storage_coords(const DENSE_STORAGE* s, const size_t slice_pos, siz
 
 DENSE_STORAGE* nm_dense_storage_copy(const DENSE_STORAGE* rhs);
 STORAGE* nm_dense_storage_copy_transposed(const STORAGE* rhs_base);
-STORAGE* nm_dense_storage_cast_copy(const STORAGE* rhs, nm::dtype_t new_dtype);
+STORAGE* nm_dense_storage_cast_copy(const STORAGE* rhs, nm::dtype_t new_dtype, void*);
 
 } // end of extern "C" block
 
data/ext/nmatrix/storage/list.cpp
CHANGED
@@ -62,20 +62,189 @@ namespace nm { namespace list_storage {
  * Forward Declarations
  */
 
+class RecurseData {
+public:
+  // Note that providing init_obj argument does not override init.
+  RecurseData(const LIST_STORAGE* s, VALUE init_obj__ = Qnil) : ref(s), actual(s), shape_(s->shape), offsets(s->dim, 0), init_(s->default_val), init_obj_(init_obj__) {
+    while (actual->src != actual) {
+      for (size_t i = 0; i < s->dim; ++i) // update offsets as we recurse
+        offsets[i] += actual->offset[i];
+      actual = reinterpret_cast<LIST_STORAGE*>(actual->src);
+    }
+    actual_shape_ = actual->shape;
+
+    if (init_obj_ == Qnil) {
+      init_obj_ = s->dtype == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(s->default_val) : rubyobj_from_cval(s->default_val, s->dtype).rval;
+    }
+  }
+
+  dtype_t dtype() const { return ref->dtype; }
+
+
+  size_t dim() const { return ref->dim; }
+
+  size_t shape(size_t rec) const {
+    return shape_[ref->dim - rec - 1];
+  }
+
+  size_t* copy_alloc_shape() const {
+    size_t* new_shape = ALLOC_N(size_t, ref->dim);
+    memcpy(new_shape, shape_, sizeof(size_t)*ref->dim);
+    return new_shape;
+  }
+
+  size_t actual_shape(size_t rec) const {
+    return actual_shape_[ref->dim - rec - 1];
+  }
+
+  size_t offset(size_t rec) const {
+    return offsets[ref->dim - rec - 1];
+  }
+
+  void* init() const {
+    return init_;
+  }
+
+  VALUE init_obj() const { return init_obj_; }
+
+  LIST* top_level_list() const {
+    return reinterpret_cast<LIST*>(actual->rows);
+  }
+
+  const LIST_STORAGE* ref;
+  const LIST_STORAGE* actual;
+
+  size_t* shape_; // of ref
+  size_t* actual_shape_;
+protected:
+  std::vector<size_t> offsets; // relative to actual
+  void* init_;
+  VALUE init_obj_;
+
+};
+
+
 template <typename LDType, typename RDType>
 static LIST_STORAGE* cast_copy(const LIST_STORAGE* rhs, dtype_t new_dtype);
 
 template <typename LDType, typename RDType>
-static bool
+static bool eqeq_r(RecurseData& left, RecurseData& right, const LIST* l, const LIST* r, size_t rec);
+
+template <typename SDType, typename TDType>
+static bool eqeq_empty_r(RecurseData& s, const LIST* l, size_t rec, const TDType* t_init);
+
+
+/*
+ * Recursive helper for map_merged_stored_r which handles the case where one list is empty and the other is not.
+ */
+static void map_empty_stored_r(RecurseData& result, RecurseData& s, LIST* x, const LIST* l, size_t rec, bool rev, const VALUE& t_init) {
+  NODE *curr = l->first,
+       *xcurr = NULL;
 
-
-
+  // For reference matrices, make sure we start in the correct place.
+  size_t offset  = result.offset(rec);
+  size_t x_shape = result.shape(rec);
 
-
-
+  while (curr && curr->key < offset) { curr = curr->next; }
+  if (curr && curr->key - offset >= x_shape) curr = NULL;
+
+  if (rec) {
+    while (curr) {
+      LIST* val = nm::list::create();
+      map_empty_stored_r(result, s, val, reinterpret_cast<const LIST*>(curr->val), rec-1, rev, t_init);
+
+      if (!val->first) nm::list::del(val, 0);
+      else nm::list::insert_helper(x, xcurr, curr->key - offset, val);
+
+      curr = curr->next;
+      if (curr && curr->key - offset >= x_shape) curr = NULL;
+    }
+  } else {
+    while (curr) {
+      VALUE val, s_val = rubyobj_from_cval(curr->val, s.dtype()).rval;
+      if (rev) val = rb_yield_values(2, t_init, s_val);
+      else     val = rb_yield_values(2, s_val, t_init);
+
+      if (rb_funcall(val, rb_intern("!="), 1, result.init_obj()) == Qtrue)
+        xcurr = nm::list::insert_helper(x, xcurr, curr->key - offset, val);
+
+      curr = curr->next;
+      if (curr && curr->key - offset >= x_shape) curr = NULL;
+    }
+  }
+
+}
+
+
+/*
+ * Recursive helper function for nm_list_map_merged_stored
+ */
+static void map_merged_stored_r(RecurseData& result, RecurseData& left, RecurseData& right, LIST* x, const LIST* l, const LIST* r, size_t rec) {
+  NODE *lcurr = l->first,
+       *rcurr = r->first,
+       *xcurr = x->first;
+
+  // For reference matrices, make sure we start in the correct place.
+  while (lcurr && lcurr->key < left.offset(rec)) { lcurr = lcurr->next; }
+  while (rcurr && rcurr->key < right.offset(rec)) { rcurr = rcurr->next; }
+
+  if (rcurr && rcurr->key - right.offset(rec) >= result.shape(rec)) rcurr = NULL;
+  if (lcurr && lcurr->key - left.offset(rec) >= result.shape(rec))  lcurr = NULL;
+
+  if (rec) {
+    while (lcurr || rcurr) {
+      size_t key;
+      LIST*  val = nm::list::create();
+
+      if (!rcurr || (lcurr && (lcurr->key - left.offset(rec) < rcurr->key - right.offset(rec)))) {
+        map_empty_stored_r(result, left, val, reinterpret_cast<const LIST*>(lcurr->val), rec-1, false, right.init_obj());
+        key = lcurr->key - left.offset(rec);
+        lcurr = lcurr->next;
+      } else if (!lcurr || (rcurr && (rcurr->key - right.offset(rec) < lcurr->key - left.offset(rec)))) {
+        map_empty_stored_r(result, right, val, reinterpret_cast<const LIST*>(rcurr->val), rec-1, true, left.init_obj());
+        key = rcurr->key - right.offset(rec);
+        rcurr = rcurr->next;
+      } else { // == and both present
+        map_merged_stored_r(result, left, right, val, reinterpret_cast<const LIST*>(lcurr->val), reinterpret_cast<const LIST*>(rcurr->val), rec-1);
+        key = lcurr->key - left.offset(rec);
+        lcurr = lcurr->next;
+        rcurr = rcurr->next;
+      }
+
+      if (!val->first) nm::list::del(val, 0); // empty list -- don't insert
+      else xcurr = nm::list::insert_helper(x, xcurr, key, val);
+
+      if (rcurr && rcurr->key - right.offset(rec) >= result.shape(rec)) rcurr = NULL;
+      if (lcurr && lcurr->key - left.offset(rec) >= result.shape(rec))  lcurr = NULL;
+    }
+  } else {
+    while (lcurr || rcurr) {
+      size_t key;
+      VALUE  val;
+
+      if (!rcurr || (lcurr && (lcurr->key - left.offset(rec) < rcurr->key - right.offset(rec)))) {
+        val = rb_yield_values(2, rubyobj_from_cval(lcurr->val, left.dtype()).rval, right.init_obj());
+        key = lcurr->key - left.offset(rec);
+        lcurr = lcurr->next;
+      } else if (!lcurr || (rcurr && (rcurr->key - right.offset(rec) < lcurr->key - left.offset(rec)))) {
+        val = rb_yield_values(2, left.init_obj(), rubyobj_from_cval(rcurr->val, right.dtype()).rval);
+        key = rcurr->key - right.offset(rec);
+        rcurr = rcurr->next;
+      } else { // == and both present
+        val = rb_yield_values(2, rubyobj_from_cval(lcurr->val, left.dtype()).rval, rubyobj_from_cval(rcurr->val, right.dtype()).rval);
+        key = lcurr->key - left.offset(rec);
+        lcurr = lcurr->next;
+        rcurr = rcurr->next;
+      }
+      if (rb_funcall(val, rb_intern("!="), 1, result.init_obj()) == Qtrue)
+        xcurr = nm::list::insert_helper(x, xcurr, key, val);
+
+      if (rcurr && rcurr->key - right.offset(rec) >= result.shape(rec)) rcurr = NULL;
+      if (lcurr && lcurr->key - left.offset(rec) >= result.shape(rec))  lcurr = NULL;
+    }
+  }
+}
 
-template <ewop_t op, typename LDType, typename RDType>
-static void ew_comp_prime(LIST* dest, uint8_t d_default, const LIST* left, LDType l_default, const LIST* right, RDType r_default, const size_t* shape, size_t last_level, size_t level);
 
 } // end of namespace list_storage
 
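RecurseData collapses a chain of reference (sliced) storages down to the actual source list plus accumulated per-dimension offsets, so the recursive helpers above can walk only the stored nodes of each operand and substitute that operand's default value wherever a node is missing. Roughly, in Ruby terms — a sketch only, assuming the 0.0.x list constructor form NMatrix.new(:list, shape, default, dtype) and that element-wise operators on list matrices are built on these merged-map helpers:

    require 'nmatrix'

    # Two sparse 3x3 list matrices with different default values.
    a = NMatrix.new(:list, 3, 0, :int64)
    b = NMatrix.new(:list, 3, 1, :int64)
    a[0, 0] = 5
    b[2, 2] = 7

    # Only the stored entries of a and b need to be visited; everywhere else
    # the defaults stand in, so the result's own default is 0 + 1 = 1.
    c = a + b
    # c[0, 0] == 6, c[2, 2] == 7, c[1, 1] == 1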
@@ -98,9 +267,7 @@ extern "C" {
  * new storage. You don't need to free them, and you shouldn't re-use them.
  */
 LIST_STORAGE* nm_list_storage_create(dtype_t dtype, size_t* shape, size_t dim, void* init_val) {
-  LIST_STORAGE* s;
-
-  s = ALLOC( LIST_STORAGE );
+  LIST_STORAGE* s = ALLOC( LIST_STORAGE );
 
   s->dim = dim;
   s->shape = shape;
@@ -182,56 +349,199 @@ NODE* list_storage_get_single_node(LIST_STORAGE* s, SLICE* slice)
   return n;
 }
 
+
 /*
- *
+ * Recursive helper function for each_with_indices, based on nm_list_storage_count_elements_r.
+ * Handles empty/non-existent sublists.
  */
-
+static void each_empty_with_indices_r(nm::list_storage::RecurseData& s, size_t rec, VALUE& stack) {
+  VALUE empty = s.dtype() == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(s.init()) : s.init_obj();
+
+  if (rec) {
+    for (long index = 0; index < s.shape(rec); ++index) {
+      // Don't do an unshift/shift here -- we'll let that be handled in the lowest-level iteration (recursions == 0)
+      rb_ary_push(stack, LONG2NUM(index));
+      each_empty_with_indices_r(s, rec-1, stack);
+      rb_ary_pop(stack);
+    }
+  } else {
+    rb_ary_unshift(stack, empty);
+    for (long index = 0; index < s.shape(rec); ++index) {
+      rb_ary_push(stack, LONG2NUM(index));
+      rb_yield_splat(stack);
+      rb_ary_pop(stack);
+    }
+    rb_ary_shift(stack);
+  }
+}
 
-
-
+/*
+ * Recursive helper function for each_with_indices, based on nm_list_storage_count_elements_r.
+ */
+static void each_with_indices_r(nm::list_storage::RecurseData& s, const LIST* l, size_t rec, VALUE& stack) {
+  NODE* curr = l->first;
 
-
-
-  size_t* coords = ALLOCA_N(size_t, s->dim);
-  memset(coords, 0, sizeof(size_t) * s->dim);
+  size_t offset = s.offset(rec);
+  size_t shape  = s.shape(rec);
 
-
-
-  NODE* curr = l->first;
+  while (curr && curr->key < offset) curr = curr->next;
+  if (curr && curr->key >= shape) curr = NULL;
 
-
-
-
-
-
-
-    VALUE ary = rb_ary_new();
-    size_t col = subnode->key;
-
-    // Conditional type handling
-    if (NM_DTYPE(nmatrix) == nm::RUBYOBJ) {
-      rb_ary_push(ary, *reinterpret_cast<VALUE*>(subnode->val));
+
+  if (rec) {
+    for (long index = 0; index < shape; ++index) {
+      rb_ary_push(stack, LONG2NUM(index));
+      if (!curr || index < curr->key - offset) {
+        each_empty_with_indices_r(s, rec-1, stack);
       } else {
-
+        each_with_indices_r(s, reinterpret_cast<const LIST*>(curr->val), rec-1, stack);
+        curr = curr->next;
       }
+      rb_ary_pop(stack);
+    }
+  } else {
+    for (long index = 0; index < shape; ++index) {
 
-
-      rb_ary_push(ary, INT2FIX(row));
-      rb_ary_push(ary, INT2FIX(col));
+      rb_ary_push(stack, LONG2NUM(index));
 
-
-
+      if (!curr || index < curr->key - offset) {
+        rb_ary_unshift(stack, s.dtype() == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(s.init()) : s.init_obj());
 
-      //
-
+      } else { // index == curr->key
+        rb_ary_unshift(stack, s.dtype() == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(curr->val) : rubyobj_from_cval(curr->val, s.dtype()).rval);
+
+        curr = curr->next;
+      }
+      rb_yield_splat(stack);
+
+      rb_ary_shift(stack);
+      rb_ary_pop(stack);
     }
-    // Update the row node
-    curr = curr->next;
   }
+
+}
+
+
+/*
+ * Recursive helper function for each_stored_with_indices, based on nm_list_storage_count_elements_r.
+ */
+static void each_stored_with_indices_r(nm::list_storage::RecurseData& s, const LIST* l, size_t rec, VALUE& stack) {
+  NODE* curr = l->first;
+
+  size_t offset = s.offset(rec);
+  size_t shape  = s.shape(rec);
+
+  while (curr && curr->key < offset) { curr = curr->next; }
+  if (curr && curr->key - offset >= shape) curr = NULL;
+
+  if (rec) {
+    while (curr) {
+
+      rb_ary_push(stack, LONG2NUM(static_cast<long>(curr->key - offset)));
+      each_stored_with_indices_r(s, reinterpret_cast<const LIST*>(curr->val), rec-1, stack);
+      rb_ary_pop(stack);
+
+      curr = curr->next;
+      if (curr && curr->key - offset >= shape) curr = NULL;
+    }
+  } else {
+    while (curr) {
+      rb_ary_push(stack, LONG2NUM(static_cast<long>(curr->key - offset))); // add index to end
+
+      // add value to beginning
+      rb_ary_unshift(stack, s.dtype() == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(curr->val) : rubyobj_from_cval(curr->val, s.dtype()).rval);
+      // yield to the whole stack (value, i, j, k, ...)
+      rb_yield_splat(stack);
+
+      // remove the value
+      rb_ary_shift(stack);
+
+      // remove the index from the end
+      rb_ary_pop(stack);
+
+      curr = curr->next;
+      if (curr && curr->key >= shape) curr = NULL;
+    }
+  }
+}
+
+
+
+/*
+ * Each/each-stored iterator, brings along the indices.
+ */
+VALUE nm_list_each_with_indices(VALUE nmatrix, bool stored) {
+
+  // If we don't have a block, return an enumerator.
+  RETURN_SIZED_ENUMERATOR(nmatrix, 0, 0, 0);
+
+  nm::list_storage::RecurseData sdata(NM_STORAGE_LIST(nmatrix));
+
+  VALUE stack = rb_ary_new();
+
+  if (stored) each_stored_with_indices_r(sdata, sdata.top_level_list(), sdata.dim() - 1, stack);
+  else        each_with_indices_r(sdata, sdata.top_level_list(), sdata.dim() - 1, stack);
+
   return nmatrix;
 }
 
 
+/*
+ * map merged stored iterator. Always returns a matrix containing RubyObjects which probably needs to be casted.
+ */
+VALUE nm_list_map_merged_stored(VALUE left, VALUE right, VALUE init) {
+
+  bool scalar = false;
+
+  LIST_STORAGE *s = NM_STORAGE_LIST(left),
+               *t;
+
+  // For each matrix, if it's a reference, we want to deal directly with the original (with appropriate offsetting)
+  nm::list_storage::RecurseData sdata(s);
+
+  void* scalar_init = NULL;
+  size_t* shape;
+
+  // right might be a scalar, in which case this is a scalar operation.
+  if (TYPE(right) != T_DATA || (RDATA(right)->dfree != (RUBY_DATA_FUNC)nm_delete && RDATA(right)->dfree != (RUBY_DATA_FUNC)nm_delete_ref)) {
+    nm::dtype_t r_dtype = nm_dtype_min(right);
+
+    scalar_init = rubyobj_to_cval(right, r_dtype); // make a copy of right
+
+    t = reinterpret_cast<LIST_STORAGE*>(nm_list_storage_create(r_dtype, sdata.copy_alloc_shape(), s->dim, scalar_init));
+    scalar = true;
+  } else {
+    t = NM_STORAGE_LIST(right); // element-wise, not scalar.
+  }
+
+  //if (!rb_block_given_p()) {
+  //  rb_raise(rb_eNotImpError, "RETURN_SIZED_ENUMERATOR probably won't work for a map_merged since no merged object is created");
+  //}
+  // If we don't have a block, return an enumerator.
+  RETURN_SIZED_ENUMERATOR(left, 0, 0, 0); // FIXME: Test this. Probably won't work. Enable above code instead.
+
+  // Figure out default value if none provided by the user
+  nm::list_storage::RecurseData tdata(t);
+  if (init == Qnil) init = rb_yield_values(2, sdata.init_obj(), tdata.init_obj());
+
+  // Allocate a new shape array for the resulting matrix.
+  void* init_val = ALLOC(VALUE);
+  memcpy(init_val, &init, sizeof(VALUE));
+
+  NMATRIX* result = nm_create(nm::LIST_STORE, nm_list_storage_create(nm::RUBYOBJ, sdata.copy_alloc_shape(), s->dim, init_val));
+  LIST_STORAGE* r = reinterpret_cast<LIST_STORAGE*>(result->storage);
+  nm::list_storage::RecurseData rdata(r, init);
+
+  map_merged_stored_r(rdata, sdata, tdata, rdata.top_level_list(), sdata.top_level_list(), tdata.top_level_list(), sdata.dim() - 1);
+
+  // If we are working with a scalar operation
+  if (scalar) nm_list_storage_delete(t);
+
+  return Data_Wrap_Struct(CLASS_OF(left), nm_list_storage_mark, nm_delete, result);
+}
+
+
+
 static LIST* slice_copy(const LIST_STORAGE *src, LIST *src_rows, size_t *coords, size_t *lengths, size_t n) {
   NODE *src_node;
   LIST *dst_rows = NULL;
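nm_list_each_with_indices drives both iteration styles from the same recursion: the dense-style walk visits every index and fills in the default value, while the stored-only walk touches just the list nodes; in both cases the value is unshifted onto the stack array and the whole stack is splat-yielded as (value, i, j, ...). A hedged Ruby sketch, assuming the Ruby-level names each_with_indices and each_stored_with_indices dispatch to this entry point:

    require 'nmatrix'

    m = NMatrix.new(:list, 2, 0, :int64)   # assumes the 0.0.x list constructor
    m[0, 1] = 9

    # Visits all four positions, substituting the default 0 where nothing is stored.
    m.each_with_indices do |val, i, j|
      puts "m[#{i},#{j}] = #{val}"
    end

    # Visits only the stored entry, 9 at [0, 1].
    m.each_stored_with_indices do |val, i, j|
      puts "stored: m[#{i},#{j}] = #{val}"
    end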
@@ -377,87 +687,18 @@ void* nm_list_storage_remove(STORAGE* storage, SLICE* slice) {
  * Comparison of contents for list storage.
  */
 bool nm_list_storage_eqeq(const STORAGE* left, const STORAGE* right) {
-  NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::list_storage::
+  NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::list_storage::eqeq_r, bool, nm::list_storage::RecurseData& left, nm::list_storage::RecurseData& right, const LIST* l, const LIST* r, size_t rec)
+
+  nm::list_storage::RecurseData ldata(reinterpret_cast<const LIST_STORAGE*>(left)),
+                                rdata(reinterpret_cast<const LIST_STORAGE*>(right));
 
-  return ttable[left->dtype][right->dtype]((
+  return ttable[left->dtype][right->dtype](ldata, rdata, ldata.top_level_list(), rdata.top_level_list(), ldata.dim()-1);
 }
 
 //////////
 // Math //
 //////////
 
-/*
- * Element-wise operations for list storage.
- *
- * If a scalar is given, a temporary matrix is created with that scalar as a default value.
- */
-STORAGE* nm_list_storage_ew_op(nm::ewop_t op, const STORAGE* left, const STORAGE* right, VALUE scalar) {
-  // rb_raise(rb_eNotImpError, "elementwise operations for list storage currently broken");
-
-  bool cleanup = false;
-  LIST_STORAGE *r, *new_l;
-  const LIST_STORAGE* l = reinterpret_cast<const LIST_STORAGE*>(left);
-
-  if (!right) { // need to build a right-hand matrix temporarily, with default value of 'scalar'
-
-    dtype_t scalar_dtype = nm_dtype_guess(scalar);
-    void* scalar_init = rubyobj_to_cval(scalar, scalar_dtype);
-
-    size_t* shape = ALLOC_N(size_t, l->dim);
-    memcpy(shape, left->shape, sizeof(size_t) * l->dim);
-
-    r = nm_list_storage_create(scalar_dtype, shape, l->dim, scalar_init);
-
-    cleanup = true;
-
-  } else {
-
-    r = reinterpret_cast<LIST_STORAGE*>(const_cast<STORAGE*>(right));
-
-  }
-
-  // We may need to upcast our arguments to the same type.
-  dtype_t new_dtype = Upcast[left->dtype][r->dtype];
-
-  // Make sure we allocate a byte-storing matrix for comparison operations; otherwise, use the argument dtype (new_dtype)
-  dtype_t result_dtype = static_cast<uint8_t>(op) < NUM_NONCOMP_EWOPS ? new_dtype : BYTE;
-
-  OP_LR_DTYPE_TEMPLATE_TABLE(nm::list_storage::ew_op, void*, LIST* dest, const LIST* left, const void* l_default, const LIST* right, const void* r_default, const size_t* shape, size_t dim);
-
-  // Allocate a new shape array for the resulting matrix.
-  size_t* new_shape = ALLOC_N(size_t, l->dim);
-  memcpy(new_shape, left->shape, sizeof(size_t) * l->dim);
-
-  // Create the result matrix.
-  LIST_STORAGE* result = nm_list_storage_create(result_dtype, new_shape, left->dim, NULL);
-
-  /*
-   * Call the templated elementwise multiplication function and set the default
-   * value for the resulting matrix.
-   */
-  if (new_dtype != left->dtype) {
-    // Upcast the left-hand side if necessary.
-    new_l = reinterpret_cast<LIST_STORAGE*>(nm_list_storage_cast_copy(l, new_dtype));
-
-    result->default_val =
-      ttable[op][new_l->dtype][r->dtype](result->rows, new_l->rows, new_l->default_val, r->rows, r->default_val, result->shape, result->dim);
-
-    // Delete the temporary left-hand side matrix.
-    nm_list_storage_delete(reinterpret_cast<STORAGE*>(new_l));
-
-  } else {
-    result->default_val =
-      ttable[op][left->dtype][r->dtype](result->rows, l->rows, l->default_val, r->rows, r->default_val, result->shape, result->dim);
-  }
-
-  // If we created a temporary scalar matrix (for matrix-scalar operations), we now need to delete it.
-  if (cleanup) {
-    nm_list_storage_delete(reinterpret_cast<STORAGE*>(r));
-  }
-
-  return result;
-}
-
 
 /*
  * List storage matrix multiplication.
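Where the old eqeq copied any reference (sliced) operand before comparing, eqeq_r now walks both operands in place, using each RecurseData's offsets to line keys up and comparing against the other side's default wherever a node is absent. A small Ruby illustration of what this enables, assuming slices of list matrices return reference matrices as in earlier releases:

    require 'nmatrix'

    big = NMatrix.new(:list, 4, 0, :int64)
    big[1, 1] = 3

    small = NMatrix.new(:list, 2, 0, :int64)
    small[0, 0] = 3

    # The slice is a reference into big's storage; == compares it node by node
    # against small without materializing a copy first.
    big[1..2, 1..2] == small   # => true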
@@ -564,7 +805,7 @@ LIST_STORAGE* nm_list_storage_copy(const LIST_STORAGE* rhs)
 /*
  * List storage copy constructor C access with casting.
  */
-STORAGE* nm_list_storage_cast_copy(const STORAGE* rhs, dtype_t new_dtype) {
+STORAGE* nm_list_storage_cast_copy(const STORAGE* rhs, dtype_t new_dtype, void* dummy) {
   NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::list_storage::cast_copy, LIST_STORAGE*, const LIST_STORAGE* rhs, dtype_t new_dtype);
 
   return (STORAGE*)ttable[new_dtype][rhs->dtype]((LIST_STORAGE*)rhs, new_dtype);
@@ -587,8 +828,11 @@ STORAGE* nm_list_storage_copy_transposed(const STORAGE* rhs_base) {
 // Templated Functions //
 /////////////////////////
 
+
+
 namespace list_storage {
 
+
 /*
  * List storage copy constructor for changing dtypes.
  */
@@ -604,7 +848,7 @@ static LIST_STORAGE* cast_copy(const LIST_STORAGE* rhs, dtype_t new_dtype) {
   *default_val = *reinterpret_cast<RDType*>(rhs->default_val);
 
   LIST_STORAGE* lhs = nm_list_storage_create(new_dtype, shape, rhs->dim, default_val);
-  lhs->rows = list::create();
+  //lhs->rows = list::create();
 
   // TODO: Needs optimization. When matrix is reference it is copped twice.
   if (rhs->src == rhs)
@@ -620,539 +864,126 @@ static LIST_STORAGE* cast_copy(const LIST_STORAGE* rhs, dtype_t new_dtype) {
 
 
 /*
- *
- *
+ * Recursive helper function for eqeq. Note that we use SDType and TDType instead of L and R because this function
+ * is a re-labeling. That is, it can be called in order L,R or order R,L; and we don't want to get confused. So we
+ * use S and T to denote first and second passed in.
  */
-template <typename
-bool
-
-
-  // in certain cases, we need to keep track of the number of elements checked.
-  size_t num_checked = 0,
-    max_elements = nm_storage_count_max_elements(left);
-  LIST_STORAGE *tmp1 = NULL, *tmp2 = NULL;
-
-  if (!left->rows->first) {
-    // Easy: both lists empty -- just compare default values
-    if (!right->rows->first) {
-      return *reinterpret_cast<LDType*>(left->default_val) == *reinterpret_cast<RDType*>(right->default_val);
-
-    } else if (!list::eqeq_value<RDType,LDType>(right->rows, reinterpret_cast<LDType*>(left->default_val), left->dim-1, num_checked)) {
-      // Left empty, right not empty. Do all values in right == left->default_val?
-      return false;
-
-    } else if (num_checked < max_elements) {
-      // If the matrix isn't full, we also need to compare default values.
-      return *reinterpret_cast<LDType*>(left->default_val) == *reinterpret_cast<RDType*>(right->default_val);
-    }
+template <typename SDType, typename TDType>
+static bool eqeq_empty_r(RecurseData& s, const LIST* l, size_t rec, const TDType* t_init) {
+  NODE* curr = l->first;
 
-
-
-
-    if (!list::eqeq_value<LDType,RDType>(left->rows, reinterpret_cast<RDType*>(right->default_val), left->dim-1, num_checked)) {
-      return false;
-
-    } else if (num_checked < max_elements) {
-      // If the matrix isn't full, we also need to compare default values.
-      return *reinterpret_cast<LDType*>(left->default_val) == *reinterpret_cast<RDType*>(right->default_val);
-    }
+  // For reference matrices, make sure we start in the correct place.
+  while (curr && curr->key < s.offset(rec)) { curr = curr->next; }
+  if (curr && curr->key - s.offset(rec) >= s.shape(rec)) curr = NULL;
 
-
-
-
-
-
-
-    tmp1 = nm_list_storage_copy(left);
-    result = list::eqeq<LDType,RDType>(tmp1->rows, right->rows, reinterpret_cast<LDType*>(left->default_val), reinterpret_cast<RDType*>(right->default_val), left->dim-1, num_checked);
-    nm_list_storage_delete(tmp1);
-  }
-  // right is reference
-  if (left->src == left && right->src != right) {
-    tmp2 = nm_list_storage_copy(right);
-    result = list::eqeq<LDType,RDType>(left->rows, tmp2->rows, reinterpret_cast<LDType*>(left->default_val), reinterpret_cast<RDType*>(right->default_val), left->dim-1, num_checked);
-    nm_list_storage_delete(tmp2);
-  }
-  // both are references
-  if (left->src != left && right->src != right) {
-    tmp1 = nm_list_storage_copy(left);
-    tmp2 = nm_list_storage_copy(right);
-    result = list::eqeq<LDType,RDType>(tmp1->rows, tmp2->rows, reinterpret_cast<LDType*>(left->default_val), reinterpret_cast<RDType*>(right->default_val), left->dim-1, num_checked);
-    nm_list_storage_delete(tmp1);
-    nm_list_storage_delete(tmp2);
+  if (rec) {
+    while (curr) {
+      if (!eqeq_empty_r<SDType,TDType>(s, reinterpret_cast<const LIST*>(curr->val), rec-1, t_init)) return false;
+      curr = curr->next;
+
+      if (curr && curr->key - s.offset(rec) >= s.shape(rec)) curr = NULL;
     }
-
-
-
-
-
-
-  } else if (num_checked < max_elements) {
-    return *reinterpret_cast<LDType*>(left->default_val) == *reinterpret_cast<RDType*>(right->default_val);
+  } else {
+    while (curr) {
+      if (*reinterpret_cast<SDType*>(curr->val) != *t_init) return false;
+      curr = curr->next;
+
+      if (curr && curr->key - s.offset(rec) >= s.shape(rec)) curr = NULL;
     }
   }
-
   return true;
 }
 
-/*
- * List storage element-wise operations (including comparisons).
- */
-template <ewop_t op, typename LDType, typename RDType>
-static void* ew_op(LIST* dest, const LIST* left, const void* l_default, const LIST* right, const void* r_default, const size_t* shape, size_t dim) {
-
-  if (static_cast<uint8_t>(op) < NUM_NONCOMP_EWOPS) {
-
-    /*
-     * Allocate space for, and calculate, the default value for the destination
-     * matrix.
-     */
-    LDType* d_default_mem = ALLOC(LDType);
-    *d_default_mem = ew_op_switch<op, LDType, RDType>(*reinterpret_cast<const LDType*>(l_default), *reinterpret_cast<const RDType*>(r_default));
-
-    // Now that setup is done call the actual elementwise operation function.
-    ew_op_prime<op, LDType, RDType>(dest, *reinterpret_cast<const LDType*>(d_default_mem),
-      left, *reinterpret_cast<const LDType*>(l_default),
-      right, *reinterpret_cast<const RDType*>(r_default),
-      shape, dim - 1, 0);
-
-    // Return a pointer to the destination matrix's default value.
-    return d_default_mem;
-
-  } else { // Handle comparison operations in a similar manner.
-    /*
-     * Allocate a byte for default, and set default value to 0.
-     */
-    uint8_t* d_default_mem = ALLOC(uint8_t);
-    *d_default_mem = 0;
-    switch (op) {
-      case EW_EQEQ:
-        *d_default_mem = *reinterpret_cast<const LDType*>(l_default) == *reinterpret_cast<const RDType*>(r_default);
-        break;
-
-      case EW_NEQ:
-        *d_default_mem = *reinterpret_cast<const LDType*>(l_default) != *reinterpret_cast<const RDType*>(r_default);
-        break;
-
-      case EW_LT:
-        *d_default_mem = *reinterpret_cast<const LDType*>(l_default) < *reinterpret_cast<const RDType*>(r_default);
-        break;
-
-      case EW_GT:
-        *d_default_mem = *reinterpret_cast<const LDType*>(l_default) > *reinterpret_cast<const RDType*>(r_default);
-        break;
-
-      case EW_LEQ:
-        *d_default_mem = *reinterpret_cast<const LDType*>(l_default) <= *reinterpret_cast<const RDType*>(r_default);
-        break;
-
-      case EW_GEQ:
-        *d_default_mem = *reinterpret_cast<const LDType*>(l_default) >= *reinterpret_cast<const RDType*>(r_default);
-        break;
-
-      default:
-        rb_raise(rb_eStandardError, "this should not happen");
-    }
-
-    // Now that setup is done call the actual elementwise comparison function.
-    ew_comp_prime<op, LDType, RDType>(dest, *reinterpret_cast<const uint8_t*>(d_default_mem),
-      left, *reinterpret_cast<const LDType*>(l_default),
-      right, *reinterpret_cast<const RDType*>(r_default),
-      shape, dim - 1, 0);
-
-    // Return a pointer to the destination matrix's default value.
-    return d_default_mem;
-  }
-}
 
 
 /*
- *
+ * Do these two list matrices of the same dtype have exactly the same contents (accounting for default_vals)?
+ *
+ * This function is recursive.
  */
-template <
-static
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      case EW_GEQ:
-        tmp_result = (uint8_t)(l_default >= *reinterpret_cast<RDType*>(r_node->val));
-        break;
-
-      default:
-        rb_raise(rb_eStandardError, "This should not happen.");
-    }
-
-    if (tmp_result != d_default) {
-      dest_node = nm::list::insert_helper(dest, dest_node, index, tmp_result);
-    }
-
-  } else {
-    new_level = nm::list::create();
-    dest_node = nm::list::insert_helper(dest, dest_node, index, new_level);
-
-    ew_comp_prime<op, LDType, RDType>(new_level, d_default, &EMPTY_LIST, l_default,
-      reinterpret_cast<LIST*>(r_node->val), r_default,
-      shape, last_level, level + 1);
-  }
-
-  r_node = r_node->next;
-
-} else if (r_node == NULL and l_node->key == index) {
-  /*
-   * One source list is empty, but the index has caught up to the key of
-   * the other list.
-   */
-
-  if (level == last_level) {
-    switch (op) {
-      case EW_EQEQ:
-        tmp_result = (uint8_t)(*reinterpret_cast<LDType*>(l_node->val) == r_default);
-        break;
-
-      case EW_NEQ:
-        tmp_result = (uint8_t)(*reinterpret_cast<LDType*>(l_node->val) != r_default);
-        break;
-
-      case EW_LT:
-        tmp_result = (uint8_t)(*reinterpret_cast<LDType*>(l_node->val) < r_default);
-        break;
-
-      case EW_GT:
-        tmp_result = (uint8_t)(*reinterpret_cast<LDType*>(l_node->val) > r_default);
-        break;
+template <typename LDType, typename RDType>
+static bool eqeq_r(RecurseData& left, RecurseData& right, const LIST* l, const LIST* r, size_t rec) {
+  NODE *lcurr = l->first,
+       *rcurr = r->first;
+
+  // For reference matrices, make sure we start in the correct place.
+  while (lcurr && lcurr->key < left.offset(rec)) { lcurr = lcurr->next; }
+  while (rcurr && rcurr->key < right.offset(rec)) { rcurr = rcurr->next; }
+  if (rcurr && rcurr->key - right.offset(rec) >= left.shape(rec)) rcurr = NULL;
+  if (lcurr && lcurr->key - left.offset(rec) >= left.shape(rec))  lcurr = NULL;
+
+  bool compared = false;
+
+  if (rec) {
+
+    while (lcurr || rcurr) {
+
+      if (!rcurr || (lcurr && (lcurr->key - left.offset(rec) < rcurr->key - right.offset(rec)))) {
+        if (!eqeq_empty_r<LDType,RDType>(left, reinterpret_cast<const LIST*>(lcurr->val), rec-1, reinterpret_cast<const RDType*>(right.init()))) return false;
+        lcurr = lcurr->next;
+      } else if (!lcurr || (rcurr && (rcurr->key - right.offset(rec) < lcurr->key - left.offset(rec)))) {
+        if (!eqeq_empty_r<RDType,LDType>(right, reinterpret_cast<const LIST*>(rcurr->val), rec-1, reinterpret_cast<const LDType*>(left.init()))) return false;
+        rcurr = rcurr->next;
+      } else { // keys are == and both present
+        if (!eqeq_r<LDType,RDType>(left, right, reinterpret_cast<const LIST*>(lcurr->val), reinterpret_cast<const LIST*>(rcurr->val), rec-1)) return false;
+        lcurr = lcurr->next;
+        rcurr = rcurr->next;
+      }
+      if (rcurr && rcurr->key - right.offset(rec) >= left.shape(rec)) rcurr = NULL;
+      if (lcurr && lcurr->key - left.offset(rec) >= left.shape(rec))  lcurr = NULL;
+      compared = true;
+    }
+  } else {
+    while (lcurr || rcurr) {
+
+      if (rcurr && rcurr->key - right.offset(rec) >= left.shape(rec)) rcurr = NULL;
+      if (lcurr && lcurr->key - left.offset(rec) >= left.shape(rec))  lcurr = NULL;
+
+      if (!rcurr || (lcurr && (lcurr->key - left.offset(rec) < rcurr->key - right.offset(rec)))) {
+        if (*reinterpret_cast<LDType*>(lcurr->val) != *reinterpret_cast<const RDType*>(right.init())) return false;
+        lcurr = lcurr->next;
+      } else if (!lcurr || (rcurr && (rcurr->key - right.offset(rec) < lcurr->key - left.offset(rec)))) {
+        if (*reinterpret_cast<RDType*>(rcurr->val) != *reinterpret_cast<const LDType*>(left.init())) return false;
+        rcurr = rcurr->next;
+      } else { // keys == and both left and right nodes present
+        if (*reinterpret_cast<LDType*>(lcurr->val) != *reinterpret_cast<RDType*>(rcurr->val)) return false;
+        lcurr = lcurr->next;
+        rcurr = rcurr->next;
+      }
+      if (rcurr && rcurr->key - right.offset(rec) >= left.shape(rec)) rcurr = NULL;
+      if (lcurr && lcurr->key - left.offset(rec) >= left.shape(rec))  lcurr = NULL;
+      compared = true;
+    }
+  }
 
-
-
-
-
-      case EW_GEQ:
-        tmp_result = (uint8_t)(*reinterpret_cast<LDType*>(l_node->val) >= r_default);
-        break;
-
-      default:
-        rb_raise(rb_eStandardError, "this should not happen");
-    }
-
-    if (tmp_result != d_default) {
-      dest_node = nm::list::insert_helper(dest, dest_node, index, tmp_result);
-    }
-
-  } else {
-    new_level = nm::list::create();
-    dest_node = nm::list::insert_helper(dest, dest_node, index, new_level);
-
-    ew_comp_prime<op, LDType, RDType>(new_level, d_default,
-      reinterpret_cast<LIST*>(l_node->val), l_default,
-      &EMPTY_LIST, r_default,
-      shape, last_level, level + 1);
-  }
-
-  l_node = l_node->next;
-
-} else if (l_node != NULL and r_node != NULL and index == std::min(l_node->key, r_node->key)) {
-  /*
-   * Neither list is empty and our index has caught up to one of the
-   * source lists.
-   */
-
-  if (l_node->key == r_node->key) {
-
-    if (level == last_level) {
-      switch (op) {
-        case EW_EQEQ:
-          tmp_result = (uint8_t)(*reinterpret_cast<LDType*>(l_node->val) == *reinterpret_cast<RDType*>(r_node->val));
-          break;
-
-        case EW_NEQ:
-          tmp_result = (uint8_t)(*reinterpret_cast<LDType*>(l_node->val) != *reinterpret_cast<RDType*>(r_node->val));
-          break;
-
-        case EW_LT:
-          tmp_result = (uint8_t)(*reinterpret_cast<LDType*>(l_node->val) < *reinterpret_cast<RDType*>(r_node->val));
-          break;
-
-        case EW_GT:
-          tmp_result = (uint8_t)(*reinterpret_cast<LDType*>(l_node->val) > *reinterpret_cast<RDType*>(r_node->val));
-          break;
-
-        case EW_LEQ:
-          tmp_result = (uint8_t)(*reinterpret_cast<LDType*>(l_node->val) <= *reinterpret_cast<RDType*>(r_node->val));
-          break;
-
-        case EW_GEQ:
-          tmp_result = (uint8_t)(*reinterpret_cast<LDType*>(l_node->val) >= *reinterpret_cast<RDType*>(r_node->val));
-          break;
-
-        default:
-          rb_raise(rb_eStandardError, "this should not happen");
-      }
-
-      if (tmp_result != d_default) {
-        dest_node = nm::list::insert_helper(dest, dest_node, index, tmp_result);
-      }
-
-    } else {
-      new_level = nm::list::create();
-      dest_node = nm::list::insert_helper(dest, dest_node, index, new_level);
-
-      ew_comp_prime<op, LDType, RDType>(new_level, d_default,
-        reinterpret_cast<LIST*>(l_node->val), l_default,
-        reinterpret_cast<LIST*>(r_node->val), r_default,
-        shape, last_level, level + 1);
-    }
-
-    l_node = l_node->next;
-    r_node = r_node->next;
-
-  } else if (l_node->key < r_node->key) {
-    // Advance the left node knowing that the default value is OK.
-
-    l_node = l_node->next;
-
-  } else /* if (l_node->key > r_node->key) */ {
-    // Advance the right node knowing that the default value is OK.
-
-    r_node = r_node->next;
-  }
-
-} else {
-  /*
-   * Our index needs to catch up but the default value is OK. This
-   * conditional is here only for documentation and should be optimized
-   * out.
-   */
-}
-}
-}
+  // Final condition: both containers are empty, and have different default values.
+  if (!compared && !lcurr && !rcurr) return *reinterpret_cast<const LDType*>(left.init()) == *reinterpret_cast<const RDType*>(right.init());
+  return true;
 }
 
 
-
-/*
- * List storage element-wise operations, recursive helper.
- */
-template <ewop_t op, typename LDType, typename RDType>
-static void ew_op_prime(LIST* dest, LDType d_default, const LIST* left, LDType l_default, const LIST* right, RDType r_default, const size_t* shape, size_t last_level, size_t level) {
-
-  static LIST EMPTY_LIST = {NULL};
-
-  size_t index;
-
-  LDType tmp_result;
-
-  LIST* new_level = NULL;
-
-  NODE* l_node = left->first,
-      * r_node = right->first,
-      * dest_node = NULL;
-
-  for (index = 0; index < shape[level]; ++index) {
-    if (l_node == NULL and r_node == NULL) {
-      /*
-       * Both source lists are now empty. Because the default value of the
-       * destination is already set appropriately we can now return.
-       */
-
-      return;
-
-    } else {
-      // At least one list still has entries.
-
-      if (op == EW_MUL) {
-        // Special cases for multiplication.
-
-        if (l_node == NULL and (l_default == 0 and d_default == 0)) {
-          /*
-           * The left hand list has run out of elements. We don't need to add new
-           * values to the destination if l_default and d_default are both 0.
-           */
-
-          return;
-
-        } else if (r_node == NULL and (r_default == 0 and d_default == 0)) {
-          /*
-           * The right hand list has run out of elements. We don't need to add new
-           * values to the destination if r_default and d_default are both 0.
-           */
-
-          return;
-        }
-
-      } else if (op == EW_DIV) {
-        // Special cases for division.
-
-        if (l_node == NULL and (l_default == 0 and d_default == 0)) {
-          /*
-           * The left hand list has run out of elements. We don't need to add new
-           * values to the destination if l_default and d_default are both 0.
-           */
-
-          return;
-
-        } else if (r_node == NULL and (r_default == 0 and d_default == 0)) {
-          /*
-           * The right hand list has run out of elements. If the r_default
-           * value is 0 any further division will result in a SIGFPE.
-           */
-
-          rb_raise(rb_eZeroDivError, "Cannot divide type by 0, would throw SIGFPE.");
-        }
-
-        // TODO: Add optimizations for addition and subtraction.
-
-      }
-
-      // We need to continue processing the lists.
-
-      if (l_node == NULL and r_node->key == index) {
-        /*
-         * One source list is empty, but the index has caught up to the key of
-         * the other list.
-         */
-
-        if (level == last_level) {
-          tmp_result = ew_op_switch<op, LDType, RDType>(l_default, *reinterpret_cast<RDType*>(r_node->val));
-          std::cerr << "1. tmp_result = " << tmp_result << std::endl;
-
-          if (tmp_result != d_default) {
-            dest_node = nm::list::insert_helper(dest, dest_node, index, tmp_result);
-          }
-
-        } else {
-          new_level = nm::list::create();
-          dest_node = nm::list::insert_helper(dest, dest_node, index, new_level);
-
-          ew_op_prime<op, LDType, RDType>(new_level, d_default, &EMPTY_LIST, l_default,
-            reinterpret_cast<LIST*>(r_node->val), r_default,
-            shape, last_level, level + 1);
-        }
-
-        r_node = r_node->next;
-
-      } else if (r_node == NULL and l_node->key == index) {
-        /*
-         * One source list is empty, but the index has caught up to the key of
-         * the other list.
-         */
-
-        if (level == last_level) {
-          tmp_result = ew_op_switch<op, LDType, RDType>(*reinterpret_cast<LDType*>(l_node->val), r_default);
-          std::cerr << "2. tmp_result = " << tmp_result << std::endl;
-
-          if (tmp_result != d_default) {
-            dest_node = nm::list::insert_helper(dest, dest_node, index, tmp_result);
-          }
-
-        } else {
-          new_level = nm::list::create();
-          dest_node = nm::list::insert_helper(dest, dest_node, index, new_level);
-
-          ew_op_prime<op, LDType, RDType>(new_level, d_default, reinterpret_cast<LIST*>(l_node->val), l_default,
-            &EMPTY_LIST, r_default, shape, last_level, level + 1);
-        }
-
-        l_node = l_node->next;
-
-      } else if (l_node != NULL and r_node != NULL and index == std::min(l_node->key, r_node->key)) {
-        /*
-         * Neither list is empty and our index has caught up to one of the
-         * source lists.
-         */
-
-        if (l_node->key == r_node->key) {
-
-          if (level == last_level) {
-            tmp_result = ew_op_switch<op, LDType, RDType>(*reinterpret_cast<LDType*>(l_node->val),*reinterpret_cast<RDType*>(r_node->val));
-            std::cerr << "3. tmp_result = " << tmp_result << std::endl;
-
-            if (tmp_result != d_default) {
-              dest_node = nm::list::insert_helper(dest, dest_node, index, tmp_result);
-            }
-
-          } else {
-            new_level = nm::list::create();
-            dest_node = nm::list::insert_helper(dest, dest_node, index, new_level);
-
-            ew_op_prime<op, LDType, RDType>(new_level, d_default,
-              reinterpret_cast<LIST*>(l_node->val), l_default,
-              reinterpret_cast<LIST*>(r_node->val), r_default,
-              shape, last_level, level + 1);
-          }
-
-          l_node = l_node->next;
-          r_node = r_node->next;
-
-        } else if (l_node->key < r_node->key) {
-          // Advance the left node knowing that the default value is OK.
-
-          l_node = l_node->next;
-
-        } else /* if (l_node->key > r_node->key) */ {
-          // Advance the right node knowing that the default value is OK.
-
-          r_node = r_node->next;
-        }
-
-      } else {
-        /*
-         * Our index needs to catch up but the default value is OK. This
-         * conditional is here only for documentation and should be optimized
-         * out.
-         */
-      }
-    }
-  }
-}
-
 }} // end of namespace nm::list_storage
 
+extern "C" {
+  /*
+   * call-seq:
+   *     __list_to_hash__ -> Hash
+   *
+   * Create a Ruby Hash from a list NMatrix.
+   *
+   * This is an internal C function which handles list stype only.
+   */
+  VALUE nm_to_hash(VALUE self) {
+    return nm_list_storage_to_hash(NM_STORAGE_LIST(self), NM_DTYPE(self));
+  }
+
+  /*
+   * call-seq:
+   *     __list_default_value__ -> ...
+   *
+   * Get the default_value property from a list matrix.
+   */
+  VALUE nm_list_default_value(VALUE self) {
+    return (NM_DTYPE(self) == nm::RUBYOBJ) ? *reinterpret_cast<VALUE*>(NM_DEFAULT_VAL(self)) : rubyobj_from_cval(NM_DEFAULT_VAL(self), NM_DTYPE(self)).rval;
+  }
+} // end of extern "C" block
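Both functions are thin accessors exposed to Ruby, and the call-seq comments give their Ruby-visible names. A short usage sketch; the bare __list_to_hash__ and __list_default_value__ methods are what the C layer registers, and any friendlier wrappers would live in the Ruby layer:

    require 'nmatrix'

    m = NMatrix.new(:list, 2, 0, :int64)   # assumes the 0.0.x list constructor
    m[0, 1] = 9

    m.__list_default_value__   # => 0
    m.__list_to_hash__         # => nested by dimension, e.g. { 0 => { 1 => 9 } }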