nmatrix 0.0.6 → 0.0.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. checksums.yaml +4 -4
  2. data/.gitignore +2 -0
  3. data/Gemfile +5 -0
  4. data/History.txt +97 -0
  5. data/Manifest.txt +34 -7
  6. data/README.rdoc +13 -13
  7. data/Rakefile +36 -26
  8. data/ext/nmatrix/data/data.cpp +15 -2
  9. data/ext/nmatrix/data/data.h +4 -0
  10. data/ext/nmatrix/data/ruby_object.h +5 -14
  11. data/ext/nmatrix/extconf.rb +3 -2
  12. data/ext/nmatrix/{util/math.cpp → math.cpp} +296 -6
  13. data/ext/nmatrix/math/asum.h +143 -0
  14. data/ext/nmatrix/math/geev.h +82 -0
  15. data/ext/nmatrix/math/gemm.h +267 -0
  16. data/ext/nmatrix/math/gemv.h +208 -0
  17. data/ext/nmatrix/math/ger.h +96 -0
  18. data/ext/nmatrix/math/gesdd.h +80 -0
  19. data/ext/nmatrix/math/gesvd.h +78 -0
  20. data/ext/nmatrix/math/getf2.h +86 -0
  21. data/ext/nmatrix/math/getrf.h +240 -0
  22. data/ext/nmatrix/math/getri.h +107 -0
  23. data/ext/nmatrix/math/getrs.h +125 -0
  24. data/ext/nmatrix/math/idamax.h +86 -0
  25. data/ext/nmatrix/{util → math}/lapack.h +60 -356
  26. data/ext/nmatrix/math/laswp.h +165 -0
  27. data/ext/nmatrix/math/long_dtype.h +52 -0
  28. data/ext/nmatrix/math/math.h +1154 -0
  29. data/ext/nmatrix/math/nrm2.h +181 -0
  30. data/ext/nmatrix/math/potrs.h +125 -0
  31. data/ext/nmatrix/math/rot.h +141 -0
  32. data/ext/nmatrix/math/rotg.h +115 -0
  33. data/ext/nmatrix/math/scal.h +73 -0
  34. data/ext/nmatrix/math/swap.h +73 -0
  35. data/ext/nmatrix/math/trsm.h +383 -0
  36. data/ext/nmatrix/nmatrix.cpp +176 -152
  37. data/ext/nmatrix/nmatrix.h +1 -2
  38. data/ext/nmatrix/ruby_constants.cpp +9 -4
  39. data/ext/nmatrix/ruby_constants.h +1 -0
  40. data/ext/nmatrix/storage/dense.cpp +57 -41
  41. data/ext/nmatrix/storage/list.cpp +52 -50
  42. data/ext/nmatrix/storage/storage.cpp +59 -43
  43. data/ext/nmatrix/storage/yale.cpp +352 -333
  44. data/ext/nmatrix/storage/yale.h +4 -0
  45. data/lib/nmatrix.rb +2 -2
  46. data/lib/nmatrix/blas.rb +4 -4
  47. data/lib/nmatrix/enumerate.rb +241 -0
  48. data/lib/nmatrix/lapack.rb +54 -1
  49. data/lib/nmatrix/math.rb +462 -0
  50. data/lib/nmatrix/nmatrix.rb +210 -486
  51. data/lib/nmatrix/nvector.rb +0 -62
  52. data/lib/nmatrix/rspec.rb +75 -0
  53. data/lib/nmatrix/shortcuts.rb +136 -108
  54. data/lib/nmatrix/version.rb +1 -1
  55. data/spec/blas_spec.rb +20 -12
  56. data/spec/elementwise_spec.rb +22 -13
  57. data/spec/io_spec.rb +1 -0
  58. data/spec/lapack_spec.rb +197 -0
  59. data/spec/nmatrix_spec.rb +39 -38
  60. data/spec/nvector_spec.rb +3 -9
  61. data/spec/rspec_monkeys.rb +29 -0
  62. data/spec/rspec_spec.rb +34 -0
  63. data/spec/shortcuts_spec.rb +14 -16
  64. data/spec/slice_spec.rb +242 -186
  65. data/spec/spec_helper.rb +19 -0
  66. metadata +33 -5
  67. data/ext/nmatrix/util/math.h +0 -2612
@@ -303,8 +303,7 @@ NM_DEF_STRUCT_POST(NMATRIX); // };
303
303
  #define NM_STORAGE_DENSE(val) ((struct NM_DENSE_STORAGE*)(NM_STORAGE(val)))
304
304
  #endif
305
305
 
306
- #define NM_DENSE_SRC(val) (NM_STORAGE_DENSE(val)->src)
307
- #define NM_LIST_SRC(val) (NM_STORAGE_LIST(val)->src)
306
+ #define NM_SRC(val) (NM_STORAGE(val)->src)
308
307
  #define NM_DIM(val) (NM_STORAGE(val)->dim)
309
308
  #define NM_DTYPE(val) (NM_STORAGE(val)->dtype)
310
309
  #define NM_ITYPE(val) (NM_STORAGE_YALE(val)->itype)
@@ -9,8 +9,8 @@
9
9
  //
10
10
  // == Copyright Information
11
11
  //
12
- // SciRuby is Copyright (c) 2010 - 2013, Ruby Science Foundation
13
- // NMatrix is Copyright (c) 2013, Ruby Science Foundation
12
+ // SciRuby is Copyright (c) 2010 - 2012, Ruby Science Foundation
13
+ // NMatrix is Copyright (c) 2012, Ruby Science Foundation
14
14
  //
15
15
  // Please see LICENSE.txt for additional copyright notices.
16
16
  //
@@ -57,11 +57,12 @@ ID nm_rb_real,
57
57
 
58
58
  nm_rb_row,
59
59
  nm_rb_column,
60
-
61
60
  nm_rb_add,
62
61
  nm_rb_sub,
63
62
  nm_rb_mul,
64
63
  nm_rb_div,
64
+ nm_rb_both,
65
+ nm_rb_none,
65
66
 
66
67
  nm_rb_negate,
67
68
 
@@ -84,6 +85,7 @@ VALUE cNMatrix,
84
85
  cNMatrix_LAPACK,
85
86
 
86
87
  nm_eDataTypeError,
88
+ nm_eConvergenceError,
87
89
  nm_eStorageTypeError;
88
90
 
89
91
  /*
@@ -127,9 +129,12 @@ void nm_init_ruby_constants(void) {
127
129
  nm_rb_lower = rb_intern("lower");
128
130
  nm_rb_unit = rb_intern("unit");
129
131
  nm_rb_nonunit = rb_intern("nonunit");
130
-
131
132
  nm_rb_hash = rb_intern("hash");
132
133
 
133
134
  nm_rb_column = rb_intern("column");
134
135
  nm_rb_row = rb_intern("row");
136
+
137
+ //Added by Ryan
138
+ nm_rb_both = rb_intern("both");
139
+ nm_rb_none = rb_intern("none");
135
140
  }
@@ -87,6 +87,7 @@ extern VALUE cNMatrix,
87
87
  cNMatrix_LAPACK,
88
88
 
89
89
  nm_eDataTypeError,
90
+ nm_eConvergenceError,
90
91
  nm_eStorageTypeError;
91
92
 
92
93
  /*
@@ -35,9 +35,11 @@
35
35
  * Project Includes
36
36
  */
37
37
  // #include "types.h"
38
- #include "util/math.h"
39
-
40
38
  #include "data/data.h"
39
+ #include "math/long_dtype.h"
40
+ #include "math/gemm.h"
41
+ #include "math/gemv.h"
42
+ #include "math/math.h"
41
43
  #include "common.h"
42
44
  #include "dense.h"
43
45
 
@@ -73,6 +75,30 @@ namespace nm { namespace dense_storage {
73
75
  template <typename DType>
74
76
  bool is_symmetric(const DENSE_STORAGE* mat, int lda);
75
77
 
78
+
79
+ /*
80
+ * Recursive slicing for N-dimensional matrix.
81
+ */
82
+ template <typename LDType, typename RDType>
83
+ static void slice_copy(DENSE_STORAGE *dest, const DENSE_STORAGE *src, size_t* lengths, size_t pdest, size_t psrc, size_t n) {
84
+ if (src->dim - n > 1) {
85
+ for (size_t i = 0; i < lengths[n]; ++i) {
86
+ slice_copy<LDType,RDType>(dest, src, lengths,
87
+ pdest + dest->stride[n]*i,
88
+ psrc + src->stride[n]*i,
89
+ n + 1);
90
+ }
91
+ } else {
92
+ for (size_t p = 0; p < dest->shape[n]; ++p) {
93
+ reinterpret_cast<LDType*>(dest->elements)[p+pdest] = reinterpret_cast<RDType*>(src->elements)[p+psrc];
94
+ }
95
+ /*memcpy((char*)dest->elements + pdest*DTYPE_SIZES[dest->dtype],
96
+ (char*)src->elements + psrc*DTYPE_SIZES[src->dtype],
97
+ dest->shape[n]*DTYPE_SIZES[dest->dtype]); */
98
+ }
99
+
100
+ }
101
+
76
102
  }} // end of namespace nm::dense_storage
77
103
 
78
104
 
@@ -163,12 +189,12 @@ void nm_dense_storage_delete(STORAGE* s) {
163
189
  if (s) {
164
190
  DENSE_STORAGE* storage = (DENSE_STORAGE*)s;
165
191
  if(storage->count-- == 1) {
166
- free(storage->shape);
167
- free(storage->offset);
168
- free(storage->stride);
192
+ xfree(storage->shape);
193
+ xfree(storage->offset);
194
+ xfree(storage->stride);
169
195
  if (storage->elements != NULL) // happens with dummy objects
170
- free(storage->elements);
171
- free(storage);
196
+ xfree(storage->elements);
197
+ xfree(storage);
172
198
  }
173
199
  }
174
200
  }
@@ -181,9 +207,9 @@ void nm_dense_storage_delete_ref(STORAGE* s) {
181
207
  if (s) {
182
208
  DENSE_STORAGE* storage = (DENSE_STORAGE*)s;
183
209
  nm_dense_storage_delete( reinterpret_cast<STORAGE*>(storage->src) );
184
- free(storage->shape);
185
- free(storage->offset);
186
- free(storage);
210
+ xfree(storage->shape);
211
+ xfree(storage->offset);
212
+ xfree(storage);
187
213
  }
188
214
  }
189
215
 
@@ -367,6 +393,16 @@ VALUE nm_dense_each(VALUE nmatrix) {
367
393
  }
368
394
 
369
395
 
396
+ /*
397
+ * Non-templated version of nm::dense_storage::slice_copy
398
+ */
399
+ static void slice_copy(DENSE_STORAGE *dest, const DENSE_STORAGE *src, size_t* lengths, size_t pdest, size_t psrc, size_t n) {
400
+ NAMED_LR_DTYPE_TEMPLATE_TABLE(slice_copy_table, nm::dense_storage::slice_copy, void, DENSE_STORAGE*, const DENSE_STORAGE*, size_t*, size_t, size_t, size_t)
401
+
402
+ slice_copy_table[dest->dtype][src->dtype](dest, src, lengths, pdest, psrc, n);
403
+ }
404
+
405
+
370
406
  /*
371
407
  * Get a slice or one element, using copying.
372
408
  *
@@ -374,17 +410,16 @@ VALUE nm_dense_each(VALUE nmatrix) {
374
410
  */
375
411
  void* nm_dense_storage_get(STORAGE* storage, SLICE* slice) {
376
412
  DENSE_STORAGE* s = (DENSE_STORAGE*)storage;
377
- DENSE_STORAGE* ns;
378
413
 
379
414
  if (slice->single)
380
415
  return (char*)(s->elements) + nm_dense_storage_pos(s, slice->coords) * DTYPE_SIZES[s->dtype];
381
- else { // Make references
416
+ else {
382
417
  size_t *shape = ALLOC_N(size_t, s->dim);
383
418
  for (size_t i = 0; i < s->dim; ++i) {
384
419
  shape[i] = slice->lengths[i];
385
420
  }
386
421
 
387
- ns = nm_dense_storage_create(s->dtype, shape, s->dim, NULL, 0);
422
+ DENSE_STORAGE* ns = nm_dense_storage_create(s->dtype, shape, s->dim, NULL, 0);
388
423
 
389
424
  slice_copy(ns,
390
425
  reinterpret_cast<const DENSE_STORAGE*>(s->src),
@@ -392,6 +427,7 @@ void* nm_dense_storage_get(STORAGE* storage, SLICE* slice) {
392
427
  0,
393
428
  nm_dense_storage_pos(s, slice->coords),
394
429
  0);
430
+
395
431
  return ns;
396
432
  }
397
433
  }
@@ -552,24 +588,6 @@ static size_t* stride(size_t* shape, size_t dim) {
552
588
  return stride;
553
589
  }
554
590
 
555
- /*
556
- * Recursive slicing for N-dimensional matrix.
557
- */
558
- static void slice_copy(DENSE_STORAGE *dest, const DENSE_STORAGE *src, size_t* lengths, size_t pdest, size_t psrc, size_t n) {
559
- if (src->dim - n > 1) {
560
- for (size_t i = 0; i < lengths[n]; ++i) {
561
- slice_copy(dest, src, lengths,
562
- pdest + dest->stride[n]*i,
563
- psrc + src->stride[n]*i,
564
- n + 1);
565
- }
566
- } else {
567
- memcpy((char*)dest->elements + pdest*DTYPE_SIZES[dest->dtype],
568
- (char*)src->elements + psrc*DTYPE_SIZES[src->dtype],
569
- dest->shape[n]*DTYPE_SIZES[dest->dtype]);
570
- }
571
-
572
- }
573
591
 
574
592
  /////////////////////////
575
593
  // Copying and Casting //
@@ -694,22 +712,20 @@ DENSE_STORAGE* cast_copy(const DENSE_STORAGE* rhs, dtype_t new_dtype) {
694
712
 
695
713
  DENSE_STORAGE* lhs = nm_dense_storage_create(new_dtype, shape, rhs->dim, NULL, 0);
696
714
 
697
- RDType* rhs_els = reinterpret_cast<RDType*>(rhs->elements);
698
- LDType* lhs_els = reinterpret_cast<LDType*>(lhs->elements);
699
-
700
715
  // Ensure that allocation worked before copying.
701
716
  if (lhs && count) {
702
717
  if (rhs->src != rhs) { // Make a copy of a ref to a matrix.
718
+ size_t* offset = ALLOCA_N(size_t, rhs->dim);
719
+ memset(offset, 0, sizeof(size_t) * rhs->dim);
703
720
 
704
- DENSE_STORAGE* tmp = nm_dense_storage_copy(rhs);
705
-
706
- RDType* tmp_els = reinterpret_cast<RDType*>(tmp->elements);
707
- while (count-- > 0) {
708
- lhs_els[count] = tmp_els[count];
709
- }
710
- nm_dense_storage_delete(tmp);
721
+ slice_copy(lhs, reinterpret_cast<const DENSE_STORAGE*>(rhs->src),
722
+ rhs->shape, 0,
723
+ nm_dense_storage_pos(rhs, offset), 0);
711
724
 
712
725
  } else { // Make a regular copy.
726
+ RDType* rhs_els = reinterpret_cast<RDType*>(rhs->elements);
727
+ LDType* lhs_els = reinterpret_cast<LDType*>(lhs->elements);
728
+
713
729
  while (count-- > 0) lhs_els[count] = rhs_els[count];
714
730
  }
715
731
  }
@@ -33,6 +33,7 @@
33
33
  #include <ruby.h>
34
34
  #include <algorithm> // std::min
35
35
  #include <iostream>
36
+ #include <vector>
36
37
 
37
38
  /*
38
39
  * Project Includes
@@ -45,7 +46,7 @@
45
46
  #include "common.h"
46
47
  #include "list.h"
47
48
 
48
- #include "util/math.h"
49
+ #include "math/math.h"
49
50
  #include "util/sl_list.h"
50
51
 
51
52
  /*
@@ -83,7 +84,7 @@ public:
83
84
 
84
85
  size_t dim() const { return ref->dim; }
85
86
 
86
- size_t shape(size_t rec) const {
87
+ size_t ref_shape(size_t rec) const {
87
88
  return shape_[ref->dim - rec - 1];
88
89
  }
89
90
 
@@ -143,7 +144,7 @@ static void map_empty_stored_r(RecurseData& result, RecurseData& s, LIST* x, con
143
144
 
144
145
  // For reference matrices, make sure we start in the correct place.
145
146
  size_t offset = result.offset(rec);
146
- size_t x_shape = result.shape(rec);
147
+ size_t x_shape = result.ref_shape(rec);
147
148
 
148
149
  while (curr && curr->key < offset) { curr = curr->next; }
149
150
  if (curr && curr->key - offset >= x_shape) curr = NULL;
@@ -188,8 +189,8 @@ static void map_merged_stored_r(RecurseData& result, RecurseData& left, RecurseD
188
189
  while (lcurr && lcurr->key < left.offset(rec)) { lcurr = lcurr->next; }
189
190
  while (rcurr && rcurr->key < right.offset(rec)) { rcurr = rcurr->next; }
190
191
 
191
- if (rcurr && rcurr->key - right.offset(rec) >= result.shape(rec)) rcurr = NULL;
192
- if (lcurr && lcurr->key - left.offset(rec) >= result.shape(rec)) lcurr = NULL;
192
+ if (rcurr && rcurr->key - right.offset(rec) >= result.ref_shape(rec)) rcurr = NULL;
193
+ if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec)) lcurr = NULL;
193
194
 
194
195
  if (rec) {
195
196
  while (lcurr || rcurr) {
@@ -214,8 +215,8 @@ static void map_merged_stored_r(RecurseData& result, RecurseData& left, RecurseD
214
215
  if (!val->first) nm::list::del(val, 0); // empty list -- don't insert
215
216
  else xcurr = nm::list::insert_helper(x, xcurr, key, val);
216
217
 
217
- if (rcurr && rcurr->key - right.offset(rec) >= result.shape(rec)) rcurr = NULL;
218
- if (lcurr && lcurr->key - left.offset(rec) >= result.shape(rec)) lcurr = NULL;
218
+ if (rcurr && rcurr->key - right.offset(rec) >= result.ref_shape(rec)) rcurr = NULL;
219
+ if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec)) lcurr = NULL;
219
220
  }
220
221
  } else {
221
222
  while (lcurr || rcurr) {
@@ -239,8 +240,8 @@ static void map_merged_stored_r(RecurseData& result, RecurseData& left, RecurseD
239
240
  if (rb_funcall(val, rb_intern("!="), 1, result.init_obj()) == Qtrue)
240
241
  xcurr = nm::list::insert_helper(x, xcurr, key, val);
241
242
 
242
- if (rcurr && rcurr->key - right.offset(rec) >= result.shape(rec)) rcurr = NULL;
243
- if (lcurr && lcurr->key - left.offset(rec) >= result.shape(rec)) lcurr = NULL;
243
+ if (rcurr && rcurr->key - right.offset(rec) >= result.ref_shape(rec)) rcurr = NULL;
244
+ if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec)) lcurr = NULL;
244
245
  }
245
246
  }
246
247
  }
@@ -293,10 +294,10 @@ void nm_list_storage_delete(STORAGE* s) {
293
294
  if (storage->count-- == 1) {
294
295
  list::del( storage->rows, storage->dim - 1 );
295
296
 
296
- free(storage->shape);
297
- free(storage->offset);
298
- free(storage->default_val);
299
- free(s);
297
+ xfree(storage->shape);
298
+ xfree(storage->offset);
299
+ xfree(storage->default_val);
300
+ xfree(s);
300
301
  }
301
302
  }
302
303
  }
@@ -309,9 +310,9 @@ void nm_list_storage_delete_ref(STORAGE* s) {
309
310
  LIST_STORAGE* storage = (LIST_STORAGE*)s;
310
311
 
311
312
  nm_list_storage_delete( reinterpret_cast<STORAGE*>(storage->src ) );
312
- free(storage->shape);
313
- free(storage->offset);
314
- free(s);
313
+ xfree(storage->shape);
314
+ xfree(storage->offset);
315
+ xfree(s);
315
316
  }
316
317
  }
317
318
 
@@ -334,7 +335,7 @@ void nm_list_storage_mark(void* storage_base) {
334
335
  /*
335
336
  * Documentation goes here.
336
337
  */
337
- NODE* list_storage_get_single_node(LIST_STORAGE* s, SLICE* slice)
338
+ static NODE* list_storage_get_single_node(LIST_STORAGE* s, SLICE* slice)
338
339
  {
339
340
  size_t r;
340
341
  LIST* l = s->rows;
@@ -358,7 +359,7 @@ static void each_empty_with_indices_r(nm::list_storage::RecurseData& s, size_t r
358
359
  VALUE empty = s.dtype() == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(s.init()) : s.init_obj();
359
360
 
360
361
  if (rec) {
361
- for (long index = 0; index < s.shape(rec); ++index) {
362
+ for (long index = 0; index < s.ref_shape(rec); ++index) {
362
363
  // Don't do an unshift/shift here -- we'll let that be handled in the lowest-level iteration (recursions == 0)
363
364
  rb_ary_push(stack, LONG2NUM(index));
364
365
  each_empty_with_indices_r(s, rec-1, stack);
@@ -366,7 +367,7 @@ static void each_empty_with_indices_r(nm::list_storage::RecurseData& s, size_t r
366
367
  }
367
368
  } else {
368
369
  rb_ary_unshift(stack, empty);
369
- for (long index = 0; index < s.shape(rec); ++index) {
370
+ for (long index = 0; index < s.ref_shape(rec); ++index) {
370
371
  rb_ary_push(stack, LONG2NUM(index));
371
372
  rb_yield_splat(stack);
372
373
  rb_ary_pop(stack);
@@ -382,18 +383,18 @@ static void each_with_indices_r(nm::list_storage::RecurseData& s, const LIST* l,
382
383
  NODE* curr = l->first;
383
384
 
384
385
  size_t offset = s.offset(rec);
385
- size_t shape = s.shape(rec);
386
+ size_t shape = s.ref_shape(rec);
386
387
 
387
388
  while (curr && curr->key < offset) curr = curr->next;
388
- if (curr && curr->key >= shape) curr = NULL;
389
+ if (curr && curr->key - offset >= shape) curr = NULL;
389
390
 
390
391
 
391
392
  if (rec) {
392
- for (long index = 0; index < shape; ++index) {
393
+ for (long index = 0; index < shape; ++index) { // index in reference
393
394
  rb_ary_push(stack, LONG2NUM(index));
394
395
  if (!curr || index < curr->key - offset) {
395
396
  each_empty_with_indices_r(s, rec-1, stack);
396
- } else {
397
+ } else { // index == curr->key - offset
397
398
  each_with_indices_r(s, reinterpret_cast<const LIST*>(curr->val), rec-1, stack);
398
399
  curr = curr->next;
399
400
  }
@@ -407,7 +408,7 @@ static void each_with_indices_r(nm::list_storage::RecurseData& s, const LIST* l,
407
408
  if (!curr || index < curr->key - offset) {
408
409
  rb_ary_unshift(stack, s.dtype() == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(s.init()) : s.init_obj());
409
410
 
410
- } else { // index == curr->key
411
+ } else { // index == curr->key - offset
411
412
  rb_ary_unshift(stack, s.dtype() == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(curr->val) : rubyobj_from_cval(curr->val, s.dtype()).rval);
412
413
 
413
414
  curr = curr->next;
@@ -429,7 +430,7 @@ static void each_stored_with_indices_r(nm::list_storage::RecurseData& s, const L
429
430
  NODE* curr = l->first;
430
431
 
431
432
  size_t offset = s.offset(rec);
432
- size_t shape = s.shape(rec);
433
+ size_t shape = s.ref_shape(rec);
433
434
 
434
435
  while (curr && curr->key < offset) { curr = curr->next; }
435
436
  if (curr && curr->key - offset >= shape) curr = NULL;
@@ -460,7 +461,7 @@ static void each_stored_with_indices_r(nm::list_storage::RecurseData& s, const L
460
461
  rb_ary_pop(stack);
461
462
 
462
463
  curr = curr->next;
463
- if (curr && curr->key >= shape) curr = NULL;
464
+ if (curr && curr->key - offset >= shape) curr = NULL;
464
465
  }
465
466
  }
466
467
  }
@@ -500,7 +501,6 @@ VALUE nm_list_map_merged_stored(VALUE left, VALUE right, VALUE init) {
500
501
  nm::list_storage::RecurseData sdata(s);
501
502
 
502
503
  void* scalar_init = NULL;
503
- size_t* shape;
504
504
 
505
505
  // right might be a scalar, in which case this is a scalar operation.
506
506
  if (TYPE(right) != T_DATA || (RDATA(right)->dfree != (RUBY_DATA_FUNC)nm_delete && RDATA(right)->dfree != (RUBY_DATA_FUNC)nm_delete_ref)) {
@@ -541,8 +541,11 @@ VALUE nm_list_map_merged_stored(VALUE left, VALUE right, VALUE init) {
541
541
  }
542
542
 
543
543
 
544
-
544
+ /*
545
+ * Copy a slice of a list matrix into a regular list matrix.
546
+ */
545
547
  static LIST* slice_copy(const LIST_STORAGE *src, LIST *src_rows, size_t *coords, size_t *lengths, size_t n) {
548
+
546
549
  NODE *src_node;
547
550
  LIST *dst_rows = NULL;
548
551
  void *val = NULL;
@@ -586,10 +589,9 @@ void* nm_list_storage_get(STORAGE* storage, SLICE* slice) {
586
589
  NODE* n;
587
590
 
588
591
  if (slice->single) {
589
- n = list_storage_get_single_node(s, slice);
592
+ n = list_storage_get_single_node(s, slice);
590
593
  return (n ? n->val : s->default_val);
591
- }
592
- else {
594
+ } else {
593
595
  void *init_val = ALLOC_N(char, DTYPE_SIZES[s->dtype]);
594
596
  memcpy(init_val, s->default_val, DTYPE_SIZES[s->dtype]);
595
597
 
@@ -597,7 +599,7 @@ void* nm_list_storage_get(STORAGE* storage, SLICE* slice) {
597
599
  memcpy(shape, slice->lengths, sizeof(size_t) * s->dim);
598
600
 
599
601
  ns = nm_list_storage_create(s->dtype, shape, s->dim, init_val);
600
-
602
+
601
603
  ns->rows = slice_copy(s, s->rows, slice->coords, slice->lengths, 0);
602
604
  return ns;
603
605
  }
@@ -618,23 +620,23 @@ void* nm_list_storage_ref(STORAGE* storage, SLICE* slice) {
618
620
  return (n ? n->val : s->default_val);
619
621
  }
620
622
  else {
621
- ns = ALLOC( LIST_STORAGE );
623
+ ns = ALLOC( LIST_STORAGE );
622
624
 
623
- ns->dim = s->dim;
624
- ns->dtype = s->dtype;
625
- ns->offset = ALLOC_N(size_t, ns->dim);
626
- ns->shape = ALLOC_N(size_t, ns->dim);
625
+ ns->dim = s->dim;
626
+ ns->dtype = s->dtype;
627
+ ns->offset = ALLOC_N(size_t, ns->dim);
628
+ ns->shape = ALLOC_N(size_t, ns->dim);
627
629
 
628
630
  for (size_t i = 0; i < ns->dim; ++i) {
629
631
  ns->offset[i] = slice->coords[i] + s->offset[i];
630
632
  ns->shape[i] = slice->lengths[i];
631
633
  }
632
634
 
633
- ns->rows = s->rows;
635
+ ns->rows = s->rows;
634
636
  ns->default_val = s->default_val;
635
637
 
636
638
  s->src->count++;
637
- ns->src = s->src;
639
+ ns->src = s->src;
638
640
 
639
641
  return ns;
640
642
  }
@@ -874,21 +876,21 @@ static bool eqeq_empty_r(RecurseData& s, const LIST* l, size_t rec, const TDType
874
876
 
875
877
  // For reference matrices, make sure we start in the correct place.
876
878
  while (curr && curr->key < s.offset(rec)) { curr = curr->next; }
877
- if (curr && curr->key - s.offset(rec) >= s.shape(rec)) curr = NULL;
879
+ if (curr && curr->key - s.offset(rec) >= s.ref_shape(rec)) curr = NULL;
878
880
 
879
881
  if (rec) {
880
882
  while (curr) {
881
883
  if (!eqeq_empty_r<SDType,TDType>(s, reinterpret_cast<const LIST*>(curr->val), rec-1, t_init)) return false;
882
884
  curr = curr->next;
883
885
 
884
- if (curr && curr->key - s.offset(rec) >= s.shape(rec)) curr = NULL;
886
+ if (curr && curr->key - s.offset(rec) >= s.ref_shape(rec)) curr = NULL;
885
887
  }
886
888
  } else {
887
889
  while (curr) {
888
890
  if (*reinterpret_cast<SDType*>(curr->val) != *t_init) return false;
889
891
  curr = curr->next;
890
892
 
891
- if (curr && curr->key - s.offset(rec) >= s.shape(rec)) curr = NULL;
893
+ if (curr && curr->key - s.offset(rec) >= s.ref_shape(rec)) curr = NULL;
892
894
  }
893
895
  }
894
896
  return true;
@@ -909,8 +911,8 @@ static bool eqeq_r(RecurseData& left, RecurseData& right, const LIST* l, const L
909
911
  // For reference matrices, make sure we start in the correct place.
910
912
  while (lcurr && lcurr->key < left.offset(rec)) { lcurr = lcurr->next; }
911
913
  while (rcurr && rcurr->key < right.offset(rec)) { rcurr = rcurr->next; }
912
- if (rcurr && rcurr->key - right.offset(rec) >= left.shape(rec)) rcurr = NULL;
913
- if (lcurr && lcurr->key - left.offset(rec) >= left.shape(rec)) lcurr = NULL;
914
+ if (rcurr && rcurr->key - right.offset(rec) >= left.ref_shape(rec)) rcurr = NULL;
915
+ if (lcurr && lcurr->key - left.offset(rec) >= left.ref_shape(rec)) lcurr = NULL;
914
916
 
915
917
  bool compared = false;
916
918
 
@@ -929,15 +931,15 @@ static bool eqeq_r(RecurseData& left, RecurseData& right, const LIST* l, const L
929
931
  lcurr = lcurr->next;
930
932
  rcurr = rcurr->next;
931
933
  }
932
- if (rcurr && rcurr->key - right.offset(rec) >= left.shape(rec)) rcurr = NULL;
933
- if (lcurr && lcurr->key - left.offset(rec) >= left.shape(rec)) lcurr = NULL;
934
+ if (rcurr && rcurr->key - right.offset(rec) >= right.ref_shape(rec)) rcurr = NULL;
935
+ if (lcurr && lcurr->key - left.offset(rec) >= left.ref_shape(rec)) lcurr = NULL;
934
936
  compared = true;
935
937
  }
936
938
  } else {
937
939
  while (lcurr || rcurr) {
938
940
 
939
- if (rcurr && rcurr->key - right.offset(rec) >= left.shape(rec)) rcurr = NULL;
940
- if (lcurr && lcurr->key - left.offset(rec) >= left.shape(rec)) lcurr = NULL;
941
+ if (rcurr && rcurr->key - right.offset(rec) >= left.ref_shape(rec)) rcurr = NULL;
942
+ if (lcurr && lcurr->key - left.offset(rec) >= left.ref_shape(rec)) lcurr = NULL;
941
943
 
942
944
  if (!rcurr || (lcurr && (lcurr->key - left.offset(rec) < rcurr->key - right.offset(rec)))) {
943
945
  if (*reinterpret_cast<LDType*>(lcurr->val) != *reinterpret_cast<const RDType*>(right.init())) return false;
@@ -950,8 +952,8 @@ static bool eqeq_r(RecurseData& left, RecurseData& right, const LIST* l, const L
950
952
  lcurr = lcurr->next;
951
953
  rcurr = rcurr->next;
952
954
  }
953
- if (rcurr && rcurr->key - right.offset(rec) >= left.shape(rec)) rcurr = NULL;
954
- if (lcurr && lcurr->key - left.offset(rec) >= left.shape(rec)) lcurr = NULL;
955
+ if (rcurr && rcurr->key - right.offset(rec) >= right.ref_shape(rec)) rcurr = NULL;
956
+ if (lcurr && lcurr->key - left.offset(rec) >= left.ref_shape(rec)) lcurr = NULL;
955
957
  compared = true;
956
958
  }
957
959
  }