nmatrix 0.0.9 → 0.1.0.rc1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (101)
  1. checksums.yaml +4 -4
  2. data/Gemfile +1 -0
  3. data/History.txt +95 -1
  4. data/LICENSE.txt +2 -2
  5. data/README.rdoc +24 -26
  6. data/Rakefile +32 -16
  7. data/ext/nmatrix/data/complex.h +2 -2
  8. data/ext/nmatrix/data/data.cpp +27 -51
  9. data/ext/nmatrix/data/data.h +92 -4
  10. data/ext/nmatrix/data/meta.h +2 -2
  11. data/ext/nmatrix/data/rational.h +2 -2
  12. data/ext/nmatrix/data/ruby_object.h +2 -2
  13. data/ext/nmatrix/extconf.rb +87 -86
  14. data/ext/nmatrix/math.cpp +45 -40
  15. data/ext/nmatrix/math/asum.h +3 -3
  16. data/ext/nmatrix/math/geev.h +2 -2
  17. data/ext/nmatrix/math/gemm.h +6 -2
  18. data/ext/nmatrix/math/gemv.h +6 -2
  19. data/ext/nmatrix/math/ger.h +2 -2
  20. data/ext/nmatrix/math/gesdd.h +2 -2
  21. data/ext/nmatrix/math/gesvd.h +2 -2
  22. data/ext/nmatrix/math/getf2.h +2 -2
  23. data/ext/nmatrix/math/getrf.h +2 -2
  24. data/ext/nmatrix/math/getri.h +2 -2
  25. data/ext/nmatrix/math/getrs.h +7 -3
  26. data/ext/nmatrix/math/idamax.h +2 -2
  27. data/ext/nmatrix/math/inc.h +12 -6
  28. data/ext/nmatrix/math/laswp.h +2 -2
  29. data/ext/nmatrix/math/long_dtype.h +2 -2
  30. data/ext/nmatrix/math/math.h +16 -10
  31. data/ext/nmatrix/math/nrm2.h +3 -3
  32. data/ext/nmatrix/math/potrs.h +7 -3
  33. data/ext/nmatrix/math/rot.h +2 -2
  34. data/ext/nmatrix/math/rotg.h +2 -2
  35. data/ext/nmatrix/math/scal.h +2 -2
  36. data/ext/nmatrix/math/swap.h +2 -2
  37. data/ext/nmatrix/math/trsm.h +7 -3
  38. data/ext/nmatrix/nm_memory.h +60 -0
  39. data/ext/nmatrix/nmatrix.cpp +13 -47
  40. data/ext/nmatrix/nmatrix.h +37 -12
  41. data/ext/nmatrix/ruby_constants.cpp +4 -2
  42. data/ext/nmatrix/ruby_constants.h +4 -2
  43. data/ext/nmatrix/ruby_nmatrix.c +937 -170
  44. data/ext/nmatrix/storage/common.cpp +2 -2
  45. data/ext/nmatrix/storage/common.h +2 -2
  46. data/ext/nmatrix/storage/{dense.cpp → dense/dense.cpp} +253 -100
  47. data/ext/nmatrix/storage/{dense.h → dense/dense.h} +6 -5
  48. data/ext/nmatrix/storage/{list.cpp → list/list.cpp} +517 -98
  49. data/ext/nmatrix/storage/{list.h → list/list.h} +13 -6
  50. data/ext/nmatrix/storage/storage.cpp +48 -19
  51. data/ext/nmatrix/storage/storage.h +4 -4
  52. data/ext/nmatrix/storage/yale/class.h +112 -43
  53. data/ext/nmatrix/storage/yale/iterators/base.h +2 -2
  54. data/ext/nmatrix/storage/yale/iterators/iterator.h +2 -2
  55. data/ext/nmatrix/storage/yale/iterators/row.h +2 -2
  56. data/ext/nmatrix/storage/yale/iterators/row_stored.h +2 -2
  57. data/ext/nmatrix/storage/yale/iterators/row_stored_nd.h +4 -3
  58. data/ext/nmatrix/storage/yale/iterators/stored_diagonal.h +2 -2
  59. data/ext/nmatrix/storage/yale/math/transpose.h +2 -2
  60. data/ext/nmatrix/storage/yale/yale.cpp +343 -52
  61. data/ext/nmatrix/storage/yale/yale.h +7 -3
  62. data/ext/nmatrix/types.h +2 -2
  63. data/ext/nmatrix/util/io.cpp +5 -5
  64. data/ext/nmatrix/util/io.h +2 -2
  65. data/ext/nmatrix/util/sl_list.cpp +40 -27
  66. data/ext/nmatrix/util/sl_list.h +3 -3
  67. data/ext/nmatrix/util/util.h +2 -2
  68. data/lib/nmatrix.rb +2 -2
  69. data/lib/nmatrix/blas.rb +2 -2
  70. data/lib/nmatrix/enumerate.rb +17 -6
  71. data/lib/nmatrix/io/market.rb +2 -3
  72. data/lib/nmatrix/io/mat5_reader.rb +2 -2
  73. data/lib/nmatrix/io/mat_reader.rb +2 -2
  74. data/lib/nmatrix/lapack.rb +46 -46
  75. data/lib/nmatrix/math.rb +213 -20
  76. data/lib/nmatrix/monkeys.rb +24 -2
  77. data/lib/nmatrix/nmatrix.rb +394 -9
  78. data/lib/nmatrix/nvector.rb +2 -64
  79. data/lib/nmatrix/rspec.rb +2 -2
  80. data/lib/nmatrix/shortcuts.rb +14 -61
  81. data/lib/nmatrix/version.rb +11 -3
  82. data/lib/nmatrix/yale_functions.rb +4 -4
  83. data/nmatrix.gemspec +2 -7
  84. data/scripts/mac-brew-gcc.sh +11 -8
  85. data/scripts/mac-mavericks-brew-gcc.sh +22 -0
  86. data/spec/00_nmatrix_spec.rb +116 -7
  87. data/spec/01_enum_spec.rb +17 -3
  88. data/spec/02_slice_spec.rb +11 -3
  89. data/spec/blas_spec.rb +5 -2
  90. data/spec/elementwise_spec.rb +5 -2
  91. data/spec/io_spec.rb +27 -17
  92. data/spec/lapack_spec.rb +157 -9
  93. data/spec/math_spec.rb +95 -4
  94. data/spec/nmatrix_yale_spec.rb +21 -26
  95. data/spec/rspec_monkeys.rb +27 -0
  96. data/spec/rspec_spec.rb +2 -2
  97. data/spec/shortcuts_spec.rb +5 -10
  98. data/spec/slice_set_spec.rb +6 -2
  99. data/spec/spec_helper.rb +3 -2
  100. data/spec/stat_spec.rb +174 -158
  101. metadata +15 -15
@@ -9,8 +9,8 @@
9
9
  //
10
10
  // == Copyright Information
11
11
  //
12
- // SciRuby is Copyright (c) 2010 - 2013, Ruby Science Foundation
13
- // NMatrix is Copyright (c) 2013, Ruby Science Foundation
12
+ // SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
13
+ // NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
14
14
  //
15
15
  // Please see LICENSE.txt for additional copyright notices.
16
16
  //
@@ -34,14 +34,14 @@
34
34
  */
35
35
 
36
36
  #include <stdlib.h>
37
-
37
+ #include <list>
38
38
  /*
39
39
  * Project Includes
40
40
  */
41
41
 
42
42
  #include "types.h"
43
43
  #include "data/data.h"
44
- #include "common.h"
44
+ #include "../common.h"
45
45
  #include "util/sl_list.h"
46
46
  #include "nmatrix.h"
47
47
 
@@ -73,7 +73,13 @@ extern "C" {
73
73
  void nm_list_storage_delete(STORAGE* s);
74
74
  void nm_list_storage_delete_ref(STORAGE* s);
75
75
  void nm_list_storage_mark(STORAGE*);
76
-
76
+ void nm_list_storage_register(const STORAGE* s);
77
+ void nm_list_storage_unregister(const STORAGE* s);
78
+ void nm_list_storage_register_list(const LIST* l, size_t recursions);
79
+ void nm_list_storage_unregister_list(const LIST* l, size_t recursions);
80
+ void nm_list_storage_register_node(const NODE* n);
81
+ void nm_list_storage_unregister_node(const NODE* n);
82
+ void nm_list_storage_completely_unregister_node(const NODE* curr);
77
83
  ///////////////
78
84
  // Accessors //
79
85
  ///////////////
@@ -82,7 +88,7 @@ extern "C" {
82
88
  void* nm_list_storage_ref(const STORAGE* s, SLICE* slice);
83
89
  void* nm_list_storage_get(const STORAGE* s, SLICE* slice);
84
90
  NODE* nm_list_storage_insert(STORAGE* s, SLICE* slice, void* val);
85
- void nm_list_storage_set(VALUE left, SLICE* slice, VALUE right);
91
+ void nm_list_storage_set(VALUE left, SLICE* slice, VALUE right);
86
92
  void nm_list_storage_remove(STORAGE* s, SLICE* slice);
87
93
 
88
94
  ///////////
@@ -124,6 +130,7 @@ extern "C" {
124
130
  // Exposed functions
125
131
  VALUE nm_to_hash(VALUE self);
126
132
  VALUE nm_list_map_merged_stored(VALUE left, VALUE right, VALUE init);
133
+ VALUE nm_list_map_stored(VALUE left, VALUE init);
127
134
  VALUE nm_list_default_value(VALUE self);
128
135
  } // end of extern "C" block
129
136
 
@@ -9,8 +9,8 @@
9
9
  //
10
10
  // == Copyright Information
11
11
  //
12
- // SciRuby is Copyright (c) 2010 - 2013, Ruby Science Foundation
13
- // NMatrix is Copyright (c) 2013, Ruby Science Foundation
12
+ // SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
13
+ // NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
14
14
  //
15
15
  // Please see LICENSE.txt for additional copyright notices.
16
16
  //
@@ -87,9 +87,9 @@ static void cast_copy_list_default(LDType* lhs, RDType* default_val, size_t& pos
87
87
  */
88
88
  template <typename LDType, typename RDType>
89
89
  DENSE_STORAGE* create_from_list_storage(const LIST_STORAGE* rhs, dtype_t l_dtype) {
90
-
90
+ nm_list_storage_register(rhs);
91
91
  // allocate and copy shape
92
- size_t* shape = ALLOC_N(size_t, rhs->dim);
92
+ size_t* shape = NM_ALLOC_N(size_t, rhs->dim);
93
93
  memcpy(shape, rhs->shape, rhs->dim * sizeof(size_t));
94
94
 
95
95
  DENSE_STORAGE* lhs = nm_dense_storage_create(l_dtype, shape, rhs->dim, NULL, 0);
@@ -114,6 +114,7 @@ DENSE_STORAGE* create_from_list_storage(const LIST_STORAGE* rhs, dtype_t l_dtype
114
114
  nm_list_storage_delete(tmp);
115
115
 
116
116
  }
117
+ nm_list_storage_unregister(rhs);
117
118
 
118
119
  return lhs;
119
120
  }
@@ -127,12 +128,13 @@ DENSE_STORAGE* create_from_list_storage(const LIST_STORAGE* rhs, dtype_t l_dtype
127
128
  template <typename LDType, typename RDType>
128
129
  DENSE_STORAGE* create_from_yale_storage(const YALE_STORAGE* rhs, dtype_t l_dtype) {
129
130
 
131
+ nm_yale_storage_register(rhs);
130
132
  // Position in rhs->elements.
131
133
  IType* rhs_ija = reinterpret_cast<YALE_STORAGE*>(rhs->src)->ija;
132
134
  RDType* rhs_a = reinterpret_cast<RDType*>(reinterpret_cast<YALE_STORAGE*>(rhs->src)->a);
133
135
 
134
136
  // Allocate and set shape.
135
- size_t* shape = ALLOC_N(size_t, rhs->dim);
137
+ size_t* shape = NM_ALLOC_N(size_t, rhs->dim);
136
138
  shape[0] = rhs->shape[0];
137
139
  shape[1] = rhs->shape[1];
138
140
 
@@ -195,6 +197,7 @@ DENSE_STORAGE* create_from_yale_storage(const YALE_STORAGE* rhs, dtype_t l_dtype
195
197
  }
196
198
  }
197
199
  }
200
+ nm_yale_storage_unregister(rhs);
198
201
 
199
202
  return lhs;
200
203
  }
@@ -209,7 +212,9 @@ static void cast_copy_list_contents(LDType* lhs, const LIST* rhs, RDType* defaul
209
212
  NODE *curr = rhs->first;
210
213
  int last_key = -1;
211
214
 
212
- for (size_t i = 0; i < shape[dim - 1 - recursions]; ++i, ++pos) {
215
+ nm_list_storage_register_list(rhs, recursions);
216
+
217
+ for (size_t i = 0; i < shape[dim - 1 - recursions]; ++i, ++pos) {
213
218
 
214
219
  if (!curr || (curr->key > (size_t)(last_key+1))) {
215
220
 
@@ -229,6 +234,8 @@ static void cast_copy_list_contents(LDType* lhs, const LIST* rhs, RDType* defaul
229
234
  }
230
235
  }
231
236
 
237
+ nm_list_storage_unregister_list(rhs, recursions);
238
+
232
239
  --pos;
233
240
  }
234
241
 
@@ -237,7 +244,7 @@ static void cast_copy_list_contents(LDType* lhs, const LIST* rhs, RDType* defaul
237
244
  */
238
245
  template <typename LDType,typename RDType>
239
246
  static void cast_copy_list_default(LDType* lhs, RDType* default_val, size_t& pos, const size_t* shape, size_t dim, size_t max_elements, size_t recursions) {
240
- for (size_t i = 0; i < shape[dim - 1 - recursions]; ++i, ++pos) {
247
+ for (size_t i = 0; i < shape[dim - 1 - recursions]; ++i, ++pos) {
241
248
 
242
249
  if (recursions == 0) lhs[pos] = static_cast<LDType>(*default_val);
243
250
  else cast_copy_list_default<LDType,RDType>(lhs, default_val, pos, shape, dim, max_elements, recursions-1);
@@ -261,13 +268,14 @@ static bool cast_copy_contents_dense(LIST* lhs, const RDType* rhs, RDType* zero,
261
268
  */
262
269
  template <typename LDType, typename RDType>
263
270
  LIST_STORAGE* create_from_dense_storage(const DENSE_STORAGE* rhs, dtype_t l_dtype, void* init) {
271
+ nm_dense_storage_register(rhs);
264
272
 
265
- LDType* l_default_val = ALLOC_N(LDType, 1);
266
- RDType* r_default_val = ALLOCA_N(RDType, 1); // clean up when finished with this function
273
+ LDType* l_default_val = NM_ALLOC_N(LDType, 1);
274
+ RDType* r_default_val = NM_ALLOCA_N(RDType, 1); // clean up when finished with this function
267
275
 
268
276
  // allocate and copy shape and coords
269
- size_t *shape = ALLOC_N(size_t, rhs->dim),
270
- *coords = ALLOC_N(size_t, rhs->dim);
277
+ size_t *shape = NM_ALLOC_N(size_t, rhs->dim),
278
+ *coords = NM_ALLOC_N(size_t, rhs->dim);
271
279
 
272
280
  memcpy(shape, rhs->shape, rhs->dim * sizeof(size_t));
273
281
  memset(coords, 0, rhs->dim * sizeof(size_t));
@@ -286,6 +294,8 @@ LIST_STORAGE* create_from_dense_storage(const DENSE_STORAGE* rhs, dtype_t l_dtyp
286
294
 
287
295
  LIST_STORAGE* lhs = nm_list_storage_create(l_dtype, shape, rhs->dim, l_default_val);
288
296
 
297
+ nm_list_storage_register(lhs);
298
+
289
299
  size_t pos = 0;
290
300
 
291
301
  if (rhs->src == rhs)
@@ -303,6 +313,9 @@ LIST_STORAGE* create_from_dense_storage(const DENSE_STORAGE* rhs, dtype_t l_dtyp
303
313
  nm_dense_storage_delete(tmp);
304
314
  }
305
315
 
316
+ nm_list_storage_unregister(lhs);
317
+ nm_dense_storage_unregister(rhs);
318
+
306
319
  return lhs;
307
320
  }
308
321
 
@@ -314,14 +327,16 @@ LIST_STORAGE* create_from_dense_storage(const DENSE_STORAGE* rhs, dtype_t l_dtyp
314
327
  template <typename LDType, typename RDType>
315
328
  LIST_STORAGE* create_from_yale_storage(const YALE_STORAGE* rhs, dtype_t l_dtype) {
316
329
  // allocate and copy shape
317
- size_t *shape = ALLOC_N(size_t, rhs->dim);
330
+ nm_yale_storage_register(rhs);
331
+
332
+ size_t *shape = NM_ALLOC_N(size_t, rhs->dim);
318
333
  shape[0] = rhs->shape[0]; shape[1] = rhs->shape[1];
319
334
 
320
335
  RDType* rhs_a = reinterpret_cast<RDType*>(reinterpret_cast<YALE_STORAGE*>(rhs->src)->a);
321
336
  RDType R_ZERO = rhs_a[ rhs->src->shape[0] ];
322
337
 
323
338
  // copy default value from the zero location in the Yale matrix
324
- LDType* default_val = ALLOC_N(LDType, 1);
339
+ LDType* default_val = NM_ALLOC_N(LDType, 1);
325
340
  *default_val = static_cast<LDType>(R_ZERO);
326
341
 
327
342
  LIST_STORAGE* lhs = nm_list_storage_create(l_dtype, shape, rhs->dim, default_val);
@@ -360,7 +375,7 @@ LIST_STORAGE* create_from_yale_storage(const YALE_STORAGE* rhs, dtype_t l_dtype)
360
375
  // Is there a nonzero diagonal item between the previously added item and the current one?
361
376
  if (rj > ri && add_diag) {
362
377
  // Allocate and copy insertion value
363
- insert_val = ALLOC_N(LDType, 1);
378
+ insert_val = NM_ALLOC_N(LDType, 1);
364
379
  *insert_val = static_cast<LDType>(rhs_a[ri]);
365
380
 
366
381
  // Insert the item in the list at the appropriate location.
@@ -375,7 +390,7 @@ LIST_STORAGE* create_from_yale_storage(const YALE_STORAGE* rhs, dtype_t l_dtype)
375
390
  }
376
391
 
377
392
  // now allocate and add the current item
378
- insert_val = ALLOC_N(LDType, 1);
393
+ insert_val = NM_ALLOC_N(LDType, 1);
379
394
  *insert_val = static_cast<LDType>(rhs_a[ija]);
380
395
 
381
396
  if (last_added) last_added = list::insert_after(last_added, j, insert_val);
@@ -387,7 +402,7 @@ LIST_STORAGE* create_from_yale_storage(const YALE_STORAGE* rhs, dtype_t l_dtype)
387
402
  if (add_diag) {
388
403
 
389
404
  // still haven't added the diagonal.
390
- insert_val = ALLOC_N(LDType, 1);
405
+ insert_val = NM_ALLOC_N(LDType, 1);
391
406
  *insert_val = static_cast<LDType>(rhs_a[ri]);
392
407
 
393
408
  // insert the item in the list at the appropriate location
@@ -405,6 +420,8 @@ LIST_STORAGE* create_from_yale_storage(const YALE_STORAGE* rhs, dtype_t l_dtype)
405
420
  // end of walk through rows
406
421
  }
407
422
 
423
+ nm_yale_storage_unregister(rhs);
424
+
408
425
  return lhs;
409
426
  }
410
427
 
@@ -415,6 +432,9 @@ LIST_STORAGE* create_from_yale_storage(const YALE_STORAGE* rhs, dtype_t l_dtype)
415
432
  */
416
433
  template <typename LDType, typename RDType>
417
434
  static bool cast_copy_contents_dense(LIST* lhs, const RDType* rhs, RDType* zero, size_t& pos, size_t* coords, const size_t* shape, size_t dim, size_t recursions) {
435
+
436
+ nm_list_storage_register_list(lhs, recursions);
437
+
418
438
  NODE *prev = NULL;
419
439
  LIST *sub_list;
420
440
  bool added = false, added_list = false;
@@ -429,7 +449,7 @@ static bool cast_copy_contents_dense(LIST* lhs, const RDType* rhs, RDType* zero,
429
449
  // is not zero
430
450
 
431
451
  // Create a copy of our value that we will insert in the list
432
- LDType* insert_value = ALLOC_N(LDType, 1);
452
+ LDType* insert_value = NM_ALLOC_N(LDType, 1);
433
453
  *insert_value = static_cast<LDType>(rhs[pos]);
434
454
 
435
455
  if (!lhs->first) prev = list::insert(lhs, false, coords[dim-1-recursions], insert_value);
@@ -453,6 +473,8 @@ static bool cast_copy_contents_dense(LIST* lhs, const RDType* rhs, RDType* zero,
453
473
  }
454
474
  }
455
475
 
476
+ nm_list_storage_unregister_list(lhs, recursions);
477
+
456
478
  coords[dim-1-recursions] = 0;
457
479
  --pos;
458
480
 
@@ -471,6 +493,8 @@ namespace yale_storage { // FIXME: Move to yale.cpp
471
493
 
472
494
  if (rhs->dim != 2) rb_raise(nm_eStorageTypeError, "can only convert matrices of dim 2 to yale");
473
495
 
496
+ nm_dense_storage_register(rhs);
497
+
474
498
  IType pos = 0;
475
499
  IType ndnz = 0;
476
500
 
@@ -495,7 +519,7 @@ namespace yale_storage { // FIXME: Move to yale.cpp
495
519
  }
496
520
 
497
521
  // Copy shape for yale construction
498
- size_t* shape = ALLOC_N(size_t, 2);
522
+ size_t* shape = NM_ALLOC_N(size_t, 2);
499
523
  shape[0] = rhs->shape[0];
500
524
  shape[1] = rhs->shape[1];
501
525
 
@@ -539,6 +563,8 @@ namespace yale_storage { // FIXME: Move to yale.cpp
539
563
  lhs_ija[shape[0]] = ija; // indicate the end of the last row
540
564
  lhs->ndnz = ndnz;
541
565
 
566
+ nm_dense_storage_unregister(rhs);
567
+
542
568
  return lhs;
543
569
  }
544
570
 
@@ -556,10 +582,11 @@ namespace yale_storage { // FIXME: Move to yale.cpp
556
582
  } else if (strncmp(reinterpret_cast<const char*>(rhs->default_val), "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", DTYPE_SIZES[rhs->dtype]))
557
583
  rb_raise(nm_eStorageTypeError, "list matrix of non-Ruby objects must have default value of 0 to convert to yale");
558
584
 
585
+ nm_list_storage_register(rhs);
559
586
 
560
587
  size_t ndnz = nm_list_storage_count_nd_elements(rhs);
561
588
  // Copy shape for yale construction
562
- size_t* shape = ALLOC_N(size_t, 2);
589
+ size_t* shape = NM_ALLOC_N(size_t, 2);
563
590
  shape[0] = rhs->shape[0];
564
591
  shape[1] = rhs->shape[1];
565
592
 
@@ -612,6 +639,8 @@ namespace yale_storage { // FIXME: Move to yale.cpp
612
639
  lhs_ija[rhs->shape[0]] = ija; // indicate the end of the last row
613
640
  lhs->ndnz = ndnz;
614
641
 
642
+ nm_list_storage_unregister(rhs);
643
+
615
644
  return lhs;
616
645
  }
617
646
 
@@ -9,8 +9,8 @@
9
9
  //
10
10
  // == Copyright Information
11
11
  //
12
- // SciRuby is Copyright (c) 2010 - 2013, Ruby Science Foundation
13
- // NMatrix is Copyright (c) 2013, Ruby Science Foundation
12
+ // SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
13
+ // NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
14
14
  //
15
15
  // Please see LICENSE.txt for additional copyright notices.
16
16
  //
@@ -45,8 +45,8 @@
45
45
  #include "data/data.h"
46
46
 
47
47
  #include "common.h"
48
- #include "dense.h"
49
- #include "list.h"
48
+ #include "dense/dense.h"
49
+ #include "list/list.h"
50
50
  #include "yale/yale.h"
51
51
 
52
52
  /*
@@ -9,8 +9,8 @@
9
9
  //
10
10
  // == Copyright Information
11
11
  //
12
- // SciRuby is Copyright (c) 2010 - 2013, Ruby Science Foundation
13
- // NMatrix is Copyright (c) 2013, Ruby Science Foundation
12
+ // SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
13
+ // NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
14
14
  //
15
15
  // Please see LICENSE.txt for additional copyright notices.
16
16
  //
@@ -29,8 +29,9 @@
29
29
  #ifndef YALE_CLASS_H
30
30
  # define YALE_CLASS_H
31
31
 
32
- #include "../dense.h"
32
+ #include "../dense/dense.h"
33
33
  #include "math/transpose.h"
34
+ #include "yale.h"
34
35
 
35
36
  namespace nm {
36
37
 
@@ -50,14 +51,22 @@ public:
50
51
  slice(storage != storage->src),
51
52
  slice_shape(storage->shape),
52
53
  slice_offset(storage->offset)
53
- { }
54
+ {
55
+ nm_yale_storage_register(storage->src);
56
+ }
54
57
 
55
58
  YaleStorage(const STORAGE* storage)
56
59
  : s(reinterpret_cast<YALE_STORAGE*>(storage->src)),
57
60
  slice(storage != storage->src),
58
61
  slice_shape(storage->shape),
59
62
  slice_offset(storage->offset)
60
- { }
63
+ {
64
+ nm_yale_storage_register(reinterpret_cast<STORAGE*>(storage->src));
65
+ }
66
+
67
+ ~YaleStorage() {
68
+ nm_yale_storage_unregister(s);
69
+ }
61
70
 
62
71
  /* Allows us to do YaleStorage<uint8>::dtype() to get an nm::dtype_t */
63
72
  static nm::dtype_t dtype() {
@@ -72,6 +81,7 @@ public:
72
81
  inline const D& default_obj() const { return a(s->shape[0]); }
73
82
  inline const D& const_default_obj() const { return a(s->shape[0]); }
74
83
 
84
+
75
85
  /*
76
86
  * Return a Ruby VALUE representation of default_obj()
77
87
  */
@@ -98,6 +108,14 @@ public:
98
108
  inline size_t size() const { return ija(real_shape(0)); }
99
109
 
100
110
 
111
+ /*
112
+ * Returns true if the value at apos is the default value.
113
+ * Mainly used for determining if the diagonal contains zeros.
114
+ */
115
+ bool is_pos_default_value(size_t apos) const {
116
+ return (a(apos) == const_default_obj());
117
+ }
118
+
101
119
  /*
102
120
  * Given a size-2 array of size_t, representing the shape, determine
103
121
  * the maximum size of YaleStorage arrays.
@@ -328,7 +346,7 @@ public:
328
346
 
329
347
  // Make the necessary modifications, which hopefully can be done in-place.
330
348
  size_t v_offset = 0;
331
- int accum = 0;
349
+ //int accum = 0;
332
350
  for (size_t ii = 0; ii < lengths[0]; ++ii, ++i) {
333
351
  i.insert(row_stored_nd_iterator(i, p.pos[ii]), j, lengths[1], v, v_size, v_offset);
334
352
  }
@@ -344,6 +362,8 @@ public:
344
362
  */
345
363
  void insert(SLICE* slice, VALUE right) {
346
364
 
365
+ NM_CONSERVATIVE(nm_register_value(right));
366
+
347
367
  std::pair<NMATRIX*,bool> nm_and_free =
348
368
  interpret_arg_as_dense_nmatrix(right, dtype());
349
369
  // Map the data onto D* v
@@ -358,10 +378,17 @@ public:
358
378
 
359
379
  } else if (TYPE(right) == T_ARRAY) {
360
380
  v_size = RARRAY_LEN(right);
361
- v = ALLOC_N(D, v_size);
381
+ v = NM_ALLOC_N(D, v_size);
382
+ if (dtype() == nm::RUBYOBJ) {
383
+ nm_register_values(reinterpret_cast<VALUE*>(v), v_size);
384
+ }
362
385
  for (size_t m = 0; m < v_size; ++m) {
363
386
  rubyval_to_cval(rb_ary_entry(right, m), s->dtype, &(v[m]));
364
387
  }
388
+ if (dtype() == nm::RUBYOBJ) {
389
+ nm_unregister_values(reinterpret_cast<VALUE*>(v), v_size);
390
+ }
391
+
365
392
  } else {
366
393
  v = reinterpret_cast<D*>(rubyobj_to_cval(right, dtype()));
367
394
  }
@@ -381,7 +408,9 @@ public:
381
408
  if (nm_and_free.second) {
382
409
  nm_delete(nm_and_free.first);
383
410
  }
384
- } else xfree(v);
411
+ } else NM_FREE(v);
412
+
413
+ NM_CONSERVATIVE(nm_unregister_value(right));
385
414
  }
386
415
 
387
416
 
@@ -489,15 +518,15 @@ public:
489
518
  * Allocate a reference pointing to s. Note that even if +this+ is a reference,
490
519
  * we can create a reference within it.
491
520
  *
492
- * Note: Make sure you xfree() the result of this call. You can't just cast it
521
+ * Note: Make sure you NM_FREE() the result of this call. You can't just cast it
493
522
  * directly into a YaleStorage<D> class.
494
523
  */
495
524
  YALE_STORAGE* alloc_ref(SLICE* slice) {
496
- YALE_STORAGE* ns = ALLOC( YALE_STORAGE );
525
+ YALE_STORAGE* ns = NM_ALLOC( YALE_STORAGE );
497
526
 
498
527
  ns->dim = s->dim;
499
- ns->offset = ALLOC_N(size_t, ns->dim);
500
- ns->shape = ALLOC_N(size_t, ns->dim);
528
+ ns->offset = NM_ALLOC_N(size_t, ns->dim);
529
+ ns->shape = NM_ALLOC_N(size_t, ns->dim);
501
530
 
502
531
  for (size_t d = 0; d < ns->dim; ++d) {
503
532
  ns->offset[d] = slice->coords[d] + offset(d);
@@ -522,12 +551,12 @@ public:
522
551
  * Allocates and initializes the basic struct (but not IJA or A vectors).
523
552
  */
524
553
  static YALE_STORAGE* alloc(size_t* shape, size_t dim = 2) {
525
- YALE_STORAGE* s = ALLOC( YALE_STORAGE );
554
+ YALE_STORAGE* s = NM_ALLOC( YALE_STORAGE );
526
555
 
527
556
  s->ndnz = 0;
528
557
  s->dtype = dtype();
529
558
  s->shape = shape;
530
- s->offset = ALLOC_N(size_t, dim);
559
+ s->offset = NM_ALLOC_N(size_t, dim);
531
560
  for (size_t d = 0; d < dim; ++d)
532
561
  s->offset[d] = 0;
533
562
  s->dim = dim;
@@ -556,8 +585,8 @@ public:
556
585
  s->capacity = reserve;
557
586
  }
558
587
 
559
- s->ija = ALLOC_N( size_t, s->capacity );
560
- s->a = ALLOC_N( D, s->capacity );
588
+ s->ija = NM_ALLOC_N( size_t, s->capacity );
589
+ s->a = NM_ALLOC_N( D, s->capacity );
561
590
 
562
591
  return s;
563
592
  }
@@ -608,14 +637,14 @@ public:
608
637
  template <typename E>
609
638
  YALE_STORAGE* alloc_basic_copy(size_t new_capacity, size_t new_ndnz) const {
610
639
  nm::dtype_t new_dtype = nm::ctype_to_dtype_enum<E>::value_type;
611
- YALE_STORAGE* lhs = ALLOC( YALE_STORAGE );
640
+ YALE_STORAGE* lhs = NM_ALLOC( YALE_STORAGE );
612
641
  lhs->dim = s->dim;
613
- lhs->shape = ALLOC_N( size_t, lhs->dim );
642
+ lhs->shape = NM_ALLOC_N( size_t, lhs->dim );
614
643
 
615
644
  lhs->shape[0] = shape(0);
616
645
  lhs->shape[1] = shape(1);
617
646
 
618
- lhs->offset = ALLOC_N( size_t, lhs->dim );
647
+ lhs->offset = NM_ALLOC_N( size_t, lhs->dim );
619
648
 
620
649
  lhs->offset[0] = 0;
621
650
  lhs->offset[1] = 0;
@@ -623,8 +652,8 @@ public:
623
652
  lhs->capacity = new_capacity;
624
653
  lhs->dtype = new_dtype;
625
654
  lhs->ndnz = new_ndnz;
626
- lhs->ija = ALLOC_N( size_t, new_capacity );
627
- lhs->a = ALLOC_N( E, new_capacity );
655
+ lhs->ija = NM_ALLOC_N( size_t, new_capacity );
656
+ lhs->a = NM_ALLOC_N( E, new_capacity );
628
657
  lhs->src = lhs;
629
658
  lhs->count = 1;
630
659
 
@@ -633,7 +662,7 @@ public:
633
662
 
634
663
 
635
664
  /*
636
- * Make a full matrix structure copy (entries remain uninitialized). Remember to xfree()!
665
+ * Make a full matrix structure copy (entries remain uninitialized). Remember to NM_FREE()!
637
666
  */
638
667
  template <typename E>
639
668
  YALE_STORAGE* alloc_struct_copy(size_t new_capacity) const {
@@ -655,7 +684,7 @@ public:
655
684
  */
656
685
  template <typename E, bool Yield=false>
657
686
  void copy(YALE_STORAGE& ns) const {
658
- nm::dtype_t new_dtype = nm::ctype_to_dtype_enum<E>::value_type;
687
+ //nm::dtype_t new_dtype = nm::ctype_to_dtype_enum<E>::value_type;
659
688
  // get the default value for initialization (we'll re-use val for other copies after this)
660
689
  E val = static_cast<E>(const_default_obj());
661
690
 
@@ -665,6 +694,7 @@ public:
665
694
 
666
695
  E* ns_a = reinterpret_cast<E*>(ns.a);
667
696
  size_t sz = shape(0) + 1; // current used size of ns
697
+ nm_yale_storage_register(&ns);
668
698
 
669
699
  // FIXME: If diagonals line up, it's probably faster to do this with stored diagonal and stored non-diagonal iterators
670
700
  for (const_row_iterator it = cribegin(); it != criend(); ++it) {
@@ -681,6 +711,7 @@ public:
681
711
  }
682
712
  ns.ija[it.i()+1] = sz;
683
713
  }
714
+ nm_yale_storage_unregister(&ns);
684
715
 
685
716
  //ns.ija[shape(0)] = sz; // indicate end of last row
686
717
  ns.ndnz = sz - shape(0) - 1; // update ndnz count
@@ -688,17 +719,17 @@ public:
688
719
 
689
720
 
690
721
  /*
691
- * Allocate a casted copy of this matrix/reference. Remember to xfree() the result!
722
+ * Allocate a casted copy of this matrix/reference. Remember to NM_FREE() the result!
692
723
  *
693
724
  * If Yield is true, E must be nm::RubyObject, and it will call an rb_yield upon the stored value.
694
725
  */
695
726
  template <typename E, bool Yield = false>
696
727
  YALE_STORAGE* alloc_copy() const {
697
- nm::dtype_t new_dtype = nm::ctype_to_dtype_enum<E>::value_type;
728
+ //nm::dtype_t new_dtype = nm::ctype_to_dtype_enum<E>::value_type;
698
729
 
699
730
  YALE_STORAGE* lhs;
700
731
  if (slice) {
701
- size_t* xshape = ALLOC_N(size_t, 2);
732
+ size_t* xshape = NM_ALLOC_N(size_t, 2);
702
733
  xshape[0] = shape(0);
703
734
  xshape[1] = shape(1);
704
735
  size_t ndnz = count_copy_ndnz();
@@ -708,6 +739,7 @@ public:
708
739
 
709
740
  lhs = YaleStorage<E>::create(xshape, reserve);
710
741
 
742
+ // FIXME: This should probably be a throw which gets caught outside of the object.
711
743
  if (lhs->capacity < reserve)
712
744
  rb_raise(nm_eStorageTypeError, "conversion failed; capacity of %lu requested, max allowable is %lu", reserve, lhs->capacity);
713
745
 
@@ -718,10 +750,15 @@ public:
718
750
  lhs = alloc_struct_copy<E>(s->capacity);
719
751
 
720
752
  E* la = reinterpret_cast<E*>(lhs->a);
753
+
754
+ nm_yale_storage_register(lhs);
721
755
  for (size_t m = 0; m < size(); ++m) {
722
- if (Yield) la[m] = rb_yield(nm::yale_storage::nm_rb_dereference(a(m)));
756
+ if (Yield) {
757
+ la[m] = rb_yield(nm::yale_storage::nm_rb_dereference(a(m)));
758
+ }
723
759
  else la[m] = static_cast<E>(a(m));
724
760
  }
761
+ nm_yale_storage_unregister(lhs);
725
762
 
726
763
  }
727
764
 
@@ -732,7 +769,7 @@ public:
732
769
  * Allocate a transposed copy of the matrix
733
770
  */
734
771
  /*
735
- * Allocate a casted copy of this matrix/reference. Remember to xfree() the result!
772
+ * Allocate a casted copy of this matrix/reference. Remember to NM_FREE() the result!
736
773
  *
737
774
  * If Yield is true, E must be nm::RubyObject, and it will call an rb_yield upon the stored value.
738
775
  */
@@ -743,7 +780,7 @@ public:
743
780
  rb_raise(rb_eNotImpError, "please make a copy before transposing");
744
781
  } else {
745
782
  // Copy the structure and setup the IJA structure.
746
- size_t* xshape = ALLOC_N(size_t, 2);
783
+ size_t* xshape = NM_ALLOC_N(size_t, 2);
747
784
  xshape[0] = shape(1);
748
785
  xshape[1] = shape(0);
749
786
 
@@ -806,30 +843,44 @@ public:
806
843
  */
807
844
  template <typename E>
808
845
  VALUE map_merged_stored(VALUE klass, nm::YaleStorage<E>& t, VALUE r_init) const {
846
+ nm_register_value(r_init);
809
847
  VALUE s_init = const_default_value(),
810
848
  t_init = t.const_default_value();
811
-
849
+ nm_register_value(s_init);
850
+ nm_register_value(t_init);
851
+
812
852
  // Make a reasonable approximation of the resulting capacity
813
853
  size_t s_ndnz = count_copy_ndnz(),
814
854
  t_ndnz = t.count_copy_ndnz();
815
855
  size_t reserve = shape(0) + std::max(s_ndnz, t_ndnz) + 1;
816
856
 
817
- size_t* xshape = ALLOC_N(size_t, 2);
857
+ size_t* xshape = NM_ALLOC_N(size_t, 2);
818
858
  xshape[0] = shape(0);
819
859
  xshape[1] = shape(1);
820
860
 
821
861
  YALE_STORAGE* rs= YaleStorage<nm::RubyObject>::create(xshape, reserve);
822
862
 
823
- if (r_init == Qnil)
863
+ if (r_init == Qnil) {
864
+ nm_unregister_value(r_init);
824
865
  r_init = rb_yield_values(2, s_init, t_init);
866
+ nm_register_value(r_init);
867
+ }
825
868
 
826
869
  nm::RubyObject r_init_obj(r_init);
827
870
 
828
871
  // Prepare the matrix structure
829
872
  YaleStorage<nm::RubyObject>::init(*rs, &r_init_obj);
830
873
  NMATRIX* m = nm_create(nm::YALE_STORE, reinterpret_cast<STORAGE*>(rs));
874
+ nm_register_nmatrix(m);
831
875
  VALUE result = Data_Wrap_Struct(klass, nm_mark, nm_delete, m);
832
-
876
+ nm_unregister_nmatrix(m);
877
+ nm_register_value(result);
878
+ nm_unregister_value(r_init);
879
+
880
+ RETURN_SIZED_ENUMERATOR_PRE
881
+ nm_unregister_value(result);
882
+ nm_unregister_value(t_init);
883
+ nm_unregister_value(s_init);
833
884
  // No obvious, efficient way to pass a length function as the fourth argument here:
834
885
  RETURN_SIZED_ENUMERATOR(result, 0, 0, 0);
835
886
 
@@ -873,6 +924,9 @@ public:
873
924
  //RB_P(rb_funcall(result, rb_intern("yale_ija"), 0));
874
925
  }
875
926
  }
927
+ nm_unregister_value(result);
928
+ nm_unregister_value(t_init);
929
+ nm_unregister_value(s_init);
876
930
 
877
931
  return result;
878
932
  }
@@ -900,12 +954,16 @@ protected:
900
954
  size_t new_cap = sz + p.total_change;
901
955
 
902
956
  if (new_cap > real_max_size()) {
903
- xfree(v);
957
+ NM_FREE(v);
904
958
  rb_raise(rb_eStandardError, "resize caused by insertion of size %d (on top of current size %lu) would have caused yale matrix size to exceed its maximum (%lu)", p.total_change, sz, real_max_size());
905
959
  }
906
960
 
907
- size_t* new_ija = ALLOC_N( size_t,new_cap );
908
- D* new_a = ALLOC_N( D, new_cap );
961
+ if (s->dtype == nm::RUBYOBJ) {
962
+ nm_register_values(reinterpret_cast<VALUE*>(v), v_size);
963
+ }
964
+
965
+ size_t* new_ija = NM_ALLOC_N( size_t,new_cap );
966
+ D* new_a = NM_ALLOC_N( D, new_cap );
909
967
 
910
968
  // Copy unchanged row pointers first.
911
969
  size_t m = 0;
@@ -967,8 +1025,12 @@ protected:
967
1025
 
968
1026
  s->capacity = new_cap;
969
1027
 
970
- xfree(s->ija);
971
- xfree(s->a);
1028
+ NM_FREE(s->ija);
1029
+ NM_FREE(s->a);
1030
+
1031
+ if (s->dtype == nm::RUBYOBJ) {
1032
+ nm_unregister_values(reinterpret_cast<VALUE*>(v), v_size);
1033
+ }
972
1034
 
973
1035
  s->ija = new_ija;
974
1036
  s->a = reinterpret_cast<void*>(new_a);
@@ -994,8 +1056,8 @@ protected:
994
1056
 
995
1057
  if (new_cap < sz + n) new_cap = sz + n;
996
1058
 
997
- size_t* new_ija = ALLOC_N( size_t,new_cap );
998
- D* new_a = ALLOC_N( D, new_cap );
1059
+ size_t* new_ija = NM_ALLOC_N( size_t,new_cap );
1060
+ D* new_a = NM_ALLOC_N( D, new_cap );
999
1061
 
1000
1062
  // Copy unchanged row pointers first.
1001
1063
  for (size_t m = 0; m <= real_i; ++m) {
@@ -1024,11 +1086,18 @@ protected:
1024
1086
  new_a[m+n] = a(m);
1025
1087
  }
1026
1088
 
1089
+ if (s->dtype == nm::RUBYOBJ) {
1090
+ nm_yale_storage_register_a(new_a, new_cap);
1091
+ }
1027
1092
 
1028
1093
  s->capacity = new_cap;
1029
1094
 
1030
- xfree(s->ija);
1031
- xfree(s->a);
1095
+ NM_FREE(s->ija);
1096
+ NM_FREE(s->a);
1097
+
1098
+ if (s->dtype == nm::RUBYOBJ) {
1099
+ nm_yale_storage_unregister_a(new_a, new_cap);
1100
+ }
1032
1101
 
1033
1102
  s->ija = new_ija;
1034
1103
  s->a = reinterpret_cast<void*>(new_a);
@@ -1067,4 +1136,4 @@ protected:
1067
1136
 
1068
1137
  } // end of nm namespace
1069
1138
 
1070
- #endif // YALE_CLASS_H
1139
+ #endif // YALE_CLASS_H