nmatrix 0.0.9 → 0.1.0.rc1

Files changed (101)
  1. checksums.yaml +4 -4
  2. data/Gemfile +1 -0
  3. data/History.txt +95 -1
  4. data/LICENSE.txt +2 -2
  5. data/README.rdoc +24 -26
  6. data/Rakefile +32 -16
  7. data/ext/nmatrix/data/complex.h +2 -2
  8. data/ext/nmatrix/data/data.cpp +27 -51
  9. data/ext/nmatrix/data/data.h +92 -4
  10. data/ext/nmatrix/data/meta.h +2 -2
  11. data/ext/nmatrix/data/rational.h +2 -2
  12. data/ext/nmatrix/data/ruby_object.h +2 -2
  13. data/ext/nmatrix/extconf.rb +87 -86
  14. data/ext/nmatrix/math.cpp +45 -40
  15. data/ext/nmatrix/math/asum.h +3 -3
  16. data/ext/nmatrix/math/geev.h +2 -2
  17. data/ext/nmatrix/math/gemm.h +6 -2
  18. data/ext/nmatrix/math/gemv.h +6 -2
  19. data/ext/nmatrix/math/ger.h +2 -2
  20. data/ext/nmatrix/math/gesdd.h +2 -2
  21. data/ext/nmatrix/math/gesvd.h +2 -2
  22. data/ext/nmatrix/math/getf2.h +2 -2
  23. data/ext/nmatrix/math/getrf.h +2 -2
  24. data/ext/nmatrix/math/getri.h +2 -2
  25. data/ext/nmatrix/math/getrs.h +7 -3
  26. data/ext/nmatrix/math/idamax.h +2 -2
  27. data/ext/nmatrix/math/inc.h +12 -6
  28. data/ext/nmatrix/math/laswp.h +2 -2
  29. data/ext/nmatrix/math/long_dtype.h +2 -2
  30. data/ext/nmatrix/math/math.h +16 -10
  31. data/ext/nmatrix/math/nrm2.h +3 -3
  32. data/ext/nmatrix/math/potrs.h +7 -3
  33. data/ext/nmatrix/math/rot.h +2 -2
  34. data/ext/nmatrix/math/rotg.h +2 -2
  35. data/ext/nmatrix/math/scal.h +2 -2
  36. data/ext/nmatrix/math/swap.h +2 -2
  37. data/ext/nmatrix/math/trsm.h +7 -3
  38. data/ext/nmatrix/nm_memory.h +60 -0
  39. data/ext/nmatrix/nmatrix.cpp +13 -47
  40. data/ext/nmatrix/nmatrix.h +37 -12
  41. data/ext/nmatrix/ruby_constants.cpp +4 -2
  42. data/ext/nmatrix/ruby_constants.h +4 -2
  43. data/ext/nmatrix/ruby_nmatrix.c +937 -170
  44. data/ext/nmatrix/storage/common.cpp +2 -2
  45. data/ext/nmatrix/storage/common.h +2 -2
  46. data/ext/nmatrix/storage/{dense.cpp → dense/dense.cpp} +253 -100
  47. data/ext/nmatrix/storage/{dense.h → dense/dense.h} +6 -5
  48. data/ext/nmatrix/storage/{list.cpp → list/list.cpp} +517 -98
  49. data/ext/nmatrix/storage/{list.h → list/list.h} +13 -6
  50. data/ext/nmatrix/storage/storage.cpp +48 -19
  51. data/ext/nmatrix/storage/storage.h +4 -4
  52. data/ext/nmatrix/storage/yale/class.h +112 -43
  53. data/ext/nmatrix/storage/yale/iterators/base.h +2 -2
  54. data/ext/nmatrix/storage/yale/iterators/iterator.h +2 -2
  55. data/ext/nmatrix/storage/yale/iterators/row.h +2 -2
  56. data/ext/nmatrix/storage/yale/iterators/row_stored.h +2 -2
  57. data/ext/nmatrix/storage/yale/iterators/row_stored_nd.h +4 -3
  58. data/ext/nmatrix/storage/yale/iterators/stored_diagonal.h +2 -2
  59. data/ext/nmatrix/storage/yale/math/transpose.h +2 -2
  60. data/ext/nmatrix/storage/yale/yale.cpp +343 -52
  61. data/ext/nmatrix/storage/yale/yale.h +7 -3
  62. data/ext/nmatrix/types.h +2 -2
  63. data/ext/nmatrix/util/io.cpp +5 -5
  64. data/ext/nmatrix/util/io.h +2 -2
  65. data/ext/nmatrix/util/sl_list.cpp +40 -27
  66. data/ext/nmatrix/util/sl_list.h +3 -3
  67. data/ext/nmatrix/util/util.h +2 -2
  68. data/lib/nmatrix.rb +2 -2
  69. data/lib/nmatrix/blas.rb +2 -2
  70. data/lib/nmatrix/enumerate.rb +17 -6
  71. data/lib/nmatrix/io/market.rb +2 -3
  72. data/lib/nmatrix/io/mat5_reader.rb +2 -2
  73. data/lib/nmatrix/io/mat_reader.rb +2 -2
  74. data/lib/nmatrix/lapack.rb +46 -46
  75. data/lib/nmatrix/math.rb +213 -20
  76. data/lib/nmatrix/monkeys.rb +24 -2
  77. data/lib/nmatrix/nmatrix.rb +394 -9
  78. data/lib/nmatrix/nvector.rb +2 -64
  79. data/lib/nmatrix/rspec.rb +2 -2
  80. data/lib/nmatrix/shortcuts.rb +14 -61
  81. data/lib/nmatrix/version.rb +11 -3
  82. data/lib/nmatrix/yale_functions.rb +4 -4
  83. data/nmatrix.gemspec +2 -7
  84. data/scripts/mac-brew-gcc.sh +11 -8
  85. data/scripts/mac-mavericks-brew-gcc.sh +22 -0
  86. data/spec/00_nmatrix_spec.rb +116 -7
  87. data/spec/01_enum_spec.rb +17 -3
  88. data/spec/02_slice_spec.rb +11 -3
  89. data/spec/blas_spec.rb +5 -2
  90. data/spec/elementwise_spec.rb +5 -2
  91. data/spec/io_spec.rb +27 -17
  92. data/spec/lapack_spec.rb +157 -9
  93. data/spec/math_spec.rb +95 -4
  94. data/spec/nmatrix_yale_spec.rb +21 -26
  95. data/spec/rspec_monkeys.rb +27 -0
  96. data/spec/rspec_spec.rb +2 -2
  97. data/spec/shortcuts_spec.rb +5 -10
  98. data/spec/slice_set_spec.rb +6 -2
  99. data/spec/spec_helper.rb +3 -2
  100. data/spec/stat_spec.rb +174 -158
  101. metadata +15 -15
@@ -9,8 +9,8 @@
  //
  // == Copyright Information
  //
- // SciRuby is Copyright (c) 2010 - 2013, Ruby Science Foundation
- // NMatrix is Copyright (c) 2013, Ruby Science Foundation
+ // SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
+ // NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
  //
  // Please see LICENSE.txt for additional copyright notices.
  //
@@ -87,6 +87,7 @@ public:
  if (&r != &(rhs.r))
  throw std::logic_error("can't assign iterator from another row iterator");
  p_ = rhs.p_;
+ return *this;
  }

  virtual size_t p() const { return p_; }
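The single added line above fixes an operator= that is declared to return a reference but previously fell off the end of the function without returning anything, which is undefined behavior in C++. A minimal, self-contained illustration of the required shape (hypothetical class, not the NMatrix iterator):

#include <cstddef>
#include <iostream>

class row_iter {                       // hypothetical stand-in, not NMatrix code
public:
  explicit row_iter(std::size_t p = 0) : p_(p) {}

  row_iter& operator=(const row_iter& rhs) {
    p_ = rhs.p_;
    return *this;  // omitting this lets control fall off the end of a
                   // value-returning function: undefined behavior
  }

  std::size_t p() const { return p_; }

private:
  std::size_t p_;
};

int main() {
  row_iter a(1), b(2), c(3);
  a = b = c;                           // chaining relies on the returned reference
  std::cout << a.p() << '\n';          // prints 3
}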
@@ -164,4 +165,4 @@ public:

  } } // end of namespace nm::yale_storage

- #endif // YALE_ITERATORS_ROW_STORED_ND_H
+ #endif // YALE_ITERATORS_ROW_STORED_ND_H
@@ -101,6 +101,7 @@ extern "C" {
  static VALUE nm_ia(VALUE self);
  static VALUE nm_ja(VALUE self);
  static VALUE nm_ija(int argc, VALUE* argv, VALUE self);
+ static VALUE nm_row_keys_intersection(VALUE m1, VALUE ii1, VALUE m2, VALUE ii2);

  static VALUE nm_nd_row(int argc, VALUE* argv, VALUE self);

@@ -208,8 +209,8 @@ YALE_STORAGE* create_from_old_yale(dtype_t dtype, size_t* shape, char* r_ia, cha
  s->ndnz = ndnz;

  // Setup IJA and A arrays
- s->ija = ALLOC_N( IType, s->capacity );
- s->a = ALLOC_N( LDType, s->capacity );
+ s->ija = NM_ALLOC_N( IType, s->capacity );
+ s->a = NM_ALLOC_N( LDType, s->capacity );
  IType* ijl = reinterpret_cast<IType*>(s->ija);
  LDType* al = reinterpret_cast<LDType*>(s->a);

@@ -452,14 +453,14 @@ static void vector_grow(YALE_STORAGE* s) {
  if (s != s->src) {
  throw; // need to correct this quickly.
  }
-
+ nm_yale_storage_register(s);
  size_t new_capacity = s->capacity * GROWTH_CONSTANT;
  size_t max_capacity = YaleStorage<uint8_t>::max_size(s->shape);

  if (new_capacity > max_capacity) new_capacity = max_capacity;

- IType* new_ija = ALLOC_N(IType, new_capacity);
- void* new_a = ALLOC_N(char, DTYPE_SIZES[s->dtype] * new_capacity);
+ IType* new_ija = NM_ALLOC_N(IType, new_capacity);
+ void* new_a = NM_ALLOC_N(char, DTYPE_SIZES[s->dtype] * new_capacity);

  IType* old_ija = s->ija;
  void* old_a = s->a;
@@ -469,11 +470,18 @@ static void vector_grow(YALE_STORAGE* s) {

  s->capacity = new_capacity;

- xfree(old_ija);
- xfree(old_a);
+ if (s->dtype == nm::RUBYOBJ)
+ nm_yale_storage_register_a(new_a, s->capacity * DTYPE_SIZES[s->dtype]);
+
+ NM_FREE(old_ija);
+ nm_yale_storage_unregister(s);
+ NM_FREE(old_a);
+ if (s->dtype == nm::RUBYOBJ)
+ nm_yale_storage_unregister_a(new_a, s->capacity * DTYPE_SIZES[s->dtype]);

  s->ija = new_ija;
  s->a = new_a;
+
  }


@@ -497,11 +505,13 @@ static char vector_insert_resize(YALE_STORAGE* s, size_t current_size, size_t po
  if (new_capacity < current_size + n)
  new_capacity = current_size + n;

+ nm_yale_storage_register(s);
+
  // Allocate the new vectors.
- IType* new_ija = ALLOC_N( IType, new_capacity );
+ IType* new_ija = NM_ALLOC_N( IType, new_capacity );
  NM_CHECK_ALLOC(new_ija);

- DType* new_a = ALLOC_N( DType, new_capacity );
+ DType* new_a = NM_ALLOC_N( DType, new_capacity );
  NM_CHECK_ALLOC(new_a);

  IType* old_ija = reinterpret_cast<IType*>(s->ija);
@@ -533,9 +543,15 @@ static char vector_insert_resize(YALE_STORAGE* s, size_t current_size, size_t po
  }

  s->capacity = new_capacity;
+ if (s->dtype == nm::RUBYOBJ)
+ nm_yale_storage_register_a(new_a, new_capacity);

- xfree(s->ija);
- xfree(s->a);
+ NM_FREE(s->ija);
+ nm_yale_storage_unregister(s);
+ NM_FREE(s->a);
+
+ if (s->dtype == nm::RUBYOBJ)
+ nm_yale_storage_unregister_a(new_a, new_capacity);

  s->ija = new_ija;
  s->a = reinterpret_cast<void*>(new_a);
@@ -566,12 +582,11 @@ static char vector_insert(YALE_STORAGE* s, size_t pos, size_t* j, void* val_, si
  DType* a = reinterpret_cast<DType*>(s->a);

  if (size + n > s->capacity) {
- vector_insert_resize<DType>(s, size, pos, j, n, struct_only);
+ vector_insert_resize<DType>(s, size, pos, j, n, struct_only);

  // Need to get the new locations for ija and a.
  ija = s->ija;
  a = reinterpret_cast<DType*>(s->a);
-
  } else {
  /*
  * No resize required:
@@ -673,6 +688,8 @@ static STORAGE* matrix_multiply(const STORAGE_PAIR& casted_storage, size_t* resu
  YALE_STORAGE *left = (YALE_STORAGE*)(casted_storage.left),
  *right = (YALE_STORAGE*)(casted_storage.right);

+ nm_yale_storage_register(left);
+ nm_yale_storage_register(right);
  // We can safely get dtype from the casted matrices; post-condition of binary_storage_cast_alloc is that dtype is the
  // same for left and right.
  // int8_t dtype = left->dtype;
@@ -704,6 +721,8 @@ static STORAGE* matrix_multiply(const STORAGE_PAIR& casted_storage, size_t* resu
  // Sort the columns
  nm::math::smmp_sort_columns<DType>(result->shape[0], ija, ija, reinterpret_cast<DType*>(result->a));

+ nm_yale_storage_unregister(right);
+ nm_yale_storage_unregister(left);
  return reinterpret_cast<STORAGE*>(result);
  }

@@ -872,11 +891,13 @@ public:
  // Helper function used only for the RETURN_SIZED_ENUMERATOR macro. Returns the length of
  // the matrix's storage.
  static VALUE nm_yale_stored_enumerator_length(VALUE nmatrix) {
+ NM_CONSERVATIVE(nm_register_value(nmatrix));
  YALE_STORAGE* s = NM_STORAGE_YALE(nmatrix);
  YALE_STORAGE* src = s->src == s ? s : reinterpret_cast<YALE_STORAGE*>(s->src);
  size_t ia_size = src->shape[0];
  // FIXME: This needs to be corrected for slicing.
  size_t len = std::min( s->shape[0] + s->offset[0], s->shape[1] + s->offset[1] ) + nm_yale_storage_get_size(src) - ia_size;
+ NM_CONSERVATIVE(nm_unregister_value(nmatrix));
  return INT2FIX(len);
  }

@@ -884,27 +905,32 @@ static VALUE nm_yale_stored_enumerator_length(VALUE nmatrix) {
  // Helper function used only for the RETURN_SIZED_ENUMERATOR macro. Returns the length of
  // the matrix's storage.
  static VALUE nm_yale_stored_nondiagonal_enumerator_length(VALUE nmatrix) {
+ NM_CONSERVATIVE(nm_register_value(nmatrix));
  YALE_STORAGE* s = NM_STORAGE_YALE(nmatrix);
  if (s->src != s) s = reinterpret_cast<YALE_STORAGE*>(s->src); // need to get the original storage shape

  size_t ia_size = s->shape[0];
  size_t len = nm_yale_storage_get_size(NM_STORAGE_YALE(nmatrix)) - ia_size;
-
+ NM_CONSERVATIVE(nm_unregister_value(nmatrix));
  return INT2FIX(len);
  }

  // Helper function for diagonal length.
  static VALUE nm_yale_stored_diagonal_enumerator_length(VALUE nmatrix) {
+ NM_CONSERVATIVE(nm_register_value(nmatrix));
  YALE_STORAGE* s = NM_STORAGE_YALE(nmatrix);
  size_t len = std::min( s->shape[0] + s->offset[0], s->shape[1] + s->offset[1] );
+ NM_CONSERVATIVE(nm_unregister_value(nmatrix));
  return INT2FIX(len);
  }


  // Helper function for full enumerator length.
  static VALUE nm_yale_enumerator_length(VALUE nmatrix) {
+ NM_CONSERVATIVE(nm_register_value(nmatrix));
  YALE_STORAGE* s = NM_STORAGE_YALE(nmatrix);
  size_t len = s->shape[0] * s->shape[1];
+ NM_CONSERVATIVE(nm_unregister_value(nmatrix));
  return INT2FIX(len);
  }

@@ -914,12 +940,21 @@ static VALUE nm_yale_enumerator_length(VALUE nmatrix) {
  */
  template <typename D>
  static VALUE map_stored(VALUE self) {
+ NM_CONSERVATIVE(nm_register_value(self));
  YALE_STORAGE* s = NM_STORAGE_YALE(self);
  YaleStorage<D> y(s);
+
+ RETURN_SIZED_ENUMERATOR_PRE
+ NM_CONSERVATIVE(nm_unregister_value(self));
  RETURN_SIZED_ENUMERATOR(self, 0, 0, nm_yale_stored_enumerator_length);
+
  YALE_STORAGE* r = y.template alloc_copy<nm::RubyObject, true>();
+ nm_yale_storage_register(r);
  NMATRIX* m = nm_create(nm::YALE_STORE, reinterpret_cast<STORAGE*>(r));
- return Data_Wrap_Struct(CLASS_OF(self), nm_mark, nm_delete, m);
+ VALUE to_return = Data_Wrap_Struct(CLASS_OF(self), nm_mark, nm_delete, m);
+ nm_yale_storage_unregister(r);
+ NM_CONSERVATIVE(nm_unregister_value(self));
+ return to_return;
  }


@@ -930,7 +965,8 @@ template <typename LD, typename RD>
  static VALUE map_merged_stored(VALUE left, VALUE right, VALUE init) {
  nm::YaleStorage<LD> l(NM_STORAGE_YALE(left));
  nm::YaleStorage<RD> r(NM_STORAGE_YALE(right));
- return l.map_merged_stored(CLASS_OF(left), r, init);
+ VALUE to_return = l.map_merged_stored(CLASS_OF(left), r, init);
+ return to_return;
  }


@@ -939,10 +975,13 @@ static VALUE map_merged_stored(VALUE left, VALUE right, VALUE init) {
  */
  template <typename DType>
  static VALUE each_stored_with_indices(VALUE nm) {
+ NM_CONSERVATIVE(nm_register_value(nm));
  YALE_STORAGE* s = NM_STORAGE_YALE(nm);
  YaleStorage<DType> y(s);

  // If we don't have a block, return an enumerator.
+ RETURN_SIZED_ENUMERATOR_PRE
+ NM_CONSERVATIVE(nm_unregister_value(nm));
  RETURN_SIZED_ENUMERATOR(nm, 0, 0, nm_yale_stored_enumerator_length);

  for (typename YaleStorage<DType>::const_stored_diagonal_iterator d = y.csdbegin(); d != y.csdend(); ++d) {
@@ -955,6 +994,8 @@ static VALUE each_stored_with_indices(VALUE nm) {
  }
  }

+ NM_CONSERVATIVE(nm_unregister_value(nm));
+
  return nm;
  }

@@ -964,16 +1005,22 @@ static VALUE each_stored_with_indices(VALUE nm) {
  */
  template <typename DType>
  static VALUE stored_diagonal_each_with_indices(VALUE nm) {
+ NM_CONSERVATIVE(nm_register_value(nm));
+
  YALE_STORAGE* s = NM_STORAGE_YALE(nm);
  YaleStorage<DType> y(s);

  // If we don't have a block, return an enumerator.
+ RETURN_SIZED_ENUMERATOR_PRE
+ NM_CONSERVATIVE(nm_unregister_value(nm));
  RETURN_SIZED_ENUMERATOR(nm, 0, 0, nm_yale_stored_diagonal_length); // FIXME: need diagonal length
-
+
  for (typename YaleStorage<DType>::const_stored_diagonal_iterator d = y.csdbegin(); d != y.csdend(); ++d) {
  rb_yield_values(3, ~d, d.rb_i(), d.rb_j());
  }

+ NM_CONSERVATIVE(nm_unregister_value(nm));
+
  return nm;
  }

@@ -983,10 +1030,14 @@ static VALUE stored_diagonal_each_with_indices(VALUE nm) {
  */
  template <typename DType>
  static VALUE stored_nondiagonal_each_with_indices(VALUE nm) {
+ NM_CONSERVATIVE(nm_register_value(nm));
+
  YALE_STORAGE* s = NM_STORAGE_YALE(nm);
  YaleStorage<DType> y(s);

  // If we don't have a block, return an enumerator.
+ RETURN_SIZED_ENUMERATOR_PRE
+ NM_CONSERVATIVE(nm_unregister_value(nm));
  RETURN_SIZED_ENUMERATOR(nm, 0, 0, 0); // FIXME: need diagonal length

  for (typename YaleStorage<DType>::const_row_iterator it = y.cribegin(); it != y.criend(); ++it) {
@@ -995,6 +1046,8 @@ static VALUE stored_nondiagonal_each_with_indices(VALUE nm) {
  }
  }

+ NM_CONSERVATIVE(nm_unregister_value(nm));
+
  return nm;
  }

@@ -1004,10 +1057,14 @@ static VALUE stored_nondiagonal_each_with_indices(VALUE nm) {
  */
  template <typename DType>
  static VALUE each_ordered_stored_with_indices(VALUE nm) {
+ NM_CONSERVATIVE(nm_register_value(nm));
+
  YALE_STORAGE* s = NM_STORAGE_YALE(nm);
  YaleStorage<DType> y(s);

  // If we don't have a block, return an enumerator.
+ RETURN_SIZED_ENUMERATOR_PRE
+ NM_CONSERVATIVE(nm_unregister_value(nm));
  RETURN_SIZED_ENUMERATOR(nm, 0, 0, nm_yale_stored_enumerator_length);

  for (typename YaleStorage<DType>::const_row_iterator it = y.cribegin(); it != y.criend(); ++it) {
@@ -1016,25 +1073,39 @@ static VALUE each_ordered_stored_with_indices(VALUE nm) {
  }
  }

+ NM_CONSERVATIVE(nm_unregister_value(nm));
+
  return nm;
  }


  template <typename DType>
  static VALUE each_with_indices(VALUE nm) {
+ NM_CONSERVATIVE(nm_register_value(nm));
+
  YALE_STORAGE* s = NM_STORAGE_YALE(nm);
  YaleStorage<DType> y(s);

  // If we don't have a block, return an enumerator.
+ RETURN_SIZED_ENUMERATOR_PRE
+ NM_CONSERVATIVE(nm_unregister_value(nm));
  RETURN_SIZED_ENUMERATOR(nm, 0, 0, nm_yale_enumerator_length);

  for (typename YaleStorage<DType>::const_iterator iter = y.cbegin(); iter != y.cend(); ++iter) {
  rb_yield_values(3, ~iter, iter.rb_i(), iter.rb_j());
  }

+ NM_CONSERVATIVE(nm_unregister_value(nm));
+
  return nm;
  }

+ template <typename D>
+ static bool is_pos_default_value(YALE_STORAGE* s, size_t apos) {
+ YaleStorage<D> y(s);
+ return y.is_pos_default_value(apos);
+ }
+

  } // end of namespace nm::yale_storage

@@ -1056,6 +1127,10 @@ void nm_init_yale_functions() {
  */
  cNMatrix_YaleFunctions = rb_define_module_under(cNMatrix, "YaleFunctions");

+ // Expert recommendation. Eventually this should go in a separate gem, or at least a separate module.
+ rb_define_method(cNMatrix_YaleFunctions, "yale_row_keys_intersection", (METHOD)nm_row_keys_intersection, 3);
+
+ // Debugging functions.
  rb_define_method(cNMatrix_YaleFunctions, "yale_ija", (METHOD)nm_ija, -1);
  rb_define_method(cNMatrix_YaleFunctions, "yale_a", (METHOD)nm_a, -1);
  rb_define_method(cNMatrix_YaleFunctions, "yale_size", (METHOD)nm_size, 0);
@@ -1162,7 +1237,7 @@ void* nm_yale_storage_get(const STORAGE* storage, SLICE* slice) {

  return elem_copy_table[casted_storage->dtype](casted_storage, slice);
  } else {
-
+ nm_yale_storage_register(casted_storage);
  //return reinterpret_cast<void*>(nm::YaleStorage<nm::dtype_enum_T<storage->dtype>::type>(casted_storage).alloc_ref(slice));
  NAMED_DTYPE_TEMPLATE_TABLE(ref_table, nm::yale_storage::ref, YALE_STORAGE*, YALE_STORAGE* storage, SLICE* slice)

@@ -1172,7 +1247,9 @@ void* nm_yale_storage_get(const STORAGE* storage, SLICE* slice) {

  YALE_STORAGE* ns = slice_copy_table[casted_storage->dtype][casted_storage->dtype](ref);

- xfree(ref);
+ NM_FREE(ref);
+
+ nm_yale_storage_unregister(casted_storage);

  return ns;
  }
@@ -1339,11 +1416,11 @@ void nm_yale_storage_delete(STORAGE* s) {
  if (s) {
  YALE_STORAGE* storage = (YALE_STORAGE*)s;
  if (storage->count-- == 1) {
- xfree(storage->shape);
- xfree(storage->offset);
- xfree(storage->ija);
- xfree(storage->a);
- xfree(storage);
+ NM_FREE(storage->shape);
+ NM_FREE(storage->offset);
+ NM_FREE(storage->ija);
+ NM_FREE(storage->a);
+ NM_FREE(storage);
  }
  }
  }
@@ -1355,9 +1432,9 @@ void nm_yale_storage_delete_ref(STORAGE* s) {
  if (s) {
  YALE_STORAGE* storage = (YALE_STORAGE*)s;
  nm_yale_storage_delete( reinterpret_cast<STORAGE*>(storage->src) );
- xfree(storage->shape);
- xfree(storage->offset);
- xfree(s);
+ NM_FREE(storage->shape);
+ NM_FREE(storage->offset);
+ NM_FREE(s);
  }
  }

@@ -1378,15 +1455,35 @@ void nm_yale_storage_init(YALE_STORAGE* s, void* init_val) {
  */
  void nm_yale_storage_mark(STORAGE* storage_base) {
  YALE_STORAGE* storage = (YALE_STORAGE*)storage_base;
- size_t i;

  if (storage && storage->dtype == nm::RUBYOBJ) {

  VALUE* a = (VALUE*)(storage->a);
- rb_gc_mark_locations(a, a + storage->capacity * sizeof(VALUE));
+ rb_gc_mark_locations(a, &(a[storage->capacity-1]));
  }
  }

+ void nm_yale_storage_register_a(void* a, size_t size) {
+ nm_register_values(reinterpret_cast<VALUE*>(a), size);
+ }
+
+ void nm_yale_storage_unregister_a(void* a, size_t size) {
+ nm_unregister_values(reinterpret_cast<VALUE*>(a), size);
+ }
+
+ void nm_yale_storage_register(const STORAGE* s) {
+ const YALE_STORAGE* y = reinterpret_cast<const YALE_STORAGE*>(s);
+ if (y->dtype == nm::RUBYOBJ) {
+ nm_register_values(reinterpret_cast<VALUE*>(y->a), nm::yale_storage::get_size(y));
+ }
+ }
+
+ void nm_yale_storage_unregister(const STORAGE* s) {
+ const YALE_STORAGE* y = reinterpret_cast<const YALE_STORAGE*>(s);
+ if (y->dtype == nm::RUBYOBJ) {
+ nm_unregister_values(reinterpret_cast<VALUE*>(y->a), nm::yale_storage::get_size(y));
+ }
+ }

  /*
  * Allocates and initializes the basic struct (but not the IJA or A vectors).
@@ -1396,12 +1493,12 @@ void nm_yale_storage_mark(STORAGE* storage_base) {
  static YALE_STORAGE* alloc(nm::dtype_t dtype, size_t* shape, size_t dim) {
  YALE_STORAGE* s;

- s = ALLOC( YALE_STORAGE );
+ s = NM_ALLOC( YALE_STORAGE );

  s->ndnz = 0;
  s->dtype = dtype;
  s->shape = shape;
- s->offset = ALLOC_N(size_t, dim);
+ s->offset = NM_ALLOC_N(size_t, dim);
  for (size_t i = 0; i < dim; ++i)
  s->offset[i] = 0;
  s->dim = dim;
@@ -1432,7 +1529,110 @@ YALE_STORAGE* nm_yale_storage_create_from_old_yale(nm::dtype_t dtype, size_t* sh
  */
  static VALUE nm_size(VALUE self) {
  YALE_STORAGE* s = (YALE_STORAGE*)(NM_SRC(self));
- return INT2FIX(nm::yale_storage::IJA(s)[s->shape[0]]);
+ VALUE to_return = INT2FIX(nm::yale_storage::IJA(s)[s->shape[0]]);
+ return to_return;
+ }
+
+
+ /*
+ * Determine if some pos in the diagonal is the default. No bounds checking!
+ */
+ static bool is_pos_default_value(YALE_STORAGE* s, size_t apos) {
+ DTYPE_TEMPLATE_TABLE(nm::yale_storage::is_pos_default_value, bool, YALE_STORAGE*, size_t)
+ return ttable[s->dtype](s, apos);
+ }
+
+
+ /*
+ * call-seq:
+ * yale_row_keys_intersection(i, m2, i2) -> Array
+ *
+ * This function is experimental.
+ *
+ * It finds the intersection of row i of the current matrix with row i2 of matrix m2.
+ * Both matrices must be Yale. They may not be slices.
+ *
+ * Only checks the stored indices; does not care about matrix default value.
+ */
+ static VALUE nm_row_keys_intersection(VALUE m1, VALUE ii1, VALUE m2, VALUE ii2) {
+
+ NM_CONSERVATIVE(nm_register_value(m1));
+ NM_CONSERVATIVE(nm_register_value(m2));
+
+ if (NM_SRC(m1) != NM_STORAGE(m1) || NM_SRC(m2) != NM_STORAGE(m2)) {
+ NM_CONSERVATIVE(nm_unregister_value(m2));
+ NM_CONSERVATIVE(nm_unregister_value(m1));
+ rb_raise(rb_eNotImpError, "must be called on a real matrix and not a slice");
+ }
+
+ size_t i1 = FIX2INT(ii1),
+ i2 = FIX2INT(ii2);
+
+ YALE_STORAGE *s = NM_STORAGE_YALE(m1),
+ *t = NM_STORAGE_YALE(m2);
+
+ size_t pos1 = s->ija[i1],
+ pos2 = t->ija[i2];
+
+ size_t nextpos1 = s->ija[i1+1],
+ nextpos2 = t->ija[i2+1];
+
+ size_t diff1 = nextpos1 - pos1,
+ diff2 = nextpos2 - pos2;
+
+ // Does the diagonal have a nonzero in it?
+ bool diag1 = i1 < s->shape[0] && !is_pos_default_value(s, i1),
+ diag2 = i2 < t->shape[0] && !is_pos_default_value(t, i2);
+
+ // Reserve max(diff1,diff2) space -- that's the max intersection possible.
+ VALUE ret = rb_ary_new2(std::max(diff1,diff2)+1);
+ nm_register_value(ret);
+
+ // Handle once the special case where both have the diagonal in exactly
+ // the same place.
+ if (diag1 && diag2 && i1 == i2) {
+ rb_ary_push(ret, INT2FIX(i1));
+ diag1 = false; diag2 = false; // no need to deal with diagonals anymore.
+ }
+
+ // Now find the intersection.
+ size_t idx1 = pos1, idx2 = pos2;
+ while (idx1 < nextpos1 && idx2 < nextpos2) {
+ if (s->ija[idx1] == t->ija[idx2]) {
+ rb_ary_push(ret, INT2FIX(s->ija[idx1]));
+ ++idx1; ++idx2;
+ } else if (diag1 && i1 == t->ija[idx2]) {
+ rb_ary_push(ret, INT2FIX(i1));
+ diag1 = false;
+ ++idx2;
+ } else if (diag2 && i2 == s->ija[idx1]) {
+ rb_ary_push(ret, INT2FIX(i2));
+ diag2 = false;
+ ++idx1;
+ } else if (s->ija[idx1] < t->ija[idx2]) {
+ ++idx1;
+ } else { // s->ija[idx1] > t->ija[idx2]
+ ++idx2;
+ }
+ }
+
+ // Past the end of row i2's stored entries; need to try to find diagonal
+ if (diag2 && idx1 < nextpos1) {
+ idx1 = nm::yale_storage::binary_search_left_boundary(s, idx1, nextpos1, i2);
+ if (s->ija[idx1] == i2) rb_ary_push(ret, INT2FIX(i2));
+ }
+
+ // Find the diagonal, if possible, in the other one.
+ if (diag1 && idx2 < nextpos2) {
+ idx2 = nm::yale_storage::binary_search_left_boundary(t, idx2, nextpos2, i1);
+ if (t->ija[idx2] == i1) rb_ary_push(ret, INT2FIX(i1));
+ }
+
+ nm_unregister_value(ret);
+ NM_CONSERVATIVE(nm_unregister_value(m1));
+ NM_CONSERVATIVE(nm_unregister_value(m2));
+
+ return ret;
  }


@@ -1444,15 +1644,21 @@ static VALUE nm_size(VALUE self) {
  * Get the A array of a Yale matrix (which stores the diagonal and the LU portions of the matrix).
  */
  static VALUE nm_a(int argc, VALUE* argv, VALUE self) {
+ NM_CONSERVATIVE(nm_register_value(self));
+
  VALUE idx;
  rb_scan_args(argc, argv, "01", &idx);
+ NM_CONSERVATIVE(nm_register_value(idx));

  YALE_STORAGE* s = reinterpret_cast<YALE_STORAGE*>(NM_SRC(self));
  size_t size = nm_yale_storage_get_size(s);

  if (idx == Qnil) {
- VALUE* vals = ALLOCA_N(VALUE, size);

+ VALUE* vals = NM_ALLOCA_N(VALUE, size);
+
+ nm_register_values(vals, size);
+
  if (NM_DTYPE(self) == nm::RUBYOBJ) {
  for (size_t i = 0; i < size; ++i) {
  vals[i] = reinterpret_cast<VALUE*>(s->a)[i];
@@ -1467,11 +1673,15 @@ static VALUE nm_a(int argc, VALUE* argv, VALUE self) {
  for (size_t i = size; i < s->capacity; ++i)
  rb_ary_push(ary, Qnil);

+ nm_unregister_values(vals, size);
+ NM_CONSERVATIVE(nm_unregister_value(idx));
+ NM_CONSERVATIVE(nm_unregister_value(self));
  return ary;
  } else {
  size_t index = FIX2INT(idx);
+ NM_CONSERVATIVE(nm_unregister_value(idx));
+ NM_CONSERVATIVE(nm_unregister_value(self));
  if (index >= size) rb_raise(rb_eRangeError, "out of range");
-
  return rubyobj_from_cval((char*)(s->a) + DTYPE_SIZES[s->dtype] * index, s->dtype).rval;
  }
  }
@@ -1485,13 +1695,17 @@ static VALUE nm_a(int argc, VALUE* argv, VALUE self) {
  * Get the diagonal ("D") portion of the A array of a Yale matrix.
  */
  static VALUE nm_d(int argc, VALUE* argv, VALUE self) {
+ NM_CONSERVATIVE(nm_register_value(self));
  VALUE idx;
  rb_scan_args(argc, argv, "01", &idx);
+ NM_CONSERVATIVE(nm_register_value(idx));

  YALE_STORAGE* s = reinterpret_cast<YALE_STORAGE*>(NM_SRC(self));

  if (idx == Qnil) {
- VALUE* vals = ALLOCA_N(VALUE, s->shape[0]);
+ VALUE* vals = NM_ALLOCA_N(VALUE, s->shape[0]);
+
+ nm_register_values(vals, s->shape[0]);

  if (NM_DTYPE(self) == nm::RUBYOBJ) {
  for (size_t i = 0; i < s->shape[0]; ++i) {
@@ -1502,12 +1716,16 @@ static VALUE nm_d(int argc, VALUE* argv, VALUE self) {
  vals[i] = rubyobj_from_cval((char*)(s->a) + DTYPE_SIZES[s->dtype]*i, s->dtype).rval;
  }
  }
+ nm_unregister_values(vals, s->shape[0]);
+ NM_CONSERVATIVE(nm_unregister_value(idx));
+ NM_CONSERVATIVE(nm_unregister_value(self));

  return rb_ary_new4(s->shape[0], vals);
  } else {
  size_t index = FIX2INT(idx);
+ NM_CONSERVATIVE(nm_unregister_value(idx));
+ NM_CONSERVATIVE(nm_unregister_value(self));
  if (index >= s->shape[0]) rb_raise(rb_eRangeError, "out of range");
-
  return rubyobj_from_cval((char*)(s->a) + DTYPE_SIZES[s->dtype] * index, s->dtype).rval;
  }
  }
@@ -1519,11 +1737,15 @@ static VALUE nm_d(int argc, VALUE* argv, VALUE self) {
  * Get the non-diagonal ("LU") portion of the A array of a Yale matrix.
  */
  static VALUE nm_lu(VALUE self) {
+ NM_CONSERVATIVE(nm_register_value(self));
+
  YALE_STORAGE* s = reinterpret_cast<YALE_STORAGE*>(NM_SRC(self));

  size_t size = nm_yale_storage_get_size(s);

- VALUE* vals = ALLOCA_N(VALUE, size - s->shape[0] - 1);
+ VALUE* vals = NM_ALLOCA_N(VALUE, size - s->shape[0] - 1);
+
+ nm_register_values(vals, size - s->shape[0] - 1);

  if (NM_DTYPE(self) == nm::RUBYOBJ) {
  for (size_t i = 0; i < size - s->shape[0] - 1; ++i) {
@@ -1540,6 +1762,9 @@ static VALUE nm_lu(VALUE self) {
  for (size_t i = size; i < s->capacity; ++i)
  rb_ary_push(ary, Qnil);

+ nm_unregister_values(vals, size - s->shape[0] - 1);
+ NM_CONSERVATIVE(nm_unregister_value(self));
+
  return ary;
  }

@@ -1551,14 +1776,18 @@ static VALUE nm_lu(VALUE self) {
  * JA and LU portions of the IJA and A arrays, respectively.
  */
  static VALUE nm_ia(VALUE self) {
+ NM_CONSERVATIVE(nm_register_value(self));
+
  YALE_STORAGE* s = reinterpret_cast<YALE_STORAGE*>(NM_SRC(self));

- VALUE* vals = ALLOCA_N(VALUE, s->shape[0] + 1);
+ VALUE* vals = NM_ALLOCA_N(VALUE, s->shape[0] + 1);

  for (size_t i = 0; i < s->shape[0] + 1; ++i) {
  vals[i] = INT2FIX(s->ija[i]);
  }

+ NM_CONSERVATIVE(nm_unregister_value(self));
+
  return rb_ary_new4(s->shape[0]+1, vals);
  }

@@ -1570,11 +1799,16 @@ static VALUE nm_ia(VALUE self) {
  * positions in the LU portion of the A array.
  */
  static VALUE nm_ja(VALUE self) {
+
+ NM_CONSERVATIVE(nm_register_value(self));
+
  YALE_STORAGE* s = reinterpret_cast<YALE_STORAGE*>(NM_SRC(self));

  size_t size = nm_yale_storage_get_size(s);

- VALUE* vals = ALLOCA_N(VALUE, size - s->shape[0] - 1);
+ VALUE* vals = NM_ALLOCA_N(VALUE, size - s->shape[0] - 1);
+
+ nm_register_values(vals, size - s->shape[0] - 1);

  for (size_t i = 0; i < size - s->shape[0] - 1; ++i) {
  vals[i] = INT2FIX(s->ija[s->shape[0] + 1 + i]);
@@ -1585,6 +1819,9 @@ static VALUE nm_ja(VALUE self) {
  for (size_t i = size; i < s->capacity; ++i)
  rb_ary_push(ary, Qnil);

+ nm_unregister_values(vals, size - s->shape[0] - 1);
+ NM_CONSERVATIVE(nm_unregister_value(self));
+
  return ary;
  }

@@ -1596,15 +1833,20 @@ static VALUE nm_ja(VALUE self) {
  * Get the IJA array of a Yale matrix (or a component of the IJA array).
  */
  static VALUE nm_ija(int argc, VALUE* argv, VALUE self) {
+ NM_CONSERVATIVE(nm_register_value(self));
+
  VALUE idx;
  rb_scan_args(argc, argv, "01", &idx);
+ NM_CONSERVATIVE(nm_register_value(idx));

  YALE_STORAGE* s = reinterpret_cast<YALE_STORAGE*>(NM_SRC(self));
  size_t size = nm_yale_storage_get_size(s);

  if (idx == Qnil) {

- VALUE* vals = ALLOCA_N(VALUE, size);
+ VALUE* vals = NM_ALLOCA_N(VALUE, size);
+
+ nm_register_values(vals, size);

  for (size_t i = 0; i < size; ++i) {
  vals[i] = INT2FIX(s->ija[i]);
@@ -1615,12 +1857,17 @@ static VALUE nm_ija(int argc, VALUE* argv, VALUE self) {
  for (size_t i = size; i < s->capacity; ++i)
  rb_ary_push(ary, Qnil);

+ nm_unregister_values(vals, size);
+ NM_CONSERVATIVE(nm_unregister_value(idx));
+ NM_CONSERVATIVE(nm_unregister_value(self));
+
  return ary;

  } else {
  size_t index = FIX2INT(idx);
  if (index >= size) rb_raise(rb_eRangeError, "out of range");
-
+ NM_CONSERVATIVE(nm_unregister_value(self));
+ NM_CONSERVATIVE(nm_unregister_value(idx));
  return INT2FIX(s->ija[index]);
  }
  }
@@ -1638,11 +1885,18 @@ static VALUE nm_ija(int argc, VALUE* argv, VALUE self) {
  * range.
  */
  static VALUE nm_nd_row(int argc, VALUE* argv, VALUE self) {
- if (NM_SRC(self) != NM_STORAGE(self))
+
+ NM_CONSERVATIVE(nm_register_value(self));
+
+ if (NM_SRC(self) != NM_STORAGE(self)) {
+ NM_CONSERVATIVE(nm_unregister_value(self));
  rb_raise(rb_eNotImpError, "must be called on a real matrix and not a slice");
+ }

  VALUE i_, as;
  rb_scan_args(argc, argv, "11", &i_, &as);
+ NM_CONSERVATIVE(nm_register_value(as));
+ NM_CONSERVATIVE(nm_register_value(i_));

  bool keys = false;
  if (as != Qnil && rb_to_id(as) != nm_rb_hash) keys = true;
@@ -1650,7 +1904,14 @@ static VALUE nm_nd_row(int argc, VALUE* argv, VALUE self) {
  size_t i = FIX2INT(i_);

  YALE_STORAGE* s = NM_STORAGE_YALE(self);
- nm::dtype_t dtype = NM_DTYPE(self);
+ //nm::dtype_t dtype = NM_DTYPE(self);
+
+ if (i >= s->shape[0]) {
+ NM_CONSERVATIVE(nm_unregister_value(self));
+ NM_CONSERVATIVE(nm_unregister_value(as));
+ NM_CONSERVATIVE(nm_unregister_value(i_));
+ rb_raise(rb_eRangeError, "out of range (%lu >= %lu)", i, s->shape[0]);
+ }

  size_t pos = s->ija[i];
  size_t nextpos = s->ija[i+1];
@@ -1671,7 +1932,9 @@ static VALUE nm_nd_row(int argc, VALUE* argv, VALUE self) {
  rb_hash_aset(ret, INT2FIX(s->ija[idx]), rubyobj_from_cval((char*)(s->a) + DTYPE_SIZES[s->dtype]*idx, s->dtype).rval);
  }
  }
-
+ NM_CONSERVATIVE(nm_unregister_value(as));
+ NM_CONSERVATIVE(nm_unregister_value(i_));
+ NM_CONSERVATIVE(nm_unregister_value(self));
  return ret;
  }

@@ -1706,18 +1969,32 @@ static VALUE nm_nd_row(int argc, VALUE* argv, VALUE self) {
  */
  VALUE nm_vector_set(int argc, VALUE* argv, VALUE self) { //, VALUE i_, VALUE jv, VALUE vv, VALUE pos_) {

- if (NM_SRC(self) != NM_STORAGE(self))
+ NM_CONSERVATIVE(nm_register_value(self));
+
+ if (NM_SRC(self) != NM_STORAGE(self)) {
+ NM_CONSERVATIVE(nm_unregister_value(self));
  rb_raise(rb_eNotImpError, "must be called on a real matrix and not a slice");
+ }

  // i, jv, vv are mandatory; pos is optional; thus "31"
  VALUE i_, jv, vv, pos_;
  rb_scan_args(argc, argv, "31", &i_, &jv, &vv, &pos_);
+ NM_CONSERVATIVE(nm_register_value(i_));
+ NM_CONSERVATIVE(nm_register_value(jv));
+ NM_CONSERVATIVE(nm_register_value(vv));
+ NM_CONSERVATIVE(nm_register_value(pos_));

  size_t len = RARRAY_LEN(jv); // need length in order to read the arrays in
  size_t vvlen = RARRAY_LEN(vv);

- if (len != vvlen)
- rb_raise(rb_eArgError, "lengths must match between j array (%d) and value array (%d)", len, vvlen);
+ if (len != vvlen) {
+ NM_CONSERVATIVE(nm_unregister_value(pos_));
+ NM_CONSERVATIVE(nm_unregister_value(vv));
+ NM_CONSERVATIVE(nm_unregister_value(jv));
+ NM_CONSERVATIVE(nm_unregister_value(i_));
+ NM_CONSERVATIVE(nm_unregister_value(self));
+ rb_raise(rb_eArgError, "lengths must match between j array (%lu) and value array (%lu)", len, vvlen);
+ }

  YALE_STORAGE* s = NM_STORAGE_YALE(self);
  nm::dtype_t dtype = NM_DTYPE(self);
@@ -1726,8 +2003,11 @@ VALUE nm_vector_set(int argc, VALUE* argv, VALUE self) { //, VALUE i_, VALUE jv,
  size_t pos = s->ija[i];

  // Allocate the j array and the values array
- size_t* j = ALLOCA_N(size_t, len);
- void* vals = ALLOCA_N(char, DTYPE_SIZES[dtype] * len);
+ size_t* j = NM_ALLOCA_N(size_t, len);
+ void* vals = NM_ALLOCA_N(char, DTYPE_SIZES[dtype] * len);
+ if (dtype == nm::RUBYOBJ){
+ nm_register_values(reinterpret_cast<VALUE*>(vals), len);
+ }

  // Copy array contents
  for (size_t idx = 0; idx < len; ++idx) {
@@ -1739,6 +2019,16 @@ VALUE nm_vector_set(int argc, VALUE* argv, VALUE self) { //, VALUE i_, VALUE jv,
  nm_yale_storage_increment_ia_after(s, s->shape[0], i, len);
  s->ndnz += len;

+ if (dtype == nm::RUBYOBJ){
+ nm_unregister_values(reinterpret_cast<VALUE*>(vals), len);
+ }
+
+ NM_CONSERVATIVE(nm_unregister_value(pos_));
+ NM_CONSERVATIVE(nm_unregister_value(vv));
+ NM_CONSERVATIVE(nm_unregister_value(jv));
+ NM_CONSERVATIVE(nm_unregister_value(i_));
+ NM_CONSERVATIVE(nm_unregister_value(self));
+
  // Return the updated position
  pos += len;
  return INT2FIX(pos);
@@ -1754,7 +2044,8 @@ VALUE nm_vector_set(int argc, VALUE* argv, VALUE self) { //, VALUE i_, VALUE jv,
  * Get the default_value property from a yale matrix.
  */
  VALUE nm_yale_default_value(VALUE self) {
- return default_value(NM_STORAGE_YALE(self));
+ VALUE to_return = default_value(NM_STORAGE_YALE(self));
+ return to_return;
  }

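As a footnote to the nm_row_keys_intersection hunk above: stripped of the Ruby bookkeeping and of the separate diagonal handling (Yale stores the diagonal apart from the IJA column list), its merge loop is a plain intersection of two ascending lists of stored column indices. A self-contained sketch of that core idea, with made-up sample data and names that are not taken from the NMatrix source:

#include <cstddef>
#include <iostream>
#include <vector>

// Intersect two ascending lists of stored column indices, the way the
// nm_row_keys_intersection merge loop walks one row of each matrix in step.
std::vector<std::size_t> row_keys_intersection(const std::vector<std::size_t>& a,
                                               const std::vector<std::size_t>& b) {
  std::vector<std::size_t> out;
  std::size_t i = 0, j = 0;
  while (i < a.size() && j < b.size()) {
    if (a[i] == b[j])     { out.push_back(a[i]); ++i; ++j; }
    else if (a[i] < b[j]) { ++i; }
    else                  { ++j; }
  }
  return out;
}

int main() {
  // Hypothetical stored columns of row i in one matrix and row i2 in another.
  std::vector<std::size_t> row1 = {1, 3, 7};
  std::vector<std::size_t> row2 = {0, 3, 7, 9};
  for (std::size_t k : row_keys_intersection(row1, row2)) std::cout << k << ' ';
  std::cout << '\n';  // prints: 3 7
}

In the gem itself this logic is reached through the Ruby-facing method registered above, m1.yale_row_keys_intersection(i, m2, i2), which additionally reports a shared diagonal entry and refuses to run on slices.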