nmatrix 0.0.9 → 0.1.0.rc1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (101) hide show
  1. checksums.yaml +4 -4
  2. data/Gemfile +1 -0
  3. data/History.txt +95 -1
  4. data/LICENSE.txt +2 -2
  5. data/README.rdoc +24 -26
  6. data/Rakefile +32 -16
  7. data/ext/nmatrix/data/complex.h +2 -2
  8. data/ext/nmatrix/data/data.cpp +27 -51
  9. data/ext/nmatrix/data/data.h +92 -4
  10. data/ext/nmatrix/data/meta.h +2 -2
  11. data/ext/nmatrix/data/rational.h +2 -2
  12. data/ext/nmatrix/data/ruby_object.h +2 -2
  13. data/ext/nmatrix/extconf.rb +87 -86
  14. data/ext/nmatrix/math.cpp +45 -40
  15. data/ext/nmatrix/math/asum.h +3 -3
  16. data/ext/nmatrix/math/geev.h +2 -2
  17. data/ext/nmatrix/math/gemm.h +6 -2
  18. data/ext/nmatrix/math/gemv.h +6 -2
  19. data/ext/nmatrix/math/ger.h +2 -2
  20. data/ext/nmatrix/math/gesdd.h +2 -2
  21. data/ext/nmatrix/math/gesvd.h +2 -2
  22. data/ext/nmatrix/math/getf2.h +2 -2
  23. data/ext/nmatrix/math/getrf.h +2 -2
  24. data/ext/nmatrix/math/getri.h +2 -2
  25. data/ext/nmatrix/math/getrs.h +7 -3
  26. data/ext/nmatrix/math/idamax.h +2 -2
  27. data/ext/nmatrix/math/inc.h +12 -6
  28. data/ext/nmatrix/math/laswp.h +2 -2
  29. data/ext/nmatrix/math/long_dtype.h +2 -2
  30. data/ext/nmatrix/math/math.h +16 -10
  31. data/ext/nmatrix/math/nrm2.h +3 -3
  32. data/ext/nmatrix/math/potrs.h +7 -3
  33. data/ext/nmatrix/math/rot.h +2 -2
  34. data/ext/nmatrix/math/rotg.h +2 -2
  35. data/ext/nmatrix/math/scal.h +2 -2
  36. data/ext/nmatrix/math/swap.h +2 -2
  37. data/ext/nmatrix/math/trsm.h +7 -3
  38. data/ext/nmatrix/nm_memory.h +60 -0
  39. data/ext/nmatrix/nmatrix.cpp +13 -47
  40. data/ext/nmatrix/nmatrix.h +37 -12
  41. data/ext/nmatrix/ruby_constants.cpp +4 -2
  42. data/ext/nmatrix/ruby_constants.h +4 -2
  43. data/ext/nmatrix/ruby_nmatrix.c +937 -170
  44. data/ext/nmatrix/storage/common.cpp +2 -2
  45. data/ext/nmatrix/storage/common.h +2 -2
  46. data/ext/nmatrix/storage/{dense.cpp → dense/dense.cpp} +253 -100
  47. data/ext/nmatrix/storage/{dense.h → dense/dense.h} +6 -5
  48. data/ext/nmatrix/storage/{list.cpp → list/list.cpp} +517 -98
  49. data/ext/nmatrix/storage/{list.h → list/list.h} +13 -6
  50. data/ext/nmatrix/storage/storage.cpp +48 -19
  51. data/ext/nmatrix/storage/storage.h +4 -4
  52. data/ext/nmatrix/storage/yale/class.h +112 -43
  53. data/ext/nmatrix/storage/yale/iterators/base.h +2 -2
  54. data/ext/nmatrix/storage/yale/iterators/iterator.h +2 -2
  55. data/ext/nmatrix/storage/yale/iterators/row.h +2 -2
  56. data/ext/nmatrix/storage/yale/iterators/row_stored.h +2 -2
  57. data/ext/nmatrix/storage/yale/iterators/row_stored_nd.h +4 -3
  58. data/ext/nmatrix/storage/yale/iterators/stored_diagonal.h +2 -2
  59. data/ext/nmatrix/storage/yale/math/transpose.h +2 -2
  60. data/ext/nmatrix/storage/yale/yale.cpp +343 -52
  61. data/ext/nmatrix/storage/yale/yale.h +7 -3
  62. data/ext/nmatrix/types.h +2 -2
  63. data/ext/nmatrix/util/io.cpp +5 -5
  64. data/ext/nmatrix/util/io.h +2 -2
  65. data/ext/nmatrix/util/sl_list.cpp +40 -27
  66. data/ext/nmatrix/util/sl_list.h +3 -3
  67. data/ext/nmatrix/util/util.h +2 -2
  68. data/lib/nmatrix.rb +2 -2
  69. data/lib/nmatrix/blas.rb +2 -2
  70. data/lib/nmatrix/enumerate.rb +17 -6
  71. data/lib/nmatrix/io/market.rb +2 -3
  72. data/lib/nmatrix/io/mat5_reader.rb +2 -2
  73. data/lib/nmatrix/io/mat_reader.rb +2 -2
  74. data/lib/nmatrix/lapack.rb +46 -46
  75. data/lib/nmatrix/math.rb +213 -20
  76. data/lib/nmatrix/monkeys.rb +24 -2
  77. data/lib/nmatrix/nmatrix.rb +394 -9
  78. data/lib/nmatrix/nvector.rb +2 -64
  79. data/lib/nmatrix/rspec.rb +2 -2
  80. data/lib/nmatrix/shortcuts.rb +14 -61
  81. data/lib/nmatrix/version.rb +11 -3
  82. data/lib/nmatrix/yale_functions.rb +4 -4
  83. data/nmatrix.gemspec +2 -7
  84. data/scripts/mac-brew-gcc.sh +11 -8
  85. data/scripts/mac-mavericks-brew-gcc.sh +22 -0
  86. data/spec/00_nmatrix_spec.rb +116 -7
  87. data/spec/01_enum_spec.rb +17 -3
  88. data/spec/02_slice_spec.rb +11 -3
  89. data/spec/blas_spec.rb +5 -2
  90. data/spec/elementwise_spec.rb +5 -2
  91. data/spec/io_spec.rb +27 -17
  92. data/spec/lapack_spec.rb +157 -9
  93. data/spec/math_spec.rb +95 -4
  94. data/spec/nmatrix_yale_spec.rb +21 -26
  95. data/spec/rspec_monkeys.rb +27 -0
  96. data/spec/rspec_spec.rb +2 -2
  97. data/spec/shortcuts_spec.rb +5 -10
  98. data/spec/slice_set_spec.rb +6 -2
  99. data/spec/spec_helper.rb +3 -2
  100. data/spec/stat_spec.rb +174 -158
  101. metadata +15 -15
@@ -0,0 +1,60 @@
1
+ /////////////////////////////////////////////////////////////////////
2
+ // = NMatrix
3
+ //
4
+ // A linear algebra library for scientific computation in Ruby.
5
+ // NMatrix is part of SciRuby.
6
+ //
7
+ // NMatrix was originally inspired by and derived from NArray, by
8
+ // Masahiro Tanaka: http://narray.rubyforge.org
9
+ //
10
+ // == Copyright Information
11
+ //
12
+ // SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
13
+ // NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
14
+ //
15
+ // Please see LICENSE.txt for additional copyright notices.
16
+ //
17
+ // == Contributing
18
+ //
19
+ // By contributing source code to SciRuby, you agree to be bound by
20
+ // our Contributor Agreement:
21
+ //
22
+ // * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
23
+ //
24
+ // == nm_memory.h
25
+ //
26
+ // Macros for memory allocation and freeing
27
+
28
+ /**
29
+ * We define these macros, which just call the ruby ones, as this makes
30
+ * debugging memory issues (particularly those involving interaction with
31
+ // the ruby GC) easier, as it's possible to add debugging code temporarily.
32
+ */
33
+ #ifndef __NM_MEMORY_H__
34
+ #define __NM_MEMORY_H__
35
+
36
+ #include <ruby.h>
37
+
38
+ #define NM_ALLOC(type) (ALLOC(type))
39
+
40
+ #define NM_ALLOC_N(type, n) (ALLOC_N(type, n))
41
+
42
+ #define NM_REALLOC_N(var, type, n) (REALLOC_N(var, type, n))
43
+
44
+ #define NM_ALLOCA_N(type, n) (ALLOCA_N(type, n))
45
+
46
+ #define NM_FREE(var) (xfree(var))
47
+
48
+ #define NM_ALLOC_NONRUBY(type) ((type*) malloc(sizeof(type)))
49
+
50
+ //Defines whether to do conservative gc registrations, i.e. those
51
+ //registrations that we're not that sure are necessary.
52
+ //#define NM_GC_CONSERVATIVE
53
+
54
+ #ifdef NM_GC_CONSERVATIVE
55
+ #define NM_CONSERVATIVE(statement) (statement)
56
+ #else
57
+ #define NM_CONSERVATIVE(statement)
58
+ #endif //NM_GC_CONSERVATIVE
59
+
60
+ #endif
@@ -9,8 +9,8 @@
9
9
  //
10
10
  // == Copyright Information
11
11
  //
12
- // SciRuby is Copyright (c) 2010 - 2013, Ruby Science Foundation
13
- // NMatrix is Copyright (c) 2013, Ruby Science Foundation
12
+ // SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
13
+ // NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
14
14
  //
15
15
  // Please see LICENSE.txt for additional copyright notices.
16
16
  //
@@ -31,12 +31,19 @@
31
31
  * Standard Includes
32
32
  */
33
33
 
34
- #include <cblas.h>
35
- #ifdef HAVE_CLAPACK_H
36
34
  extern "C" {
35
+ #if defined HAVE_CBLAS_H
36
+ #include <cblas.h>
37
+ #elif defined HAVE_ATLAS_CBLAS_H
38
+ #include <atlas/cblas.h>
39
+ #endif
40
+
41
+ #if defined HAVE_CLAPACK_H
37
42
  #include <clapack.h>
38
- }
43
+ #elif defined HAVE_ATLAS_CLAPACK_H
44
+ #include <atlas/clapack.h>
39
45
  #endif
46
+ }
40
47
 
41
48
  #include <ruby.h>
42
49
  #include <algorithm> // std::min
@@ -52,7 +59,7 @@ extern "C" {
52
59
  #include "math/math.h"
53
60
  #include "util/io.h"
54
61
  #include "storage/storage.h"
55
- #include "storage/list.h"
62
+ #include "storage/list/list.h"
56
63
  #include "storage/yale/yale.h"
57
64
 
58
65
  #include "nmatrix.h"
@@ -75,47 +82,6 @@ extern "C" {
75
82
 
76
83
  namespace nm {
77
84
 
78
- /*
79
- * Read the shape from a matrix storage file, and ignore any padding.
80
- *
81
- * shape should already be allocated before calling this.
82
- */
83
- void read_padded_shape(std::ifstream& f, size_t dim, size_t* shape) {
84
- size_t bytes_read = 0;
85
-
86
- // Read shape
87
- for (size_t i = 0; i < dim; ++i) {
88
- IType s;
89
- f.read(reinterpret_cast<char*>(&s), sizeof(IType));
90
- shape[i] = s;
91
-
92
- bytes_read += sizeof(IType);
93
- }
94
-
95
- // Ignore padding
96
- f.ignore(bytes_read % 8);
97
- }
98
-
99
- void write_padded_shape(std::ofstream& f, size_t dim, size_t* shape) {
100
- size_t bytes_written = 0;
101
-
102
- // Write shape
103
- for (size_t i = 0; i < dim; ++i) {
104
- IType s = shape[i];
105
- f.write(reinterpret_cast<const char*>(&s), sizeof(IType));
106
-
107
- bytes_written += sizeof(IType);
108
- }
109
-
110
- // Pad with zeros
111
- while (bytes_written % 8) {
112
- IType zero = 0;
113
- f.write(reinterpret_cast<const char*>(&zero), sizeof(IType));
114
-
115
- bytes_written += sizeof(IType);
116
- }
117
- }
118
-
119
85
  /*
120
86
  * This function is pulled out separately so it can be called for hermitian matrix writing, which also uses it.
121
87
  */
@@ -9,8 +9,8 @@
9
9
  //
10
10
  // == Copyright Information
11
11
  //
12
- // SciRuby is Copyright (c) 2010 - 2013, Ruby Science Foundation
13
- // NMatrix is Copyright (c) 2013, Ruby Science Foundation
12
+ // SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
13
+ // NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
14
14
  //
15
15
  // Please see LICENSE.txt for additional copyright notices.
16
16
  //
@@ -53,6 +53,8 @@
53
53
  #endif
54
54
  #endif
55
55
 
56
+ #include "nm_memory.h"
57
+
56
58
  /*
57
59
  * Macros
58
60
  */
@@ -113,21 +115,27 @@
113
115
  * return enumerator_init(enumerator_allocate(rb_cEnumerator), obj, meth, argc, argv);
114
116
  * }
115
117
  */
118
+
119
+ //opening portion -- this allows unregistering any objects in use before returning
120
+ #define RETURN_SIZED_ENUMERATOR_PRE do { \
121
+ if (!rb_block_given_p()) {
122
+
123
+ //remaining portion
116
124
  #ifdef RUBY_2
117
125
  #ifndef RETURN_SIZED_ENUMERATOR
118
126
  #undef RETURN_SIZED_ENUMERATOR
119
127
  // Ruby 2.0 and higher has rb_enumeratorize_with_size instead of rb_enumeratorize.
120
128
  // We want to support both in the simplest way possible.
121
- #define RETURN_SIZED_ENUMERATOR(obj, argc, argv, size_fn) do { \
122
- if (!rb_block_given_p()) \
123
- return rb_enumeratorize_with_size((obj), ID2SYM(rb_frame_this_func()), (argc), (argv), (size_fn)); \
129
+ #define RETURN_SIZED_ENUMERATOR(obj, argc, argv, size_fn) \
130
+ return rb_enumeratorize_with_size((obj), ID2SYM(rb_frame_this_func()), (argc), (argv), (size_fn)); \
131
+ } \
124
132
  } while (0)
125
133
  #endif
126
134
  #else
127
135
  #undef RETURN_SIZED_ENUMERATOR
128
- #define RETURN_SIZED_ENUMERATOR(obj, argc, argv, size_fn) do { \
129
- if (!rb_block_given_p()) \
130
- return rb_enumeratorize((obj), ID2SYM(rb_frame_this_func()), (argc), (argv)); \
136
+ #define RETURN_SIZED_ENUMERATOR(obj, argc, argv, size_fn) \
137
+ return rb_enumeratorize((obj), ID2SYM(rb_frame_this_func()), (argc), (argv)); \
138
+ } \
131
139
  } while (0)
132
140
  #endif
133
141
 
@@ -278,6 +286,18 @@ NM_DEF_STRUCT_PRE(NMATRIX); // struct NMATRIX {
278
286
  NM_DECL_STRUCT(STORAGE*, storage); // STORAGE* storage; // Pointer to storage struct.
279
287
  NM_DEF_STRUCT_POST(NMATRIX); // };
280
288
 
289
+ /* Structs for dealing with VALUEs in use so that they don't get GC'd */
290
+
291
+ typedef struct __NM_GC_LL_NODE {
292
+ VALUE* val;
293
+ size_t n;
294
+ __NM_GC_LL_NODE* next;
295
+ } nm_gc_ll_node;
296
+
297
+ typedef struct __NM_GC_HOLDER {
298
+ __NM_GC_LL_NODE* start;
299
+ } nm_gc_holder;
300
+
281
301
  #define NM_MAX_RANK 15
282
302
 
283
303
  #define UnwrapNMatrix(obj,var) Data_Get_Struct(obj, NMATRIX, var)
@@ -355,16 +375,21 @@ extern "C" {
355
375
  NM_DECL_ENUM(dtype_t, nm_dtype_min(VALUE));
356
376
 
357
377
  // Non-API functions needed by other cpp files.
358
- NMATRIX* nm_create(nm::stype_t stype, STORAGE* storage);
359
- NMATRIX* nm_cast_with_ctype_args(NMATRIX* self, nm::stype_t new_stype, nm::dtype_t new_dtype, void* init_ptr);
378
+ NMATRIX* nm_create(NM_DECL_ENUM(stype_t, stype), STORAGE* storage);
379
+ NMATRIX* nm_cast_with_ctype_args(NMATRIX* self, NM_DECL_ENUM(stype_t, new_stype), NM_DECL_ENUM(dtype_t, new_dtype), void* init_ptr);
360
380
  VALUE nm_cast(VALUE self, VALUE new_stype_symbol, VALUE new_dtype_symbol, VALUE init);
361
381
  void nm_mark(NMATRIX* mat);
362
382
  void nm_delete(NMATRIX* mat);
363
383
  void nm_delete_ref(NMATRIX* mat);
364
- void nm_mark(NMATRIX* mat);
365
384
  void nm_register_values(VALUE* vals, size_t n);
366
385
  void nm_unregister_values(VALUE* vals, size_t n);
367
-
386
+ void nm_register_value(VALUE& val);
387
+ void nm_unregister_value(VALUE& val);
388
+ void nm_register_storage(nm::stype_t stype, const STORAGE* storage);
389
+ void nm_unregister_storage(nm::stype_t stype, const STORAGE* storage);
390
+ void nm_register_nmatrix(NMATRIX* nmatrix);
391
+ void nm_unregister_nmatrix(NMATRIX* nmatrix);
392
+ void nm_completely_unregister_value(VALUE& val);
368
393
  #ifdef __cplusplus
369
394
  }
370
395
  #endif
@@ -9,8 +9,8 @@
9
9
  //
10
10
  // == Copyright Information
11
11
  //
12
- // SciRuby is Copyright (c) 2010 - 2012, Ruby Science Foundation
13
- // NMatrix is Copyright (c) 2012, Ruby Science Foundation
12
+ // SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
13
+ // NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
14
14
  //
15
15
  // Please see LICENSE.txt for additional copyright notices.
16
16
  //
@@ -89,6 +89,8 @@ VALUE cNMatrix,
89
89
  cNMatrix_YaleFunctions,
90
90
  cNMatrix_BLAS,
91
91
  cNMatrix_LAPACK,
92
+
93
+ cNMatrix_GC_holder,
92
94
 
93
95
  nm_eDataTypeError,
94
96
  nm_eConvergenceError,
@@ -9,8 +9,8 @@
9
9
  //
10
10
  // == Copyright Information
11
11
  //
12
- // SciRuby is Copyright (c) 2010 - 2013, Ruby Science Foundation
13
- // NMatrix is Copyright (c) 2013, Ruby Science Foundation
12
+ // SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
13
+ // NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
14
14
  //
15
15
  // Please see LICENSE.txt for additional copyright notices.
16
16
  //
@@ -91,6 +91,8 @@ extern VALUE cNMatrix,
91
91
  cNMatrix_YaleFunctions,
92
92
  cNMatrix_BLAS,
93
93
  cNMatrix_LAPACK,
94
+
95
+ cNMatrix_GC_holder,
94
96
 
95
97
  nm_eDataTypeError,
96
98
  nm_eConvergenceError,
@@ -9,8 +9,8 @@
9
9
  //
10
10
  // == Copyright Information
11
11
  //
12
- // SciRuby is Copyright (c) 2010 - 2013, Ruby Science Foundation
13
- // NMatrix is Copyright (c) 2013, Ruby Science Foundation
12
+ // SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
13
+ // NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
14
14
  //
15
15
  // Please see LICENSE.txt for additional copyright notices.
16
16
  //
@@ -51,6 +51,7 @@ static VALUE nm_capacity(VALUE self);
51
51
  static VALUE nm_each_with_indices(VALUE nmatrix);
52
52
  static VALUE nm_each_stored_with_indices(VALUE nmatrix);
53
53
  static VALUE nm_each_ordered_stored_with_indices(VALUE nmatrix);
54
+ static VALUE nm_map_stored(VALUE nmatrix);
54
55
 
55
56
  static SLICE* get_slice(size_t dim, int argc, VALUE* arg, size_t* shape);
56
57
  static VALUE nm_xslice(int argc, VALUE* argv, void* (*slice_func)(const STORAGE*, SLICE*), void (*delete_func)(NMATRIX*), VALUE self);
@@ -74,10 +75,27 @@ static VALUE nm_ew_##name(VALUE left_val, VALUE right_val) { \
74
75
  return elementwise_op(nm::EW_##oper, left_val, right_val); \
75
76
  }
76
77
 
78
+ #define DEF_UNARY_RUBY_ACCESSOR(oper, name) \
79
+ static VALUE nm_unary_##name(VALUE self) { \
80
+ return unary_op(nm::UNARY_##oper, self); \
81
+ }
82
+
83
+ #define DEF_NONCOM_ELEMENTWISE_RUBY_ACCESSOR(oper, name) \
84
+ static VALUE nm_noncom_ew_##name(int argc, VALUE* argv, VALUE self) { \
85
+ if (argc > 1) { \
86
+ return noncom_elementwise_op(nm::NONCOM_EW_##oper, self, argv[0], argv[1]); \
87
+ } else { \
88
+ return noncom_elementwise_op(nm::NONCOM_EW_##oper, self, argv[0], Qfalse); \
89
+ } \
90
+ }
91
+
92
+
77
93
  /*
78
94
  * Macro declares a corresponding accessor function prototype for some element-wise operation.
79
95
  */
80
96
  #define DECL_ELEMENTWISE_RUBY_ACCESSOR(name) static VALUE nm_ew_##name(VALUE left_val, VALUE right_val);
97
+ #define DECL_UNARY_RUBY_ACCESSOR(name) static VALUE nm_unary_##name(VALUE self);
98
+ #define DECL_NONCOM_ELEMENTWISE_RUBY_ACCESSOR(name) static VALUE nm_noncom_ew_##name(int argc, VALUE* argv, VALUE self);
81
99
 
82
100
  DECL_ELEMENTWISE_RUBY_ACCESSOR(add)
83
101
  DECL_ELEMENTWISE_RUBY_ACCESSOR(subtract)
@@ -91,8 +109,36 @@ DECL_ELEMENTWISE_RUBY_ACCESSOR(lt)
91
109
  DECL_ELEMENTWISE_RUBY_ACCESSOR(gt)
92
110
  DECL_ELEMENTWISE_RUBY_ACCESSOR(leq)
93
111
  DECL_ELEMENTWISE_RUBY_ACCESSOR(geq)
112
+ DECL_UNARY_RUBY_ACCESSOR(sin)
113
+ DECL_UNARY_RUBY_ACCESSOR(cos)
114
+ DECL_UNARY_RUBY_ACCESSOR(tan)
115
+ DECL_UNARY_RUBY_ACCESSOR(asin)
116
+ DECL_UNARY_RUBY_ACCESSOR(acos)
117
+ DECL_UNARY_RUBY_ACCESSOR(atan)
118
+ DECL_UNARY_RUBY_ACCESSOR(sinh)
119
+ DECL_UNARY_RUBY_ACCESSOR(cosh)
120
+ DECL_UNARY_RUBY_ACCESSOR(tanh)
121
+ DECL_UNARY_RUBY_ACCESSOR(asinh)
122
+ DECL_UNARY_RUBY_ACCESSOR(acosh)
123
+ DECL_UNARY_RUBY_ACCESSOR(atanh)
124
+ DECL_UNARY_RUBY_ACCESSOR(exp)
125
+ DECL_UNARY_RUBY_ACCESSOR(log2)
126
+ DECL_UNARY_RUBY_ACCESSOR(log10)
127
+ DECL_UNARY_RUBY_ACCESSOR(sqrt)
128
+ DECL_UNARY_RUBY_ACCESSOR(erf)
129
+ DECL_UNARY_RUBY_ACCESSOR(erfc)
130
+ DECL_UNARY_RUBY_ACCESSOR(cbrt)
131
+ DECL_UNARY_RUBY_ACCESSOR(gamma)
132
+ DECL_NONCOM_ELEMENTWISE_RUBY_ACCESSOR(atan2)
133
+ DECL_NONCOM_ELEMENTWISE_RUBY_ACCESSOR(ldexp)
134
+ DECL_NONCOM_ELEMENTWISE_RUBY_ACCESSOR(hypot)
135
+
136
+ //log can be unary, but also take a base argument, as with Math.log
137
+ static VALUE nm_unary_log(int argc, VALUE* argv, VALUE self);
94
138
 
95
139
  static VALUE elementwise_op(nm::ewop_t op, VALUE left_val, VALUE right_val);
140
+ static VALUE unary_op(nm::unaryop_t op, VALUE self);
141
+ static VALUE noncom_elementwise_op(nm::noncom_ewop_t op, VALUE self, VALUE other, VALUE orderflip);
96
142
 
97
143
  static VALUE nm_symmetric(VALUE self);
98
144
  static VALUE nm_hermitian(VALUE self);
@@ -144,6 +190,12 @@ void Init_nmatrix() {
144
190
  */
145
191
  nm_eStorageTypeError = rb_define_class("StorageTypeError", rb_eStandardError);
146
192
 
193
+ /*
194
+ * Class that holds values in use by the C code.
195
+ */
196
+ cNMatrix_GC_holder = rb_define_class("NMGCHolder", rb_cObject);
197
+
198
+
147
199
  ///////////////////
148
200
  // Class Methods //
149
201
  ///////////////////
@@ -169,7 +221,7 @@ void Init_nmatrix() {
169
221
  rb_define_method(cNMatrix, "write", (METHOD)nm_write, -1);
170
222
 
171
223
  // Technically, the following function is a copy constructor.
172
- rb_define_method(cNMatrix, "transpose", (METHOD)nm_init_transposed, 0);
224
+ rb_define_protected_method(cNMatrix, "clone_transpose", (METHOD)nm_init_transposed, 0);
173
225
 
174
226
  rb_define_method(cNMatrix, "dtype", (METHOD)nm_dtype, 0);
175
227
  rb_define_method(cNMatrix, "stype", (METHOD)nm_stype, 0);
@@ -198,8 +250,10 @@ void Init_nmatrix() {
198
250
  rb_define_protected_method(cNMatrix, "__dense_map_pair__", (METHOD)nm_dense_map_pair, 1);
199
251
  rb_define_method(cNMatrix, "each_with_indices", (METHOD)nm_each_with_indices, 0);
200
252
  rb_define_method(cNMatrix, "each_stored_with_indices", (METHOD)nm_each_stored_with_indices, 0);
253
+ rb_define_method(cNMatrix, "map_stored", (METHOD)nm_map_stored, 0);
201
254
  rb_define_method(cNMatrix, "each_ordered_stored_with_indices", (METHOD)nm_each_ordered_stored_with_indices, 0);
202
255
  rb_define_protected_method(cNMatrix, "__list_map_merged_stored__", (METHOD)nm_list_map_merged_stored, 2);
256
+ rb_define_protected_method(cNMatrix, "__list_map_stored__", (METHOD)nm_list_map_stored, 1);
203
257
  rb_define_protected_method(cNMatrix, "__yale_map_merged_stored__", (METHOD)nm_yale_map_merged_stored, 2);
204
258
  rb_define_protected_method(cNMatrix, "__yale_map_stored__", (METHOD)nm_yale_map_stored, 0);
205
259
  rb_define_protected_method(cNMatrix, "__yale_stored_diagonal_each_with_indices__", (METHOD)nm_yale_stored_diagonal_each_with_indices, 0);
@@ -214,6 +268,32 @@ void Init_nmatrix() {
214
268
  rb_define_method(cNMatrix, "**", (METHOD)nm_ew_power, 1);
215
269
  rb_define_method(cNMatrix, "%", (METHOD)nm_ew_mod, 1);
216
270
 
271
+ rb_define_method(cNMatrix, "atan2", (METHOD)nm_noncom_ew_atan2, -1);
272
+ rb_define_method(cNMatrix, "ldexp", (METHOD)nm_noncom_ew_ldexp, -1);
273
+ rb_define_method(cNMatrix, "hypot", (METHOD)nm_noncom_ew_hypot, -1);
274
+
275
+ rb_define_method(cNMatrix, "sin", (METHOD)nm_unary_sin, 0);
276
+ rb_define_method(cNMatrix, "cos", (METHOD)nm_unary_cos, 0);
277
+ rb_define_method(cNMatrix, "tan", (METHOD)nm_unary_tan, 0);
278
+ rb_define_method(cNMatrix, "asin", (METHOD)nm_unary_asin, 0);
279
+ rb_define_method(cNMatrix, "acos", (METHOD)nm_unary_acos, 0);
280
+ rb_define_method(cNMatrix, "atan", (METHOD)nm_unary_atan, 0);
281
+ rb_define_method(cNMatrix, "sinh", (METHOD)nm_unary_sinh, 0);
282
+ rb_define_method(cNMatrix, "cosh", (METHOD)nm_unary_cosh, 0);
283
+ rb_define_method(cNMatrix, "tanh", (METHOD)nm_unary_tanh, 0);
284
+ rb_define_method(cNMatrix, "asinh", (METHOD)nm_unary_asinh, 0);
285
+ rb_define_method(cNMatrix, "acosh", (METHOD)nm_unary_acosh, 0);
286
+ rb_define_method(cNMatrix, "atanh", (METHOD)nm_unary_atanh, 0);
287
+ rb_define_method(cNMatrix, "exp", (METHOD)nm_unary_exp, 0);
288
+ rb_define_method(cNMatrix, "log2", (METHOD)nm_unary_log2, 0);
289
+ rb_define_method(cNMatrix, "log10", (METHOD)nm_unary_log10, 0);
290
+ rb_define_method(cNMatrix, "sqrt", (METHOD)nm_unary_sqrt, 0);
291
+ rb_define_method(cNMatrix, "erf", (METHOD)nm_unary_erf, 0);
292
+ rb_define_method(cNMatrix, "erfc", (METHOD)nm_unary_erfc, 0);
293
+ rb_define_method(cNMatrix, "cbrt", (METHOD)nm_unary_cbrt, 0);
294
+ rb_define_method(cNMatrix, "gamma", (METHOD)nm_unary_gamma, 0);
295
+ rb_define_method(cNMatrix, "log", (METHOD)nm_unary_log, -1);
296
+
217
297
  rb_define_method(cNMatrix, "=~", (METHOD)nm_ew_eqeq, 1);
218
298
  rb_define_method(cNMatrix, "!~", (METHOD)nm_ew_neq, 1);
219
299
  rb_define_method(cNMatrix, "<=", (METHOD)nm_ew_leq, 1);
@@ -283,9 +363,9 @@ void Init_nmatrix() {
283
363
  * Slice constructor.
284
364
  */
285
365
  static SLICE* alloc_slice(size_t dim) {
286
- SLICE* slice = ALLOC(SLICE);
287
- slice->coords = ALLOC_N(size_t, dim);
288
- slice->lengths = ALLOC_N(size_t, dim);
366
+ SLICE* slice = NM_ALLOC(SLICE);
367
+ slice->coords = NM_ALLOC_N(size_t, dim);
368
+ slice->lengths = NM_ALLOC_N(size_t, dim);
289
369
  return slice;
290
370
  }
291
371
 
@@ -294,9 +374,9 @@ static SLICE* alloc_slice(size_t dim) {
294
374
  * Slice destructor.
295
375
  */
296
376
  static void free_slice(SLICE* slice) {
297
- xfree(slice->coords);
298
- xfree(slice->lengths);
299
- xfree(slice);
377
+ NM_FREE(slice->coords);
378
+ NM_FREE(slice->lengths);
379
+ NM_FREE(slice);
300
380
  }
301
381
 
302
382
 
@@ -304,7 +384,7 @@ static void free_slice(SLICE* slice) {
304
384
  * Allocator.
305
385
  */
306
386
  static VALUE nm_alloc(VALUE klass) {
307
- NMATRIX* mat = ALLOC(NMATRIX);
387
+ NMATRIX* mat = NM_ALLOC(NMATRIX);
308
388
  mat->storage = NULL;
309
389
 
310
390
  // DO NOT MARK This STRUCT. It has no storage allocated, and no stype, so mark will do an invalid something.
@@ -320,6 +400,7 @@ static VALUE nm_alloc(VALUE klass) {
320
400
  * just return the original matrix's capacity.
321
401
  */
322
402
  static VALUE nm_capacity(VALUE self) {
403
+ NM_CONSERVATIVE(nm_register_value(self));
323
404
  VALUE cap;
324
405
 
325
406
  switch(NM_STYPE(self)) {
@@ -336,9 +417,11 @@ static VALUE nm_capacity(VALUE self) {
336
417
  break;
337
418
 
338
419
  default:
420
+ NM_CONSERVATIVE(nm_unregister_value(self));
339
421
  rb_raise(nm_eStorageTypeError, "unrecognized stype in nm_capacity()");
340
422
  }
341
423
 
424
+ NM_CONSERVATIVE(nm_unregister_value(self));
342
425
  return cap;
343
426
  }
344
427
 
@@ -363,7 +446,7 @@ void nm_delete(NMATRIX* mat) {
363
446
  };
364
447
  ttable[mat->stype](mat->storage);
365
448
 
366
- xfree(mat);
449
+ NM_FREE(mat);
367
450
  }
368
451
 
369
452
  /*
@@ -377,33 +460,186 @@ void nm_delete_ref(NMATRIX* mat) {
377
460
  };
378
461
  ttable[mat->stype](mat->storage);
379
462
 
380
- xfree(mat);
463
+ NM_FREE(mat);
464
+ }
465
+
466
+
467
+ /**
468
+ * These variables hold a linked list of VALUEs that are registered to be in
469
+ * use by nmatrix so that they can be marked when GC runs.
470
+ */
471
+ static VALUE* gc_value_holder = NULL;
472
+ static nm_gc_holder* gc_value_holder_struct = NULL;
473
+ static nm_gc_holder* allocated_pool = NULL; // an object pool for linked list nodes; using pooling is in some cases a substantial performance improvement
474
+
475
+ /**
476
+ * GC Marking function for the values that have been registered.
477
+ */
478
+ static void __nm_mark_value_container(nm_gc_holder* gc_value_holder_struct) {
479
+ if (gc_value_holder_struct && gc_value_holder_struct->start) {
480
+ nm_gc_ll_node* curr = gc_value_holder_struct->start;
481
+ while (curr) {
482
+ rb_gc_mark_locations(curr->val, curr->val + curr->n);
483
+ curr = curr->next;
484
+ }
485
+ }
486
+ }
487
+
488
+ /**
489
+ * Initializes the linked list of in-use VALUEs if it hasn't been done
490
+ * already.
491
+ */
492
+ static void __nm_initialize_value_container() {
493
+ if (gc_value_holder == NULL) {
494
+ gc_value_holder_struct = NM_ALLOC_NONRUBY(nm_gc_holder);
495
+ allocated_pool = NM_ALLOC_NONRUBY(nm_gc_holder);
496
+ gc_value_holder = NM_ALLOC_NONRUBY(VALUE);
497
+ gc_value_holder_struct->start = NULL;
498
+ allocated_pool->start = NULL;
499
+ *gc_value_holder = Data_Wrap_Struct(cNMatrix_GC_holder, __nm_mark_value_container, NULL, gc_value_holder_struct);
500
+ rb_global_variable(gc_value_holder);
501
+ }
381
502
  }
382
503
 
383
504
  /*
384
- * Register the addresses of an array of VALUEs with the gc to avoid collection
505
+ * Register an array of VALUEs to avoid their collection
385
506
  * while using them internally.
386
507
  */
387
508
  void nm_register_values(VALUE* values, size_t n) {
509
+ if (!gc_value_holder_struct)
510
+ __nm_initialize_value_container();
388
511
  if (values) {
389
- for (size_t i = n; i-- > 0;) {
390
- rb_gc_register_address(values + i);
512
+ nm_gc_ll_node* to_insert = NULL;
513
+ if (allocated_pool->start) {
514
+ to_insert = allocated_pool->start;
515
+ allocated_pool->start = to_insert->next;
516
+ } else {
517
+ to_insert = NM_ALLOC_NONRUBY(nm_gc_ll_node);
391
518
  }
519
+ to_insert->val = values;
520
+ to_insert->n = n;
521
+ to_insert->next = gc_value_holder_struct->start;
522
+ gc_value_holder_struct->start = to_insert;
392
523
  }
393
524
  }
394
525
 
395
526
  /*
396
- * Unregister the addresses of an array of VALUEs with the gc to allow normal
527
+ * Unregister an array of VALUEs with the gc to allow normal
397
528
  * garbage collection to occur again.
398
529
  */
399
530
  void nm_unregister_values(VALUE* values, size_t n) {
400
531
  if (values) {
401
- for (size_t i = n; i-- > 0;) {
402
- rb_gc_unregister_address(values + i);
532
+ if (gc_value_holder_struct) {
533
+ nm_gc_ll_node* curr = gc_value_holder_struct->start;
534
+ nm_gc_ll_node* last = NULL;
535
+ while (curr) {
536
+ if (curr->val == values) {
537
+ if (last) {
538
+ last->next = curr->next;
539
+ } else {
540
+ gc_value_holder_struct->start = curr->next;
541
+ }
542
+ curr->next = allocated_pool->start;
543
+ curr->val = NULL;
544
+ curr->n = 0;
545
+ allocated_pool->start = curr;
546
+ break;
547
+ }
548
+ last = curr;
549
+ curr = curr->next;
550
+ }
551
+ }
552
+ }
553
+ }
554
+
555
+ /**
556
+ * Register a single VALUE as in use to avoid garbage collection.
557
+ */
558
+ void nm_register_value(VALUE& val) {
559
+ nm_register_values(&val, 1);
560
+ }
561
+
562
+ /**
563
+ * Unregister a single VALUE to allow normal garbage collection.
564
+ */
565
+ void nm_unregister_value(VALUE& val) {
566
+ nm_unregister_values(&val, 1);
567
+ }
568
+
569
+ /**
570
+ * Removes all instances of a single VALUE in the gc list. This can be
571
+ * dangerous. Primarily used when something is about to be
572
+ * freed and replaced so that any residual registrations won't access after
573
+ * free.
574
+ **/
575
+ void nm_completely_unregister_value(VALUE& val) {
576
+ if (gc_value_holder_struct) {
577
+ nm_gc_ll_node* curr = gc_value_holder_struct->start;
578
+ nm_gc_ll_node* last = NULL;
579
+ while (curr) {
580
+ if (curr->val == &val) {
581
+ if (last) {
582
+ last->next = curr->next;
583
+ } else {
584
+ gc_value_holder_struct->start = curr->next;
585
+ }
586
+ nm_gc_ll_node* temp_next = curr->next;
587
+ curr->next = allocated_pool->start;
588
+ curr->val = NULL;
589
+ curr->n = 0;
590
+ allocated_pool->start = curr;
591
+ curr = temp_next;
592
+ } else {
593
+ last = curr;
594
+ curr = curr->next;
595
+ }
403
596
  }
404
597
  }
405
598
  }
406
599
 
600
+
601
+
602
+ /**
603
+ * Register a STORAGE struct of the supplied stype to avoid garbage collection
604
+ * of its internals.
605
+ *
606
+ * Delegates to the storage-specific methods. They will check dtype and ignore
607
+ * non-rubyobject dtypes, so it's safe to pass any storage in.
608
+ */
609
+ void nm_register_storage(nm::stype_t stype, const STORAGE* storage) {
610
+ STYPE_REGISTER_TABLE(ttable);
611
+ ttable[stype](storage);
612
+ }
613
+
614
+ /**
615
+ * Unregister a STORAGE struct of the supplied stype to allow normal garbage collection
616
+ * of its internals.
617
+ *
618
+ * Delegates to the storage-specific methods. They will check dtype and ignore
619
+ * non-rubyobject dtypes, so it's safe to pass any storage in.
620
+ *
621
+ */
622
+ void nm_unregister_storage(nm::stype_t stype, const STORAGE* storage) {
623
+ STYPE_UNREGISTER_TABLE(ttable);
624
+ ttable[stype](storage);
625
+ }
626
+
627
+ /**
628
+ * Registers an NMATRIX struct to avoid garbage collection of its internals.
629
+ */
630
+ void nm_register_nmatrix(NMATRIX* nmatrix) {
631
+ if (nmatrix)
632
+ nm_register_storage(nmatrix->stype, nmatrix->storage);
633
+ }
634
+
635
+ /**
636
+ * Unregisters an NMATRIX struct to allow garbage collection of its internals.
637
+ */
638
+ void nm_unregister_nmatrix(NMATRIX* nmatrix) {
639
+ if (nmatrix)
640
+ nm_unregister_storage(nmatrix->stype, nmatrix->storage);
641
+ }
642
+
407
643
  /*
408
644
  * call-seq:
409
645
  * dtype -> Symbol
@@ -427,7 +663,6 @@ static VALUE nm_dtype(VALUE self) {
427
663
  * This is a singleton method on NMatrix, e.g., NMatrix.upcast(:int32, :int64)
428
664
  */
429
665
  static VALUE nm_upcast(VALUE self, VALUE t1, VALUE t2) {
430
-
431
666
  nm::dtype_t d1 = nm_dtype_from_rbsymbol(t1),
432
667
  d2 = nm_dtype_from_rbsymbol(t2);
433
668
 
@@ -462,18 +697,26 @@ static VALUE nm_default_value(VALUE self) {
462
697
  * Iterate over all entries of any matrix in standard storage order (as with #each), and include the indices.
463
698
  */
464
699
  static VALUE nm_each_with_indices(VALUE nmatrix) {
465
- volatile VALUE nm = nmatrix;
700
+ NM_CONSERVATIVE(nm_register_value(nmatrix));
701
+ VALUE to_return = Qnil;
466
702
 
467
- switch(NM_STYPE(nm)) {
703
+ switch(NM_STYPE(nmatrix)) {
468
704
  case nm::YALE_STORE:
469
- return nm_yale_each_with_indices(nm);
705
+ to_return = nm_yale_each_with_indices(nmatrix);
706
+ break;
470
707
  case nm::DENSE_STORE:
471
- return nm_dense_each_with_indices(nm);
708
+ to_return = nm_dense_each_with_indices(nmatrix);
709
+ break;
472
710
  case nm::LIST_STORE:
473
- return nm_list_each_with_indices(nm, false);
711
+ to_return = nm_list_each_with_indices(nmatrix, false);
712
+ break;
474
713
  default:
714
+ NM_CONSERVATIVE(nm_unregister_value(nmatrix));
475
715
  rb_raise(nm_eDataTypeError, "Not a proper storage type");
476
716
  }
717
+
718
+ NM_CONSERVATIVE(nm_unregister_value(nmatrix));
719
+ return to_return;
477
720
  }
478
721
 
479
722
  /*
@@ -485,21 +728,60 @@ static VALUE nm_each_with_indices(VALUE nmatrix) {
485
728
  * i, j, ..., and the entry itself.
486
729
  */
487
730
  static VALUE nm_each_stored_with_indices(VALUE nmatrix) {
488
- volatile VALUE nm = nmatrix;
731
+ NM_CONSERVATIVE(nm_register_value(nmatrix));
732
+ VALUE to_return = Qnil;
489
733
 
490
- switch(NM_STYPE(nm)) {
734
+ switch(NM_STYPE(nmatrix)) {
491
735
  case nm::YALE_STORE:
492
- return nm_yale_each_stored_with_indices(nm);
736
+ to_return = nm_yale_each_stored_with_indices(nmatrix);
737
+ break;
493
738
  case nm::DENSE_STORE:
494
- return nm_dense_each_with_indices(nm);
739
+ to_return = nm_dense_each_with_indices(nmatrix);
740
+ break;
495
741
  case nm::LIST_STORE:
496
- return nm_list_each_with_indices(nm, true);
742
+ to_return = nm_list_each_with_indices(nmatrix, true);
743
+ break;
497
744
  default:
745
+ NM_CONSERVATIVE(nm_unregister_value(nmatrix));
498
746
  rb_raise(nm_eDataTypeError, "Not a proper storage type");
499
747
  }
748
+
749
+ NM_CONSERVATIVE(nm_unregister_value(nmatrix));
750
+ return to_return;
500
751
  }
501
752
 
502
753
 
754
+ /*
755
+ * call-seq:
756
+ * map_stored -> Enumerator
757
+ *
758
+ * Iterate over the stored entries of any matrix. For dense and yale, this iterates over non-zero
759
+ * entries; for list, this iterates over non-default entries. Yields dim+1 values for each entry:
760
+ * i, j, ..., and the entry itself.
761
+ */
762
+ static VALUE nm_map_stored(VALUE nmatrix) {
763
+ NM_CONSERVATIVE(nm_register_value(nmatrix));
764
+ VALUE to_return = Qnil;
765
+
766
+ switch(NM_STYPE(nmatrix)) {
767
+ case nm::YALE_STORE:
768
+ to_return = nm_yale_map_stored(nmatrix);
769
+ break;
770
+ case nm::DENSE_STORE:
771
+ to_return = nm_dense_map(nmatrix);
772
+ break;
773
+ case nm::LIST_STORE:
774
+ to_return = nm_list_map_stored(nmatrix, Qnil);
775
+ break;
776
+ default:
777
+ NM_CONSERVATIVE(nm_unregister_value(nmatrix));
778
+ rb_raise(nm_eDataTypeError, "Not a proper storage type");
779
+ }
780
+
781
+ NM_CONSERVATIVE(nm_unregister_value(nmatrix));
782
+ return to_return;
783
+ }
784
+
503
785
  /*
504
786
  * call-seq:
505
787
  * each_ordered_stored_with_indices -> Enumerator
@@ -508,18 +790,26 @@ static VALUE nm_each_stored_with_indices(VALUE nmatrix) {
508
790
  * than storage ordering, which only matters if your matrix is Yale.
509
791
  */
510
792
  static VALUE nm_each_ordered_stored_with_indices(VALUE nmatrix) {
511
- volatile VALUE nm = nmatrix;
793
+ NM_CONSERVATIVE(nm_register_value(nmatrix));
794
+ VALUE to_return = Qnil;
512
795
 
513
- switch(NM_STYPE(nm)) {
796
+ switch(NM_STYPE(nmatrix)) {
514
797
  case nm::YALE_STORE:
515
- return nm_yale_each_ordered_stored_with_indices(nm);
798
+ to_return = nm_yale_each_ordered_stored_with_indices(nmatrix);
799
+ break;
516
800
  case nm::DENSE_STORE:
517
- return nm_dense_each_with_indices(nm);
801
+ to_return = nm_dense_each_with_indices(nmatrix);
802
+ break;
518
803
  case nm::LIST_STORE:
519
- return nm_list_each_with_indices(nm, true);
804
+ to_return = nm_list_each_with_indices(nmatrix, true);
805
+ break;
520
806
  default:
807
+ NM_CONSERVATIVE(nm_unregister_value(nmatrix));
521
808
  rb_raise(nm_eDataTypeError, "Not a proper storage type");
522
809
  }
810
+
811
+ NM_CONSERVATIVE(nm_unregister_value(nmatrix));
812
+ return to_return;
523
813
  }
524
814
 
525
815
 
@@ -530,8 +820,13 @@ static VALUE nm_each_ordered_stored_with_indices(VALUE nmatrix) {
530
820
  * For elementwise, use =~ instead.
531
821
  *
532
822
  * This method will raise an exception if dimensions do not match.
823
+ *
824
+ * When stypes differ, this function calls a protected Ruby method.
533
825
  */
534
826
  static VALUE nm_eqeq(VALUE left, VALUE right) {
827
+ NM_CONSERVATIVE(nm_register_value(left));
828
+ NM_CONSERVATIVE(nm_register_value(right));
829
+
535
830
  NMATRIX *l, *r;
536
831
 
537
832
  CheckNMatrixType(left);
@@ -540,23 +835,35 @@ static VALUE nm_eqeq(VALUE left, VALUE right) {
540
835
  UnwrapNMatrix(left, l);
541
836
  UnwrapNMatrix(right, r);
542
837
 
543
- if (l->stype != r->stype)
544
- rb_raise(rb_eNotImpError, "comparison between different matrix stypes not yet implemented");
545
-
546
838
  bool result = false;
547
839
 
548
- switch(l->stype) {
549
- case nm::DENSE_STORE:
550
- result = nm_dense_storage_eqeq(l->storage, r->storage);
551
- break;
552
- case nm::LIST_STORE:
553
- result = nm_list_storage_eqeq(l->storage, r->storage);
554
- break;
555
- case nm::YALE_STORE:
556
- result = nm_yale_storage_eqeq(l->storage, r->storage);
557
- break;
840
+ if (l->stype != r->stype) { // DIFFERENT STYPES
841
+
842
+ if (l->stype == nm::DENSE_STORE)
843
+ result = rb_funcall(left, rb_intern("dense_eql_sparse?"), 1, right);
844
+ else if (r->stype == nm::DENSE_STORE)
845
+ result = rb_funcall(right, rb_intern("dense_eql_sparse?"), 1, left);
846
+ else
847
+ result = rb_funcall(left, rb_intern("sparse_eql_sparse?"), 1, right);
848
+
849
+ } else {
850
+
851
+ switch(l->stype) { // SAME STYPES
852
+ case nm::DENSE_STORE:
853
+ result = nm_dense_storage_eqeq(l->storage, r->storage);
854
+ break;
855
+ case nm::LIST_STORE:
856
+ result = nm_list_storage_eqeq(l->storage, r->storage);
857
+ break;
858
+ case nm::YALE_STORE:
859
+ result = nm_yale_storage_eqeq(l->storage, r->storage);
860
+ break;
861
+ }
558
862
  }
559
863
 
864
+ NM_CONSERVATIVE(nm_unregister_value(left));
865
+ NM_CONSERVATIVE(nm_unregister_value(right));
866
+
560
867
  return result ? Qtrue : Qfalse;
561
868
  }
562
869
 
@@ -573,6 +880,60 @@ DEF_ELEMENTWISE_RUBY_ACCESSOR(GEQ, geq)
573
880
  DEF_ELEMENTWISE_RUBY_ACCESSOR(LT, lt)
574
881
  DEF_ELEMENTWISE_RUBY_ACCESSOR(GT, gt)
575
882
 
883
+ DEF_UNARY_RUBY_ACCESSOR(SIN, sin)
884
+ DEF_UNARY_RUBY_ACCESSOR(COS, cos)
885
+ DEF_UNARY_RUBY_ACCESSOR(TAN, tan)
886
+ DEF_UNARY_RUBY_ACCESSOR(ASIN, asin)
887
+ DEF_UNARY_RUBY_ACCESSOR(ACOS, acos)
888
+ DEF_UNARY_RUBY_ACCESSOR(ATAN, atan)
889
+ DEF_UNARY_RUBY_ACCESSOR(SINH, sinh)
890
+ DEF_UNARY_RUBY_ACCESSOR(COSH, cosh)
891
+ DEF_UNARY_RUBY_ACCESSOR(TANH, tanh)
892
+ DEF_UNARY_RUBY_ACCESSOR(ASINH, asinh)
893
+ DEF_UNARY_RUBY_ACCESSOR(ACOSH, acosh)
894
+ DEF_UNARY_RUBY_ACCESSOR(ATANH, atanh)
895
+ DEF_UNARY_RUBY_ACCESSOR(EXP, exp)
896
+ DEF_UNARY_RUBY_ACCESSOR(LOG2, log2)
897
+ DEF_UNARY_RUBY_ACCESSOR(LOG10, log10)
898
+ DEF_UNARY_RUBY_ACCESSOR(SQRT, sqrt)
899
+ DEF_UNARY_RUBY_ACCESSOR(ERF, erf)
900
+ DEF_UNARY_RUBY_ACCESSOR(ERFC, erfc)
901
+ DEF_UNARY_RUBY_ACCESSOR(CBRT, cbrt)
902
+ DEF_UNARY_RUBY_ACCESSOR(GAMMA, gamma)
903
+
904
+ DEF_NONCOM_ELEMENTWISE_RUBY_ACCESSOR(ATAN2, atan2)
905
+ DEF_NONCOM_ELEMENTWISE_RUBY_ACCESSOR(LDEXP, ldexp)
906
+ DEF_NONCOM_ELEMENTWISE_RUBY_ACCESSOR(HYPOT, hypot)
907
+
908
+ static VALUE nm_unary_log(int argc, VALUE* argv, VALUE self) {
909
+ NM_CONSERVATIVE(nm_register_values(argv, argc));
910
+ const double default_log_base = exp(1.0);
911
+ NMATRIX* left;
912
+ UnwrapNMatrix(self, left);
913
+ std::string sym;
914
+
915
+ switch(left->stype) {
916
+ case nm::DENSE_STORE:
917
+ sym = "__dense_unary_log__";
918
+ break;
919
+ case nm::YALE_STORE:
920
+ sym = "__yale_unary_log__";
921
+ break;
922
+ case nm::LIST_STORE:
923
+ sym = "__list_unary_log__";
924
+ break;
925
+ }
926
+ NM_CONSERVATIVE(nm_unregister_values(argv, argc));
927
+ if (argc > 0) { //supplied a base
928
+ return rb_funcall(self, rb_intern(sym.c_str()), 1, argv[0]);
929
+ }
930
+ return rb_funcall(self, rb_intern(sym.c_str()), 1, nm::RubyObject(default_log_base).rval);
931
+ }
932
+
933
+ //DEF_ELEMENTWISE_RUBY_ACCESSOR(ATAN2, atan2)
934
+ //DEF_ELEMENTWISE_RUBY_ACCESSOR(LDEXP, ldexp)
935
+ //DEF_ELEMENTWISE_RUBY_ACCESSOR(HYPOT, hypot)
936
+
576
937
  /*
577
938
  * call-seq:
578
939
  * hermitian? -> Boolean
@@ -598,6 +959,7 @@ static VALUE nm_hermitian(VALUE self) {
598
959
  * Bang should imply that no copy is being made, even temporarily.
599
960
  */
600
961
  static VALUE nm_complex_conjugate_bang(VALUE self) {
962
+
601
963
  NMATRIX* m;
602
964
  void* elem;
603
965
  size_t size, p;
@@ -643,11 +1005,13 @@ static VALUE nm_complex_conjugate_bang(VALUE self) {
643
1005
  * need to worry about deleting it.
644
1006
  */
645
1007
  NMATRIX* nm_create(nm::stype_t stype, STORAGE* storage) {
646
- NMATRIX* mat = ALLOC(NMATRIX);
1008
+ nm_register_storage(stype, storage);
1009
+ NMATRIX* mat = NM_ALLOC(NMATRIX);
647
1010
 
648
1011
  mat->stype = stype;
649
1012
  mat->storage = storage;
650
1013
 
1014
+ nm_unregister_storage(stype, storage);
651
1015
  return mat;
652
1016
  }
653
1017
 
@@ -655,6 +1019,8 @@ NMATRIX* nm_create(nm::stype_t stype, STORAGE* storage) {
655
1019
  * @see nm_init
656
1020
  */
657
1021
  static VALUE nm_init_new_version(int argc, VALUE* argv, VALUE self) {
1022
+ NM_CONSERVATIVE(nm_register_values(argv, argc));
1023
+ NM_CONSERVATIVE(nm_register_value(self));
658
1024
  VALUE shape_ary, initial_ary, hash;
659
1025
  //VALUE shape_ary, default_val, capacity, initial_ary, dtype_sym, stype_sym;
660
1026
  // Mandatory args: shape, dtype, stype
@@ -676,7 +1042,9 @@ static VALUE nm_init_new_version(int argc, VALUE* argv, VALUE self) {
676
1042
  }
677
1043
  }
678
1044
  #endif
679
-
1045
+ NM_CONSERVATIVE(nm_register_value(shape_ary));
1046
+ NM_CONSERVATIVE(nm_register_value(initial_ary));
1047
+ NM_CONSERVATIVE(nm_register_value(hash));
680
1048
  // Get the shape.
681
1049
  size_t dim;
682
1050
  size_t* shape = interpret_shape(shape_ary, &dim);
@@ -692,7 +1060,9 @@ static VALUE nm_init_new_version(int argc, VALUE* argv, VALUE self) {
692
1060
  dtype_sym = rb_hash_aref(hash, ID2SYM(nm_rb_dtype));
693
1061
  stype_sym = rb_hash_aref(hash, ID2SYM(nm_rb_stype));
694
1062
  capacity_num = rb_hash_aref(hash, ID2SYM(nm_rb_capacity));
1063
+ NM_CONSERVATIVE(nm_register_value(capacity_num));
695
1064
  default_val_num = rb_hash_aref(hash, ID2SYM(nm_rb_default));
1065
+ NM_CONSERVATIVE(nm_register_value(default_val_num));
696
1066
  }
697
1067
 
698
1068
  // stype ||= :dense
@@ -724,6 +1094,10 @@ static VALUE nm_init_new_version(int argc, VALUE* argv, VALUE self) {
724
1094
  init = RARRAY_LEN(initial_ary) == 1 ? rubyobj_to_cval(rb_ary_entry(initial_ary, 0), dtype) : NULL;
725
1095
  else
726
1096
  init = rubyobj_to_cval(initial_ary, dtype);
1097
+
1098
+ if (dtype == nm::RUBYOBJ) {
1099
+ nm_register_values(reinterpret_cast<VALUE*>(init), 1);
1100
+ }
727
1101
  }
728
1102
 
729
1103
  // capacity = h[:capacity] || 0
@@ -732,47 +1106,55 @@ static VALUE nm_init_new_version(int argc, VALUE* argv, VALUE self) {
732
1106
  }
733
1107
 
734
1108
  if (!NIL_P(initial_ary)) {
735
- v = interpret_initial_value(initial_ary, dtype);
736
-
1109
+
737
1110
  if (TYPE(initial_ary) == T_ARRAY) v_size = RARRAY_LEN(initial_ary);
738
1111
  else v_size = 1;
1112
+
1113
+ v = interpret_initial_value(initial_ary, dtype);
1114
+
1115
+ if (dtype == nm::RUBYOBJ) {
1116
+ nm_register_values(reinterpret_cast<VALUE*>(v), v_size);
1117
+ }
739
1118
  }
740
1119
 
741
1120
  // :object matrices MUST be initialized.
742
1121
  else if (stype == nm::DENSE_STORE && dtype == nm::RUBYOBJ) {
743
1122
  // Pretend [nil] was passed for RUBYOBJ.
744
- v = ALLOC(VALUE);
1123
+ v = NM_ALLOC(VALUE);
745
1124
  *(VALUE*)v = Qnil;
746
1125
 
747
1126
  v_size = 1;
748
1127
 
749
1128
  }
750
1129
 
751
- NMATRIX* nmatrix;
1130
+ NMATRIX* nmatrix;
752
1131
  UnwrapNMatrix(self, nmatrix);
753
1132
 
754
1133
  nmatrix->stype = stype;
755
1134
 
756
1135
  switch (stype) {
757
- case nm::DENSE_STORE:
758
- nmatrix->storage = (STORAGE*)nm_dense_storage_create(dtype, shape, dim, v, v_size);
759
- break;
1136
+ case nm::DENSE_STORE:
1137
+ nmatrix->storage = (STORAGE*)nm_dense_storage_create(dtype, shape, dim, v, v_size);
1138
+ break;
760
1139
 
761
- case nm::LIST_STORE:
762
- nmatrix->storage = (STORAGE*)nm_list_storage_create(dtype, shape, dim, init);
763
- break;
1140
+ case nm::LIST_STORE:
1141
+ nmatrix->storage = (STORAGE*)nm_list_storage_create(dtype, shape, dim, init);
1142
+ break;
764
1143
 
765
- case nm::YALE_STORE:
766
- nmatrix->storage = (STORAGE*)nm_yale_storage_create(dtype, shape, dim, capacity);
767
- nm_yale_storage_init((YALE_STORAGE*)(nmatrix->storage), init);
768
- break;
1144
+ case nm::YALE_STORE:
1145
+ nmatrix->storage = (STORAGE*)nm_yale_storage_create(dtype, shape, dim, capacity);
1146
+ nm_yale_storage_init((YALE_STORAGE*)(nmatrix->storage), init);
1147
+ break;
769
1148
  }
770
1149
 
1150
+ nm_register_storage(stype, nmatrix->storage);
1151
+
771
1152
  // If we're not creating a dense, and an initial array was provided, use that and multi-slice-set
772
1153
  // to set the contents of the matrix right now.
773
1154
  if (stype != nm::DENSE_STORE && v_size > 1) {
774
- VALUE* slice_argv = ALLOCA_N(VALUE, dim);
775
- size_t* tmp_shape = ALLOC_N(size_t, dim);
1155
+ VALUE* slice_argv = NM_ALLOCA_N(VALUE, dim);
1156
+ nm_register_values(slice_argv, dim);
1157
+ size_t* tmp_shape = NM_ALLOC_N(size_t, dim);
776
1158
  for (size_t m = 0; m < dim; ++m) {
777
1159
  slice_argv[m] = ID2SYM(nm_rb_mul); // :* -- full range
778
1160
  tmp_shape[m] = shape[m];
@@ -780,20 +1162,47 @@ static VALUE nm_init_new_version(int argc, VALUE* argv, VALUE self) {
780
1162
 
781
1163
  SLICE* slice = get_slice(dim, dim, slice_argv, shape);
782
1164
  // Create a temporary dense matrix and use it to do a slice assignment on self.
783
- NMATRIX* tmp = nm_create(nm::DENSE_STORE, (STORAGE*)nm_dense_storage_create(dtype, tmp_shape, dim, v, v_size));
784
- volatile VALUE rb_tmp = Data_Wrap_Struct(CLASS_OF(self), nm_mark, nm_delete, tmp);
1165
+ NMATRIX* tmp = nm_create(nm::DENSE_STORE, (STORAGE*)nm_dense_storage_create(dtype, tmp_shape, dim, v, v_size));
1166
+ nm_register_nmatrix(tmp);
1167
+ VALUE rb_tmp = Data_Wrap_Struct(CLASS_OF(self), nm_mark, nm_delete, tmp);
1168
+ nm_unregister_nmatrix(tmp);
1169
+ nm_register_value(rb_tmp);
785
1170
  if (stype == nm::YALE_STORE) nm_yale_storage_set(self, slice, rb_tmp);
786
1171
  else nm_list_storage_set(self, slice, rb_tmp);
787
1172
 
788
1173
  free_slice(slice);
789
1174
 
790
1175
  // We need to free v if it's not the same size as tmp -- because tmp will have made a copy instead.
791
- if (nm_storage_count_max_elements(tmp->storage) != v_size)
792
- xfree(v);
1176
+ //if (nm_storage_count_max_elements(tmp->storage) != v_size)
1177
+ // NM_FREE(v);
793
1178
 
794
1179
  // nm_delete(tmp); // This seems to enrage the garbage collector (because rb_tmp is still available). It'd be better if we could force it to free immediately, but no sweat.
1180
+
1181
+ nm_unregister_value(rb_tmp);
1182
+ nm_unregister_values(slice_argv, dim);
1183
+ }
1184
+
1185
+ if (!NIL_P(initial_ary) && dtype == nm::RUBYOBJ) {
1186
+ nm_unregister_values(reinterpret_cast<VALUE*>(v), v_size);
795
1187
  }
796
1188
 
1189
+ if (stype != nm::DENSE_STORE && dtype == nm::RUBYOBJ) {
1190
+ nm_unregister_values(reinterpret_cast<VALUE*>(init), 1);
1191
+ }
1192
+
1193
+ if (!NIL_P(hash)) {
1194
+ NM_CONSERVATIVE(nm_unregister_value(capacity_num));
1195
+ NM_CONSERVATIVE(nm_unregister_value(default_val_num));
1196
+ }
1197
+
1198
+ NM_CONSERVATIVE(nm_unregister_value(shape_ary));
1199
+ NM_CONSERVATIVE(nm_unregister_value(initial_ary));
1200
+ NM_CONSERVATIVE(nm_unregister_value(hash));
1201
+
1202
+ NM_CONSERVATIVE(nm_unregister_value(self));
1203
+ NM_CONSERVATIVE(nm_unregister_values(argv, argc));
1204
+ nm_unregister_storage(stype, nmatrix->storage);
1205
+
797
1206
  return self;
798
1207
  }
799
1208
 
@@ -834,8 +1243,12 @@ static VALUE nm_init_new_version(int argc, VALUE* argv, VALUE self) {
834
1243
  * shortcuts.rb.
835
1244
  */
836
1245
  static VALUE nm_init(int argc, VALUE* argv, VALUE nm) {
837
-
1246
+ NM_CONSERVATIVE(nm_register_value(nm));
1247
+ NM_CONSERVATIVE(nm_register_values(argv, argc));
1248
+
838
1249
  if (argc <= 3) { // Call the new constructor unless all four arguments are given (or the 7-arg version is given)
1250
+ NM_CONSERVATIVE(nm_unregister_values(argv, argc));
1251
+ NM_CONSERVATIVE(nm_unregister_value(nm));
839
1252
  return nm_init_new_version(argc, argv, nm);
840
1253
  }
841
1254
 
@@ -854,16 +1267,20 @@ static VALUE nm_init(int argc, VALUE* argv, VALUE nm) {
854
1267
 
855
1268
  // If there are 7 arguments and Yale, refer to a different init function with fewer sanity checks.
856
1269
  if (argc == 7) {
857
- if (stype == nm::YALE_STORE) {
858
- return nm_init_yale_from_old_yale(argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], nm);
1270
+ if (stype == nm::YALE_STORE) {
1271
+ NM_CONSERVATIVE(nm_unregister_values(argv, argc));
1272
+ NM_CONSERVATIVE(nm_unregister_value(nm));
1273
+ return nm_init_yale_from_old_yale(argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], nm);
859
1274
 
860
- } else {
861
- rb_raise(rb_eArgError, "Expected 2-4 arguments (or 7 for internal Yale creation)");
862
- }
1275
+ } else {
1276
+ NM_CONSERVATIVE(nm_unregister_values(argv, argc));
1277
+ NM_CONSERVATIVE(nm_unregister_value(nm));
1278
+ rb_raise(rb_eArgError, "Expected 2-4 arguments (or 7 for internal Yale creation)");
1279
+ }
863
1280
  }
864
1281
 
865
- // 1: Array or Fixnum
866
- size_t dim;
1282
+ // 1: Array or Fixnum
1283
+ size_t dim;
867
1284
  size_t* shape = interpret_shape(argv[offset], &dim);
868
1285
 
869
1286
  // 2-3: dtype
@@ -895,7 +1312,7 @@ static VALUE nm_init(int argc, VALUE* argv, VALUE nm) {
895
1312
  */
896
1313
  if (dtype == nm::RUBYOBJ) {
897
1314
  // Pretend [nil] was passed for RUBYOBJ.
898
- init_val = ALLOC(VALUE);
1315
+ init_val = NM_ALLOC(VALUE);
899
1316
  *(VALUE*)init_val = Qnil;
900
1317
 
901
1318
  init_val_len = 1;
@@ -904,32 +1321,43 @@ static VALUE nm_init(int argc, VALUE* argv, VALUE nm) {
904
1321
  init_val = NULL;
905
1322
  }
906
1323
  } else if (stype == nm::LIST_STORE) {
907
- init_val = ALLOC_N(char, DTYPE_SIZES[dtype]);
1324
+ init_val = NM_ALLOC_N(char, DTYPE_SIZES[dtype]);
908
1325
  std::memset(init_val, 0, DTYPE_SIZES[dtype]);
909
1326
  }
910
1327
  }
911
1328
 
1329
+ if (dtype == nm::RUBYOBJ) {
1330
+ nm_register_values(reinterpret_cast<VALUE*>(init_val), init_val_len);
1331
+ }
1332
+
912
1333
  // TODO: Update to allow an array as the initial value.
913
- NMATRIX* nmatrix;
1334
+ NMATRIX* nmatrix;
914
1335
  UnwrapNMatrix(nm, nmatrix);
915
1336
 
916
1337
  nmatrix->stype = stype;
917
1338
 
918
1339
  switch (stype) {
919
- case nm::DENSE_STORE:
920
- nmatrix->storage = (STORAGE*)nm_dense_storage_create(dtype, shape, dim, init_val, init_val_len);
921
- break;
1340
+ case nm::DENSE_STORE:
1341
+ nmatrix->storage = (STORAGE*)nm_dense_storage_create(dtype, shape, dim, init_val, init_val_len);
1342
+ break;
922
1343
 
923
- case nm::LIST_STORE:
924
- nmatrix->storage = (STORAGE*)nm_list_storage_create(dtype, shape, dim, init_val);
925
- break;
1344
+ case nm::LIST_STORE:
1345
+ nmatrix->storage = (STORAGE*)nm_list_storage_create(dtype, shape, dim, init_val);
1346
+ break;
926
1347
 
927
- case nm::YALE_STORE:
928
- nmatrix->storage = (STORAGE*)nm_yale_storage_create(dtype, shape, dim, init_cap);
929
- nm_yale_storage_init((YALE_STORAGE*)(nmatrix->storage), NULL);
930
- break;
1348
+ case nm::YALE_STORE:
1349
+ nmatrix->storage = (STORAGE*)nm_yale_storage_create(dtype, shape, dim, init_cap);
1350
+ nm_yale_storage_init((YALE_STORAGE*)(nmatrix->storage), NULL);
1351
+ break;
1352
+ }
1353
+
1354
+ if (dtype == nm::RUBYOBJ) {
1355
+ nm_unregister_values(reinterpret_cast<VALUE*>(init_val), init_val_len);
931
1356
  }
932
1357
 
1358
+ NM_CONSERVATIVE(nm_unregister_values(argv, argc));
1359
+ NM_CONSERVATIVE(nm_unregister_value(nm));
1360
+
933
1361
  return nm;
934
1362
  }
935
1363
 
@@ -938,13 +1366,18 @@ static VALUE nm_init(int argc, VALUE* argv, VALUE nm) {
938
1366
  * Helper for nm_cast which uses the C types instead of the Ruby objects. Called by nm_cast.
939
1367
  */
940
1368
  NMATRIX* nm_cast_with_ctype_args(NMATRIX* self, nm::stype_t new_stype, nm::dtype_t new_dtype, void* init_ptr) {
941
- NMATRIX* lhs = ALLOC(NMATRIX);
1369
+
1370
+ nm_register_nmatrix(self);
1371
+
1372
+ NMATRIX* lhs = NM_ALLOC(NMATRIX);
942
1373
  lhs->stype = new_stype;
943
1374
 
944
1375
  // Copy the storage
945
1376
  CAST_TABLE(cast_copy);
946
1377
  lhs->storage = cast_copy[lhs->stype][self->stype](self->storage, new_dtype, init_ptr);
947
1378
 
1379
+ nm_unregister_nmatrix(self);
1380
+
948
1381
  return lhs;
949
1382
  }
950
1383
 
@@ -957,6 +1390,9 @@ NMATRIX* nm_cast_with_ctype_args(NMATRIX* self, nm::stype_t new_stype, nm::dtype
957
1390
  * Copy constructor for changing dtypes and stypes.
958
1391
  */
959
1392
  VALUE nm_cast(VALUE self, VALUE new_stype_symbol, VALUE new_dtype_symbol, VALUE init) {
1393
+ NM_CONSERVATIVE(nm_register_value(self));
1394
+ NM_CONSERVATIVE(nm_register_value(init));
1395
+
960
1396
  nm::dtype_t new_dtype = nm_dtype_from_rbsymbol(new_dtype_symbol);
961
1397
  nm::stype_t new_stype = nm_stype_from_rbsymbol(new_stype_symbol);
962
1398
 
@@ -965,16 +1401,27 @@ VALUE nm_cast(VALUE self, VALUE new_stype_symbol, VALUE new_dtype_symbol, VALUE
965
1401
 
966
1402
  UnwrapNMatrix( self, rhs );
967
1403
 
968
- void* init_ptr = ALLOCA_N(char, DTYPE_SIZES[new_dtype]);
1404
+ void* init_ptr = NM_ALLOCA_N(char, DTYPE_SIZES[new_dtype]);
969
1405
  rubyval_to_cval(init, new_dtype, init_ptr);
970
1406
 
971
- return Data_Wrap_Struct(CLASS_OF(self), nm_mark, nm_delete, nm_cast_with_ctype_args(rhs, new_stype, new_dtype, init_ptr));
1407
+ NMATRIX* m = nm_cast_with_ctype_args(rhs, new_stype, new_dtype, init_ptr);
1408
+ nm_register_nmatrix(m);
1409
+
1410
+ VALUE to_return = Data_Wrap_Struct(CLASS_OF(self), nm_mark, nm_delete, m);
1411
+
1412
+ nm_unregister_nmatrix(m);
1413
+ NM_CONSERVATIVE(nm_unregister_value(self));
1414
+ NM_CONSERVATIVE(nm_unregister_value(init));
1415
+ return to_return;
1416
+
972
1417
  }
973
1418
 
974
1419
  /*
975
1420
  * Copy constructor for transposing.
976
1421
  */
977
1422
  static VALUE nm_init_transposed(VALUE self) {
1423
+ NM_CONSERVATIVE(nm_register_value(self));
1424
+
978
1425
  static STORAGE* (*storage_copy_transposed[nm::NUM_STYPES])(const STORAGE* rhs_base) = {
979
1426
  nm_dense_storage_copy_transposed,
980
1427
  nm_list_storage_copy_transposed,
@@ -984,19 +1431,30 @@ static VALUE nm_init_transposed(VALUE self) {
984
1431
  NMATRIX* lhs = nm_create( NM_STYPE(self),
985
1432
  storage_copy_transposed[NM_STYPE(self)]( NM_STORAGE(self) )
986
1433
  );
1434
+ nm_register_nmatrix(lhs);
1435
+ VALUE to_return = Data_Wrap_Struct(CLASS_OF(self), nm_mark, nm_delete, lhs);
987
1436
 
988
- return Data_Wrap_Struct(CLASS_OF(self), nm_mark, nm_delete, lhs);
1437
+ nm_unregister_nmatrix(lhs);
1438
+ NM_CONSERVATIVE(nm_unregister_value(self));
1439
+ return to_return;
989
1440
  }
990
1441
 
991
1442
  /*
992
1443
  * Copy constructor for no change of dtype or stype (used for #initialize_copy hook).
993
1444
  */
994
1445
  static VALUE nm_init_copy(VALUE copy, VALUE original) {
1446
+ NM_CONSERVATIVE(nm_register_value(copy));
1447
+ NM_CONSERVATIVE(nm_register_value(original));
1448
+
995
1449
  NMATRIX *lhs, *rhs;
996
1450
 
997
1451
  CheckNMatrixType(original);
998
1452
 
999
- if (copy == original) return copy;
1453
+ if (copy == original) {
1454
+ NM_CONSERVATIVE(nm_unregister_value(copy));
1455
+ NM_CONSERVATIVE(nm_unregister_value(original));
1456
+ return copy;
1457
+ }
1000
1458
 
1001
1459
  UnwrapNMatrix( original, rhs );
1002
1460
  UnwrapNMatrix( copy, lhs );
@@ -1007,25 +1465,24 @@ static VALUE nm_init_copy(VALUE copy, VALUE original) {
1007
1465
  CAST_TABLE(ttable);
1008
1466
  lhs->storage = ttable[lhs->stype][rhs->stype](rhs->storage, rhs->storage->dtype, NULL);
1009
1467
 
1468
+ NM_CONSERVATIVE(nm_unregister_value(copy));
1469
+ NM_CONSERVATIVE(nm_unregister_value(original));
1470
+
1010
1471
  return copy;
1011
1472
  }
1012
1473
 
1013
1474
  /*
1014
- * Get major, minor, and release components of NMatrix::VERSION. Store in function parameters.
1475
+ * Get major, minor, and release components of NMatrix::VERSION. Store in function parameters. Doesn't get
1476
+ * the "pre" field currently (beta1/rc1/etc).
1015
1477
  */
1016
1478
  static void get_version_info(uint16_t& major, uint16_t& minor, uint16_t& release) {
1017
1479
  // Get VERSION and split it on periods. Result is an Array.
1018
- VALUE version = rb_funcall(rb_const_get(cNMatrix, rb_intern("VERSION")), rb_intern("split"), 1, rb_str_new_cstr("."));
1019
- VALUE* ary = RARRAY_PTR(version); // major, minor, and release
1480
+ VALUE cVersion = rb_const_get(cNMatrix, rb_intern("VERSION"));
1020
1481
 
1021
1482
  // Convert each to an integer
1022
- VALUE maj = rb_funcall(ary[0], rb_intern("to_i"), 0);
1023
- VALUE min = rb_funcall(ary[1], rb_intern("to_i"), 0);
1024
- VALUE rel = rb_funcall(ary[2], rb_intern("to_i"), 0);
1025
-
1026
- major = static_cast<uint16_t>(nm::RubyObject(maj));
1027
- minor = static_cast<uint16_t>(nm::RubyObject(min));
1028
- release = static_cast<uint16_t>(nm::RubyObject(rel));
1483
+ major = FIX2INT(rb_const_get(cVersion, rb_intern("MAJOR")));
1484
+ minor = FIX2INT(rb_const_get(cVersion, rb_intern("MINOR")));
1485
+ release = FIX2INT(rb_const_get(cVersion, rb_intern("TINY")));
1029
1486
  }
1030
1487
 
1031
1488
 
@@ -1055,12 +1512,40 @@ static nm::symm_t interpret_symm(VALUE symm) {
1055
1512
 
1056
1513
 
1057
1514
  void read_padded_shape(std::ifstream& f, size_t dim, size_t* shape) {
1058
- nm::read_padded_shape(f, dim, shape);
1515
+ size_t bytes_read = 0;
1516
+
1517
+ // Read shape
1518
+ for (size_t i = 0; i < dim; ++i) {
1519
+ size_t s;
1520
+ f.read(reinterpret_cast<char*>(&s), sizeof(size_t));
1521
+ shape[i] = s;
1522
+
1523
+ bytes_read += sizeof(size_t);
1524
+ }
1525
+
1526
+ // Ignore padding
1527
+ f.ignore(bytes_read % 8);
1059
1528
  }
1060
1529
 
1061
1530
 
1062
1531
  void write_padded_shape(std::ofstream& f, size_t dim, size_t* shape) {
1063
- nm::write_padded_shape(f, dim, shape);
1532
+ size_t bytes_written = 0;
1533
+
1534
+ // Write shape
1535
+ for (size_t i = 0; i < dim; ++i) {
1536
+ size_t s = shape[i];
1537
+ f.write(reinterpret_cast<const char*>(&s), sizeof(size_t));
1538
+
1539
+ bytes_written += sizeof(size_t);
1540
+ }
1541
+
1542
+ // Pad with zeros
1543
+ size_t zero = 0;
1544
+ while (bytes_written % 8) {
1545
+ f.write(reinterpret_cast<const char*>(&zero), sizeof(size_t));
1546
+
1547
+ bytes_written += sizeof(IType);
1548
+ }
1064
1549
  }
1065
1550
 
1066
1551
 
@@ -1116,6 +1601,10 @@ static VALUE nm_write(int argc, VALUE* argv, VALUE self) {
1116
1601
  if (argc < 1 || argc > 2) {
1117
1602
  rb_raise(rb_eArgError, "Expected one or two arguments");
1118
1603
  }
1604
+
1605
+ NM_CONSERVATIVE(nm_register_values(argv, argc));
1606
+ NM_CONSERVATIVE(nm_register_value(self));
1607
+
1119
1608
  VALUE file = argv[0],
1120
1609
  symm = argc == 1 ? Qnil : argv[1];
1121
1610
 
@@ -1125,6 +1614,8 @@ static VALUE nm_write(int argc, VALUE* argv, VALUE self) {
1125
1614
  nm::symm_t symm_ = interpret_symm(symm);
1126
1615
 
1127
1616
  if (nmatrix->storage->dtype == nm::RUBYOBJ) {
1617
+ NM_CONSERVATIVE(nm_unregister_values(argv, argc));
1618
+ NM_CONSERVATIVE(nm_unregister_value(self));
1128
1619
  rb_raise(rb_eNotImpError, "Ruby Object writing is not implemented yet");
1129
1620
  }
1130
1621
 
@@ -1137,8 +1628,15 @@ static VALUE nm_write(int argc, VALUE* argv, VALUE self) {
1137
1628
  //FIXME: Cast the matrix to the smallest possible index type. Write that in the place of IType.
1138
1629
 
1139
1630
  // Check arguments before starting to write.
1140
- if (nmatrix->stype == nm::LIST_STORE) rb_raise(nm_eStorageTypeError, "cannot save list matrix; cast to yale or dense first");
1631
+ if (nmatrix->stype == nm::LIST_STORE) {
1632
+ NM_CONSERVATIVE(nm_unregister_values(argv, argc));
1633
+ NM_CONSERVATIVE(nm_unregister_value(self));
1634
+ rb_raise(nm_eStorageTypeError, "cannot save list matrix; cast to yale or dense first");
1635
+ }
1141
1636
  if (symm_ != nm::NONSYMM) {
1637
+ NM_CONSERVATIVE(nm_unregister_values(argv, argc));
1638
+ NM_CONSERVATIVE(nm_unregister_value(self));
1639
+
1142
1640
  if (dim != 2) rb_raise(rb_eArgError, "symmetry/triangularity not defined for a non-2D matrix");
1143
1641
  if (nmatrix->storage->shape[0] != nmatrix->storage->shape[1])
1144
1642
  rb_raise(rb_eArgError, "symmetry/triangularity not defined for a non-square matrix");
@@ -1185,6 +1683,9 @@ static VALUE nm_write(int argc, VALUE* argv, VALUE self) {
1185
1683
 
1186
1684
  f.close();
1187
1685
 
1686
+ NM_CONSERVATIVE(nm_unregister_values(argv, argc));
1687
+ NM_CONSERVATIVE(nm_unregister_value(self));
1688
+
1188
1689
  return Qtrue;
1189
1690
  }
1190
1691
 
@@ -1202,6 +1703,9 @@ static VALUE nm_write(int argc, VALUE* argv, VALUE self) {
1202
1703
  static VALUE nm_read(int argc, VALUE* argv, VALUE self) {
1203
1704
  using std::ifstream;
1204
1705
 
1706
+ NM_CONSERVATIVE(nm_register_values(argv, argc));
1707
+ NM_CONSERVATIVE(nm_register_value(self));
1708
+
1205
1709
  VALUE file, force_;
1206
1710
 
1207
1711
  // Read the arguments
@@ -1210,6 +1714,8 @@ static VALUE nm_read(int argc, VALUE* argv, VALUE self) {
1210
1714
 
1211
1715
 
1212
1716
  if (!RB_FILE_EXISTS(file)) { // FIXME: Errno::ENOENT
1717
+ NM_CONSERVATIVE(nm_unregister_values(argv, argc));
1718
+ NM_CONSERVATIVE(nm_unregister_value(self));
1213
1719
  rb_raise(rb_get_errno_exc("ENOENT"), "%s", RSTRING_PTR(file));
1214
1720
  }
1215
1721
 
@@ -1230,9 +1736,11 @@ static VALUE nm_read(int argc, VALUE* argv, VALUE self) {
1230
1736
  int ver = major * 10000 + minor * 100 + release,
1231
1737
  fver = fmajor * 10000 + fminor * 100 + release;
1232
1738
  if (fver > ver && force == false) {
1233
- rb_raise(rb_eIOError, "File was created in newer version of NMatrix than current");
1739
+ NM_CONSERVATIVE(nm_unregister_values(argv, argc));
1740
+ NM_CONSERVATIVE(nm_unregister_value(self));
1741
+ rb_raise(rb_eIOError, "File was created in newer version of NMatrix than current (%u.%u.%u)", fmajor, fminor, frelease);
1234
1742
  }
1235
- if (null16 != 0) fprintf(stderr, "Warning: Expected zero padding was not zero\n");
1743
+ if (null16 != 0) rb_warn("nm_read: Expected zero padding was not zero (0)\n");
1236
1744
 
1237
1745
  uint8_t dt, st, it, sm;
1238
1746
  uint16_t dim;
@@ -1245,19 +1753,20 @@ static VALUE nm_read(int argc, VALUE* argv, VALUE self) {
1245
1753
  f.read(reinterpret_cast<char*>(&null16), sizeof(uint16_t));
1246
1754
  f.read(reinterpret_cast<char*>(&dim), sizeof(uint16_t));
1247
1755
 
1248
- if (null16 != 0) fprintf(stderr, "Warning: Expected zero padding was not zero\n");
1756
+ if (null16 != 0) rb_warn("nm_read: Expected zero padding was not zero (1)");
1249
1757
  nm::stype_t stype = static_cast<nm::stype_t>(st);
1250
1758
  nm::dtype_t dtype = static_cast<nm::dtype_t>(dt);
1251
1759
  nm::symm_t symm = static_cast<nm::symm_t>(sm);
1252
1760
  //nm::itype_t itype = static_cast<nm::itype_t>(it);
1253
1761
 
1254
1762
  // READ NEXT FEW 64-BIT BLOCKS
1255
- size_t* shape = ALLOC_N(size_t, dim);
1763
+ size_t* shape = NM_ALLOC_N(size_t, dim);
1256
1764
  read_padded_shape(f, dim, shape);
1257
1765
 
1258
1766
  STORAGE* s;
1259
1767
  if (stype == nm::DENSE_STORE) {
1260
1768
  s = nm_dense_storage_create(dtype, shape, dim, NULL, 0);
1769
+ nm_register_storage(stype, s);
1261
1770
 
1262
1771
  read_padded_dense_elements(f, reinterpret_cast<DENSE_STORAGE*>(s), symm, dtype);
1263
1772
 
@@ -1270,8 +1779,12 @@ static VALUE nm_read(int argc, VALUE* argv, VALUE self) {
1270
1779
 
1271
1780
  s = nm_yale_storage_create(dtype, shape, dim, length); // set length as init capacity
1272
1781
 
1782
+ nm_register_storage(stype, s);
1783
+
1273
1784
  read_padded_yale_elements(f, reinterpret_cast<YALE_STORAGE*>(s), length, symm, dtype);
1274
1785
  } else {
1786
+ NM_CONSERVATIVE(nm_unregister_values(argv, argc));
1787
+ NM_CONSERVATIVE(nm_unregister_value(self));
1275
1788
  rb_raise(nm_eStorageTypeError, "please convert to yale or dense before saving");
1276
1789
  }
1277
1790
 
@@ -1280,10 +1793,18 @@ static VALUE nm_read(int argc, VALUE* argv, VALUE self) {
1280
1793
  // Return the appropriate matrix object (Ruby VALUE)
1281
1794
  // FIXME: This should probably return CLASS_OF(self) instead of cNMatrix, but I don't know how that works for
1282
1795
  // FIXME: class methods.
1796
+ nm_register_nmatrix(nm);
1797
+ VALUE to_return = Data_Wrap_Struct(cNMatrix, nm_mark, nm_delete, nm);
1798
+
1799
+ nm_unregister_nmatrix(nm);
1800
+ NM_CONSERVATIVE(nm_unregister_values(argv, argc));
1801
+ NM_CONSERVATIVE(nm_unregister_value(self));
1802
+ nm_unregister_storage(stype, s);
1803
+
1283
1804
  switch(stype) {
1284
1805
  case nm::DENSE_STORE:
1285
1806
  case nm::YALE_STORE:
1286
- return Data_Wrap_Struct(cNMatrix, nm_mark, nm_delete, nm);
1807
+ return to_return;
1287
1808
  default: // this case never occurs (due to earlier rb_raise)
1288
1809
  return Qnil;
1289
1810
  }
@@ -1321,7 +1842,7 @@ static VALUE nm_init_yale_from_old_yale(VALUE shape, VALUE dtype, VALUE ia, VALU
1321
1842
  */
1322
1843
  static VALUE nm_is_ref(VALUE self) {
1323
1844
  if (NM_SRC(self) == NM_STORAGE(self)) return Qfalse;
1324
- else return Qtrue;
1845
+ return Qtrue;
1325
1846
  }
1326
1847
 
1327
1848
  /*
@@ -1340,7 +1861,8 @@ static VALUE nm_mget(int argc, VALUE* argv, VALUE self) {
1340
1861
  nm_list_storage_get,
1341
1862
  nm_yale_storage_get
1342
1863
  };
1343
- return nm_xslice(argc, argv, ttable[NM_STYPE(self)], nm_delete, self);
1864
+ nm::stype_t stype = NM_STYPE(self);
1865
+ return nm_xslice(argc, argv, ttable[stype], nm_delete, self);
1344
1866
  }
1345
1867
 
1346
1868
  /*
@@ -1359,7 +1881,8 @@ static VALUE nm_mref(int argc, VALUE* argv, VALUE self) {
1359
1881
  nm_list_storage_ref,
1360
1882
  nm_yale_storage_ref
1361
1883
  };
1362
- return nm_xslice(argc, argv, ttable[NM_STYPE(self)], nm_delete_ref, self);
1884
+ nm::stype_t stype = NM_STYPE(self);
1885
+ return nm_xslice(argc, argv, ttable[stype], nm_delete_ref, self);
1363
1886
  }
1364
1887
 
1365
1888
  /*
@@ -1372,11 +1895,17 @@ static VALUE nm_mref(int argc, VALUE* argv, VALUE self) {
1372
1895
  * n[3,3] = n[2,3] = 5.0
1373
1896
  */
1374
1897
  static VALUE nm_mset(int argc, VALUE* argv, VALUE self) {
1898
+
1375
1899
  size_t dim = NM_DIM(self); // last arg is the value
1376
1900
 
1901
+ VALUE to_return = Qnil;
1902
+
1377
1903
  if ((size_t)(argc) > NM_DIM(self)+1) {
1378
- rb_raise(rb_eArgError, "wrong number of arguments (%d for %u)", argc, effective_dim(NM_STORAGE(self))+1);
1904
+ rb_raise(rb_eArgError, "wrong number of arguments (%d for %lu)", argc, effective_dim(NM_STORAGE(self))+1);
1379
1905
  } else {
1906
+ NM_CONSERVATIVE(nm_register_value(self));
1907
+ NM_CONSERVATIVE(nm_register_values(argv, argc));
1908
+
1380
1909
  SLICE* slice = get_slice(dim, argc-1, argv, NM_STORAGE(self)->shape);
1381
1910
 
1382
1911
  static void (*ttable[nm::NUM_STYPES])(VALUE, SLICE*, VALUE) = {
@@ -1389,9 +1918,13 @@ static VALUE nm_mset(int argc, VALUE* argv, VALUE self) {
1389
1918
 
1390
1919
  free_slice(slice);
1391
1920
 
1392
- return argv[argc-1];
1921
+ to_return = argv[argc-1];
1922
+
1923
+ NM_CONSERVATIVE(nm_unregister_value(self));
1924
+ NM_CONSERVATIVE(nm_unregister_values(argv, argc));
1393
1925
  }
1394
- return Qnil;
1926
+
1927
+ return to_return;
1395
1928
  }
1396
1929
 
1397
1930
  /*
@@ -1402,30 +1935,50 @@ static VALUE nm_mset(int argc, VALUE* argv, VALUE self) {
1402
1935
  * The two matrices must be of the same stype (for now). If dtype differs, an upcast will occur.
1403
1936
  */
1404
1937
  static VALUE nm_multiply(VALUE left_v, VALUE right_v) {
1938
+ NM_CONSERVATIVE(nm_register_value(left_v));
1939
+ NM_CONSERVATIVE(nm_register_value(right_v));
1940
+
1405
1941
  NMATRIX *left, *right;
1406
1942
 
1407
1943
  UnwrapNMatrix( left_v, left );
1408
1944
 
1409
- if (NM_RUBYVAL_IS_NUMERIC(right_v))
1945
+ if (NM_RUBYVAL_IS_NUMERIC(right_v)) {
1946
+ NM_CONSERVATIVE(nm_unregister_value(left_v));
1947
+ NM_CONSERVATIVE(nm_unregister_value(right_v));
1410
1948
  return matrix_multiply_scalar(left, right_v);
1949
+ }
1411
1950
 
1412
- else if (TYPE(right_v) == T_ARRAY)
1951
+ else if (TYPE(right_v) == T_ARRAY) {
1952
+ NM_CONSERVATIVE(nm_unregister_value(left_v));
1953
+ NM_CONSERVATIVE(nm_unregister_value(right_v));
1413
1954
  rb_raise(rb_eNotImpError, "please convert array to nx1 or 1xn NMatrix first");
1955
+ }
1414
1956
 
1415
1957
  else { // both are matrices (probably)
1416
1958
  CheckNMatrixType(right_v);
1417
1959
  UnwrapNMatrix( right_v, right );
1418
1960
 
1419
- if (left->storage->shape[1] != right->storage->shape[0])
1961
+ if (left->storage->shape[1] != right->storage->shape[0]) {
1962
+ NM_CONSERVATIVE(nm_unregister_value(left_v));
1963
+ NM_CONSERVATIVE(nm_unregister_value(right_v));
1420
1964
  rb_raise(rb_eArgError, "incompatible dimensions");
1965
+ }
1421
1966
 
1422
- if (left->stype != right->stype)
1967
+ if (left->stype != right->stype) {
1968
+ NM_CONSERVATIVE(nm_unregister_value(left_v));
1969
+ NM_CONSERVATIVE(nm_unregister_value(right_v));
1423
1970
  rb_raise(rb_eNotImpError, "matrices must have same stype");
1971
+ }
1424
1972
 
1973
+ NM_CONSERVATIVE(nm_unregister_value(left_v));
1974
+ NM_CONSERVATIVE(nm_unregister_value(right_v));
1425
1975
  return matrix_multiply(left, right);
1426
1976
 
1427
1977
  }
1428
1978
 
1979
+ NM_CONSERVATIVE(nm_unregister_value(left_v));
1980
+ NM_CONSERVATIVE(nm_unregister_value(right_v));
1981
+
1429
1982
  return Qnil;
1430
1983
  }
1431
1984
 
@@ -1452,13 +2005,17 @@ static VALUE nm_dim(VALUE self) {
1452
2005
  * Get the shape (dimensions) of a matrix.
1453
2006
  */
1454
2007
  static VALUE nm_shape(VALUE self) {
2008
+ NM_CONSERVATIVE(nm_register_value(self));
1455
2009
  STORAGE* s = NM_STORAGE(self);
1456
2010
 
1457
2011
  // Copy elements into a VALUE array and then use those to create a Ruby array with rb_ary_new4.
1458
- VALUE* shape = ALLOCA_N(VALUE, s->dim);
2012
+ VALUE* shape = NM_ALLOCA_N(VALUE, s->dim);
2013
+ nm_register_values(shape, s->dim);
1459
2014
  for (size_t index = 0; index < s->dim; ++index)
1460
2015
  shape[index] = INT2FIX(s->shape[index]);
1461
-
2016
+
2017
+ nm_unregister_values(shape, s->dim);
2018
+ NM_CONSERVATIVE(nm_unregister_value(self));
1462
2019
  return rb_ary_new4(s->dim, shape);
1463
2020
  }
1464
2021
 
@@ -1470,13 +2027,17 @@ static VALUE nm_shape(VALUE self) {
1470
2027
  * Get the offset (slice position) of a matrix. Typically all zeros, unless you have a reference slice.
1471
2028
  */
1472
2029
  static VALUE nm_offset(VALUE self) {
2030
+ NM_CONSERVATIVE(nm_register_value(self));
1473
2031
  STORAGE* s = NM_STORAGE(self);
1474
2032
 
1475
2033
  // Copy elements into a VALUE array and then use those to create a Ruby array with rb_ary_new4.
1476
- VALUE* offset = ALLOCA_N(VALUE, s->dim);
2034
+ VALUE* offset = NM_ALLOCA_N(VALUE, s->dim);
2035
+ nm_register_values(offset, s->dim);
1477
2036
  for (size_t index = 0; index < s->dim; ++index)
1478
2037
  offset[index] = INT2FIX(s->offset[index]);
1479
2038
 
2039
+ nm_unregister_values(offset, s->dim);
2040
+ NM_CONSERVATIVE(nm_unregister_value(self));
1480
2041
  return rb_ary_new4(s->dim, offset);
1481
2042
  }
1482
2043
 
@@ -1490,13 +2051,20 @@ static VALUE nm_offset(VALUE self) {
1490
2051
  static VALUE nm_supershape(VALUE self) {
1491
2052
 
1492
2053
  STORAGE* s = NM_STORAGE(self);
1493
- if (s->src == s) return nm_shape(self); // easy case (not a slice)
2054
+ if (s->src == s) {
2055
+ return nm_shape(self); // easy case (not a slice)
2056
+ }
1494
2057
  else s = s->src;
1495
2058
 
1496
- VALUE* shape = ALLOCA_N(VALUE, s->dim);
2059
+ NM_CONSERVATIVE(nm_register_value(self));
2060
+
2061
+ VALUE* shape = NM_ALLOCA_N(VALUE, s->dim);
2062
+ nm_register_values(shape, s->dim);
1497
2063
  for (size_t index = 0; index < s->dim; ++index)
1498
2064
  shape[index] = INT2FIX(s->shape[index]);
1499
2065
 
2066
+ nm_unregister_values(shape, s->dim);
2067
+ NM_CONSERVATIVE(nm_unregister_value(self));
1500
2068
  return rb_ary_new4(s->dim, shape);
1501
2069
  }
1502
2070
 
@@ -1507,8 +2075,10 @@ static VALUE nm_supershape(VALUE self) {
1507
2075
  * Get the storage type (stype) of a matrix, e.g., :yale, :dense, or :list.
1508
2076
  */
1509
2077
  static VALUE nm_stype(VALUE self) {
1510
- ID stype = rb_intern(STYPE_NAMES[NM_STYPE(self)]);
1511
- return ID2SYM(stype);
2078
+ NM_CONSERVATIVE(nm_register_value(self));
2079
+ VALUE stype = ID2SYM(rb_intern(STYPE_NAMES[NM_STYPE(self)]));
2080
+ NM_CONSERVATIVE(nm_unregister_value(self));
2081
+ return stype;
1512
2082
  }
1513
2083
 
1514
2084
  /*
@@ -1550,11 +2120,18 @@ static VALUE nm_effective_dim(VALUE self) {
1550
2120
  */
1551
2121
  static VALUE nm_xslice(int argc, VALUE* argv, void* (*slice_func)(const STORAGE*, SLICE*), void (*delete_func)(NMATRIX*), VALUE self) {
1552
2122
  VALUE result = Qnil;
2123
+
1553
2124
  STORAGE* s = NM_STORAGE(self);
1554
2125
 
1555
2126
  if (NM_DIM(self) < (size_t)(argc)) {
1556
- rb_raise(rb_eArgError, "wrong number of arguments (%d for %u)", argc, effective_dim(s));
2127
+ rb_raise(rb_eArgError, "wrong number of arguments (%d for %lu)", argc, effective_dim(s));
1557
2128
  } else {
2129
+
2130
+ NM_CONSERVATIVE(nm_register_values(argv, argc));
2131
+ NM_CONSERVATIVE(nm_register_value(self));
2132
+
2133
+ nm_register_value(result);
2134
+
1558
2135
  SLICE* slice = get_slice(NM_DIM(self), argc, argv, s->shape);
1559
2136
 
1560
2137
  if (slice->single) {
@@ -1569,16 +2146,21 @@ static VALUE nm_xslice(int argc, VALUE* argv, void* (*slice_func)(const STORAGE*
1569
2146
 
1570
2147
  } else {
1571
2148
 
1572
- NMATRIX* mat = ALLOC(NMATRIX);
2149
+ NMATRIX* mat = NM_ALLOC(NMATRIX);
1573
2150
  mat->stype = NM_STYPE(self);
1574
2151
  mat->storage = (STORAGE*)((*slice_func)( s, slice ));
1575
-
2152
+ nm_register_nmatrix(mat);
1576
2153
  result = Data_Wrap_Struct(CLASS_OF(self), nm_mark, delete_func, mat);
2154
+ nm_unregister_nmatrix(mat);
1577
2155
  }
1578
2156
 
1579
2157
  free_slice(slice);
1580
2158
  }
1581
2159
 
2160
+ nm_unregister_value(result);
2161
+ NM_CONSERVATIVE(nm_unregister_values(argv, argc));
2162
+ NM_CONSERVATIVE(nm_unregister_value(self));
2163
+
1582
2164
  return result;
1583
2165
  }
1584
2166
 
@@ -1586,13 +2168,49 @@ static VALUE nm_xslice(int argc, VALUE* argv, void* (*slice_func)(const STORAGE*
1586
2168
  // Helper Functions //
1587
2169
  //////////////////////
1588
2170
 
2171
+ static VALUE unary_op(nm::unaryop_t op, VALUE self) {
2172
+ NM_CONSERVATIVE(nm_register_value(self));
2173
+ NMATRIX* left;
2174
+ UnwrapNMatrix(self, left);
2175
+ std::string sym;
2176
+
2177
+ switch(left->stype) {
2178
+ case nm::DENSE_STORE:
2179
+ sym = "__dense_unary_" + nm::UNARYOPS[op] + "__";
2180
+ break;
2181
+ case nm::YALE_STORE:
2182
+ sym = "__yale_unary_" + nm::UNARYOPS[op] + "__";
2183
+ break;
2184
+ case nm::LIST_STORE:
2185
+ sym = "__list_unary_" + nm::UNARYOPS[op] + "__";
2186
+ break;
2187
+ }
2188
+
2189
+ NM_CONSERVATIVE(nm_unregister_value(self));
2190
+ return rb_funcall(self, rb_intern(sym.c_str()), 0);
2191
+ }
2192
+
2193
+ static void check_dims_and_shape(VALUE left_val, VALUE right_val) {
2194
+ // Check that the left- and right-hand sides have the same dimensionality.
2195
+ if (NM_DIM(left_val) != NM_DIM(right_val)) {
2196
+ rb_raise(rb_eArgError, "The left- and right-hand sides of the operation must have the same dimensionality.");
2197
+ }
2198
+ // Check that the left- and right-hand sides have the same shape.
2199
+ if (memcmp(&NM_SHAPE(left_val, 0), &NM_SHAPE(right_val, 0), sizeof(size_t) * NM_DIM(left_val)) != 0) {
2200
+ rb_raise(rb_eArgError, "The left- and right-hand sides of the operation must have the same shape.");
2201
+ }
2202
+ }
2203
+
1589
2204
  static VALUE elementwise_op(nm::ewop_t op, VALUE left_val, VALUE right_val) {
1590
2205
 
1591
- NMATRIX* left;
1592
- NMATRIX* result;
2206
+ NM_CONSERVATIVE(nm_register_value(left_val));
2207
+ NM_CONSERVATIVE(nm_register_value(right_val));
1593
2208
 
1594
- CheckNMatrixType(left_val);
1595
- UnwrapNMatrix(left_val, left);
2209
+ NMATRIX* left;
2210
+ NMATRIX* result;
2211
+
2212
+ CheckNMatrixType(left_val);
2213
+ UnwrapNMatrix(left_val, left);
1596
2214
 
1597
2215
  if (TYPE(right_val) != T_DATA || (RDATA(right_val)->dfree != (RUBY_DATA_FUNC)nm_delete && RDATA(right_val)->dfree != (RUBY_DATA_FUNC)nm_delete_ref)) {
1598
2216
  // This is a matrix-scalar element-wise operation.
@@ -1608,21 +2226,18 @@ static VALUE elementwise_op(nm::ewop_t op, VALUE left_val, VALUE right_val) {
1608
2226
  sym = "__list_scalar_" + nm::EWOP_NAMES[op] + "__";
1609
2227
  break;
1610
2228
  default:
2229
+ NM_CONSERVATIVE(nm_unregister_value(left_val));
2230
+ NM_CONSERVATIVE(nm_unregister_value(right_val));
1611
2231
  rb_raise(rb_eNotImpError, "unknown storage type requested scalar element-wise operation");
1612
2232
  }
1613
- return rb_funcall(left_val, rb_intern(sym.c_str()), 1, right_val);
2233
+ VALUE symv = rb_intern(sym.c_str());
2234
+ NM_CONSERVATIVE(nm_unregister_value(left_val));
2235
+ NM_CONSERVATIVE(nm_unregister_value(right_val));
2236
+ return rb_funcall(left_val, symv, 1, right_val);
1614
2237
 
1615
2238
  } else {
1616
2239
 
1617
- // Check that the left- and right-hand sides have the same dimensionality.
1618
- if (NM_DIM(left_val) != NM_DIM(right_val)) {
1619
- rb_raise(rb_eArgError, "The left- and right-hand sides of the operation must have the same dimensionality.");
1620
- }
1621
-
1622
- // Check that the left- and right-hand sides have the same shape.
1623
- if (memcmp(&NM_SHAPE(left_val, 0), &NM_SHAPE(right_val, 0), sizeof(size_t) * NM_DIM(left_val)) != 0) {
1624
- rb_raise(rb_eArgError, "The left- and right-hand sides of the operation must have the same shape.");
1625
- }
2240
+ check_dims_and_shape(left_val, right_val);
1626
2241
 
1627
2242
  NMATRIX* right;
1628
2243
  UnwrapNMatrix(right_val, right);
@@ -1641,16 +2256,99 @@ static VALUE elementwise_op(nm::ewop_t op, VALUE left_val, VALUE right_val) {
1641
2256
  sym = "__list_elementwise_" + nm::EWOP_NAMES[op] + "__";
1642
2257
  break;
1643
2258
  default:
2259
+ NM_CONSERVATIVE(nm_unregister_value(left_val));
2260
+ NM_CONSERVATIVE(nm_unregister_value(right_val));
1644
2261
  rb_raise(rb_eNotImpError, "unknown storage type requested element-wise operation");
1645
2262
  }
1646
- return rb_funcall(left_val, rb_intern(sym.c_str()), 1, right_val);
2263
+
2264
+ VALUE symv = rb_intern(sym.c_str());
2265
+ NM_CONSERVATIVE(nm_unregister_value(left_val));
2266
+ NM_CONSERVATIVE(nm_unregister_value(right_val));
2267
+ return rb_funcall(left_val, symv, 1, right_val);
1647
2268
 
1648
2269
  } else {
2270
+ NM_CONSERVATIVE(nm_unregister_value(left_val));
2271
+ NM_CONSERVATIVE(nm_unregister_value(right_val));
1649
2272
  rb_raise(rb_eArgError, "Element-wise operations are not currently supported between matrices with differing stypes.");
1650
2273
  }
1651
2274
  }
1652
2275
 
1653
- return Data_Wrap_Struct(CLASS_OF(left_val), nm_mark, nm_delete, result);
2276
+ NM_CONSERVATIVE(nm_unregister_value(left_val));
2277
+ NM_CONSERVATIVE(nm_unregister_value(right_val));
2278
+ return Data_Wrap_Struct(CLASS_OF(left_val), nm_mark, nm_delete, result);
2279
+ }
2280
+
2281
+ static VALUE noncom_elementwise_op(nm::noncom_ewop_t op, VALUE self, VALUE other, VALUE flip) {
2282
+
2283
+ NM_CONSERVATIVE(nm_register_value(self));
2284
+ NM_CONSERVATIVE(nm_register_value(other));
2285
+
2286
+ NMATRIX* self_nm;
2287
+ NMATRIX* result;
2288
+
2289
+ CheckNMatrixType(self);
2290
+ UnwrapNMatrix(self, self_nm);
2291
+
2292
+ if (TYPE(other) != T_DATA || (RDATA(other)->dfree != (RUBY_DATA_FUNC)nm_delete && RDATA(other)->dfree != (RUBY_DATA_FUNC)nm_delete_ref)) {
2293
+ // This is a matrix-scalar element-wise operation.
2294
+ std::string sym;
2295
+ switch(self_nm->stype) {
2296
+ case nm::DENSE_STORE:
2297
+ sym = "__dense_scalar_" + nm::NONCOM_EWOP_NAMES[op] + "__";
2298
+ break;
2299
+ case nm::YALE_STORE:
2300
+ sym = "__yale_scalar_" + nm::NONCOM_EWOP_NAMES[op] + "__";
2301
+ break;
2302
+ case nm::LIST_STORE:
2303
+ sym = "__list_scalar_" + nm::NONCOM_EWOP_NAMES[op] + "__";
2304
+ break;
2305
+ default:
2306
+ NM_CONSERVATIVE(nm_unregister_value(self));
2307
+ NM_CONSERVATIVE(nm_unregister_value(other));
2308
+ rb_raise(rb_eNotImpError, "unknown storage type requested scalar element-wise operation");
2309
+ }
2310
+ NM_CONSERVATIVE(nm_unregister_value(self));
2311
+ NM_CONSERVATIVE(nm_unregister_value(other));
2312
+ return rb_funcall(self, rb_intern(sym.c_str()), 2, other, flip);
2313
+
2314
+ } else {
2315
+
2316
+ check_dims_and_shape(self, other);
2317
+
2318
+ NMATRIX* other_nm;
2319
+ UnwrapNMatrix(other, other_nm);
2320
+
2321
+ if (self_nm->stype == other_nm->stype) {
2322
+ std::string sym;
2323
+
2324
+ switch(self_nm->stype) {
2325
+ case nm::DENSE_STORE:
2326
+ sym = "__dense_elementwise_" + nm::NONCOM_EWOP_NAMES[op] + "__";
2327
+ break;
2328
+ case nm::YALE_STORE:
2329
+ sym = "__yale_elementwise_" + nm::NONCOM_EWOP_NAMES[op] + "__";
2330
+ break;
2331
+ case nm::LIST_STORE:
2332
+ sym = "__list_elementwise_" + nm::NONCOM_EWOP_NAMES[op] + "__";
2333
+ break;
2334
+ default:
2335
+ NM_CONSERVATIVE(nm_unregister_value(self));
2336
+ NM_CONSERVATIVE(nm_unregister_value(other));
2337
+ rb_raise(rb_eNotImpError, "unknown storage type requested element-wise operation");
2338
+ }
2339
+ NM_CONSERVATIVE(nm_unregister_value(self));
2340
+ NM_CONSERVATIVE(nm_unregister_value(other));
2341
+ return rb_funcall(self, rb_intern(sym.c_str()), 2, other, flip);
2342
+
2343
+ } else {
2344
+ nm_unregister_value(self);
2345
+ nm_unregister_value(other);
2346
+ rb_raise(rb_eArgError, "Element-wise operations are not currently supported between matrices with differing stypes.");
2347
+ }
2348
+ }
2349
+ NM_CONSERVATIVE(nm_unregister_value(self));
2350
+ NM_CONSERVATIVE(nm_unregister_value(other));
2351
+ return Data_Wrap_Struct(CLASS_OF(self), nm_mark, nm_delete, result);
1654
2352
  }
1655
2353
 
1656
2354
  /*
@@ -1664,11 +2362,13 @@ bool is_ref(const NMATRIX* matrix) {
1664
2362
  * Helper function for nm_symmetric and nm_hermitian.
1665
2363
  */
1666
2364
  static VALUE is_symmetric(VALUE self, bool hermitian) {
2365
+ NM_CONSERVATIVE(nm_register_value(self));
2366
+
1667
2367
  NMATRIX* m;
1668
2368
  UnwrapNMatrix(self, m);
1669
2369
 
1670
2370
  if (m->storage->shape[0] == m->storage->shape[1] and m->storage->dim == 2) {
1671
- if (NM_STYPE(self) == nm::DENSE_STORE) {
2371
+ if (NM_STYPE(self) == nm::DENSE_STORE) {
1672
2372
  if (hermitian) {
1673
2373
  nm_dense_storage_is_hermitian((DENSE_STORAGE*)(m->storage), m->storage->shape[0]);
1674
2374
 
@@ -1678,11 +2378,12 @@ static VALUE is_symmetric(VALUE self, bool hermitian) {
1678
2378
 
1679
2379
  } else {
1680
2380
  // TODO: Implement, at the very least, yale_is_symmetric. Model it after yale/transp.template.c.
2381
+ NM_CONSERVATIVE(nm_unregister_value(self));
1681
2382
  rb_raise(rb_eNotImpError, "symmetric? and hermitian? only implemented for dense currently");
1682
2383
  }
1683
2384
 
1684
2385
  }
1685
-
2386
+ NM_CONSERVATIVE(nm_unregister_value(self));
1686
2387
  return Qfalse;
1687
2388
  }
1688
2389
 
@@ -1724,9 +2425,10 @@ nm::dtype_t nm_dtype_min_fixnum(int64_t v) {
1724
2425
  * Helper for nm_dtype_min(), handling rationals.
1725
2426
  */
1726
2427
  nm::dtype_t nm_dtype_min_rational(VALUE vv) {
1727
- nm::Rational128* v = ALLOCA_N(nm::Rational128, 1);
2428
+ NM_CONSERVATIVE(nm_register_value(vv));
2429
+ nm::Rational128* v = NM_ALLOCA_N(nm::Rational128, 1);
1728
2430
  rubyval_to_cval(vv, nm::RATIONAL128, v);
1729
-
2431
+ NM_CONSERVATIVE(nm_unregister_value(vv));
1730
2432
  int64_t i = std::max(std::abs(v->n), v->d);
1731
2433
  if (i <= SHRT_MAX) return nm::INT16;
1732
2434
  else if (i <= INT_MAX) return nm::INT32;
@@ -1848,6 +2550,8 @@ nm::dtype_t nm_dtype_guess(VALUE v) {
1848
2550
  * accessing some part of a matrix.
1849
2551
  */
1850
2552
  static SLICE* get_slice(size_t dim, int argc, VALUE* arg, size_t* shape) {
2553
+ NM_CONSERVATIVE(nm_register_values(arg, argc));
2554
+
1851
2555
  VALUE beg, end;
1852
2556
  int excl;
1853
2557
 
@@ -1875,15 +2579,17 @@ static SLICE* get_slice(size_t dim, int argc, VALUE* arg, size_t* shape) {
1875
2579
  slice->coords[r] = 0;
1876
2580
  slice->lengths[r] = shape[r];
1877
2581
  slice->single = false;
2582
+ t++;
1878
2583
 
1879
2584
  } else if (TYPE(arg[t]) == T_HASH) { // 3:5 notation (inclusive)
1880
2585
  VALUE begin_end = rb_funcall(v, rb_intern("shift"), 0); // rb_hash_shift
2586
+ nm_register_value(begin_end);
1881
2587
  slice->coords[r] = FIX2UINT(rb_ary_entry(begin_end, 0));
1882
2588
  slice->lengths[r] = FIX2UINT(rb_ary_entry(begin_end, 1)) - slice->coords[r];
1883
2589
 
1884
2590
  if (RHASH_EMPTY_P(v)) t++; // go on to the next
1885
-
1886
2591
  slice->single = false;
2592
+ nm_unregister_value(begin_end);
1887
2593
 
1888
2594
  } else if (CLASS_OF(v) == rb_cRange) {
1889
2595
  rb_range_values(arg[t], &beg, &end, &excl);
@@ -1896,13 +2602,17 @@ static SLICE* get_slice(size_t dim, int argc, VALUE* arg, size_t* shape) {
1896
2602
  t++;
1897
2603
 
1898
2604
  } else {
2605
+ NM_CONSERVATIVE(nm_unregister_values(arg, argc));
1899
2606
  rb_raise(rb_eArgError, "expected Fixnum, Range, or Hash for slice component instead of %s", rb_obj_classname(v));
1900
2607
  }
1901
2608
 
1902
- if (slice->coords[r] > shape[r] || slice->coords[r] + slice->lengths[r] > shape[r])
1903
- rb_raise(rb_eRangeError, "slice is larger than matrix in dimension %u (slice component %u)", r, t);
2609
+ if (slice->coords[r] > shape[r] || slice->coords[r] + slice->lengths[r] > shape[r]) {
2610
+ NM_CONSERVATIVE(nm_unregister_values(arg, argc));
2611
+ rb_raise(rb_eRangeError, "slice is larger than matrix in dimension %lu (slice component %lu)", r, t);
2612
+ }
1904
2613
  }
1905
2614
 
2615
+ NM_CONSERVATIVE(nm_unregister_values(arg, argc));
1906
2616
  return slice;
1907
2617
  }
1908
2618
 
@@ -1960,12 +2670,14 @@ static nm::dtype_t interpret_dtype(int argc, VALUE* argv, nm::stype_t stype) {
1960
2670
  * Convert an Ruby value or an array of Ruby values into initial C values.
1961
2671
  */
1962
2672
  static void* interpret_initial_value(VALUE arg, nm::dtype_t dtype) {
2673
+ NM_CONSERVATIVE(nm_register_value(arg));
2674
+
1963
2675
  unsigned int index;
1964
2676
  void* init_val;
1965
2677
 
1966
2678
  if (TYPE(arg) == T_ARRAY) {
1967
2679
  // Array
1968
- init_val = ALLOC_N(char, DTYPE_SIZES[dtype] * RARRAY_LEN(arg));
2680
+ init_val = NM_ALLOC_N(char, DTYPE_SIZES[dtype] * RARRAY_LEN(arg));
1969
2681
  NM_CHECK_ALLOC(init_val);
1970
2682
  for (index = 0; index < RARRAY_LEN(arg); ++index) {
1971
2683
  rubyval_to_cval(RARRAY_PTR(arg)[index], dtype, (char*)init_val + (index * DTYPE_SIZES[dtype]));
@@ -1976,6 +2688,7 @@ static void* interpret_initial_value(VALUE arg, nm::dtype_t dtype) {
1976
2688
  init_val = rubyobj_to_cval(arg, dtype);
1977
2689
  }
1978
2690
 
2691
+ NM_CONSERVATIVE(nm_unregister_value(arg));
1979
2692
  return init_val;
1980
2693
  }
1981
2694
 
@@ -1986,11 +2699,12 @@ static void* interpret_initial_value(VALUE arg, nm::dtype_t dtype) {
1986
2699
  * array describing the shape, which must be freed manually.
1987
2700
  */
1988
2701
  static size_t* interpret_shape(VALUE arg, size_t* dim) {
2702
+ NM_CONSERVATIVE(nm_register_value(arg));
1989
2703
  size_t* shape;
1990
2704
 
1991
2705
  if (TYPE(arg) == T_ARRAY) {
1992
2706
  *dim = RARRAY_LEN(arg);
1993
- shape = ALLOC_N(size_t, *dim);
2707
+ shape = NM_ALLOC_N(size_t, *dim);
1994
2708
 
1995
2709
  for (size_t index = 0; index < *dim; ++index) {
1996
2710
  shape[index] = FIX2UINT( RARRAY_PTR(arg)[index] );
@@ -1998,15 +2712,17 @@ static size_t* interpret_shape(VALUE arg, size_t* dim) {
1998
2712
 
1999
2713
  } else if (FIXNUM_P(arg)) {
2000
2714
  *dim = 2;
2001
- shape = ALLOC_N(size_t, *dim);
2715
+ shape = NM_ALLOC_N(size_t, *dim);
2002
2716
 
2003
2717
  shape[0] = FIX2UINT(arg);
2004
2718
  shape[1] = FIX2UINT(arg);
2005
2719
 
2006
2720
  } else {
2721
+ nm_unregister_value(arg);
2007
2722
  rb_raise(rb_eArgError, "Expected an array of numbers or a single Fixnum for matrix shape");
2008
2723
  }
2009
2724
 
2725
+ NM_CONSERVATIVE(nm_unregister_value(arg));
2010
2726
  return shape;
2011
2727
  }
2012
2728
 
@@ -2038,12 +2754,20 @@ STORAGE* matrix_storage_cast_alloc(NMATRIX* matrix, nm::dtype_t new_dtype) {
2038
2754
  }
2039
2755
 
2040
2756
  STORAGE_PAIR binary_storage_cast_alloc(NMATRIX* left_matrix, NMATRIX* right_matrix) {
2757
+ nm_register_nmatrix(left_matrix);
2758
+ nm_register_nmatrix(right_matrix);
2759
+
2041
2760
  STORAGE_PAIR casted;
2042
2761
  nm::dtype_t new_dtype = Upcast[left_matrix->storage->dtype][right_matrix->storage->dtype];
2043
2762
 
2044
2763
  casted.left = matrix_storage_cast_alloc(left_matrix, new_dtype);
2764
+ nm_register_storage(left_matrix->stype, casted.left);
2045
2765
  casted.right = matrix_storage_cast_alloc(right_matrix, new_dtype);
2046
2766
 
2767
+ nm_unregister_nmatrix(left_matrix);
2768
+ nm_unregister_nmatrix(right_matrix);
2769
+ nm_unregister_storage(left_matrix->stype, casted.left);
2770
+
2047
2771
  return casted;
2048
2772
  }
2049
2773
 
@@ -2053,12 +2777,16 @@ static VALUE matrix_multiply_scalar(NMATRIX* left, VALUE scalar) {
2053
2777
  }
2054
2778
 
2055
2779
  static VALUE matrix_multiply(NMATRIX* left, NMATRIX* right) {
2780
+ nm_register_nmatrix(left);
2781
+ nm_register_nmatrix(right);
2056
2782
  ///TODO: multiplication for non-dense and/or non-decimal matrices
2057
2783
 
2058
2784
  // Make sure both of our matrices are of the correct type.
2059
2785
  STORAGE_PAIR casted = binary_storage_cast_alloc(left, right);
2786
+ nm_register_storage(left->stype, casted.left);
2787
+ nm_register_storage(right->stype, casted.right);
2060
2788
 
2061
- size_t* resulting_shape = ALLOC_N(size_t, 2);
2789
+ size_t* resulting_shape = NM_ALLOC_N(size_t, 2);
2062
2790
  resulting_shape[0] = left->storage->shape[0];
2063
2791
  resulting_shape[1] = right->storage->shape[1];
2064
2792
 
@@ -2074,6 +2802,7 @@ static VALUE matrix_multiply(NMATRIX* left, NMATRIX* right) {
2074
2802
 
2075
2803
  STORAGE* resulting_storage = storage_matrix_multiply[left->stype](casted, resulting_shape, vector);
2076
2804
  NMATRIX* result = nm_create(left->stype, resulting_storage);
2805
+ nm_register_nmatrix(result);
2077
2806
 
2078
2807
  // Free any casted-storage we created for the multiplication.
2079
2808
  // TODO: Can we make the Ruby GC take care of this stuff now that we're using it?
@@ -2085,11 +2814,19 @@ static VALUE matrix_multiply(NMATRIX* left, NMATRIX* right) {
2085
2814
  nm_yale_storage_delete
2086
2815
  };
2087
2816
 
2817
+ nm_unregister_storage(left->stype, casted.left);
2088
2818
  if (left->storage != casted.left) free_storage[result->stype](casted.left);
2819
+
2820
+ nm_unregister_storage(right->stype, casted.right);
2089
2821
  if (right->storage != casted.right) free_storage[result->stype](casted.right);
2090
2822
 
2091
- if (result) return Data_Wrap_Struct(cNMatrix, nm_mark, nm_delete, result);
2092
- return Qnil; // Only if we try to multiply list matrices should we return Qnil.
2823
+ VALUE to_return = result ? Data_Wrap_Struct(cNMatrix, nm_mark, nm_delete, result) : Qnil; // Only if we try to multiply list matrices should we return Qnil.
2824
+
2825
+ nm_unregister_nmatrix(left);
2826
+ nm_unregister_nmatrix(right);
2827
+ nm_unregister_nmatrix(result);
2828
+
2829
+ return to_return;
2093
2830
  }
2094
2831
 
2095
2832
  /*
@@ -2100,15 +2837,31 @@ static VALUE matrix_multiply(NMATRIX* left, NMATRIX* right) {
2100
2837
  * Note: Currently only implemented for 2x2 and 3x3 matrices.
2101
2838
  */
2102
2839
  static VALUE nm_det_exact(VALUE self) {
2103
- if (NM_STYPE(self) != nm::DENSE_STORE) rb_raise(nm_eStorageTypeError, "can only calculate exact determinant for dense matrices");
2104
2840
 
2105
- if (NM_DIM(self) != 2 || NM_SHAPE0(self) != NM_SHAPE1(self)) return Qnil;
2841
+ if (NM_STYPE(self) != nm::DENSE_STORE) {
2842
+ rb_raise(nm_eStorageTypeError, "can only calculate exact determinant for dense matrices");
2843
+ }
2844
+ if (NM_DIM(self) != 2 || NM_SHAPE0(self) != NM_SHAPE1(self)) {
2845
+ return Qnil;
2846
+ }
2847
+
2848
+ NM_CONSERVATIVE(nm_register_value(self));
2106
2849
 
2107
2850
  // Calculate the determinant and then assign it to the return value
2108
- void* result = ALLOCA_N(char, DTYPE_SIZES[NM_DTYPE(self)]);
2851
+ void* result = NM_ALLOCA_N(char, DTYPE_SIZES[NM_DTYPE(self)]);
2852
+ nm::dtype_t dtype = NM_DTYPE(self);
2109
2853
  nm_math_det_exact(NM_SHAPE0(self), NM_STORAGE_DENSE(self)->elements, NM_SHAPE0(self), NM_DTYPE(self), result);
2110
2854
 
2111
- return rubyobj_from_cval(result, NM_DTYPE(self)).rval;
2855
+ if (dtype == nm::RUBYOBJ) {
2856
+ nm_register_values(reinterpret_cast<VALUE*>(result), 1);
2857
+ }
2858
+ VALUE to_return = rubyobj_from_cval(result, NM_DTYPE(self)).rval;
2859
+ if (dtype == nm::RUBYOBJ) {
2860
+ nm_unregister_values(reinterpret_cast<VALUE*>(result), 1);
2861
+ }
2862
+ NM_CONSERVATIVE(nm_unregister_value(self));
2863
+
2864
+ return to_return;
2112
2865
  }
2113
2866
 
2114
2867
  /////////////////
@@ -2127,6 +2880,11 @@ static VALUE nm_det_exact(VALUE self) {
2127
2880
  * TODO: Add a column-major option for libraries that use column-major matrices.
2128
2881
  */
2129
2882
  VALUE rb_nmatrix_dense_create(nm::dtype_t dtype, size_t* shape, size_t dim, void* elements, size_t length) {
2883
+
2884
+ if (dtype == nm::RUBYOBJ) {
2885
+ nm_register_values(reinterpret_cast<VALUE*>(elements), length);
2886
+ }
2887
+
2130
2888
  NMATRIX* nm;
2131
2889
  size_t nm_dim;
2132
2890
  size_t* shape_copy;
@@ -2134,25 +2892,34 @@ VALUE rb_nmatrix_dense_create(nm::dtype_t dtype, size_t* shape, size_t dim, void
2134
2892
  // Do not allow a dim of 1. Treat it as a column or row matrix.
2135
2893
  if (dim == 1) {
2136
2894
  nm_dim = 2;
2137
- shape_copy = ALLOC_N(size_t, nm_dim);
2895
+ shape_copy = NM_ALLOC_N(size_t, nm_dim);
2138
2896
  shape_copy[0] = shape[0];
2139
2897
  shape_copy[1] = 1;
2140
2898
 
2141
2899
  } else {
2142
2900
  nm_dim = dim;
2143
- shape_copy = ALLOC_N(size_t, nm_dim);
2901
+ shape_copy = NM_ALLOC_N(size_t, nm_dim);
2144
2902
  memcpy(shape_copy, shape, sizeof(size_t)*nm_dim);
2145
2903
  }
2146
2904
 
2147
2905
  // Copy elements
2148
- void* elements_copy = ALLOC_N(char, DTYPE_SIZES[dtype]*length);
2906
+ void* elements_copy = NM_ALLOC_N(char, DTYPE_SIZES[dtype]*length);
2149
2907
  memcpy(elements_copy, elements, DTYPE_SIZES[dtype]*length);
2150
2908
 
2151
2909
  // allocate and create the matrix and its storage
2152
2910
  nm = nm_create(nm::DENSE_STORE, nm_dense_storage_create(dtype, shape_copy, dim, elements_copy, length));
2153
2911
 
2912
+ nm_register_nmatrix(nm);
2913
+
2914
+ VALUE to_return = Data_Wrap_Struct(cNMatrix, nm_mark, nm_delete, nm);
2915
+
2916
+ nm_unregister_nmatrix(nm);
2917
+ if (dtype == nm::RUBYOBJ) {
2918
+ nm_unregister_values(reinterpret_cast<VALUE*>(elements), length);
2919
+ }
2920
+
2154
2921
  // tell Ruby about the matrix and its storage, particularly how to garbage collect it.
2155
- return Data_Wrap_Struct(cNMatrix, nm_mark, nm_delete, nm);
2922
+ return to_return;
2156
2923
  }
2157
2924
 
2158
2925
  /*