nmatrix 0.1.0.rc4 → 0.1.0.rc5

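Every hunk below applies the same mechanical change: the nmatrix GC-guard helpers (nm_register_value, nm_unregister_value, nm_completely_unregister_value) now take a VALUE* instead of a VALUE, so each call site passes the address of the variable being protected, presumably so the guard tracks the live slot rather than a copy of its contents. Call sites that previously dereferenced a stored pointer, such as nm_register_value(*reinterpret_cast<VALUE*>(p)), now read &*reinterpret_cast<VALUE*>(p), which is simply the pointer itself spelled to mirror the old expression. A minimal sketch of the new call pattern (the function below is hypothetical; only NM_CONSERVATIVE and the registration helpers come from the diff):

    /* Hypothetical example -- not gem code. Illustrates the rc5 pattern of
     * registering the address of a VALUE for the duration of a call. */
    static VALUE example_method(VALUE self) {
      NM_CONSERVATIVE(nm_register_value(&self));    /* register the slot's address */
      VALUE s = rb_funcall(self, rb_intern("inspect"), 0);
      NM_CONSERVATIVE(nm_unregister_value(&self));  /* unregister the same address */
      return s;
    }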
@@ -123,8 +123,8 @@ namespace nm { namespace dense_storage {
  */
  template <typename D>
  void set(VALUE left, SLICE* slice, VALUE right) {
- NM_CONSERVATIVE(nm_register_value(left));
- NM_CONSERVATIVE(nm_register_value(right));
+ NM_CONSERVATIVE(nm_register_value(&left));
+ NM_CONSERVATIVE(nm_register_value(&right));

  DENSE_STORAGE* s = NM_STORAGE_DENSE(left);

@@ -174,8 +174,8 @@ namespace nm { namespace dense_storage {
  nm_unregister_values(reinterpret_cast<VALUE*>(v), v_size);
  NM_FREE(v);
  }
- NM_CONSERVATIVE(nm_unregister_value(left));
- NM_CONSERVATIVE(nm_unregister_value(right));
+ NM_CONSERVATIVE(nm_unregister_value(&left));
+ NM_CONSERVATIVE(nm_unregister_value(&right));

  }

@@ -361,12 +361,12 @@ void nm_dense_storage_unregister(const STORAGE* s) {
  */
  VALUE nm_dense_map_pair(VALUE self, VALUE right) {

- NM_CONSERVATIVE(nm_register_value(self));
- NM_CONSERVATIVE(nm_register_value(right));
+ NM_CONSERVATIVE(nm_register_value(&self));
+ NM_CONSERVATIVE(nm_register_value(&right));

  RETURN_SIZED_ENUMERATOR_PRE
- NM_CONSERVATIVE(nm_unregister_value(right));
- NM_CONSERVATIVE(nm_unregister_value(self));
+ NM_CONSERVATIVE(nm_unregister_value(&right));
+ NM_CONSERVATIVE(nm_unregister_value(&self));
  RETURN_SIZED_ENUMERATOR(self, 0, 0, nm_enumerator_length);

  DENSE_STORAGE *s = NM_STORAGE_DENSE(self),
@@ -391,10 +391,10 @@ VALUE nm_dense_map_pair(VALUE self, VALUE right) {
  t_index = nm_dense_storage_pos(t, coords);

  VALUE sval = NM_DTYPE(self) == nm::RUBYOBJ ? reinterpret_cast<VALUE*>(s->elements)[s_index] : rubyobj_from_cval((char*)(s->elements) + s_index*DTYPE_SIZES[NM_DTYPE(self)], NM_DTYPE(self)).rval;
- nm_register_value(sval);
+ nm_register_value(&sval);
  VALUE tval = NM_DTYPE(right) == nm::RUBYOBJ ? reinterpret_cast<VALUE*>(t->elements)[t_index] : rubyobj_from_cval((char*)(t->elements) + t_index*DTYPE_SIZES[NM_DTYPE(right)], NM_DTYPE(right)).rval;
  result_elem[k] = rb_yield_values(2, sval, tval);
- nm_unregister_value(sval);
+ nm_unregister_value(&sval);
  }

  VALUE klass = CLASS_OF(self);
@@ -404,8 +404,8 @@ VALUE nm_dense_map_pair(VALUE self, VALUE right) {

  nm_unregister_nmatrix(m);
  nm_dense_storage_unregister(result);
- NM_CONSERVATIVE(nm_unregister_value(self));
- NM_CONSERVATIVE(nm_unregister_value(right));
+ NM_CONSERVATIVE(nm_unregister_value(&self));
+ NM_CONSERVATIVE(nm_unregister_value(&right));

  return to_return;

@@ -416,10 +416,10 @@ VALUE nm_dense_map_pair(VALUE self, VALUE right) {
  */
  VALUE nm_dense_map(VALUE self) {

- NM_CONSERVATIVE(nm_register_value(self));
+ NM_CONSERVATIVE(nm_register_value(&self));

  RETURN_SIZED_ENUMERATOR_PRE
- NM_CONSERVATIVE(nm_unregister_value(self));
+ NM_CONSERVATIVE(nm_unregister_value(&self));
  RETURN_SIZED_ENUMERATOR(self, 0, 0, nm_enumerator_length);

  DENSE_STORAGE *s = NM_STORAGE_DENSE(self);
@@ -454,7 +454,7 @@ VALUE nm_dense_map(VALUE self) {

  nm_unregister_nmatrix(m);
  nm_dense_storage_unregister(result);
- NM_CONSERVATIVE(nm_unregister_value(self));
+ NM_CONSERVATIVE(nm_unregister_value(&self));

  return to_return;
  }
@@ -465,10 +465,10 @@ VALUE nm_dense_map(VALUE self) {
  */
  VALUE nm_dense_each_with_indices(VALUE nmatrix) {

- NM_CONSERVATIVE(nm_register_value(nmatrix));
+ NM_CONSERVATIVE(nm_register_value(&nmatrix));

  RETURN_SIZED_ENUMERATOR_PRE
- NM_CONSERVATIVE(nm_unregister_value(nmatrix));
+ NM_CONSERVATIVE(nm_unregister_value(&nmatrix));
  RETURN_SIZED_ENUMERATOR(nmatrix, 0, 0, nm_enumerator_length); // fourth argument only used by Ruby2+
  DENSE_STORAGE* s = NM_STORAGE_DENSE(nmatrix);

@@ -486,7 +486,7 @@ VALUE nm_dense_each_with_indices(VALUE nmatrix) {
  nm_dense_storage_coords(sliced_dummy, k, coords);
  slice_index = nm_dense_storage_pos(s, coords);
  VALUE ary = rb_ary_new();
- nm_register_value(ary);
+ nm_register_value(&ary);
  if (NM_DTYPE(nmatrix) == nm::RUBYOBJ) rb_ary_push(ary, reinterpret_cast<VALUE*>(s->elements)[slice_index]);
  else rb_ary_push(ary, rubyobj_from_cval((char*)(s->elements) + slice_index*DTYPE_SIZES[NM_DTYPE(nmatrix)], NM_DTYPE(nmatrix)).rval);

@@ -496,12 +496,12 @@ VALUE nm_dense_each_with_indices(VALUE nmatrix) {

  // yield the array which now consists of the value and the indices
  rb_yield(ary);
- nm_unregister_value(ary);
+ nm_unregister_value(&ary);
  }

  nm_dense_storage_delete(sliced_dummy);

- NM_CONSERVATIVE(nm_unregister_value(nmatrix));
+ NM_CONSERVATIVE(nm_unregister_value(&nmatrix));

  return nmatrix;

@@ -517,10 +517,10 @@ VALUE nm_dense_each_with_indices(VALUE nmatrix) {
  */
  VALUE nm_dense_each(VALUE nmatrix) {

- NM_CONSERVATIVE(nm_register_value(nmatrix));
+ NM_CONSERVATIVE(nm_register_value(&nmatrix));

  RETURN_SIZED_ENUMERATOR_PRE
- NM_CONSERVATIVE(nm_unregister_value(nmatrix));
+ NM_CONSERVATIVE(nm_unregister_value(&nmatrix));
  RETURN_SIZED_ENUMERATOR(nmatrix, 0, 0, nm_enumerator_length);

  DENSE_STORAGE* s = NM_STORAGE_DENSE(nmatrix);
@@ -553,7 +553,7 @@ VALUE nm_dense_each(VALUE nmatrix) {
  }

  nm_dense_storage_delete(sliced_dummy);
- NM_CONSERVATIVE(nm_unregister_value(nmatrix));
+ NM_CONSERVATIVE(nm_unregister_value(&nmatrix));

  return nmatrix;

@@ -874,26 +874,26 @@ namespace nm {
  * Otherwise, the NMATRIX* still belongs to Ruby and Ruby will free it.
  */
  std::pair<NMATRIX*,bool> interpret_arg_as_dense_nmatrix(VALUE right, nm::dtype_t dtype) {
- NM_CONSERVATIVE(nm_register_value(right));
+ NM_CONSERVATIVE(nm_register_value(&right));
  if (TYPE(right) == T_DATA && (RDATA(right)->dfree == (RUBY_DATA_FUNC)nm_delete || RDATA(right)->dfree == (RUBY_DATA_FUNC)nm_delete_ref)) {
  NMATRIX *r;
  if (NM_STYPE(right) != DENSE_STORE || NM_DTYPE(right) != dtype || NM_SRC(right) != NM_STORAGE(right)) {
  UnwrapNMatrix( right, r );
  NMATRIX* ldtype_r = nm_cast_with_ctype_args(r, nm::DENSE_STORE, dtype, NULL);
- NM_CONSERVATIVE(nm_unregister_value(right));
+ NM_CONSERVATIVE(nm_unregister_value(&right));
  return std::make_pair(ldtype_r,true);
  } else { // simple case -- right-hand matrix is dense and is not a reference and has same dtype
  UnwrapNMatrix( right, r );
- NM_CONSERVATIVE(nm_unregister_value(right));
+ NM_CONSERVATIVE(nm_unregister_value(&right));
  return std::make_pair(r, false);
  }
  // Do not set v_alloc = true for either of these. It is the responsibility of r/ldtype_r
  } else if (TYPE(right) == T_DATA) {
- NM_CONSERVATIVE(nm_unregister_value(right));
+ NM_CONSERVATIVE(nm_unregister_value(&right));
  rb_raise(rb_eTypeError, "unrecognized type for slice assignment");
  }

- NM_CONSERVATIVE(nm_unregister_value(right));
+ NM_CONSERVATIVE(nm_unregister_value(&right));
  return std::make_pair<NMATRIX*,bool>(NULL, false);
  }

@@ -87,11 +87,11 @@ public:
  if (init_obj_ == Qnil) {
  init_obj_ = s->dtype == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(s->default_val) : rubyobj_from_cval(s->default_val, s->dtype).rval;
  }
- nm_register_value(init_obj_);
+ nm_register_value(&init_obj_);
  }

  ~RecurseData() {
- nm_unregister_value(init_obj_);
+ nm_unregister_value(&init_obj_);
  nm_list_storage_unregister(ref);
  nm_list_storage_unregister(actual);
  }
@@ -198,14 +198,14 @@ static void map_empty_stored_r(RecurseData& result, RecurseData& s, LIST* x, con
  if (rev) val = rb_yield_values(2, t_init, s_val);
  else val = rb_yield_values(2, s_val, t_init);

- nm_register_value(val);
+ nm_register_value(&val);

  if (rb_funcall(val, rb_intern("!="), 1, result.init_obj()) == Qtrue) {
  xcurr = nm::list::insert_helper(x, xcurr, curr->key - offset, val);
  temp_vals.push_front(reinterpret_cast<VALUE*>(xcurr->val));
- nm_register_value(*reinterpret_cast<VALUE*>(xcurr->val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(xcurr->val));
  }
- nm_unregister_value(val);
+ nm_unregister_value(&val);

  curr = curr->next;
  if (curr && curr->key - offset >= x_shape) curr = NULL;
@@ -272,7 +272,7 @@ static void map_stored_r(RecurseData& result, RecurseData& left, LIST* x, const
  if (!rb_equal(val, result.init_obj())) {
  xcurr = nm::list::insert_helper(x, xcurr, key, val);
  temp_vals.push_front(reinterpret_cast<VALUE*>(xcurr->val));
- nm_register_value(*reinterpret_cast<VALUE*>(xcurr->val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(xcurr->val));
  }

  if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec)) lcurr = NULL;
@@ -369,15 +369,15 @@ static void map_merged_stored_r(RecurseData& result, RecurseData& left, RecurseD
  rcurr = rcurr->next;
  }

- nm_register_value(val);
+ nm_register_value(&val);

  if (rb_funcall(val, rb_intern("!="), 1, result.init_obj()) == Qtrue) {
  xcurr = nm::list::insert_helper(x, xcurr, key, val);
  temp_vals.push_front(reinterpret_cast<VALUE*>(xcurr->val));
- nm_register_value(*reinterpret_cast<VALUE*>(xcurr->val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(xcurr->val));
  }

- nm_unregister_value(val);
+ nm_unregister_value(&val);

  if (rcurr && rcurr->key - right.offset(rec) >= result.ref_shape(rec)) rcurr = NULL;
  if (lcurr && lcurr->key - left.offset(rec) >= result.ref_shape(rec)) lcurr = NULL;
@@ -495,7 +495,7 @@ static bool slice_set(LIST_STORAGE* dest, LIST* l, size_t* coords, size_t* lengt
  } else if (node->key > key) {
  D* nv = NM_ALLOC(D); *nv = v[v_offset++];
  if (dest->dtype == nm::RUBYOBJ) {
- nm_register_value(*reinterpret_cast<VALUE*>(nv));
+ nm_register_value(&*reinterpret_cast<VALUE*>(nv));
  temp_vals.push_front(reinterpret_cast<VALUE*>(nv));
  }

@@ -508,7 +508,7 @@ static bool slice_set(LIST_STORAGE* dest, LIST* l, size_t* coords, size_t* lengt
  } else { // no node -- insert a new one
  D* nv = NM_ALLOC(D); *nv = v[v_offset++];
  if (dest->dtype == nm::RUBYOBJ) {
- nm_register_value(*reinterpret_cast<VALUE*>(nv));
+ nm_register_value(&*reinterpret_cast<VALUE*>(nv));
  temp_vals.push_front(reinterpret_cast<VALUE*>(nv));
  }
  if (prev) node = insert_after(prev, key, nv);
@@ -535,8 +535,8 @@ static bool slice_set(LIST_STORAGE* dest, LIST* l, size_t* coords, size_t* lengt

  template <typename D>
  void set(VALUE left, SLICE* slice, VALUE right) {
- NM_CONSERVATIVE(nm_register_value(left));
- NM_CONSERVATIVE(nm_register_value(right));
+ NM_CONSERVATIVE(nm_register_value(&left));
+ NM_CONSERVATIVE(nm_register_value(&right));
  LIST_STORAGE* s = NM_STORAGE_LIST(left);

  std::pair<NMATRIX*,bool> nm_and_free =
@@ -590,8 +590,8 @@ void set(VALUE left, SLICE* slice, VALUE right) {
  NM_FREE(v);
  nm_unregister_nmatrix(nm_and_free.first);
  }
- NM_CONSERVATIVE(nm_unregister_value(left));
- NM_CONSERVATIVE(nm_unregister_value(right));
+ NM_CONSERVATIVE(nm_unregister_value(&left));
+ NM_CONSERVATIVE(nm_unregister_value(&right));
  }

  /*
@@ -693,7 +693,7 @@ void nm_list_storage_mark(STORAGE* storage_base) {

  static void __nm_list_storage_unregister_temp_value_list(std::list<VALUE*>& temp_vals) {
  for (std::list<VALUE*>::iterator it = temp_vals.begin(); it != temp_vals.end(); ++it) {
- nm_unregister_value(**it);
+ nm_unregister_value(&**it);
  }
  }

@@ -704,11 +704,11 @@ static void __nm_list_storage_unregister_temp_list_list(std::list<LIST*>& temp_v
  }

  void nm_list_storage_register_node(const NODE* curr) {
- nm_register_value(*reinterpret_cast<VALUE*>(curr->val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(curr->val));
  }

  void nm_list_storage_unregister_node(const NODE* curr) {
- nm_unregister_value(*reinterpret_cast<VALUE*>(curr->val));
+ nm_unregister_value(&*reinterpret_cast<VALUE*>(curr->val));
  }

  /**
@@ -719,7 +719,7 @@ void nm_list_storage_unregister_node(const NODE* curr) {
  * node so that this won't happen.
  */
  void nm_list_storage_completely_unregister_node(const NODE* curr) {
- nm_completely_unregister_value(*reinterpret_cast<VALUE*>(curr->val));
+ nm_completely_unregister_value(&*reinterpret_cast<VALUE*>(curr->val));
  }

  void nm_list_storage_register_list(const LIST* list, size_t recursions) {
@@ -757,7 +757,7 @@ void nm_list_storage_unregister_list(const LIST* list, size_t recursions) {
  void nm_list_storage_register(const STORAGE* s) {
  const LIST_STORAGE* storage = reinterpret_cast<const LIST_STORAGE*>(s);
  if (storage && storage->dtype == nm::RUBYOBJ) {
- nm_register_value(*reinterpret_cast<VALUE*>(storage->default_val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(storage->default_val));
  nm_list_storage_register_list(storage->rows, storage->dim - 1);
  }
  }
@@ -765,7 +765,7 @@ void nm_list_storage_register(const STORAGE* s) {
  void nm_list_storage_unregister(const STORAGE* s) {
  const LIST_STORAGE* storage = reinterpret_cast<const LIST_STORAGE*>(s);
  if (storage && storage->dtype == nm::RUBYOBJ) {
- nm_unregister_value(*reinterpret_cast<VALUE*>(storage->default_val));
+ nm_unregister_value(&*reinterpret_cast<VALUE*>(storage->default_val));
  nm_list_storage_unregister_list(storage->rows, storage->dim - 1);
  }
  }
@@ -798,7 +798,7 @@ static NODE* list_storage_get_single_node(LIST_STORAGE* s, SLICE* slice) {
  */
  static void each_empty_with_indices_r(nm::list_storage::RecurseData& s, size_t rec, VALUE& stack) {
  VALUE empty = s.dtype() == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(s.init()) : s.init_obj();
- NM_CONSERVATIVE(nm_register_value(stack));
+ NM_CONSERVATIVE(nm_register_value(&stack));

  if (rec) {
  for (unsigned long index = 0; index < s.ref_shape(rec); ++index) {
@@ -816,7 +816,7 @@ static void each_empty_with_indices_r(nm::list_storage::RecurseData& s, size_t r
  }
  rb_ary_shift(stack);
  }
- NM_CONSERVATIVE(nm_unregister_value(stack));
+ NM_CONSERVATIVE(nm_unregister_value(&stack));
  }

  /*
@@ -825,7 +825,7 @@ static void each_empty_with_indices_r(nm::list_storage::RecurseData& s, size_t r
  static void each_with_indices_r(nm::list_storage::RecurseData& s, const LIST* l, size_t rec, VALUE& stack) {
  if (s.dtype() == nm::RUBYOBJ)
  nm_list_storage_register_list(l, rec);
- NM_CONSERVATIVE(nm_register_value(stack));
+ NM_CONSERVATIVE(nm_register_value(&stack));
  NODE* curr = l->first;

  size_t offset = s.offset(rec);
@@ -865,7 +865,7 @@ static void each_with_indices_r(nm::list_storage::RecurseData& s, const LIST* l,
  rb_ary_pop(stack);
  }
  }
- NM_CONSERVATIVE(nm_unregister_value(stack));
+ NM_CONSERVATIVE(nm_unregister_value(&stack));
  if (s.dtype() == nm::RUBYOBJ)
  nm_list_storage_unregister_list(l, rec);
  }
@@ -877,7 +877,7 @@ static void each_with_indices_r(nm::list_storage::RecurseData& s, const LIST* l,
  static void each_stored_with_indices_r(nm::list_storage::RecurseData& s, const LIST* l, size_t rec, VALUE& stack) {
  if (s.dtype() == nm::RUBYOBJ)
  nm_list_storage_register_list(l, rec);
- NM_CONSERVATIVE(nm_register_value(stack));
+ NM_CONSERVATIVE(nm_register_value(&stack));

  NODE* curr = l->first;

@@ -916,7 +916,7 @@ static void each_stored_with_indices_r(nm::list_storage::RecurseData& s, const L
  if (curr && curr->key - offset >= shape) curr = NULL;
  }
  }
- NM_CONSERVATIVE(nm_unregister_value(stack));
+ NM_CONSERVATIVE(nm_unregister_value(&stack));
  if (s.dtype() == nm::RUBYOBJ)
  nm_list_storage_unregister_list(l, rec);
  }
@@ -927,11 +927,11 @@ static void each_stored_with_indices_r(nm::list_storage::RecurseData& s, const L
  */
  VALUE nm_list_each_with_indices(VALUE nmatrix, bool stored) {

- NM_CONSERVATIVE(nm_register_value(nmatrix));
+ NM_CONSERVATIVE(nm_register_value(&nmatrix));

  // If we don't have a block, return an enumerator.
  RETURN_SIZED_ENUMERATOR_PRE
- NM_CONSERVATIVE(nm_unregister_value(nmatrix));
+ NM_CONSERVATIVE(nm_unregister_value(&nmatrix));
  RETURN_SIZED_ENUMERATOR(nmatrix, 0, 0, 0);

  nm::list_storage::RecurseData sdata(NM_STORAGE_LIST(nmatrix));
@@ -941,7 +941,7 @@ VALUE nm_list_each_with_indices(VALUE nmatrix, bool stored) {
  if (stored) each_stored_with_indices_r(sdata, sdata.top_level_list(), sdata.dim() - 1, stack);
  else each_with_indices_r(sdata, sdata.top_level_list(), sdata.dim() - 1, stack);

- NM_CONSERVATIVE(nm_unregister_value(nmatrix));
+ NM_CONSERVATIVE(nm_unregister_value(&nmatrix));
  return nmatrix;
  }

@@ -951,8 +951,8 @@ VALUE nm_list_each_with_indices(VALUE nmatrix, bool stored) {
  * which probably needs to be casted.
  */
  VALUE nm_list_map_stored(VALUE left, VALUE init) {
- NM_CONSERVATIVE(nm_register_value(left));
- NM_CONSERVATIVE(nm_register_value(init));
+ NM_CONSERVATIVE(nm_register_value(&left));
+ NM_CONSERVATIVE(nm_register_value(&init));

  LIST_STORAGE *s = NM_STORAGE_LIST(left);

@@ -965,20 +965,20 @@ VALUE nm_list_map_stored(VALUE left, VALUE init) {
  //}
  // If we don't have a block, return an enumerator.
  RETURN_SIZED_ENUMERATOR_PRE
- NM_CONSERVATIVE(nm_unregister_value(left));
- NM_CONSERVATIVE(nm_unregister_value(init));
+ NM_CONSERVATIVE(nm_unregister_value(&left));
+ NM_CONSERVATIVE(nm_unregister_value(&init));
  RETURN_SIZED_ENUMERATOR(left, 0, 0, 0); // FIXME: Test this. Probably won't work. Enable above code instead.

  // Figure out default value if none provided by the user
  if (init == Qnil) {
- nm_unregister_value(init);
+ nm_unregister_value(&init);
  init = rb_yield_values(1, sdata.init_obj());
- nm_register_value(init);
+ nm_register_value(&init);
  }
  // Allocate a new shape array for the resulting matrix.
  void* init_val = NM_ALLOC(VALUE);
  memcpy(init_val, &init, sizeof(VALUE));
- nm_register_value(*reinterpret_cast<VALUE*>(init_val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(init_val));

  NMATRIX* result = nm_create(nm::LIST_STORE, nm_list_storage_create(nm::RUBYOBJ, sdata.copy_alloc_shape(), s->dim, init_val));
  LIST_STORAGE* r = reinterpret_cast<LIST_STORAGE*>(result->storage);
@@ -989,9 +989,9 @@ VALUE nm_list_map_stored(VALUE left, VALUE init) {
  VALUE to_return = Data_Wrap_Struct(CLASS_OF(left), nm_mark, nm_delete, result);

  nm_unregister_nmatrix(result);
- nm_unregister_value(*reinterpret_cast<VALUE*>(init_val));
- NM_CONSERVATIVE(nm_unregister_value(init));
- NM_CONSERVATIVE(nm_unregister_value(left));
+ nm_unregister_value(&*reinterpret_cast<VALUE*>(init_val));
+ NM_CONSERVATIVE(nm_unregister_value(&init));
+ NM_CONSERVATIVE(nm_unregister_value(&left));

  return to_return;
  }
@@ -1001,9 +1001,9 @@ VALUE nm_list_map_stored(VALUE left, VALUE init) {
  * map merged stored iterator. Always returns a matrix containing RubyObjects which probably needs to be casted.
  */
  VALUE nm_list_map_merged_stored(VALUE left, VALUE right, VALUE init) {
- NM_CONSERVATIVE(nm_register_value(left));
- NM_CONSERVATIVE(nm_register_value(right));
- NM_CONSERVATIVE(nm_register_value(init));
+ NM_CONSERVATIVE(nm_register_value(&left));
+ NM_CONSERVATIVE(nm_register_value(&right));
+ NM_CONSERVATIVE(nm_register_value(&init));

  bool scalar = false;

@@ -1031,23 +1031,23 @@ VALUE nm_list_map_merged_stored(VALUE left, VALUE right, VALUE init) {
  //}
  // If we don't have a block, return an enumerator.
  RETURN_SIZED_ENUMERATOR_PRE
- NM_CONSERVATIVE(nm_unregister_value(left));
- NM_CONSERVATIVE(nm_unregister_value(right));
- NM_CONSERVATIVE(nm_unregister_value(init));
+ NM_CONSERVATIVE(nm_unregister_value(&left));
+ NM_CONSERVATIVE(nm_unregister_value(&right));
+ NM_CONSERVATIVE(nm_unregister_value(&init));
  RETURN_SIZED_ENUMERATOR(left, 0, 0, 0); // FIXME: Test this. Probably won't work. Enable above code instead.

  // Figure out default value if none provided by the user
  nm::list_storage::RecurseData& tdata = *(new nm::list_storage::RecurseData(t)); //FIXME: this is a hack to make sure that we can run the destructor before nm_list_storage_delete(t) below.
  if (init == Qnil) {
- nm_unregister_value(init);
+ nm_unregister_value(&init);
  init = rb_yield_values(2, sdata.init_obj(), tdata.init_obj());
- nm_register_value(init);
+ nm_register_value(&init);
  }

  // Allocate a new shape array for the resulting matrix.
  void* init_val = NM_ALLOC(VALUE);
  memcpy(init_val, &init, sizeof(VALUE));
- nm_register_value(*reinterpret_cast<VALUE*>(init_val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(init_val));

  NMATRIX* result = nm_create(nm::LIST_STORE, nm_list_storage_create(nm::RUBYOBJ, sdata.copy_alloc_shape(), s->dim, init_val));
  LIST_STORAGE* r = reinterpret_cast<LIST_STORAGE*>(result->storage);
@@ -1060,11 +1060,11 @@ VALUE nm_list_map_merged_stored(VALUE left, VALUE right, VALUE init) {

  VALUE to_return = Data_Wrap_Struct(CLASS_OF(left), nm_mark, nm_delete, result);

- nm_unregister_value(*reinterpret_cast<VALUE*>(init_val));
+ nm_unregister_value(&*reinterpret_cast<VALUE*>(init_val));

- NM_CONSERVATIVE(nm_unregister_value(init));
- NM_CONSERVATIVE(nm_unregister_value(right));
- NM_CONSERVATIVE(nm_unregister_value(left));
+ NM_CONSERVATIVE(nm_unregister_value(&init));
+ NM_CONSERVATIVE(nm_unregister_value(&right));
+ NM_CONSERVATIVE(nm_unregister_value(&left));

  return to_return;
  }
@@ -1101,7 +1101,7 @@ static LIST* slice_copy(const LIST_STORAGE* src, LIST* src_rows, size_t* coords,
  }
  } else { // matches src->dim - n > 1
  if (src->dtype == nm::RUBYOBJ) {
- nm_register_value(*reinterpret_cast<VALUE*>(src_node->val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(src_node->val));
  temp_vals.push_front(reinterpret_cast<VALUE*>(src_node->val));
  }
  nm::list::insert_copy(dst_rows, false, key, src_node->val, DTYPE_SIZES[src->dtype]);
@@ -1134,7 +1134,7 @@ void* nm_list_storage_get(const STORAGE* storage, SLICE* slice) {
  void *init_val = NM_ALLOC_N(char, DTYPE_SIZES[s->dtype]);
  memcpy(init_val, s->default_val, DTYPE_SIZES[s->dtype]);
  if (s->dtype == nm::RUBYOBJ)
- nm_register_value(*reinterpret_cast<VALUE*>(init_val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(init_val));

  size_t *shape = NM_ALLOC_N(size_t, s->dim);
  memcpy(shape, slice->lengths, sizeof(size_t) * s->dim);
@@ -1144,7 +1144,7 @@ void* nm_list_storage_get(const STORAGE* storage, SLICE* slice) {
  ns->rows = slice_copy(s, s->rows, slice->coords, slice->lengths, 0);

  if (s->dtype == nm::RUBYOBJ) {
- nm_unregister_value(*reinterpret_cast<VALUE*>(init_val));
+ nm_unregister_value(&*reinterpret_cast<VALUE*>(init_val));
  }

  nm_list_storage_unregister(s);
@@ -1197,7 +1197,7 @@ void* nm_list_storage_ref(const STORAGE* storage, SLICE* slice) {
  static void slice_set_single(LIST_STORAGE* dest, LIST* l, void* val, size_t* coords, size_t* lengths, size_t n) {
  nm_list_storage_register(dest);
  if (dest->dtype == nm::RUBYOBJ) {
- nm_register_value(*reinterpret_cast<VALUE*>(val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(val));
  nm_list_storage_register_list(l, dest->dim - n - 1);
  }

@@ -1240,7 +1240,7 @@ static void slice_set_single(LIST_STORAGE* dest, LIST* l, void* val, size_t* coo
  }
  if (dest->dtype == nm::RUBYOBJ) {
  temp_vals.push_front(reinterpret_cast<VALUE*>(node->val));
- nm_register_value(*reinterpret_cast<VALUE*>(node->val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(node->val));
  }
  }
  __nm_list_storage_unregister_temp_value_list(temp_vals);
@@ -1248,7 +1248,7 @@ static void slice_set_single(LIST_STORAGE* dest, LIST* l, void* val, size_t* coo

  nm_list_storage_unregister(dest);
  if (dest->dtype == nm::RUBYOBJ) {
- nm_unregister_value(*reinterpret_cast<VALUE*>(val));
+ nm_unregister_value(&*reinterpret_cast<VALUE*>(val));
  nm_list_storage_unregister_list(l, dest->dim - n - 1);
  }
  }
@@ -1275,7 +1275,7 @@ NODE* nm_list_storage_insert(STORAGE* storage, SLICE* slice, void* val) {
  LIST_STORAGE* s = (LIST_STORAGE*)storage;
  nm_list_storage_register(s);
  if (s->dtype == nm::RUBYOBJ)
- nm_register_value(*reinterpret_cast<VALUE*>(val));
+ nm_register_value(&*reinterpret_cast<VALUE*>(val));
  // Pretend dims = 2
  // Then coords is going to be size 2
  // So we need to find out if some key already exists
@@ -1291,7 +1291,7 @@ NODE* nm_list_storage_insert(STORAGE* storage, SLICE* slice, void* val) {

  nm_list_storage_unregister(s);
  if (s->dtype == nm::RUBYOBJ)
- nm_unregister_value(*reinterpret_cast<VALUE*>(val));
+ nm_unregister_value(&*reinterpret_cast<VALUE*>(val));

  return nm::list::insert(l, true, s->offset[r] + slice->coords[r], val);
  }
@@ -1620,9 +1620,9 @@ extern "C" {
  * Get the default_value property from a list matrix.
  */
  VALUE nm_list_default_value(VALUE self) {
- NM_CONSERVATIVE(nm_register_value(self));
+ NM_CONSERVATIVE(nm_register_value(&self));
  VALUE to_return = (NM_DTYPE(self) == nm::RUBYOBJ) ? *reinterpret_cast<VALUE*>(NM_DEFAULT_VAL(self)) : rubyobj_from_cval(NM_DEFAULT_VAL(self), NM_DTYPE(self)).rval;
- NM_CONSERVATIVE(nm_unregister_value(self));
+ NM_CONSERVATIVE(nm_unregister_value(&self));
  return to_return;
  }
  } // end of extern "C" block