nmatrix 0.1.0.rc3 → 0.1.0.rc4

Files changed (49)
  1. checksums.yaml +4 -4
  2. data/CONTRIBUTING.md +22 -21
  3. data/History.txt +13 -0
  4. data/Manifest.txt +1 -2
  5. data/README.rdoc +8 -8
  6. data/ext/nmatrix/binary_format.txt +1 -1
  7. data/ext/nmatrix/data/complex.h +21 -21
  8. data/ext/nmatrix/data/data.cpp +9 -2
  9. data/ext/nmatrix/data/data.h +4 -2
  10. data/ext/nmatrix/math.cpp +69 -31
  11. data/ext/nmatrix/math/getf2.h +2 -2
  12. data/ext/nmatrix/math/getrf.h +2 -2
  13. data/ext/nmatrix/math/imax.h +101 -0
  14. data/ext/nmatrix/math/scal.h +30 -10
  15. data/ext/nmatrix/math/swap.h +1 -22
  16. data/ext/nmatrix/nm_memory.h +1 -1
  17. data/ext/nmatrix/nmatrix.h +2 -2
  18. data/ext/nmatrix/ruby_constants.cpp +1 -2
  19. data/ext/nmatrix/ruby_constants.h +6 -7
  20. data/ext/nmatrix/ruby_nmatrix.c +23 -18
  21. data/ext/nmatrix/storage/list/list.cpp +48 -47
  22. data/ext/nmatrix/util/io.cpp +2 -2
  23. data/lib/nmatrix.rb +0 -1
  24. data/lib/nmatrix/enumerate.rb +1 -1
  25. data/lib/nmatrix/io/market.rb +1 -1
  26. data/lib/nmatrix/io/mat_reader.rb +41 -41
  27. data/lib/nmatrix/lapack.rb +0 -1
  28. data/lib/nmatrix/math.rb +43 -0
  29. data/lib/nmatrix/nmatrix.rb +5 -1
  30. data/lib/nmatrix/version.rb +1 -1
  31. data/nmatrix.gemspec +3 -4
  32. data/spec/00_nmatrix_spec.rb +13 -6
  33. data/spec/01_enum_spec.rb +17 -25
  34. data/spec/02_slice_spec.rb +74 -82
  35. data/spec/blas_spec.rb +21 -6
  36. data/spec/elementwise_spec.rb +1 -6
  37. data/spec/io_spec.rb +15 -22
  38. data/spec/lapack_spec.rb +1 -6
  39. data/spec/leakcheck.rb +1 -1
  40. data/spec/math_spec.rb +43 -4
  41. data/spec/nmatrix_yale_spec.rb +1 -4
  42. data/spec/rspec_spec.rb +1 -1
  43. data/spec/shortcuts_spec.rb +1 -6
  44. data/spec/slice_set_spec.rb +1 -5
  45. data/spec/stat_spec.rb +46 -51
  46. metadata +32 -22
  47. data/Guardfile +0 -6
  48. data/ext/nmatrix/math/idamax.h +0 -86
  49. data/lib/nmatrix/nvector.rb +0 -184
data/ext/nmatrix/storage/list/list.cpp CHANGED
@@ -183,7 +183,7 @@ static void map_empty_stored_r(RecurseData& result, RecurseData& s, LIST* x, con
  nm_list_storage_register_list(val, rec-1);
  temp_vals.push_front(val);
  nm::list::insert_helper(x, xcurr, curr->key - offset, val);
- }
+ }
  curr = curr->next;
  if (curr && curr->key - offset >= x_shape) curr = NULL;
  }
@@ -191,7 +191,10 @@ static void map_empty_stored_r(RecurseData& result, RecurseData& s, LIST* x, con
  } else {
  std::list<VALUE*> temp_vals;
  while (curr) {
- VALUE val, s_val = rubyobj_from_cval(curr->val, s.dtype()).rval;
+ VALUE val, s_val;
+ if (s.dtype() == nm::RUBYOBJ) s_val = (*reinterpret_cast<nm::RubyObject*>(curr->val)).rval;
+ else s_val = rubyobj_from_cval(curr->val, s.dtype()).rval;
+
  if (rev) val = rb_yield_values(2, t_init, s_val);
  else val = rb_yield_values(2, s_val, t_init);
 
@@ -262,7 +265,7 @@ static void map_stored_r(RecurseData& result, RecurseData& left, LIST* x, const
  size_t key;
  VALUE val;
 
- val = rb_yield_values(1, rubyobj_from_cval(lcurr->val, left.dtype()).rval);
+ val = rb_yield_values(1, left.dtype() == nm::RUBYOBJ ? *reinterpret_cast<VALUE*>(lcurr->val) : rubyobj_from_cval(lcurr->val, left.dtype()).rval);
  key = lcurr->key - left.offset(rec);
  lcurr = lcurr->next;
 
@@ -535,7 +538,7 @@ void set(VALUE left, SLICE* slice, VALUE right) {
  NM_CONSERVATIVE(nm_register_value(left));
  NM_CONSERVATIVE(nm_register_value(right));
  LIST_STORAGE* s = NM_STORAGE_LIST(left);
-
+
  std::pair<NMATRIX*,bool> nm_and_free =
  interpret_arg_as_dense_nmatrix(right, NM_DTYPE(left));
 
@@ -701,11 +704,11 @@ static void __nm_list_storage_unregister_temp_list_list(std::list<LIST*>& temp_v
  }
 
  void nm_list_storage_register_node(const NODE* curr) {
- nm_register_value(*reinterpret_cast<VALUE*>(curr->val));
+ nm_register_value(*reinterpret_cast<VALUE*>(curr->val));
  }
 
  void nm_list_storage_unregister_node(const NODE* curr) {
- nm_unregister_value(*reinterpret_cast<VALUE*>(curr->val));
+ nm_unregister_value(*reinterpret_cast<VALUE*>(curr->val));
  }
 
  /**
@@ -775,13 +778,13 @@ void nm_list_storage_unregister(const STORAGE* s) {
  * Documentation goes here.
  */
  static NODE* list_storage_get_single_node(LIST_STORAGE* s, SLICE* slice) {
- size_t r;
- LIST* l = s->rows;
- NODE* n;
+ LIST* l = s->rows;
+ NODE* n;
 
- for (r = 0; r < s->dim; r++) {
+ for (size_t r = 0; r < s->dim; r++) {
  n = nm::list::find(l, s->offset[r] + slice->coords[r]);
- if (n) l = reinterpret_cast<LIST*>(n->val);
+
+ if (n) l = reinterpret_cast<LIST*>(n->val);
  else return NULL;
  }
 
@@ -798,7 +801,7 @@ static void each_empty_with_indices_r(nm::list_storage::RecurseData& s, size_t r
  NM_CONSERVATIVE(nm_register_value(stack));
 
  if (rec) {
- for (long index = 0; index < s.ref_shape(rec); ++index) {
+ for (unsigned long index = 0; index < s.ref_shape(rec); ++index) {
  // Don't do an unshift/shift here -- we'll let that be handled in the lowest-level iteration (recursions == 0)
  rb_ary_push(stack, LONG2NUM(index));
  each_empty_with_indices_r(s, rec-1, stack);
@@ -806,7 +809,7 @@ static void each_empty_with_indices_r(nm::list_storage::RecurseData& s, size_t r
  }
  } else {
  rb_ary_unshift(stack, empty);
- for (long index = 0; index < s.ref_shape(rec); ++index) {
+ for (unsigned long index = 0; index < s.ref_shape(rec); ++index) {
  rb_ary_push(stack, LONG2NUM(index));
  rb_yield_splat(stack);
  rb_ary_pop(stack);
@@ -833,7 +836,7 @@ static void each_with_indices_r(nm::list_storage::RecurseData& s, const LIST* l,
 
 
  if (rec) {
- for (long index = 0; index < shape; ++index) { // index in reference
+ for (unsigned long index = 0; index < shape; ++index) { // index in reference
  rb_ary_push(stack, LONG2NUM(index));
  if (!curr || index < curr->key - offset) {
  each_empty_with_indices_r(s, rec-1, stack);
@@ -844,7 +847,7 @@ static void each_with_indices_r(nm::list_storage::RecurseData& s, const LIST* l,
  rb_ary_pop(stack);
  }
  } else {
- for (long index = 0; index < shape; ++index) {
+ for (unsigned long index = 0; index < shape; ++index) {
 
  rb_ary_push(stack, LONG2NUM(index));
 
@@ -875,7 +878,7 @@ static void each_stored_with_indices_r(nm::list_storage::RecurseData& s, const L
  if (s.dtype() == nm::RUBYOBJ)
  nm_list_storage_register_list(l, rec);
  NM_CONSERVATIVE(nm_register_value(stack));
-
+
  NODE* curr = l->first;
 
  size_t offset = s.offset(rec);
@@ -919,7 +922,6 @@ static void each_stored_with_indices_r(nm::list_storage::RecurseData& s, const L
  }
 
 
-
  /*
  * Each/each-stored iterator, brings along the indices.
  */
@@ -945,21 +947,19 @@ VALUE nm_list_each_with_indices(VALUE nmatrix, bool stored) {
 
 
  /*
- * map merged stored iterator. Always returns a matrix containing RubyObjects which probably needs to be casted.
+ * map merged stored iterator. Always returns a matrix containing RubyObjects
+ * which probably needs to be casted.
  */
  VALUE nm_list_map_stored(VALUE left, VALUE init) {
  NM_CONSERVATIVE(nm_register_value(left));
  NM_CONSERVATIVE(nm_register_value(init));
 
- bool scalar = false;
-
- LIST_STORAGE *s = NM_STORAGE_LIST(left);
+ LIST_STORAGE *s = NM_STORAGE_LIST(left);
 
- // For each matrix, if it's a reference, we want to deal directly with the original (with appropriate offsetting)
+ // For each matrix, if it's a reference, we want to deal directly with the
+ // original (with appropriate offsetting)
  nm::list_storage::RecurseData sdata(s);
 
- void* scalar_init = NULL;
-
  //if (!rb_block_given_p()) {
  // rb_raise(rb_eNotImpError, "RETURN_SIZED_ENUMERATOR probably won't work for a map_merged since no merged object is created");
  //}
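A rough Ruby-level sketch of how the map-stored iterator above is typically consumed (the constructor form mirrors the market.rb hunk below; the element values are made up). As the updated comment says, the result always comes back with :object dtype, so it is usually cast back afterwards, just as the math.rb hunk below does:

  m = NMatrix.new(:list, [2, 2], 0, :int64)   # list-stype matrix with default value 0
  m[0, 0] = 1
  m[1, 1] = 4
  # Map over stored entries only; cast the :object result back to the original stype/dtype.
  doubled = m.__list_map_stored__(nil) { |v| v * 2 }.cast(:list, :int64)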
@@ -1077,14 +1077,14 @@ static LIST* slice_copy(const LIST_STORAGE* src, LIST* src_rows, size_t* coords,
  nm_list_storage_register(src);
  void *val = NULL;
  int key;
-
+
  LIST* dst_rows = nm::list::create();
  NODE* src_node = src_rows->first;
  std::list<VALUE*> temp_vals;
  std::list<LIST*> temp_lists;
  while (src_node) {
  key = src_node->key - (src->offset[n] + coords[n]);
-
+
  if (key >= 0 && (size_t)key < lengths[n]) {
  if (src->dim - n > 1) {
  val = slice_copy( src,
@@ -1130,7 +1130,6 @@ void* nm_list_storage_get(const STORAGE* storage, SLICE* slice) {
  NODE* n = list_storage_get_single_node(s, slice);
  nm_list_storage_unregister(s);
  return (n ? n->val : s->default_val);
-
  } else {
  void *init_val = NM_ALLOC_N(char, DTYPE_SIZES[s->dtype]);
  memcpy(init_val, s->default_val, DTYPE_SIZES[s->dtype]);
@@ -1141,11 +1140,13 @@ void* nm_list_storage_get(const STORAGE* storage, SLICE* slice) {
  memcpy(shape, slice->lengths, sizeof(size_t) * s->dim);
 
  ns = nm_list_storage_create(s->dtype, shape, s->dim, init_val);
-
+
  ns->rows = slice_copy(s, s->rows, slice->coords, slice->lengths, 0);
 
- if (s->dtype == nm::RUBYOBJ)
+ if (s->dtype == nm::RUBYOBJ) {
  nm_unregister_value(*reinterpret_cast<VALUE*>(init_val));
+ }
+
  nm_list_storage_unregister(s);
 
  return ns;
@@ -1166,14 +1167,13 @@ void* nm_list_storage_ref(const STORAGE* storage, SLICE* slice) {
  NODE* n = list_storage_get_single_node(s, slice);
  nm_list_storage_unregister(s);
  return (n ? n->val : s->default_val);
- }
- else {
- ns = NM_ALLOC( LIST_STORAGE );
-
- ns->dim = s->dim;
- ns->dtype = s->dtype;
- ns->offset = NM_ALLOC_N(size_t, ns->dim);
- ns->shape = NM_ALLOC_N(size_t, ns->dim);
+ } else {
+ ns = NM_ALLOC( LIST_STORAGE );
+
+ ns->dim = s->dim;
+ ns->dtype = s->dtype;
+ ns->offset = NM_ALLOC_N(size_t, ns->dim);
+ ns->shape = NM_ALLOC_N(size_t, ns->dim);
 
  for (size_t i = 0; i < ns->dim; ++i) {
  ns->offset[i] = slice->coords[i] + s->offset[i];
@@ -1182,7 +1182,7 @@ void* nm_list_storage_ref(const STORAGE* storage, SLICE* slice) {
 
  ns->rows = s->rows;
  ns->default_val = s->default_val;
-
+
  s->src->count++;
  ns->src = s->src;
  nm_list_storage_unregister(s);
@@ -1204,13 +1204,14 @@ static void slice_set_single(LIST_STORAGE* dest, LIST* l, void* val, size_t* coo
  // drill down into the structure
  NODE* node = NULL;
  if (dest->dim - n > 1) {
- std::list<LIST*> temp_nodes;
+ std::list<LIST*> temp_nodes;
  for (size_t i = 0; i < lengths[n]; ++i) {
 
  size_t key = i + dest->offset[n] + coords[n];
 
  if (!node) {
- node = nm::list::insert(l, false, key, nm::list::create()); // try to insert list
+ // try to insert list
+ node = nm::list::insert(l, false, key, nm::list::create());
  } else if (!node->next || (node->next && node->next->key > key)) {
  node = nm::list::insert_after(node, key, nm::list::create());
  } else {
@@ -1364,20 +1365,20 @@ VALUE nm_list_storage_to_hash(const LIST_STORAGE* s, const nm::dtype_t dtype) {
  size_t nm_list_storage_count_elements_r(const LIST* l, size_t recursions) {
  size_t count = 0;
  NODE* curr = l->first;
-
+
  if (recursions) {
  while (curr) {
  count += nm_list_storage_count_elements_r(reinterpret_cast<const LIST*>(curr->val), recursions - 1);
  curr = curr->next;
  }
-
+
  } else {
  while (curr) {
  ++count;
  curr = curr->next;
  }
  }
-
+
  return count;
  }
 
@@ -1387,7 +1388,7 @@ size_t nm_list_storage_count_elements_r(const LIST* l, size_t recursions) {
  size_t nm_list_storage_count_nd_elements(const LIST_STORAGE* s) {
  NODE *i_curr, *j_curr;
  size_t count = 0;
-
+
  if (s->dim != 2) {
  rb_raise(rb_eNotImpError, "non-diagonal element counting only defined for dim = 2");
  }
@@ -1403,7 +1404,7 @@ size_t nm_list_storage_count_nd_elements(const LIST_STORAGE* s) {
  if (i != j) ++count;
  }
  }
-
+
  return count;
  }
 
@@ -1419,7 +1420,7 @@ LIST_STORAGE* nm_list_storage_copy(const LIST_STORAGE* rhs) {
  nm_list_storage_register(rhs);
  size_t *shape = NM_ALLOC_N(size_t, rhs->dim);
  memcpy(shape, rhs->shape, sizeof(size_t) * rhs->dim);
-
+
  void *init_val = NM_ALLOC_N(char, DTYPE_SIZES[rhs->dtype]);
  memcpy(init_val, rhs->default_val, DTYPE_SIZES[rhs->dtype]);
 
@@ -1483,7 +1484,7 @@ static LIST_STORAGE* cast_copy(const LIST_STORAGE* rhs, dtype_t new_dtype) {
 
  nm_list_storage_register(lhs);
  // TODO: Needs optimization. When matrix is reference it is copped twice.
- if (rhs->src == rhs)
+ if (rhs->src == rhs)
  nm::list::cast_copy_contents<LDType, RDType>(lhs->rows, rhs->rows, rhs->dim - 1);
  else {
  LIST_STORAGE *tmp = nm_list_storage_copy(rhs);
data/ext/nmatrix/util/io.cpp CHANGED
@@ -251,7 +251,7 @@ static VALUE nm_rbstring_merge(VALUE self, VALUE rb_real, VALUE rb_imaginary, VA
  size_t merge_pos = 0;
 
  // Merge the two sequences
- for (size_t i = 0; i < RSTRING_LEN(rb_real); i += len) {
+ for (size_t i = 0; i < (size_t)RSTRING_LEN(rb_real); i += len) {
 
  // Copy real number
  memcpy(merge + merge_pos, real + i, len);
@@ -276,4 +276,4 @@ void nm_init_io() {
 
 
 
- }
+ }
data/lib/nmatrix.rb CHANGED
@@ -36,7 +36,6 @@ end
 
  require 'nmatrix/nmatrix.rb'
  require 'nmatrix/version.rb'
- #require 'nmatrix/nvector.rb'
  require 'nmatrix/blas.rb'
  require 'nmatrix/monkeys'
  require "nmatrix/shortcuts.rb"
data/lib/nmatrix/enumerate.rb CHANGED
@@ -72,7 +72,7 @@ class NMatrix
  def map(&bl)
  return enum_for(:map) unless block_given?
  cp = self.cast(dtype: :object)
- cp.map! &bl
+ cp.map!(&bl)
  cp
  end
 
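The parenthesized cp.map!(&bl) form is functionally identical to cp.map! &bl; it just avoids Ruby's "`&' interpreted as argument prefix" ambiguity warning. A hedged usage sketch of the #map shown above (matrix contents are illustrative):

  m = NMatrix.new([2, 2], [1, 2, 3, 4], dtype: :int64)
  squared = m.map { |x| x * x }        # comes back as :object dtype, since self is cast before map!
  squared = squared.cast(dtype: :int64)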
data/lib/nmatrix/io/market.rb CHANGED
@@ -210,7 +210,7 @@ module NMatrix::IO::Market
  line.lstrip!
  line, comment = line.split('%', 2) # ignore comments
  if line.size > 4
- shape0, shape1, nz = line.split
+ shape0, shape1 = line.split
  mat = NMatrix.new(:list, [shape0.to_i, shape1.to_i], 0, dtype)
  break
  end
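A small illustrative sketch of the size-line parsing above (the sample line is made up). Ruby's multiple assignment simply discards the extra token, so dropping the unused nz binding does not change behavior:

  line = "32 64 128"            # rows, columns, nonzero count
  shape0, shape1 = line.split   # shape0 = "32", shape1 = "64"; the "128" token is discarded
  [shape0.to_i, shape1.to_i]    # => [32, 64]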
data/lib/nmatrix/io/mat_reader.rb CHANGED
@@ -38,62 +38,62 @@ module NMatrix::IO::Matlab
  #
  class MatReader
  MDTYPE_UNPACK_ARGS = {
- :miINT8 => [Integer, {:signed => true, :bytes => 1}],
- :miUINT8 => [Integer, {:signed => false, :bytes => 1}],
- :miINT16 => [Integer, {:signed => true, :bytes => 2}],
- :miUINT16 => [Integer, {:signed => false, :bytes => 2}],
- :miINT32 => [Integer, {:signed => true, :bytes => 4}],
- :miUINT32 => [Integer, {:signed => false, :bytes => 4}],
- :miSINGLE => [Float, {:precision => :single, :bytes => 4, :endian => :native}],
- :miDOUBLE => [Float, {:precision => :double, :bytes => 4, :endian => :native}],
- :miINT64 => [Integer, {:signed => true, :bytes => 8}],
- :miUINT64 => [Integer, {:signed => false, :bytes => 8}]
+ :miINT8 => [Integer, {:signed => true, :bytes => 1}],
+ :miUINT8 => [Integer, {:signed => false, :bytes => 1}],
+ :miINT16 => [Integer, {:signed => true, :bytes => 2}],
+ :miUINT16 => [Integer, {:signed => false, :bytes => 2}],
+ :miINT32 => [Integer, {:signed => true, :bytes => 4}],
+ :miUINT32 => [Integer, {:signed => false, :bytes => 4}],
+ :miSINGLE => [Float, {:precision => :single, :bytes => 4, :endian => :native}],
+ :miDOUBLE => [Float, {:precision => :double, :bytes => 4, :endian => :native}],
+ :miINT64 => [Integer, {:signed => true, :bytes => 8}],
+ :miUINT64 => [Integer, {:signed => false, :bytes => 8}]
  }
 
  DTYPE_PACK_ARGS = {
- :byte => [Integer, {:signed => false, :bytes => 1}],
- :int8 => [Integer, {:signed => true, :bytes => 1}],
- :int16 => [Integer, {:signed => true, :bytes => 2}],
- :int32 => [Integer, {:signed => true, :bytes => 4}],
- :int64 => [Integer, {:signed => true, :bytes => 8}],
- :float32 => [Float, {:precision => :single, :bytes => 4, :endian => :native}],
- :float64 => [Float, {:precision => :double, :bytes => 8, :endian => :native}],
- :complex64 => [Float, {:precision => :single, :bytes => 4, :endian => :native}], #2x
- :complex128 => [Float, {:precision => :double, :bytes => 8, :endian => :native}]
+ :byte => [Integer, {:signed => false, :bytes => 1}],
+ :int8 => [Integer, {:signed => true, :bytes => 1}],
+ :int16 => [Integer, {:signed => true, :bytes => 2}],
+ :int32 => [Integer, {:signed => true, :bytes => 4}],
+ :int64 => [Integer, {:signed => true, :bytes => 8}],
+ :float32 => [Float, {:precision => :single, :bytes => 4, :endian => :native}],
+ :float64 => [Float, {:precision => :double, :bytes => 8, :endian => :native}],
+ :complex64 => [Float, {:precision => :single, :bytes => 4, :endian => :native}], #2x
+ :complex128 => [Float, {:precision => :double, :bytes => 8, :endian => :native}]
  }
 
  ITYPE_PACK_ARGS = {
- :uint8 => [Integer, {:signed => false, :bytes => 1}],
- :uint16 => [Integer, {:signed => false, :bytes => 2}],
- :uint32 => [Integer, {:signed => false, :bytes => 4}],
- :uint64 => [Integer, {:signed => false, :bytes => 8}],
+ :uint8 => [Integer, {:signed => false, :bytes => 1}],
+ :uint16 => [Integer, {:signed => false, :bytes => 2}],
+ :uint32 => [Integer, {:signed => false, :bytes => 4}],
+ :uint64 => [Integer, {:signed => false, :bytes => 8}],
  }
 
  NO_REPACK = [:miINT8, :miUINT8, :miINT16, :miINT32, :miSINGLE, :miDOUBLE, :miINT64]
 
  # Convert from MATLAB dtype to NMatrix dtype.
  MDTYPE_TO_DTYPE = {
- :miUINT8 => :byte,
- :miINT8 => :int8,
- :miINT16 => :int16,
- :miUINT16 => :int16,
- :miINT32 => :int32,
- :miUINT32 => :int32,
- :miINT64 => :int64,
- :miUINT64 => :int64,
- :miSINGLE => :float32,
- :miDOUBLE => :float64
+ :miUINT8 => :byte,
+ :miINT8 => :int8,
+ :miINT16 => :int16,
+ :miUINT16 => :int16,
+ :miINT32 => :int32,
+ :miUINT32 => :int32,
+ :miINT64 => :int64,
+ :miUINT64 => :int64,
+ :miSINGLE => :float32,
+ :miDOUBLE => :float64
  }
 
  MDTYPE_TO_ITYPE = {
- :miUINT8 => :uint8,
- :miINT8 => :uint8,
- :miINT16 => :uint16,
- :miUINT16 => :uint16,
- :miINT32 => :uint32,
- :miUINT32 => :uint32,
- :miINT64 => :uint64,
- :miUINT64 => :uint64
+ :miUINT8 => :uint8,
+ :miINT8 => :uint8,
+ :miINT16 => :uint16,
+ :miUINT16 => :uint16,
+ :miINT32 => :uint32,
+ :miUINT32 => :uint32,
+ :miINT64 => :uint64,
+ :miUINT64 => :uint64
  }
 
  # Before release v7.1 (release 14) matlab (TM) used the system
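As a quick illustration of the mapping tables above (values taken straight from this hunk), a MATLAB element tagged :miDOUBLE ends up as an NMatrix :float64 dtype, while index data tagged :miUINT32 uses the :uint32 itype:

  MDTYPE_TO_DTYPE[:miDOUBLE]   # => :float64
  MDTYPE_TO_ITYPE[:miUINT32]   # => :uint32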
data/lib/nmatrix/lapack.rb CHANGED
@@ -193,7 +193,6 @@ class NMatrix
  # Perform eigenvalue decomposition on a matrix using LAPACK's xGEEV function.
  #
  def geev(matrix, which=:both)
- result = alloc_evd_result(matrix)
  jobvl = (which == :both || which == :left) ? :left : false
  jobvr = (which == :both || which == :right) ? :right : false
 
data/lib/nmatrix/math.rb CHANGED
@@ -607,6 +607,49 @@ protected
  self.__dense_map__ { |l| -l }.cast(stype, dtype)
  end
 
+ # These are for calculating the floor or ceil of matrix
+ def dtype_for_floor_or_ceil
+ if self.integer_dtype? or [:complex64, :complex128, :object].include?(self.dtype)
+ return_dtype = dtype
+ elsif [:float32, :float64, :rational32,:rational64, :rational128].include?(self.dtype)
+ return_dtype = :int64
+ end
+
+ return_dtype
+ end
+
+ [:floor, :ceil].each do |meth|
+ define_method("__list_unary_#{meth}__") do
+ return_dtype = dtype_for_floor_or_ceil
+
+ if [:complex64, :complex128].include?(self.dtype)
+ self.__list_map_stored__(nil) { |l| Complex(l.real.send(meth), l.imag.send(meth)) }.cast(stype, return_dtype)
+ else
+ self.__list_map_stored__(nil) { |l| l.send(meth) }.cast(stype, return_dtype)
+ end
+ end
+
+ define_method("__yale_unary_#{meth}__") do
+ return_dtype = dtype_for_floor_or_ceil
+
+ if [:complex64, :complex128].include?(self.dtype)
+ self.__yale_map_stored__ { |l| Complex(l.real.send(meth), l.imag.send(meth)) }.cast(stype, return_dtype)
+ else
+ self.__yale_map_stored__ { |l| l.send(meth) }.cast(stype, return_dtype)
+ end
+ end
+
+ define_method("__dense_unary_#{meth}__") do
+ return_dtype = dtype_for_floor_or_ceil
+
+ if [:complex64, :complex128].include?(self.dtype)
+ self.__dense_map__ { |l| Complex(l.real.send(meth), l.imag.send(meth)) }.cast(stype, return_dtype)
+ else
+ self.__dense_map__ { |l| l.send(meth) }.cast(stype, return_dtype)
+ end
+ end
+ end
+
  # These take two arguments. One might be a matrix, and one might be a scalar.
  # See also monkeys.rb, which contains Math module patches to let the first
  # arg be a scalar
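A hedged usage sketch of the new floor/ceil helpers above. It assumes a public #floor / #ceil wrapper (not shown in this diff) dispatches to the __<stype>_unary_floor__ / __<stype>_unary_ceil__ methods defined here; per dtype_for_floor_or_ceil, float matrices come back as :int64, while complex matrices keep their dtype and apply the operation to the real and imaginary parts separately:

  m = NMatrix.new([2, 2], [1.2, 2.7, 3.5, 4.9], dtype: :float64)
  m.floor   # assumed to return an :int64 matrix [[1, 2], [3, 4]] via __dense_unary_floor__

  c = NMatrix.new([1, 2], [Complex(1.3, 2.8), Complex(3.1, 4.9)], dtype: :complex128)
  c.ceil    # assumed to stay :complex128, e.g. the first entry becomes Complex(2, 3)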