nmatrix 0.1.0.rc3 → 0.1.0.rc4

Files changed (49)
  1. checksums.yaml +4 -4
  2. data/CONTRIBUTING.md +22 -21
  3. data/History.txt +13 -0
  4. data/Manifest.txt +1 -2
  5. data/README.rdoc +8 -8
  6. data/ext/nmatrix/binary_format.txt +1 -1
  7. data/ext/nmatrix/data/complex.h +21 -21
  8. data/ext/nmatrix/data/data.cpp +9 -2
  9. data/ext/nmatrix/data/data.h +4 -2
  10. data/ext/nmatrix/math.cpp +69 -31
  11. data/ext/nmatrix/math/getf2.h +2 -2
  12. data/ext/nmatrix/math/getrf.h +2 -2
  13. data/ext/nmatrix/math/imax.h +101 -0
  14. data/ext/nmatrix/math/scal.h +30 -10
  15. data/ext/nmatrix/math/swap.h +1 -22
  16. data/ext/nmatrix/nm_memory.h +1 -1
  17. data/ext/nmatrix/nmatrix.h +2 -2
  18. data/ext/nmatrix/ruby_constants.cpp +1 -2
  19. data/ext/nmatrix/ruby_constants.h +6 -7
  20. data/ext/nmatrix/ruby_nmatrix.c +23 -18
  21. data/ext/nmatrix/storage/list/list.cpp +48 -47
  22. data/ext/nmatrix/util/io.cpp +2 -2
  23. data/lib/nmatrix.rb +0 -1
  24. data/lib/nmatrix/enumerate.rb +1 -1
  25. data/lib/nmatrix/io/market.rb +1 -1
  26. data/lib/nmatrix/io/mat_reader.rb +41 -41
  27. data/lib/nmatrix/lapack.rb +0 -1
  28. data/lib/nmatrix/math.rb +43 -0
  29. data/lib/nmatrix/nmatrix.rb +5 -1
  30. data/lib/nmatrix/version.rb +1 -1
  31. data/nmatrix.gemspec +3 -4
  32. data/spec/00_nmatrix_spec.rb +13 -6
  33. data/spec/01_enum_spec.rb +17 -25
  34. data/spec/02_slice_spec.rb +74 -82
  35. data/spec/blas_spec.rb +21 -6
  36. data/spec/elementwise_spec.rb +1 -6
  37. data/spec/io_spec.rb +15 -22
  38. data/spec/lapack_spec.rb +1 -6
  39. data/spec/leakcheck.rb +1 -1
  40. data/spec/math_spec.rb +43 -4
  41. data/spec/nmatrix_yale_spec.rb +1 -4
  42. data/spec/rspec_spec.rb +1 -1
  43. data/spec/shortcuts_spec.rb +1 -6
  44. data/spec/slice_set_spec.rb +1 -5
  45. data/spec/stat_spec.rb +46 -51
  46. metadata +32 -22
  47. data/Guardfile +0 -6
  48. data/ext/nmatrix/math/idamax.h +0 -86
  49. data/lib/nmatrix/nvector.rb +0 -184
data/ext/nmatrix/math/getf2.h

@@ -46,7 +46,7 @@ inline int getf2(const int m, const int n, DType* a, const int lda, int *ipiv) {
 
     /* Find pivot and test for singularity. */
 
-    int jp = j - 1 + idamax<DType>(m-j+1, &a[j + j * lda], 1);
+    int jp = j - 1 + imax<DType>(m-j+1, &a[j + j * lda], 1);
 
     ipiv[j] = jp;
 
@@ -83,4 +83,4 @@ inline int getf2(const int m, const int n, DType* a, const int lda, int *ipiv) {
 
 }} // end of namespace nm::math
 
-#endif // GETF2
+#endif // GETF2
data/ext/nmatrix/math/getrf.h

@@ -150,7 +150,7 @@ inline int getrf_nothrow(const int M, const int N, DType* A, const int lda, int*
 
   } else if (MN == 1) { // there's another case for the colmajor version, but i don't know that it's that critical. Calls ATLAS LU2, who knows what that does.
 
-    int i = *ipiv = nm::math::idamax<DType>(N, A, 1); // cblas_iamax(N, A, 1);
+    int i = *ipiv = nm::math::imax<DType>(N, A, 1); // cblas_iamax(N, A, 1);
 
     DType tmp = A[i];
     if (tmp != 0) {
@@ -237,4 +237,4 @@ inline int clapack_getrf(const enum CBLAS_ORDER order, const int m, const int n,
 
 } } // end nm::math
 
-#endif
+#endif
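Both getf2.h and getrf.h now call the renamed pivot search imax<DType> (the old idamax.h is deleted; see item 48 in the file list). The snippet below is a standalone sketch of what that search is used for in the LU routines above: scan the remaining entries of a column of a column-major matrix and pick the row with the largest absolute value as the pivot. The matrix values, the helper name abs_max_index, and the 0-based indexing here are illustrative assumptions, not nmatrix's exact arithmetic.

#include <cmath>
#include <cstdio>

// Illustrative only: index of the entry with the largest |value| in a strided
// vector, mirroring the role imax<DType> plays inside getf2/getrf.
static int abs_max_index(int n, const double* x, int incx) {
  int best = 0;
  double dmax = std::fabs(x[0]);
  for (int i = 1, ix = incx; i < n; ++i, ix += incx) {
    if (std::fabs(x[ix]) > dmax) { best = i; dmax = std::fabs(x[ix]); }
  }
  return best;
}

int main() {
  // 3x3 column-major matrix; choose the pivot row for column j = 0.
  const int n = 3, lda = 3, j = 0;
  double a[] = { 2.0, -7.0, 4.0,    // column 0
                 1.0,  3.0, 0.5,    // column 1
                 0.0,  1.0, 6.0 };  // column 2
  int jp = j + abs_max_index(n - j, &a[j + j * lda], 1);
  std::printf("pivot row for column %d is %d (value %g)\n", j, jp, a[jp + j * lda]);
  return 0;
}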
data/ext/nmatrix/math/imax.h (new file)

@@ -0,0 +1,101 @@
+/////////////////////////////////////////////////////////////////////
+// = NMatrix
+//
+// A linear algebra library for scientific computation in Ruby.
+// NMatrix is part of SciRuby.
+//
+// NMatrix was originally inspired by and derived from NArray, by
+// Masahiro Tanaka: http://narray.rubyforge.org
+//
+// == Copyright Information
+//
+// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
+// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
+//
+// Please see LICENSE.txt for additional copyright notices.
+//
+// == Contributing
+//
+// By contributing source code to SciRuby, you agree to be bound by
+// our Contributor Agreement:
+//
+// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
+//
+// == imax.h
+//
+// BLAS level 1 function imax.
+//
+
+#ifndef IMAX_H
+#define IMAX_H
+
+namespace nm { namespace math {
+
+template<typename DType>
+inline int imax(const int n, const DType *x, const int incx) {
+
+  if (n < 1 || incx <= 0) {
+    return -1;
+  }
+  if (n == 1) {
+    return 0;
+  }
+
+  DType dmax;
+  int imax = 0;
+
+  if (incx == 1) { // if incrementing by 1
+
+    dmax = abs(x[0]);
+
+    for (int i = 1; i < n; ++i) {
+      if (std::abs(x[i]) > dmax) {
+        imax = i;
+        dmax = std::abs(x[i]);
+      }
+    }
+
+  } else { // if incrementing by more than 1
+
+    dmax = std::abs(x[0]);
+
+    for (int i = 1, ix = incx; i < n; ++i, ix += incx) {
+      if (std::abs(x[ix]) > dmax) {
+        imax = i;
+        dmax = std::abs(x[ix]);
+      }
+    }
+  }
+  return imax;
+}
+
+#if defined HAVE_CBLAS_H || defined HAVE_ATLAS_CBLAS_H
+template<>
+inline int imax(const int n, const float* x, const int incx) {
+  return cblas_isamax(n, x, incx);
+}
+
+template<>
+inline int imax(const int n, const double* x, const int incx) {
+  return cblas_idamax(n, x, incx);
+}
+
+template<>
+inline int imax(const int n, const Complex64* x, const int incx) {
+  return cblas_icamax(n, x, incx);
+}
+
+template <>
+inline int imax(const int n, const Complex128* x, const int incx) {
+  return cblas_izamax(n, x, incx);
+}
+#endif
+
+template<typename DType>
+inline int cblas_imax(const int n, const void* x, const int incx) {
+  return imax<DType>(n, reinterpret_cast<const DType*>(x), incx);
+}
+
+}} // end of namespace nm::math
+
+#endif /* IMAX_H */
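imax.h replaces the former idamax.h (removed in this release) with a dtype-generic scan plus CBLAS specializations (cblas_isamax, cblas_idamax, cblas_icamax, cblas_izamax) that are used when a CBLAS header is available. The following self-contained sketch re-creates the templated scan for illustration only; it does not include nmatrix headers, and imax_sketch is a hypothetical name.

#include <cmath>
#include <iostream>
#include <vector>

// Standalone sketch of the generic loop in imax.h: return the 0-based logical
// index of the element with the largest absolute value, honoring stride incx.
template <typename T>
int imax_sketch(int n, const T* x, int incx) {
  if (n < 1 || incx <= 0) return -1;
  int best = 0;
  T dmax = std::abs(x[0]);
  for (int i = 1, ix = incx; i < n; ++i, ix += incx) {
    if (std::abs(x[ix]) > dmax) { best = i; dmax = std::abs(x[ix]); }
  }
  return best;
}

int main() {
  std::vector<double> v = { 1.0, -9.5, 3.0, 9.4 };
  std::cout << imax_sketch(4, v.data(), 1) << "\n"; // 1: |-9.5| is the largest
  std::cout << imax_sketch(2, v.data(), 2) << "\n"; // 1: of elements 0 and 2, 3.0 wins
  return 0;
}

As with the CBLAS i?amax routines, the return value is the logical element index i, not the raw memory offset ix.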
data/ext/nmatrix/math/scal.h

@@ -23,7 +23,7 @@
 //
 // == scal.h
 //
-// LAPACK scal function in native C.
+// BLAS scal function.
 //
 
 #ifndef SCAL_H
@@ -47,25 +47,45 @@ namespace nm { namespace math {
 /* ===================================================================== */
 
 template <typename DType>
-inline void scal(const int n, const DType da, DType* dx, const int incx) {
+inline void scal(const int n, const DType scalar, DType* x, const int incx) {
 
-  // This used to have unrolled loops, like dswap. They were in the way.
-
-  if (n <= 0 || incx <= 0) return;
+  if (n <= 0 || incx <= 0) {
+    return;
+  }
 
   for (int i = 0; incx < 0 ? i > n*incx : i < n*incx; i += incx) {
-    dx[i] = da * dx[i];
+    x[i] = scalar * x[i];
   }
-} /* scal */
+}
 
+#if defined HAVE_CBLAS_H || defined HAVE_ATLAS_CBLAS_H
+template <>
+inline void scal(const int n, const float scalar, float* x, const int incx) {
+  cblas_sscal(n, scalar, x, incx);
+}
+
+template <>
+inline void scal(const int n, const double scalar, double* x, const int incx) {
+  cblas_dscal(n, scalar, x, incx);
+}
+
+template <>
+inline void scal(const int n, const Complex64 scalar, Complex64* x, const int incx) {
+  cblas_cscal(n, (const void*)(&scalar), (void*)(x), incx);
+}
+
+template <>
+inline void scal(const int n, const Complex128 scalar, Complex128* x, const int incx) {
+  cblas_zscal(n, (const void*)(&scalar), (void*)(x), incx);
+}
+#endif
 
 /*
  * Function signature conversion for LAPACK's scal function.
  */
 template <typename DType>
-inline void clapack_scal(const int n, const void* da, void* dx, const int incx) {
-  // FIXME: See if we can call the clapack version instead of our C++ version.
-  scal<DType>(n, *reinterpret_cast<const DType*>(da), reinterpret_cast<DType*>(dx), incx);
+inline void cblas_scal(const int n, const void* scalar, void* x, const int incx) {
+  scal<DType>(n, *reinterpret_cast<const DType*>(scalar), reinterpret_cast<DType*>(x), incx);
 }
 
 }} // end of nm::math
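Besides the renames (da/dx become scalar/x, clapack_scal becomes cblas_scal), the main change is that scal now has CBLAS specializations next to the generic loop. That loop touches the n logical elements at offsets 0, incx, 2*incx, ..., (n-1)*incx; the sketch below is a standalone illustration of that semantics with plain doubles and made-up data, not the library's code.

#include <cstdio>

// Sketch of the generic scal loop: scale n logical elements of x in place,
// stepping through memory with stride incx.
void scal_sketch(int n, double scalar, double* x, int incx) {
  if (n <= 0 || incx <= 0) return;
  for (int i = 0; i < n * incx; i += incx) {
    x[i] = scalar * x[i];
  }
}

int main() {
  double x[6] = { 1, 10, 2, 20, 3, 30 };
  scal_sketch(3, 0.5, x, 2);          // scales x[0], x[2], x[4] only
  for (double v : x) std::printf("%g ", v);
  std::printf("\n");                  // prints: 0.5 10 1 20 1.5 30
  return 0;
}

When CBLAS is present, the float, double, Complex64, and Complex128 specializations hand the same (n, scalar, x, incx) arguments straight to the corresponding cblas_?scal routine.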
data/ext/nmatrix/math/swap.h

@@ -30,27 +30,6 @@
 #define SWAP_H
 
 namespace nm { namespace math {
-/*
-template <typename DType>
-inline void swap(int n, DType *dx, int incx, DType *dy, int incy) {
-
-  if (n <= 0) return;
-
-  // For negative increments, start at the end of the array.
-  int ix = incx < 0 ? (-n+1)*incx : 0,
-      iy = incy < 0 ? (-n+1)*incy : 0;
-
-  if (incx < 0) ix = (-n + 1) * incx;
-  if (incy < 0) iy = (-n + 1) * incy;
-
-  for (size_t i = 0; i < n; ++i, ix += incx, iy += incy) {
-    DType dtemp = dx[ix];
-    dx[ix] = dy[iy];
-    dy[iy] = dtemp;
-  }
-  return;
-} /* dswap */
-
 // This is the old BLAS version of this function. ATLAS has an optimized version, but
 // it's going to be tough to translate.
 template <typename DType>
@@ -70,4 +49,4 @@ static void swap(const int N, DType* X, const int incX, DType* Y, const int incY
 
 }} // end nm::math
 
-#endif
+#endif
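The first hunk only deletes the commented-out first draft of swap; the "old BLAS version" whose signature appears as context in the second hunk is what remains. For orientation, here is a minimal sketch of what a BLAS-style strided swap does; it is an illustration with hypothetical names, not the retained implementation.

#include <cstdio>
#include <utility>

// Illustrative strided swap: exchange n logical elements of x and y,
// stepping by incx and incy respectively (non-negative strides assumed).
void swap_sketch(int n, double* x, int incx, double* y, int incy) {
  if (n <= 0) return;
  for (int i = 0, ix = 0, iy = 0; i < n; ++i, ix += incx, iy += incy) {
    std::swap(x[ix], y[iy]);
  }
}

int main() {
  double x[] = { 1, 2, 3 };
  double y[] = { 9, 8, 7 };
  swap_sketch(3, x, 1, y, 1);
  std::printf("%g %g %g | %g %g %g\n", x[0], x[1], x[2], y[0], y[1], y[2]); // 9 8 7 | 1 2 3
  return 0;
}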
data/ext/nmatrix/nm_memory.h

@@ -26,7 +26,7 @@
 // Macros for memory allocation and freeing
 
 /**
- * We define these macros, which just call the ruby ones, as this makes
+ * We define these macros, which just call the ruby ones, as this makes
  * debugging memory issues (particularly those involving interaction with
  * the ruby GC) easier, as it's posssible to add debugging code temporarily.
  */
data/ext/nmatrix/nmatrix.h

@@ -179,7 +179,7 @@
   size_t* shape; \
   size_t* offset; \
   int count; \
-  STORAGE* src;
+  STORAGE* src;
 #define NM_DEF_STORAGE_CHILD_STRUCT_PRE(name) typedef struct NM_ ## name { \
   NM_DEF_STORAGE_ELEMENTS;
 
@@ -328,7 +328,7 @@ typedef struct __NM_GC_HOLDER {
 #define NM_DENSE_ELEMENTS(val) (NM_STORAGE_DENSE(val)->elements)
 #define NM_SIZEOF_DTYPE(val) (DTYPE_SIZES[NM_DTYPE(val)])
 #define NM_REF(val,slice) (RefFuncs[NM_STYPE(val)]( NM_STORAGE(val), slice, NM_SIZEOF_DTYPE(val) ))
-
+
 #define NM_MAX(a,b) (((a)>(b))?(a):(b))
 #define NM_MIN(a,b) (((a)>(b))?(b):(a))
 #define NM_SWAP(a,b,tmp) {(tmp)=(a);(a)=(b);(b)=(tmp);}
data/ext/nmatrix/ruby_constants.cpp

@@ -85,13 +85,12 @@ ID nm_rb_dtype,
 VALUE cNMatrix,
       cNMatrix_IO,
       cNMatrix_IO_Matlab,
-      cNVector,
       cNMatrix_YaleFunctions,
       cNMatrix_BLAS,
       cNMatrix_LAPACK,
 
       cNMatrix_GC_holder,
-
+
       nm_eDataTypeError,
       nm_eConvergenceError,
       nm_eStorageTypeError,
data/ext/nmatrix/ruby_constants.h

@@ -46,10 +46,10 @@ extern ID nm_rb_dtype,
 
           nm_rb_real,
           nm_rb_imag,
-
+
           nm_rb_numer,
           nm_rb_denom,
-
+
           nm_rb_complex_conjugate,
           nm_rb_transpose,
           nm_rb_no_transpose,
@@ -59,21 +59,21 @@ extern ID nm_rb_dtype,
           nm_rb_lower,
           nm_rb_unit,
           nm_rb_nonunit,
-
+
           nm_rb_dense,
           nm_rb_list,
           nm_rb_yale,
 
           nm_rb_row,
           nm_rb_column,
-
+
           nm_rb_add,
           nm_rb_sub,
           nm_rb_mul,
           nm_rb_div,
 
           nm_rb_negate,
-
+
           nm_rb_percent,
           nm_rb_gt,
           nm_rb_lt,
@@ -87,13 +87,12 @@ extern ID nm_rb_dtype,
 extern VALUE cNMatrix,
              cNMatrix_IO,
              cNMatrix_IO_Matlab,
-             cNVector,
             cNMatrix_YaleFunctions,
              cNMatrix_BLAS,
              cNMatrix_LAPACK,
 
             cNMatrix_GC_holder,
-
+
             nm_eDataTypeError,
             nm_eConvergenceError,
             nm_eStorageTypeError,
data/ext/nmatrix/ruby_nmatrix.c

@@ -132,6 +132,8 @@ DECL_UNARY_RUBY_ACCESSOR(erfc)
 DECL_UNARY_RUBY_ACCESSOR(cbrt)
 DECL_UNARY_RUBY_ACCESSOR(gamma)
 DECL_UNARY_RUBY_ACCESSOR(negate)
+DECL_UNARY_RUBY_ACCESSOR(floor)
+DECL_UNARY_RUBY_ACCESSOR(ceil)
 DECL_NONCOM_ELEMENTWISE_RUBY_ACCESSOR(atan2)
 DECL_NONCOM_ELEMENTWISE_RUBY_ACCESSOR(ldexp)
 DECL_NONCOM_ELEMENTWISE_RUBY_ACCESSOR(hypot)
@@ -182,7 +184,6 @@ void Init_nmatrix() {
   ///////////////////////
 
   cNMatrix = rb_define_class("NMatrix", rb_cObject);
-  //cNVector = rb_define_class("NVector", cNMatrix);
 
   // Special exceptions
 
@@ -313,6 +314,8 @@ void Init_nmatrix() {
   rb_define_method(cNMatrix, "gamma", (METHOD)nm_unary_gamma, 0);
   rb_define_method(cNMatrix, "log", (METHOD)nm_unary_log, -1);
   rb_define_method(cNMatrix, "-@", (METHOD)nm_unary_negate,0);
+  rb_define_method(cNMatrix, "floor", (METHOD)nm_unary_floor, 0);
+  rb_define_method(cNMatrix, "ceil", (METHOD)nm_unary_ceil, 0);
 
   rb_define_method(cNMatrix, "=~", (METHOD)nm_ew_eqeq, 1);
   rb_define_method(cNMatrix, "!~", (METHOD)nm_ew_neq, 1);
@@ -522,7 +525,7 @@ static void __nm_initialize_value_container() {
     gc_value_holder_struct->start = NULL;
     allocated_pool->start = NULL;
     *gc_value_holder = Data_Wrap_Struct(cNMatrix_GC_holder, __nm_mark_value_container, NULL, gc_value_holder_struct);
-    rb_global_variable(gc_value_holder);
+    rb_global_variable(gc_value_holder);
   }
 }
 
@@ -926,6 +929,8 @@ DEF_UNARY_RUBY_ACCESSOR(ERFC, erfc)
 DEF_UNARY_RUBY_ACCESSOR(CBRT, cbrt)
 DEF_UNARY_RUBY_ACCESSOR(GAMMA, gamma)
 DEF_UNARY_RUBY_ACCESSOR(NEGATE, negate)
+DEF_UNARY_RUBY_ACCESSOR(FLOOR, floor)
+DEF_UNARY_RUBY_ACCESSOR(CEIL, ceil)
 
 DEF_NONCOM_ELEMENTWISE_RUBY_ACCESSOR(ATAN2, atan2)
 DEF_NONCOM_ELEMENTWISE_RUBY_ACCESSOR(LDEXP, ldexp)
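Taken together, the ruby_nmatrix.c hunks above declare, register, and define NMatrix#floor and NMatrix#ceil: rb_define_method binds the nm_unary_floor and nm_unary_ceil entry points to the class with arity 0, and those entry points are presumably supplied by the DECL_/DEF_UNARY_RUBY_ACCESSOR macro pair, whose expansions are not shown in this diff. The sketch below only illustrates the underlying Ruby C-API pattern with a toy class; every name in it is hypothetical and none of it is nmatrix code.

#include <ruby.h>
#include <cmath>

// Hypothetical example: a class wrapping one double, exposing #floor the same
// way the hunks above expose NMatrix#floor, i.e. a C function of arity 0
// bound with rb_define_method.
static VALUE demo_floor(VALUE self) {
  double v = NUM2DBL(rb_iv_get(self, "@value"));
  return DBL2NUM(std::floor(v));
}

static VALUE demo_initialize(VALUE self, VALUE value) {
  rb_iv_set(self, "@value", value);
  return self;
}

extern "C" void Init_unary_demo(void) {
  VALUE cDemo = rb_define_class("UnaryDemo", rb_cObject);
  rb_define_method(cDemo, "initialize", RUBY_METHOD_FUNC(demo_initialize), 1);
  rb_define_method(cDemo, "floor", RUBY_METHOD_FUNC(demo_floor), 0);
}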
data/ext/nmatrix/ruby_nmatrix.c (continued)

@@ -1018,7 +1023,7 @@ static VALUE nm_complex_conjugate_bang(VALUE self) {
       reinterpret_cast<nm::Complex128*>(elem)[p].i = -reinterpret_cast<nm::Complex128*>(elem)[p].i;
     }
 
-  }
+  }
   return self;
 }
 
@@ -1045,7 +1050,7 @@ static VALUE nm_complex_conjugate(VALUE self) {
  */
 static VALUE nm_reshape_bang(VALUE self, VALUE arg){
   NMATRIX* m;
-  UnwrapNMatrix(self, m);
+  UnwrapNMatrix(self, m);
   if(m->stype == nm::DENSE_STORE){
     DENSE_STORAGE* s = NM_STORAGE_DENSE(self);
     VALUE shape_ary = arg;
@@ -1056,9 +1061,9 @@ static VALUE nm_reshape_bang(VALUE self, VALUE arg){
     void* elem = s->elements;
     for (size_t index = 0; index < dim; ++index){
       new_size *= shape[index];}
-
+
     if (size == new_size){
-      s->shape = shape;
+      s->shape = shape;
       s->dim = dim;
       size_t i, j;
       size_t* stride = NM_ALLOC_N(size_t, dim);
@@ -1072,7 +1077,7 @@ static VALUE nm_reshape_bang(VALUE self, VALUE arg){
       return self;
     }
     else
-      rb_raise(rb_eArgError, "reshape cannot resize; size of new and old matrices must match");
+      rb_raise(rb_eArgError, "reshape cannot resize; size of new and old matrices must match");
   }
   else {
     rb_raise(rb_eNotImpError, "reshape in place only for dense stype");
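Whitespace changes aside, the nm_reshape_bang hunks show the guard an in-place reshape depends on: the product of the requested shape must equal the current element count, otherwise rb_raise reports "reshape cannot resize; size of new and old matrices must match". A minimal standalone sketch of that size check, with hypothetical names:

#include <cstddef>
#include <stdexcept>
#include <vector>

// Sketch of the reshape-in-place precondition: the new shape must describe
// exactly as many elements as the matrix already holds.
void check_reshape(std::size_t old_size, const std::vector<std::size_t>& new_shape) {
  std::size_t new_size = 1;
  for (std::size_t d : new_shape) new_size *= d;
  if (new_size != old_size)
    throw std::invalid_argument("reshape cannot resize; size of new and old matrices must match");
}

int main() {
  check_reshape(6, {2, 3});      // ok: 2*3 == 6
  try {
    check_reshape(6, {4, 2});    // throws: 8 != 6
  } catch (const std::invalid_argument&) { /* expected */ }
  return 0;
}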
data/ext/nmatrix/ruby_nmatrix.c (continued)

@@ -1173,7 +1178,7 @@ static VALUE nm_init_new_version(int argc, VALUE* argv, VALUE self) {
       init = RARRAY_LEN(initial_ary) == 1 ? rubyobj_to_cval(rb_ary_entry(initial_ary, 0), dtype) : NULL;
     else
       init = rubyobj_to_cval(initial_ary, dtype);
-
+
     if (dtype == nm::RUBYOBJ) {
       nm_register_values(reinterpret_cast<VALUE*>(init), 1);
     }
@@ -1185,7 +1190,7 @@ static VALUE nm_init_new_version(int argc, VALUE* argv, VALUE self) {
   }
 
   if (!NIL_P(initial_ary)) {
-
+
     if (TYPE(initial_ary) == T_ARRAY) v_size = RARRAY_LEN(initial_ary);
     else v_size = 1;
 
@@ -1324,7 +1329,7 @@ static VALUE nm_init_new_version(int argc, VALUE* argv, VALUE self) {
 static VALUE nm_init(int argc, VALUE* argv, VALUE nm) {
   NM_CONSERVATIVE(nm_register_value(nm));
   NM_CONSERVATIVE(nm_register_values(argv, argc));
-
+
   if (argc <= 3) { // Call the new constructor unless all four arguments are given (or the 7-arg version is given)
     NM_CONSERVATIVE(nm_unregister_values(argv, argc));
     NM_CONSERVATIVE(nm_unregister_value(nm));
@@ -1487,7 +1492,7 @@ VALUE nm_cast(VALUE self, VALUE new_stype_symbol, VALUE new_dtype_symbol, VALUE
   nm_register_nmatrix(m);
 
   VALUE to_return = Data_Wrap_Struct(CLASS_OF(self), nm_mark, nm_delete, m);
-
+
   nm_unregister_nmatrix(m);
   NM_CONSERVATIVE(nm_unregister_value(self));
   NM_CONSERVATIVE(nm_unregister_value(init));
@@ -1974,7 +1979,7 @@ static VALUE nm_mref(int argc, VALUE* argv, VALUE self) {
  * n[3,3] = n[2,3] = 5.0
  */
 static VALUE nm_mset(int argc, VALUE* argv, VALUE self) {
-
+
   size_t dim = NM_DIM(self); // last arg is the value
 
   VALUE to_return = Qnil;
@@ -2107,7 +2112,7 @@ static VALUE nm_shape(VALUE self) {
   nm_register_values(shape, s->dim);
   for (size_t index = 0; index < s->dim; ++index)
     shape[index] = INT2FIX(s->shape[index]);
-
+
   nm_unregister_values(shape, s->dim);
   NM_CONSERVATIVE(nm_unregister_value(self));
   return rb_ary_new4(s->dim, shape);
@@ -2147,11 +2152,11 @@ static VALUE nm_supershape(VALUE self) {
   STORAGE* s = NM_STORAGE(self);
   if (s->src == s) {
     return nm_shape(self); // easy case (not a slice)
-  }
+  }
   else s = s->src;
 
   NM_CONSERVATIVE(nm_register_value(self));
-
+
   VALUE* shape = NM_ALLOCA_N(VALUE, s->dim);
   nm_register_values(shape, s->dim);
   for (size_t index = 0; index < s->dim; ++index)
@@ -2655,7 +2660,7 @@ static SLICE* get_slice(size_t dim, int argc, VALUE* arg, size_t* shape) {
   // r is the shape position; t is the slice position. They may differ when we're dealing with a
   // matrix where the effective dimension is less than the dimension (e.g., a vector).
   for (size_t r = 0, t = 0; r < dim; ++r) {
-    VALUE v = t == argc ? Qnil : arg[t];
+    VALUE v = t == (unsigned int)argc ? Qnil : arg[t];
 
     // if the current shape indicates a vector and fewer args were supplied than necessary, just use 0
     if (argc - t + r < dim && shape[r] == 1) {
@@ -2684,11 +2689,11 @@ static SLICE* get_slice(size_t dim, int argc, VALUE* arg, size_t* shape) {
 
       if (rb_ary_entry(begin_end, 0) >= 0)
         slice->coords[r] = FIX2INT(rb_ary_entry(begin_end, 0));
-      else
+      else
         slice->coords[r] = shape[r] + FIX2INT(rb_ary_entry(begin_end, 0));
       if (rb_ary_entry(begin_end, 1) >= 0)
         slice->lengths[r] = FIX2INT(rb_ary_entry(begin_end, 1)) - slice->coords[r];
-      else
+      else
         slice->lengths[r] = shape[r] + FIX2INT(rb_ary_entry(begin_end, 1)) - slice->coords[r];
 
       if (RHASH_EMPTY_P(v)) t++; // go on to the next
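Aside from the signedness cast on argc and trailing-whitespace cleanups, these last two hunks sit in get_slice, where a begin/end pair becomes a slice's coords and lengths: a negative begin or end is wrapped by adding the dimension's extent (shape[r]) before the length is computed as the difference from the resolved begin. The standalone sketch below mirrors only that arithmetic; whether end is inclusive or exclusive is up to the caller's convention and is not asserted here.

#include <cstdio>

// Sketch of the begin/end normalization in get_slice: negative positions are
// taken relative to the dimension's extent, then length = resolved_end - begin.
struct SliceDim { int coord; int length; };

SliceDim resolve(int begin, int end, int extent) {
  SliceDim s;
  s.coord  = begin >= 0 ? begin : extent + begin;
  s.length = (end >= 0 ? end : extent + end) - s.coord;
  return s;
}

int main() {
  SliceDim a = resolve(1, 4, 10);    // coord 1, length 3
  SliceDim b = resolve(-3, -1, 10);  // coord 7, length 2
  std::printf("%d %d | %d %d\n", a.coord, a.length, b.coord, b.length);
  return 0;
}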