cumo 0.1.0 → 0.1.1

Files changed (158)
  1. checksums.yaml +5 -5
  2. data/.gitignore +1 -0
  3. data/3rd_party/LICENSE.txt +60 -0
  4. data/3rd_party/mkmf-cu/lib/mkmf-cu/cli.rb +13 -1
  5. data/LICENSE.txt +1 -62
  6. data/README.md +33 -29
  7. data/bench/cumo_bench.rb +47 -25
  8. data/bench/numo_bench.rb +27 -25
  9. data/docs/src-tree.md +16 -0
  10. data/ext/cumo/cuda/cublas.c +69 -219
  11. data/ext/cumo/cuda/memory_pool_impl.hpp +1 -0
  12. data/ext/cumo/cuda/runtime.c +2 -14
  13. data/ext/cumo/cumo.c +16 -16
  14. data/ext/cumo/include/cumo.h +2 -2
  15. data/ext/cumo/include/cumo/cuda/cublas.h +6 -129
  16. data/ext/cumo/include/cumo/cuda/runtime.h +16 -0
  17. data/ext/cumo/include/cumo/indexer.h +46 -63
  18. data/ext/cumo/include/cumo/intern.h +58 -112
  19. data/ext/cumo/include/cumo/narray.h +214 -185
  20. data/ext/cumo/include/cumo/narray_kernel.h +66 -37
  21. data/ext/cumo/include/cumo/ndloop.h +42 -42
  22. data/ext/cumo/include/cumo/reduce_kernel.h +55 -71
  23. data/ext/cumo/include/cumo/template.h +56 -51
  24. data/ext/cumo/include/cumo/template_kernel.h +31 -31
  25. data/ext/cumo/include/cumo/types/bit.h +3 -3
  26. data/ext/cumo/include/cumo/types/bit_kernel.h +2 -2
  27. data/ext/cumo/include/cumo/types/complex.h +126 -126
  28. data/ext/cumo/include/cumo/types/complex_kernel.h +126 -126
  29. data/ext/cumo/include/cumo/types/complex_macro.h +28 -28
  30. data/ext/cumo/include/cumo/types/complex_macro_kernel.h +20 -20
  31. data/ext/cumo/include/cumo/types/dcomplex.h +5 -5
  32. data/ext/cumo/include/cumo/types/dcomplex_kernel.h +1 -1
  33. data/ext/cumo/include/cumo/types/int_macro.h +1 -1
  34. data/ext/cumo/include/cumo/types/int_macro_kernel.h +1 -1
  35. data/ext/cumo/include/cumo/types/robj_macro.h +30 -30
  36. data/ext/cumo/include/cumo/types/scomplex.h +5 -5
  37. data/ext/cumo/include/cumo/types/scomplex_kernel.h +1 -1
  38. data/ext/cumo/narray/array.c +143 -143
  39. data/ext/cumo/narray/data.c +184 -184
  40. data/ext/cumo/narray/gen/cogen.rb +5 -2
  41. data/ext/cumo/narray/gen/cogen_kernel.rb +5 -2
  42. data/ext/cumo/narray/gen/def/dcomplex.rb +1 -1
  43. data/ext/cumo/narray/gen/def/scomplex.rb +1 -1
  44. data/ext/cumo/narray/gen/erbln.rb +132 -0
  45. data/ext/cumo/narray/gen/erbpp2.rb +18 -13
  46. data/ext/cumo/narray/gen/narray_def.rb +3 -3
  47. data/ext/cumo/narray/gen/spec.rb +2 -2
  48. data/ext/cumo/narray/gen/tmpl/accum.c +15 -15
  49. data/ext/cumo/narray/gen/tmpl/accum_binary.c +22 -22
  50. data/ext/cumo/narray/gen/tmpl/accum_binary_kernel.cu +3 -3
  51. data/ext/cumo/narray/gen/tmpl/accum_index.c +30 -30
  52. data/ext/cumo/narray/gen/tmpl/accum_index_kernel.cu +2 -2
  53. data/ext/cumo/narray/gen/tmpl/accum_kernel.cu +3 -3
  54. data/ext/cumo/narray/gen/tmpl/alloc_func.c +14 -14
  55. data/ext/cumo/narray/gen/tmpl/allocate.c +11 -11
  56. data/ext/cumo/narray/gen/tmpl/aref.c +2 -2
  57. data/ext/cumo/narray/gen/tmpl/aref_cpu.c +4 -4
  58. data/ext/cumo/narray/gen/tmpl/aset.c +2 -2
  59. data/ext/cumo/narray/gen/tmpl/binary.c +28 -28
  60. data/ext/cumo/narray/gen/tmpl/binary2.c +18 -18
  61. data/ext/cumo/narray/gen/tmpl/binary2_kernel.cu +3 -3
  62. data/ext/cumo/narray/gen/tmpl/binary_kernel.cu +6 -6
  63. data/ext/cumo/narray/gen/tmpl/binary_s.c +13 -13
  64. data/ext/cumo/narray/gen/tmpl/binary_s_kernel.cu +3 -3
  65. data/ext/cumo/narray/gen/tmpl/bincount.c +23 -23
  66. data/ext/cumo/narray/gen/tmpl/cast.c +7 -7
  67. data/ext/cumo/narray/gen/tmpl/cast_array.c +3 -3
  68. data/ext/cumo/narray/gen/tmpl/clip.c +38 -38
  69. data/ext/cumo/narray/gen/tmpl/complex_accum_kernel.cu +2 -2
  70. data/ext/cumo/narray/gen/tmpl/cond_binary.c +19 -19
  71. data/ext/cumo/narray/gen/tmpl/cond_binary_kernel.cu +7 -7
  72. data/ext/cumo/narray/gen/tmpl/cond_unary.c +15 -15
  73. data/ext/cumo/narray/gen/tmpl/cum.c +15 -15
  74. data/ext/cumo/narray/gen/tmpl/each.c +9 -9
  75. data/ext/cumo/narray/gen/tmpl/each_with_index.c +9 -9
  76. data/ext/cumo/narray/gen/tmpl/ewcomp.c +15 -15
  77. data/ext/cumo/narray/gen/tmpl/ewcomp_kernel.cu +3 -3
  78. data/ext/cumo/narray/gen/tmpl/extract_cpu.c +5 -5
  79. data/ext/cumo/narray/gen/tmpl/extract_data.c +12 -12
  80. data/ext/cumo/narray/gen/tmpl/eye.c +9 -9
  81. data/ext/cumo/narray/gen/tmpl/eye_kernel.cu +3 -3
  82. data/ext/cumo/narray/gen/tmpl/fill.c +9 -9
  83. data/ext/cumo/narray/gen/tmpl/fill_kernel.cu +6 -6
  84. data/ext/cumo/narray/gen/tmpl/float_accum_kernel.cu +1 -1
  85. data/ext/cumo/narray/gen/tmpl/format.c +11 -11
  86. data/ext/cumo/narray/gen/tmpl/format_to_a.c +8 -8
  87. data/ext/cumo/narray/gen/tmpl/frexp.c +13 -13
  88. data/ext/cumo/narray/gen/tmpl/gemm.c +252 -108
  89. data/ext/cumo/narray/gen/tmpl/inspect.c +1 -1
  90. data/ext/cumo/narray/gen/tmpl/lib.c +2 -2
  91. data/ext/cumo/narray/gen/tmpl/logseq.c +7 -7
  92. data/ext/cumo/narray/gen/tmpl/logseq_kernel.cu +6 -6
  93. data/ext/cumo/narray/gen/tmpl/map_with_index.c +17 -17
  94. data/ext/cumo/narray/gen/tmpl/median.c +10 -10
  95. data/ext/cumo/narray/gen/tmpl/minmax.c +10 -10
  96. data/ext/cumo/narray/gen/tmpl/new_dim0.c +3 -3
  97. data/ext/cumo/narray/gen/tmpl/poly.c +6 -6
  98. data/ext/cumo/narray/gen/tmpl/pow.c +28 -28
  99. data/ext/cumo/narray/gen/tmpl/pow_kernel.cu +6 -6
  100. data/ext/cumo/narray/gen/tmpl/rand.c +10 -10
  101. data/ext/cumo/narray/gen/tmpl/rand_norm.c +7 -7
  102. data/ext/cumo/narray/gen/tmpl/real_accum_kernel.cu +6 -6
  103. data/ext/cumo/narray/gen/tmpl/seq.c +7 -7
  104. data/ext/cumo/narray/gen/tmpl/seq_kernel.cu +6 -6
  105. data/ext/cumo/narray/gen/tmpl/set2.c +20 -20
  106. data/ext/cumo/narray/gen/tmpl/sort.c +11 -11
  107. data/ext/cumo/narray/gen/tmpl/sort_index.c +18 -18
  108. data/ext/cumo/narray/gen/tmpl/store.c +6 -6
  109. data/ext/cumo/narray/gen/tmpl/store_array.c +19 -19
  110. data/ext/cumo/narray/gen/tmpl/store_array_kernel.cu +12 -12
  111. data/ext/cumo/narray/gen/tmpl/store_bit.c +23 -23
  112. data/ext/cumo/narray/gen/tmpl/store_bit_kernel.cu +28 -28
  113. data/ext/cumo/narray/gen/tmpl/store_from.c +16 -16
  114. data/ext/cumo/narray/gen/tmpl/store_from_kernel.cu +12 -12
  115. data/ext/cumo/narray/gen/tmpl/to_a.c +10 -10
  116. data/ext/cumo/narray/gen/tmpl/unary.c +25 -25
  117. data/ext/cumo/narray/gen/tmpl/unary2.c +17 -17
  118. data/ext/cumo/narray/gen/tmpl/unary_kernel.cu +15 -15
  119. data/ext/cumo/narray/gen/tmpl/unary_ret2.c +13 -13
  120. data/ext/cumo/narray/gen/tmpl/unary_s.c +17 -17
  121. data/ext/cumo/narray/gen/tmpl/unary_s_kernel.cu +12 -12
  122. data/ext/cumo/narray/gen/tmpl_bit/allocate.c +9 -9
  123. data/ext/cumo/narray/gen/tmpl_bit/aref.c +2 -2
  124. data/ext/cumo/narray/gen/tmpl_bit/aref_cpu.c +5 -5
  125. data/ext/cumo/narray/gen/tmpl_bit/aset.c +2 -2
  126. data/ext/cumo/narray/gen/tmpl_bit/binary.c +29 -29
  127. data/ext/cumo/narray/gen/tmpl_bit/bit_count.c +14 -14
  128. data/ext/cumo/narray/gen/tmpl_bit/bit_count_cpu.c +21 -21
  129. data/ext/cumo/narray/gen/tmpl_bit/bit_count_kernel.cu +28 -28
  130. data/ext/cumo/narray/gen/tmpl_bit/bit_reduce.c +29 -29
  131. data/ext/cumo/narray/gen/tmpl_bit/each.c +10 -10
  132. data/ext/cumo/narray/gen/tmpl_bit/each_with_index.c +10 -10
  133. data/ext/cumo/narray/gen/tmpl_bit/extract.c +8 -8
  134. data/ext/cumo/narray/gen/tmpl_bit/extract_cpu.c +8 -8
  135. data/ext/cumo/narray/gen/tmpl_bit/fill.c +17 -17
  136. data/ext/cumo/narray/gen/tmpl_bit/format.c +14 -14
  137. data/ext/cumo/narray/gen/tmpl_bit/format_to_a.c +11 -11
  138. data/ext/cumo/narray/gen/tmpl_bit/inspect.c +3 -3
  139. data/ext/cumo/narray/gen/tmpl_bit/mask.c +33 -33
  140. data/ext/cumo/narray/gen/tmpl_bit/store_array.c +19 -19
  141. data/ext/cumo/narray/gen/tmpl_bit/store_bit.c +22 -22
  142. data/ext/cumo/narray/gen/tmpl_bit/store_from.c +18 -18
  143. data/ext/cumo/narray/gen/tmpl_bit/to_a.c +12 -12
  144. data/ext/cumo/narray/gen/tmpl_bit/unary.c +24 -24
  145. data/ext/cumo/narray/gen/tmpl_bit/where.c +16 -16
  146. data/ext/cumo/narray/gen/tmpl_bit/where2.c +20 -20
  147. data/ext/cumo/narray/index.c +213 -213
  148. data/ext/cumo/narray/math.c +27 -27
  149. data/ext/cumo/narray/narray.c +484 -484
  150. data/ext/cumo/narray/ndloop.c +259 -258
  151. data/ext/cumo/narray/rand.c +3 -3
  152. data/ext/cumo/narray/step.c +70 -70
  153. data/ext/cumo/narray/struct.c +139 -139
  154. metadata +6 -7
  155. data/ext/cumo/include/cumo/intern_fwd.h +0 -38
  156. data/lib/erbpp.rb +0 -294
  157. data/lib/erbpp/line_number.rb +0 -137
  158. data/lib/erbpp/narray_def.rb +0 -381
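The hunks below are from data/ext/cumo/narray/index.c (item 147 above). The change that dominates this release is namespacing: internal types, macros, interned symbols/IDs, and functions that previously carried Numo-style names (narray_t, GetNArray, NA_NDIM, SDX_*, sym_*, id_*) are renamed with a cumo_ / CUMO_ / Cumo prefix. The snippet below is a hypothetical extension fragment, not code from the gem, sketching how C code built against these headers reads after the rename; the function name print_ndim and the include path are assumptions.

/* Hypothetical sketch (not from the gem): the renamed identifiers
 * cumo_narray_t, CumoGetNArray, and CUMO_NA_NDIM replace the old
 * narray_t, GetNArray, and NA_NDIM used before 0.1.1. */
#include <ruby.h>
#include "cumo/narray.h"                /* assumed include path */

static VALUE
print_ndim(VALUE mod, VALUE ary)        /* name chosen for illustration */
{
    cumo_narray_t *na;                  /* was: narray_t *na;          */
    CumoGetNArray(ary, na);             /* was: GetNArray(ary, na);    */
    return INT2NUM(CUMO_NA_NDIM(na));   /* was: NA_NDIM(na)            */
}

Such a function would be registered from an Init function with rb_define_method, the same pattern Init_cumo_na_index uses for Cumo::NArray#slice at the end of the diff.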
@@ -31,7 +31,7 @@ struct enumerator {
  // note: the memory refed by this pointer is not freed and causes memroy leak.
  //
  // @example
- // a[1..3,1] generates two na_index_arg_t(s). First is for 1..3, and second is for 1.
+ // a[1..3,1] generates two cumo_na_index_arg_t(s). First is for 1..3, and second is for 1.
  typedef struct {
  size_t n; // the number of elements of the dimesnion
  size_t beg; // the starting point in the dimension
@@ -39,14 +39,14 @@ typedef struct {
  size_t *idx; // list of indices
  int reduce; // true if the dimension is reduced by addition
  int orig_dim; // the dimension of original array
- } na_index_arg_t;
+ } cumo_na_index_arg_t;


  static void
- print_index_arg(na_index_arg_t *q, int n)
+ print_index_arg(cumo_na_index_arg_t *q, int n)
  {
  int i;
- printf("na_index_arg_t = 0x%"SZF"x {\n",(size_t)q);
+ printf("cumo_na_index_arg_t = 0x%"SZF"x {\n",(size_t)q);
  for (i=0; i<n; i++) {
  printf(" q[%d].n=%"SZF"d\n",i,q[i].n);
  printf(" q[%d].beg=%"SZF"d\n",i,q[i].beg);
@@ -58,29 +58,29 @@ print_index_arg(na_index_arg_t *q, int n)
  printf("}\n");
  }

- static VALUE sym_ast;
- static VALUE sym_all;
- //static VALUE sym_reduce;
- static VALUE sym_minus;
- static VALUE sym_new;
- static VALUE sym_reverse;
- static VALUE sym_plus;
- static VALUE sym_sum;
- static VALUE sym_tilde;
- static VALUE sym_rest;
- static ID id_beg;
- static ID id_end;
- static ID id_exclude_end;
- static ID id_each;
- static ID id_step;
- static ID id_dup;
- static ID id_bracket;
- static ID id_shift_left;
- static ID id_mask;
+ static VALUE cumo_sym_ast;
+ static VALUE cumo_sym_all;
+ //static VALUE cumo_sym_reduce;
+ static VALUE cumo_sym_minus;
+ static VALUE cumo_sym_new;
+ static VALUE cumo_sym_reverse;
+ static VALUE cumo_sym_plus;
+ static VALUE cumo_sym_sum;
+ static VALUE cumo_sym_tilde;
+ static VALUE cumo_sym_rest;
+ static ID cumo_id_beg;
+ static ID cumo_id_end;
+ static ID cumo_id_exclude_end;
+ static ID cumo_id_each;
+ static ID cumo_id_step;
+ static ID cumo_id_dup;
+ static ID cumo_id_bracket;
+ static ID cumo_id_shift_left;
+ static ID cumo_id_mask;


  static void
- na_index_set_step(na_index_arg_t *q, int i, size_t n, size_t beg, ssize_t step)
+ cumo_na_index_set_step(cumo_na_index_arg_t *q, int i, size_t n, size_t beg, ssize_t step)
  {
  q->n = n;
  q->beg = beg;
@@ -91,7 +91,7 @@ na_index_set_step(na_index_arg_t *q, int i, size_t n, size_t beg, ssize_t step)
  }

  static void
- na_index_set_scalar(na_index_arg_t *q, int i, ssize_t size, ssize_t x)
+ cumo_na_index_set_scalar(cumo_na_index_arg_t *q, int i, ssize_t size, ssize_t x)
  {
  if (x < -size || x >= size)
  rb_raise(rb_eRangeError,
@@ -108,7 +108,7 @@ na_index_set_scalar(na_index_arg_t *q, int i, ssize_t size, ssize_t x)
  }

  static inline ssize_t
- na_range_check(ssize_t pos, ssize_t size, int dim)
+ cumo_na_range_check(ssize_t pos, ssize_t size, int dim)
  {
  ssize_t idx=pos;

@@ -121,13 +121,13 @@ na_range_check(ssize_t pos, ssize_t size, int dim)
  }

  static void
- na_parse_array(VALUE ary, int orig_dim, ssize_t size, na_index_arg_t *q)
+ cumo_na_parse_array(VALUE ary, int orig_dim, ssize_t size, cumo_na_index_arg_t *q)
  {
  int k;
  int n = RARRAY_LEN(ary);
  q->idx = ALLOC_N(size_t, n);
  for (k=0; k<n; k++) {
- q->idx[k] = na_range_check(NUM2SSIZET(RARRAY_AREF(ary,k)), size, orig_dim);
+ q->idx[k] = cumo_na_range_check(NUM2SSIZET(RARRAY_AREF(ary,k)), size, orig_dim);
  }
  q->n = n;
  q->beg = 0;
@@ -137,32 +137,32 @@ na_parse_array(VALUE ary, int orig_dim, ssize_t size, na_index_arg_t *q)
  }

  static void
- na_parse_narray_index(VALUE a, int orig_dim, ssize_t size, na_index_arg_t *q)
+ cumo_na_parse_narray_index(VALUE a, int orig_dim, ssize_t size, cumo_na_index_arg_t *q)
  {
  VALUE idx;
- narray_t *na;
- narray_data_t *nidx;
+ cumo_narray_t *na;
+ cumo_narray_data_t *nidx;
  size_t k, n;
  ssize_t *nidxp;

- GetNArray(a,na);
- if (NA_NDIM(na) != 1) {
+ CumoGetNArray(a,na);
+ if (CUMO_NA_NDIM(na) != 1) {
  rb_raise(rb_eIndexError, "should be 1-d NArray");
  }
- n = NA_SIZE(na);
- idx = nary_new(cIndex,1,&n);
- na_store(idx,a);
+ n = CUMO_NA_SIZE(na);
+ idx = cumo_na_new(cIndex,1,&n);
+ cumo_na_store(idx,a);

- GetNArrayData(idx,nidx);
+ CumoGetNArrayData(idx,nidx);
  nidxp = (ssize_t*)nidx->ptr;
  q->idx = ALLOC_N(size_t, n);

  // ndixp is cuda memory (cuda narray)
- SHOW_SYNCHRONIZE_WARNING_ONCE("na_parse_narray_index", "any");
+ CUMO_SHOW_SYNCHRONIZE_WARNING_ONCE("cumo_na_parse_narray_index", "any");
  cumo_cuda_runtime_check_status(cudaDeviceSynchronize());

  for (k=0; k<n; k++) {
- q->idx[k] = na_range_check(nidxp[k], size, orig_dim);
+ q->idx[k] = cumo_na_range_check(nidxp[k], size, orig_dim);
  }
  q->n = n;
  q->beg = 0;
@@ -172,22 +172,22 @@ na_parse_narray_index(VALUE a, int orig_dim, ssize_t size, na_index_arg_t *q)
  }

  static void
- na_parse_range(VALUE range, ssize_t step, int orig_dim, ssize_t size, na_index_arg_t *q)
+ cumo_na_parse_range(VALUE range, ssize_t step, int orig_dim, ssize_t size, cumo_na_index_arg_t *q)
  {
  int n;
  VALUE excl_end;
  ssize_t beg, end, beg_orig, end_orig;
  const char *dot = "..", *edot = "...";

- beg = beg_orig = NUM2SSIZET(rb_funcall(range,id_beg,0));
+ beg = beg_orig = NUM2SSIZET(rb_funcall(range,cumo_id_beg,0));
  if (beg < 0) {
  beg += size;
  }
- end = end_orig = NUM2SSIZET(rb_funcall(range,id_end,0));
+ end = end_orig = NUM2SSIZET(rb_funcall(range,cumo_id_end,0));
  if (end < 0) {
  end += size;
  }
- excl_end = rb_funcall(range,id_exclude_end,0);
+ excl_end = rb_funcall(range,cumo_id_exclude_end,0);
  if (RTEST(excl_end)) {
  end--;
  dot = edot;
@@ -199,12 +199,12 @@ na_parse_range(VALUE range, ssize_t step, int orig_dim, ssize_t size, na_index_a
  }
  n = (end-beg)/step+1;
  if (n<0) n=0;
- na_index_set_step(q,orig_dim,n,beg,step);
+ cumo_na_index_set_step(q,orig_dim,n,beg,step);

  }

  static void
- na_parse_enumerator(VALUE enum_obj, int orig_dim, ssize_t size, na_index_arg_t *q)
+ cumo_na_parse_enumerator(VALUE enum_obj, int orig_dim, ssize_t size, cumo_na_index_arg_t *q)
  {
  int len;
  ssize_t step;
@@ -216,10 +216,10 @@ na_parse_enumerator(VALUE enum_obj, int orig_dim, ssize_t size, na_index_arg_t *
  e = (struct enumerator *)DATA_PTR(enum_obj);

  if (rb_obj_is_kind_of(e->obj, rb_cRange)) {
- if (e->meth == id_each) {
- na_parse_range(e->obj, 1, orig_dim, size, q);
+ if (e->meth == cumo_id_each) {
+ cumo_na_parse_range(e->obj, 1, orig_dim, size, q);
  }
- else if (e->meth == id_step) {
+ else if (e->meth == cumo_id_step) {
  if (TYPE(e->args) != T_ARRAY) {
  rb_raise(rb_eArgError,"no argument for step");
  }
@@ -228,7 +228,7 @@ na_parse_enumerator(VALUE enum_obj, int orig_dim, ssize_t size, na_index_arg_t *
  rb_raise(rb_eArgError,"invalid number of step argument (1 for %d)",len);
  }
  step = NUM2SSIZET(RARRAY_AREF(e->args,0));
- na_parse_range(e->obj, step, orig_dim, size, q);
+ cumo_na_parse_range(e->obj, step, orig_dim, size, q);
  } else {
  rb_raise(rb_eTypeError,"unknown Range method: %s",rb_id2name(e->meth));
  }
@@ -244,39 +244,39 @@ na_parse_enumerator(VALUE enum_obj, int orig_dim, ssize_t size, na_index_arg_t *
  // i: parse i-th index
  // q: parsed information is stored to *q
  static void
- na_index_parse_each(volatile VALUE a, ssize_t size, int i, na_index_arg_t *q)
+ cumo_na_index_parse_each(volatile VALUE a, ssize_t size, int i, cumo_na_index_arg_t *q)
  {
  switch(TYPE(a)) {

  case T_FIXNUM:
- na_index_set_scalar(q,i,size,FIX2LONG(a));
+ cumo_na_index_set_scalar(q,i,size,FIX2LONG(a));
  break;

  case T_BIGNUM:
- na_index_set_scalar(q,i,size,NUM2SSIZET(a));
+ cumo_na_index_set_scalar(q,i,size,NUM2SSIZET(a));
  break;

  case T_FLOAT:
- na_index_set_scalar(q,i,size,NUM2SSIZET(a));
+ cumo_na_index_set_scalar(q,i,size,NUM2SSIZET(a));
  break;

  case T_NIL:
  case T_TRUE:
- na_index_set_step(q,i,size,0,1);
+ cumo_na_index_set_step(q,i,size,0,1);
  break;

  case T_SYMBOL:
- if (a==sym_all || a==sym_ast) {
- na_index_set_step(q,i,size,0,1);
+ if (a==cumo_sym_all || a==cumo_sym_ast) {
+ cumo_na_index_set_step(q,i,size,0,1);
  }
- else if (a==sym_reverse) {
- na_index_set_step(q,i,size,size-1,-1);
+ else if (a==cumo_sym_reverse) {
+ cumo_na_index_set_step(q,i,size,size-1,-1);
  }
- else if (a==sym_new) {
- na_index_set_step(q,i,1,0,1);
+ else if (a==cumo_sym_new) {
+ cumo_na_index_set_step(q,i,1,0,1);
  }
- else if (a==sym_reduce || a==sym_sum || a==sym_plus) {
- na_index_set_step(q,i,size,0,1);
+ else if (a==cumo_sym_reduce || a==cumo_sym_sum || a==cumo_sym_plus) {
+ cumo_na_index_set_step(q,i,size,0,1);
  q->reduce = 1;
  } else {
  rb_raise(rb_eIndexError, "invalid symbol for index");
@@ -284,24 +284,24 @@ na_index_parse_each(volatile VALUE a, ssize_t size, int i, na_index_arg_t *q)
  break;

  case T_ARRAY:
- na_parse_array(a, i, size, q);
+ cumo_na_parse_array(a, i, size, q);
  break;

  default:
  if (rb_obj_is_kind_of(a, rb_cRange)) {
- na_parse_range(a, 1, i, size, q);
+ cumo_na_parse_range(a, 1, i, size, q);
  }
  else if (rb_obj_is_kind_of(a, rb_cEnumerator)) {
- na_parse_enumerator(a, i, size, q);
+ cumo_na_parse_enumerator(a, i, size, q);
  }
- else if (rb_obj_is_kind_of(a, na_cStep)) {
+ else if (rb_obj_is_kind_of(a, cumo_na_cStep)) {
  ssize_t beg, step, n;
- nary_step_array_index(a, size, (size_t*)(&n), &beg, &step);
- na_index_set_step(q,i,n,beg,step);
+ cumo_na_step_array_index(a, size, (size_t*)(&n), &beg, &step);
+ cumo_na_index_set_step(q,i,n,beg,step);
  }
  // NArray index
- else if (NA_IsNArray(a)) {
- na_parse_narray_index(a, i, size, q);
+ else if (CUMO_NA_CumoIsNArray(a)) {
+ cumo_na_parse_narray_index(a, i, size, q);
  }
  else {
  rb_raise(rb_eIndexError, "not allowed type");
@@ -311,7 +311,7 @@ na_index_parse_each(volatile VALUE a, ssize_t size, int i, na_index_arg_t *q)


  static size_t
- na_index_parse_args(VALUE args, narray_t *na, na_index_arg_t *q, int ndim)
+ cumo_na_index_parse_args(VALUE args, cumo_narray_t *na, cumo_na_index_arg_t *q, int ndim)
  {
  int i, j, k, l, nidx;
  size_t total=1;
@@ -329,7 +329,7 @@ na_index_parse_args(VALUE args, narray_t *na, na_index_arg_t *q, int ndim)
  if (v==Qfalse) {
  for (l = ndim - (nidx-1); l>0; l--) {
  //printf("i=%d j=%d k=%d l=%d ndim=%d nidx=%d\n",i,j,k,l,ndim,nidx);
- na_index_parse_each(Qtrue, na->shape[k], k, &q[j]);
+ cumo_na_index_parse_each(Qtrue, na->shape[k], k, &q[j]);
  if (q[j].n > 1) {
  total *= q[j].n;
  }
@@ -338,13 +338,13 @@ na_index_parse_args(VALUE args, narray_t *na, na_index_arg_t *q, int ndim)
  }
  }
  // new dimension
- else if (v==sym_new) {
- na_index_parse_each(v, 1, k, &q[j]);
+ else if (v==cumo_sym_new) {
+ cumo_na_index_parse_each(v, 1, k, &q[j]);
  j++;
  }
  // other dimention
  else {
- na_index_parse_each(v, na->shape[k], k, &q[j]);
+ cumo_na_index_parse_each(v, na->shape[k], k, &q[j]);
  if (q[j].n > 1) {
  total *= q[j].n;
  }
@@ -357,7 +357,7 @@ na_index_parse_args(VALUE args, narray_t *na, na_index_arg_t *q, int ndim)


  static void
- na_get_strides_nadata(const narray_data_t *na, ssize_t *strides, ssize_t elmsz)
+ cumo_na_get_strides_nadata(const cumo_narray_data_t *na, ssize_t *strides, ssize_t elmsz)
  {
  int i = na->base.ndim - 1;
  strides[i] = elmsz;
@@ -367,8 +367,8 @@ na_get_strides_nadata(const narray_data_t *na, ssize_t *strides, ssize_t elmsz)
  }

  static void
- na_index_aref_nadata(narray_data_t *na1, narray_view_t *na2,
- na_index_arg_t *q, ssize_t elmsz, int ndim, int keep_dim)
+ cumo_na_index_aref_nadata(cumo_narray_data_t *na1, cumo_narray_view_t *na2,
+ cumo_na_index_arg_t *q, ssize_t elmsz, int ndim, int keep_dim)
  {
  int i, j;
  ssize_t size, k, total=1;
@@ -379,7 +379,7 @@ na_index_aref_nadata(narray_data_t *na1, narray_view_t *na2,
  VALUE m;

  strides_na1 = ALLOCA_N(ssize_t, na1->base.ndim);
- na_get_strides_nadata(na1, strides_na1, elmsz);
+ cumo_na_get_strides_nadata(na1, strides_na1, elmsz);

  for (i=j=0; i<ndim; i++) {
  stride1 = strides_na1[q[i].orig_dim];
@@ -394,14 +394,14 @@ na_index_aref_nadata(narray_data_t *na1, narray_view_t *na2,
  na2->base.shape[j] = size = q[i].n;

  if (q[i].reduce != 0) {
- m = rb_funcall(INT2FIX(1),id_shift_left,1,INT2FIX(j));
+ m = rb_funcall(INT2FIX(1),cumo_id_shift_left,1,INT2FIX(j));
  na2->base.reduce = rb_funcall(m,'|',1,na2->base.reduce);
  }

  // array index
  if (q[i].idx != NULL) {
  index = q[i].idx;
- SDX_SET_INDEX(na2->stridx[j],index);
+ CUMO_SDX_SET_INDEX(na2->stridx[j],index);
  q[i].idx = NULL;
  for (k=0; k<size; k++) {
  index[k] = index[k] * stride1;
@@ -410,7 +410,7 @@ na_index_aref_nadata(narray_data_t *na1, narray_view_t *na2,
  beg = q[i].beg;
  step = q[i].step;
  na2->offset += stride1*beg;
- SDX_SET_STRIDE(na2->stridx[j], stride1*step);
+ CUMO_SDX_SET_STRIDE(na2->stridx[j], stride1*step);
  }
  j++;
  total *= size;
@@ -420,22 +420,22 @@ na_index_aref_nadata(narray_data_t *na1, narray_view_t *na2,


  static void
- na_index_aref_naview(narray_view_t *na1, narray_view_t *na2,
- na_index_arg_t *q, ssize_t elmsz, int ndim, int keep_dim)
+ cumo_na_index_aref_naview(cumo_narray_view_t *na1, cumo_narray_view_t *na2,
+ cumo_na_index_arg_t *q, ssize_t elmsz, int ndim, int keep_dim)
  {
  int i, j;
  ssize_t total=1;

  for (i=j=0; i<ndim; i++) {
- stridx_t sdx1 = na1->stridx[q[i].orig_dim];
+ cumo_stridx_t sdx1 = na1->stridx[q[i].orig_dim];
  ssize_t size;

  // numeric index -- trim dimension
  if (!keep_dim && q[i].n==1 && q[i].step==0) {
- if (SDX_IS_INDEX(sdx1)) {
- na2->offset += SDX_GET_INDEX(sdx1)[q[i].beg];
+ if (CUMO_SDX_IS_INDEX(sdx1)) {
+ na2->offset += CUMO_SDX_GET_INDEX(sdx1)[q[i].beg];
  } else {
- na2->offset += SDX_GET_STRIDE(sdx1)*q[i].beg;
+ na2->offset += CUMO_SDX_GET_STRIDE(sdx1)*q[i].beg;
  }
  continue;
  }
@@ -443,30 +443,30 @@ na_index_aref_naview(narray_view_t *na1, narray_view_t *na2,
  na2->base.shape[j] = size = q[i].n;

  if (q[i].reduce != 0) {
- VALUE m = rb_funcall(INT2FIX(1),id_shift_left,1,INT2FIX(j));
+ VALUE m = rb_funcall(INT2FIX(1),cumo_id_shift_left,1,INT2FIX(j));
  na2->base.reduce = rb_funcall(m,'|',1,na2->base.reduce);
  }

  if (q[i].orig_dim >= na1->base.ndim) {
  // new dimension
- SDX_SET_STRIDE(na2->stridx[j], elmsz);
+ CUMO_SDX_SET_STRIDE(na2->stridx[j], elmsz);
  }
- else if (q[i].idx != NULL && SDX_IS_INDEX(sdx1)) {
+ else if (q[i].idx != NULL && CUMO_SDX_IS_INDEX(sdx1)) {
  // index <- index
  int k;
  size_t *index = q[i].idx;
- SDX_SET_INDEX(na2->stridx[j], index);
+ CUMO_SDX_SET_INDEX(na2->stridx[j], index);
  q[i].idx = NULL;

  for (k=0; k<size; k++) {
- index[k] = SDX_GET_INDEX(sdx1)[index[k]];
+ index[k] = CUMO_SDX_GET_INDEX(sdx1)[index[k]];
  }
  }
- else if (q[i].idx != NULL && SDX_IS_STRIDE(sdx1)) {
+ else if (q[i].idx != NULL && CUMO_SDX_IS_STRIDE(sdx1)) {
  // index <- step
- ssize_t stride1 = SDX_GET_STRIDE(sdx1);
+ ssize_t stride1 = CUMO_SDX_GET_STRIDE(sdx1);
  size_t *index = q[i].idx;
- SDX_SET_INDEX(na2->stridx[j],index);
+ CUMO_SDX_SET_INDEX(na2->stridx[j],index);
  q[i].idx = NULL;

  if (stride1<0) {
@@ -488,24 +488,24 @@ na_index_aref_naview(narray_view_t *na1, narray_view_t *na2,
  }
  }
  }
- else if (q[i].idx == NULL && SDX_IS_INDEX(sdx1)) {
+ else if (q[i].idx == NULL && CUMO_SDX_IS_INDEX(sdx1)) {
  // step <- index
  int k;
  size_t beg = q[i].beg;
  ssize_t step = q[i].step;
  size_t *index = ALLOC_N(size_t, size);
- SDX_SET_INDEX(na2->stridx[j],index);
+ CUMO_SDX_SET_INDEX(na2->stridx[j],index);
  for (k=0; k<size; k++) {
- index[k] = SDX_GET_INDEX(sdx1)[beg+step*k];
+ index[k] = CUMO_SDX_GET_INDEX(sdx1)[beg+step*k];
  }
  }
- else if (q[i].idx == NULL && SDX_IS_STRIDE(sdx1)) {
+ else if (q[i].idx == NULL && CUMO_SDX_IS_STRIDE(sdx1)) {
  // step <- step
  size_t beg = q[i].beg;
  ssize_t step = q[i].step;
- ssize_t stride1 = SDX_GET_STRIDE(sdx1);
+ ssize_t stride1 = CUMO_SDX_GET_STRIDE(sdx1);
  na2->offset += stride1*beg;
- SDX_SET_STRIDE(na2->stridx[j], stride1*step);
+ CUMO_SDX_SET_STRIDE(na2->stridx[j], stride1*step);
  }

  j++;
@@ -516,7 +516,7 @@ na_index_aref_naview(narray_view_t *na1, narray_view_t *na2,


  static int
- na_ndim_new_narray(int ndim, const na_index_arg_t *q)
+ cumo_na_ndim_new_narray(int ndim, const cumo_na_index_arg_t *q)
  {
  int i, ndim_new=0;
  for (i=0; i<ndim; i++) {
@@ -530,20 +530,20 @@ na_ndim_new_narray(int ndim, const na_index_arg_t *q)
  typedef struct {
  VALUE args, self, store;
  int ndim;
- na_index_arg_t *q; // multi-dimensional index args
- narray_t *na1;
+ cumo_na_index_arg_t *q; // multi-dimensional index args
+ cumo_narray_t *na1;
  int keep_dim;
  size_t pos; // offset position for 0-dimensional narray. 0-dimensional array does not use q.
- } na_aref_md_data_t;
+ } cumo_na_aref_md_data_t;

- static na_index_arg_t*
- na_allocate_index_args(int ndim)
+ static cumo_na_index_arg_t*
+ cumo_na_allocate_index_args(int ndim)
  {
- na_index_arg_t *q;
+ cumo_na_index_arg_t *q;
  int i;
  if (ndim == 0) return NULL;

- q = ALLOC_N(na_index_arg_t, ndim);
+ q = ALLOC_N(cumo_na_index_arg_t, ndim);
  for (i=0; i<ndim; i++) {
  q[i].idx = NULL;
  }
@@ -551,77 +551,77 @@ na_allocate_index_args(int ndim)
  }

  static
- VALUE na_aref_md_protected(VALUE data_value)
+ VALUE cumo_na_aref_md_protected(VALUE data_value)
  {
- na_aref_md_data_t *data = (na_aref_md_data_t*)(data_value);
+ cumo_na_aref_md_data_t *data = (cumo_na_aref_md_data_t*)(data_value);
  VALUE self = data->self;
  VALUE args = data->args;
  VALUE store = data->store;
  int ndim = data->ndim;
- na_index_arg_t *q = data->q;
- narray_t *na1 = data->na1;
+ cumo_na_index_arg_t *q = data->q;
+ cumo_narray_t *na1 = data->na1;
  int keep_dim = data->keep_dim;

  int ndim_new;
  VALUE view;
- narray_view_t *na2;
+ cumo_narray_view_t *na2;
  ssize_t elmsz;

- na_index_parse_args(args, na1, q, ndim);
+ cumo_na_index_parse_args(args, na1, q, ndim);

- if (na_debug_flag) print_index_arg(q,ndim);
+ if (cumo_na_debug_flag) print_index_arg(q,ndim);

  if (keep_dim) {
  ndim_new = ndim;
  } else {
- ndim_new = na_ndim_new_narray(ndim, q);
+ ndim_new = cumo_na_ndim_new_narray(ndim, q);
  }
- view = na_s_allocate_view(CLASS_OF(self));
+ view = cumo_na_s_allocate_view(rb_obj_class(self));

- na_copy_flags(self, view);
- GetNArrayView(view,na2);
+ cumo_na_copy_flags(self, view);
+ CumoGetNArrayView(view,na2);

- na_alloc_shape((narray_t*)na2, ndim_new);
+ cumo_na_alloc_shape((cumo_narray_t*)na2, ndim_new);

- na2->stridx = ALLOC_N(stridx_t,ndim_new);
+ na2->stridx = ALLOC_N(cumo_stridx_t,ndim_new);

- elmsz = nary_element_stride(self);
+ elmsz = cumo_na_element_stride(self);

  switch(na1->type) {
- case NARRAY_DATA_T:
- case NARRAY_FILEMAP_T:
+ case CUMO_NARRAY_DATA_T:
+ case CUMO_NARRAY_FILEMAP_T:
  if (ndim == 0) {
  na2->offset = data->pos;
  na2->base.size = 1;
  } else {
- na_index_aref_nadata((narray_data_t *)na1,na2,q,elmsz,ndim,keep_dim);
+ cumo_na_index_aref_nadata((cumo_narray_data_t *)na1,na2,q,elmsz,ndim,keep_dim);
  }
  na2->data = self;
  break;
- case NARRAY_VIEW_T:
+ case CUMO_NARRAY_VIEW_T:
  if (ndim == 0) {
- na2->offset = ((narray_view_t *)na1)->offset + data->pos;
- na2->data = ((narray_view_t *)na1)->data;
+ na2->offset = ((cumo_narray_view_t *)na1)->offset + data->pos;
+ na2->data = ((cumo_narray_view_t *)na1)->data;
  na2->base.size = 1;
  } else {
- na2->offset = ((narray_view_t *)na1)->offset;
- na2->data = ((narray_view_t *)na1)->data;
- na_index_aref_naview((narray_view_t *)na1,na2,q,elmsz,ndim,keep_dim);
+ na2->offset = ((cumo_narray_view_t *)na1)->offset;
+ na2->data = ((cumo_narray_view_t *)na1)->data;
+ cumo_na_index_aref_naview((cumo_narray_view_t *)na1,na2,q,elmsz,ndim,keep_dim);
  }
  break;
  }
  if (store) {
- na_get_pointer_for_write(store); // allocate memory
- na_store(na_flatten_dim(store,0),view);
+ cumo_na_get_pointer_for_write(store); // allocate memory
+ cumo_na_store(cumo_na_flatten_dim(store,0),view);
  return store;
  }
  return view;
  }

  static VALUE
- na_aref_md_ensure(VALUE data_value)
+ cumo_na_aref_md_ensure(VALUE data_value)
  {
- na_aref_md_data_t *data = (na_aref_md_data_t*)(data_value);
+ cumo_na_aref_md_data_t *data = (cumo_na_aref_md_data_t*)(data_value);
  int i;
  for (i=0; i<data->ndim; i++) {
  xfree(data->q[i].idx);
@@ -631,36 +631,36 @@ na_aref_md_ensure(VALUE data_value)
  }

  static VALUE
- na_aref_md(int argc, VALUE *argv, VALUE self, int keep_dim, int result_nd, size_t pos)
+ cumo_na_aref_md(int argc, VALUE *argv, VALUE self, int keep_dim, int result_nd, size_t pos)
  {
  VALUE args; // should be GC protected
- narray_t *na1;
- na_aref_md_data_t data;
+ cumo_narray_t *na1;
+ cumo_na_aref_md_data_t data;
  VALUE store = 0;
  VALUE idx;
- narray_t *nidx;
+ cumo_narray_t *nidx;

- GetNArray(self,na1);
+ CumoGetNArray(self,na1);

  args = rb_ary_new4(argc,argv);

  if (argc == 1 && result_nd == 1) {
  idx = argv[0];
  if (rb_obj_is_kind_of(idx, rb_cArray)) {
- idx = rb_apply(cumo_cNArray,id_bracket,idx);
+ idx = rb_apply(cumo_cNArray,cumo_id_bracket,idx);
  }
  if (rb_obj_is_kind_of(idx, cumo_cNArray)) {
- GetNArray(idx,nidx);
- if (NA_NDIM(nidx)>1) {
- store = nary_new(CLASS_OF(self),NA_NDIM(nidx),NA_SHAPE(nidx));
- idx = na_flatten(idx);
+ CumoGetNArray(idx,nidx);
+ if (CUMO_NA_NDIM(nidx)>1) {
+ store = cumo_na_new(rb_obj_class(self),CUMO_NA_NDIM(nidx),CUMO_NA_SHAPE(nidx));
+ idx = cumo_na_flatten(idx);
  RARRAY_ASET(args,0,idx);
  }
  }
  // flatten should be done only for narray-view with non-uniform stride.
  if (na1->ndim > 1) {
- self = na_flatten(self);
- GetNArray(self,na1);
+ self = cumo_na_flatten(self);
+ CumoGetNArray(self,na1);
  }
  }

@@ -668,95 +668,95 @@ na_aref_md(int argc, VALUE *argv, VALUE self, int keep_dim, int result_nd, size_
  data.self = self;
  data.store = store;
  data.ndim = result_nd;
- data.q = na_allocate_index_args(result_nd);
+ data.q = cumo_na_allocate_index_args(result_nd);
  data.na1 = na1;
  data.keep_dim = keep_dim;

  switch(na1->type) {
- case NARRAY_DATA_T:
+ case CUMO_NARRAY_DATA_T:
  data.pos = pos;
  break;
- case NARRAY_FILEMAP_T:
+ case CUMO_NARRAY_FILEMAP_T:
  data.pos = pos; // correct? I have never used..
  break;
- case NARRAY_VIEW_T:
+ case CUMO_NARRAY_VIEW_T:
  {
- narray_view_t *nv;
- GetNArrayView(self,nv);
- // pos obtained by na_get_result_dimension adds view->offset.
+ cumo_narray_view_t *nv;
+ CumoGetNArrayView(self,nv);
+ // pos obtained by cumo_na_get_result_dimension adds view->offset.
  data.pos = pos - nv->offset;
  }
  break;
  }

- return rb_ensure(na_aref_md_protected, (VALUE)&data, na_aref_md_ensure, (VALUE)&data);
+ return rb_ensure(cumo_na_aref_md_protected, (VALUE)&data, cumo_na_aref_md_ensure, (VALUE)&data);
  }


  /* method: [](idx1,idx2,...,idxN) */
  VALUE
- na_aref_main(int nidx, VALUE *idx, VALUE self, int keep_dim, int result_nd, size_t pos)
+ cumo_na_aref_main(int nidx, VALUE *idx, VALUE self, int keep_dim, int result_nd, size_t pos)
  {
- na_index_arg_to_internal_order(nidx, idx, self);
+ cumo_na_index_arg_to_internal_order(nidx, idx, self);

  if (nidx==0) {
- return rb_funcall(self,id_dup,0);
+ return rb_funcall(self,cumo_id_dup,0);
  }
  if (nidx==1) {
- if (CLASS_OF(*idx)==cumo_cBit) {
- return rb_funcall(*idx,id_mask,1,self);
+ if (rb_obj_class(*idx)==cumo_cBit) {
+ return rb_funcall(*idx,cumo_id_mask,1,self);
  }
  }
- return na_aref_md(nidx, idx, self, keep_dim, result_nd, pos);
+ return cumo_na_aref_md(nidx, idx, self, keep_dim, result_nd, pos);
  }


  /* method: slice(idx1,idx2,...,idxN) */
- static VALUE na_slice(int argc, VALUE *argv, VALUE self)
+ static VALUE cumo_na_slice(int argc, VALUE *argv, VALUE self)
  {
  int result_nd;
  size_t pos;

- result_nd = na_get_result_dimension(self, argc, argv, 0, &pos);
- return na_aref_main(argc, argv, self, 1, result_nd, pos);
+ result_nd = cumo_na_get_result_dimension(self, argc, argv, 0, &pos);
+ return cumo_na_aref_main(argc, argv, self, 1, result_nd, pos);
  }


  static int
- check_index_count(int argc, int na_ndim, int count_new, int count_rest)
+ check_index_count(int argc, int cumo_na_ndim, int count_new, int count_rest)
  {
- int result_nd = na_ndim + count_new;
+ int result_nd = cumo_na_ndim + count_new;

  switch(count_rest) {
  case 0:
  if (count_new == 0 && argc == 1) return 1;
  if (argc == result_nd) return result_nd;
  rb_raise(rb_eIndexError,"# of index(=%i) should be "
- "equal to ndim(=%i)",argc,na_ndim);
+ "equal to ndim(=%i)",argc,cumo_na_ndim);
  break;
  case 1:
  if (argc-1 <= result_nd) return result_nd;
  rb_raise(rb_eIndexError,"# of index(=%i) > ndim(=%i) with :rest",
- argc,na_ndim);
+ argc,cumo_na_ndim);
  break;
  }
  return -1;
  }

  int
- na_get_result_dimension(VALUE self, int argc, VALUE *argv, ssize_t stride, size_t *pos_idx)
+ cumo_na_get_result_dimension(VALUE self, int argc, VALUE *argv, ssize_t stride, size_t *pos_idx)
  {
  int i, j;
  int count_new=0;
  int count_rest=0;
  int count_else=0;
  ssize_t x, s, m, pos, *idx;
- narray_t *na;
- narray_view_t *nv;
- stridx_t sdx;
+ cumo_narray_t *na;
+ cumo_narray_view_t *nv;
+ cumo_stridx_t sdx;
  VALUE a;

- GetNArray(self,na);
+ CumoGetNArray(self,na);
  if (na->size == 0) {
  rb_raise(rb_eRuntimeError, "cannot get index of empty array");
  return -1;
@@ -774,12 +774,12 @@ na_get_result_dimension(VALUE self, int argc, VALUE *argv, ssize_t stride, size_
  break;
  case T_FALSE:
  case T_SYMBOL:
- if (a==sym_rest || a==sym_tilde || a==Qfalse) {
+ if (a==cumo_sym_rest || a==cumo_sym_tilde || a==Qfalse) {
  argv[i] = Qfalse;
  count_rest++;
  break;
- } else if (a==sym_new || a==sym_minus) {
- argv[i] = sym_new;
+ } else if (a==cumo_sym_new || a==cumo_sym_minus) {
+ argv[i] = cumo_sym_new;
  count_new++;
  }
  // not break
@@ -796,32 +796,32 @@ na_get_result_dimension(VALUE self, int argc, VALUE *argv, ssize_t stride, size_
  }

  switch(na->type) {
- case NARRAY_VIEW_T:
- GetNArrayView(self,nv);
+ case CUMO_NARRAY_VIEW_T:
+ CumoGetNArrayView(self,nv);
  pos = nv->offset;
  if (j == na->ndim) {
  for (i=j-1; i>=0; i--) {
- x = na_range_check(idx[i], na->shape[i], i);
+ x = cumo_na_range_check(idx[i], na->shape[i], i);
  sdx = nv->stridx[i];
- if (SDX_IS_INDEX(sdx)) {
- pos += SDX_GET_INDEX(sdx)[x];
+ if (CUMO_SDX_IS_INDEX(sdx)) {
+ pos += CUMO_SDX_GET_INDEX(sdx)[x];
  } else {
- pos += SDX_GET_STRIDE(sdx)*x;
+ pos += CUMO_SDX_GET_STRIDE(sdx)*x;
  }
  }
  *pos_idx = pos;
  }
  else if (argc==1 && j==1) {
- x = na_range_check(idx[0], na->size, 0);
+ x = cumo_na_range_check(idx[0], na->size, 0);
  for (i=na->ndim-1; i>=0; i--) {
  s = na->shape[i];
  m = x % s;
  x = x / s;
  sdx = nv->stridx[i];
- if (SDX_IS_INDEX(sdx)) {
- pos += SDX_GET_INDEX(sdx)[m];
+ if (CUMO_SDX_IS_INDEX(sdx)) {
+ pos += CUMO_SDX_GET_INDEX(sdx)[m];
  } else {
- pos += SDX_GET_STRIDE(sdx)*m;
+ pos += CUMO_SDX_GET_STRIDE(sdx)*m;
  }
  }
  *pos_idx = pos;
@@ -831,16 +831,16 @@ na_get_result_dimension(VALUE self, int argc, VALUE *argv, ssize_t stride, size_
  break;
  default:
  if (!stride) {
- stride = nary_element_stride(self);
+ stride = cumo_na_element_stride(self);
  }
  if (argc==1 && j==1) {
- x = na_range_check(idx[0], na->size, 0);
+ x = cumo_na_range_check(idx[0], na->size, 0);
  *pos_idx = stride * x;
  }
  else if (j == na->ndim) {
  pos = 0;
  for (i=j-1; i>=0; i--) {
- x = na_range_check(idx[i], na->shape[i], i);
+ x = cumo_na_range_check(idx[i], na->shape[i], i);
  pos += stride * x;
  stride *= na->shape[i];
  }
@@ -854,27 +854,27 @@ na_get_result_dimension(VALUE self, int argc, VALUE *argv, ssize_t stride, size_


  void
- Init_cumo_nary_index()
+ Init_cumo_na_index()
  {
- rb_define_method(cNArray, "slice", na_slice, -1);
-
- sym_ast = ID2SYM(rb_intern("*"));
- sym_all = ID2SYM(rb_intern("all"));
- sym_minus = ID2SYM(rb_intern("-"));
- sym_new = ID2SYM(rb_intern("new"));
- sym_reverse = ID2SYM(rb_intern("reverse"));
- sym_plus = ID2SYM(rb_intern("+"));
- //sym_reduce = ID2SYM(rb_intern("reduce"));
- sym_sum = ID2SYM(rb_intern("sum"));
- sym_tilde = ID2SYM(rb_intern("~"));
- sym_rest = ID2SYM(rb_intern("rest"));
- id_beg = rb_intern("begin");
- id_end = rb_intern("end");
- id_exclude_end = rb_intern("exclude_end?");
- id_each = rb_intern("each");
- id_step = rb_intern("step");
- id_dup = rb_intern("dup");
- id_bracket = rb_intern("[]");
- id_shift_left = rb_intern("<<");
- id_mask = rb_intern("mask");
+ rb_define_method(cNArray, "slice", cumo_na_slice, -1);
+
+ cumo_sym_ast = ID2SYM(rb_intern("*"));
+ cumo_sym_all = ID2SYM(rb_intern("all"));
+ cumo_sym_minus = ID2SYM(rb_intern("-"));
+ cumo_sym_new = ID2SYM(rb_intern("new"));
+ cumo_sym_reverse = ID2SYM(rb_intern("reverse"));
+ cumo_sym_plus = ID2SYM(rb_intern("+"));
+ //cumo_sym_reduce = ID2SYM(rb_intern("reduce"));
+ cumo_sym_sum = ID2SYM(rb_intern("sum"));
+ cumo_sym_tilde = ID2SYM(rb_intern("~"));
+ cumo_sym_rest = ID2SYM(rb_intern("rest"));
+ cumo_id_beg = rb_intern("begin");
+ cumo_id_end = rb_intern("end");
+ cumo_id_exclude_end = rb_intern("exclude_end?");
+ cumo_id_each = rb_intern("each");
+ cumo_id_step = rb_intern("step");
+ cumo_id_dup = rb_intern("dup");
+ cumo_id_bracket = rb_intern("[]");
+ cumo_id_shift_left = rb_intern("<<");
+ cumo_id_mask = rb_intern("mask");
  }
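The per-module initializer is renamed as well (Init_cumo_nary_index becomes Init_cumo_na_index); it is what registers Cumo::NArray#slice via rb_define_method. A minimal sketch of how such an initializer is typically wired into the extension entry point follows; the body of Init_cumo here is an assumption for illustration only, not the gem's actual cumo.c (listed above with +16 -16).

/* Sketch only: calling the renamed initializer from the extension entry
 * point. Init_cumo_na_index() is the function defined at the end of the
 * diff above; everything else in this fragment is assumed. */
void Init_cumo_na_index(void);

void
Init_cumo(void)
{
    /* ... class setup and other per-module initializers ... */
    Init_cumo_na_index();   /* defines Cumo::NArray#slice */
}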