cumo 0.1.0 → 0.1.1
- checksums.yaml +5 -5
- data/.gitignore +1 -0
- data/3rd_party/LICENSE.txt +60 -0
- data/3rd_party/mkmf-cu/lib/mkmf-cu/cli.rb +13 -1
- data/LICENSE.txt +1 -62
- data/README.md +33 -29
- data/bench/cumo_bench.rb +47 -25
- data/bench/numo_bench.rb +27 -25
- data/docs/src-tree.md +16 -0
- data/ext/cumo/cuda/cublas.c +69 -219
- data/ext/cumo/cuda/memory_pool_impl.hpp +1 -0
- data/ext/cumo/cuda/runtime.c +2 -14
- data/ext/cumo/cumo.c +16 -16
- data/ext/cumo/include/cumo.h +2 -2
- data/ext/cumo/include/cumo/cuda/cublas.h +6 -129
- data/ext/cumo/include/cumo/cuda/runtime.h +16 -0
- data/ext/cumo/include/cumo/indexer.h +46 -63
- data/ext/cumo/include/cumo/intern.h +58 -112
- data/ext/cumo/include/cumo/narray.h +214 -185
- data/ext/cumo/include/cumo/narray_kernel.h +66 -37
- data/ext/cumo/include/cumo/ndloop.h +42 -42
- data/ext/cumo/include/cumo/reduce_kernel.h +55 -71
- data/ext/cumo/include/cumo/template.h +56 -51
- data/ext/cumo/include/cumo/template_kernel.h +31 -31
- data/ext/cumo/include/cumo/types/bit.h +3 -3
- data/ext/cumo/include/cumo/types/bit_kernel.h +2 -2
- data/ext/cumo/include/cumo/types/complex.h +126 -126
- data/ext/cumo/include/cumo/types/complex_kernel.h +126 -126
- data/ext/cumo/include/cumo/types/complex_macro.h +28 -28
- data/ext/cumo/include/cumo/types/complex_macro_kernel.h +20 -20
- data/ext/cumo/include/cumo/types/dcomplex.h +5 -5
- data/ext/cumo/include/cumo/types/dcomplex_kernel.h +1 -1
- data/ext/cumo/include/cumo/types/int_macro.h +1 -1
- data/ext/cumo/include/cumo/types/int_macro_kernel.h +1 -1
- data/ext/cumo/include/cumo/types/robj_macro.h +30 -30
- data/ext/cumo/include/cumo/types/scomplex.h +5 -5
- data/ext/cumo/include/cumo/types/scomplex_kernel.h +1 -1
- data/ext/cumo/narray/array.c +143 -143
- data/ext/cumo/narray/data.c +184 -184
- data/ext/cumo/narray/gen/cogen.rb +5 -2
- data/ext/cumo/narray/gen/cogen_kernel.rb +5 -2
- data/ext/cumo/narray/gen/def/dcomplex.rb +1 -1
- data/ext/cumo/narray/gen/def/scomplex.rb +1 -1
- data/ext/cumo/narray/gen/erbln.rb +132 -0
- data/ext/cumo/narray/gen/erbpp2.rb +18 -13
- data/ext/cumo/narray/gen/narray_def.rb +3 -3
- data/ext/cumo/narray/gen/spec.rb +2 -2
- data/ext/cumo/narray/gen/tmpl/accum.c +15 -15
- data/ext/cumo/narray/gen/tmpl/accum_binary.c +22 -22
- data/ext/cumo/narray/gen/tmpl/accum_binary_kernel.cu +3 -3
- data/ext/cumo/narray/gen/tmpl/accum_index.c +30 -30
- data/ext/cumo/narray/gen/tmpl/accum_index_kernel.cu +2 -2
- data/ext/cumo/narray/gen/tmpl/accum_kernel.cu +3 -3
- data/ext/cumo/narray/gen/tmpl/alloc_func.c +14 -14
- data/ext/cumo/narray/gen/tmpl/allocate.c +11 -11
- data/ext/cumo/narray/gen/tmpl/aref.c +2 -2
- data/ext/cumo/narray/gen/tmpl/aref_cpu.c +4 -4
- data/ext/cumo/narray/gen/tmpl/aset.c +2 -2
- data/ext/cumo/narray/gen/tmpl/binary.c +28 -28
- data/ext/cumo/narray/gen/tmpl/binary2.c +18 -18
- data/ext/cumo/narray/gen/tmpl/binary2_kernel.cu +3 -3
- data/ext/cumo/narray/gen/tmpl/binary_kernel.cu +6 -6
- data/ext/cumo/narray/gen/tmpl/binary_s.c +13 -13
- data/ext/cumo/narray/gen/tmpl/binary_s_kernel.cu +3 -3
- data/ext/cumo/narray/gen/tmpl/bincount.c +23 -23
- data/ext/cumo/narray/gen/tmpl/cast.c +7 -7
- data/ext/cumo/narray/gen/tmpl/cast_array.c +3 -3
- data/ext/cumo/narray/gen/tmpl/clip.c +38 -38
- data/ext/cumo/narray/gen/tmpl/complex_accum_kernel.cu +2 -2
- data/ext/cumo/narray/gen/tmpl/cond_binary.c +19 -19
- data/ext/cumo/narray/gen/tmpl/cond_binary_kernel.cu +7 -7
- data/ext/cumo/narray/gen/tmpl/cond_unary.c +15 -15
- data/ext/cumo/narray/gen/tmpl/cum.c +15 -15
- data/ext/cumo/narray/gen/tmpl/each.c +9 -9
- data/ext/cumo/narray/gen/tmpl/each_with_index.c +9 -9
- data/ext/cumo/narray/gen/tmpl/ewcomp.c +15 -15
- data/ext/cumo/narray/gen/tmpl/ewcomp_kernel.cu +3 -3
- data/ext/cumo/narray/gen/tmpl/extract_cpu.c +5 -5
- data/ext/cumo/narray/gen/tmpl/extract_data.c +12 -12
- data/ext/cumo/narray/gen/tmpl/eye.c +9 -9
- data/ext/cumo/narray/gen/tmpl/eye_kernel.cu +3 -3
- data/ext/cumo/narray/gen/tmpl/fill.c +9 -9
- data/ext/cumo/narray/gen/tmpl/fill_kernel.cu +6 -6
- data/ext/cumo/narray/gen/tmpl/float_accum_kernel.cu +1 -1
- data/ext/cumo/narray/gen/tmpl/format.c +11 -11
- data/ext/cumo/narray/gen/tmpl/format_to_a.c +8 -8
- data/ext/cumo/narray/gen/tmpl/frexp.c +13 -13
- data/ext/cumo/narray/gen/tmpl/gemm.c +252 -108
- data/ext/cumo/narray/gen/tmpl/inspect.c +1 -1
- data/ext/cumo/narray/gen/tmpl/lib.c +2 -2
- data/ext/cumo/narray/gen/tmpl/logseq.c +7 -7
- data/ext/cumo/narray/gen/tmpl/logseq_kernel.cu +6 -6
- data/ext/cumo/narray/gen/tmpl/map_with_index.c +17 -17
- data/ext/cumo/narray/gen/tmpl/median.c +10 -10
- data/ext/cumo/narray/gen/tmpl/minmax.c +10 -10
- data/ext/cumo/narray/gen/tmpl/new_dim0.c +3 -3
- data/ext/cumo/narray/gen/tmpl/poly.c +6 -6
- data/ext/cumo/narray/gen/tmpl/pow.c +28 -28
- data/ext/cumo/narray/gen/tmpl/pow_kernel.cu +6 -6
- data/ext/cumo/narray/gen/tmpl/rand.c +10 -10
- data/ext/cumo/narray/gen/tmpl/rand_norm.c +7 -7
- data/ext/cumo/narray/gen/tmpl/real_accum_kernel.cu +6 -6
- data/ext/cumo/narray/gen/tmpl/seq.c +7 -7
- data/ext/cumo/narray/gen/tmpl/seq_kernel.cu +6 -6
- data/ext/cumo/narray/gen/tmpl/set2.c +20 -20
- data/ext/cumo/narray/gen/tmpl/sort.c +11 -11
- data/ext/cumo/narray/gen/tmpl/sort_index.c +18 -18
- data/ext/cumo/narray/gen/tmpl/store.c +6 -6
- data/ext/cumo/narray/gen/tmpl/store_array.c +19 -19
- data/ext/cumo/narray/gen/tmpl/store_array_kernel.cu +12 -12
- data/ext/cumo/narray/gen/tmpl/store_bit.c +23 -23
- data/ext/cumo/narray/gen/tmpl/store_bit_kernel.cu +28 -28
- data/ext/cumo/narray/gen/tmpl/store_from.c +16 -16
- data/ext/cumo/narray/gen/tmpl/store_from_kernel.cu +12 -12
- data/ext/cumo/narray/gen/tmpl/to_a.c +10 -10
- data/ext/cumo/narray/gen/tmpl/unary.c +25 -25
- data/ext/cumo/narray/gen/tmpl/unary2.c +17 -17
- data/ext/cumo/narray/gen/tmpl/unary_kernel.cu +15 -15
- data/ext/cumo/narray/gen/tmpl/unary_ret2.c +13 -13
- data/ext/cumo/narray/gen/tmpl/unary_s.c +17 -17
- data/ext/cumo/narray/gen/tmpl/unary_s_kernel.cu +12 -12
- data/ext/cumo/narray/gen/tmpl_bit/allocate.c +9 -9
- data/ext/cumo/narray/gen/tmpl_bit/aref.c +2 -2
- data/ext/cumo/narray/gen/tmpl_bit/aref_cpu.c +5 -5
- data/ext/cumo/narray/gen/tmpl_bit/aset.c +2 -2
- data/ext/cumo/narray/gen/tmpl_bit/binary.c +29 -29
- data/ext/cumo/narray/gen/tmpl_bit/bit_count.c +14 -14
- data/ext/cumo/narray/gen/tmpl_bit/bit_count_cpu.c +21 -21
- data/ext/cumo/narray/gen/tmpl_bit/bit_count_kernel.cu +28 -28
- data/ext/cumo/narray/gen/tmpl_bit/bit_reduce.c +29 -29
- data/ext/cumo/narray/gen/tmpl_bit/each.c +10 -10
- data/ext/cumo/narray/gen/tmpl_bit/each_with_index.c +10 -10
- data/ext/cumo/narray/gen/tmpl_bit/extract.c +8 -8
- data/ext/cumo/narray/gen/tmpl_bit/extract_cpu.c +8 -8
- data/ext/cumo/narray/gen/tmpl_bit/fill.c +17 -17
- data/ext/cumo/narray/gen/tmpl_bit/format.c +14 -14
- data/ext/cumo/narray/gen/tmpl_bit/format_to_a.c +11 -11
- data/ext/cumo/narray/gen/tmpl_bit/inspect.c +3 -3
- data/ext/cumo/narray/gen/tmpl_bit/mask.c +33 -33
- data/ext/cumo/narray/gen/tmpl_bit/store_array.c +19 -19
- data/ext/cumo/narray/gen/tmpl_bit/store_bit.c +22 -22
- data/ext/cumo/narray/gen/tmpl_bit/store_from.c +18 -18
- data/ext/cumo/narray/gen/tmpl_bit/to_a.c +12 -12
- data/ext/cumo/narray/gen/tmpl_bit/unary.c +24 -24
- data/ext/cumo/narray/gen/tmpl_bit/where.c +16 -16
- data/ext/cumo/narray/gen/tmpl_bit/where2.c +20 -20
- data/ext/cumo/narray/index.c +213 -213
- data/ext/cumo/narray/math.c +27 -27
- data/ext/cumo/narray/narray.c +484 -484
- data/ext/cumo/narray/ndloop.c +259 -258
- data/ext/cumo/narray/rand.c +3 -3
- data/ext/cumo/narray/step.c +70 -70
- data/ext/cumo/narray/struct.c +139 -139
- metadata +6 -7
- data/ext/cumo/include/cumo/intern_fwd.h +0 -38
- data/lib/erbpp.rb +0 -294
- data/lib/erbpp/line_number.rb +0 -137
- data/lib/erbpp/narray_def.rb +0 -381
data/ext/cumo/narray/index.c
CHANGED
@@ -31,7 +31,7 @@ struct enumerator {
 // note: the memory refed by this pointer is not freed and causes memroy leak.
 //
 // @example
-// a[1..3,1] generates two
+// a[1..3,1] generates two cumo_na_index_arg_t(s). First is for 1..3, and second is for 1.
 typedef struct {
     size_t n; // the number of elements of the dimesnion
     size_t beg; // the starting point in the dimension
@@ -39,14 +39,14 @@ typedef struct {
     size_t *idx; // list of indices
     int reduce; // true if the dimension is reduced by addition
     int orig_dim; // the dimension of original array
-}
+} cumo_na_index_arg_t;


 static void
-print_index_arg(
+print_index_arg(cumo_na_index_arg_t *q, int n)
 {
     int i;
-    printf("
+    printf("cumo_na_index_arg_t = 0x%"SZF"x {\n",(size_t)q);
     for (i=0; i<n; i++) {
         printf(" q[%d].n=%"SZF"d\n",i,q[i].n);
         printf(" q[%d].beg=%"SZF"d\n",i,q[i].beg);
@@ -58,29 +58,29 @@ print_index_arg(na_index_arg_t *q, int n)
     printf("}\n");
 }

-static VALUE
-static VALUE
-//static VALUE
-static VALUE
-static VALUE
-static VALUE
-static VALUE
-static VALUE
-static VALUE
-static VALUE
-static ID
-static ID
-static ID
-static ID
-static ID
-static ID
-static ID
-static ID
-static ID
+static VALUE cumo_sym_ast;
+static VALUE cumo_sym_all;
+//static VALUE cumo_sym_reduce;
+static VALUE cumo_sym_minus;
+static VALUE cumo_sym_new;
+static VALUE cumo_sym_reverse;
+static VALUE cumo_sym_plus;
+static VALUE cumo_sym_sum;
+static VALUE cumo_sym_tilde;
+static VALUE cumo_sym_rest;
+static ID cumo_id_beg;
+static ID cumo_id_end;
+static ID cumo_id_exclude_end;
+static ID cumo_id_each;
+static ID cumo_id_step;
+static ID cumo_id_dup;
+static ID cumo_id_bracket;
+static ID cumo_id_shift_left;
+static ID cumo_id_mask;


 static void
-
+cumo_na_index_set_step(cumo_na_index_arg_t *q, int i, size_t n, size_t beg, ssize_t step)
 {
     q->n = n;
     q->beg = beg;
@@ -91,7 +91,7 @@ na_index_set_step(na_index_arg_t *q, int i, size_t n, size_t beg, ssize_t step)
 }

 static void
-
+cumo_na_index_set_scalar(cumo_na_index_arg_t *q, int i, ssize_t size, ssize_t x)
 {
     if (x < -size || x >= size)
         rb_raise(rb_eRangeError,
@@ -108,7 +108,7 @@ na_index_set_scalar(na_index_arg_t *q, int i, ssize_t size, ssize_t x)
 }

 static inline ssize_t
-
+cumo_na_range_check(ssize_t pos, ssize_t size, int dim)
 {
     ssize_t idx=pos;

@@ -121,13 +121,13 @@ na_range_check(ssize_t pos, ssize_t size, int dim)
 }

 static void
-
+cumo_na_parse_array(VALUE ary, int orig_dim, ssize_t size, cumo_na_index_arg_t *q)
 {
     int k;
     int n = RARRAY_LEN(ary);
     q->idx = ALLOC_N(size_t, n);
     for (k=0; k<n; k++) {
-        q->idx[k] =
+        q->idx[k] = cumo_na_range_check(NUM2SSIZET(RARRAY_AREF(ary,k)), size, orig_dim);
     }
     q->n = n;
     q->beg = 0;
@@ -137,32 +137,32 @@ na_parse_array(VALUE ary, int orig_dim, ssize_t size, na_index_arg_t *q)
 }

 static void
-
+cumo_na_parse_narray_index(VALUE a, int orig_dim, ssize_t size, cumo_na_index_arg_t *q)
 {
     VALUE idx;
-
-
+    cumo_narray_t *na;
+    cumo_narray_data_t *nidx;
     size_t k, n;
     ssize_t *nidxp;

-
-    if (
+    CumoGetNArray(a,na);
+    if (CUMO_NA_NDIM(na) != 1) {
         rb_raise(rb_eIndexError, "should be 1-d NArray");
     }
-    n =
-    idx =
-
+    n = CUMO_NA_SIZE(na);
+    idx = cumo_na_new(cIndex,1,&n);
+    cumo_na_store(idx,a);

-
+    CumoGetNArrayData(idx,nidx);
     nidxp = (ssize_t*)nidx->ptr;
     q->idx = ALLOC_N(size_t, n);

     // ndixp is cuda memory (cuda narray)
-
+    CUMO_SHOW_SYNCHRONIZE_WARNING_ONCE("cumo_na_parse_narray_index", "any");
     cumo_cuda_runtime_check_status(cudaDeviceSynchronize());

     for (k=0; k<n; k++) {
-        q->idx[k] =
+        q->idx[k] = cumo_na_range_check(nidxp[k], size, orig_dim);
     }
     q->n = n;
     q->beg = 0;
@@ -172,22 +172,22 @@ na_parse_narray_index(VALUE a, int orig_dim, ssize_t size, na_index_arg_t *q)
 }

 static void
-
+cumo_na_parse_range(VALUE range, ssize_t step, int orig_dim, ssize_t size, cumo_na_index_arg_t *q)
 {
     int n;
     VALUE excl_end;
     ssize_t beg, end, beg_orig, end_orig;
     const char *dot = "..", *edot = "...";

-    beg = beg_orig = NUM2SSIZET(rb_funcall(range,
+    beg = beg_orig = NUM2SSIZET(rb_funcall(range,cumo_id_beg,0));
     if (beg < 0) {
         beg += size;
     }
-    end = end_orig = NUM2SSIZET(rb_funcall(range,
+    end = end_orig = NUM2SSIZET(rb_funcall(range,cumo_id_end,0));
     if (end < 0) {
         end += size;
     }
-    excl_end = rb_funcall(range,
+    excl_end = rb_funcall(range,cumo_id_exclude_end,0);
     if (RTEST(excl_end)) {
         end--;
         dot = edot;
@@ -199,12 +199,12 @@ na_parse_range(VALUE range, ssize_t step, int orig_dim, ssize_t size, na_index_a
     }
     n = (end-beg)/step+1;
     if (n<0) n=0;
-
+    cumo_na_index_set_step(q,orig_dim,n,beg,step);

 }

 static void
-
+cumo_na_parse_enumerator(VALUE enum_obj, int orig_dim, ssize_t size, cumo_na_index_arg_t *q)
 {
     int len;
     ssize_t step;
@@ -216,10 +216,10 @@ na_parse_enumerator(VALUE enum_obj, int orig_dim, ssize_t size, na_index_arg_t *
     e = (struct enumerator *)DATA_PTR(enum_obj);

     if (rb_obj_is_kind_of(e->obj, rb_cRange)) {
-        if (e->meth ==
-
+        if (e->meth == cumo_id_each) {
+            cumo_na_parse_range(e->obj, 1, orig_dim, size, q);
         }
-        else if (e->meth ==
+        else if (e->meth == cumo_id_step) {
             if (TYPE(e->args) != T_ARRAY) {
                 rb_raise(rb_eArgError,"no argument for step");
             }
@@ -228,7 +228,7 @@ na_parse_enumerator(VALUE enum_obj, int orig_dim, ssize_t size, na_index_arg_t *
                 rb_raise(rb_eArgError,"invalid number of step argument (1 for %d)",len);
             }
             step = NUM2SSIZET(RARRAY_AREF(e->args,0));
-
+            cumo_na_parse_range(e->obj, step, orig_dim, size, q);
         } else {
             rb_raise(rb_eTypeError,"unknown Range method: %s",rb_id2name(e->meth));
         }
@@ -244,39 +244,39 @@ na_parse_enumerator(VALUE enum_obj, int orig_dim, ssize_t size, na_index_arg_t *
 // i: parse i-th index
 // q: parsed information is stored to *q
 static void
-
+cumo_na_index_parse_each(volatile VALUE a, ssize_t size, int i, cumo_na_index_arg_t *q)
 {
     switch(TYPE(a)) {

     case T_FIXNUM:
-
+        cumo_na_index_set_scalar(q,i,size,FIX2LONG(a));
         break;

     case T_BIGNUM:
-
+        cumo_na_index_set_scalar(q,i,size,NUM2SSIZET(a));
         break;

     case T_FLOAT:
-
+        cumo_na_index_set_scalar(q,i,size,NUM2SSIZET(a));
         break;

     case T_NIL:
     case T_TRUE:
-
+        cumo_na_index_set_step(q,i,size,0,1);
         break;

     case T_SYMBOL:
-        if (a==
-
+        if (a==cumo_sym_all || a==cumo_sym_ast) {
+            cumo_na_index_set_step(q,i,size,0,1);
        }
-        else if (a==
-
+        else if (a==cumo_sym_reverse) {
+            cumo_na_index_set_step(q,i,size,size-1,-1);
        }
-        else if (a==
-
+        else if (a==cumo_sym_new) {
+            cumo_na_index_set_step(q,i,1,0,1);
        }
-        else if (a==
-
+        else if (a==cumo_sym_reduce || a==cumo_sym_sum || a==cumo_sym_plus) {
+            cumo_na_index_set_step(q,i,size,0,1);
             q->reduce = 1;
         } else {
             rb_raise(rb_eIndexError, "invalid symbol for index");
@@ -284,24 +284,24 @@ na_index_parse_each(volatile VALUE a, ssize_t size, int i, na_index_arg_t *q)
         break;

     case T_ARRAY:
-
+        cumo_na_parse_array(a, i, size, q);
         break;

     default:
         if (rb_obj_is_kind_of(a, rb_cRange)) {
-
+            cumo_na_parse_range(a, 1, i, size, q);
         }
         else if (rb_obj_is_kind_of(a, rb_cEnumerator)) {
-
+            cumo_na_parse_enumerator(a, i, size, q);
         }
-        else if (rb_obj_is_kind_of(a,
+        else if (rb_obj_is_kind_of(a, cumo_na_cStep)) {
             ssize_t beg, step, n;
-
-
+            cumo_na_step_array_index(a, size, (size_t*)(&n), &beg, &step);
+            cumo_na_index_set_step(q,i,n,beg,step);
         }
         // NArray index
-        else if (
-
+        else if (CUMO_NA_CumoIsNArray(a)) {
+            cumo_na_parse_narray_index(a, i, size, q);
         }
         else {
             rb_raise(rb_eIndexError, "not allowed type");
@@ -311,7 +311,7 @@ na_index_parse_each(volatile VALUE a, ssize_t size, int i, na_index_arg_t *q)


 static size_t
-
+cumo_na_index_parse_args(VALUE args, cumo_narray_t *na, cumo_na_index_arg_t *q, int ndim)
 {
     int i, j, k, l, nidx;
     size_t total=1;
@@ -329,7 +329,7 @@ na_index_parse_args(VALUE args, narray_t *na, na_index_arg_t *q, int ndim)
         if (v==Qfalse) {
             for (l = ndim - (nidx-1); l>0; l--) {
                 //printf("i=%d j=%d k=%d l=%d ndim=%d nidx=%d\n",i,j,k,l,ndim,nidx);
-
+                cumo_na_index_parse_each(Qtrue, na->shape[k], k, &q[j]);
                 if (q[j].n > 1) {
                     total *= q[j].n;
                 }
@@ -338,13 +338,13 @@ na_index_parse_args(VALUE args, narray_t *na, na_index_arg_t *q, int ndim)
             }
         }
         // new dimension
-        else if (v==
-
+        else if (v==cumo_sym_new) {
+            cumo_na_index_parse_each(v, 1, k, &q[j]);
             j++;
         }
         // other dimention
         else {
-
+            cumo_na_index_parse_each(v, na->shape[k], k, &q[j]);
             if (q[j].n > 1) {
                 total *= q[j].n;
             }
@@ -357,7 +357,7 @@ na_index_parse_args(VALUE args, narray_t *na, na_index_arg_t *q, int ndim)


 static void
-
+cumo_na_get_strides_nadata(const cumo_narray_data_t *na, ssize_t *strides, ssize_t elmsz)
 {
     int i = na->base.ndim - 1;
     strides[i] = elmsz;
@@ -367,8 +367,8 @@ na_get_strides_nadata(const narray_data_t *na, ssize_t *strides, ssize_t elmsz)
 }

 static void
-
-
+cumo_na_index_aref_nadata(cumo_narray_data_t *na1, cumo_narray_view_t *na2,
+                          cumo_na_index_arg_t *q, ssize_t elmsz, int ndim, int keep_dim)
 {
     int i, j;
     ssize_t size, k, total=1;
@@ -379,7 +379,7 @@ na_index_aref_nadata(narray_data_t *na1, narray_view_t *na2,
     VALUE m;

     strides_na1 = ALLOCA_N(ssize_t, na1->base.ndim);
-
+    cumo_na_get_strides_nadata(na1, strides_na1, elmsz);

     for (i=j=0; i<ndim; i++) {
         stride1 = strides_na1[q[i].orig_dim];
@@ -394,14 +394,14 @@ na_index_aref_nadata(narray_data_t *na1, narray_view_t *na2,
         na2->base.shape[j] = size = q[i].n;

         if (q[i].reduce != 0) {
-            m = rb_funcall(INT2FIX(1),
+            m = rb_funcall(INT2FIX(1),cumo_id_shift_left,1,INT2FIX(j));
             na2->base.reduce = rb_funcall(m,'|',1,na2->base.reduce);
         }

         // array index
         if (q[i].idx != NULL) {
             index = q[i].idx;
-
+            CUMO_SDX_SET_INDEX(na2->stridx[j],index);
             q[i].idx = NULL;
             for (k=0; k<size; k++) {
                 index[k] = index[k] * stride1;
@@ -410,7 +410,7 @@ na_index_aref_nadata(narray_data_t *na1, narray_view_t *na2,
             beg = q[i].beg;
             step = q[i].step;
             na2->offset += stride1*beg;
-
+            CUMO_SDX_SET_STRIDE(na2->stridx[j], stride1*step);
         }
         j++;
         total *= size;
@@ -420,22 +420,22 @@ na_index_aref_nadata(narray_data_t *na1, narray_view_t *na2,


 static void
-
-
+cumo_na_index_aref_naview(cumo_narray_view_t *na1, cumo_narray_view_t *na2,
+                          cumo_na_index_arg_t *q, ssize_t elmsz, int ndim, int keep_dim)
 {
     int i, j;
     ssize_t total=1;

     for (i=j=0; i<ndim; i++) {
-
+        cumo_stridx_t sdx1 = na1->stridx[q[i].orig_dim];
         ssize_t size;

         // numeric index -- trim dimension
         if (!keep_dim && q[i].n==1 && q[i].step==0) {
-            if (
-                na2->offset +=
+            if (CUMO_SDX_IS_INDEX(sdx1)) {
+                na2->offset += CUMO_SDX_GET_INDEX(sdx1)[q[i].beg];
             } else {
-                na2->offset +=
+                na2->offset += CUMO_SDX_GET_STRIDE(sdx1)*q[i].beg;
             }
             continue;
         }
@@ -443,30 +443,30 @@ na_index_aref_naview(narray_view_t *na1, narray_view_t *na2,
         na2->base.shape[j] = size = q[i].n;

         if (q[i].reduce != 0) {
-            VALUE m = rb_funcall(INT2FIX(1),
+            VALUE m = rb_funcall(INT2FIX(1),cumo_id_shift_left,1,INT2FIX(j));
             na2->base.reduce = rb_funcall(m,'|',1,na2->base.reduce);
         }

         if (q[i].orig_dim >= na1->base.ndim) {
             // new dimension
-
+            CUMO_SDX_SET_STRIDE(na2->stridx[j], elmsz);
         }
-        else if (q[i].idx != NULL &&
+        else if (q[i].idx != NULL && CUMO_SDX_IS_INDEX(sdx1)) {
             // index <- index
             int k;
             size_t *index = q[i].idx;
-
+            CUMO_SDX_SET_INDEX(na2->stridx[j], index);
             q[i].idx = NULL;

             for (k=0; k<size; k++) {
-                index[k] =
+                index[k] = CUMO_SDX_GET_INDEX(sdx1)[index[k]];
             }
         }
-        else if (q[i].idx != NULL &&
+        else if (q[i].idx != NULL && CUMO_SDX_IS_STRIDE(sdx1)) {
             // index <- step
-            ssize_t stride1 =
+            ssize_t stride1 = CUMO_SDX_GET_STRIDE(sdx1);
             size_t *index = q[i].idx;
-
+            CUMO_SDX_SET_INDEX(na2->stridx[j],index);
             q[i].idx = NULL;

             if (stride1<0) {
@@ -488,24 +488,24 @@ na_index_aref_naview(narray_view_t *na1, narray_view_t *na2,
                 }
             }
         }
-        else if (q[i].idx == NULL &&
+        else if (q[i].idx == NULL && CUMO_SDX_IS_INDEX(sdx1)) {
             // step <- index
             int k;
             size_t beg = q[i].beg;
             ssize_t step = q[i].step;
             size_t *index = ALLOC_N(size_t, size);
-
+            CUMO_SDX_SET_INDEX(na2->stridx[j],index);
             for (k=0; k<size; k++) {
-                index[k] =
+                index[k] = CUMO_SDX_GET_INDEX(sdx1)[beg+step*k];
             }
         }
-        else if (q[i].idx == NULL &&
+        else if (q[i].idx == NULL && CUMO_SDX_IS_STRIDE(sdx1)) {
             // step <- step
             size_t beg = q[i].beg;
             ssize_t step = q[i].step;
-            ssize_t stride1 =
+            ssize_t stride1 = CUMO_SDX_GET_STRIDE(sdx1);
             na2->offset += stride1*beg;
-
+            CUMO_SDX_SET_STRIDE(na2->stridx[j], stride1*step);
         }

         j++;
@@ -516,7 +516,7 @@ na_index_aref_naview(narray_view_t *na1, narray_view_t *na2,


 static int
-
+cumo_na_ndim_new_narray(int ndim, const cumo_na_index_arg_t *q)
 {
     int i, ndim_new=0;
     for (i=0; i<ndim; i++) {
@@ -530,20 +530,20 @@ na_ndim_new_narray(int ndim, const na_index_arg_t *q)
 typedef struct {
     VALUE args, self, store;
     int ndim;
-
-
+    cumo_na_index_arg_t *q; // multi-dimensional index args
+    cumo_narray_t *na1;
     int keep_dim;
     size_t pos; // offset position for 0-dimensional narray. 0-dimensional array does not use q.
-}
+} cumo_na_aref_md_data_t;

-static
-
+static cumo_na_index_arg_t*
+cumo_na_allocate_index_args(int ndim)
 {
-
+    cumo_na_index_arg_t *q;
     int i;
     if (ndim == 0) return NULL;

-    q = ALLOC_N(
+    q = ALLOC_N(cumo_na_index_arg_t, ndim);
     for (i=0; i<ndim; i++) {
         q[i].idx = NULL;
     }
@@ -551,77 +551,77 @@ na_allocate_index_args(int ndim)
 }

 static
-VALUE
+VALUE cumo_na_aref_md_protected(VALUE data_value)
 {
-
+    cumo_na_aref_md_data_t *data = (cumo_na_aref_md_data_t*)(data_value);
     VALUE self = data->self;
     VALUE args = data->args;
     VALUE store = data->store;
     int ndim = data->ndim;
-
-
+    cumo_na_index_arg_t *q = data->q;
+    cumo_narray_t *na1 = data->na1;
     int keep_dim = data->keep_dim;

     int ndim_new;
     VALUE view;
-
+    cumo_narray_view_t *na2;
     ssize_t elmsz;

-
+    cumo_na_index_parse_args(args, na1, q, ndim);

-    if (
+    if (cumo_na_debug_flag) print_index_arg(q,ndim);

     if (keep_dim) {
         ndim_new = ndim;
     } else {
-        ndim_new =
+        ndim_new = cumo_na_ndim_new_narray(ndim, q);
     }
-    view =
+    view = cumo_na_s_allocate_view(rb_obj_class(self));

-
-
+    cumo_na_copy_flags(self, view);
+    CumoGetNArrayView(view,na2);

-
+    cumo_na_alloc_shape((cumo_narray_t*)na2, ndim_new);

-    na2->stridx = ALLOC_N(
+    na2->stridx = ALLOC_N(cumo_stridx_t,ndim_new);

-    elmsz =
+    elmsz = cumo_na_element_stride(self);

     switch(na1->type) {
-    case
-    case
+    case CUMO_NARRAY_DATA_T:
+    case CUMO_NARRAY_FILEMAP_T:
         if (ndim == 0) {
             na2->offset = data->pos;
             na2->base.size = 1;
         } else {
-
+            cumo_na_index_aref_nadata((cumo_narray_data_t *)na1,na2,q,elmsz,ndim,keep_dim);
         }
         na2->data = self;
         break;
-    case
+    case CUMO_NARRAY_VIEW_T:
         if (ndim == 0) {
-            na2->offset = ((
-            na2->data = ((
+            na2->offset = ((cumo_narray_view_t *)na1)->offset + data->pos;
+            na2->data = ((cumo_narray_view_t *)na1)->data;
             na2->base.size = 1;
         } else {
-            na2->offset = ((
-            na2->data = ((
-
+            na2->offset = ((cumo_narray_view_t *)na1)->offset;
+            na2->data = ((cumo_narray_view_t *)na1)->data;
+            cumo_na_index_aref_naview((cumo_narray_view_t *)na1,na2,q,elmsz,ndim,keep_dim);
         }
         break;
     }
     if (store) {
-
-
+        cumo_na_get_pointer_for_write(store); // allocate memory
+        cumo_na_store(cumo_na_flatten_dim(store,0),view);
         return store;
     }
     return view;
 }

 static VALUE
-
+cumo_na_aref_md_ensure(VALUE data_value)
 {
-
+    cumo_na_aref_md_data_t *data = (cumo_na_aref_md_data_t*)(data_value);
     int i;
     for (i=0; i<data->ndim; i++) {
         xfree(data->q[i].idx);
@@ -631,36 +631,36 @@ na_aref_md_ensure(VALUE data_value)
 }

 static VALUE
-
+cumo_na_aref_md(int argc, VALUE *argv, VALUE self, int keep_dim, int result_nd, size_t pos)
 {
     VALUE args; // should be GC protected
-
-
+    cumo_narray_t *na1;
+    cumo_na_aref_md_data_t data;
     VALUE store = 0;
     VALUE idx;
-
+    cumo_narray_t *nidx;

-
+    CumoGetNArray(self,na1);

     args = rb_ary_new4(argc,argv);

     if (argc == 1 && result_nd == 1) {
         idx = argv[0];
         if (rb_obj_is_kind_of(idx, rb_cArray)) {
-            idx = rb_apply(cumo_cNArray,
+            idx = rb_apply(cumo_cNArray,cumo_id_bracket,idx);
         }
         if (rb_obj_is_kind_of(idx, cumo_cNArray)) {
-
-            if (
-                store =
-                idx =
+            CumoGetNArray(idx,nidx);
+            if (CUMO_NA_NDIM(nidx)>1) {
+                store = cumo_na_new(rb_obj_class(self),CUMO_NA_NDIM(nidx),CUMO_NA_SHAPE(nidx));
+                idx = cumo_na_flatten(idx);
                 RARRAY_ASET(args,0,idx);
             }
         }
         // flatten should be done only for narray-view with non-uniform stride.
         if (na1->ndim > 1) {
-            self =
-
+            self = cumo_na_flatten(self);
+            CumoGetNArray(self,na1);
         }
     }

@@ -668,95 +668,95 @@ na_aref_md(int argc, VALUE *argv, VALUE self, int keep_dim, int result_nd, size_
     data.self = self;
     data.store = store;
     data.ndim = result_nd;
-    data.q =
+    data.q = cumo_na_allocate_index_args(result_nd);
     data.na1 = na1;
     data.keep_dim = keep_dim;

     switch(na1->type) {
-    case
+    case CUMO_NARRAY_DATA_T:
         data.pos = pos;
         break;
-    case
+    case CUMO_NARRAY_FILEMAP_T:
         data.pos = pos; // correct? I have never used..
         break;
-    case
+    case CUMO_NARRAY_VIEW_T:
         {
-
-
-            // pos obtained by
+            cumo_narray_view_t *nv;
+            CumoGetNArrayView(self,nv);
+            // pos obtained by cumo_na_get_result_dimension adds view->offset.
             data.pos = pos - nv->offset;
         }
         break;
     }

-    return rb_ensure(
+    return rb_ensure(cumo_na_aref_md_protected, (VALUE)&data, cumo_na_aref_md_ensure, (VALUE)&data);
 }


 /* method: [](idx1,idx2,...,idxN) */
 VALUE
-
+cumo_na_aref_main(int nidx, VALUE *idx, VALUE self, int keep_dim, int result_nd, size_t pos)
 {
-
+    cumo_na_index_arg_to_internal_order(nidx, idx, self);

     if (nidx==0) {
-        return rb_funcall(self,
+        return rb_funcall(self,cumo_id_dup,0);
     }
     if (nidx==1) {
-        if (
-            return rb_funcall(*idx,
+        if (rb_obj_class(*idx)==cumo_cBit) {
+            return rb_funcall(*idx,cumo_id_mask,1,self);
         }
     }
-    return
+    return cumo_na_aref_md(nidx, idx, self, keep_dim, result_nd, pos);
 }


 /* method: slice(idx1,idx2,...,idxN) */
-static VALUE
+static VALUE cumo_na_slice(int argc, VALUE *argv, VALUE self)
 {
     int result_nd;
     size_t pos;

-    result_nd =
-    return
+    result_nd = cumo_na_get_result_dimension(self, argc, argv, 0, &pos);
+    return cumo_na_aref_main(argc, argv, self, 1, result_nd, pos);
 }


 static int
-check_index_count(int argc, int
+check_index_count(int argc, int cumo_na_ndim, int count_new, int count_rest)
 {
-    int result_nd =
+    int result_nd = cumo_na_ndim + count_new;

     switch(count_rest) {
       case 0:
         if (count_new == 0 && argc == 1) return 1;
         if (argc == result_nd) return result_nd;
         rb_raise(rb_eIndexError,"# of index(=%i) should be "
-                 "equal to ndim(=%i)",argc,
+                 "equal to ndim(=%i)",argc,cumo_na_ndim);
         break;
       case 1:
         if (argc-1 <= result_nd) return result_nd;
         rb_raise(rb_eIndexError,"# of index(=%i) > ndim(=%i) with :rest",
-                 argc,
+                 argc,cumo_na_ndim);
         break;
     }
     return -1;
 }

 int
-
+cumo_na_get_result_dimension(VALUE self, int argc, VALUE *argv, ssize_t stride, size_t *pos_idx)
 {
     int i, j;
     int count_new=0;
     int count_rest=0;
     int count_else=0;
     ssize_t x, s, m, pos, *idx;
-
-
-
+    cumo_narray_t *na;
+    cumo_narray_view_t *nv;
+    cumo_stridx_t sdx;
     VALUE a;

-
+    CumoGetNArray(self,na);
     if (na->size == 0) {
         rb_raise(rb_eRuntimeError, "cannot get index of empty array");
         return -1;
@@ -774,12 +774,12 @@ na_get_result_dimension(VALUE self, int argc, VALUE *argv, ssize_t stride, size_
             break;
           case T_FALSE:
           case T_SYMBOL:
-            if (a==
+            if (a==cumo_sym_rest || a==cumo_sym_tilde || a==Qfalse) {
                 argv[i] = Qfalse;
                 count_rest++;
                 break;
-            } else if (a==
-                argv[i] =
+            } else if (a==cumo_sym_new || a==cumo_sym_minus) {
+                argv[i] = cumo_sym_new;
                 count_new++;
             }
             // not break
@@ -796,32 +796,32 @@ na_get_result_dimension(VALUE self, int argc, VALUE *argv, ssize_t stride, size_
     }

     switch(na->type) {
-    case
-
+    case CUMO_NARRAY_VIEW_T:
+        CumoGetNArrayView(self,nv);
        pos = nv->offset;
        if (j == na->ndim) {
            for (i=j-1; i>=0; i--) {
-                x =
+                x = cumo_na_range_check(idx[i], na->shape[i], i);
                sdx = nv->stridx[i];
-                if (
-                    pos +=
+                if (CUMO_SDX_IS_INDEX(sdx)) {
+                    pos += CUMO_SDX_GET_INDEX(sdx)[x];
                } else {
-                    pos +=
+                    pos += CUMO_SDX_GET_STRIDE(sdx)*x;
                }
            }
            *pos_idx = pos;
        }
        else if (argc==1 && j==1) {
-            x =
+            x = cumo_na_range_check(idx[0], na->size, 0);
            for (i=na->ndim-1; i>=0; i--) {
                s = na->shape[i];
                m = x % s;
                x = x / s;
                sdx = nv->stridx[i];
-                if (
-                    pos +=
+                if (CUMO_SDX_IS_INDEX(sdx)) {
+                    pos += CUMO_SDX_GET_INDEX(sdx)[m];
                } else {
-                    pos +=
+                    pos += CUMO_SDX_GET_STRIDE(sdx)*m;
                }
            }
            *pos_idx = pos;
@@ -831,16 +831,16 @@ na_get_result_dimension(VALUE self, int argc, VALUE *argv, ssize_t stride, size_
        break;
      default:
        if (!stride) {
-            stride =
+            stride = cumo_na_element_stride(self);
        }
        if (argc==1 && j==1) {
-            x =
+            x = cumo_na_range_check(idx[0], na->size, 0);
            *pos_idx = stride * x;
        }
        else if (j == na->ndim) {
            pos = 0;
            for (i=j-1; i>=0; i--) {
-                x =
+                x = cumo_na_range_check(idx[i], na->shape[i], i);
                pos += stride * x;
                stride *= na->shape[i];
            }
@@ -854,27 +854,27 @@ na_get_result_dimension(VALUE self, int argc, VALUE *argv, ssize_t stride, size_


 void
-
+Init_cumo_na_index()
 {
-    rb_define_method(cNArray, "slice",
-
-
-
-
-
-
-
-    //
-
-
-
-
-
-
-
-
-
-
-
+    rb_define_method(cNArray, "slice", cumo_na_slice, -1);
+
+    cumo_sym_ast = ID2SYM(rb_intern("*"));
+    cumo_sym_all = ID2SYM(rb_intern("all"));
+    cumo_sym_minus = ID2SYM(rb_intern("-"));
+    cumo_sym_new = ID2SYM(rb_intern("new"));
+    cumo_sym_reverse = ID2SYM(rb_intern("reverse"));
+    cumo_sym_plus = ID2SYM(rb_intern("+"));
+    //cumo_sym_reduce = ID2SYM(rb_intern("reduce"));
+    cumo_sym_sum = ID2SYM(rb_intern("sum"));
+    cumo_sym_tilde = ID2SYM(rb_intern("~"));
+    cumo_sym_rest = ID2SYM(rb_intern("rest"));
+    cumo_id_beg = rb_intern("begin");
+    cumo_id_end = rb_intern("end");
+    cumo_id_exclude_end = rb_intern("exclude_end?");
+    cumo_id_each = rb_intern("each");
+    cumo_id_step = rb_intern("step");
+    cumo_id_dup = rb_intern("dup");
+    cumo_id_bracket = rb_intern("[]");
+    cumo_id_shift_left = rb_intern("<<");
+    cumo_id_mask = rb_intern("mask");
 }