cumo 0.1.0 → 0.1.1
- checksums.yaml +5 -5
- data/.gitignore +1 -0
- data/3rd_party/LICENSE.txt +60 -0
- data/3rd_party/mkmf-cu/lib/mkmf-cu/cli.rb +13 -1
- data/LICENSE.txt +1 -62
- data/README.md +33 -29
- data/bench/cumo_bench.rb +47 -25
- data/bench/numo_bench.rb +27 -25
- data/docs/src-tree.md +16 -0
- data/ext/cumo/cuda/cublas.c +69 -219
- data/ext/cumo/cuda/memory_pool_impl.hpp +1 -0
- data/ext/cumo/cuda/runtime.c +2 -14
- data/ext/cumo/cumo.c +16 -16
- data/ext/cumo/include/cumo.h +2 -2
- data/ext/cumo/include/cumo/cuda/cublas.h +6 -129
- data/ext/cumo/include/cumo/cuda/runtime.h +16 -0
- data/ext/cumo/include/cumo/indexer.h +46 -63
- data/ext/cumo/include/cumo/intern.h +58 -112
- data/ext/cumo/include/cumo/narray.h +214 -185
- data/ext/cumo/include/cumo/narray_kernel.h +66 -37
- data/ext/cumo/include/cumo/ndloop.h +42 -42
- data/ext/cumo/include/cumo/reduce_kernel.h +55 -71
- data/ext/cumo/include/cumo/template.h +56 -51
- data/ext/cumo/include/cumo/template_kernel.h +31 -31
- data/ext/cumo/include/cumo/types/bit.h +3 -3
- data/ext/cumo/include/cumo/types/bit_kernel.h +2 -2
- data/ext/cumo/include/cumo/types/complex.h +126 -126
- data/ext/cumo/include/cumo/types/complex_kernel.h +126 -126
- data/ext/cumo/include/cumo/types/complex_macro.h +28 -28
- data/ext/cumo/include/cumo/types/complex_macro_kernel.h +20 -20
- data/ext/cumo/include/cumo/types/dcomplex.h +5 -5
- data/ext/cumo/include/cumo/types/dcomplex_kernel.h +1 -1
- data/ext/cumo/include/cumo/types/int_macro.h +1 -1
- data/ext/cumo/include/cumo/types/int_macro_kernel.h +1 -1
- data/ext/cumo/include/cumo/types/robj_macro.h +30 -30
- data/ext/cumo/include/cumo/types/scomplex.h +5 -5
- data/ext/cumo/include/cumo/types/scomplex_kernel.h +1 -1
- data/ext/cumo/narray/array.c +143 -143
- data/ext/cumo/narray/data.c +184 -184
- data/ext/cumo/narray/gen/cogen.rb +5 -2
- data/ext/cumo/narray/gen/cogen_kernel.rb +5 -2
- data/ext/cumo/narray/gen/def/dcomplex.rb +1 -1
- data/ext/cumo/narray/gen/def/scomplex.rb +1 -1
- data/ext/cumo/narray/gen/erbln.rb +132 -0
- data/ext/cumo/narray/gen/erbpp2.rb +18 -13
- data/ext/cumo/narray/gen/narray_def.rb +3 -3
- data/ext/cumo/narray/gen/spec.rb +2 -2
- data/ext/cumo/narray/gen/tmpl/accum.c +15 -15
- data/ext/cumo/narray/gen/tmpl/accum_binary.c +22 -22
- data/ext/cumo/narray/gen/tmpl/accum_binary_kernel.cu +3 -3
- data/ext/cumo/narray/gen/tmpl/accum_index.c +30 -30
- data/ext/cumo/narray/gen/tmpl/accum_index_kernel.cu +2 -2
- data/ext/cumo/narray/gen/tmpl/accum_kernel.cu +3 -3
- data/ext/cumo/narray/gen/tmpl/alloc_func.c +14 -14
- data/ext/cumo/narray/gen/tmpl/allocate.c +11 -11
- data/ext/cumo/narray/gen/tmpl/aref.c +2 -2
- data/ext/cumo/narray/gen/tmpl/aref_cpu.c +4 -4
- data/ext/cumo/narray/gen/tmpl/aset.c +2 -2
- data/ext/cumo/narray/gen/tmpl/binary.c +28 -28
- data/ext/cumo/narray/gen/tmpl/binary2.c +18 -18
- data/ext/cumo/narray/gen/tmpl/binary2_kernel.cu +3 -3
- data/ext/cumo/narray/gen/tmpl/binary_kernel.cu +6 -6
- data/ext/cumo/narray/gen/tmpl/binary_s.c +13 -13
- data/ext/cumo/narray/gen/tmpl/binary_s_kernel.cu +3 -3
- data/ext/cumo/narray/gen/tmpl/bincount.c +23 -23
- data/ext/cumo/narray/gen/tmpl/cast.c +7 -7
- data/ext/cumo/narray/gen/tmpl/cast_array.c +3 -3
- data/ext/cumo/narray/gen/tmpl/clip.c +38 -38
- data/ext/cumo/narray/gen/tmpl/complex_accum_kernel.cu +2 -2
- data/ext/cumo/narray/gen/tmpl/cond_binary.c +19 -19
- data/ext/cumo/narray/gen/tmpl/cond_binary_kernel.cu +7 -7
- data/ext/cumo/narray/gen/tmpl/cond_unary.c +15 -15
- data/ext/cumo/narray/gen/tmpl/cum.c +15 -15
- data/ext/cumo/narray/gen/tmpl/each.c +9 -9
- data/ext/cumo/narray/gen/tmpl/each_with_index.c +9 -9
- data/ext/cumo/narray/gen/tmpl/ewcomp.c +15 -15
- data/ext/cumo/narray/gen/tmpl/ewcomp_kernel.cu +3 -3
- data/ext/cumo/narray/gen/tmpl/extract_cpu.c +5 -5
- data/ext/cumo/narray/gen/tmpl/extract_data.c +12 -12
- data/ext/cumo/narray/gen/tmpl/eye.c +9 -9
- data/ext/cumo/narray/gen/tmpl/eye_kernel.cu +3 -3
- data/ext/cumo/narray/gen/tmpl/fill.c +9 -9
- data/ext/cumo/narray/gen/tmpl/fill_kernel.cu +6 -6
- data/ext/cumo/narray/gen/tmpl/float_accum_kernel.cu +1 -1
- data/ext/cumo/narray/gen/tmpl/format.c +11 -11
- data/ext/cumo/narray/gen/tmpl/format_to_a.c +8 -8
- data/ext/cumo/narray/gen/tmpl/frexp.c +13 -13
- data/ext/cumo/narray/gen/tmpl/gemm.c +252 -108
- data/ext/cumo/narray/gen/tmpl/inspect.c +1 -1
- data/ext/cumo/narray/gen/tmpl/lib.c +2 -2
- data/ext/cumo/narray/gen/tmpl/logseq.c +7 -7
- data/ext/cumo/narray/gen/tmpl/logseq_kernel.cu +6 -6
- data/ext/cumo/narray/gen/tmpl/map_with_index.c +17 -17
- data/ext/cumo/narray/gen/tmpl/median.c +10 -10
- data/ext/cumo/narray/gen/tmpl/minmax.c +10 -10
- data/ext/cumo/narray/gen/tmpl/new_dim0.c +3 -3
- data/ext/cumo/narray/gen/tmpl/poly.c +6 -6
- data/ext/cumo/narray/gen/tmpl/pow.c +28 -28
- data/ext/cumo/narray/gen/tmpl/pow_kernel.cu +6 -6
- data/ext/cumo/narray/gen/tmpl/rand.c +10 -10
- data/ext/cumo/narray/gen/tmpl/rand_norm.c +7 -7
- data/ext/cumo/narray/gen/tmpl/real_accum_kernel.cu +6 -6
- data/ext/cumo/narray/gen/tmpl/seq.c +7 -7
- data/ext/cumo/narray/gen/tmpl/seq_kernel.cu +6 -6
- data/ext/cumo/narray/gen/tmpl/set2.c +20 -20
- data/ext/cumo/narray/gen/tmpl/sort.c +11 -11
- data/ext/cumo/narray/gen/tmpl/sort_index.c +18 -18
- data/ext/cumo/narray/gen/tmpl/store.c +6 -6
- data/ext/cumo/narray/gen/tmpl/store_array.c +19 -19
- data/ext/cumo/narray/gen/tmpl/store_array_kernel.cu +12 -12
- data/ext/cumo/narray/gen/tmpl/store_bit.c +23 -23
- data/ext/cumo/narray/gen/tmpl/store_bit_kernel.cu +28 -28
- data/ext/cumo/narray/gen/tmpl/store_from.c +16 -16
- data/ext/cumo/narray/gen/tmpl/store_from_kernel.cu +12 -12
- data/ext/cumo/narray/gen/tmpl/to_a.c +10 -10
- data/ext/cumo/narray/gen/tmpl/unary.c +25 -25
- data/ext/cumo/narray/gen/tmpl/unary2.c +17 -17
- data/ext/cumo/narray/gen/tmpl/unary_kernel.cu +15 -15
- data/ext/cumo/narray/gen/tmpl/unary_ret2.c +13 -13
- data/ext/cumo/narray/gen/tmpl/unary_s.c +17 -17
- data/ext/cumo/narray/gen/tmpl/unary_s_kernel.cu +12 -12
- data/ext/cumo/narray/gen/tmpl_bit/allocate.c +9 -9
- data/ext/cumo/narray/gen/tmpl_bit/aref.c +2 -2
- data/ext/cumo/narray/gen/tmpl_bit/aref_cpu.c +5 -5
- data/ext/cumo/narray/gen/tmpl_bit/aset.c +2 -2
- data/ext/cumo/narray/gen/tmpl_bit/binary.c +29 -29
- data/ext/cumo/narray/gen/tmpl_bit/bit_count.c +14 -14
- data/ext/cumo/narray/gen/tmpl_bit/bit_count_cpu.c +21 -21
- data/ext/cumo/narray/gen/tmpl_bit/bit_count_kernel.cu +28 -28
- data/ext/cumo/narray/gen/tmpl_bit/bit_reduce.c +29 -29
- data/ext/cumo/narray/gen/tmpl_bit/each.c +10 -10
- data/ext/cumo/narray/gen/tmpl_bit/each_with_index.c +10 -10
- data/ext/cumo/narray/gen/tmpl_bit/extract.c +8 -8
- data/ext/cumo/narray/gen/tmpl_bit/extract_cpu.c +8 -8
- data/ext/cumo/narray/gen/tmpl_bit/fill.c +17 -17
- data/ext/cumo/narray/gen/tmpl_bit/format.c +14 -14
- data/ext/cumo/narray/gen/tmpl_bit/format_to_a.c +11 -11
- data/ext/cumo/narray/gen/tmpl_bit/inspect.c +3 -3
- data/ext/cumo/narray/gen/tmpl_bit/mask.c +33 -33
- data/ext/cumo/narray/gen/tmpl_bit/store_array.c +19 -19
- data/ext/cumo/narray/gen/tmpl_bit/store_bit.c +22 -22
- data/ext/cumo/narray/gen/tmpl_bit/store_from.c +18 -18
- data/ext/cumo/narray/gen/tmpl_bit/to_a.c +12 -12
- data/ext/cumo/narray/gen/tmpl_bit/unary.c +24 -24
- data/ext/cumo/narray/gen/tmpl_bit/where.c +16 -16
- data/ext/cumo/narray/gen/tmpl_bit/where2.c +20 -20
- data/ext/cumo/narray/index.c +213 -213
- data/ext/cumo/narray/math.c +27 -27
- data/ext/cumo/narray/narray.c +484 -484
- data/ext/cumo/narray/ndloop.c +259 -258
- data/ext/cumo/narray/rand.c +3 -3
- data/ext/cumo/narray/step.c +70 -70
- data/ext/cumo/narray/struct.c +139 -139
- metadata +6 -7
- data/ext/cumo/include/cumo/intern_fwd.h +0 -38
- data/lib/erbpp.rb +0 -294
- data/lib/erbpp/line_number.rb +0 -137
- data/lib/erbpp/narray_def.rb +0 -381
data/ext/cumo/narray/gen/tmpl/rand_norm.c

@@ -4,7 +4,7 @@ typedef struct {
 } randn_opt_t;

 static void
-<%=c_iter%>(
+<%=c_iter%>(cumo_na_loop_t *const lp)
 {
 size_t i;
 char *p1;
@@ -19,13 +19,13 @@ static void
 rtype sigma;
 randn_opt_t *g;

-
-
+CUMO_INIT_COUNTER(lp, i);
+CUMO_INIT_PTR_IDX(lp, 0, p1, s1, idx1);
 g = (randn_opt_t*)(lp->opt_ptr);
 mu = g->mu;
 sigma = g->sigma;

-
+CUMO_SHOW_SYNCHRONIZE_FIXME_WARNING_ONCE("<%=name%>", "<%=type_name%>");
 cumo_cuda_runtime_check_status(cudaDeviceSynchronize());
 if (idx1) {
 <% if is_complex %>
@@ -102,8 +102,8 @@ static VALUE
 int n;
 randn_opt_t g;
 VALUE v1=Qnil, v2=Qnil;
-
-
+cumo_ndfunc_arg_in_t ain[1] = {{CUMO_OVERWRITE,0}};
+cumo_ndfunc_t ndf = {<%=c_iter%>, CUMO_FULL_LOOP, 1,0, ain,0};

 n = rb_scan_args(argc, args, "02", &v1, &v2);
 if (n == 0) {
@@ -116,6 +116,6 @@ static VALUE
 } else {
 g.sigma = 1;
 }
-
+cumo_na_ndloop3(&ndf, &g, 1, self);
 return self;
 }
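The pattern in these rand_norm.c hunks repeats across the generated templates: the iterator now takes a cumo_na_loop_t*, pulls its counter and argument pointer out with the CUMO_INIT_* macros, and is wired up through a cumo_ndfunc_t descriptor handed to cumo_na_ndloop3. Below is a minimal, self-contained sketch of that callback-plus-descriptor flow using hypothetical stand-in types (loop_t, ndfunc_t, run_ndloop); it illustrates the shape of the generated code, not Cumo's actual API.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins for cumo_na_loop_t and cumo_ndfunc_t. */
typedef struct {
    size_t    n;        /* element count, what CUMO_INIT_COUNTER reads */
    char     *ptr;      /* base pointer of argument 0                  */
    ptrdiff_t step;     /* byte stride, what CUMO_INIT_PTR reads       */
    void     *opt_ptr;  /* user options, like lp->opt_ptr              */
} loop_t;

typedef struct {
    void (*func)(loop_t *);  /* iterator callback, like ndf.func       */
} ndfunc_t;

typedef struct { double mu, sigma; } randn_opt_t;

/* Iterator: walks one strided argument, like the generated <%=c_iter%>. */
static void iter_fill_mu(loop_t *lp)
{
    size_t i = lp->n;
    char *p1 = lp->ptr;
    ptrdiff_t s1 = lp->step;
    randn_opt_t *g = (randn_opt_t *)lp->opt_ptr;
    for (; i--; p1 += s1) {
        *(double *)p1 = g->mu;  /* stand-in for the real normal sampling */
    }
}

/* Driver: plays the role of cumo_na_ndloop3(&ndf, &g, 1, self). */
static void run_ndloop(const ndfunc_t *ndf, void *opt, double *data, size_t n)
{
    loop_t lp = { n, (char *)data, (ptrdiff_t)sizeof(double), opt };
    ndf->func(&lp);
}

int main(void)
{
    double a[4] = {0};
    randn_opt_t g = { 1.5, 1.0 };
    ndfunc_t ndf = { iter_fill_mu };
    run_ndloop(&ndf, &g, a, 4);
    printf("%g %g %g %g\n", a[0], a[1], a[2], a[3]);  /* 1.5 1.5 1.5 1.5 */
    return 0;
}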
data/ext/cumo/narray/gen/tmpl/real_accum_kernel.cu

@@ -34,7 +34,7 @@ struct cumo_<%=type_name%>_max_impl {
 };

 // TODO(sonots): Implement minmax
-__global__ void cumo_<%=type_name%>_ptp_kernel(
+__global__ void cumo_<%=type_name%>_ptp_kernel(cumo_na_reduction_arg_t arg)
 {
 dtype min=0,max=1;
 //<%=type_name%>_minmax_kernel<<<1,1>>>(n,p1,s1,&min,&max);
@@ -49,27 +49,27 @@ extern "C" {
 #endif
 #endif

-void cumo_<%=type_name%>_sum_kernel_launch(
+void cumo_<%=type_name%>_sum_kernel_launch(cumo_na_reduction_arg_t* arg)
 {
 cumo_reduce<dtype, <%=dtype%>, cumo_<%=type_name%>_sum_impl>(*arg, cumo_<%=type_name%>_sum_impl{});
 }

-void cumo_<%=type_name%>_prod_kernel_launch(
+void cumo_<%=type_name%>_prod_kernel_launch(cumo_na_reduction_arg_t* arg)
 {
 cumo_reduce<dtype, <%=dtype%>, cumo_<%=type_name%>_prod_impl>(*arg, cumo_<%=type_name%>_prod_impl{});
 }

-void cumo_<%=type_name%>_min_kernel_launch(
+void cumo_<%=type_name%>_min_kernel_launch(cumo_na_reduction_arg_t* arg)
 {
 cumo_reduce<dtype, dtype, cumo_<%=type_name%>_min_impl>(*arg, cumo_<%=type_name%>_min_impl{});
 }

-void cumo_<%=type_name%>_max_kernel_launch(
+void cumo_<%=type_name%>_max_kernel_launch(cumo_na_reduction_arg_t* arg)
 {
 cumo_reduce<dtype, dtype, cumo_<%=type_name%>_max_impl>(*arg, cumo_<%=type_name%>_max_impl{});
 }

-void cumo_<%=type_name%>_ptp_kernel_launch(
+void cumo_<%=type_name%>_ptp_kernel_launch(cumo_na_reduction_arg_t* arg)
 {
 cumo_<%=type_name%>_ptp_kernel<<<1,1>>>(*arg);
 }
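In these real_accum_kernel.cu hunks each *_kernel_launch wrapper gains an explicit cumo_na_reduction_arg_t argument that it forwards to the templated cumo_reduce<> (or, for ptp, to a one-block kernel). Below is a hedged CUDA sketch of that wrapper shape with hypothetical names (reduction_arg_t, sum_kernel_launch) and a deliberately naive single-block atomic sum standing in for the real tree reduction in reduce_kernel.h.

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical stand-in for cumo_na_reduction_arg_t.
struct reduction_arg_t {
    const float *in;   // device input
    float       *out;  // device output (one accumulated value)
    size_t       n;    // element count
};

// Naive single-block strided sum; the real cumo_reduce<> is a generic tree reduction.
__global__ void sum_kernel(reduction_arg_t arg)
{
    float acc = 0.0f;
    for (size_t i = threadIdx.x; i < arg.n; i += blockDim.x) acc += arg.in[i];
    atomicAdd(arg.out, acc);
}

// C-linkage launch wrapper, same shape as cumo_<type>_sum_kernel_launch(arg).
extern "C" void sum_kernel_launch(reduction_arg_t *arg)
{
    sum_kernel<<<1, 256>>>(*arg);
}

int main()
{
    const size_t n = 1024;
    float *in, *out;
    cudaMallocManaged(&in, n * sizeof(float));
    cudaMallocManaged(&out, sizeof(float));
    for (size_t i = 0; i < n; ++i) in[i] = 1.0f;
    *out = 0.0f;
    reduction_arg_t arg = { in, out, n };
    sum_kernel_launch(&arg);
    cudaDeviceSynchronize();
    std::printf("sum = %g\n", *out);  // expect 1024
    cudaFree(in);
    cudaFree(out);
    return 0;
}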
data/ext/cumo/narray/gen/tmpl/seq.c

@@ -22,7 +22,7 @@ void <%="cumo_#{c_iter}_stride_kernel_launch"%>(char *p1, ssize_t s1, seq_data_t
 <% end %>

 static void
-<%=c_iter%>(
+<%=c_iter%>(cumo_na_loop_t *const lp)
 {
 size_t i;
 char *p1;
@@ -32,8 +32,8 @@ static void
 seq_count_t c;
 seq_opt_t *g;

-
-
+CUMO_INIT_COUNTER(lp, i);
+CUMO_INIT_PTR_IDX(lp, 0, p1, s1, idx1);
 g = (seq_opt_t*)(lp->opt_ptr);
 beg = g->beg;
 step = g->step;
@@ -41,7 +41,7 @@ static void
 <% if is_object %>
 {
 dtype x;
-
+CUMO_SHOW_SYNCHRONIZE_FIXME_WARNING_ONCE("<%=name%>", "<%=type_name%>");
 if (idx1) {
 for (; i--;) {
 x = f_seq(beg,step,c++);
@@ -91,8 +91,8 @@ static VALUE
 {
 seq_opt_t *g;
 VALUE vbeg=Qnil, vstep=Qnil;
-
-
+cumo_ndfunc_arg_in_t ain[1] = {{CUMO_OVERWRITE,0}};
+cumo_ndfunc_t ndf = {<%=c_iter%>, CUMO_FULL_LOOP, 1,0, ain,0};

 g = ALLOCA_N(seq_opt_t,1);
 g->beg = m_zero;
@@ -107,6 +107,6 @@ static VALUE
 if (vstep!=Qnil) {g->step = m_num_to_data(vstep);}
 <% end %>

-
+cumo_na_ndloop3(&ndf, g, 1, self);
 return self;
 }
data/ext/cumo/narray/gen/tmpl/seq_kernel.cu

@@ -29,15 +29,15 @@ __global__ void <%="cumo_#{c_iter}_stride_kernel"%>(char *p1, size_t s1, seq_dat

 void <%="cumo_#{c_iter}_index_kernel_launch"%>(char *p1, size_t* idx1, seq_data_t beg, seq_data_t step, seq_count_t c, uint64_t n)
 {
-size_t
-size_t
-<%="cumo_#{c_iter}_index_kernel"%><<<
+size_t grid_dim = cumo_get_grid_dim(n);
+size_t block_dim = cumo_get_block_dim(n);
+<%="cumo_#{c_iter}_index_kernel"%><<<grid_dim, block_dim>>>(p1,idx1,beg,step,c,n);
 }

 void <%="cumo_#{c_iter}_stride_kernel_launch"%>(char *p1, ssize_t s1, seq_data_t beg, seq_data_t step, seq_count_t c, uint64_t n)
 {
-size_t
-size_t
-<%="cumo_#{c_iter}_stride_kernel"%><<<
+size_t grid_dim = cumo_get_grid_dim(n);
+size_t block_dim = cumo_get_block_dim(n);
+<%="cumo_#{c_iter}_stride_kernel"%><<<grid_dim, block_dim>>>(p1,s1,beg,step,c,n);
 }
 <% end %>
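The seq_kernel.cu hunk fills in the body of the two launch helpers: both now derive grid_dim and block_dim from the element count via cumo_get_grid_dim / cumo_get_block_dim and launch the index or stride variant of the generated kernel. The sketch below shows that sizing-then-launch pattern in plain CUDA; get_block_dim / get_grid_dim and the 256-thread block are illustrative assumptions, not Cumo's actual heuristics.

#include <cstdio>
#include <cstdint>
#include <cuda_runtime.h>

// Illustrative sizing helpers standing in for cumo_get_block_dim / cumo_get_grid_dim.
static unsigned get_block_dim(uint64_t n) { return n < 256 ? (unsigned)n : 256u; }
static unsigned get_grid_dim(uint64_t n, unsigned block) { return (unsigned)((n + block - 1) / block); }

// Stride variant: element i lives at p1 + i * s1 bytes, as in the seq stride kernel.
__global__ void seq_stride_kernel(char *p1, ptrdiff_t s1, double beg, double step, uint64_t n)
{
    uint64_t i = blockIdx.x * (uint64_t)blockDim.x + threadIdx.x;
    if (i < n) *(double *)(p1 + i * s1) = beg + step * (double)i;
}

// Launch wrapper in the same shape as the generated *_stride_kernel_launch.
void seq_stride_kernel_launch(char *p1, ptrdiff_t s1, double beg, double step, uint64_t n)
{
    unsigned block_dim = get_block_dim(n);
    unsigned grid_dim  = get_grid_dim(n, block_dim);
    seq_stride_kernel<<<grid_dim, block_dim>>>(p1, s1, beg, step, n);
}

int main()
{
    const uint64_t n = 8;
    double *d;
    cudaMallocManaged(&d, n * sizeof(double));
    seq_stride_kernel_launch((char *)d, sizeof(double), 0.0, 0.5, n);
    cudaDeviceSynchronize();
    for (uint64_t i = 0; i < n; ++i) std::printf("%g ", d[i]);  // 0 0.5 1 ... 3.5
    std::printf("\n");
    cudaFree(d);
    return 0;
}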
data/ext/cumo/narray/gen/tmpl/set2.c

@@ -1,5 +1,5 @@
 static void
-<%=c_iter%>(
+<%=c_iter%>(cumo_na_loop_t *const lp)
 {
 size_t i;
 char *p1, *p2;
@@ -7,40 +7,40 @@ static void
 size_t *idx1, *idx2;
 dtype x;
 <%=dtype%> y;
-
-
-
-
+CUMO_INIT_COUNTER(lp, i);
+CUMO_INIT_PTR_IDX(lp, 0, p1, s1, idx1);
+CUMO_INIT_PTR_IDX(lp, 1, p2, s2, idx2);
+CUMO_SHOW_SYNCHRONIZE_FIXME_WARNING_ONCE("<%=name%>", "<%=type_name%>");
 if (idx1) {
 if (idx2) {
 for (; i--;) {
-
-
+CUMO_GET_DATA(p1+*idx1,dtype,x);
+CUMO_GET_DATA_INDEX(p2,idx2,<%=dtype%>,y);
 x = m_<%=name%>(x,y);
-
+CUMO_SET_DATA_INDEX(p1,idx1,dtype,x);
 }
 } else {
 for (; i--;) {
-
-
+CUMO_GET_DATA(p1+*idx1,dtype,x);
+CUMO_GET_DATA_STRIDE(p2,s2,<%=dtype%>,y);
 x = m_<%=name%>(x,y);
-
+CUMO_SET_DATA_INDEX(p1,idx1,dtype,x);
 }
 }
 } else {
 if (idx2) {
 for (; i--;) {
-
-
+CUMO_GET_DATA(p1,dtype,x);
+CUMO_GET_DATA_INDEX(p2,idx2,<%=dtype%>,y);
 x = m_<%=name%>(x,y);
-
+CUMO_SET_DATA_STRIDE(p1,s1,dtype,x);
 }
 } else {
 for (; i--;) {
-
-
+CUMO_GET_DATA(p1,dtype,x);
+CUMO_GET_DATA_STRIDE(p2,s2,<%=dtype%>,y);
 x = m_<%=name%>(x,y);
-
+CUMO_SET_DATA_STRIDE(p1,s1,dtype,x);
 }
 }
 }
@@ -49,9 +49,9 @@ static void
 static VALUE
 <%=c_func(1)%>(VALUE self, VALUE a1)
 {
-
-
+cumo_ndfunc_arg_in_t ain[2] = {{CUMO_OVERWRITE,0},{<%=result_class%>,0}};
+cumo_ndfunc_t ndf = { <%=c_iter%>, CUMO_FULL_LOOP, 2, 0, ain, 0 };

-
+cumo_na_ndloop(&ndf, 2, self, a1);
 return a1;
 }
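The set2.c iterator above handles the four addressing combinations for its two operands: each pointer can be walked either through an index array (CUMO_GET_DATA_INDEX / CUMO_SET_DATA_INDEX) or by a fixed byte stride (CUMO_GET_DATA_STRIDE / CUMO_SET_DATA_STRIDE). The host-side sketch below reproduces one of those branches with hypothetical macro names; it illustrates the addressing scheme only and is not Cumo's implementation.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical analogues of the CUMO_*_DATA_* macros: STRIDE mode advances a
 * pointer by a byte stride after each access, INDEX mode dereferences a byte
 * offset taken from an index array and then advances the index pointer. */
#define GET_DATA_STRIDE(p, s, T, x)  do { (x) = *(T *)(p); (p) += (s); } while (0)
#define SET_DATA_STRIDE(p, s, T, x)  do { *(T *)(p) = (x); (p) += (s); } while (0)
#define GET_DATA_INDEX(p, idx, T, x) do { (x) = *(T *)((p) + *(idx)); (idx)++; } while (0)
#define SET_DATA_INDEX(p, idx, T, x) do { *(T *)((p) + *(idx)) = (x); (idx)++; } while (0)

/* a1 <- op(a1, a2) with a1 addressed by an index array and a2 by a stride,
 * mirroring one of the four branches of the set2/binary iterators. */
static void add_index_stride(char *p1, size_t *idx1, char *p2, ptrdiff_t s2, size_t n)
{
    for (size_t i = n; i--;) {
        double x, y;
        x = *(double *)(p1 + *idx1);          /* like CUMO_GET_DATA(p1+*idx1,...) */
        GET_DATA_STRIDE(p2, s2, double, y);
        x = x + y;                            /* stand-in for m_<%=name%>(x, y)   */
        SET_DATA_INDEX(p1, idx1, double, x);
    }
}

int main(void)
{
    double a1[3] = {1, 2, 3}, a2[3] = {10, 20, 30};
    size_t idx1[3] = {2 * sizeof(double), 0, 1 * sizeof(double)}; /* byte offsets */
    add_index_stride((char *)a1, idx1, (char *)a2, sizeof(double), 3);
    printf("%g %g %g\n", a1[0], a1[1], a1[2]);  /* 21 32 13 */
    return 0;
}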
data/ext/cumo/narray/gen/tmpl/sort.c

@@ -1,14 +1,14 @@
 <% (is_float ? ["_ignan","_prnan"] : [""]).each do |j| %>
 static void
-<%=c_iter%><%=j%>(
+<%=c_iter%><%=j%>(cumo_na_loop_t *const lp)
 {
 size_t n;
 char *ptr;
 ssize_t step;

-
-
-
+CUMO_INIT_COUNTER(lp, n);
+CUMO_INIT_PTR(lp, 0, ptr, step);
+CUMO_SHOW_SYNCHRONIZE_FIXME_WARNING_ONCE("<%=name%>", "<%=type_name%>");
 <%=type_name%>_qsort<%=j%>(ptr, n, step);
 }
 <% end %>
@@ -30,19 +30,19 @@ static VALUE
 <%=c_func(-1)%>(int argc, VALUE *argv, VALUE self)
 {
 VALUE reduce;
-
-
+cumo_ndfunc_arg_in_t ain[2] = {{CUMO_OVERWRITE,0},{cumo_sym_reduce,0}};
+cumo_ndfunc_t ndf = {0, CUMO_STRIDE_LOOP|CUMO_NDF_FLAT_REDUCE, 2,0, ain,0};

-if (!
-self =
+if (!CUMO_TEST_INPLACE(self)) {
+self = cumo_na_copy(self);
 }
 <% if is_float %>
 ndf.func = <%=c_iter%>_ignan;
-reduce =
+reduce = cumo_na_reduce_dimension(argc, argv, 1, &self, &ndf, <%=c_iter%>_prnan);
 <% else %>
 ndf.func = <%=c_iter%>;
-reduce =
+reduce = cumo_na_reduce_dimension(argc, argv, 1, &self, &ndf, 0);
 <% end %>
-
+cumo_na_ndloop(&ndf, 2, self, reduce);
 return self;
 }
data/ext/cumo/narray/gen/tmpl/sort_index.c

@@ -2,17 +2,17 @@
 [64,32].each do |i| %>
 #define idx_t int<%=i%>_t
 static void
-<%=type_name%>_index<%=i%>_qsort<%=j%>(
+<%=type_name%>_index<%=i%>_qsort<%=j%>(cumo_na_loop_t *const lp)
 {
 size_t i, n, idx;
 char *d_ptr, *i_ptr, *o_ptr;
 ssize_t d_step, i_step, o_step;
 char **ptr;

-
-
-
-
+CUMO_INIT_COUNTER(lp, n);
+CUMO_INIT_PTR(lp, 0, d_ptr, d_step);
+CUMO_INIT_PTR(lp, 1, i_ptr, i_step);
+CUMO_INIT_PTR(lp, 2, o_ptr, o_step);

 ptr = (char**)(lp->opt_ptr);

@@ -61,40 +61,40 @@ static VALUE
 <%=c_func(-1)%>(int argc, VALUE *argv, VALUE self)
 {
 size_t size;
-
+cumo_narray_t *na;
 VALUE idx, tmp, reduce, res;
 char *buf;
-
-
-
+cumo_ndfunc_arg_in_t ain[3] = {{cT,0},{0,0},{cumo_sym_reduce,0}};
+cumo_ndfunc_arg_out_t aout[1] = {{0,0,0}};
+cumo_ndfunc_t ndf = {0, CUMO_STRIDE_LOOP_NIP|CUMO_NDF_FLAT_REDUCE|CUMO_NDF_CUM, 3,1, ain,aout};

-
+CumoGetNArray(self,na);
 if (na->ndim==0) {
 return INT2FIX(0);
 }
 if (na->size > (~(u_int32_t)0)) {
 ain[1].type =
 aout[0].type = cumo_cInt64;
-idx =
+idx = cumo_na_new(cumo_cInt64, na->ndim, na->shape);
 <% if is_float %>
 ndf.func = <%=type_name%>_index64_qsort_ignan;
-reduce =
+reduce = cumo_na_reduce_dimension(argc, argv, 1, &self, &ndf,
 <%=type_name%>_index64_qsort_prnan);
 <% else %>
 ndf.func = <%=type_name%>_index64_qsort;
-reduce =
+reduce = cumo_na_reduce_dimension(argc, argv, 1, &self, &ndf, 0);
 <% end %>
 } else {
 ain[1].type =
 aout[0].type = cumo_cInt32;
-idx =
+idx = cumo_na_new(cumo_cInt32, na->ndim, na->shape);
 <% if is_float %>
 ndf.func = <%=type_name%>_index32_qsort_ignan;
-reduce =
+reduce = cumo_na_reduce_dimension(argc, argv, 1, &self, &ndf,
 <%=type_name%>_index32_qsort_prnan);
 <% else %>
 ndf.func = <%=type_name%>_index32_qsort;
-reduce =
+reduce = cumo_na_reduce_dimension(argc, argv, 1, &self, &ndf, 0);
 <% end %>
 }
 rb_funcall(idx, rb_intern("seq"), 0);
@@ -102,10 +102,10 @@ static VALUE
 size = na->size*sizeof(void*); // max capa
 buf = rb_alloc_tmp_buffer(&tmp, size);

-
+CUMO_SHOW_SYNCHRONIZE_FIXME_WARNING_ONCE("<%=name%>", "<%=type_name%>");
 cudaDeviceSynchronize();

-res =
+res = cumo_na_ndloop3(&ndf, buf, 3, self, idx, reduce);
 rb_free_tmp_buffer(&tmp);
 return res;
 }
data/ext/cumo/narray/gen/tmpl/store.c

@@ -13,7 +13,7 @@ static VALUE
 {
 VALUE r, klass;

-klass =
+klass = rb_obj_class(obj);

 <% definitions.each do |x| %>
 if (<%=x.condition("klass")%>) {
@@ -22,9 +22,9 @@ static VALUE
 }
 <% end %>

-if (
+if (CumoIsNArray(obj)) {
 r = rb_funcall(obj, rb_intern("coerce_cast"), 1, cT);
-if (
+if (rb_obj_class(r)==cT) {
 <%=c_func%>(self,r);
 return self;
 }
@@ -33,9 +33,9 @@ static VALUE
 <% if is_object %>
 robject_store_numeric(self,obj);
 <% else %>
-rb_raise(
-rb_class2name(
-rb_class2name(
+rb_raise(cumo_na_eCastError, "unknown conversion from %s to %s",
+rb_class2name(rb_obj_class(obj)),
+rb_class2name(rb_obj_class(self)));
 <% end %>
 return self;
 }
data/ext/cumo/narray/gen/tmpl/store_array.c

@@ -12,7 +12,7 @@ static void CUDART_CB
 //<% end %>

 static void
-<%=c_iter%>(
+<%=c_iter%>(cumo_na_loop_t *const lp)
 {
 size_t i, n;
 size_t i1, n1;
@@ -25,8 +25,8 @@ static void
 size_t len, c;
 double beg, step;

-
-
+CUMO_INIT_COUNTER(lp, n);
+CUMO_INIT_PTR_IDX(lp, 0, p1, s1, idx1);
 v1 = lp->args[1].value;
 i = 0;

@@ -59,39 +59,39 @@ static void

 //<% if c_iter.include? 'robject' %>
 {
-
+CUMO_SHOW_SYNCHRONIZE_FIXME_WARNING_ONCE("store_<%=name%>", "<%=type_name%>");
 cumo_cuda_runtime_check_status(cudaDeviceSynchronize());

 if (idx1) {
 for (i=i1=0; i1<n1 && i<n; i++,i1++) {
 x = ptr[i1];
-if (rb_obj_is_kind_of(x, rb_cRange) || rb_obj_is_kind_of(x,
-
+if (rb_obj_is_kind_of(x, rb_cRange) || rb_obj_is_kind_of(x, cumo_na_cStep)) {
+cumo_na_step_sequence(x,&len,&beg,&step);
 for (c=0; c<len && i<n; c++,i++) {
 y = beg + step * c;
 z = m_from_double(y);
-
+CUMO_SET_DATA_INDEX(p1, idx1, dtype, z);
 }
 }
 else if (TYPE(x) != T_ARRAY) {
 z = m_num_to_data(x);
-
+CUMO_SET_DATA_INDEX(p1, idx1, dtype, z);
 }
 }
 } else {
 for (i=i1=0; i1<n1 && i<n; i++,i1++) {
 x = ptr[i1];
-if (rb_obj_is_kind_of(x, rb_cRange) || rb_obj_is_kind_of(x,
-
+if (rb_obj_is_kind_of(x, rb_cRange) || rb_obj_is_kind_of(x, cumo_na_cStep)) {
+cumo_na_step_sequence(x,&len,&beg,&step);
 for (c=0; c<len && i<n; c++,i++) {
 y = beg + step * c;
 z = m_from_double(y);
-
+CUMO_SET_DATA_STRIDE(p1, s1, dtype, z);
 }
 }
 else if (TYPE(x) != T_ARRAY) {
 z = m_num_to_data(x);
-
+CUMO_SET_DATA_STRIDE(p1, s1, dtype, z);
 }
 }
 }
@@ -110,8 +110,8 @@ static void
 dtype* host_z = ALLOC_N(dtype, n);
 for (i=i1=0; i1<n1 && i<n; i1++) {
 x = ptr[i1];
-if (rb_obj_is_kind_of(x, rb_cRange) || rb_obj_is_kind_of(x,
-
+if (rb_obj_is_kind_of(x, rb_cRange) || rb_obj_is_kind_of(x, cumo_na_cStep)) {
+cumo_na_step_sequence(x,&len,&beg,&step);
 for (c=0; c<len && i<n; c++,i++) {
 y = beg + step * c;
 host_z[i] = m_from_double(y);
@@ -157,11 +157,11 @@ static void
 {
 if (idx1) {
 for (; i<n; i++) {
-
+CUMO_SET_DATA_INDEX(p1, idx1, dtype, z);
 }
 } else {
 for (; i<n; i++) {
-
+CUMO_SET_DATA_STRIDE(p1, s1, dtype, z);
 }
 }
 }
@@ -179,9 +179,9 @@ static void
 static VALUE
 <%=c_func%>(VALUE self, VALUE rary)
 {
-
-
+cumo_ndfunc_arg_in_t ain[2] = {{CUMO_OVERWRITE,0},{rb_cArray,0}};
+cumo_ndfunc_t ndf = {<%=c_iter%>, CUMO_FULL_LOOP, 2, 0, ain, 0};

-
+cumo_na_ndloop_store_rarray(&ndf, self, rary);
 return self;
 }
|