cumo 0.1.0 → 0.1.1
- checksums.yaml +5 -5
- data/.gitignore +1 -0
- data/3rd_party/LICENSE.txt +60 -0
- data/3rd_party/mkmf-cu/lib/mkmf-cu/cli.rb +13 -1
- data/LICENSE.txt +1 -62
- data/README.md +33 -29
- data/bench/cumo_bench.rb +47 -25
- data/bench/numo_bench.rb +27 -25
- data/docs/src-tree.md +16 -0
- data/ext/cumo/cuda/cublas.c +69 -219
- data/ext/cumo/cuda/memory_pool_impl.hpp +1 -0
- data/ext/cumo/cuda/runtime.c +2 -14
- data/ext/cumo/cumo.c +16 -16
- data/ext/cumo/include/cumo.h +2 -2
- data/ext/cumo/include/cumo/cuda/cublas.h +6 -129
- data/ext/cumo/include/cumo/cuda/runtime.h +16 -0
- data/ext/cumo/include/cumo/indexer.h +46 -63
- data/ext/cumo/include/cumo/intern.h +58 -112
- data/ext/cumo/include/cumo/narray.h +214 -185
- data/ext/cumo/include/cumo/narray_kernel.h +66 -37
- data/ext/cumo/include/cumo/ndloop.h +42 -42
- data/ext/cumo/include/cumo/reduce_kernel.h +55 -71
- data/ext/cumo/include/cumo/template.h +56 -51
- data/ext/cumo/include/cumo/template_kernel.h +31 -31
- data/ext/cumo/include/cumo/types/bit.h +3 -3
- data/ext/cumo/include/cumo/types/bit_kernel.h +2 -2
- data/ext/cumo/include/cumo/types/complex.h +126 -126
- data/ext/cumo/include/cumo/types/complex_kernel.h +126 -126
- data/ext/cumo/include/cumo/types/complex_macro.h +28 -28
- data/ext/cumo/include/cumo/types/complex_macro_kernel.h +20 -20
- data/ext/cumo/include/cumo/types/dcomplex.h +5 -5
- data/ext/cumo/include/cumo/types/dcomplex_kernel.h +1 -1
- data/ext/cumo/include/cumo/types/int_macro.h +1 -1
- data/ext/cumo/include/cumo/types/int_macro_kernel.h +1 -1
- data/ext/cumo/include/cumo/types/robj_macro.h +30 -30
- data/ext/cumo/include/cumo/types/scomplex.h +5 -5
- data/ext/cumo/include/cumo/types/scomplex_kernel.h +1 -1
- data/ext/cumo/narray/array.c +143 -143
- data/ext/cumo/narray/data.c +184 -184
- data/ext/cumo/narray/gen/cogen.rb +5 -2
- data/ext/cumo/narray/gen/cogen_kernel.rb +5 -2
- data/ext/cumo/narray/gen/def/dcomplex.rb +1 -1
- data/ext/cumo/narray/gen/def/scomplex.rb +1 -1
- data/ext/cumo/narray/gen/erbln.rb +132 -0
- data/ext/cumo/narray/gen/erbpp2.rb +18 -13
- data/ext/cumo/narray/gen/narray_def.rb +3 -3
- data/ext/cumo/narray/gen/spec.rb +2 -2
- data/ext/cumo/narray/gen/tmpl/accum.c +15 -15
- data/ext/cumo/narray/gen/tmpl/accum_binary.c +22 -22
- data/ext/cumo/narray/gen/tmpl/accum_binary_kernel.cu +3 -3
- data/ext/cumo/narray/gen/tmpl/accum_index.c +30 -30
- data/ext/cumo/narray/gen/tmpl/accum_index_kernel.cu +2 -2
- data/ext/cumo/narray/gen/tmpl/accum_kernel.cu +3 -3
- data/ext/cumo/narray/gen/tmpl/alloc_func.c +14 -14
- data/ext/cumo/narray/gen/tmpl/allocate.c +11 -11
- data/ext/cumo/narray/gen/tmpl/aref.c +2 -2
- data/ext/cumo/narray/gen/tmpl/aref_cpu.c +4 -4
- data/ext/cumo/narray/gen/tmpl/aset.c +2 -2
- data/ext/cumo/narray/gen/tmpl/binary.c +28 -28
- data/ext/cumo/narray/gen/tmpl/binary2.c +18 -18
- data/ext/cumo/narray/gen/tmpl/binary2_kernel.cu +3 -3
- data/ext/cumo/narray/gen/tmpl/binary_kernel.cu +6 -6
- data/ext/cumo/narray/gen/tmpl/binary_s.c +13 -13
- data/ext/cumo/narray/gen/tmpl/binary_s_kernel.cu +3 -3
- data/ext/cumo/narray/gen/tmpl/bincount.c +23 -23
- data/ext/cumo/narray/gen/tmpl/cast.c +7 -7
- data/ext/cumo/narray/gen/tmpl/cast_array.c +3 -3
- data/ext/cumo/narray/gen/tmpl/clip.c +38 -38
- data/ext/cumo/narray/gen/tmpl/complex_accum_kernel.cu +2 -2
- data/ext/cumo/narray/gen/tmpl/cond_binary.c +19 -19
- data/ext/cumo/narray/gen/tmpl/cond_binary_kernel.cu +7 -7
- data/ext/cumo/narray/gen/tmpl/cond_unary.c +15 -15
- data/ext/cumo/narray/gen/tmpl/cum.c +15 -15
- data/ext/cumo/narray/gen/tmpl/each.c +9 -9
- data/ext/cumo/narray/gen/tmpl/each_with_index.c +9 -9
- data/ext/cumo/narray/gen/tmpl/ewcomp.c +15 -15
- data/ext/cumo/narray/gen/tmpl/ewcomp_kernel.cu +3 -3
- data/ext/cumo/narray/gen/tmpl/extract_cpu.c +5 -5
- data/ext/cumo/narray/gen/tmpl/extract_data.c +12 -12
- data/ext/cumo/narray/gen/tmpl/eye.c +9 -9
- data/ext/cumo/narray/gen/tmpl/eye_kernel.cu +3 -3
- data/ext/cumo/narray/gen/tmpl/fill.c +9 -9
- data/ext/cumo/narray/gen/tmpl/fill_kernel.cu +6 -6
- data/ext/cumo/narray/gen/tmpl/float_accum_kernel.cu +1 -1
- data/ext/cumo/narray/gen/tmpl/format.c +11 -11
- data/ext/cumo/narray/gen/tmpl/format_to_a.c +8 -8
- data/ext/cumo/narray/gen/tmpl/frexp.c +13 -13
- data/ext/cumo/narray/gen/tmpl/gemm.c +252 -108
- data/ext/cumo/narray/gen/tmpl/inspect.c +1 -1
- data/ext/cumo/narray/gen/tmpl/lib.c +2 -2
- data/ext/cumo/narray/gen/tmpl/logseq.c +7 -7
- data/ext/cumo/narray/gen/tmpl/logseq_kernel.cu +6 -6
- data/ext/cumo/narray/gen/tmpl/map_with_index.c +17 -17
- data/ext/cumo/narray/gen/tmpl/median.c +10 -10
- data/ext/cumo/narray/gen/tmpl/minmax.c +10 -10
- data/ext/cumo/narray/gen/tmpl/new_dim0.c +3 -3
- data/ext/cumo/narray/gen/tmpl/poly.c +6 -6
- data/ext/cumo/narray/gen/tmpl/pow.c +28 -28
- data/ext/cumo/narray/gen/tmpl/pow_kernel.cu +6 -6
- data/ext/cumo/narray/gen/tmpl/rand.c +10 -10
- data/ext/cumo/narray/gen/tmpl/rand_norm.c +7 -7
- data/ext/cumo/narray/gen/tmpl/real_accum_kernel.cu +6 -6
- data/ext/cumo/narray/gen/tmpl/seq.c +7 -7
- data/ext/cumo/narray/gen/tmpl/seq_kernel.cu +6 -6
- data/ext/cumo/narray/gen/tmpl/set2.c +20 -20
- data/ext/cumo/narray/gen/tmpl/sort.c +11 -11
- data/ext/cumo/narray/gen/tmpl/sort_index.c +18 -18
- data/ext/cumo/narray/gen/tmpl/store.c +6 -6
- data/ext/cumo/narray/gen/tmpl/store_array.c +19 -19
- data/ext/cumo/narray/gen/tmpl/store_array_kernel.cu +12 -12
- data/ext/cumo/narray/gen/tmpl/store_bit.c +23 -23
- data/ext/cumo/narray/gen/tmpl/store_bit_kernel.cu +28 -28
- data/ext/cumo/narray/gen/tmpl/store_from.c +16 -16
- data/ext/cumo/narray/gen/tmpl/store_from_kernel.cu +12 -12
- data/ext/cumo/narray/gen/tmpl/to_a.c +10 -10
- data/ext/cumo/narray/gen/tmpl/unary.c +25 -25
- data/ext/cumo/narray/gen/tmpl/unary2.c +17 -17
- data/ext/cumo/narray/gen/tmpl/unary_kernel.cu +15 -15
- data/ext/cumo/narray/gen/tmpl/unary_ret2.c +13 -13
- data/ext/cumo/narray/gen/tmpl/unary_s.c +17 -17
- data/ext/cumo/narray/gen/tmpl/unary_s_kernel.cu +12 -12
- data/ext/cumo/narray/gen/tmpl_bit/allocate.c +9 -9
- data/ext/cumo/narray/gen/tmpl_bit/aref.c +2 -2
- data/ext/cumo/narray/gen/tmpl_bit/aref_cpu.c +5 -5
- data/ext/cumo/narray/gen/tmpl_bit/aset.c +2 -2
- data/ext/cumo/narray/gen/tmpl_bit/binary.c +29 -29
- data/ext/cumo/narray/gen/tmpl_bit/bit_count.c +14 -14
- data/ext/cumo/narray/gen/tmpl_bit/bit_count_cpu.c +21 -21
- data/ext/cumo/narray/gen/tmpl_bit/bit_count_kernel.cu +28 -28
- data/ext/cumo/narray/gen/tmpl_bit/bit_reduce.c +29 -29
- data/ext/cumo/narray/gen/tmpl_bit/each.c +10 -10
- data/ext/cumo/narray/gen/tmpl_bit/each_with_index.c +10 -10
- data/ext/cumo/narray/gen/tmpl_bit/extract.c +8 -8
- data/ext/cumo/narray/gen/tmpl_bit/extract_cpu.c +8 -8
- data/ext/cumo/narray/gen/tmpl_bit/fill.c +17 -17
- data/ext/cumo/narray/gen/tmpl_bit/format.c +14 -14
- data/ext/cumo/narray/gen/tmpl_bit/format_to_a.c +11 -11
- data/ext/cumo/narray/gen/tmpl_bit/inspect.c +3 -3
- data/ext/cumo/narray/gen/tmpl_bit/mask.c +33 -33
- data/ext/cumo/narray/gen/tmpl_bit/store_array.c +19 -19
- data/ext/cumo/narray/gen/tmpl_bit/store_bit.c +22 -22
- data/ext/cumo/narray/gen/tmpl_bit/store_from.c +18 -18
- data/ext/cumo/narray/gen/tmpl_bit/to_a.c +12 -12
- data/ext/cumo/narray/gen/tmpl_bit/unary.c +24 -24
- data/ext/cumo/narray/gen/tmpl_bit/where.c +16 -16
- data/ext/cumo/narray/gen/tmpl_bit/where2.c +20 -20
- data/ext/cumo/narray/index.c +213 -213
- data/ext/cumo/narray/math.c +27 -27
- data/ext/cumo/narray/narray.c +484 -484
- data/ext/cumo/narray/ndloop.c +259 -258
- data/ext/cumo/narray/rand.c +3 -3
- data/ext/cumo/narray/step.c +70 -70
- data/ext/cumo/narray/struct.c +139 -139
- metadata +6 -7
- data/ext/cumo/include/cumo/intern_fwd.h +0 -38
- data/lib/erbpp.rb +0 -294
- data/lib/erbpp/line_number.rb +0 -137
- data/lib/erbpp/narray_def.rb +0 -381
@@ -29,30 +29,30 @@ __global__ void <%="cumo_#{c_iter}_stride_scalar_kernel"%>(char *p1, ssize_t s1,
 
 void <%="cumo_#{c_iter}_index_kernel_launch"%>(char *p1, size_t *idx1, dtype* z, uint64_t n)
 {
-    size_t
-    size_t
-    <%="cumo_#{c_iter}_index_kernel"%><<<
+    size_t grid_dim = cumo_get_grid_dim(n);
+    size_t block_dim = cumo_get_block_dim(n);
+    <%="cumo_#{c_iter}_index_kernel"%><<<grid_dim, block_dim>>>(p1,idx1,z,n);
 }
 
 void <%="cumo_#{c_iter}_stride_kernel_launch"%>(char *p1, ssize_t s1, dtype* z, uint64_t n)
 {
-    size_t
-    size_t
-    <%="cumo_#{c_iter}_stride_kernel"%><<<
+    size_t grid_dim = cumo_get_grid_dim(n);
+    size_t block_dim = cumo_get_block_dim(n);
+    <%="cumo_#{c_iter}_stride_kernel"%><<<grid_dim, block_dim>>>(p1,s1,z,n);
 }
 
 void <%="cumo_#{c_iter}_index_scalar_kernel_launch"%>(char *p1, size_t *idx1, dtype z, uint64_t n)
 {
-    size_t
-    size_t
-    <%="cumo_#{c_iter}_index_scalar_kernel"%><<<
+    size_t grid_dim = cumo_get_grid_dim(n);
+    size_t block_dim = cumo_get_block_dim(n);
+    <%="cumo_#{c_iter}_index_scalar_kernel"%><<<grid_dim, block_dim>>>(p1,idx1,z,n);
 }
 
 void <%="cumo_#{c_iter}_stride_scalar_kernel_launch"%>(char *p1, ssize_t s1, dtype z, uint64_t n)
 {
-    size_t
-    size_t
-    <%="cumo_#{c_iter}_stride_scalar_kernel"%><<<
+    size_t grid_dim = cumo_get_grid_dim(n);
+    size_t block_dim = cumo_get_block_dim(n);
+    <%="cumo_#{c_iter}_stride_scalar_kernel"%><<<grid_dim, block_dim>>>(p1,s1,z,n);
 }
 
 <% end %>
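Every wrapper in the hunk above follows one pattern: derive grid and block dimensions from the element count, then launch the templated kernel with `<<<grid_dim, block_dim>>>`. Below is a minimal, self-contained sketch of that pattern; the helper bodies are assumptions (the diff only shows the names cumo_get_grid_dim/cumo_get_block_dim and the launch shape), so they are renamed example_* here.

```cuda
#include <cstdint>

// Illustrative stand-ins for cumo_get_block_dim / cumo_get_grid_dim.
// Assumption: fixed-size blocks, grid capped, remainder handled by the
// grid-stride loop inside the kernel.
static size_t example_get_block_dim(uint64_t n) {
    return n < 256 ? (size_t)n : 256;
}
static size_t example_get_grid_dim(uint64_t n) {
    size_t b = example_get_block_dim(n);
    size_t g = (n + b - 1) / b;        // ceil(n / block_dim)
    return g > 1024 ? 1024 : g;        // cap; kernel strides over the rest
}

__global__ void fill_kernel(float *p, float z, uint64_t n) {
    for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        p[i] = z;
    }
}

void fill_kernel_launch(float *p, float z, uint64_t n) {
    if (n == 0) return;                          // avoid a zero-dim launch
    size_t grid_dim  = example_get_grid_dim(n);  // same shape as the wrappers above
    size_t block_dim = example_get_block_dim(n);
    fill_kernel<<<grid_dim, block_dim>>>(p, z, n);
}
```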
@@ -1,56 +1,56 @@
 //<% unless c_iter.include? 'robject' %>
-void <%="cumo_#{c_iter}_index_index_kernel_launch"%>(char *p1, size_t p2,
-void <%="cumo_#{c_iter}_stride_index_kernel_launch"%>(char *p1, size_t p2,
-void <%="cumo_#{c_iter}_index_stride_kernel_launch"%>(char *p1, size_t p2,
-void <%="cumo_#{c_iter}_stride_stride_kernel_launch"%>(char *p1, size_t p2,
+void <%="cumo_#{c_iter}_index_index_kernel_launch"%>(char *p1, size_t p2, CUMO_BIT_DIGIT *a2, size_t *idx1, size_t *idx2, uint64_t n);
+void <%="cumo_#{c_iter}_stride_index_kernel_launch"%>(char *p1, size_t p2, CUMO_BIT_DIGIT *a2, ssize_t s1, size_t *idx2, uint64_t n);
+void <%="cumo_#{c_iter}_index_stride_kernel_launch"%>(char *p1, size_t p2, CUMO_BIT_DIGIT *a2, size_t *idx1, ssize_t s2, uint64_t n);
+void <%="cumo_#{c_iter}_stride_stride_kernel_launch"%>(char *p1, size_t p2, CUMO_BIT_DIGIT *a2, ssize_t s1, ssize_t s2, uint64_t n);
 //<% end %>
 
 static void
-<%=c_iter%>(
+<%=c_iter%>(cumo_na_loop_t *const lp)
 {
     size_t i;
     char *p1;
     size_t p2;
     ssize_t s1, s2;
     size_t *idx1, *idx2;
-
+    CUMO_BIT_DIGIT *a2;
 
-
-
-
+    CUMO_INIT_COUNTER(lp, i);
+    CUMO_INIT_PTR_IDX(lp, 0, p1, s1, idx1);
+    CUMO_INIT_PTR_BIT_IDX(lp, 1, a2, p2, s2, idx2);
 
 //<% if c_iter.include? 'robject' %>
     {
-
+        CUMO_BIT_DIGIT x;
        dtype y;
-
-
+        CUMO_SHOW_SYNCHRONIZE_WARNING_ONCE("<%=name%>", "<%=type_name%>");
+        CUMO_SHOW_SYNCHRONIZE_FIXME_WARNING_ONCE("<%=name%>", "<%=type_name%>");
        if (idx2) {
            if (idx1) {
                for (; i--;) {
-
+                    CUMO_LOAD_BIT(a2, p2+*idx2, x); idx2++;
                    y = m_from_sint(x);
-
+                    CUMO_SET_DATA_INDEX(p1,idx1,dtype,y);
                }
            } else {
                for (; i--;) {
-
+                    CUMO_LOAD_BIT(a2, p2+*idx2, x); idx2++;
                    y = m_from_sint(x);
-
+                    CUMO_SET_DATA_STRIDE(p1,s1,dtype,y);
                }
            }
        } else {
            if (idx1) {
                for (; i--;) {
-
+                    CUMO_LOAD_BIT(a2, p2, x); p2 += s2;
                    y = m_from_sint(x);
-
+                    CUMO_SET_DATA_INDEX(p1,idx1,dtype,y);
                }
            } else {
                for (; i--;) {
-
+                    CUMO_LOAD_BIT(a2, p2, x); p2 += s2;
                    y = m_from_sint(x);
-
+                    CUMO_SET_DATA_STRIDE(p1,s1,dtype,y);
                }
            }
        }
@@ -78,9 +78,9 @@ static void
 static VALUE
 <%=c_func(:nodef)%>(VALUE self, VALUE obj)
 {
-
-
+    cumo_ndfunc_arg_in_t ain[2] = {{CUMO_OVERWRITE,0},{Qnil,0}};
+    cumo_ndfunc_t ndf = {<%=c_iter%>, CUMO_FULL_LOOP, 2,0, ain,0};
 
-
+    cumo_na_ndloop(&ndf, 2, self, obj);
     return self;
 }
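The iterator above pulls single bits out of a packed CUMO_BIT_DIGIT array (CUMO_LOAD_BIT) and widens each one to the destination dtype (m_from_sint). The real macro lives in the cumo headers; the C sketch below is an assumed, simplified equivalent with a 64-bit digit, just to show what a packed-bit load amounts to.

```c
#include <stddef.h>
#include <stdint.h>

typedef uint64_t EXAMPLE_BIT_DIGIT;                 /* stand-in for CUMO_BIT_DIGIT */
#define EXAMPLE_NB (sizeof(EXAMPLE_BIT_DIGIT) * 8)  /* bits per digit */

/* Load the bit at bit-position pos from packed array a into x,
   analogous in spirit to CUMO_LOAD_BIT(a, pos, x). */
#define EXAMPLE_LOAD_BIT(a, pos, x) do {            \
    size_t dig_ = (size_t)(pos) / EXAMPLE_NB;       \
    int    bit_ = (int)((pos) % EXAMPLE_NB);        \
    (x) = ((a)[dig_] >> bit_) & 1;                  \
} while (0)

/* Widen each bit to a double, as the template does with m_from_sint. */
static void bits_to_double(const EXAMPLE_BIT_DIGIT *a, size_t p2,
                           double *out, size_t n)
{
    for (size_t i = 0; i < n; i++) {
        EXAMPLE_BIT_DIGIT x;
        EXAMPLE_LOAD_BIT(a, p2 + i, x);
        out[i] = (double)x;
    }
}
```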
@@ -1,66 +1,66 @@
 <% unless c_iter.include? 'robject' %>
-__global__ void <%="cumo_#{c_iter}_index_index_kernel"%>(char *p1, size_t p2,
+__global__ void <%="cumo_#{c_iter}_index_index_kernel"%>(char *p1, size_t p2, CUMO_BIT_DIGIT *a2, size_t *idx1, size_t *idx2, uint64_t n)
 {
     for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
-
-
+        CUMO_BIT_DIGIT x;
+        CUMO_LOAD_BIT(a2, p2 + idx2[i], x);
        *(dtype*)(p1 + idx1[i]) = m_from_real(x);
     }
 }
 
-__global__ void <%="cumo_#{c_iter}_stride_index_kernel"%>(char *p1, size_t p2,
+__global__ void <%="cumo_#{c_iter}_stride_index_kernel"%>(char *p1, size_t p2, CUMO_BIT_DIGIT *a2, ssize_t s1, size_t *idx2, uint64_t n)
 {
     for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
-
-
+        CUMO_BIT_DIGIT x;
+        CUMO_LOAD_BIT(a2, p2 + idx2[i], x);
        *(dtype*)(p1 + (i * s1)) = m_from_real(x);
     }
 }
 
-__global__ void <%="cumo_#{c_iter}_index_stride_kernel"%>(char *p1, size_t p2,
+__global__ void <%="cumo_#{c_iter}_index_stride_kernel"%>(char *p1, size_t p2, CUMO_BIT_DIGIT *a2, size_t *idx1, ssize_t s2, uint64_t n)
 {
     for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
-
-
+        CUMO_BIT_DIGIT x;
+        CUMO_LOAD_BIT(a2, p2 + (i * s2), x);
        *(dtype*)(p1 + idx1[i]) = m_from_real(x);
     }
 }
 
-__global__ void <%="cumo_#{c_iter}_stride_stride_kernel"%>(char *p1, size_t p2,
+__global__ void <%="cumo_#{c_iter}_stride_stride_kernel"%>(char *p1, size_t p2, CUMO_BIT_DIGIT *a2, ssize_t s1, ssize_t s2, uint64_t n)
 {
     for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
-
-
+        CUMO_BIT_DIGIT x;
+        CUMO_LOAD_BIT(a2, p2 + (i * s2), x);
        *(dtype*)(p1 + (i * s1)) = m_from_real(x);
     }
 }
 
-void <%="cumo_#{c_iter}_index_index_kernel_launch"%>(char *p1, size_t p2,
+void <%="cumo_#{c_iter}_index_index_kernel_launch"%>(char *p1, size_t p2, CUMO_BIT_DIGIT *a2, size_t *idx1, size_t *idx2, uint64_t n)
 {
-    size_t
-    size_t
-    <%="cumo_#{c_iter}_index_index_kernel"%><<<
+    size_t grid_dim = cumo_get_grid_dim(n);
+    size_t block_dim = cumo_get_block_dim(n);
+    <%="cumo_#{c_iter}_index_index_kernel"%><<<grid_dim, block_dim>>>(p1,p2,a2,idx1,idx2,n);
 }
 
-void <%="cumo_#{c_iter}_stride_index_kernel_launch"%>(char *p1, size_t p2,
+void <%="cumo_#{c_iter}_stride_index_kernel_launch"%>(char *p1, size_t p2, CUMO_BIT_DIGIT *a2, ssize_t s1, size_t *idx2, uint64_t n)
 {
-    size_t
-    size_t
-    <%="cumo_#{c_iter}_stride_index_kernel"%><<<
+    size_t grid_dim = cumo_get_grid_dim(n);
+    size_t block_dim = cumo_get_block_dim(n);
+    <%="cumo_#{c_iter}_stride_index_kernel"%><<<grid_dim, block_dim>>>(p1,p2,a2,s1,idx2,n);
 }
 
-void <%="cumo_#{c_iter}_index_stride_kernel_launch"%>(char *p1, size_t p2,
+void <%="cumo_#{c_iter}_index_stride_kernel_launch"%>(char *p1, size_t p2, CUMO_BIT_DIGIT *a2, size_t *idx1, ssize_t s2, uint64_t n)
 {
-    size_t
-    size_t
-    <%="cumo_#{c_iter}_index_stride_kernel"%><<<
+    size_t grid_dim = cumo_get_grid_dim(n);
+    size_t block_dim = cumo_get_block_dim(n);
+    <%="cumo_#{c_iter}_index_stride_kernel"%><<<grid_dim, block_dim>>>(p1,p2,a2,idx1,s2,n);
 }
 
-void <%="cumo_#{c_iter}_stride_stride_kernel_launch"%>(char *p1, size_t p2,
+void <%="cumo_#{c_iter}_stride_stride_kernel_launch"%>(char *p1, size_t p2, CUMO_BIT_DIGIT *a2, ssize_t s1, ssize_t s2, uint64_t n)
 {
-    size_t
-    size_t
-    <%="cumo_#{c_iter}_stride_stride_kernel"%><<<
+    size_t grid_dim = cumo_get_grid_dim(n);
+    size_t block_dim = cumo_get_block_dim(n);
+    <%="cumo_#{c_iter}_stride_stride_kernel"%><<<grid_dim, block_dim>>>(p1,p2,a2,s1,s2,n);
 }
 
 <% end %>
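All four __global__ kernels above use the grid-stride idiom: each thread starts at its global index and advances by the total thread count, so any (grid, block) configuration covers all n elements and the launch helpers may cap the grid safely. A self-contained copy kernel showing just the idiom (names are illustrative, not from cumo):

```cuda
#include <cstdint>

// Grid-stride copy: correct for any grid/block size, including grids
// smaller than n, because each thread loops until the range is covered.
__global__ void copy_kernel(const float *src, float *dst, uint64_t n) {
    for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        dst[i] = src[i];
    }
}
```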
@@ -6,46 +6,46 @@ void <%="cumo_#{c_iter}_stride_stride_kernel_launch"%>(char *p1, char *p2, ssize
 //<% end %>
 
 static void
-<%=c_iter%>(
+<%=c_iter%>(cumo_na_loop_t *const lp)
 {
     size_t i, s1, s2;
     char *p1, *p2;
     size_t *idx1, *idx2;
 
-
-
-
+    CUMO_INIT_COUNTER(lp, i);
+    CUMO_INIT_PTR_IDX(lp, 0, p1, s1, idx1);
+    CUMO_INIT_PTR_IDX(lp, 1, p2, s2, idx2);
 //<% if c_iter.include? 'robject' %>
-
+    CUMO_SHOW_SYNCHRONIZE_FIXME_WARNING_ONCE("<%=name%>", "<%=type_name%>");
     {
        <%=dtype%> x;
        dtype y;
        if (idx2) {
            if (idx1) {
                for (; i--;) {
-
+                    CUMO_GET_DATA_INDEX(p2,idx2,<%=dtype%>,x);
                    y = <%=macro%>(x);
-
+                    CUMO_SET_DATA_INDEX(p1,idx1,dtype,y);
                }
            } else {
                for (; i--;) {
-
+                    CUMO_GET_DATA_INDEX(p2,idx2,<%=dtype%>,x);
                    y = <%=macro%>(x);
-
+                    CUMO_SET_DATA_STRIDE(p1,s1,dtype,y);
                }
            }
        } else {
            if (idx1) {
                for (; i--;) {
-
+                    CUMO_GET_DATA_STRIDE(p2,s2,<%=dtype%>,x);
                    y = <%=macro%>(x);
-
+                    CUMO_SET_DATA_INDEX(p1,idx1,dtype,y);
                }
            } else {
                for (; i--;) {
-
+                    CUMO_GET_DATA_STRIDE(p2,s2,<%=dtype%>,x);
                    y = <%=macro%>(x);
-
+                    CUMO_SET_DATA_STRIDE(p1,s1,dtype,y);
                }
            }
        }
@@ -73,9 +73,9 @@ static void
 static VALUE
 <%=c_func(:nodef)%>(VALUE self, VALUE obj)
 {
-
-
+    cumo_ndfunc_arg_in_t ain[2] = {{CUMO_OVERWRITE,0},{Qnil,0}};
+    cumo_ndfunc_t ndf = { <%=c_iter%>, CUMO_FULL_LOOP, 2, 0, ain, 0 };
 
-
+    cumo_na_ndloop(&ndf, 2, self, obj);
     return self;
 }
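The four-way branch above is the ndloop access convention: each argument is walked either through an index table of byte offsets (idx != NULL) or by a fixed byte stride, which is what the CUMO_GET_DATA_INDEX / CUMO_GET_DATA_STRIDE pair distinguishes. A plain-C sketch of the two modes (an assumed simplification; the real macros also advance the cursor as they read):

```c
#include <stddef.h>
#include <string.h>

/* Read element i of a double argument addressed either by an index
   table of byte offsets or by a constant byte stride. */
static double example_get(const char *base, const size_t *idx,
                          ptrdiff_t stride, size_t i)
{
    double x;
    if (idx) {
        memcpy(&x, base + idx[i], sizeof(x));                 /* index mode  */
    } else {
        memcpy(&x, base + (ptrdiff_t)i * stride, sizeof(x));  /* stride mode */
    }
    return x;
}
```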
@@ -29,30 +29,30 @@ __global__ void <%="cumo_#{c_iter}_stride_stride_kernel"%>(char *p1, char *p2, s
 
 void <%="cumo_#{c_iter}_index_index_kernel_launch"%>(char *p1, char *p2, size_t *idx1, size_t *idx2, uint64_t n)
 {
-    size_t
-    size_t
-    <%="cumo_#{c_iter}_index_index_kernel"%><<<
+    size_t grid_dim = cumo_get_grid_dim(n);
+    size_t block_dim = cumo_get_block_dim(n);
+    <%="cumo_#{c_iter}_index_index_kernel"%><<<grid_dim, block_dim>>>(p1,p2,idx1,idx2,n);
 }
 
 void <%="cumo_#{c_iter}_stride_index_kernel_launch"%>(char *p1, char *p2, ssize_t s1, size_t *idx2, uint64_t n)
 {
-    size_t
-    size_t
-    <%="cumo_#{c_iter}_stride_index_kernel"%><<<
+    size_t grid_dim = cumo_get_grid_dim(n);
+    size_t block_dim = cumo_get_block_dim(n);
+    <%="cumo_#{c_iter}_stride_index_kernel"%><<<grid_dim, block_dim>>>(p1,p2,s1,idx2,n);
 }
 
 void <%="cumo_#{c_iter}_index_stride_kernel_launch"%>(char *p1, char *p2, size_t *idx1, ssize_t s2, uint64_t n)
 {
-    size_t
-    size_t
-    <%="cumo_#{c_iter}_index_stride_kernel"%><<<
+    size_t grid_dim = cumo_get_grid_dim(n);
+    size_t block_dim = cumo_get_block_dim(n);
+    <%="cumo_#{c_iter}_index_stride_kernel"%><<<grid_dim, block_dim>>>(p1,p2,idx1,s2,n);
 }
 
 void <%="cumo_#{c_iter}_stride_stride_kernel_launch"%>(char *p1, char *p2, ssize_t s1, ssize_t s2, uint64_t n)
 {
-    size_t
-    size_t
-    <%="cumo_#{c_iter}_stride_stride_kernel"%><<<
+    size_t grid_dim = cumo_get_grid_dim(n);
+    size_t block_dim = cumo_get_block_dim(n);
+    <%="cumo_#{c_iter}_stride_stride_kernel"%><<<grid_dim, block_dim>>>(p1,p2,s1,s2,n);
 }
 
 <% end %>
@@ -1,5 +1,5 @@
 static void
-<%=c_iter%>(
+<%=c_iter%>(cumo_na_loop_t *const lp)
 {
     size_t i, s1;
     char *p1;
@@ -7,20 +7,20 @@ static void
     dtype x;
     volatile VALUE a, y;
 
-
-
+    CUMO_INIT_COUNTER(lp, i);
+    CUMO_INIT_PTR_IDX(lp, 0, p1, s1, idx1);
     a = rb_ary_new2(i);
     rb_ary_push(lp->args[1].value, a);
-    //
+    //CUMO_SHOW_SYNCHRONIZE_FIXME_WARNING_ONCE("<%=name%>", "<%=type_name%>");
     if (idx1) {
        for (; i--;) {
-
+            CUMO_GET_DATA_INDEX(p1,idx1,dtype,x);
            y = m_data_to_num(x);
            rb_ary_push(a,y);
        }
     } else {
        for (; i--;) {
-
+            CUMO_GET_DATA_STRIDE(p1,s1,dtype,x);
            y = m_data_to_num(x);
            rb_ary_push(a,y);
        }
@@ -35,9 +35,9 @@ static void
 static VALUE
 <%=c_func(0)%>(VALUE self)
 {
-
-
-
+    cumo_ndfunc_arg_in_t ain[3] = {{Qnil,0},{cumo_sym_loop_opt},{cumo_sym_option}};
+    cumo_ndfunc_arg_out_t aout[1] = {{rb_cArray,0}}; // dummy?
+    cumo_ndfunc_t ndf = { <%=c_iter%>, CUMO_FULL_LOOP_NIP, 3, 1, ain, aout };
     cumo_cuda_runtime_check_status(cudaDeviceSynchronize());
-    return
+    return cumo_na_ndloop_cast_narray_to_rarray(&ndf, self, Qnil);
 }
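to_a materializes device data into a Ruby Array on the host, so the template calls cudaDeviceSynchronize() (wrapped in cumo_cuda_runtime_check_status) before the copy loop runs: kernel launches are asynchronous, and host reads that race ahead of the device see stale or invalid data. A minimal sketch of the same ordering, using managed memory to make the hazard visible (plain CUDA runtime API, not cumo's memory pool):

```cuda
#include <cstdio>
#include <cuda_runtime.h>

__global__ void square(float *p, int n) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        p[i] *= p[i];
    }
}

int main() {
    const int n = 4;
    float *p;
    cudaMallocManaged(&p, n * sizeof(float));  // visible to host and device
    for (int i = 0; i < n; i++) p[i] = (float)(i + 1);
    square<<<1, 32>>>(p, n);
    cudaDeviceSynchronize();   // launches are async: drain before host reads
    for (int i = 0; i < n; i++) printf("%g\n", p[i]);
    cudaFree(p);
    return 0;
}
```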
@@ -8,47 +8,47 @@ void <%="cumo_#{c_iter}_contiguous_kernel_launch"%>(char *p1, char *p2, uint64_t
 <% end %>
 
 static void
-<%=c_iter%>(
+<%=c_iter%>(cumo_na_loop_t *const lp)
 {
     size_t n;
     char *p1, *p2;
     ssize_t s1, s2;
     size_t *idx1, *idx2;
 
-
-
-
+    CUMO_INIT_COUNTER(lp, n);
+    CUMO_INIT_PTR_IDX(lp, 0, p1, s1, idx1);
+    CUMO_INIT_PTR_IDX(lp, 1, p2, s2, idx2);
 
 <% if type_name == 'robject' || name == 'map' %>
     {
        size_t i;
        dtype x;
-
+        CUMO_SHOW_SYNCHRONIZE_FIXME_WARNING_ONCE("<%=name%>", "<%=type_name%>");
        if (idx1) {
            if (idx2) {
                for (i=0; i<n; i++) {
-
+                    CUMO_GET_DATA_INDEX(p1,idx1,dtype,x);
                    x = m_<%=name%>(x);
-
+                    CUMO_SET_DATA_INDEX(p2,idx2,dtype,x);
                }
            } else {
                for (i=0; i<n; i++) {
-
+                    CUMO_GET_DATA_INDEX(p1,idx1,dtype,x);
                    x = m_<%=name%>(x);
-
+                    CUMO_SET_DATA_STRIDE(p2,s2,dtype,x);
                }
            }
        } else {
            if (idx2) {
                for (i=0; i<n; i++) {
-
+                    CUMO_GET_DATA_STRIDE(p1,s1,dtype,x);
                    x = m_<%=name%>(x);
-
+                    CUMO_SET_DATA_INDEX(p2,idx2,dtype,x);
                }
            } else {
 //<% if need_align %>
-                if (
-
+                if (cumo_is_aligned(p1,sizeof(dtype)) &&
+                    cumo_is_aligned(p2,sizeof(dtype)) ) {
                    if (s1 == sizeof(dtype) &&
                        s2 == sizeof(dtype) ) {
                        for (i=0; i<n; i++) {
@@ -56,8 +56,8 @@ static void
                        }
                        return;
                    }
-                if (
-
+                    if (cumo_is_aligned_step(s1,sizeof(dtype)) &&
+                        cumo_is_aligned_step(s2,sizeof(dtype)) ) {
 //<% end %>
                for (i=0; i<n; i++) {
                    *(dtype*)p2 = m_<%=name%>(*(dtype*)p1);
@@ -69,9 +69,9 @@ static void
            }
        }
        for (i=0; i<n; i++) {
-
+            CUMO_GET_DATA_STRIDE(p1,s1,dtype,x);
            x = m_<%=name%>(x);
-
+            CUMO_SET_DATA_STRIDE(p2,s2,dtype,x);
        }
 //<% end %>
     }
@@ -90,15 +90,15 @@ static void
            <%="cumo_#{c_iter}_stride_index_kernel_launch"%>(p1,p2,s1,idx2,n);
        } else {
 //<% if need_align %>
-            if (
-
+            if (cumo_is_aligned(p1,sizeof(dtype)) &&
+                cumo_is_aligned(p2,sizeof(dtype)) ) {
                if (s1 == sizeof(dtype) &&
                    s2 == sizeof(dtype) ) {
                    <%="cumo_#{c_iter}_contiguous_kernel_launch"%>(p1,p2,n);
                    return;
                }
-            if (
-
+                if (cumo_is_aligned_step(s1,sizeof(dtype)) &&
+                    cumo_is_aligned_step(s2,sizeof(dtype)) ) {
 //<% end %>
            <%="cumo_#{c_iter}_stride_stride_kernel_launch"%>(p1,p2,s1,s2,n);
            return;
@@ -121,12 +121,12 @@ static void
 static VALUE
 <%=c_func(0)%>(VALUE self)
 {
-
-
-
+    cumo_ndfunc_arg_in_t ain[1] = {{cT,0}};
+    cumo_ndfunc_arg_out_t aout[1] = {{cT,0}};
+    cumo_ndfunc_t ndf = {<%=c_iter%>, CUMO_FULL_LOOP, 1,1, ain,aout};
 
 <% if name == 'map' %>
     cumo_cuda_runtime_check_status(cudaDeviceSynchronize());
 <% end %>
-    return
+    return cumo_na_ndloop(&ndf, 1, self);
 }
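The need_align branch above selects progressively cheaper paths: if both pointers are dtype-aligned and both strides equal sizeof(dtype), the data is contiguous and the tight loop (or the contiguous kernel) runs; aligned but strided data takes the strided path; everything else falls back to byte-wise macros. The C sketch below mirrors only that decision structure; the helper bodies are assumptions standing in for cumo_is_aligned / cumo_is_aligned_step.

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Assumed stand-ins for cumo_is_aligned / cumo_is_aligned_step. */
static int example_is_aligned(const void *p, size_t size) {
    return ((uintptr_t)p % size) == 0;
}
static int example_is_aligned_step(ptrdiff_t s, size_t size) {
    return (s % (ptrdiff_t)size) == 0;
}

/* Elementwise negate with the same three-tier dispatch as the template. */
static void example_negate(char *p1, char *p2, ptrdiff_t s1, ptrdiff_t s2,
                           size_t n)
{
    if (example_is_aligned(p1, sizeof(double)) &&
        example_is_aligned(p2, sizeof(double))) {
        if (s1 == (ptrdiff_t)sizeof(double) && s2 == (ptrdiff_t)sizeof(double)) {
            for (size_t i = 0; i < n; i++)          /* contiguous fast path */
                ((double *)p2)[i] = -((double *)p1)[i];
            return;
        }
        if (example_is_aligned_step(s1, sizeof(double)) &&
            example_is_aligned_step(s2, sizeof(double))) {
            for (size_t i = 0; i < n; i++, p1 += s1, p2 += s2)
                *(double *)p2 = -*(double *)p1;     /* aligned, strided path */
            return;
        }
    }
    for (size_t i = 0; i < n; i++, p1 += s1, p2 += s2) {
        double x;                                   /* unaligned fallback: */
        memcpy(&x, p1, sizeof(x));                  /* byte-wise copies    */
        x = -x;
        memcpy(p2, &x, sizeof(x));
    }
}
```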