cumo 0.1.0 → 0.1.1

Files changed (158)
  1. checksums.yaml +5 -5
  2. data/.gitignore +1 -0
  3. data/3rd_party/LICENSE.txt +60 -0
  4. data/3rd_party/mkmf-cu/lib/mkmf-cu/cli.rb +13 -1
  5. data/LICENSE.txt +1 -62
  6. data/README.md +33 -29
  7. data/bench/cumo_bench.rb +47 -25
  8. data/bench/numo_bench.rb +27 -25
  9. data/docs/src-tree.md +16 -0
  10. data/ext/cumo/cuda/cublas.c +69 -219
  11. data/ext/cumo/cuda/memory_pool_impl.hpp +1 -0
  12. data/ext/cumo/cuda/runtime.c +2 -14
  13. data/ext/cumo/cumo.c +16 -16
  14. data/ext/cumo/include/cumo.h +2 -2
  15. data/ext/cumo/include/cumo/cuda/cublas.h +6 -129
  16. data/ext/cumo/include/cumo/cuda/runtime.h +16 -0
  17. data/ext/cumo/include/cumo/indexer.h +46 -63
  18. data/ext/cumo/include/cumo/intern.h +58 -112
  19. data/ext/cumo/include/cumo/narray.h +214 -185
  20. data/ext/cumo/include/cumo/narray_kernel.h +66 -37
  21. data/ext/cumo/include/cumo/ndloop.h +42 -42
  22. data/ext/cumo/include/cumo/reduce_kernel.h +55 -71
  23. data/ext/cumo/include/cumo/template.h +56 -51
  24. data/ext/cumo/include/cumo/template_kernel.h +31 -31
  25. data/ext/cumo/include/cumo/types/bit.h +3 -3
  26. data/ext/cumo/include/cumo/types/bit_kernel.h +2 -2
  27. data/ext/cumo/include/cumo/types/complex.h +126 -126
  28. data/ext/cumo/include/cumo/types/complex_kernel.h +126 -126
  29. data/ext/cumo/include/cumo/types/complex_macro.h +28 -28
  30. data/ext/cumo/include/cumo/types/complex_macro_kernel.h +20 -20
  31. data/ext/cumo/include/cumo/types/dcomplex.h +5 -5
  32. data/ext/cumo/include/cumo/types/dcomplex_kernel.h +1 -1
  33. data/ext/cumo/include/cumo/types/int_macro.h +1 -1
  34. data/ext/cumo/include/cumo/types/int_macro_kernel.h +1 -1
  35. data/ext/cumo/include/cumo/types/robj_macro.h +30 -30
  36. data/ext/cumo/include/cumo/types/scomplex.h +5 -5
  37. data/ext/cumo/include/cumo/types/scomplex_kernel.h +1 -1
  38. data/ext/cumo/narray/array.c +143 -143
  39. data/ext/cumo/narray/data.c +184 -184
  40. data/ext/cumo/narray/gen/cogen.rb +5 -2
  41. data/ext/cumo/narray/gen/cogen_kernel.rb +5 -2
  42. data/ext/cumo/narray/gen/def/dcomplex.rb +1 -1
  43. data/ext/cumo/narray/gen/def/scomplex.rb +1 -1
  44. data/ext/cumo/narray/gen/erbln.rb +132 -0
  45. data/ext/cumo/narray/gen/erbpp2.rb +18 -13
  46. data/ext/cumo/narray/gen/narray_def.rb +3 -3
  47. data/ext/cumo/narray/gen/spec.rb +2 -2
  48. data/ext/cumo/narray/gen/tmpl/accum.c +15 -15
  49. data/ext/cumo/narray/gen/tmpl/accum_binary.c +22 -22
  50. data/ext/cumo/narray/gen/tmpl/accum_binary_kernel.cu +3 -3
  51. data/ext/cumo/narray/gen/tmpl/accum_index.c +30 -30
  52. data/ext/cumo/narray/gen/tmpl/accum_index_kernel.cu +2 -2
  53. data/ext/cumo/narray/gen/tmpl/accum_kernel.cu +3 -3
  54. data/ext/cumo/narray/gen/tmpl/alloc_func.c +14 -14
  55. data/ext/cumo/narray/gen/tmpl/allocate.c +11 -11
  56. data/ext/cumo/narray/gen/tmpl/aref.c +2 -2
  57. data/ext/cumo/narray/gen/tmpl/aref_cpu.c +4 -4
  58. data/ext/cumo/narray/gen/tmpl/aset.c +2 -2
  59. data/ext/cumo/narray/gen/tmpl/binary.c +28 -28
  60. data/ext/cumo/narray/gen/tmpl/binary2.c +18 -18
  61. data/ext/cumo/narray/gen/tmpl/binary2_kernel.cu +3 -3
  62. data/ext/cumo/narray/gen/tmpl/binary_kernel.cu +6 -6
  63. data/ext/cumo/narray/gen/tmpl/binary_s.c +13 -13
  64. data/ext/cumo/narray/gen/tmpl/binary_s_kernel.cu +3 -3
  65. data/ext/cumo/narray/gen/tmpl/bincount.c +23 -23
  66. data/ext/cumo/narray/gen/tmpl/cast.c +7 -7
  67. data/ext/cumo/narray/gen/tmpl/cast_array.c +3 -3
  68. data/ext/cumo/narray/gen/tmpl/clip.c +38 -38
  69. data/ext/cumo/narray/gen/tmpl/complex_accum_kernel.cu +2 -2
  70. data/ext/cumo/narray/gen/tmpl/cond_binary.c +19 -19
  71. data/ext/cumo/narray/gen/tmpl/cond_binary_kernel.cu +7 -7
  72. data/ext/cumo/narray/gen/tmpl/cond_unary.c +15 -15
  73. data/ext/cumo/narray/gen/tmpl/cum.c +15 -15
  74. data/ext/cumo/narray/gen/tmpl/each.c +9 -9
  75. data/ext/cumo/narray/gen/tmpl/each_with_index.c +9 -9
  76. data/ext/cumo/narray/gen/tmpl/ewcomp.c +15 -15
  77. data/ext/cumo/narray/gen/tmpl/ewcomp_kernel.cu +3 -3
  78. data/ext/cumo/narray/gen/tmpl/extract_cpu.c +5 -5
  79. data/ext/cumo/narray/gen/tmpl/extract_data.c +12 -12
  80. data/ext/cumo/narray/gen/tmpl/eye.c +9 -9
  81. data/ext/cumo/narray/gen/tmpl/eye_kernel.cu +3 -3
  82. data/ext/cumo/narray/gen/tmpl/fill.c +9 -9
  83. data/ext/cumo/narray/gen/tmpl/fill_kernel.cu +6 -6
  84. data/ext/cumo/narray/gen/tmpl/float_accum_kernel.cu +1 -1
  85. data/ext/cumo/narray/gen/tmpl/format.c +11 -11
  86. data/ext/cumo/narray/gen/tmpl/format_to_a.c +8 -8
  87. data/ext/cumo/narray/gen/tmpl/frexp.c +13 -13
  88. data/ext/cumo/narray/gen/tmpl/gemm.c +252 -108
  89. data/ext/cumo/narray/gen/tmpl/inspect.c +1 -1
  90. data/ext/cumo/narray/gen/tmpl/lib.c +2 -2
  91. data/ext/cumo/narray/gen/tmpl/logseq.c +7 -7
  92. data/ext/cumo/narray/gen/tmpl/logseq_kernel.cu +6 -6
  93. data/ext/cumo/narray/gen/tmpl/map_with_index.c +17 -17
  94. data/ext/cumo/narray/gen/tmpl/median.c +10 -10
  95. data/ext/cumo/narray/gen/tmpl/minmax.c +10 -10
  96. data/ext/cumo/narray/gen/tmpl/new_dim0.c +3 -3
  97. data/ext/cumo/narray/gen/tmpl/poly.c +6 -6
  98. data/ext/cumo/narray/gen/tmpl/pow.c +28 -28
  99. data/ext/cumo/narray/gen/tmpl/pow_kernel.cu +6 -6
  100. data/ext/cumo/narray/gen/tmpl/rand.c +10 -10
  101. data/ext/cumo/narray/gen/tmpl/rand_norm.c +7 -7
  102. data/ext/cumo/narray/gen/tmpl/real_accum_kernel.cu +6 -6
  103. data/ext/cumo/narray/gen/tmpl/seq.c +7 -7
  104. data/ext/cumo/narray/gen/tmpl/seq_kernel.cu +6 -6
  105. data/ext/cumo/narray/gen/tmpl/set2.c +20 -20
  106. data/ext/cumo/narray/gen/tmpl/sort.c +11 -11
  107. data/ext/cumo/narray/gen/tmpl/sort_index.c +18 -18
  108. data/ext/cumo/narray/gen/tmpl/store.c +6 -6
  109. data/ext/cumo/narray/gen/tmpl/store_array.c +19 -19
  110. data/ext/cumo/narray/gen/tmpl/store_array_kernel.cu +12 -12
  111. data/ext/cumo/narray/gen/tmpl/store_bit.c +23 -23
  112. data/ext/cumo/narray/gen/tmpl/store_bit_kernel.cu +28 -28
  113. data/ext/cumo/narray/gen/tmpl/store_from.c +16 -16
  114. data/ext/cumo/narray/gen/tmpl/store_from_kernel.cu +12 -12
  115. data/ext/cumo/narray/gen/tmpl/to_a.c +10 -10
  116. data/ext/cumo/narray/gen/tmpl/unary.c +25 -25
  117. data/ext/cumo/narray/gen/tmpl/unary2.c +17 -17
  118. data/ext/cumo/narray/gen/tmpl/unary_kernel.cu +15 -15
  119. data/ext/cumo/narray/gen/tmpl/unary_ret2.c +13 -13
  120. data/ext/cumo/narray/gen/tmpl/unary_s.c +17 -17
  121. data/ext/cumo/narray/gen/tmpl/unary_s_kernel.cu +12 -12
  122. data/ext/cumo/narray/gen/tmpl_bit/allocate.c +9 -9
  123. data/ext/cumo/narray/gen/tmpl_bit/aref.c +2 -2
  124. data/ext/cumo/narray/gen/tmpl_bit/aref_cpu.c +5 -5
  125. data/ext/cumo/narray/gen/tmpl_bit/aset.c +2 -2
  126. data/ext/cumo/narray/gen/tmpl_bit/binary.c +29 -29
  127. data/ext/cumo/narray/gen/tmpl_bit/bit_count.c +14 -14
  128. data/ext/cumo/narray/gen/tmpl_bit/bit_count_cpu.c +21 -21
  129. data/ext/cumo/narray/gen/tmpl_bit/bit_count_kernel.cu +28 -28
  130. data/ext/cumo/narray/gen/tmpl_bit/bit_reduce.c +29 -29
  131. data/ext/cumo/narray/gen/tmpl_bit/each.c +10 -10
  132. data/ext/cumo/narray/gen/tmpl_bit/each_with_index.c +10 -10
  133. data/ext/cumo/narray/gen/tmpl_bit/extract.c +8 -8
  134. data/ext/cumo/narray/gen/tmpl_bit/extract_cpu.c +8 -8
  135. data/ext/cumo/narray/gen/tmpl_bit/fill.c +17 -17
  136. data/ext/cumo/narray/gen/tmpl_bit/format.c +14 -14
  137. data/ext/cumo/narray/gen/tmpl_bit/format_to_a.c +11 -11
  138. data/ext/cumo/narray/gen/tmpl_bit/inspect.c +3 -3
  139. data/ext/cumo/narray/gen/tmpl_bit/mask.c +33 -33
  140. data/ext/cumo/narray/gen/tmpl_bit/store_array.c +19 -19
  141. data/ext/cumo/narray/gen/tmpl_bit/store_bit.c +22 -22
  142. data/ext/cumo/narray/gen/tmpl_bit/store_from.c +18 -18
  143. data/ext/cumo/narray/gen/tmpl_bit/to_a.c +12 -12
  144. data/ext/cumo/narray/gen/tmpl_bit/unary.c +24 -24
  145. data/ext/cumo/narray/gen/tmpl_bit/where.c +16 -16
  146. data/ext/cumo/narray/gen/tmpl_bit/where2.c +20 -20
  147. data/ext/cumo/narray/index.c +213 -213
  148. data/ext/cumo/narray/math.c +27 -27
  149. data/ext/cumo/narray/narray.c +484 -484
  150. data/ext/cumo/narray/ndloop.c +259 -258
  151. data/ext/cumo/narray/rand.c +3 -3
  152. data/ext/cumo/narray/step.c +70 -70
  153. data/ext/cumo/narray/struct.c +139 -139
  154. metadata +6 -7
  155. data/ext/cumo/include/cumo/intern_fwd.h +0 -38
  156. data/lib/erbpp.rb +0 -294
  157. data/lib/erbpp/line_number.rb +0 -137
  158. data/lib/erbpp/narray_def.rb +0 -381
@@ -29,30 +29,30 @@ __global__ void <%="cumo_#{c_iter}_stride_scalar_kernel"%>(char *p1, ssize_t s1,
 
  void <%="cumo_#{c_iter}_index_kernel_launch"%>(char *p1, size_t *idx1, dtype* z, uint64_t n)
  {
- size_t gridDim = get_gridDim(n);
- size_t blockDim = get_blockDim(n);
- <%="cumo_#{c_iter}_index_kernel"%><<<gridDim, blockDim>>>(p1,idx1,z,n);
+ size_t grid_dim = cumo_get_grid_dim(n);
+ size_t block_dim = cumo_get_block_dim(n);
+ <%="cumo_#{c_iter}_index_kernel"%><<<grid_dim, block_dim>>>(p1,idx1,z,n);
  }
 
  void <%="cumo_#{c_iter}_stride_kernel_launch"%>(char *p1, ssize_t s1, dtype* z, uint64_t n)
  {
- size_t gridDim = get_gridDim(n);
- size_t blockDim = get_blockDim(n);
- <%="cumo_#{c_iter}_stride_kernel"%><<<gridDim, blockDim>>>(p1,s1,z,n);
+ size_t grid_dim = cumo_get_grid_dim(n);
+ size_t block_dim = cumo_get_block_dim(n);
+ <%="cumo_#{c_iter}_stride_kernel"%><<<grid_dim, block_dim>>>(p1,s1,z,n);
  }
 
  void <%="cumo_#{c_iter}_index_scalar_kernel_launch"%>(char *p1, size_t *idx1, dtype z, uint64_t n)
  {
- size_t gridDim = get_gridDim(n);
- size_t blockDim = get_blockDim(n);
- <%="cumo_#{c_iter}_index_scalar_kernel"%><<<gridDim, blockDim>>>(p1,idx1,z,n);
+ size_t grid_dim = cumo_get_grid_dim(n);
+ size_t block_dim = cumo_get_block_dim(n);
+ <%="cumo_#{c_iter}_index_scalar_kernel"%><<<grid_dim, block_dim>>>(p1,idx1,z,n);
  }
 
  void <%="cumo_#{c_iter}_stride_scalar_kernel_launch"%>(char *p1, ssize_t s1, dtype z, uint64_t n)
  {
- size_t gridDim = get_gridDim(n);
- size_t blockDim = get_blockDim(n);
- <%="cumo_#{c_iter}_stride_scalar_kernel"%><<<gridDim, blockDim>>>(p1,s1,z,n);
+ size_t grid_dim = cumo_get_grid_dim(n);
+ size_t block_dim = cumo_get_block_dim(n);
+ <%="cumo_#{c_iter}_stride_scalar_kernel"%><<<grid_dim, block_dim>>>(p1,s1,z,n);
  }
 
  <% end %>
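Note: the launchers in this and the following hunks all follow the same grid-stride pattern. A helper computes the grid and block dimensions from the element count, and the kernel loops with a stride of blockDim.x * gridDim.x so the chosen grid always covers n elements. Below is a minimal self-contained CUDA sketch of that pattern; the sizing helpers (my_get_grid_dim, my_get_block_dim), their caps, and the fill kernel are illustrative stand-ins, not cumo's actual cumo_get_grid_dim/cumo_get_block_dim implementations.

// Illustrative only: standalone grid-stride launch mirroring the renamed
// grid_dim/block_dim style used by the generated launchers above.
#include <cuda_runtime.h>
#include <cstdint>

__global__ void fill_kernel(float *p, float v, uint64_t n)
{
    // Grid-stride loop: each thread handles i, i+stride, i+2*stride, ...
    for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        p[i] = v;
    }
}

static size_t my_get_block_dim(uint64_t n)
{
    return n < 256 ? (size_t)n : 256;       // assumed cap; cumo may differ
}

static size_t my_get_grid_dim(uint64_t n)
{
    size_t block = my_get_block_dim(n);
    size_t grid = (n + block - 1) / block;  // enough blocks to cover n
    return grid > 65535 ? 65535 : grid;     // assumed cap; cumo may differ
}

int main()
{
    const uint64_t n = 1 << 20;
    float *d;
    cudaMalloc(&d, n * sizeof(float));
    size_t grid_dim = my_get_grid_dim(n);
    size_t block_dim = my_get_block_dim(n);
    fill_kernel<<<grid_dim, block_dim>>>(d, 1.0f, n);
    cudaDeviceSynchronize();
    cudaFree(d);
    return 0;
}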
@@ -1,56 +1,56 @@
  //<% unless c_iter.include? 'robject' %>
- void <%="cumo_#{c_iter}_index_index_kernel_launch"%>(char *p1, size_t p2, BIT_DIGIT *a2, size_t *idx1, size_t *idx2, uint64_t n);
- void <%="cumo_#{c_iter}_stride_index_kernel_launch"%>(char *p1, size_t p2, BIT_DIGIT *a2, ssize_t s1, size_t *idx2, uint64_t n);
- void <%="cumo_#{c_iter}_index_stride_kernel_launch"%>(char *p1, size_t p2, BIT_DIGIT *a2, size_t *idx1, ssize_t s2, uint64_t n);
- void <%="cumo_#{c_iter}_stride_stride_kernel_launch"%>(char *p1, size_t p2, BIT_DIGIT *a2, ssize_t s1, ssize_t s2, uint64_t n);
+ void <%="cumo_#{c_iter}_index_index_kernel_launch"%>(char *p1, size_t p2, CUMO_BIT_DIGIT *a2, size_t *idx1, size_t *idx2, uint64_t n);
+ void <%="cumo_#{c_iter}_stride_index_kernel_launch"%>(char *p1, size_t p2, CUMO_BIT_DIGIT *a2, ssize_t s1, size_t *idx2, uint64_t n);
+ void <%="cumo_#{c_iter}_index_stride_kernel_launch"%>(char *p1, size_t p2, CUMO_BIT_DIGIT *a2, size_t *idx1, ssize_t s2, uint64_t n);
+ void <%="cumo_#{c_iter}_stride_stride_kernel_launch"%>(char *p1, size_t p2, CUMO_BIT_DIGIT *a2, ssize_t s1, ssize_t s2, uint64_t n);
  //<% end %>
 
  static void
- <%=c_iter%>(na_loop_t *const lp)
+ <%=c_iter%>(cumo_na_loop_t *const lp)
  {
  size_t i;
  char *p1;
  size_t p2;
  ssize_t s1, s2;
  size_t *idx1, *idx2;
- BIT_DIGIT *a2;
+ CUMO_BIT_DIGIT *a2;
 
- INIT_COUNTER(lp, i);
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
- INIT_PTR_BIT_IDX(lp, 1, a2, p2, s2, idx2);
+ CUMO_INIT_COUNTER(lp, i);
+ CUMO_INIT_PTR_IDX(lp, 0, p1, s1, idx1);
+ CUMO_INIT_PTR_BIT_IDX(lp, 1, a2, p2, s2, idx2);
 
  //<% if c_iter.include? 'robject' %>
  {
- BIT_DIGIT x;
+ CUMO_BIT_DIGIT x;
  dtype y;
- SHOW_SYNCHRONIZE_WARNING_ONCE("<%=name%>", "<%=type_name%>");
- SHOW_SYNCHRONIZE_FIXME_WARNING_ONCE("<%=name%>", "<%=type_name%>");
+ CUMO_SHOW_SYNCHRONIZE_WARNING_ONCE("<%=name%>", "<%=type_name%>");
+ CUMO_SHOW_SYNCHRONIZE_FIXME_WARNING_ONCE("<%=name%>", "<%=type_name%>");
  if (idx2) {
  if (idx1) {
  for (; i--;) {
- LOAD_BIT(a2, p2+*idx2, x); idx2++;
+ CUMO_LOAD_BIT(a2, p2+*idx2, x); idx2++;
  y = m_from_sint(x);
- SET_DATA_INDEX(p1,idx1,dtype,y);
+ CUMO_SET_DATA_INDEX(p1,idx1,dtype,y);
  }
  } else {
  for (; i--;) {
- LOAD_BIT(a2, p2+*idx2, x); idx2++;
+ CUMO_LOAD_BIT(a2, p2+*idx2, x); idx2++;
  y = m_from_sint(x);
- SET_DATA_STRIDE(p1,s1,dtype,y);
+ CUMO_SET_DATA_STRIDE(p1,s1,dtype,y);
  }
  }
  } else {
  if (idx1) {
  for (; i--;) {
- LOAD_BIT(a2, p2, x); p2 += s2;
+ CUMO_LOAD_BIT(a2, p2, x); p2 += s2;
  y = m_from_sint(x);
- SET_DATA_INDEX(p1,idx1,dtype,y);
+ CUMO_SET_DATA_INDEX(p1,idx1,dtype,y);
  }
  } else {
  for (; i--;) {
- LOAD_BIT(a2, p2, x); p2 += s2;
+ CUMO_LOAD_BIT(a2, p2, x); p2 += s2;
  y = m_from_sint(x);
- SET_DATA_STRIDE(p1,s1,dtype,y);
+ CUMO_SET_DATA_STRIDE(p1,s1,dtype,y);
  }
  }
  }
@@ -78,9 +78,9 @@ static void
  static VALUE
  <%=c_func(:nodef)%>(VALUE self, VALUE obj)
  {
- ndfunc_arg_in_t ain[2] = {{OVERWRITE,0},{Qnil,0}};
- ndfunc_t ndf = {<%=c_iter%>, FULL_LOOP, 2,0, ain,0};
+ cumo_ndfunc_arg_in_t ain[2] = {{CUMO_OVERWRITE,0},{Qnil,0}};
+ cumo_ndfunc_t ndf = {<%=c_iter%>, CUMO_FULL_LOOP, 2,0, ain,0};
 
- na_ndloop(&ndf, 2, self, obj);
+ cumo_na_ndloop(&ndf, 2, self, obj);
  return self;
  }
@@ -1,66 +1,66 @@
  <% unless c_iter.include? 'robject' %>
- __global__ void <%="cumo_#{c_iter}_index_index_kernel"%>(char *p1, size_t p2, BIT_DIGIT *a2, size_t *idx1, size_t *idx2, uint64_t n)
+ __global__ void <%="cumo_#{c_iter}_index_index_kernel"%>(char *p1, size_t p2, CUMO_BIT_DIGIT *a2, size_t *idx1, size_t *idx2, uint64_t n)
  {
  for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
- BIT_DIGIT x;
- LOAD_BIT(a2, p2 + idx2[i], x);
+ CUMO_BIT_DIGIT x;
+ CUMO_LOAD_BIT(a2, p2 + idx2[i], x);
  *(dtype*)(p1 + idx1[i]) = m_from_real(x);
  }
  }
 
- __global__ void <%="cumo_#{c_iter}_stride_index_kernel"%>(char *p1, size_t p2, BIT_DIGIT *a2, ssize_t s1, size_t *idx2, uint64_t n)
+ __global__ void <%="cumo_#{c_iter}_stride_index_kernel"%>(char *p1, size_t p2, CUMO_BIT_DIGIT *a2, ssize_t s1, size_t *idx2, uint64_t n)
  {
  for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
- BIT_DIGIT x;
- LOAD_BIT(a2, p2 + idx2[i], x);
+ CUMO_BIT_DIGIT x;
+ CUMO_LOAD_BIT(a2, p2 + idx2[i], x);
  *(dtype*)(p1 + (i * s1)) = m_from_real(x);
  }
  }
 
- __global__ void <%="cumo_#{c_iter}_index_stride_kernel"%>(char *p1, size_t p2, BIT_DIGIT *a2, size_t *idx1, ssize_t s2, uint64_t n)
+ __global__ void <%="cumo_#{c_iter}_index_stride_kernel"%>(char *p1, size_t p2, CUMO_BIT_DIGIT *a2, size_t *idx1, ssize_t s2, uint64_t n)
  {
  for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
- BIT_DIGIT x;
- LOAD_BIT(a2, p2 + (i * s2), x);
+ CUMO_BIT_DIGIT x;
+ CUMO_LOAD_BIT(a2, p2 + (i * s2), x);
  *(dtype*)(p1 + idx1[i]) = m_from_real(x);
  }
  }
 
- __global__ void <%="cumo_#{c_iter}_stride_stride_kernel"%>(char *p1, size_t p2, BIT_DIGIT *a2, ssize_t s1, ssize_t s2, uint64_t n)
+ __global__ void <%="cumo_#{c_iter}_stride_stride_kernel"%>(char *p1, size_t p2, CUMO_BIT_DIGIT *a2, ssize_t s1, ssize_t s2, uint64_t n)
  {
  for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
- BIT_DIGIT x;
- LOAD_BIT(a2, p2 + (i * s2), x);
+ CUMO_BIT_DIGIT x;
+ CUMO_LOAD_BIT(a2, p2 + (i * s2), x);
  *(dtype*)(p1 + (i * s1)) = m_from_real(x);
  }
  }
 
- void <%="cumo_#{c_iter}_index_index_kernel_launch"%>(char *p1, size_t p2, BIT_DIGIT *a2, size_t *idx1, size_t *idx2, uint64_t n)
+ void <%="cumo_#{c_iter}_index_index_kernel_launch"%>(char *p1, size_t p2, CUMO_BIT_DIGIT *a2, size_t *idx1, size_t *idx2, uint64_t n)
  {
- size_t gridDim = get_gridDim(n);
- size_t blockDim = get_blockDim(n);
- <%="cumo_#{c_iter}_index_index_kernel"%><<<gridDim, blockDim>>>(p1,p2,a2,idx1,idx2,n);
+ size_t grid_dim = cumo_get_grid_dim(n);
+ size_t block_dim = cumo_get_block_dim(n);
+ <%="cumo_#{c_iter}_index_index_kernel"%><<<grid_dim, block_dim>>>(p1,p2,a2,idx1,idx2,n);
  }
 
- void <%="cumo_#{c_iter}_stride_index_kernel_launch"%>(char *p1, size_t p2, BIT_DIGIT *a2, ssize_t s1, size_t *idx2, uint64_t n)
+ void <%="cumo_#{c_iter}_stride_index_kernel_launch"%>(char *p1, size_t p2, CUMO_BIT_DIGIT *a2, ssize_t s1, size_t *idx2, uint64_t n)
  {
- size_t gridDim = get_gridDim(n);
- size_t blockDim = get_blockDim(n);
- <%="cumo_#{c_iter}_stride_index_kernel"%><<<gridDim, blockDim>>>(p1,p2,a2,s1,idx2,n);
+ size_t grid_dim = cumo_get_grid_dim(n);
+ size_t block_dim = cumo_get_block_dim(n);
+ <%="cumo_#{c_iter}_stride_index_kernel"%><<<grid_dim, block_dim>>>(p1,p2,a2,s1,idx2,n);
  }
 
- void <%="cumo_#{c_iter}_index_stride_kernel_launch"%>(char *p1, size_t p2, BIT_DIGIT *a2, size_t *idx1, ssize_t s2, uint64_t n)
+ void <%="cumo_#{c_iter}_index_stride_kernel_launch"%>(char *p1, size_t p2, CUMO_BIT_DIGIT *a2, size_t *idx1, ssize_t s2, uint64_t n)
  {
- size_t gridDim = get_gridDim(n);
- size_t blockDim = get_blockDim(n);
- <%="cumo_#{c_iter}_index_stride_kernel"%><<<gridDim, blockDim>>>(p1,p2,a2,idx1,s2,n);
+ size_t grid_dim = cumo_get_grid_dim(n);
+ size_t block_dim = cumo_get_block_dim(n);
+ <%="cumo_#{c_iter}_index_stride_kernel"%><<<grid_dim, block_dim>>>(p1,p2,a2,idx1,s2,n);
  }
 
- void <%="cumo_#{c_iter}_stride_stride_kernel_launch"%>(char *p1, size_t p2, BIT_DIGIT *a2, ssize_t s1, ssize_t s2, uint64_t n)
+ void <%="cumo_#{c_iter}_stride_stride_kernel_launch"%>(char *p1, size_t p2, CUMO_BIT_DIGIT *a2, ssize_t s1, ssize_t s2, uint64_t n)
  {
- size_t gridDim = get_gridDim(n);
- size_t blockDim = get_blockDim(n);
- <%="cumo_#{c_iter}_stride_stride_kernel"%><<<gridDim, blockDim>>>(p1,p2,a2,s1,s2,n);
+ size_t grid_dim = cumo_get_grid_dim(n);
+ size_t block_dim = cumo_get_block_dim(n);
+ <%="cumo_#{c_iter}_stride_stride_kernel"%><<<grid_dim, block_dim>>>(p1,p2,a2,s1,s2,n);
  }
 
  <% end %>
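Note: these kernels read single bits out of a packed CUMO_BIT_DIGIT word array before widening them to the destination dtype. A simplified host-side C stand-in for that bit load follows; the word type, NB, and the LOAD_BIT macro here are illustrative only, not the gem's actual CUMO_BIT_DIGIT/CUMO_LOAD_BIT definitions.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t BIT_DIGIT;               /* stand-in; cumo's word width may differ */
#define NB (sizeof(BIT_DIGIT) * 8)

/* Load the bit at absolute bit position `pos` from the packed array `a`. */
#define LOAD_BIT(a, pos, out) \
    do { (out) = ((a)[(pos) / NB] >> ((pos) % NB)) & 1u; } while (0)

int main(void)
{
    BIT_DIGIT a[2] = {0x5u, 0x1u};        /* bits 0 and 2 set in word 0; bit 32 set in word 1 */
    for (size_t pos = 0; pos < 40; pos++) {
        BIT_DIGIT x;
        LOAD_BIT(a, pos, x);
        if (x) printf("bit %zu is set\n", pos);
    }
    return 0;
}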
@@ -6,46 +6,46 @@ void <%="cumo_#{c_iter}_stride_stride_kernel_launch"%>(char *p1, char *p2, ssize
  //<% end %>
 
  static void
- <%=c_iter%>(na_loop_t *const lp)
+ <%=c_iter%>(cumo_na_loop_t *const lp)
  {
  size_t i, s1, s2;
  char *p1, *p2;
  size_t *idx1, *idx2;
 
- INIT_COUNTER(lp, i);
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
+ CUMO_INIT_COUNTER(lp, i);
+ CUMO_INIT_PTR_IDX(lp, 0, p1, s1, idx1);
+ CUMO_INIT_PTR_IDX(lp, 1, p2, s2, idx2);
  //<% if c_iter.include? 'robject' %>
- SHOW_SYNCHRONIZE_FIXME_WARNING_ONCE("<%=name%>", "<%=type_name%>");
+ CUMO_SHOW_SYNCHRONIZE_FIXME_WARNING_ONCE("<%=name%>", "<%=type_name%>");
  {
  <%=dtype%> x;
  dtype y;
  if (idx2) {
  if (idx1) {
  for (; i--;) {
- GET_DATA_INDEX(p2,idx2,<%=dtype%>,x);
+ CUMO_GET_DATA_INDEX(p2,idx2,<%=dtype%>,x);
  y = <%=macro%>(x);
- SET_DATA_INDEX(p1,idx1,dtype,y);
+ CUMO_SET_DATA_INDEX(p1,idx1,dtype,y);
  }
  } else {
  for (; i--;) {
- GET_DATA_INDEX(p2,idx2,<%=dtype%>,x);
+ CUMO_GET_DATA_INDEX(p2,idx2,<%=dtype%>,x);
  y = <%=macro%>(x);
- SET_DATA_STRIDE(p1,s1,dtype,y);
+ CUMO_SET_DATA_STRIDE(p1,s1,dtype,y);
  }
  }
  } else {
  if (idx1) {
  for (; i--;) {
- GET_DATA_STRIDE(p2,s2,<%=dtype%>,x);
+ CUMO_GET_DATA_STRIDE(p2,s2,<%=dtype%>,x);
  y = <%=macro%>(x);
- SET_DATA_INDEX(p1,idx1,dtype,y);
+ CUMO_SET_DATA_INDEX(p1,idx1,dtype,y);
  }
  } else {
  for (; i--;) {
- GET_DATA_STRIDE(p2,s2,<%=dtype%>,x);
+ CUMO_GET_DATA_STRIDE(p2,s2,<%=dtype%>,x);
  y = <%=macro%>(x);
- SET_DATA_STRIDE(p1,s1,dtype,y);
+ CUMO_SET_DATA_STRIDE(p1,s1,dtype,y);
  }
  }
  }
@@ -73,9 +73,9 @@ static void
  static VALUE
  <%=c_func(:nodef)%>(VALUE self, VALUE obj)
  {
- ndfunc_arg_in_t ain[2] = {{OVERWRITE,0},{Qnil,0}};
- ndfunc_t ndf = { <%=c_iter%>, FULL_LOOP, 2, 0, ain, 0 };
+ cumo_ndfunc_arg_in_t ain[2] = {{CUMO_OVERWRITE,0},{Qnil,0}};
+ cumo_ndfunc_t ndf = { <%=c_iter%>, CUMO_FULL_LOOP, 2, 0, ain, 0 };
 
- na_ndloop(&ndf, 2, self, obj);
+ cumo_na_ndloop(&ndf, 2, self, obj);
  return self;
  }
@@ -29,30 +29,30 @@ __global__ void <%="cumo_#{c_iter}_stride_stride_kernel"%>(char *p1, char *p2, s
 
  void <%="cumo_#{c_iter}_index_index_kernel_launch"%>(char *p1, char *p2, size_t *idx1, size_t *idx2, uint64_t n)
  {
- size_t gridDim = get_gridDim(n);
- size_t blockDim = get_blockDim(n);
- <%="cumo_#{c_iter}_index_index_kernel"%><<<gridDim, blockDim>>>(p1,p2,idx1,idx2,n);
+ size_t grid_dim = cumo_get_grid_dim(n);
+ size_t block_dim = cumo_get_block_dim(n);
+ <%="cumo_#{c_iter}_index_index_kernel"%><<<grid_dim, block_dim>>>(p1,p2,idx1,idx2,n);
  }
 
  void <%="cumo_#{c_iter}_stride_index_kernel_launch"%>(char *p1, char *p2, ssize_t s1, size_t *idx2, uint64_t n)
  {
- size_t gridDim = get_gridDim(n);
- size_t blockDim = get_blockDim(n);
- <%="cumo_#{c_iter}_stride_index_kernel"%><<<gridDim, blockDim>>>(p1,p2,s1,idx2,n);
+ size_t grid_dim = cumo_get_grid_dim(n);
+ size_t block_dim = cumo_get_block_dim(n);
+ <%="cumo_#{c_iter}_stride_index_kernel"%><<<grid_dim, block_dim>>>(p1,p2,s1,idx2,n);
  }
 
  void <%="cumo_#{c_iter}_index_stride_kernel_launch"%>(char *p1, char *p2, size_t *idx1, ssize_t s2, uint64_t n)
  {
- size_t gridDim = get_gridDim(n);
- size_t blockDim = get_blockDim(n);
- <%="cumo_#{c_iter}_index_stride_kernel"%><<<gridDim, blockDim>>>(p1,p2,idx1,s2,n);
+ size_t grid_dim = cumo_get_grid_dim(n);
+ size_t block_dim = cumo_get_block_dim(n);
+ <%="cumo_#{c_iter}_index_stride_kernel"%><<<grid_dim, block_dim>>>(p1,p2,idx1,s2,n);
  }
 
  void <%="cumo_#{c_iter}_stride_stride_kernel_launch"%>(char *p1, char *p2, ssize_t s1, ssize_t s2, uint64_t n)
  {
- size_t gridDim = get_gridDim(n);
- size_t blockDim = get_blockDim(n);
- <%="cumo_#{c_iter}_stride_stride_kernel"%><<<gridDim, blockDim>>>(p1,p2,s1,s2,n);
+ size_t grid_dim = cumo_get_grid_dim(n);
+ size_t block_dim = cumo_get_block_dim(n);
+ <%="cumo_#{c_iter}_stride_stride_kernel"%><<<grid_dim, block_dim>>>(p1,p2,s1,s2,n);
  }
 
  <% end %>
@@ -1,5 +1,5 @@
  static void
- <%=c_iter%>(na_loop_t *const lp)
+ <%=c_iter%>(cumo_na_loop_t *const lp)
  {
  size_t i, s1;
  char *p1;
@@ -7,20 +7,20 @@ static void
  dtype x;
  volatile VALUE a, y;
 
- INIT_COUNTER(lp, i);
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
+ CUMO_INIT_COUNTER(lp, i);
+ CUMO_INIT_PTR_IDX(lp, 0, p1, s1, idx1);
  a = rb_ary_new2(i);
  rb_ary_push(lp->args[1].value, a);
- //SHOW_SYNCHRONIZE_FIXME_WARNING_ONCE("<%=name%>", "<%=type_name%>");
+ //CUMO_SHOW_SYNCHRONIZE_FIXME_WARNING_ONCE("<%=name%>", "<%=type_name%>");
  if (idx1) {
  for (; i--;) {
- GET_DATA_INDEX(p1,idx1,dtype,x);
+ CUMO_GET_DATA_INDEX(p1,idx1,dtype,x);
  y = m_data_to_num(x);
  rb_ary_push(a,y);
  }
  } else {
  for (; i--;) {
- GET_DATA_STRIDE(p1,s1,dtype,x);
+ CUMO_GET_DATA_STRIDE(p1,s1,dtype,x);
  y = m_data_to_num(x);
  rb_ary_push(a,y);
  }
@@ -35,9 +35,9 @@ static void
  static VALUE
  <%=c_func(0)%>(VALUE self)
  {
- ndfunc_arg_in_t ain[3] = {{Qnil,0},{sym_loop_opt},{sym_option}};
- ndfunc_arg_out_t aout[1] = {{rb_cArray,0}}; // dummy?
- ndfunc_t ndf = { <%=c_iter%>, FULL_LOOP_NIP, 3, 1, ain, aout };
+ cumo_ndfunc_arg_in_t ain[3] = {{Qnil,0},{cumo_sym_loop_opt},{cumo_sym_option}};
+ cumo_ndfunc_arg_out_t aout[1] = {{rb_cArray,0}}; // dummy?
+ cumo_ndfunc_t ndf = { <%=c_iter%>, CUMO_FULL_LOOP_NIP, 3, 1, ain, aout };
  cumo_cuda_runtime_check_status(cudaDeviceSynchronize());
- return na_ndloop_cast_narray_to_rarray(&ndf, self, Qnil);
+ return cumo_na_ndloop_cast_narray_to_rarray(&ndf, self, Qnil);
  }
@@ -8,47 +8,47 @@ void <%="cumo_#{c_iter}_contiguous_kernel_launch"%>(char *p1, char *p2, uint64_t
  <% end %>
 
  static void
- <%=c_iter%>(na_loop_t *const lp)
+ <%=c_iter%>(cumo_na_loop_t *const lp)
  {
  size_t n;
  char *p1, *p2;
  ssize_t s1, s2;
  size_t *idx1, *idx2;
 
- INIT_COUNTER(lp, n);
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
+ CUMO_INIT_COUNTER(lp, n);
+ CUMO_INIT_PTR_IDX(lp, 0, p1, s1, idx1);
+ CUMO_INIT_PTR_IDX(lp, 1, p2, s2, idx2);
 
  <% if type_name == 'robject' || name == 'map' %>
  {
  size_t i;
  dtype x;
- SHOW_SYNCHRONIZE_FIXME_WARNING_ONCE("<%=name%>", "<%=type_name%>");
+ CUMO_SHOW_SYNCHRONIZE_FIXME_WARNING_ONCE("<%=name%>", "<%=type_name%>");
  if (idx1) {
  if (idx2) {
  for (i=0; i<n; i++) {
- GET_DATA_INDEX(p1,idx1,dtype,x);
+ CUMO_GET_DATA_INDEX(p1,idx1,dtype,x);
  x = m_<%=name%>(x);
- SET_DATA_INDEX(p2,idx2,dtype,x);
+ CUMO_SET_DATA_INDEX(p2,idx2,dtype,x);
  }
  } else {
  for (i=0; i<n; i++) {
- GET_DATA_INDEX(p1,idx1,dtype,x);
+ CUMO_GET_DATA_INDEX(p1,idx1,dtype,x);
  x = m_<%=name%>(x);
- SET_DATA_STRIDE(p2,s2,dtype,x);
+ CUMO_SET_DATA_STRIDE(p2,s2,dtype,x);
  }
  }
  } else {
  if (idx2) {
  for (i=0; i<n; i++) {
- GET_DATA_STRIDE(p1,s1,dtype,x);
+ CUMO_GET_DATA_STRIDE(p1,s1,dtype,x);
  x = m_<%=name%>(x);
- SET_DATA_INDEX(p2,idx2,dtype,x);
+ CUMO_SET_DATA_INDEX(p2,idx2,dtype,x);
  }
  } else {
  //<% if need_align %>
- if (is_aligned(p1,sizeof(dtype)) &&
- is_aligned(p2,sizeof(dtype)) ) {
+ if (cumo_is_aligned(p1,sizeof(dtype)) &&
+ cumo_is_aligned(p2,sizeof(dtype)) ) {
  if (s1 == sizeof(dtype) &&
  s2 == sizeof(dtype) ) {
  for (i=0; i<n; i++) {
@@ -56,8 +56,8 @@ static void
  }
  return;
  }
- if (is_aligned_step(s1,sizeof(dtype)) &&
- is_aligned_step(s2,sizeof(dtype)) ) {
+ if (cumo_is_aligned_step(s1,sizeof(dtype)) &&
+ cumo_is_aligned_step(s2,sizeof(dtype)) ) {
  //<% end %>
  for (i=0; i<n; i++) {
  *(dtype*)p2 = m_<%=name%>(*(dtype*)p1);
@@ -69,9 +69,9 @@ static void
  }
  }
  for (i=0; i<n; i++) {
- GET_DATA_STRIDE(p1,s1,dtype,x);
+ CUMO_GET_DATA_STRIDE(p1,s1,dtype,x);
  x = m_<%=name%>(x);
- SET_DATA_STRIDE(p2,s2,dtype,x);
+ CUMO_SET_DATA_STRIDE(p2,s2,dtype,x);
  }
  //<% end %>
  }
@@ -90,15 +90,15 @@ static void
  <%="cumo_#{c_iter}_stride_index_kernel_launch"%>(p1,p2,s1,idx2,n);
  } else {
  //<% if need_align %>
- if (is_aligned(p1,sizeof(dtype)) &&
- is_aligned(p2,sizeof(dtype)) ) {
+ if (cumo_is_aligned(p1,sizeof(dtype)) &&
+ cumo_is_aligned(p2,sizeof(dtype)) ) {
  if (s1 == sizeof(dtype) &&
  s2 == sizeof(dtype) ) {
  <%="cumo_#{c_iter}_contiguous_kernel_launch"%>(p1,p2,n);
  return;
  }
- if (is_aligned_step(s1,sizeof(dtype)) &&
- is_aligned_step(s2,sizeof(dtype)) ) {
+ if (cumo_is_aligned_step(s1,sizeof(dtype)) &&
+ cumo_is_aligned_step(s2,sizeof(dtype)) ) {
  //<% end %>
  <%="cumo_#{c_iter}_stride_stride_kernel_launch"%>(p1,p2,s1,s2,n);
  return;
@@ -121,12 +121,12 @@ static void
  static VALUE
  <%=c_func(0)%>(VALUE self)
  {
- ndfunc_arg_in_t ain[1] = {{cT,0}};
- ndfunc_arg_out_t aout[1] = {{cT,0}};
- ndfunc_t ndf = {<%=c_iter%>, FULL_LOOP, 1,1, ain,aout};
+ cumo_ndfunc_arg_in_t ain[1] = {{cT,0}};
+ cumo_ndfunc_arg_out_t aout[1] = {{cT,0}};
+ cumo_ndfunc_t ndf = {<%=c_iter%>, CUMO_FULL_LOOP, 1,1, ain,aout};
 
  <% if name == 'map' %>
  cumo_cuda_runtime_check_status(cudaDeviceSynchronize());
  <% end %>
- return na_ndloop(&ndf, 1, self);
+ return cumo_na_ndloop(&ndf, 1, self);
  }
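Note: the iterator bodies throughout these hunks reduce to the same idea: walk a raw char* buffer using either an explicit byte stride or an index table of byte offsets, and the 0.1.1 change merely prefixes the access macros with CUMO_. A small self-contained C sketch of that access pattern follows; the GET_DATA_STRIDE and SET_DATA_INDEX macros here are simplified stand-ins, not the gem's actual CUMO_GET_DATA_STRIDE/CUMO_SET_DATA_INDEX definitions.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the element-access macros used by the iterators:
 * load a typed value from a char* cursor and advance it by the byte stride,
 * or address elements through an index table of byte offsets. */
#define GET_DATA_STRIDE(p, stride, type, out) \
    do { (out) = *(type *)(p); (p) += (stride); } while (0)
#define SET_DATA_INDEX(p, idx, type, val) \
    do { *(type *)((p) + *(idx)) = (val); (idx)++; } while (0)

int main(void)
{
    double src[4] = {1.0, 2.0, 3.0, 4.0};
    double dst[4] = {0};
    size_t offsets[4] = {0, 8, 16, 24};   /* byte offsets into dst */

    char *p1 = (char *)src;
    char *p2 = (char *)dst;
    size_t *idx2 = offsets;
    size_t s1 = sizeof(double);           /* contiguous byte stride for the input */

    for (size_t i = 4; i--;) {
        double x;
        GET_DATA_STRIDE(p1, s1, double, x);   /* read from the strided input */
        x = -x;                               /* the "unary op" step */
        SET_DATA_INDEX(p2, idx2, double, x);  /* write through the index table */
    }
    for (int i = 0; i < 4; i++) printf("%g\n", dst[i]);
    return 0;
}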