cumo 0.1.0
- checksums.yaml +7 -0
- data/.gitignore +27 -0
- data/.travis.yml +5 -0
- data/3rd_party/mkmf-cu/.gitignore +36 -0
- data/3rd_party/mkmf-cu/Gemfile +3 -0
- data/3rd_party/mkmf-cu/LICENSE +21 -0
- data/3rd_party/mkmf-cu/README.md +36 -0
- data/3rd_party/mkmf-cu/Rakefile +11 -0
- data/3rd_party/mkmf-cu/bin/mkmf-cu-nvcc +4 -0
- data/3rd_party/mkmf-cu/lib/mkmf-cu.rb +32 -0
- data/3rd_party/mkmf-cu/lib/mkmf-cu/cli.rb +80 -0
- data/3rd_party/mkmf-cu/lib/mkmf-cu/nvcc.rb +157 -0
- data/3rd_party/mkmf-cu/mkmf-cu.gemspec +16 -0
- data/3rd_party/mkmf-cu/test/test_mkmf-cu.rb +67 -0
- data/CODE_OF_CONDUCT.md +46 -0
- data/Gemfile +8 -0
- data/LICENSE.txt +82 -0
- data/README.md +252 -0
- data/Rakefile +43 -0
- data/bench/broadcast_fp32.rb +138 -0
- data/bench/cumo_bench.rb +193 -0
- data/bench/numo_bench.rb +138 -0
- data/bench/reduction_fp32.rb +117 -0
- data/bin/console +14 -0
- data/bin/setup +8 -0
- data/cumo.gemspec +32 -0
- data/ext/cumo/cuda/cublas.c +278 -0
- data/ext/cumo/cuda/driver.c +421 -0
- data/ext/cumo/cuda/memory_pool.cpp +185 -0
- data/ext/cumo/cuda/memory_pool_impl.cpp +308 -0
- data/ext/cumo/cuda/memory_pool_impl.hpp +370 -0
- data/ext/cumo/cuda/memory_pool_impl_test.cpp +554 -0
- data/ext/cumo/cuda/nvrtc.c +207 -0
- data/ext/cumo/cuda/runtime.c +167 -0
- data/ext/cumo/cumo.c +148 -0
- data/ext/cumo/depend.erb +58 -0
- data/ext/cumo/extconf.rb +179 -0
- data/ext/cumo/include/cumo.h +25 -0
- data/ext/cumo/include/cumo/compat.h +23 -0
- data/ext/cumo/include/cumo/cuda/cublas.h +153 -0
- data/ext/cumo/include/cumo/cuda/cumo_thrust.hpp +187 -0
- data/ext/cumo/include/cumo/cuda/cumo_thrust_complex.hpp +79 -0
- data/ext/cumo/include/cumo/cuda/driver.h +22 -0
- data/ext/cumo/include/cumo/cuda/memory_pool.h +28 -0
- data/ext/cumo/include/cumo/cuda/nvrtc.h +22 -0
- data/ext/cumo/include/cumo/cuda/runtime.h +40 -0
- data/ext/cumo/include/cumo/indexer.h +238 -0
- data/ext/cumo/include/cumo/intern.h +142 -0
- data/ext/cumo/include/cumo/intern_fwd.h +38 -0
- data/ext/cumo/include/cumo/intern_kernel.h +6 -0
- data/ext/cumo/include/cumo/narray.h +429 -0
- data/ext/cumo/include/cumo/narray_kernel.h +149 -0
- data/ext/cumo/include/cumo/ndloop.h +95 -0
- data/ext/cumo/include/cumo/reduce_kernel.h +126 -0
- data/ext/cumo/include/cumo/template.h +158 -0
- data/ext/cumo/include/cumo/template_kernel.h +77 -0
- data/ext/cumo/include/cumo/types/bit.h +40 -0
- data/ext/cumo/include/cumo/types/bit_kernel.h +34 -0
- data/ext/cumo/include/cumo/types/complex.h +402 -0
- data/ext/cumo/include/cumo/types/complex_kernel.h +414 -0
- data/ext/cumo/include/cumo/types/complex_macro.h +382 -0
- data/ext/cumo/include/cumo/types/complex_macro_kernel.h +186 -0
- data/ext/cumo/include/cumo/types/dcomplex.h +46 -0
- data/ext/cumo/include/cumo/types/dcomplex_kernel.h +13 -0
- data/ext/cumo/include/cumo/types/dfloat.h +47 -0
- data/ext/cumo/include/cumo/types/dfloat_kernel.h +14 -0
- data/ext/cumo/include/cumo/types/float_def.h +34 -0
- data/ext/cumo/include/cumo/types/float_def_kernel.h +39 -0
- data/ext/cumo/include/cumo/types/float_macro.h +191 -0
- data/ext/cumo/include/cumo/types/float_macro_kernel.h +158 -0
- data/ext/cumo/include/cumo/types/int16.h +24 -0
- data/ext/cumo/include/cumo/types/int16_kernel.h +23 -0
- data/ext/cumo/include/cumo/types/int32.h +24 -0
- data/ext/cumo/include/cumo/types/int32_kernel.h +19 -0
- data/ext/cumo/include/cumo/types/int64.h +24 -0
- data/ext/cumo/include/cumo/types/int64_kernel.h +19 -0
- data/ext/cumo/include/cumo/types/int8.h +24 -0
- data/ext/cumo/include/cumo/types/int8_kernel.h +19 -0
- data/ext/cumo/include/cumo/types/int_macro.h +67 -0
- data/ext/cumo/include/cumo/types/int_macro_kernel.h +48 -0
- data/ext/cumo/include/cumo/types/real_accum.h +486 -0
- data/ext/cumo/include/cumo/types/real_accum_kernel.h +101 -0
- data/ext/cumo/include/cumo/types/robj_macro.h +80 -0
- data/ext/cumo/include/cumo/types/robj_macro_kernel.h +0 -0
- data/ext/cumo/include/cumo/types/robject.h +27 -0
- data/ext/cumo/include/cumo/types/robject_kernel.h +7 -0
- data/ext/cumo/include/cumo/types/scomplex.h +46 -0
- data/ext/cumo/include/cumo/types/scomplex_kernel.h +13 -0
- data/ext/cumo/include/cumo/types/sfloat.h +48 -0
- data/ext/cumo/include/cumo/types/sfloat_kernel.h +14 -0
- data/ext/cumo/include/cumo/types/uint16.h +25 -0
- data/ext/cumo/include/cumo/types/uint16_kernel.h +20 -0
- data/ext/cumo/include/cumo/types/uint32.h +25 -0
- data/ext/cumo/include/cumo/types/uint32_kernel.h +20 -0
- data/ext/cumo/include/cumo/types/uint64.h +25 -0
- data/ext/cumo/include/cumo/types/uint64_kernel.h +20 -0
- data/ext/cumo/include/cumo/types/uint8.h +25 -0
- data/ext/cumo/include/cumo/types/uint8_kernel.h +20 -0
- data/ext/cumo/include/cumo/types/uint_macro.h +58 -0
- data/ext/cumo/include/cumo/types/uint_macro_kernel.h +38 -0
- data/ext/cumo/include/cumo/types/xint_macro.h +169 -0
- data/ext/cumo/include/cumo/types/xint_macro_kernel.h +88 -0
- data/ext/cumo/narray/SFMT-params.h +97 -0
- data/ext/cumo/narray/SFMT-params19937.h +46 -0
- data/ext/cumo/narray/SFMT.c +620 -0
- data/ext/cumo/narray/SFMT.h +167 -0
- data/ext/cumo/narray/array.c +638 -0
- data/ext/cumo/narray/data.c +961 -0
- data/ext/cumo/narray/gen/cogen.rb +56 -0
- data/ext/cumo/narray/gen/cogen_kernel.rb +58 -0
- data/ext/cumo/narray/gen/def/bit.rb +37 -0
- data/ext/cumo/narray/gen/def/dcomplex.rb +39 -0
- data/ext/cumo/narray/gen/def/dfloat.rb +37 -0
- data/ext/cumo/narray/gen/def/int16.rb +36 -0
- data/ext/cumo/narray/gen/def/int32.rb +36 -0
- data/ext/cumo/narray/gen/def/int64.rb +36 -0
- data/ext/cumo/narray/gen/def/int8.rb +36 -0
- data/ext/cumo/narray/gen/def/robject.rb +37 -0
- data/ext/cumo/narray/gen/def/scomplex.rb +39 -0
- data/ext/cumo/narray/gen/def/sfloat.rb +37 -0
- data/ext/cumo/narray/gen/def/uint16.rb +36 -0
- data/ext/cumo/narray/gen/def/uint32.rb +36 -0
- data/ext/cumo/narray/gen/def/uint64.rb +36 -0
- data/ext/cumo/narray/gen/def/uint8.rb +36 -0
- data/ext/cumo/narray/gen/erbpp2.rb +346 -0
- data/ext/cumo/narray/gen/narray_def.rb +268 -0
- data/ext/cumo/narray/gen/spec.rb +425 -0
- data/ext/cumo/narray/gen/tmpl/accum.c +86 -0
- data/ext/cumo/narray/gen/tmpl/accum_binary.c +121 -0
- data/ext/cumo/narray/gen/tmpl/accum_binary_kernel.cu +61 -0
- data/ext/cumo/narray/gen/tmpl/accum_index.c +119 -0
- data/ext/cumo/narray/gen/tmpl/accum_index_kernel.cu +66 -0
- data/ext/cumo/narray/gen/tmpl/accum_kernel.cu +12 -0
- data/ext/cumo/narray/gen/tmpl/alloc_func.c +107 -0
- data/ext/cumo/narray/gen/tmpl/allocate.c +37 -0
- data/ext/cumo/narray/gen/tmpl/aref.c +66 -0
- data/ext/cumo/narray/gen/tmpl/aref_cpu.c +50 -0
- data/ext/cumo/narray/gen/tmpl/aset.c +56 -0
- data/ext/cumo/narray/gen/tmpl/binary.c +162 -0
- data/ext/cumo/narray/gen/tmpl/binary2.c +70 -0
- data/ext/cumo/narray/gen/tmpl/binary2_kernel.cu +15 -0
- data/ext/cumo/narray/gen/tmpl/binary_kernel.cu +31 -0
- data/ext/cumo/narray/gen/tmpl/binary_s.c +45 -0
- data/ext/cumo/narray/gen/tmpl/binary_s_kernel.cu +15 -0
- data/ext/cumo/narray/gen/tmpl/bincount.c +181 -0
- data/ext/cumo/narray/gen/tmpl/cast.c +44 -0
- data/ext/cumo/narray/gen/tmpl/cast_array.c +13 -0
- data/ext/cumo/narray/gen/tmpl/class.c +9 -0
- data/ext/cumo/narray/gen/tmpl/class_kernel.cu +6 -0
- data/ext/cumo/narray/gen/tmpl/clip.c +121 -0
- data/ext/cumo/narray/gen/tmpl/coerce_cast.c +10 -0
- data/ext/cumo/narray/gen/tmpl/complex_accum_kernel.cu +129 -0
- data/ext/cumo/narray/gen/tmpl/cond_binary.c +68 -0
- data/ext/cumo/narray/gen/tmpl/cond_binary_kernel.cu +18 -0
- data/ext/cumo/narray/gen/tmpl/cond_unary.c +46 -0
- data/ext/cumo/narray/gen/tmpl/cum.c +50 -0
- data/ext/cumo/narray/gen/tmpl/each.c +47 -0
- data/ext/cumo/narray/gen/tmpl/each_with_index.c +70 -0
- data/ext/cumo/narray/gen/tmpl/ewcomp.c +79 -0
- data/ext/cumo/narray/gen/tmpl/ewcomp_kernel.cu +19 -0
- data/ext/cumo/narray/gen/tmpl/extract.c +22 -0
- data/ext/cumo/narray/gen/tmpl/extract_cpu.c +26 -0
- data/ext/cumo/narray/gen/tmpl/extract_data.c +53 -0
- data/ext/cumo/narray/gen/tmpl/eye.c +105 -0
- data/ext/cumo/narray/gen/tmpl/eye_kernel.cu +19 -0
- data/ext/cumo/narray/gen/tmpl/fill.c +52 -0
- data/ext/cumo/narray/gen/tmpl/fill_kernel.cu +29 -0
- data/ext/cumo/narray/gen/tmpl/float_accum_kernel.cu +106 -0
- data/ext/cumo/narray/gen/tmpl/format.c +62 -0
- data/ext/cumo/narray/gen/tmpl/format_to_a.c +49 -0
- data/ext/cumo/narray/gen/tmpl/frexp.c +38 -0
- data/ext/cumo/narray/gen/tmpl/gemm.c +203 -0
- data/ext/cumo/narray/gen/tmpl/init_class.c +20 -0
- data/ext/cumo/narray/gen/tmpl/init_module.c +12 -0
- data/ext/cumo/narray/gen/tmpl/inspect.c +21 -0
- data/ext/cumo/narray/gen/tmpl/lib.c +50 -0
- data/ext/cumo/narray/gen/tmpl/lib_kernel.cu +24 -0
- data/ext/cumo/narray/gen/tmpl/logseq.c +102 -0
- data/ext/cumo/narray/gen/tmpl/logseq_kernel.cu +31 -0
- data/ext/cumo/narray/gen/tmpl/map_with_index.c +98 -0
- data/ext/cumo/narray/gen/tmpl/median.c +66 -0
- data/ext/cumo/narray/gen/tmpl/minmax.c +47 -0
- data/ext/cumo/narray/gen/tmpl/module.c +9 -0
- data/ext/cumo/narray/gen/tmpl/module_kernel.cu +1 -0
- data/ext/cumo/narray/gen/tmpl/new_dim0.c +15 -0
- data/ext/cumo/narray/gen/tmpl/new_dim0_kernel.cu +8 -0
- data/ext/cumo/narray/gen/tmpl/poly.c +50 -0
- data/ext/cumo/narray/gen/tmpl/pow.c +97 -0
- data/ext/cumo/narray/gen/tmpl/pow_kernel.cu +29 -0
- data/ext/cumo/narray/gen/tmpl/powint.c +17 -0
- data/ext/cumo/narray/gen/tmpl/qsort.c +212 -0
- data/ext/cumo/narray/gen/tmpl/rand.c +168 -0
- data/ext/cumo/narray/gen/tmpl/rand_norm.c +121 -0
- data/ext/cumo/narray/gen/tmpl/real_accum_kernel.cu +75 -0
- data/ext/cumo/narray/gen/tmpl/seq.c +112 -0
- data/ext/cumo/narray/gen/tmpl/seq_kernel.cu +43 -0
- data/ext/cumo/narray/gen/tmpl/set2.c +57 -0
- data/ext/cumo/narray/gen/tmpl/sort.c +48 -0
- data/ext/cumo/narray/gen/tmpl/sort_index.c +111 -0
- data/ext/cumo/narray/gen/tmpl/store.c +41 -0
- data/ext/cumo/narray/gen/tmpl/store_array.c +187 -0
- data/ext/cumo/narray/gen/tmpl/store_array_kernel.cu +58 -0
- data/ext/cumo/narray/gen/tmpl/store_bit.c +86 -0
- data/ext/cumo/narray/gen/tmpl/store_bit_kernel.cu +66 -0
- data/ext/cumo/narray/gen/tmpl/store_from.c +81 -0
- data/ext/cumo/narray/gen/tmpl/store_from_kernel.cu +58 -0
- data/ext/cumo/narray/gen/tmpl/store_kernel.cu +3 -0
- data/ext/cumo/narray/gen/tmpl/store_numeric.c +9 -0
- data/ext/cumo/narray/gen/tmpl/to_a.c +43 -0
- data/ext/cumo/narray/gen/tmpl/unary.c +132 -0
- data/ext/cumo/narray/gen/tmpl/unary2.c +60 -0
- data/ext/cumo/narray/gen/tmpl/unary_kernel.cu +72 -0
- data/ext/cumo/narray/gen/tmpl/unary_ret2.c +34 -0
- data/ext/cumo/narray/gen/tmpl/unary_s.c +86 -0
- data/ext/cumo/narray/gen/tmpl/unary_s_kernel.cu +58 -0
- data/ext/cumo/narray/gen/tmpl_bit/allocate.c +24 -0
- data/ext/cumo/narray/gen/tmpl_bit/aref.c +54 -0
- data/ext/cumo/narray/gen/tmpl_bit/aref_cpu.c +57 -0
- data/ext/cumo/narray/gen/tmpl_bit/aset.c +56 -0
- data/ext/cumo/narray/gen/tmpl_bit/binary.c +98 -0
- data/ext/cumo/narray/gen/tmpl_bit/bit_count.c +64 -0
- data/ext/cumo/narray/gen/tmpl_bit/bit_count_cpu.c +88 -0
- data/ext/cumo/narray/gen/tmpl_bit/bit_count_kernel.cu +76 -0
- data/ext/cumo/narray/gen/tmpl_bit/bit_reduce.c +133 -0
- data/ext/cumo/narray/gen/tmpl_bit/each.c +48 -0
- data/ext/cumo/narray/gen/tmpl_bit/each_with_index.c +70 -0
- data/ext/cumo/narray/gen/tmpl_bit/extract.c +30 -0
- data/ext/cumo/narray/gen/tmpl_bit/extract_cpu.c +29 -0
- data/ext/cumo/narray/gen/tmpl_bit/fill.c +69 -0
- data/ext/cumo/narray/gen/tmpl_bit/format.c +64 -0
- data/ext/cumo/narray/gen/tmpl_bit/format_to_a.c +51 -0
- data/ext/cumo/narray/gen/tmpl_bit/inspect.c +21 -0
- data/ext/cumo/narray/gen/tmpl_bit/mask.c +136 -0
- data/ext/cumo/narray/gen/tmpl_bit/none_p.c +14 -0
- data/ext/cumo/narray/gen/tmpl_bit/store_array.c +108 -0
- data/ext/cumo/narray/gen/tmpl_bit/store_bit.c +70 -0
- data/ext/cumo/narray/gen/tmpl_bit/store_from.c +60 -0
- data/ext/cumo/narray/gen/tmpl_bit/to_a.c +47 -0
- data/ext/cumo/narray/gen/tmpl_bit/unary.c +81 -0
- data/ext/cumo/narray/gen/tmpl_bit/where.c +90 -0
- data/ext/cumo/narray/gen/tmpl_bit/where2.c +95 -0
- data/ext/cumo/narray/index.c +880 -0
- data/ext/cumo/narray/kwargs.c +153 -0
- data/ext/cumo/narray/math.c +142 -0
- data/ext/cumo/narray/narray.c +1948 -0
- data/ext/cumo/narray/ndloop.c +2105 -0
- data/ext/cumo/narray/rand.c +45 -0
- data/ext/cumo/narray/step.c +474 -0
- data/ext/cumo/narray/struct.c +886 -0
- data/lib/cumo.rb +3 -0
- data/lib/cumo/cuda.rb +11 -0
- data/lib/cumo/cuda/compile_error.rb +36 -0
- data/lib/cumo/cuda/compiler.rb +161 -0
- data/lib/cumo/cuda/device.rb +47 -0
- data/lib/cumo/cuda/link_state.rb +31 -0
- data/lib/cumo/cuda/module.rb +40 -0
- data/lib/cumo/cuda/nvrtc_program.rb +27 -0
- data/lib/cumo/linalg.rb +12 -0
- data/lib/cumo/narray.rb +2 -0
- data/lib/cumo/narray/extra.rb +1278 -0
- data/lib/erbpp.rb +294 -0
- data/lib/erbpp/line_number.rb +137 -0
- data/lib/erbpp/narray_def.rb +381 -0
- data/numo-narray-version +1 -0
- data/run.gdb +7 -0
- metadata +353 -0

data/ext/cumo/narray/gen/tmpl/sort_index.c
@@ -0,0 +1,111 @@
+<% (is_float ? ["_ignan","_prnan"] : [""]).each do |j|
+   [64,32].each do |i| %>
+#define idx_t int<%=i%>_t
+static void
+<%=type_name%>_index<%=i%>_qsort<%=j%>(na_loop_t *const lp)
+{
+    size_t i, n, idx;
+    char *d_ptr, *i_ptr, *o_ptr;
+    ssize_t d_step, i_step, o_step;
+    char **ptr;
+
+    INIT_COUNTER(lp, n);
+    INIT_PTR(lp, 0, d_ptr, d_step);
+    INIT_PTR(lp, 1, i_ptr, i_step);
+    INIT_PTR(lp, 2, o_ptr, o_step);
+
+    ptr = (char**)(lp->opt_ptr);
+
+    //printf("(ptr=%lx, d_ptr=%lx,d_step=%ld, i_ptr=%lx,i_step=%ld, o_ptr=%lx,o_step=%ld)\n",(size_t)ptr,(size_t)d_ptr,(ssize_t)d_step,(size_t)i_ptr,(ssize_t)i_step,(size_t)o_ptr,(ssize_t)o_step);
+
+    if (n==1) {
+        *(idx_t*)o_ptr = *(idx_t*)(i_ptr);
+        return;
+    }
+
+    for (i=0; i<n; i++) {
+        ptr[i] = d_ptr + d_step * i;
+        //printf("(%ld,%.3f)",i,*(double*)ptr[i]);
+    }
+
+    <%=type_name%>_index_qsort<%=j%>(ptr, n, sizeof(dtype*));
+
+    //d_ptr = lp->args[0].ptr;
+    //printf("(d_ptr=%lx)\n",(size_t)d_ptr);
+
+    for (i=0; i<n; i++) {
+        idx = (ptr[i] - d_ptr) / d_step;
+        *(idx_t*)o_ptr = *(idx_t*)(i_ptr + i_step * idx);
+        //printf("(idx[%ld]=%ld,%d)",i,idx,*(idx_t*)o_ptr);
+        o_ptr += o_step;
+    }
+    //printf("\n");
+}
+#undef idx_t
+<% end;end %>
+
+/*
+  <%=name%>. Returns an index array of sort result.
+<% if is_float %>
+  @overload <%=name%>(axis:nil, nan:false)
+  @param [TrueClass] nan If true, propagete NaN. If false, ignore NaN.
+<% else %>
+  @overload <%=name%>(axis:nil)
+<% end %>
+  @param [Numeric,Array,Range] axis Affected dimensions.
+  @return [Integer,Cumo::Int] returns result index of <%=name%>.
+  @example
+    Cumo::NArray[3,4,1,2].sort_index => Cumo::Int32[2,3,0,1]
+*/
+static VALUE
+<%=c_func(-1)%>(int argc, VALUE *argv, VALUE self)
+{
+    size_t size;
+    narray_t *na;
+    VALUE idx, tmp, reduce, res;
+    char *buf;
+    ndfunc_arg_in_t ain[3] = {{cT,0},{0,0},{sym_reduce,0}};
+    ndfunc_arg_out_t aout[1] = {{0,0,0}};
+    ndfunc_t ndf = {0, STRIDE_LOOP_NIP|NDF_FLAT_REDUCE|NDF_CUM, 3,1, ain,aout};
+
+    GetNArray(self,na);
+    if (na->ndim==0) {
+        return INT2FIX(0);
+    }
+    if (na->size > (~(u_int32_t)0)) {
+        ain[1].type =
+        aout[0].type = cumo_cInt64;
+        idx = nary_new(cumo_cInt64, na->ndim, na->shape);
+<% if is_float %>
+        ndf.func = <%=type_name%>_index64_qsort_ignan;
+        reduce = na_reduce_dimension(argc, argv, 1, &self, &ndf,
+                                     <%=type_name%>_index64_qsort_prnan);
+<% else %>
+        ndf.func = <%=type_name%>_index64_qsort;
+        reduce = na_reduce_dimension(argc, argv, 1, &self, &ndf, 0);
+<% end %>
+    } else {
+        ain[1].type =
+        aout[0].type = cumo_cInt32;
+        idx = nary_new(cumo_cInt32, na->ndim, na->shape);
+<% if is_float %>
+        ndf.func = <%=type_name%>_index32_qsort_ignan;
+        reduce = na_reduce_dimension(argc, argv, 1, &self, &ndf,
+                                     <%=type_name%>_index32_qsort_prnan);
+<% else %>
+        ndf.func = <%=type_name%>_index32_qsort;
+        reduce = na_reduce_dimension(argc, argv, 1, &self, &ndf, 0);
+<% end %>
+    }
+    rb_funcall(idx, rb_intern("seq"), 0);
+
+    size = na->size*sizeof(void*); // max capa
+    buf = rb_alloc_tmp_buffer(&tmp, size);
+
+    SHOW_SYNCHRONIZE_FIXME_WARNING_ONCE("<%=name%>", "<%=type_name%>");
+    cudaDeviceSynchronize();
+
+    res = na_ndloop3(&ndf, buf, 3, self, idx, reduce);
+    rb_free_tmp_buffer(&tmp);
+    return res;
+}
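
The sort_index template above never moves the data itself: it fills a scratch array with pointers to the elements, sorts those pointers with the type's qsort routine, and then recovers each element's original position as (ptr[i] - d_ptr) / d_step. A minimal, standalone host-side sketch of that pointer-argsort trick on a plain double array (the comparator and sample values are illustrative, not part of Cumo):

#include <stdio.h>
#include <stdlib.h>

/* Compare two elements through their pointers, so that sorting the pointer
   array orders the elements without ever moving the data. */
static int cmp_double_ptr(const void *a, const void *b)
{
    double x = **(double *const *)a;
    double y = **(double *const *)b;
    return (x > y) - (x < y);
}

int main(void)
{
    double data[] = {3.0, 4.0, 1.0, 2.0};
    size_t n = sizeof(data) / sizeof(data[0]);
    double *ptr[4];
    size_t i;

    for (i = 0; i < n; i++) ptr[i] = &data[i];        /* scratch pointer array */
    qsort(ptr, n, sizeof(double *), cmp_double_ptr);  /* sort pointers, not data */

    for (i = 0; i < n; i++) {
        /* original index = distance of the sorted pointer from the base */
        printf("%zu ", (size_t)(ptr[i] - data));
    }
    printf("\n");                                     /* prints: 2 3 0 1 */
    return 0;
}

This is the same result the doc comment promises for Cumo::NArray[3,4,1,2].sort_index, i.e. Cumo::Int32[2,3,0,1].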

data/ext/cumo/narray/gen/tmpl/store.c
@@ -0,0 +1,41 @@
+<% children.each do |c|%>
+<%= c.result %>
+
+<% end %>
+/*
+  Store elements to Cumo::<%=class_name%> from other.
+  @overload store(other)
+  @param [Object] other
+  @return [Cumo::<%=class_name%>] self
+*/
+static VALUE
+<%=c_func(1)%>(VALUE self, VALUE obj)
+{
+    VALUE r, klass;
+
+    klass = CLASS_OF(obj);
+
+    <% definitions.each do |x| %>
+    if (<%=x.condition("klass")%>) {
+        <%=x.c_func%>(self,obj);
+        return self;
+    }
+    <% end %>
+
+    if (IsNArray(obj)) {
+        r = rb_funcall(obj, rb_intern("coerce_cast"), 1, cT);
+        if (CLASS_OF(r)==cT) {
+            <%=c_func%>(self,r);
+            return self;
+        }
+    }
+
+<% if is_object %>
+    robject_store_numeric(self,obj);
+<% else %>
+    rb_raise(nary_eCastError, "unknown conversion from %s to %s",
+             rb_class2name(CLASS_OF(obj)),
+             rb_class2name(CLASS_OF(self)));
+<% end %>
+    return self;
+}

data/ext/cumo/narray/gen/tmpl/store_array.c
@@ -0,0 +1,187 @@
+//<% unless c_iter.include? 'robject' %>
+void <%="cumo_#{c_iter}_index_kernel_launch"%>(char *p1, size_t *idx1, dtype* z, uint64_t n);
+void <%="cumo_#{c_iter}_stride_kernel_launch"%>(char *p1, ssize_t s1, dtype* z, uint64_t n);
+void <%="cumo_#{c_iter}_index_scalar_kernel_launch"%>(char *p1, size_t *idx1, dtype z, uint64_t n);
+void <%="cumo_#{c_iter}_stride_scalar_kernel_launch"%>(char *p1, ssize_t s1, dtype z, uint64_t n);
+
+static void CUDART_CB
+<%=c_iter%>_callback(cudaStream_t stream, cudaError_t status, void *data)
+{
+    xfree(data);
+}
+//<% end %>
+
+static void
+<%=c_iter%>(na_loop_t *const lp)
+{
+    size_t i, n;
+    size_t i1, n1;
+    VALUE  v1, *ptr;
+    char   *p1;
+    size_t s1, *idx1;
+    VALUE  x;
+    double y;
+    dtype  z;
+    size_t len, c;
+    double beg, step;
+
+    INIT_COUNTER(lp, n);
+    INIT_PTR_IDX(lp, 0, p1, s1, idx1);
+    v1 = lp->args[1].value;
+    i = 0;
+
+    if (lp->args[1].ptr) {
+        if (v1 == Qtrue) {
+            iter_<%=type_name%>_store_<%=type_name%>(lp);
+            i = lp->args[1].shape[0];
+            if (idx1) {
+                idx1 += i;
+            } else {
+                p1 += s1 * i;
+            }
+        }
+        goto loop_end;
+    }
+
+    ptr = &v1;
+
+    switch(TYPE(v1)) {
+    case T_ARRAY:
+        n1 = RARRAY_LEN(v1);
+        ptr = RARRAY_PTR(v1);
+        break;
+    case T_NIL:
+        n1 = 0;
+        break;
+    default:
+        n1 = 1;
+    }
+
+    //<% if c_iter.include? 'robject' %>
+    {
+        SHOW_SYNCHRONIZE_FIXME_WARNING_ONCE("store_<%=name%>", "<%=type_name%>");
+        cumo_cuda_runtime_check_status(cudaDeviceSynchronize());
+
+        if (idx1) {
+            for (i=i1=0; i1<n1 && i<n; i++,i1++) {
+                x = ptr[i1];
+                if (rb_obj_is_kind_of(x, rb_cRange) || rb_obj_is_kind_of(x, na_cStep)) {
+                    nary_step_sequence(x,&len,&beg,&step);
+                    for (c=0; c<len && i<n; c++,i++) {
+                        y = beg + step * c;
+                        z = m_from_double(y);
+                        SET_DATA_INDEX(p1, idx1, dtype, z);
+                    }
+                }
+                else if (TYPE(x) != T_ARRAY) {
+                    z = m_num_to_data(x);
+                    SET_DATA_INDEX(p1, idx1, dtype, z);
+                }
+            }
+        } else {
+            for (i=i1=0; i1<n1 && i<n; i++,i1++) {
+                x = ptr[i1];
+                if (rb_obj_is_kind_of(x, rb_cRange) || rb_obj_is_kind_of(x, na_cStep)) {
+                    nary_step_sequence(x,&len,&beg,&step);
+                    for (c=0; c<len && i<n; c++,i++) {
+                        y = beg + step * c;
+                        z = m_from_double(y);
+                        SET_DATA_STRIDE(p1, s1, dtype, z);
+                    }
+                }
+                else if (TYPE(x) != T_ARRAY) {
+                    z = m_num_to_data(x);
+                    SET_DATA_STRIDE(p1, s1, dtype, z);
+                }
+            }
+        }
+    }
+    //<% else %>
+    {
+        // To copy ruby non-contiguous array values into cuda memory asynchronously, we do
+        // 1. copy to contiguous heap memory
+        // 2. copy to contiguous device memory
+        // 3. launch kernel to copy the contiguous device memory into strided (or indexed) narray cuda memory
+        // 4. free the contiguous device memory
+        // 5. run callback to free the heap memory after kernel finishes
+        //
+        // FYI: We may have to care of cuda stream callback serializes stream execution when we support stream.
+        // https://devtalk.nvidia.com/default/topic/822942/why-does-cudastreamaddcallback-serialize-kernel-execution-and-break-concurrency-/
+        dtype* host_z = ALLOC_N(dtype, n);
+        for (i=i1=0; i1<n1 && i<n; i1++) {
+            x = ptr[i1];
+            if (rb_obj_is_kind_of(x, rb_cRange) || rb_obj_is_kind_of(x, na_cStep)) {
+                nary_step_sequence(x,&len,&beg,&step);
+                for (c=0; c<len && i<n; c++,i++) {
+                    y = beg + step * c;
+                    host_z[i] = m_from_double(y);
+                }
+            }
+            else if (TYPE(x) != T_ARRAY) {
+                host_z[i] = m_num_to_data(x);
+                i++;
+            }
+        }
+
+        if (!idx1 && s1 == sizeof(dtype)) {
+            // optimization: Since p1 is contiguous, we skip creating another contiguous device memory
+            cudaError_t status = cudaMemcpyAsync(p1,host_z,sizeof(dtype)*i,cudaMemcpyHostToDevice,0);
+            if (status == 0) {
+                cumo_cuda_runtime_check_status(cudaStreamAddCallback(0,<%=c_iter%>_callback,host_z,0));
+            } else {
+                xfree(host_z);
+            }
+            cumo_cuda_runtime_check_status(status);
+        } else {
+            dtype* device_z = (dtype*)cumo_cuda_runtime_malloc(sizeof(dtype) * n);
+            cudaError_t status = cudaMemcpyAsync(device_z,host_z,sizeof(dtype)*i,cudaMemcpyHostToDevice,0);
+            if (status == 0) {
+                if (idx1) {
+                    <%="cumo_#{c_iter}_index_kernel_launch"%>(p1,idx1,device_z,i);
+                } else {
+                    <%="cumo_#{c_iter}_stride_kernel_launch"%>(p1,s1,device_z,i);
+                }
+                cumo_cuda_runtime_check_status(cudaStreamAddCallback(0,<%=c_iter%>_callback,host_z,0));
+            } else {
+                xfree(host_z);
+            }
+            cumo_cuda_runtime_free((void*)device_z);
+            cumo_cuda_runtime_check_status(status);
+        }
+    }
+    //<% end %>
+
+ loop_end:
+    z = m_zero;
+    //<% if c_iter.include? 'robject' %>
+    {
+        if (idx1) {
+            for (; i<n; i++) {
+                SET_DATA_INDEX(p1, idx1, dtype, z);
+            }
+        } else {
+            for (; i<n; i++) {
+                SET_DATA_STRIDE(p1, s1, dtype, z);
+            }
+        }
+    }
+    //<% else %>
+    {
+        if (idx1) {
+            <%="cumo_#{c_iter}_index_scalar_kernel_launch"%>(p1,idx1+i,z,n-i);
+        } else {
+            <%="cumo_#{c_iter}_stride_scalar_kernel_launch"%>(p1+s1*i,s1,z,n-i);
+        }
+    }
+    //<% end %>
+}
+
+static VALUE
+<%=c_func%>(VALUE self, VALUE rary)
+{
+    ndfunc_arg_in_t ain[2] = {{OVERWRITE,0},{rb_cArray,0}};
+    ndfunc_t ndf = {<%=c_iter%>, FULL_LOOP, 2, 0, ain, 0};
+
+    na_ndloop_store_rarray(&ndf, self, rary);
+    return self;
+}
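
The numbered comment inside the non-robject branch describes the asynchronous path for storing a Ruby array: pack the values into a contiguous heap buffer, cudaMemcpyAsync it to a contiguous device buffer, launch a scatter kernel into the strided or indexed narray memory, and free the heap buffer from a cudaStreamAddCallback once the stream reaches that point. A condensed, self-contained sketch of that pattern on the default stream (the scatter kernel, buffer sizes, and names are illustrative, not the generated Cumo code):

#include <cstdlib>
#include <cstdint>
#include <cuda_runtime.h>

// Simplified stand-in for the generated cumo_*_stride_kernel: scatter a
// packed device buffer into a strided destination.
__global__ void scatter_stride(char *dst, size_t stride, const double *src, uint64_t n)
{
    for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        *(double *)(dst + i * stride) = src[i];
    }
}

// Stream callback: free the host staging buffer once all work queued on the
// stream before the callback (the memcpy and the kernel) has completed.
static void CUDART_CB free_host_buffer(cudaStream_t, cudaError_t, void *data)
{
    free(data);
}

int main()
{
    const uint64_t n = 1024;

    // 1. pack values into a contiguous host buffer
    double *host_z = (double *)malloc(n * sizeof(double));
    for (uint64_t i = 0; i < n; i++) host_z[i] = (double)i;

    double *device_z = nullptr;
    char *dst = nullptr;
    cudaMalloc(&device_z, n * sizeof(double));
    cudaMalloc(&dst, n * sizeof(double));

    // 2. asynchronous copy host -> contiguous device buffer (default stream)
    cudaMemcpyAsync(device_z, host_z, n * sizeof(double), cudaMemcpyHostToDevice, 0);
    // 3. scatter the contiguous buffer into the (here trivially strided) destination
    scatter_stride<<<64, 128>>>(dst, sizeof(double), device_z, n);
    // 5. free the host buffer only after the stream reaches this point
    cudaStreamAddCallback(0, free_host_buffer, host_z, 0);

    cudaDeviceSynchronize();   // let the copy, kernel, and callback finish
    cudaFree(device_z);        // 4. release the staging device buffer
    cudaFree(dst);
    return 0;
}

The callback is what lets the store path return without a blocking synchronize; the template's FYI comment records the caveat that such callbacks serialize the stream, which will matter once multiple streams are supported.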

data/ext/cumo/narray/gen/tmpl/store_array_kernel.cu
@@ -0,0 +1,58 @@
+<% unless c_iter.include? 'robject' %>
+__global__ void <%="cumo_#{c_iter}_index_kernel"%>(char *p1, size_t *idx1, dtype* z, uint64_t n)
+{
+    for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
+        *(dtype*)(p1 + idx1[i]) = z[i];
+    }
+}
+
+__global__ void <%="cumo_#{c_iter}_stride_kernel"%>(char *p1, ssize_t s1, dtype* z, uint64_t n)
+{
+    for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
+        *(dtype*)(p1 + (i * s1)) = z[i];
+    }
+}
+
+__global__ void <%="cumo_#{c_iter}_index_scalar_kernel"%>(char *p1, size_t *idx1, dtype z, uint64_t n)
+{
+    for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
+        *(dtype*)(p1 + idx1[i]) = z;
+    }
+}
+
+__global__ void <%="cumo_#{c_iter}_stride_scalar_kernel"%>(char *p1, ssize_t s1, dtype z, uint64_t n)
+{
+    for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
+        *(dtype*)(p1 + (i * s1)) = z;
+    }
+}
+
+void <%="cumo_#{c_iter}_index_kernel_launch"%>(char *p1, size_t *idx1, dtype* z, uint64_t n)
+{
+    size_t gridDim = get_gridDim(n);
+    size_t blockDim = get_blockDim(n);
+    <%="cumo_#{c_iter}_index_kernel"%><<<gridDim, blockDim>>>(p1,idx1,z,n);
+}
+
+void <%="cumo_#{c_iter}_stride_kernel_launch"%>(char *p1, ssize_t s1, dtype* z, uint64_t n)
+{
+    size_t gridDim = get_gridDim(n);
+    size_t blockDim = get_blockDim(n);
+    <%="cumo_#{c_iter}_stride_kernel"%><<<gridDim, blockDim>>>(p1,s1,z,n);
+}
+
+void <%="cumo_#{c_iter}_index_scalar_kernel_launch"%>(char *p1, size_t *idx1, dtype z, uint64_t n)
+{
+    size_t gridDim = get_gridDim(n);
+    size_t blockDim = get_blockDim(n);
+    <%="cumo_#{c_iter}_index_scalar_kernel"%><<<gridDim, blockDim>>>(p1,idx1,z,n);
+}
+
+void <%="cumo_#{c_iter}_stride_scalar_kernel_launch"%>(char *p1, ssize_t s1, dtype z, uint64_t n)
+{
+    size_t gridDim = get_gridDim(n);
+    size_t blockDim = get_blockDim(n);
+    <%="cumo_#{c_iter}_stride_scalar_kernel"%><<<gridDim, blockDim>>>(p1,s1,z,n);
+}
+
+<% end %>
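
Every kernel in this template walks its elements with a grid-stride loop, so correctness does not depend on launching exactly ceil(n / blockDim) blocks; get_gridDim and get_blockDim, which are defined elsewhere in the extension and not shown in this diff, only need to return some reasonable cap. A sketch of one typical way such helpers are written, with illustrative sizes that are not necessarily what Cumo uses:

#include <cstdint>
#include <cuda_runtime.h>

// Illustrative launch-size helpers: a fixed block size and a grid capped at
// max_blocks. Because the kernels use grid-stride loops, a capped grid still
// covers every element; extra elements are handled by further loop iterations
// inside each thread.
static inline size_t example_blockDim(uint64_t) { return 128; }

static inline size_t example_gridDim(uint64_t n)
{
    const size_t max_blocks = 1024;
    size_t blocks = (size_t)((n + 127) / 128);   // enough blocks to cover n once
    return blocks < max_blocks ? blocks : max_blocks;
}

__global__ void fill_stride(char *p1, size_t s1, double z, uint64_t n)
{
    // Grid-stride loop: thread t handles t, t + blockDim*gridDim, ...
    for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        *(double *)(p1 + i * s1) = z;
    }
}

void fill_stride_launch(char *p1, size_t s1, double z, uint64_t n)
{
    if (n == 0) return;  // a zero-sized grid launch would be an error
    fill_stride<<<example_gridDim(n), example_blockDim(n)>>>(p1, s1, z, n);
}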

data/ext/cumo/narray/gen/tmpl/store_bit.c
@@ -0,0 +1,86 @@
+//<% unless c_iter.include? 'robject' %>
+void <%="cumo_#{c_iter}_index_index_kernel_launch"%>(char *p1, size_t p2, BIT_DIGIT *a2, size_t *idx1, size_t *idx2, uint64_t n);
+void <%="cumo_#{c_iter}_stride_index_kernel_launch"%>(char *p1, size_t p2, BIT_DIGIT *a2, ssize_t s1, size_t *idx2, uint64_t n);
+void <%="cumo_#{c_iter}_index_stride_kernel_launch"%>(char *p1, size_t p2, BIT_DIGIT *a2, size_t *idx1, ssize_t s2, uint64_t n);
+void <%="cumo_#{c_iter}_stride_stride_kernel_launch"%>(char *p1, size_t p2, BIT_DIGIT *a2, ssize_t s1, ssize_t s2, uint64_t n);
+//<% end %>
+
+static void
+<%=c_iter%>(na_loop_t *const lp)
+{
+    size_t  i;
+    char   *p1;
+    size_t  p2;
+    ssize_t s1, s2;
+    size_t *idx1, *idx2;
+    BIT_DIGIT *a2;
+
+    INIT_COUNTER(lp, i);
+    INIT_PTR_IDX(lp, 0, p1, s1, idx1);
+    INIT_PTR_BIT_IDX(lp, 1, a2, p2, s2, idx2);
+
+    //<% if c_iter.include? 'robject' %>
+    {
+        BIT_DIGIT x;
+        dtype y;
+        SHOW_SYNCHRONIZE_WARNING_ONCE("<%=name%>", "<%=type_name%>");
+        SHOW_SYNCHRONIZE_FIXME_WARNING_ONCE("<%=name%>", "<%=type_name%>");
+        if (idx2) {
+            if (idx1) {
+                for (; i--;) {
+                    LOAD_BIT(a2, p2+*idx2, x); idx2++;
+                    y = m_from_sint(x);
+                    SET_DATA_INDEX(p1,idx1,dtype,y);
+                }
+            } else {
+                for (; i--;) {
+                    LOAD_BIT(a2, p2+*idx2, x); idx2++;
+                    y = m_from_sint(x);
+                    SET_DATA_STRIDE(p1,s1,dtype,y);
+                }
+            }
+        } else {
+            if (idx1) {
+                for (; i--;) {
+                    LOAD_BIT(a2, p2, x); p2 += s2;
+                    y = m_from_sint(x);
+                    SET_DATA_INDEX(p1,idx1,dtype,y);
+                }
+            } else {
+                for (; i--;) {
+                    LOAD_BIT(a2, p2, x); p2 += s2;
+                    y = m_from_sint(x);
+                    SET_DATA_STRIDE(p1,s1,dtype,y);
+                }
+            }
+        }
+    }
+    //<% else %>
+    {
+        if (idx2) {
+            if (idx1) {
+                <%="cumo_#{c_iter}_index_index_kernel_launch"%>(p1,p2,a2,idx1,idx2,i);
+            } else {
+                <%="cumo_#{c_iter}_stride_index_kernel_launch"%>(p1,p2,a2,s1,idx2,i);
+            }
+        } else {
+            if (idx1) {
+                <%="cumo_#{c_iter}_index_stride_kernel_launch"%>(p1,p2,a2,idx1,s2,i);
+            } else {
+                <%="cumo_#{c_iter}_stride_stride_kernel_launch"%>(p1,p2,a2,s1,s2,i);
+            }
+        }
+    }
+    //<% end %>
+}
+
+
+static VALUE
+<%=c_func(:nodef)%>(VALUE self, VALUE obj)
+{
+    ndfunc_arg_in_t ain[2] = {{OVERWRITE,0},{Qnil,0}};
+    ndfunc_t ndf = {<%=c_iter%>, FULL_LOOP, 2,0, ain,0};
+
+    na_ndloop(&ndf, 2, self, obj);
+    return self;
+}
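
In this bit-store template the second operand is a packed bit array: a2 points at BIT_DIGIT words, p2 and s2 are an offset and stride counted in bits, and LOAD_BIT together with m_from_sint (both defined in Cumo's headers, not shown in this diff) read one bit and widen it to the element type. A standalone sketch of that idea, with a hand-written load_bit that only assumes the usual word-plus-bit-offset layout and is not a copy of Cumo's macro:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t BIT_DIGIT;               /* one storage word of the bit array */
#define NB (8 * sizeof(BIT_DIGIT))        /* bits per word */

/* Stand-in for LOAD_BIT: read the bit at bit-offset `pos` from the packed
   word array `a` (an assumption about the macro's intent). */
static unsigned load_bit(const BIT_DIGIT *a, size_t pos)
{
    return (a[pos / NB] >> (pos % NB)) & 1u;
}

int main(void)
{
    BIT_DIGIT a[2] = {0, 0};
    a[0] |= 1u << 3;                       /* set bit 3 */
    a[1] |= 1u << 1;                       /* set bit 33 (word 1, bit 1) */

    /* Walk the bit array with a bit offset p2 and bit stride s2, widening
       each bit to double, as the strided branch of the template does. */
    size_t p2 = 0, s2 = 3;
    for (int i = 0; i < 12; i++) {
        double y = (double)load_bit(a, p2);
        printf("%g ", y);
        p2 += s2;
    }
    printf("\n");                          /* bits 3 and 33 print as 1, the rest 0 */
    return 0;
}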