cumo 0.1.0 → 0.1.1
- checksums.yaml +5 -5
- data/.gitignore +1 -0
- data/3rd_party/LICENSE.txt +60 -0
- data/3rd_party/mkmf-cu/lib/mkmf-cu/cli.rb +13 -1
- data/LICENSE.txt +1 -62
- data/README.md +33 -29
- data/bench/cumo_bench.rb +47 -25
- data/bench/numo_bench.rb +27 -25
- data/docs/src-tree.md +16 -0
- data/ext/cumo/cuda/cublas.c +69 -219
- data/ext/cumo/cuda/memory_pool_impl.hpp +1 -0
- data/ext/cumo/cuda/runtime.c +2 -14
- data/ext/cumo/cumo.c +16 -16
- data/ext/cumo/include/cumo.h +2 -2
- data/ext/cumo/include/cumo/cuda/cublas.h +6 -129
- data/ext/cumo/include/cumo/cuda/runtime.h +16 -0
- data/ext/cumo/include/cumo/indexer.h +46 -63
- data/ext/cumo/include/cumo/intern.h +58 -112
- data/ext/cumo/include/cumo/narray.h +214 -185
- data/ext/cumo/include/cumo/narray_kernel.h +66 -37
- data/ext/cumo/include/cumo/ndloop.h +42 -42
- data/ext/cumo/include/cumo/reduce_kernel.h +55 -71
- data/ext/cumo/include/cumo/template.h +56 -51
- data/ext/cumo/include/cumo/template_kernel.h +31 -31
- data/ext/cumo/include/cumo/types/bit.h +3 -3
- data/ext/cumo/include/cumo/types/bit_kernel.h +2 -2
- data/ext/cumo/include/cumo/types/complex.h +126 -126
- data/ext/cumo/include/cumo/types/complex_kernel.h +126 -126
- data/ext/cumo/include/cumo/types/complex_macro.h +28 -28
- data/ext/cumo/include/cumo/types/complex_macro_kernel.h +20 -20
- data/ext/cumo/include/cumo/types/dcomplex.h +5 -5
- data/ext/cumo/include/cumo/types/dcomplex_kernel.h +1 -1
- data/ext/cumo/include/cumo/types/int_macro.h +1 -1
- data/ext/cumo/include/cumo/types/int_macro_kernel.h +1 -1
- data/ext/cumo/include/cumo/types/robj_macro.h +30 -30
- data/ext/cumo/include/cumo/types/scomplex.h +5 -5
- data/ext/cumo/include/cumo/types/scomplex_kernel.h +1 -1
- data/ext/cumo/narray/array.c +143 -143
- data/ext/cumo/narray/data.c +184 -184
- data/ext/cumo/narray/gen/cogen.rb +5 -2
- data/ext/cumo/narray/gen/cogen_kernel.rb +5 -2
- data/ext/cumo/narray/gen/def/dcomplex.rb +1 -1
- data/ext/cumo/narray/gen/def/scomplex.rb +1 -1
- data/ext/cumo/narray/gen/erbln.rb +132 -0
- data/ext/cumo/narray/gen/erbpp2.rb +18 -13
- data/ext/cumo/narray/gen/narray_def.rb +3 -3
- data/ext/cumo/narray/gen/spec.rb +2 -2
- data/ext/cumo/narray/gen/tmpl/accum.c +15 -15
- data/ext/cumo/narray/gen/tmpl/accum_binary.c +22 -22
- data/ext/cumo/narray/gen/tmpl/accum_binary_kernel.cu +3 -3
- data/ext/cumo/narray/gen/tmpl/accum_index.c +30 -30
- data/ext/cumo/narray/gen/tmpl/accum_index_kernel.cu +2 -2
- data/ext/cumo/narray/gen/tmpl/accum_kernel.cu +3 -3
- data/ext/cumo/narray/gen/tmpl/alloc_func.c +14 -14
- data/ext/cumo/narray/gen/tmpl/allocate.c +11 -11
- data/ext/cumo/narray/gen/tmpl/aref.c +2 -2
- data/ext/cumo/narray/gen/tmpl/aref_cpu.c +4 -4
- data/ext/cumo/narray/gen/tmpl/aset.c +2 -2
- data/ext/cumo/narray/gen/tmpl/binary.c +28 -28
- data/ext/cumo/narray/gen/tmpl/binary2.c +18 -18
- data/ext/cumo/narray/gen/tmpl/binary2_kernel.cu +3 -3
- data/ext/cumo/narray/gen/tmpl/binary_kernel.cu +6 -6
- data/ext/cumo/narray/gen/tmpl/binary_s.c +13 -13
- data/ext/cumo/narray/gen/tmpl/binary_s_kernel.cu +3 -3
- data/ext/cumo/narray/gen/tmpl/bincount.c +23 -23
- data/ext/cumo/narray/gen/tmpl/cast.c +7 -7
- data/ext/cumo/narray/gen/tmpl/cast_array.c +3 -3
- data/ext/cumo/narray/gen/tmpl/clip.c +38 -38
- data/ext/cumo/narray/gen/tmpl/complex_accum_kernel.cu +2 -2
- data/ext/cumo/narray/gen/tmpl/cond_binary.c +19 -19
- data/ext/cumo/narray/gen/tmpl/cond_binary_kernel.cu +7 -7
- data/ext/cumo/narray/gen/tmpl/cond_unary.c +15 -15
- data/ext/cumo/narray/gen/tmpl/cum.c +15 -15
- data/ext/cumo/narray/gen/tmpl/each.c +9 -9
- data/ext/cumo/narray/gen/tmpl/each_with_index.c +9 -9
- data/ext/cumo/narray/gen/tmpl/ewcomp.c +15 -15
- data/ext/cumo/narray/gen/tmpl/ewcomp_kernel.cu +3 -3
- data/ext/cumo/narray/gen/tmpl/extract_cpu.c +5 -5
- data/ext/cumo/narray/gen/tmpl/extract_data.c +12 -12
- data/ext/cumo/narray/gen/tmpl/eye.c +9 -9
- data/ext/cumo/narray/gen/tmpl/eye_kernel.cu +3 -3
- data/ext/cumo/narray/gen/tmpl/fill.c +9 -9
- data/ext/cumo/narray/gen/tmpl/fill_kernel.cu +6 -6
- data/ext/cumo/narray/gen/tmpl/float_accum_kernel.cu +1 -1
- data/ext/cumo/narray/gen/tmpl/format.c +11 -11
- data/ext/cumo/narray/gen/tmpl/format_to_a.c +8 -8
- data/ext/cumo/narray/gen/tmpl/frexp.c +13 -13
- data/ext/cumo/narray/gen/tmpl/gemm.c +252 -108
- data/ext/cumo/narray/gen/tmpl/inspect.c +1 -1
- data/ext/cumo/narray/gen/tmpl/lib.c +2 -2
- data/ext/cumo/narray/gen/tmpl/logseq.c +7 -7
- data/ext/cumo/narray/gen/tmpl/logseq_kernel.cu +6 -6
- data/ext/cumo/narray/gen/tmpl/map_with_index.c +17 -17
- data/ext/cumo/narray/gen/tmpl/median.c +10 -10
- data/ext/cumo/narray/gen/tmpl/minmax.c +10 -10
- data/ext/cumo/narray/gen/tmpl/new_dim0.c +3 -3
- data/ext/cumo/narray/gen/tmpl/poly.c +6 -6
- data/ext/cumo/narray/gen/tmpl/pow.c +28 -28
- data/ext/cumo/narray/gen/tmpl/pow_kernel.cu +6 -6
- data/ext/cumo/narray/gen/tmpl/rand.c +10 -10
- data/ext/cumo/narray/gen/tmpl/rand_norm.c +7 -7
- data/ext/cumo/narray/gen/tmpl/real_accum_kernel.cu +6 -6
- data/ext/cumo/narray/gen/tmpl/seq.c +7 -7
- data/ext/cumo/narray/gen/tmpl/seq_kernel.cu +6 -6
- data/ext/cumo/narray/gen/tmpl/set2.c +20 -20
- data/ext/cumo/narray/gen/tmpl/sort.c +11 -11
- data/ext/cumo/narray/gen/tmpl/sort_index.c +18 -18
- data/ext/cumo/narray/gen/tmpl/store.c +6 -6
- data/ext/cumo/narray/gen/tmpl/store_array.c +19 -19
- data/ext/cumo/narray/gen/tmpl/store_array_kernel.cu +12 -12
- data/ext/cumo/narray/gen/tmpl/store_bit.c +23 -23
- data/ext/cumo/narray/gen/tmpl/store_bit_kernel.cu +28 -28
- data/ext/cumo/narray/gen/tmpl/store_from.c +16 -16
- data/ext/cumo/narray/gen/tmpl/store_from_kernel.cu +12 -12
- data/ext/cumo/narray/gen/tmpl/to_a.c +10 -10
- data/ext/cumo/narray/gen/tmpl/unary.c +25 -25
- data/ext/cumo/narray/gen/tmpl/unary2.c +17 -17
- data/ext/cumo/narray/gen/tmpl/unary_kernel.cu +15 -15
- data/ext/cumo/narray/gen/tmpl/unary_ret2.c +13 -13
- data/ext/cumo/narray/gen/tmpl/unary_s.c +17 -17
- data/ext/cumo/narray/gen/tmpl/unary_s_kernel.cu +12 -12
- data/ext/cumo/narray/gen/tmpl_bit/allocate.c +9 -9
- data/ext/cumo/narray/gen/tmpl_bit/aref.c +2 -2
- data/ext/cumo/narray/gen/tmpl_bit/aref_cpu.c +5 -5
- data/ext/cumo/narray/gen/tmpl_bit/aset.c +2 -2
- data/ext/cumo/narray/gen/tmpl_bit/binary.c +29 -29
- data/ext/cumo/narray/gen/tmpl_bit/bit_count.c +14 -14
- data/ext/cumo/narray/gen/tmpl_bit/bit_count_cpu.c +21 -21
- data/ext/cumo/narray/gen/tmpl_bit/bit_count_kernel.cu +28 -28
- data/ext/cumo/narray/gen/tmpl_bit/bit_reduce.c +29 -29
- data/ext/cumo/narray/gen/tmpl_bit/each.c +10 -10
- data/ext/cumo/narray/gen/tmpl_bit/each_with_index.c +10 -10
- data/ext/cumo/narray/gen/tmpl_bit/extract.c +8 -8
- data/ext/cumo/narray/gen/tmpl_bit/extract_cpu.c +8 -8
- data/ext/cumo/narray/gen/tmpl_bit/fill.c +17 -17
- data/ext/cumo/narray/gen/tmpl_bit/format.c +14 -14
- data/ext/cumo/narray/gen/tmpl_bit/format_to_a.c +11 -11
- data/ext/cumo/narray/gen/tmpl_bit/inspect.c +3 -3
- data/ext/cumo/narray/gen/tmpl_bit/mask.c +33 -33
- data/ext/cumo/narray/gen/tmpl_bit/store_array.c +19 -19
- data/ext/cumo/narray/gen/tmpl_bit/store_bit.c +22 -22
- data/ext/cumo/narray/gen/tmpl_bit/store_from.c +18 -18
- data/ext/cumo/narray/gen/tmpl_bit/to_a.c +12 -12
- data/ext/cumo/narray/gen/tmpl_bit/unary.c +24 -24
- data/ext/cumo/narray/gen/tmpl_bit/where.c +16 -16
- data/ext/cumo/narray/gen/tmpl_bit/where2.c +20 -20
- data/ext/cumo/narray/index.c +213 -213
- data/ext/cumo/narray/math.c +27 -27
- data/ext/cumo/narray/narray.c +484 -484
- data/ext/cumo/narray/ndloop.c +259 -258
- data/ext/cumo/narray/rand.c +3 -3
- data/ext/cumo/narray/step.c +70 -70
- data/ext/cumo/narray/struct.c +139 -139
- metadata +6 -7
- data/ext/cumo/include/cumo/intern_fwd.h +0 -38
- data/lib/erbpp.rb +0 -294
- data/lib/erbpp/line_number.rb +0 -137
- data/lib/erbpp/narray_def.rb +0 -381
data/ext/cumo/narray/data.c
CHANGED
@@ -2,11 +2,11 @@
 #include "cumo/narray.h"
 #include "cumo/template.h"
 
-static VALUE
-static ID
-static ID
-static ID
-static ID
+static VALUE cumo_sym_mulsum;
+static ID cumo_id_mulsum;
+static ID cumo_id_respond_to_p;
+static ID cumo_id_store;
+static ID cumo_id_swap_byte;
 
 // ---------------------------------------------------------------------
 
@@ -16,9 +16,9 @@ static ID id_swap_byte;
 ssize_t s1, s2; \
 char *p1, *p2; \
 size_t *idx1, *idx2; \
-
-
-
+CUMO_INIT_COUNTER(lp, i); \
+CUMO_INIT_PTR_IDX(lp, 0, p1, s1, idx1); \
+CUMO_INIT_PTR_IDX(lp, 1, p2, s2, idx2); \
 if (idx1) { \
 if (idx2) { \
 for (; i--;) { \
@@ -52,7 +52,7 @@ static ID id_swap_byte;
 
 #define m_memcpy(src,dst) memcpy(dst,src,e)
 static void
-iter_copy_bytes(
+iter_copy_bytes(cumo_na_loop_t *const lp)
 {
 size_t e;
 e = lp->args[0].elmsz;
@@ -61,21 +61,21 @@ iter_copy_bytes(na_loop_t *const lp)
 }
 
 VALUE
-
+cumo_na_copy(VALUE self)
 {
 VALUE v;
-
-
-
+cumo_ndfunc_arg_in_t ain[1] = {{Qnil,0}};
+cumo_ndfunc_arg_out_t aout[1] = {{INT2FIX(0),0}};
+cumo_ndfunc_t ndf = { iter_copy_bytes, CUMO_FULL_LOOP, 1, 1, ain, aout };
 
-v =
+v = cumo_na_ndloop(&ndf, 1, self);
 return v;
 }
 
 VALUE
-
+cumo_na_store(VALUE self, VALUE src)
 {
-return rb_funcall(self,
+return rb_funcall(self,cumo_id_store,1,src);
 }
 
 // ---------------------------------------------------------------------
@@ -91,7 +91,7 @@ na_store(VALUE self, VALUE src)
 }
 
 static void
-iter_swap_byte(
+iter_swap_byte(cumo_na_loop_t *const lp)
 {
 char *b1, *b2;
 size_t e;
@@ -103,57 +103,57 @@ iter_swap_byte(na_loop_t *const lp)
 }
 
 static VALUE
-
+cumo_na_swap_byte(VALUE self)
 {
 VALUE v;
-
-
-
+cumo_ndfunc_arg_in_t ain[1] = {{Qnil,0}};
+cumo_ndfunc_arg_out_t aout[1] = {{INT2FIX(0),0}};
+cumo_ndfunc_t ndf = { iter_swap_byte, CUMO_FULL_LOOP|CUMO_NDF_ACCEPT_BYTESWAP,
 1, 1, ain, aout };
 
-v =
+v = cumo_na_ndloop(&ndf, 1, self);
 if (self!=v) {
-
+cumo_na_copy_flags(self, v);
 }
-
+CUMO_REVERSE_ENDIAN(v);
 return v;
 }
 
 
 static VALUE
-
+cumo_na_to_network(VALUE self)
 {
-if (
+if (CUMO_TEST_BIG_ENDIAN(self)) {
 return self;
 }
-return rb_funcall(self,
+return rb_funcall(self, cumo_id_swap_byte, 0);
 }
 
 static VALUE
-
+cumo_na_to_vacs(VALUE self)
 {
-if (
+if (CUMO_TEST_LITTLE_ENDIAN(self)) {
 return self;
 }
-return rb_funcall(self,
+return rb_funcall(self, cumo_id_swap_byte, 0);
 }
 
 static VALUE
-
+cumo_na_to_host(VALUE self)
 {
-if (
+if (CUMO_TEST_HOST_ORDER(self)) {
 return self;
 }
-return rb_funcall(self,
+return rb_funcall(self, cumo_id_swap_byte, 0);
 }
 
 static VALUE
-
+cumo_na_to_swapped(VALUE self)
 {
-if (
+if (CUMO_TEST_BYTE_SWAPPED(self)) {
 return self;
 }
-return rb_funcall(self,
+return rb_funcall(self, cumo_id_swap_byte, 0);
 }
 
 
@@ -163,7 +163,7 @@ static inline int
 check_axis(int axis, int ndim)
 {
 if (axis < -ndim || axis >= ndim) {
-rb_raise(
+rb_raise(cumo_na_eDimensionError,"invalid axis (%d for %d-dimension)",
 axis, ndim);
 }
 if (axis < 0) {
@@ -202,16 +202,16 @@ check_axis(int axis, int ndim)
 # [3, 7]]]
 */
 static VALUE
-
+cumo_na_swapaxes(VALUE self, VALUE a1, VALUE a2)
 {
 int i, j, ndim;
 size_t tmp_shape;
-
-
+cumo_stridx_t tmp_stridx;
+cumo_narray_view_t *na;
 volatile VALUE view;
 
-view =
-
+view = cumo_na_make_view(self);
+CumoGetNArrayView(view,na);
 
 ndim = na->base.ndim;
 i = check_axis(NUM2INT(a1), ndim);
@@ -228,20 +228,20 @@ na_swapaxes(VALUE self, VALUE a1, VALUE a2)
 }
 
 static VALUE
-
+cumo_na_transpose_map(VALUE self, int *map)
 {
 int i, ndim;
 size_t *shape;
-
-
+cumo_stridx_t *stridx;
+cumo_narray_view_t *na;
 volatile VALUE view;
 
-view =
-
+view = cumo_na_make_view(self);
+CumoGetNArrayView(view,na);
 
 ndim = na->base.ndim;
 shape = ALLOCA_N(size_t,ndim);
-stridx = ALLOCA_N(
+stridx = ALLOCA_N(cumo_stridx_t,ndim);
 
 for (i=0; i<ndim; i++) {
 shape[i] = na->base.shape[i];
@@ -258,27 +258,27 @@ na_transpose_map(VALUE self, int *map)
 #define SWAP(a,b,tmp) {tmp=a;a=b;b=tmp;}
 
 static VALUE
-
+cumo_na_transpose(int argc, VALUE *argv, VALUE self)
 {
 int ndim, *map, *permute;
 int i, d;
 bool is_positive, is_negative;
-
+cumo_narray_t *na1;
 
-
+CumoGetNArray(self,na1);
 ndim = na1->ndim;
 if (ndim < 2) {
 if (argc > 0) {
 rb_raise(rb_eArgError, "unnecessary argument for 1-d array");
 }
-return
+return cumo_na_make_view(self);
 }
 map = ALLOCA_N(int,ndim);
 if (argc == 0) {
 for (i=0; i < ndim; i++) {
 map[i] = ndim-1-i;
 }
-return
+return cumo_na_transpose_map(self,map);
 }
 // with argument
 if (argc > ndim) {
@@ -325,23 +325,23 @@ na_transpose(int argc, VALUE *argv, VALUE self)
 is_negative = 1;
 }
 }
-return
+return cumo_na_transpose_map(self,map);
 }
 
 //----------------------------------------------------------------------
 
 static void
-
+cumo_na_check_reshape(int argc, VALUE *argv, VALUE self, size_t *shape)
 {
 int i, unfixed=-1;
 size_t total=1;
-
+cumo_narray_t *na;
 
 if (argc == 0) {
 rb_raise(rb_eArgError, "No argrument");
 }
-
-if (
+CumoGetNArray(self,na);
+if (CUMO_NA_SIZE(na) == 0) {
 rb_raise(rb_eRuntimeError, "cannot reshape empty array");
 }
 
@@ -364,12 +364,12 @@ na_check_reshape(int argc, VALUE *argv, VALUE self, size_t *shape)
 }
 
 if (unfixed>=0) {
-if (
+if (CUMO_NA_SIZE(na) % total != 0) {
 rb_raise(rb_eArgError, "Total size size must be divisor");
 }
-shape[unfixed] =
+shape[unfixed] = CUMO_NA_SIZE(na) / total;
 }
-else if (total !=
+else if (total != CUMO_NA_SIZE(na)) {
 rb_raise(rb_eArgError, "Total size must be same");
 }
 }
@@ -384,19 +384,19 @@ na_check_reshape(int argc, VALUE *argv, VALUE self, size_t *shape)
 @example
 */
 static VALUE
-
+cumo_na_reshape_bang(int argc, VALUE *argv, VALUE self)
 {
 size_t *shape;
-
+cumo_narray_t *na;
 
-if (
+if (cumo_na_check_contiguous(self)==Qfalse) {
 rb_raise(rb_eStandardError, "cannot change shape of non-contiguous NArray");
 }
 shape = ALLOCA_N(size_t, argc);
-
+cumo_na_check_reshape(argc, argv, self, shape);
 
-
-
+CumoGetNArray(self, na);
+cumo_na_setup_shape(na, argc, shape);
 return self;
 }
 
@@ -410,44 +410,44 @@ na_reshape_bang(int argc, VALUE *argv, VALUE self)
 @example
 */
 static VALUE
-
+cumo_na_reshape(int argc, VALUE *argv, VALUE self)
 {
 size_t *shape;
-
+cumo_narray_t *na;
 VALUE copy;
 
 shape = ALLOCA_N(size_t, argc);
-
+cumo_na_check_reshape(argc, argv, self, shape);
 
 copy = rb_funcall(self, rb_intern("dup"), 0);
-
-
+CumoGetNArray(copy, na);
+cumo_na_setup_shape(na, argc, shape);
 return copy;
 }
 
 //----------------------------------------------------------------------
 
 VALUE
-
+cumo_na_flatten_dim(VALUE self, int sd)
 {
 int i, nd, fd;
 size_t j;
 size_t *c, *pos, *idx1, *idx2;
 size_t stride;
 size_t *shape, size;
-
-
-
+cumo_stridx_t sdx;
+cumo_narray_t *na;
+cumo_narray_view_t *na1, *na2;
 volatile VALUE view;
 
-
+CumoGetNArray(self,na);
 nd = na->ndim;
 
 if (nd==0) {
-return
+return cumo_na_make_view(self);
 }
 if (sd<0 || sd>=nd) {
-rb_bug("
+rb_bug("cumo_na_flaten_dim: start_dim (%d) out of range",sd);
 }
 
 // new shape
@@ -462,51 +462,51 @@ na_flatten_dim(VALUE self, int sd)
 shape[sd] = size;
 
 // new object
-view =
-
-
+view = cumo_na_s_allocate_view(rb_obj_class(self));
+cumo_na_copy_flags(self, view);
+CumoGetNArrayView(view, na2);
 
 // new stride
-
-na2->stridx = ALLOC_N(
+cumo_na_setup_shape((cumo_narray_t*)na2, sd+1, shape);
+na2->stridx = ALLOC_N(cumo_stridx_t,sd+1);
 
 switch(na->type) {
-case
-case
-stride =
+case CUMO_NARRAY_DATA_T:
+case CUMO_NARRAY_FILEMAP_T:
+stride = cumo_na_element_stride(self);
 for (i=sd+1; i--; ) {
 //printf("data: i=%d shpae[i]=%ld stride=%ld\n",i,shape[i],stride);
-
+CUMO_SDX_SET_STRIDE(na2->stridx[i],stride);
 stride *= shape[i];
 }
 na2->offset = 0;
 na2->data = self;
 break;
-case
-
+case CUMO_NARRAY_VIEW_T:
+CumoGetNArrayView(self, na1);
 na2->data = na1->data;
 na2->offset = na1->offset;
 for (i=0; i<sd; i++) {
-if (
-idx1 =
+if (CUMO_SDX_IS_INDEX(na1->stridx[i])) {
+idx1 = CUMO_SDX_GET_INDEX(na1->stridx[i]);
 idx2 = ALLOC_N(size_t, shape[i]);
 for (j=0; j<shape[i]; j++) {
 idx2[j] = idx1[j];
 }
-
+CUMO_SDX_SET_INDEX(na2->stridx[i],idx2);
 } else {
 na2->stridx[i] = na1->stridx[i];
-//printf("view: i=%d stridx=%d\n",i,
+//printf("view: i=%d stridx=%d\n",i,CUMO_SDX_GET_STRIDE(sdx));
 }
 }
 // flat dimenion == last dimension
-if (RTEST(
+if (RTEST(cumo_na_check_ladder(self,sd))) {
 //if (0) {
 na2->stridx[sd] = na1->stridx[nd-1];
 } else {
 // set index
 idx2 = ALLOC_N(size_t, shape[sd]);
-
+CUMO_SDX_SET_INDEX(na2->stridx[sd],idx2);
 // init for md-loop
 fd = nd-sd;
 c = ALLOC_N(size_t, fd);
@@ -517,10 +517,10 @@ na_flatten_dim(VALUE self, int sd)
 for (i=j=0;;) {
 for (; i<fd; i++) {
 sdx = na1->stridx[i+sd];
-if (
-pos[i+1] = pos[i] +
+if (CUMO_SDX_IS_INDEX(sdx)) {
+pos[i+1] = pos[i] + CUMO_SDX_GET_INDEX(sdx)[c[i]];
 } else {
-pos[i+1] = pos[i] +
+pos[i+1] = pos[i] + CUMO_SDX_GET_STRIDE(sdx)*c[i];
 }
 }
 idx2[j++] = pos[i];
@@ -542,9 +542,9 @@ na_flatten_dim(VALUE self, int sd)
 }
 
 VALUE
-
+cumo_na_flatten(VALUE self)
 {
-return
+return cumo_na_flatten_dim(self,0);
 }
 
 //----------------------------------------------------------------------
@@ -587,7 +587,7 @@ na_flatten(VALUE self)
 [15, 16, 17, 18, 4]]
 */
 static VALUE
-
+cumo_na_diagonal(int argc, VALUE *argv, VALUE self)
 {
 int i, k, nd;
 size_t j;
@@ -595,8 +595,8 @@ na_diagonal(int argc, VALUE *argv, VALUE self)
 size_t *shape;
 size_t diag_size;
 ssize_t stride, stride0, stride1;
-
-
+cumo_narray_t *na;
+cumo_narray_view_t *na1, *na2;
 VALUE view;
 VALUE vofs=0, vaxes=0;
 ssize_t kofs;
@@ -631,10 +631,10 @@ na_diagonal(int argc, VALUE *argv, VALUE self)
 kofs = 0;
 }
 
-
+CumoGetNArray(self,na);
 nd = na->ndim;
 if (nd < 2) {
-rb_raise(
+rb_raise(cumo_na_eDimensionError,"less than 2-d array");
 }
 
 if (vaxes) {
@@ -685,20 +685,20 @@ na_diagonal(int argc, VALUE *argv, VALUE self)
 shape[k] = diag_size;
 
 // new object
-view =
-
-
+view = cumo_na_s_allocate_view(rb_obj_class(self));
+cumo_na_copy_flags(self, view);
+CumoGetNArrayView(view, na2);
 
 // new stride
-
-na2->stridx = ALLOC_N(
+cumo_na_setup_shape((cumo_narray_t*)na2, nd-1, shape);
+na2->stridx = ALLOC_N(cumo_stridx_t, nd-1);
 
 switch(na->type) {
-case
-case
+case CUMO_NARRAY_DATA_T:
+case CUMO_NARRAY_FILEMAP_T:
 na2->offset = 0;
 na2->data = self;
-stride = stride0 = stride1 =
+stride = stride0 = stride1 = cumo_na_element_stride(self);
 for (i=nd,k=nd-2; i--; ) {
 if (i==ax[1]) {
 stride1 = stride;
@@ -711,60 +711,60 @@ na_diagonal(int argc, VALUE *argv, VALUE self)
 na2->offset = (-kofs)*stride;
 }
 } else {
-
+CUMO_SDX_SET_STRIDE(na2->stridx[--k],stride);
 }
 stride *= na->shape[i];
 }
-
+CUMO_SDX_SET_STRIDE(na2->stridx[nd-2],stride0+stride1);
 break;
 
-case
-
+case CUMO_NARRAY_VIEW_T:
+CumoGetNArrayView(self, na1);
 na2->data = na1->data;
 na2->offset = na1->offset;
 for (i=k=0; i<nd; i++) {
 if (i != ax[0] && i != ax[1]) {
-if (
-idx0 =
+if (CUMO_SDX_IS_INDEX(na1->stridx[i])) {
+idx0 = CUMO_SDX_GET_INDEX(na1->stridx[i]);
 idx1 = ALLOC_N(size_t, na->shape[i]);
 for (j=0; j<na->shape[i]; j++) {
 idx1[j] = idx0[j];
 }
-
+CUMO_SDX_SET_INDEX(na2->stridx[k],idx1);
 } else {
 na2->stridx[k] = na1->stridx[i];
 }
 k++;
 }
 }
-if (
-idx0 =
+if (CUMO_SDX_IS_INDEX(na1->stridx[ax[0]])) {
+idx0 = CUMO_SDX_GET_INDEX(na1->stridx[ax[0]]);
 diag_idx = ALLOC_N(size_t, diag_size);
-if (
-idx1 =
+if (CUMO_SDX_IS_INDEX(na1->stridx[ax[1]])) {
+idx1 = CUMO_SDX_GET_INDEX(na1->stridx[ax[1]]);
 for (j=0; j<diag_size; j++) {
 diag_idx[j] = idx0[j+k0] + idx1[j+k1];
 }
 } else {
-stride1 =
+stride1 = CUMO_SDX_GET_STRIDE(na1->stridx[ax[1]]);
 for (j=0; j<diag_size; j++) {
 diag_idx[j] = idx0[j+k0] + stride1*(j+k1);
 }
 }
-
+CUMO_SDX_SET_INDEX(na2->stridx[nd-2],diag_idx);
 } else {
-stride0 =
-if (
-idx1 =
+stride0 = CUMO_SDX_GET_STRIDE(na1->stridx[ax[0]]);
+if (CUMO_SDX_IS_INDEX(na1->stridx[ax[1]])) {
+idx1 = CUMO_SDX_GET_INDEX(na1->stridx[ax[1]]);
 diag_idx = ALLOC_N(size_t, diag_size);
 for (j=0; j<diag_size; j++) {
 diag_idx[j] = stride0*(j+k0) + idx1[j+k1];
 }
-
+CUMO_SDX_SET_INDEX(na2->stridx[nd-2],diag_idx);
 } else {
-stride1 =
+stride1 = CUMO_SDX_GET_STRIDE(na1->stridx[ax[1]]);
 na2->offset += stride0*k0 + stride1*k1;
-
+CUMO_SDX_SET_STRIDE(na2->stridx[nd-2],stride0+stride1);
 }
 }
 break;
@@ -782,26 +782,26 @@ na_diagonal(int argc, VALUE *argv, VALUE self)
 #define SWAP(a,b,t) {t=a;a=b;b=t;}
 
 static VALUE
-
+cumo_na_new_dimension_for_dot(VALUE self, int pos, int len, bool transpose)
 {
 int i, k, l, nd;
 size_t j;
 size_t *idx1, *idx2;
 size_t *shape;
 ssize_t stride;
-
-
+cumo_narray_t *na;
+cumo_narray_view_t *na1, *na2;
 size_t shape_n;
-
+cumo_stridx_t stridx_n;
 volatile VALUE view;
 
-
+CumoGetNArray(self,na);
 nd = na->ndim;
 
-view =
+view = cumo_na_s_allocate_view(rb_obj_class(self));
 
-
-
+cumo_na_copy_flags(self, view);
+CumoGetNArrayView(view, na2);
 
 // new dimension
 if (pos < 0) pos += nd;
@@ -810,11 +810,11 @@ na_new_dimension_for_dot(VALUE self, int pos, int len, bool transpose)
 }
 nd += len;
 shape = ALLOCA_N(size_t,nd);
-na2->stridx = ALLOC_N(
+na2->stridx = ALLOC_N(cumo_stridx_t,nd);
 
 switch(na->type) {
-case
-case
+case CUMO_NARRAY_DATA_T:
+case CUMO_NARRAY_FILEMAP_T:
 i = k = 0;
 while (i < nd) {
 if (i == pos && len > 0) {
@@ -825,46 +825,46 @@ na_new_dimension_for_dot(VALUE self, int pos, int len, bool transpose)
 shape[i++] = na->shape[k++];
 }
 }
-
-stride =
+cumo_na_setup_shape((cumo_narray_t*)na2, nd, shape);
+stride = cumo_na_element_stride(self);
 for (i=nd; i--;) {
-
+CUMO_SDX_SET_STRIDE(na2->stridx[i], stride);
 stride *= shape[i];
 }
 na2->offset = 0;
 na2->data = self;
 break;
-case
-
+case CUMO_NARRAY_VIEW_T:
+CumoGetNArrayView(self, na1);
 i = k = 0;
 while (i < nd) {
 if (i == pos && len > 0) {
-if (
-stride =
+if (CUMO_SDX_IS_INDEX(na1->stridx[k])) {
+stride = CUMO_SDX_GET_INDEX(na1->stridx[k])[0];
 } else {
-stride =
+stride = CUMO_SDX_GET_STRIDE(na1->stridx[k]);
 }
 for (l=0; l<len; l++) {
 shape[i] = 1;
-
+CUMO_SDX_SET_STRIDE(na2->stridx[i], stride);
 i++;
 }
 } else {
 shape[i] = na1->base.shape[k];
-if (
-idx1 =
+if (CUMO_SDX_IS_INDEX(na1->stridx[k])) {
+idx1 = CUMO_SDX_GET_INDEX(na1->stridx[k]);
 idx2 = ALLOC_N(size_t,na1->base.shape[k]);
 for (j=0; j<na1->base.shape[k]; j++) {
 idx2[j] = idx1[j];
 }
-
+CUMO_SDX_SET_INDEX(na2->stridx[i], idx2);
 } else {
 na2->stridx[i] = na1->stridx[k];
 }
 i++; k++;
 }
 }
-
+cumo_na_setup_shape((cumo_narray_t*)na2, nd, shape);
 na2->offset = na1->offset;
 na2->data = na1->data;
 break;
@@ -894,48 +894,48 @@ cumo_na_dot(VALUE self, VALUE other)
 {
 VALUE test;
 volatile VALUE a1=self, a2=other;
-
+cumo_narray_t *na1, *na2;
 
-test = rb_funcall(a1,
+test = rb_funcall(a1, cumo_id_respond_to_p, 1, cumo_sym_mulsum);
 if (!RTEST(test)) {
 rb_raise(rb_eNoMethodError,"requires mulsum method for dot method");
 }
-
-
+CumoGetNArray(a1,na1);
+CumoGetNArray(a2,na2);
 if (na1->ndim==0 || na2->ndim==0) {
-rb_raise(
+rb_raise(cumo_na_eDimensionError,"zero dimensional narray");
 }
 if (na2->ndim > 1) {
 if (na1->shape[na1->ndim-1] != na2->shape[na2->ndim-2]) {
-rb_raise(
+rb_raise(cumo_na_eShapeError,"shape mismatch: self.shape[-1](=%"SZF"d) != other.shape[-2](=%"SZF"d)",
 na1->shape[na1->ndim-1], na2->shape[na2->ndim-2]);
 }
 // insert new axis [ ..., last-1-dim, newaxis*other.ndim, last-dim ]
-a1 =
+a1 = cumo_na_new_dimension_for_dot(a1, na1->ndim-1, na2->ndim-1, 0);
 // insert & transpose [ newaxis*self.ndim, ..., last-dim, last-1-dim ]
-a2 =
+a2 = cumo_na_new_dimension_for_dot(a2, 0, na1->ndim-1, 1);
 }
-return rb_funcall(a1,
+return rb_funcall(a1,cumo_id_mulsum,2,a2,INT2FIX(-1));
 }
 #endif
 
 void
-
+Init_cumo_na_data()
 {
-rb_define_method(cNArray, "copy",
+rb_define_method(cNArray, "copy", cumo_na_copy, 0); // deprecated
 
-rb_define_method(cNArray, "flatten",
-rb_define_method(cNArray, "swapaxes",
-rb_define_method(cNArray, "transpose",
+rb_define_method(cNArray, "flatten", cumo_na_flatten, 0);
+rb_define_method(cNArray, "swapaxes", cumo_na_swapaxes, 2);
+rb_define_method(cNArray, "transpose", cumo_na_transpose, -1);
 
-rb_define_method(cNArray, "reshape",
-rb_define_method(cNArray, "reshape!",
+rb_define_method(cNArray, "reshape", cumo_na_reshape,-1);
+rb_define_method(cNArray, "reshape!", cumo_na_reshape_bang,-1);
 /*
 rb_define_alias(cNArray, "shape=","reshape!");
 */
-rb_define_method(cNArray, "diagonal",
+rb_define_method(cNArray, "diagonal", cumo_na_diagonal,-1);
 
-rb_define_method(cNArray, "swap_byte",
+rb_define_method(cNArray, "swap_byte", cumo_na_swap_byte, 0);
 #ifdef DYNAMIC_ENDIAN
 #else
 #ifdef WORDS_BIGENDIAN
@@ -946,16 +946,16 @@ Init_cumo_nary_data()
 rb_define_alias(cNArray, "vacs_order?", "host_order?");
 #endif
 #endif
-rb_define_method(cNArray, "to_network",
-rb_define_method(cNArray, "to_vacs",
-rb_define_method(cNArray, "to_host",
-rb_define_method(cNArray, "to_swapped",
+rb_define_method(cNArray, "to_network", cumo_na_to_network, 0);
+rb_define_method(cNArray, "to_vacs", cumo_na_to_vacs, 0);
+rb_define_method(cNArray, "to_host", cumo_na_to_host, 0);
+rb_define_method(cNArray, "to_swapped", cumo_na_to_swapped, 0);
 
 //rb_define_method(cNArray, "dot", cumo_na_dot, 1);
 
-
-
-
-
-
+cumo_id_mulsum = rb_intern("mulsum");
+cumo_sym_mulsum = ID2SYM(cumo_id_mulsum);
+cumo_id_respond_to_p = rb_intern("respond_to?");
+cumo_id_store = rb_intern("store");
+cumo_id_swap_byte = rb_intern("swap_byte");
 }