numo-narray 0.9.0.1-x86-mingw32

Files changed (124)
  1. checksums.yaml +7 -0
  2. data/Gemfile +4 -0
  3. data/README.md +47 -0
  4. data/Rakefile +41 -0
  5. data/ext/numo/narray/SFMT-params.h +97 -0
  6. data/ext/numo/narray/SFMT-params19937.h +46 -0
  7. data/ext/numo/narray/SFMT.c +620 -0
  8. data/ext/numo/narray/SFMT.h +157 -0
  9. data/ext/numo/narray/array.c +525 -0
  10. data/ext/numo/narray/data.c +901 -0
  11. data/ext/numo/narray/depend.erb +33 -0
  12. data/ext/numo/narray/extconf.rb +117 -0
  13. data/ext/numo/narray/gen/bit.erb.c +811 -0
  14. data/ext/numo/narray/gen/cogen.rb +18 -0
  15. data/ext/numo/narray/gen/def/dcomplex.rb +32 -0
  16. data/ext/numo/narray/gen/def/dfloat.rb +30 -0
  17. data/ext/numo/narray/gen/def/int16.rb +29 -0
  18. data/ext/numo/narray/gen/def/int32.rb +29 -0
  19. data/ext/numo/narray/gen/def/int64.rb +29 -0
  20. data/ext/numo/narray/gen/def/int8.rb +29 -0
  21. data/ext/numo/narray/gen/def/robject.rb +30 -0
  22. data/ext/numo/narray/gen/def/scomplex.rb +32 -0
  23. data/ext/numo/narray/gen/def/sfloat.rb +30 -0
  24. data/ext/numo/narray/gen/def/uint16.rb +29 -0
  25. data/ext/numo/narray/gen/def/uint32.rb +29 -0
  26. data/ext/numo/narray/gen/def/uint64.rb +29 -0
  27. data/ext/numo/narray/gen/def/uint8.rb +29 -0
  28. data/ext/numo/narray/gen/dtype.erb.c +328 -0
  29. data/ext/numo/narray/gen/tmpl/accum.c +36 -0
  30. data/ext/numo/narray/gen/tmpl/accum_binary.c +75 -0
  31. data/ext/numo/narray/gen/tmpl/accum_index.c +58 -0
  32. data/ext/numo/narray/gen/tmpl/allocate.c +35 -0
  33. data/ext/numo/narray/gen/tmpl/aref.c +51 -0
  34. data/ext/numo/narray/gen/tmpl/aset.c +61 -0
  35. data/ext/numo/narray/gen/tmpl/binary.c +53 -0
  36. data/ext/numo/narray/gen/tmpl/binary2.c +55 -0
  37. data/ext/numo/narray/gen/tmpl/binary_s.c +34 -0
  38. data/ext/numo/narray/gen/tmpl/bit_binary.c +94 -0
  39. data/ext/numo/narray/gen/tmpl/bit_count.c +82 -0
  40. data/ext/numo/narray/gen/tmpl/bit_unary.c +77 -0
  41. data/ext/numo/narray/gen/tmpl/cast.c +37 -0
  42. data/ext/numo/narray/gen/tmpl/cast_array.c +79 -0
  43. data/ext/numo/narray/gen/tmpl/cast_numeric.c +22 -0
  44. data/ext/numo/narray/gen/tmpl/coerce_cast.c +8 -0
  45. data/ext/numo/narray/gen/tmpl/cond_binary.c +51 -0
  46. data/ext/numo/narray/gen/tmpl/cond_unary.c +45 -0
  47. data/ext/numo/narray/gen/tmpl/cum.c +42 -0
  48. data/ext/numo/narray/gen/tmpl/each.c +43 -0
  49. data/ext/numo/narray/gen/tmpl/each_with_index.c +64 -0
  50. data/ext/numo/narray/gen/tmpl/extract.c +23 -0
  51. data/ext/numo/narray/gen/tmpl/eye.c +91 -0
  52. data/ext/numo/narray/gen/tmpl/fill.c +38 -0
  53. data/ext/numo/narray/gen/tmpl/format.c +60 -0
  54. data/ext/numo/narray/gen/tmpl/format_to_a.c +47 -0
  55. data/ext/numo/narray/gen/tmpl/head.c +25 -0
  56. data/ext/numo/narray/gen/tmpl/inspect.c +16 -0
  57. data/ext/numo/narray/gen/tmpl/map_with_index.c +94 -0
  58. data/ext/numo/narray/gen/tmpl/median.c +44 -0
  59. data/ext/numo/narray/gen/tmpl/minmax.c +47 -0
  60. data/ext/numo/narray/gen/tmpl/poly.c +49 -0
  61. data/ext/numo/narray/gen/tmpl/pow.c +74 -0
  62. data/ext/numo/narray/gen/tmpl/powint.c +17 -0
  63. data/ext/numo/narray/gen/tmpl/qsort.c +149 -0
  64. data/ext/numo/narray/gen/tmpl/rand.c +33 -0
  65. data/ext/numo/narray/gen/tmpl/rand_norm.c +46 -0
  66. data/ext/numo/narray/gen/tmpl/robj_allocate.c +32 -0
  67. data/ext/numo/narray/gen/tmpl/seq.c +61 -0
  68. data/ext/numo/narray/gen/tmpl/set2.c +56 -0
  69. data/ext/numo/narray/gen/tmpl/sort.c +36 -0
  70. data/ext/numo/narray/gen/tmpl/sort_index.c +86 -0
  71. data/ext/numo/narray/gen/tmpl/store.c +31 -0
  72. data/ext/numo/narray/gen/tmpl/store_array.c +5 -0
  73. data/ext/numo/narray/gen/tmpl/store_from.c +53 -0
  74. data/ext/numo/narray/gen/tmpl/store_numeric.c +22 -0
  75. data/ext/numo/narray/gen/tmpl/to_a.c +41 -0
  76. data/ext/numo/narray/gen/tmpl/unary.c +58 -0
  77. data/ext/numo/narray/gen/tmpl/unary2.c +58 -0
  78. data/ext/numo/narray/gen/tmpl/unary_s.c +57 -0
  79. data/ext/numo/narray/index.c +822 -0
  80. data/ext/numo/narray/kwarg.c +79 -0
  81. data/ext/numo/narray/math.c +140 -0
  82. data/ext/numo/narray/narray.c +1539 -0
  83. data/ext/numo/narray/ndloop.c +1928 -0
  84. data/ext/numo/narray/numo/compat.h +23 -0
  85. data/ext/numo/narray/numo/intern.h +112 -0
  86. data/ext/numo/narray/numo/narray.h +411 -0
  87. data/ext/numo/narray/numo/ndloop.h +99 -0
  88. data/ext/numo/narray/numo/template.h +140 -0
  89. data/ext/numo/narray/numo/types/bit.h +19 -0
  90. data/ext/numo/narray/numo/types/complex.h +410 -0
  91. data/ext/numo/narray/numo/types/complex_macro.h +205 -0
  92. data/ext/numo/narray/numo/types/dcomplex.h +11 -0
  93. data/ext/numo/narray/numo/types/dfloat.h +12 -0
  94. data/ext/numo/narray/numo/types/float_def.h +34 -0
  95. data/ext/numo/narray/numo/types/float_macro.h +277 -0
  96. data/ext/numo/narray/numo/types/int16.h +12 -0
  97. data/ext/numo/narray/numo/types/int32.h +12 -0
  98. data/ext/numo/narray/numo/types/int64.h +12 -0
  99. data/ext/numo/narray/numo/types/int8.h +12 -0
  100. data/ext/numo/narray/numo/types/int_macro.h +34 -0
  101. data/ext/numo/narray/numo/types/robj_macro.h +218 -0
  102. data/ext/numo/narray/numo/types/robject.h +21 -0
  103. data/ext/numo/narray/numo/types/scomplex.h +11 -0
  104. data/ext/numo/narray/numo/types/sfloat.h +13 -0
  105. data/ext/numo/narray/numo/types/uint16.h +12 -0
  106. data/ext/numo/narray/numo/types/uint32.h +12 -0
  107. data/ext/numo/narray/numo/types/uint64.h +12 -0
  108. data/ext/numo/narray/numo/types/uint8.h +12 -0
  109. data/ext/numo/narray/numo/types/uint_macro.h +31 -0
  110. data/ext/numo/narray/numo/types/xint_macro.h +133 -0
  111. data/ext/numo/narray/rand.c +87 -0
  112. data/ext/numo/narray/step.c +506 -0
  113. data/ext/numo/narray/struct.c +872 -0
  114. data/lib/2.1/numo/narray.so +0 -0
  115. data/lib/2.2/numo/narray.so +0 -0
  116. data/lib/2.3/numo/narray.so +0 -0
  117. data/lib/erbpp/line_number.rb +126 -0
  118. data/lib/erbpp/narray_def.rb +338 -0
  119. data/lib/erbpp.rb +286 -0
  120. data/lib/numo/narray.rb +6 -0
  121. data/numo-narray.gemspec +35 -0
  122. data/spec/bit_spec.rb +93 -0
  123. data/spec/narray_spec.rb +249 -0
  124. metadata +238 -0
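
The bulk of this release is the precompiled extension plus the C sources above; the diff below is data/ext/numo/narray/data.c, which implements the shape and byte-order methods (copy, flatten, transpose, reshape, diagonal, swap_byte, dot) registered in Init_nary_data at the bottom of the file. As a rough usage sketch at the Ruby level (assuming the gem is installed and loaded with require "numo/narray"; the shapes follow from the C code below, exact printed output may differ):

    require "numo/narray"

    a = Numo::DFloat.new(4,5).seq     # 4x5 matrix with values 0..19
    a.transpose.shape                 # => [5, 4]   last two axes swapped by default
    a.reshape(2,10).shape             # => [2, 10]  total size must match
    a.reshape(2,nil).shape            # => [2, 10]  nil marks the dimension to infer
    a.flatten.shape                   # => [20]
    a.diagonal(1).to_a                # => [1.0, 7.0, 13.0, 19.0]  offset above the main diagonal

    b = Numo::DFloat.new(5,3).seq
    a.dot(b).shape                    # => [4, 3]   dot is built on mulsum (see numo_na_dot below)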
data/ext/numo/narray/data.c
@@ -0,0 +1,901 @@
/*
  data.c
  Numerical Array Extension for Ruby
    (C) Copyright 1999-2011,2013 by Masahiro TANAKA

  This program is free software.
  You can distribute/modify this program
  under the same terms as Ruby itself.
  NO WARRANTY.
*/

#include <ruby.h>
#include "numo/narray.h"
#include "numo/template.h"

// ---------------------------------------------------------------------

#define LOOP_UNARY_PTR(lp,proc)                            \
{                                                          \
    size_t  i;                                             \
    ssize_t s1, s2;                                        \
    char   *p1, *p2;                                       \
    size_t *idx1, *idx2;                                   \
    INIT_COUNTER(lp, i);                                   \
    INIT_PTR_IDX(lp, 0, p1, s1, idx1);                     \
    INIT_PTR_IDX(lp, 1, p2, s2, idx2);                     \
    if (idx1) {                                            \
        if (idx2) {                                        \
            for (; i--;) {                                 \
                proc((p1+*idx1), (p2+*idx2));              \
                idx1++;                                    \
                idx2++;                                    \
            }                                              \
        } else {                                           \
            for (; i--;) {                                 \
                proc((p1+*idx1), p2);                      \
                idx1++;                                    \
                p2 += s2;                                  \
            }                                              \
        }                                                  \
    } else {                                               \
        if (idx2) {                                        \
            for (; i--;) {                                 \
                proc(p1, (p2+*idx2)); /* was (p1+*idx2): idx2 offsets the second argument */ \
                p1 += s1;                                  \
                idx2++;                                    \
            }                                              \
        } else {                                           \
            for (; i--;) {                                 \
                proc(p1, p2);                              \
                p1 += s1;                                  \
                p2 += s2;                                  \
            }                                              \
        }                                                  \
    }                                                      \
}
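
/* LOOP_UNARY_PTR walks the first two ndloop arguments in lockstep.
 * Each argument is either strided (advance the pointer by s1/s2 bytes
 * per element) or index-mapped (offset the base pointer by *idx1/*idx2),
 * so the four branches cover every stride/index combination and call
 * `proc` with the two element pointers on each iteration. */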

#define m_memcpy(src,dst) memcpy(dst,src,e)
void
iter_copy_bytes(na_loop_t *const lp)
{
    size_t e;
    e = lp->args[0].elmsz;
    LOOP_UNARY_PTR(lp,m_memcpy);
}

VALUE
na_copy(VALUE self)
{
    VALUE v;
    ndfunc_arg_in_t ain[1] = {{Qnil,0}};
    ndfunc_arg_out_t aout[1] = {{INT2FIX(0),0}};
    ndfunc_t ndf = { iter_copy_bytes, FULL_LOOP, 1, 1, ain, aout };

    v = na_ndloop(&ndf, 1, self);
    return v;
}


VALUE
na_store(VALUE self, VALUE src)
{
    return rb_funcall(self,rb_intern("store"),1,src);
}

// ---------------------------------------------------------------------

#define m_swap_byte(q1,q2)       \
    {                            \
        size_t j;                \
        memcpy(b1,q1,e);         \
        for (j=0; j<e; j++) {    \
            b2[e-1-j] = b1[j];   \
        }                        \
        memcpy(q2,b2,e);         \
    }

static void
iter_swap_byte(na_loop_t *const lp)
{
    char *b1, *b2;
    size_t e;

    e = lp->args[0].elmsz;
    b1 = ALLOCA_N(char, e);
    b2 = ALLOCA_N(char, e);
    LOOP_UNARY_PTR(lp,m_swap_byte);
}

static VALUE
nary_swap_byte(VALUE self)
{
    VALUE v;
    ndfunc_arg_in_t ain[1] = {{Qnil,0}};
    ndfunc_arg_out_t aout[1] = {{INT2FIX(0),0}};
    ndfunc_t ndf = { iter_swap_byte, FULL_LOOP|NDF_ACCEPT_BYTESWAP,
                     1, 1, ain, aout };

    v = na_ndloop(&ndf, 1, self);
    if (self!=v) {
        na_copy_flags(self, v);
    }
    REVERSE_BYTE_SWAPPED(v);
    return v;
}
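
/* The swapped result keeps the source flags (na_copy_flags) except that
 * REVERSE_BYTE_SWAPPED toggles its byte-swapped flag, so the
 * to_network/to_vacs/to_host/to_swapped helpers below can simply test
 * the current order and return self when no swap is needed. */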


static VALUE
nary_to_network(VALUE self)
{
    if (TEST_NETWORK_ORDER(self)) {
        return self;
    }
    return rb_funcall(self, rb_intern("swap_byte"), 0);
}

static VALUE
nary_to_vacs(VALUE self)
{
    if (TEST_VACS_ORDER(self)) {
        return self;
    }
    return rb_funcall(self, rb_intern("swap_byte"), 0);
}

static VALUE
nary_to_host(VALUE self)
{
    if (TEST_HOST_ORDER(self)) {
        return self;
    }
    return rb_funcall(self, rb_intern("swap_byte"), 0);
}

static VALUE
nary_to_swapped(VALUE self)
{
    if (TEST_BYTE_SWAPPED(self)) {
        return self;
    }
    return rb_funcall(self, rb_intern("swap_byte"), 0);
}


//----------------------------------------------------------------------


VALUE
na_transpose_map(VALUE self, int *map)
{
    int i, ndim;
    size_t *shape;
    stridx_t *stridx;
    narray_view_t *na;
    volatile VALUE view;

    view = na_make_view(self);
    GetNArrayView(view,na);

    ndim = na->base.ndim;
    shape = ALLOCA_N(size_t,ndim);
    stridx = ALLOCA_N(stridx_t,ndim);

    for (i=0; i<ndim; i++) {
        shape[i] = na->base.shape[i];
        stridx[i] = na->stridx[i];
    }
    for (i=0; i<ndim; i++) {
        na->base.shape[i] = shape[map[i]];
        na->stridx[i] = stridx[map[i]];
    }
    return view;
}


#define SWAP(a,b,tmp) {tmp=a;a=b;b=tmp;}

VALUE
na_transpose(int argc, VALUE *argv, VALUE self)
{
    int ndim, *map, tmp;
    int row_major;
    int i, j, c, r;
    size_t len;
    ssize_t beg, step;
    volatile VALUE v, view;
    narray_t *na1;

    GetNArray(self,na1);
    ndim = na1->ndim;
    row_major = TEST_COLUMN_MAJOR(self);

    map = ALLOCA_N(int,ndim);
    for (i=0; i<ndim; i++) {
        map[i] = i;
    }
    if (argc==0) {
        SWAP(map[ndim-1], map[ndim-2], tmp);
        goto new_object;
    }
    if (argc==2) {
        if (TYPE(argv[0])==T_FIXNUM && TYPE(argv[1])==T_FIXNUM) {
            i = FIX2INT(argv[0]);
            j = FIX2INT(argv[1]);
            if (row_major) {
                i = ndim-1-i;
                j = ndim-1-j;
            }
            SWAP(map[i], map[j], tmp);
            goto new_object;
        }
    }
    for (i=argc,c=ndim-1; i;) {
        v = argv[--i];
        if (TYPE(v)==T_FIXNUM) {
            beg = FIX2INT(v);
            len = 1;
            step = 0;
        } else if (rb_obj_is_kind_of(v,rb_cRange) || rb_obj_is_kind_of(v,na_cStep)) {
            // write me
            nary_step_array_index(v, ndim, &len, &beg, &step);
            //printf("len=%d beg=%d step=%d\n",len,beg,step);
        }
        for (j=len; j; ) {
            r = beg + step*(--j);
            if (row_major) {
                r = ndim-1-r;
            }
            if (c < 0) {
                rb_raise(rb_eArgError, "too many dims");
            }
            map[c--] = r;
            //printf("r=%d\n",r);
        }
    }

 new_object:
    view = na_transpose_map(self,map);
    return view;
}
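
/* na_transpose builds an axis permutation in `map` and hands it to
 * na_transpose_map, which returns a view with shape and stridx reordered.
 * With no arguments the last two axes are swapped; with exactly two
 * integers those axes are swapped; otherwise the arguments (integers,
 * Ranges or Steps) are consumed from the last axis backwards. */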

//----------------------------------------------------------------------

/* private function for reshape */
static VALUE
na_reshape(int argc, VALUE *argv, VALUE self)
{
    int i, unfixed=-1;
    size_t total=1;
    size_t *shape; //, *shape_save;
    narray_t *na;
    VALUE copy;

    if (argc == 0) {
        rb_raise(rb_eRuntimeError, "no argument");
    }
    GetNArray(self,na);
    if (NA_SIZE(na) == 0) {
        rb_raise(rb_eRuntimeError, "cannot reshape empty array");
    }

    /* get shape from argument */
    shape = ALLOCA_N(size_t,argc);
    for (i=0; i<argc; ++i) {
        switch(TYPE(argv[i])) {
        case T_FIXNUM:
            total *= shape[i] = NUM2INT(argv[i]);
            break;
        case T_NIL:
        case T_TRUE:
            unfixed = i;
            break;
        default:
            rb_raise(rb_eArgError,"illegal type");
        }
    }

    if (unfixed>=0) {
        if (NA_SIZE(na) % total != 0)
            rb_raise(rb_eArgError, "total size must be divisible by the product of the given dimensions");
        shape[unfixed] = NA_SIZE(na) / total;
    }
    else if (total != NA_SIZE(na)) {
        rb_raise(rb_eArgError, "total size must be the same");
    }

    copy = na_copy(self);
    GetNArray(copy,na);
    //shape_save = NA_SHAPE(na);
    na_setup_shape(na,argc,shape);
    //if (NA_SHAPE(na) != shape_save) {
    //    xfree(shape_save);
    //}
    return copy;
}
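
/* na_reshape accepts integer dimensions plus at most one nil/true
 * placeholder; the placeholder is inferred as NA_SIZE(na)/total and the
 * division must be exact. Note that this private helper reshapes a copy
 * of the array (na_copy), not a view of it. */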


//----------------------------------------------------------------------

VALUE
na_flatten_dim(VALUE self, int sd)
{
    int i, nd, fd;
    size_t j;
    size_t *c, *pos, *idx1, *idx2;
    size_t stride;
    size_t *shape, size;
    stridx_t sdx;
    narray_t *na;
    narray_view_t *na1, *na2;
    volatile VALUE view;

    GetNArray(self,na);
    nd = na->ndim;

    if (sd<0 || sd>=nd) {
        rb_bug("na_flatten_dim: start_dim (%d) out of range",sd);
    }

    // new shape
    shape = ALLOCA_N(size_t,sd+1);
    for (i=0; i<sd; i++) {
        shape[i] = na->shape[i];
    }
    size = 1;
    for (i=sd; i<nd; i++) {
        size *= na->shape[i];
    }
    shape[sd] = size;

    // new object
    view = na_s_allocate_view(CLASS_OF(self));
    na_copy_flags(self, view);
    GetNArrayView(view, na2);

    // new stride
    na_setup_shape((narray_t*)na2, sd+1, shape);
    na2->stridx = ALLOC_N(stridx_t,sd+1);

    switch(na->type) {
    case NARRAY_DATA_T:
    case NARRAY_FILEMAP_T:
        stride = na_get_elmsz(self);
        for (i=sd+1; i--; ) {
            //printf("data: i=%d stride=%d\n",i,stride);
            SDX_SET_STRIDE(na2->stridx[i],stride);
            stride *= shape[i];
        }
        na2->offset = 0;
        na2->data = self;
        break;
    case NARRAY_VIEW_T:
        GetNArrayView(self, na1);
        na2->data = na1->data;
        na2->offset = na1->offset;
        for (i=0; i<sd; i++) {
            if (SDX_IS_INDEX(na1->stridx[i])) {
                idx1 = SDX_GET_INDEX(na1->stridx[i]);
                idx2 = ALLOC_N(size_t, shape[i]);
                for (j=0; j<shape[i]; j++) {
                    idx2[j] = idx1[j];
                }
                SDX_SET_INDEX(na2->stridx[i],idx2);
            } else {
                na2->stridx[i] = na1->stridx[i];
                //printf("view: i=%d stridx=%d\n",i,SDX_GET_STRIDE(sdx));
            }
        }
        // flat dimension == last dimension
        if (RTEST(na_check_ladder(self,sd))) {
        //if (0) {
            na2->stridx[sd] = na1->stridx[nd-1];
        } else {
            // set index
            idx2 = ALLOC_N(size_t, shape[sd]);
            SDX_SET_INDEX(na2->stridx[sd],idx2);
            // init for md-loop
            fd = nd-sd;
            c = ALLOC_N(size_t, fd);
            for (i=0; i<fd; i++) c[i]=0;
            pos = ALLOC_N(size_t, fd+1);
            pos[0] = 0;
            // md-loop
            for (i=j=0;;) {
                for (; i<fd; i++) {
                    sdx = na1->stridx[i+sd];
                    if (SDX_IS_INDEX(sdx)) {
                        pos[i+1] = pos[i] + SDX_GET_INDEX(sdx)[c[i]];
                    } else {
                        pos[i+1] = pos[i] + SDX_GET_STRIDE(sdx)*c[i];
                    }
                }
                idx2[j++] = pos[i];
                for (;;) {
                    if (i==0) goto loop_end;
                    i--;
                    c[i]++;
                    if (c[i] < na1->base.shape[i+sd]) break;
                    c[i] = 0;
                }
            }
        loop_end:
            xfree(pos);
            xfree(c);
        }
        break;
    }
    return view;
}
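
/* na_flatten_dim collapses dimensions sd..ndim-1 into one dimension.
 * For plain data the collapsed dimension is a simple stride; for views
 * it either reuses the last stride when na_check_ladder reports the
 * collapsed region as contiguous, or materializes an explicit index
 * array with the md-loop above. */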

VALUE
na_flatten(VALUE self)
{
    return na_flatten_dim(self,0);
}


VALUE
na_flatten_by_reduce(int argc, VALUE *argv, VALUE self)
{
    size_t sz_reduce=1;
    int i, j, ndim;
    int nd_reduce=0, nd_rest=0;
    int *dim_reduce, *dim_rest;
    int *map;
    volatile VALUE view, reduce;
    narray_t *na;

    //puts("pass1");
    //rb_p(self);
    reduce = na_reduce_dimension(argc, argv, 1, &self);
    //reduce = INT2FIX(1);
    //rb_p(self);
    //puts("pass2");

    if (reduce==INT2FIX(0)) {
        //puts("pass flatten_dim");
        //rb_funcall(self,rb_intern("debug_info"),0);
        //rb_p(self);
        view = na_flatten_dim(self,0);
        //rb_funcall(view,rb_intern("debug_info"),0);
        //rb_p(view);
    } else {
        //printf("reduce=0x%x\n",NUM2INT(reduce));
        GetNArray(self,na);
        ndim = na->ndim;
        if (ndim==0) {
            rb_raise(rb_eStandardError,"cannot flatten scalar(dim-0 array)");
            return Qnil;
        }
        map = ALLOC_N(int,ndim);
        dim_reduce = ALLOC_N(int,ndim);
        dim_rest = ALLOC_N(int,ndim);
        for (i=0; i<ndim; i++) {
            if (na_test_reduce( reduce, i )) {
                sz_reduce *= na->shape[i];
                //printf("i=%d, nd_reduce=%d, na->shape[i]=%ld\n", i, nd_reduce, na->shape[i]);
                dim_reduce[nd_reduce++] = i;
            } else {
                //shape[nd_rest] = na->shape[i];
                //sz_rest *= na->shape[i];
                //printf("i=%d, nd_rest=%d, na->shape[i]=%ld\n", i, nd_rest, na->shape[i]);
                dim_rest[nd_rest++] = i;
            }
        }
        for (i=0; i<nd_rest; i++) {
            map[i] = dim_rest[i];
            //printf("dim_rest[i=%d]=%d\n",i,dim_rest[i]);
            //printf("map[i=%d]=%d\n",i,map[i]);
        }
        for (j=0; j<nd_reduce; j++,i++) {
            map[i] = dim_reduce[j];
            //printf("dim_reduce[j=%d]=%d\n",j,dim_reduce[j]);
            //printf("map[i=%d]=%d\n",i,map[i]);
        }
        xfree(dim_reduce);
        xfree(dim_rest);
        //for (i=0; i<ndim; i++) {
        //    printf("map[%d]=%d\n",i,map[i]);
        //}
        //puts("pass transpose_map");
        view = na_transpose_map(self,map);
        xfree(map);
        //rb_p(view);
        //rb_funcall(view,rb_intern("debug_print"),0);

        //puts("pass flatten_dim");
        view = na_flatten_dim(view,nd_rest);
        //rb_funcall(view,rb_intern("debug_print"),0);
        //rb_p(view);
    }
    return view;
}


//----------------------------------------------------------------------

#define MIN(a,b) (((a)<(b))?(a):(b))

/*
  Returns a diagonal view of NArray
  @overload diagonal([offset,axes])
  @param [Integer] offset  Diagonal offset from the main diagonal.
    The default is 0. k>0 for diagonals above the main diagonal,
    and k<0 for diagonals below the main diagonal.
  @param [Array] axes  Array of axes to be used as the 2-d sub-arrays
    from which the diagonals should be taken. Defaults to last-two
    axes ([-2,-1]).
  @return [Numo::NArray] diagonal view of NArray.
  @example
    a = Numo::DFloat.new(4,5).seq
    => Numo::DFloat#shape=[4,5]
    [[0, 1, 2, 3, 4],
     [5, 6, 7, 8, 9],
     [10, 11, 12, 13, 14],
     [15, 16, 17, 18, 19]]
    b = a.diagonal(1)
    => Numo::DFloat(view)#shape=[4]
    [1, 7, 13, 19]
    b.store(0)
    a
    => Numo::DFloat#shape=[4,5]
    [[0, 0, 2, 3, 4],
     [5, 6, 0, 8, 9],
     [10, 11, 12, 0, 14],
     [15, 16, 17, 18, 0]]
    b.store([1,2,3,4])
    a
    => Numo::DFloat#shape=[4,5]
    [[0, 1, 2, 3, 4],
     [5, 6, 2, 8, 9],
     [10, 11, 12, 3, 14],
     [15, 16, 17, 18, 4]]
*/
VALUE
na_diagonal(int argc, VALUE *argv, VALUE self)
{
    int i, k, nd;
    size_t j;
    size_t *idx0, *idx1, *diag_idx;
    size_t *shape;
    size_t diag_size;
    ssize_t stride, stride0, stride1;
    narray_t *na;
    narray_view_t *na1, *na2;
    VALUE view;
    VALUE vofs=0, vaxes=0;
    ssize_t kofs;
    size_t k0, k1;
    int ax[2];

    // check arguments
    if (argc>2) {
        rb_raise(rb_eArgError,"too many arguments (%d for 0..2)",argc);
    }

    for (i=0; i<argc; i++) {
        switch(TYPE(argv[i])) {
        case T_FIXNUM:
            if (vofs) {
                rb_raise(rb_eArgError,"offset is given twice");
            }
            vofs = argv[i];
            break;
        case T_ARRAY:
            if (vaxes) {
                rb_raise(rb_eArgError,"axes-array is given twice");
            }
            vaxes = argv[i];
            break;
        }
    }

    if (vofs) {
        kofs = NUM2SSIZE(vofs);
    } else {
        kofs = 0;
    }

    GetNArray(self,na);
    nd = na->ndim;
    if (nd < 2) {
        rb_raise(nary_eDimensionError,"less than 2-d array");
    }

    if (vaxes) {
        if (RARRAY_LEN(vaxes) != 2) {
            rb_raise(rb_eArgError,"axes must be 2-element array");
        }
        ax[0] = NUM2INT(RARRAY_AREF(vaxes,0));
        ax[1] = NUM2INT(RARRAY_AREF(vaxes,1));
        if (ax[0]<-nd || ax[0]>=nd || ax[1]<-nd || ax[1]>=nd) {
            rb_raise(rb_eArgError,"axis out of range:[%d,%d]",ax[0],ax[1]);
        }
        if (ax[0]<0) {ax[0] += nd;}
        if (ax[1]<0) {ax[1] += nd;}
        if (ax[0]==ax[1]) {
            rb_raise(rb_eArgError,"same axes:[%d,%d]",ax[0],ax[1]);
        }
    } else {
        ax[0] = nd-2;
        ax[1] = nd-1;
    }

    // Diagonal offset from the main diagonal.
    if (kofs >= 0) {
        k0 = 0;
        k1 = kofs;
        if (k1 >= na->shape[ax[1]]) {
            rb_raise(rb_eArgError,"invalid diagonal offset(%ld) for "
                     "last dimension size(%ld)",kofs,na->shape[ax[1]]);
        }
    } else {
        k0 = -kofs;
        k1 = 0;
        if (k0 >= na->shape[ax[0]]) {
            rb_raise(rb_eArgError,"invalid diagonal offset(=%ld) for "
                     "last-1 dimension size(%ld)",kofs,na->shape[ax[0]]);
        }
    }

    diag_size = MIN(na->shape[ax[0]]-k0,na->shape[ax[1]]-k1);

    // new shape
    shape = ALLOCA_N(size_t,nd-1);
    for (i=k=0; i<nd; i++) {
        if (i != ax[0] && i != ax[1]) {
            shape[k++] = na->shape[i];
        }
    }
    shape[k] = diag_size;

    // new object
    view = na_s_allocate_view(CLASS_OF(self));
    na_copy_flags(self, view);
    GetNArrayView(view, na2);

    // new stride
    na_setup_shape((narray_t*)na2, nd-1, shape);
    na2->stridx = ALLOC_N(stridx_t, nd-1);

    switch(na->type) {
    case NARRAY_DATA_T:
    case NARRAY_FILEMAP_T:
        na2->offset = 0;
        na2->data = self;
        stride = stride0 = stride1 = na_get_elmsz(self);
        for (i=nd,k=nd-2; i--; ) {
            if (i==ax[1]) {
                stride1 = stride;
                if (kofs > 0) {
                    na2->offset = kofs*stride;
                }
            } else if (i==ax[0]) {
                stride0 = stride;
                if (kofs < 0) {
                    na2->offset = (-kofs)*stride;
                }
            } else {
                SDX_SET_STRIDE(na2->stridx[--k],stride);
            }
            stride *= na->shape[i];
        }
        SDX_SET_STRIDE(na2->stridx[nd-2],stride0+stride1);
        break;

    case NARRAY_VIEW_T:
        GetNArrayView(self, na1);
        na2->data = na1->data;
        na2->offset = na1->offset;
        for (i=k=0; i<nd; i++) {
            if (i != ax[0] && i != ax[1]) {
                if (SDX_IS_INDEX(na1->stridx[i])) {
                    idx0 = SDX_GET_INDEX(na1->stridx[i]);
                    idx1 = ALLOC_N(size_t, na->shape[i]);
                    for (j=0; j<na->shape[i]; j++) {
                        idx1[j] = idx0[j];
                    }
                    SDX_SET_INDEX(na2->stridx[k],idx1);
                } else {
                    na2->stridx[k] = na1->stridx[i];
                }
                k++;
            }
        }
        if (SDX_IS_INDEX(na1->stridx[ax[0]])) {
            idx0 = SDX_GET_INDEX(na1->stridx[ax[0]]);
            diag_idx = ALLOC_N(size_t, diag_size);
            if (SDX_IS_INDEX(na1->stridx[ax[1]])) {
                idx1 = SDX_GET_INDEX(na1->stridx[ax[1]]);
                for (j=0; j<diag_size; j++) {
                    diag_idx[j] = idx0[j+k0] + idx1[j+k1];
                }
            } else {
                stride1 = SDX_GET_STRIDE(na1->stridx[ax[1]]);
                for (j=0; j<diag_size; j++) {
                    diag_idx[j] = idx0[j+k0] + stride1*(j+k1);
                }
            }
            SDX_SET_INDEX(na2->stridx[nd-2],diag_idx);
        } else {
            stride0 = SDX_GET_STRIDE(na1->stridx[ax[0]]);
            if (SDX_IS_INDEX(na1->stridx[ax[1]])) {
                idx1 = SDX_GET_INDEX(na1->stridx[ax[1]]);
                diag_idx = ALLOC_N(size_t, diag_size);
                for (j=0; j<diag_size; j++) {
                    diag_idx[j] = stride0*(j+k0) + idx1[j+k1];
                }
                SDX_SET_INDEX(na2->stridx[nd-2],diag_idx);
            } else {
                stride1 = SDX_GET_STRIDE(na1->stridx[ax[1]]);
                na2->offset += stride0*k0 + stride1*k1;
                SDX_SET_STRIDE(na2->stridx[nd-2],stride0+stride1);
            }
        }
        break;
    }
    return view;
}
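
/* The diagonal view drops the two selected axes and appends a single
 * diagonal axis of length diag_size. For plain data its stride is just
 * stride0+stride1 with the offset shifted by the |kofs| off-diagonal
 * elements; for index-based views an explicit index array is built by
 * summing the per-axis positions. */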

//----------------------------------------------------------------------


#ifdef SWAP
#undef SWAP
#endif
#define SWAP(a,b,t) {t=a;a=b;b=t;}

static VALUE
na_new_dimension_for_dot(VALUE self, int pos, int len, bool transpose)
{
    int i, k, nd;
    size_t j;
    size_t *idx1, *idx2;
    size_t *shape;
    ssize_t stride;
    narray_t *na;
    narray_view_t *na1, *na2;
    size_t shape_n;
    stridx_t stridx_n;
    volatile VALUE view;

    GetNArray(self,na);
    nd = na->ndim;

    view = na_s_allocate_view(CLASS_OF(self));

    na_copy_flags(self, view);
    GetNArrayView(view, na2);

    // new dimension
    if (pos < 0) pos += nd;
    if (pos > nd || pos < 0) {
        rb_raise(rb_eRangeError,"new dimension is out of range");
    }
    nd += len;
    shape = ALLOCA_N(size_t,nd);
    i = k = 0;
    while (i < nd) {
        if (i == pos) {
            for (; len; len--) {
                shape[i++] = 1;
            }
            pos = -1; // new axis done
        } else {
            shape[i++] = na->shape[k++];
        }
    }

    na_setup_shape((narray_t*)na2, nd, shape);
    na2->stridx = ALLOC_N(stridx_t,nd);

    switch(na->type) {
    case NARRAY_DATA_T:
    case NARRAY_FILEMAP_T:
        stride = na_get_elmsz(self);
        for (i=nd; i--;) {
            SDX_SET_STRIDE(na2->stridx[i],stride);
            stride *= shape[i];
        }
        na2->offset = 0;
        na2->data = self;
        break;
    case NARRAY_VIEW_T:
        GetNArrayView(self, na1);
        for (i=0; i<nd; i++) {
            if (SDX_IS_INDEX(na1->stridx[i])) {
                idx1 = SDX_GET_INDEX(na1->stridx[i]);
                idx2 = ALLOC_N(size_t,na1->base.shape[i]);
                for (j=0; j<na1->base.shape[i]; j++) {
                    idx2[j] = idx1[j];
                }
                SDX_SET_INDEX(na2->stridx[i],idx2);
            } else {
                na2->stridx[i] = na1->stridx[i];
            }
        }
        na2->offset = na1->offset;
        na2->data = na1->data;
        break;
    }

    if (transpose) {
        SWAP(na2->base.shape[nd-1], na2->base.shape[nd-2], shape_n);
        SWAP(na2->stridx[nd-1], na2->stridx[nd-2], stridx_n);
    }

    return view;
}


//----------------------------------------------------------------------

/*
 * call-seq:
 *   narray.dot(other) => narray
 *
 * Returns dot product.
 *
 */

static VALUE
numo_na_dot(VALUE self, VALUE other)
{
    VALUE test, sym_mulsum;
    volatile VALUE a1=self, a2=other;
    ID id_mulsum;
    narray_t *na1, *na2;

    id_mulsum = rb_intern("mulsum");
    sym_mulsum = ID2SYM(id_mulsum);
    test = rb_funcall(a1, rb_intern("respond_to?"), 1, sym_mulsum);
    if (!RTEST(test)) {
        rb_raise(rb_eNoMethodError,"requires mulsum method for dot method");
    }
    GetNArray(a1,na1);
    GetNArray(a2,na2);
    if (na2->ndim > 1) {
        // insert new axis [ ..., last-1-dim, newaxis*other.ndim, last-dim ]
        a1 = na_new_dimension_for_dot(a1, na1->ndim-1, na2->ndim-1, 0);
        // insert & transpose [ newaxis*self.ndim, ..., last-dim, last-1-dim ]
        a2 = na_new_dimension_for_dot(a2, 0, na1->ndim-1, 1);
    }
    return rb_funcall(a1,rb_intern("mulsum"),2,a2,INT2FIX(-1));
}
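
/* dot is composed from element-wise multiply-accumulate: newaxis
 * dimensions are inserted into both operands (and the second operand's
 * last two axes are transposed) so that a single mulsum over the last
 * axis produces the matrix/tensor product. */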


void
Init_nary_data()
{
    rb_define_method(cNArray, "copy", na_copy, 0);

    rb_define_method(cNArray, "flatten", na_flatten, 0);
    rb_define_method(cNArray, "transpose", na_transpose, -1);

    rb_define_method(cNArray, "reshape", na_reshape,-1);
    /*
    rb_define_method(cNArray, "reshape!", na_reshape_bang,-1);
    rb_define_alias(cNArray, "shape=","reshape!");
    */
    rb_define_method(cNArray, "diagonal", na_diagonal,-1);

    rb_define_method(cNArray, "swap_byte", nary_swap_byte, 0);
#ifdef DYNAMIC_ENDIAN
#else
#ifdef WORDS_BIGENDIAN
#else // LITTLE_ENDIAN
    rb_define_alias(cNArray, "hton", "swap_byte");
    rb_define_alias(cNArray, "ntoh", "swap_byte"); /* was a duplicate "hton" alias */
    rb_define_alias(cNArray, "network_order?", "byte_swapped?");
    rb_define_alias(cNArray, "little_endian?", "host_order?");
    rb_define_alias(cNArray, "vacs_order?", "host_order?");
#endif
#endif
    rb_define_method(cNArray, "to_network", nary_to_network, 0);
    rb_define_method(cNArray, "to_vacs", nary_to_vacs, 0);
    rb_define_method(cNArray, "to_host", nary_to_host, 0);
    rb_define_method(cNArray, "to_swapped", nary_to_swapped, 0);

    rb_define_method(cNArray, "dot", numo_na_dot, 1);
}
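
For the byte-order helpers registered at the end of Init_nary_data, a brief sketch of the intended behaviour (method and alias names as defined above; the flag values shown assume a little-endian host):

    require "numo/narray"

    a = Numo::Int32[1, 2, 3]
    a.host_order?                # => true for a freshly created array
    n = a.to_network             # byte-swapped copy (no-op if already in network order)
    n.network_order?             # => true, aliased to byte_swapped? on little-endian builds
    n.to_host.host_order?        # => true, swapping again restores host order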