numo-narray-alt 0.9.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66) hide show
  1. checksums.yaml +7 -0
  2. data/Gemfile +14 -0
  3. data/LICENSE +30 -0
  4. data/README.md +71 -0
  5. data/Rakefile +24 -0
  6. data/ext/numo/narray/SFMT-params.h +97 -0
  7. data/ext/numo/narray/SFMT-params19937.h +48 -0
  8. data/ext/numo/narray/SFMT.c +602 -0
  9. data/ext/numo/narray/SFMT.h +147 -0
  10. data/ext/numo/narray/array.c +575 -0
  11. data/ext/numo/narray/data.c +958 -0
  12. data/ext/numo/narray/extconf.rb +84 -0
  13. data/ext/numo/narray/index.c +1092 -0
  14. data/ext/numo/narray/kwargs.c +142 -0
  15. data/ext/numo/narray/math.c +133 -0
  16. data/ext/numo/narray/narray.c +1976 -0
  17. data/ext/numo/narray/narray.def +28 -0
  18. data/ext/numo/narray/ndloop.c +1840 -0
  19. data/ext/numo/narray/numo/compat.h +23 -0
  20. data/ext/numo/narray/numo/intern.h +115 -0
  21. data/ext/numo/narray/numo/narray.h +480 -0
  22. data/ext/numo/narray/numo/ndloop.h +93 -0
  23. data/ext/numo/narray/numo/template.h +149 -0
  24. data/ext/numo/narray/numo/types/bit.h +38 -0
  25. data/ext/numo/narray/numo/types/complex.h +404 -0
  26. data/ext/numo/narray/numo/types/complex_macro.h +384 -0
  27. data/ext/numo/narray/numo/types/dcomplex.h +42 -0
  28. data/ext/numo/narray/numo/types/dfloat.h +44 -0
  29. data/ext/numo/narray/numo/types/float_def.h +34 -0
  30. data/ext/numo/narray/numo/types/float_macro.h +202 -0
  31. data/ext/numo/narray/numo/types/int16.h +27 -0
  32. data/ext/numo/narray/numo/types/int32.h +23 -0
  33. data/ext/numo/narray/numo/types/int64.h +23 -0
  34. data/ext/numo/narray/numo/types/int8.h +23 -0
  35. data/ext/numo/narray/numo/types/int_macro.h +66 -0
  36. data/ext/numo/narray/numo/types/real_accum.h +481 -0
  37. data/ext/numo/narray/numo/types/robj_macro.h +78 -0
  38. data/ext/numo/narray/numo/types/robject.h +25 -0
  39. data/ext/numo/narray/numo/types/scomplex.h +42 -0
  40. data/ext/numo/narray/numo/types/sfloat.h +45 -0
  41. data/ext/numo/narray/numo/types/uint16.h +24 -0
  42. data/ext/numo/narray/numo/types/uint32.h +20 -0
  43. data/ext/numo/narray/numo/types/uint64.h +20 -0
  44. data/ext/numo/narray/numo/types/uint8.h +20 -0
  45. data/ext/numo/narray/numo/types/uint_macro.h +57 -0
  46. data/ext/numo/narray/numo/types/xint_macro.h +166 -0
  47. data/ext/numo/narray/rand.c +40 -0
  48. data/ext/numo/narray/src/t_bit.c +3236 -0
  49. data/ext/numo/narray/src/t_dcomplex.c +6776 -0
  50. data/ext/numo/narray/src/t_dfloat.c +9417 -0
  51. data/ext/numo/narray/src/t_int16.c +5757 -0
  52. data/ext/numo/narray/src/t_int32.c +5757 -0
  53. data/ext/numo/narray/src/t_int64.c +5759 -0
  54. data/ext/numo/narray/src/t_int8.c +5355 -0
  55. data/ext/numo/narray/src/t_robject.c +5567 -0
  56. data/ext/numo/narray/src/t_scomplex.c +6731 -0
  57. data/ext/numo/narray/src/t_sfloat.c +9374 -0
  58. data/ext/numo/narray/src/t_uint16.c +5753 -0
  59. data/ext/numo/narray/src/t_uint32.c +5753 -0
  60. data/ext/numo/narray/src/t_uint64.c +5755 -0
  61. data/ext/numo/narray/src/t_uint8.c +5351 -0
  62. data/ext/numo/narray/step.c +266 -0
  63. data/ext/numo/narray/struct.c +814 -0
  64. data/lib/numo/narray/extra.rb +1266 -0
  65. data/lib/numo/narray.rb +4 -0
  66. metadata +106 -0
@@ -0,0 +1,1092 @@
1
+ /*
2
+ index.c
3
+ Ruby/Numo::NArray - Numerical Array class for Ruby
4
+ Copyright (C) 1999-2020 Masahiro TANAKA
5
+ */
6
+ // #define NARRAY_C
7
+ #include <ruby.h>
8
+ #include <string.h>
9
+
10
+ #include "numo/narray.h"
11
+ #include "numo/template.h"
12
+
13
+ #if SIZEOF_VOIDP == 8
14
+ #define cIndex numo_cInt64
15
+ #elif SIZEOF_VOIDP == 4
16
+ #define cIndex numo_cInt32
17
+ #endif
18
+
19
// Parsed form of one index argument, describing the selection made on one
// dimension of the original array. Either idx is non-NULL (explicit index
// list) or beg/step describe a regular slice of n elements.
// note: the memory refed by this pointer is not freed and causes memory leak.
typedef struct {
  size_t n;     // the number of elements of the dimension
  size_t beg;   // the starting point in the dimension
  ssize_t step; // the step size of the dimension
  size_t* idx;  // list of indices
  int reduce;   // true if the dimension is reduced by addition
  int orig_dim; // the dimension of original array
} na_index_arg_t;
28
+
29
+ static void print_index_arg(na_index_arg_t* q, int n) {
30
+ int i;
31
+ printf("na_index_arg_t = 0x%" SZF "x {\n", (size_t)q);
32
+ for (i = 0; i < n; i++) {
33
+ printf(" q[%d].n=%" SZF "d\n", i, q[i].n);
34
+ printf(" q[%d].beg=%" SZF "d\n", i, q[i].beg);
35
+ printf(" q[%d].step=%" SZF "d\n", i, q[i].step);
36
+ printf(" q[%d].idx=0x%" SZF "x\n", i, (size_t)q[i].idx);
37
+ printf(" q[%d].reduce=0x%x\n", i, q[i].reduce);
38
+ printf(" q[%d].orig_dim=%d\n", i, q[i].orig_dim);
39
+ }
40
+ printf("}\n");
41
+ }
42
+
43
// Symbols and method IDs used by the index parsers; interned once in
// Init_nary_index at extension load time.
static VALUE sym_ast;
static VALUE sym_all;
// static VALUE sym_reduce;
// NOTE(review): sym_reduce is referenced in na_index_parse_each below while
// its local declaration is commented out -- presumably it is declared
// elsewhere (e.g. extern in numo/narray.h); confirm.
static VALUE sym_minus;
static VALUE sym_new;
static VALUE sym_reverse;
static VALUE sym_plus;
static VALUE sym_sum;
static VALUE sym_tilde;
static VALUE sym_rest;
static ID id_beg;
static ID id_end;
static ID id_exclude_end;
static ID id_each;
static ID id_step;
static ID id_dup;
static ID id_bracket;
static ID id_shift_left;
static ID id_mask;
static ID id_where;
63
+
64
+ static void na_index_set_step(na_index_arg_t* q, int i, size_t n, size_t beg, ssize_t step) {
65
+ q->n = n;
66
+ q->beg = beg;
67
+ q->step = step;
68
+ q->idx = NULL;
69
+ q->reduce = 0;
70
+ q->orig_dim = i;
71
+ }
72
+
73
+ static void na_index_set_scalar(na_index_arg_t* q, int i, ssize_t size, ssize_t x) {
74
+ if (x < -size || x >= size) rb_raise(rb_eRangeError, "array index (%" SZF "d) is out of array size (%" SZF "d)", x, size);
75
+ if (x < 0) x += size;
76
+ q->n = 1;
77
+ q->beg = x;
78
+ q->step = 0;
79
+ q->idx = NULL;
80
+ q->reduce = 0;
81
+ q->orig_dim = i;
82
+ }
83
+
84
+ static inline ssize_t na_range_check(ssize_t pos, ssize_t size, int dim) {
85
+ ssize_t idx = pos;
86
+
87
+ if (idx < 0) idx += size;
88
+ if (idx < 0 || idx >= size) {
89
+ rb_raise(rb_eIndexError, "index=%" SZF "d out of shape[%d]=%" SZF "d", pos, dim, size);
90
+ }
91
+ return idx;
92
+ }
93
+
94
+ static void na_parse_array(VALUE ary, int orig_dim, ssize_t size, na_index_arg_t* q) {
95
+ int k;
96
+ int n = (int)RARRAY_LEN(ary);
97
+ q->idx = ALLOC_N(size_t, n);
98
+ for (k = 0; k < n; k++) {
99
+ q->idx[k] = na_range_check(NUM2SSIZET(RARRAY_AREF(ary, k)), size, orig_dim);
100
+ }
101
+ q->n = n;
102
+ q->beg = 0;
103
+ q->step = 1;
104
+ q->reduce = 0;
105
+ q->orig_dim = orig_dim;
106
+ }
107
+
108
+ static void na_parse_narray_index(VALUE a, int orig_dim, ssize_t size, na_index_arg_t* q) {
109
+ VALUE idx, cls;
110
+ narray_t *na, *nidx;
111
+ size_t k, n;
112
+
113
+ GetNArray(a, na);
114
+ if (NA_NDIM(na) != 1) {
115
+ rb_raise(rb_eIndexError, "should be 1-d NArray");
116
+ }
117
+ cls = rb_obj_class(a);
118
+ if (cls == numo_cBit) {
119
+ if (NA_SIZE(na) != (size_t)size) {
120
+ rb_raise(rb_eIndexError, "Bit-NArray size mismatch");
121
+ }
122
+ idx = rb_funcall(a, id_where, 0);
123
+ GetNArray(idx, nidx);
124
+ n = NA_SIZE(nidx);
125
+ q->idx = ALLOC_N(size_t, n);
126
+ if (na->type != NARRAY_DATA_T) {
127
+ rb_bug("NArray#where returned wrong type of NArray");
128
+ }
129
+ if (rb_obj_class(idx) == numo_cInt32) {
130
+ int32_t* p = (int32_t*)NA_DATA_PTR(nidx);
131
+ for (k = 0; k < n; k++) {
132
+ q->idx[k] = (size_t)p[k];
133
+ }
134
+ } else if (rb_obj_class(idx) == numo_cInt64) {
135
+ int64_t* p = (int64_t*)NA_DATA_PTR(nidx);
136
+ for (k = 0; k < n; k++) {
137
+ q->idx[k] = (size_t)p[k];
138
+ }
139
+ } else {
140
+ rb_bug("NArray#where should return Int32 or Int64");
141
+ }
142
+ RB_GC_GUARD(idx);
143
+ } else {
144
+ n = NA_SIZE(na);
145
+ q->idx = ALLOC_N(size_t, n);
146
+ if (cls == numo_cInt32 && na->type == NARRAY_DATA_T) {
147
+ int32_t* p = (int32_t*)NA_DATA_PTR(na);
148
+ for (k = 0; k < n; k++) {
149
+ q->idx[k] = na_range_check(p[k], size, orig_dim);
150
+ }
151
+ } else if (cls == numo_cInt64 && na->type == NARRAY_DATA_T) {
152
+ int64_t* p = (int64_t*)NA_DATA_PTR(na);
153
+ for (k = 0; k < n; k++) {
154
+ q->idx[k] = na_range_check(p[k], size, orig_dim);
155
+ }
156
+ } else {
157
+ ssize_t* p;
158
+ idx = nary_new(cIndex, 1, &n);
159
+ na_store(idx, a);
160
+ GetNArray(idx, nidx);
161
+ p = (ssize_t*)NA_DATA_PTR(nidx);
162
+ for (k = 0; k < n; k++) {
163
+ q->idx[k] = na_range_check(p[k], size, orig_dim);
164
+ }
165
+ RB_GC_GUARD(idx);
166
+ }
167
+ }
168
+ q->n = n;
169
+ q->beg = 0;
170
+ q->step = 1;
171
+ q->reduce = 0;
172
+ q->orig_dim = orig_dim;
173
+ }
174
+
175
// Parse a Ruby Range (or ArithmeticSequence) into beg/step/count for
// dimension orig_dim of the given size, storing the result via
// na_index_set_step. `step` is the caller-supplied default; when
// rb_arithmetic_sequence_extract is available, the sequence's own step
// overrides it.
// NOTE(review): a beginless range (nil begin) would raise TypeError inside
// NUM2SSIZET here -- confirm that is the intended behavior.
static void na_parse_range(VALUE range, ssize_t step, int orig_dim, ssize_t size, na_index_arg_t* q) {
  int n;
  ssize_t beg, end, beg_orig, end_orig;
  const char *dot = "..", *edot = "...";

#ifdef HAVE_RB_ARITHMETIC_SEQUENCE_EXTRACT
  rb_arithmetic_sequence_components_t x;
  rb_arithmetic_sequence_extract(range, &x);
  step = NUM2SSIZET(x.step);

  beg = beg_orig = NUM2SSIZET(x.begin);
  if (beg < 0) {
    beg += size; // negative index counts from the end
  }
  if (T_NIL == TYPE(x.end)) { // endless range
    end = size - 1;
    if (RTEST(x.exclude_end)) {
      dot = edot; // "..." in the error message
    }
    if (beg < 0 || beg >= size) {
      rb_raise(rb_eRangeError, "%" SZF "d%s is out of range for size=%" SZF "d", beg_orig, dot, size);
    }
  } else {
    end = end_orig = NUM2SSIZET(x.end);
    if (end < 0) {
      end += size;
    }
    if (RTEST(x.exclude_end)) {
      end--; // exclusive end: a...b covers up to b-1
      dot = edot;
    }
    if (beg < 0 || beg >= size || end < 0 || end >= size) {
      rb_raise(rb_eRangeError, "%" SZF "d%s%" SZF "d is out of range for size=%" SZF "d", beg_orig, dot, end_orig, size);
    }
  }
#else
  VALUE excl_end;

  beg = beg_orig = NUM2SSIZET(rb_funcall(range, id_beg, 0));
  if (beg < 0) {
    beg += size;
  }
  end = end_orig = NUM2SSIZET(rb_funcall(range, id_end, 0));
  if (end < 0) {
    end += size;
  }
  excl_end = rb_funcall(range, id_exclude_end, 0);
  if (RTEST(excl_end)) {
    end--;
    dot = edot;
  }
  if (beg < 0 || beg >= size || end < 0 || end >= size) {
    rb_raise(rb_eRangeError, "%" SZF "d%s%" SZF "d is out of range for size=%" SZF "d", beg_orig, dot, end_orig, size);
  }
#endif
  // Element count; a backward range with a positive step yields n <= 0 -> 0.
  n = (int)((end - beg) / step + 1);
  if (n < 0) n = 0;
  na_index_set_step(q, orig_dim, n, beg, step);
}
234
+
235
// Extract the step of an Enumerator built from a Range: range.each -> 1,
// range.step(k) -> k. Raises TypeError unless enum_obj wraps a Range with
// one of those two methods. Non-static: part of the library-internal API.
void na_parse_enumerator_step(VALUE enum_obj, VALUE* pstep) {
  int len;
  VALUE step;
  struct enumerator* e;

  if (!RB_TYPE_P(enum_obj, T_DATA)) {
    rb_raise(rb_eTypeError, "wrong argument type (not T_DATA)");
  }
  e = RENUMERATOR_PTR(enum_obj);

  if (!rb_obj_is_kind_of(e->obj, rb_cRange)) {
    rb_raise(rb_eTypeError, "not Range object");
  }

  if (e->meth == id_each) {
    step = INT2NUM(1); // plain iteration: unit step
  } else if (e->meth == id_step) {
    if (TYPE(e->args) != T_ARRAY) {
      rb_raise(rb_eArgError, "no argument for step");
    }
    len = (int)RARRAY_LEN(e->args);
    if (len != 1) {
      rb_raise(rb_eArgError, "invalid number of step argument (1 for %d)", len);
    }
    step = RARRAY_AREF(e->args, 0);
  } else {
    rb_raise(rb_eTypeError, "unknown Range method: %s", rb_id2name(e->meth));
  }
  if (pstep) *pstep = step;
}
265
+
266
+ static void na_parse_enumerator(VALUE enum_obj, int orig_dim, ssize_t size, na_index_arg_t* q) {
267
+ VALUE step;
268
+ struct enumerator* e;
269
+
270
+ if (!RB_TYPE_P(enum_obj, T_DATA)) {
271
+ rb_raise(rb_eTypeError, "wrong argument type (not T_DATA)");
272
+ }
273
+ na_parse_enumerator_step(enum_obj, &step);
274
+ e = RENUMERATOR_PTR(enum_obj);
275
+ na_parse_range(e->obj, NUM2SSIZET(step), orig_dim, size, q); // e->obj : Range Object
276
+ }
277
+
278
// Analyze *a* which is *i*-th index object and store the information to q
//
// a: a ruby object of i-th index
// size: size of i-th dimension of original NArray
// i: parse i-th index
// q: parsed information is stored to *q
static void na_index_parse_each(volatile VALUE a, ssize_t size, int i, na_index_arg_t* q) {
  switch (TYPE(a)) {

  case T_FIXNUM:
    na_index_set_scalar(q, i, size, FIX2LONG(a));
    break;

  case T_BIGNUM:
    na_index_set_scalar(q, i, size, NUM2SSIZET(a));
    break;

  case T_FLOAT:
    // NOTE(review): a Float index is converted (truncated) by NUM2SSIZET
    // rather than rejected -- confirm this is intended.
    na_index_set_scalar(q, i, size, NUM2SSIZET(a));
    break;

  case T_NIL:
  case T_TRUE:
    // nil/true select the whole dimension
    na_index_set_step(q, i, size, 0, 1);
    break;

  case T_SYMBOL:
    if (a == sym_all || a == sym_ast) {
      na_index_set_step(q, i, size, 0, 1);
    } else if (a == sym_reverse) {
      na_index_set_step(q, i, size, size - 1, -1);
    } else if (a == sym_new) {
      // insert a new dimension of size 1
      na_index_set_step(q, i, 1, 0, 1);
    } else if (a == sym_reduce || a == sym_sum || a == sym_plus) {
      // whole dimension, marked for reduction by addition
      na_index_set_step(q, i, size, 0, 1);
      q->reduce = 1;
    } else {
      rb_raise(rb_eIndexError, "invalid symbol for index");
    }
    break;

  case T_ARRAY:
    na_parse_array(a, i, size, q);
    break;

  default:
    if (rb_obj_is_kind_of(a, rb_cRange)) {
      na_parse_range(a, 1, i, size, q);
    }
#ifdef HAVE_RB_ARITHMETIC_SEQUENCE_EXTRACT
    else if (rb_obj_is_kind_of(a, rb_cArithSeq)) {
      // na_parse_arith_seq(a, i, size, q);
      na_parse_range(a, 1, i, size, q);
    }
#endif
    else if (rb_obj_is_kind_of(a, rb_cEnumerator)) {
      na_parse_enumerator(a, i, size, q);
    }
    // NArray index
    else if (NA_IsNArray(a)) {
      na_parse_narray_index(a, i, size, q);
    } else {
      rb_raise(rb_eIndexError, "not allowed type");
    }
  }
}
344
+
345
// Parse one argument of NArray#at for dimension i (of size `size`) and
// accumulate flat element positions into *idx (a 1-d cIndex NArray).
// On the first call (*idx == Qnil) the index array is created; subsequent
// calls add `position * stride` element-wise, so after processing every
// dimension *idx holds flat offsets. All index arguments must therefore
// have the same length (nary_eShapeError otherwise).
static void na_at_parse_each(volatile VALUE a, ssize_t size, int i, VALUE* idx, ssize_t stride) {
  na_index_arg_t q;
  size_t n, k;
  ssize_t* index;

  // NArray index
  if (NA_IsNArray(a)) {
    VALUE a2;
    narray_t *na, *na2;
    ssize_t* p2;
    GetNArray(a, na);
    if (NA_NDIM(na) != 1) {
      rb_raise(rb_eIndexError, "should be 1-d NArray");
    }
    n = NA_SIZE(na);
    // convert to the native index type (contiguous ssize_t data)
    a2 = nary_new(cIndex, 1, &n);
    na_store(a2, a);
    GetNArray(a2, na2);
    p2 = (ssize_t*)NA_DATA_PTR(na2);
    if (*idx == Qnil) {
      *idx = a2;
      for (k = 0; k < n; k++) {
        na_range_check(p2[k], size, i); // validate only; values already stored
      }
    } else {
      narray_t* nidx;
      GetNArray(*idx, nidx);
      index = (ssize_t*)NA_DATA_PTR(nidx);
      if (NA_SIZE(nidx) != n) {
        rb_raise(nary_eShapeError, "index array sizes mismatch");
      }
      for (k = 0; k < n; k++) {
        index[k] += na_range_check(p2[k], size, i) * stride;
      }
    }
    RB_GC_GUARD(a2);
    return;
  } else if (TYPE(a) == T_ARRAY) {
    n = RARRAY_LEN(a);
    if (*idx == Qnil) {
      *idx = nary_new(cIndex, 1, &n);
      index = (ssize_t*)na_get_pointer_for_write(*idx); // allocate memory
      for (k = 0; k < n; k++) {
        index[k] = na_range_check(NUM2SSIZET(RARRAY_AREF(a, k)), size, i);
      }
    } else {
      narray_t* nidx;
      GetNArray(*idx, nidx);
      index = (ssize_t*)NA_DATA_PTR(nidx);
      if (NA_SIZE(nidx) != n) {
        rb_raise(nary_eShapeError, "index array sizes mismatch");
      }
      for (k = 0; k < n; k++) {
        index[k] += na_range_check(NUM2SSIZET(RARRAY_AREF(a, k)), size, i) * stride;
      }
    }
    return;
  } else if (rb_obj_is_kind_of(a, rb_cRange)) {
    na_parse_range(a, 1, i, size, &q);
  }
#ifdef HAVE_RB_ARITHMETIC_SEQUENCE_EXTRACT
  else if (rb_obj_is_kind_of(a, rb_cArithSeq)) {
    na_parse_range(a, 1, i, size, &q);
  }
#endif
  else if (rb_obj_is_kind_of(a, rb_cEnumerator)) {
    na_parse_enumerator(a, i, size, &q);
  } else {
    rb_raise(rb_eIndexError, "not allowed type");
  }

  // Range/ArithSeq/Enumerator case: expand beg/step into positions.
  if (*idx == Qnil) {
    *idx = nary_new(cIndex, 1, &q.n);
    index = (ssize_t*)na_get_pointer_for_write(*idx); // allocate memory
    for (k = 0; k < q.n; k++) {
      index[k] = q.beg + q.step * k;
    }
  } else {
    narray_t* nidx;
    GetNArray(*idx, nidx);
    index = (ssize_t*)NA_DATA_PTR(nidx);
    if (NA_SIZE(nidx) != q.n) {
      rb_raise(nary_eShapeError, "index array sizes mismatch");
    }
    for (k = 0; k < q.n; k++) {
      index[k] += (q.beg + q.step * k) * stride;
    }
  }
}
434
+
435
// Parse every index argument in `args` against narray `na` into
// q[0..ndim-1]. A Qfalse entry (the rest/ellipsis marker) expands into as
// many whole-dimension selections as needed to reach ndim entries.
// i walks the arguments, j the output entries, k the original dimensions.
// Returns the product of the sizes of all multi-element result dimensions.
static size_t na_index_parse_args(VALUE args, narray_t* na, na_index_arg_t* q, int ndim) {
  int i, j, k, l, nidx;
  size_t total = 1;
  VALUE v;

  nidx = (int)RARRAY_LEN(args);

  for (i = j = k = 0; i < nidx; i++) {
    v = RARRAY_AREF(args, i);
    // rest (ellipsis) dimension
    if (v == Qfalse) {
      for (l = ndim - (nidx - 1); l > 0; l--) {
        // printf("i=%d j=%d k=%d l=%d ndim=%d nidx=%d\n",i,j,k,l,ndim,nidx);
        na_index_parse_each(Qtrue, na->shape[k], k, &q[j]);
        if (q[j].n > 1) {
          total *= q[j].n;
        }
        j++;
        k++;
      }
    }
    // new dimension (does not consume an original dimension: k not advanced)
    else if (v == sym_new) {
      na_index_parse_each(v, 1, k, &q[j]);
      j++;
    }
    // other dimension
    else {
      na_index_parse_each(v, na->shape[k], k, &q[j]);
      if (q[j].n > 1) {
        total *= q[j].n;
      }
      j++;
      k++;
    }
  }
  return total;
}
473
+
474
+ static void na_get_strides_nadata(const narray_data_t* na, ssize_t* strides, ssize_t elmsz) {
475
+ int i = na->base.ndim - 1;
476
+ strides[i] = elmsz;
477
+ for (; i > 0; i--) {
478
+ strides[i - 1] = strides[i] * na->base.shape[i];
479
+ }
480
+ }
481
+
482
// Build view na2 over plain data array na1 from parsed indices q[0..ndim-1].
// Scalar picks (n==1, step==0) are folded into na2->offset unless keep_dim;
// explicit index lists are rewritten in place from element indices to byte
// offsets, and ownership of q[i].idx transfers into na2->stridx[j].
static void na_index_aref_nadata(narray_data_t* na1, narray_view_t* na2, na_index_arg_t* q, ssize_t elmsz, int ndim,
                                 int keep_dim) {
  int i, j;
  ssize_t size, k, total = 1;
  ssize_t stride1;
  ssize_t* strides_na1;
  size_t* index;
  ssize_t beg, step;
  VALUE m;

  strides_na1 = ALLOCA_N(ssize_t, na1->base.ndim);
  na_get_strides_nadata(na1, strides_na1, elmsz);

  for (i = j = 0; i < ndim; i++) {
    stride1 = strides_na1[q[i].orig_dim];

    // numeric index -- trim dimension
    if (!keep_dim && q[i].n == 1 && q[i].step == 0) {
      beg = q[i].beg;
      na2->offset += stride1 * beg;
      continue;
    }

    na2->base.shape[j] = size = q[i].n;

    if (q[i].reduce != 0) {
      // record output dimension j in the reduce bitmask (a Ruby Integer)
      m = rb_funcall(INT2FIX(1), id_shift_left, 1, INT2FIX(j));
      na2->base.reduce = rb_funcall(m, '|', 1, na2->base.reduce);
    }

    // array index
    if (q[i].idx != NULL) {
      index = q[i].idx;
      SDX_SET_INDEX(na2->stridx[j], index);
      q[i].idx = NULL; // ownership transferred; ensure-handler must not free
      for (k = 0; k < size; k++) {
        index[k] = index[k] * stride1; // element index -> byte offset
      }
    } else {
      beg = q[i].beg;
      step = q[i].step;
      na2->offset += stride1 * beg;
      SDX_SET_STRIDE(na2->stridx[j], stride1 * step);
    }
    j++;
    total *= size;
  }
  na2->base.size = total;
}
531
+
532
// Build view na2 over existing view na1 from parsed indices q[0..ndim-1].
// Each output dimension composes the new selection (index list or beg/step)
// with na1's stridx entry for the original dimension, covering the four
// combinations index/step x index/step.
static void na_index_aref_naview(narray_view_t* na1, narray_view_t* na2, na_index_arg_t* q, ssize_t elmsz, int ndim,
                                 int keep_dim) {
  int i, j;
  ssize_t total = 1;

  for (i = j = 0; i < ndim; i++) {
    stridx_t sdx1 = na1->stridx[q[i].orig_dim];
    ssize_t size;

    // numeric index -- trim dimension
    if (!keep_dim && q[i].n == 1 && q[i].step == 0) {
      if (SDX_IS_INDEX(sdx1)) {
        na2->offset += SDX_GET_INDEX(sdx1)[q[i].beg];
      } else {
        na2->offset += SDX_GET_STRIDE(sdx1) * q[i].beg;
      }
      continue;
    }

    na2->base.shape[j] = size = q[i].n;

    if (q[i].reduce != 0) {
      // record output dimension j in the reduce bitmask (a Ruby Integer)
      VALUE m = rb_funcall(INT2FIX(1), id_shift_left, 1, INT2FIX(j));
      na2->base.reduce = rb_funcall(m, '|', 1, na2->base.reduce);
    }

    if (q[i].orig_dim >= na1->base.ndim) {
      // new dimension
      SDX_SET_STRIDE(na2->stridx[j], elmsz);
    } else if (q[i].idx != NULL && SDX_IS_INDEX(sdx1)) {
      // index <- index : compose the two index tables in place
      int k;
      size_t* index = q[i].idx;
      SDX_SET_INDEX(na2->stridx[j], index);
      q[i].idx = NULL; // ownership transferred to na2

      for (k = 0; k < size; k++) {
        index[k] = SDX_GET_INDEX(sdx1)[index[k]];
      }
    } else if (q[i].idx != NULL && SDX_IS_STRIDE(sdx1)) {
      // index <- step
      ssize_t stride1 = SDX_GET_STRIDE(sdx1);
      size_t* index = q[i].idx;
      SDX_SET_INDEX(na2->stridx[j], index);
      q[i].idx = NULL;

      if (stride1 < 0) {
        // negative stride: rebase offset to the last element and mirror
        size_t last;
        int k;
        stride1 = -stride1;
        last = na1->base.shape[q[i].orig_dim] - 1;
        if (na2->offset < last * stride1) {
          rb_raise(rb_eStandardError, "bug: negative offset");
        }
        na2->offset -= last * stride1;
        for (k = 0; k < size; k++) {
          index[k] = (last - index[k]) * stride1;
        }
      } else {
        int k;
        for (k = 0; k < size; k++) {
          index[k] = index[k] * stride1;
        }
      }
    } else if (q[i].idx == NULL && SDX_IS_INDEX(sdx1)) {
      // step <- index : materialize a new index table
      int k;
      size_t beg = q[i].beg;
      ssize_t step = q[i].step;
      size_t* index = ALLOC_N(size_t, size);
      SDX_SET_INDEX(na2->stridx[j], index);
      for (k = 0; k < size; k++) {
        index[k] = SDX_GET_INDEX(sdx1)[beg + step * k];
      }
    } else if (q[i].idx == NULL && SDX_IS_STRIDE(sdx1)) {
      // step <- step
      size_t beg = q[i].beg;
      ssize_t step = q[i].step;
      ssize_t stride1 = SDX_GET_STRIDE(sdx1);
      na2->offset += stride1 * beg;
      SDX_SET_STRIDE(na2->stridx[j], stride1 * step);
    }

    j++;
    total *= size;
  }
  na2->base.size = total;
}
620
+
621
+ static int na_ndim_new_narray(int ndim, const na_index_arg_t* q) {
622
+ int i, ndim_new = 0;
623
+ for (i = 0; i < ndim; i++) {
624
+ if (q[i].n > 1 || q[i].step != 0) {
625
+ ndim_new++;
626
+ }
627
+ }
628
+ return ndim_new;
629
+ }
630
+
631
// Argument bundle threaded through rb_ensure into na_aref_md_protected and
// na_aref_md_ensure.
typedef struct {
  VALUE args, self, store; // index-arg array, receiver, optional result buffer
  int ndim;                // number of parsed index entries (result_nd)
  na_index_arg_t* q;       // parsed index info; freed in the ensure handler
  narray_t* na1;           // unwrapped receiver
  int keep_dim;            // nonzero for #slice: scalar-indexed dims are kept
} na_aref_md_data_t;
638
+
639
+ static na_index_arg_t* na_allocate_index_args(int ndim) {
640
+ na_index_arg_t* q = ALLOC_N(na_index_arg_t, ndim);
641
+ int i;
642
+
643
+ for (i = 0; i < ndim; i++) {
644
+ q[i].idx = NULL;
645
+ }
646
+ return q;
647
+ }
648
+
649
// Body of NArray#[] / #slice executed under rb_ensure: parse the index
// arguments, build a view of self, and (when store is set) copy the view
// into store instead of returning the view itself.
static VALUE na_aref_md_protected(VALUE data_value) {
  na_aref_md_data_t* data = (na_aref_md_data_t*)(data_value);
  VALUE self = data->self;
  VALUE args = data->args;
  VALUE store = data->store;
  int ndim = data->ndim;
  na_index_arg_t* q = data->q;
  narray_t* na1 = data->na1;
  int keep_dim = data->keep_dim;

  int ndim_new;
  VALUE view;
  narray_view_t* na2;
  ssize_t elmsz;

  na_index_parse_args(args, na1, q, ndim);

  if (na_debug_flag) print_index_arg(q, ndim);

  if (keep_dim) {
    ndim_new = ndim; // #slice: scalar-indexed dimensions are kept
  } else {
    ndim_new = na_ndim_new_narray(ndim, q);
  }
  view = na_s_allocate_view(rb_obj_class(self));

  na_copy_flags(self, view);
  GetNArrayView(view, na2);

  na_alloc_shape((narray_t*)na2, ndim_new);

  na2->stridx = ZALLOC_N(stridx_t, ndim_new);

  elmsz = nary_element_stride(self);

  switch (na1->type) {
  case NARRAY_DATA_T:
  case NARRAY_FILEMAP_T:
    na_index_aref_nadata((narray_data_t*)na1, na2, q, elmsz, ndim, keep_dim);
    na2->data = self;
    break;
  case NARRAY_VIEW_T:
    // a view of a view shares the underlying data object
    na2->offset = ((narray_view_t*)na1)->offset;
    na2->data = ((narray_view_t*)na1)->data;
    na_index_aref_naview((narray_view_t*)na1, na2, q, elmsz, ndim, keep_dim);
    break;
  }
  if (store) {
    na_get_pointer_for_write(store); // allocate memory
    na_store(na_flatten_dim(store, 0), view);
    return store;
  }
  return view;
}
703
+
704
+ static VALUE na_aref_md_ensure(VALUE data_value) {
705
+ na_aref_md_data_t* data = (na_aref_md_data_t*)(data_value);
706
+ int i;
707
+ for (i = 0; i < data->ndim; i++) {
708
+ xfree(data->q[i].idx);
709
+ }
710
+ xfree(data->q);
711
+ return Qnil;
712
+ }
713
+
714
// Common implementation of #[] and #slice for the general case.
// keep_dim: nonzero for #slice. result_nd: number of result dimensions as
// computed by check_index_count. q is freed by na_aref_md_ensure even when
// index parsing raises.
static VALUE na_aref_md(int argc, VALUE* argv, VALUE self, int keep_dim, int result_nd) {
  VALUE args; // should be GC protected
  narray_t* na1;
  na_aref_md_data_t data;
  VALUE store = 0;
  VALUE idx;
  narray_t* nidx;

  GetNArray(self, na1);

  args = rb_ary_new4(argc, argv);

  if (argc == 1 && result_nd == 1) {
    // Single index argument addressing flat positions: a multi-dimensional
    // index array makes the result take the index's shape (via store).
    idx = argv[0];
    if (rb_obj_is_kind_of(idx, rb_cArray)) {
      idx = rb_apply(numo_cNArray, id_bracket, idx);
    }
    if (rb_obj_is_kind_of(idx, numo_cNArray)) {
      GetNArray(idx, nidx);
      if (NA_NDIM(nidx) > 1) {
        store = nary_new(rb_obj_class(self), NA_NDIM(nidx), NA_SHAPE(nidx));
        idx = na_flatten(idx);
        RARRAY_ASET(args, 0, idx);
      }
    }
    // flatten should be done only for narray-view with non-uniform stride.
    if (na1->ndim > 1) {
      self = na_flatten(self);
      GetNArray(self, na1);
    }
  }

  data.args = args;
  data.self = self;
  data.store = store;
  data.ndim = result_nd;
  data.q = na_allocate_index_args(result_nd);
  data.na1 = na1;
  data.keep_dim = keep_dim;

  return rb_ensure(na_aref_md_protected, (VALUE)&data, na_aref_md_ensure, (VALUE)&data);
}
756
+
757
+ /* method: [](idx1,idx2,...,idxN) */
758
+ VALUE
759
+ na_aref_main(int nidx, VALUE* idx, VALUE self, int keep_dim, int nd) {
760
+ na_index_arg_to_internal_order(nidx, idx, self);
761
+
762
+ if (nidx == 0) {
763
+ return rb_funcall(self, id_dup, 0);
764
+ }
765
+ if (nidx == 1) {
766
+ if (rb_obj_class(*idx) == numo_cBit) {
767
+ return rb_funcall(*idx, id_mask, 1, self);
768
+ }
769
+ }
770
+ return na_aref_md(nidx, idx, self, keep_dim, nd);
771
+ }
772
+
773
+ static int check_index_count(int argc, int na_ndim, int count_new, int count_rest) {
774
+ int result_nd = na_ndim + count_new;
775
+
776
+ switch (count_rest) {
777
+ case 0:
778
+ if (argc == 1 && count_new == 0) return 1;
779
+ if (argc == result_nd) return result_nd;
780
+ rb_raise(rb_eIndexError,
781
+ "# of index(=%i) should be "
782
+ "equal to ndim(=%i) or 1",
783
+ argc, na_ndim);
784
+ break;
785
+ case 1:
786
+ if (argc - 1 <= result_nd) return result_nd;
787
+ rb_raise(rb_eIndexError, "# of index(=%i) > ndim(=%i) with :rest", argc, na_ndim);
788
+ break;
789
+ default:
790
+ rb_raise(rb_eIndexError, "multiple rest-dimension is not allowed");
791
+ }
792
+ return -1;
793
+ }
794
+
795
// Fast-path probe for element access.
// When every argument is a numeric scalar, computes the element's flat byte
// position into *pos_idx and returns 0 (caller reads it directly).
// Otherwise rewrites rest/new markers in argv and returns the number of
// result dimensions for the generic na_aref_md path.
// stride: element stride in bytes, or 0 to look it up from self.
int na_get_result_dimension(VALUE self, int argc, VALUE* argv, ssize_t stride, size_t* pos_idx) {
  int i, j;
  int count_new = 0;
  int count_rest = 0;
  ssize_t x, s, m, pos, *idx;
  narray_t* na;
  narray_view_t* nv;
  stridx_t sdx;
  VALUE a;

  GetNArray(self, na);
  if (na->size == 0) {
    rb_raise(nary_eShapeError, "cannot get element of empty array");
  }
  idx = ALLOCA_N(ssize_t, argc);
  for (i = j = 0; i < argc; i++) {
    a = argv[i];
    switch (TYPE(a)) {
    case T_FIXNUM:
      idx[j++] = FIX2LONG(a);
      break;
    case T_BIGNUM:
    case T_FLOAT:
      idx[j++] = NUM2SSIZET(a);
      break;
    case T_FALSE:
    case T_SYMBOL:
      // canonicalize rest/new markers in place for the slow path
      if (a == sym_rest || a == sym_tilde || a == Qfalse) {
        argv[i] = Qfalse;
        count_rest++;
        break;
      } else if (a == sym_new || a == sym_minus) {
        argv[i] = sym_new;
        count_new++;
      }
    }
  }

  if (j != argc) {
    // at least one non-scalar argument: generic indexing path
    return check_index_count(argc, na->ndim, count_new, count_rest);
  }

  switch (na->type) {
  case NARRAY_VIEW_T:
    GetNArrayView(self, nv);
    pos = nv->offset;
    if (j == na->ndim) {
      // one scalar per dimension: sum each dimension's contribution
      for (i = j - 1; i >= 0; i--) {
        x = na_range_check(idx[i], na->shape[i], i);
        sdx = nv->stridx[i];
        if (SDX_IS_INDEX(sdx)) {
          pos += SDX_GET_INDEX(sdx)[x];
        } else {
          pos += SDX_GET_STRIDE(sdx) * x;
        }
      }
      *pos_idx = pos;
      return 0;
    }
    if (j == 1) {
      // single flat index into a multi-dimensional view
      x = na_range_check(idx[0], na->size, 0);
      for (i = na->ndim - 1; i >= 0; i--) {
        s = na->shape[i];
        m = x % s;
        x = x / s;
        sdx = nv->stridx[i];
        if (SDX_IS_INDEX(sdx)) {
          pos += SDX_GET_INDEX(sdx)[m];
        } else {
          pos += SDX_GET_STRIDE(sdx) * m;
        }
      }
      *pos_idx = pos;
      return 0;
    }
    break;
  default:
    // plain data (or filemap): contiguous C-order layout
    if (!stride) {
      stride = nary_element_stride(self);
    }
    if (j == 1) {
      x = na_range_check(idx[0], na->size, 0);
      *pos_idx = stride * x;
      return 0;
    }
    if (j == na->ndim) {
      pos = 0;
      for (i = j - 1; i >= 0; i--) {
        x = na_range_check(idx[i], na->shape[i], i);
        pos += stride * x;
        stride *= na->shape[i]; // accumulate row-major stride
      }
      *pos_idx = pos;
      return 0;
    }
  }
  rb_raise(rb_eIndexError,
           "# of index(=%i) should be "
           "equal to ndim(=%i) or 1",
           argc, na->ndim);
  return -1;
}
897
+
898
+ static int na_get_result_dimension_for_slice(VALUE self, int argc, VALUE* argv) {
899
+ int i;
900
+ int count_new = 0;
901
+ int count_rest = 0;
902
+ narray_t* na;
903
+ VALUE a;
904
+
905
+ GetNArray(self, na);
906
+ if (na->size == 0) {
907
+ rb_raise(nary_eShapeError, "cannot get element of empty array");
908
+ }
909
+ for (i = 0; i < argc; i++) {
910
+ a = argv[i];
911
+ switch (TYPE(a)) {
912
+ case T_FALSE:
913
+ case T_SYMBOL:
914
+ if (a == sym_rest || a == sym_tilde || a == Qfalse) {
915
+ argv[i] = Qfalse;
916
+ count_rest++;
917
+ } else if (a == sym_new || a == sym_minus) {
918
+ argv[i] = sym_new;
919
+ count_new++;
920
+ }
921
+ }
922
+ }
923
+
924
+ return check_index_count(argc, na->ndim, count_new, count_rest);
925
+ }
926
+
927
+ /* method: slice(idx1,idx2,...,idxN) */
928
+ static VALUE na_slice(int argc, VALUE* argv, VALUE self) {
929
+ int nd;
930
+
931
+ nd = na_get_result_dimension_for_slice(self, argc, argv);
932
+ return na_aref_main(argc, argv, self, 1, nd);
933
+ }
934
+
935
+ /*
936
+ Multi-dimensional element reference.
937
+ Returns an element at `dim0`, `dim1`, ... are Numeric indices for each dimension, or returns a NArray View as a sliced array
938
+ if `dim0`, `dim1`, ... includes other than Numeric index, e.g., Range or Array or true.
939
+ @overload [](dim0,...,dimL)
940
+ @param [Numeric,Range,Array,Numo::Int32,Numo::Int64,Numo::Bit,TrueClass,FalseClass,Symbol] dim0,...,dimL multi-dimensional
941
+ indices.
942
+ @return [Numeric,Numo::NArray] an element or NArray view.
943
+ @see #[]=
944
+ @see #at
945
+
946
+ @example
947
+ a = Numo::DFloat.new(4,5).seq
948
+ # => Numo::DFloat#shape=[4,5]
949
+ # [[0, 1, 2, 3, 4],
950
+ # [5, 6, 7, 8, 9],
951
+ # [10, 11, 12, 13, 14],
952
+ # [15, 16, 17, 18, 19]]
953
+
954
+ a[1,1]
955
+ # => 6.0
956
+
957
+ a[1..3,1]
958
+ # => Numo::DFloat#shape=[3]
959
+ # [6, 11, 16]
960
+
961
+ a[1,[1,3,4]]
962
+ # => Numo::DFloat#shape=[3]
963
+ # [6, 8, 9]
964
+
965
+ a[true,2].fill(99)
966
+ a
967
+ # => Numo::DFloat#shape=[4,5]
968
+ # [[0, 1, 99, 3, 4],
969
+ # [5, 6, 99, 8, 9],
970
+ # [10, 11, 99, 13, 14],
971
+ # [15, 16, 99, 18, 19]]
972
+ */
973
// implemented in subclasses
// Base-class stub registered for NArray#[] in Init_nary_index below;
// rb_f_notimplement raises NotImplementedError when called.
#define na_aref rb_f_notimplement
975
+
976
+ /*
977
+ Multi-dimensional element assignment.
978
+ Replace element(s) at `dim0`, `dim1`, ... .
979
+ Broadcasting mechanism is applied.
980
+ @overload []=(dim0,...,dimL,val)
981
+ @param [Numeric,Range,Array,Numo::Int32,Numo::Int64,Numo::Bit,TrueClass,FalseClass,Symbol] dim0,...,dimL multi-dimensional
982
+ indices.
983
+ @param [Numeric,Numo::NArray,Array] val Value(s) to be set to self.
984
+ @return [Numeric,Numo::NArray,Array] returns `val` (last argument).
985
+ @see #[]
986
+ @example
987
+ a = Numo::DFloat.new(3,4).seq
988
+ # => Numo::DFloat#shape=[3,4]
989
+ # [[0, 1, 2, 3],
990
+ # [4, 5, 6, 7],
991
+ # [8, 9, 10, 11]]
992
+
993
+ a[1,2]=99
994
+ a
995
+ # => Numo::DFloat#shape=[3,4]
996
+ # [[0, 1, 2, 3],
997
+ # [4, 5, 99, 7],
998
+ # [8, 9, 10, 11]]
999
+
1000
+ a[1,[0,2]] = [101,102]
1001
+ a
1002
+ # => Numo::DFloat#shape=[3,4]
1003
+ # [[0, 1, 2, 3],
1004
+ # [101, 5, 102, 7],
1005
+ # [8, 9, 10, 11]]
1006
+
1007
+ a[1,true]=99
1008
+ a
1009
+ # => Numo::DFloat#shape=[3,4]
1010
+ # [[0, 1, 2, 3],
1011
+ # [99, 99, 99, 99],
1012
+ # [8, 9, 10, 11]]
1013
+
1014
+ */
1015
// implemented in subclasses
// Base-class stub registered for NArray#[]= in Init_nary_index below;
// rb_f_notimplement raises NotImplementedError when called.
#define na_aset rb_f_notimplement
1017
+
1018
+ /*
1019
+ Multi-dimensional array indexing.
1020
+ Similar to numpy's tuple indexing, i.e., `a[[1,2,..],[3,4,..]]`
1021
+ Same as Numo::NArray#[] for one-dimensional NArray.
1022
+ @overload at(dim0,...,dimL)
1023
+ @param [Range,Array,Numo::Int32,Numo::Int64] dim0,...,dimL multi-dimensional index arrays.
1024
+ @return [Numo::NArray] one-dimensional NArray view.
1025
+ @see #[]
1026
+
1027
+ @example
1028
+ x = Numo::DFloat.new(3,3,3).seq
1029
+ # => Numo::DFloat#shape=[3,3,3]
1030
+ # [[[0, 1, 2],
1031
+ # [3, 4, 5],
1032
+ # [6, 7, 8]],
1033
+ # [[9, 10, 11],
1034
+ # [12, 13, 14],
1035
+ # [15, 16, 17]],
1036
+ # [[18, 19, 20],
1037
+ # [21, 22, 23],
1038
+ # [24, 25, 26]]]
1039
+
1040
+ x.at([0,1,2],[0,1,2],[-1,-2,-3])
1041
+ # => Numo::DFloat(view)#shape=[3]
1042
+ # [2, 13, 24]
1043
+ */
1044
// NArray#at: tuple indexing. Requires one 1-d index argument per dimension;
// builds a single flat-position index vector and selects with it.
static VALUE na_at(int argc, VALUE* argv, VALUE self) {
  int i;
  size_t n;
  ssize_t stride = 1;
  narray_t* na;
  VALUE idx = Qnil;

  na_index_arg_to_internal_order(argc, argv, self);

  GetNArray(self, na);
  if (NA_NDIM(na) != argc) {
    rb_raise(rb_eArgError, "the number of argument must be same as dimension");
  }
  // Walk dimensions from last to first, accumulating flat positions into
  // idx; stride grows into each dimension's row-major element stride.
  for (i = argc; i > 0;) {
    i--;
    n = NA_SHAPE(na)[i];
    na_at_parse_each(argv[i], n, i, &idx, stride);
    stride *= n;
  }
  // Select with the single flat index vector; result is a 1-d view.
  return na_aref_main(1, &idx, self, 1, 1);
}
1065
+
1066
// Registers the index-related methods on Numo::NArray and interns the
// symbols and method IDs used by the parsers above.
void Init_nary_index(void) {
  rb_define_method(cNArray, "slice", na_slice, -1);
  rb_define_method(cNArray, "[]", na_aref, -1);   // stub; overridden per type
  rb_define_method(cNArray, "[]=", na_aset, -1);  // stub; overridden per type
  rb_define_method(cNArray, "at", na_at, -1);

  sym_ast = ID2SYM(rb_intern("*"));
  sym_all = ID2SYM(rb_intern("all"));
  sym_minus = ID2SYM(rb_intern("-"));
  sym_new = ID2SYM(rb_intern("new"));
  sym_reverse = ID2SYM(rb_intern("reverse"));
  sym_plus = ID2SYM(rb_intern("+"));
  // sym_reduce = ID2SYM(rb_intern("reduce"));
  sym_sum = ID2SYM(rb_intern("sum"));
  sym_tilde = ID2SYM(rb_intern("~"));
  sym_rest = ID2SYM(rb_intern("rest"));
  id_beg = rb_intern("begin");
  id_end = rb_intern("end");
  id_exclude_end = rb_intern("exclude_end?");
  id_each = rb_intern("each");
  id_step = rb_intern("step");
  id_dup = rb_intern("dup");
  id_bracket = rb_intern("[]");
  id_shift_left = rb_intern("<<");
  id_mask = rb_intern("mask");
  id_where = rb_intern("where");
}