numo-narray-alt 0.9.13 → 0.10.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. checksums.yaml +4 -4
  2. data/Gemfile +1 -1
  3. data/ext/numo/narray/array.c +1 -9
  4. data/ext/numo/narray/extconf.rb +0 -11
  5. data/ext/numo/narray/index.c +5 -39
  6. data/ext/numo/narray/math.c +0 -5
  7. data/ext/numo/narray/narray.c +13 -19
  8. data/ext/numo/narray/numo/narray.h +6 -8
  9. data/ext/numo/narray/src/mh/abs.h +56 -0
  10. data/ext/numo/narray/src/mh/aref.h +28 -0
  11. data/ext/numo/narray/src/mh/arg.h +56 -0
  12. data/ext/numo/narray/src/mh/aset.h +169 -0
  13. data/ext/numo/narray/src/mh/conj.h +71 -0
  14. data/ext/numo/narray/src/mh/copysign.h +97 -0
  15. data/ext/numo/narray/src/mh/each.h +71 -0
  16. data/ext/numo/narray/src/mh/each_with_index.h +98 -0
  17. data/ext/numo/narray/src/mh/extract.h +36 -0
  18. data/ext/numo/narray/src/mh/im.h +71 -0
  19. data/ext/numo/narray/src/mh/imag.h +56 -0
  20. data/ext/numo/narray/src/mh/kahan_sum.h +39 -0
  21. data/ext/numo/narray/src/mh/map.h +126 -0
  22. data/ext/numo/narray/src/mh/map_with_index.h +76 -0
  23. data/ext/numo/narray/src/mh/median.h +85 -0
  24. data/ext/numo/narray/src/mh/modf.h +35 -0
  25. data/ext/numo/narray/src/mh/poly.h +42 -0
  26. data/ext/numo/narray/src/mh/real.h +56 -0
  27. data/ext/numo/narray/src/mh/s_cast.h +80 -0
  28. data/ext/numo/narray/src/mh/set_imag.h +60 -0
  29. data/ext/numo/narray/src/mh/set_real.h +60 -0
  30. data/ext/numo/narray/src/mh/signbit.h +42 -0
  31. data/ext/numo/narray/src/mh/sort.h +484 -0
  32. data/ext/numo/narray/src/mh/store.h +496 -0
  33. data/ext/numo/narray/src/t_bit.c +65 -195
  34. data/ext/numo/narray/src/t_dcomplex.c +244 -2216
  35. data/ext/numo/narray/src/t_dfloat.c +74 -2435
  36. data/ext/numo/narray/src/t_int16.c +225 -2015
  37. data/ext/numo/narray/src/t_int32.c +225 -2015
  38. data/ext/numo/narray/src/t_int64.c +225 -2015
  39. data/ext/numo/narray/src/t_int8.c +225 -1998
  40. data/ext/numo/narray/src/t_robject.c +114 -1413
  41. data/ext/numo/narray/src/t_scomplex.c +232 -2168
  42. data/ext/numo/narray/src/t_sfloat.c +72 -2399
  43. data/ext/numo/narray/src/t_uint16.c +225 -2015
  44. data/ext/numo/narray/src/t_uint32.c +225 -2015
  45. data/ext/numo/narray/src/t_uint64.c +225 -2015
  46. data/ext/numo/narray/src/t_uint8.c +225 -1998
  47. data/ext/numo/narray/step.c +2 -59
  48. data/numo-narray-alt.gemspec +1 -1
  49. metadata +27 -3
@@ -43,12 +43,22 @@ static ID id_to_a;
43
43
  VALUE cT;
44
44
  extern VALUE cRT;
45
45
 
46
+ #include "mh/store.h"
47
+ #include "mh/s_cast.h"
48
+ #include "mh/extract.h"
49
+ #include "mh/aref.h"
50
+ #include "mh/aset.h"
46
51
  #include "mh/coerce_cast.h"
47
52
  #include "mh/to_a.h"
48
53
  #include "mh/fill.h"
49
54
  #include "mh/format.h"
50
55
  #include "mh/format_to_a.h"
51
56
  #include "mh/inspect.h"
57
+ #include "mh/each.h"
58
+ #include "mh/map.h"
59
+ #include "mh/each_with_index.h"
60
+ #include "mh/map_with_index.h"
61
+ #include "mh/abs.h"
52
62
  #include "mh/op/add.h"
53
63
  #include "mh/op/sub.h"
54
64
  #include "mh/op/mul.h"
@@ -92,6 +102,9 @@ extern VALUE cRT;
92
102
  #include "mh/seq.h"
93
103
  #include "mh/eye.h"
94
104
  #include "mh/rand.h"
105
+ #include "mh/poly.h"
106
+ #include "mh/sort.h"
107
+ #include "mh/median.h"
95
108
  #include "mh/mean.h"
96
109
  #include "mh/var.h"
97
110
  #include "mh/stddev.h"
@@ -99,12 +112,23 @@ extern VALUE cRT;
99
112
 
100
113
  typedef u_int32_t uint32; // Type aliases for shorter notation
101
114
  // following the codebase naming convention.
115
+ DEF_NARRAY_STORE_METHOD_FUNC(uint32, numo_cUInt32)
116
+ DEF_NARRAY_S_CAST_METHOD_FUNC(uint32, numo_cUInt32)
117
+ DEF_NARRAY_EXTRACT_METHOD_FUNC(uint32)
118
+ DEF_NARRAY_AREF_METHOD_FUNC(uint32)
119
+ DEF_EXTRACT_DATA_FUNC(uint32, numo_cUInt32)
120
+ DEF_NARRAY_ASET_METHOD_FUNC(uint32)
102
121
  DEF_NARRAY_COERCE_CAST_METHOD_FUNC(uint32)
103
122
  DEF_NARRAY_TO_A_METHOD_FUNC(uint32)
104
123
  DEF_NARRAY_FILL_METHOD_FUNC(uint32)
105
124
  DEF_NARRAY_FORMAT_METHOD_FUNC(uint32)
106
125
  DEF_NARRAY_FORMAT_TO_A_METHOD_FUNC(uint32)
107
126
  DEF_NARRAY_INSPECT_METHOD_FUNC(uint32)
127
+ DEF_NARRAY_EACH_METHOD_FUNC(uint32)
128
+ DEF_NARRAY_MAP_METHOD_FUNC(uint32, numo_cUInt32)
129
+ DEF_NARRAY_EACH_WITH_INDEX_METHOD_FUNC(uint32)
130
+ DEF_NARRAY_MAP_WITH_INDEX_METHOD_FUNC(uint32, numo_cUInt32)
131
+ DEF_NARRAY_ABS_METHOD_FUNC(uint32, numo_cUInt32, uint32, numo_cUInt32)
108
132
  DEF_NARRAY_ADD_METHOD_FUNC(uint32, numo_cUInt32)
109
133
  DEF_NARRAY_SUB_METHOD_FUNC(uint32, numo_cUInt32)
110
134
  DEF_NARRAY_MUL_METHOD_FUNC(uint32, numo_cUInt32)
@@ -148,2042 +172,122 @@ DEF_NARRAY_INT_MULSUM_METHOD_FUNC(uint32, numo_cUInt32)
148
172
  DEF_NARRAY_INT_SEQ_METHOD_FUNC(uint32)
149
173
  DEF_NARRAY_EYE_METHOD_FUNC(uint32)
150
174
  DEF_NARRAY_INT_RAND_METHOD_FUNC(uint32)
151
- DEF_NARRAY_INT_MEAN_METHOD_FUNC(uint32, numo_cUInt32)
152
- DEF_NARRAY_INT_VAR_METHOD_FUNC(uint32, numo_cUInt32)
153
- DEF_NARRAY_INT_STDDEV_METHOD_FUNC(uint32, numo_cUInt32)
154
- DEF_NARRAY_INT_RMS_METHOD_FUNC(uint32, numo_cUInt32)
155
-
156
- static VALUE uint32_store(VALUE, VALUE);
157
-
158
- static size_t uint32_memsize(const void* ptr) {
159
- size_t size = sizeof(narray_data_t);
160
- const narray_data_t* na = (const narray_data_t*)ptr;
161
-
162
- assert(na->base.type == NARRAY_DATA_T);
163
-
164
- if (na->ptr != NULL) {
165
-
166
- size += na->base.size * sizeof(dtype);
167
- }
168
- if (na->base.size > 0) {
169
- if (na->base.shape != NULL && na->base.shape != &(na->base.size)) {
170
- size += sizeof(size_t) * na->base.ndim;
171
- }
172
- }
173
- return size;
174
- }
175
-
176
- static void uint32_free(void* ptr) {
177
- narray_data_t* na = (narray_data_t*)ptr;
178
-
179
- assert(na->base.type == NARRAY_DATA_T);
180
-
181
- if (na->ptr != NULL) {
182
- if (na->owned) {
183
- xfree(na->ptr);
184
- }
185
- na->ptr = NULL;
186
- }
187
- if (na->base.size > 0) {
188
- if (na->base.shape != NULL && na->base.shape != &(na->base.size)) {
189
- xfree(na->base.shape);
190
- na->base.shape = NULL;
191
- }
192
- }
193
- xfree(na);
194
- }
195
-
196
- static narray_type_info_t uint32_info = {
197
-
198
- 0, // element_bits
199
- sizeof(dtype), // element_bytes
200
- sizeof(dtype), // element_stride (in bytes)
201
-
202
- };
203
-
204
- static const rb_data_type_t uint32_data_type = {
205
- "Numo::UInt32",
206
- {
207
- 0,
208
- uint32_free,
209
- uint32_memsize,
210
- },
211
- &na_data_type,
212
- &uint32_info,
213
- RUBY_TYPED_FROZEN_SHAREABLE, // flags
214
- };
215
-
216
- static VALUE uint32_s_alloc_func(VALUE klass) {
217
- narray_data_t* na = ALLOC(narray_data_t);
218
-
219
- na->base.ndim = 0;
220
- na->base.type = NARRAY_DATA_T;
221
- na->base.flag[0] = NA_FL0_INIT;
222
- na->base.flag[1] = NA_FL1_INIT;
223
- na->base.size = 0;
224
- na->base.shape = NULL;
225
- na->base.reduce = INT2FIX(0);
226
- na->ptr = NULL;
227
- na->owned = FALSE;
228
- return TypedData_Wrap_Struct(klass, &uint32_data_type, (void*)na);
229
- }
230
-
231
- static VALUE uint32_allocate(VALUE self) {
232
- narray_t* na;
233
- char* ptr;
234
-
235
- GetNArray(self, na);
236
-
237
- switch (NA_TYPE(na)) {
238
- case NARRAY_DATA_T:
239
- ptr = NA_DATA_PTR(na);
240
- if (na->size > 0 && ptr == NULL) {
241
- ptr = xmalloc(sizeof(dtype) * na->size);
242
-
243
- NA_DATA_PTR(na) = ptr;
244
- NA_DATA_OWNED(na) = TRUE;
245
- }
246
- break;
247
- case NARRAY_VIEW_T:
248
- rb_funcall(NA_VIEW_DATA(na), rb_intern("allocate"), 0);
249
- break;
250
- case NARRAY_FILEMAP_T:
251
- // ptr = ((narray_filemap_t*)na)->ptr;
252
- // to be implemented
253
- default:
254
- rb_bug("invalid narray type : %d", NA_TYPE(na));
255
- }
256
- return self;
257
- }
258
-
259
- /*
260
- Extract an element only if self is a dimensionless NArray.
261
- @overload extract
262
- @return [Numeric,Numo::NArray]
263
- --- Extract element value as Ruby Object if self is a dimensionless NArray,
264
- otherwise returns self.
265
- */
266
- static VALUE uint32_extract(VALUE self) {
267
- volatile VALUE v;
268
- char* ptr;
269
- narray_t* na;
270
- GetNArray(self, na);
271
-
272
- if (na->ndim == 0) {
273
- ptr = na_get_pointer_for_read(self) + na_get_offset(self);
274
- v = m_extract(ptr);
275
- na_release_lock(self);
276
- return v;
277
- }
278
- return self;
279
- }
280
-
281
- static VALUE uint32_new_dim0(dtype x) {
282
- VALUE v;
283
- dtype* ptr;
284
-
285
- v = nary_new(cT, 0, NULL);
286
- ptr = (dtype*)(char*)na_get_pointer_for_write(v);
287
- *ptr = x;
288
- na_release_lock(v);
289
- return v;
290
- }
291
-
292
- static VALUE uint32_store_numeric(VALUE self, VALUE obj) {
293
- dtype x;
294
- x = m_num_to_data(obj);
295
- obj = uint32_new_dim0(x);
296
- uint32_store(self, obj);
297
- return self;
298
- }
299
-
300
- static void iter_uint32_store_bit(na_loop_t* const lp) {
301
- size_t i;
302
- char* p1;
303
- size_t p2;
304
- ssize_t s1, s2;
305
- size_t *idx1, *idx2;
306
- BIT_DIGIT *a2, x;
307
- dtype y;
308
-
309
- INIT_COUNTER(lp, i);
310
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
311
- INIT_PTR_BIT_IDX(lp, 1, a2, p2, s2, idx2);
312
- if (idx2) {
313
- if (idx1) {
314
- for (; i--;) {
315
- LOAD_BIT(a2, p2 + *idx2, x);
316
- idx2++;
317
- y = m_from_sint(x);
318
- SET_DATA_INDEX(p1, idx1, dtype, y);
319
- }
320
- } else {
321
- for (; i--;) {
322
- LOAD_BIT(a2, p2 + *idx2, x);
323
- idx2++;
324
- y = m_from_sint(x);
325
- SET_DATA_STRIDE(p1, s1, dtype, y);
326
- }
327
- }
328
- } else {
329
- if (idx1) {
330
- for (; i--;) {
331
- LOAD_BIT(a2, p2, x);
332
- p2 += s2;
333
- y = m_from_sint(x);
334
- SET_DATA_INDEX(p1, idx1, dtype, y);
335
- }
336
- } else {
337
- for (; i--;) {
338
- LOAD_BIT(a2, p2, x);
339
- p2 += s2;
340
- y = m_from_sint(x);
341
- SET_DATA_STRIDE(p1, s1, dtype, y);
342
- }
343
- }
344
- }
345
- }
346
-
347
- static VALUE uint32_store_bit(VALUE self, VALUE obj) {
348
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { Qnil, 0 } };
349
- ndfunc_t ndf = { iter_uint32_store_bit, FULL_LOOP, 2, 0, ain, 0 };
350
-
351
- na_ndloop(&ndf, 2, self, obj);
352
- return self;
353
- }
354
-
355
- static void iter_uint32_store_dfloat(na_loop_t* const lp) {
356
- size_t i, s1, s2;
357
- char *p1, *p2;
358
- size_t *idx1, *idx2;
359
- double x;
360
- dtype y;
361
-
362
- INIT_COUNTER(lp, i);
363
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
364
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
365
- if (idx2) {
366
- if (idx1) {
367
- for (; i--;) {
368
- GET_DATA_INDEX(p2, idx2, double, x);
369
- y = m_from_real(x);
370
- SET_DATA_INDEX(p1, idx1, dtype, y);
371
- }
372
- } else {
373
- for (; i--;) {
374
- GET_DATA_INDEX(p2, idx2, double, x);
375
- y = m_from_real(x);
376
- SET_DATA_STRIDE(p1, s1, dtype, y);
377
- }
378
- }
379
- } else {
380
- if (idx1) {
381
- for (; i--;) {
382
- GET_DATA_STRIDE(p2, s2, double, x);
383
- y = m_from_real(x);
384
- SET_DATA_INDEX(p1, idx1, dtype, y);
385
- }
386
- } else {
387
- for (; i--;) {
388
- GET_DATA_STRIDE(p2, s2, double, x);
389
- y = m_from_real(x);
390
- SET_DATA_STRIDE(p1, s1, dtype, y);
391
- }
392
- }
393
- }
394
- }
395
-
396
- static VALUE uint32_store_dfloat(VALUE self, VALUE obj) {
397
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { Qnil, 0 } };
398
- ndfunc_t ndf = { iter_uint32_store_dfloat, FULL_LOOP, 2, 0, ain, 0 };
399
-
400
- na_ndloop(&ndf, 2, self, obj);
401
- return self;
402
- }
403
-
404
- static void iter_uint32_store_sfloat(na_loop_t* const lp) {
405
- size_t i, s1, s2;
406
- char *p1, *p2;
407
- size_t *idx1, *idx2;
408
- float x;
409
- dtype y;
410
-
411
- INIT_COUNTER(lp, i);
412
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
413
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
414
- if (idx2) {
415
- if (idx1) {
416
- for (; i--;) {
417
- GET_DATA_INDEX(p2, idx2, float, x);
418
- y = m_from_real(x);
419
- SET_DATA_INDEX(p1, idx1, dtype, y);
420
- }
421
- } else {
422
- for (; i--;) {
423
- GET_DATA_INDEX(p2, idx2, float, x);
424
- y = m_from_real(x);
425
- SET_DATA_STRIDE(p1, s1, dtype, y);
426
- }
427
- }
428
- } else {
429
- if (idx1) {
430
- for (; i--;) {
431
- GET_DATA_STRIDE(p2, s2, float, x);
432
- y = m_from_real(x);
433
- SET_DATA_INDEX(p1, idx1, dtype, y);
434
- }
435
- } else {
436
- for (; i--;) {
437
- GET_DATA_STRIDE(p2, s2, float, x);
438
- y = m_from_real(x);
439
- SET_DATA_STRIDE(p1, s1, dtype, y);
440
- }
441
- }
442
- }
443
- }
444
-
445
- static VALUE uint32_store_sfloat(VALUE self, VALUE obj) {
446
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { Qnil, 0 } };
447
- ndfunc_t ndf = { iter_uint32_store_sfloat, FULL_LOOP, 2, 0, ain, 0 };
448
-
449
- na_ndloop(&ndf, 2, self, obj);
450
- return self;
451
- }
452
-
453
- static void iter_uint32_store_int64(na_loop_t* const lp) {
454
- size_t i, s1, s2;
455
- char *p1, *p2;
456
- size_t *idx1, *idx2;
457
- int64_t x;
458
- dtype y;
459
-
460
- INIT_COUNTER(lp, i);
461
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
462
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
463
- if (idx2) {
464
- if (idx1) {
465
- for (; i--;) {
466
- GET_DATA_INDEX(p2, idx2, int64_t, x);
467
- y = (dtype)m_from_int64(x);
468
- SET_DATA_INDEX(p1, idx1, dtype, y);
469
- }
470
- } else {
471
- for (; i--;) {
472
- GET_DATA_INDEX(p2, idx2, int64_t, x);
473
- y = (dtype)m_from_int64(x);
474
- SET_DATA_STRIDE(p1, s1, dtype, y);
475
- }
476
- }
477
- } else {
478
- if (idx1) {
479
- for (; i--;) {
480
- GET_DATA_STRIDE(p2, s2, int64_t, x);
481
- y = (dtype)m_from_int64(x);
482
- SET_DATA_INDEX(p1, idx1, dtype, y);
483
- }
484
- } else {
485
- for (; i--;) {
486
- GET_DATA_STRIDE(p2, s2, int64_t, x);
487
- y = (dtype)m_from_int64(x);
488
- SET_DATA_STRIDE(p1, s1, dtype, y);
489
- }
490
- }
491
- }
492
- }
493
-
494
- static VALUE uint32_store_int64(VALUE self, VALUE obj) {
495
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { Qnil, 0 } };
496
- ndfunc_t ndf = { iter_uint32_store_int64, FULL_LOOP, 2, 0, ain, 0 };
497
-
498
- na_ndloop(&ndf, 2, self, obj);
499
- return self;
500
- }
501
-
502
- static void iter_uint32_store_int32(na_loop_t* const lp) {
503
- size_t i, s1, s2;
504
- char *p1, *p2;
505
- size_t *idx1, *idx2;
506
- int32_t x;
507
- dtype y;
508
-
509
- INIT_COUNTER(lp, i);
510
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
511
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
512
- if (idx2) {
513
- if (idx1) {
514
- for (; i--;) {
515
- GET_DATA_INDEX(p2, idx2, int32_t, x);
516
- y = m_from_int32(x);
517
- SET_DATA_INDEX(p1, idx1, dtype, y);
518
- }
519
- } else {
520
- for (; i--;) {
521
- GET_DATA_INDEX(p2, idx2, int32_t, x);
522
- y = m_from_int32(x);
523
- SET_DATA_STRIDE(p1, s1, dtype, y);
524
- }
525
- }
526
- } else {
527
- if (idx1) {
528
- for (; i--;) {
529
- GET_DATA_STRIDE(p2, s2, int32_t, x);
530
- y = m_from_int32(x);
531
- SET_DATA_INDEX(p1, idx1, dtype, y);
532
- }
533
- } else {
534
- for (; i--;) {
535
- GET_DATA_STRIDE(p2, s2, int32_t, x);
536
- y = m_from_int32(x);
537
- SET_DATA_STRIDE(p1, s1, dtype, y);
538
- }
539
- }
540
- }
541
- }
542
-
543
- static VALUE uint32_store_int32(VALUE self, VALUE obj) {
544
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { Qnil, 0 } };
545
- ndfunc_t ndf = { iter_uint32_store_int32, FULL_LOOP, 2, 0, ain, 0 };
546
-
547
- na_ndloop(&ndf, 2, self, obj);
548
- return self;
549
- }
550
-
551
- static void iter_uint32_store_int16(na_loop_t* const lp) {
552
- size_t i, s1, s2;
553
- char *p1, *p2;
554
- size_t *idx1, *idx2;
555
- int16_t x;
556
- dtype y;
557
-
558
- INIT_COUNTER(lp, i);
559
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
560
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
561
- if (idx2) {
562
- if (idx1) {
563
- for (; i--;) {
564
- GET_DATA_INDEX(p2, idx2, int16_t, x);
565
- y = m_from_sint(x);
566
- SET_DATA_INDEX(p1, idx1, dtype, y);
567
- }
568
- } else {
569
- for (; i--;) {
570
- GET_DATA_INDEX(p2, idx2, int16_t, x);
571
- y = m_from_sint(x);
572
- SET_DATA_STRIDE(p1, s1, dtype, y);
573
- }
574
- }
575
- } else {
576
- if (idx1) {
577
- for (; i--;) {
578
- GET_DATA_STRIDE(p2, s2, int16_t, x);
579
- y = m_from_sint(x);
580
- SET_DATA_INDEX(p1, idx1, dtype, y);
581
- }
582
- } else {
583
- for (; i--;) {
584
- GET_DATA_STRIDE(p2, s2, int16_t, x);
585
- y = m_from_sint(x);
586
- SET_DATA_STRIDE(p1, s1, dtype, y);
587
- }
588
- }
589
- }
590
- }
591
-
592
- static VALUE uint32_store_int16(VALUE self, VALUE obj) {
593
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { Qnil, 0 } };
594
- ndfunc_t ndf = { iter_uint32_store_int16, FULL_LOOP, 2, 0, ain, 0 };
595
-
596
- na_ndloop(&ndf, 2, self, obj);
597
- return self;
598
- }
599
-
600
- static void iter_uint32_store_int8(na_loop_t* const lp) {
601
- size_t i, s1, s2;
602
- char *p1, *p2;
603
- size_t *idx1, *idx2;
604
- int8_t x;
605
- dtype y;
606
-
607
- INIT_COUNTER(lp, i);
608
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
609
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
610
- if (idx2) {
611
- if (idx1) {
612
- for (; i--;) {
613
- GET_DATA_INDEX(p2, idx2, int8_t, x);
614
- y = m_from_sint(x);
615
- SET_DATA_INDEX(p1, idx1, dtype, y);
616
- }
617
- } else {
618
- for (; i--;) {
619
- GET_DATA_INDEX(p2, idx2, int8_t, x);
620
- y = m_from_sint(x);
621
- SET_DATA_STRIDE(p1, s1, dtype, y);
622
- }
623
- }
624
- } else {
625
- if (idx1) {
626
- for (; i--;) {
627
- GET_DATA_STRIDE(p2, s2, int8_t, x);
628
- y = m_from_sint(x);
629
- SET_DATA_INDEX(p1, idx1, dtype, y);
630
- }
631
- } else {
632
- for (; i--;) {
633
- GET_DATA_STRIDE(p2, s2, int8_t, x);
634
- y = m_from_sint(x);
635
- SET_DATA_STRIDE(p1, s1, dtype, y);
636
- }
637
- }
638
- }
639
- }
640
-
641
- static VALUE uint32_store_int8(VALUE self, VALUE obj) {
642
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { Qnil, 0 } };
643
- ndfunc_t ndf = { iter_uint32_store_int8, FULL_LOOP, 2, 0, ain, 0 };
644
-
645
- na_ndloop(&ndf, 2, self, obj);
646
- return self;
647
- }
648
-
649
- static void iter_uint32_store_uint64(na_loop_t* const lp) {
650
- size_t i, s1, s2;
651
- char *p1, *p2;
652
- size_t *idx1, *idx2;
653
- u_int64_t x;
654
- dtype y;
655
-
656
- INIT_COUNTER(lp, i);
657
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
658
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
659
- if (idx2) {
660
- if (idx1) {
661
- for (; i--;) {
662
- GET_DATA_INDEX(p2, idx2, u_int64_t, x);
663
- y = (dtype)m_from_uint64(x);
664
- SET_DATA_INDEX(p1, idx1, dtype, y);
665
- }
666
- } else {
667
- for (; i--;) {
668
- GET_DATA_INDEX(p2, idx2, u_int64_t, x);
669
- y = (dtype)m_from_uint64(x);
670
- SET_DATA_STRIDE(p1, s1, dtype, y);
671
- }
672
- }
673
- } else {
674
- if (idx1) {
675
- for (; i--;) {
676
- GET_DATA_STRIDE(p2, s2, u_int64_t, x);
677
- y = (dtype)m_from_uint64(x);
678
- SET_DATA_INDEX(p1, idx1, dtype, y);
679
- }
680
- } else {
681
- for (; i--;) {
682
- GET_DATA_STRIDE(p2, s2, u_int64_t, x);
683
- y = (dtype)m_from_uint64(x);
684
- SET_DATA_STRIDE(p1, s1, dtype, y);
685
- }
686
- }
687
- }
688
- }
689
-
690
- static VALUE uint32_store_uint64(VALUE self, VALUE obj) {
691
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { Qnil, 0 } };
692
- ndfunc_t ndf = { iter_uint32_store_uint64, FULL_LOOP, 2, 0, ain, 0 };
693
-
694
- na_ndloop(&ndf, 2, self, obj);
695
- return self;
696
- }
697
-
698
- static void iter_uint32_store_uint32(na_loop_t* const lp) {
699
- size_t i, s1, s2;
700
- char *p1, *p2;
701
- size_t *idx1, *idx2;
702
- u_int32_t x;
703
- dtype y;
704
-
705
- INIT_COUNTER(lp, i);
706
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
707
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
708
- if (idx2) {
709
- if (idx1) {
710
- for (; i--;) {
711
- GET_DATA_INDEX(p2, idx2, u_int32_t, x);
712
- y = m_from_uint32(x);
713
- SET_DATA_INDEX(p1, idx1, dtype, y);
714
- }
715
- } else {
716
- for (; i--;) {
717
- GET_DATA_INDEX(p2, idx2, u_int32_t, x);
718
- y = m_from_uint32(x);
719
- SET_DATA_STRIDE(p1, s1, dtype, y);
720
- }
721
- }
722
- } else {
723
- if (idx1) {
724
- for (; i--;) {
725
- GET_DATA_STRIDE(p2, s2, u_int32_t, x);
726
- y = m_from_uint32(x);
727
- SET_DATA_INDEX(p1, idx1, dtype, y);
728
- }
729
- } else {
730
- for (; i--;) {
731
- GET_DATA_STRIDE(p2, s2, u_int32_t, x);
732
- y = m_from_uint32(x);
733
- SET_DATA_STRIDE(p1, s1, dtype, y);
734
- }
735
- }
736
- }
737
- }
738
-
739
- static VALUE uint32_store_uint32(VALUE self, VALUE obj) {
740
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { Qnil, 0 } };
741
- ndfunc_t ndf = { iter_uint32_store_uint32, FULL_LOOP, 2, 0, ain, 0 };
742
-
743
- na_ndloop(&ndf, 2, self, obj);
744
- return self;
745
- }
746
-
747
- static void iter_uint32_store_uint16(na_loop_t* const lp) {
748
- size_t i, s1, s2;
749
- char *p1, *p2;
750
- size_t *idx1, *idx2;
751
- u_int16_t x;
752
- dtype y;
753
-
754
- INIT_COUNTER(lp, i);
755
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
756
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
757
- if (idx2) {
758
- if (idx1) {
759
- for (; i--;) {
760
- GET_DATA_INDEX(p2, idx2, u_int16_t, x);
761
- y = m_from_sint(x);
762
- SET_DATA_INDEX(p1, idx1, dtype, y);
763
- }
764
- } else {
765
- for (; i--;) {
766
- GET_DATA_INDEX(p2, idx2, u_int16_t, x);
767
- y = m_from_sint(x);
768
- SET_DATA_STRIDE(p1, s1, dtype, y);
769
- }
770
- }
771
- } else {
772
- if (idx1) {
773
- for (; i--;) {
774
- GET_DATA_STRIDE(p2, s2, u_int16_t, x);
775
- y = m_from_sint(x);
776
- SET_DATA_INDEX(p1, idx1, dtype, y);
777
- }
778
- } else {
779
- for (; i--;) {
780
- GET_DATA_STRIDE(p2, s2, u_int16_t, x);
781
- y = m_from_sint(x);
782
- SET_DATA_STRIDE(p1, s1, dtype, y);
783
- }
784
- }
785
- }
786
- }
787
-
788
- static VALUE uint32_store_uint16(VALUE self, VALUE obj) {
789
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { Qnil, 0 } };
790
- ndfunc_t ndf = { iter_uint32_store_uint16, FULL_LOOP, 2, 0, ain, 0 };
791
-
792
- na_ndloop(&ndf, 2, self, obj);
793
- return self;
794
- }
795
-
796
- static void iter_uint32_store_uint8(na_loop_t* const lp) {
797
- size_t i, s1, s2;
798
- char *p1, *p2;
799
- size_t *idx1, *idx2;
800
- u_int8_t x;
801
- dtype y;
802
-
803
- INIT_COUNTER(lp, i);
804
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
805
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
806
- if (idx2) {
807
- if (idx1) {
808
- for (; i--;) {
809
- GET_DATA_INDEX(p2, idx2, u_int8_t, x);
810
- y = m_from_sint(x);
811
- SET_DATA_INDEX(p1, idx1, dtype, y);
812
- }
813
- } else {
814
- for (; i--;) {
815
- GET_DATA_INDEX(p2, idx2, u_int8_t, x);
816
- y = m_from_sint(x);
817
- SET_DATA_STRIDE(p1, s1, dtype, y);
818
- }
819
- }
820
- } else {
821
- if (idx1) {
822
- for (; i--;) {
823
- GET_DATA_STRIDE(p2, s2, u_int8_t, x);
824
- y = m_from_sint(x);
825
- SET_DATA_INDEX(p1, idx1, dtype, y);
826
- }
827
- } else {
828
- for (; i--;) {
829
- GET_DATA_STRIDE(p2, s2, u_int8_t, x);
830
- y = m_from_sint(x);
831
- SET_DATA_STRIDE(p1, s1, dtype, y);
832
- }
833
- }
834
- }
835
- }
836
-
837
- static VALUE uint32_store_uint8(VALUE self, VALUE obj) {
838
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { Qnil, 0 } };
839
- ndfunc_t ndf = { iter_uint32_store_uint8, FULL_LOOP, 2, 0, ain, 0 };
840
-
841
- na_ndloop(&ndf, 2, self, obj);
842
- return self;
843
- }
844
-
845
- static void iter_uint32_store_robject(na_loop_t* const lp) {
846
- size_t i, s1, s2;
847
- char *p1, *p2;
848
- size_t *idx1, *idx2;
849
- VALUE x;
850
- dtype y;
851
-
852
- INIT_COUNTER(lp, i);
853
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
854
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
855
- if (idx2) {
856
- if (idx1) {
857
- for (; i--;) {
858
- GET_DATA_INDEX(p2, idx2, VALUE, x);
859
- y = m_num_to_data(x);
860
- SET_DATA_INDEX(p1, idx1, dtype, y);
861
- }
862
- } else {
863
- for (; i--;) {
864
- GET_DATA_INDEX(p2, idx2, VALUE, x);
865
- y = m_num_to_data(x);
866
- SET_DATA_STRIDE(p1, s1, dtype, y);
867
- }
868
- }
869
- } else {
870
- if (idx1) {
871
- for (; i--;) {
872
- GET_DATA_STRIDE(p2, s2, VALUE, x);
873
- y = m_num_to_data(x);
874
- SET_DATA_INDEX(p1, idx1, dtype, y);
875
- }
876
- } else {
877
- for (; i--;) {
878
- GET_DATA_STRIDE(p2, s2, VALUE, x);
879
- y = m_num_to_data(x);
880
- SET_DATA_STRIDE(p1, s1, dtype, y);
881
- }
882
- }
883
- }
884
- }
885
-
886
- static VALUE uint32_store_robject(VALUE self, VALUE obj) {
887
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { Qnil, 0 } };
888
- ndfunc_t ndf = { iter_uint32_store_robject, FULL_LOOP, 2, 0, ain, 0 };
889
-
890
- na_ndloop(&ndf, 2, self, obj);
891
- return self;
892
- }
893
-
894
- static void iter_uint32_store_array(na_loop_t* const lp) {
895
- size_t i, n;
896
- size_t i1, n1;
897
- VALUE v1, *ptr;
898
- char* p1;
899
- size_t s1, *idx1;
900
- VALUE x;
901
- double y;
902
- dtype z;
903
- size_t len, c;
904
- double beg, step;
905
-
906
- INIT_COUNTER(lp, n);
907
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
908
- v1 = lp->args[1].value;
909
- i = 0;
910
-
911
- if (lp->args[1].ptr) {
912
- if (v1 == Qtrue) {
913
- iter_uint32_store_uint32(lp);
914
- i = lp->args[1].shape[0];
915
- if (idx1) {
916
- idx1 += i;
917
- } else {
918
- p1 += s1 * i;
919
- }
920
- }
921
- goto loop_end;
922
- }
923
-
924
- ptr = &v1;
925
-
926
- switch (TYPE(v1)) {
927
- case T_ARRAY:
928
- n1 = RARRAY_LEN(v1);
929
- ptr = RARRAY_PTR(v1);
930
- break;
931
- case T_NIL:
932
- n1 = 0;
933
- break;
934
- default:
935
- n1 = 1;
936
- }
937
-
938
- if (idx1) {
939
- for (i = i1 = 0; i1 < n1 && i < n; i++, i1++) {
940
- x = ptr[i1];
941
- if (rb_obj_is_kind_of(x, rb_cRange)
942
- #ifdef HAVE_RB_ARITHMETIC_SEQUENCE_EXTRACT
943
- || rb_obj_is_kind_of(x, rb_cArithSeq)
944
- #else
945
- || rb_obj_is_kind_of(x, rb_cEnumerator)
946
- #endif
947
- ) {
948
- nary_step_sequence(x, &len, &beg, &step);
949
- for (c = 0; c < len && i < n; c++, i++) {
950
- y = beg + step * c;
951
- z = m_from_double(y);
952
- SET_DATA_INDEX(p1, idx1, dtype, z);
953
- }
954
- } else if (TYPE(x) != T_ARRAY) {
955
- z = m_num_to_data(x);
956
- SET_DATA_INDEX(p1, idx1, dtype, z);
957
- }
958
- }
959
- } else {
960
- for (i = i1 = 0; i1 < n1 && i < n; i++, i1++) {
961
- x = ptr[i1];
962
- if (rb_obj_is_kind_of(x, rb_cRange)
963
- #ifdef HAVE_RB_ARITHMETIC_SEQUENCE_EXTRACT
964
- || rb_obj_is_kind_of(x, rb_cArithSeq)
965
- #else
966
- || rb_obj_is_kind_of(x, rb_cEnumerator)
967
- #endif
968
- ) {
969
- nary_step_sequence(x, &len, &beg, &step);
970
- for (c = 0; c < len && i < n; c++, i++) {
971
- y = beg + step * c;
972
- z = m_from_double(y);
973
- SET_DATA_STRIDE(p1, s1, dtype, z);
974
- }
975
- } else if (TYPE(x) != T_ARRAY) {
976
- z = m_num_to_data(x);
977
- SET_DATA_STRIDE(p1, s1, dtype, z);
978
- }
979
- }
980
- }
981
-
982
- loop_end:
983
- z = m_zero;
984
- if (idx1) {
985
- for (; i < n; i++) {
986
- SET_DATA_INDEX(p1, idx1, dtype, z);
987
- }
988
- } else {
989
- for (; i < n; i++) {
990
- SET_DATA_STRIDE(p1, s1, dtype, z);
991
- }
992
- }
993
- }
994
-
995
- static VALUE uint32_store_array(VALUE self, VALUE rary) {
996
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { rb_cArray, 0 } };
997
- ndfunc_t ndf = { iter_uint32_store_array, FULL_LOOP, 2, 0, ain, 0 };
998
-
999
- na_ndloop_store_rarray(&ndf, self, rary);
1000
- return self;
1001
- }
1002
-
1003
- /*
1004
- Store elements to Numo::UInt32 from other.
1005
- @overload store(other)
1006
- @param [Object] other
1007
- @return [Numo::UInt32] self
1008
- */
1009
- static VALUE uint32_store(VALUE self, VALUE obj) {
1010
- VALUE r, klass;
1011
-
1012
- klass = rb_obj_class(obj);
1013
-
1014
- if (klass == numo_cUInt32) {
1015
- uint32_store_uint32(self, obj);
1016
- return self;
1017
- }
1018
-
1019
- if (IS_INTEGER_CLASS(klass) || klass == rb_cFloat || klass == rb_cComplex) {
1020
- uint32_store_numeric(self, obj);
1021
- return self;
1022
- }
1023
-
1024
- if (klass == numo_cBit) {
1025
- uint32_store_bit(self, obj);
1026
- return self;
1027
- }
1028
-
1029
- if (klass == numo_cDFloat) {
1030
- uint32_store_dfloat(self, obj);
1031
- return self;
1032
- }
1033
-
1034
- if (klass == numo_cSFloat) {
1035
- uint32_store_sfloat(self, obj);
1036
- return self;
1037
- }
1038
-
1039
- if (klass == numo_cInt64) {
1040
- uint32_store_int64(self, obj);
1041
- return self;
1042
- }
1043
-
1044
- if (klass == numo_cInt32) {
1045
- uint32_store_int32(self, obj);
1046
- return self;
1047
- }
1048
-
1049
- if (klass == numo_cInt16) {
1050
- uint32_store_int16(self, obj);
1051
- return self;
1052
- }
1053
-
1054
- if (klass == numo_cInt8) {
1055
- uint32_store_int8(self, obj);
1056
- return self;
1057
- }
1058
-
1059
- if (klass == numo_cUInt64) {
1060
- uint32_store_uint64(self, obj);
1061
- return self;
1062
- }
1063
-
1064
- if (klass == numo_cUInt16) {
1065
- uint32_store_uint16(self, obj);
1066
- return self;
1067
- }
1068
-
1069
- if (klass == numo_cUInt8) {
1070
- uint32_store_uint8(self, obj);
1071
- return self;
1072
- }
1073
-
1074
- if (klass == numo_cRObject) {
1075
- uint32_store_robject(self, obj);
1076
- return self;
1077
- }
1078
-
1079
- if (klass == rb_cArray) {
1080
- uint32_store_array(self, obj);
1081
- return self;
1082
- }
1083
-
1084
- if (IsNArray(obj)) {
1085
- r = rb_funcall(obj, rb_intern("coerce_cast"), 1, cT);
1086
- if (rb_obj_class(r) == cT) {
1087
- uint32_store(self, r);
1088
- return self;
1089
- }
1090
- }
1091
-
1092
- rb_raise(
1093
- nary_eCastError, "unknown conversion from %s to %s", rb_class2name(rb_obj_class(obj)),
1094
- rb_class2name(rb_obj_class(self))
1095
- );
1096
-
1097
- return self;
1098
- }
1099
-
1100
- /*
1101
- Convert a data value of obj (with a single element) to dtype.
1102
- */
1103
- static dtype uint32_extract_data(VALUE obj) {
1104
- narray_t* na;
1105
- dtype x;
1106
- char* ptr;
1107
- size_t pos;
1108
- VALUE r, klass;
1109
-
1110
- if (IsNArray(obj)) {
1111
- GetNArray(obj, na);
1112
- if (na->size != 1) {
1113
- rb_raise(nary_eShapeError, "narray size should be 1");
1114
- }
1115
- klass = rb_obj_class(obj);
1116
- ptr = na_get_pointer_for_read(obj);
1117
- pos = na_get_offset(obj);
1118
-
1119
- if (klass == numo_cUInt32) {
1120
- x = m_from_uint32(*(u_int32_t*)(ptr + pos));
1121
- return x;
1122
- }
1123
-
1124
- if (klass == numo_cBit) {
1125
- {
1126
- BIT_DIGIT b;
1127
- LOAD_BIT(ptr, pos, b);
1128
- x = m_from_sint(b);
1129
- };
1130
- return x;
1131
- }
1132
-
1133
- if (klass == numo_cDFloat) {
1134
- x = m_from_real(*(double*)(ptr + pos));
1135
- return x;
1136
- }
1137
-
1138
- if (klass == numo_cSFloat) {
1139
- x = m_from_real(*(float*)(ptr + pos));
1140
- return x;
1141
- }
1142
-
1143
- if (klass == numo_cInt64) {
1144
- x = (dtype)m_from_int64(*(int64_t*)(ptr + pos));
1145
- return x;
1146
- }
1147
-
1148
- if (klass == numo_cInt32) {
1149
- x = m_from_int32(*(int32_t*)(ptr + pos));
1150
- return x;
1151
- }
1152
-
1153
- if (klass == numo_cInt16) {
1154
- x = m_from_sint(*(int16_t*)(ptr + pos));
1155
- return x;
1156
- }
1157
-
1158
- if (klass == numo_cInt8) {
1159
- x = m_from_sint(*(int8_t*)(ptr + pos));
1160
- return x;
1161
- }
1162
-
1163
- if (klass == numo_cUInt64) {
1164
- x = (dtype)m_from_uint64(*(u_int64_t*)(ptr + pos));
1165
- return x;
1166
- }
1167
-
1168
- if (klass == numo_cUInt16) {
1169
- x = m_from_sint(*(u_int16_t*)(ptr + pos));
1170
- return x;
1171
- }
1172
-
1173
- if (klass == numo_cUInt8) {
1174
- x = m_from_sint(*(u_int8_t*)(ptr + pos));
1175
- return x;
1176
- }
1177
-
1178
- if (klass == numo_cRObject) {
1179
- x = m_num_to_data(*(VALUE*)(ptr + pos));
1180
- return x;
1181
- }
1182
-
1183
- // coerce
1184
- r = rb_funcall(obj, rb_intern("coerce_cast"), 1, cT);
1185
- if (rb_obj_class(r) == cT) {
1186
- return uint32_extract_data(r);
1187
- }
1188
-
1189
- rb_raise(
1190
- nary_eCastError, "unknown conversion from %s to %s", rb_class2name(rb_obj_class(obj)),
1191
- rb_class2name(cT)
1192
- );
1193
- }
1194
- if (TYPE(obj) == T_ARRAY) {
1195
- if (RARRAY_LEN(obj) != 1) {
1196
- rb_raise(nary_eShapeError, "array size should be 1");
1197
- }
1198
- return m_num_to_data(RARRAY_AREF(obj, 0));
1199
- }
1200
- return m_num_to_data(obj);
1201
- }
1202
-
1203
- static VALUE uint32_cast_array(VALUE rary) {
1204
- VALUE nary;
1205
- narray_t* na;
1206
-
1207
- nary = na_s_new_like(cT, rary);
1208
- GetNArray(nary, na);
1209
- if (na->size > 0) {
1210
- uint32_store_array(nary, rary);
1211
- }
1212
- return nary;
1213
- }
1214
-
1215
- /*
1216
- Cast object to Numo::UInt32.
1217
- @overload [](elements)
1218
- @overload cast(array)
1219
- @param [Numeric,Array] elements
1220
- @param [Array] array
1221
- @return [Numo::UInt32]
1222
- */
1223
- static VALUE uint32_s_cast(VALUE type, VALUE obj) {
1224
- VALUE v;
1225
- narray_t* na;
1226
- dtype x;
1227
-
1228
- if (rb_obj_class(obj) == cT) {
1229
- return obj;
1230
- }
1231
- if (RTEST(rb_obj_is_kind_of(obj, rb_cNumeric))) {
1232
- x = m_num_to_data(obj);
1233
- return uint32_new_dim0(x);
1234
- }
1235
- if (RTEST(rb_obj_is_kind_of(obj, rb_cArray))) {
1236
- return uint32_cast_array(obj);
1237
- }
1238
- if (IsNArray(obj)) {
1239
- GetNArray(obj, na);
1240
- v = nary_new(cT, NA_NDIM(na), NA_SHAPE(na));
1241
- if (NA_SIZE(na) > 0) {
1242
- uint32_store(v, obj);
1243
- }
1244
- return v;
1245
- }
1246
- if (rb_respond_to(obj, id_to_a)) {
1247
- obj = rb_funcall(obj, id_to_a, 0);
1248
- if (TYPE(obj) != T_ARRAY) {
1249
- rb_raise(rb_eTypeError, "`to_a' did not return Array");
1250
- }
1251
- return uint32_cast_array(obj);
1252
- }
1253
-
1254
- rb_raise(nary_eCastError, "cannot cast to %s", rb_class2name(type));
1255
- return Qnil;
1256
- }
1257
-
1258
- /*
1259
- Multi-dimensional element reference.
1260
- @overload [](dim0,...,dimL)
1261
- @param [Numeric,Range,Array,Numo::Int32,Numo::Int64,Numo::Bit,TrueClass,FalseClass,Symbol]
1262
- dim0,...,dimL multi-dimensional indices.
1263
- @return [Numeric,Numo::UInt32] an element or NArray view.
1264
- @see Numo::NArray#[]
1265
- @see #[]=
1266
- */
1267
- static VALUE uint32_aref(int argc, VALUE* argv, VALUE self) {
1268
- int nd;
1269
- size_t pos;
1270
- char* ptr;
1271
-
1272
- nd = na_get_result_dimension(self, argc, argv, sizeof(dtype), &pos);
1273
- if (nd) {
1274
- return na_aref_main(argc, argv, self, 0, nd);
1275
- } else {
1276
- ptr = na_get_pointer_for_read(self) + pos;
1277
- return m_extract(ptr);
1278
- }
1279
- }
1280
-
1281
- /*
1282
- Multi-dimensional element assignment.
1283
- @overload []=(dim0,...,dimL,val)
1284
- @param [Numeric,Range,Array,Numo::Int32,Numo::Int64,Numo::Bit,TrueClass,FalseClass,Symbol]
1285
- dim0,...,dimL multi-dimensional indices.
1286
- @param [Numeric,Numo::NArray,Array] val Value(s) to be set to self.
1287
- @return [Numeric,Numo::NArray,Array] returns `val` (last argument).
1288
- @see Numo::NArray#[]=
1289
- @see #[]
1290
- */
1291
- static VALUE uint32_aset(int argc, VALUE* argv, VALUE self) {
1292
- int nd;
1293
- size_t pos;
1294
- char* ptr;
1295
- VALUE a;
1296
- dtype x;
1297
-
1298
- argc--;
1299
- if (argc == 0) {
1300
- uint32_store(self, argv[argc]);
1301
- } else {
1302
- nd = na_get_result_dimension(self, argc, argv, sizeof(dtype), &pos);
1303
- if (nd) {
1304
- a = na_aref_main(argc, argv, self, 0, nd);
1305
- uint32_store(a, argv[argc]);
1306
- } else {
1307
- x = uint32_extract_data(argv[argc]);
1308
- ptr = na_get_pointer_for_read_write(self) + pos;
1309
- *(dtype*)ptr = x;
1310
- }
1311
- }
1312
- return argv[argc];
1313
- }
1314
-
1315
- static void iter_uint32_each(na_loop_t* const lp) {
1316
- size_t i, s1;
1317
- char* p1;
1318
- size_t* idx1;
1319
- dtype x;
1320
- VALUE y;
1321
-
1322
- INIT_COUNTER(lp, i);
1323
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
1324
- if (idx1) {
1325
- for (; i--;) {
1326
- GET_DATA_INDEX(p1, idx1, dtype, x);
1327
- y = m_data_to_num(x);
1328
- rb_yield(y);
1329
- }
1330
- } else {
1331
- for (; i--;) {
1332
- GET_DATA_STRIDE(p1, s1, dtype, x);
1333
- y = m_data_to_num(x);
1334
- rb_yield(y);
1335
- }
1336
- }
1337
- }
1338
-
1339
- /*
1340
- Calls the given block once for each element in self,
1341
- passing that element as a parameter.
1342
- @overload each
1343
- @return [Numo::NArray] self
1344
- For a block `{|x| ... }`,
1345
- @yieldparam [Numeric] x an element of NArray.
1346
- @see #each_with_index
1347
- @see #map
1348
- */
1349
- static VALUE uint32_each(VALUE self) {
1350
- ndfunc_arg_in_t ain[1] = { { Qnil, 0 } };
1351
- ndfunc_t ndf = { iter_uint32_each, FULL_LOOP_NIP, 1, 0, ain, 0 };
1352
-
1353
- na_ndloop(&ndf, 1, self);
1354
- return self;
1355
- }
1356
-
1357
- static void iter_uint32_map(na_loop_t* const lp) {
1358
- size_t i, n;
1359
- char *p1, *p2;
1360
- ssize_t s1, s2;
1361
- size_t *idx1, *idx2;
1362
- dtype x;
1363
-
1364
- INIT_COUNTER(lp, n);
1365
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
1366
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
1367
-
1368
- if (idx1) {
1369
- if (idx2) {
1370
- for (i = 0; i < n; i++) {
1371
- GET_DATA_INDEX(p1, idx1, dtype, x);
1372
- x = m_map(x);
1373
- SET_DATA_INDEX(p2, idx2, dtype, x);
1374
- }
1375
- } else {
1376
- for (i = 0; i < n; i++) {
1377
- GET_DATA_INDEX(p1, idx1, dtype, x);
1378
- x = m_map(x);
1379
- SET_DATA_STRIDE(p2, s2, dtype, x);
1380
- }
1381
- }
1382
- } else {
1383
- if (idx2) {
1384
- for (i = 0; i < n; i++) {
1385
- GET_DATA_STRIDE(p1, s1, dtype, x);
1386
- x = m_map(x);
1387
- SET_DATA_INDEX(p2, idx2, dtype, x);
1388
- }
1389
- } else {
1390
- //
1391
- if (is_aligned(p1, sizeof(dtype)) && is_aligned(p2, sizeof(dtype))) {
1392
- if (s1 == sizeof(dtype) && s2 == sizeof(dtype)) {
1393
- for (i = 0; i < n; i++) {
1394
- ((dtype*)p2)[i] = m_map(((dtype*)p1)[i]);
1395
- }
1396
- return;
1397
- }
1398
- if (is_aligned_step(s1, sizeof(dtype)) && is_aligned_step(s2, sizeof(dtype))) {
1399
- //
1400
- for (i = 0; i < n; i++) {
1401
- *(dtype*)p2 = m_map(*(dtype*)p1);
1402
- p1 += s1;
1403
- p2 += s2;
1404
- }
1405
- return;
1406
- //
1407
- }
1408
- }
1409
- for (i = 0; i < n; i++) {
1410
- GET_DATA_STRIDE(p1, s1, dtype, x);
1411
- x = m_map(x);
1412
- SET_DATA_STRIDE(p2, s2, dtype, x);
1413
- }
1414
- //
1415
- }
1416
- }
1417
- }
1418
-
1419
- /*
1420
- Unary map.
1421
- @overload map
1422
- @return [Numo::UInt32] map of self.
1423
- */
1424
- static VALUE uint32_map(VALUE self) {
1425
- ndfunc_arg_in_t ain[1] = { { cT, 0 } };
1426
- ndfunc_arg_out_t aout[1] = { { cT, 0 } };
1427
- ndfunc_t ndf = { iter_uint32_map, FULL_LOOP, 1, 1, ain, aout };
1428
-
1429
- return na_ndloop(&ndf, 1, self);
1430
- }
1431
-
1432
- static inline void yield_each_with_index(dtype x, size_t* c, VALUE* a, int nd, int md) {
1433
- int j;
1434
-
1435
- a[0] = m_data_to_num(x);
1436
- for (j = 0; j <= nd; j++) {
1437
- a[j + 1] = SIZET2NUM(c[j]);
1438
- }
1439
- rb_yield(rb_ary_new4(md, a));
1440
- }
1441
-
1442
- static void iter_uint32_each_with_index(na_loop_t* const lp) {
1443
- size_t i, s1;
1444
- char* p1;
1445
- size_t* idx1;
1446
- dtype x;
1447
- VALUE* a;
1448
- size_t* c;
1449
- int nd, md;
1450
-
1451
- c = (size_t*)(lp->opt_ptr);
1452
- nd = lp->ndim;
1453
- if (nd > 0) {
1454
- nd--;
1455
- }
1456
- md = nd + 2;
1457
- a = ALLOCA_N(VALUE, md);
1458
-
1459
- INIT_COUNTER(lp, i);
1460
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
1461
- c[nd] = 0;
1462
- if (idx1) {
1463
- for (; i--;) {
1464
- GET_DATA_INDEX(p1, idx1, dtype, x);
1465
- yield_each_with_index(x, c, a, nd, md);
1466
- c[nd]++;
1467
- }
1468
- } else {
1469
- for (; i--;) {
1470
- GET_DATA_STRIDE(p1, s1, dtype, x);
1471
- yield_each_with_index(x, c, a, nd, md);
1472
- c[nd]++;
1473
- }
1474
- }
1475
- }
1476
-
1477
- /*
1478
- Invokes the given block once for each element of self,
1479
- passing that element and indices along each axis as parameters.
1480
- @overload each_with_index
1481
- For a block `{|x,i,j,...| ... }`,
1482
- @yieldparam [Numeric] x an element
1483
- @yieldparam [Integer] i,j,... multitimensional indices
1484
- @return [Numo::NArray] self
1485
- @see #each
1486
- @see #map_with_index
1487
- */
1488
- static VALUE uint32_each_with_index(VALUE self) {
1489
- ndfunc_arg_in_t ain[1] = { { Qnil, 0 } };
1490
- ndfunc_t ndf = { iter_uint32_each_with_index, FULL_LOOP_NIP, 1, 0, ain, 0 };
1491
-
1492
- na_ndloop_with_index(&ndf, 1, self);
1493
- return self;
1494
- }
1495
-
1496
- static inline dtype yield_map_with_index(dtype x, size_t* c, VALUE* a, int nd, int md) {
1497
- int j;
1498
- VALUE y;
1499
-
1500
- a[0] = m_data_to_num(x);
1501
- for (j = 0; j <= nd; j++) {
1502
- a[j + 1] = SIZET2NUM(c[j]);
1503
- }
1504
- y = rb_yield(rb_ary_new4(md, a));
1505
- return m_num_to_data(y);
1506
- }
1507
-
1508
- static void iter_uint32_map_with_index(na_loop_t* const lp) {
1509
- size_t i;
1510
- char *p1, *p2;
1511
- ssize_t s1, s2;
1512
- size_t *idx1, *idx2;
1513
- dtype x;
1514
- VALUE* a;
1515
- size_t* c;
1516
- int nd, md;
1517
-
1518
- c = (size_t*)(lp->opt_ptr);
1519
- nd = lp->ndim;
1520
- if (nd > 0) {
1521
- nd--;
1522
- }
1523
- md = nd + 2;
1524
- a = ALLOCA_N(VALUE, md);
1525
-
1526
- INIT_COUNTER(lp, i);
1527
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
1528
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
1529
-
1530
- c[nd] = 0;
1531
- if (idx1) {
1532
- if (idx2) {
1533
- for (; i--;) {
1534
- GET_DATA_INDEX(p1, idx1, dtype, x);
1535
- x = yield_map_with_index(x, c, a, nd, md);
1536
- SET_DATA_INDEX(p2, idx2, dtype, x);
1537
- c[nd]++;
1538
- }
1539
- } else {
1540
- for (; i--;) {
1541
- GET_DATA_INDEX(p1, idx1, dtype, x);
1542
- x = yield_map_with_index(x, c, a, nd, md);
1543
- SET_DATA_STRIDE(p2, s2, dtype, x);
1544
- c[nd]++;
1545
- }
1546
- }
1547
- } else {
1548
- if (idx2) {
1549
- for (; i--;) {
1550
- GET_DATA_STRIDE(p1, s1, dtype, x);
1551
- x = yield_map_with_index(x, c, a, nd, md);
1552
- SET_DATA_INDEX(p2, idx2, dtype, x);
1553
- c[nd]++;
1554
- }
1555
- } else {
1556
- for (; i--;) {
1557
- GET_DATA_STRIDE(p1, s1, dtype, x);
1558
- x = yield_map_with_index(x, c, a, nd, md);
1559
- SET_DATA_STRIDE(p2, s2, dtype, x);
1560
- c[nd]++;
1561
- }
1562
- }
1563
- }
1564
- }
1565
-
1566
- /*
1567
- Invokes the given block once for each element of self,
1568
- passing that element and indices along each axis as parameters.
1569
- Creates a new NArray containing the values returned by the block.
1570
- Inplace option is allowed, i.e., `nary.inplace.map` overwrites `nary`.
1571
- @overload map_with_index
1572
- For a block `{|x,i,j,...| ... }`,
1573
- @yieldparam [Numeric] x an element
1574
- @yieldparam [Integer] i,j,... multitimensional indices
1575
- @return [Numo::NArray] mapped array
1576
- @see #map
1577
- @see #each_with_index
1578
- */
1579
- static VALUE uint32_map_with_index(VALUE self) {
1580
- ndfunc_arg_in_t ain[1] = { { Qnil, 0 } };
1581
- ndfunc_arg_out_t aout[1] = { { cT, 0 } };
1582
- ndfunc_t ndf = { iter_uint32_map_with_index, FULL_LOOP, 1, 1, ain, aout };
1583
-
1584
- return na_ndloop_with_index(&ndf, 1, self);
1585
- }
1586
-
1587
- static void iter_uint32_abs(na_loop_t* const lp) {
1588
- size_t i;
1589
- char *p1, *p2;
1590
- ssize_t s1, s2;
1591
- size_t *idx1, *idx2;
1592
- dtype x;
1593
- rtype y;
1594
- INIT_COUNTER(lp, i);
1595
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
1596
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
1597
- if (idx1) {
1598
- if (idx2) {
1599
- for (; i--;) {
1600
- GET_DATA_INDEX(p1, idx1, dtype, x);
1601
- y = m_abs(x);
1602
- SET_DATA_INDEX(p2, idx2, rtype, y);
1603
- }
1604
- } else {
1605
- for (; i--;) {
1606
- GET_DATA_INDEX(p1, idx1, dtype, x);
1607
- y = m_abs(x);
1608
- SET_DATA_STRIDE(p2, s2, rtype, y);
1609
- }
1610
- }
1611
- } else {
1612
- if (idx2) {
1613
- for (; i--;) {
1614
- GET_DATA_STRIDE(p1, s1, dtype, x);
1615
- y = m_abs(x);
1616
- SET_DATA_INDEX(p2, idx2, rtype, y);
1617
- }
1618
- } else {
1619
- for (; i--;) {
1620
- GET_DATA_STRIDE(p1, s1, dtype, x);
1621
- y = m_abs(x);
1622
- SET_DATA_STRIDE(p2, s2, rtype, y);
1623
- }
1624
- }
1625
- }
1626
- }
1627
-
1628
- /*
1629
- abs of self.
1630
- @overload abs
1631
- @return [Numo::UInt32] abs of self.
1632
- */
1633
- static VALUE uint32_abs(VALUE self) {
1634
- ndfunc_arg_in_t ain[1] = { { cT, 0 } };
1635
- ndfunc_arg_out_t aout[1] = { { cRT, 0 } };
1636
- ndfunc_t ndf = { iter_uint32_abs, FULL_LOOP, 1, 1, ain, aout };
1637
-
1638
- return na_ndloop(&ndf, 1, self);
1639
- }
1640
-
1641
- static void iter_uint32_poly(na_loop_t* const lp) {
1642
- size_t i;
1643
- dtype x, y, a;
1644
-
1645
- x = *(dtype*)(lp->args[0].ptr + lp->args[0].iter[0].pos);
1646
- i = lp->narg - 2;
1647
- y = *(dtype*)(lp->args[i].ptr + lp->args[i].iter[0].pos);
1648
- for (; --i;) {
1649
- y = m_mul(x, y);
1650
- a = *(dtype*)(lp->args[i].ptr + lp->args[i].iter[0].pos);
1651
- y = m_add(y, a);
1652
- }
1653
- i = lp->narg - 1;
1654
- *(dtype*)(lp->args[i].ptr + lp->args[i].iter[0].pos) = y;
1655
- }
1656
-
1657
- /*
1658
- Calculate polynomial.
1659
- `x.poly(a0,a1,a2,...,an) = a0 + a1*x + a2*x**2 + ... + an*x**n`
1660
- @overload poly a0, a1, ..., an
1661
- @param [Numo::NArray,Numeric] a0,a1,...,an
1662
- @return [Numo::UInt32]
1663
- */
1664
- static VALUE uint32_poly(VALUE self, VALUE args) {
1665
- int argc, i;
1666
- VALUE* argv;
1667
- volatile VALUE v, a;
1668
- ndfunc_arg_out_t aout[1] = { { cT, 0 } };
1669
- ndfunc_t ndf = { iter_uint32_poly, NO_LOOP, 0, 1, 0, aout };
1670
-
1671
- argc = (int)RARRAY_LEN(args);
1672
- ndf.nin = argc + 1;
1673
- ndf.ain = ALLOCA_N(ndfunc_arg_in_t, argc + 1);
1674
- for (i = 0; i < argc + 1; i++) {
1675
- ndf.ain[i].type = cT;
1676
- }
1677
- argv = ALLOCA_N(VALUE, argc + 1);
1678
- argv[0] = self;
1679
- for (i = 0; i < argc; i++) {
1680
- argv[i + 1] = RARRAY_PTR(args)[i];
1681
- }
1682
- a = rb_ary_new4(argc + 1, argv);
1683
- v = na_ndloop2(&ndf, a);
1684
- return uint32_extract(v);
1685
- }
1686
-
1687
- /*
1688
- qsort.c
1689
- Ruby/Numo::NArray - Numerical Array class for Ruby
1690
- modified by Masahiro TANAKA
1691
- */
1692
-
1693
- /*
1694
- * qsort.c: standard quicksort algorithm
1695
- *
1696
- * Modifications from vanilla NetBSD source:
1697
- * Add do ... while() macro fix
1698
- * Remove __inline, _DIAGASSERTs, __P
1699
- * Remove ill-considered "swap_cnt" switch to insertion sort,
1700
- * in favor of a simple check for presorted input.
1701
- *
1702
- * CAUTION: if you change this file, see also qsort_arg.c
1703
- *
1704
- * $PostgreSQL: pgsql/src/port/qsort.c,v 1.12 2006/10/19 20:56:22 tgl Exp $
1705
- */
1706
-
1707
- /* $NetBSD: qsort.c,v 1.13 2003/08/07 16:43:42 agc Exp $ */
1708
-
1709
- /*-
1710
- * Copyright (c) 1992, 1993
1711
- * The Regents of the University of California. All rights reserved.
1712
- *
1713
- * Redistribution and use in source and binary forms, with or without
1714
- * modification, are permitted provided that the following conditions
1715
- * are met:
1716
- * 1. Redistributions of source code must retain the above copyright
1717
- * notice, this list of conditions and the following disclaimer.
1718
- * 2. Redistributions in binary form must reproduce the above copyright
1719
- * notice, this list of conditions and the following disclaimer in the
1720
- * documentation and/or other materials provided with the distribution.
1721
- * 3. Neither the name of the University nor the names of its contributors
1722
- * may be used to endorse or promote products derived from this software
1723
- * without specific prior written permission.
1724
- *
1725
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
1726
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1727
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1728
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
1729
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
1730
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
1731
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
1732
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
1733
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
1734
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
1735
- * SUCH DAMAGE.
1736
- */
1737
-
1738
- #ifndef QSORT_INCL
1739
- #define QSORT_INCL
1740
- #define Min(x, y) ((x) < (y) ? (x) : (y))
1741
-
1742
- /*
1743
- * Qsort routine based on J. L. Bentley and M. D. McIlroy,
1744
- * "Engineering a sort function",
1745
- * Software--Practice and Experience 23 (1993) 1249-1265.
1746
- * We have modified their original by adding a check for already-sorted input,
1747
- * which seems to be a win per discussions on pgsql-hackers around 2006-03-21.
1748
- */
1749
- #define swapcode(TYPE, parmi, parmj, n) \
1750
- do { \
1751
- size_t i = (n) / sizeof(TYPE); \
1752
- TYPE* pi = (TYPE*)(void*)(parmi); \
1753
- TYPE* pj = (TYPE*)(void*)(parmj); \
1754
- do { \
1755
- TYPE t = *pi; \
1756
- *pi++ = *pj; \
1757
- *pj++ = t; \
1758
- } while (--i > 0); \
1759
- } while (0)
1760
-
1761
- #ifdef HAVE_STDINT_H
1762
- #define SWAPINIT(a, es) \
1763
- swaptype = (uintptr_t)(a) % sizeof(long) || (es) % sizeof(long) ? 2 \
1764
- : (es) == sizeof(long) ? 0 \
1765
- : 1;
1766
- #else
1767
- #define SWAPINIT(a, es) \
1768
- swaptype = ((char*)(a) - (char*)0) % sizeof(long) || (es) % sizeof(long) ? 2 \
1769
- : (es) == sizeof(long) ? 0 \
1770
- : 1;
1771
- #endif
1772
-
1773
- static inline void swapfunc(char* a, char* b, size_t n, int swaptype) {
1774
- if (swaptype <= 1)
1775
- swapcode(long, a, b, n);
1776
- else
1777
- swapcode(char, a, b, n);
1778
- }
1779
-
1780
- #define swap(a, b) \
1781
- if (swaptype == 0) { \
1782
- long t = *(long*)(void*)(a); \
1783
- *(long*)(void*)(a) = *(long*)(void*)(b); \
1784
- *(long*)(void*)(b) = t; \
1785
- } else \
1786
- swapfunc(a, b, es, swaptype)
1787
-
1788
- #define vecswap(a, b, n) \
1789
- if ((n) > 0) swapfunc((a), (b), (size_t)(n), swaptype)
1790
-
1791
- #define med3(a, b, c, _cmp) \
1792
- (cmpgt(b, a) ? (cmpgt(c, b) ? b : (cmpgt(c, a) ? c : a)) \
1793
- : (cmpgt(b, c) ? b : (cmpgt(c, a) ? a : c)))
1794
- #endif
1795
-
175
+ DEF_NARRAY_POLY_METHOD_FUNC(uint32, numo_cUInt32)
1796
176
  #undef qsort_dtype
1797
- #define qsort_dtype dtype
177
+ #define qsort_dtype uint32
1798
178
  #undef qsort_cast
1799
- #define qsort_cast *(dtype*)
1800
-
1801
- static void uint32_qsort(void* a, size_t n, ssize_t es) {
1802
- char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
1803
- int d, r, swaptype, presorted;
1804
-
1805
- loop:
1806
- SWAPINIT(a, es);
1807
- if (n < 7) {
1808
- for (pm = (char*)a + es; pm < (char*)a + n * es; pm += es)
1809
- for (pl = pm; pl > (char*)a && cmpgt(pl - es, pl); pl -= es) swap(pl, pl - es);
1810
- return;
1811
- }
1812
- presorted = 1;
1813
- for (pm = (char*)a + es; pm < (char*)a + n * es; pm += es) {
1814
- if (cmpgt(pm - es, pm)) {
1815
- presorted = 0;
1816
- break;
1817
- }
1818
- }
1819
- if (presorted) return;
1820
- pm = (char*)a + (n / 2) * es;
1821
- if (n > 7) {
1822
- pl = (char*)a;
1823
- pn = (char*)a + (n - 1) * es;
1824
- if (n > 40) {
1825
- d = (int)((n / 8) * es);
1826
- pl = med3(pl, pl + d, pl + 2 * d, cmp);
1827
- pm = med3(pm - d, pm, pm + d, cmp);
1828
- pn = med3(pn - 2 * d, pn - d, pn, cmp);
1829
- }
1830
- pm = med3(pl, pm, pn, cmp);
1831
- }
1832
- swap(a, pm);
1833
- pa = pb = (char*)a + es;
1834
- pc = pd = (char*)a + (n - 1) * es;
1835
- for (;;) {
1836
- while (pb <= pc && (r = cmp(pb, a)) <= 0) {
1837
- if (r == 0) {
1838
- swap(pa, pb);
1839
- pa += es;
1840
- }
1841
- pb += es;
1842
- }
1843
- while (pb <= pc && (r = cmp(pc, a)) >= 0) {
1844
- if (r == 0) {
1845
- swap(pc, pd);
1846
- pd -= es;
1847
- }
1848
- pc -= es;
1849
- }
1850
- if (pb > pc) break;
1851
- swap(pb, pc);
1852
- pb += es;
1853
- pc -= es;
1854
- }
1855
- pn = (char*)a + n * es;
1856
- r = (int)Min(pa - (char*)a, pb - pa);
1857
- vecswap(a, pb - r, r);
1858
- r = (int)Min(pd - pc, pn - pd - es);
1859
- vecswap(pb, pn - r, r);
1860
- if ((r = (int)(pb - pa)) > es) uint32_qsort(a, r / es, es);
1861
- if ((r = (int)(pd - pc)) > es) {
1862
- /* Iterate rather than recurse to save stack space */
1863
- a = pn - r;
1864
- n = r / es;
1865
- goto loop;
1866
- }
1867
- /* qsort(pn - r, r / es, es, cmp);*/
1868
- }
1869
-
1870
- static void iter_uint32_sort(na_loop_t* const lp) {
1871
- size_t n;
1872
- char* ptr;
1873
- ssize_t step;
1874
-
1875
- INIT_COUNTER(lp, n);
1876
- INIT_PTR(lp, 0, ptr, step);
1877
- uint32_qsort(ptr, n, step);
1878
- }
1879
-
1880
- /*
1881
- sort of self.
1882
- @overload sort(axis:nil)
1883
- @param [Numeric,Array,Range] axis Performs sort along the axis.
1884
- @return [Numo::UInt32] returns result of sort.
1885
- @example
1886
- Numo::DFloat[3,4,1,2].sort #=> Numo::DFloat[1,2,3,4]
1887
- */
1888
- static VALUE uint32_sort(int argc, VALUE* argv, VALUE self) {
1889
- VALUE reduce;
1890
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { sym_reduce, 0 } };
1891
- ndfunc_t ndf = { 0, NDF_HAS_LOOP | NDF_FLAT_REDUCE, 2, 0, ain, 0 };
1892
-
1893
- if (!TEST_INPLACE(self)) {
1894
- self = na_copy(self);
1895
- }
1896
-
1897
- ndf.func = iter_uint32_sort;
1898
- reduce = na_reduce_dimension(argc, argv, 1, &self, &ndf, 0);
1899
-
1900
- na_ndloop(&ndf, 2, self, reduce);
1901
- return self;
1902
- }
1903
-
1904
- /*
1905
- qsort.c
1906
- Ruby/Numo::NArray - Numerical Array class for Ruby
1907
- modified by Masahiro TANAKA
1908
- */
1909
-
1910
- /*
1911
- * qsort.c: standard quicksort algorithm
1912
- *
1913
- * Modifications from vanilla NetBSD source:
1914
- * Add do ... while() macro fix
1915
- * Remove __inline, _DIAGASSERTs, __P
1916
- * Remove ill-considered "swap_cnt" switch to insertion sort,
1917
- * in favor of a simple check for presorted input.
1918
- *
1919
- * CAUTION: if you change this file, see also qsort_arg.c
1920
- *
1921
- * $PostgreSQL: pgsql/src/port/qsort.c,v 1.12 2006/10/19 20:56:22 tgl Exp $
1922
- */
1923
-
1924
- /* $NetBSD: qsort.c,v 1.13 2003/08/07 16:43:42 agc Exp $ */
1925
-
1926
- /*-
1927
- * Copyright (c) 1992, 1993
1928
- * The Regents of the University of California. All rights reserved.
1929
- *
1930
- * Redistribution and use in source and binary forms, with or without
1931
- * modification, are permitted provided that the following conditions
1932
- * are met:
1933
- * 1. Redistributions of source code must retain the above copyright
1934
- * notice, this list of conditions and the following disclaimer.
1935
- * 2. Redistributions in binary form must reproduce the above copyright
1936
- * notice, this list of conditions and the following disclaimer in the
1937
- * documentation and/or other materials provided with the distribution.
1938
- * 3. Neither the name of the University nor the names of its contributors
1939
- * may be used to endorse or promote products derived from this software
1940
- * without specific prior written permission.
1941
- *
1942
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
1943
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1944
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1945
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
1946
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
1947
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
1948
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
1949
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
1950
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
1951
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
1952
- * SUCH DAMAGE.
1953
- */
1954
-
179
+ #define qsort_cast *(uint32*)
180
+ DEF_NARRAY_INT_SORT_METHOD_FUNC(uint32)
1955
181
  #undef qsort_dtype
1956
- #define qsort_dtype dtype*
182
+ #define qsort_dtype uint32*
1957
183
  #undef qsort_cast
1958
- #define qsort_cast **(dtype**)
184
+ #define qsort_cast **(uint32**)
185
+ DEF_NARRAY_INT_SORT_INDEX_METHOD_FUNC(uint32, numo_cUInt32)
186
+ DEF_NARRAY_INT_MEDIAN_METHOD_FUNC(uint32)
187
+ DEF_NARRAY_INT_MEAN_METHOD_FUNC(uint32, numo_cUInt32)
188
+ DEF_NARRAY_INT_VAR_METHOD_FUNC(uint32, numo_cUInt32)
189
+ DEF_NARRAY_INT_STDDEV_METHOD_FUNC(uint32, numo_cUInt32)
190
+ DEF_NARRAY_INT_RMS_METHOD_FUNC(uint32, numo_cUInt32)
1959
191
 
1960
- static void uint32_index_qsort(void* a, size_t n, ssize_t es) {
1961
- char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
1962
- int d, r, swaptype, presorted;
192
+ static size_t uint32_memsize(const void* ptr) {
193
+ size_t size = sizeof(narray_data_t);
194
+ const narray_data_t* na = (const narray_data_t*)ptr;
1963
195
 
1964
- loop:
1965
- SWAPINIT(a, es);
1966
- if (n < 7) {
1967
- for (pm = (char*)a + es; pm < (char*)a + n * es; pm += es)
1968
- for (pl = pm; pl > (char*)a && cmpgt(pl - es, pl); pl -= es) swap(pl, pl - es);
1969
- return;
1970
- }
1971
- presorted = 1;
1972
- for (pm = (char*)a + es; pm < (char*)a + n * es; pm += es) {
1973
- if (cmpgt(pm - es, pm)) {
1974
- presorted = 0;
1975
- break;
1976
- }
1977
- }
1978
- if (presorted) return;
1979
- pm = (char*)a + (n / 2) * es;
1980
- if (n > 7) {
1981
- pl = (char*)a;
1982
- pn = (char*)a + (n - 1) * es;
1983
- if (n > 40) {
1984
- d = (int)((n / 8) * es);
1985
- pl = med3(pl, pl + d, pl + 2 * d, cmp);
1986
- pm = med3(pm - d, pm, pm + d, cmp);
1987
- pn = med3(pn - 2 * d, pn - d, pn, cmp);
1988
- }
1989
- pm = med3(pl, pm, pn, cmp);
196
+ assert(na->base.type == NARRAY_DATA_T);
197
+
198
+ if (na->ptr != NULL) {
199
+
200
+ size += na->base.size * sizeof(dtype);
1990
201
  }
1991
- swap(a, pm);
1992
- for (pa = pb = (char*)a + es, pc = pd = (char*)a + (n - 1) * es; pb <= pc;
1993
- pb += es, pc -= es) {
1994
- while (pb <= pc && (r = cmp(pb, a)) <= 0) {
1995
- if (r == 0) {
1996
- swap(pa, pb);
1997
- pa += es;
1998
- }
1999
- pb += es;
2000
- }
2001
- while (pb <= pc && (r = cmp(pc, a)) >= 0) {
2002
- if (r == 0) {
2003
- swap(pc, pd);
2004
- pd -= es;
2005
- }
2006
- pc -= es;
202
+ if (na->base.size > 0) {
203
+ if (na->base.shape != NULL && na->base.shape != &(na->base.size)) {
204
+ size += sizeof(size_t) * na->base.ndim;
2007
205
  }
2008
- if (pb > pc) break;
2009
- swap(pb, pc);
2010
206
  }
2011
- pn = (char*)a + n * es;
2012
- r = (int)Min(pa - (char*)a, pb - pa);
2013
- vecswap(a, pb - r, r);
2014
- r = (int)Min(pd - pc, pn - pd - es);
2015
- vecswap(pb, pn - r, r);
2016
- if ((r = (int)(pb - pa)) > es) uint32_index_qsort(a, r / es, es);
2017
- if ((r = (int)(pd - pc)) > es) {
2018
- /* Iterate rather than recurse to save stack space */
2019
- a = pn - r;
2020
- n = r / es;
2021
- goto loop;
2022
- }
2023
- /* qsort(pn - r, r / es, es, cmp);*/
207
+ return size;
2024
208
  }
2025
209
 
2026
- #define idx_t int64_t
2027
- static void uint32_index64_qsort(na_loop_t* const lp) {
2028
- size_t i, n, idx;
2029
- char *d_ptr, *i_ptr, *o_ptr;
2030
- ssize_t d_step, i_step, o_step;
2031
- char** ptr;
2032
-
2033
- INIT_COUNTER(lp, n);
2034
- INIT_PTR(lp, 0, d_ptr, d_step);
2035
- INIT_PTR(lp, 1, i_ptr, i_step);
2036
- INIT_PTR(lp, 2, o_ptr, o_step);
2037
-
2038
- ptr = (char**)(lp->opt_ptr);
2039
-
2040
- // o_ptr=%lx,o_step=%ld)\n",(size_t)ptr,(size_t)d_ptr,(ssize_t)d_step,(size_t)i_ptr,(ssize_t)i_step,(size_t)o_ptr,(ssize_t)o_step);
210
+ static void uint32_free(void* ptr) {
211
+ narray_data_t* na = (narray_data_t*)ptr;
2041
212
 
2042
- if (n == 1) {
2043
- *(idx_t*)o_ptr = *(idx_t*)(i_ptr);
2044
- return;
2045
- }
213
+ assert(na->base.type == NARRAY_DATA_T);
2046
214
 
2047
- for (i = 0; i < n; i++) {
2048
- ptr[i] = d_ptr + d_step * i;
215
+ if (na->ptr != NULL) {
216
+ if (na->owned) {
217
+ xfree(na->ptr);
218
+ }
219
+ na->ptr = NULL;
2049
220
  }
2050
-
2051
- uint32_index_qsort(ptr, n, sizeof(dtype*));
2052
-
2053
- // d_ptr = lp->args[0].ptr;
2054
-
2055
- for (i = 0; i < n; i++) {
2056
- idx = (ptr[i] - d_ptr) / d_step;
2057
- *(idx_t*)o_ptr = *(idx_t*)(i_ptr + i_step * idx);
2058
- o_ptr += o_step;
221
+ if (na->base.size > 0) {
222
+ if (na->base.shape != NULL && na->base.shape != &(na->base.size)) {
223
+ xfree(na->base.shape);
224
+ na->base.shape = NULL;
225
+ }
2059
226
  }
227
+ xfree(na);
2060
228
  }
2061
- #undef idx_t
2062
-
2063
- #define idx_t int32_t
2064
- static void uint32_index32_qsort(na_loop_t* const lp) {
2065
- size_t i, n, idx;
2066
- char *d_ptr, *i_ptr, *o_ptr;
2067
- ssize_t d_step, i_step, o_step;
2068
- char** ptr;
2069
-
2070
- INIT_COUNTER(lp, n);
2071
- INIT_PTR(lp, 0, d_ptr, d_step);
2072
- INIT_PTR(lp, 1, i_ptr, i_step);
2073
- INIT_PTR(lp, 2, o_ptr, o_step);
2074
-
2075
- ptr = (char**)(lp->opt_ptr);
2076
229
 
2077
- // o_ptr=%lx,o_step=%ld)\n",(size_t)ptr,(size_t)d_ptr,(ssize_t)d_step,(size_t)i_ptr,(ssize_t)i_step,(size_t)o_ptr,(ssize_t)o_step);
230
+ static narray_type_info_t uint32_info = {
2078
231
 
2079
- if (n == 1) {
2080
- *(idx_t*)o_ptr = *(idx_t*)(i_ptr);
2081
- return;
2082
- }
232
+ 0, // element_bits
233
+ sizeof(dtype), // element_bytes
234
+ sizeof(dtype), // element_stride (in bytes)
2083
235
 
2084
- for (i = 0; i < n; i++) {
2085
- ptr[i] = d_ptr + d_step * i;
2086
- }
236
+ };
2087
237
 
2088
- uint32_index_qsort(ptr, n, sizeof(dtype*));
238
+ static const rb_data_type_t uint32_data_type = {
239
+ "Numo::UInt32",
240
+ {
241
+ 0,
242
+ uint32_free,
243
+ uint32_memsize,
244
+ },
245
+ &na_data_type,
246
+ &uint32_info,
247
+ RUBY_TYPED_FROZEN_SHAREABLE, // flags
248
+ };
2089
249
 
2090
- // d_ptr = lp->args[0].ptr;
250
+ static VALUE uint32_s_alloc_func(VALUE klass) {
251
+ narray_data_t* na = ALLOC(narray_data_t);
2091
252
 
2092
- for (i = 0; i < n; i++) {
2093
- idx = (ptr[i] - d_ptr) / d_step;
2094
- *(idx_t*)o_ptr = *(idx_t*)(i_ptr + i_step * idx);
2095
- o_ptr += o_step;
2096
- }
253
+ na->base.ndim = 0;
254
+ na->base.type = NARRAY_DATA_T;
255
+ na->base.flag[0] = NA_FL0_INIT;
256
+ na->base.flag[1] = NA_FL1_INIT;
257
+ na->base.size = 0;
258
+ na->base.shape = NULL;
259
+ na->base.reduce = INT2FIX(0);
260
+ na->ptr = NULL;
261
+ na->owned = FALSE;
262
+ return TypedData_Wrap_Struct(klass, &uint32_data_type, (void*)na);
2097
263
  }
2098
- #undef idx_t
2099
264
 
2100
- /*
2101
- sort_index. Returns an index array of sort result.
2102
- @overload sort_index(axis:nil)
2103
- @param [Numeric,Array,Range] axis Performs sort_index along the axis.
2104
- @return [Integer,Numo::Int] returns result index of sort_index.
2105
- @example
2106
- Numo::NArray[3,4,1,2].sort_index #=> Numo::Int32[2,3,0,1]
2107
- */
2108
- static VALUE uint32_sort_index(int argc, VALUE* argv, VALUE self) {
2109
- size_t size;
265
+ static VALUE uint32_allocate(VALUE self) {
2110
266
  narray_t* na;
2111
- VALUE idx, tmp, reduce, res;
2112
- char* buf;
2113
- ndfunc_arg_in_t ain[3] = { { cT, 0 }, { 0, 0 }, { sym_reduce, 0 } };
2114
- ndfunc_arg_out_t aout[1] = { { 0, 0, 0 } };
2115
- ndfunc_t ndf = { 0, STRIDE_LOOP_NIP | NDF_FLAT_REDUCE | NDF_CUM, 3, 1, ain, aout };
267
+ char* ptr;
2116
268
 
2117
269
  GetNArray(self, na);
2118
- if (na->ndim == 0) {
2119
- return INT2FIX(0);
2120
- }
2121
- if (na->size > (~(u_int32_t)0)) {
2122
- ain[1].type = aout[0].type = numo_cInt64;
2123
- idx = nary_new(numo_cInt64, na->ndim, na->shape);
2124
-
2125
- ndf.func = uint32_index64_qsort;
2126
- reduce = na_reduce_dimension(argc, argv, 1, &self, &ndf, 0);
2127
-
2128
- } else {
2129
- ain[1].type = aout[0].type = numo_cInt32;
2130
- idx = nary_new(numo_cInt32, na->ndim, na->shape);
2131
-
2132
- ndf.func = uint32_index32_qsort;
2133
- reduce = na_reduce_dimension(argc, argv, 1, &self, &ndf, 0);
2134
- }
2135
- rb_funcall(idx, rb_intern("seq"), 0);
2136
-
2137
- size = na->size * sizeof(void*); // max capa
2138
- buf = rb_alloc_tmp_buffer(&tmp, size);
2139
- res = na_ndloop3(&ndf, buf, 3, self, idx, reduce);
2140
- rb_free_tmp_buffer(&tmp);
2141
- return res;
2142
- }
2143
-
2144
- static void iter_uint32_median(na_loop_t* const lp) {
2145
- size_t n;
2146
- char *p1, *p2;
2147
- dtype* buf;
2148
-
2149
- INIT_COUNTER(lp, n);
2150
- p1 = (lp->args[0]).ptr + (lp->args[0].iter[0]).pos;
2151
- p2 = (lp->args[1]).ptr + (lp->args[1].iter[0]).pos;
2152
- buf = (dtype*)p1;
2153
270
 
2154
- uint32_qsort(buf, n, sizeof(dtype));
271
+ switch (NA_TYPE(na)) {
272
+ case NARRAY_DATA_T:
273
+ ptr = NA_DATA_PTR(na);
274
+ if (na->size > 0 && ptr == NULL) {
275
+ ptr = xmalloc(sizeof(dtype) * na->size);
2155
276
 
2156
- if (n == 0) {
2157
- *(dtype*)p2 = buf[0];
2158
- } else if (n % 2 == 0) {
2159
- *(dtype*)p2 = (buf[n / 2 - 1] + buf[n / 2]) / 2;
2160
- } else {
2161
- *(dtype*)p2 = buf[(n - 1) / 2];
277
+ NA_DATA_PTR(na) = ptr;
278
+ NA_DATA_OWNED(na) = TRUE;
279
+ }
280
+ break;
281
+ case NARRAY_VIEW_T:
282
+ rb_funcall(NA_VIEW_DATA(na), rb_intern("allocate"), 0);
283
+ break;
284
+ case NARRAY_FILEMAP_T:
285
+ // ptr = ((narray_filemap_t*)na)->ptr;
286
+ // to be implemented
287
+ default:
288
+ rb_bug("invalid narray type : %d", NA_TYPE(na));
2162
289
  }
2163
- }
2164
-
2165
- /*
2166
- median of self.
2167
- @overload median(axis:nil, keepdims:false)
2168
- @param [Numeric,Array,Range] axis Finds median along the axis.
2169
- @param [TrueClass] keepdims If true, the reduced axes are left in the result array as
2170
- dimensions with size one.
2171
- @return [Numo::UInt32] returns median of self.
2172
- */
2173
-
2174
- static VALUE uint32_median(int argc, VALUE* argv, VALUE self) {
2175
- VALUE v, reduce;
2176
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { sym_reduce, 0 } };
2177
- ndfunc_arg_out_t aout[1] = { { INT2FIX(0), 0 } };
2178
- ndfunc_t ndf = { 0, NDF_HAS_LOOP | NDF_FLAT_REDUCE, 2, 1, ain, aout };
2179
-
2180
- self = na_copy(self); // as temporary buffer
2181
-
2182
- ndf.func = iter_uint32_median;
2183
- reduce = na_reduce_dimension(argc, argv, 1, &self, &ndf, 0);
2184
-
2185
- v = na_ndloop(&ndf, 2, self, reduce);
2186
- return uint32_extract(v);
290
+ return self;
2187
291
  }
2188
292
 
2189
293
  void Init_numo_uint32(void) {
@@ -2219,12 +323,7 @@ void Init_numo_uint32(void) {
2219
323
  rb_define_const(cT, "UPCAST", hCast);
2220
324
  rb_hash_aset(hCast, rb_cArray, cT);
2221
325
 
2222
- #ifdef RUBY_INTEGER_UNIFICATION
2223
326
  rb_hash_aset(hCast, rb_cInteger, cT);
2224
- #else
2225
- rb_hash_aset(hCast, rb_cFixnum, cT);
2226
- rb_hash_aset(hCast, rb_cBignum, cT);
2227
- #endif
2228
327
  rb_hash_aset(hCast, rb_cFloat, numo_cDFloat);
2229
328
  rb_hash_aset(hCast, rb_cComplex, numo_cDComplex);
2230
329
  rb_hash_aset(hCast, numo_cRObject, numo_cRObject);
@@ -2254,12 +353,50 @@ void Init_numo_uint32(void) {
2254
353
  rb_define_const(cT, "MIN", M_MIN);
2255
354
  rb_define_alloc_func(cT, uint32_s_alloc_func);
2256
355
  rb_define_method(cT, "allocate", uint32_allocate, 0);
356
+ /**
357
+ * Extract an element only if self is a dimensionless NArray.
358
+ * @overload extract
359
+ * @return [Numeric,Numo::NArray]
360
+ * --- Extract element value as Ruby Object if self is a dimensionless NArray,
361
+ * otherwise returns self.
362
+ */
2257
363
  rb_define_method(cT, "extract", uint32_extract, 0);
2258
-
364
+ /**
365
+ * Store elements to Numo::UInt32 from other.
366
+ * @overload store(other)
367
+ * @param [Object] other
368
+ * @return [Numo::UInt32] self
369
+ */
2259
370
  rb_define_method(cT, "store", uint32_store, 1);
2260
-
371
+ /**
372
+ * Cast object to Numo::UInt32.
373
+ * @overload [](elements)
374
+ * @overload cast(array)
375
+ * @param [Numeric,Array] elements
376
+ * @param [Array] array
377
+ * @return [Numo::UInt32]
378
+ */
2261
379
  rb_define_singleton_method(cT, "cast", uint32_s_cast, 1);
380
+ /**
381
+ * Multi-dimensional element reference.
382
+ * @overload [](dim0,...,dimL)
383
+ * @param [Numeric,Range,Array,Numo::Int32,Numo::Int64,Numo::Bit,TrueClass,FalseClass,
384
+ * Symbol] dim0,...,dimL multi-dimensional indices.
385
+ * @return [Numeric,Numo::UInt32] an element or NArray view.
386
+ * @see Numo::NArray#[]
387
+ * @see #[]=
388
+ */
2262
389
  rb_define_method(cT, "[]", uint32_aref, -1);
390
+ /**
391
+ * Multi-dimensional element assignment.
392
+ * @overload []=(dim0,...,dimL,val)
393
+ * @param [Numeric,Range,Array,Numo::Int32,Numo::Int64,Numo::Bit,TrueClass,FalseClass,
394
+ * Symbol] dim0,...,dimL multi-dimensional indices.
395
+ * @param [Numeric,Numo::NArray,Array] val Value(s) to be set to self.
396
+ * @return [Numeric,Numo::NArray,Array] returns `val` (last argument).
397
+ * @see Numo::NArray#[]=
398
+ * @see #[]
399
+ */
2263
400
  rb_define_method(cT, "[]=", uint32_aset, -1);
2264
401
  /**
2265
402
  * return NArray with cast to the type of self.
@@ -2300,10 +437,54 @@ void Init_numo_uint32(void) {
2300
437
  * @return [String]
2301
438
  */
2302
439
  rb_define_method(cT, "inspect", uint32_inspect, 0);
440
+ /**
441
+ * Calls the given block once for each element in self,
442
+ * passing that element as a parameter.
443
+ * @overload each
444
+ * @return [Numo::NArray] self
445
+ * For a block `{|x| ... }`,
446
+ * @yieldparam [Numeric] x an element of NArray.
447
+ * @see #each_with_index
448
+ * @see #map
449
+ */
2303
450
  rb_define_method(cT, "each", uint32_each, 0);
451
+ /**
452
+ * Unary map.
453
+ * @overload map
454
+ * @return [Numo::UInt32] map of self.
455
+ */
2304
456
  rb_define_method(cT, "map", uint32_map, 0);
457
+ /**
458
+ * Invokes the given block once for each element of self,
459
+ * passing that element and indices along each axis as parameters.
460
+ * @overload each_with_index
461
+ * For a block `{|x,i,j,...| ... }`,
462
+ * @yieldparam [Numeric] x an element
463
+ * @yieldparam [Integer] i,j,... multitimensional indices
464
+ * @return [Numo::NArray] self
465
+ * @see #each
466
+ * @see #map_with_index
467
+ */
2305
468
  rb_define_method(cT, "each_with_index", uint32_each_with_index, 0);
469
+ /**
470
+ * Invokes the given block once for each element of self,
471
+ * passing that element and indices along each axis as parameters.
472
+ * Creates a new NArray containing the values returned by the block.
473
+ * Inplace option is allowed, i.e., `nary.inplace.map` overwrites `nary`.
474
+ * @overload map_with_index
475
+ * For a block `{|x,i,j,...| ... }`,
476
+ * @yieldparam [Numeric] x an element
477
+ * @yieldparam [Integer] i,j,... multitimensional indices
478
+ * @return [Numo::NArray] mapped array
479
+ * @see #map
480
+ * @see #each_with_index
481
+ */
2306
482
  rb_define_method(cT, "map_with_index", uint32_map_with_index, 0);
483
+ /**
484
+ * abs of self.
485
+ * @overload abs
486
+ * @return [Numo::UInt32] abs of self.
487
+ */
2307
488
  rb_define_method(cT, "abs", uint32_abs, 0);
2308
489
  /**
2309
490
  * Binary add.
@@ -2764,11 +945,40 @@ void Init_numo_uint32(void) {
2764
945
  * # [4, 3, 3, 2, 4, 2]
2765
946
  */
2766
947
  rb_define_method(cT, "rand", uint32_rand, -1);
948
+ /**
949
+ * Calculate polynomial.
950
+ * `x.poly(a0,a1,a2,...,an) = a0 + a1*x + a2*x**2 + ... + an*x**n`
951
+ * @overload poly a0, a1, ..., an
952
+ * @param [Numo::NArray,Numeric] a0,a1,...,an
953
+ * @return [Numo::UInt32]
954
+ */
2767
955
  rb_define_method(cT, "poly", uint32_poly, -2);
2768
-
956
+ /**
957
+ * sort of self.
958
+ * @overload sort(axis:nil)
959
+ * @param [Numeric,Array,Range] axis Performs sort along the axis.
960
+ * @return [Numo::UInt32] returns result of sort.
961
+ * @example
962
+ * Numo::DFloat[3,4,1,2].sort #=> Numo::DFloat[1,2,3,4]
963
+ */
2769
964
  rb_define_method(cT, "sort", uint32_sort, -1);
2770
-
965
+ /**
966
+ * sort_index. Returns an index array of sort result.
967
+ * @overload sort_index(axis:nil)
968
+ * @param [Numeric,Array,Range] axis Performs sort_index along the axis.
969
+ * @return [Integer,Numo::Int] returns result index of sort_index.
970
+ * @example
971
+ * Numo::NArray[3,4,1,2].sort_index #=> Numo::Int32[2,3,0,1]
972
+ */
2771
973
  rb_define_method(cT, "sort_index", uint32_sort_index, -1);
974
+ /**
975
+ * median of self.
976
+ * @overload median(axis:nil, keepdims:false)
977
+ * @param [Numeric,Array,Range] axis Finds median along the axis.
978
+ * @param [TrueClass] keepdims If true, the reduced axes are left in the result array as
979
+ * dimensions with size one.
980
+ * @return [Numo::UInt32] returns median of self.
981
+ */
2772
982
  rb_define_method(cT, "median", uint32_median, -1);
2773
983
  rb_define_singleton_method(cT, "[]", uint32_s_cast, -2);
2774
984
  /**