numo-narray-alt 0.9.13 → 0.10.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49) hide show
  1. checksums.yaml +4 -4
  2. data/Gemfile +1 -1
  3. data/ext/numo/narray/array.c +1 -9
  4. data/ext/numo/narray/extconf.rb +0 -11
  5. data/ext/numo/narray/index.c +5 -39
  6. data/ext/numo/narray/math.c +0 -5
  7. data/ext/numo/narray/narray.c +13 -19
  8. data/ext/numo/narray/numo/narray.h +6 -8
  9. data/ext/numo/narray/src/mh/abs.h +56 -0
  10. data/ext/numo/narray/src/mh/aref.h +28 -0
  11. data/ext/numo/narray/src/mh/arg.h +56 -0
  12. data/ext/numo/narray/src/mh/aset.h +169 -0
  13. data/ext/numo/narray/src/mh/conj.h +71 -0
  14. data/ext/numo/narray/src/mh/copysign.h +97 -0
  15. data/ext/numo/narray/src/mh/each.h +71 -0
  16. data/ext/numo/narray/src/mh/each_with_index.h +98 -0
  17. data/ext/numo/narray/src/mh/extract.h +36 -0
  18. data/ext/numo/narray/src/mh/im.h +71 -0
  19. data/ext/numo/narray/src/mh/imag.h +56 -0
  20. data/ext/numo/narray/src/mh/kahan_sum.h +39 -0
  21. data/ext/numo/narray/src/mh/map.h +126 -0
  22. data/ext/numo/narray/src/mh/map_with_index.h +76 -0
  23. data/ext/numo/narray/src/mh/median.h +85 -0
  24. data/ext/numo/narray/src/mh/modf.h +35 -0
  25. data/ext/numo/narray/src/mh/poly.h +42 -0
  26. data/ext/numo/narray/src/mh/real.h +56 -0
  27. data/ext/numo/narray/src/mh/s_cast.h +80 -0
  28. data/ext/numo/narray/src/mh/set_imag.h +60 -0
  29. data/ext/numo/narray/src/mh/set_real.h +60 -0
  30. data/ext/numo/narray/src/mh/signbit.h +42 -0
  31. data/ext/numo/narray/src/mh/sort.h +484 -0
  32. data/ext/numo/narray/src/mh/store.h +496 -0
  33. data/ext/numo/narray/src/t_bit.c +65 -195
  34. data/ext/numo/narray/src/t_dcomplex.c +244 -2216
  35. data/ext/numo/narray/src/t_dfloat.c +74 -2435
  36. data/ext/numo/narray/src/t_int16.c +225 -2015
  37. data/ext/numo/narray/src/t_int32.c +225 -2015
  38. data/ext/numo/narray/src/t_int64.c +225 -2015
  39. data/ext/numo/narray/src/t_int8.c +225 -1998
  40. data/ext/numo/narray/src/t_robject.c +114 -1413
  41. data/ext/numo/narray/src/t_scomplex.c +232 -2168
  42. data/ext/numo/narray/src/t_sfloat.c +72 -2399
  43. data/ext/numo/narray/src/t_uint16.c +225 -2015
  44. data/ext/numo/narray/src/t_uint32.c +225 -2015
  45. data/ext/numo/narray/src/t_uint64.c +225 -2015
  46. data/ext/numo/narray/src/t_uint8.c +225 -1998
  47. data/ext/numo/narray/step.c +2 -59
  48. data/numo-narray-alt.gemspec +1 -1
  49. metadata +27 -3
@@ -43,12 +43,22 @@ static ID id_to_a;
43
43
  VALUE cT;
44
44
  extern VALUE cRT;
45
45
 
46
+ #include "mh/store.h"
47
+ #include "mh/s_cast.h"
48
+ #include "mh/extract.h"
49
+ #include "mh/aref.h"
50
+ #include "mh/aset.h"
46
51
  #include "mh/coerce_cast.h"
47
52
  #include "mh/to_a.h"
48
53
  #include "mh/fill.h"
49
54
  #include "mh/format.h"
50
55
  #include "mh/format_to_a.h"
51
56
  #include "mh/inspect.h"
57
+ #include "mh/each.h"
58
+ #include "mh/map.h"
59
+ #include "mh/each_with_index.h"
60
+ #include "mh/map_with_index.h"
61
+ #include "mh/abs.h"
52
62
  #include "mh/op/add.h"
53
63
  #include "mh/op/sub.h"
54
64
  #include "mh/op/mul.h"
@@ -92,6 +102,9 @@ extern VALUE cRT;
92
102
  #include "mh/seq.h"
93
103
  #include "mh/eye.h"
94
104
  #include "mh/rand.h"
105
+ #include "mh/poly.h"
106
+ #include "mh/sort.h"
107
+ #include "mh/median.h"
95
108
  #include "mh/mean.h"
96
109
  #include "mh/var.h"
97
110
  #include "mh/stddev.h"
@@ -99,12 +112,23 @@ extern VALUE cRT;
99
112
 
100
113
  typedef u_int8_t uint8; // Type aliases for shorter notation
101
114
  // following the codebase naming convention.
115
+ DEF_NARRAY_STORE_METHOD_FUNC(uint8, numo_cUInt8)
116
+ DEF_NARRAY_S_CAST_METHOD_FUNC(uint8, numo_cUInt8)
117
+ DEF_NARRAY_EXTRACT_METHOD_FUNC(uint8)
118
+ DEF_NARRAY_AREF_METHOD_FUNC(uint8)
119
+ DEF_EXTRACT_DATA_FUNC(uint8, numo_cUInt8)
120
+ DEF_NARRAY_ASET_METHOD_FUNC(uint8)
102
121
  DEF_NARRAY_COERCE_CAST_METHOD_FUNC(uint8)
103
122
  DEF_NARRAY_TO_A_METHOD_FUNC(uint8)
104
123
  DEF_NARRAY_FILL_METHOD_FUNC(uint8)
105
124
  DEF_NARRAY_FORMAT_METHOD_FUNC(uint8)
106
125
  DEF_NARRAY_FORMAT_TO_A_METHOD_FUNC(uint8)
107
126
  DEF_NARRAY_INSPECT_METHOD_FUNC(uint8)
127
+ DEF_NARRAY_EACH_METHOD_FUNC(uint8)
128
+ DEF_NARRAY_MAP_METHOD_FUNC(uint8, numo_cUInt8)
129
+ DEF_NARRAY_EACH_WITH_INDEX_METHOD_FUNC(uint8)
130
+ DEF_NARRAY_MAP_WITH_INDEX_METHOD_FUNC(uint8, numo_cUInt8)
131
+ DEF_NARRAY_ABS_METHOD_FUNC(uint8, numo_cUInt8, uint8, numo_cUInt8)
108
132
  DEF_NARRAY_INT8_ADD_METHOD_FUNC(uint8, numo_cUInt8)
109
133
  DEF_NARRAY_INT8_SUB_METHOD_FUNC(uint8, numo_cUInt8)
110
134
  DEF_NARRAY_INT8_MUL_METHOD_FUNC(uint8, numo_cUInt8)
@@ -148,2025 +172,122 @@ DEF_NARRAY_INT_MULSUM_METHOD_FUNC(uint8, numo_cUInt8)
148
172
  DEF_NARRAY_INT_SEQ_METHOD_FUNC(uint8)
149
173
  DEF_NARRAY_EYE_METHOD_FUNC(uint8)
150
174
  DEF_NARRAY_INT_RAND_METHOD_FUNC(uint8)
151
- DEF_NARRAY_INT_MEAN_METHOD_FUNC(uint8, numo_cUInt8)
152
- DEF_NARRAY_INT_VAR_METHOD_FUNC(uint8, numo_cUInt8)
153
- DEF_NARRAY_INT_STDDEV_METHOD_FUNC(uint8, numo_cUInt8)
154
- DEF_NARRAY_INT_RMS_METHOD_FUNC(uint8, numo_cUInt8)
155
-
156
- static VALUE uint8_store(VALUE, VALUE);
157
-
158
- static size_t uint8_memsize(const void* ptr) {
159
- size_t size = sizeof(narray_data_t);
160
- const narray_data_t* na = (const narray_data_t*)ptr;
161
-
162
- assert(na->base.type == NARRAY_DATA_T);
163
-
164
- if (na->ptr != NULL) {
165
-
166
- size += na->base.size * sizeof(dtype);
167
- }
168
- if (na->base.size > 0) {
169
- if (na->base.shape != NULL && na->base.shape != &(na->base.size)) {
170
- size += sizeof(size_t) * na->base.ndim;
171
- }
172
- }
173
- return size;
174
- }
175
-
176
- static void uint8_free(void* ptr) {
177
- narray_data_t* na = (narray_data_t*)ptr;
178
-
179
- assert(na->base.type == NARRAY_DATA_T);
180
-
181
- if (na->ptr != NULL) {
182
- if (na->owned) {
183
- xfree(na->ptr);
184
- }
185
- na->ptr = NULL;
186
- }
187
- if (na->base.size > 0) {
188
- if (na->base.shape != NULL && na->base.shape != &(na->base.size)) {
189
- xfree(na->base.shape);
190
- na->base.shape = NULL;
191
- }
192
- }
193
- xfree(na);
194
- }
195
-
196
- static narray_type_info_t uint8_info = {
197
-
198
- 0, // element_bits
199
- sizeof(dtype), // element_bytes
200
- sizeof(dtype), // element_stride (in bytes)
201
-
202
- };
203
-
204
- static const rb_data_type_t uint8_data_type = {
205
- "Numo::UInt8",
206
- {
207
- 0,
208
- uint8_free,
209
- uint8_memsize,
210
- },
211
- &na_data_type,
212
- &uint8_info,
213
- RUBY_TYPED_FROZEN_SHAREABLE, // flags
214
- };
215
-
216
- static VALUE uint8_s_alloc_func(VALUE klass) {
217
- narray_data_t* na = ALLOC(narray_data_t);
218
-
219
- na->base.ndim = 0;
220
- na->base.type = NARRAY_DATA_T;
221
- na->base.flag[0] = NA_FL0_INIT;
222
- na->base.flag[1] = NA_FL1_INIT;
223
- na->base.size = 0;
224
- na->base.shape = NULL;
225
- na->base.reduce = INT2FIX(0);
226
- na->ptr = NULL;
227
- na->owned = FALSE;
228
- return TypedData_Wrap_Struct(klass, &uint8_data_type, (void*)na);
229
- }
230
-
231
- static VALUE uint8_allocate(VALUE self) {
232
- narray_t* na;
233
- char* ptr;
234
-
235
- GetNArray(self, na);
236
-
237
- switch (NA_TYPE(na)) {
238
- case NARRAY_DATA_T:
239
- ptr = NA_DATA_PTR(na);
240
- if (na->size > 0 && ptr == NULL) {
241
- ptr = xmalloc(sizeof(dtype) * na->size);
242
-
243
- NA_DATA_PTR(na) = ptr;
244
- NA_DATA_OWNED(na) = TRUE;
245
- }
246
- break;
247
- case NARRAY_VIEW_T:
248
- rb_funcall(NA_VIEW_DATA(na), rb_intern("allocate"), 0);
249
- break;
250
- case NARRAY_FILEMAP_T:
251
- // ptr = ((narray_filemap_t*)na)->ptr;
252
- // to be implemented
253
- default:
254
- rb_bug("invalid narray type : %d", NA_TYPE(na));
255
- }
256
- return self;
257
- }
258
-
259
- /*
260
- Extract an element only if self is a dimensionless NArray.
261
- @overload extract
262
- @return [Numeric,Numo::NArray]
263
- --- Extract element value as Ruby Object if self is a dimensionless NArray,
264
- otherwise returns self.
265
- */
266
- static VALUE uint8_extract(VALUE self) {
267
- volatile VALUE v;
268
- char* ptr;
269
- narray_t* na;
270
- GetNArray(self, na);
271
-
272
- if (na->ndim == 0) {
273
- ptr = na_get_pointer_for_read(self) + na_get_offset(self);
274
- v = m_extract(ptr);
275
- na_release_lock(self);
276
- return v;
277
- }
278
- return self;
279
- }
280
-
281
- static VALUE uint8_new_dim0(dtype x) {
282
- VALUE v;
283
- dtype* ptr;
284
-
285
- v = nary_new(cT, 0, NULL);
286
- ptr = (dtype*)(char*)na_get_pointer_for_write(v);
287
- *ptr = x;
288
- na_release_lock(v);
289
- return v;
290
- }
291
-
292
- static VALUE uint8_store_numeric(VALUE self, VALUE obj) {
293
- dtype x;
294
- x = m_num_to_data(obj);
295
- obj = uint8_new_dim0(x);
296
- uint8_store(self, obj);
297
- return self;
298
- }
299
-
300
- static void iter_uint8_store_bit(na_loop_t* const lp) {
301
- size_t i;
302
- char* p1;
303
- size_t p2;
304
- ssize_t s1, s2;
305
- size_t *idx1, *idx2;
306
- BIT_DIGIT *a2, x;
307
- dtype y;
308
-
309
- INIT_COUNTER(lp, i);
310
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
311
- INIT_PTR_BIT_IDX(lp, 1, a2, p2, s2, idx2);
312
- if (idx2) {
313
- if (idx1) {
314
- for (; i--;) {
315
- LOAD_BIT(a2, p2 + *idx2, x);
316
- idx2++;
317
- y = m_from_sint(x);
318
- SET_DATA_INDEX(p1, idx1, dtype, y);
319
- }
320
- } else {
321
- for (; i--;) {
322
- LOAD_BIT(a2, p2 + *idx2, x);
323
- idx2++;
324
- y = m_from_sint(x);
325
- SET_DATA_STRIDE(p1, s1, dtype, y);
326
- }
327
- }
328
- } else {
329
- if (idx1) {
330
- for (; i--;) {
331
- LOAD_BIT(a2, p2, x);
332
- p2 += s2;
333
- y = m_from_sint(x);
334
- SET_DATA_INDEX(p1, idx1, dtype, y);
335
- }
336
- } else {
337
- for (; i--;) {
338
- LOAD_BIT(a2, p2, x);
339
- p2 += s2;
340
- y = m_from_sint(x);
341
- SET_DATA_STRIDE(p1, s1, dtype, y);
342
- }
343
- }
344
- }
345
- }
346
-
347
- static VALUE uint8_store_bit(VALUE self, VALUE obj) {
348
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { Qnil, 0 } };
349
- ndfunc_t ndf = { iter_uint8_store_bit, FULL_LOOP, 2, 0, ain, 0 };
350
-
351
- na_ndloop(&ndf, 2, self, obj);
352
- return self;
353
- }
354
-
355
- static void iter_uint8_store_dfloat(na_loop_t* const lp) {
356
- size_t i, s1, s2;
357
- char *p1, *p2;
358
- size_t *idx1, *idx2;
359
- double x;
360
- dtype y;
361
-
362
- INIT_COUNTER(lp, i);
363
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
364
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
365
- if (idx2) {
366
- if (idx1) {
367
- for (; i--;) {
368
- GET_DATA_INDEX(p2, idx2, double, x);
369
- y = m_from_real(x);
370
- SET_DATA_INDEX(p1, idx1, dtype, y);
371
- }
372
- } else {
373
- for (; i--;) {
374
- GET_DATA_INDEX(p2, idx2, double, x);
375
- y = m_from_real(x);
376
- SET_DATA_STRIDE(p1, s1, dtype, y);
377
- }
378
- }
379
- } else {
380
- if (idx1) {
381
- for (; i--;) {
382
- GET_DATA_STRIDE(p2, s2, double, x);
383
- y = m_from_real(x);
384
- SET_DATA_INDEX(p1, idx1, dtype, y);
385
- }
386
- } else {
387
- for (; i--;) {
388
- GET_DATA_STRIDE(p2, s2, double, x);
389
- y = m_from_real(x);
390
- SET_DATA_STRIDE(p1, s1, dtype, y);
391
- }
392
- }
393
- }
394
- }
395
-
396
- static VALUE uint8_store_dfloat(VALUE self, VALUE obj) {
397
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { Qnil, 0 } };
398
- ndfunc_t ndf = { iter_uint8_store_dfloat, FULL_LOOP, 2, 0, ain, 0 };
399
-
400
- na_ndloop(&ndf, 2, self, obj);
401
- return self;
402
- }
403
-
404
- static void iter_uint8_store_sfloat(na_loop_t* const lp) {
405
- size_t i, s1, s2;
406
- char *p1, *p2;
407
- size_t *idx1, *idx2;
408
- float x;
409
- dtype y;
410
-
411
- INIT_COUNTER(lp, i);
412
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
413
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
414
- if (idx2) {
415
- if (idx1) {
416
- for (; i--;) {
417
- GET_DATA_INDEX(p2, idx2, float, x);
418
- y = m_from_real(x);
419
- SET_DATA_INDEX(p1, idx1, dtype, y);
420
- }
421
- } else {
422
- for (; i--;) {
423
- GET_DATA_INDEX(p2, idx2, float, x);
424
- y = m_from_real(x);
425
- SET_DATA_STRIDE(p1, s1, dtype, y);
426
- }
427
- }
428
- } else {
429
- if (idx1) {
430
- for (; i--;) {
431
- GET_DATA_STRIDE(p2, s2, float, x);
432
- y = m_from_real(x);
433
- SET_DATA_INDEX(p1, idx1, dtype, y);
434
- }
435
- } else {
436
- for (; i--;) {
437
- GET_DATA_STRIDE(p2, s2, float, x);
438
- y = m_from_real(x);
439
- SET_DATA_STRIDE(p1, s1, dtype, y);
440
- }
441
- }
442
- }
443
- }
444
-
445
- static VALUE uint8_store_sfloat(VALUE self, VALUE obj) {
446
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { Qnil, 0 } };
447
- ndfunc_t ndf = { iter_uint8_store_sfloat, FULL_LOOP, 2, 0, ain, 0 };
448
-
449
- na_ndloop(&ndf, 2, self, obj);
450
- return self;
451
- }
452
-
453
- static void iter_uint8_store_int64(na_loop_t* const lp) {
454
- size_t i, s1, s2;
455
- char *p1, *p2;
456
- size_t *idx1, *idx2;
457
- int64_t x;
458
- dtype y;
459
-
460
- INIT_COUNTER(lp, i);
461
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
462
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
463
- if (idx2) {
464
- if (idx1) {
465
- for (; i--;) {
466
- GET_DATA_INDEX(p2, idx2, int64_t, x);
467
- y = m_from_int64(x);
468
- SET_DATA_INDEX(p1, idx1, dtype, y);
469
- }
470
- } else {
471
- for (; i--;) {
472
- GET_DATA_INDEX(p2, idx2, int64_t, x);
473
- y = m_from_int64(x);
474
- SET_DATA_STRIDE(p1, s1, dtype, y);
475
- }
476
- }
477
- } else {
478
- if (idx1) {
479
- for (; i--;) {
480
- GET_DATA_STRIDE(p2, s2, int64_t, x);
481
- y = m_from_int64(x);
482
- SET_DATA_INDEX(p1, idx1, dtype, y);
483
- }
484
- } else {
485
- for (; i--;) {
486
- GET_DATA_STRIDE(p2, s2, int64_t, x);
487
- y = m_from_int64(x);
488
- SET_DATA_STRIDE(p1, s1, dtype, y);
489
- }
490
- }
491
- }
492
- }
493
-
494
- static VALUE uint8_store_int64(VALUE self, VALUE obj) {
495
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { Qnil, 0 } };
496
- ndfunc_t ndf = { iter_uint8_store_int64, FULL_LOOP, 2, 0, ain, 0 };
497
-
498
- na_ndloop(&ndf, 2, self, obj);
499
- return self;
500
- }
501
-
502
- static void iter_uint8_store_int32(na_loop_t* const lp) {
503
- size_t i, s1, s2;
504
- char *p1, *p2;
505
- size_t *idx1, *idx2;
506
- int32_t x;
507
- dtype y;
508
-
509
- INIT_COUNTER(lp, i);
510
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
511
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
512
- if (idx2) {
513
- if (idx1) {
514
- for (; i--;) {
515
- GET_DATA_INDEX(p2, idx2, int32_t, x);
516
- y = m_from_int32(x);
517
- SET_DATA_INDEX(p1, idx1, dtype, y);
518
- }
519
- } else {
520
- for (; i--;) {
521
- GET_DATA_INDEX(p2, idx2, int32_t, x);
522
- y = m_from_int32(x);
523
- SET_DATA_STRIDE(p1, s1, dtype, y);
524
- }
525
- }
526
- } else {
527
- if (idx1) {
528
- for (; i--;) {
529
- GET_DATA_STRIDE(p2, s2, int32_t, x);
530
- y = m_from_int32(x);
531
- SET_DATA_INDEX(p1, idx1, dtype, y);
532
- }
533
- } else {
534
- for (; i--;) {
535
- GET_DATA_STRIDE(p2, s2, int32_t, x);
536
- y = m_from_int32(x);
537
- SET_DATA_STRIDE(p1, s1, dtype, y);
538
- }
539
- }
540
- }
541
- }
542
-
543
- static VALUE uint8_store_int32(VALUE self, VALUE obj) {
544
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { Qnil, 0 } };
545
- ndfunc_t ndf = { iter_uint8_store_int32, FULL_LOOP, 2, 0, ain, 0 };
546
-
547
- na_ndloop(&ndf, 2, self, obj);
548
- return self;
549
- }
550
-
551
- static void iter_uint8_store_int16(na_loop_t* const lp) {
552
- size_t i, s1, s2;
553
- char *p1, *p2;
554
- size_t *idx1, *idx2;
555
- int16_t x;
556
- dtype y;
557
-
558
- INIT_COUNTER(lp, i);
559
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
560
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
561
- if (idx2) {
562
- if (idx1) {
563
- for (; i--;) {
564
- GET_DATA_INDEX(p2, idx2, int16_t, x);
565
- y = m_from_sint(x);
566
- SET_DATA_INDEX(p1, idx1, dtype, y);
567
- }
568
- } else {
569
- for (; i--;) {
570
- GET_DATA_INDEX(p2, idx2, int16_t, x);
571
- y = m_from_sint(x);
572
- SET_DATA_STRIDE(p1, s1, dtype, y);
573
- }
574
- }
575
- } else {
576
- if (idx1) {
577
- for (; i--;) {
578
- GET_DATA_STRIDE(p2, s2, int16_t, x);
579
- y = m_from_sint(x);
580
- SET_DATA_INDEX(p1, idx1, dtype, y);
581
- }
582
- } else {
583
- for (; i--;) {
584
- GET_DATA_STRIDE(p2, s2, int16_t, x);
585
- y = m_from_sint(x);
586
- SET_DATA_STRIDE(p1, s1, dtype, y);
587
- }
588
- }
589
- }
590
- }
591
-
592
- static VALUE uint8_store_int16(VALUE self, VALUE obj) {
593
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { Qnil, 0 } };
594
- ndfunc_t ndf = { iter_uint8_store_int16, FULL_LOOP, 2, 0, ain, 0 };
595
-
596
- na_ndloop(&ndf, 2, self, obj);
597
- return self;
598
- }
599
-
600
- static void iter_uint8_store_int8(na_loop_t* const lp) {
601
- size_t i, s1, s2;
602
- char *p1, *p2;
603
- size_t *idx1, *idx2;
604
- int8_t x;
605
- dtype y;
606
-
607
- INIT_COUNTER(lp, i);
608
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
609
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
610
- if (idx2) {
611
- if (idx1) {
612
- for (; i--;) {
613
- GET_DATA_INDEX(p2, idx2, int8_t, x);
614
- y = m_from_sint(x);
615
- SET_DATA_INDEX(p1, idx1, dtype, y);
616
- }
617
- } else {
618
- for (; i--;) {
619
- GET_DATA_INDEX(p2, idx2, int8_t, x);
620
- y = m_from_sint(x);
621
- SET_DATA_STRIDE(p1, s1, dtype, y);
622
- }
623
- }
624
- } else {
625
- if (idx1) {
626
- for (; i--;) {
627
- GET_DATA_STRIDE(p2, s2, int8_t, x);
628
- y = m_from_sint(x);
629
- SET_DATA_INDEX(p1, idx1, dtype, y);
630
- }
631
- } else {
632
- for (; i--;) {
633
- GET_DATA_STRIDE(p2, s2, int8_t, x);
634
- y = m_from_sint(x);
635
- SET_DATA_STRIDE(p1, s1, dtype, y);
636
- }
637
- }
638
- }
639
- }
640
-
641
- static VALUE uint8_store_int8(VALUE self, VALUE obj) {
642
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { Qnil, 0 } };
643
- ndfunc_t ndf = { iter_uint8_store_int8, FULL_LOOP, 2, 0, ain, 0 };
644
-
645
- na_ndloop(&ndf, 2, self, obj);
646
- return self;
647
- }
648
-
649
- static void iter_uint8_store_uint64(na_loop_t* const lp) {
650
- size_t i, s1, s2;
651
- char *p1, *p2;
652
- size_t *idx1, *idx2;
653
- u_int64_t x;
654
- dtype y;
655
-
656
- INIT_COUNTER(lp, i);
657
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
658
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
659
- if (idx2) {
660
- if (idx1) {
661
- for (; i--;) {
662
- GET_DATA_INDEX(p2, idx2, u_int64_t, x);
663
- y = m_from_uint64(x);
664
- SET_DATA_INDEX(p1, idx1, dtype, y);
665
- }
666
- } else {
667
- for (; i--;) {
668
- GET_DATA_INDEX(p2, idx2, u_int64_t, x);
669
- y = m_from_uint64(x);
670
- SET_DATA_STRIDE(p1, s1, dtype, y);
671
- }
672
- }
673
- } else {
674
- if (idx1) {
675
- for (; i--;) {
676
- GET_DATA_STRIDE(p2, s2, u_int64_t, x);
677
- y = m_from_uint64(x);
678
- SET_DATA_INDEX(p1, idx1, dtype, y);
679
- }
680
- } else {
681
- for (; i--;) {
682
- GET_DATA_STRIDE(p2, s2, u_int64_t, x);
683
- y = m_from_uint64(x);
684
- SET_DATA_STRIDE(p1, s1, dtype, y);
685
- }
686
- }
687
- }
688
- }
689
-
690
- static VALUE uint8_store_uint64(VALUE self, VALUE obj) {
691
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { Qnil, 0 } };
692
- ndfunc_t ndf = { iter_uint8_store_uint64, FULL_LOOP, 2, 0, ain, 0 };
693
-
694
- na_ndloop(&ndf, 2, self, obj);
695
- return self;
696
- }
697
-
698
- static void iter_uint8_store_uint32(na_loop_t* const lp) {
699
- size_t i, s1, s2;
700
- char *p1, *p2;
701
- size_t *idx1, *idx2;
702
- u_int32_t x;
703
- dtype y;
704
-
705
- INIT_COUNTER(lp, i);
706
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
707
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
708
- if (idx2) {
709
- if (idx1) {
710
- for (; i--;) {
711
- GET_DATA_INDEX(p2, idx2, u_int32_t, x);
712
- y = m_from_uint32(x);
713
- SET_DATA_INDEX(p1, idx1, dtype, y);
714
- }
715
- } else {
716
- for (; i--;) {
717
- GET_DATA_INDEX(p2, idx2, u_int32_t, x);
718
- y = m_from_uint32(x);
719
- SET_DATA_STRIDE(p1, s1, dtype, y);
720
- }
721
- }
722
- } else {
723
- if (idx1) {
724
- for (; i--;) {
725
- GET_DATA_STRIDE(p2, s2, u_int32_t, x);
726
- y = m_from_uint32(x);
727
- SET_DATA_INDEX(p1, idx1, dtype, y);
728
- }
729
- } else {
730
- for (; i--;) {
731
- GET_DATA_STRIDE(p2, s2, u_int32_t, x);
732
- y = m_from_uint32(x);
733
- SET_DATA_STRIDE(p1, s1, dtype, y);
734
- }
735
- }
736
- }
737
- }
738
-
739
- static VALUE uint8_store_uint32(VALUE self, VALUE obj) {
740
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { Qnil, 0 } };
741
- ndfunc_t ndf = { iter_uint8_store_uint32, FULL_LOOP, 2, 0, ain, 0 };
742
-
743
- na_ndloop(&ndf, 2, self, obj);
744
- return self;
745
- }
746
-
747
- static void iter_uint8_store_uint16(na_loop_t* const lp) {
748
- size_t i, s1, s2;
749
- char *p1, *p2;
750
- size_t *idx1, *idx2;
751
- u_int16_t x;
752
- dtype y;
753
-
754
- INIT_COUNTER(lp, i);
755
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
756
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
757
- if (idx2) {
758
- if (idx1) {
759
- for (; i--;) {
760
- GET_DATA_INDEX(p2, idx2, u_int16_t, x);
761
- y = m_from_sint(x);
762
- SET_DATA_INDEX(p1, idx1, dtype, y);
763
- }
764
- } else {
765
- for (; i--;) {
766
- GET_DATA_INDEX(p2, idx2, u_int16_t, x);
767
- y = m_from_sint(x);
768
- SET_DATA_STRIDE(p1, s1, dtype, y);
769
- }
770
- }
771
- } else {
772
- if (idx1) {
773
- for (; i--;) {
774
- GET_DATA_STRIDE(p2, s2, u_int16_t, x);
775
- y = m_from_sint(x);
776
- SET_DATA_INDEX(p1, idx1, dtype, y);
777
- }
778
- } else {
779
- for (; i--;) {
780
- GET_DATA_STRIDE(p2, s2, u_int16_t, x);
781
- y = m_from_sint(x);
782
- SET_DATA_STRIDE(p1, s1, dtype, y);
783
- }
784
- }
785
- }
786
- }
787
-
788
- static VALUE uint8_store_uint16(VALUE self, VALUE obj) {
789
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { Qnil, 0 } };
790
- ndfunc_t ndf = { iter_uint8_store_uint16, FULL_LOOP, 2, 0, ain, 0 };
791
-
792
- na_ndloop(&ndf, 2, self, obj);
793
- return self;
794
- }
795
-
796
- static void iter_uint8_store_uint8(na_loop_t* const lp) {
797
- size_t i, s1, s2;
798
- char *p1, *p2;
799
- size_t *idx1, *idx2;
800
- u_int8_t x;
801
- dtype y;
802
-
803
- INIT_COUNTER(lp, i);
804
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
805
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
806
- if (idx2) {
807
- if (idx1) {
808
- for (; i--;) {
809
- GET_DATA_INDEX(p2, idx2, u_int8_t, x);
810
- y = m_from_sint(x);
811
- SET_DATA_INDEX(p1, idx1, dtype, y);
812
- }
813
- } else {
814
- for (; i--;) {
815
- GET_DATA_INDEX(p2, idx2, u_int8_t, x);
816
- y = m_from_sint(x);
817
- SET_DATA_STRIDE(p1, s1, dtype, y);
818
- }
819
- }
820
- } else {
821
- if (idx1) {
822
- for (; i--;) {
823
- GET_DATA_STRIDE(p2, s2, u_int8_t, x);
824
- y = m_from_sint(x);
825
- SET_DATA_INDEX(p1, idx1, dtype, y);
826
- }
827
- } else {
828
- for (; i--;) {
829
- GET_DATA_STRIDE(p2, s2, u_int8_t, x);
830
- y = m_from_sint(x);
831
- SET_DATA_STRIDE(p1, s1, dtype, y);
832
- }
833
- }
834
- }
835
- }
836
-
837
- static VALUE uint8_store_uint8(VALUE self, VALUE obj) {
838
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { Qnil, 0 } };
839
- ndfunc_t ndf = { iter_uint8_store_uint8, FULL_LOOP, 2, 0, ain, 0 };
840
-
841
- na_ndloop(&ndf, 2, self, obj);
842
- return self;
843
- }
844
-
845
- static void iter_uint8_store_robject(na_loop_t* const lp) {
846
- size_t i, s1, s2;
847
- char *p1, *p2;
848
- size_t *idx1, *idx2;
849
- VALUE x;
850
- dtype y;
851
-
852
- INIT_COUNTER(lp, i);
853
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
854
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
855
- if (idx2) {
856
- if (idx1) {
857
- for (; i--;) {
858
- GET_DATA_INDEX(p2, idx2, VALUE, x);
859
- y = m_num_to_data(x);
860
- SET_DATA_INDEX(p1, idx1, dtype, y);
861
- }
862
- } else {
863
- for (; i--;) {
864
- GET_DATA_INDEX(p2, idx2, VALUE, x);
865
- y = m_num_to_data(x);
866
- SET_DATA_STRIDE(p1, s1, dtype, y);
867
- }
868
- }
869
- } else {
870
- if (idx1) {
871
- for (; i--;) {
872
- GET_DATA_STRIDE(p2, s2, VALUE, x);
873
- y = m_num_to_data(x);
874
- SET_DATA_INDEX(p1, idx1, dtype, y);
875
- }
876
- } else {
877
- for (; i--;) {
878
- GET_DATA_STRIDE(p2, s2, VALUE, x);
879
- y = m_num_to_data(x);
880
- SET_DATA_STRIDE(p1, s1, dtype, y);
881
- }
882
- }
883
- }
884
- }
885
-
886
- static VALUE uint8_store_robject(VALUE self, VALUE obj) {
887
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { Qnil, 0 } };
888
- ndfunc_t ndf = { iter_uint8_store_robject, FULL_LOOP, 2, 0, ain, 0 };
889
-
890
- na_ndloop(&ndf, 2, self, obj);
891
- return self;
892
- }
893
-
894
- static void iter_uint8_store_array(na_loop_t* const lp) {
895
- size_t i, n;
896
- size_t i1, n1;
897
- VALUE v1, *ptr;
898
- char* p1;
899
- size_t s1, *idx1;
900
- VALUE x;
901
- double y;
902
- dtype z;
903
- size_t len, c;
904
- double beg, step;
905
-
906
- INIT_COUNTER(lp, n);
907
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
908
- v1 = lp->args[1].value;
909
- i = 0;
910
-
911
- if (lp->args[1].ptr) {
912
- if (v1 == Qtrue) {
913
- iter_uint8_store_uint8(lp);
914
- i = lp->args[1].shape[0];
915
- if (idx1) {
916
- idx1 += i;
917
- } else {
918
- p1 += s1 * i;
919
- }
920
- }
921
- goto loop_end;
922
- }
923
-
924
- ptr = &v1;
925
-
926
- switch (TYPE(v1)) {
927
- case T_ARRAY:
928
- n1 = RARRAY_LEN(v1);
929
- ptr = RARRAY_PTR(v1);
930
- break;
931
- case T_NIL:
932
- n1 = 0;
933
- break;
934
- default:
935
- n1 = 1;
936
- }
937
-
938
- if (idx1) {
939
- for (i = i1 = 0; i1 < n1 && i < n; i++, i1++) {
940
- x = ptr[i1];
941
- if (rb_obj_is_kind_of(x, rb_cRange)
942
- #ifdef HAVE_RB_ARITHMETIC_SEQUENCE_EXTRACT
943
- || rb_obj_is_kind_of(x, rb_cArithSeq)
944
- #else
945
- || rb_obj_is_kind_of(x, rb_cEnumerator)
946
- #endif
947
- ) {
948
- nary_step_sequence(x, &len, &beg, &step);
949
- for (c = 0; c < len && i < n; c++, i++) {
950
- y = beg + step * c;
951
- z = m_from_double(y);
952
- SET_DATA_INDEX(p1, idx1, dtype, z);
953
- }
954
- } else if (TYPE(x) != T_ARRAY) {
955
- z = m_num_to_data(x);
956
- SET_DATA_INDEX(p1, idx1, dtype, z);
957
- }
958
- }
959
- } else {
960
- for (i = i1 = 0; i1 < n1 && i < n; i++, i1++) {
961
- x = ptr[i1];
962
- if (rb_obj_is_kind_of(x, rb_cRange)
963
- #ifdef HAVE_RB_ARITHMETIC_SEQUENCE_EXTRACT
964
- || rb_obj_is_kind_of(x, rb_cArithSeq)
965
- #else
966
- || rb_obj_is_kind_of(x, rb_cEnumerator)
967
- #endif
968
- ) {
969
- nary_step_sequence(x, &len, &beg, &step);
970
- for (c = 0; c < len && i < n; c++, i++) {
971
- y = beg + step * c;
972
- z = m_from_double(y);
973
- SET_DATA_STRIDE(p1, s1, dtype, z);
974
- }
975
- } else if (TYPE(x) != T_ARRAY) {
976
- z = m_num_to_data(x);
977
- SET_DATA_STRIDE(p1, s1, dtype, z);
978
- }
979
- }
980
- }
981
-
982
- loop_end:
983
- z = m_zero;
984
- if (idx1) {
985
- for (; i < n; i++) {
986
- SET_DATA_INDEX(p1, idx1, dtype, z);
987
- }
988
- } else {
989
- for (; i < n; i++) {
990
- SET_DATA_STRIDE(p1, s1, dtype, z);
991
- }
992
- }
993
- }
994
-
995
- static VALUE uint8_store_array(VALUE self, VALUE rary) {
996
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { rb_cArray, 0 } };
997
- ndfunc_t ndf = { iter_uint8_store_array, FULL_LOOP, 2, 0, ain, 0 };
998
-
999
- na_ndloop_store_rarray(&ndf, self, rary);
1000
- return self;
1001
- }
1002
-
1003
- /*
1004
- Store elements to Numo::UInt8 from other.
1005
- @overload store(other)
1006
- @param [Object] other
1007
- @return [Numo::UInt8] self
1008
- */
1009
- static VALUE uint8_store(VALUE self, VALUE obj) {
1010
- VALUE r, klass;
1011
-
1012
- klass = rb_obj_class(obj);
1013
-
1014
- if (klass == numo_cUInt8) {
1015
- uint8_store_uint8(self, obj);
1016
- return self;
1017
- }
1018
-
1019
- if (IS_INTEGER_CLASS(klass) || klass == rb_cFloat || klass == rb_cComplex) {
1020
- uint8_store_numeric(self, obj);
1021
- return self;
1022
- }
1023
-
1024
- if (klass == numo_cBit) {
1025
- uint8_store_bit(self, obj);
1026
- return self;
1027
- }
1028
-
1029
- if (klass == numo_cDFloat) {
1030
- uint8_store_dfloat(self, obj);
1031
- return self;
1032
- }
1033
-
1034
- if (klass == numo_cSFloat) {
1035
- uint8_store_sfloat(self, obj);
1036
- return self;
1037
- }
1038
-
1039
- if (klass == numo_cInt64) {
1040
- uint8_store_int64(self, obj);
1041
- return self;
1042
- }
1043
-
1044
- if (klass == numo_cInt32) {
1045
- uint8_store_int32(self, obj);
1046
- return self;
1047
- }
1048
-
1049
- if (klass == numo_cInt16) {
1050
- uint8_store_int16(self, obj);
1051
- return self;
1052
- }
1053
-
1054
- if (klass == numo_cInt8) {
1055
- uint8_store_int8(self, obj);
1056
- return self;
1057
- }
1058
-
1059
- if (klass == numo_cUInt64) {
1060
- uint8_store_uint64(self, obj);
1061
- return self;
1062
- }
1063
-
1064
- if (klass == numo_cUInt32) {
1065
- uint8_store_uint32(self, obj);
1066
- return self;
1067
- }
1068
-
1069
- if (klass == numo_cUInt16) {
1070
- uint8_store_uint16(self, obj);
1071
- return self;
1072
- }
1073
-
1074
- if (klass == numo_cRObject) {
1075
- uint8_store_robject(self, obj);
1076
- return self;
1077
- }
1078
-
1079
- if (klass == rb_cArray) {
1080
- uint8_store_array(self, obj);
1081
- return self;
1082
- }
1083
-
1084
- if (IsNArray(obj)) {
1085
- r = rb_funcall(obj, rb_intern("coerce_cast"), 1, cT);
1086
- if (rb_obj_class(r) == cT) {
1087
- uint8_store(self, r);
1088
- return self;
1089
- }
1090
- }
1091
-
1092
- rb_raise(
1093
- nary_eCastError, "unknown conversion from %s to %s", rb_class2name(rb_obj_class(obj)),
1094
- rb_class2name(rb_obj_class(self))
1095
- );
1096
-
1097
- return self;
1098
- }
1099
-
1100
- /*
1101
- Convert a data value of obj (with a single element) to dtype.
1102
- */
1103
- static dtype uint8_extract_data(VALUE obj) {
1104
- narray_t* na;
1105
- dtype x;
1106
- char* ptr;
1107
- size_t pos;
1108
- VALUE r, klass;
1109
-
1110
- if (IsNArray(obj)) {
1111
- GetNArray(obj, na);
1112
- if (na->size != 1) {
1113
- rb_raise(nary_eShapeError, "narray size should be 1");
1114
- }
1115
- klass = rb_obj_class(obj);
1116
- ptr = na_get_pointer_for_read(obj);
1117
- pos = na_get_offset(obj);
1118
-
1119
- if (klass == numo_cUInt8) {
1120
- x = m_from_sint(*(u_int8_t*)(ptr + pos));
1121
- return x;
1122
- }
1123
-
1124
- if (klass == numo_cBit) {
1125
- {
1126
- BIT_DIGIT b;
1127
- LOAD_BIT(ptr, pos, b);
1128
- x = m_from_sint(b);
1129
- };
1130
- return x;
1131
- }
1132
-
1133
- if (klass == numo_cDFloat) {
1134
- x = m_from_real(*(double*)(ptr + pos));
1135
- return x;
1136
- }
1137
-
1138
- if (klass == numo_cSFloat) {
1139
- x = m_from_real(*(float*)(ptr + pos));
1140
- return x;
1141
- }
1142
-
1143
- if (klass == numo_cInt64) {
1144
- x = m_from_int64(*(int64_t*)(ptr + pos));
1145
- return x;
1146
- }
1147
-
1148
- if (klass == numo_cInt32) {
1149
- x = m_from_int32(*(int32_t*)(ptr + pos));
1150
- return x;
1151
- }
1152
-
1153
- if (klass == numo_cInt16) {
1154
- x = m_from_sint(*(int16_t*)(ptr + pos));
1155
- return x;
1156
- }
1157
-
1158
- if (klass == numo_cInt8) {
1159
- x = m_from_sint(*(int8_t*)(ptr + pos));
1160
- return x;
1161
- }
1162
-
1163
- if (klass == numo_cUInt64) {
1164
- x = m_from_uint64(*(u_int64_t*)(ptr + pos));
1165
- return x;
1166
- }
1167
-
1168
- if (klass == numo_cUInt32) {
1169
- x = m_from_uint32(*(u_int32_t*)(ptr + pos));
1170
- return x;
1171
- }
1172
-
1173
- if (klass == numo_cUInt16) {
1174
- x = m_from_sint(*(u_int16_t*)(ptr + pos));
1175
- return x;
1176
- }
1177
-
1178
- if (klass == numo_cRObject) {
1179
- x = m_num_to_data(*(VALUE*)(ptr + pos));
1180
- return x;
1181
- }
1182
-
1183
- // coerce
1184
- r = rb_funcall(obj, rb_intern("coerce_cast"), 1, cT);
1185
- if (rb_obj_class(r) == cT) {
1186
- return uint8_extract_data(r);
1187
- }
1188
-
1189
- rb_raise(
1190
- nary_eCastError, "unknown conversion from %s to %s", rb_class2name(rb_obj_class(obj)),
1191
- rb_class2name(cT)
1192
- );
1193
- }
1194
- if (TYPE(obj) == T_ARRAY) {
1195
- if (RARRAY_LEN(obj) != 1) {
1196
- rb_raise(nary_eShapeError, "array size should be 1");
1197
- }
1198
- return m_num_to_data(RARRAY_AREF(obj, 0));
1199
- }
1200
- return m_num_to_data(obj);
1201
- }
1202
-
1203
- static VALUE uint8_cast_array(VALUE rary) {
1204
- VALUE nary;
1205
- narray_t* na;
1206
-
1207
- nary = na_s_new_like(cT, rary);
1208
- GetNArray(nary, na);
1209
- if (na->size > 0) {
1210
- uint8_store_array(nary, rary);
1211
- }
1212
- return nary;
1213
- }
1214
-
1215
- /*
1216
- Cast object to Numo::UInt8.
1217
- @overload [](elements)
1218
- @overload cast(array)
1219
- @param [Numeric,Array] elements
1220
- @param [Array] array
1221
- @return [Numo::UInt8]
1222
- */
1223
- static VALUE uint8_s_cast(VALUE type, VALUE obj) {
1224
- VALUE v;
1225
- narray_t* na;
1226
- dtype x;
1227
-
1228
- if (rb_obj_class(obj) == cT) {
1229
- return obj;
1230
- }
1231
- if (RTEST(rb_obj_is_kind_of(obj, rb_cNumeric))) {
1232
- x = m_num_to_data(obj);
1233
- return uint8_new_dim0(x);
1234
- }
1235
- if (RTEST(rb_obj_is_kind_of(obj, rb_cArray))) {
1236
- return uint8_cast_array(obj);
1237
- }
1238
- if (IsNArray(obj)) {
1239
- GetNArray(obj, na);
1240
- v = nary_new(cT, NA_NDIM(na), NA_SHAPE(na));
1241
- if (NA_SIZE(na) > 0) {
1242
- uint8_store(v, obj);
1243
- }
1244
- return v;
1245
- }
1246
- if (rb_respond_to(obj, id_to_a)) {
1247
- obj = rb_funcall(obj, id_to_a, 0);
1248
- if (TYPE(obj) != T_ARRAY) {
1249
- rb_raise(rb_eTypeError, "`to_a' did not return Array");
1250
- }
1251
- return uint8_cast_array(obj);
1252
- }
1253
-
1254
- rb_raise(nary_eCastError, "cannot cast to %s", rb_class2name(type));
1255
- return Qnil;
1256
- }
1257
-
1258
- /*
1259
- Multi-dimensional element reference.
1260
- @overload [](dim0,...,dimL)
1261
- @param [Numeric,Range,Array,Numo::Int32,Numo::Int64,Numo::Bit,TrueClass,FalseClass,Symbol]
1262
- dim0,...,dimL multi-dimensional indices.
1263
- @return [Numeric,Numo::UInt8] an element or NArray view.
1264
- @see Numo::NArray#[]
1265
- @see #[]=
1266
- */
1267
- static VALUE uint8_aref(int argc, VALUE* argv, VALUE self) {
1268
- int nd;
1269
- size_t pos;
1270
- char* ptr;
1271
-
1272
- nd = na_get_result_dimension(self, argc, argv, sizeof(dtype), &pos);
1273
- if (nd) {
1274
- return na_aref_main(argc, argv, self, 0, nd);
1275
- } else {
1276
- ptr = na_get_pointer_for_read(self) + pos;
1277
- return m_extract(ptr);
1278
- }
1279
- }
1280
-
1281
- /*
1282
- Multi-dimensional element assignment.
1283
- @overload []=(dim0,...,dimL,val)
1284
- @param [Numeric,Range,Array,Numo::Int32,Numo::Int64,Numo::Bit,TrueClass,FalseClass,Symbol]
1285
- dim0,...,dimL multi-dimensional indices.
1286
- @param [Numeric,Numo::NArray,Array] val Value(s) to be set to self.
1287
- @return [Numeric,Numo::NArray,Array] returns `val` (last argument).
1288
- @see Numo::NArray#[]=
1289
- @see #[]
1290
- */
1291
- static VALUE uint8_aset(int argc, VALUE* argv, VALUE self) {
1292
- int nd;
1293
- size_t pos;
1294
- char* ptr;
1295
- VALUE a;
1296
- dtype x;
1297
-
1298
- argc--;
1299
- if (argc == 0) {
1300
- uint8_store(self, argv[argc]);
1301
- } else {
1302
- nd = na_get_result_dimension(self, argc, argv, sizeof(dtype), &pos);
1303
- if (nd) {
1304
- a = na_aref_main(argc, argv, self, 0, nd);
1305
- uint8_store(a, argv[argc]);
1306
- } else {
1307
- x = uint8_extract_data(argv[argc]);
1308
- ptr = na_get_pointer_for_read_write(self) + pos;
1309
- *(dtype*)ptr = x;
1310
- }
1311
- }
1312
- return argv[argc];
1313
- }
1314
-
1315
- static void iter_uint8_each(na_loop_t* const lp) {
1316
- size_t i, s1;
1317
- char* p1;
1318
- size_t* idx1;
1319
- dtype x;
1320
- VALUE y;
1321
-
1322
- INIT_COUNTER(lp, i);
1323
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
1324
- if (idx1) {
1325
- for (; i--;) {
1326
- GET_DATA_INDEX(p1, idx1, dtype, x);
1327
- y = m_data_to_num(x);
1328
- rb_yield(y);
1329
- }
1330
- } else {
1331
- for (; i--;) {
1332
- GET_DATA_STRIDE(p1, s1, dtype, x);
1333
- y = m_data_to_num(x);
1334
- rb_yield(y);
1335
- }
1336
- }
1337
- }
1338
-
1339
- /*
1340
- Calls the given block once for each element in self,
1341
- passing that element as a parameter.
1342
- @overload each
1343
- @return [Numo::NArray] self
1344
- For a block `{|x| ... }`,
1345
- @yieldparam [Numeric] x an element of NArray.
1346
- @see #each_with_index
1347
- @see #map
1348
- */
1349
- static VALUE uint8_each(VALUE self) {
1350
- ndfunc_arg_in_t ain[1] = { { Qnil, 0 } };
1351
- ndfunc_t ndf = { iter_uint8_each, FULL_LOOP_NIP, 1, 0, ain, 0 };
1352
-
1353
- na_ndloop(&ndf, 1, self);
1354
- return self;
1355
- }
1356
-
1357
- static void iter_uint8_map(na_loop_t* const lp) {
1358
- size_t i, n;
1359
- char *p1, *p2;
1360
- ssize_t s1, s2;
1361
- size_t *idx1, *idx2;
1362
- dtype x;
1363
-
1364
- INIT_COUNTER(lp, n);
1365
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
1366
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
1367
-
1368
- if (idx1) {
1369
- if (idx2) {
1370
- for (i = 0; i < n; i++) {
1371
- GET_DATA_INDEX(p1, idx1, dtype, x);
1372
- x = m_map(x);
1373
- SET_DATA_INDEX(p2, idx2, dtype, x);
1374
- }
1375
- } else {
1376
- for (i = 0; i < n; i++) {
1377
- GET_DATA_INDEX(p1, idx1, dtype, x);
1378
- x = m_map(x);
1379
- SET_DATA_STRIDE(p2, s2, dtype, x);
1380
- }
1381
- }
1382
- } else {
1383
- if (idx2) {
1384
- for (i = 0; i < n; i++) {
1385
- GET_DATA_STRIDE(p1, s1, dtype, x);
1386
- x = m_map(x);
1387
- SET_DATA_INDEX(p2, idx2, dtype, x);
1388
- }
1389
- } else {
1390
- //
1391
- for (i = 0; i < n; i++) {
1392
- *(dtype*)p2 = m_map(*(dtype*)p1);
1393
- p1 += s1;
1394
- p2 += s2;
1395
- }
1396
- return;
1397
- //
1398
- }
1399
- }
1400
- }
1401
-
1402
- /*
1403
- Unary map.
1404
- @overload map
1405
- @return [Numo::UInt8] map of self.
1406
- */
1407
- static VALUE uint8_map(VALUE self) {
1408
- ndfunc_arg_in_t ain[1] = { { cT, 0 } };
1409
- ndfunc_arg_out_t aout[1] = { { cT, 0 } };
1410
- ndfunc_t ndf = { iter_uint8_map, FULL_LOOP, 1, 1, ain, aout };
1411
-
1412
- return na_ndloop(&ndf, 1, self);
1413
- }
1414
-
1415
- static inline void yield_each_with_index(dtype x, size_t* c, VALUE* a, int nd, int md) {
1416
- int j;
1417
-
1418
- a[0] = m_data_to_num(x);
1419
- for (j = 0; j <= nd; j++) {
1420
- a[j + 1] = SIZET2NUM(c[j]);
1421
- }
1422
- rb_yield(rb_ary_new4(md, a));
1423
- }
1424
-
1425
- static void iter_uint8_each_with_index(na_loop_t* const lp) {
1426
- size_t i, s1;
1427
- char* p1;
1428
- size_t* idx1;
1429
- dtype x;
1430
- VALUE* a;
1431
- size_t* c;
1432
- int nd, md;
1433
-
1434
- c = (size_t*)(lp->opt_ptr);
1435
- nd = lp->ndim;
1436
- if (nd > 0) {
1437
- nd--;
1438
- }
1439
- md = nd + 2;
1440
- a = ALLOCA_N(VALUE, md);
1441
-
1442
- INIT_COUNTER(lp, i);
1443
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
1444
- c[nd] = 0;
1445
- if (idx1) {
1446
- for (; i--;) {
1447
- GET_DATA_INDEX(p1, idx1, dtype, x);
1448
- yield_each_with_index(x, c, a, nd, md);
1449
- c[nd]++;
1450
- }
1451
- } else {
1452
- for (; i--;) {
1453
- GET_DATA_STRIDE(p1, s1, dtype, x);
1454
- yield_each_with_index(x, c, a, nd, md);
1455
- c[nd]++;
1456
- }
1457
- }
1458
- }
1459
-
1460
- /*
1461
- Invokes the given block once for each element of self,
1462
- passing that element and indices along each axis as parameters.
1463
- @overload each_with_index
1464
- For a block `{|x,i,j,...| ... }`,
1465
- @yieldparam [Numeric] x an element
1466
- @yieldparam [Integer] i,j,... multitimensional indices
1467
- @return [Numo::NArray] self
1468
- @see #each
1469
- @see #map_with_index
1470
- */
1471
- static VALUE uint8_each_with_index(VALUE self) {
1472
- ndfunc_arg_in_t ain[1] = { { Qnil, 0 } };
1473
- ndfunc_t ndf = { iter_uint8_each_with_index, FULL_LOOP_NIP, 1, 0, ain, 0 };
1474
-
1475
- na_ndloop_with_index(&ndf, 1, self);
1476
- return self;
1477
- }
1478
-
1479
- static inline dtype yield_map_with_index(dtype x, size_t* c, VALUE* a, int nd, int md) {
1480
- int j;
1481
- VALUE y;
1482
-
1483
- a[0] = m_data_to_num(x);
1484
- for (j = 0; j <= nd; j++) {
1485
- a[j + 1] = SIZET2NUM(c[j]);
1486
- }
1487
- y = rb_yield(rb_ary_new4(md, a));
1488
- return m_num_to_data(y);
1489
- }
1490
-
1491
- static void iter_uint8_map_with_index(na_loop_t* const lp) {
1492
- size_t i;
1493
- char *p1, *p2;
1494
- ssize_t s1, s2;
1495
- size_t *idx1, *idx2;
1496
- dtype x;
1497
- VALUE* a;
1498
- size_t* c;
1499
- int nd, md;
1500
-
1501
- c = (size_t*)(lp->opt_ptr);
1502
- nd = lp->ndim;
1503
- if (nd > 0) {
1504
- nd--;
1505
- }
1506
- md = nd + 2;
1507
- a = ALLOCA_N(VALUE, md);
1508
-
1509
- INIT_COUNTER(lp, i);
1510
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
1511
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
1512
-
1513
- c[nd] = 0;
1514
- if (idx1) {
1515
- if (idx2) {
1516
- for (; i--;) {
1517
- GET_DATA_INDEX(p1, idx1, dtype, x);
1518
- x = yield_map_with_index(x, c, a, nd, md);
1519
- SET_DATA_INDEX(p2, idx2, dtype, x);
1520
- c[nd]++;
1521
- }
1522
- } else {
1523
- for (; i--;) {
1524
- GET_DATA_INDEX(p1, idx1, dtype, x);
1525
- x = yield_map_with_index(x, c, a, nd, md);
1526
- SET_DATA_STRIDE(p2, s2, dtype, x);
1527
- c[nd]++;
1528
- }
1529
- }
1530
- } else {
1531
- if (idx2) {
1532
- for (; i--;) {
1533
- GET_DATA_STRIDE(p1, s1, dtype, x);
1534
- x = yield_map_with_index(x, c, a, nd, md);
1535
- SET_DATA_INDEX(p2, idx2, dtype, x);
1536
- c[nd]++;
1537
- }
1538
- } else {
1539
- for (; i--;) {
1540
- GET_DATA_STRIDE(p1, s1, dtype, x);
1541
- x = yield_map_with_index(x, c, a, nd, md);
1542
- SET_DATA_STRIDE(p2, s2, dtype, x);
1543
- c[nd]++;
1544
- }
1545
- }
1546
- }
1547
- }
1548
-
1549
- /*
1550
- Invokes the given block once for each element of self,
1551
- passing that element and indices along each axis as parameters.
1552
- Creates a new NArray containing the values returned by the block.
1553
- Inplace option is allowed, i.e., `nary.inplace.map` overwrites `nary`.
1554
- @overload map_with_index
1555
- For a block `{|x,i,j,...| ... }`,
1556
- @yieldparam [Numeric] x an element
1557
- @yieldparam [Integer] i,j,... multitimensional indices
1558
- @return [Numo::NArray] mapped array
1559
- @see #map
1560
- @see #each_with_index
1561
- */
1562
- static VALUE uint8_map_with_index(VALUE self) {
1563
- ndfunc_arg_in_t ain[1] = { { Qnil, 0 } };
1564
- ndfunc_arg_out_t aout[1] = { { cT, 0 } };
1565
- ndfunc_t ndf = { iter_uint8_map_with_index, FULL_LOOP, 1, 1, ain, aout };
1566
-
1567
- return na_ndloop_with_index(&ndf, 1, self);
1568
- }
1569
-
1570
- static void iter_uint8_abs(na_loop_t* const lp) {
1571
- size_t i;
1572
- char *p1, *p2;
1573
- ssize_t s1, s2;
1574
- size_t *idx1, *idx2;
1575
- dtype x;
1576
- rtype y;
1577
- INIT_COUNTER(lp, i);
1578
- INIT_PTR_IDX(lp, 0, p1, s1, idx1);
1579
- INIT_PTR_IDX(lp, 1, p2, s2, idx2);
1580
- if (idx1) {
1581
- if (idx2) {
1582
- for (; i--;) {
1583
- GET_DATA_INDEX(p1, idx1, dtype, x);
1584
- y = m_abs(x);
1585
- SET_DATA_INDEX(p2, idx2, rtype, y);
1586
- }
1587
- } else {
1588
- for (; i--;) {
1589
- GET_DATA_INDEX(p1, idx1, dtype, x);
1590
- y = m_abs(x);
1591
- SET_DATA_STRIDE(p2, s2, rtype, y);
1592
- }
1593
- }
1594
- } else {
1595
- if (idx2) {
1596
- for (; i--;) {
1597
- GET_DATA_STRIDE(p1, s1, dtype, x);
1598
- y = m_abs(x);
1599
- SET_DATA_INDEX(p2, idx2, rtype, y);
1600
- }
1601
- } else {
1602
- for (; i--;) {
1603
- GET_DATA_STRIDE(p1, s1, dtype, x);
1604
- y = m_abs(x);
1605
- SET_DATA_STRIDE(p2, s2, rtype, y);
1606
- }
1607
- }
1608
- }
1609
- }
1610
-
1611
- /*
1612
- abs of self.
1613
- @overload abs
1614
- @return [Numo::UInt8] abs of self.
1615
- */
1616
- static VALUE uint8_abs(VALUE self) {
1617
- ndfunc_arg_in_t ain[1] = { { cT, 0 } };
1618
- ndfunc_arg_out_t aout[1] = { { cRT, 0 } };
1619
- ndfunc_t ndf = { iter_uint8_abs, FULL_LOOP, 1, 1, ain, aout };
1620
-
1621
- return na_ndloop(&ndf, 1, self);
1622
- }
1623
-
1624
- static void iter_uint8_poly(na_loop_t* const lp) {
1625
- size_t i;
1626
- dtype x, y, a;
1627
-
1628
- x = *(dtype*)(lp->args[0].ptr + lp->args[0].iter[0].pos);
1629
- i = lp->narg - 2;
1630
- y = *(dtype*)(lp->args[i].ptr + lp->args[i].iter[0].pos);
1631
- for (; --i;) {
1632
- y = m_mul(x, y);
1633
- a = *(dtype*)(lp->args[i].ptr + lp->args[i].iter[0].pos);
1634
- y = m_add(y, a);
1635
- }
1636
- i = lp->narg - 1;
1637
- *(dtype*)(lp->args[i].ptr + lp->args[i].iter[0].pos) = y;
1638
- }
1639
-
1640
- /*
1641
- Calculate polynomial.
1642
- `x.poly(a0,a1,a2,...,an) = a0 + a1*x + a2*x**2 + ... + an*x**n`
1643
- @overload poly a0, a1, ..., an
1644
- @param [Numo::NArray,Numeric] a0,a1,...,an
1645
- @return [Numo::UInt8]
1646
- */
1647
- static VALUE uint8_poly(VALUE self, VALUE args) {
1648
- int argc, i;
1649
- VALUE* argv;
1650
- volatile VALUE v, a;
1651
- ndfunc_arg_out_t aout[1] = { { cT, 0 } };
1652
- ndfunc_t ndf = { iter_uint8_poly, NO_LOOP, 0, 1, 0, aout };
1653
-
1654
- argc = (int)RARRAY_LEN(args);
1655
- ndf.nin = argc + 1;
1656
- ndf.ain = ALLOCA_N(ndfunc_arg_in_t, argc + 1);
1657
- for (i = 0; i < argc + 1; i++) {
1658
- ndf.ain[i].type = cT;
1659
- }
1660
- argv = ALLOCA_N(VALUE, argc + 1);
1661
- argv[0] = self;
1662
- for (i = 0; i < argc; i++) {
1663
- argv[i + 1] = RARRAY_PTR(args)[i];
1664
- }
1665
- a = rb_ary_new4(argc + 1, argv);
1666
- v = na_ndloop2(&ndf, a);
1667
- return uint8_extract(v);
1668
- }
1669
-
1670
- /*
1671
- qsort.c
1672
- Ruby/Numo::NArray - Numerical Array class for Ruby
1673
- modified by Masahiro TANAKA
1674
- */
1675
-
1676
- /*
1677
- * qsort.c: standard quicksort algorithm
1678
- *
1679
- * Modifications from vanilla NetBSD source:
1680
- * Add do ... while() macro fix
1681
- * Remove __inline, _DIAGASSERTs, __P
1682
- * Remove ill-considered "swap_cnt" switch to insertion sort,
1683
- * in favor of a simple check for presorted input.
1684
- *
1685
- * CAUTION: if you change this file, see also qsort_arg.c
1686
- *
1687
- * $PostgreSQL: pgsql/src/port/qsort.c,v 1.12 2006/10/19 20:56:22 tgl Exp $
1688
- */
1689
-
1690
- /* $NetBSD: qsort.c,v 1.13 2003/08/07 16:43:42 agc Exp $ */
1691
-
1692
- /*-
1693
- * Copyright (c) 1992, 1993
1694
- * The Regents of the University of California. All rights reserved.
1695
- *
1696
- * Redistribution and use in source and binary forms, with or without
1697
- * modification, are permitted provided that the following conditions
1698
- * are met:
1699
- * 1. Redistributions of source code must retain the above copyright
1700
- * notice, this list of conditions and the following disclaimer.
1701
- * 2. Redistributions in binary form must reproduce the above copyright
1702
- * notice, this list of conditions and the following disclaimer in the
1703
- * documentation and/or other materials provided with the distribution.
1704
- * 3. Neither the name of the University nor the names of its contributors
1705
- * may be used to endorse or promote products derived from this software
1706
- * without specific prior written permission.
1707
- *
1708
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
1709
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1710
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1711
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
1712
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
1713
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
1714
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
1715
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
1716
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
1717
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
1718
- * SUCH DAMAGE.
1719
- */
1720
-
1721
- #ifndef QSORT_INCL
1722
- #define QSORT_INCL
1723
- #define Min(x, y) ((x) < (y) ? (x) : (y))
1724
-
1725
- /*
1726
- * Qsort routine based on J. L. Bentley and M. D. McIlroy,
1727
- * "Engineering a sort function",
1728
- * Software--Practice and Experience 23 (1993) 1249-1265.
1729
- * We have modified their original by adding a check for already-sorted input,
1730
- * which seems to be a win per discussions on pgsql-hackers around 2006-03-21.
1731
- */
1732
- #define swapcode(TYPE, parmi, parmj, n) \
1733
- do { \
1734
- size_t i = (n) / sizeof(TYPE); \
1735
- TYPE* pi = (TYPE*)(void*)(parmi); \
1736
- TYPE* pj = (TYPE*)(void*)(parmj); \
1737
- do { \
1738
- TYPE t = *pi; \
1739
- *pi++ = *pj; \
1740
- *pj++ = t; \
1741
- } while (--i > 0); \
1742
- } while (0)
1743
-
1744
- #ifdef HAVE_STDINT_H
1745
- #define SWAPINIT(a, es) \
1746
- swaptype = (uintptr_t)(a) % sizeof(long) || (es) % sizeof(long) ? 2 \
1747
- : (es) == sizeof(long) ? 0 \
1748
- : 1;
1749
- #else
1750
- #define SWAPINIT(a, es) \
1751
- swaptype = ((char*)(a) - (char*)0) % sizeof(long) || (es) % sizeof(long) ? 2 \
1752
- : (es) == sizeof(long) ? 0 \
1753
- : 1;
1754
- #endif
1755
-
1756
- static inline void swapfunc(char* a, char* b, size_t n, int swaptype) {
1757
- if (swaptype <= 1)
1758
- swapcode(long, a, b, n);
1759
- else
1760
- swapcode(char, a, b, n);
1761
- }
1762
-
1763
- #define swap(a, b) \
1764
- if (swaptype == 0) { \
1765
- long t = *(long*)(void*)(a); \
1766
- *(long*)(void*)(a) = *(long*)(void*)(b); \
1767
- *(long*)(void*)(b) = t; \
1768
- } else \
1769
- swapfunc(a, b, es, swaptype)
1770
-
1771
- #define vecswap(a, b, n) \
1772
- if ((n) > 0) swapfunc((a), (b), (size_t)(n), swaptype)
1773
-
1774
- #define med3(a, b, c, _cmp) \
1775
- (cmpgt(b, a) ? (cmpgt(c, b) ? b : (cmpgt(c, a) ? c : a)) \
1776
- : (cmpgt(b, c) ? b : (cmpgt(c, a) ? a : c)))
1777
- #endif
1778
-
175
+ DEF_NARRAY_POLY_METHOD_FUNC(uint8, numo_cUInt8)
1779
176
  #undef qsort_dtype
1780
- #define qsort_dtype dtype
177
+ #define qsort_dtype uint8
1781
178
  #undef qsort_cast
1782
- #define qsort_cast *(dtype*)
1783
-
1784
- static void uint8_qsort(void* a, size_t n, ssize_t es) {
1785
- char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
1786
- int d, r, swaptype, presorted;
1787
-
1788
- loop:
1789
- SWAPINIT(a, es);
1790
- if (n < 7) {
1791
- for (pm = (char*)a + es; pm < (char*)a + n * es; pm += es)
1792
- for (pl = pm; pl > (char*)a && cmpgt(pl - es, pl); pl -= es) swap(pl, pl - es);
1793
- return;
1794
- }
1795
- presorted = 1;
1796
- for (pm = (char*)a + es; pm < (char*)a + n * es; pm += es) {
1797
- if (cmpgt(pm - es, pm)) {
1798
- presorted = 0;
1799
- break;
1800
- }
1801
- }
1802
- if (presorted) return;
1803
- pm = (char*)a + (n / 2) * es;
1804
- if (n > 7) {
1805
- pl = (char*)a;
1806
- pn = (char*)a + (n - 1) * es;
1807
- if (n > 40) {
1808
- d = (int)((n / 8) * es);
1809
- pl = med3(pl, pl + d, pl + 2 * d, cmp);
1810
- pm = med3(pm - d, pm, pm + d, cmp);
1811
- pn = med3(pn - 2 * d, pn - d, pn, cmp);
1812
- }
1813
- pm = med3(pl, pm, pn, cmp);
1814
- }
1815
- swap(a, pm);
1816
- pa = pb = (char*)a + es;
1817
- pc = pd = (char*)a + (n - 1) * es;
1818
- for (;;) {
1819
- while (pb <= pc && (r = cmp(pb, a)) <= 0) {
1820
- if (r == 0) {
1821
- swap(pa, pb);
1822
- pa += es;
1823
- }
1824
- pb += es;
1825
- }
1826
- while (pb <= pc && (r = cmp(pc, a)) >= 0) {
1827
- if (r == 0) {
1828
- swap(pc, pd);
1829
- pd -= es;
1830
- }
1831
- pc -= es;
1832
- }
1833
- if (pb > pc) break;
1834
- swap(pb, pc);
1835
- pb += es;
1836
- pc -= es;
1837
- }
1838
- pn = (char*)a + n * es;
1839
- r = (int)(Min(pa - (char*)a, pb - pa));
1840
- vecswap(a, pb - r, r);
1841
- r = (int)(Min(pd - pc, pn - pd - es));
1842
- vecswap(pb, pn - r, r);
1843
- if ((r = (int)(pb - pa)) > es) uint8_qsort(a, r / es, es);
1844
- if ((r = (int)(pd - pc)) > es) {
1845
- /* Iterate rather than recurse to save stack space */
1846
- a = pn - r;
1847
- n = r / es;
1848
- goto loop;
1849
- }
1850
- /* qsort(pn - r, r / es, es, cmp);*/
1851
- }
1852
-
1853
- static void iter_uint8_sort(na_loop_t* const lp) {
1854
- size_t n;
1855
- char* ptr;
1856
- ssize_t step;
1857
-
1858
- INIT_COUNTER(lp, n);
1859
- INIT_PTR(lp, 0, ptr, step);
1860
- uint8_qsort(ptr, n, step);
1861
- }
1862
-
1863
- /*
1864
- sort of self.
1865
- @overload sort(axis:nil)
1866
- @param [Numeric,Array,Range] axis Performs sort along the axis.
1867
- @return [Numo::UInt8] returns result of sort.
1868
- @example
1869
- Numo::DFloat[3,4,1,2].sort #=> Numo::DFloat[1,2,3,4]
1870
- */
1871
- static VALUE uint8_sort(int argc, VALUE* argv, VALUE self) {
1872
- VALUE reduce;
1873
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { sym_reduce, 0 } };
1874
- ndfunc_t ndf = { 0, NDF_HAS_LOOP | NDF_FLAT_REDUCE, 2, 0, ain, 0 };
1875
-
1876
- if (!TEST_INPLACE(self)) {
1877
- self = na_copy(self);
1878
- }
1879
-
1880
- ndf.func = iter_uint8_sort;
1881
- reduce = na_reduce_dimension(argc, argv, 1, &self, &ndf, 0);
1882
-
1883
- na_ndloop(&ndf, 2, self, reduce);
1884
- return self;
1885
- }
1886
-
1887
- /*
1888
- qsort.c
1889
- Ruby/Numo::NArray - Numerical Array class for Ruby
1890
- modified by Masahiro TANAKA
1891
- */
1892
-
1893
- /*
1894
- * qsort.c: standard quicksort algorithm
1895
- *
1896
- * Modifications from vanilla NetBSD source:
1897
- * Add do ... while() macro fix
1898
- * Remove __inline, _DIAGASSERTs, __P
1899
- * Remove ill-considered "swap_cnt" switch to insertion sort,
1900
- * in favor of a simple check for presorted input.
1901
- *
1902
- * CAUTION: if you change this file, see also qsort_arg.c
1903
- *
1904
- * $PostgreSQL: pgsql/src/port/qsort.c,v 1.12 2006/10/19 20:56:22 tgl Exp $
1905
- */
1906
-
1907
- /* $NetBSD: qsort.c,v 1.13 2003/08/07 16:43:42 agc Exp $ */
1908
-
1909
- /*-
1910
- * Copyright (c) 1992, 1993
1911
- * The Regents of the University of California. All rights reserved.
1912
- *
1913
- * Redistribution and use in source and binary forms, with or without
1914
- * modification, are permitted provided that the following conditions
1915
- * are met:
1916
- * 1. Redistributions of source code must retain the above copyright
1917
- * notice, this list of conditions and the following disclaimer.
1918
- * 2. Redistributions in binary form must reproduce the above copyright
1919
- * notice, this list of conditions and the following disclaimer in the
1920
- * documentation and/or other materials provided with the distribution.
1921
- * 3. Neither the name of the University nor the names of its contributors
1922
- * may be used to endorse or promote products derived from this software
1923
- * without specific prior written permission.
1924
- *
1925
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
1926
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1927
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1928
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
1929
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
1930
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
1931
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
1932
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
1933
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
1934
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
1935
- * SUCH DAMAGE.
1936
- */
1937
-
179
+ #define qsort_cast *(uint8*)
180
+ DEF_NARRAY_INT_SORT_METHOD_FUNC(uint8)
1938
181
  #undef qsort_dtype
1939
- #define qsort_dtype dtype*
182
+ #define qsort_dtype uint8*
1940
183
  #undef qsort_cast
1941
- #define qsort_cast **(dtype**)
184
+ #define qsort_cast **(uint8**)
185
+ DEF_NARRAY_INT_SORT_INDEX_METHOD_FUNC(uint8, numo_cUInt8)
186
+ DEF_NARRAY_INT_MEDIAN_METHOD_FUNC(uint8)
187
+ DEF_NARRAY_INT_MEAN_METHOD_FUNC(uint8, numo_cUInt8)
188
+ DEF_NARRAY_INT_VAR_METHOD_FUNC(uint8, numo_cUInt8)
189
+ DEF_NARRAY_INT_STDDEV_METHOD_FUNC(uint8, numo_cUInt8)
190
+ DEF_NARRAY_INT_RMS_METHOD_FUNC(uint8, numo_cUInt8)
1942
191
 
1943
- static void uint8_index_qsort(void* a, size_t n, ssize_t es) {
1944
- char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
1945
- int d, r, swaptype, presorted;
192
+ static size_t uint8_memsize(const void* ptr) {
193
+ size_t size = sizeof(narray_data_t);
194
+ const narray_data_t* na = (const narray_data_t*)ptr;
1946
195
 
1947
- loop:
1948
- SWAPINIT(a, es);
1949
- if (n < 7) {
1950
- for (pm = (char*)a + es; pm < (char*)a + n * es; pm += es)
1951
- for (pl = pm; pl > (char*)a && cmpgt(pl - es, pl); pl -= es) swap(pl, pl - es);
1952
- return;
1953
- }
1954
- presorted = 1;
1955
- for (pm = (char*)a + es; pm < (char*)a + n * es; pm += es) {
1956
- if (cmpgt(pm - es, pm)) {
1957
- presorted = 0;
1958
- break;
1959
- }
1960
- }
1961
- if (presorted) return;
1962
- pm = (char*)a + (n / 2) * es;
1963
- if (n > 7) {
1964
- pl = (char*)a;
1965
- pn = (char*)a + (n - 1) * es;
1966
- if (n > 40) {
1967
- d = (int)((n / 8) * es);
1968
- pl = med3(pl, pl + d, pl + 2 * d, cmp);
1969
- pm = med3(pm - d, pm, pm + d, cmp);
1970
- pn = med3(pn - 2 * d, pn - d, pn, cmp);
1971
- }
1972
- pm = med3(pl, pm, pn, cmp);
196
+ assert(na->base.type == NARRAY_DATA_T);
197
+
198
+ if (na->ptr != NULL) {
199
+
200
+ size += na->base.size * sizeof(dtype);
1973
201
  }
1974
- swap(a, pm);
1975
- for (pa = pb = (char*)a + es, pc = pd = (char*)a + (n - 1) * es; pb <= pc;
1976
- pb += es, pc -= es) {
1977
- while (pb <= pc && (r = cmp(pb, a)) <= 0) {
1978
- if (r == 0) {
1979
- swap(pa, pb);
1980
- pa += es;
1981
- }
1982
- pb += es;
1983
- }
1984
- while (pb <= pc && (r = cmp(pc, a)) >= 0) {
1985
- if (r == 0) {
1986
- swap(pc, pd);
1987
- pd -= es;
1988
- }
1989
- pc -= es;
202
+ if (na->base.size > 0) {
203
+ if (na->base.shape != NULL && na->base.shape != &(na->base.size)) {
204
+ size += sizeof(size_t) * na->base.ndim;
1990
205
  }
1991
- if (pb > pc) break;
1992
- swap(pb, pc);
1993
206
  }
1994
- pn = (char*)a + n * es;
1995
- r = (int)Min(pa - (char*)a, pb - pa);
1996
- vecswap(a, pb - r, r);
1997
- r = (int)Min(pd - pc, pn - pd - es);
1998
- vecswap(pb, pn - r, r);
1999
- if ((r = (int)(pb - pa)) > es) uint8_index_qsort(a, r / es, es);
2000
- if ((r = (int)(pd - pc)) > es) {
2001
- /* Iterate rather than recurse to save stack space */
2002
- a = pn - r;
2003
- n = r / es;
2004
- goto loop;
2005
- }
2006
- /* qsort(pn - r, r / es, es, cmp);*/
207
+ return size;
2007
208
  }
2008
209
 
2009
- #define idx_t int64_t
2010
- static void uint8_index64_qsort(na_loop_t* const lp) {
2011
- size_t i, n, idx;
2012
- char *d_ptr, *i_ptr, *o_ptr;
2013
- ssize_t d_step, i_step, o_step;
2014
- char** ptr;
2015
-
2016
- INIT_COUNTER(lp, n);
2017
- INIT_PTR(lp, 0, d_ptr, d_step);
2018
- INIT_PTR(lp, 1, i_ptr, i_step);
2019
- INIT_PTR(lp, 2, o_ptr, o_step);
2020
-
2021
- ptr = (char**)(lp->opt_ptr);
2022
-
2023
- // o_ptr=%lx,o_step=%ld)\n",(size_t)ptr,(size_t)d_ptr,(ssize_t)d_step,(size_t)i_ptr,(ssize_t)i_step,(size_t)o_ptr,(ssize_t)o_step);
210
+ static void uint8_free(void* ptr) {
211
+ narray_data_t* na = (narray_data_t*)ptr;
2024
212
 
2025
- if (n == 1) {
2026
- *(idx_t*)o_ptr = *(idx_t*)(i_ptr);
2027
- return;
2028
- }
213
+ assert(na->base.type == NARRAY_DATA_T);
2029
214
 
2030
- for (i = 0; i < n; i++) {
2031
- ptr[i] = d_ptr + d_step * i;
215
+ if (na->ptr != NULL) {
216
+ if (na->owned) {
217
+ xfree(na->ptr);
218
+ }
219
+ na->ptr = NULL;
2032
220
  }
2033
-
2034
- uint8_index_qsort(ptr, n, sizeof(dtype*));
2035
-
2036
- // d_ptr = lp->args[0].ptr;
2037
-
2038
- for (i = 0; i < n; i++) {
2039
- idx = (ptr[i] - d_ptr) / d_step;
2040
- *(idx_t*)o_ptr = *(idx_t*)(i_ptr + i_step * idx);
2041
- o_ptr += o_step;
221
+ if (na->base.size > 0) {
222
+ if (na->base.shape != NULL && na->base.shape != &(na->base.size)) {
223
+ xfree(na->base.shape);
224
+ na->base.shape = NULL;
225
+ }
2042
226
  }
227
+ xfree(na);
2043
228
  }
2044
- #undef idx_t
2045
-
2046
- #define idx_t int32_t
2047
- static void uint8_index32_qsort(na_loop_t* const lp) {
2048
- size_t i, n, idx;
2049
- char *d_ptr, *i_ptr, *o_ptr;
2050
- ssize_t d_step, i_step, o_step;
2051
- char** ptr;
2052
-
2053
- INIT_COUNTER(lp, n);
2054
- INIT_PTR(lp, 0, d_ptr, d_step);
2055
- INIT_PTR(lp, 1, i_ptr, i_step);
2056
- INIT_PTR(lp, 2, o_ptr, o_step);
2057
-
2058
- ptr = (char**)(lp->opt_ptr);
2059
229
 
2060
- // o_ptr=%lx,o_step=%ld)\n",(size_t)ptr,(size_t)d_ptr,(ssize_t)d_step,(size_t)i_ptr,(ssize_t)i_step,(size_t)o_ptr,(ssize_t)o_step);
230
+ static narray_type_info_t uint8_info = {
2061
231
 
2062
- if (n == 1) {
2063
- *(idx_t*)o_ptr = *(idx_t*)(i_ptr);
2064
- return;
2065
- }
232
+ 0, // element_bits
233
+ sizeof(dtype), // element_bytes
234
+ sizeof(dtype), // element_stride (in bytes)
2066
235
 
2067
- for (i = 0; i < n; i++) {
2068
- ptr[i] = d_ptr + d_step * i;
2069
- }
236
+ };
2070
237
 
2071
- uint8_index_qsort(ptr, n, sizeof(dtype*));
238
+ static const rb_data_type_t uint8_data_type = {
239
+ "Numo::UInt8",
240
+ {
241
+ 0,
242
+ uint8_free,
243
+ uint8_memsize,
244
+ },
245
+ &na_data_type,
246
+ &uint8_info,
247
+ RUBY_TYPED_FROZEN_SHAREABLE, // flags
248
+ };
2072
249
 
2073
- // d_ptr = lp->args[0].ptr;
250
+ static VALUE uint8_s_alloc_func(VALUE klass) {
251
+ narray_data_t* na = ALLOC(narray_data_t);
2074
252
 
2075
- for (i = 0; i < n; i++) {
2076
- idx = (ptr[i] - d_ptr) / d_step;
2077
- *(idx_t*)o_ptr = *(idx_t*)(i_ptr + i_step * idx);
2078
- o_ptr += o_step;
2079
- }
253
+ na->base.ndim = 0;
254
+ na->base.type = NARRAY_DATA_T;
255
+ na->base.flag[0] = NA_FL0_INIT;
256
+ na->base.flag[1] = NA_FL1_INIT;
257
+ na->base.size = 0;
258
+ na->base.shape = NULL;
259
+ na->base.reduce = INT2FIX(0);
260
+ na->ptr = NULL;
261
+ na->owned = FALSE;
262
+ return TypedData_Wrap_Struct(klass, &uint8_data_type, (void*)na);
2080
263
  }
2081
- #undef idx_t
2082
264
 
2083
- /*
2084
- sort_index. Returns an index array of sort result.
2085
- @overload sort_index(axis:nil)
2086
- @param [Numeric,Array,Range] axis Performs sort_index along the axis.
2087
- @return [Integer,Numo::Int] returns result index of sort_index.
2088
- @example
2089
- Numo::NArray[3,4,1,2].sort_index #=> Numo::Int32[2,3,0,1]
2090
- */
2091
- static VALUE uint8_sort_index(int argc, VALUE* argv, VALUE self) {
2092
- size_t size;
265
+ static VALUE uint8_allocate(VALUE self) {
2093
266
  narray_t* na;
2094
- VALUE idx, tmp, reduce, res;
2095
- char* buf;
2096
- ndfunc_arg_in_t ain[3] = { { cT, 0 }, { 0, 0 }, { sym_reduce, 0 } };
2097
- ndfunc_arg_out_t aout[1] = { { 0, 0, 0 } };
2098
- ndfunc_t ndf = { 0, STRIDE_LOOP_NIP | NDF_FLAT_REDUCE | NDF_CUM, 3, 1, ain, aout };
267
+ char* ptr;
2099
268
 
2100
269
  GetNArray(self, na);
2101
- if (na->ndim == 0) {
2102
- return INT2FIX(0);
2103
- }
2104
- if (na->size > (~(u_int32_t)0)) {
2105
- ain[1].type = aout[0].type = numo_cInt64;
2106
- idx = nary_new(numo_cInt64, na->ndim, na->shape);
2107
-
2108
- ndf.func = uint8_index64_qsort;
2109
- reduce = na_reduce_dimension(argc, argv, 1, &self, &ndf, 0);
2110
-
2111
- } else {
2112
- ain[1].type = aout[0].type = numo_cInt32;
2113
- idx = nary_new(numo_cInt32, na->ndim, na->shape);
2114
-
2115
- ndf.func = uint8_index32_qsort;
2116
- reduce = na_reduce_dimension(argc, argv, 1, &self, &ndf, 0);
2117
- }
2118
- rb_funcall(idx, rb_intern("seq"), 0);
2119
-
2120
- size = na->size * sizeof(void*); // max capa
2121
- buf = rb_alloc_tmp_buffer(&tmp, size);
2122
- res = na_ndloop3(&ndf, buf, 3, self, idx, reduce);
2123
- rb_free_tmp_buffer(&tmp);
2124
- return res;
2125
- }
2126
-
2127
- static void iter_uint8_median(na_loop_t* const lp) {
2128
- size_t n;
2129
- char *p1, *p2;
2130
- dtype* buf;
2131
-
2132
- INIT_COUNTER(lp, n);
2133
- p1 = (lp->args[0]).ptr + (lp->args[0].iter[0]).pos;
2134
- p2 = (lp->args[1]).ptr + (lp->args[1].iter[0]).pos;
2135
- buf = (dtype*)p1;
2136
270
 
2137
- uint8_qsort(buf, n, sizeof(dtype));
271
+ switch (NA_TYPE(na)) {
272
+ case NARRAY_DATA_T:
273
+ ptr = NA_DATA_PTR(na);
274
+ if (na->size > 0 && ptr == NULL) {
275
+ ptr = xmalloc(sizeof(dtype) * na->size);
2138
276
 
2139
- if (n == 0) {
2140
- *(dtype*)p2 = buf[0];
2141
- } else if (n % 2 == 0) {
2142
- *(dtype*)p2 = (buf[n / 2 - 1] + buf[n / 2]) / 2;
2143
- } else {
2144
- *(dtype*)p2 = buf[(n - 1) / 2];
277
+ NA_DATA_PTR(na) = ptr;
278
+ NA_DATA_OWNED(na) = TRUE;
279
+ }
280
+ break;
281
+ case NARRAY_VIEW_T:
282
+ rb_funcall(NA_VIEW_DATA(na), rb_intern("allocate"), 0);
283
+ break;
284
+ case NARRAY_FILEMAP_T:
285
+ // ptr = ((narray_filemap_t*)na)->ptr;
286
+ // to be implemented
287
+ default:
288
+ rb_bug("invalid narray type : %d", NA_TYPE(na));
2145
289
  }
2146
- }
2147
-
2148
- /*
2149
- median of self.
2150
- @overload median(axis:nil, keepdims:false)
2151
- @param [Numeric,Array,Range] axis Finds median along the axis.
2152
- @param [TrueClass] keepdims If true, the reduced axes are left in the result array as
2153
- dimensions with size one.
2154
- @return [Numo::UInt8] returns median of self.
2155
- */
2156
-
2157
- static VALUE uint8_median(int argc, VALUE* argv, VALUE self) {
2158
- VALUE v, reduce;
2159
- ndfunc_arg_in_t ain[2] = { { OVERWRITE, 0 }, { sym_reduce, 0 } };
2160
- ndfunc_arg_out_t aout[1] = { { INT2FIX(0), 0 } };
2161
- ndfunc_t ndf = { 0, NDF_HAS_LOOP | NDF_FLAT_REDUCE, 2, 1, ain, aout };
2162
-
2163
- self = na_copy(self); // as temporary buffer
2164
-
2165
- ndf.func = iter_uint8_median;
2166
- reduce = na_reduce_dimension(argc, argv, 1, &self, &ndf, 0);
2167
-
2168
- v = na_ndloop(&ndf, 2, self, reduce);
2169
- return uint8_extract(v);
290
+ return self;
2170
291
  }
2171
292
 
2172
293
  void Init_numo_uint8(void) {
@@ -2202,12 +323,7 @@ void Init_numo_uint8(void) {
2202
323
  rb_define_const(cT, "UPCAST", hCast);
2203
324
  rb_hash_aset(hCast, rb_cArray, cT);
2204
325
 
2205
- #ifdef RUBY_INTEGER_UNIFICATION
2206
326
  rb_hash_aset(hCast, rb_cInteger, cT);
2207
- #else
2208
- rb_hash_aset(hCast, rb_cFixnum, cT);
2209
- rb_hash_aset(hCast, rb_cBignum, cT);
2210
- #endif
2211
327
  rb_hash_aset(hCast, rb_cFloat, numo_cDFloat);
2212
328
  rb_hash_aset(hCast, rb_cComplex, numo_cDComplex);
2213
329
  rb_hash_aset(hCast, numo_cRObject, numo_cRObject);
@@ -2237,12 +353,50 @@ void Init_numo_uint8(void) {
2237
353
  rb_define_const(cT, "MIN", M_MIN);
2238
354
  rb_define_alloc_func(cT, uint8_s_alloc_func);
2239
355
  rb_define_method(cT, "allocate", uint8_allocate, 0);
356
+ /**
357
+ * Extract an element only if self is a dimensionless NArray.
358
+ * @overload extract
359
+ * @return [Numeric,Numo::NArray]
360
+ * --- Extract element value as Ruby Object if self is a dimensionless NArray,
361
+ * otherwise returns self.
362
+ */
2240
363
  rb_define_method(cT, "extract", uint8_extract, 0);
2241
-
364
+ /**
365
+ * Store elements to Numo::UInt8 from other.
366
+ * @overload store(other)
367
+ * @param [Object] other
368
+ * @return [Numo::UInt8] self
369
+ */
2242
370
  rb_define_method(cT, "store", uint8_store, 1);
2243
-
371
+ /**
372
+ * Cast object to Numo::UInt8.
373
+ * @overload [](elements)
374
+ * @overload cast(array)
375
+ * @param [Numeric,Array] elements
376
+ * @param [Array] array
377
+ * @return [Numo::UInt8]
378
+ */
2244
379
  rb_define_singleton_method(cT, "cast", uint8_s_cast, 1);
380
+ /**
381
+ * Multi-dimensional element reference.
382
+ * @overload [](dim0,...,dimL)
383
+ * @param [Numeric,Range,Array,Numo::Int32,Numo::Int64,Numo::Bit,TrueClass,FalseClass,
384
+ * Symbol] dim0,...,dimL multi-dimensional indices.
385
+ * @return [Numeric,Numo::UInt8] an element or NArray view.
386
+ * @see Numo::NArray#[]
387
+ * @see #[]=
388
+ */
2245
389
  rb_define_method(cT, "[]", uint8_aref, -1);
390
+ /**
391
+ * Multi-dimensional element assignment.
392
+ * @overload []=(dim0,...,dimL,val)
393
+ * @param [Numeric,Range,Array,Numo::Int32,Numo::Int64,Numo::Bit,TrueClass,FalseClass,
394
+ * Symbol] dim0,...,dimL multi-dimensional indices.
395
+ * @param [Numeric,Numo::NArray,Array] val Value(s) to be set to self.
396
+ * @return [Numeric,Numo::NArray,Array] returns `val` (last argument).
397
+ * @see Numo::NArray#[]=
398
+ * @see #[]
399
+ */
2246
400
  rb_define_method(cT, "[]=", uint8_aset, -1);
2247
401
  /**
2248
402
  * return NArray with cast to the type of self.
@@ -2283,10 +437,54 @@ void Init_numo_uint8(void) {
2283
437
  * @return [String]
2284
438
  */
2285
439
  rb_define_method(cT, "inspect", uint8_inspect, 0);
440
+ /**
441
+ * Calls the given block once for each element in self,
442
+ * passing that element as a parameter.
443
+ * @overload each
444
+ * @return [Numo::NArray] self
445
+ * For a block `{|x| ... }`,
446
+ * @yieldparam [Numeric] x an element of NArray.
447
+ * @see #each_with_index
448
+ * @see #map
449
+ */
2286
450
  rb_define_method(cT, "each", uint8_each, 0);
451
+ /**
452
+ * Unary map.
453
+ * @overload map
454
+ * @return [Numo::UInt8] map of self.
455
+ */
2287
456
  rb_define_method(cT, "map", uint8_map, 0);
457
+ /**
458
+ * Invokes the given block once for each element of self,
459
+ * passing that element and indices along each axis as parameters.
460
+ * @overload each_with_index
461
+ * For a block `{|x,i,j,...| ... }`,
462
+ * @yieldparam [Numeric] x an element
463
+ * @yieldparam [Integer] i,j,... multidimensional indices
464
+ * @return [Numo::NArray] self
465
+ * @see #each
466
+ * @see #map_with_index
467
+ */
2288
468
  rb_define_method(cT, "each_with_index", uint8_each_with_index, 0);
469
+ /**
470
+ * Invokes the given block once for each element of self,
471
+ * passing that element and indices along each axis as parameters.
472
+ * Creates a new NArray containing the values returned by the block.
473
+ * Inplace option is allowed, i.e., `nary.inplace.map` overwrites `nary`.
474
+ * @overload map_with_index
475
+ * For a block `{|x,i,j,...| ... }`,
476
+ * @yieldparam [Numeric] x an element
477
+ * @yieldparam [Integer] i,j,... multitimensional indices
478
+ * @return [Numo::NArray] mapped array
479
+ * @see #map
480
+ * @see #each_with_index
481
+ */
2289
482
  rb_define_method(cT, "map_with_index", uint8_map_with_index, 0);
483
+ /**
484
+ * abs of self.
485
+ * @overload abs
486
+ * @return [Numo::UInt8] abs of self.
487
+ */
2290
488
  rb_define_method(cT, "abs", uint8_abs, 0);
2291
489
  /**
2292
490
  * Binary add.
@@ -2747,11 +945,40 @@ void Init_numo_uint8(void) {
2747
945
  * # [4, 3, 3, 2, 4, 2]
2748
946
  */
2749
947
  rb_define_method(cT, "rand", uint8_rand, -1);
948
+ /**
949
+ * Calculate polynomial.
950
+ * `x.poly(a0,a1,a2,...,an) = a0 + a1*x + a2*x**2 + ... + an*x**n`
951
+ * @overload poly a0, a1, ..., an
952
+ * @param [Numo::NArray,Numeric] a0,a1,...,an
953
+ * @return [Numo::UInt8]
954
+ */
2750
955
  rb_define_method(cT, "poly", uint8_poly, -2);
2751
-
956
+ /**
957
+ * sort of self.
958
+ * @overload sort(axis:nil)
959
+ * @param [Numeric,Array,Range] axis Performs sort along the axis.
960
+ * @return [Numo::UInt8] returns result of sort.
961
+ * @example
962
+ * Numo::DFloat[3,4,1,2].sort #=> Numo::DFloat[1,2,3,4]
963
+ */
2752
964
  rb_define_method(cT, "sort", uint8_sort, -1);
2753
-
965
+ /**
966
+ * sort_index. Returns an index array of sort result.
967
+ * @overload sort_index(axis:nil)
968
+ * @param [Numeric,Array,Range] axis Performs sort_index along the axis.
969
+ * @return [Integer,Numo::Int] returns result index of sort_index.
970
+ * @example
971
+ * Numo::NArray[3,4,1,2].sort_index #=> Numo::Int32[2,3,0,1]
972
+ */
2754
973
  rb_define_method(cT, "sort_index", uint8_sort_index, -1);
974
+ /**
975
+ * median of self.
976
+ * @overload median(axis:nil, keepdims:false)
977
+ * @param [Numeric,Array,Range] axis Finds median along the axis.
978
+ * @param [TrueClass] keepdims If true, the reduced axes are left in the result array as
979
+ * dimensions with size one.
980
+ * @return [Numo::UInt8] returns median of self.
981
+ */
2755
982
  rb_define_method(cT, "median", uint8_median, -1);
2756
983
  rb_define_singleton_method(cT, "[]", uint8_s_cast, -2);
2757
984
  /**