xnd 0.2.0dev6 → 0.2.0dev7

Files changed (74)
  1. checksums.yaml +4 -4
  2. data/README.md +2 -0
  3. data/Rakefile +1 -1
  4. data/ext/ruby_xnd/GPATH +0 -0
  5. data/ext/ruby_xnd/GRTAGS +0 -0
  6. data/ext/ruby_xnd/GTAGS +0 -0
  7. data/ext/ruby_xnd/extconf.rb +8 -5
  8. data/ext/ruby_xnd/gc_guard.c +53 -2
  9. data/ext/ruby_xnd/gc_guard.h +8 -2
  10. data/ext/ruby_xnd/include/overflow.h +147 -0
  11. data/ext/ruby_xnd/include/ruby_xnd.h +62 -0
  12. data/ext/ruby_xnd/include/xnd.h +590 -0
  13. data/ext/ruby_xnd/lib/libxnd.a +0 -0
  14. data/ext/ruby_xnd/lib/libxnd.so +1 -0
  15. data/ext/ruby_xnd/lib/libxnd.so.0 +1 -0
  16. data/ext/ruby_xnd/lib/libxnd.so.0.2.0dev3 +0 -0
  17. data/ext/ruby_xnd/ruby_xnd.c +556 -47
  18. data/ext/ruby_xnd/ruby_xnd.h +2 -1
  19. data/ext/ruby_xnd/xnd/Makefile +80 -0
  20. data/ext/ruby_xnd/xnd/config.h +26 -0
  21. data/ext/ruby_xnd/xnd/config.h.in +3 -0
  22. data/ext/ruby_xnd/xnd/config.log +421 -0
  23. data/ext/ruby_xnd/xnd/config.status +1023 -0
  24. data/ext/ruby_xnd/xnd/configure +376 -8
  25. data/ext/ruby_xnd/xnd/configure.ac +48 -7
  26. data/ext/ruby_xnd/xnd/doc/xnd/index.rst +3 -1
  27. data/ext/ruby_xnd/xnd/doc/xnd/{types.rst → xnd.rst} +3 -18
  28. data/ext/ruby_xnd/xnd/libxnd/Makefile +142 -0
  29. data/ext/ruby_xnd/xnd/libxnd/Makefile.in +43 -3
  30. data/ext/ruby_xnd/xnd/libxnd/Makefile.vc +19 -3
  31. data/ext/ruby_xnd/xnd/libxnd/bitmaps.c +42 -3
  32. data/ext/ruby_xnd/xnd/libxnd/bitmaps.o +0 -0
  33. data/ext/ruby_xnd/xnd/libxnd/bounds.c +366 -0
  34. data/ext/ruby_xnd/xnd/libxnd/bounds.o +0 -0
  35. data/ext/ruby_xnd/xnd/libxnd/contrib.h +98 -0
  36. data/ext/ruby_xnd/xnd/libxnd/contrib/bfloat16.h +213 -0
  37. data/ext/ruby_xnd/xnd/libxnd/copy.c +155 -4
  38. data/ext/ruby_xnd/xnd/libxnd/copy.o +0 -0
  39. data/ext/ruby_xnd/xnd/libxnd/cuda/cuda_memory.cu +121 -0
  40. data/ext/ruby_xnd/xnd/libxnd/cuda/cuda_memory.h +58 -0
  41. data/ext/ruby_xnd/xnd/libxnd/equal.c +195 -7
  42. data/ext/ruby_xnd/xnd/libxnd/equal.o +0 -0
  43. data/ext/ruby_xnd/xnd/libxnd/inline.h +32 -0
  44. data/ext/ruby_xnd/xnd/libxnd/libxnd.a +0 -0
  45. data/ext/ruby_xnd/xnd/libxnd/libxnd.so +1 -0
  46. data/ext/ruby_xnd/xnd/libxnd/libxnd.so.0 +1 -0
  47. data/ext/ruby_xnd/xnd/libxnd/libxnd.so.0.2.0dev3 +0 -0
  48. data/ext/ruby_xnd/xnd/libxnd/shape.c +207 -0
  49. data/ext/ruby_xnd/xnd/libxnd/shape.o +0 -0
  50. data/ext/ruby_xnd/xnd/libxnd/split.c +2 -2
  51. data/ext/ruby_xnd/xnd/libxnd/split.o +0 -0
  52. data/ext/ruby_xnd/xnd/libxnd/tests/Makefile +39 -0
  53. data/ext/ruby_xnd/xnd/libxnd/xnd.c +613 -91
  54. data/ext/ruby_xnd/xnd/libxnd/xnd.h +145 -4
  55. data/ext/ruby_xnd/xnd/libxnd/xnd.o +0 -0
  56. data/ext/ruby_xnd/xnd/python/test_xnd.py +1125 -50
  57. data/ext/ruby_xnd/xnd/python/xnd/__init__.py +609 -124
  58. data/ext/ruby_xnd/xnd/python/xnd/_version.py +1 -0
  59. data/ext/ruby_xnd/xnd/python/xnd/_xnd.c +1652 -101
  60. data/ext/ruby_xnd/xnd/python/xnd/libxnd.a +0 -0
  61. data/ext/ruby_xnd/xnd/python/xnd/libxnd.so +1 -0
  62. data/ext/ruby_xnd/xnd/python/xnd/libxnd.so.0 +1 -0
  63. data/ext/ruby_xnd/xnd/python/xnd/libxnd.so.0.2.0dev3 +0 -0
  64. data/ext/ruby_xnd/xnd/python/xnd/pyxnd.h +1 -1
  65. data/ext/ruby_xnd/xnd/python/xnd/util.h +25 -0
  66. data/ext/ruby_xnd/xnd/python/xnd/xnd.h +590 -0
  67. data/ext/ruby_xnd/xnd/python/xnd_randvalue.py +106 -6
  68. data/ext/ruby_xnd/xnd/python/xnd_support.py +4 -0
  69. data/ext/ruby_xnd/xnd/setup.py +46 -4
  70. data/lib/ruby_xnd.so +0 -0
  71. data/lib/xnd.rb +39 -3
  72. data/lib/xnd/version.rb +2 -2
  73. data/xnd.gemspec +2 -1
  74. metadata +58 -5
@@ -0,0 +1,366 @@
+ /*
+  * BSD 3-Clause License
+  *
+  * Copyright (c) 2017-2018, plures
+  * All rights reserved.
+  *
+  * Redistribution and use in source and binary forms, with or without
+  * modification, are permitted provided that the following conditions are met:
+  *
+  * 1. Redistributions of source code must retain the above copyright notice,
+  *    this list of conditions and the following disclaimer.
+  *
+  * 2. Redistributions in binary form must reproduce the above copyright notice,
+  *    this list of conditions and the following disclaimer in the documentation
+  *    and/or other materials provided with the distribution.
+  *
+  * 3. Neither the name of the copyright holder nor the names of its
+  *    contributors may be used to endorse or promote products derived from
+  *    this software without specific prior written permission.
+  *
+  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+  * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  */
+
+
+ #include <stdlib.h>
+ #include <stdint.h>
+ #include "ndtypes.h"
+ #include "xnd.h"
+ #include "overflow.h"
+
+ #ifndef _MSC_VER
+ #include "config.h"
+ #endif
+
+
+ typedef struct {
+     int64_t index;
+     const ndt_t *type;
+     int64_t ptr;
+ } xnd_bounds_t;
+
+
+ static inline xnd_bounds_t
+ _fixed_dim_next(const xnd_bounds_t *x, const int64_t i, bool *overflow)
+ {
+     const ndt_t *t = x->type;
+     const ndt_t *u = t->FixedDim.type;
+     const int64_t step = MULi64(i, t->Concrete.FixedDim.step, overflow);
+     xnd_bounds_t next;
+
+     next.index = ADDi64(x->index, step, overflow);
+     next.type = u;
+     next.ptr = x->ptr;
+
+     if (u->ndim == 0) {
+         int64_t tmp = MULi64(next.index, next.type->datasize, overflow);
+         next.ptr = ADDi64(x->ptr, tmp, overflow);
+     }
+
+     return next;
+ }
+
+ static inline xnd_bounds_t
+ _var_dim_next(const xnd_bounds_t *x, const int64_t start, const int64_t step,
+               const int64_t i, bool *overflow)
+ {
+     const ndt_t *t = x->type;
+     const ndt_t *u = t->VarDim.type;
+     xnd_bounds_t next;
+     int64_t tmp;
+
+     tmp = MULi64(i, step, overflow);
+     next.index = ADDi64(start, tmp, overflow);
+     next.type = u;
+     next.ptr = x->ptr;
+
+     if (u->ndim == 0) {
+         tmp = MULi64(next.index, next.type->datasize, overflow);
+         next.ptr = ADDi64(x->ptr, tmp, overflow);
+     }
+
+     return next;
+ }
+
+ static inline xnd_bounds_t
+ _tuple_next(const xnd_bounds_t *x, const int64_t i, bool *overflow)
+ {
+     const ndt_t *t = x->type;
+     xnd_bounds_t next;
+
+     next.index = 0;
+     next.type = t->Tuple.types[i];
+     next.ptr = ADDi64(x->ptr, t->Concrete.Tuple.offset[i], overflow);
+
+     return next;
+ }
+
+ static inline xnd_bounds_t
+ _record_next(const xnd_bounds_t *x, const int64_t i, bool *overflow)
+ {
+     const ndt_t *t = x->type;
+     xnd_bounds_t next;
+
+     next.index = 0;
+     next.type = t->Record.types[i];
+     next.ptr = ADDi64(x->ptr, t->Concrete.Record.offset[i], overflow);
+
+     return next;
+ }
+
+ static inline xnd_bounds_t
+ _constr_next(const xnd_bounds_t *x)
+ {
+     const ndt_t *t = x->type;
+     xnd_bounds_t next;
+
+     next.index = 0;
+     next.type = t->Constr.type;
+     next.ptr = x->ptr;
+
+     return next;
+ }
+
+ static inline xnd_bounds_t
+ _nominal_next(const xnd_bounds_t *x)
+ {
+     const ndt_t *t = x->type;
+     xnd_bounds_t next;
+
+     next.index = 0;
+     next.type = t->Nominal.type;
+     next.ptr = x->ptr;
+
+     return next;
+ }
+
+
+ static int
+ _xnd_bounds_check(const xnd_bounds_t * const x, const int64_t bufsize, ndt_context_t *ctx)
+ {
+     const ndt_t * const t = x->type;
+     bool overflow = false;
+
+     if (ndt_is_abstract(t)) {
+         ndt_err_format(ctx, NDT_ValueError,
+                        "bounds checking requires a concrete type");
+         return -1;
+     }
+
+     if (ndt_subtree_is_optional(t)) {
+         ndt_err_format(ctx, NDT_NotImplementedError,
+                        "bounds checking not implemented for optional types");
+         return -1;
+     }
+
+     switch (t->tag) {
+     case FixedDim: {
+         if (t->FixedDim.shape > 0) {
+             xnd_bounds_t next = _fixed_dim_next(x, 0, &overflow);
+             if (_xnd_bounds_check(&next, bufsize, ctx) < 0) {
+                 return -1;
+             }
+         }
+
+         if (t->FixedDim.shape > 1) {
+             xnd_bounds_t next = _fixed_dim_next(x, t->FixedDim.shape-1, &overflow);
+             if (_xnd_bounds_check(&next, bufsize, ctx) < 0) {
+                 return -1;
+             }
+         }
+
+         if (overflow) {
+             goto overflow_error;
+         }
+
+         return 0;
+     }
+
+     case VarDim: {
+         int64_t start, step, shape;
+
+         shape = ndt_var_indices(&start, &step, t, x->index, ctx);
+         if (shape < 0) {
+             return -1;
+         }
+
+         if (shape > 0) {
+             xnd_bounds_t next = _var_dim_next(x, start, step, 0, &overflow);
+             if (_xnd_bounds_check(&next, bufsize, ctx) < 0) {
+                 return -1;
+             }
+         }
+
+         if (shape > 1) {
+             xnd_bounds_t next = _var_dim_next(x, start, step, shape-1, &overflow);
+             if (_xnd_bounds_check(&next, bufsize, ctx) < 0) {
+                 return -1;
+             }
+         }
+
+         if (overflow) {
+             goto overflow_error;
+         }
+
+         return 0;
+     }
+
+     case Tuple: {
+         if (t->Tuple.shape > 0) {
+             xnd_bounds_t next = _tuple_next(x, 0, &overflow);
+             if (_xnd_bounds_check(&next, bufsize, ctx) < 0) {
+                 return -1;
+             }
+         }
+
+         if (t->Tuple.shape > 1) {
+             xnd_bounds_t next = _tuple_next(x, t->Tuple.shape-1, &overflow);
+             if (_xnd_bounds_check(&next, bufsize, ctx) < 0) {
+                 return -1;
+             }
+         }
+
+         if (overflow) {
+             goto overflow_error;
+         }
+
+         return 0;
+     }
+
+     case Record: {
+         if (t->Record.shape > 0) {
+             xnd_bounds_t next = _record_next(x, 0, &overflow);
+             if (_xnd_bounds_check(&next, bufsize, ctx) < 0) {
+                 return -1;
+             }
+         }
+
+         if (t->Record.shape > 1) {
+             xnd_bounds_t next = _record_next(x, t->Record.shape-1, &overflow);
+             if (_xnd_bounds_check(&next, bufsize, ctx) < 0) {
+                 return -1;
+             }
+         }
+
+         if (overflow) {
+             goto overflow_error;
+         }
+
+         return 0;
+     }
+
+     case Union: {
+         ndt_err_format(ctx, NDT_NotImplementedError,
+                        "bounds checking union types is not implemented");
+         return -1;
+     }
+
+     case Ref: {
+         ndt_err_format(ctx, NDT_NotImplementedError,
+                        "bounds checking ref types is not implemented");
+         return -1;
+     }
+
+     case Constr: {
+         xnd_bounds_t next = _constr_next(x);
+         if (_xnd_bounds_check(&next, bufsize, ctx) < 0) {
+             return -1;
+         }
+
+         return 0;
+     }
+
+     case Nominal: {
+         xnd_bounds_t next = _nominal_next(x);
+         if (_xnd_bounds_check(&next, bufsize, ctx) < 0) {
+             return -1;
+         }
+
+         return 0;
+     }
+
+     case VarDimElem: {
+         ndt_err_format(ctx, NDT_NotImplementedError,
+                        "cannot bounds check var elem dimension");
+         return -1;
+     }
+
+     case Char: {
+         ndt_err_format(ctx, NDT_NotImplementedError,
+                        "char not implemented");
+         return -1;
+     }
+
+     case String: case Bytes: {
+         ndt_err_format(ctx, NDT_NotImplementedError,
+                        "serialization for string and bytes is not implemented");
+         return -1;
+     }
+
+     case Array: {
+         ndt_err_format(ctx, NDT_NotImplementedError,
+                        "serialization for flexible arrays is not implemented");
+         return -1;
+     }
+
+     case Categorical:
+     case Bool:
+     case Int8: case Int16: case Int32: case Int64:
+     case Uint8: case Uint16: case Uint32: case Uint64:
+     case BFloat16: case Float16: case Float32: case Float64:
+     case BComplex32: case Complex32: case Complex64: case Complex128:
+     case FixedString: case FixedBytes: {
+         const int64_t min = x->ptr;
+         const int64_t max = ADDi64(min, t->datasize, &overflow);
+
+         if (overflow) {
+             goto overflow_error;
+         }
+
+         if (min < 0 || max > bufsize) {
+             ndt_err_format(ctx, NDT_ValueError, "bounds check failed");
+             return -1;
+         }
+
+         return 0;
+     }
+
+     /* NOT REACHED: intercepted by ndt_is_abstract(). */
+     case Module: case Function:
+     case AnyKind: case SymbolicDim: case EllipsisDim: case Typevar:
+     case ScalarKind: case SignedKind: case UnsignedKind: case FloatKind:
+     case ComplexKind: case FixedStringKind: case FixedBytesKind:
+         ndt_err_format(ctx, NDT_RuntimeError, "unexpected abstract type");
+         return -1;
+     }
+
+     /* NOT REACHED: tags should be exhaustive */
+     ndt_err_format(ctx, NDT_RuntimeError, "invalid type tag");
+     return -1;
+
+ overflow_error:
+     ndt_err_format(ctx, NDT_ValueError, "overflow in bounds check");
+     return -1;
+ }
+
+ int
+ xnd_bounds_check(const ndt_t *t, const int64_t linear_index, const int64_t bufsize,
+                  ndt_context_t *ctx)
+ {
+     xnd_bounds_t x;
+
+     x.index = linear_index;
+     x.type = t;
+     x.ptr = 0;
+
+     return _xnd_bounds_check(&x, bufsize, ctx);
+ }
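
The hunk above is the new file libxnd/bounds.c (entry 33 in the file list). Its public entry point, xnd_bounds_check(), walks a concrete type and verifies that every reachable data offset fits inside a buffer of bufsize bytes, probing only the first and last element of each dimension, and returns 0 on success or -1 with an error set in the ndt_context_t. The caller sketch below is illustrative only and not part of this release; it assumes the standard ndtypes helpers (ndt_context_new, ndt_from_string, ndt_context_msg, ndt_context_del), and library initialization and type cleanup are elided because those details depend on the ndtypes version bundled with the gem.

#include <stdio.h>
#include "ndtypes.h"
#include "xnd.h"

/* Sketch: a serialized "2 * 3 * int64" value needs 48 bytes of data, so a
 * 16-byte buffer must fail the check.  Assumes ndt_init() has already been
 * called during library setup; type cleanup (ndt_decref/ndt_del, depending
 * on the ndtypes version) is omitted for brevity. */
static int
bounds_check_example(void)
{
    ndt_context_t *ctx = ndt_context_new();
    const ndt_t *t;

    if (ctx == NULL) {
        return -1;
    }

    t = ndt_from_string("2 * 3 * int64", ctx);
    if (t == NULL) {
        fprintf(stderr, "%s\n", ndt_context_msg(ctx));
        ndt_context_del(ctx);
        return -1;
    }

    /* linear_index 0, bufsize 16: the last element would end at offset 48. */
    if (xnd_bounds_check(t, 0, 16, ctx) < 0) {
        fprintf(stderr, "expected failure: %s\n", ndt_context_msg(ctx));
    }

    ndt_context_del(ctx);
    return 0;
}
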
@@ -6,6 +6,7 @@
  #include <stdint.h>
  #include <inttypes.h>
  #include <math.h>
+ #include <ndtypes.h>
 
 
  /* PSF copyright: Written by Jim Hugunin and Chris Chase. */
@@ -309,5 +310,102 @@ xnd_float_unpack8(const unsigned char *p, int le)
  return x;
  }
 
+ /* NumPy copyright: Original by David Cournapeau. */
+ static inline int
+ xnd_nocopy_reshape(int64_t *newdims, int64_t *newstrides, int newnd,
+                    const int64_t *srcdims, const int64_t *srcstrides, const int srcnd,
+                    int is_f_order)
+ {
+     int oldnd;
+     int64_t olddims[NDT_MAX_DIM];
+     int64_t oldstrides[NDT_MAX_DIM];
+     int64_t last_stride;
+     int oi, oj, ok, ni, nj, nk;
+
+     oldnd = 0;
+     /*
+      * Remove axes with dimension 1 from the old array. They have no effect
+      * but would need special cases since their strides do not matter.
+      */
+     for (oi = 0; oi < srcnd; oi++) {
+         if (srcdims[oi] != 1) {
+             olddims[oldnd] = srcdims[oi];
+             oldstrides[oldnd] = srcstrides[oi];
+             oldnd++;
+         }
+     }
+
+     /* oi to oj and ni to nj give the axis ranges currently worked with */
+     oi = 0;
+     oj = 1;
+     ni = 0;
+     nj = 1;
+     while (ni < newnd && oi < oldnd) {
+         int64_t np = newdims[ni];
+         int64_t op = olddims[oi];
+
+         while (np != op) {
+             if (np < op) {
+                 /* Misses trailing 1s, these are handled later */
+                 np *= newdims[nj++];
+             } else {
+                 op *= olddims[oj++];
+             }
+         }
+
+         /* Check whether the original axes can be combined */
+         for (ok = oi; ok < oj - 1; ok++) {
+             if (is_f_order) {
+                 if (oldstrides[ok+1] != olddims[ok]*oldstrides[ok]) {
+                     /* not contiguous enough */
+                     return 0;
+                 }
+             }
+             else {
+                 /* C order */
+                 if (oldstrides[ok] != olddims[ok+1]*oldstrides[ok+1]) {
+                     /* not contiguous enough */
+                     return 0;
+                 }
+             }
+         }
+
+         /* Calculate new strides for all axes currently worked with */
+         if (is_f_order) {
+             newstrides[ni] = oldstrides[oi];
+             for (nk = ni + 1; nk < nj; nk++) {
+                 newstrides[nk] = newstrides[nk - 1]*newdims[nk - 1];
+             }
+         }
+         else {
+             /* C order */
+             newstrides[nj - 1] = oldstrides[oj - 1];
+             for (nk = nj - 1; nk > ni; nk--) {
+                 newstrides[nk - 1] = newstrides[nk]*newdims[nk];
+             }
+         }
+         ni = nj++;
+         oi = oj++;
+     }
+
+     /*
+      * Set strides corresponding to trailing 1s of the new shape.
+      */
+     if (ni >= 1) {
+         last_stride = newstrides[ni - 1];
+     }
+     else {
+         last_stride = 1;
+     }
+     if (is_f_order) {
+         last_stride *= newdims[ni - 1];
+     }
+     for (nk = ni; nk < newnd; nk++) {
+         newstrides[nk] = last_stride;
+     }
+
+     return 1;
+ }
+
 
  #endif /* CONTRIB_H */
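
xnd_nocopy_reshape() above is the NumPy-derived helper that decides whether a reshape can reuse the existing buffer: it returns 1 and fills newstrides when the source strides are compatible with the requested shape, and 0 when a copy would be required. The standalone driver below is illustrative only, not part of this diff; it assumes contrib.h (an internal libxnd header) and its ndtypes dependency are on the include path.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include "contrib.h"

int
main(void)
{
    /* A C-contiguous 2 x 6 array of 8-byte elements: strides are 48 and 8. */
    const int64_t srcdims[2] = {2, 6};
    const int64_t srcstrides[2] = {48, 8};
    int64_t newdims[2] = {3, 4};
    int64_t newstrides[2];

    if (xnd_nocopy_reshape(newdims, newstrides, 2,
                           srcdims, srcstrides, 2, /*is_f_order=*/0)) {
        /* Prints "strides: 32, 8": the 3 x 4 view shares the same buffer. */
        printf("strides: %" PRIi64 ", %" PRIi64 "\n",
               newstrides[0], newstrides[1]);
    }
    else {
        printf("copy required\n");
    }

    return 0;
}

Feeding the same call the transposed strides {8, 48} makes the inner contiguity check fail, so the function returns 0, signalling that the requested view cannot be constructed without copying.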