ndtypes 0.2.0dev4

Sign up to get free protection for your applications and to get access to all the features.
Files changed (139) hide show
  1. checksums.yaml +7 -0
  2. data/CONTRIBUTING.md +50 -0
  3. data/Gemfile +2 -0
  4. data/History.md +0 -0
  5. data/README.md +19 -0
  6. data/Rakefile +125 -0
  7. data/ext/ruby_ndtypes/extconf.rb +55 -0
  8. data/ext/ruby_ndtypes/gc_guard.c +36 -0
  9. data/ext/ruby_ndtypes/gc_guard.h +12 -0
  10. data/ext/ruby_ndtypes/ndtypes/AUTHORS.txt +5 -0
  11. data/ext/ruby_ndtypes/ndtypes/INSTALL.txt +101 -0
  12. data/ext/ruby_ndtypes/ndtypes/LICENSE.txt +29 -0
  13. data/ext/ruby_ndtypes/ndtypes/MANIFEST.in +3 -0
  14. data/ext/ruby_ndtypes/ndtypes/Makefile.in +87 -0
  15. data/ext/ruby_ndtypes/ndtypes/README.rst +47 -0
  16. data/ext/ruby_ndtypes/ndtypes/config.guess +1530 -0
  17. data/ext/ruby_ndtypes/ndtypes/config.h.in +67 -0
  18. data/ext/ruby_ndtypes/ndtypes/config.sub +1782 -0
  19. data/ext/ruby_ndtypes/ndtypes/configure +5260 -0
  20. data/ext/ruby_ndtypes/ndtypes/configure.ac +161 -0
  21. data/ext/ruby_ndtypes/ndtypes/doc/Makefile +14 -0
  22. data/ext/ruby_ndtypes/ndtypes/doc/_static/copybutton.js +66 -0
  23. data/ext/ruby_ndtypes/ndtypes/doc/conf.py +26 -0
  24. data/ext/ruby_ndtypes/ndtypes/doc/grammar/grammar.rst +27 -0
  25. data/ext/ruby_ndtypes/ndtypes/doc/index.rst +56 -0
  26. data/ext/ruby_ndtypes/ndtypes/doc/libndtypes/context.rst +131 -0
  27. data/ext/ruby_ndtypes/ndtypes/doc/libndtypes/encodings.rst +68 -0
  28. data/ext/ruby_ndtypes/ndtypes/doc/libndtypes/fields-values.rst +175 -0
  29. data/ext/ruby_ndtypes/ndtypes/doc/libndtypes/functions.rst +72 -0
  30. data/ext/ruby_ndtypes/ndtypes/doc/libndtypes/index.rst +43 -0
  31. data/ext/ruby_ndtypes/ndtypes/doc/libndtypes/init.rst +48 -0
  32. data/ext/ruby_ndtypes/ndtypes/doc/libndtypes/io.rst +100 -0
  33. data/ext/ruby_ndtypes/ndtypes/doc/libndtypes/memory.rst +124 -0
  34. data/ext/ruby_ndtypes/ndtypes/doc/libndtypes/predicates.rst +110 -0
  35. data/ext/ruby_ndtypes/ndtypes/doc/libndtypes/typedef.rst +31 -0
  36. data/ext/ruby_ndtypes/ndtypes/doc/libndtypes/types.rst +594 -0
  37. data/ext/ruby_ndtypes/ndtypes/doc/libndtypes/util.rst +166 -0
  38. data/ext/ruby_ndtypes/ndtypes/doc/ndtypes/buffer-protocol.rst +27 -0
  39. data/ext/ruby_ndtypes/ndtypes/doc/ndtypes/index.rst +21 -0
  40. data/ext/ruby_ndtypes/ndtypes/doc/ndtypes/pattern-matching.rst +330 -0
  41. data/ext/ruby_ndtypes/ndtypes/doc/ndtypes/quickstart.rst +144 -0
  42. data/ext/ruby_ndtypes/ndtypes/doc/ndtypes/types.rst +544 -0
  43. data/ext/ruby_ndtypes/ndtypes/doc/releases/index.rst +35 -0
  44. data/ext/ruby_ndtypes/ndtypes/install-sh +527 -0
  45. data/ext/ruby_ndtypes/ndtypes/libndtypes/Makefile.in +271 -0
  46. data/ext/ruby_ndtypes/ndtypes/libndtypes/Makefile.vc +269 -0
  47. data/ext/ruby_ndtypes/ndtypes/libndtypes/alloc.c +230 -0
  48. data/ext/ruby_ndtypes/ndtypes/libndtypes/attr.c +268 -0
  49. data/ext/ruby_ndtypes/ndtypes/libndtypes/attr.h +109 -0
  50. data/ext/ruby_ndtypes/ndtypes/libndtypes/compat/Makefile.in +73 -0
  51. data/ext/ruby_ndtypes/ndtypes/libndtypes/compat/Makefile.vc +70 -0
  52. data/ext/ruby_ndtypes/ndtypes/libndtypes/compat/README.txt +16 -0
  53. data/ext/ruby_ndtypes/ndtypes/libndtypes/compat/bpgrammar.c +2179 -0
  54. data/ext/ruby_ndtypes/ndtypes/libndtypes/compat/bpgrammar.h +134 -0
  55. data/ext/ruby_ndtypes/ndtypes/libndtypes/compat/bpgrammar.y +428 -0
  56. data/ext/ruby_ndtypes/ndtypes/libndtypes/compat/bplexer.c +2543 -0
  57. data/ext/ruby_ndtypes/ndtypes/libndtypes/compat/bplexer.h +735 -0
  58. data/ext/ruby_ndtypes/ndtypes/libndtypes/compat/bplexer.l +176 -0
  59. data/ext/ruby_ndtypes/ndtypes/libndtypes/compat/export.c +543 -0
  60. data/ext/ruby_ndtypes/ndtypes/libndtypes/compat/import.c +110 -0
  61. data/ext/ruby_ndtypes/ndtypes/libndtypes/context.c +228 -0
  62. data/ext/ruby_ndtypes/ndtypes/libndtypes/copy.c +634 -0
  63. data/ext/ruby_ndtypes/ndtypes/libndtypes/encodings.c +116 -0
  64. data/ext/ruby_ndtypes/ndtypes/libndtypes/equal.c +288 -0
  65. data/ext/ruby_ndtypes/ndtypes/libndtypes/grammar.c +3067 -0
  66. data/ext/ruby_ndtypes/ndtypes/libndtypes/grammar.h +180 -0
  67. data/ext/ruby_ndtypes/ndtypes/libndtypes/grammar.y +417 -0
  68. data/ext/ruby_ndtypes/ndtypes/libndtypes/io.c +1658 -0
  69. data/ext/ruby_ndtypes/ndtypes/libndtypes/lexer.c +2773 -0
  70. data/ext/ruby_ndtypes/ndtypes/libndtypes/lexer.h +734 -0
  71. data/ext/ruby_ndtypes/ndtypes/libndtypes/lexer.l +222 -0
  72. data/ext/ruby_ndtypes/ndtypes/libndtypes/match.c +1132 -0
  73. data/ext/ruby_ndtypes/ndtypes/libndtypes/ndtypes.c +2323 -0
  74. data/ext/ruby_ndtypes/ndtypes/libndtypes/ndtypes.h.in +893 -0
  75. data/ext/ruby_ndtypes/ndtypes/libndtypes/overflow.h +161 -0
  76. data/ext/ruby_ndtypes/ndtypes/libndtypes/parsefuncs.c +473 -0
  77. data/ext/ruby_ndtypes/ndtypes/libndtypes/parsefuncs.h +92 -0
  78. data/ext/ruby_ndtypes/ndtypes/libndtypes/parser.c +246 -0
  79. data/ext/ruby_ndtypes/ndtypes/libndtypes/seq.c +269 -0
  80. data/ext/ruby_ndtypes/ndtypes/libndtypes/seq.h +197 -0
  81. data/ext/ruby_ndtypes/ndtypes/libndtypes/serialize/Makefile.in +48 -0
  82. data/ext/ruby_ndtypes/ndtypes/libndtypes/serialize/Makefile.vc +46 -0
  83. data/ext/ruby_ndtypes/ndtypes/libndtypes/serialize/deserialize.c +1007 -0
  84. data/ext/ruby_ndtypes/ndtypes/libndtypes/serialize/serialize.c +442 -0
  85. data/ext/ruby_ndtypes/ndtypes/libndtypes/slice.h +42 -0
  86. data/ext/ruby_ndtypes/ndtypes/libndtypes/substitute.c +238 -0
  87. data/ext/ruby_ndtypes/ndtypes/libndtypes/substitute.h +50 -0
  88. data/ext/ruby_ndtypes/ndtypes/libndtypes/symtable.c +371 -0
  89. data/ext/ruby_ndtypes/ndtypes/libndtypes/symtable.h +100 -0
  90. data/ext/ruby_ndtypes/ndtypes/libndtypes/tests/Makefile.in +55 -0
  91. data/ext/ruby_ndtypes/ndtypes/libndtypes/tests/Makefile.vc +45 -0
  92. data/ext/ruby_ndtypes/ndtypes/libndtypes/tests/alloc_fail.c +82 -0
  93. data/ext/ruby_ndtypes/ndtypes/libndtypes/tests/alloc_fail.h +49 -0
  94. data/ext/ruby_ndtypes/ndtypes/libndtypes/tests/runtest.c +1657 -0
  95. data/ext/ruby_ndtypes/ndtypes/libndtypes/tests/test.h +85 -0
  96. data/ext/ruby_ndtypes/ndtypes/libndtypes/tests/test_array.c +115 -0
  97. data/ext/ruby_ndtypes/ndtypes/libndtypes/tests/test_buffer.c +137 -0
  98. data/ext/ruby_ndtypes/ndtypes/libndtypes/tests/test_indent.c +201 -0
  99. data/ext/ruby_ndtypes/ndtypes/libndtypes/tests/test_match.c +2397 -0
  100. data/ext/ruby_ndtypes/ndtypes/libndtypes/tests/test_numba.c +57 -0
  101. data/ext/ruby_ndtypes/ndtypes/libndtypes/tests/test_parse.c +349 -0
  102. data/ext/ruby_ndtypes/ndtypes/libndtypes/tests/test_parse_error.c +27839 -0
  103. data/ext/ruby_ndtypes/ndtypes/libndtypes/tests/test_parse_roundtrip.c +350 -0
  104. data/ext/ruby_ndtypes/ndtypes/libndtypes/tests/test_record.c +231 -0
  105. data/ext/ruby_ndtypes/ndtypes/libndtypes/tests/test_typecheck.c +375 -0
  106. data/ext/ruby_ndtypes/ndtypes/libndtypes/tests/test_typedef.c +65 -0
  107. data/ext/ruby_ndtypes/ndtypes/libndtypes/tests/valgrind.supp +30 -0
  108. data/ext/ruby_ndtypes/ndtypes/libndtypes/tools/bench.c +79 -0
  109. data/ext/ruby_ndtypes/ndtypes/libndtypes/tools/indent.c +94 -0
  110. data/ext/ruby_ndtypes/ndtypes/libndtypes/tools/print_ast.c +96 -0
  111. data/ext/ruby_ndtypes/ndtypes/libndtypes/util.c +474 -0
  112. data/ext/ruby_ndtypes/ndtypes/libndtypes/values.c +228 -0
  113. data/ext/ruby_ndtypes/ndtypes/python/bench.py +49 -0
  114. data/ext/ruby_ndtypes/ndtypes/python/ndt_randtype.py +409 -0
  115. data/ext/ruby_ndtypes/ndtypes/python/ndt_support.py +14 -0
  116. data/ext/ruby_ndtypes/ndtypes/python/ndtypes/__init__.py +70 -0
  117. data/ext/ruby_ndtypes/ndtypes/python/ndtypes/_ndtypes.c +1332 -0
  118. data/ext/ruby_ndtypes/ndtypes/python/ndtypes/docstrings.h +319 -0
  119. data/ext/ruby_ndtypes/ndtypes/python/ndtypes/pyndtypes.h +154 -0
  120. data/ext/ruby_ndtypes/ndtypes/python/test_ndtypes.py +1977 -0
  121. data/ext/ruby_ndtypes/ndtypes/setup.py +288 -0
  122. data/ext/ruby_ndtypes/ndtypes/vcbuild/INSTALL.txt +41 -0
  123. data/ext/ruby_ndtypes/ndtypes/vcbuild/runtest32.bat +15 -0
  124. data/ext/ruby_ndtypes/ndtypes/vcbuild/runtest64.bat +13 -0
  125. data/ext/ruby_ndtypes/ndtypes/vcbuild/vcbuild32.bat +38 -0
  126. data/ext/ruby_ndtypes/ndtypes/vcbuild/vcbuild64.bat +38 -0
  127. data/ext/ruby_ndtypes/ndtypes/vcbuild/vcclean.bat +13 -0
  128. data/ext/ruby_ndtypes/ndtypes/vcbuild/vcdistclean.bat +14 -0
  129. data/ext/ruby_ndtypes/ruby_ndtypes.c +1003 -0
  130. data/ext/ruby_ndtypes/ruby_ndtypes.h +37 -0
  131. data/ext/ruby_ndtypes/ruby_ndtypes_internal.h +28 -0
  132. data/lib/ndtypes.rb +45 -0
  133. data/lib/ndtypes/errors.rb +2 -0
  134. data/lib/ndtypes/version.rb +6 -0
  135. data/ndtypes.gemspec +47 -0
  136. data/spec/gc_table_spec.rb +10 -0
  137. data/spec/ndtypes_spec.rb +289 -0
  138. data/spec/spec_helper.rb +241 -0
  139. metadata +242 -0
@@ -0,0 +1,2323 @@
1
+ /*
2
+ * BSD 3-Clause License
3
+ *
4
+ * Copyright (c) 2017-2018, plures
5
+ * All rights reserved.
6
+ *
7
+ * Redistribution and use in source and binary forms, with or without
8
+ * modification, are permitted provided that the following conditions are met:
9
+ *
10
+ * 1. Redistributions of source code must retain the above copyright notice,
11
+ * this list of conditions and the following disclaimer.
12
+ *
13
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
14
+ * this list of conditions and the following disclaimer in the documentation
15
+ * and/or other materials provided with the distribution.
16
+ *
17
+ * 3. Neither the name of the copyright holder nor the names of its
18
+ * contributors may be used to endorse or promote products derived from
19
+ * this software without specific prior written permission.
20
+ *
21
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
+ */
32
+
33
+
34
+ #include <stdlib.h>
35
+ #include <string.h>
36
+ #include <stdint.h>
37
+ #include <inttypes.h>
38
+ #include <stdbool.h>
39
+ #include <complex.h>
40
+ #include <assert.h>
41
+ #include "ndtypes.h"
42
+ #include "overflow.h"
43
+ #include "slice.h"
44
+
45
+
46
+ /*****************************************************************************/
47
+ /* Static helper functions */
48
+ /*****************************************************************************/
49
+
50
#undef max
/* Return the larger of two alignment values. */
static inline uint16_t
max(uint16_t x, uint16_t y)
{
    if (x < y) {
        return y;
    }
    return x;
}
56
+
57
/* Return nonzero iff 'n' is a nonzero power of two. */
static inline int
ispower2(uint16_t n)
{
    if (n == 0) {
        return 0;
    }
    /* A power of two has exactly one bit set. */
    return (n & (n - 1)) == 0;
}
62
+
63
/*
 * Round 'offset' up to the next multiple of 'align' (align > 0).
 * Sets *overflow if the intermediate addition overflows int64.
 */
static int64_t
round_up(int64_t offset, uint16_t align, bool *overflow)
{
    int64_t bumped;

    assert(align > 0);
    bumped = ADDi64(offset, align - 1, overflow);
    return (bumped / align) * align;
}
72
+
73
+
74
+ /*****************************************************************************/
75
+ /* Flags */
76
+ /*****************************************************************************/
77
+
78
+ /* Determine general subtree flags. */
79
+ static uint32_t
80
+ ndt_subtree_flags(const ndt_t *type)
81
+ {
82
+ if (type && type->flags & (NDT_OPTION|NDT_SUBTREE_OPTION)) {
83
+ return NDT_SUBTREE_OPTION;
84
+ }
85
+
86
+ return 0;
87
+ }
88
+
89
+ /* Determine general subtree and ellipsis flags. */
90
+ static uint32_t
91
+ ndt_dim_flags(const ndt_t *type)
92
+ {
93
+ uint32_t flags = ndt_subtree_flags(type);
94
+ flags |= (type->flags & NDT_ELLIPSIS);
95
+ return flags;
96
+ }
97
+
98
+
99
+ /*****************************************************************************/
100
+ /* Common fields for concrete types */
101
+ /*****************************************************************************/
102
+
103
+ /* Itemsize of a concrete type. Undefined for abstract types. */
104
+ int64_t
105
+ ndt_itemsize(const ndt_t *t)
106
+ {
107
+ assert(ndt_is_concrete(t));
108
+
109
+ switch (t->tag) {
110
+ case FixedDim:
111
+ return t->Concrete.FixedDim.itemsize;
112
+ case VarDim:
113
+ return t->Concrete.VarDim.itemsize;
114
+ default:
115
+ return t->datasize;
116
+ }
117
+ }
118
+
119
+
120
+ /*****************************************************************************/
121
+ /* Predicates */
122
+ /*****************************************************************************/
123
+
124
+ /* Type field access */
125
+ int
126
+ ndt_is_abstract(const ndt_t *t)
127
+ {
128
+ return t->access == Abstract;
129
+ }
130
+
131
+ int
132
+ ndt_is_concrete(const ndt_t *t)
133
+ {
134
+ return t->access == Concrete;
135
+ }
136
+
137
+ /* Type flags */
138
+ int
139
+ ndt_is_optional(const ndt_t *t)
140
+ {
141
+ return t->flags & NDT_OPTION;
142
+ }
143
+
144
+ int
145
+ ndt_subtree_is_optional(const ndt_t *t)
146
+ {
147
+ return t->flags & NDT_SUBTREE_OPTION;
148
+ }
149
+
150
+ /* Array predicates */
151
+ int
152
+ ndt_is_ndarray(const ndt_t *t)
153
+ {
154
+ switch (t->tag) {
155
+ case FixedDim:
156
+ return 1;
157
+ default:
158
+ return t->ndim == 0;
159
+ }
160
+ }
161
+
162
+ int
163
+ ndt_is_c_contiguous(const ndt_t *t)
164
+ {
165
+ const ndt_t *dims[NDT_MAX_DIM];
166
+ const ndt_t *dtype;
167
+ int64_t shape, step;
168
+ int ndim, i;
169
+
170
+ if (ndt_is_abstract(t)) {
171
+ return 0;
172
+ }
173
+ if (!ndt_is_ndarray(t)) {
174
+ return 0;
175
+ }
176
+ if (t->ndim == 0) {
177
+ return 1;
178
+ }
179
+
180
+ ndim = ndt_dims_dtype(dims, &dtype, t);
181
+
182
+ step = 1;
183
+ for (i = ndim-1; i >= 0; i--) {
184
+ shape = dims[i]->FixedDim.shape;
185
+ if (shape > 1 && dims[i]->Concrete.FixedDim.step != step) {
186
+ return 0;
187
+ }
188
+ step *= shape;
189
+ }
190
+
191
+ return 1;
192
+ }
193
+
194
+ int
195
+ ndt_is_f_contiguous(const ndt_t *t)
196
+ {
197
+ const ndt_t *dims[NDT_MAX_DIM];
198
+ const ndt_t *dtype;
199
+ int64_t shape, step;
200
+ int ndim, i;
201
+
202
+ if (ndt_is_abstract(t)) {
203
+ return 0;
204
+ }
205
+ if (!ndt_is_ndarray(t)) {
206
+ return 0;
207
+ }
208
+ if (t->ndim == 0) {
209
+ return 1;
210
+ }
211
+
212
+ ndim = ndt_dims_dtype(dims, &dtype, t);
213
+ step = 1;
214
+ for (i = 0; i < ndim; i++) {
215
+ shape = dims[i]->FixedDim.shape;
216
+ if (shape > 1 && dims[i]->Concrete.FixedDim.step != step) {
217
+ return 0;
218
+ }
219
+ step *= shape;
220
+ }
221
+
222
+ return 1;
223
+ }
224
+
225
+ int
226
+ ndt_really_fortran(const ndt_t *t)
227
+ {
228
+ return ndt_is_f_contiguous(t) && !ndt_is_c_contiguous(t);
229
+ }
230
+
231
+ /* Scalar */
232
+ int
233
+ ndt_is_scalar(const ndt_t *t)
234
+ {
235
+ switch (t->tag) {
236
+ case Bool:
237
+ case Int8: case Int16: case Int32: case Int64:
238
+ case Uint8: case Uint16: case Uint32: case Uint64:
239
+ case Float16: case Float32: case Float64:
240
+ case Complex32: case Complex64: case Complex128:
241
+ case FixedString: case FixedBytes:
242
+ case String: case Bytes:
243
+ case Char:
244
+ return 1;
245
+ default:
246
+ return 0;
247
+ }
248
+ }
249
+
250
+ /* Primitive type predicates */
251
+ int
252
+ ndt_is_signed(const ndt_t *t)
253
+ {
254
+ switch (t->tag) {
255
+ case Int8: case Int16: case Int32: case Int64:
256
+ return 1;
257
+ default:
258
+ return 0;
259
+ }
260
+ }
261
+
262
+ int
263
+ ndt_is_unsigned(const ndt_t *t)
264
+ {
265
+ switch (t->tag) {
266
+ case Uint8: case Uint16: case Uint32: case Uint64:
267
+ return 1;
268
+ default:
269
+ return 0;
270
+ }
271
+ }
272
+
273
+ int
274
+ ndt_is_float(const ndt_t *t)
275
+ {
276
+ switch (t->tag) {
277
+ case Float16: case Float32: case Float64:
278
+ return 1;
279
+ default:
280
+ return 0;
281
+ }
282
+ }
283
+
284
+ int
285
+ ndt_is_complex(const ndt_t *t)
286
+ {
287
+ switch (t->tag) {
288
+ case Complex32: case Complex64: case Complex128:
289
+ return 1;
290
+ default:
291
+ return 0;
292
+ }
293
+ }
294
+
295
+ int
296
+ ndt_endian_is_set(const ndt_t *t)
297
+ {
298
+ return t->flags & (NDT_LITTLE_ENDIAN|NDT_BIG_ENDIAN);
299
+ }
300
+
301
+ int
302
+ ndt_is_little_endian(const ndt_t *t)
303
+ {
304
+ switch (t->tag) {
305
+ case Int8: case Int16: case Int32: case Int64:
306
+ case Uint8: case Uint16: case Uint32: case Uint64:
307
+ case Float16: case Float32: case Float64:
308
+ case Complex32: case Complex64: case Complex128:
309
+ if (t->flags & NDT_LITTLE_ENDIAN) {
310
+ return 1;
311
+ }
312
+ if (t->flags & NDT_BIG_ENDIAN) {
313
+ return 0;
314
+ }
315
+ return !NDT_SYS_BIG_ENDIAN;
316
+ default:
317
+ return 0;
318
+ }
319
+ }
320
+
321
+ int
322
+ ndt_is_big_endian(const ndt_t *t)
323
+ {
324
+ switch (t->tag) {
325
+ case Int16: case Int32: case Int64:
326
+ case Uint16: case Uint32: case Uint64:
327
+ case Float16: case Float32: case Float64:
328
+ case Complex32: case Complex64: case Complex128:
329
+ if (t->flags & NDT_BIG_ENDIAN) {
330
+ return 1;
331
+ }
332
+ if (t->flags & NDT_LITTLE_ENDIAN) {
333
+ return 0;
334
+ }
335
+ return NDT_SYS_BIG_ENDIAN;
336
+ default:
337
+ return 0;
338
+ }
339
+ }
340
+
341
+
342
+ /*****************************************************************************/
343
+ /* Alignment and packing */
344
+ /*****************************************************************************/
345
+
346
+ /* Check that 'align' is a power of two. */
347
+ static inline bool
348
+ align_ispower2(uint16_t align, ndt_context_t *ctx)
349
+ {
350
+ if (!ispower2(align)) {
351
+ ndt_err_format(ctx, NDT_ValueError,
352
+ "'align' must be a power of two, got %" PRIu16, align);
353
+ return false;
354
+ }
355
+
356
+ return true;
357
+ }
358
+
359
+ /*
360
+ * Check that at most one of 'align' and 'pack' is user-specified. If
361
+ * 'align' is specified, choose the larger value of 'align' and the
362
+ * natural alignment of the field member.
363
+ *
364
+ * If 'pack' is specified, use that value, regardless of whether it
365
+ * is smaller than the natural alignment of the field member.
366
+ *
367
+ * If type access is restricted and explicit values have been specified,
368
+ * raise an error.
369
+ *
370
+ * Return value: The extracted alignment if the type is concrete and no
371
+ * error occurred. 1 if the type is abstract and no error occurred (that
372
+ * value is unused later).
373
+ *
374
+ * Return UINT16_MAX if an error occurred.
375
+ */
376
+ static uint16_t
377
+ min_field_align(const ndt_t *t, uint16_opt_t align, uint16_opt_t pack,
378
+ ndt_context_t *ctx)
379
+ {
380
+ uint16_t min_align = 1;
381
+
382
+ if (align.tag == Some) {
383
+ if (pack.tag == Some) {
384
+ ndt_err_format(ctx, NDT_InvalidArgumentError,
385
+ "field has both 'align' and 'pack' attributes");
386
+ return UINT16_MAX;
387
+ }
388
+ if (t->access == Abstract) {
389
+ goto access_error;
390
+ }
391
+ min_align = max(align.Some, t->align);
392
+ }
393
+ else if (pack.tag == Some) {
394
+ if (t->access == Abstract) {
395
+ goto access_error;
396
+ }
397
+ min_align = pack.Some;
398
+ }
399
+ else {
400
+ if (t->access == Concrete) {
401
+ min_align = t->align;
402
+ }
403
+ }
404
+
405
+ if (!align_ispower2(min_align, ctx)) {
406
+ return UINT16_MAX;
407
+ }
408
+
409
+ return min_align;
410
+
411
+
412
+ access_error:
413
+ ndt_err_format(ctx, NDT_InvalidArgumentError,
414
+ "'align' or 'pack' attribute given for abstract type");
415
+ return UINT16_MAX;
416
+ }
417
+
418
+ /* Extract and validate the alignment value. */
419
+ static inline uint16_t
420
+ get_align(uint16_opt_t align, uint16_t default_align, ndt_context_t *ctx)
421
+ {
422
+ switch (align.tag) {
423
+ case Some:
424
+ if (!align_ispower2(align.Some, ctx)) {
425
+ return UINT16_MAX;
426
+ }
427
+ return align.Some;
428
+ default: /* None */
429
+ return default_align;
430
+ }
431
+ }
432
+
433
+
434
+ /******************************************************************************/
435
+ /* Fields */
436
+ /******************************************************************************/
437
+
438
+ /*
439
+ * align = n: minimum alignment for the field; the resulting alignment is
440
+ * guaranteed to be at least the maximum of n and the natural alignment of
441
+ * the field member.
442
+ *
443
+ * pack = n: minimum alignment for the field; the resulting alignment is
444
+ * guaranteed to be at least n.
445
+ *
446
+ * pad = n: requested padding for a field; used for checking if an explicitly
447
+ * given padding is equal to the computed padding.
448
+ *
449
+ * 'name' is NULL for a tuple field.
450
+ */
451
+ ndt_field_t *
452
+ ndt_field(char *name, ndt_t *type, uint16_opt_t align, uint16_opt_t pack,
453
+ uint16_opt_t pad, ndt_context_t *ctx)
454
+ {
455
+ ndt_field_t *field;
456
+ uint16_t min_align;
457
+
458
+ if (type == NULL) {
459
+ return NULL;
460
+ }
461
+
462
+ min_align = min_field_align(type, align, pack, ctx);
463
+ if (min_align == UINT16_MAX) {
464
+ ndt_free(name);
465
+ ndt_del(type);
466
+ return NULL;
467
+ }
468
+
469
+ /* abstract field */
470
+ field = ndt_alloc_size(sizeof *field);
471
+ if (field == NULL) {
472
+ ndt_free(name);
473
+ ndt_del(type);
474
+ return ndt_memory_error(ctx);
475
+ }
476
+ field->name = name;
477
+ field->type = type;
478
+
479
+ /* concrete access */
480
+ field->access = type->access;
481
+ if (field->access == Concrete) {
482
+ field->Concrete.align = min_align;
483
+ field->Concrete.explicit_align = (align.tag==Some || pack.tag==Some);
484
+ field->Concrete.pad = (pad.tag==Some) ? pad.Some : UINT16_MAX;
485
+ field->Concrete.explicit_pad = (pad.tag==Some);
486
+ }
487
+
488
+ return field;
489
+ }
490
+
491
+ void
492
+ ndt_field_del(ndt_field_t *field)
493
+ {
494
+ if (field) {
495
+ ndt_free(field->name);
496
+ ndt_del(field->type);
497
+ ndt_free(field);
498
+ }
499
+ }
500
+
501
+ void
502
+ ndt_field_array_del(ndt_field_t *fields, int64_t shape)
503
+ {
504
+ int64_t i;
505
+
506
+ if (fields == NULL) {
507
+ return;
508
+ }
509
+
510
+ for (i = 0; i < shape; i++) {
511
+ ndt_free(fields[i].name);
512
+ ndt_del(fields[i].type);
513
+ }
514
+
515
+ ndt_free(fields);
516
+ }
517
+
518
+ void
519
+ ndt_type_array_clear(ndt_t **types, int64_t shape)
520
+ {
521
+ int64_t i;
522
+
523
+ if (types == NULL) {
524
+ return;
525
+ }
526
+
527
+ for (i = 0; i < shape; i++) {
528
+ ndt_del(types[i]);
529
+ }
530
+ }
531
+
532
+ void
533
+ ndt_type_array_del(ndt_t **types, int64_t shape)
534
+ {
535
+ ndt_type_array_clear(types, shape);
536
+ ndt_free(types);
537
+ }
538
+
539
+
540
+ /*****************************************************************************/
541
+ /* Typedef */
542
+ /*****************************************************************************/
543
+
544
+ /*
545
+ * Add a typedef name -> type to the symbol table. Steals the 'type'
546
+ * reference.
547
+ */
548
+ int
549
+ ndt_typedef(const char *name, ndt_t *type, const ndt_methods_t *m, ndt_context_t *ctx)
550
+ {
551
+ if (ndt_typedef_add(name, type, m, ctx) < 0) {
552
+ return -1;
553
+ }
554
+
555
+ return 0;
556
+ }
557
+
558
+ int
559
+ ndt_typedef_from_string(const char *name, const char *type, const ndt_methods_t *m,
560
+ ndt_context_t *ctx)
561
+ {
562
+ ndt_t *t;
563
+
564
+ t = ndt_from_string(type, ctx);
565
+ if (t == NULL) {
566
+ return -1;
567
+ }
568
+
569
+ if (ndt_typedef_add(name, t, m, ctx) < 0) {
570
+ return -1;
571
+ }
572
+
573
+ return 0;
574
+ }
575
+
576
+
577
+ /******************************************************************************/
578
+ /* Type invariants */
579
+ /******************************************************************************/
580
+
581
+ /* Invariants for all types except for var and ellipsis dimensions. */
582
+ static int
583
+ check_type_invariants(const ndt_t *type, ndt_context_t *ctx)
584
+ {
585
+ if (type->tag == Module) {
586
+ ndt_err_format(ctx, NDT_TypeError,
587
+ "nested module types are not supported");
588
+ return 0;
589
+ }
590
+
591
+ if (type->tag == VarDim) {
592
+ ndt_err_format(ctx, NDT_TypeError,
593
+ "nested or non-uniform var dimensions are not supported");
594
+ return 0;
595
+ }
596
+
597
+ if (type->ndim >= NDT_MAX_DIM) {
598
+ ndt_err_format(ctx, NDT_TypeError, "ndim > %d", NDT_MAX_DIM);
599
+ return 0;
600
+ }
601
+
602
+ return 1;
603
+ }
604
+
605
+ /* Invariants for abstract var dimensions. */
606
+ static int
607
+ check_abstract_var_invariants(const ndt_t *type, ndt_context_t *ctx)
608
+ {
609
+ if (type->tag == Module) {
610
+ ndt_err_format(ctx, NDT_TypeError,
611
+ "nested module types are not supported");
612
+ return 0;
613
+ }
614
+
615
+ if (type->tag == FixedDim || type->tag == SymbolicDim) {
616
+ ndt_err_format(ctx, NDT_TypeError,
617
+ "mixed fixed and var dim are not supported");
618
+ return 0;
619
+ }
620
+
621
+ if (type->tag == VarDim && ndt_is_concrete(type)) {
622
+ ndt_err_format(ctx, NDT_TypeError,
623
+ "mixing abstract and concrete var dimensions is not allowed");
624
+ return 0;
625
+ }
626
+
627
+ if (type->ndim >= NDT_MAX_DIM) {
628
+ ndt_err_format(ctx, NDT_TypeError, "ndim > %d", NDT_MAX_DIM);
629
+ return 0;
630
+ }
631
+
632
+ return 1;
633
+ }
634
+
635
+ /* Invariants for concrete var dimensions. */
636
+ static int
637
+ check_var_invariants(enum ndt_offsets flag, const ndt_t *type, ndt_context_t *ctx)
638
+ {
639
+ if (type->tag == Module) {
640
+ ndt_err_format(ctx, NDT_TypeError,
641
+ "nested module types are not supported");
642
+ return 0;
643
+ }
644
+
645
+ if (type->tag == FixedDim || type->tag == SymbolicDim) {
646
+ ndt_err_format(ctx, NDT_TypeError,
647
+ "mixed fixed and var dim are not supported");
648
+ return 0;
649
+ }
650
+
651
+ if (type->tag == VarDim) {
652
+ if (ndt_is_abstract(type)) {
653
+ ndt_err_format(ctx, NDT_TypeError,
654
+ "mixing abstract and concrete var dimensions is not allowed");
655
+ return 0;
656
+ }
657
+ if (flag != type->Concrete.VarDim.flag) {
658
+ ndt_err_format(ctx, NDT_TypeError,
659
+ "mixing internal and external offsets is not allowed");
660
+ return 0;
661
+ }
662
+ }
663
+
664
+ if (type->ndim >= NDT_MAX_DIM) {
665
+ ndt_err_format(ctx, NDT_TypeError, "ndim > %d", NDT_MAX_DIM);
666
+ return 0;
667
+ }
668
+
669
+ return 1;
670
+ }
671
+
672
+ /* Invariants for ellipsis dimensions. */
673
+ static int
674
+ check_ellipsis_invariants(const ndt_t *type, ndt_context_t *ctx)
675
+ {
676
+ if (type->tag == Module) {
677
+ ndt_err_format(ctx, NDT_TypeError,
678
+ "nested module types are not supported");
679
+ return 0;
680
+ }
681
+
682
+ if (type->ndim >= NDT_MAX_DIM) {
683
+ ndt_err_format(ctx, NDT_TypeError, "ndim > %d", NDT_MAX_DIM);
684
+ return 0;
685
+ }
686
+
687
+ return 1;
688
+ }
689
+
690
/* NULL-tolerant string equality: two NULLs are equal, NULL never equals
   a non-NULL string.  Returns 1 on equality, 0 otherwise. */
static int
strcmp_null(const char *s, const char *t)
{
    if (s == NULL || t == NULL) {
        return s == t;
    }
    return strcmp(s, t) == 0;
}
698
+
699
+ static int
700
+ check_function_invariants(ndt_t * const *types, int64_t nin, int64_t nargs,
701
+ ndt_context_t *ctx)
702
+ {
703
+ int64_t count = 0;
704
+ int i;
705
+
706
+ if (nargs == 0) {
707
+ return 1;
708
+ }
709
+
710
+ if (nin == 0) {
711
+ for (i = 0; i < nargs; i++) {
712
+ if (ndt_is_abstract(types[i])) {
713
+ ndt_err_format(ctx, NDT_ValueError,
714
+ "output types cannot be inferred for function with "
715
+ "no arguments");
716
+ return 0;
717
+ }
718
+ }
719
+ }
720
+
721
+ for (i = 0; i < nargs; i++) {
722
+ if (types[i]->tag == EllipsisDim) {
723
+ count++;
724
+ }
725
+ }
726
+
727
+ if (count == 0) {
728
+ return 1;
729
+ }
730
+ if (count != nargs) {
731
+ goto error;
732
+ }
733
+
734
+ for (i = 1; i < nargs; i++) {
735
+ if (!strcmp_null(types[0]->EllipsisDim.name, types[i]->EllipsisDim.name)) {
736
+ goto error;
737
+ }
738
+ }
739
+
740
+ return 1;
741
+
742
+ error:
743
+ ndt_err_format(ctx, NDT_ValueError,
744
+ "invalid combination of ellipsis dimensions");
745
+ return 0;
746
+ }
747
+
748
+ static bool
749
+ is_elemwise(ndt_t * const *types, int64_t nargs)
750
+ {
751
+ for (int64_t i = 0; i < nargs; i++) {
752
+ if ((types[i]->ndim == 1 && types[i]->tag == EllipsisDim) ||
753
+ types[i]->ndim == 0) {
754
+ continue;
755
+ }
756
+ return false;
757
+ }
758
+
759
+ return true;
760
+ }
761
+
762
+
763
+ /******************************************************************************/
764
+ /* Type allocation/deallocation */
765
+ /******************************************************************************/
766
+
767
+ ndt_t *
768
+ ndt_new(enum ndt tag, ndt_context_t *ctx)
769
+ {
770
+ ndt_t *t;
771
+
772
+ t = ndt_alloc_size(sizeof *t);
773
+ if (t == NULL) {
774
+ return ndt_memory_error(ctx);
775
+ }
776
+
777
+ t->tag = tag;
778
+ t->access = Abstract;
779
+ t->flags = 0;
780
+ t->ndim = 0;
781
+
782
+ t->datasize = 0;
783
+ t->align = UINT16_MAX;
784
+
785
+ return t;
786
+ }
787
+
788
+ static ndt_t *
789
+ ndt_new_extra(enum ndt tag, int64_t n, ndt_context_t *ctx)
790
+ {
791
+ bool overflow = 0;
792
+ ndt_t *t;
793
+ int64_t size;
794
+
795
+ size = ADDi64(offsetof(ndt_t, extra), n, &overflow);
796
+ if (overflow) {
797
+ ndt_err_format(ctx, NDT_ValueError, "type too large");
798
+ return NULL;
799
+ }
800
+
801
+ t = ndt_alloc(1, size);
802
+ if (t == NULL) {
803
+ return ndt_memory_error(ctx);
804
+ }
805
+
806
+ t->tag = tag;
807
+ t->access = Abstract;
808
+ t->flags = 0;
809
+ t->ndim = 0;
810
+
811
+ t->datasize = 0;
812
+ t->align = UINT16_MAX;
813
+
814
+ return t;
815
+ }
816
+
817
+ ndt_t *
818
+ ndt_function_new(int64_t nargs, ndt_context_t *ctx)
819
+ {
820
+ ndt_t *t = NULL;
821
+ bool overflow = 0;
822
+ int64_t extra, i;
823
+
824
+ extra = MULi64(nargs, sizeof(ndt_t *), &overflow);
825
+
826
+ if (overflow) {
827
+ ndt_err_format(ctx, NDT_ValueError, "function size too large");
828
+ return NULL;
829
+ }
830
+
831
+ t = ndt_new_extra(Function, extra, ctx);
832
+ if (t == NULL) {
833
+ return NULL;
834
+ }
835
+ t->Function.nargs = nargs;
836
+ t->Function.types = (ndt_t **)t->extra;
837
+
838
+ for (i = 0; i < nargs; i++) {
839
+ t->Function.types[i] = NULL;
840
+ }
841
+
842
+ return t;
843
+ }
844
+
845
+ ndt_t *
846
+ ndt_tuple_new(enum ndt_variadic flag, int64_t shape, ndt_context_t *ctx)
847
+ {
848
+ ndt_t *t = NULL;
849
+ bool overflow = 0;
850
+ int64_t offset_offset;
851
+ int64_t align_offset;
852
+ int64_t pad_offset;
853
+ int64_t extra;
854
+ int64_t size;
855
+ int64_t i;
856
+
857
+ size = MULi64(shape, sizeof(ndt_t *), &overflow);
858
+ offset_offset = round_up(size, alignof(int64_t), &overflow);
859
+
860
+ size = MULi64(shape, sizeof(int64_t), &overflow);
861
+ align_offset = ADDi64(offset_offset, size, &overflow);
862
+
863
+ size = MULi64(shape, sizeof(uint16_t), &overflow);
864
+ pad_offset = ADDi64(align_offset, size, &overflow);
865
+
866
+ extra = ADDi64(pad_offset, size, &overflow);
867
+
868
+ if (overflow) {
869
+ ndt_err_format(ctx, NDT_ValueError, "tuple size too large");
870
+ return NULL;
871
+ }
872
+
873
+ t = ndt_new_extra(Tuple, extra, ctx);
874
+ if (t == NULL) {
875
+ return NULL;
876
+ }
877
+
878
+ t->Tuple.flag = flag;
879
+ t->Tuple.shape = shape;
880
+ t->Tuple.types = (ndt_t **)t->extra;
881
+ t->Concrete.Tuple.offset = (int64_t *)(t->extra + offset_offset);
882
+ t->Concrete.Tuple.align = (uint16_t *)(t->extra + align_offset);
883
+ t->Concrete.Tuple.pad = (uint16_t *)(t->extra + pad_offset);
884
+
885
+ for (i = 0; i < shape; i++) {
886
+ t->Tuple.types[i] = NULL;
887
+ t->Concrete.Tuple.offset[i] = 0;
888
+ t->Concrete.Tuple.align[i] = 1;
889
+ t->Concrete.Tuple.pad[i] = 0;
890
+ }
891
+
892
+ return t;
893
+ }
894
+
895
+ ndt_t *
896
+ ndt_record_new(enum ndt_variadic flag, int64_t shape, ndt_context_t *ctx)
897
+ {
898
+ ndt_t *t = NULL;
899
+ bool overflow = 0;
900
+ int64_t types_offset;
901
+ int64_t offset_offset;
902
+ int64_t align_offset;
903
+ int64_t pad_offset;
904
+ int64_t extra;
905
+ int64_t size;
906
+ int64_t i;
907
+
908
+ size = types_offset = MULi64(shape, sizeof(char *), &overflow);
909
+
910
+ offset_offset = ADDi64(types_offset, size, &overflow);
911
+ offset_offset = round_up(offset_offset, alignof(int64_t), &overflow);
912
+
913
+ size = MULi64(shape, sizeof(int64_t), &overflow);
914
+ align_offset = ADDi64(offset_offset, size, &overflow);
915
+
916
+ size = MULi64(shape, sizeof(uint16_t), &overflow);
917
+ pad_offset = ADDi64(align_offset, size, &overflow);
918
+
919
+ extra = ADDi64(pad_offset, size, &overflow);
920
+
921
+ if (overflow) {
922
+ ndt_err_format(ctx, NDT_ValueError, "record size too large");
923
+ return NULL;
924
+ }
925
+
926
+ t = ndt_new_extra(Record, extra, ctx);
927
+ if (t == NULL) {
928
+ return NULL;
929
+ }
930
+
931
+ t->Record.flag = flag;
932
+ t->Record.shape = shape;
933
+ t->Record.names = (char **)t->extra;
934
+ t->Record.types = (ndt_t **)(t->extra + types_offset);
935
+ t->Concrete.Record.offset = (int64_t *)(t->extra + offset_offset);
936
+ t->Concrete.Record.align = (uint16_t *)(t->extra + align_offset);
937
+ t->Concrete.Record.pad = (uint16_t *)(t->extra + pad_offset);
938
+
939
+ for (i = 0; i < shape; i++) {
940
+ t->Record.names[i] = NULL;
941
+ t->Record.types[i] = NULL;
942
+ t->Concrete.Record.offset[i] = 0;
943
+ t->Concrete.Record.align[i] = 1;
944
+ t->Concrete.Record.pad[i] = 0;
945
+ }
946
+
947
+ return t;
948
+ }
949
+
950
+ void
951
+ ndt_del(ndt_t *t)
952
+ {
953
+ if (t == NULL) {
954
+ return;
955
+ }
956
+
957
+ switch (t->tag) {
958
+ case Module: {
959
+ ndt_free(t->Module.name);
960
+ ndt_del(t->Module.type);
961
+ goto free_type;
962
+ }
963
+
964
+ case Function: {
965
+ int64_t i;
966
+ for (i = 0; i < t->Function.nargs; i++) {
967
+ ndt_del(t->Function.types[i]);
968
+ }
969
+ goto free_type;
970
+ }
971
+
972
+ case FixedDim: {
973
+ ndt_del(t->FixedDim.type);
974
+ goto free_type;
975
+ }
976
+
977
+ case VarDim: {
978
+ ndt_del(t->VarDim.type);
979
+ if (ndt_is_concrete(t)) {
980
+ if (t->Concrete.VarDim.flag == InternalOffsets) {
981
+ ndt_free((int32_t *)t->Concrete.VarDim.offsets);
982
+ }
983
+ ndt_free(t->Concrete.VarDim.slices);
984
+ }
985
+ goto free_type;
986
+ }
987
+
988
+ case SymbolicDim: {
989
+ ndt_free(t->SymbolicDim.name);
990
+ ndt_del(t->SymbolicDim.type);
991
+ goto free_type;
992
+ }
993
+
994
+ case EllipsisDim: {
995
+ ndt_free(t->EllipsisDim.name);
996
+ ndt_del(t->EllipsisDim.type);
997
+ goto free_type;
998
+ }
999
+
1000
+ case Tuple: {
1001
+ int64_t i;
1002
+ for (i = 0; i < t->Tuple.shape; i++) {
1003
+ ndt_del(t->Tuple.types[i]);
1004
+ }
1005
+ goto free_type;
1006
+ }
1007
+
1008
+ case Record: {
1009
+ int64_t i;
1010
+ for (i = 0; i < t->Record.shape; i++) {
1011
+ ndt_free(t->Record.names[i]);
1012
+ ndt_del(t->Record.types[i]);
1013
+ }
1014
+ goto free_type;
1015
+ }
1016
+
1017
+ case Ref: {
1018
+ ndt_del(t->Ref.type);
1019
+ goto free_type;
1020
+ }
1021
+
1022
+ case Constr: {
1023
+ ndt_free(t->Constr.name);
1024
+ ndt_del(t->Constr.type);
1025
+ goto free_type;
1026
+ }
1027
+
1028
+ case Nominal: {
1029
+ ndt_free(t->Nominal.name);
1030
+ ndt_del(t->Nominal.type);
1031
+ goto free_type;
1032
+ }
1033
+
1034
+ case Categorical: {
1035
+ ndt_value_array_del(t->Categorical.types, t->Categorical.ntypes);
1036
+ goto free_type;
1037
+ }
1038
+
1039
+ case Typevar: {
1040
+ ndt_free(t->Typevar.name);
1041
+ goto free_type;
1042
+ }
1043
+
1044
+ case AnyKind: case ScalarKind:
1045
+ case FixedStringKind: case FixedString:
1046
+ case FixedBytesKind: case FixedBytes:
1047
+ case String: case Bytes: case Char:
1048
+ case Bool:
1049
+ case SignedKind:
1050
+ case Int8: case Int16: case Int32: case Int64:
1051
+ case UnsignedKind:
1052
+ case Uint8: case Uint16: case Uint32: case Uint64:
1053
+ case FloatKind:
1054
+ case Float16: case Float32: case Float64:
1055
+ case ComplexKind:
1056
+ case Complex32: case Complex64: case Complex128:
1057
+ goto free_type;
1058
+ }
1059
+
1060
+ /* NOT REACHED: tags should be exhaustive. */
1061
+ ndt_internal_error("invalid tag");
1062
+
1063
+
1064
+ free_type:
1065
+ ndt_free(t);
1066
+ }
1067
+
1068
+
1069
+ /******************************************************************************/
1070
+ /* Type functions */
1071
+ /******************************************************************************/
1072
+
1073
+ /* Set the option flag of a type */
1074
+ ndt_t *
1075
+ ndt_option(ndt_t *t)
1076
+ {
1077
+ t->flags |= NDT_OPTION;
1078
+ return t;
1079
+ }
1080
+
1081
+ /* Abstract namespace type */
1082
+ ndt_t *
1083
+ ndt_module(char *name, ndt_t *type, ndt_context_t *ctx)
1084
+ {
1085
+ ndt_t *t;
1086
+
1087
+ t = ndt_new(Module, ctx);
1088
+ if (t == NULL) {
1089
+ ndt_free(name);
1090
+ ndt_del(type);
1091
+ return NULL;
1092
+ }
1093
+
1094
+ /* abstract type */
1095
+ t->Module.name = name;
1096
+ t->Module.type = type;
1097
+ t->flags = ndt_subtree_flags(type);
1098
+
1099
+ return t;
1100
+ }
1101
+
1102
+ /* Abstract function signatures */
1103
+ ndt_t *
1104
+ ndt_function(ndt_t * const *types, int64_t nargs, int64_t nin, int64_t nout,
1105
+ ndt_context_t *ctx)
1106
+ {
1107
+ ndt_t *t;
1108
+ int64_t i;
1109
+
1110
+ assert(0 <= nin && 0 <= nout && nargs == nin+nout);
1111
+
1112
+ if (!check_function_invariants(types, nin, nargs, ctx)) {
1113
+ ndt_type_array_clear((ndt_t **)types, nargs);
1114
+ return NULL;
1115
+ }
1116
+
1117
+ /* abstract type */
1118
+ t = ndt_function_new(nargs, ctx);
1119
+ if (t == NULL) {
1120
+ for (i = 0; i < nargs; i++) {
1121
+ ndt_del(types[i]);
1122
+ }
1123
+ return NULL;
1124
+ }
1125
+ t->Function.elemwise = is_elemwise(types, nargs);
1126
+ t->Function.nin = nin;
1127
+ t->Function.nout = nout;
1128
+
1129
+ for (i = 0; i < nargs; i++) {
1130
+ t->Function.types[i] = types[i];
1131
+ t->flags |= ndt_dim_flags(types[i]);
1132
+ }
1133
+
1134
+ return t;
1135
+ }
1136
+
1137
+ ndt_t *
1138
+ ndt_any_kind(ndt_context_t *ctx)
1139
+ {
1140
+ return ndt_new(AnyKind, ctx);
1141
+ }
1142
+
1143
+
1144
+ /******************************************************************************/
1145
+ /* Dimension types */
1146
+ /******************************************************************************/
1147
+
1148
+ /*
1149
+ * Return the step in the fixed dimension that contains 'type'. 'type'
1150
+ * is assumed to be either a dtype with ndim==0 or a FixedDim.
1151
+ */
1152
+ static inline int64_t
1153
+ fixed_step(ndt_t *type, int64_t step, bool *overflow)
1154
+ {
1155
+ assert(ndt_is_concrete(type));
1156
+ assert(type->tag != VarDim);
1157
+
1158
+ if (step != INT64_MAX) {
1159
+ return step;
1160
+ }
1161
+
1162
+ switch (type->tag) {
1163
+ case FixedDim:
1164
+ if (type->Concrete.FixedDim.itemsize == 0) {
1165
+ return MULi64(type->FixedDim.shape, type->Concrete.FixedDim.step,
1166
+ overflow);
1167
+ }
1168
+ else {
1169
+ return DIVi64(type->datasize, type->Concrete.FixedDim.itemsize,
1170
+ overflow);
1171
+ }
1172
+ default:
1173
+ return 1;
1174
+ }
1175
+ }
1176
+
1177
+ static inline int64_t
1178
+ fixed_datasize(ndt_t *type, int64_t shape, int64_t step, int64_t itemsize,
1179
+ bool *overflow)
1180
+ {
1181
+ int64_t index_range;
1182
+ int64_t datasize;
1183
+ int64_t abs_step;
1184
+
1185
+ if (shape == 0 || type->datasize == 0) {
1186
+ return 0;
1187
+ }
1188
+
1189
+ abs_step = ABSi64(step, overflow);
1190
+ index_range = MULi64(shape-1, abs_step, overflow);
1191
+ datasize = MULi64(index_range, itemsize, overflow);
1192
+ return ADDi64(datasize, type->datasize, overflow);
1193
+ }
1194
+
1195
+ static ndt_t *
1196
+ _ndt_to_fortran(const ndt_t *t, int64_t step, ndt_context_t *ctx)
1197
+ {
1198
+ bool overflow = 0;
1199
+ ndt_t *dt;
1200
+ int64_t next_step;
1201
+
1202
+ if (t->ndim == 0) {
1203
+ return ndt_copy(t, ctx);
1204
+ }
1205
+
1206
+ next_step = MULi64(step, t->FixedDim.shape, &overflow);
1207
+ if (overflow) {
1208
+ ndt_err_format(ctx, NDT_ValueError,
1209
+ "overflow in converting to Fortran order");
1210
+ return NULL;
1211
+ }
1212
+
1213
+ assert(t->tag == FixedDim);
1214
+ dt = _ndt_to_fortran(t->FixedDim.type, next_step, ctx);
1215
+ if (dt == NULL) {
1216
+ return NULL;
1217
+ }
1218
+
1219
+ return ndt_fixed_dim(dt, t->FixedDim.shape, step, ctx);
1220
+ }
1221
+
1222
+ /* Return a copy of a C-contiguous array in Fortran order. */
1223
+ ndt_t *
1224
+ ndt_to_fortran(const ndt_t *t, ndt_context_t *ctx)
1225
+ {
1226
+ if (ndt_is_abstract(t)) {
1227
+ ndt_err_format(ctx, NDT_TypeError,
1228
+ "cannot convert abstract type to Fortran order");
1229
+ return NULL;
1230
+ }
1231
+
1232
+ if (!ndt_is_c_contiguous(t)) {
1233
+ ndt_err_format(ctx, NDT_TypeError,
1234
+ "array must be C-contiguous for conversion to Fortran order");
1235
+ return NULL;
1236
+ }
1237
+
1238
+ return _ndt_to_fortran(t, 1, ctx);
1239
+ }
1240
+
1241
+ ndt_t *
1242
+ ndt_fixed_dim(ndt_t *type, int64_t shape, int64_t step, ndt_context_t *ctx)
1243
+ {
1244
+ ndt_t *t;
1245
+ bool overflow = 0;
1246
+
1247
+ if (!check_type_invariants(type, ctx)) {
1248
+ ndt_del(type);
1249
+ return NULL;
1250
+ }
1251
+
1252
+ if (shape < 0) {
1253
+ ndt_err_format(ctx, NDT_ValueError, "shape must be a natural number");
1254
+ return NULL;
1255
+ }
1256
+
1257
+ /* abstract type */
1258
+ t = ndt_new(FixedDim, ctx);
1259
+ if (t == NULL) {
1260
+ ndt_del(type);
1261
+ return NULL;
1262
+ }
1263
+ t->FixedDim.tag = RequireNA;
1264
+ t->FixedDim.shape = shape;
1265
+ t->FixedDim.type = type;
1266
+ t->ndim = type->ndim + 1;
1267
+ t->flags = ndt_dim_flags(type);
1268
+
1269
+ t->Concrete.FixedDim.itemsize = 0;
1270
+ t->Concrete.FixedDim.step = INT64_MAX;
1271
+
1272
+ /* concrete access */
1273
+ t->access = type->access;
1274
+ if (t->access == Concrete) {
1275
+ int64_t itemsize = ndt_itemsize(type);
1276
+ step = fixed_step(type, step, &overflow);
1277
+
1278
+ t->Concrete.FixedDim.itemsize = itemsize;
1279
+ t->Concrete.FixedDim.step = step;
1280
+ t->datasize = fixed_datasize(type, shape, step, itemsize, &overflow);
1281
+ t->align = type->align;
1282
+ }
1283
+
1284
+ if (overflow) {
1285
+ ndt_err_format(ctx, NDT_ValueError, "data size too large");
1286
+ ndt_del(t);
1287
+ return NULL;
1288
+ }
1289
+
1290
+ return t;
1291
+ }
1292
+
1293
+ ndt_t *
1294
+ ndt_fixed_dim_tag(ndt_t *type, enum ndt_contig tag, int64_t shape, int64_t step,
1295
+ ndt_context_t *ctx)
1296
+ {
1297
+ ndt_t *t = ndt_fixed_dim(type, shape, step, ctx);
1298
+ if (t == NULL) {
1299
+ return NULL;
1300
+ }
1301
+ t->FixedDim.tag = tag;
1302
+
1303
+ if (tag != RequireNA) {
1304
+ t->access = Abstract;
1305
+ }
1306
+
1307
+ return t;
1308
+ }
1309
+
1310
+ ndt_t *
1311
+ ndt_abstract_var_dim(ndt_t *type, ndt_context_t *ctx)
1312
+ {
1313
+ ndt_t *t = NULL;
1314
+
1315
+ if (!check_abstract_var_invariants(type, ctx)) {
1316
+ ndt_del(type);
1317
+ return NULL;
1318
+ }
1319
+
1320
+ /* abstract type */
1321
+ t = ndt_new(VarDim, ctx);
1322
+ if (t == NULL) {
1323
+ ndt_del(type);
1324
+ return NULL;
1325
+ }
1326
+ t->VarDim.type = type;
1327
+ t->ndim = type->ndim+1;
1328
+ t->flags = ndt_dim_flags(type);
1329
+
1330
+ /* concrete access */
1331
+ t->access = Abstract;
1332
+ t->Concrete.VarDim.flag = ExternalOffsets;
1333
+ t->Concrete.VarDim.itemsize = 0;
1334
+ t->Concrete.VarDim.noffsets = 0;
1335
+ t->Concrete.VarDim.offsets = NULL;
1336
+ t->Concrete.VarDim.nslices = 0;
1337
+ t->Concrete.VarDim.slices = NULL;
1338
+
1339
+ return t;
1340
+ }
1341
+
1342
+ /*
1343
+ * Compute the current start index, step and shape of a var dimension.
1344
+ * Recomputing the values avoids a potentially very large shape array
1345
+ * per dimension (same size as the offset array).
1346
+ */
1347
+ int64_t
1348
+ ndt_var_indices(int64_t *res_start, int64_t *res_step, const ndt_t *t,
1349
+ int64_t index, ndt_context_t *ctx)
1350
+ {
1351
+ int64_t list_start, list_stop, list_shape;
1352
+ int64_t start, stop, step;
1353
+ int64_t res_shape;
1354
+ const ndt_slice_t *slices;
1355
+ int32_t i;
1356
+
1357
+ assert(ndt_is_concrete(t));
1358
+ assert(t->tag == VarDim);
1359
+
1360
+ if (index < 0 || index+1 >= t->Concrete.VarDim.noffsets) {
1361
+ ndt_err_format(ctx, NDT_ValueError,
1362
+ "var dim index out of range: index=%" PRIi64 ", noffsets=%" PRIi32,
1363
+ index, t->Concrete.VarDim.noffsets);
1364
+ return -1;
1365
+ }
1366
+
1367
+ list_start = t->Concrete.VarDim.offsets[index];
1368
+ list_stop = t->Concrete.VarDim.offsets[index+1];
1369
+ list_shape = list_stop - list_start;
1370
+
1371
+ *res_start = 0;
1372
+ *res_step = 1;
1373
+ res_shape = list_shape;
1374
+ slices = t->Concrete.VarDim.slices;
1375
+
1376
+ for (i = 0; i < t->Concrete.VarDim.nslices; i++) {
1377
+ start = slices[i].start;
1378
+ stop = slices[i].stop;
1379
+ step = slices[i].step;
1380
+ res_shape = ndt_slice_adjust_indices(res_shape, &start, &stop, step);
1381
+ *res_start += (start * *res_step);
1382
+ *res_step *= step;
1383
+ }
1384
+
1385
+ *res_start += list_start;
1386
+
1387
+ return res_shape;
1388
+ }
1389
+
1390
+ ndt_slice_t *
1391
+ ndt_var_add_slice(int32_t *nslices, const ndt_t *t,
1392
+ int64_t start, int64_t stop, int64_t step,
1393
+ ndt_context_t *ctx)
1394
+ {
1395
+ int n = t->Concrete.VarDim.nslices;
1396
+ ndt_slice_t *slices;
1397
+
1398
+ assert(ndt_is_concrete(t));
1399
+ assert(t->tag == VarDim);
1400
+
1401
+ if (n == INT_MAX) {
1402
+ ndt_err_format(ctx, NDT_RuntimeError, "slice stack overflow");
1403
+ return NULL;
1404
+ }
1405
+
1406
+ slices = ndt_alloc(n+1, sizeof *slices);
1407
+ if (slices == NULL) {
1408
+ return ndt_memory_error(ctx);
1409
+ }
1410
+ memcpy(slices, t->Concrete.VarDim.slices, n * (sizeof *slices));
1411
+
1412
+ slices[n].start = start;
1413
+ slices[n].stop = stop;
1414
+ slices[n].step = step;
1415
+
1416
+ *nslices = n+1;
1417
+
1418
+ return slices;
1419
+ }
1420
+
1421
+ ndt_t *
1422
+ ndt_var_dim(ndt_t *type,
1423
+ enum ndt_offsets flag,
1424
+ int32_t noffsets, const int32_t *offsets,
1425
+ int32_t nslices, ndt_slice_t *slices,
1426
+ ndt_context_t *ctx)
1427
+ {
1428
+ bool overflow = 0;
1429
+ ndt_t *t;
1430
+ int64_t itemsize, datasize;
1431
+
1432
+ assert(offsets != NULL);
1433
+ assert(!!nslices == !!slices);
1434
+
1435
+ if (!check_var_invariants(flag, type, ctx)) {
1436
+ ndt_del(type);
1437
+ goto error;
1438
+ }
1439
+
1440
+ if (noffsets < 2) {
1441
+ ndt_err_format(ctx, NDT_InvalidArgumentError, "var_dim: noffsets < 2");
1442
+ ndt_del(type);
1443
+ goto error;
1444
+ }
1445
+
1446
+ if (!ndt_is_concrete(type)) {
1447
+ ndt_err_format(ctx, NDT_InvalidArgumentError,
1448
+ "var_dim: expected concrete type");
1449
+ ndt_del(type);
1450
+ goto error;
1451
+ }
1452
+
1453
+ switch (type->tag) {
1454
+ case VarDim:
1455
+ if (offsets[noffsets-1] != type->Concrete.VarDim.noffsets-1) {
1456
+ ndt_err_format(ctx, NDT_ValueError,
1457
+ "var_dim: missing or invalid number of offset arguments");
1458
+ ndt_del(type);
1459
+ goto error;
1460
+ }
1461
+ datasize = type->datasize;
1462
+ itemsize = type->Concrete.VarDim.itemsize;
1463
+ break;
1464
+ default:
1465
+ datasize = MULi64(offsets[noffsets-1], type->datasize, &overflow);
1466
+ itemsize = type->datasize;
1467
+ break;
1468
+ }
1469
+
1470
+ if (overflow) {
1471
+ ndt_err_format(ctx, NDT_ValueError,
1472
+ "overflow in creating var dimension");
1473
+ ndt_del(type);
1474
+ goto error;
1475
+ }
1476
+
1477
+ /* abstract type */
1478
+ t = ndt_new(VarDim, ctx);
1479
+ if (t == NULL) {
1480
+ ndt_del(type);
1481
+ goto error;
1482
+ }
1483
+ t->VarDim.type = type;
1484
+ t->ndim = type->ndim+1;
1485
+ t->flags = ndt_dim_flags(type);
1486
+
1487
+ /* concrete access */
1488
+ t->access = Concrete;
1489
+ t->datasize = datasize;
1490
+ t->align = type->align;
1491
+ t->Concrete.VarDim.flag = flag;
1492
+ t->Concrete.VarDim.itemsize = itemsize;
1493
+ t->Concrete.VarDim.noffsets = noffsets;
1494
+ t->Concrete.VarDim.offsets = offsets;
1495
+ t->Concrete.VarDim.nslices = nslices;
1496
+ t->Concrete.VarDim.slices = slices;
1497
+
1498
+ return t;
1499
+
1500
+
1501
+ error:
1502
+ if (flag == InternalOffsets) {
1503
+ ndt_free((int32_t *)offsets);
1504
+ }
1505
+ ndt_free(slices);
1506
+ return NULL;
1507
+ }
1508
+
1509
+ ndt_t *
1510
+ ndt_symbolic_dim(char *name, ndt_t *type, ndt_context_t *ctx)
1511
+ {
1512
+ ndt_t *t;
1513
+
1514
+ if (!check_type_invariants(type, ctx)) {
1515
+ ndt_free(name);
1516
+ ndt_del(type);
1517
+ return NULL;
1518
+ }
1519
+
1520
+ /* abstract type */
1521
+ t = ndt_new(SymbolicDim, ctx);
1522
+ if (t == NULL) {
1523
+ ndt_free(name);
1524
+ ndt_del(type);
1525
+ return NULL;
1526
+ }
1527
+ t->SymbolicDim.tag = RequireNA;
1528
+ t->SymbolicDim.name = name;
1529
+ t->SymbolicDim.type = type;
1530
+ t->ndim = type->ndim + 1;
1531
+ t->flags = ndt_dim_flags(type);
1532
+
1533
+ return t;
1534
+ }
1535
+
1536
+ ndt_t *
1537
+ ndt_symbolic_dim_tag(char *name, ndt_t *type, enum ndt_contig tag, ndt_context_t *ctx)
1538
+ {
1539
+ ndt_t *t = ndt_symbolic_dim(name, type, ctx);
1540
+ if (t == NULL) {
1541
+ return NULL;
1542
+ }
1543
+ t->SymbolicDim.tag = tag;
1544
+
1545
+ return t;
1546
+ }
1547
+
1548
+ ndt_t *
1549
+ ndt_ellipsis_dim(char *name, ndt_t *type, ndt_context_t *ctx)
1550
+ {
1551
+ ndt_t *t;
1552
+ uint32_t flags;
1553
+
1554
+ if (!check_ellipsis_invariants(type, ctx)) {
1555
+ ndt_free(name);
1556
+ ndt_del(type);
1557
+ return NULL;
1558
+ }
1559
+
1560
+ flags = ndt_dim_flags(type);
1561
+ if (flags & NDT_ELLIPSIS) {
1562
+ ndt_err_format(ctx, NDT_ValueError, "more than one ellipsis");
1563
+ ndt_free(name);
1564
+ ndt_del(type);
1565
+ return NULL;
1566
+ }
1567
+
1568
+ /* abstract type */
1569
+ t = ndt_new(EllipsisDim, ctx);
1570
+ if (t == NULL) {
1571
+ ndt_free(name);
1572
+ ndt_del(type);
1573
+ return NULL;
1574
+ }
1575
+ t->EllipsisDim.tag = RequireNA;
1576
+ t->EllipsisDim.name = name;
1577
+ t->EllipsisDim.type = type;
1578
+ t->flags = flags | NDT_ELLIPSIS;
1579
+ t->ndim = type->ndim + 1;
1580
+
1581
+ return t;
1582
+ }
1583
+
1584
+ ndt_t *
1585
+ ndt_ellipsis_dim_tag(char *name, ndt_t *type, enum ndt_contig tag, ndt_context_t *ctx)
1586
+ {
1587
+ ndt_t *t = ndt_ellipsis_dim(name, type, ctx);
1588
+ if (t == NULL) {
1589
+ return NULL;
1590
+ }
1591
+ t->EllipsisDim.tag = tag;
1592
+
1593
+ return t;
1594
+ }
1595
+
1596
+
1597
+ /******************************************************************************/
1598
+ /* Container types */
1599
+ /******************************************************************************/
1600
+
1601
+ /*
1602
+ * Initialize the access information of a concrete tuple or record.
1603
+ * Assumptions:
1604
+ * 1) t->tag == Tuple || t->tag == Record
1605
+ * 2) t->access == Concrete
1606
+ * 3) 0 <= i < shape ==> fields[i].access == Concrete
1607
+ * 4) len(fields) == len(offsets) == len(align) == len(pad) == shape
1608
+ */
1609
+ static int
1610
+ init_concrete_fields(ndt_t *t, int64_t *offsets, uint16_t *align, uint16_t *pad,
1611
+ const ndt_field_t *fields, int64_t shape,
1612
+ uint16_opt_t align_attr, uint16_opt_t pack,
1613
+ ndt_context_t *ctx)
1614
+ {
1615
+ bool overflow = 0;
1616
+ int64_t offset = 0;
1617
+ int64_t size = 0;
1618
+ uint16_t maxalign;
1619
+ int64_t i;
1620
+
1621
+ maxalign = get_align(align_attr, 1, ctx);
1622
+ if (maxalign == UINT16_MAX) {
1623
+ return -1;
1624
+ }
1625
+
1626
+ if (get_align(pack, 1, ctx) == UINT16_MAX) {
1627
+ return -1;
1628
+ }
1629
+
1630
+ for (i = 0; i < shape; i++) {
1631
+ assert(fields[i].access == Concrete);
1632
+ assert(fields[i].type->access == Concrete);
1633
+
1634
+ if (pack.tag == Some) {
1635
+ if (fields[i].Concrete.explicit_align) {
1636
+ ndt_err_format(ctx, NDT_InvalidArgumentError,
1637
+ "cannot have 'pack' tuple attribute and field attributes");
1638
+ return -1;
1639
+ }
1640
+ align[i] = pack.Some;
1641
+ }
1642
+ else {
1643
+ align[i] = fields[i].Concrete.align;
1644
+ }
1645
+
1646
+ maxalign = max(align[i], maxalign);
1647
+
1648
+ if (i > 0) {
1649
+ int64_t n = offset;
1650
+ offset = round_up(offset, align[i], &overflow);
1651
+ pad[i-1] = (uint16_t)(offset - n);
1652
+ }
1653
+
1654
+ offsets[i] = offset;
1655
+ offset = ADDi64(offset, fields[i].type->datasize, &overflow);
1656
+ }
1657
+
1658
+ size = round_up(offset, maxalign, &overflow);
1659
+
1660
+ if (shape > 0) {
1661
+ int64_t n = (size - offsets[shape-1]) - fields[shape-1].type->datasize;
1662
+ pad[shape-1] = (uint16_t)n;
1663
+ }
1664
+
1665
+ assert(t->access == Concrete);
1666
+ t->align = maxalign;
1667
+ t->datasize = size;
1668
+
1669
+ for (i = 0; i < shape; i++) {
1670
+ if (fields[i].Concrete.explicit_pad) {
1671
+ if (fields[i].Concrete.pad != pad[i]) {
1672
+ ndt_err_format(ctx, NDT_ValueError,
1673
+ "field %" PRIi64 " has invalid padding, natural padding is %" PRIi16
1674
+ ", got %" PRIi16 "\n",
1675
+ i, pad[i], fields[i].Concrete.pad);
1676
+ return -1;
1677
+ }
1678
+ }
1679
+ }
1680
+
1681
+ if (overflow) {
1682
+ ndt_err_format(ctx, NDT_ValueError, "tuple or record too large");
1683
+ return -1;
1684
+ }
1685
+
1686
+ return 0;
1687
+ }
1688
+
1689
+ ndt_t *
1690
+ ndt_tuple(enum ndt_variadic flag, ndt_field_t *fields, int64_t shape,
1691
+ uint16_opt_t align, uint16_opt_t pack, ndt_context_t *ctx)
1692
+ {
1693
+ ndt_t *t;
1694
+ int64_t i;
1695
+
1696
+ assert((fields == NULL) == (shape == 0));
1697
+
1698
+ for (i = 0; i < shape; i++) {
1699
+ if (!check_type_invariants(fields[i].type, ctx)) {
1700
+ ndt_field_array_del(fields, shape);
1701
+ return NULL;
1702
+ }
1703
+ }
1704
+
1705
+ /* abstract type */
1706
+ t = ndt_tuple_new(flag, shape, ctx);
1707
+ if (t == NULL) {
1708
+ ndt_field_array_del(fields, shape);
1709
+ return NULL;
1710
+ }
1711
+
1712
+ /* check concrete access */
1713
+ t->access = (flag == Variadic) ? Abstract : Concrete;
1714
+ for (i = 0; i < shape; i++) {
1715
+ if (fields[i].access == Abstract) {
1716
+ t->access = Abstract;
1717
+ }
1718
+ }
1719
+
1720
+ if (t->access == Abstract) {
1721
+ /* check if any field has explicit 'align' or 'pack' attributes */
1722
+ for (i = 0; i < shape; i++) {
1723
+ if (fields[i].access == Concrete &&
1724
+ fields[i].Concrete.explicit_align) {
1725
+ ndt_err_format(ctx, NDT_InvalidArgumentError,
1726
+ "explicit field alignment in abstract tuple");
1727
+ ndt_field_array_del(fields, shape);
1728
+ ndt_free(t);
1729
+ return NULL;
1730
+ }
1731
+ }
1732
+ for (i = 0; i < shape; i++) {
1733
+ assert(fields[i].name == NULL);
1734
+ t->Tuple.types[i] = fields[i].type;
1735
+ t->flags |= ndt_subtree_flags(fields[i].type);
1736
+ }
1737
+ ndt_free(fields);
1738
+ return t;
1739
+ }
1740
+ else {
1741
+ if (init_concrete_fields(t,
1742
+ t->Concrete.Tuple.offset,
1743
+ t->Concrete.Tuple.align,
1744
+ t->Concrete.Tuple.pad,
1745
+ fields, shape, align, pack, ctx) < 0) {
1746
+ ndt_field_array_del(fields, shape);
1747
+ ndt_free(t);
1748
+ return NULL;
1749
+ }
1750
+ for (i = 0; i < shape; i++) {
1751
+ assert(fields[i].name == NULL);
1752
+ t->Tuple.types[i] = fields[i].type;
1753
+ t->flags |= ndt_subtree_flags(fields[i].type);
1754
+ }
1755
+ ndt_free(fields);
1756
+ return t;
1757
+ }
1758
+ }
1759
+
1760
+ ndt_t *
1761
+ ndt_record(enum ndt_variadic flag, ndt_field_t *fields, int64_t shape,
1762
+ uint16_opt_t align, uint16_opt_t pack, ndt_context_t *ctx)
1763
+ {
1764
+ ndt_t *t;
1765
+ int64_t i;
1766
+
1767
+ assert((fields == NULL) == (shape == 0));
1768
+
1769
+ for (i = 0; i < shape; i++) {
1770
+ if (!check_type_invariants(fields[i].type, ctx)) {
1771
+ ndt_field_array_del(fields, shape);
1772
+ return NULL;
1773
+ }
1774
+ }
1775
+
1776
+ /* abstract type */
1777
+ t = ndt_record_new(flag, shape, ctx);
1778
+ if (t == NULL) {
1779
+ ndt_field_array_del(fields, shape);
1780
+ return NULL;
1781
+ }
1782
+
1783
+ /* check concrete access */
1784
+ t->access = (flag == Variadic) ? Abstract : Concrete;
1785
+ for (i = 0; i < shape; i++) {
1786
+ if (fields[i].access == Abstract) {
1787
+ t->access = Abstract;
1788
+ }
1789
+ }
1790
+
1791
+ if (t->access == Abstract) {
1792
+ /* check if any field has explicit 'align' or 'pack' attributes */
1793
+ for (i = 0; i < shape; i++) {
1794
+ if (fields[i].access == Concrete &&
1795
+ fields[i].Concrete.explicit_align) {
1796
+ ndt_err_format(ctx, NDT_InvalidArgumentError,
1797
+ "explicit field alignment in abstract tuple");
1798
+ /* at this point names and types still belong to the fields */
1799
+ ndt_field_array_del(fields, shape);
1800
+ ndt_free(t);
1801
+ return NULL;
1802
+ }
1803
+ }
1804
+ for (i = 0; i < shape; i++) {
1805
+ t->Record.names[i] = fields[i].name;
1806
+ t->Record.types[i] = fields[i].type;
1807
+ t->flags |= ndt_subtree_flags(fields[i].type);
1808
+ }
1809
+ ndt_free(fields);
1810
+ return t;
1811
+ }
1812
+ else {
1813
+ if (init_concrete_fields(t,
1814
+ t->Concrete.Record.offset,
1815
+ t->Concrete.Record.align,
1816
+ t->Concrete.Record.pad,
1817
+ fields, shape, align, pack, ctx) < 0) {
1818
+ /* at this point names and types still belong to the fields */
1819
+ ndt_field_array_del(fields, shape);
1820
+ ndt_free(t);
1821
+ return NULL;
1822
+ }
1823
+ for (i = 0; i < shape; i++) {
1824
+ t->Record.names[i] = fields[i].name;
1825
+ t->Record.types[i] = fields[i].type;
1826
+ t->flags |= ndt_subtree_flags(fields[i].type);
1827
+ }
1828
+ ndt_free(fields);
1829
+ return t;
1830
+ }
1831
+ }
1832
+
1833
+ ndt_t *
1834
+ ndt_ref(ndt_t *type, ndt_context_t *ctx)
1835
+ {
1836
+ ndt_t *t;
1837
+
1838
+ if (!check_type_invariants(type, ctx)) {
1839
+ ndt_del(type);
1840
+ return NULL;
1841
+ }
1842
+
1843
+ /* abstract type */
1844
+ t = ndt_new(Ref, ctx);
1845
+ if (t == NULL) {
1846
+ ndt_del(type);
1847
+ return NULL;
1848
+ }
1849
+ t->Ref.type = type;
1850
+ t->flags = ndt_subtree_flags(type);
1851
+
1852
+ /* concrete access */
1853
+ t->access = type->access;
1854
+ t->datasize = sizeof(void *);
1855
+ t->align = alignof(void *);
1856
+
1857
+ return t;
1858
+ }
1859
+
1860
+ ndt_t *
1861
+ ndt_constr(char *name, ndt_t *type, ndt_context_t *ctx)
1862
+ {
1863
+ ndt_t *t;
1864
+
1865
+ if (!check_type_invariants(type, ctx)) {
1866
+ ndt_free(name);
1867
+ ndt_del(type);
1868
+ return NULL;
1869
+ }
1870
+
1871
+ t = ndt_new(Constr, ctx);
1872
+ if (t == NULL) {
1873
+ ndt_free(name);
1874
+ ndt_del(type);
1875
+ return NULL;
1876
+ }
1877
+
1878
+ /* abstract type */
1879
+ t->Constr.name = name;
1880
+ t->Constr.type = type;
1881
+ t->flags = ndt_subtree_flags(type);
1882
+
1883
+ /* concrete access */
1884
+ t->access = type->access;
1885
+ if (t->access == Concrete) {
1886
+ t->datasize = type->datasize;
1887
+ t->align = type->align;
1888
+ }
1889
+
1890
+ return t;
1891
+ }
1892
+
1893
+ ndt_t *
1894
+ ndt_nominal(char *name, ndt_t *type, ndt_context_t *ctx)
1895
+ {
1896
+ const ndt_typedef_t *d;
1897
+ ndt_t *t;
1898
+
1899
+ d = ndt_typedef_find(name, ctx);
1900
+ if (d == NULL) {
1901
+ ndt_free(name);
1902
+ ndt_del(type);
1903
+ return NULL;
1904
+ }
1905
+
1906
+ if (type != NULL) {
1907
+ int ret = ndt_match(d->type, type, ctx);
1908
+ if (ret <= 0) {
1909
+ if (ret == 0) {
1910
+ ndt_err_format(ctx, NDT_ValueError,
1911
+ "type is not an instance of %s", name);
1912
+ }
1913
+ ndt_free(name);
1914
+ ndt_del(type);
1915
+ return NULL;
1916
+ }
1917
+ }
1918
+ else {
1919
+ type = ndt_copy(d->type, ctx);
1920
+ if (type == NULL) {
1921
+ ndt_free(name);
1922
+ return NULL;
1923
+ }
1924
+ }
1925
+
1926
+ /* abstract type */
1927
+ t = ndt_new(Nominal, ctx);
1928
+ if (t == NULL) {
1929
+ ndt_free(name);
1930
+ ndt_del(type);
1931
+ return NULL;
1932
+ }
1933
+ t->Nominal.name = name;
1934
+ t->Nominal.type = type;
1935
+ t->Nominal.meth = &d->meth;
1936
+ t->flags = ndt_subtree_flags(type);
1937
+
1938
+ /* concrete access */
1939
+ t->access = type->access;
1940
+ t->datasize = type->datasize;
1941
+ t->align = type->align;
1942
+
1943
+ return t;
1944
+ }
1945
+
1946
+
1947
+ /******************************************************************************/
1948
+ /* Scalar types */
1949
+ /******************************************************************************/
1950
+
1951
+ ndt_t *
1952
+ ndt_scalar_kind(ndt_context_t *ctx)
1953
+ {
1954
+ return ndt_new(ScalarKind, ctx);
1955
+ }
1956
+
1957
+ /* Define a sort order for the typed values in the categorical set. */
1958
+ static int
1959
+ cmp(const void *x, const void *y)
1960
+ {
1961
+ const ndt_value_t *p = (const ndt_value_t *)x;
1962
+ const ndt_value_t *q = (const ndt_value_t *)y;
1963
+
1964
+ return ndt_value_compare(p, q);
1965
+ }
1966
+
1967
+ ndt_t *
1968
+ ndt_categorical(ndt_value_t *types, int64_t ntypes, ndt_context_t *ctx)
1969
+ {
1970
+ ndt_value_t *tmp;
1971
+ ndt_t *t;
1972
+ int64_t size, i;
1973
+
1974
+ tmp = ndt_alloc(ntypes, sizeof(ndt_value_t));
1975
+ if (tmp == NULL) {
1976
+ ndt_value_array_del(types, ntypes);
1977
+ return ndt_memory_error(ctx);
1978
+ }
1979
+
1980
+ /* Successful allocation implies no overflow and size <= SIZE_MAX. */
1981
+ size = ntypes * sizeof(ndt_value_t);
1982
+ memcpy(tmp, types, (size_t)size);
1983
+ qsort(tmp, (size_t)ntypes, sizeof *tmp, cmp);
1984
+
1985
+ for (i = 0; i+1 < ntypes; i++) {
1986
+ if (ndt_value_mem_equal(&tmp[i], &tmp[i+1])) {
1987
+ ndt_free(tmp);
1988
+ ndt_value_array_del(types, ntypes);
1989
+ ndt_err_format(ctx, NDT_ValueError, "duplicate category entries");
1990
+ return NULL;
1991
+ }
1992
+ }
1993
+ ndt_free(tmp);
1994
+
1995
+ /* abstract type */
1996
+ t = ndt_new(Categorical, ctx);
1997
+ if (t == NULL) {
1998
+ ndt_value_array_del(types, ntypes);
1999
+ return NULL;
2000
+ }
2001
+ t->Categorical.ntypes = ntypes;
2002
+ t->Categorical.types = types;
2003
+
2004
+ /* concrete access */
2005
+ t->access = Concrete;
2006
+ t->datasize = sizeof(ndt_categorical_t);
2007
+ t->align = alignof(ndt_categorical_t);
2008
+
2009
+ return t;
2010
+ }
2011
+
2012
+ ndt_t *
2013
+ ndt_fixed_string_kind(ndt_context_t *ctx)
2014
+ {
2015
+ return ndt_new(FixedStringKind, ctx);
2016
+ }
2017
+
2018
+ ndt_t *
2019
+ ndt_fixed_string(int64_t size, enum ndt_encoding encoding, ndt_context_t *ctx)
2020
+ {
2021
+ bool overflow = 0;
2022
+ ndt_t *t;
2023
+
2024
+ /* abstract type */
2025
+ t = ndt_new(FixedString, ctx);
2026
+ if (t == NULL) {
2027
+ return NULL;
2028
+ }
2029
+ t->FixedString.size = size;
2030
+ t->FixedString.encoding = encoding;
2031
+
2032
+ /* concrete access */
2033
+ t->access = Concrete;
2034
+ t->datasize = MULi64(ndt_sizeof_encoding(encoding), size, &overflow);
2035
+ t->align = ndt_alignof_encoding(encoding);
2036
+
2037
+ if (overflow) {
2038
+ ndt_err_format(ctx, NDT_ValueError,
2039
+ "overflow while creating fixed string");
2040
+ ndt_del(t);
2041
+ return NULL;
2042
+ }
2043
+
2044
+ return t;
2045
+ }
2046
+
2047
+ ndt_t *
2048
+ ndt_fixed_bytes_kind(ndt_context_t *ctx)
2049
+ {
2050
+ return ndt_new(FixedBytesKind, ctx);
2051
+ }
2052
+
2053
+ ndt_t *
2054
+ ndt_fixed_bytes(int64_t size, uint16_opt_t align_attr, ndt_context_t *ctx)
2055
+ {
2056
+ ndt_t *t;
2057
+ uint16_t align;
2058
+
2059
+ align = get_align(align_attr, 1, ctx);
2060
+ if (align == UINT16_MAX) {
2061
+ return NULL;
2062
+ }
2063
+
2064
+ if (size % align != 0) {
2065
+ ndt_err_format(ctx, NDT_ValueError,
2066
+ "data size must be a multiple of alignment");
2067
+ return NULL;
2068
+ }
2069
+
2070
+ /* abstract type */
2071
+ t = ndt_new(FixedBytes, ctx);
2072
+ if (t == NULL) {
2073
+ return NULL;
2074
+ }
2075
+ t->FixedBytes.size = size;
2076
+ t->FixedBytes.align = align;
2077
+
2078
+ /* concrete access */
2079
+ t->access = Concrete;
2080
+ t->datasize = size;
2081
+ t->align = align;
2082
+
2083
+ return t;
2084
+ }
2085
+
2086
+ ndt_t *
2087
+ ndt_string(ndt_context_t *ctx)
2088
+ {
2089
+ ndt_t *t;
2090
+
2091
+ /* abstract type */
2092
+ t = ndt_new(String, ctx);
2093
+ if (t == NULL) {
2094
+ return NULL;
2095
+ }
2096
+
2097
+ /* concrete access */
2098
+ t->access = Concrete;
2099
+ t->datasize = sizeof(char *);
2100
+ t->align = alignof(char *);
2101
+
2102
+ return t;
2103
+ }
2104
+
2105
+ ndt_t *
2106
+ ndt_bytes(uint16_opt_t target_align, ndt_context_t *ctx)
2107
+ {
2108
+ ndt_t *t;
2109
+ uint16_t align;
2110
+
2111
+ align = get_align(target_align, 1, ctx);
2112
+ if (align == UINT16_MAX) {
2113
+ return NULL;
2114
+ }
2115
+
2116
+ /* abstract type */
2117
+ t = ndt_new(Bytes, ctx);
2118
+ if (t == NULL) {
2119
+ return NULL;
2120
+ }
2121
+ t->Bytes.target_align = align;
2122
+
2123
+ /* concrete access */
2124
+ t->access = Concrete;
2125
+ t->datasize = sizeof(ndt_bytes_t);
2126
+ t->align = alignof(ndt_bytes_t);
2127
+
2128
+ return t;
2129
+ }
2130
+
2131
+ ndt_t *
2132
+ ndt_char(enum ndt_encoding encoding, ndt_context_t *ctx)
2133
+ {
2134
+ ndt_t *t;
2135
+
2136
+ /* abstract type */
2137
+ t = ndt_new(Char, ctx);
2138
+ if (t == NULL) {
2139
+ return NULL;
2140
+ }
2141
+ t->Char.encoding = encoding;
2142
+
2143
+ /* concrete access */
2144
+ t->access = Concrete;
2145
+ t->datasize = ndt_sizeof_encoding(encoding);
2146
+ t->align = ndt_alignof_encoding(encoding);
2147
+
2148
+ return t;
2149
+ }
2150
+
2151
+ ndt_t *
2152
+ ndt_signed_kind(ndt_context_t *ctx)
2153
+ {
2154
+ return ndt_new(SignedKind, ctx);
2155
+ }
2156
+
2157
+ ndt_t *
2158
+ ndt_unsigned_kind(ndt_context_t *ctx)
2159
+ {
2160
+ return ndt_new(UnsignedKind, ctx);
2161
+ }
2162
+
2163
+ ndt_t *
2164
+ ndt_float_kind(ndt_context_t *ctx)
2165
+ {
2166
+ return ndt_new(FloatKind, ctx);
2167
+ }
2168
+
2169
+ ndt_t *
2170
+ ndt_complex_kind(ndt_context_t *ctx)
2171
+ {
2172
+ return ndt_new(ComplexKind, ctx);
2173
+ }
2174
+
2175
+ ndt_t *
2176
+ ndt_primitive(enum ndt tag, uint32_t flags, ndt_context_t *ctx)
2177
+ {
2178
+ ndt_t *t;
2179
+
2180
+ if (flags != 0 && flags != NDT_LITTLE_ENDIAN && flags != NDT_BIG_ENDIAN) {
2181
+ ndt_err_format(ctx, NDT_ValueError,
2182
+ "flags argument must be 0 or NDT_LITTLE_ENDIAN or NDT_BIG_ENDIAN");
2183
+ return NULL;
2184
+ }
2185
+
2186
+ /* abstract type */
2187
+ t = ndt_new(tag, ctx);
2188
+ if (t == NULL) {
2189
+ return NULL;
2190
+ }
2191
+ t->flags |= flags;
2192
+
2193
+ /* concrete access */
2194
+ t->access = Concrete;
2195
+
2196
+ switch(tag) {
2197
+ case Bool:
2198
+ t->datasize = sizeof(bool);
2199
+ t->align = alignof(bool);
2200
+ break;
2201
+ case Int8:
2202
+ t->datasize = sizeof(int8_t);
2203
+ t->align = alignof(int8_t);
2204
+ break;
2205
+ case Int16:
2206
+ t->datasize = sizeof(int16_t);
2207
+ t->align = alignof(int16_t);
2208
+ break;
2209
+ case Int32:
2210
+ t->datasize = sizeof(int32_t);
2211
+ t->align = alignof(int32_t);
2212
+ break;
2213
+ case Int64:
2214
+ t->datasize = sizeof(int64_t);
2215
+ t->align = alignof(int64_t);
2216
+ break;
2217
+ case Uint8:
2218
+ t->datasize = sizeof(uint8_t);
2219
+ t->align = alignof(uint8_t);
2220
+ break;
2221
+ case Uint16:
2222
+ t->datasize = sizeof(uint16_t);
2223
+ t->align = alignof(uint16_t);
2224
+ break;
2225
+ case Uint32:
2226
+ t->datasize = sizeof(uint32_t);
2227
+ t->align = alignof(uint32_t);
2228
+ break;
2229
+ case Uint64:
2230
+ t->datasize = sizeof(uint64_t);
2231
+ t->align = alignof(uint64_t);
2232
+ break;
2233
+ case Float16:
2234
+ t->datasize = 2;
2235
+ t->align = 2;
2236
+ break;
2237
+ case Float32:
2238
+ t->datasize = sizeof(float);
2239
+ t->align = alignof(float);
2240
+ break;
2241
+ case Float64:
2242
+ t->datasize = sizeof(double);
2243
+ t->align = alignof(double);
2244
+ break;
2245
+ case Complex32:
2246
+ t->datasize = 4;
2247
+ t->align = 2;
2248
+ break;
2249
+ case Complex64:
2250
+ t->datasize = sizeof(ndt_complex64_t);
2251
+ t->align = alignof(ndt_complex64_t);
2252
+ break;
2253
+ case Complex128:
2254
+ t->datasize = sizeof(ndt_complex128_t);
2255
+ t->align = alignof(ndt_complex128_t);
2256
+ break;
2257
+ default:
2258
+ ndt_err_format(ctx, NDT_ValueError, "not a primitive type"),
2259
+ ndt_free(t);
2260
+ return NULL;
2261
+ }
2262
+
2263
+ return t;
2264
+ }
2265
+
2266
+ ndt_t *
2267
+ ndt_signed(int size, uint32_t flags, ndt_context_t *ctx)
2268
+ {
2269
+ switch (size) {
2270
+ case 1: return ndt_primitive(Int8, flags, ctx);
2271
+ case 2: return ndt_primitive(Int16, flags, ctx);
2272
+ case 4: return ndt_primitive(Int32, flags, ctx);
2273
+ case 8: return ndt_primitive(Int64, flags, ctx);
2274
+ default:
2275
+ ndt_err_format(ctx, NDT_ValueError,
2276
+ "invalid size for signed integer: '%d'", size);
2277
+ return NULL;
2278
+ }
2279
+ }
2280
+
2281
+ ndt_t *
2282
+ ndt_unsigned(int size, uint32_t flags, ndt_context_t *ctx)
2283
+ {
2284
+ switch (size) {
2285
+ case 1: return ndt_primitive(Uint8, flags, ctx);
2286
+ case 2: return ndt_primitive(Uint16, flags, ctx);
2287
+ case 4: return ndt_primitive(Uint32, flags, ctx);
2288
+ case 8: return ndt_primitive(Uint64, flags, ctx);
2289
+ default:
2290
+ ndt_err_format(ctx, NDT_ValueError,
2291
+ "invalid size for unsigned integer: '%d'", size);
2292
+ return NULL;
2293
+ }
2294
+ }
2295
+
2296
+ ndt_t *
2297
+ ndt_from_alias(enum ndt_alias tag, uint32_t flags, ndt_context_t *ctx)
2298
+ {
2299
+ switch (tag) {
2300
+ case Size: return ndt_unsigned(sizeof(size_t), flags, ctx);
2301
+ case Intptr: return ndt_signed(sizeof(intptr_t), flags, ctx);
2302
+ case Uintptr: return ndt_unsigned(sizeof(uintptr_t), flags, ctx);
2303
+ default:
2304
+ ndt_err_format(ctx, NDT_ValueError, "invalid alias tag");
2305
+ return NULL;
2306
+ }
2307
+ }
2308
+
2309
+ ndt_t *
2310
+ ndt_typevar(char *name, ndt_context_t *ctx)
2311
+ {
2312
+ ndt_t *t;
2313
+
2314
+ /* abstract type */
2315
+ t = ndt_new(Typevar, ctx);
2316
+ if (t == NULL) {
2317
+ ndt_free(name);
2318
+ return NULL;
2319
+ }
2320
+ t->Typevar.name = name;
2321
+
2322
+ return t;
2323
+ }