xnd 0.2.0dev3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. checksums.yaml +7 -0
  2. data/CONTRIBUTING.md +42 -0
  3. data/Gemfile +3 -0
  4. data/History.md +0 -0
  5. data/README.md +7 -0
  6. data/Rakefile +135 -0
  7. data/ext/ruby_xnd/extconf.rb +70 -0
  8. data/ext/ruby_xnd/float_pack_unpack.c +277 -0
  9. data/ext/ruby_xnd/float_pack_unpack.h +39 -0
  10. data/ext/ruby_xnd/gc_guard.c +36 -0
  11. data/ext/ruby_xnd/gc_guard.h +12 -0
  12. data/ext/ruby_xnd/include/xnd.h +449 -0
  13. data/ext/ruby_xnd/lib/libxnd.a +0 -0
  14. data/ext/ruby_xnd/lib/libxnd.so +1 -0
  15. data/ext/ruby_xnd/lib/libxnd.so.0 +1 -0
  16. data/ext/ruby_xnd/lib/libxnd.so.0.2.0dev3 +0 -0
  17. data/ext/ruby_xnd/memory_block_object.c +32 -0
  18. data/ext/ruby_xnd/memory_block_object.h +33 -0
  19. data/ext/ruby_xnd/ruby_xnd.c +1953 -0
  20. data/ext/ruby_xnd/ruby_xnd.h +61 -0
  21. data/ext/ruby_xnd/ruby_xnd_internal.h +85 -0
  22. data/ext/ruby_xnd/util.h +170 -0
  23. data/ext/ruby_xnd/xnd/AUTHORS.txt +5 -0
  24. data/ext/ruby_xnd/xnd/INSTALL.txt +134 -0
  25. data/ext/ruby_xnd/xnd/LICENSE.txt +29 -0
  26. data/ext/ruby_xnd/xnd/MANIFEST.in +3 -0
  27. data/ext/ruby_xnd/xnd/Makefile.in +80 -0
  28. data/ext/ruby_xnd/xnd/README.rst +44 -0
  29. data/ext/ruby_xnd/xnd/config.guess +1530 -0
  30. data/ext/ruby_xnd/xnd/config.h.in +22 -0
  31. data/ext/ruby_xnd/xnd/config.sub +1782 -0
  32. data/ext/ruby_xnd/xnd/configure +4867 -0
  33. data/ext/ruby_xnd/xnd/configure.ac +164 -0
  34. data/ext/ruby_xnd/xnd/doc/Makefile +14 -0
  35. data/ext/ruby_xnd/xnd/doc/_static/copybutton.js +66 -0
  36. data/ext/ruby_xnd/xnd/doc/conf.py +26 -0
  37. data/ext/ruby_xnd/xnd/doc/index.rst +44 -0
  38. data/ext/ruby_xnd/xnd/doc/libxnd/data-structures.rst +186 -0
  39. data/ext/ruby_xnd/xnd/doc/libxnd/functions.rst +148 -0
  40. data/ext/ruby_xnd/xnd/doc/libxnd/index.rst +25 -0
  41. data/ext/ruby_xnd/xnd/doc/releases/index.rst +34 -0
  42. data/ext/ruby_xnd/xnd/doc/xnd/align-pack.rst +96 -0
  43. data/ext/ruby_xnd/xnd/doc/xnd/buffer-protocol.rst +42 -0
  44. data/ext/ruby_xnd/xnd/doc/xnd/index.rst +30 -0
  45. data/ext/ruby_xnd/xnd/doc/xnd/quickstart.rst +62 -0
  46. data/ext/ruby_xnd/xnd/doc/xnd/types.rst +674 -0
  47. data/ext/ruby_xnd/xnd/install-sh +527 -0
  48. data/ext/ruby_xnd/xnd/libxnd/Makefile.in +102 -0
  49. data/ext/ruby_xnd/xnd/libxnd/Makefile.vc +112 -0
  50. data/ext/ruby_xnd/xnd/libxnd/bitmaps.c +345 -0
  51. data/ext/ruby_xnd/xnd/libxnd/contrib.h +313 -0
  52. data/ext/ruby_xnd/xnd/libxnd/copy.c +944 -0
  53. data/ext/ruby_xnd/xnd/libxnd/equal.c +1216 -0
  54. data/ext/ruby_xnd/xnd/libxnd/inline.h +154 -0
  55. data/ext/ruby_xnd/xnd/libxnd/overflow.h +147 -0
  56. data/ext/ruby_xnd/xnd/libxnd/split.c +286 -0
  57. data/ext/ruby_xnd/xnd/libxnd/tests/Makefile.in +39 -0
  58. data/ext/ruby_xnd/xnd/libxnd/tests/Makefile.vc +44 -0
  59. data/ext/ruby_xnd/xnd/libxnd/tests/README.txt +2 -0
  60. data/ext/ruby_xnd/xnd/libxnd/tests/runtest.c +101 -0
  61. data/ext/ruby_xnd/xnd/libxnd/tests/test.h +48 -0
  62. data/ext/ruby_xnd/xnd/libxnd/tests/test_fixed.c +108 -0
  63. data/ext/ruby_xnd/xnd/libxnd/xnd.c +1304 -0
  64. data/ext/ruby_xnd/xnd/libxnd/xnd.h +449 -0
  65. data/ext/ruby_xnd/xnd/python/test_xnd.py +3144 -0
  66. data/ext/ruby_xnd/xnd/python/xnd/__init__.py +290 -0
  67. data/ext/ruby_xnd/xnd/python/xnd/_xnd.c +2822 -0
  68. data/ext/ruby_xnd/xnd/python/xnd/contrib/pretty.py +850 -0
  69. data/ext/ruby_xnd/xnd/python/xnd/docstrings.h +129 -0
  70. data/ext/ruby_xnd/xnd/python/xnd/pyxnd.h +200 -0
  71. data/ext/ruby_xnd/xnd/python/xnd/util.h +182 -0
  72. data/ext/ruby_xnd/xnd/python/xnd_randvalue.py +1121 -0
  73. data/ext/ruby_xnd/xnd/python/xnd_support.py +106 -0
  74. data/ext/ruby_xnd/xnd/setup.py +303 -0
  75. data/ext/ruby_xnd/xnd/vcbuild/INSTALL.txt +42 -0
  76. data/ext/ruby_xnd/xnd/vcbuild/runtest32.bat +16 -0
  77. data/ext/ruby_xnd/xnd/vcbuild/runtest64.bat +14 -0
  78. data/ext/ruby_xnd/xnd/vcbuild/vcbuild32.bat +29 -0
  79. data/ext/ruby_xnd/xnd/vcbuild/vcbuild64.bat +29 -0
  80. data/ext/ruby_xnd/xnd/vcbuild/vcclean.bat +13 -0
  81. data/ext/ruby_xnd/xnd/vcbuild/vcdistclean.bat +14 -0
  82. data/lib/ruby_xnd.so +0 -0
  83. data/lib/xnd.rb +306 -0
  84. data/lib/xnd/monkeys.rb +29 -0
  85. data/lib/xnd/version.rb +6 -0
  86. data/spec/debug_spec.rb +9 -0
  87. data/spec/gc_guard_spec.rb +10 -0
  88. data/spec/leakcheck.rb +9 -0
  89. data/spec/spec_helper.rb +877 -0
  90. data/spec/type_inference_spec.rb +81 -0
  91. data/spec/xnd_spec.rb +2921 -0
  92. data/xnd.gemspec +47 -0
  93. metadata +215 -0
@@ -0,0 +1,154 @@
1
+ /*
2
+ * BSD 3-Clause License
3
+ *
4
+ * Copyright (c) 2017-2018, plures
5
+ * All rights reserved.
6
+ *
7
+ * Redistribution and use in source and binary forms, with or without
8
+ * modification, are permitted provided that the following conditions are met:
9
+ *
10
+ * 1. Redistributions of source code must retain the above copyright notice,
11
+ * this list of conditions and the following disclaimer.
12
+ *
13
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
14
+ * this list of conditions and the following disclaimer in the documentation
15
+ * and/or other materials provided with the distribution.
16
+ *
17
+ * 3. Neither the name of the copyright holder nor the names of its
18
+ * contributors may be used to endorse or promote products derived from
19
+ * this software without specific prior written permission.
20
+ *
21
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
+ */
32
+
33
+
34
+ #ifndef INLINE_H
35
+ #define INLINE_H
36
+
37
+
38
+ #include <stdlib.h>
39
+ #include <stdint.h>
40
+ #include <assert.h>
41
+ #include "ndtypes.h"
42
+ #include "xnd.h"
43
+
44
+
45
+ /*****************************************************************************/
46
+ /* Internal inline functions */
47
+ /*****************************************************************************/
48
+
49
+ /* Counterparts of the functions in xnd.h. These versions ignore the bitmaps. */
50
+
51
+ static inline xnd_t
52
+ _fixed_dim_next(const xnd_t *x, const int64_t i)
53
+ {
54
+ const ndt_t *t = x->type;
55
+ const ndt_t *u = t->FixedDim.type;
56
+ const int64_t step = i * t->Concrete.FixedDim.step;
57
+ xnd_t next;
58
+
59
+ next.bitmap = xnd_bitmap_empty;
60
+ next.index = x->index + step;
61
+ next.type = u;
62
+ next.ptr = u->ndim==0 ? x->ptr + next.index * next.type->datasize : x->ptr;
63
+
64
+ return next;
65
+ }
66
+
67
+ static inline xnd_t
68
+ _var_dim_next(const xnd_t *x, const int64_t start, const int64_t step,
69
+ const int64_t i)
70
+ {
71
+ const ndt_t *t = x->type;
72
+ const ndt_t *u = t->VarDim.type;
73
+ xnd_t next;
74
+
75
+ next.bitmap = xnd_bitmap_empty;
76
+ next.index = start + i * step;
77
+ next.type = u;
78
+ next.ptr = u->ndim==0 ? x->ptr + next.index * next.type->datasize : x->ptr;
79
+
80
+ return next;
81
+ }
82
+
83
+ static inline xnd_t
84
+ _tuple_next(const xnd_t *x, const int64_t i)
85
+ {
86
+ const ndt_t *t = x->type;
87
+ xnd_t next;
88
+
89
+ next.bitmap = xnd_bitmap_empty;
90
+ next.index = 0;
91
+ next.type = t->Tuple.types[i];
92
+ next.ptr = x->ptr + t->Concrete.Tuple.offset[i];
93
+
94
+ return next;
95
+ }
96
+
97
+ static inline xnd_t
98
+ _record_next(const xnd_t *x, const int64_t i)
99
+ {
100
+ const ndt_t *t = x->type;
101
+ xnd_t next;
102
+
103
+ next.bitmap = xnd_bitmap_empty;
104
+ next.index = 0;
105
+ next.type = t->Record.types[i];
106
+ next.ptr = x->ptr + t->Concrete.Record.offset[i];
107
+
108
+ return next;
109
+ }
110
+
111
+ static inline xnd_t
112
+ _ref_next(const xnd_t *x)
113
+ {
114
+ const ndt_t *t = x->type;
115
+ xnd_t next;
116
+
117
+ next.bitmap = xnd_bitmap_empty;
118
+ next.index = 0;
119
+ next.type = t->Ref.type;
120
+ next.ptr = XND_POINTER_DATA(x->ptr);
121
+
122
+ return next;
123
+ }
124
+
125
+ static inline xnd_t
126
+ _constr_next(const xnd_t *x)
127
+ {
128
+ const ndt_t *t = x->type;
129
+ xnd_t next;
130
+
131
+ next.bitmap = xnd_bitmap_empty;
132
+ next.index = 0;
133
+ next.type = t->Constr.type;
134
+ next.ptr = x->ptr;
135
+
136
+ return next;
137
+ }
138
+
139
+ static inline xnd_t
140
+ _nominal_next(const xnd_t *x)
141
+ {
142
+ const ndt_t *t = x->type;
143
+ xnd_t next;
144
+
145
+ next.bitmap = xnd_bitmap_empty;
146
+ next.index = 0;
147
+ next.type = t->Nominal.type;
148
+ next.ptr = x->ptr;
149
+
150
+ return next;
151
+ }
152
+
153
+
154
+ #endif /* INLINE_H */
@@ -0,0 +1,147 @@
1
+ /*
2
+ * BSD 3-Clause License
3
+ *
4
+ * Copyright (c) 2017-2018, plures
5
+ * All rights reserved.
6
+ *
7
+ * Redistribution and use in source and binary forms, with or without
8
+ * modification, are permitted provided that the following conditions are met:
9
+ *
10
+ * 1. Redistributions of source code must retain the above copyright notice,
11
+ * this list of conditions and the following disclaimer.
12
+ *
13
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
14
+ * this list of conditions and the following disclaimer in the documentation
15
+ * and/or other materials provided with the distribution.
16
+ *
17
+ * 3. Neither the name of the copyright holder nor the names of its
18
+ * contributors may be used to endorse or promote products derived from
19
+ * this software without specific prior written permission.
20
+ *
21
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
+ */
32
+
33
+
34
#include <stdint.h>
#include <stdbool.h>   /* bool is used by every checked-arithmetic helper below */
35
+
36
+
37
+ /*****************************************************************************/
38
+ /* Arithmetic with overflow checking */
39
+ /*****************************************************************************/
40
+
41
+ #if defined(__GNUC__) && __GNUC__ >= 5 && !defined(__INTEL_COMPILER)
42
/* Checked 64-bit addition: return a + b, OR-ing *overflow on signed wrap.
   The flag is sticky -- it is never cleared by these helpers. */
static inline int64_t
ADDi64(int64_t a, int64_t b, bool *overflow)
{
    int64_t sum;
    const bool wrapped = __builtin_add_overflow(a, b, &sum);

    *overflow |= wrapped;
    return sum;
}
49
+
50
/* Checked 64-bit subtraction: return a - b, OR-ing *overflow on signed wrap. */
static inline int64_t
SUBi64(int64_t a, int64_t b, bool *overflow)
{
    int64_t diff;
    const bool wrapped = __builtin_sub_overflow(a, b, &diff);

    *overflow |= wrapped;
    return diff;
}
57
+
58
/* Checked 64-bit multiplication: return a * b, OR-ing *overflow on wrap. */
static inline int64_t
MULi64(int64_t a, int64_t b, bool *overflow)
{
    int64_t prod;
    const bool wrapped = __builtin_mul_overflow(a, b, &prod);

    *overflow |= wrapped;
    return prod;
}
65
+
66
/* Checked multiplication whose result feeds a size_t allocation count. */
static inline size_t
MULi64_size(int64_t a, int64_t b, bool *overflow)
{
    int64_t prod;
    const bool wrapped = __builtin_mul_overflow(a, b, &prod);

    *overflow |= wrapped;
#if SIZE_MAX < INT64_MAX
    /* 32-bit size_t: bound the product by INT32_MAX (conservative).
       NOTE(review): a negative product is not flagged here and would
       convert to a huge size_t -- callers appear to pass non-negative
       operands; confirm. */
    *overflow |= (prod > INT32_MAX);
#endif
    return (size_t)prod;
}
76
+
77
/* Absolute value with overflow reporting: |INT64_MIN| is not representable
   in int64_t, so that single input sets the flag and is returned unchanged. */
static inline int64_t
ABSi64(int64_t a, bool *overflow)
{
    if (a == INT64_MIN) {
        *overflow = 1;
        return INT64_MIN;
    }
    return a < 0 ? -a : a;
}
86
+
87
/* Checked 16-bit unsigned addition: OR *overflow if a + b exceeds UINT16_MAX. */
static inline uint16_t
ADDu16(uint16_t a, uint16_t b, bool *overflow)
{
    uint16_t sum;
    const bool wrapped = __builtin_add_overflow(a, b, &sum);

    *overflow |= wrapped;
    return sum;
}
94
+ #else
95
/* Portable checked addition: wrap via unsigned arithmetic (well-defined),
   then detect signed overflow -- it occurred iff both operands share a
   sign and the result's sign differs. */
static inline int64_t
ADDi64(int64_t a, int64_t b, bool *overflow)
{
    const int64_t sum = (int64_t)((uint64_t)a + (uint64_t)b);
    const bool neg_wrap = (a < 0 && b < 0 && sum >= 0);
    const bool pos_wrap = (a >= 0 && b >= 0 && sum < 0);

    *overflow |= (neg_wrap || pos_wrap);
    return sum;
}
102
+
103
/* Portable checked subtraction: overflow iff the operands' signs differ
   and the result's sign matches the subtrahend's side. */
static inline int64_t
SUBi64(int64_t a, int64_t b, bool *overflow)
{
    const int64_t diff = (int64_t)((uint64_t)a - (uint64_t)b);
    const bool neg_wrap = (a < 0 && b >= 0 && diff >= 0);
    const bool pos_wrap = (a >= 0 && b < 0 && diff < 0);

    *overflow |= (neg_wrap || pos_wrap);
    return diff;
}
110
+
111
/* Portable checked multiplication: wrap via unsigned arithmetic, then
   verify by dividing back. Clause order matters: `b < 0 && a == INT64_MIN`
   catches the one case where prod / b would itself be INT64_MIN / -1. */
static inline int64_t
MULi64(int64_t a, int64_t b, bool *overflow)
{
    const int64_t prod = (int64_t)((uint64_t)a * (uint64_t)b);

    *overflow |= ((b < 0 && a == INT64_MIN) || (b != 0 && a != prod / b));
    return prod;
}
118
+
119
/* Portable checked multiplication whose result feeds a size_t count. */
static inline size_t
MULi64_size(int64_t a, int64_t b, bool *overflow)
{
    const int64_t prod = (int64_t)((uint64_t)a * (uint64_t)b);

    *overflow |= ((b < 0 && a == INT64_MIN) || (b != 0 && a != prod / b));
#if SIZE_MAX < INT64_MAX
    /* 32-bit size_t: bound the product by INT32_MAX (conservative).
       NOTE(review): a negative product is not flagged and would convert
       to a huge size_t -- presumably callers pass non-negative sizes. */
    *overflow |= (prod > INT32_MAX);
#endif
    return (size_t)prod;
}
129
+
130
/* Absolute value with overflow reporting: |INT64_MIN| is not representable
   in int64_t, so that single input sets the flag and is returned unchanged. */
static inline int64_t
ABSi64(int64_t a, bool *overflow)
{
    if (a == INT64_MIN) {
        *overflow = 1;
        return INT64_MIN;
    }
    return a < 0 ? -a : a;
}
139
+
140
/* Portable checked 16-bit unsigned addition: the sum wrapped iff the
   truncated result is smaller than either operand. */
static inline uint16_t
ADDu16(uint16_t a, uint16_t b, bool *overflow)
{
    const uint16_t sum = (uint16_t)(a + b);

    *overflow |= (sum < a);
    return sum;
}
147
+ #endif /* OVERFLOW_H */
@@ -0,0 +1,286 @@
1
+ /*
2
+ * BSD 3-Clause License
3
+ *
4
+ * Copyright (c) 2017-2018, plures
5
+ * All rights reserved.
6
+ *
7
+ * Redistribution and use in source and binary forms, with or without
8
+ * modification, are permitted provided that the following conditions are met:
9
+ *
10
+ * 1. Redistributions of source code must retain the above copyright notice,
11
+ * this list of conditions and the following disclaimer.
12
+ *
13
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
14
+ * this list of conditions and the following disclaimer in the documentation
15
+ * and/or other materials provided with the distribution.
16
+ *
17
+ * 3. Neither the name of the copyright holder nor the names of its
18
+ * contributors may be used to endorse or promote products derived from
19
+ * this software without specific prior written permission.
20
+ *
21
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
+ */
32
+
33
+
34
+ #include <stdlib.h>
35
+ #include <stdint.h>
36
+ #include <string.h>
37
+ #include <inttypes.h>
38
+ #include "ndtypes.h"
39
+ #include "xnd.h"
40
+ #include "overflow.h"
41
+
42
+
43
+ static const xnd_index_t init_slice =
44
+ { .tag = Slice,
45
+ .Slice = {.start = -1, .stop = -1, .step = -1}};
46
+
47
+
48
+ static int64_t column(
49
+ int64_t nrows, int64_t ncols,
50
+ xnd_index_t *indices, int *nindices,
51
+ int64_t row, int64_t col, int64_t r, int64_t q,
52
+ int64_t m, const int64_t *ms, int len);
53
+
54
+
55
+ static void
56
+ free_slices(xnd_t *lst, int64_t len)
57
+ {
58
+ for (int64_t i = 0; i < len; i++) {
59
+ ndt_del((ndt_t *)lst[i].type);
60
+ }
61
+
62
+ ndt_free(lst);
63
+ }
64
+
65
/* First index of chunk i when a length is split into chunks of size q,
   with the first r chunks holding one extra element. */
static inline int64_t
start(int64_t i, int64_t r, int64_t q)
{
    if (i < r) {
        return i * (q + 1);
    }
    return r + i * q;
}
70
+
71
/* One-past-the-end index of chunk i under the same chunking as start(). */
static inline int64_t
stop(int64_t i, int64_t r, int64_t q)
{
    if (i < r) {
        return (i + 1) * (q + 1);
    }
    return r + (i + 1) * q;
}
76
+
77
/* Size of chunk i: the first r chunks carry q+1 elements, the rest q. */
static inline int64_t
step(int64_t i, int64_t r, int64_t q)
{
    if (i < r) {
        return q + 1;
    }
    return q;
}
82
+
83
+ static inline xnd_index_t
84
+ single_step_slice(int64_t i)
85
+ {
86
+ xnd_index_t x;
87
+
88
+ x.tag = Slice;
89
+ x.Slice.start = i;
90
+ x.Slice.stop = i+1;
91
+ x.Slice.step = 1;
92
+
93
+ return x;
94
+ }
95
+
96
+ static inline xnd_index_t
97
+ slice(int64_t i, int64_t r, int64_t q)
98
+ {
99
+ xnd_index_t x;
100
+
101
+ x.tag = Slice;
102
+ x.Slice.start = start(i, r, q);
103
+ x.Slice.stop = stop(i, r, q);
104
+ x.Slice.step = 1;
105
+
106
+ return x;
107
+ }
108
+
109
+ static int64_t
110
+ prepend(int64_t nrows, int64_t ncols,
111
+ xnd_index_t *indices, int *nindices,
112
+ int64_t row, int64_t col, xnd_index_t s, int64_t n)
113
+ {
114
+ for (int64_t i = 0; i < n; i++) {
115
+ assert(row+i < nrows && col < ncols);
116
+ indices[(row+i)*ncols + col] = s;
117
+ nindices[row+i]++;
118
+ }
119
+
120
+ return n;
121
+ }
122
+
123
+ static int64_t
124
+ last_column(int64_t nrows, int64_t ncols,
125
+ xnd_index_t *indices, int *nindices,
126
+ int64_t row, int64_t col, int64_t r, int64_t q, int64_t n)
127
+ {
128
+ for (int64_t i = 0; i < n; i++) {
129
+ assert(row+i < nrows && col < ncols);
130
+ indices[(row+i)*ncols + col] = slice(i, r, q);
131
+ nindices[row+i]++;
132
+ }
133
+
134
+ return n;
135
+ }
136
+
137
/* Recursively plan how to divide 'n' parts over the remaining dimensions
   'shape[0..len-1]', writing slice plans into the (row, col) sub-grid of
   'indices' and per-row slice counts into 'nindices'.
   Returns the number of rows (parts) actually produced.
   Mutually recursive with column(). */
static int64_t
schedule(int64_t nrows, int64_t ncols,
         xnd_index_t *indices, int *nindices,
         int64_t row, int64_t col, int64_t n, const int64_t *shape, int len)
{
    int64_t m;
    int64_t q;
    int64_t r;

    if (len == 0) {
        /* No dimensions left to split: a single part. */
        return 1;
    }

    m = shape[0];
    if (n <= m) {
        /* Fewer parts than elements in this dimension: carve it into n
           contiguous ranges of q elements each, the first r ranges taking
           one extra element. */
        q = m / n;
        r = m % n;
        return last_column(nrows, ncols, indices, nindices, row, col, r, q, n);
    }
    else {
        /* More parts than elements: each element gets its own single-step
           slice and the surplus parts are distributed over the inner
           dimensions via column(). */
        q = n / m;
        r = n % m;
        return column(nrows, ncols, indices, nindices, row, col, r, q, m, shape+1, len-1);
    }
}
162
+
163
/* Fill one planning column when this dimension is fully expanded: element i
   gets the single-step slice [i, i+1), and step(i, r, q) parts are scheduled
   into the inner dimensions beneath it. The row cursor advances by the size
   of each finished block. Returns the total number of rows written.
   Mutually recursive with schedule(). */
static int64_t
column(int64_t nrows, int64_t ncols,
       xnd_index_t *indices, int *nindices,
       int64_t row, int64_t col, int64_t r, int64_t q, int64_t m,
       const int64_t *ms, int len)
{
    int64_t column_len = 0;
    int64_t n, subtree_len, block_len;
    xnd_index_t s;

    for (int64_t i = 0; i < m; i++) {
        /* Number of parts assigned to element i of this dimension. */
        n = step(i, r, q);
        s = single_step_slice(i);
        /* Plan the inner dimensions first, then prepend this element's
           slice to every row the subtree produced. */
        subtree_len = schedule(nrows, ncols, indices, nindices, row, col+1, n, ms, len);
        block_len = prepend(nrows, ncols, indices, nindices, row, col, s, subtree_len);
        row += block_len;
        column_len += block_len;
    }

    return column_len;
}
184
+
185
/* Copy up to max_outer outer fixed-dimension shapes of 't' into 'shape'.
   Fails if 't' is not a contiguous ndarray or any dimension (captured or
   not) has a non-positive shape. Returns the number of shapes written,
   or -1 with 'ctx' holding the error. */
static int
get_shape(int64_t *shape, const ndt_t *t, int max_outer, ndt_context_t *ctx)
{
    int i;

    if (!ndt_is_ndarray(t)) {
        ndt_err_format(ctx, NDT_ValueError,
            "split function called on non-ndarray");
        return -1;
    }

    /* Capture and validate the outer dimensions. */
    for (i = 0; i < max_outer && t->ndim > 0; i++, t=t->FixedDim.type) {
        shape[i] = t->FixedDim.shape;
        if (shape[i] <= 0) {
            ndt_err_format(ctx, NDT_ValueError,
                "split function called on invalid shape or shape with zeros");
            return -1;
        }
    }
    /* Validate (without capturing) any remaining inner dimensions. */
    for (; t->ndim > 0; t=t->FixedDim.type) {
        if (t->FixedDim.shape <= 0) {
            ndt_err_format(ctx, NDT_ValueError,
                "split function called on invalid shape or shape with zeros");
            return -1;
        }
    }

    return i;
}
214
+
215
/* Split 'x' into roughly equal parts along up to 'max_outer' outer fixed
   dimensions. On entry *nparts is the requested part count (>= 1); on
   success it is updated to the number of parts actually produced and an
   ndt_alloc'd array of that many views is returned (free with the caller's
   counterpart of free_slices). Returns NULL with 'ctx' set on error. */
xnd_t *
xnd_split(const xnd_t *x, int64_t *nparts, int max_outer, ndt_context_t *ctx)
{
    bool overflow = false;
    int64_t shape[NDT_MAX_DIM];
    xnd_index_t *indices;   /* nrows x ncols grid of planned slices */
    int *nindices;          /* number of slices used in each row */
    xnd_t *result;
    int64_t nrows, nmemb;
    int ncols;

    if (*nparts < 1) {
        ndt_err_format(ctx, NDT_ValueError, "'n' parameter must be >= 1");
        return NULL;
    }
    nrows = *nparts;

    /* ncols = number of outer dimensions that participate in the split. */
    ncols = get_shape(shape, x->type, max_outer, ctx);
    if (ncols < 0) {
        return NULL;
    }

    /* Grid element count, with overflow check (overflow.h helper). */
    nmemb = MULi64(nrows, ncols, &overflow);
    if (overflow) {
        ndt_err_format(ctx, NDT_ValueError, "'n' parameter is too large");
        return NULL;
    }

    indices = ndt_alloc(nmemb, sizeof *indices);
    if (indices == NULL) {
        return ndt_memory_error(ctx);
    }
    for (int64_t i = 0; i < nrows; i++) {
        for (int64_t k = 0; k < ncols; k++) {
            indices[i*ncols + k] = init_slice;
        }
    }

    nindices = ndt_alloc(nrows, sizeof *nindices);
    if (nindices == NULL) {
        ndt_free(indices);
        return ndt_memory_error(ctx);
    }
    for (int64_t i = 0; i < nrows; i++) {
        nindices[i] = 0;
    }

    /* Plan the slices; the planner may produce fewer rows than requested. */
    nrows = schedule(nrows, ncols, indices, nindices, 0, 0, nrows, shape, ncols);

    result = ndt_alloc(nrows, sizeof *result);
    if (result == NULL) {
        ndt_free(nindices);
        ndt_free(indices);
        return ndt_memory_error(ctx);
    }

    /* Materialize each planned row as a multikey view into 'x'. */
    for (int64_t i = 0; i < nrows; i++) {
        result[i] = xnd_multikey(x, indices+(i*ncols), nindices[i], ctx);
        if (ndt_err_occurred(ctx)) {
            /* Unwind: free scratch arrays and the i views built so far. */
            ndt_free(nindices);
            ndt_free(indices);
            free_slices(result, i);
            return NULL;
        }
    }

    ndt_free(nindices);
    ndt_free(indices);
    *nparts = nrows;

    return result;
}