xnd 0.2.0dev6 → 0.2.0dev7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +2 -0
- data/Rakefile +1 -1
- data/ext/ruby_xnd/GPATH +0 -0
- data/ext/ruby_xnd/GRTAGS +0 -0
- data/ext/ruby_xnd/GTAGS +0 -0
- data/ext/ruby_xnd/extconf.rb +8 -5
- data/ext/ruby_xnd/gc_guard.c +53 -2
- data/ext/ruby_xnd/gc_guard.h +8 -2
- data/ext/ruby_xnd/include/overflow.h +147 -0
- data/ext/ruby_xnd/include/ruby_xnd.h +62 -0
- data/ext/ruby_xnd/include/xnd.h +590 -0
- data/ext/ruby_xnd/lib/libxnd.a +0 -0
- data/ext/ruby_xnd/lib/libxnd.so +1 -0
- data/ext/ruby_xnd/lib/libxnd.so.0 +1 -0
- data/ext/ruby_xnd/lib/libxnd.so.0.2.0dev3 +0 -0
- data/ext/ruby_xnd/ruby_xnd.c +556 -47
- data/ext/ruby_xnd/ruby_xnd.h +2 -1
- data/ext/ruby_xnd/xnd/Makefile +80 -0
- data/ext/ruby_xnd/xnd/config.h +26 -0
- data/ext/ruby_xnd/xnd/config.h.in +3 -0
- data/ext/ruby_xnd/xnd/config.log +421 -0
- data/ext/ruby_xnd/xnd/config.status +1023 -0
- data/ext/ruby_xnd/xnd/configure +376 -8
- data/ext/ruby_xnd/xnd/configure.ac +48 -7
- data/ext/ruby_xnd/xnd/doc/xnd/index.rst +3 -1
- data/ext/ruby_xnd/xnd/doc/xnd/{types.rst → xnd.rst} +3 -18
- data/ext/ruby_xnd/xnd/libxnd/Makefile +142 -0
- data/ext/ruby_xnd/xnd/libxnd/Makefile.in +43 -3
- data/ext/ruby_xnd/xnd/libxnd/Makefile.vc +19 -3
- data/ext/ruby_xnd/xnd/libxnd/bitmaps.c +42 -3
- data/ext/ruby_xnd/xnd/libxnd/bitmaps.o +0 -0
- data/ext/ruby_xnd/xnd/libxnd/bounds.c +366 -0
- data/ext/ruby_xnd/xnd/libxnd/bounds.o +0 -0
- data/ext/ruby_xnd/xnd/libxnd/contrib.h +98 -0
- data/ext/ruby_xnd/xnd/libxnd/contrib/bfloat16.h +213 -0
- data/ext/ruby_xnd/xnd/libxnd/copy.c +155 -4
- data/ext/ruby_xnd/xnd/libxnd/copy.o +0 -0
- data/ext/ruby_xnd/xnd/libxnd/cuda/cuda_memory.cu +121 -0
- data/ext/ruby_xnd/xnd/libxnd/cuda/cuda_memory.h +58 -0
- data/ext/ruby_xnd/xnd/libxnd/equal.c +195 -7
- data/ext/ruby_xnd/xnd/libxnd/equal.o +0 -0
- data/ext/ruby_xnd/xnd/libxnd/inline.h +32 -0
- data/ext/ruby_xnd/xnd/libxnd/libxnd.a +0 -0
- data/ext/ruby_xnd/xnd/libxnd/libxnd.so +1 -0
- data/ext/ruby_xnd/xnd/libxnd/libxnd.so.0 +1 -0
- data/ext/ruby_xnd/xnd/libxnd/libxnd.so.0.2.0dev3 +0 -0
- data/ext/ruby_xnd/xnd/libxnd/shape.c +207 -0
- data/ext/ruby_xnd/xnd/libxnd/shape.o +0 -0
- data/ext/ruby_xnd/xnd/libxnd/split.c +2 -2
- data/ext/ruby_xnd/xnd/libxnd/split.o +0 -0
- data/ext/ruby_xnd/xnd/libxnd/tests/Makefile +39 -0
- data/ext/ruby_xnd/xnd/libxnd/xnd.c +613 -91
- data/ext/ruby_xnd/xnd/libxnd/xnd.h +145 -4
- data/ext/ruby_xnd/xnd/libxnd/xnd.o +0 -0
- data/ext/ruby_xnd/xnd/python/test_xnd.py +1125 -50
- data/ext/ruby_xnd/xnd/python/xnd/__init__.py +609 -124
- data/ext/ruby_xnd/xnd/python/xnd/_version.py +1 -0
- data/ext/ruby_xnd/xnd/python/xnd/_xnd.c +1652 -101
- data/ext/ruby_xnd/xnd/python/xnd/libxnd.a +0 -0
- data/ext/ruby_xnd/xnd/python/xnd/libxnd.so +1 -0
- data/ext/ruby_xnd/xnd/python/xnd/libxnd.so.0 +1 -0
- data/ext/ruby_xnd/xnd/python/xnd/libxnd.so.0.2.0dev3 +0 -0
- data/ext/ruby_xnd/xnd/python/xnd/pyxnd.h +1 -1
- data/ext/ruby_xnd/xnd/python/xnd/util.h +25 -0
- data/ext/ruby_xnd/xnd/python/xnd/xnd.h +590 -0
- data/ext/ruby_xnd/xnd/python/xnd_randvalue.py +106 -6
- data/ext/ruby_xnd/xnd/python/xnd_support.py +4 -0
- data/ext/ruby_xnd/xnd/setup.py +46 -4
- data/lib/ruby_xnd.so +0 -0
- data/lib/xnd.rb +39 -3
- data/lib/xnd/version.rb +2 -2
- data/xnd.gemspec +2 -1
- metadata +58 -5
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA1:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 88ae9320468c5a0d6129cbb1403f5c0cb4968bb3
|
4
|
+
data.tar.gz: 6ca5834d15f88f89a420ec841ad191855fa846d3
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 238cac8e9ce6e4aed41e57c55abaf68286781b642ed356ba6339eb2e262aae2c0081f2d6aabc63db104295452300eddb8daf20b9da90b512d1a5d055956bac4f
|
7
|
+
data.tar.gz: 4943e104993ca0910904feaab4c29b8d484faf3fe31c910e28980d59d9d7d69edcd45e2b6ad728a08fb5dc0669ac39f617c4877889510680f5e35d626d705917
|
data/README.md
CHANGED
data/Rakefile
CHANGED
@@ -94,7 +94,7 @@ task :develop do
|
|
94
94
|
Dir.mkdir(ext_xnd)
|
95
95
|
|
96
96
|
puts "cloning xnd repo into ext/ folder..."
|
97
|
-
system("git clone https://github.com/
|
97
|
+
system("git clone https://github.com/xnd-project/xnd #{ext_xnd}")
|
98
98
|
|
99
99
|
Dir.chdir(ext_xnd) do
|
100
100
|
system("git checkout #{RubyXND::COMMIT}")
|
data/ext/ruby_xnd/GPATH
ADDED
Binary file
|
data/ext/ruby_xnd/GRTAGS
ADDED
Binary file
|
data/ext/ruby_xnd/GTAGS
ADDED
Binary file
|
data/ext/ruby_xnd/extconf.rb
CHANGED
@@ -49,18 +49,21 @@ end
|
|
49
49
|
binaries = File.expand_path(File.join(File.dirname(__FILE__) + "/lib/"))
|
50
50
|
headers = File.expand_path(File.join(File.dirname(__FILE__) + "/include/"))
|
51
51
|
|
52
|
+
FileUtils.copy_file(File.expand_path(File.join(File.dirname(__FILE__) + "/ruby_xnd.h")),
|
53
|
+
"#{headers}/ruby_xnd.h")
|
54
|
+
FileUtils.copy_file(File.expand_path(File.join(File.dirname(__FILE__) + "/xnd/libxnd/overflow.h")),
|
55
|
+
"#{headers}/overflow.h")
|
56
|
+
|
52
57
|
find_library("xnd", nil, binaries)
|
53
58
|
find_header("xnd.h", headers)
|
54
|
-
|
55
|
-
FileUtils.copy_file File.expand_path(File.join(File.dirname(__FILE__) +
|
56
|
-
"/ruby_xnd.h")),
|
57
|
-
"#{headers}/ruby_xnd.h"
|
59
|
+
find_header("overflow.h", headers)
|
58
60
|
|
59
61
|
dir_config("xnd", [headers], [binaries])
|
60
62
|
|
61
63
|
$INSTALLFILES = [
|
62
64
|
["ruby_xnd.h", "$(archdir)"],
|
63
|
-
["xnd.h", "$(archdir)"]
|
65
|
+
["xnd.h", "$(archdir)"],
|
66
|
+
["overflow.h", "$(archdir)"]
|
64
67
|
]
|
65
68
|
|
66
69
|
# for macOS
|
data/ext/ruby_xnd/gc_guard.c
CHANGED
@@ -3,20 +3,48 @@
|
|
3
3
|
#include "ruby_xnd_internal.h"
|
4
4
|
|
5
5
|
#define GC_GUARD_TABLE_NAME "@__gc_guard_table"
|
6
|
+
#define GC_GUARD_MBLOCK "@__gc_guard_mblock"
|
7
|
+
#define GC_GUARD_TYPE "@__gc_guard_type"
|
6
8
|
|
7
9
|
static ID id_gc_guard_table;
|
10
|
+
static ID id_gc_guard_mblock;
|
11
|
+
static ID id_gc_guard_type;
|
8
12
|
|
9
13
|
/* Unregister an NDT object-rbuf pair from the GC guard. */
|
10
14
|
void
|
11
|
-
|
15
|
+
rb_xnd_gc_guard_unregister_xnd_mblock(XndObject *xnd)
|
12
16
|
{
|
13
17
|
VALUE table = rb_ivar_get(mRubyXND_GCGuard, id_gc_guard_table);
|
14
18
|
rb_hash_delete(table, PTR2NUM(xnd));
|
15
19
|
}
|
16
20
|
|
21
|
+
void
|
22
|
+
rb_xnd_gc_guard_unregister_xnd_type(XndObject *xnd)
|
23
|
+
{
|
24
|
+
VALUE table = rb_ivar_get(mRubyXND_GCGuard, id_gc_guard_type);
|
25
|
+
rb_hash_delete(table, PTR2NUM(xnd));
|
26
|
+
}
|
27
|
+
|
28
|
+
void
|
29
|
+
rb_xnd_gc_guard_unregister_mblock_type(MemoryBlockObject *mblock)
|
30
|
+
{
|
31
|
+
VALUE table = rb_ivar_get(mRubyXND_GCGuard, id_gc_guard_mblock);
|
32
|
+
rb_hash_delete(table, PTR2NUM(mblock));
|
33
|
+
}
|
34
|
+
|
35
|
+
void rb_xnd_gc_guard_register_xnd_type(XndObject *xnd, VALUE type)
|
36
|
+
{
|
37
|
+
VALUE table = rb_ivar_get(mRubyXND_GCGuard, id_gc_guard_type);
|
38
|
+
if (table == Qnil) {
|
39
|
+
rb_raise(rb_eLoadError, "GC guard not initialized.");
|
40
|
+
}
|
41
|
+
|
42
|
+
rb_hash_aset(table, PTR2NUM(xnd), type);
|
43
|
+
}
|
44
|
+
|
17
45
|
/* Register a XND-mblock pair in the GC guard. */
|
18
46
|
void
|
19
|
-
|
47
|
+
rb_xnd_gc_guard_register_xnd_mblock(XndObject *xnd, VALUE mblock)
|
20
48
|
{
|
21
49
|
VALUE table = rb_ivar_get(mRubyXND_GCGuard, id_gc_guard_table);
|
22
50
|
if (table == Qnil) {
|
@@ -26,11 +54,34 @@ rb_xnd_gc_guard_register(XndObject *xnd, VALUE mblock)
|
|
26
54
|
rb_hash_aset(table, PTR2NUM(xnd), mblock);
|
27
55
|
}
|
28
56
|
|
57
|
+
void
|
58
|
+
rb_xnd_gc_guard_unregsiter_mblock(MemoryBlockObject *mblock)
|
59
|
+
{
|
60
|
+
VALUE table = rb_ivar_get(mRubyXND_GCGuard, id_gc_guard_mblock);
|
61
|
+
rb_hash_delete(table, PTR2NUM(mblock));
|
62
|
+
}
|
63
|
+
|
64
|
+
void
|
65
|
+
rb_xnd_gc_guard_register_mblock_type(MemoryBlockObject *mblock, VALUE type)
|
66
|
+
{
|
67
|
+
VALUE table = rb_ivar_get(mRubyXND_GCGuard, id_gc_guard_mblock);
|
68
|
+
if (table == Qnil) {
|
69
|
+
rb_raise(rb_eLoadError, "Mblock guard not initialized.");
|
70
|
+
}
|
71
|
+
rb_hash_aset(table, PTR2NUM(mblock), type);
|
72
|
+
}
|
73
|
+
|
29
74
|
/* Initialize the global GC guard table. klass is a VALUE reprensenting NDTypes class. */
|
30
75
|
void
|
31
76
|
rb_xnd_init_gc_guard(void)
|
32
77
|
{
|
33
78
|
id_gc_guard_table = rb_intern(GC_GUARD_TABLE_NAME);
|
34
79
|
rb_ivar_set(mRubyXND_GCGuard, id_gc_guard_table, rb_hash_new());
|
80
|
+
|
81
|
+
id_gc_guard_mblock = rb_intern(GC_GUARD_MBLOCK);
|
82
|
+
rb_ivar_set(mRubyXND_GCGuard, id_gc_guard_mblock, rb_hash_new());
|
83
|
+
|
84
|
+
id_gc_guard_type = rb_intern(GC_GUARD_TYPE);
|
85
|
+
rb_ivar_set(mRubyXND_GCGuard, id_gc_guard_type, rb_hash_new());
|
35
86
|
}
|
36
87
|
|
data/ext/ruby_xnd/gc_guard.h
CHANGED
@@ -5,8 +5,14 @@
|
|
5
5
|
|
6
6
|
#include "ruby_xnd_internal.h"
|
7
7
|
|
8
|
-
void
|
9
|
-
void
|
8
|
+
void rb_xnd_gc_guard_register_xnd_mblock(XndObject *xnd, VALUE mblock);
|
9
|
+
void rb_xnd_gc_guard_register_xnd_type(XndObject *xnd, VALUE type);
|
10
|
+
void rb_xnd_gc_guard_register_mblock_type(MemoryBlockObject *mblock, VALUE type);
|
11
|
+
|
12
|
+
void rb_xnd_gc_guard_unregister_xnd_mblock(XndObject *mblock);
|
13
|
+
void rb_xnd_gc_guard_unregister_xnd_type(XndObject *mblock);
|
14
|
+
void rb_xnd_gc_guard_unregister_mblock_type(MemoryBlockObject *mblock);
|
15
|
+
|
10
16
|
void rb_xnd_init_gc_guard(void);
|
11
17
|
|
12
18
|
#endif /* GC_GUARD_H */
|
@@ -0,0 +1,147 @@
|
|
1
|
+
/*
|
2
|
+
* BSD 3-Clause License
|
3
|
+
*
|
4
|
+
* Copyright (c) 2017-2018, plures
|
5
|
+
* All rights reserved.
|
6
|
+
*
|
7
|
+
* Redistribution and use in source and binary forms, with or without
|
8
|
+
* modification, are permitted provided that the following conditions are met:
|
9
|
+
*
|
10
|
+
* 1. Redistributions of source code must retain the above copyright notice,
|
11
|
+
* this list of conditions and the following disclaimer.
|
12
|
+
*
|
13
|
+
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
14
|
+
* this list of conditions and the following disclaimer in the documentation
|
15
|
+
* and/or other materials provided with the distribution.
|
16
|
+
*
|
17
|
+
* 3. Neither the name of the copyright holder nor the names of its
|
18
|
+
* contributors may be used to endorse or promote products derived from
|
19
|
+
* this software without specific prior written permission.
|
20
|
+
*
|
21
|
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
22
|
+
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
23
|
+
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
24
|
+
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
25
|
+
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
26
|
+
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
27
|
+
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
28
|
+
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
29
|
+
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
30
|
+
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
31
|
+
*/
|
32
|
+
|
33
|
+
|
34
|
+
#include <stdint.h>
|
35
|
+
|
36
|
+
|
37
|
+
/*****************************************************************************/
|
38
|
+
/* Arithmetic with overflow checking */
|
39
|
+
/*****************************************************************************/
|
40
|
+
|
41
|
+
#if defined(__GNUC__) && __GNUC__ >= 5 && !defined(__INTEL_COMPILER)
|
42
|
+
static inline int64_t
|
43
|
+
ADDi64(int64_t a, int64_t b, bool *overflow)
|
44
|
+
{
|
45
|
+
int64_t c;
|
46
|
+
*overflow |= __builtin_add_overflow(a, b, &c);
|
47
|
+
return c;
|
48
|
+
}
|
49
|
+
|
50
|
+
static inline int64_t
|
51
|
+
SUBi64(int64_t a, int64_t b, bool *overflow)
|
52
|
+
{
|
53
|
+
int64_t c;
|
54
|
+
*overflow |= __builtin_sub_overflow(a, b, &c);
|
55
|
+
return c;
|
56
|
+
}
|
57
|
+
|
58
|
+
static inline int64_t
|
59
|
+
MULi64(int64_t a, int64_t b, bool *overflow)
|
60
|
+
{
|
61
|
+
int64_t c;
|
62
|
+
*overflow |= __builtin_mul_overflow(a, b, &c);
|
63
|
+
return c;
|
64
|
+
}
|
65
|
+
|
66
|
+
static inline size_t
|
67
|
+
MULi64_size(int64_t a, int64_t b, bool *overflow)
|
68
|
+
{
|
69
|
+
int64_t c;
|
70
|
+
*overflow |= __builtin_mul_overflow(a, b, &c);
|
71
|
+
#if SIZE_MAX < INT64_MAX
|
72
|
+
*overflow |= (c > INT32_MAX);
|
73
|
+
#endif
|
74
|
+
return (size_t)c;
|
75
|
+
}
|
76
|
+
|
77
|
+
static inline int64_t
|
78
|
+
ABSi64(int64_t a, bool *overflow)
|
79
|
+
{
|
80
|
+
if (a == INT64_MIN) {
|
81
|
+
*overflow = 1;
|
82
|
+
return INT64_MIN;
|
83
|
+
}
|
84
|
+
return a >= 0 ? a : -a;
|
85
|
+
}
|
86
|
+
|
87
|
+
static inline uint16_t
|
88
|
+
ADDu16(uint16_t a, uint16_t b, bool *overflow)
|
89
|
+
{
|
90
|
+
uint16_t c;
|
91
|
+
*overflow |= __builtin_add_overflow(a, b, &c);
|
92
|
+
return c;
|
93
|
+
}
|
94
|
+
#else
|
95
|
+
static inline int64_t
|
96
|
+
ADDi64(int64_t a, int64_t b, bool *overflow)
|
97
|
+
{
|
98
|
+
int64_t c = (uint64_t)a + (uint64_t)b;
|
99
|
+
*overflow |= ((a < 0 && b < 0 && c >= 0) || (a >= 0 && b >= 0 && c < 0));
|
100
|
+
return c;
|
101
|
+
}
|
102
|
+
|
103
|
+
static inline int64_t
|
104
|
+
SUBi64(int64_t a, int64_t b, bool *overflow)
|
105
|
+
{
|
106
|
+
int64_t c = (uint64_t)a - (uint64_t)b;
|
107
|
+
*overflow |= ((a < 0 && b >= 0 && c >= 0) || (a >= 0 && b < 0 && c < 0));
|
108
|
+
return c;
|
109
|
+
}
|
110
|
+
|
111
|
+
static inline int64_t
|
112
|
+
MULi64(int64_t a, int64_t b, bool *overflow)
|
113
|
+
{
|
114
|
+
int64_t c = (uint64_t)a * (uint64_t)b;
|
115
|
+
*overflow |= ((b < 0 && a == INT64_MIN) || (b != 0 && a != c / b));
|
116
|
+
return c;
|
117
|
+
}
|
118
|
+
|
119
|
+
static inline size_t
|
120
|
+
MULi64_size(int64_t a, int64_t b, bool *overflow)
|
121
|
+
{
|
122
|
+
int64_t c = (uint64_t)a * (uint64_t)b;
|
123
|
+
*overflow |= ((b < 0 && a == INT64_MIN) || (b != 0 && a != c / b));
|
124
|
+
#if SIZE_MAX < INT64_MAX
|
125
|
+
*overflow |= (c > INT32_MAX);
|
126
|
+
#endif
|
127
|
+
return (size_t)c;
|
128
|
+
}
|
129
|
+
|
130
|
+
static inline int64_t
|
131
|
+
ABSi64(int64_t a, bool *overflow)
|
132
|
+
{
|
133
|
+
if (a == INT64_MIN) {
|
134
|
+
*overflow = 1;
|
135
|
+
return INT64_MIN;
|
136
|
+
}
|
137
|
+
return a >= 0 ? a : -a;
|
138
|
+
}
|
139
|
+
|
140
|
+
static inline uint16_t
|
141
|
+
ADDu16(uint16_t a, uint16_t b, bool *overflow)
|
142
|
+
{
|
143
|
+
uint16_t c = a + b;
|
144
|
+
*overflow |= (c < a);
|
145
|
+
return c;
|
146
|
+
}
|
147
|
+
#endif /* OVERFLOW_H */
|
@@ -0,0 +1,62 @@
|
|
1
|
+
/* BSD 3-Clause License
|
2
|
+
*
|
3
|
+
* Copyright (c) 2018, Quansight and Sameer Deshmukh
|
4
|
+
* All rights reserved.
|
5
|
+
*
|
6
|
+
* Redistribution and use in source and binary forms, with or without
|
7
|
+
* modification, are permitted provided that the following conditions are met:
|
8
|
+
*
|
9
|
+
* * Redistributions of source code must retain the above copyright notice, this
|
10
|
+
* list of conditions and the following disclaimer.
|
11
|
+
*
|
12
|
+
* * Redistributions in binary form must reproduce the above copyright notice,
|
13
|
+
* this list of conditions and the following disclaimer in the documentation
|
14
|
+
* and/or other materials provided with the distribution.
|
15
|
+
*
|
16
|
+
* * Neither the name of the copyright holder nor the names of its
|
17
|
+
* contributors may be used to endorse or promote products derived from
|
18
|
+
* this software without specific prior written permission.
|
19
|
+
*
|
20
|
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
21
|
+
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
22
|
+
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
23
|
+
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
24
|
+
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
25
|
+
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
26
|
+
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
27
|
+
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
28
|
+
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29
|
+
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30
|
+
*/
|
31
|
+
|
32
|
+
/* File containing headers for Ruby XND wrapper.
|
33
|
+
*
|
34
|
+
* Author: Sameer Deshmukh (@v0dro)
|
35
|
+
*/
|
36
|
+
#ifndef RUBY_XND_H
|
37
|
+
#define RUBY_XND_H
|
38
|
+
|
39
|
+
#ifdef __cplusplus
|
40
|
+
extern "C" {
|
41
|
+
#endif
|
42
|
+
|
43
|
+
#include "ruby.h"
|
44
|
+
#include "ndtypes.h"
|
45
|
+
#include "xnd.h"
|
46
|
+
#include "overflow.h"
|
47
|
+
|
48
|
+
size_t rb_xnd_hash_size(VALUE hash);
|
49
|
+
int rb_xnd_get_complex_values(VALUE comp, double *real, double *imag);
|
50
|
+
/* Return true if obj is of type XND. */
|
51
|
+
int rb_xnd_check_type(VALUE obj);
|
52
|
+
const xnd_t * rb_xnd_const_xnd(VALUE xnd);
|
53
|
+
VALUE rb_xnd_empty_from_type(const ndt_t *t, uint32_t flags);
|
54
|
+
VALUE rb_xnd_from_xnd(xnd_t *x);
|
55
|
+
|
56
|
+
typedef struct XndObject XndObject;
|
57
|
+
|
58
|
+
#ifdef __cplusplus
|
59
|
+
}
|
60
|
+
#endif
|
61
|
+
|
62
|
+
#endif /* RUBY_XND_H */
|
@@ -0,0 +1,590 @@
|
|
1
|
+
/*
|
2
|
+
* BSD 3-Clause License
|
3
|
+
*
|
4
|
+
* Copyright (c) 2017-2018, plures
|
5
|
+
* All rights reserved.
|
6
|
+
*
|
7
|
+
* Redistribution and use in source and binary forms, with or without
|
8
|
+
* modification, are permitted provided that the following conditions are met:
|
9
|
+
*
|
10
|
+
* 1. Redistributions of source code must retain the above copyright notice,
|
11
|
+
* this list of conditions and the following disclaimer.
|
12
|
+
*
|
13
|
+
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
14
|
+
* this list of conditions and the following disclaimer in the documentation
|
15
|
+
* and/or other materials provided with the distribution.
|
16
|
+
*
|
17
|
+
* 3. Neither the name of the copyright holder nor the names of its
|
18
|
+
* contributors may be used to endorse or promote products derived from
|
19
|
+
* this software without specific prior written permission.
|
20
|
+
*
|
21
|
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
22
|
+
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
23
|
+
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
24
|
+
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
25
|
+
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
26
|
+
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
27
|
+
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
28
|
+
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
29
|
+
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
30
|
+
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
31
|
+
*/
|
32
|
+
|
33
|
+
|
34
|
+
#ifndef XND_H
|
35
|
+
#define XND_H
|
36
|
+
|
37
|
+
|
38
|
+
#ifdef __cplusplus
|
39
|
+
extern "C" {
|
40
|
+
#endif
|
41
|
+
|
42
|
+
#include <stdlib.h>
|
43
|
+
#include <stdint.h>
|
44
|
+
#include <string.h>
|
45
|
+
#include <assert.h>
|
46
|
+
#include "ndtypes.h"
|
47
|
+
|
48
|
+
#ifdef __cplusplus
|
49
|
+
#include <cstdint>
|
50
|
+
#else
|
51
|
+
#include <stdint.h>
|
52
|
+
#include <inttypes.h>
|
53
|
+
#endif
|
54
|
+
|
55
|
+
|
56
|
+
#ifdef _MSC_VER
|
57
|
+
#if defined (XND_EXPORT)
|
58
|
+
#define XND_API __declspec(dllexport)
|
59
|
+
#elif defined(XND_IMPORT)
|
60
|
+
#define XND_API __declspec(dllimport)
|
61
|
+
#else
|
62
|
+
#define XND_API
|
63
|
+
#endif
|
64
|
+
|
65
|
+
#include "malloc.h"
|
66
|
+
#define ALLOCA(type, name, nmemb) type *name = _alloca(nmemb * sizeof(type))
|
67
|
+
#else
|
68
|
+
#define XND_API
|
69
|
+
|
70
|
+
#define ALLOCA(type, name, nmemb) type name[nmemb]
|
71
|
+
#endif
|
72
|
+
|
73
|
+
|
74
|
+
#if SIZE_MAX == UINT64_MAX
|
75
|
+
#define XND_SSIZE_MAX INT64_MAX
|
76
|
+
#elif SIZE_MAX == UINT32_MAX
|
77
|
+
#define XND_SSIZE_MAX INT32_MAX
|
78
|
+
#else
|
79
|
+
#error "unsupported platform: need 32-bit or 64-bit size_t"
|
80
|
+
#endif
|
81
|
+
|
82
|
+
|
83
|
+
/*
|
84
|
+
* Ownership flags: The library itself has no notion of how many exported
|
85
|
+
* views a master buffer has. The Python bindings for example use Pythons's
|
86
|
+
* reference counting to to keep track of exported memory blocks.
|
87
|
+
*/
|
88
|
+
#define XND_OWN_TYPE 0x00000001U /* type pointer */
|
89
|
+
#define XND_OWN_DATA 0x00000002U /* data pointer */
|
90
|
+
#define XND_OWN_STRINGS 0x00000004U /* embedded string pointers */
|
91
|
+
#define XND_OWN_BYTES 0x00000008U /* embedded bytes pointers */
|
92
|
+
#define XND_OWN_ARRAYS 0x00000010U /* embedded array pointers */
|
93
|
+
#define XND_OWN_POINTERS 0x00000020U /* embedded pointers */
|
94
|
+
#define XND_CUDA_MANAGED 0x00000040U /* cuda managed memory */
|
95
|
+
|
96
|
+
#define XND_OWN_ALL (XND_OWN_TYPE | \
|
97
|
+
XND_OWN_DATA | \
|
98
|
+
XND_OWN_STRINGS | \
|
99
|
+
XND_OWN_BYTES | \
|
100
|
+
XND_OWN_ARRAYS | \
|
101
|
+
XND_OWN_POINTERS)
|
102
|
+
|
103
|
+
#define XND_OWN_EMBEDDED (XND_OWN_DATA | \
|
104
|
+
XND_OWN_STRINGS | \
|
105
|
+
XND_OWN_BYTES | \
|
106
|
+
XND_OWN_ARRAYS | \
|
107
|
+
XND_OWN_POINTERS)
|
108
|
+
|
109
|
+
|
110
|
+
/* Convenience macros to extract embedded values. */
|
111
|
+
#define XND_POINTER_DATA(ptr) (*((char **)ptr))
|
112
|
+
#define XND_STRING_DATA(ptr) ((*((const char **)ptr)) == NULL ? "" : (*((const char **)ptr)))
|
113
|
+
#define XND_BYTES_SIZE(ptr) (((ndt_bytes_t *)ptr)->size)
|
114
|
+
#define XND_BYTES_DATA(ptr) (((ndt_bytes_t *)ptr)->data)
|
115
|
+
#define XND_ARRAY_SHAPE(ptr) (((ndt_array_t *)ptr)->shape)
|
116
|
+
#define XND_ARRAY_DATA(ptr) (((ndt_array_t *)ptr)->data)
|
117
|
+
#define XND_UNION_TAG(ptr) (*((uint8_t *)ptr))
|
118
|
+
|
119
|
+
|
120
|
+
/* Bitmap tree. */
|
121
|
+
typedef struct xnd_bitmap xnd_bitmap_t;
|
122
|
+
|
123
|
+
struct xnd_bitmap {
|
124
|
+
uint8_t *data; /* bitmap */
|
125
|
+
int64_t size; /* number of subtree bitmaps in the "next" array */
|
126
|
+
xnd_bitmap_t *next; /* array of bitmaps for subtrees */
|
127
|
+
};
|
128
|
+
|
129
|
+
/* Typed memory block, usually a view. */
|
130
|
+
typedef struct xnd {
|
131
|
+
xnd_bitmap_t bitmap; /* bitmap tree */
|
132
|
+
int64_t index; /* linear index for var dims */
|
133
|
+
const ndt_t *type; /* type of the data */
|
134
|
+
char *ptr; /* data */
|
135
|
+
} xnd_t;
|
136
|
+
|
137
|
+
/* Master memory block. */
|
138
|
+
typedef struct xnd_master {
|
139
|
+
uint32_t flags; /* ownership flags */
|
140
|
+
xnd_t master; /* typed memory */
|
141
|
+
} xnd_master_t;
|
142
|
+
|
143
|
+
/* Used in indexing and slicing. */
|
144
|
+
enum xnd_key { Index, FieldName, Slice };
|
145
|
+
typedef struct {
|
146
|
+
enum xnd_key tag;
|
147
|
+
union {
|
148
|
+
int64_t Index;
|
149
|
+
const char *FieldName;
|
150
|
+
ndt_slice_t Slice;
|
151
|
+
};
|
152
|
+
} xnd_index_t;
|
153
|
+
|
154
|
+
|
155
|
+
/* Unstable API: view with ownership tracking. */
|
156
|
+
typedef struct xnd_view {
|
157
|
+
uint32_t flags; /* flags that indicate resource ownership by the view */
|
158
|
+
const void *obj; /* object that holds shared resources */
|
159
|
+
xnd_t view; /* typed memory */
|
160
|
+
} xnd_view_t;
|
161
|
+
|
162
|
+
|
163
|
+
/*****************************************************************************/
|
164
|
+
/* Create xnd memory blocks */
|
165
|
+
/*****************************************************************************/
|
166
|
+
|
167
|
+
XND_API xnd_master_t *xnd_empty_from_string(const char *s, uint32_t flags, ndt_context_t *ctx);
|
168
|
+
XND_API xnd_master_t *xnd_empty_from_type(const ndt_t *t, uint32_t flags, ndt_context_t *ctx);
|
169
|
+
XND_API void xnd_clear(xnd_t * const x, const uint32_t flags);
|
170
|
+
XND_API void xnd_del(xnd_master_t *x);
|
171
|
+
|
172
|
+
/* Create and delete pristine xnd_t buffers. */
|
173
|
+
XND_API xnd_master_t *xnd_from_xnd(xnd_t *src, uint32_t flags, ndt_context_t *ctx);
|
174
|
+
XND_API void xnd_del_buffer(xnd_t *x, uint32_t flags);
|
175
|
+
|
176
|
+
|
177
|
+
/*****************************************************************************/
|
178
|
+
/* Traverse xnd memory blocks */
|
179
|
+
/*****************************************************************************/
|
180
|
+
|
181
|
+
XND_API bool have_stored_index(const ndt_t *t);
|
182
|
+
XND_API int64_t get_stored_index(const ndt_t *t);
|
183
|
+
XND_API xnd_t apply_stored_index(const xnd_t *x, ndt_context_t *ctx);
|
184
|
+
XND_API xnd_t apply_stored_indices(const xnd_t *x, ndt_context_t *ctx);
|
185
|
+
|
186
|
+
XND_API xnd_t xnd_subtree_index(const xnd_t *x, const int64_t *indices, int len,
|
187
|
+
ndt_context_t *ctx);
|
188
|
+
|
189
|
+
XND_API xnd_t xnd_subtree(const xnd_t *x, const xnd_index_t indices[], int len,
|
190
|
+
ndt_context_t *ctx);
|
191
|
+
|
192
|
+
XND_API xnd_t xnd_subscript(const xnd_t *x, const xnd_index_t indices[], int len,
|
193
|
+
ndt_context_t *ctx);
|
194
|
+
|
195
|
+
XND_API xnd_t xnd_reshape(const xnd_t *x, int64_t shape[], int ndim, char order, ndt_context_t *ctx);
|
196
|
+
|
197
|
+
XND_API xnd_t *xnd_split(const xnd_t *x, int64_t *n, int max_outer, ndt_context_t *ctx);
|
198
|
+
|
199
|
+
XND_API int xnd_equal(const xnd_t *x, const xnd_t *y, ndt_context_t *ctx);
|
200
|
+
XND_API int xnd_strict_equal(const xnd_t *x, const xnd_t *y, ndt_context_t *ctx);
|
201
|
+
|
202
|
+
XND_API int xnd_copy(xnd_t *y, const xnd_t *x, uint32_t flags, ndt_context_t *ctx);
|
203
|
+
|
204
|
+
|
205
|
+
/*****************************************************************************/
|
206
|
+
/* Bounds checking */
|
207
|
+
/*****************************************************************************/
|
208
|
+
|
209
|
+
XND_API int xnd_bounds_check(const ndt_t *t, const int64_t linear_index,
|
210
|
+
const int64_t bufsize, ndt_context_t *ctx);
|
211
|
+
|
212
|
+
|
213
|
+
/*****************************************************************************/
|
214
|
+
/* Bitmaps */
|
215
|
+
/*****************************************************************************/
|
216
|
+
|
217
|
+
XND_API int xnd_bitmap_init(xnd_bitmap_t *b, const ndt_t *t, ndt_context_t *ctx);
|
218
|
+
XND_API void xnd_bitmap_clear(xnd_bitmap_t *b);
|
219
|
+
XND_API xnd_bitmap_t xnd_bitmap_next(const xnd_t *x, int64_t i, ndt_context_t *ctx);
|
220
|
+
XND_API void xnd_set_valid(xnd_t *x);
|
221
|
+
XND_API void xnd_set_na(xnd_t *x);
|
222
|
+
XND_API int xnd_is_valid(const xnd_t *x);
|
223
|
+
XND_API int xnd_is_na(const xnd_t *x);
|
224
|
+
|
225
|
+
|
226
|
+
/*****************************************************************************/
|
227
|
+
/* Error handling */
|
228
|
+
/*****************************************************************************/
|
229
|
+
|
230
|
+
XND_API extern const xnd_t xnd_error;
|
231
|
+
XND_API extern const xnd_bitmap_t xnd_bitmap_empty;
|
232
|
+
|
233
|
+
XND_API int xnd_err_occurred(const xnd_t *x);
|
234
|
+
|
235
|
+
|
236
|
+
/*****************************************************************************/
|
237
|
+
/* Unstable API */
|
238
|
+
/*****************************************************************************/
|
239
|
+
|
240
|
+
XND_API extern const xnd_view_t xnd_view_error;
|
241
|
+
|
242
|
+
XND_API int xnd_view_err_occurred(const xnd_view_t *x);
|
243
|
+
XND_API void xnd_view_clear(xnd_view_t *x);
|
244
|
+
XND_API xnd_view_t xnd_view_from_xnd(const void *obj, const xnd_t *x);
|
245
|
+
XND_API xnd_view_t xnd_view_subscript(const xnd_view_t *x, const xnd_index_t indices[],
|
246
|
+
int len, ndt_context_t *ctx);
|
247
|
+
|
248
|
+
|
249
|
+
|
250
|
+
/*****************************************************************************/
|
251
|
+
/* Float format */
|
252
|
+
/*****************************************************************************/
|
253
|
+
|
254
|
+
XND_API int xnd_init_float(ndt_context_t *ctx);
|
255
|
+
XND_API bool xnd_float_is_little_endian(void);
|
256
|
+
XND_API bool xnd_float_is_big_endian(void);
|
257
|
+
XND_API bool xnd_double_is_little_endian(void);
|
258
|
+
XND_API bool xnd_double_is_big_endian(void);
|
259
|
+
|
260
|
+
|
261
|
+
/*****************************************************************************/
|
262
|
+
/* BFloat16 */
|
263
|
+
/*****************************************************************************/
|
264
|
+
|
265
|
+
XND_API void xnd_bfloat_pack(char *p, double x);
|
266
|
+
XND_API double xnd_bfloat_unpack(char *p);
|
267
|
+
|
268
|
+
|
269
|
+
/*****************************************************************************/
|
270
|
+
/* Cuda */
|
271
|
+
/*****************************************************************************/
|
272
|
+
|
273
|
+
void *xnd_cuda_calloc_managed(uint16_t align, int64_t size, ndt_context_t *ctx);
|
274
|
+
void xnd_cuda_free(void *ptr);
|
275
|
+
int xnd_cuda_mem_prefetch_async(const void *ptr, int64_t count, int dev, ndt_context_t *ctx);
|
276
|
+
int xnd_cuda_device_synchronize(ndt_context_t *ctx);
|
277
|
+
|
278
|
+
|
279
|
+
/*****************************************************************************/
|
280
|
+
/* Static inline functions */
|
281
|
+
/*****************************************************************************/
|
282
|
+
|
283
|
+
/* Check index bounds and adjust negative indices. */
|
284
|
+
static inline int64_t
|
285
|
+
adjust_index(const int64_t i, const int64_t shape, ndt_context_t *ctx)
|
286
|
+
{
|
287
|
+
const int64_t k = i < 0 ? i + shape : i;
|
288
|
+
|
289
|
+
if (k < 0 || k >= shape || k > XND_SSIZE_MAX) {
|
290
|
+
ndt_err_format(ctx, NDT_IndexError,
|
291
|
+
"index with value %" PRIi64 " out of bounds", i);
|
292
|
+
return -1;
|
293
|
+
}
|
294
|
+
|
295
|
+
return k;
|
296
|
+
}
|
297
|
+
|
298
|
+
/*
|
299
|
+
* This looks inefficient, but both gcc and clang clean up unused xnd_t members.
|
300
|
+
*/
|
301
|
+
static inline int64_t
|
302
|
+
xnd_ndim(const xnd_t *x)
|
303
|
+
{
|
304
|
+
return x->type->ndim;
|
305
|
+
}
|
306
|
+
|
307
|
+
static inline xnd_t
|
308
|
+
xnd_fixed_dim_next(const xnd_t *x, const int64_t i)
|
309
|
+
{
|
310
|
+
const ndt_t *t = x->type;
|
311
|
+
const ndt_t *u = t->FixedDim.type;
|
312
|
+
const int64_t step = i * t->Concrete.FixedDim.step;
|
313
|
+
xnd_t next;
|
314
|
+
|
315
|
+
assert(t->tag == FixedDim);
|
316
|
+
|
317
|
+
next.bitmap = x->bitmap;
|
318
|
+
next.index = x->index + step;
|
319
|
+
next.type = u;
|
320
|
+
next.ptr = u->ndim==0 ? x->ptr + next.index * next.type->datasize : x->ptr;
|
321
|
+
|
322
|
+
return next;
|
323
|
+
}
|
324
|
+
|
325
|
+
static inline int64_t
|
326
|
+
xnd_fixed_shape(const xnd_t *x)
|
327
|
+
{
|
328
|
+
const ndt_t *t = x->type;
|
329
|
+
assert(t->tag == FixedDim);
|
330
|
+
return t->FixedDim.shape;
|
331
|
+
}
|
332
|
+
|
333
|
+
static inline int64_t
|
334
|
+
xnd_fixed_step(const xnd_t *x)
|
335
|
+
{
|
336
|
+
const ndt_t *t = x->type;
|
337
|
+
assert(t->tag == FixedDim);
|
338
|
+
return t->Concrete.FixedDim.step;
|
339
|
+
}
|
340
|
+
|
341
|
+
static inline int64_t
|
342
|
+
xnd_fixed_shape_at(const xnd_t *x, const int i)
|
343
|
+
{
|
344
|
+
const ndt_t *t = x->type;
|
345
|
+
|
346
|
+
assert(0 <= i && i < t->ndim);
|
347
|
+
assert(t->tag == FixedDim);
|
348
|
+
|
349
|
+
for (int k = 0; k < i; k++) {
|
350
|
+
t = t->FixedDim.type;
|
351
|
+
}
|
352
|
+
return t->FixedDim.shape;
|
353
|
+
}
|
354
|
+
|
355
|
+
static inline int64_t
|
356
|
+
xnd_fixed_stride(const xnd_t *x)
|
357
|
+
{
|
358
|
+
const ndt_t *t = x->type;
|
359
|
+
assert(t->tag == FixedDim);
|
360
|
+
return t->Concrete.FixedDim.step * t->Concrete.FixedDim.itemsize;
|
361
|
+
}
|
362
|
+
|
363
|
+
static inline char *
|
364
|
+
xnd_fixed_apply_index(const xnd_t *x)
|
365
|
+
{
|
366
|
+
assert(x->type->tag == FixedDim);
|
367
|
+
return x->ptr + x->index * x->type->Concrete.FixedDim.itemsize;
|
368
|
+
}
|
369
|
+
|
370
|
+
static inline xnd_t
|
371
|
+
xnd_var_dim_next(const xnd_t *x, const int64_t start, const int64_t step,
|
372
|
+
const int64_t i)
|
373
|
+
{
|
374
|
+
const ndt_t *t = x->type;
|
375
|
+
const ndt_t *u = t->VarDim.type;
|
376
|
+
xnd_t next;
|
377
|
+
|
378
|
+
next.bitmap = x->bitmap;
|
379
|
+
next.index = start + i * step;
|
380
|
+
next.type = u;
|
381
|
+
next.ptr = u->ndim==0 ? x->ptr + next.index * next.type->datasize : x->ptr;
|
382
|
+
|
383
|
+
return next;
|
384
|
+
}
|
385
|
+
|
386
|
+
static inline xnd_t
|
387
|
+
xnd_tuple_next(const xnd_t *x, const int64_t i, ndt_context_t *ctx)
|
388
|
+
{
|
389
|
+
const ndt_t *t = x->type;
|
390
|
+
xnd_t next;
|
391
|
+
|
392
|
+
next.bitmap = xnd_bitmap_next(x, i, ctx);
|
393
|
+
if (ndt_err_occurred(ctx)) {
|
394
|
+
return xnd_error;
|
395
|
+
}
|
396
|
+
|
397
|
+
next.index = 0;
|
398
|
+
next.type = t->Tuple.types[i];
|
399
|
+
next.ptr = x->ptr + t->Concrete.Tuple.offset[i];
|
400
|
+
|
401
|
+
return next;
|
402
|
+
}
|
403
|
+
|
404
|
+
static inline xnd_t
|
405
|
+
xnd_record_next(const xnd_t *x, const int64_t i, ndt_context_t *ctx)
|
406
|
+
{
|
407
|
+
const ndt_t *t = x->type;
|
408
|
+
xnd_t next;
|
409
|
+
|
410
|
+
next.bitmap = xnd_bitmap_next(x, i, ctx);
|
411
|
+
if (ndt_err_occurred(ctx)) {
|
412
|
+
return xnd_error;
|
413
|
+
}
|
414
|
+
|
415
|
+
next.index = 0;
|
416
|
+
next.type = t->Record.types[i];
|
417
|
+
next.ptr = x->ptr + t->Concrete.Record.offset[i];
|
418
|
+
|
419
|
+
return next;
|
420
|
+
}
|
421
|
+
|
422
|
+
static inline xnd_t
|
423
|
+
xnd_union_next(const xnd_t *x, ndt_context_t *ctx)
|
424
|
+
{
|
425
|
+
uint8_t i = XND_UNION_TAG(x->ptr);
|
426
|
+
const ndt_t *t = x->type;
|
427
|
+
xnd_t next;
|
428
|
+
|
429
|
+
next.bitmap = xnd_bitmap_next(x, i, ctx);
|
430
|
+
if (ndt_err_occurred(ctx)) {
|
431
|
+
return xnd_error;
|
432
|
+
}
|
433
|
+
|
434
|
+
next.index = 0;
|
435
|
+
next.type = t->Union.types[i];
|
436
|
+
next.ptr = x->ptr+1;
|
437
|
+
|
438
|
+
return next;
|
439
|
+
}
|
440
|
+
|
441
|
+
static inline xnd_t
|
442
|
+
xnd_ref_next(const xnd_t *x, ndt_context_t *ctx)
|
443
|
+
{
|
444
|
+
const ndt_t *t = x->type;
|
445
|
+
xnd_t next;
|
446
|
+
|
447
|
+
next.bitmap = xnd_bitmap_next(x, 0, ctx);
|
448
|
+
if (ndt_err_occurred(ctx)) {
|
449
|
+
return xnd_error;
|
450
|
+
}
|
451
|
+
|
452
|
+
next.index = 0;
|
453
|
+
next.type = t->Ref.type;
|
454
|
+
next.ptr = XND_POINTER_DATA(x->ptr);
|
455
|
+
|
456
|
+
return next;
|
457
|
+
}
|
458
|
+
|
459
|
+
static inline xnd_t
|
460
|
+
xnd_constr_next(const xnd_t *x, ndt_context_t *ctx)
|
461
|
+
{
|
462
|
+
const ndt_t *t = x->type;
|
463
|
+
xnd_t next;
|
464
|
+
|
465
|
+
next.bitmap = xnd_bitmap_next(x, 0, ctx);
|
466
|
+
if (ndt_err_occurred(ctx)) {
|
467
|
+
return xnd_error;
|
468
|
+
}
|
469
|
+
|
470
|
+
next.index = 0;
|
471
|
+
next.type = t->Constr.type;
|
472
|
+
next.ptr = x->ptr;
|
473
|
+
|
474
|
+
return next;
|
475
|
+
}
|
476
|
+
|
477
|
+
static inline xnd_t
|
478
|
+
xnd_nominal_next(const xnd_t *x, ndt_context_t *ctx)
|
479
|
+
{
|
480
|
+
const ndt_t *t = x->type;
|
481
|
+
xnd_t next;
|
482
|
+
|
483
|
+
next.bitmap = xnd_bitmap_next(x, 0, ctx);
|
484
|
+
if (ndt_err_occurred(ctx)) {
|
485
|
+
return xnd_error;
|
486
|
+
}
|
487
|
+
|
488
|
+
next.index = 0;
|
489
|
+
next.type = t->Nominal.type;
|
490
|
+
next.ptr = x->ptr;
|
491
|
+
|
492
|
+
return next;
|
493
|
+
}
|
494
|
+
|
495
|
+
static inline xnd_t
|
496
|
+
xnd_array_next(const xnd_t *x, const int64_t i)
|
497
|
+
{
|
498
|
+
const ndt_t *t = x->type;
|
499
|
+
const ndt_t *u = t->Array.type;
|
500
|
+
xnd_t next;
|
501
|
+
|
502
|
+
assert(t->tag == Array);
|
503
|
+
|
504
|
+
next.bitmap = x->bitmap;
|
505
|
+
next.index = 0;
|
506
|
+
next.type = u;
|
507
|
+
next.ptr = XND_ARRAY_DATA(x->ptr) + i * next.type->datasize;
|
508
|
+
|
509
|
+
return next;
|
510
|
+
}
|
511
|
+
|
512
|
+
/* XND_REV_COND: the endianness flag that is the OPPOSITE of the host byte
   order.  Data carrying this flag must be byte-reversed when copied
   to/from native representation (see bcopy_swap below). */
#if NDT_SYS_BIG_ENDIAN == 1
#define XND_REV_COND NDT_LITTLE_ENDIAN
#else
#define XND_REV_COND NDT_BIG_ENDIAN
#endif
|
517
|
+
|
518
|
+
/* Copy 'size' bytes from 'src' to 'dest' in reversed byte order.
   The regions must not overlap. */
static inline void
memcpy_rev(char *dest, const char *src, size_t size)
{
    const char *p = src + size;

    for (size_t n = 0; n < size; n++) {
        dest[n] = *--p;
    }
}
|
527
|
+
|
528
|
+
static inline void
|
529
|
+
bcopy_swap(char *dest, const char *src, size_t size, uint32_t flags)
|
530
|
+
{
|
531
|
+
if (flags & XND_REV_COND) {
|
532
|
+
memcpy_rev(dest, src, size);
|
533
|
+
}
|
534
|
+
else {
|
535
|
+
memcpy(dest, src, size);
|
536
|
+
}
|
537
|
+
}
|
538
|
+
|
539
|
+
/* Return nonzero if 'flags' select little-endian data.  On a big-endian
   host the flag must be explicit; on a little-endian host the absence of
   an explicit big-endian flag means little-endian. */
static inline int
le(uint32_t flags)
{
#if NDT_SYS_BIG_ENDIAN == 1
    return flags & NDT_LITTLE_ENDIAN;
#else
    return !(flags & NDT_BIG_ENDIAN);
#endif
}
|
548
|
+
|
549
|
+
|
550
|
+
/* Store 'src' (converted to 'type') into the possibly unaligned buffer
   'ptr', honoring the byte-order 'flags' (see bcopy_swap). */
#define PACK_SINGLE(ptr, src, type, flags) \
    do {                                                     \
        type _x;                                             \
        _x = (type)src;                                      \
        bcopy_swap(ptr, (const char *)&_x, sizeof _x, flags); \
    } while (0)
|
556
|
+
|
557
|
+
/* Load a value of 'type' from the possibly unaligned buffer 'ptr' into
   'dest', honoring the byte-order 'flags' (see bcopy_swap). */
#define UNPACK_SINGLE(dest, ptr, type, flags) \
    do {                                               \
        type _x;                                       \
        bcopy_swap((char *)&_x, ptr, sizeof _x, flags); \
        dest = _x;                                     \
    } while (0)
|
563
|
+
|
564
|
+
/* If x's type carries stored indices, materialize them and rebind 'x' to
   the resulting tail view.  Declares a local xnd_t in the enclosing scope;
   on error returns -1 from the enclosing function (int return variant). */
#define APPLY_STORED_INDICES_INT(x) \
    xnd_t _##x##tail; \
    if (have_stored_index(x->type)) { \
        _##x##tail = apply_stored_indices(x, ctx); \
        if (xnd_err_occurred(&_##x##tail)) { \
            return -1; \
        } \
        x = &_##x##tail; \
    }
|
573
|
+
|
574
|
+
/* Same as APPLY_STORED_INDICES_INT, but for enclosing functions that
   return xnd_t: on error returns xnd_error instead of -1. */
#define APPLY_STORED_INDICES_XND(x) \
    xnd_t _##x##tail; \
    if (have_stored_index(x->type)) { \
        _##x##tail = apply_stored_indices(x, ctx); \
        if (xnd_err_occurred(&_##x##tail)) { \
            return xnd_error; \
        } \
        x = &_##x##tail; \
    }
|
583
|
+
|
584
|
+
|
585
|
+
#ifdef __cplusplus
|
586
|
+
} /* END extern "C" */
|
587
|
+
#endif
|
588
|
+
|
589
|
+
|
590
|
+
#endif /* XND_H */
|