onnxruntime 0.9.2 → 0.9.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +10 -0
- data/README.md +2 -0
- data/lib/onnxruntime/ffi.rb +6 -2
- data/lib/onnxruntime/inference_session.rb +31 -307
- data/lib/onnxruntime/ort_value.rb +278 -0
- data/lib/onnxruntime/utils.rb +131 -5
- data/lib/onnxruntime/version.rb +1 -1
- data/lib/onnxruntime.rb +1 -0
- data/vendor/ThirdPartyNotices.txt +35 -461
- data/vendor/libonnxruntime.arm64.dylib +0 -0
- data/vendor/libonnxruntime.arm64.so +0 -0
- data/vendor/libonnxruntime.dylib +0 -0
- data/vendor/libonnxruntime.so +0 -0
- data/vendor/onnxruntime.dll +0 -0
- metadata +4 -7
|
@@ -0,0 +1,278 @@
|
|
|
1
|
+
module OnnxRuntime
  # Wraps a native ONNX Runtime OrtValue (tensor, sequence, or map).
  # Instances own the underlying native value and release it via a GC
  # finalizer registered on the pointer object.
  class OrtValue
    # ptr - an ::FFI::MemoryPointer holding a pointer to the native OrtValue
    # ref - optional object kept alive alongside this value (e.g. the raw
    #       input buffer for tensors created with CreateTensorWithDataAsOrtValue,
    #       which does not copy the data)
    def initialize(ptr, ref = nil)
      @ptr = ptr.read_pointer
      @ref = ref # keep reference to data
      ObjectSpace.define_finalizer(@ptr, self.class.finalize(@ptr.to_i))
    end

    # Builds an OrtValue from a Numo::NArray, mapping the Numo class to an
    # ONNX element type. Numo::Bit is special-cased to :bool.
    # NOTE(review): Utils.numo_types maps both :uint8 and :bool to
    # Numo::UInt8, so invert resolves Numo::UInt8 to the later key — confirm
    # this mapping is intended for UInt8 inputs.
    def self.from_numo(numo_obj)
      element_type = numo_obj.is_a?(Numo::Bit) ? :bool : Utils.numo_types.invert[numo_obj.class]
      Utils.unsupported_type("Numo", numo_obj.class.name) unless element_type

      from_array(numo_obj, element_type: element_type)
    end

    # Builds an OrtValue tensor from a (possibly nested) Ruby array or a
    # Numo array. element_type must be a key of FFI::TensorElementDataType.
    def self.from_array(input, element_type:)
      type_enum = FFI::TensorElementDataType[element_type]
      Utils.unsupported_type("element", element_type) unless type_enum

      # coerce scalars/enumerables; arrays and Numo arrays pass through
      input = input.to_a unless input.is_a?(Array) || Utils.numo_array?(input)

      shape = Utils.input_shape(input)
      input_node_dims = ::FFI::MemoryPointer.new(:int64, shape.size)
      input_node_dims.write_array_of_int64(shape)

      ptr = ::FFI::MemoryPointer.new(:pointer)
      if element_type == :string
        # string tensors are allocator-owned and filled element-by-element
        # keep reference to _str_ptrs until FillStringTensor call
        input_tensor_values, _str_ptrs = create_input_strings(input)
        Utils.check_status FFI.api[:CreateTensorAsOrtValue].call(Utils.allocator.read_pointer, input_node_dims, shape.size, type_enum, ptr)
        Utils.check_status FFI.api[:FillStringTensor].call(ptr.read_pointer, input_tensor_values, input_tensor_values.size / input_tensor_values.type_size)
      else
        # numeric tensors wrap the caller-provided buffer without copying,
        # so the buffer is passed to `new` below to keep it alive
        input_tensor_values = create_input_data(input, element_type)
        Utils.check_status FFI.api[:CreateTensorWithDataAsOrtValue].call(allocator_info.read_pointer, input_tensor_values, input_tensor_values.size, input_node_dims, shape.size, type_enum, ptr)
      end

      new(ptr, input_tensor_values)
    end

    # Allocates an uninitialized tensor of the given shape and element type.
    def self.from_shape_and_type(shape, element_type)
      type_enum = FFI::TensorElementDataType[element_type]
      Utils.unsupported_type("element", element_type) unless type_enum

      input_node_dims = ::FFI::MemoryPointer.new(:int64, shape.size)
      input_node_dims.write_array_of_int64(shape)

      ptr = ::FFI::MemoryPointer.new(:pointer)
      Utils.check_status FFI.api[:CreateTensorAsOrtValue].call(Utils.allocator.read_pointer, input_node_dims, shape.size, type_enum, ptr)

      new(ptr)
    end

    # Serializes input into a flat native buffer (or binary string for Numo)
    # matching tensor_type. Booleans are stored as uint8 0/1.
    def self.create_input_data(input, tensor_type)
      if Utils.numo_array?(input)
        input.cast_to(Utils.numo_types[tensor_type]).to_binary
      else
        flat_input = input.flatten.to_a
        input_tensor_values = ::FFI::MemoryPointer.new(tensor_type, flat_input.size)
        if tensor_type == :bool
          input_tensor_values.write_array_of_uint8(flat_input.map { |v| v ? 1 : 0 })
        else
          input_tensor_values.send("write_array_of_#{tensor_type}", flat_input)
        end
        input_tensor_values
      end
    end
    private_class_method :create_input_data

    # Builds an array-of-C-strings buffer for FillStringTensor.
    # Returns [pointer_array, string_pointers]; the caller must keep the
    # string pointers alive until FillStringTensor has copied them.
    def self.create_input_strings(input)
      str_ptrs =
        if Utils.numo_array?(input)
          input.size.times.map { |i| ::FFI::MemoryPointer.from_string(input[i]) }
        else
          input.flatten.map { |v| ::FFI::MemoryPointer.from_string(v) }
        end

      input_tensor_values = ::FFI::MemoryPointer.new(:pointer, str_ptrs.size)
      input_tensor_values.write_array_of_pointer(str_ptrs)
      [input_tensor_values, str_ptrs]
    end
    private_class_method :create_input_strings

    # Whether this value is a tensor (as opposed to a sequence or map).
    def tensor?
      FFI::OnnxType[value_type] == :tensor
    end

    # Human-readable ONNX type string, e.g. "tensor(float)". Memoized.
    def data_type
      @data_type ||= begin
        typeinfo = ::FFI::MemoryPointer.new(:pointer)
        Utils.check_status FFI.api[:GetTypeInfo].call(@ptr, typeinfo)
        Utils.node_info(typeinfo)[:type]
      end
    end

    # Element type symbol, e.g. :float (tensors only).
    def element_type
      FFI::TensorElementDataType[type_and_shape_info[0]]
    end

    # Tensor dimensions (tensors only).
    def shape
      type_and_shape_info[1]
    end

    # Only CPU tensors are produced by this class.
    def device_name
      "cpu"
    end

    # Converts the value to a Numo::NArray.
    def numo
      create_from_onnx_value(@ptr, :numo)
    end

    # Converts the value to plain Ruby objects (nested arrays/hashes).
    def to_ruby
      create_from_onnx_value(@ptr, :ruby)
    end

    # Raw pointer to the native OrtValue.
    def to_ptr
      @ptr
    end

    # Pointer to the tensor's mutable data buffer.
    # NOTE(review): the status returned by GetTensorMutableData is ignored
    # here — confirm whether it should go through Utils.check_status.
    def data_ptr
      tensor_data = ::FFI::MemoryPointer.new(:pointer)
      FFI.api[:GetTensorMutableData].call(@ptr, tensor_data)
      tensor_data.read_pointer
    end

    private

    # Raw ONNX type enum value for this OrtValue. Memoized.
    def value_type
      @value_type ||= begin
        out_type = ::FFI::MemoryPointer.new(:int)
        Utils.check_status FFI.api[:GetValueType].call(@ptr, out_type)
        out_type.read_int
      end
    end

    # [element_type_int, dims] for tensor values. Memoized; the native
    # TensorTypeAndShapeInfo is released as soon as it has been read.
    def type_and_shape_info
      @type_and_shape_info ||= begin
        begin
          typeinfo = ::FFI::MemoryPointer.new(:pointer)
          Utils.check_status FFI.api[:GetTensorTypeAndShape].call(@ptr, typeinfo)
          Utils.tensor_type_and_shape(typeinfo)
        ensure
          Utils.release :TensorTypeAndShapeInfo, typeinfo
        end
      end
    end

    # Recursively converts a native OrtValue into either Numo arrays
    # (output_type == :numo) or plain Ruby objects (output_type == :ruby).
    def create_from_onnx_value(out_ptr, output_type)
      out_type = ::FFI::MemoryPointer.new(:int)
      Utils.check_status FFI.api[:GetValueType].call(out_ptr, out_type)
      type = FFI::OnnxType[out_type.read_int]

      case type
      when :tensor
        typeinfo = ::FFI::MemoryPointer.new(:pointer)
        Utils.check_status FFI.api[:GetTensorTypeAndShape].call(out_ptr, typeinfo)

        type, shape = Utils.tensor_type_and_shape(typeinfo)

        tensor_data = ::FFI::MemoryPointer.new(:pointer)
        Utils.check_status FFI.api[:GetTensorMutableData].call(out_ptr, tensor_data)

        out_size = ::FFI::MemoryPointer.new(:size_t)
        Utils.check_status FFI.api[:GetTensorShapeElementCount].call(typeinfo.read_pointer, out_size)
        output_tensor_size = out_size.read(:size_t)

        Utils.release :TensorTypeAndShapeInfo, typeinfo

        # TODO support more types
        type = FFI::TensorElementDataType[type]

        case output_type
        when :numo
          case type
          when :string
            result = Numo::RObject.new(shape)
            result.allocate
            create_strings_from_onnx_value(out_ptr, output_tensor_size, result)
          else
            numo_type = Utils.numo_types[type]
            Utils.unsupported_type("element", type) unless numo_type
            numo_type.from_binary(tensor_data.read_pointer.read_bytes(output_tensor_size * numo_type::ELEMENT_BYTE_SIZE), shape)
          end
        when :ruby
          arr =
            case type
            when :float, :uint8, :int8, :uint16, :int16, :int32, :int64, :double, :uint32, :uint64
              tensor_data.read_pointer.send("read_array_of_#{type}", output_tensor_size)
            when :bool
              # bools are stored as uint8 0/1
              tensor_data.read_pointer.read_array_of_uint8(output_tensor_size).map { |v| v == 1 }
            when :string
              create_strings_from_onnx_value(out_ptr, output_tensor_size, [])
            else
              Utils.unsupported_type("element", type)
            end

          reshape(arr, shape)
        else
          raise ArgumentError, "Invalid output type: #{output_type}"
        end
      when :sequence
        out = ::FFI::MemoryPointer.new(:size_t)
        Utils.check_status FFI.api[:GetValueCount].call(out_ptr, out)

        out.read(:size_t).times.map do |i|
          seq = ::FFI::MemoryPointer.new(:pointer)
          Utils.check_status FFI.api[:GetValue].call(out_ptr, i, Utils.allocator.read_pointer, seq)
          create_from_onnx_value(seq.read_pointer, output_type)
        end
      when :map
        type_shape = ::FFI::MemoryPointer.new(:pointer)
        map_keys = ::FFI::MemoryPointer.new(:pointer)
        map_values = ::FFI::MemoryPointer.new(:pointer)
        elem_type = ::FFI::MemoryPointer.new(:int)

        # maps expose keys as value 0 and values as value 1
        Utils.check_status FFI.api[:GetValue].call(out_ptr, 0, Utils.allocator.read_pointer, map_keys)
        Utils.check_status FFI.api[:GetValue].call(out_ptr, 1, Utils.allocator.read_pointer, map_values)
        Utils.check_status FFI.api[:GetTensorTypeAndShape].call(map_keys.read_pointer, type_shape)
        Utils.check_status FFI.api[:GetTensorElementType].call(type_shape.read_pointer, elem_type)
        Utils.release :TensorTypeAndShapeInfo, type_shape

        # TODO support more types
        elem_type = FFI::TensorElementDataType[elem_type.read_int]
        case elem_type
        when :int64
          ret = {}
          keys = create_from_onnx_value(map_keys.read_pointer, output_type)
          values = create_from_onnx_value(map_values.read_pointer, output_type)
          keys.zip(values).each do |k, v|
            ret[k] = v
          end
          ret
        else
          Utils.unsupported_type("element", elem_type)
        end
      else
        Utils.unsupported_type("ONNX", type)
      end
    end

    # Reads all strings out of a string tensor into `result`, which may be
    # an Array or a Numo::RObject (anything supporting []=). Returns result.
    def create_strings_from_onnx_value(out_ptr, output_tensor_size, result)
      len = ::FFI::MemoryPointer.new(:size_t)
      Utils.check_status FFI.api[:GetStringTensorDataLength].call(out_ptr, len)

      s_len = len.read(:size_t)
      s = ::FFI::MemoryPointer.new(:uchar, s_len)
      offsets = ::FFI::MemoryPointer.new(:size_t, output_tensor_size)
      Utils.check_status FFI.api[:GetStringTensorContent].call(out_ptr, s, s_len, offsets, output_tensor_size)

      # offsets mark each string's start; append total length to bound the last one
      offsets = output_tensor_size.times.map { |i| offsets[i].read(:size_t) }
      offsets << s_len
      output_tensor_size.times do |i|
        result[i] = s.get_bytes(offsets[i], offsets[i + 1] - offsets[i])
      end
      result
    end

    # Nests a flat array into the given dimensions, e.g.
    # reshape([1,2,3,4], [2,2]) => [[1,2],[3,4]].
    def reshape(arr, dims)
      arr = arr.flatten
      dims[1..-1].reverse_each do |dim|
        arr = arr.each_slice(dim)
      end
      arr.to_a
    end

    # Returns a finalizer that releases the native OrtValue at addr.
    def self.finalize(addr)
      # must use proc instead of stabby lambda
      proc { FFI.api[:ReleaseValue].call(::FFI::Pointer.new(:pointer, addr)) }
    end

    # Memoized CPU memory info used by CreateTensorWithDataAsOrtValue.
    def self.allocator_info
      @allocator_info ||= begin
        allocator_info = ::FFI::MemoryPointer.new(:pointer)
        Utils.check_status FFI.api[:CreateCpuMemoryInfo].call(1, 0, allocator_info)
        allocator_info
      end
    end
  end
end
|
data/lib/onnxruntime/utils.rb
CHANGED
|
@@ -5,12 +5,138 @@ module OnnxRuntime
|
|
|
5
5
|
end
|
|
6
6
|
self.mutex = Mutex.new
|
|
7
7
|
|
|
8
|
-
def self.
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
8
|
+
# Raises OnnxRuntime::Error when the given OrtStatus pointer is non-null,
# copying out the native error message and releasing the status first.
# A null status means success and returns nil.
def self.check_status(status)
  unless status.null?
    message = api[:GetErrorMessage].call(status).read_string
    api[:ReleaseStatus].call(status)
    raise Error, message
  end
end
|
|
15
|
+
|
|
16
|
+
# Shorthand for the ONNX Runtime C API function table (OnnxRuntime::FFI.api).
def self.api
  FFI.api
end
|
|
19
|
+
|
|
20
|
+
# Releases a native object via the matching Release<Type> C API function,
# e.g. release(:TypeInfo, ptr) calls ReleaseTypeInfo. Safe to call with
# nil or a null pointer (does nothing).
def self.release(type, pointer)
  FFI.api[:"Release#{type}"].call(pointer.read_pointer) if pointer && !pointer.null?
end
|
|
23
|
+
|
|
24
|
+
# Raises OnnxRuntime::Error for an unsupported type, e.g.
# unsupported_type("element", :foo) => "Unsupported element type: foo".
def self.unsupported_type(name, type)
  raise Error, "Unsupported #{name} type: #{type}"
end
|
|
27
|
+
|
|
28
|
+
# Reads the element type and dimensions out of an OrtTensorTypeAndShapeInfo
# (passed as a pointer-to-pointer). Returns [element_type_int, dims] where
# symbolic (named) dimensions replace the numeric value when non-empty —
# so dims may mix Integers and Strings.
def self.tensor_type_and_shape(tensor_info)
  type = ::FFI::MemoryPointer.new(:int)
  check_status api[:GetTensorElementType].call(tensor_info.read_pointer, type)

  num_dims_ptr = ::FFI::MemoryPointer.new(:size_t)
  check_status api[:GetDimensionsCount].call(tensor_info.read_pointer, num_dims_ptr)
  num_dims = num_dims_ptr.read(:size_t)

  node_dims = ::FFI::MemoryPointer.new(:int64, num_dims)
  check_status api[:GetDimensions].call(tensor_info.read_pointer, node_dims, num_dims)
  dims = node_dims.read_array_of_int64(num_dims)

  # dynamic dimensions are reported numerically as -1 but carry a name;
  # prefer the name when present
  symbolic_dims = ::FFI::MemoryPointer.new(:pointer, num_dims)
  check_status api[:GetSymbolicDimensions].call(tensor_info.read_pointer, symbolic_dims, num_dims)
  named_dims = num_dims.times.map { |i| symbolic_dims[i].read_pointer.read_string }
  dims = named_dims.zip(dims).map { |n, d| n.empty? ? d : n }

  [type.read_int, dims]
end
|
|
47
|
+
|
|
48
|
+
# Builds a {type:, shape:} description from an OrtTypeInfo pointer.
# Supports tensor ("tensor(float)"), sequence ("seq(...)"), and map
# ("map(k,v)") ONNX types, recursing for nested element/value types.
# The TypeInfo is always released before returning.
def self.node_info(typeinfo)
  onnx_type = ::FFI::MemoryPointer.new(:int)
  check_status api[:GetOnnxTypeFromTypeInfo].call(typeinfo.read_pointer, onnx_type)

  type = FFI::OnnxType[onnx_type.read_int]
  case type
  when :tensor
    tensor_info = ::FFI::MemoryPointer.new(:pointer)
    # don't free tensor_info (the cast result is owned by the TypeInfo)
    check_status api[:CastTypeInfoToTensorInfo].call(typeinfo.read_pointer, tensor_info)

    type, shape = Utils.tensor_type_and_shape(tensor_info)
    {
      type: "tensor(#{FFI::TensorElementDataType[type]})",
      shape: shape
    }
  when :sequence
    sequence_type_info = ::FFI::MemoryPointer.new(:pointer)
    check_status api[:CastTypeInfoToSequenceTypeInfo].call(typeinfo.read_pointer, sequence_type_info)
    nested_type_info = ::FFI::MemoryPointer.new(:pointer)
    check_status api[:GetSequenceElementType].call(sequence_type_info.read_pointer, nested_type_info)
    v = node_info(nested_type_info)[:type]

    {
      type: "seq(#{v})",
      shape: []
    }
  when :map
    map_type_info = ::FFI::MemoryPointer.new(:pointer)
    check_status api[:CastTypeInfoToMapTypeInfo].call(typeinfo.read_pointer, map_type_info)

    # key
    key_type = ::FFI::MemoryPointer.new(:int)
    check_status api[:GetMapKeyType].call(map_type_info.read_pointer, key_type)
    k = FFI::TensorElementDataType[key_type.read_int]

    # value
    value_type_info = ::FFI::MemoryPointer.new(:pointer)
    check_status api[:GetMapValueType].call(map_type_info.read_pointer, value_type_info)
    v = node_info(value_type_info)[:type]

    {
      type: "map(#{k},#{v})",
      shape: []
    }
  else
    Utils.unsupported_type("ONNX", type)
  end
ensure
  release :TypeInfo, typeinfo
end
|
|
99
|
+
|
|
100
|
+
# Whether obj is a Numo::NArray. Falsy when the numo gem is not loaded,
# so Numo remains an optional dependency.
def self.numo_array?(obj)
  defined?(Numo::NArray) && obj.is_a?(Numo::NArray)
end
|
|
103
|
+
|
|
104
|
+
# Mapping from ONNX element type symbols to Numo array classes. Memoized;
# only call when the numo gem is loaded (the constants resolve lazily).
# NOTE(review): :uint8 and :bool both map to Numo::UInt8, so
# numo_types.invert resolves Numo::UInt8 to :bool (last key wins) —
# the key order here is load-bearing; confirm before reordering.
def self.numo_types
  @numo_types ||= {
    float: Numo::SFloat,
    uint8: Numo::UInt8,
    int8: Numo::Int8,
    uint16: Numo::UInt16,
    int16: Numo::Int16,
    int32: Numo::Int32,
    int64: Numo::Int64,
    bool: Numo::UInt8,
    double: Numo::DFloat,
    uint32: Numo::UInt32,
    uint64: Numo::UInt64
  }
end
|
|
119
|
+
|
|
120
|
+
# Infers the tensor shape of input. Numo arrays report their own shape;
# for nested Ruby arrays the shape is taken by descending first elements
# (assumes the nesting is rectangular — ragged arrays are not detected).
def self.input_shape(input)
  if numo_array?(input)
    input.shape
  else
    shape = []
    s = input
    while s.is_a?(Array)
      shape << s.size
      s = s.first
    end
    shape
  end
end
|
|
133
|
+
|
|
134
|
+
# Memoized pointer to the ONNX Runtime default allocator
# (GetAllocatorWithDefaultOptions); the returned allocator is owned by
# the runtime and must not be released.
def self.allocator
  @allocator ||= begin
    allocator = ::FFI::MemoryPointer.new(:pointer)
    check_status api[:GetAllocatorWithDefaultOptions].call(allocator)
    allocator
  end
end
|
|
15
141
|
end
|
|
16
142
|
end
|
data/lib/onnxruntime/version.rb
CHANGED
data/lib/onnxruntime.rb
CHANGED
|
@@ -5,6 +5,7 @@ require "ffi"
|
|
|
5
5
|
require_relative "onnxruntime/datasets"
|
|
6
6
|
require_relative "onnxruntime/inference_session"
|
|
7
7
|
require_relative "onnxruntime/model"
|
|
8
|
+
require_relative "onnxruntime/ort_value"
|
|
8
9
|
require_relative "onnxruntime/utils"
|
|
9
10
|
require_relative "onnxruntime/version"
|
|
10
11
|
|