onnxruntime 0.5.0 → 0.5.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: dc2115e3b32ec7d1d0ab2da64edfb5bf2ebf24ecc6ad705d4a53ae4793e60412
- data.tar.gz: 5e53efd5de12ed5ac06a7e4472c8ee043ffbbba9c82b5f49e340f6a70a4dfcdf
+ metadata.gz: 316527780be2781a474d0813aff47d840654423823ad83b8b52b13752caf6814
+ data.tar.gz: 5954ba2dc4223b8330fb52e8474be78542360c2f4c1cb5946529d062c0b1b864
  SHA512:
- metadata.gz: 93f83646d23298213b971ea7462ce4e5cc970c49d7572b646a6f8003b05689a5a232a3acc05b9cacfbabf65534c046d687eecc80fbf5583b2691391d30b54872
- data.tar.gz: fc0349312d70944a2169302e8b9f1f70f255e29b60c523f0ba1a688ac162cc230636f9ef333d6ac032d855d9387d88234181a69edade6407bde027050972a2da
+ metadata.gz: c127874dd75a10b8cb9d9d033e607f9d73069bb5709d7b9ecee04c1d59f969f61db6ec88fd569188b0a35c8276f7611619f81662f3735fbd53e61938f15c6822
+ data.tar.gz: 77a7c9f8c98b25fd82ee8818c63176d2005f846db489b5973330220344be8fd47d8e5279517a716f8c1222f729a248fc8ac80cb610633dbdc35c49418a42ea1c
data/CHANGELOG.md CHANGED
@@ -1,3 +1,10 @@
+ ## 0.5.1 (2020-11-01)
+
+ - Updated ONNX Runtime to 1.5.2
+ - Added support for string output
+ - Added `output_type` option
+ - Improved performance for Numo array inputs
+
  ## 0.5.0 (2020-10-01)

  - Updated ONNX Runtime to 1.5.1
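The Numo-related entries above map to the lib changes further down in this diff: Numo::NArray inputs are now detected and written to the input tensor from their binary representation instead of being flattened into Ruby arrays first. A minimal sketch of what that enables, with a hypothetical model path and input name (placeholders, not part of this package):

```ruby
require "onnxruntime"
require "numo/narray"

model = OnnxRuntime::Model.new("model.onnx") # hypothetical model path

# A Numo array keeps its shape (input.shape) and is serialized with
# cast_to(...).to_binary, skipping the per-element Ruby conversion.
x = Numo::SFloat.new(2, 3).seq
input_feed = {x: x}                          # hypothetical input name
model.predict(input_feed)
```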
data/README.md CHANGED
@@ -87,7 +87,8 @@ model.predict(input_feed, {
  log_severity_level: 2,
  log_verbosity_level: 0,
  logid: nil,
- terminate: false
+ terminate: false,
+ output_type: :ruby # :ruby or :numo
  })
  ```

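A hedged illustration of the `output_type` option documented above: with `:numo`, tensor outputs come back as Numo::NArray objects (Numo::RObject for string tensors) instead of nested Ruby arrays. The model path and feed names are placeholders, not part of this package:

```ruby
require "onnxruntime"
require "numo/narray" # needed for :numo outputs

model = OnnxRuntime::Model.new("model.onnx") # hypothetical model
input_feed = {x: [[1.0, 2.0, 3.0]]}          # hypothetical input name

# Default behavior is unchanged (:ruby => nested Ruby arrays).
ruby_outputs = model.predict(input_feed)

# :numo builds outputs with Numo's from_binary instead of converting
# each element to a Ruby object.
numo_outputs = model.predict(input_feed, {output_type: :numo})
```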
@@ -74,8 +74,8 @@ module OnnxRuntime
  :IsTensor, callback(%i[], :pointer),
  :GetTensorMutableData, callback(%i[pointer pointer], :pointer),
  :FillStringTensor, callback(%i[pointer pointer size_t], :pointer),
- :GetStringTensorDataLength, callback(%i[], :pointer),
- :GetStringTensorContent, callback(%i[], :pointer),
+ :GetStringTensorDataLength, callback(%i[pointer pointer], :pointer),
+ :GetStringTensorContent, callback(%i[pointer pointer size_t pointer size_t], :pointer),
  :CastTypeInfoToTensorInfo, callback(%i[pointer pointer], :pointer),
  :GetOnnxTypeFromTypeInfo, callback(%i[pointer pointer], :pointer),
  :CreateTensorTypeAndShapeInfo, callback(%i[], :pointer),
@@ -80,7 +80,7 @@ module OnnxRuntime
  end

  # TODO support logid
- def run(output_names, input_feed, log_severity_level: nil, log_verbosity_level: nil, logid: nil, terminate: nil)
+ def run(output_names, input_feed, log_severity_level: nil, log_verbosity_level: nil, logid: nil, terminate: nil, output_type: :ruby)
  input_tensor = create_input_tensor(input_feed)

  output_names ||= @outputs.map { |v| v[:name] }
@@ -100,7 +100,7 @@ module OnnxRuntime
  check_status api[:Run].call(read_pointer, run_options.read_pointer, input_node_names, input_tensor, input_feed.size, output_node_names, output_names.size, output_tensor)

  output_names.size.times.map do |i|
- create_from_onnx_value(output_tensor[i].read_pointer)
+ create_from_onnx_value(output_tensor[i].read_pointer, output_type)
  end
  ensure
  release :RunOptions, run_options
@@ -180,18 +180,19 @@ module OnnxRuntime
  input_tensor = ::FFI::MemoryPointer.new(:pointer, input_feed.size)

  input_feed.each_with_index do |(input_name, input), idx|
- input = input.to_a unless input.is_a?(Array)
+ if numo_array?(input)
+ shape = input.shape
+ else
+ input = input.to_a unless input.is_a?(Array)

- shape = []
- s = input
- while s.is_a?(Array)
- shape << s.size
- s = s.first
+ shape = []
+ s = input
+ while s.is_a?(Array)
+ shape << s.size
+ s = s.first
+ end
  end

- flat_input = input.flatten
- input_tensor_size = flat_input.size
-
  # TODO support more types
  inp = @inputs.find { |i| i[:name] == input_name.to_s }
  raise Error, "Unknown input: #{input_name}" unless inp
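For context on the hunk above: Numo inputs report their shape directly via `input.shape`, while plain Ruby inputs still have their shape inferred by walking the nested arrays and taking the size of the first element at each depth. A standalone illustration of that inference (same idea, not the gem's code):

```ruby
# Infer a tensor shape from a nested Ruby array by descending through
# the first element at each level, as the while loop above does.
def infer_shape(input)
  shape = []
  s = input
  while s.is_a?(Array)
    shape << s.size
    s = s.first
  end
  shape
end

infer_shape([[1, 2, 3], [4, 5, 6]]) # => [2, 3]
infer_shape([1.0, 2.0])             # => [2]
```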
@@ -200,22 +201,35 @@ module OnnxRuntime
  input_node_dims.write_array_of_int64(shape)

  if inp[:type] == "tensor(string)"
- input_tensor_values = ::FFI::MemoryPointer.new(:pointer, input_tensor_size)
- input_tensor_values.write_array_of_pointer(flat_input.map { |v| ::FFI::MemoryPointer.from_string(v) })
+ if numo_array?(input)
+ input_tensor_size = input.size
+ input_tensor_values = ::FFI::MemoryPointer.new(:pointer, input.size)
+ input_tensor_values.write_array_of_pointer(input_tensor_size.times.map { |i| ::FFI::MemoryPointer.from_string(input[i]) })
+ else
+ flat_input = input.flatten.to_a
+ input_tensor_size = flat_input.size
+ input_tensor_values = ::FFI::MemoryPointer.new(:pointer, input_tensor_size)
+ input_tensor_values.write_array_of_pointer(flat_input.map { |v| ::FFI::MemoryPointer.from_string(v) })
+ end
  type_enum = FFI::TensorElementDataType[:string]
  check_status api[:CreateTensorAsOrtValue].call(@allocator.read_pointer, input_node_dims, shape.size, type_enum, input_tensor[idx])
- check_status api[:FillStringTensor].call(input_tensor[idx].read_pointer, input_tensor_values, flat_input.size)
+ check_status api[:FillStringTensor].call(input_tensor[idx].read_pointer, input_tensor_values, input_tensor_size)
  else
- tensor_types = [:float, :uint8, :int8, :uint16, :int16, :int32, :int64, :bool, :double, :uint32, :uint64].map { |v| ["tensor(#{v})", v] }.to_h
  tensor_type = tensor_types[inp[:type]]

  if tensor_type
- input_tensor_values = ::FFI::MemoryPointer.new(tensor_type, input_tensor_size)
- if tensor_type == :bool
- tensor_type = :uchar
- flat_input = flat_input.map { |v| v ? 1 : 0 }
+ if numo_array?(input)
+ input_tensor_values = input.cast_to(numo_types[tensor_type]).to_binary
+ else
+ flat_input = input.flatten.to_a
+ input_tensor_values = ::FFI::MemoryPointer.new(tensor_type, flat_input.size)
+ if tensor_type == :bool
+ tensor_type = :uchar
+ flat_input = flat_input.map { |v| v ? 1 : 0 }
+ end
+ input_tensor_values.send("write_array_of_#{tensor_type}", flat_input)
  end
- input_tensor_values.send("write_array_of_#{tensor_type}", flat_input)
+
  type_enum = FFI::TensorElementDataType[tensor_type]
  else
  unsupported_type("input", inp[:type])
@@ -234,7 +248,7 @@ module OnnxRuntime
  ptr
  end

- def create_from_onnx_value(out_ptr)
+ def create_from_onnx_value(out_ptr, output_type)
  out_type = ::FFI::MemoryPointer.new(:int)
  check_status api[:GetValueType].call(out_ptr, out_type)
  type = FFI::OnnxType[out_type.read_int]
@@ -257,17 +271,36 @@ module OnnxRuntime

  # TODO support more types
  type = FFI::TensorElementDataType[type]
- arr =
+
+ case output_type
+ when :numo
  case type
- when :float, :uint8, :int8, :uint16, :int16, :int32, :int64, :double, :uint32, :uint64
- tensor_data.read_pointer.send("read_array_of_#{type}", output_tensor_size)
- when :bool
- tensor_data.read_pointer.read_array_of_uchar(output_tensor_size).map { |v| v == 1 }
+ when :string
+ result = Numo::RObject.new(shape)
+ result.allocate
+ create_strings_from_onnx_value(out_ptr, output_tensor_size, result)
  else
- unsupported_type("element", type)
+ numo_type = numo_types[type]
+ unsupported_type("element", type) unless numo_type
+ numo_type.from_binary(tensor_data.read_pointer.read_bytes(output_tensor_size * numo_type::ELEMENT_BYTE_SIZE), shape)
  end
+ when :ruby
+ arr =
+ case type
+ when :float, :uint8, :int8, :uint16, :int16, :int32, :int64, :double, :uint32, :uint64
+ tensor_data.read_pointer.send("read_array_of_#{type}", output_tensor_size)
+ when :bool
+ tensor_data.read_pointer.read_array_of_uchar(output_tensor_size).map { |v| v == 1 }
+ when :string
+ create_strings_from_onnx_value(out_ptr, output_tensor_size, [])
+ else
+ unsupported_type("element", type)
+ end

- Utils.reshape(arr, shape)
+ Utils.reshape(arr, shape)
+ else
+ raise ArgumentError, "Invalid output type: #{output_type}"
+ end
  when :sequence
  out = ::FFI::MemoryPointer.new(:size_t)
  check_status api[:GetValueCount].call(out_ptr, out)
@@ -275,7 +308,7 @@ module OnnxRuntime
  out.read(:size_t).times.map do |i|
  seq = ::FFI::MemoryPointer.new(:pointer)
  check_status api[:GetValue].call(out_ptr, i, @allocator.read_pointer, seq)
- create_from_onnx_value(seq.read_pointer)
+ create_from_onnx_value(seq.read_pointer, output_type)
  end
  when :map
  type_shape = ::FFI::MemoryPointer.new(:pointer)
@@ -294,8 +327,8 @@ module OnnxRuntime
  case elem_type
  when :int64
  ret = {}
- keys = create_from_onnx_value(map_keys.read_pointer)
- values = create_from_onnx_value(map_values.read_pointer)
+ keys = create_from_onnx_value(map_keys.read_pointer, output_type)
+ values = create_from_onnx_value(map_values.read_pointer, output_type)
  keys.zip(values).each do |k, v|
  ret[k] = v
  end
@@ -308,6 +341,23 @@ module OnnxRuntime
  end
  end

+ def create_strings_from_onnx_value(out_ptr, output_tensor_size, result)
+ len = ::FFI::MemoryPointer.new(:size_t)
+ check_status api[:GetStringTensorDataLength].call(out_ptr, len)
+
+ s_len = len.read(:size_t)
+ s = ::FFI::MemoryPointer.new(:uchar, s_len)
+ offsets = ::FFI::MemoryPointer.new(:size_t, output_tensor_size)
+ check_status api[:GetStringTensorContent].call(out_ptr, s, s_len, offsets, output_tensor_size)
+
+ offsets = output_tensor_size.times.map { |i| offsets[i].read(:size_t) }
+ offsets << s_len
+ output_tensor_size.times do |i|
+ result[i] = s.get_bytes(offsets[i], offsets[i + 1] - offsets[i])
+ end
+ result
+ end
+
  def read_pointer
  @session.read_pointer
  end
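The new helper above unpacks ONNX Runtime's string tensor layout: `GetStringTensorDataLength` reports the total byte length, and `GetStringTensorContent` fills one flat buffer plus a start offset per element, so appending the total length as a sentinel lets element `i` be sliced from `offsets[i]` to `offsets[i + 1]`. A small worked example of that slicing, using made-up data:

```ruby
# Flat buffer for the strings "cat", "horse", "ox" (made-up data).
s = "cathorseox"
s_len = s.bytesize      # 10, what GetStringTensorDataLength would report
offsets = [0, 3, 8]     # start offset of each element within the buffer
offsets << s_len        # sentinel, as in create_strings_from_onnx_value

strings = offsets.each_cons(2).map { |from, to| s.byteslice(from, to - from) }
# => ["cat", "horse", "ox"]
```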
@@ -390,6 +440,30 @@ module OnnxRuntime
  raise Error, "Unsupported #{name} type: #{type}"
  end

+ def tensor_types
+ @tensor_types ||= [:float, :uint8, :int8, :uint16, :int16, :int32, :int64, :bool, :double, :uint32, :uint64].map { |v| ["tensor(#{v})", v] }.to_h
+ end
+
+ def numo_array?(obj)
+ defined?(Numo::NArray) && obj.is_a?(Numo::NArray)
+ end
+
+ def numo_types
+ @numo_types ||= {
+ float: Numo::SFloat,
+ uint8: Numo::UInt8,
+ int8: Numo::Int8,
+ uint16: Numo::UInt16,
+ int16: Numo::Int16,
+ int32: Numo::Int32,
+ int64: Numo::Int64,
+ bool: Numo::UInt8,
+ double: Numo::DFloat,
+ uint32: Numo::UInt32,
+ uint64: Numo::UInt64
+ }
+ end
+
  def api
  self.class.api
  end
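A short sketch of how the `numo_types` mapping above is used on both sides of the FFI boundary: inputs are cast to the mapped Numo class and passed to ONNX Runtime as raw bytes via `to_binary`, and `:numo` outputs are rebuilt with `from_binary`, sized by the class's `ELEMENT_BYTE_SIZE`. Values and shapes here are illustrative only:

```ruby
require "numo/narray"

# Input side: a tensor(float) input is cast to Numo::SFloat and
# serialized to the raw byte buffer handed to the runtime.
input = Numo::Int32[[1, 2], [3, 4]]
bytes = input.cast_to(Numo::SFloat).to_binary

# Output side: rebuild an NArray from the bytes returned by the runtime.
shape = [2, 2]
output = Numo::SFloat.from_binary(bytes, shape)
Numo::SFloat::ELEMENT_BYTE_SIZE # => 4, used to size the read_bytes call
```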
@@ -1,3 +1,3 @@
  module OnnxRuntime
- VERSION = "0.5.0"
+ VERSION = "0.5.1"
  end
Binary file
Binary file
Binary file
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: onnxruntime
  version: !ruby/object:Gem::Version
- version: 0.5.0
+ version: 0.5.1
  platform: ruby
  authors:
  - Andrew Kane
- autorequire:
+ autorequire:
  bindir: bin
  cert_chain: []
- date: 2020-10-02 00:00:00.000000000 Z
+ date: 2020-11-01 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: ffi
@@ -66,7 +66,7 @@ dependencies:
  - - ">="
  - !ruby/object:Gem::Version
  version: '5'
- description:
+ description:
  email: andrew@chartkick.com
  executables: []
  extensions: []
@@ -91,7 +91,7 @@ homepage: https://github.com/ankane/onnxruntime
  licenses:
  - MIT
  metadata: {}
- post_install_message:
+ post_install_message:
  rdoc_options: []
  require_paths:
  - lib
@@ -106,8 +106,8 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.1.2
- signing_key:
+ rubygems_version: 3.1.4
+ signing_key:
  specification_version: 4
  summary: High performance scoring engine for ML models
  test_files: []