onnxruntime 0.9.0-x64-mingw → 0.9.2-x64-mingw

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: 6ccea29e29e2302a1b33f0d7a7f6596b40cb0ac5058aaacfaed4bbe592bfeeba
-   data.tar.gz: 9634edd825d1953661ff2db6dcb9161b5bb284535816d8c5b1f7c2328de80cd0
+   metadata.gz: 3822460dbba75fa16e2c2a36c3c02bef346c613af8e0aa22619a88d9f0646e84
+   data.tar.gz: e34f4e5bdf071d4f044bad1fb80953087e02770056a013eebbad1d1a3813085f
  SHA512:
-   metadata.gz: 04f99c3ec124276d0249ccf74ed7cab33518051c0cae33f33463bfb7ff634ba3a648c9ea57b12521e343a74acd61046deedb5d9de6cf47395c9f2ed6749d03a9
-   data.tar.gz: 8c960e95f97b3ddeb628e7ae1a49d36dd4221117f44cf1ecf8250f4bac5c5bd772dd819575953562b35ef01dc5a6624cb3351134b44afad6a29072c49d582230
+   metadata.gz: 4e3cc7f6424d71ee2e11cb0cddf20edc574fd611315b8e7c71b837472de72f33e14147b5279cc11956cb1b9bb3acceb3f843f5bdf65c365a09008b7d3ff98318
+   data.tar.gz: 11cd62b750a55a579bbbf5d43ea054cd73a2c2476cb758733459aded2ff68234018c709e8b4707d7f15f263b31305dcf2ffefc29388cc4d506d6aef5ba97f46f
data/CHANGELOG.md CHANGED
@@ -1,3 +1,12 @@
+ ## 0.9.2 (2024-09-04)
+
+ - Updated ONNX Runtime to 1.19.2
+ - Added support for CoreML
+
+ ## 0.9.1 (2024-05-22)
+
+ - Updated ONNX Runtime to 1.18.0
+
  ## 0.9.0 (2024-02-27)

  - Updated ONNX Runtime to 1.17.1
data/README.md CHANGED
@@ -4,7 +4,7 @@

  Check out [an example](https://ankane.org/tensorflow-ruby)

- [![Build Status](https://github.com/ankane/onnxruntime-ruby/workflows/build/badge.svg?branch=master)](https://github.com/ankane/onnxruntime-ruby/actions)
+ [![Build Status](https://github.com/ankane/onnxruntime-ruby/actions/workflows/build.yml/badge.svg)](https://github.com/ankane/onnxruntime-ruby/actions)

  ## Installation

@@ -108,7 +108,9 @@ OnnxRuntime::Datasets.example("sigmoid.onnx")

  ## GPU Support

- To enable GPU support on Linux and Windows, download the appropriate [GPU release](https://github.com/microsoft/onnxruntime/releases) and set:
+ ### Linux and Windows
+
+ Download the appropriate [GPU release](https://github.com/microsoft/onnxruntime/releases) and set:

  ```ruby
  OnnxRuntime.ffi_lib = "path/to/lib/libonnxruntime.so" # onnxruntime.dll for Windows
@@ -120,6 +122,14 @@ and use:
  model = OnnxRuntime::Model.new("model.onnx", providers: ["CUDAExecutionProvider"])
  ```

+ ### Mac
+
+ Use:
+
+ ```ruby
+ model = OnnxRuntime::Model.new("model.onnx", providers: ["CoreMLExecutionProvider"])
+ ```
+
  ## History

  View the [changelog](https://github.com/ankane/onnxruntime-ruby/blob/master/CHANGELOG.md)
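Taken together, the README changes document per-platform provider selection. As a minimal editorial sketch (assuming a local `model.onnx` with an input named `x`, both placeholders), CoreML can be requested with a CPU fallback that catches the `ArgumentError` the session raises below when a provider is not compiled into the loaded library:

```ruby
require "onnxruntime"

# Prefer CoreML where available; fall back to the default CPU provider.
# "model.onnx" and the input name "x" are placeholder assumptions.
model =
  begin
    OnnxRuntime::Model.new("model.onnx", providers: ["CoreMLExecutionProvider"])
  rescue ArgumentError
    OnnxRuntime::Model.new("model.onnx")
  end

p model.predict({"x" => [[1.0, 2.0, 3.0]]})
```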
@@ -144,7 +144,7 @@ module OnnxRuntime
  :ReleaseAvailableProviders, callback(%i[pointer int], :pointer),
  :GetStringTensorElementLength, callback(%i[], :pointer),
  :GetStringTensorElement, callback(%i[], :pointer),
- :FillStringTensorElement, callback(%i[], :pointer),
+ :FillStringTensorElement, callback(%i[pointer string size_t], :pointer),
  :AddSessionConfigEntry, callback(%i[pointer string string], :pointer),
  :CreateAllocator, callback(%i[], :pointer),
  :ReleaseAllocator, callback(%i[], :pointer),
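The corrected parameter list matches the C prototype `OrtStatus* FillStringTensorElement(OrtValue* value, const char* s, size_t index)`. A minimal sketch (illustrative struct name, not the gem's real API table) of how ruby-ffi declares such a function-pointer member:

```ruby
require "ffi"

# Illustrative struct (not the gem's real Api class): each FFI type lines up
# with the C prototype -- pointer maps to OrtValue*, string to const char*,
# size_t to size_t -- and the :pointer return value carries the OrtStatus*.
class ApiSketch < ::FFI::Struct
  layout :FillStringTensorElement,
         callback(%i[pointer string size_t], :pointer)
end
```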
@@ -224,7 +224,16 @@ module OnnxRuntime
    :UpdateCUDAProviderOptions, callback(%i[pointer pointer pointer size_t], :pointer),
    :GetCUDAProviderOptionsAsString, callback(%i[pointer pointer pointer], :pointer),
    :ReleaseCUDAProviderOptions, callback(%i[pointer], :void),
-   :SessionOptionsAppendExecutionProvider_MIGraphX, callback(%i[], :pointer)
+   :SessionOptionsAppendExecutionProvider_MIGraphX, callback(%i[], :pointer),
+   :AddExternalInitializers, callback(%i[], :pointer),
+   :CreateOpAttr, callback(%i[], :pointer),
+   :ReleaseOpAttr, callback(%i[pointer], :void),
+   :CreateOp, callback(%i[], :pointer),
+   :InvokeOp, callback(%i[], :pointer),
+   :ReleaseOp, callback(%i[pointer], :void),
+   :SessionOptionsAppendExecutionProvider, callback(%i[pointer pointer pointer pointer size_t], :pointer),
+   :CopyKernelInfo, callback(%i[], :pointer),
+   :ReleaseKernelInfo, callback(%i[pointer], :void)
  end

  class ApiBase < ::FFI::Struct
@@ -237,6 +246,10 @@ module OnnxRuntime

  attach_function :OrtGetApiBase, %i[], ApiBase.by_ref

+ def self.api
+   @api ||= self.OrtGetApiBase[:GetApi].call(ORT_API_VERSION)
+ end
+
  if Gem.win_platform?
    class Libc
      extend ::FFI::Library
@@ -244,5 +257,11 @@ module OnnxRuntime
          attach_function :mbstowcs, %i[pointer string size_t], :size_t
        end
      end
+
+     # https://github.com/microsoft/onnxruntime/blob/main/include/onnxruntime/core/providers/coreml/coreml_provider_factory.h
+     begin
+       attach_function :OrtSessionOptionsAppendExecutionProvider_CoreML, %i[pointer uint32], :pointer
+     rescue ::FFI::NotFoundError
+     end
    end
  end
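The begin/rescue guard lets the bindings load against ONNX Runtime builds that predate the CoreML entry point. A self-contained sketch of the same pattern (probing libc for a deliberately missing symbol so the snippet runs anywhere):

```ruby
require "ffi"

# Sketch of the guard pattern above: attach an optional symbol so the module
# still loads when the library lacks it. We probe libc for a fake symbol to
# keep the sketch self-contained; the gem applies this to the CoreML entry
# point in libonnxruntime.
module OptionalSymbols
  extend ::FFI::Library
  ffi_lib ::FFI::Library::LIBC

  begin
    attach_function :not_a_real_symbol, %i[], :void # deliberately missing
  rescue ::FFI::NotFoundError
    # Symbol absent: the feature simply stays unavailable.
  end
end

# Callers feature-test with respond_to?, as the session code does:
p OptionalSymbols.respond_to?(:not_a_real_symbol) # => false
```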
@@ -66,6 +66,13 @@ module OnnxRuntime
    check_status api[:CreateCUDAProviderOptions].call(cuda_options)
    check_status api[:SessionOptionsAppendExecutionProvider_CUDA_V2].call(session_options.read_pointer, cuda_options.read_pointer)
    release :CUDAProviderOptions, cuda_options
+ when "CoreMLExecutionProvider"
+   unless FFI.respond_to?(:OrtSessionOptionsAppendExecutionProvider_CoreML)
+     raise ArgumentError, "Provider not available: #{provider}"
+   end
+
+   coreml_flags = 0
+   check_status FFI.OrtSessionOptionsAppendExecutionProvider_CoreML(session_options.read_pointer, coreml_flags)
  when "CPUExecutionProvider"
    break
  else
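Because the CoreML symbol may be absent from a given build, callers can feature-test before requesting the provider. A small illustrative helper (not part of the gem) mirroring the `respond_to?` check above:

```ruby
require "onnxruntime"

# Illustrative helper (not part of the gem): pick a provider this build can
# honor, mirroring the respond_to? feature test in the branch above.
def preferred_provider
  if OnnxRuntime::FFI.respond_to?(:OrtSessionOptionsAppendExecutionProvider_CoreML)
    "CoreMLExecutionProvider"
  else
    "CPUExecutionProvider"
  end
end

model = OnnxRuntime::Model.new("model.onnx", providers: [preferred_provider])
```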
@@ -258,77 +265,81 @@ module OnnxRuntime
    input_tensor = ::FFI::MemoryPointer.new(:pointer, input_feed.size)

    input_feed.each_with_index do |(input_name, input), idx|
-     if numo_array?(input)
-       shape = input.shape
-     else
-       input = input.to_a unless input.is_a?(Array)
-
-       shape = []
-       s = input
-       while s.is_a?(Array)
-         shape << s.size
-         s = s.first
-       end
-     end
-
      # TODO support more types
      inp = @inputs.find { |i| i[:name] == input_name.to_s }
      raise Error, "Unknown input: #{input_name}" unless inp

+     input = input.to_a unless input.is_a?(Array) || numo_array?(input)
+     shape = input_shape(input)
+
      input_node_dims = ::FFI::MemoryPointer.new(:int64, shape.size)
      input_node_dims.write_array_of_int64(shape)

      if inp[:type] == "tensor(string)"
-       str_ptrs =
-         if numo_array?(input)
-           input.size.times.map { |i| ::FFI::MemoryPointer.from_string(input[i]) }
-         else
-           input.flatten.map { |v| ::FFI::MemoryPointer.from_string(v) }
-         end
-
-       input_tensor_values = ::FFI::MemoryPointer.new(:pointer, str_ptrs.size)
-       input_tensor_values.write_array_of_pointer(str_ptrs)
-
        type_enum = FFI::TensorElementDataType[:string]
        check_status api[:CreateTensorAsOrtValue].call(@allocator.read_pointer, input_node_dims, shape.size, type_enum, input_tensor[idx])
-       check_status api[:FillStringTensor].call(input_tensor[idx].read_pointer, input_tensor_values, str_ptrs.size)
-
-       refs << str_ptrs
-     else
-       tensor_type = tensor_types[inp[:type]]
-
-       if tensor_type
-         if numo_array?(input)
-           input_tensor_values = input.cast_to(numo_types[tensor_type]).to_binary
-         else
-           flat_input = input.flatten.to_a
-           input_tensor_values = ::FFI::MemoryPointer.new(tensor_type, flat_input.size)
-           if tensor_type == :bool
-             input_tensor_values.write_array_of_uint8(flat_input.map { |v| v ? 1 : 0 })
-           else
-             input_tensor_values.send("write_array_of_#{tensor_type}", flat_input)
-           end
-         end
-
-         type_enum = FFI::TensorElementDataType[tensor_type]
-       else
-         unsupported_type("input", inp[:type])
-       end

+       # keep reference to _str_ptrs until FillStringTensor call
+       input_tensor_values, _str_ptrs = create_input_strings(input)
+       check_status api[:FillStringTensor].call(input_tensor[idx].read_pointer, input_tensor_values, input_tensor_values.size / input_tensor_values.type_size)
+     elsif (tensor_type = tensor_types[inp[:type]])
+       input_tensor_values = create_input_data(input, tensor_type)
+       type_enum = FFI::TensorElementDataType[tensor_type]
        check_status api[:CreateTensorWithDataAsOrtValue].call(allocator_info.read_pointer, input_tensor_values, input_tensor_values.size, input_node_dims, shape.size, type_enum, input_tensor[idx])

-       refs << input_node_dims
        refs << input_tensor_values
+     else
+       unsupported_type("input", inp[:type])
      end
    end

-   refs << allocator_info
-
    input_tensor
  ensure
    release :MemoryInfo, allocator_info
  end

+ def input_shape(input)
+   if numo_array?(input)
+     input.shape
+   else
+     shape = []
+     s = input
+     while s.is_a?(Array)
+       shape << s.size
+       s = s.first
+     end
+     shape
+   end
+ end
+
+ def create_input_strings(input)
+   str_ptrs =
+     if numo_array?(input)
+       input.size.times.map { |i| ::FFI::MemoryPointer.from_string(input[i]) }
+     else
+       input.flatten.map { |v| ::FFI::MemoryPointer.from_string(v) }
+     end
+
+   input_tensor_values = ::FFI::MemoryPointer.new(:pointer, str_ptrs.size)
+   input_tensor_values.write_array_of_pointer(str_ptrs)
+   [input_tensor_values, str_ptrs]
+ end
+
+ def create_input_data(input, tensor_type)
+   if numo_array?(input)
+     input.cast_to(numo_types[tensor_type]).to_binary
+   else
+     flat_input = input.flatten.to_a
+     input_tensor_values = ::FFI::MemoryPointer.new(tensor_type, flat_input.size)
+     if tensor_type == :bool
+       input_tensor_values.write_array_of_uint8(flat_input.map { |v| v ? 1 : 0 })
+     else
+       input_tensor_values.send("write_array_of_#{tensor_type}", flat_input)
+     end
+     input_tensor_values
+   end
+ end
+
  def create_node_names(names, refs)
    str_ptrs = names.map { |v| ::FFI::MemoryPointer.from_string(v) }
    refs << str_ptrs
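The extracted `input_shape` helper simply walks nested arrays, taking each level's length as a dimension. The same walk in isolation (plain Ruby, no Numo or FFI; the helper name here is ours):

```ruby
# Standalone version of the nested-array walk in input_shape above: each
# nesting level contributes one dimension, read from the first element
# (so the input is assumed rectangular, as tensors are).
def infer_shape(input)
  shape = []
  s = input
  while s.is_a?(Array)
    shape << s.size
    s = s.first
  end
  shape
end

p infer_shape([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) # => [2, 3]
p infer_shape([true, false])                      # => [2]
```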
@@ -575,7 +586,7 @@ module OnnxRuntime
  end

  def self.api
-   @api ||= FFI.OrtGetApiBase[:GetApi].call(FFI::ORT_API_VERSION)
+   FFI.api
  end

  def self.release(type, pointer)
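This hunk completes the memoization move: the session class drops its own `@api` cache and delegates to the `FFI.api` accessor added above, so every caller shares one cached lookup. A stripped-down sketch of the pattern (module names illustrative):

```ruby
# Stripped-down sketch of the caching move (illustrative module names).
module Bindings
  def self.api
    @api ||= begin
      puts "expensive lookup (runs once)"
      Object.new # stands in for the OrtApi function table
    end
  end
end

module Session
  def self.api
    Bindings.api # delegate; no second @api cache to drift out of sync
  end
end

Session.api
Session.api # the "expensive lookup" line is not printed again
```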
@@ -1,3 +1,3 @@
  module OnnxRuntime
-   VERSION = "0.9.0"
+   VERSION = "0.9.2"
  end
@@ -1829,7 +1829,7 @@ Zbigniew Skowron <zbychs@gmail.com>

  _____

- HalidelR
+ HalideIR

  Copyright (c) 2016 HalideIR contributors
  Copyright (c) 2012-2014 MIT CSAIL, Google Inc., and other contributors
@@ -4820,7 +4820,7 @@ SOFTWARE.

  ----------------------------------------------------------------------------

- This is the MIT/Expat Licence. For more information see:
+ This is the MIT/Expat License. For more information see:

  1. http://www.opensource.org/licenses/mit-license.php

Binary file
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: onnxruntime
  version: !ruby/object:Gem::Version
-   version: 0.9.0
+   version: 0.9.2
  platform: x64-mingw
  authors:
  - Andrew Kane
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2024-02-27 00:00:00.000000000 Z
+ date: 2024-09-04 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: ffi
@@ -62,7 +62,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
    - !ruby/object:Gem::Version
      version: '0'
  requirements: []
- rubygems_version: 3.5.3
+ rubygems_version: 3.5.11
  signing_key:
  specification_version: 4
  summary: High performance scoring engine for ML models