com.github.asus4.onnxruntime 0.1.14 → 0.2.1
- package/Plugins/Android/onnxruntime-android.aar +0 -0
- package/Plugins/Linux/x64/libonnxruntime.so +0 -0
- package/Plugins/Windows/x64/onnxruntime.dll +0 -0
- package/Plugins/iOS~/onnxruntime.xcframework/Info.plist +6 -6
- package/Plugins/iOS~/onnxruntime.xcframework/ios-arm64/onnxruntime.framework/Headers/coreml_provider_factory.h +4 -1
- package/Plugins/iOS~/onnxruntime.xcframework/ios-arm64/onnxruntime.framework/Headers/onnxruntime_c_api.h +134 -19
- package/Plugins/iOS~/onnxruntime.xcframework/ios-arm64/onnxruntime.framework/Headers/onnxruntime_cxx_api.h +18 -3
- package/Plugins/iOS~/onnxruntime.xcframework/ios-arm64/onnxruntime.framework/Headers/onnxruntime_cxx_inline.h +68 -15
- package/Plugins/iOS~/onnxruntime.xcframework/ios-arm64/onnxruntime.framework/Headers/onnxruntime_lite_custom_op.h +1119 -0
- package/Plugins/iOS~/onnxruntime.xcframework/ios-arm64/onnxruntime.framework/Headers/onnxruntime_run_options_config_keys.h +19 -0
- package/Plugins/iOS~/onnxruntime.xcframework/ios-arm64/onnxruntime.framework/Headers/onnxruntime_session_options_config_keys.h +32 -9
- package/Plugins/iOS~/onnxruntime.xcframework/ios-arm64/onnxruntime.framework/Info.plist +2 -2
- package/Plugins/iOS~/onnxruntime.xcframework/ios-arm64/onnxruntime.framework/onnxruntime +0 -0
- package/Plugins/iOS~/onnxruntime.xcframework/ios-arm64_x86_64-simulator/onnxruntime.framework/Headers/coreml_provider_factory.h +4 -1
- package/Plugins/iOS~/onnxruntime.xcframework/ios-arm64_x86_64-simulator/onnxruntime.framework/Headers/onnxruntime_c_api.h +134 -19
- package/Plugins/iOS~/onnxruntime.xcframework/ios-arm64_x86_64-simulator/onnxruntime.framework/Headers/onnxruntime_cxx_api.h +18 -3
- package/Plugins/iOS~/onnxruntime.xcframework/ios-arm64_x86_64-simulator/onnxruntime.framework/Headers/onnxruntime_cxx_inline.h +68 -15
- package/Plugins/iOS~/onnxruntime.xcframework/ios-arm64_x86_64-simulator/onnxruntime.framework/Headers/onnxruntime_lite_custom_op.h +1119 -0
- package/Plugins/iOS~/onnxruntime.xcframework/ios-arm64_x86_64-simulator/onnxruntime.framework/Headers/onnxruntime_run_options_config_keys.h +19 -0
- package/Plugins/iOS~/onnxruntime.xcframework/ios-arm64_x86_64-simulator/onnxruntime.framework/Headers/onnxruntime_session_options_config_keys.h +32 -9
- package/Plugins/iOS~/onnxruntime.xcframework/ios-arm64_x86_64-simulator/onnxruntime.framework/Info.plist +2 -2
- package/Plugins/iOS~/onnxruntime.xcframework/ios-arm64_x86_64-simulator/onnxruntime.framework/onnxruntime +0 -0
- package/Plugins/iOS~/onnxruntime.xcframework/macos-arm64_x86_64/onnxruntime.framework/{Headers → Versions/A/Headers}/coreml_provider_factory.h +4 -1
- package/Plugins/iOS~/onnxruntime.xcframework/macos-arm64_x86_64/onnxruntime.framework/{Headers → Versions/A/Headers}/onnxruntime_c_api.h +134 -19
- package/Plugins/iOS~/onnxruntime.xcframework/macos-arm64_x86_64/onnxruntime.framework/{Headers → Versions/A/Headers}/onnxruntime_cxx_api.h +18 -3
- package/Plugins/iOS~/onnxruntime.xcframework/macos-arm64_x86_64/onnxruntime.framework/{Headers → Versions/A/Headers}/onnxruntime_cxx_inline.h +68 -15
- package/Plugins/iOS~/onnxruntime.xcframework/macos-arm64_x86_64/onnxruntime.framework/Versions/A/Headers/onnxruntime_lite_custom_op.h +1119 -0
- package/Plugins/iOS~/onnxruntime.xcframework/macos-arm64_x86_64/onnxruntime.framework/{Headers → Versions/A/Headers}/onnxruntime_run_options_config_keys.h +19 -0
- package/Plugins/iOS~/onnxruntime.xcframework/macos-arm64_x86_64/onnxruntime.framework/{Headers → Versions/A/Headers}/onnxruntime_session_options_config_keys.h +32 -9
- package/Plugins/iOS~/onnxruntime.xcframework/macos-arm64_x86_64/onnxruntime.framework/{Info.plist → Versions/A/Resources/Info.plist} +2 -2
- package/Plugins/iOS~/onnxruntime.xcframework/macos-arm64_x86_64/onnxruntime.framework/{onnxruntime → Versions/A/onnxruntime} +0 -0
- package/Plugins/macOS/libonnxruntime.dylib +0 -0
- package/README.md +8 -8
- package/Runtime/AssemblyInfo.shared.cs +1 -11
- package/Runtime/NativeMethods.shared.cs +37 -2
- package/Runtime/OrtValue.shared.cs +38 -38
- package/Runtime/SessionOptions.shared.cs +14 -0
- package/Runtime/Training/NativeTrainingMethods.shared.cs +20 -2
- package/Runtime/Training/TrainingSession.shared.cs +107 -0
- package/package.json +1 -1
- /package/Plugins/iOS~/onnxruntime.xcframework/macos-arm64_x86_64/onnxruntime.framework/{Headers → Versions/A/Headers}/cpu_provider_factory.h +0 -0
- /package/Plugins/iOS~/onnxruntime.xcframework/macos-arm64_x86_64/onnxruntime.framework/{Headers → Versions/A/Headers}/onnxruntime_float16.h +0 -0
@@ -30,3 +30,22 @@ static const char* const kOrtRunOptionsConfigEnableMemoryArenaShrinkage = "memor
 // Per default it will be set to '0'
 // Taking CUDA EP as an example, it omit triggering cudaStreamSynchronize on the compute stream.
 static const char* const kOrtRunOptionsConfigDisableSynchronizeExecutionProviders = "disable_synchronize_execution_providers";
+
+// Set HTP performance mode for QNN HTP backend before session run.
+// options for HTP performance mode: "burst", "balanced", "default", "high_performance",
+// "high_power_saver", "low_balanced", "extreme_power_saver", "low_power_saver", "power_saver",
+// "sustained_high_performance". Default to "default".
+static const char* const kOrtRunOptionsConfigQnnPerfMode = "qnn.htp_perf_mode";
+
+// Set HTP performance mode for QNN HTP backend post session run.
+static const char* const kOrtRunOptionsConfigQnnPerfModePostRun = "qnn.htp_perf_mode_post_run";
+
+// Set RPC control latency for QNN HTP backend
+static const char* const kOrtRunOptionsConfigQnnRpcControlLatency = "qnn.rpc_control_latency";
+
+// Set graph annotation id for CUDA EP. Use with enable_cuda_graph=true.
+// The value should be an integer. If the value is not set, the default value is 0 and
+// ORT session only captures one cuda graph before another capture is requested.
+// If the value is set to -1, cuda graph capture/replay is disabled in that run.
+// User are not expected to set the value to 0 as it is reserved for internal use.
+static const char* const kOrtRunOptionsConfigCudaGraphAnnotation = "gpu_graph_id";
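The run-option keys added above are plain string config entries. A minimal C# sketch of how they might be set from this package's Microsoft.ML.OnnxRuntime binding, assuming RunOptions.AddRunConfigEntry(string, string) is available in this version and using illustrative values:

```csharp
// Hedged sketch: applies the new QNN HTP perf-mode and CUDA graph annotation
// run-option keys before a Run() call. Key strings are copied from the header
// above; the chosen values are examples only.
using Microsoft.ML.OnnxRuntime;

public static class RunOptionsConfigExample
{
    public static RunOptions Create()
    {
        var runOptions = new RunOptions();
        // HTP performance mode applied before the run.
        runOptions.AddRunConfigEntry("qnn.htp_perf_mode", "burst");
        // Mode restored after the run completes.
        runOptions.AddRunConfigEntry("qnn.htp_perf_mode_post_run", "default");
        // CUDA graph annotation id; "-1" disables capture/replay for this run.
        runOptions.AddRunConfigEntry("gpu_graph_id", "1");
        return runOptions;
    }
}
```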
@@ -78,21 +78,35 @@ static const char* const kOrtSessionOptionsEnableGeluApproximation = "optimizati
 static const char* const kOrtSessionOptionsDisableAheadOfTimeFunctionInlining = "session.disable_aot_function_inlining";
 
 #ifdef ENABLE_TRAINING
-// Specifies a
-// The value should be a
-//
-//
-//
-// "
-//
-//
-
+// Specifies a path of the file containing a list of memory optimization configurations.
+// The value should be a string indicating the file path of the config file.
+// The content of the config file is a JSON struct like this:
+// [
+// "Gelu+Cast+:1:0",
+// "Dropout+:1:1"
+// ]
+// Taking the example of "Gelu+Cast+:1:0",
+// > "Gelu+Cast+" is the subgraph string, a valid "subgraph string" should be one subgraph representation
+// output by ORT graph transformations.
+// > "1" is "optimization strategy", valid values: 0 - disabled, 1 - recompute.
+// > "0" is "number of subgraph to apply" which is used to control how many subgraphs to apply optimization,
+// to avoid "oversaving" the memory.
+static const char* const kOrtSessionOptionsMemoryOptimizerApplyConfig = "optimization.memory_optimizer_config";
 
 // Specifies the config for detecting subgraphs for memory footprint reduction.
 // The value should be a string contains int separated using commas. The default value is "0:0".
 static const char* const kOrtSessionOptionsMemoryOptimizerProbeConfig = "optimization.enable_memory_probe_recompute_config";
 #endif
 
+// This setting if set should contain a comma separated list of optimizers names that should be disabled.
+// Optimizers may take time to execute and affect model loading time. If you feel that a specific optimizer
+// does not provider runtime benefits, but affects your model loading time you may disable it using this config
+// entry. This option is not enabled in ORT_MINIMAL_BUILD build.
+// A list of optimizes is available in onnxruntime/core/optimizer/graph_transformer_utils.cc
+//
+// Default is an empty string which means no optimizers are disabled.
+static const char* const kOrtSessionOptionsDisableSpecifiedOptimizers = "optimization.disable_specified_optimizers";
+
 // Enable or disable using device allocator for allocating initialized tensor memory. "1": enable; "0": disable. The default is "0".
 // Using device allocators means the memory allocation is made using malloc/new.
 static const char* const kOrtSessionOptionsUseDeviceAllocatorForInitializers = "session.use_device_allocator_for_initializers";
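These session-option keys are likewise plain string entries. A hedged C# sketch using SessionOptions.AddSessionConfigEntry; the optimizer name "NchwcTransformer" is only an illustrative value:

```csharp
// Hedged sketch: sets the session-level keys documented above.
// Values and the optimizer name are examples, not recommendations.
using Microsoft.ML.OnnxRuntime;

public static class SessionConfigExample
{
    public static SessionOptions Create()
    {
        var options = new SessionOptions();
        // Comma-separated list of graph optimizers to skip during model load.
        options.AddSessionConfigEntry("optimization.disable_specified_optimizers", "NchwcTransformer");
        // Allocate initializers with the device allocator ("1") instead of the default ("0").
        options.AddSessionConfigEntry("session.use_device_allocator_for_initializers", "1");
        return options;
    }
}
```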
@@ -251,8 +265,17 @@ static const char* const kOrtSessionOptionEpContextFilePath = "ep.context_file_p
 // "1": dump the EP context into the Onnx model. (default).
 static const char* const kOrtSessionOptionEpContextEmbedMode = "ep.context_embed_mode";
 
+// Specify the EPContext node name prefix to make it unique
+// in case user need to merge/connect multiple EPContext nodes in one model
+static const char* const kOrtSessionOptionEpContextNodeNamePrefix = "ep.context_node_name_prefix";
+
 // Gemm fastmath mode provides fp32 gemm acceleration with bfloat16 based matmul.
 // Option values:
 // - "0": Gemm FastMath mode is not enabled. [DEFAULT]
 // - "1": Gemm FastMath mode is enabled.
 static const char* const kOrtSessionOptionsMlasGemmFastMathArm64Bfloat16 = "mlas.enable_gemm_fastmath_arm64_bfloat16";
+
+// When converting DQ + MatMul -> MatMulNBits, the accuracy level of the MatMulNBits is controlled by this option.
+// Refer to MatMulNBits op schema for more details.
+// If not provided, default is 4.
+static const char* const kOrtSessionOptionsQDQMatMulNBitsAccuracyLevel = "session.qdq_matmulnbits_accuracy_level";
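A hedged C# sketch of setting the EP-context and MatMulNBits keys added above via SessionOptions.AddSessionConfigEntry; the prefix and level values are illustrative only:

```csharp
// Hedged sketch: EP-context node name prefix and QDQ MatMulNBits accuracy level,
// both set as session config entries. Values here are placeholders.
using Microsoft.ML.OnnxRuntime;

public static class EpContextConfigExample
{
    public static SessionOptions Create()
    {
        var options = new SessionOptions();
        // Prefix that keeps EPContext node names unique when merging several models.
        options.AddSessionConfigEntry("ep.context_node_name_prefix", "modelA_");
        // Accuracy level used when DQ + MatMul is converted to MatMulNBits (default 4).
        options.AddSessionConfigEntry("session.qdq_matmulnbits_accuracy_level", "4");
        return options;
    }
}
```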
@@ -9,9 +9,9 @@
 <key>CFBundleIdentifier</key>
 <string>com.microsoft.onnxruntime</string>
 <key>CFBundleVersion</key>
-<string>1.
+<string>1.19.2</string>
 <key>CFBundleShortVersionString</key>
-<string>1.
+<string>1.19.2</string>
 <key>CFBundleSignature</key>
 <string>????</string>
 <key>CFBundlePackageType</key>
Binary file
Binary file
package/README.md CHANGED
@@ -18,9 +18,9 @@ NanoSAM
 
 ## Tested environment
 
-- Unity: 2022.3.
-- ONNX Runtime: [1.
-- ONNX Runtime Extensions: [0.
+- Unity: 2022.3.20f1 (LTS)
+- ONNX Runtime: [1.19.2](https://github.com/microsoft/onnxruntime/releases/tag/v1.19.2)
+- ONNX Runtime Extensions: [0.12.0](https://github.com/microsoft/onnxruntime-extensions/releases/tag/v0.12.0)
 
 ### Execution Providers & Extensions
 
@@ -66,11 +66,11 @@ Pre-built libraries are available on [NPM](https://www.npmjs.com/package/com.git
 }
 ]
 "dependencies": {
-"com.github.asus4.onnxruntime": "0.1
-"com.github.asus4.onnxruntime.unity": "0.1
-"com.github.asus4.onnxruntime.win-x64-gpu": "0.1
-"com.github.asus4.onnxruntime.linux-x64-gpu": "0.1
-"com.github.asus4.onnxruntime-extensions": "0.1
+"com.github.asus4.onnxruntime": "0.2.1",
+"com.github.asus4.onnxruntime.unity": "0.2.1",
+"com.github.asus4.onnxruntime.win-x64-gpu": "0.2.1",
+"com.github.asus4.onnxruntime.linux-x64-gpu": "0.2.1",
+"com.github.asus4.onnxruntime-extensions": "0.2.1",
 ... other dependencies
 }
 ```
@@ -9,24 +9,14 @@
 #elif UNITY_EDITOR_OSX || UNITY_STANDALONE_OSX
 #define __ENABLE_COREML__
 #endif
-
-#if __XAMARIN__
-#if __IOS__
-[assembly: Foundation.LinkerSafe]
-#elif __ANDROID__
-[assembly: Android.LinkerSafe]
-#endif
-#else
-// .net 6
 #if __IOS__
 [assembly: System.Reflection.AssemblyMetadata ("IsTrimmable", "True")]
 #elif __ANDROID__
 [assembly: global::System.Reflection.AssemblyMetadata("IsTrimmable", "True")]
 #endif
-#endif
 
 // Making these assembly's internals visible to the internal Test assembly
 [assembly: System.Runtime.CompilerServices.InternalsVisibleTo("Microsoft.ML.OnnxRuntime.Tests.Common, PublicKey=002400000480000094000000060200000024000052534131000400000100010059013e94e4bc70136ca4c35f33acd6b62974536b698f9c7a21cee18d805c7ad860ad9eebfdc47a96ba2f8d03f4cf1c36b9d30787e276c7b9833b5bf2a6eba7e919e6b90083078a352262aed1d842e5f70a3085cbcf4c56ae851b161137920961c23fcc246598d61d258ccc615c927b2441359eea666a99ce1c3c07dca18fb0e1")]
 [assembly: System.Runtime.CompilerServices.InternalsVisibleTo("Microsoft.ML.OnnxRuntime.Tests.Droid, PublicKey=002400000480000094000000060200000024000052534131000400000100010059013e94e4bc70136ca4c35f33acd6b62974536b698f9c7a21cee18d805c7ad860ad9eebfdc47a96ba2f8d03f4cf1c36b9d30787e276c7b9833b5bf2a6eba7e919e6b90083078a352262aed1d842e5f70a3085cbcf4c56ae851b161137920961c23fcc246598d61d258ccc615c927b2441359eea666a99ce1c3c07dca18fb0e1")]
 [assembly: System.Runtime.CompilerServices.InternalsVisibleTo("Microsoft.ML.OnnxRuntime.Tests.iOS, PublicKey=002400000480000094000000060200000024000052534131000400000100010059013e94e4bc70136ca4c35f33acd6b62974536b698f9c7a21cee18d805c7ad860ad9eebfdc47a96ba2f8d03f4cf1c36b9d30787e276c7b9833b5bf2a6eba7e919e6b90083078a352262aed1d842e5f70a3085cbcf4c56ae851b161137920961c23fcc246598d61d258ccc615c927b2441359eea666a99ce1c3c07dca18fb0e1")]
-[assembly: System.Runtime.CompilerServices.InternalsVisibleTo("Microsoft.ML.OnnxRuntime.Tests.NetCoreApp, PublicKey=002400000480000094000000060200000024000052534131000400000100010059013e94e4bc70136ca4c35f33acd6b62974536b698f9c7a21cee18d805c7ad860ad9eebfdc47a96ba2f8d03f4cf1c36b9d30787e276c7b9833b5bf2a6eba7e919e6b90083078a352262aed1d842e5f70a3085cbcf4c56ae851b161137920961c23fcc246598d61d258ccc615c927b2441359eea666a99ce1c3c07dca18fb0e1")]
+[assembly: System.Runtime.CompilerServices.InternalsVisibleTo("Microsoft.ML.OnnxRuntime.Tests.NetCoreApp, PublicKey=002400000480000094000000060200000024000052534131000400000100010059013e94e4bc70136ca4c35f33acd6b62974536b698f9c7a21cee18d805c7ad860ad9eebfdc47a96ba2f8d03f4cf1c36b9d30787e276c7b9833b5bf2a6eba7e919e6b90083078a352262aed1d842e5f70a3085cbcf4c56ae851b161137920961c23fcc246598d61d258ccc615c927b2441359eea666a99ce1c3c07dca18fb0e1")]
@@ -19,7 +19,11 @@ using System.Runtime.InteropServices;
 namespace Microsoft.ML.OnnxRuntime
 {
     [StructLayout(LayoutKind.Sequential)]
+#if NETSTANDARD2_0
+    public class OrtApiBase
+#else
     public struct OrtApiBase
+#endif
     {
         public IntPtr GetApi;
         public IntPtr GetVersionString;
@@ -29,7 +33,11 @@ namespace Microsoft.ML.OnnxRuntime
     // OrtApi ort_api_1_to_<latest_version> (onnxruntime/core/session/onnxruntime_c_api.cc)
     // If syncing your new C API, any other C APIs before yours also need to be synced here if haven't
     [StructLayout(LayoutKind.Sequential)]
+#if NETSTANDARD2_0
+    public class OrtApi
+#else
     public struct OrtApi
+#endif
     {
         public IntPtr CreateStatus;
         public IntPtr GetErrorCode;
@@ -312,8 +320,13 @@ namespace Microsoft.ML.OnnxRuntime
     {
         static OrtApi api_;
 
+#if NETSTANDARD2_0
+        [UnmanagedFunctionPointer(CallingConvention.Winapi)]
+        public delegate IntPtr DOrtGetApi(UInt32 version);
+#else
         [UnmanagedFunctionPointer(CallingConvention.Winapi)]
         public delegate ref OrtApi DOrtGetApi(UInt32 version);
+#endif
 
         [UnmanagedFunctionPointer(CallingConvention.Winapi)]
         public delegate IntPtr DOrtGetVersionString();
@@ -322,11 +335,24 @@ namespace Microsoft.ML.OnnxRuntime
 
         static NativeMethods()
         {
+#if NETSTANDARD2_0
+            IntPtr ortApiBasePtr = OrtGetApiBase();
+            OrtApiBase ortApiBase = (OrtApiBase)Marshal.PtrToStructure(ortApiBasePtr, typeof(OrtApiBase));
+            DOrtGetApi OrtGetApi = (DOrtGetApi)Marshal.GetDelegateForFunctionPointer(ortApiBase.GetApi, typeof(DOrtGetApi));
+#else
             DOrtGetApi OrtGetApi = (DOrtGetApi)Marshal.GetDelegateForFunctionPointer(OrtGetApiBase().GetApi, typeof(DOrtGetApi));
+#endif
 
+            const uint ORT_API_VERSION = 14;
+#if NETSTANDARD2_0
+            IntPtr ortApiPtr = OrtGetApi(ORT_API_VERSION);
+            api_ = (OrtApi)Marshal.PtrToStructure(ortApiPtr, typeof(OrtApi));
+            OrtGetVersionString = (DOrtGetVersionString)Marshal.GetDelegateForFunctionPointer(ortApiBase.GetVersionString, typeof(DOrtGetVersionString));
+#else
             // TODO: Make this save the pointer, and not copy the whole structure across
-            api_ = (OrtApi)OrtGetApi(
+            api_ = (OrtApi)OrtGetApi(ORT_API_VERSION);
             OrtGetVersionString = (DOrtGetVersionString)Marshal.GetDelegateForFunctionPointer(OrtGetApiBase().GetVersionString, typeof(DOrtGetVersionString));
+#endif
 
             OrtCreateEnv = (DOrtCreateEnv)Marshal.GetDelegateForFunctionPointer(api_.CreateEnv, typeof(DOrtCreateEnv));
             OrtCreateEnvWithCustomLogger = (DOrtCreateEnvWithCustomLogger)Marshal.GetDelegateForFunctionPointer(api_.CreateEnvWithCustomLogger, typeof(DOrtCreateEnvWithCustomLogger));
@@ -374,6 +400,7 @@ namespace Microsoft.ML.OnnxRuntime
             OrtDisableMemPattern = (DOrtDisableMemPattern)Marshal.GetDelegateForFunctionPointer(api_.DisableMemPattern, typeof(DOrtDisableMemPattern));
             OrtEnableCpuMemArena = (DOrtEnableCpuMemArena)Marshal.GetDelegateForFunctionPointer(api_.EnableCpuMemArena, typeof(DOrtEnableCpuMemArena));
             OrtDisableCpuMemArena = (DOrtDisableCpuMemArena)Marshal.GetDelegateForFunctionPointer(api_.DisableCpuMemArena, typeof(DOrtDisableCpuMemArena));
+            OrtDisablePerSessionThreads = (DOrtDisablePerSessionThreads)Marshal.GetDelegateForFunctionPointer(api_.DisablePerSessionThreads, typeof(DOrtDisablePerSessionThreads));
             OrtSetSessionLogId = (DOrtSetSessionLogId)Marshal.GetDelegateForFunctionPointer(api_.SetSessionLogId, typeof(DOrtSetSessionLogId));
             OrtSetSessionLogVerbosityLevel = (DOrtSetSessionLogVerbosityLevel)Marshal.GetDelegateForFunctionPointer(api_.SetSessionLogVerbosityLevel, typeof(DOrtSetSessionLogVerbosityLevel));
             OrtSetSessionLogSeverityLevel = (DOrtSetSessionLogSeverityLevel)Marshal.GetDelegateForFunctionPointer(api_.SetSessionLogSeverityLevel, typeof(DOrtSetSessionLogSeverityLevel));
@@ -541,7 +568,11 @@ namespace Microsoft.ML.OnnxRuntime
         }
 
         [DllImport(NativeLib.DllName, CharSet = CharSet.Ansi)]
+#if NETSTANDARD2_0
+        public static extern IntPtr OrtGetApiBase();
+#else
         public static extern ref OrtApiBase OrtGetApiBase();
+#endif
 
         #region Runtime / Environment API
 
@@ -1004,6 +1035,10 @@ namespace Microsoft.ML.OnnxRuntime
         public delegate IntPtr /*(OrtStatus*)*/ DOrtDisableCpuMemArena(IntPtr /* OrtSessionOptions* */ options);
         public static DOrtDisableCpuMemArena OrtDisableCpuMemArena;
 
+        [UnmanagedFunctionPointer(CallingConvention.Winapi)]
+        public delegate IntPtr /*(OrtStatus*)*/ DOrtDisablePerSessionThreads(IntPtr /* OrtSessionOptions* */ options);
+        public static DOrtDisablePerSessionThreads OrtDisablePerSessionThreads;
+
         [UnmanagedFunctionPointer(CallingConvention.Winapi)]
         public delegate IntPtr /*(OrtStatus*)*/ DOrtSetSessionLogId(IntPtr /* OrtSessionOptions* */ options, byte[] /* const char* */ logId);
         public static DOrtSetSessionLogId OrtSetSessionLogId;
@@ -1334,7 +1369,7 @@ namespace Microsoft.ML.OnnxRuntime
             OrtAllocatorType allocatorType,
             int identifier,
             OrtMemType memType,
-            out IntPtr /*(OrtMemoryInfo*)*/ allocatorInfo // memory ownership
+            out IntPtr /*(OrtMemoryInfo*)*/ allocatorInfo // memory ownership transferred to caller
         );
 
         public static DOrtCreateMemoryInfo OrtCreateMemoryInfo;
@@ -22,7 +22,7 @@ namespace Microsoft.ML.OnnxRuntime
         ONNX_TYPE_MAP = 3,          // It's a map
         ONNX_TYPE_OPAQUE = 4,       // It's an experimental Opaque object
         ONNX_TYPE_SPARSETENSOR = 5, // It's a Sparse Tensor
-        ONNX_TYPE_OPTIONAL = 6,     // It's an optional type that designates anything above (except
+        ONNX_TYPE_OPTIONAL = 6,     // It's an optional type that designates anything above (except UNKNOWN)
     }
 
     /// <summary>
@@ -31,7 +31,7 @@ namespace Microsoft.ML.OnnxRuntime
     /// The class implements IDisposable and must
     /// be disposed of, otherwise native resources will leak
     /// and will eventually cause the application to slow down or crash.
-    ///
+    ///
     /// If the OrtValue instance is constructed over a managed memory, and it is not
     /// disposed properly, the pinned memory will continue to be pinned and interfere
     /// with GC operation.
@@ -72,7 +72,7 @@ namespace Microsoft.ML.OnnxRuntime
         /// Constructor. The newly constructed OrtValue takes ownership of the native OrtValue instance
         /// and disposes of it when the OrtValue instance is disposed. The instance will take ownership and will
         /// dispose of compositeMembers instances.
-        ///
+        ///
         /// This constructor can only throw if OnnxType is not specified.
         /// </summary>
         /// <param name="handle">native ortValue handle</param>
@@ -189,10 +189,10 @@ namespace Microsoft.ML.OnnxRuntime
         /// <summary>
         /// Returns a ReadOnlySpan<typeparamref name="T"/> over tensor native buffer that
         /// provides a read-only view.
-        ///
+        ///
         /// Note, that the memory may be device allocated and, therefore, not accessible from the CPU.
         /// To get memory descriptor use GetTensorMemoryInfo().
-        ///
+        ///
         /// OrtValue must contain a non-string tensor.
         /// The span is valid as long as the OrtValue instance is alive (not disposed).
         /// </summary>
@@ -210,10 +210,10 @@ namespace Microsoft.ML.OnnxRuntime
         /// This enables you to safely and efficiently modify the underlying
         /// native buffer in a type-safe manner. This is useful for example in IOBinding scenarios
         /// where you want to modify results of the inference and feed it back as input.
-        ///
+        ///
         /// Note, that the memory may be device allocated.
         /// To get memory descriptor use GetTensorMemoryInfo().
-        ///
+        ///
         /// OrtValue must contain a non-string tensor.
         /// The span is valid as long as the OrtValue instance is alive (not disposed).
         /// </summary>
@@ -237,7 +237,7 @@ namespace Microsoft.ML.OnnxRuntime
         /// <summary>
         /// Fetch string tensor element buffer pointer at the specified index,
         /// convert/copy to UTF-16 char[] and return a ReadOnlyMemory<char> instance.
-        ///
+        ///
         /// Obtain TensorTypeAndShape to get shape and element count.
         /// </summary>
         /// <param name="index">flat string tensor element index</param>
@@ -256,7 +256,7 @@ namespace Microsoft.ML.OnnxRuntime
         /// <summary>
         /// Fetch string tensor element buffer pointer at the specified index,
         /// copy/convert UTF-8 into a UTF-16 string and return it.
-        ///
+        ///
         /// Obtain TensorTypeAndShape to get shape and element count.
         /// </summary>
         /// <param name="index">flat string tensor element index</param>
@@ -279,7 +279,7 @@ namespace Microsoft.ML.OnnxRuntime
         /// <summary>
         /// Get a span over the native memory of the string tensor element.
         /// The span is valid as long as the OrtValue is valid.
-        ///
+        ///
         /// This is useful if you want to perform your own UTF-8 decoding or
         /// you do not care about decoding.
         /// Obtain TensorTypeAndShape to get shape and element count.
@@ -483,7 +483,7 @@ namespace Microsoft.ML.OnnxRuntime
         /// This can be a piece of arbitrary memory that may be allocated by OrtAllocator (possibly on a device),
         /// a chunk of managed memory (must be pinned for the duration of OrtValue lifetime) or a memory that is allocated
         /// natively allocated using Marshal.AllocHGlobal(), stackalloc or other means (may be on a device).
-        ///
+        ///
         /// The resulting OrtValue does not own the underlying memory buffer and will not attempt to
         /// deallocate it. The caller must make sure that the memory remains valid for the duration of OrtValue lifetime.
         /// </summary>
@@ -769,12 +769,12 @@ namespace Microsoft.ML.OnnxRuntime
         /// Converts the string argument represented by ReadOnlySpan to UTF-8,
         /// allocates space in the native tensor and copies it into the native tensor memory.
         /// Typically, this is used to populate a new empty string tensor element.
-        ///
+        ///
         /// The number of elements is according to the shape supplied to CreateTensorWithEmptyStrings().
         /// However, this API can also be used to overwrite any existing element within the string tensor.
-        ///
+        ///
         /// In general, to obtain the number of elements for any tensor, use GetTensorTypeAndShape() which
-        /// would return a disposable instance of TensorTypeAndShapeInfo.
+        /// would return a disposable instance of TensorTypeAndShapeInfo.
         /// Then call GetElementCount() or GetShape().
         /// </summary>
         /// <param name="str">ReadOnlySpan over chars</param>
@@ -795,12 +795,12 @@ namespace Microsoft.ML.OnnxRuntime
         /// Converts the string argument represented by ReadOnlyMemory to UTF-8,
         /// allocates space in the native tensor and copies it into the native tensor memory.
         /// Typically, this is used to populate a new empty string tensor element.
-        ///
+        ///
         /// The number of elements is according to the shape supplied to CreateTensorWithEmptyStrings().
         /// However, this API can also be used to overwrite any existing element within the string tensor.
-        ///
+        ///
         /// In general, to obtain the number of elements for any tensor, use GetTensorTypeAndShape() which
-        /// would return a disposable instance of TensorTypeAndShapeInfo.
+        /// would return a disposable instance of TensorTypeAndShapeInfo.
         /// Then call GetElementCount() or GetShape().
         ///
         /// </summary>
@@ -815,7 +815,7 @@ namespace Microsoft.ML.OnnxRuntime
         /// <summary>
         /// This API resizes String Tensor element to the requested amount of bytes (UTF-8)
         /// and copies the bytes from the supplied ReadOnlySpan into the native tensor memory (resized buffer).
-        ///
+        ///
         /// The API is useful for quick loading of utf8 data into the native tensor memory.
         /// </summary>
         /// <param name="utf8Bytes">read only span of bytes</param>
@@ -841,7 +841,7 @@ namespace Microsoft.ML.OnnxRuntime
         /// Creates an OrtValue that contains a string tensor.
         /// String tensors are always allocated on CPU.
         /// String data will be converted to UTF-8 and copied to native memory.
-        ///
+        ///
         /// Note, this is different from creating an OrtValue from other primitive data types
         /// where memory is pinned (if necessary) and the OrtValue points to that chunk of memory.
         /// </summary>
@@ -885,10 +885,10 @@ namespace Microsoft.ML.OnnxRuntime
         /// Creates a sequence of OrtValues from a collection of OrtValues.
         /// All OrtValues in the collection must be of the same Onnx type.
         /// I.e. (Tensor, SparseTensor, Map, Sequence, etc.)
-        ///
+        ///
         /// The ortValues that are passed as argument are taken possession of by the newly
         /// created OrtValue. The caller should not dispose them, unless this call fails.
-        ///
+        ///
         /// The ortValues would be empty on successful return.
         /// </summary>
         /// <param name="ortValues">a collection of OrtValues. On success the ortValues contained in the list
@@ -978,24 +978,24 @@ namespace Microsoft.ML.OnnxRuntime
         /// Creates a map OrtValue with keys and values.
         /// On a high level the Onnxruntime representation of the map always consists of two
         /// OrtValues, keys and values.
-        ///
+        ///
         /// According to ONNX standard map keys can be unmanaged types only (or strings).
         /// Those keys are contained in a single tensor within OrtValue keys.
-        ///
+        ///
         /// Map values, on the other hand, can be composite types. The values parameter
         /// can either contain a single tensor with unmanaged map values with the same number of
         /// elements as the keys, or it can be a sequence of OrtValues,
         /// each of those can be a composite type (tensor, sequence, map). If it is a sequence,
         /// then the number of elements must match the number of elements in keys.
-        ///
+        ///
         /// Keys and values must be in the same order.
-        ///
+        ///
         /// ORT supports only a subset of types for keys and values, however, this API does not
         /// restrict it.
-        ///
+        ///
         /// The ortValues that are passed as argument are taken possession of by the newly
         /// created OrtValue. The caller should not dispose them, unless this call fails.
-        ///
+        ///
         /// Keys and values arguments will be set to null on success.
         /// </summary>
         /// <param name="keys">Contains keys</param>
@@ -1031,10 +1031,10 @@ namespace Microsoft.ML.OnnxRuntime
         /// This API helps to quickly creates a map OrtValue with unmanaged (primitive) keys and values specified as arrays.
         /// This helps the user not to create OrtValues for keys and values separately and deal only with the final result.
         /// The map would consist of two tensors, one for keys and one for values.
-        ///
+        ///
         /// The OrtValues would be created on top of the managed memory arrays and use it directly.
         /// The number of elements in keys and values must be the same and they must be in order.
-        ///
+        ///
         /// The types must be unmanaged.
         /// </summary>
         /// <typeparam name="K">keys type</typeparam>
@@ -1078,10 +1078,10 @@ namespace Microsoft.ML.OnnxRuntime
         /// This helps the user not to create OrtValues for keys and values separately.
         /// The number of elements in keys and values must be the same and they must be in order.
         /// The map would consist of two tensors, one for keys and one for values.
-        ///
+        ///
         /// string keys would be converted to UTF-8 encoding and copied to an allocated native memory.
         /// The OrtValue for values would be created on top of the managed memory using it directly.
-        ///
+        ///
         /// The values type must be unmanaged.
         /// </summary>
         /// <typeparam name="V"></typeparam>
@@ -1128,13 +1128,13 @@ namespace Microsoft.ML.OnnxRuntime
 
         /// <summary>
         /// Creates a map OrtValue with non-string keys and string values.
-        ///
+        ///
         /// This helps the user not to create OrtValues for keys and values separately.
         /// The number of elements in keys and values must be the same and they must be in order.
-        ///
+        ///
         /// The OrtValue for keys would be created on top of the managed memory using it directly.
         /// string values would be converted to UTF-8 encoding and copied to an allocated native memory.
-        ///
+        ///
         /// </summary>
         /// <typeparam name="K">unmanaged type of keys</typeparam>
         /// <param name="keys"></param>
@@ -1182,17 +1182,17 @@ namespace Microsoft.ML.OnnxRuntime
         /// Typically, when one uses GetValue() API, it creates a copy of OrtValue
         /// that points to the same buffer as keys or values. This API helps to deal with those
         /// temporary instances and avoid leaks.
-        ///
+        ///
         /// According to ONNX standard map keys can be unmanaged types only (or strings).
         /// Those keys are contained in a single tensor within OrtValue keys. So you can query those
        /// directly from keys argument.
-        ///
+        ///
         /// Map values, on the other hand, can be composite types. The values parameter
         /// can either contain a single tensor with unmanaged map values with the same number of
         /// elements as the keys, or it can be a sequence of OrtValues,
         /// each of those can be a composite type (tensor, sequence, map). If it is a sequence,
         /// then the number of elements must match the number of elements in keys.
-        ///
+        ///
         /// Depending on the structure of the values, one will either directly query a single tensor
         /// from values, or will have to iterate over the sequence of OrtValues and visit each of those
         /// resulting in a recursive visitation.
@@ -1204,7 +1204,7 @@ namespace Microsoft.ML.OnnxRuntime
         /// <summary>
         /// This API helps the user to process a map OrtValue without
         /// having to deal with the lifespan of intermediate OrtValues.
-        ///
+        ///
         /// each API value is fed to the vistor functor.
         /// </summary>
         /// <param name="visitor">visitor function</param>
@@ -519,7 +519,12 @@ namespace Microsoft.ML.OnnxRuntime
         {
             try
             {
+#if NETSTANDARD2_0
+                var ortApiBasePtr = NativeMethods.OrtGetApiBase();
+                var ortApiBase = (OrtApiBase)Marshal.PtrToStructure(ortApiBasePtr, typeof(OrtApiBase));
+#else
                 var ortApiBase = NativeMethods.OrtGetApiBase();
+#endif
                 NativeApiStatus.VerifySuccess(
                     OrtExtensionsNativeMethods.RegisterCustomOps(this.handle, ref ortApiBase)
                 );
@@ -708,6 +713,15 @@ namespace Microsoft.ML.OnnxRuntime
         }
         private bool _enableCpuMemArena = true;
 
+        /// <summary>
+        /// Disables the per session threads. Default is true.
+        /// This makes all sessions in the process use a global TP.
+        /// </summary>
+        public void DisablePerSessionThreads()
+        {
+            NativeApiStatus.VerifySuccess(NativeMethods.OrtDisablePerSessionThreads(handle));
+        }
+
         /// <summary>
         /// Log Id to be used for the session. Default is empty string.
         /// </summary>
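A hedged usage sketch of the new SessionOptions.DisablePerSessionThreads() shown above; it assumes the process-wide environment provides a global thread pool for sessions to share, otherwise session creation may report an error:

```csharp
// Hedged sketch: opts a session out of its per-session thread pools so it
// uses the process-wide (global) thread pool instead. Model path is illustrative.
using Microsoft.ML.OnnxRuntime;

public static class SharedThreadPoolExample
{
    public static InferenceSession Load(string modelPath)
    {
        var options = new SessionOptions();
        options.DisablePerSessionThreads(); // all sessions share the global thread pool
        return new InferenceSession(modelPath, options);
    }
}
```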
@@ -53,8 +53,14 @@ namespace Microsoft.ML.OnnxRuntime
         static OrtTrainingApi trainingApi_;
         static IntPtr trainingApiPtr;
 
+#if NETSTANDARD2_0
+        [UnmanagedFunctionPointer(CallingConvention.Winapi)]
+        public delegate IntPtr DOrtGetApi(UInt32 version);
+#else
         [UnmanagedFunctionPointer(CallingConvention.Winapi)]
         public delegate ref OrtApi DOrtGetApi(UInt32 version);
+#endif
+
 
         [UnmanagedFunctionPointer(CallingConvention.Winapi)]
         public delegate IntPtr /* OrtTrainingApi* */ DOrtGetTrainingApi(UInt32 version);
@@ -62,13 +68,25 @@ namespace Microsoft.ML.OnnxRuntime
 
         static NativeTrainingMethods()
         {
+#if NETSTANDARD2_0
+            IntPtr ortApiBasePtr = NativeMethods.OrtGetApiBase();
+            OrtApiBase ortApiBase = (OrtApiBase)Marshal.PtrToStructure(ortApiBasePtr, typeof(OrtApiBase));
+            DOrtGetApi OrtGetApi = (DOrtGetApi)Marshal.GetDelegateForFunctionPointer(ortApiBase.GetApi, typeof(DOrtGetApi));
+#else
             DOrtGetApi OrtGetApi = (DOrtGetApi)Marshal.GetDelegateForFunctionPointer(NativeMethods.OrtGetApiBase().GetApi, typeof(DOrtGetApi));
+#endif
 
+            const uint ORT_API_VERSION = 19;
+#if NETSTANDARD2_0
+            IntPtr ortApiPtr = OrtGetApi(ORT_API_VERSION);
+            api_ = (OrtApi)Marshal.PtrToStructure(ortApiPtr, typeof(OrtApi));
+#else
             // TODO: Make this save the pointer, and not copy the whole structure across
-            api_ = (OrtApi)OrtGetApi(
+            api_ = (OrtApi)OrtGetApi(ORT_API_VERSION);
+#endif
 
             OrtGetTrainingApi = (DOrtGetTrainingApi)Marshal.GetDelegateForFunctionPointer(api_.GetTrainingApi, typeof(DOrtGetTrainingApi));
-            trainingApiPtr = OrtGetTrainingApi(
+            trainingApiPtr = OrtGetTrainingApi(ORT_API_VERSION);
             if (trainingApiPtr != IntPtr.Zero)
             {
                 trainingApi_ = (OrtTrainingApi)Marshal.PtrToStructure(trainingApiPtr, typeof(OrtTrainingApi));