onnxruntime_extensions-0.14.0-cp313-cp313-macosx_11_0_universal2.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. onnxruntime_extensions/__init__.py +82 -0
  2. onnxruntime_extensions/_cuops.py +564 -0
  3. onnxruntime_extensions/_extensions_pydll.cpython-313-darwin.so +0 -0
  4. onnxruntime_extensions/_extensions_pydll.pyi +45 -0
  5. onnxruntime_extensions/_hf_cvt.py +331 -0
  6. onnxruntime_extensions/_ocos.py +133 -0
  7. onnxruntime_extensions/_ortapi2.py +274 -0
  8. onnxruntime_extensions/_torch_cvt.py +231 -0
  9. onnxruntime_extensions/_version.py +2 -0
  10. onnxruntime_extensions/cmd.py +66 -0
  11. onnxruntime_extensions/cvt.py +306 -0
  12. onnxruntime_extensions/onnxprocess/__init__.py +12 -0
  13. onnxruntime_extensions/onnxprocess/_builder.py +53 -0
  14. onnxruntime_extensions/onnxprocess/_onnx_ops.py +1507 -0
  15. onnxruntime_extensions/onnxprocess/_session.py +355 -0
  16. onnxruntime_extensions/onnxprocess/_tensor.py +628 -0
  17. onnxruntime_extensions/onnxprocess/torch_wrapper.py +31 -0
  18. onnxruntime_extensions/pnp/__init__.py +13 -0
  19. onnxruntime_extensions/pnp/_base.py +124 -0
  20. onnxruntime_extensions/pnp/_imagenet.py +65 -0
  21. onnxruntime_extensions/pnp/_nlp.py +148 -0
  22. onnxruntime_extensions/pnp/_onnx_ops.py +1544 -0
  23. onnxruntime_extensions/pnp/_torchext.py +310 -0
  24. onnxruntime_extensions/pnp/_unifier.py +45 -0
  25. onnxruntime_extensions/pnp/_utils.py +302 -0
  26. onnxruntime_extensions/pp_api.py +83 -0
  27. onnxruntime_extensions/tools/__init__.py +0 -0
  28. onnxruntime_extensions/tools/add_HuggingFace_CLIPImageProcessor_to_model.py +171 -0
  29. onnxruntime_extensions/tools/add_pre_post_processing_to_model.py +535 -0
  30. onnxruntime_extensions/tools/pre_post_processing/__init__.py +4 -0
  31. onnxruntime_extensions/tools/pre_post_processing/pre_post_processor.py +395 -0
  32. onnxruntime_extensions/tools/pre_post_processing/step.py +227 -0
  33. onnxruntime_extensions/tools/pre_post_processing/steps/__init__.py +6 -0
  34. onnxruntime_extensions/tools/pre_post_processing/steps/general.py +366 -0
  35. onnxruntime_extensions/tools/pre_post_processing/steps/nlp.py +344 -0
  36. onnxruntime_extensions/tools/pre_post_processing/steps/vision.py +1157 -0
  37. onnxruntime_extensions/tools/pre_post_processing/utils.py +139 -0
  38. onnxruntime_extensions/util.py +186 -0
  39. onnxruntime_extensions-0.14.0.dist-info/LICENSE +21 -0
  40. onnxruntime_extensions-0.14.0.dist-info/METADATA +102 -0
  41. onnxruntime_extensions-0.14.0.dist-info/RECORD +43 -0
  42. onnxruntime_extensions-0.14.0.dist-info/WHEEL +6 -0
  43. onnxruntime_extensions-0.14.0.dist-info/top_level.txt +1 -0
onnxruntime_extensions/tools/pre_post_processing/utils.py
@@ -0,0 +1,139 @@
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # Licensed under the MIT License.
+
+ import onnx
+
+ from dataclasses import dataclass
+ from typing import List, Union
+
+
+ def create_named_value(name: str, data_type: int, shape: List[Union[str, int]]):
+     """
+     Helper to create a new model input.
+
+     Args:
+         name: Name for input. Must not already be in use in the model being updated.
+         data_type: onnx.TensorProto data type. e.g. onnx.TensorProto.FLOAT, onnx.TensorProto.UINT8
+         shape: Input shape. Use int for dimensions with known values and strings for symbolic dimensions.
+                e.g. ['batch_size', 256, 256] would be a rank 3 tensor with a symbolic first dimension
+                named 'batch_size'.
+
+     Returns:
+         An onnx.ValueInfoProto that can be used as a new model input.
+     """
+     tensor_type = onnx.helper.make_tensor_type_proto(elem_type=data_type, shape=shape)
+     return onnx.helper.make_value_info(name, tensor_type)
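For illustration, a minimal usage sketch (the input name and shape below are hypothetical, not taken from the package):

```python
import onnx

# Hypothetical input: a batch of 256x256 RGB images with a symbolic batch dimension.
image_input = create_named_value("image", onnx.TensorProto.UINT8, ["batch_size", 256, 256, 3])
print(image_input.name)  # "image"; the result is an onnx.ValueInfoProto
```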
+
+
+ # Create an ONNX checker context that includes the ort-extensions domain so custom ops don't cause failures.
+ def create_custom_op_checker_context(onnx_opset: int):
+     """
+     Create an ONNX checker context that includes the ort-extensions custom op domains so that custom ops
+     don't cause failures when running onnx.checker.check_graph.
+
+     Args:
+         onnx_opset: ONNX opset to use in the checker context.
+
+     Returns:
+         ONNX checker context.
+     """
+     context = onnx.checker.C.CheckerContext()
+     context.ir_version = onnx.checker.DEFAULT_CONTEXT.ir_version
+     context.opset_imports = {"": onnx_opset, "com.microsoft.extensions": 1}
+
+     return context
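A minimal sketch of using the returned context, assuming `my_graph` is an onnx.GraphProto that contains ort-extensions custom ops:

```python
import onnx

ctx = create_custom_op_checker_context(onnx_opset=18)
# onnx.checker.check_graph accepts a checker context as its second argument, so
# nodes in the registered custom op domains validate instead of failing as unknown ops.
onnx.checker.check_graph(my_graph, ctx)
```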
+
+
+ # The ONNX graph parser has its own map of type names
+ # https://github.com/onnx/onnx/blob/604af9cb28f63a6b9924237dcb91530649233db9/onnx/defs/parser.h#L72
+ TENSOR_TYPE_TO_ONNX_TYPE = {
+     int(onnx.TensorProto.FLOAT): "float",
+     int(onnx.TensorProto.UINT8): "uint8",
+     int(onnx.TensorProto.INT8): "int8",
+     int(onnx.TensorProto.UINT16): "uint16",
+     int(onnx.TensorProto.INT16): "int16",
+     int(onnx.TensorProto.INT32): "int32",
+     int(onnx.TensorProto.INT64): "int64",
+     int(onnx.TensorProto.STRING): "string",
+     int(onnx.TensorProto.BOOL): "bool",
+     int(onnx.TensorProto.FLOAT16): "float16",
+     int(onnx.TensorProto.DOUBLE): "double",
+     int(onnx.TensorProto.UINT32): "uint32",
+     int(onnx.TensorProto.UINT64): "uint64",
+     int(onnx.TensorProto.COMPLEX64): "complex64",
+     int(onnx.TensorProto.COMPLEX128): "complex128",
+     int(onnx.TensorProto.BFLOAT16): "bfloat16",
+ }
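For context, these lowercase names are what the ONNX textual format expects in type annotations. A small sketch using onnx.parser (the graph itself is illustrative):

```python
import onnx.parser

# The textual graph format spells types with the parser's names ("float", "int64", ...).
graph = onnx.parser.parse_graph("""
    agraph (float[N] x) => (float[N] y) {
        y = Relu(x)
    }
""")
```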
+
+
+ @dataclass
+ class IoMapEntry:
+     """Entry mapping an output index of a producer step to an input index of a consumer step."""
+
+     # Optional producer.
+     # Used directly if a Step is provided.
+     # If a str with a previous Step's name is provided, the PrePostProcessor will find the relevant Step.
+     # If neither is provided, the producer is inferred to be the immediately preceding Step in the pipeline.
+     producer: Union["Step", str] = None
+     # output index of the producer step
+     producer_idx: int = 0
+     # input index of the consumer step
+     consumer_idx: int = 0
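A small sketch of constructing an entry (the producer step name "Resize" is hypothetical):

```python
# Route output 0 of the earlier step named "Resize" to input 1 of the consuming step;
# the PrePostProcessor resolves the name to the actual Step instance.
entry = IoMapEntry(producer="Resize", producer_idx=0, consumer_idx=1)
```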
+
+
+ @dataclass
+ class IOEntryValuePreserver:
+     """
+     Allows an output value to have multiple consumers, which is only possible when an IoMapEntry is used
+     to create the additional connections.
+
+     Generally, a connection consumes an output and an input, and the output is then removed from the graph.
+     This class enables one-to-many connections by making the other consumers share the same output.
+
+     How this class works:
+       1. When the IoMapEntry is created, this class is created at the same time.
+       2. It records the producer and consumer steps, and the output index of the producer step.
+          When the producer step runs, this IOEntryValuePreserver is activated and starts preserving the output.
+       3. When a graph merge happens, this class checks whether the output is still in the graph and, if not,
+          adds the output back.
+       4. When the consumer step runs, this class is deactivated and the output is removed from the preserved list.
+     """
+
+     producer: Union["Step", str] = None
+     consumer: Union["Step", str] = None
+     # output index of the producer step
+     producer_idx: int = 0
+     is_active: bool = False
+     output: str = None
+
+
+ def sanitize_output_names(graph: onnx.GraphProto):
+     """
+     Convert any usage of invalid characters like '/' and ';' in value names to '_'.
+     This is common in models exported from TensorFlow [Lite].
+
+     ONNX parse_graph does not allow those characters in a value name, and technically they violate the
+     ONNX spec as per https://github.com/onnx/onnx/blob/main/docs/IR.md#names-within-a-graph
+
+     We do this for the original graph outputs only. The invalid naming has not been seen in model inputs,
+     and we can leave the internals of the graph intact to minimize changes.
+
+     Args:
+         graph: Graph to check, updating any invalid names.
+     """
+
+     bad_output_names = [o.name for o in graph.output if "/" in o.name or ";" in o.name]
+     if not bad_output_names:
+         return graph
+
+     renames = {}
+     for n in bad_output_names:
+         renames[n] = n.replace("/", "_").replace(";", "_")
+
+     for o in graph.output:
+         if o.name in bad_output_names:
+             # Add an Identity node to rename the output, and update the name in graph.output
+             rename = onnx.helper.make_node("Identity", [o.name], [renames[o.name]], f"Rename {o.name}")
+             graph.node.append(rename)
+             o.name = renames[o.name]
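A usage sketch, assuming a TensorFlow-exported model whose output names contain '/' or ';' (file paths are hypothetical):

```python
import onnx

model = onnx.load("tf_exported_model.onnx")  # hypothetical input path
sanitize_output_names(model.graph)           # renames outputs in place via Identity nodes
onnx.save(model, "sanitized_model.onnx")     # hypothetical output path
```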
onnxruntime_extensions/util.py
@@ -0,0 +1,186 @@
+ # Copyright (c) Microsoft Corporation.
+ # Licensed under the MIT License.
+
+ """
+ util.py: Miscellaneous utility functions
+ """
+
+ import onnx
+ import pathlib
+ import inspect
+
+ import numpy as np
+
+
+ # some utility functions for testing and tools
+ def get_test_data_file(*sub_dirs):
+     case_file = inspect.currentframe().f_back.f_code.co_filename
+     test_dir = pathlib.Path(case_file).parent
+     return str(test_dir.joinpath(*sub_dirs).resolve())
+
+
+ def read_file(path, mode='r'):
+     with open(str(path), mode) as file_content:
+         return file_content.read()
+
+
+ def mel_filterbank(
+         n_fft: int, n_mels: int = 80, sr=16000, min_mel=0, max_mel=45.245640471924965, dtype=np.float32):
+     """
+     Compute a Mel filterbank. The filters are stored in the rows and the columns correspond to FFT
+     frequency bins; the filterbank uses Slaney-normalized mel scaling.
+     """
+     fbank = np.zeros((n_mels, n_fft // 2 + 1), dtype=dtype)
+
+     # the centers of the frequency bins for the DFT
+     freq_bins = np.fft.rfftfreq(n=n_fft, d=1.0 / sr)
+
+     mel = np.linspace(min_mel, max_mel, n_mels + 2)
+     # Fill in the linear scale
+     f_min = 0.0
+     f_sp = 200.0 / 3
+     freqs = f_min + f_sp * mel
+
+     # And now the nonlinear scale
+     min_log_hz = 1000.0  # beginning of log region (Hz)
+     min_log_mel = (min_log_hz - f_min) / f_sp  # same (Mels)
+     logstep = np.log(6.4) / 27.0  # step size for log region
+
+     log_t = mel >= min_log_mel
+     freqs[log_t] = min_log_hz * np.exp(logstep * (mel[log_t] - min_log_mel))
+     mel_bins = freqs
+
+     mel_spacing = np.diff(mel_bins)
+
+     ramps = mel_bins.reshape(-1, 1) - freq_bins.reshape(1, -1)
+     for i in range(n_mels):
+         left = -ramps[i] / mel_spacing[i]
+         right = ramps[i + 2] / mel_spacing[i + 1]
+
+         # intersect them with each other and zero
+         fbank[i] = np.maximum(0, np.minimum(left, right))
+
+     energy_norm = 2.0 / (mel_bins[2:n_mels + 2] - mel_bins[:n_mels])
+     fbank *= energy_norm[:, np.newaxis]
+     return fbank
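A minimal usage sketch; the spectrogram is random and serves only to illustrate shapes:

```python
import numpy as np

fbank = mel_filterbank(n_fft=400, n_mels=80)  # shape (80, 201), i.e. (n_mels, n_fft // 2 + 1)

# Apply to a magnitude spectrogram with 201 frequency bins and 100 frames.
spectrogram = np.abs(np.random.randn(201, 100)).astype(np.float32)
mel_spec = fbank @ spectrogram                # shape (80, 100)
```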
+
+
+ def remove_unused_constants(subgraph):
+     nodes = [_n for _n in subgraph.node]
+
+     # Find the names of all input tensors for all nodes in the subgraph
+     input_tensors = set()
+     for node in nodes:
+         for input_name in node.input:
+             input_tensors.add(input_name)
+
+     # Remove Constant nodes whose output is not used by any other node
+     nodes_to_remove = []
+     for node in nodes:
+         if node.op_type == 'Constant':
+             output_name = node.output[0]
+             if output_name not in input_tensors:
+                 nodes_to_remove.append(node)
+
+     for node in nodes_to_remove:
+         subgraph.node.remove(node)
+
+     # Recursively process subgraphs within this subgraph
+     for node in nodes:
+         for attr in node.attribute:
+             if attr.type == onnx.AttributeProto.GRAPH:
+                 remove_unused_constants(attr.g)
+             elif attr.type == onnx.AttributeProto.GRAPHS:
+                 for subgraph in attr.graphs:
+                     remove_unused_constants(subgraph)
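A small sketch, using onnx.parser to build a model with a dangling Constant (the textual syntax follows the ONNX parser; the model is illustrative):

```python
import onnx
import onnx.parser

model = onnx.parser.parse_model("""
    <ir_version: 8, opset_import: ["" : 18]>
    agraph (float[N] x) => (float[N] y) {
        unused = Constant <value_float = 1.0> ()
        y = Relu(x)
    }
""")
remove_unused_constants(model.graph)
print(len(model.graph.node))  # 1 -- only the Relu remains
```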
+
+
+ def remove_unused_initializers(subgraph, top_level_initializers=None):
+     if top_level_initializers is None:
+         top_level_initializers = []
+     remove_unused_constants(subgraph)
+     initializers = [_i for _i in subgraph.initializer]
+     nodes = subgraph.node
+
+     # Find the names of all input tensors for all nodes in the subgraph
+     input_tensors = set()
+     for node in nodes:
+         for input_name in node.input:
+             input_tensors.add(input_name)
+
+     # Combine top-level and current subgraph initializers
+     all_initializers = initializers + top_level_initializers
+
+     # Filter the initializers by checking if their names are in the list of used input tensors
+     used_initializers = [init for init in all_initializers if init.name in input_tensors]
+
+     # Update the subgraph's initializers
+     del subgraph.initializer[:]
+     subgraph.initializer.extend([init for init in used_initializers if init in initializers])
+
+     # Recursively process subgraphs within this subgraph
+     for node in nodes:
+         for attr in node.attribute:
+             if attr.type == onnx.AttributeProto.GRAPH:
+                 remove_unused_initializers(attr.g, top_level_initializers)
+             elif attr.type == onnx.AttributeProto.GRAPHS:
+                 for subgraph in attr.graphs:
+                     remove_unused_initializers(subgraph, top_level_initializers)
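A usage sketch, assuming a loaded onnx.ModelProto (file paths are hypothetical):

```python
import onnx

model = onnx.load("model_with_dead_weights.onnx")  # hypothetical path
# Drops initializers that no node consumes, recursing into If/Loop subgraphs;
# unused Constant nodes are removed first via remove_unused_constants.
remove_unused_initializers(model.graph)
onnx.save(model, "pruned_model.onnx")              # hypothetical path
```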
+
+
+ def quick_merge(*models, connection_indices=None):
+     """
+     Merge multiple ONNX models into a single model without performing any ONNX format checks.
+
+     Parameters:
+         *models (onnx.ModelProto): Varargs parameter representing the ONNX models to be merged.
+         connection_indices (List[List[int]], optional): A nested list specifying which outputs in one model
+             connect to which inputs in the next model, based on their indices. If not provided, the sequence
+             of outputs in one model is assumed to exactly match the sequence of inputs in the next model.
+
+     Returns:
+         merged_model (onnx.ModelProto): The merged ONNX model.
+
+     Raises:
+         ValueError: If there is any conflict in tensor names, either in initializers or in nodes (including
+             subgraphs), or any conflict in opset versions for the same domain.
+     """
+
+     merged_graph = models[0].graph
+
+     # Dictionary to store unique opsets
+     opset_imports = {opset.domain if opset.domain else "ai.onnx": opset for opset in models[0].opset_import}
+
+     # Iterate over all other models and merge
+     for model_idx, model in enumerate(models[1:], start=1):
+         if connection_indices is None:
+             io_map = [(out.name, in_.name)
+                       for out, in_ in zip(models[model_idx - 1].graph.output, model.graph.input)]
+         else:
+             io_map = [(models[model_idx - 1].graph.output[out_idx].name, model.graph.input[in_idx].name)
+                       for out_idx, in_idx in connection_indices[model_idx - 1]]
+
+         merged_graph = onnx.compose.merge_graphs(merged_graph, model.graph, io_map)
+
+         for opset in model.opset_import:
+             if not opset.domain:
+                 opset.domain = "ai.onnx"
+             if opset.domain in opset_imports and opset_imports[opset.domain].version != opset.version:
+                 raise ValueError(f"Conflict in opset versions for domain '{opset.domain}': "
+                                  f"model {model_idx} has version {opset.version}, while a previous model "
+                                  f"has version {opset_imports[opset.domain].version}.")
+             else:
+                 opset_imports[opset.domain] = opset
+
+     default_opset = opset_imports.pop("ai.onnx", None)
+     merged_model = onnx.helper.make_model_gen_version(merged_graph,
+                                                       opset_imports=[default_opset],
+                                                       producer_name='ONNX Model Merger')
+     merged_model.opset_import.extend(opset_imports.values())
+     return merged_model
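A hedged sketch of merging two toy models built with onnx.parser; with connection_indices omitted, the outputs of one model are zipped positionally onto the inputs of the next:

```python
import onnx.parser

m1 = onnx.parser.parse_model("""
    <ir_version: 8, opset_import: ["" : 18]>
    g1 (float[N] x) => (float[N] t) { t = Relu(x) }
""")
m2 = onnx.parser.parse_model("""
    <ir_version: 8, opset_import: ["" : 18]>
    g2 (float[M] u) => (float[M] y) { y = Neg(u) }
""")

# Output "t" of m1 is connected to input "u" of m2 by position.
merged = quick_merge(m1, m2)
```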
onnxruntime_extensions-0.14.0.dist-info/LICENSE
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) Microsoft Corporation.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE
onnxruntime_extensions-0.14.0.dist-info/METADATA
@@ -0,0 +1,102 @@
+ Metadata-Version: 2.2
+ Name: onnxruntime_extensions
+ Version: 0.14.0
+ Summary: ONNXRuntime Extensions
+ Home-page: https://github.com/microsoft/onnxruntime-extensions
+ Author: Microsoft Corporation
+ Author-email: onnxruntime@microsoft.com
+ License: MIT License
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Environment :: Console
+ Classifier: Intended Audience :: Developers
+ Classifier: Operating System :: MacOS :: MacOS X
+ Classifier: Operating System :: Microsoft :: Windows
+ Classifier: Operating System :: POSIX :: Linux
+ Classifier: Programming Language :: C++
+ Classifier: Programming Language :: Python
+ Classifier: Programming Language :: Python :: Implementation :: CPython
+ Classifier: License :: OSI Approved :: MIT License
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Dynamic: author
+ Dynamic: author-email
+ Dynamic: classifier
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: home-page
+ Dynamic: license
+ Dynamic: summary
+
+ # ONNXRuntime-Extensions
+
+ [![Build Status](https://dev.azure.com/onnxruntime/onnxruntime/_apis/build/status%2Fonnxruntime-extensions.CI?branchName=main)](https://dev.azure.com/onnxruntime/onnxruntime/_build/latest?definitionId=213&branchName=main)
+
+ ## What's ONNXRuntime-Extensions
+
+ ONNXRuntime-Extensions is a C/C++ library that extends the capabilities of ONNX models and ONNX Runtime inference through the ONNX Runtime custom operator ABIs. It includes a set of [ONNX Runtime Custom Operators](https://onnxruntime.ai/docs/reference/operators/add-custom-op.html) covering common pre- and post-processing for vision, text, and NLP models, and it supports multiple languages and platforms: Python on Windows/Linux/macOS, mobile platforms such as Android and iOS, and WebAssembly. The basic workflow is to first enhance an ONNX model and then run inference on it with ONNX Runtime plus the ONNXRuntime-Extensions package.
+
+
+ ## Quickstart
+ The library can be used as either a C/C++ library or as a package for higher-level languages such as Python, Java, and C#. To build it as a shared library, use the `build.bat` or `build.sh` script located in the root folder. The CMake build definition is available in the `CMakeLists.txt` file and can be modified by appending options to `build.bat` or `build.sh`, such as `build.bat -DOCOS_BUILD_SHARED_LIB=OFF`. For more details, please refer to the [C API documentation](./docs/c_api.md).
+
+ ### **Python installation**
+ ```bash
+ pip install onnxruntime-extensions
+ ```
+ A nightly build with the latest features is also available; please refer to [nightly build](./docs/development.md#nightly-build).
+
+
+ ## Usage
+
+ ## 1. Generation of Pre-/Post-Processing ONNX Model
+ The `onnxruntime-extensions` Python package provides a convenient way to generate the ONNX processing graph by converting Hugging Face transformer data-processing classes into the desired format. For more details, please refer to the API below:
+
+ ```python
+ help(onnxruntime_extensions.gen_processing_models)
+ ```
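As an illustration, a hedged sketch of converting a Hugging Face tokenizer; the model name and keyword arguments are illustrative, so check the help output above for the authoritative signature:

```python
from transformers import AutoTokenizer
from onnxruntime_extensions import gen_processing_models

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# Returns (pre-processing, post-processing) ONNX models; passing pre_kwargs
# requests the tokenization (pre-processing) graph.
pre_model, post_model = gen_processing_models(tokenizer, pre_kwargs={})
```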
+ ### NOTE:
+ Generating the processing models requires the **ONNX** package to be installed. The data-processing models generated in this manner can be merged with other models using [onnx.compose](https://onnx.ai/onnx/api/compose.html) if needed.
+
+ ## 2. Using Extensions for ONNX Runtime inference
+
+ ### Python
+ There are individual packages for the following languages; install the one that matches your environment.
+ ```python
+ import onnxruntime as _ort
+ from onnxruntime_extensions import get_library_path as _lib_path
+
+ so = _ort.SessionOptions()
+ so.register_custom_ops_library(_lib_path())
+
+ # Run the ONNX Runtime session as the ONNX Runtime docs suggest.
+ # sess = _ort.InferenceSession(model, so)
+ # sess.run(...)
+ ```
+ ### C++
+
+ ```c++
+ // Load the custom op library into the session options so the ONNX model with custom ops can be loaded.
+ Ort::ThrowOnError(Ort::GetApi().RegisterCustomOpsLibrary((OrtSessionOptions*)session_options, custom_op_library_filename, &handle));
+
+ // Create the session and run the model as usual.
+ Ort::Session session(env, model_uri, session_options);
+ RunSession(session, inputs, outputs);
+ ```
+ ### Java
+ ```java
+ var env = OrtEnvironment.getEnvironment();
+ var sess_opt = new OrtSession.SessionOptions();
+
+ /* Register the custom ops from onnxruntime-extensions */
+ sess_opt.registerCustomOpLibrary(OrtxPackage.getLibraryPath());
+ ```
+
+ ### C#
+ ```C#
+ SessionOptions options = new SessionOptions();
+ options.RegisterOrtExtensions();
+ var session = new InferenceSession(model, options);
+ ```
+
+
onnxruntime_extensions-0.14.0.dist-info/RECORD
@@ -0,0 +1,43 @@
+ onnxruntime_extensions/_cuops.py,sha256=BoyBYGsCmWqHy1jdCFXKqz2bI9InZjtfFiSSGgBdM9M,16098
+ onnxruntime_extensions/cmd.py,sha256=Zz94QMFfSBVK7uh8t1tJ7Tje6H8Ed6QZTbbzDPLEDxw,2362
+ onnxruntime_extensions/_extensions_pydll.cpython-313-darwin.so,sha256=5W_8StexPbrmLxZpBv9PrkaAjVzexhyP-51sHyNtKDk,7541184
+ onnxruntime_extensions/_torch_cvt.py,sha256=Bg6fPEYeOspOYIgNVB4e1tiIvP__1BijE6EZa5zkQM8,9954
+ onnxruntime_extensions/_version.py,sha256=gV0jajIhsEFulXzRHnb5qyjzK26g1T5Ges3p8hpC8Lc,74
+ onnxruntime_extensions/util.py,sha256=yXmlyHdUm8DK0HV-GQulgDIR0Jvvbb1kdjqvbVKj-SU,7208
+ onnxruntime_extensions/cvt.py,sha256=GJrkcqx91GTi_b_hIsMXxAA9Uh1t5zPuOIo0elG5CSw,13104
+ onnxruntime_extensions/pp_api.py,sha256=ldUstCJST0LhmZMdZKDPo44e8wXfELP9fXa5EUHqI4A,3074
+ onnxruntime_extensions/__init__.py,sha256=4eAhroTJ8B9AqY8RTq3iPMsqznuMP_U-e8hGx8Vu55o,2305
+ onnxruntime_extensions/_hf_cvt.py,sha256=3WnAAEC1kxoF_OnA3x4pNurTDdPwV80ArAaqgC3JLjs,15614
+ onnxruntime_extensions/_ortapi2.py,sha256=ebLMfrsI474Q6k4e6IxIi8N1z3i-lwXOGLHqZoMXuyE,9677
+ onnxruntime_extensions/_ocos.py,sha256=6RPGMhs-GXU9n-jvCYr9wLwPLBReaI7wk7CXBhvQTNY,4060
+ onnxruntime_extensions/_extensions_pydll.pyi,sha256=G_NAgjZlPEfIjOCOz_TevFq-RhmpuEoRh8qtzb-Wj5k,1025
+ onnxruntime_extensions/tools/add_HuggingFace_CLIPImageProcessor_to_model.py,sha256=XvVjgmf6eB8dCBxc2kxSOajlVuN5ZNIBSIGCNUIQq6g,6615
+ onnxruntime_extensions/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ onnxruntime_extensions/tools/add_pre_post_processing_to_model.py,sha256=IwDpJpX5iTIKKIgSLzk23sJGXSCstwUQ43c_7ClV1cU,23766
+ onnxruntime_extensions/tools/pre_post_processing/__init__.py,sha256=nRTIZoZpm991ayyeG4SGe5_Y4oWElLvrSeMnsnQhYBI,121
+ onnxruntime_extensions/tools/pre_post_processing/step.py,sha256=z72A62TjNVd2yn-KbPplLK-YEiJY1cztXWaOs62TAX8,9167
+ onnxruntime_extensions/tools/pre_post_processing/utils.py,sha256=BRj9MI3PMtWWxGCPC0_1sKDKtHRuSOUC34zecUblzm8,5559
+ onnxruntime_extensions/tools/pre_post_processing/pre_post_processor.py,sha256=d7UbC57F3VmQye1cMAuU-J6iz4JG_KgvLIfq9bV1XtY,19240
+ onnxruntime_extensions/tools/pre_post_processing/steps/vision.py,sha256=7OtLKZVou7K7VaiQJrdB6v-3_iuSKBuCZnAHQFTNz5c,52026
+ onnxruntime_extensions/tools/pre_post_processing/steps/__init__.py,sha256=UvcT_ZiFP3n3IuYymoZsbe-V_czym0i4EDvBsTO7Tdw,159
+ onnxruntime_extensions/tools/pre_post_processing/steps/nlp.py,sha256=4kSTSyFkSng09SDvBHiYlY4aD1jl-ehuPC6TBpTTQ_Q,14829
+ onnxruntime_extensions/tools/pre_post_processing/steps/general.py,sha256=uk5Pv5qELQXqODHM8ysQHFI52iV1bijvMOIUsZ5qqZo,13415
+ onnxruntime_extensions/onnxprocess/_onnx_ops.py,sha256=46qVtkO52vZcoihsqOJ-MtXSTdAcONty3uzmU3ZVNv4,71748
+ onnxruntime_extensions/onnxprocess/torch_wrapper.py,sha256=Zc1y7TFIGE5n4NMNNciEvQQPy-G8dU91ZED2Q20gtls,828
+ onnxruntime_extensions/onnxprocess/__init__.py,sha256=QDb5TwaNWcA3wrNG36gBjI9MAWsLcG-ogbpTjV4R3mc,522
+ onnxruntime_extensions/onnxprocess/_session.py,sha256=QVjQPQeucJgyE3S2eD-w0A14BL1bGSrJ7vNlcNzNX8Q,14803
+ onnxruntime_extensions/onnxprocess/_tensor.py,sha256=BMIsZ0Q5TrKk6yyGCxhO7o-MOuxxz1ZujSqwfisRGQI,24782
+ onnxruntime_extensions/onnxprocess/_builder.py,sha256=anpzKLdkEZyFEopmAQ0PE7qUa_u1t2Sidupb6Dr6w9E,1791
+ onnxruntime_extensions/pnp/_base.py,sha256=Ho6QS_Nd-XHHmP9rKGWdY8fRdC2A42eBcmbL8PFJ9q4,3804
+ onnxruntime_extensions/pnp/_onnx_ops.py,sha256=Yp7nzw01RNTgRBacUaDxRJYmoYC85EVd1CQqVlHjxgs,72854
+ onnxruntime_extensions/pnp/_nlp.py,sha256=uZ4ULl4nY4JhrJkJm9PqCmZowlNISK4ve0AQLlEqKS4,7272
+ onnxruntime_extensions/pnp/_imagenet.py,sha256=GwYFUMnbszmfMiWoW19BtytiZEMUAkFwJqj-AhTBju8,2387
+ onnxruntime_extensions/pnp/__init__.py,sha256=jLK2YniktlZsERKfW85SmVx9kEWpEnbpO3_6w-c1Kb8,482
+ onnxruntime_extensions/pnp/_unifier.py,sha256=YqWzUl2QgMipfUGTDd1hXC80W7SjbS2nLSJ4c_-J-HQ,1604
+ onnxruntime_extensions/pnp/_torchext.py,sha256=KFW58a9ZCBlwISO-USQzLAtrS7XdaoXuVGOQErLhT8E,11617
+ onnxruntime_extensions/pnp/_utils.py,sha256=U1m-4uxCY2slsdu1scprk2FGpLaoMjz0W6NgKudhutQ,12759
+ onnxruntime_extensions-0.14.0.dist-info/RECORD,,
+ onnxruntime_extensions-0.14.0.dist-info/LICENSE,sha256=ws_MuBL-SCEBqPBFl9_FqZkaaydIJmxHrJG2parhU4M,1141
+ onnxruntime_extensions-0.14.0.dist-info/WHEEL,sha256=mXclPsvKCrLSi5M_t9A9nHNWopNLnNNMFFGj--YXJQo,141
+ onnxruntime_extensions-0.14.0.dist-info/top_level.txt,sha256=XyAgQDKyXsf6_0MJb58kRdHwigpTn7A7kl9diBEjs8M,23
+ onnxruntime_extensions-0.14.0.dist-info/METADATA,sha256=7WunQ6CdHmE_nN3tQsw74WY24zrMemtn4rdvAZuIZ4U,4555
onnxruntime_extensions-0.14.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+ Wheel-Version: 1.0
+ Generator: setuptools (75.8.2)
+ Root-Is-Purelib: false
+ Tag: cp313-cp313-macosx_11_0_universal2
+ Generator: delocate 0.13.0
+
onnxruntime_extensions-0.14.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+ onnxruntime_extensions