onnxruntime_gpu-1.23.0-cp313-cp313-win_amd64.whl → onnxruntime_gpu-1.23.2-cp313-cp313-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
onnxruntime/__init__.py CHANGED
@@ -8,7 +8,7 @@ For more information on ONNX Runtime, please see `aka.ms/onnxruntime <https://ak
8
8
  or the `Github project <https://github.com/microsoft/onnxruntime/>`_.
9
9
  """
10
10
 
11
- __version__ = "1.23.0"
11
+ __version__ = "1.23.2"
12
12
  __author__ = "Microsoft"
13
13
 
14
14
  # we need to do device version validation (for example to check Cuda version for an onnxruntime-training package).
@@ -31,14 +31,17 @@ try:
31
31
  OrtAllocatorType, # noqa: F401
32
32
  OrtArenaCfg, # noqa: F401
33
33
  OrtCompileApiFlags, # noqa: F401
34
+ OrtDeviceMemoryType, # noqa: F401
34
35
  OrtEpDevice, # noqa: F401
35
36
  OrtExecutionProviderDevicePolicy, # noqa: F401
36
37
  OrtExternalInitializerInfo, # noqa: F401
37
38
  OrtHardwareDevice, # noqa: F401
38
39
  OrtHardwareDeviceType, # noqa: F401
39
40
  OrtMemoryInfo, # noqa: F401
41
+ OrtMemoryInfoDeviceType, # noqa: F401
40
42
  OrtMemType, # noqa: F401
41
43
  OrtSparseFormat, # noqa: F401
44
+ OrtSyncStream, # noqa: F401
42
45
  RunOptions, # noqa: F401
43
46
  SessionIOBinding, # noqa: F401
44
47
  SessionOptions, # noqa: F401
@@ -78,6 +81,7 @@ from onnxruntime.capi.onnxruntime_inference_collection import (
78
81
  OrtDevice, # noqa: F401
79
82
  OrtValue, # noqa: F401
80
83
  SparseTensor, # noqa: F401
84
+ copy_tensors, # noqa: F401
81
85
  )
82
86
 
83
87
  # TODO: thiagofc: Temporary experimental namespace for new PyTorch front-end
@@ -1,3 +1,3 @@
1
1
  package_name = 'onnxruntime-gpu'
2
- __version__ = '1.23.0'
2
+ __version__ = '1.23.2'
3
3
  cuda_version = '12.2'
Binary file
@@ -199,6 +199,18 @@ class Session:
199
199
  "Return the metadata. See :class:`onnxruntime.ModelMetadata`."
200
200
  return self._model_meta
201
201
 
202
+ def get_input_memory_infos(self) -> Sequence[onnxruntime.MemoryInfo]:
203
+ "Return the memory info for the inputs."
204
+ return self._input_meminfos
205
+
206
+ def get_output_memory_infos(self) -> Sequence[onnxruntime.MemoryInfo]:
207
+ "Return the memory info for the outputs."
208
+ return self._output_meminfos
209
+
210
+ def get_input_epdevices(self) -> Sequence[onnxruntime.OrtEpDevice]:
211
+ "Return the execution providers for the inputs."
212
+ return self._input_epdevices
213
+
202
214
  def get_providers(self) -> Sequence[str]:
203
215
  "Return list of registered execution providers."
204
216
  return self._providers
@@ -576,6 +588,9 @@ class InferenceSession(Session):
576
588
  self._inputs_meta = self._sess.inputs_meta
577
589
  self._outputs_meta = self._sess.outputs_meta
578
590
  self._overridable_initializers = self._sess.overridable_initializers
591
+ self._input_meminfos = self._sess.input_meminfos
592
+ self._output_meminfos = self._sess.output_meminfos
593
+ self._input_epdevices = self._sess.input_epdevices
579
594
  self._model_meta = self._sess.model_meta
580
595
  self._providers = self._sess.get_providers()
581
596
  self._provider_options = self._sess.get_provider_options()
@@ -589,6 +604,9 @@ class InferenceSession(Session):
589
604
  self._inputs_meta = None
590
605
  self._outputs_meta = None
591
606
  self._overridable_initializers = None
607
+ self._input_meminfos = None
608
+ self._output_meminfos = None
609
+ self._input_epdevices = None
592
610
  self._model_meta = None
593
611
  self._providers = None
594
612
  self._provider_options = None
@@ -1134,6 +1152,15 @@ class OrtValue:
1134
1152
  self._ortvalue.update_inplace(np_arr)
1135
1153
 
1136
1154
 
1155
+ def copy_tensors(src: Sequence[OrtValue], dst: Sequence[OrtValue], stream=None) -> None:
1156
+ """
1157
+ Copy tensor data from source OrtValue sequence to destination OrtValue sequence.
1158
+ """
1159
+ c_sources = [s._get_c_value() for s in src]
1160
+ c_dsts = [d._get_c_value() for d in dst]
1161
+ C.copy_tensors(c_sources, c_dsts, stream)
1162
+
1163
+
1137
1164
  class OrtDevice:
1138
1165
  """
1139
1166
  A data structure that exposes the underlying C++ OrtDevice
@@ -1146,6 +1173,7 @@ class OrtDevice:
1146
1173
  if isinstance(c_ort_device, C.OrtDevice):
1147
1174
  self._ort_device = c_ort_device
1148
1175
  else:
1176
+ # An end user won't hit this error
1149
1177
  raise ValueError(
1150
1178
  "`Provided object` needs to be of type `onnxruntime.capi.onnxruntime_pybind11_state.OrtDevice`"
1151
1179
  )
@@ -1188,6 +1216,9 @@ class OrtDevice:
1188
1216
  def device_vendor_id(self):
1189
1217
  return self._ort_device.vendor_id()
1190
1218
 
1219
+ def device_mem_type(self):
1220
+ return self._ort_device.mem_type()
1221
+
1191
1222
 
1192
1223
  class SparseTensor:
1193
1224
  """
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: onnxruntime-gpu
3
- Version: 1.23.0
3
+ Version: 1.23.2
4
4
  Summary: ONNX Runtime is a runtime accelerator for Machine Learning models
5
5
  Home-page: https://onnxruntime.ai
6
6
  Download-URL: https://github.com/microsoft/onnxruntime/tags
@@ -62,6 +62,16 @@ For more information on ONNX Runtime, please see `aka.ms/onnxruntime <https://ak
62
62
 
63
63
  Changes
64
64
  -------
65
+ 1.23.2
66
+ ^^^^^^
67
+
68
+ Release Notes : https://github.com/Microsoft/onnxruntime/releases/tag/v1.23.2
69
+
70
+
71
+ 1.23.1
72
+ ^^^^^^
73
+
74
+ Release Notes : https://github.com/Microsoft/onnxruntime/releases/tag/v1.23.1
65
75
 
66
76
  1.23.0
67
77
  ^^^^^^
@@ -1,22 +1,22 @@
1
1
  onnxruntime/LICENSE,sha256=wlDWJ48LR6ZDn7dZKwi1ilXrn1NapJodtjIRw_mCtnQ,1094
2
2
  onnxruntime/Privacy.md,sha256=v7dxKwdfPwfj6-5dwqKW0d4y2_ca0oZj9z0VOMtsOwg,2490
3
3
  onnxruntime/ThirdPartyNotices.txt,sha256=4A-Cjgoz3lkaNVrmYG0mJfV1jafSyETbeCHJ3T42R7Y,333022
4
- onnxruntime/__init__.py,sha256=mE1tr2wVVmzB66KwvL_XuyguD8eFj13BY4YUqzPvE4M,15071
4
+ onnxruntime/__init__.py,sha256=YdcAwnbAE0ZyFpivLfxWAaJSqHxcMk6HwxQSJUUfYXA,15234
5
5
  onnxruntime/backend/__init__.py,sha256=5I1Ylsawf9w6MNmK4RiN1wA-EEQqlKKwYTNZB-m_k6M,334
6
6
  onnxruntime/backend/backend.py,sha256=xPA69Lf7rwwwgeWoZ3CgB2JSoExuIAdhHmd6ROp19sc,8187
7
7
  onnxruntime/backend/backend_rep.py,sha256=A7S4GqxLC6IfkbEXLlWiWpCD9AJ5x-xAhnR8BCM2cNk,1776
8
8
  onnxruntime/capi/__init__.py,sha256=uRp4pMtfoayBhZgEsiFqFCD13Y6LUo82FdZsQX8X8LI,251
9
9
  onnxruntime/capi/_ld_preload.py,sha256=li6cbZ64hDfUndat4mprUWzowLa3RQdw0q2E56sXFwE,413
10
10
  onnxruntime/capi/_pybind_state.py,sha256=nbUpnUncwBv5pgJA8yugDYJRA4TTfC0gaYOED5jD-SA,1533
11
- onnxruntime/capi/build_and_package_info.py,sha256=673StnzOZHxc0V4k7aximnvddIEjJMkMvEtuaAhn6qQ,81
11
+ onnxruntime/capi/build_and_package_info.py,sha256=iXEsoOmQE9LyqJPmXtzyrYeM5IWO1JJYoZFef0K4ddY,81
12
12
  onnxruntime/capi/convert_npz_to_onnx_adapter.py,sha256=N0ShYr30vBQcOr9KyFd4AUdEcqWW89KVd80qSYCgdQ4,1581
13
- onnxruntime/capi/onnxruntime.dll,sha256=OWayO5olNDXncGqrKl_RCnhrVYmRz39jkvaGpBHYKgE,15464480
13
+ onnxruntime/capi/onnxruntime.dll,sha256=wDgtUkbd2vnGDgUgA0YDufEoAuDomRYmOg7xbem9Iv4,15551008
14
14
  onnxruntime/capi/onnxruntime_collect_build_info.py,sha256=sD8Z2S15QHSvuO1j7tgqJKeORDUatwzUmzvC8Uj9cAM,2109
15
- onnxruntime/capi/onnxruntime_inference_collection.py,sha256=0-ISc_Bo8U3G8j5P-dZjRS0eSQ0vpDVSEODNmwJptNA,59993
16
- onnxruntime/capi/onnxruntime_providers_cuda.dll,sha256=gsk3w3Iyy6FBhTlyPqTTHU0j_NgOGIxBfKh7vjxB40M,433778208
17
- onnxruntime/capi/onnxruntime_providers_shared.dll,sha256=9aBYLMtqPJMFDf92QS5xmYYxH10fOXsC7NpbkIv-Wuw,22072
18
- onnxruntime/capi/onnxruntime_providers_tensorrt.dll,sha256=VXIceFNE02roIgbcKFJzXsyE1oJ6ElEJXEsWn8ZuNDY,848456
19
- onnxruntime/capi/onnxruntime_pybind11_state.pyd,sha256=FpeIqDzcsuRrJil0usCpSmugTW5VM5hzjXxmLAfnAdg,19326536
15
+ onnxruntime/capi/onnxruntime_inference_collection.py,sha256=8tjkx7AIa7T1XApmf-UgX1lLsDPY-0VBxiYOk4NE4KI,61252
16
+ onnxruntime/capi/onnxruntime_providers_cuda.dll,sha256=4fNIgs9dKYcZWq70Y9SVZDp2JbkvZ-EBGw2MPYfxUBQ,366219320
17
+ onnxruntime/capi/onnxruntime_providers_shared.dll,sha256=nJj1UkqeCvaFf9msWEVlP1dn6Ys_HhfER77hy93qbPc,22088
18
+ onnxruntime/capi/onnxruntime_providers_tensorrt.dll,sha256=KXv6VE39ZYrPDGDEZ208LJVUnO3ZexHSeaAOW5qJ4JY,849976
19
+ onnxruntime/capi/onnxruntime_pybind11_state.pyd,sha256=l1vOrW2f9qgnDpcLqqVMCY0tRC30quT4opj1vyVck0U,19462216
20
20
  onnxruntime/capi/onnxruntime_validation.py,sha256=nJydkJxSVNiRqQvVkJCpfFZoeZAvATFD8OIze3SY_5E,6865
21
21
  onnxruntime/capi/version_info.py,sha256=VntvblZPqhDvDUTtZQKgsmQ-oJIoqHRvehW3JZtPiHM,81
22
22
  onnxruntime/datasets/__init__.py,sha256=DqRdpMfRtDfhVkCQu5lTmfSQ-GG4dETHNWdoB4fA7lU,473
@@ -316,8 +316,8 @@ onnxruntime/transformers/models/whisper/whisper_encoder_decoder_init.py,sha256=P
316
316
  onnxruntime/transformers/models/whisper/whisper_helper.py,sha256=ifEhlnK5NkGUlJrJNECp1nArm3FInaVa7mXTOuogZEQ,51347
317
317
  onnxruntime/transformers/models/whisper/whisper_inputs.py,sha256=rSjFJ9Llz2FmpRXDz8cK2KFzr2b5l-n4eo8VN463iUE,16041
318
318
  onnxruntime/transformers/models/whisper/whisper_jump_times.py,sha256=bgxGRs00NAf5F7hzJBJbt7JB3IMuCOxylhCgnf16-l8,19950
319
- onnxruntime_gpu-1.23.0.dist-info/METADATA,sha256=jAGOntQV20wlqTV_s2edMiDDPg1uE_4GQXYY7fpyA4c,5447
320
- onnxruntime_gpu-1.23.0.dist-info/WHEEL,sha256=qV0EIPljj1XC_vuSatRWjn02nZIz3N1t8jsZz7HBr2U,101
321
- onnxruntime_gpu-1.23.0.dist-info/entry_points.txt,sha256=7qLS4FbGXwPZjfdpVAGpnmk9I6m6H5CxEnwcCx1Imjs,77
322
- onnxruntime_gpu-1.23.0.dist-info/top_level.txt,sha256=zk_fJEekrTm9DLxX2LwGegokVqP6blqPhFoMIuh0Nv8,12
323
- onnxruntime_gpu-1.23.0.dist-info/RECORD,,
319
+ onnxruntime_gpu-1.23.2.dist-info/METADATA,sha256=OlbByIn6yvomXOpVDEiOMxKfS6X85690Tns0Pk04vQQ,5645
320
+ onnxruntime_gpu-1.23.2.dist-info/WHEEL,sha256=qV0EIPljj1XC_vuSatRWjn02nZIz3N1t8jsZz7HBr2U,101
321
+ onnxruntime_gpu-1.23.2.dist-info/entry_points.txt,sha256=7qLS4FbGXwPZjfdpVAGpnmk9I6m6H5CxEnwcCx1Imjs,77
322
+ onnxruntime_gpu-1.23.2.dist-info/top_level.txt,sha256=zk_fJEekrTm9DLxX2LwGegokVqP6blqPhFoMIuh0Nv8,12
323
+ onnxruntime_gpu-1.23.2.dist-info/RECORD,,