onnxruntime_directml-1.24.1-cp314-cp314-win_amd64.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (322)
  1. onnxruntime/LICENSE +21 -0
  2. onnxruntime/Privacy.md +21 -0
  3. onnxruntime/ThirdPartyNotices.txt +6121 -0
  4. onnxruntime/__init__.py +418 -0
  5. onnxruntime/backend/__init__.py +6 -0
  6. onnxruntime/backend/backend.py +175 -0
  7. onnxruntime/backend/backend_rep.py +52 -0
  8. onnxruntime/capi/DirectML.dll +0 -0
  9. onnxruntime/capi/__init__.py +4 -0
  10. onnxruntime/capi/_ld_preload.py +7 -0
  11. onnxruntime/capi/_pybind_state.py +33 -0
  12. onnxruntime/capi/build_and_package_info.py +2 -0
  13. onnxruntime/capi/convert_npz_to_onnx_adapter.py +48 -0
  14. onnxruntime/capi/onnxruntime.dll +0 -0
  15. onnxruntime/capi/onnxruntime_collect_build_info.py +47 -0
  16. onnxruntime/capi/onnxruntime_inference_collection.py +1440 -0
  17. onnxruntime/capi/onnxruntime_providers_shared.dll +0 -0
  18. onnxruntime/capi/onnxruntime_pybind11_state.pyd +0 -0
  19. onnxruntime/capi/onnxruntime_validation.py +154 -0
  20. onnxruntime/capi/version_info.py +2 -0
  21. onnxruntime/datasets/__init__.py +18 -0
  22. onnxruntime/datasets/logreg_iris.onnx +0 -0
  23. onnxruntime/datasets/mul_1.onnx +0 -0
  24. onnxruntime/datasets/sigmoid.onnx +13 -0
  25. onnxruntime/quantization/CalTableFlatBuffers/KeyValue.py +78 -0
  26. onnxruntime/quantization/CalTableFlatBuffers/TrtTable.py +90 -0
  27. onnxruntime/quantization/CalTableFlatBuffers/__init__.py +0 -0
  28. onnxruntime/quantization/__init__.py +19 -0
  29. onnxruntime/quantization/base_quantizer.py +529 -0
  30. onnxruntime/quantization/calibrate.py +1267 -0
  31. onnxruntime/quantization/execution_providers/qnn/__init__.py +2 -0
  32. onnxruntime/quantization/execution_providers/qnn/fusion_lpnorm.py +132 -0
  33. onnxruntime/quantization/execution_providers/qnn/fusion_spacetodepth.py +162 -0
  34. onnxruntime/quantization/execution_providers/qnn/mixed_precision_overrides_utils.py +413 -0
  35. onnxruntime/quantization/execution_providers/qnn/preprocess.py +353 -0
  36. onnxruntime/quantization/execution_providers/qnn/quant_config.py +389 -0
  37. onnxruntime/quantization/fusions/__init__.py +4 -0
  38. onnxruntime/quantization/fusions/fusion.py +311 -0
  39. onnxruntime/quantization/fusions/fusion_gelu.py +272 -0
  40. onnxruntime/quantization/fusions/fusion_layernorm.py +146 -0
  41. onnxruntime/quantization/fusions/replace_upsample_with_resize.py +96 -0
  42. onnxruntime/quantization/matmul_bnb4_quantizer.py +239 -0
  43. onnxruntime/quantization/matmul_nbits_quantizer.py +1638 -0
  44. onnxruntime/quantization/neural_compressor/__init__.py +1 -0
  45. onnxruntime/quantization/neural_compressor/onnx_model.py +1251 -0
  46. onnxruntime/quantization/neural_compressor/util.py +80 -0
  47. onnxruntime/quantization/neural_compressor/weight_only.py +932 -0
  48. onnxruntime/quantization/onnx_model.py +600 -0
  49. onnxruntime/quantization/onnx_quantizer.py +1163 -0
  50. onnxruntime/quantization/operators/__init__.py +2 -0
  51. onnxruntime/quantization/operators/activation.py +119 -0
  52. onnxruntime/quantization/operators/argmax.py +18 -0
  53. onnxruntime/quantization/operators/attention.py +73 -0
  54. onnxruntime/quantization/operators/base_operator.py +26 -0
  55. onnxruntime/quantization/operators/binary_op.py +72 -0
  56. onnxruntime/quantization/operators/concat.py +62 -0
  57. onnxruntime/quantization/operators/conv.py +260 -0
  58. onnxruntime/quantization/operators/direct_q8.py +78 -0
  59. onnxruntime/quantization/operators/embed_layernorm.py +121 -0
  60. onnxruntime/quantization/operators/gather.py +64 -0
  61. onnxruntime/quantization/operators/gavgpool.py +62 -0
  62. onnxruntime/quantization/operators/gemm.py +172 -0
  63. onnxruntime/quantization/operators/lstm.py +121 -0
  64. onnxruntime/quantization/operators/matmul.py +231 -0
  65. onnxruntime/quantization/operators/maxpool.py +34 -0
  66. onnxruntime/quantization/operators/norm.py +40 -0
  67. onnxruntime/quantization/operators/pad.py +172 -0
  68. onnxruntime/quantization/operators/pooling.py +67 -0
  69. onnxruntime/quantization/operators/qdq_base_operator.py +22 -0
  70. onnxruntime/quantization/operators/resize.py +34 -0
  71. onnxruntime/quantization/operators/softmax.py +74 -0
  72. onnxruntime/quantization/operators/split.py +63 -0
  73. onnxruntime/quantization/operators/where.py +87 -0
  74. onnxruntime/quantization/preprocess.py +141 -0
  75. onnxruntime/quantization/qdq_loss_debug.py +389 -0
  76. onnxruntime/quantization/qdq_quantizer.py +1477 -0
  77. onnxruntime/quantization/quant_utils.py +1051 -0
  78. onnxruntime/quantization/quantize.py +953 -0
  79. onnxruntime/quantization/registry.py +110 -0
  80. onnxruntime/quantization/shape_inference.py +204 -0
  81. onnxruntime/quantization/static_quantize_runner.py +256 -0
  82. onnxruntime/quantization/tensor_quant_overrides.py +520 -0
  83. onnxruntime/tools/__init__.py +10 -0
  84. onnxruntime/tools/check_onnx_model_mobile_usability.py +47 -0
  85. onnxruntime/tools/convert_onnx_models_to_ort.py +380 -0
  86. onnxruntime/tools/file_utils.py +47 -0
  87. onnxruntime/tools/logger.py +11 -0
  88. onnxruntime/tools/make_dynamic_shape_fixed.py +73 -0
  89. onnxruntime/tools/mobile_helpers/__init__.py +0 -0
  90. onnxruntime/tools/mobile_helpers/coreml_supported_mlprogram_ops.md +53 -0
  91. onnxruntime/tools/mobile_helpers/coreml_supported_neuralnetwork_ops.md +43 -0
  92. onnxruntime/tools/mobile_helpers/nnapi_supported_ops.md +58 -0
  93. onnxruntime/tools/mobile_helpers/usability_checker.py +738 -0
  94. onnxruntime/tools/offline_tuning.py +169 -0
  95. onnxruntime/tools/onnx_model_utils.py +416 -0
  96. onnxruntime/tools/onnx_randomizer.py +85 -0
  97. onnxruntime/tools/onnxruntime_test.py +164 -0
  98. onnxruntime/tools/optimize_onnx_model.py +56 -0
  99. onnxruntime/tools/ort_format_model/__init__.py +27 -0
  100. onnxruntime/tools/ort_format_model/operator_type_usage_processors.py +653 -0
  101. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/__init__.py +0 -0
  102. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ArgType.py +7 -0
  103. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ArgTypeAndIndex.py +67 -0
  104. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Attribute.py +337 -0
  105. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/AttributeType.py +18 -0
  106. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Checkpoint.py +125 -0
  107. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DeprecatedKernelCreateInfos.py +120 -0
  108. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DeprecatedNodeIndexAndKernelDefHash.py +68 -0
  109. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DeprecatedSessionState.py +96 -0
  110. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DeprecatedSubGraphSessionState.py +72 -0
  111. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Dimension.py +71 -0
  112. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DimensionValue.py +80 -0
  113. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DimensionValueType.py +8 -0
  114. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/EdgeEnd.py +32 -0
  115. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/FloatProperty.py +67 -0
  116. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Graph.py +320 -0
  117. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/InferenceSession.py +88 -0
  118. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/IntProperty.py +67 -0
  119. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/KernelTypeStrArgsEntry.py +91 -0
  120. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/KernelTypeStrResolver.py +78 -0
  121. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/MapType.py +71 -0
  122. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Model.py +223 -0
  123. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ModuleState.py +141 -0
  124. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Node.py +317 -0
  125. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/NodeEdge.py +126 -0
  126. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/NodeType.py +7 -0
  127. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/NodesToOptimizeIndices.py +160 -0
  128. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/OpIdKernelTypeStrArgsEntry.py +91 -0
  129. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/OperatorSetId.py +67 -0
  130. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/OptimizerGroup.py +117 -0
  131. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ParameterOptimizerState.py +91 -0
  132. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/PropertyBag.py +152 -0
  133. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/RuntimeOptimizationRecord.py +105 -0
  134. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/RuntimeOptimizationRecordContainerEntry.py +91 -0
  135. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/RuntimeOptimizations.py +79 -0
  136. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/SequenceType.py +58 -0
  137. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Shape.py +78 -0
  138. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/SparseTensor.py +114 -0
  139. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/StringProperty.py +67 -0
  140. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/StringStringEntry.py +67 -0
  141. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Tensor.py +203 -0
  142. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/TensorDataType.py +26 -0
  143. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/TensorTypeAndShape.py +71 -0
  144. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/TypeInfo.py +83 -0
  145. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/TypeInfoValue.py +9 -0
  146. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ValueInfo.py +84 -0
  147. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/__init__.py +6 -0
  148. onnxruntime/tools/ort_format_model/ort_model_processor.py +86 -0
  149. onnxruntime/tools/ort_format_model/types.py +85 -0
  150. onnxruntime/tools/ort_format_model/utils.py +61 -0
  151. onnxruntime/tools/pytorch_export_contrib_ops.py +129 -0
  152. onnxruntime/tools/pytorch_export_helpers.py +131 -0
  153. onnxruntime/tools/qdq_helpers/__init__.py +0 -0
  154. onnxruntime/tools/qdq_helpers/optimize_qdq_model.py +37 -0
  155. onnxruntime/tools/qnn/add_trans_cast.py +292 -0
  156. onnxruntime/tools/qnn/gen_qnn_ctx_onnx_model.py +364 -0
  157. onnxruntime/tools/qnn/preprocess.py +165 -0
  158. onnxruntime/tools/reduced_build_config_parser.py +203 -0
  159. onnxruntime/tools/remove_initializer_from_input.py +37 -0
  160. onnxruntime/tools/symbolic_shape_infer.py +3094 -0
  161. onnxruntime/tools/update_onnx_opset.py +31 -0
  162. onnxruntime/transformers/__init__.py +8 -0
  163. onnxruntime/transformers/affinity_helper.py +40 -0
  164. onnxruntime/transformers/benchmark.py +942 -0
  165. onnxruntime/transformers/benchmark_helper.py +643 -0
  166. onnxruntime/transformers/bert_perf_test.py +629 -0
  167. onnxruntime/transformers/bert_test_data.py +641 -0
  168. onnxruntime/transformers/compare_bert_results.py +256 -0
  169. onnxruntime/transformers/constants.py +47 -0
  170. onnxruntime/transformers/convert_generation.py +3605 -0
  171. onnxruntime/transformers/convert_tf_models_to_pytorch.py +205 -0
  172. onnxruntime/transformers/convert_to_packing_mode.py +385 -0
  173. onnxruntime/transformers/dynamo_onnx_helper.py +205 -0
  174. onnxruntime/transformers/float16.py +501 -0
  175. onnxruntime/transformers/fusion_attention.py +1189 -0
  176. onnxruntime/transformers/fusion_attention_clip.py +340 -0
  177. onnxruntime/transformers/fusion_attention_sam2.py +533 -0
  178. onnxruntime/transformers/fusion_attention_unet.py +1307 -0
  179. onnxruntime/transformers/fusion_attention_vae.py +300 -0
  180. onnxruntime/transformers/fusion_bart_attention.py +435 -0
  181. onnxruntime/transformers/fusion_base.py +141 -0
  182. onnxruntime/transformers/fusion_bias_add.py +57 -0
  183. onnxruntime/transformers/fusion_biasgelu.py +66 -0
  184. onnxruntime/transformers/fusion_biassplitgelu.py +110 -0
  185. onnxruntime/transformers/fusion_conformer_attention.py +222 -0
  186. onnxruntime/transformers/fusion_constant_fold.py +144 -0
  187. onnxruntime/transformers/fusion_embedlayer.py +810 -0
  188. onnxruntime/transformers/fusion_fastgelu.py +492 -0
  189. onnxruntime/transformers/fusion_gelu.py +258 -0
  190. onnxruntime/transformers/fusion_gelu_approximation.py +25 -0
  191. onnxruntime/transformers/fusion_gemmfastgelu.py +121 -0
  192. onnxruntime/transformers/fusion_gpt_attention.py +546 -0
  193. onnxruntime/transformers/fusion_gpt_attention_megatron.py +355 -0
  194. onnxruntime/transformers/fusion_gpt_attention_no_past.py +260 -0
  195. onnxruntime/transformers/fusion_group_norm.py +180 -0
  196. onnxruntime/transformers/fusion_layernorm.py +489 -0
  197. onnxruntime/transformers/fusion_mha_mmdit.py +667 -0
  198. onnxruntime/transformers/fusion_nhwc_conv.py +99 -0
  199. onnxruntime/transformers/fusion_options.py +340 -0
  200. onnxruntime/transformers/fusion_qordered_attention.py +420 -0
  201. onnxruntime/transformers/fusion_qordered_gelu.py +118 -0
  202. onnxruntime/transformers/fusion_qordered_layernorm.py +122 -0
  203. onnxruntime/transformers/fusion_qordered_matmul.py +216 -0
  204. onnxruntime/transformers/fusion_quickgelu.py +74 -0
  205. onnxruntime/transformers/fusion_reshape.py +173 -0
  206. onnxruntime/transformers/fusion_rotary_attention.py +1591 -0
  207. onnxruntime/transformers/fusion_shape.py +109 -0
  208. onnxruntime/transformers/fusion_simplified_layernorm.py +165 -0
  209. onnxruntime/transformers/fusion_skip_group_norm.py +254 -0
  210. onnxruntime/transformers/fusion_skiplayernorm.py +209 -0
  211. onnxruntime/transformers/fusion_transpose.py +167 -0
  212. onnxruntime/transformers/fusion_utils.py +321 -0
  213. onnxruntime/transformers/huggingface_models.py +74 -0
  214. onnxruntime/transformers/import_utils.py +20 -0
  215. onnxruntime/transformers/io_binding_helper.py +487 -0
  216. onnxruntime/transformers/large_model_exporter.py +395 -0
  217. onnxruntime/transformers/machine_info.py +230 -0
  218. onnxruntime/transformers/metrics.py +163 -0
  219. onnxruntime/transformers/models/bart/__init__.py +12 -0
  220. onnxruntime/transformers/models/bart/export.py +98 -0
  221. onnxruntime/transformers/models/bert/__init__.py +12 -0
  222. onnxruntime/transformers/models/bert/eval_squad.py +329 -0
  223. onnxruntime/transformers/models/gpt2/__init__.py +12 -0
  224. onnxruntime/transformers/models/gpt2/benchmark_gpt2.py +413 -0
  225. onnxruntime/transformers/models/gpt2/convert_to_onnx.py +566 -0
  226. onnxruntime/transformers/models/gpt2/gpt2_helper.py +1031 -0
  227. onnxruntime/transformers/models/gpt2/gpt2_parity.py +513 -0
  228. onnxruntime/transformers/models/gpt2/gpt2_tester.py +501 -0
  229. onnxruntime/transformers/models/gpt2/parity_check_helper.py +146 -0
  230. onnxruntime/transformers/models/llama/__init__.py +12 -0
  231. onnxruntime/transformers/models/llama/benchmark.py +700 -0
  232. onnxruntime/transformers/models/llama/benchmark_all.py +488 -0
  233. onnxruntime/transformers/models/llama/benchmark_e2e.py +608 -0
  234. onnxruntime/transformers/models/llama/convert_to_onnx.py +1064 -0
  235. onnxruntime/transformers/models/llama/dist_settings.py +57 -0
  236. onnxruntime/transformers/models/llama/llama_inputs.py +504 -0
  237. onnxruntime/transformers/models/llama/llama_parity.py +343 -0
  238. onnxruntime/transformers/models/llama/llama_torch.py +47 -0
  239. onnxruntime/transformers/models/llama/quant_kv_dataloader.py +108 -0
  240. onnxruntime/transformers/models/longformer/__init__.py +12 -0
  241. onnxruntime/transformers/models/longformer/benchmark_longformer.py +821 -0
  242. onnxruntime/transformers/models/longformer/convert_to_onnx.py +413 -0
  243. onnxruntime/transformers/models/longformer/generate_test_data.py +347 -0
  244. onnxruntime/transformers/models/longformer/longformer_helper.py +76 -0
  245. onnxruntime/transformers/models/phi2/__init__.py +12 -0
  246. onnxruntime/transformers/models/phi2/convert_to_onnx.py +590 -0
  247. onnxruntime/transformers/models/phi2/inference_example.py +414 -0
  248. onnxruntime/transformers/models/sam2/__init__.py +12 -0
  249. onnxruntime/transformers/models/sam2/benchmark_sam2.py +638 -0
  250. onnxruntime/transformers/models/sam2/convert_to_onnx.py +270 -0
  251. onnxruntime/transformers/models/sam2/image_decoder.py +272 -0
  252. onnxruntime/transformers/models/sam2/image_encoder.py +236 -0
  253. onnxruntime/transformers/models/sam2/mask_decoder.py +208 -0
  254. onnxruntime/transformers/models/sam2/nvtx_helper.py +33 -0
  255. onnxruntime/transformers/models/sam2/prompt_encoder.py +189 -0
  256. onnxruntime/transformers/models/sam2/sam2_demo.py +321 -0
  257. onnxruntime/transformers/models/sam2/sam2_image_onnx_predictor.py +279 -0
  258. onnxruntime/transformers/models/sam2/sam2_utils.py +147 -0
  259. onnxruntime/transformers/models/stable_diffusion/__init__.py +12 -0
  260. onnxruntime/transformers/models/stable_diffusion/benchmark.py +1519 -0
  261. onnxruntime/transformers/models/stable_diffusion/benchmark_controlnet.py +426 -0
  262. onnxruntime/transformers/models/stable_diffusion/demo_txt2img.py +103 -0
  263. onnxruntime/transformers/models/stable_diffusion/demo_txt2img_xl.py +269 -0
  264. onnxruntime/transformers/models/stable_diffusion/demo_utils.py +778 -0
  265. onnxruntime/transformers/models/stable_diffusion/diffusion_models.py +1318 -0
  266. onnxruntime/transformers/models/stable_diffusion/diffusion_schedulers.py +1179 -0
  267. onnxruntime/transformers/models/stable_diffusion/engine_builder.py +295 -0
  268. onnxruntime/transformers/models/stable_diffusion/engine_builder_ort_cuda.py +387 -0
  269. onnxruntime/transformers/models/stable_diffusion/engine_builder_ort_trt.py +288 -0
  270. onnxruntime/transformers/models/stable_diffusion/engine_builder_tensorrt.py +395 -0
  271. onnxruntime/transformers/models/stable_diffusion/engine_builder_torch.py +108 -0
  272. onnxruntime/transformers/models/stable_diffusion/optimize_pipeline.py +590 -0
  273. onnxruntime/transformers/models/stable_diffusion/ort_optimizer.py +136 -0
  274. onnxruntime/transformers/models/stable_diffusion/pipeline_stable_diffusion.py +831 -0
  275. onnxruntime/transformers/models/stable_diffusion/trt_utilities.py +12 -0
  276. onnxruntime/transformers/models/t5/__init__.py +12 -0
  277. onnxruntime/transformers/models/t5/convert_to_onnx.py +318 -0
  278. onnxruntime/transformers/models/t5/t5_decoder.py +437 -0
  279. onnxruntime/transformers/models/t5/t5_encoder.py +70 -0
  280. onnxruntime/transformers/models/t5/t5_encoder_decoder_init.py +361 -0
  281. onnxruntime/transformers/models/t5/t5_helper.py +302 -0
  282. onnxruntime/transformers/models/whisper/__init__.py +12 -0
  283. onnxruntime/transformers/models/whisper/benchmark.py +585 -0
  284. onnxruntime/transformers/models/whisper/benchmark_all.py +526 -0
  285. onnxruntime/transformers/models/whisper/convert_to_onnx.py +609 -0
  286. onnxruntime/transformers/models/whisper/whisper_chain.py +334 -0
  287. onnxruntime/transformers/models/whisper/whisper_decoder.py +464 -0
  288. onnxruntime/transformers/models/whisper/whisper_encoder.py +164 -0
  289. onnxruntime/transformers/models/whisper/whisper_encoder_decoder_init.py +371 -0
  290. onnxruntime/transformers/models/whisper/whisper_helper.py +1035 -0
  291. onnxruntime/transformers/models/whisper/whisper_inputs.py +380 -0
  292. onnxruntime/transformers/models/whisper/whisper_jump_times.py +477 -0
  293. onnxruntime/transformers/onnx_exporter.py +719 -0
  294. onnxruntime/transformers/onnx_model.py +1636 -0
  295. onnxruntime/transformers/onnx_model_bart.py +141 -0
  296. onnxruntime/transformers/onnx_model_bert.py +488 -0
  297. onnxruntime/transformers/onnx_model_bert_keras.py +474 -0
  298. onnxruntime/transformers/onnx_model_bert_tf.py +588 -0
  299. onnxruntime/transformers/onnx_model_clip.py +42 -0
  300. onnxruntime/transformers/onnx_model_conformer.py +32 -0
  301. onnxruntime/transformers/onnx_model_gpt2.py +101 -0
  302. onnxruntime/transformers/onnx_model_mmdit.py +112 -0
  303. onnxruntime/transformers/onnx_model_phi.py +929 -0
  304. onnxruntime/transformers/onnx_model_sam2.py +137 -0
  305. onnxruntime/transformers/onnx_model_t5.py +985 -0
  306. onnxruntime/transformers/onnx_model_tnlr.py +226 -0
  307. onnxruntime/transformers/onnx_model_unet.py +258 -0
  308. onnxruntime/transformers/onnx_model_vae.py +42 -0
  309. onnxruntime/transformers/onnx_utils.py +55 -0
  310. onnxruntime/transformers/optimizer.py +620 -0
  311. onnxruntime/transformers/past_helper.py +149 -0
  312. onnxruntime/transformers/profile_result_processor.py +358 -0
  313. onnxruntime/transformers/profiler.py +434 -0
  314. onnxruntime/transformers/quantize_helper.py +76 -0
  315. onnxruntime/transformers/shape_infer_helper.py +121 -0
  316. onnxruntime/transformers/shape_optimizer.py +400 -0
  317. onnxruntime/transformers/torch_onnx_export_helper.py +74 -0
  318. onnxruntime_directml-1.24.1.dist-info/METADATA +216 -0
  319. onnxruntime_directml-1.24.1.dist-info/RECORD +322 -0
  320. onnxruntime_directml-1.24.1.dist-info/WHEEL +5 -0
  321. onnxruntime_directml-1.24.1.dist-info/entry_points.txt +2 -0
  322. onnxruntime_directml-1.24.1.dist-info/top_level.txt +1 -0
onnxruntime/transformers/models/gpt2/benchmark_gpt2.py
@@ -0,0 +1,413 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# This script benchmarks gpt2 model with past state.
+# For gpt2 model without past state, use benchmark.py to measure performance.
+
+import argparse
+import csv
+import logging
+import os
+from datetime import datetime
+
+import psutil
+import torch
+from benchmark_helper import (
+    Precision,
+    create_onnxruntime_session,
+    get_ort_environment_variables,
+    prepare_environment,
+    setup_logger,
+)
+from gpt2_helper import DEFAULT_TOLERANCE, MODEL_CLASSES, PRETRAINED_GPT2_MODELS, Gpt2Helper
+from packaging import version
+from quantize_helper import QuantizeHelper
+from transformers import AutoConfig
+from transformers import __version__ as transformers_version
+
+logger = logging.getLogger("")
+
+
+def parse_arguments(argv=None):
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument(
+        "-m",
+        "--model_name_or_path",
+        required=True,
+        type=str,
+        help="Model path, or pretrained model name selected in the list: " + ", ".join(PRETRAINED_GPT2_MODELS),
+    )
+
+    parser.add_argument(
+        "--model_class",
+        required=False,
+        type=str,
+        default="GPT2LMHeadModel",
+        choices=list(MODEL_CLASSES.keys()),
+        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
+    )
+
+    parser.add_argument(
+        "--cache_dir",
+        required=False,
+        type=str,
+        default=os.path.join(".", "cache_models"),
+        help="Directory to cache pre-trained models",
+    )
+
+    parser.add_argument(
+        "--onnx_dir",
+        required=False,
+        type=str,
+        default=os.path.join(".", "onnx_models"),
+        help="Directory to store onnx models",
+    )
+
+    parser.add_argument(
+        "--test_times",
+        required=False,
+        default=100,
+        type=int,
+        help="Number of repeat times to get average inference latency.",
+    )
+
+    parser.add_argument(
+        "-v",
+        "--validate_onnx",
+        required=False,
+        action="store_true",
+        help="Validate ONNX model",
+    )
+
+    parser.add_argument(
+        "-o",
+        "--optimize_onnx",
+        required=False,
+        action="store_true",
+        help="Use optimizer.py to optimize onnx model",
+    )
+    parser.set_defaults(optimize_onnx=False)
+
+    parser.add_argument(
+        "--stage",
+        type=int,
+        default=0,
+        required=False,
+        choices=[0, 1, 2],
+        help="Stage in generation: 1 (initial decoder), 2 (decoder), 0 (both). "
+        "1 - decode the first token when past_sequence_length is zero; "
+        "2 - decode the remaining tokens when past_sequence_length is not zero; "
+        "0 - one onnx model for both stages 1 and 2. "
+        "Note that we will optimize 1 and 2 differently for best performance.",
+    )
+
+    parser.add_argument("--use_gpu", required=False, action="store_true", help="use GPU for inference")
+    parser.set_defaults(use_gpu=False)
+
+    parser.add_argument(
+        "-p",
+        "--precision",
+        type=Precision,
+        default=Precision.FLOAT32,
+        choices=list(Precision),
+        help="Precision of model to run. fp32 for full precision, fp16 for half precision, and int8 for quantization",
+    )
+
+    parser.add_argument("--torchscript", required=False, action="store_true", help="use Torchscript")
+    parser.set_defaults(torchscript=False)
+
+    parser.add_argument("-b", "--batch_sizes", nargs="+", type=int, default=[1], help="batch size")
+
+    parser.add_argument(
+        "--sequence_lengths",
+        nargs="+",
+        type=int,
+        default=[1],
+        help="sequence lengths (excluding past)",
+    )
+
+    parser.add_argument(
+        "-s",
+        "--past_sequence_lengths",
+        nargs="+",
+        type=int,
+        default=[8, 16, 32, 64, 128, 256],
+        help="past sequence lengths",
+    )
+
+    parser.add_argument(
+        "-r",
+        "--result_csv",
+        required=False,
+        default=None,
+        help="CSV file for saving summary results.",
+    )
+
+    parser.add_argument("--thread_num", required=False, type=int, default=-1, help="Threads to use")
+
+    parser.add_argument("--include_copy_output_latency", required=False, action="store_true")
+    parser.set_defaults(include_copy_output_latency=False)
+
+    parser.add_argument("--verbose", required=False, action="store_true")
+    parser.set_defaults(verbose=False)
+
+    parser.add_argument("--output_torch_latency", required=False, action="store_true")
+    parser.set_defaults(output_torch_latency=False)
+
+    parser.add_argument("--disable_io_binding", required=False, action="store_true")
+    parser.set_defaults(disable_io_binding=False)
+
+    args = parser.parse_args(argv)
+
+    return args
+
+
+def main(args):
+    if version.parse(transformers_version) < version.parse(
+        "3.1.0"
+    ):  # past_key_values name does not exist in 3.0.2 or older
+        raise RuntimeError("This tool requires transformers 3.1.0 or later.")
+
+    logger.info(f"Arguments:{args}")
+    if args.precision == Precision.FLOAT16:
+        assert args.optimize_onnx and args.use_gpu, "fp16 requires --optimize_onnx --use_gpu"
+
+    if args.precision == Precision.INT8:
+        assert not args.use_gpu, "quantization only supports CPU"
+
+    if args.stage == 1:
+        assert args.past_sequence_lengths == [0], "past_sequence_lengths shall be 0 for stage==1 (init decoder)"
+
+    torch.set_num_threads(psutil.cpu_count(logical=True) if args.thread_num <= 0 else args.thread_num)
+    print(torch.__config__.parallel_info())
+
+    cache_dir = args.cache_dir
+    output_dir = args.onnx_dir
+    prepare_environment(cache_dir, output_dir, args.use_gpu)
+
+    model_class = MODEL_CLASSES[args.model_class][0]
+    gpt2helper = Gpt2Helper
+    config = AutoConfig.from_pretrained(args.model_name_or_path, torchscript=args.torchscript, cache_dir=cache_dir)
+    model = model_class.from_pretrained(args.model_name_or_path, config=config, cache_dir=cache_dir)
+
+    # This script does not support float16 for PyTorch.
+    # if args.float16:
+    #     model.half()
+
+    device = torch.device("cuda:0" if args.use_gpu else "cpu")
+    model.to(device)
+    use_external_data_format = config.n_layer > 24  # TODO: find a way to check model size > 2GB
+    onnx_model_paths = gpt2helper.get_onnx_paths(
+        output_dir,
+        args.model_name_or_path,
+        args.model_class,
+        has_past=True,
+        new_folder=use_external_data_format,
+    )
+
+    onnx_model_path = onnx_model_paths["raw"]
+    use_padding = MODEL_CLASSES[args.model_class][2]
+    gpt2helper.export_onnx(
+        model,
+        device,
+        onnx_model_path,
+        args.verbose,
+        use_external_data_format,
+        has_position_ids=use_padding,
+        has_attention_mask=use_padding,
+    )
+
+    if args.optimize_onnx or args.precision != Precision.FLOAT32:
+        onnx_model_path = onnx_model_paths[str(args.precision) if args.precision != Precision.INT8 else "fp32"]
+        gpt2helper.optimize_onnx(
+            onnx_model_paths["raw"],
+            onnx_model_path,
+            args.precision == Precision.FLOAT16,
+            model.config.num_attention_heads,
+            model.config.hidden_size,
+            use_external_data_format,
+            auto_mixed_precision=True,
+            stage=args.stage,
+        )
+
+        if args.precision == Precision.INT8:
+            logger.info("quantizing model...")
+            QuantizeHelper.quantize_onnx_model(onnx_model_path, onnx_model_paths["int8"], use_external_data_format)
+            model = QuantizeHelper.quantize_torch_model(model)
+            logger.info("finished quantizing model")
+            onnx_model_path = onnx_model_paths["int8"]
+
+    if args.torchscript:
+        model = gpt2helper.torchscript(
+            model,
+            config,
+            device,
+            has_position_ids=use_padding,
+            has_attention_mask=use_padding,
+        )
+
+    session = create_onnxruntime_session(
+        onnx_model_path,
+        args.use_gpu,
+        enable_all_optimization=False,
+        num_threads=args.thread_num,
+        verbose=args.verbose,
+    )
+    if session is None:
+        return
+
+    # Allocate output buffers for IO Binding
+    max_output_shapes = gpt2helper.get_output_shapes(
+        max(args.batch_sizes),
+        max(args.past_sequence_lengths),
+        max(args.sequence_lengths),
+        config,
+        args.model_class,
+    )
+    output_buffers = gpt2helper.get_output_buffers(max_output_shapes, device, args.precision == Precision.FLOAT16)
+
+    csv_filename = args.result_csv or "benchmark_result_{}.csv".format(datetime.now().strftime("%Y%m%d-%H%M%S"))
+    with open(csv_filename, mode="a", newline="") as csv_file:
+        column_names = [
+            "model_name",
+            "model_class",
+            "stage",
+            "environment_variables",
+            "gpu",
+            "precision",
+            "optimizer",
+            "torchscript",
+            "batch_size",
+            "sequence_length",
+            "past_sequence_length",
+            "disable_io_binding",
+            "torch_latency",
+            "onnxruntime_latency",
+        ]
+        csv_writer = csv.DictWriter(csv_file, fieldnames=column_names)
+        csv_writer.writeheader()
+
+        for batch_size in args.batch_sizes:
+            for sequence_length in args.sequence_lengths:
+                for past_sequence_length in args.past_sequence_lengths:
+                    assert batch_size > 0 and sequence_length > 0 and past_sequence_length >= 0
+                    logger.debug(
+                        "Running test for batch_size=%d sequence_length=%d past_sequence_length=%d ...",
+                        batch_size,
+                        sequence_length,
+                        past_sequence_length,
+                    )
+
+                    dummy_inputs = gpt2helper.get_dummy_inputs(
+                        batch_size,
+                        past_sequence_length,
+                        sequence_length,
+                        config.num_attention_heads,
+                        config.hidden_size,
+                        config.n_layer,
+                        config.vocab_size,
+                        device,
+                        float16=(args.precision == Precision.FLOAT16),
+                        has_position_ids=use_padding,
+                        has_attention_mask=use_padding,
+                    )
+                    output_shapes = gpt2helper.get_output_shapes(
+                        batch_size,
+                        past_sequence_length,
+                        sequence_length,
+                        config,
+                        args.model_class,
+                    )
+
+                    try:
+                        if args.validate_onnx or args.output_torch_latency:
+                            outputs, torch_latency = gpt2helper.pytorch_inference(model, dummy_inputs, args.test_times)
+
+                            # Dump Torch output shape
+                            for i, value in enumerate(outputs):
+                                if isinstance(value, tuple):
+                                    logger.debug(
+                                        f"torch output {i} is tuple of size {len(value)}, shape {value[0].shape}"
+                                    )
+                                else:
+                                    logger.debug(f"torch output {i} shape {value.shape}")
+                        else:
+                            outputs = None
+                            torch_latency = None
+
+                        if args.disable_io_binding:
+                            ort_outputs, ort_latency = gpt2helper.onnxruntime_inference(
+                                session, dummy_inputs, args.test_times
+                            )
+                        else:
+                            ort_outputs, ort_latency = gpt2helper.onnxruntime_inference_with_binded_io(
+                                session,
+                                dummy_inputs,
+                                output_buffers,
+                                output_shapes,
+                                args.test_times,
+                                return_numpy=False,
+                                include_copy_output_latency=args.include_copy_output_latency,
+                            )
+
+                        if args.validate_onnx:
+                            copy_outputs = ort_outputs
+                            if not args.disable_io_binding:
+                                # Results of IO binding might be in GPU. Copy outputs to CPU for comparison.
+                                copy_outputs = []
+                                for output in ort_outputs:
+                                    copy_outputs.append(output.cpu().numpy())
+
+                            if gpt2helper.compare_outputs(
+                                outputs,
+                                copy_outputs,
+                                model_class=args.model_class,
+                                rtol=DEFAULT_TOLERANCE[args.precision],
+                                atol=DEFAULT_TOLERANCE[args.precision],
+                            ):
+                                logger.info(
+                                    f"Pytorch and ONNX Runtime outputs are all close (tolerance={DEFAULT_TOLERANCE[args.precision]})."
+                                )
+
+                        logger.info(
+                            "batch_size=%d, sequence_length=%d, past_sequence_length=%d, onnxruntime_latency=%.2f %s %s",
+                            batch_size,
+                            sequence_length,
+                            past_sequence_length,
+                            ort_latency,
+                            "(disable_io_binding)" if args.disable_io_binding else "",
+                            f", torch_latency={torch_latency}" if torch_latency else "",
+                        )
+
+                        row = {
+                            "model_name": args.model_name_or_path,
+                            "model_class": args.model_class,
+                            "stage": args.stage,
+                            "environment_variables": get_ort_environment_variables(),
+                            "gpu": args.use_gpu,
+                            "precision": args.precision,
+                            "optimizer": args.optimize_onnx,
+                            "torchscript": args.torchscript,
+                            "batch_size": batch_size,
+                            "sequence_length": sequence_length,
+                            "past_sequence_length": past_sequence_length,
+                            "disable_io_binding": args.disable_io_binding,
+                            "torch_latency": f"{torch_latency:.2f}" if torch_latency else "None",
+                            "onnxruntime_latency": f"{ort_latency:.2f}",
+                        }
+                        csv_writer.writerow(row)
+                    except Exception:
+                        logger.error("Exception", exc_info=True)  # noqa: G201
+                        return None
+
+    logger.info(f"Results are saved to file {csv_filename}")
+    return csv_filename
+
+
+if __name__ == "__main__":
+    args = parse_arguments()
+    setup_logger(args.verbose)
+    main(args)
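
For orientation, here is a minimal driver sketch for the benchmark script above. It is not part of the package diff: the model name "gpt2", the flag values, and the assumption that the script is run from its own directory (so sibling helpers such as benchmark_helper and gpt2_helper are importable) are illustrative only; every flag used comes verbatim from parse_arguments().

    # Hypothetical driver for benchmark_gpt2.py. Assumes the working directory
    # is onnxruntime/transformers/models/gpt2 so the script's sibling modules
    # (benchmark_helper, gpt2_helper, quantize_helper) resolve on import.
    from benchmark_gpt2 import main, parse_arguments
    from benchmark_helper import setup_logger

    # Equivalent to the command line:
    #   python benchmark_gpt2.py -m gpt2 -o --batch_sizes 1 4 \
    #       --sequence_lengths 1 --past_sequence_lengths 8 32 128 --test_times 100
    args = parse_arguments(
        [
            "-m", "gpt2",                           # pretrained model name or local path
            "-o",                                   # optimize the exported ONNX model
            "--batch_sizes", "1", "4",
            "--sequence_lengths", "1",
            "--past_sequence_lengths", "8", "32", "128",
            "--test_times", "100",
        ]
    )
    setup_logger(args.verbose)
    csv_filename = main(args)  # path of the summary CSV, or None on failure
    print("results written to", csv_filename)

Note that the script opens the result file with mode="a", so repeated runs append rows (each run writing a fresh header line) under the columns listed in column_names, from model_name through onnxruntime_latency.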