onnxruntime_directml-1.24.1-cp314-cp314-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (322)
  1. onnxruntime/LICENSE +21 -0
  2. onnxruntime/Privacy.md +21 -0
  3. onnxruntime/ThirdPartyNotices.txt +6121 -0
  4. onnxruntime/__init__.py +418 -0
  5. onnxruntime/backend/__init__.py +6 -0
  6. onnxruntime/backend/backend.py +175 -0
  7. onnxruntime/backend/backend_rep.py +52 -0
  8. onnxruntime/capi/DirectML.dll +0 -0
  9. onnxruntime/capi/__init__.py +4 -0
  10. onnxruntime/capi/_ld_preload.py +7 -0
  11. onnxruntime/capi/_pybind_state.py +33 -0
  12. onnxruntime/capi/build_and_package_info.py +2 -0
  13. onnxruntime/capi/convert_npz_to_onnx_adapter.py +48 -0
  14. onnxruntime/capi/onnxruntime.dll +0 -0
  15. onnxruntime/capi/onnxruntime_collect_build_info.py +47 -0
  16. onnxruntime/capi/onnxruntime_inference_collection.py +1440 -0
  17. onnxruntime/capi/onnxruntime_providers_shared.dll +0 -0
  18. onnxruntime/capi/onnxruntime_pybind11_state.pyd +0 -0
  19. onnxruntime/capi/onnxruntime_validation.py +154 -0
  20. onnxruntime/capi/version_info.py +2 -0
  21. onnxruntime/datasets/__init__.py +18 -0
  22. onnxruntime/datasets/logreg_iris.onnx +0 -0
  23. onnxruntime/datasets/mul_1.onnx +0 -0
  24. onnxruntime/datasets/sigmoid.onnx +13 -0
  25. onnxruntime/quantization/CalTableFlatBuffers/KeyValue.py +78 -0
  26. onnxruntime/quantization/CalTableFlatBuffers/TrtTable.py +90 -0
  27. onnxruntime/quantization/CalTableFlatBuffers/__init__.py +0 -0
  28. onnxruntime/quantization/__init__.py +19 -0
  29. onnxruntime/quantization/base_quantizer.py +529 -0
  30. onnxruntime/quantization/calibrate.py +1267 -0
  31. onnxruntime/quantization/execution_providers/qnn/__init__.py +2 -0
  32. onnxruntime/quantization/execution_providers/qnn/fusion_lpnorm.py +132 -0
  33. onnxruntime/quantization/execution_providers/qnn/fusion_spacetodepth.py +162 -0
  34. onnxruntime/quantization/execution_providers/qnn/mixed_precision_overrides_utils.py +413 -0
  35. onnxruntime/quantization/execution_providers/qnn/preprocess.py +353 -0
  36. onnxruntime/quantization/execution_providers/qnn/quant_config.py +389 -0
  37. onnxruntime/quantization/fusions/__init__.py +4 -0
  38. onnxruntime/quantization/fusions/fusion.py +311 -0
  39. onnxruntime/quantization/fusions/fusion_gelu.py +272 -0
  40. onnxruntime/quantization/fusions/fusion_layernorm.py +146 -0
  41. onnxruntime/quantization/fusions/replace_upsample_with_resize.py +96 -0
  42. onnxruntime/quantization/matmul_bnb4_quantizer.py +239 -0
  43. onnxruntime/quantization/matmul_nbits_quantizer.py +1638 -0
  44. onnxruntime/quantization/neural_compressor/__init__.py +1 -0
  45. onnxruntime/quantization/neural_compressor/onnx_model.py +1251 -0
  46. onnxruntime/quantization/neural_compressor/util.py +80 -0
  47. onnxruntime/quantization/neural_compressor/weight_only.py +932 -0
  48. onnxruntime/quantization/onnx_model.py +600 -0
  49. onnxruntime/quantization/onnx_quantizer.py +1163 -0
  50. onnxruntime/quantization/operators/__init__.py +2 -0
  51. onnxruntime/quantization/operators/activation.py +119 -0
  52. onnxruntime/quantization/operators/argmax.py +18 -0
  53. onnxruntime/quantization/operators/attention.py +73 -0
  54. onnxruntime/quantization/operators/base_operator.py +26 -0
  55. onnxruntime/quantization/operators/binary_op.py +72 -0
  56. onnxruntime/quantization/operators/concat.py +62 -0
  57. onnxruntime/quantization/operators/conv.py +260 -0
  58. onnxruntime/quantization/operators/direct_q8.py +78 -0
  59. onnxruntime/quantization/operators/embed_layernorm.py +121 -0
  60. onnxruntime/quantization/operators/gather.py +64 -0
  61. onnxruntime/quantization/operators/gavgpool.py +62 -0
  62. onnxruntime/quantization/operators/gemm.py +172 -0
  63. onnxruntime/quantization/operators/lstm.py +121 -0
  64. onnxruntime/quantization/operators/matmul.py +231 -0
  65. onnxruntime/quantization/operators/maxpool.py +34 -0
  66. onnxruntime/quantization/operators/norm.py +40 -0
  67. onnxruntime/quantization/operators/pad.py +172 -0
  68. onnxruntime/quantization/operators/pooling.py +67 -0
  69. onnxruntime/quantization/operators/qdq_base_operator.py +22 -0
  70. onnxruntime/quantization/operators/resize.py +34 -0
  71. onnxruntime/quantization/operators/softmax.py +74 -0
  72. onnxruntime/quantization/operators/split.py +63 -0
  73. onnxruntime/quantization/operators/where.py +87 -0
  74. onnxruntime/quantization/preprocess.py +141 -0
  75. onnxruntime/quantization/qdq_loss_debug.py +389 -0
  76. onnxruntime/quantization/qdq_quantizer.py +1477 -0
  77. onnxruntime/quantization/quant_utils.py +1051 -0
  78. onnxruntime/quantization/quantize.py +953 -0
  79. onnxruntime/quantization/registry.py +110 -0
  80. onnxruntime/quantization/shape_inference.py +204 -0
  81. onnxruntime/quantization/static_quantize_runner.py +256 -0
  82. onnxruntime/quantization/tensor_quant_overrides.py +520 -0
  83. onnxruntime/tools/__init__.py +10 -0
  84. onnxruntime/tools/check_onnx_model_mobile_usability.py +47 -0
  85. onnxruntime/tools/convert_onnx_models_to_ort.py +380 -0
  86. onnxruntime/tools/file_utils.py +47 -0
  87. onnxruntime/tools/logger.py +11 -0
  88. onnxruntime/tools/make_dynamic_shape_fixed.py +73 -0
  89. onnxruntime/tools/mobile_helpers/__init__.py +0 -0
  90. onnxruntime/tools/mobile_helpers/coreml_supported_mlprogram_ops.md +53 -0
  91. onnxruntime/tools/mobile_helpers/coreml_supported_neuralnetwork_ops.md +43 -0
  92. onnxruntime/tools/mobile_helpers/nnapi_supported_ops.md +58 -0
  93. onnxruntime/tools/mobile_helpers/usability_checker.py +738 -0
  94. onnxruntime/tools/offline_tuning.py +169 -0
  95. onnxruntime/tools/onnx_model_utils.py +416 -0
  96. onnxruntime/tools/onnx_randomizer.py +85 -0
  97. onnxruntime/tools/onnxruntime_test.py +164 -0
  98. onnxruntime/tools/optimize_onnx_model.py +56 -0
  99. onnxruntime/tools/ort_format_model/__init__.py +27 -0
  100. onnxruntime/tools/ort_format_model/operator_type_usage_processors.py +653 -0
  101. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/__init__.py +0 -0
  102. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ArgType.py +7 -0
  103. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ArgTypeAndIndex.py +67 -0
  104. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Attribute.py +337 -0
  105. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/AttributeType.py +18 -0
  106. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Checkpoint.py +125 -0
  107. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DeprecatedKernelCreateInfos.py +120 -0
  108. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DeprecatedNodeIndexAndKernelDefHash.py +68 -0
  109. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DeprecatedSessionState.py +96 -0
  110. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DeprecatedSubGraphSessionState.py +72 -0
  111. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Dimension.py +71 -0
  112. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DimensionValue.py +80 -0
  113. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DimensionValueType.py +8 -0
  114. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/EdgeEnd.py +32 -0
  115. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/FloatProperty.py +67 -0
  116. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Graph.py +320 -0
  117. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/InferenceSession.py +88 -0
  118. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/IntProperty.py +67 -0
  119. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/KernelTypeStrArgsEntry.py +91 -0
  120. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/KernelTypeStrResolver.py +78 -0
  121. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/MapType.py +71 -0
  122. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Model.py +223 -0
  123. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ModuleState.py +141 -0
  124. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Node.py +317 -0
  125. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/NodeEdge.py +126 -0
  126. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/NodeType.py +7 -0
  127. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/NodesToOptimizeIndices.py +160 -0
  128. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/OpIdKernelTypeStrArgsEntry.py +91 -0
  129. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/OperatorSetId.py +67 -0
  130. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/OptimizerGroup.py +117 -0
  131. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ParameterOptimizerState.py +91 -0
  132. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/PropertyBag.py +152 -0
  133. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/RuntimeOptimizationRecord.py +105 -0
  134. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/RuntimeOptimizationRecordContainerEntry.py +91 -0
  135. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/RuntimeOptimizations.py +79 -0
  136. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/SequenceType.py +58 -0
  137. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Shape.py +78 -0
  138. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/SparseTensor.py +114 -0
  139. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/StringProperty.py +67 -0
  140. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/StringStringEntry.py +67 -0
  141. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Tensor.py +203 -0
  142. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/TensorDataType.py +26 -0
  143. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/TensorTypeAndShape.py +71 -0
  144. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/TypeInfo.py +83 -0
  145. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/TypeInfoValue.py +9 -0
  146. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ValueInfo.py +84 -0
  147. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/__init__.py +6 -0
  148. onnxruntime/tools/ort_format_model/ort_model_processor.py +86 -0
  149. onnxruntime/tools/ort_format_model/types.py +85 -0
  150. onnxruntime/tools/ort_format_model/utils.py +61 -0
  151. onnxruntime/tools/pytorch_export_contrib_ops.py +129 -0
  152. onnxruntime/tools/pytorch_export_helpers.py +131 -0
  153. onnxruntime/tools/qdq_helpers/__init__.py +0 -0
  154. onnxruntime/tools/qdq_helpers/optimize_qdq_model.py +37 -0
  155. onnxruntime/tools/qnn/add_trans_cast.py +292 -0
  156. onnxruntime/tools/qnn/gen_qnn_ctx_onnx_model.py +364 -0
  157. onnxruntime/tools/qnn/preprocess.py +165 -0
  158. onnxruntime/tools/reduced_build_config_parser.py +203 -0
  159. onnxruntime/tools/remove_initializer_from_input.py +37 -0
  160. onnxruntime/tools/symbolic_shape_infer.py +3094 -0
  161. onnxruntime/tools/update_onnx_opset.py +31 -0
  162. onnxruntime/transformers/__init__.py +8 -0
  163. onnxruntime/transformers/affinity_helper.py +40 -0
  164. onnxruntime/transformers/benchmark.py +942 -0
  165. onnxruntime/transformers/benchmark_helper.py +643 -0
  166. onnxruntime/transformers/bert_perf_test.py +629 -0
  167. onnxruntime/transformers/bert_test_data.py +641 -0
  168. onnxruntime/transformers/compare_bert_results.py +256 -0
  169. onnxruntime/transformers/constants.py +47 -0
  170. onnxruntime/transformers/convert_generation.py +3605 -0
  171. onnxruntime/transformers/convert_tf_models_to_pytorch.py +205 -0
  172. onnxruntime/transformers/convert_to_packing_mode.py +385 -0
  173. onnxruntime/transformers/dynamo_onnx_helper.py +205 -0
  174. onnxruntime/transformers/float16.py +501 -0
  175. onnxruntime/transformers/fusion_attention.py +1189 -0
  176. onnxruntime/transformers/fusion_attention_clip.py +340 -0
  177. onnxruntime/transformers/fusion_attention_sam2.py +533 -0
  178. onnxruntime/transformers/fusion_attention_unet.py +1307 -0
  179. onnxruntime/transformers/fusion_attention_vae.py +300 -0
  180. onnxruntime/transformers/fusion_bart_attention.py +435 -0
  181. onnxruntime/transformers/fusion_base.py +141 -0
  182. onnxruntime/transformers/fusion_bias_add.py +57 -0
  183. onnxruntime/transformers/fusion_biasgelu.py +66 -0
  184. onnxruntime/transformers/fusion_biassplitgelu.py +110 -0
  185. onnxruntime/transformers/fusion_conformer_attention.py +222 -0
  186. onnxruntime/transformers/fusion_constant_fold.py +144 -0
  187. onnxruntime/transformers/fusion_embedlayer.py +810 -0
  188. onnxruntime/transformers/fusion_fastgelu.py +492 -0
  189. onnxruntime/transformers/fusion_gelu.py +258 -0
  190. onnxruntime/transformers/fusion_gelu_approximation.py +25 -0
  191. onnxruntime/transformers/fusion_gemmfastgelu.py +121 -0
  192. onnxruntime/transformers/fusion_gpt_attention.py +546 -0
  193. onnxruntime/transformers/fusion_gpt_attention_megatron.py +355 -0
  194. onnxruntime/transformers/fusion_gpt_attention_no_past.py +260 -0
  195. onnxruntime/transformers/fusion_group_norm.py +180 -0
  196. onnxruntime/transformers/fusion_layernorm.py +489 -0
  197. onnxruntime/transformers/fusion_mha_mmdit.py +667 -0
  198. onnxruntime/transformers/fusion_nhwc_conv.py +99 -0
  199. onnxruntime/transformers/fusion_options.py +340 -0
  200. onnxruntime/transformers/fusion_qordered_attention.py +420 -0
  201. onnxruntime/transformers/fusion_qordered_gelu.py +118 -0
  202. onnxruntime/transformers/fusion_qordered_layernorm.py +122 -0
  203. onnxruntime/transformers/fusion_qordered_matmul.py +216 -0
  204. onnxruntime/transformers/fusion_quickgelu.py +74 -0
  205. onnxruntime/transformers/fusion_reshape.py +173 -0
  206. onnxruntime/transformers/fusion_rotary_attention.py +1591 -0
  207. onnxruntime/transformers/fusion_shape.py +109 -0
  208. onnxruntime/transformers/fusion_simplified_layernorm.py +165 -0
  209. onnxruntime/transformers/fusion_skip_group_norm.py +254 -0
  210. onnxruntime/transformers/fusion_skiplayernorm.py +209 -0
  211. onnxruntime/transformers/fusion_transpose.py +167 -0
  212. onnxruntime/transformers/fusion_utils.py +321 -0
  213. onnxruntime/transformers/huggingface_models.py +74 -0
  214. onnxruntime/transformers/import_utils.py +20 -0
  215. onnxruntime/transformers/io_binding_helper.py +487 -0
  216. onnxruntime/transformers/large_model_exporter.py +395 -0
  217. onnxruntime/transformers/machine_info.py +230 -0
  218. onnxruntime/transformers/metrics.py +163 -0
  219. onnxruntime/transformers/models/bart/__init__.py +12 -0
  220. onnxruntime/transformers/models/bart/export.py +98 -0
  221. onnxruntime/transformers/models/bert/__init__.py +12 -0
  222. onnxruntime/transformers/models/bert/eval_squad.py +329 -0
  223. onnxruntime/transformers/models/gpt2/__init__.py +12 -0
  224. onnxruntime/transformers/models/gpt2/benchmark_gpt2.py +413 -0
  225. onnxruntime/transformers/models/gpt2/convert_to_onnx.py +566 -0
  226. onnxruntime/transformers/models/gpt2/gpt2_helper.py +1031 -0
  227. onnxruntime/transformers/models/gpt2/gpt2_parity.py +513 -0
  228. onnxruntime/transformers/models/gpt2/gpt2_tester.py +501 -0
  229. onnxruntime/transformers/models/gpt2/parity_check_helper.py +146 -0
  230. onnxruntime/transformers/models/llama/__init__.py +12 -0
  231. onnxruntime/transformers/models/llama/benchmark.py +700 -0
  232. onnxruntime/transformers/models/llama/benchmark_all.py +488 -0
  233. onnxruntime/transformers/models/llama/benchmark_e2e.py +608 -0
  234. onnxruntime/transformers/models/llama/convert_to_onnx.py +1064 -0
  235. onnxruntime/transformers/models/llama/dist_settings.py +57 -0
  236. onnxruntime/transformers/models/llama/llama_inputs.py +504 -0
  237. onnxruntime/transformers/models/llama/llama_parity.py +343 -0
  238. onnxruntime/transformers/models/llama/llama_torch.py +47 -0
  239. onnxruntime/transformers/models/llama/quant_kv_dataloader.py +108 -0
  240. onnxruntime/transformers/models/longformer/__init__.py +12 -0
  241. onnxruntime/transformers/models/longformer/benchmark_longformer.py +821 -0
  242. onnxruntime/transformers/models/longformer/convert_to_onnx.py +413 -0
  243. onnxruntime/transformers/models/longformer/generate_test_data.py +347 -0
  244. onnxruntime/transformers/models/longformer/longformer_helper.py +76 -0
  245. onnxruntime/transformers/models/phi2/__init__.py +12 -0
  246. onnxruntime/transformers/models/phi2/convert_to_onnx.py +590 -0
  247. onnxruntime/transformers/models/phi2/inference_example.py +414 -0
  248. onnxruntime/transformers/models/sam2/__init__.py +12 -0
  249. onnxruntime/transformers/models/sam2/benchmark_sam2.py +638 -0
  250. onnxruntime/transformers/models/sam2/convert_to_onnx.py +270 -0
  251. onnxruntime/transformers/models/sam2/image_decoder.py +272 -0
  252. onnxruntime/transformers/models/sam2/image_encoder.py +236 -0
  253. onnxruntime/transformers/models/sam2/mask_decoder.py +208 -0
  254. onnxruntime/transformers/models/sam2/nvtx_helper.py +33 -0
  255. onnxruntime/transformers/models/sam2/prompt_encoder.py +189 -0
  256. onnxruntime/transformers/models/sam2/sam2_demo.py +321 -0
  257. onnxruntime/transformers/models/sam2/sam2_image_onnx_predictor.py +279 -0
  258. onnxruntime/transformers/models/sam2/sam2_utils.py +147 -0
  259. onnxruntime/transformers/models/stable_diffusion/__init__.py +12 -0
  260. onnxruntime/transformers/models/stable_diffusion/benchmark.py +1519 -0
  261. onnxruntime/transformers/models/stable_diffusion/benchmark_controlnet.py +426 -0
  262. onnxruntime/transformers/models/stable_diffusion/demo_txt2img.py +103 -0
  263. onnxruntime/transformers/models/stable_diffusion/demo_txt2img_xl.py +269 -0
  264. onnxruntime/transformers/models/stable_diffusion/demo_utils.py +778 -0
  265. onnxruntime/transformers/models/stable_diffusion/diffusion_models.py +1318 -0
  266. onnxruntime/transformers/models/stable_diffusion/diffusion_schedulers.py +1179 -0
  267. onnxruntime/transformers/models/stable_diffusion/engine_builder.py +295 -0
  268. onnxruntime/transformers/models/stable_diffusion/engine_builder_ort_cuda.py +387 -0
  269. onnxruntime/transformers/models/stable_diffusion/engine_builder_ort_trt.py +288 -0
  270. onnxruntime/transformers/models/stable_diffusion/engine_builder_tensorrt.py +395 -0
  271. onnxruntime/transformers/models/stable_diffusion/engine_builder_torch.py +108 -0
  272. onnxruntime/transformers/models/stable_diffusion/optimize_pipeline.py +590 -0
  273. onnxruntime/transformers/models/stable_diffusion/ort_optimizer.py +136 -0
  274. onnxruntime/transformers/models/stable_diffusion/pipeline_stable_diffusion.py +831 -0
  275. onnxruntime/transformers/models/stable_diffusion/trt_utilities.py +12 -0
  276. onnxruntime/transformers/models/t5/__init__.py +12 -0
  277. onnxruntime/transformers/models/t5/convert_to_onnx.py +318 -0
  278. onnxruntime/transformers/models/t5/t5_decoder.py +437 -0
  279. onnxruntime/transformers/models/t5/t5_encoder.py +70 -0
  280. onnxruntime/transformers/models/t5/t5_encoder_decoder_init.py +361 -0
  281. onnxruntime/transformers/models/t5/t5_helper.py +302 -0
  282. onnxruntime/transformers/models/whisper/__init__.py +12 -0
  283. onnxruntime/transformers/models/whisper/benchmark.py +585 -0
  284. onnxruntime/transformers/models/whisper/benchmark_all.py +526 -0
  285. onnxruntime/transformers/models/whisper/convert_to_onnx.py +609 -0
  286. onnxruntime/transformers/models/whisper/whisper_chain.py +334 -0
  287. onnxruntime/transformers/models/whisper/whisper_decoder.py +464 -0
  288. onnxruntime/transformers/models/whisper/whisper_encoder.py +164 -0
  289. onnxruntime/transformers/models/whisper/whisper_encoder_decoder_init.py +371 -0
  290. onnxruntime/transformers/models/whisper/whisper_helper.py +1035 -0
  291. onnxruntime/transformers/models/whisper/whisper_inputs.py +380 -0
  292. onnxruntime/transformers/models/whisper/whisper_jump_times.py +477 -0
  293. onnxruntime/transformers/onnx_exporter.py +719 -0
  294. onnxruntime/transformers/onnx_model.py +1636 -0
  295. onnxruntime/transformers/onnx_model_bart.py +141 -0
  296. onnxruntime/transformers/onnx_model_bert.py +488 -0
  297. onnxruntime/transformers/onnx_model_bert_keras.py +474 -0
  298. onnxruntime/transformers/onnx_model_bert_tf.py +588 -0
  299. onnxruntime/transformers/onnx_model_clip.py +42 -0
  300. onnxruntime/transformers/onnx_model_conformer.py +32 -0
  301. onnxruntime/transformers/onnx_model_gpt2.py +101 -0
  302. onnxruntime/transformers/onnx_model_mmdit.py +112 -0
  303. onnxruntime/transformers/onnx_model_phi.py +929 -0
  304. onnxruntime/transformers/onnx_model_sam2.py +137 -0
  305. onnxruntime/transformers/onnx_model_t5.py +985 -0
  306. onnxruntime/transformers/onnx_model_tnlr.py +226 -0
  307. onnxruntime/transformers/onnx_model_unet.py +258 -0
  308. onnxruntime/transformers/onnx_model_vae.py +42 -0
  309. onnxruntime/transformers/onnx_utils.py +55 -0
  310. onnxruntime/transformers/optimizer.py +620 -0
  311. onnxruntime/transformers/past_helper.py +149 -0
  312. onnxruntime/transformers/profile_result_processor.py +358 -0
  313. onnxruntime/transformers/profiler.py +434 -0
  314. onnxruntime/transformers/quantize_helper.py +76 -0
  315. onnxruntime/transformers/shape_infer_helper.py +121 -0
  316. onnxruntime/transformers/shape_optimizer.py +400 -0
  317. onnxruntime/transformers/torch_onnx_export_helper.py +74 -0
  318. onnxruntime_directml-1.24.1.dist-info/METADATA +216 -0
  319. onnxruntime_directml-1.24.1.dist-info/RECORD +322 -0
  320. onnxruntime_directml-1.24.1.dist-info/WHEEL +5 -0
  321. onnxruntime_directml-1.24.1.dist-info/entry_points.txt +2 -0
  322. onnxruntime_directml-1.24.1.dist-info/top_level.txt +1 -0
onnxruntime/transformers/models/gpt2/gpt2_helper.py
@@ -0,0 +1,1031 @@
+ # -------------------------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # Licensed under the MIT License. See License.txt in the project root for
+ # license information.
+ # --------------------------------------------------------------------------
+ # This script helps with ONNX conversion and validation of the GPT-2 model with past state.
+ import logging
+ import os
+ import pickle
+ import random
+ import shutil
+ import tempfile
+ import time
+ from pathlib import Path
+
+ import numpy
+ import onnx
+ import torch
+ from benchmark_helper import Precision
+ from float16 import float_to_float16_max_diff
+ from fusion_options import FusionOptions
+ from io_binding_helper import IOBindingHelper
+ from onnx_model import OnnxModel
+ from optimizer import optimize_model
+ from torch_onnx_export_helper import torch_onnx_export
+ from transformers import GPT2Config, GPT2LMHeadModel, GPT2Model, TFGPT2Model
+
+ logger = logging.getLogger(__name__)
+
+ PRETRAINED_GPT2_MODELS = ["distilgpt2", "gpt2", "gpt2-medium", "gpt2-large", "gpt2-xl"]
+
+ DEFAULT_TOLERANCE = {
+     Precision.FLOAT32: 0.0005,
+     Precision.FLOAT16: 0.2,
+     Precision.INT8: 3.0,
+ }
+
+
+ class GPT2ModelNoPastState(GPT2Model):
+     """Here we wrap a class to disable past state output."""
+
+     def __init__(self, config):
+         super().__init__(config)
+
+     def forward(self, input_ids):
+         return super().forward(input_ids, use_cache=False, return_dict=False)
+
+
+ class TFGPT2ModelNoPastState(TFGPT2Model):
+     """Here we wrap a class to disable past state output."""
+
+     def __init__(self, config):
+         config.use_cache = False
+         super().__init__(config)
+
+     def forward(self, input_ids):
+         return super().call(input_ids, use_cache=False)
+
+
+ class MyGPT2Model(GPT2Model):
+     """Here we wrap a class for ONNX model conversion of GPT2Model with past state."""
+
+     def __init__(self, config):
+         super().__init__(config)
+
+     @staticmethod
+     def post_process(result, num_layer):
+         if isinstance(result[1][0], (tuple, list)):
+             assert len(result[1]) == num_layer and len(result[1][0]) == 2
+             # assert len(result[1][0][0].shape) == 4 and result[1][0][0].shape == result[1][0][1].shape
+             present = []
+             for i in range(num_layer):
+                 # Since transformers v4.*, past keys and values are separate outputs.
+                 # Here we concatenate them into one tensor to be compatible with the Attention operator.
+                 present.append(
+                     torch.cat(
+                         (result[1][i][0].unsqueeze(0), result[1][i][1].unsqueeze(0)),
+                         dim=0,
+                     )
+                 )
+             return (result[0], tuple(present))
+
+         return result
+
+     def forward(self, input_ids, position_ids, attention_mask, *past):
+         result = super().forward(
+             input_ids,
+             position_ids=position_ids,
+             attention_mask=attention_mask,
+             past_key_values=past,
+             return_dict=False,
+         )
+         return MyGPT2Model.post_process(result, self.config.n_layer)
+
+
+ class MyGPT2LMHeadModel(GPT2LMHeadModel):
+     """Here we wrap a class for ONNX model conversion of GPT2LMHeadModel with past state."""
+
+     def __init__(self, config):
+         super().__init__(config)
+
+     def forward(self, input_ids, position_ids, attention_mask, *past):
+         result = super().forward(
+             input_ids,
+             position_ids=position_ids,
+             attention_mask=attention_mask,
+             past_key_values=past,
+             return_dict=False,
+         )
+
+         return MyGPT2Model.post_process(result, self.config.n_layer)
+
+
+ class MyGPT2LMHeadModel_NoPadding(GPT2LMHeadModel):  # noqa: N801
+     """Here we wrap a class for ONNX model conversion of GPT2LMHeadModel with past state and no padding.
+     When you always use batch_size=1 in inference, there is no padding in the inputs. In that case, position_ids
+     and attention_mask need not be included in the inputs.
+     """
+
+     def __init__(self, config):
+         super().__init__(config)
+
+     def forward(self, input_ids, *past):
+         result = super().forward(input_ids, past_key_values=past, return_dict=False)
+
+         return MyGPT2Model.post_process(result, self.config.n_layer)
+
+
+ # Maps model class name to a tuple of model class, name of first output, and whether padding is used
+ MODEL_CLASSES = {
+     "GPT2LMHeadModel": (MyGPT2LMHeadModel, "logits", True),
+     "GPT2LMHeadModel_NoPadding": (MyGPT2LMHeadModel_NoPadding, "logits", False),
+     "GPT2Model": (MyGPT2Model, "last_state", True),
+ }
+
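+ # Example (illustrative sketch, not from the original file): looking up a wrapper
+ # class and the name of its first output:
+ #     wrapper_class, first_output, uses_padding = MODEL_CLASSES["GPT2LMHeadModel"]
+ #     # -> (MyGPT2LMHeadModel, "logits", True)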
+
+ class Gpt2Inputs:
+     def __init__(self, input_ids, position_ids, attention_mask, past):
+         self.input_ids: torch.LongTensor = input_ids
+         self.position_ids: torch.LongTensor = position_ids
+         self.attention_mask: torch.LongTensor | torch.FloatTensor | torch.HalfTensor = attention_mask
+         self.past: list[torch.FloatTensor] | list[torch.HalfTensor] = past
+
+     def to_list(self) -> list:
+         input_list = [v for v in [self.input_ids, self.position_ids, self.attention_mask] if v is not None]
+         if self.past:
+             input_list.extend(self.past)
+
+         return input_list
+
+     def to_tuple(self) -> tuple:
+         return tuple(v for v in [self.input_ids, self.position_ids, self.attention_mask, self.past] if v is not None)
+
+     def to_fp32(self):
+         # For attention mask, only convert fp16 to fp32, and keep the original type if it is integer.
+         attention_mask = None
+         if self.attention_mask is not None:
+             attention_mask = (
+                 self.attention_mask.to(dtype=torch.float32)
+                 if (self.attention_mask.dtype == torch.float16)
+                 else self.attention_mask
+             )
+
+         past = [p.to(dtype=torch.float32) for p in self.past]
+         return Gpt2Inputs(self.input_ids, self.position_ids, attention_mask, past)
+
+
+ class Gpt2Helper:
+     """A helper class for Gpt2 model conversion, inference and verification."""
+
+     @staticmethod
+     def get_dummy_inputs(
+         batch_size: int,
+         past_sequence_length: int,
+         sequence_length: int,
+         num_attention_heads: int,
+         hidden_size: int,
+         num_layer: int,
+         vocab_size: int,
+         device: torch.device,
+         float16: bool = False,
+         has_position_ids: bool = True,
+         has_attention_mask: bool = True,
+         input_ids_dtype: torch.dtype = torch.int32,
+         position_ids_dtype: torch.dtype = torch.int32,
+         attention_mask_dtype: torch.dtype = torch.int32,
+         left_side_padding: bool = True,
+     ) -> Gpt2Inputs:
+         """Create random inputs for GPT2 model.
+         Returns torch tensors of input_ids, position_ids, attention_mask and a list of past state tensors.
+         """
+         float_type = torch.float16 if float16 else torch.float32
+         past_shape = [
+             2,
+             batch_size,
+             num_attention_heads,
+             past_sequence_length,
+             int(hidden_size / num_attention_heads),
+         ]
+
+         past = [(torch.rand(past_shape, dtype=float_type, device=device) * 2.0 - 1.0) for _ in range(num_layer)]
+         input_ids = torch.randint(
+             low=0,
+             high=vocab_size - 1,
+             size=(batch_size, sequence_length),
+             dtype=input_ids_dtype,
+             device=device,
+         )
+
+         attention_mask = None
+         if has_attention_mask:
+             total_sequence_length = past_sequence_length + sequence_length
+             attention_mask = torch.ones(
+                 [batch_size, total_sequence_length],
+                 dtype=attention_mask_dtype,
+                 device=device,
+             )
+
+             if total_sequence_length >= 2:
+                 for i in range(batch_size):
+                     padding_length = random.randint(0, total_sequence_length - 1)
+                     if left_side_padding:
+                         attention_mask[i, :padding_length] = 0
+                     else:  # right side padding
+                         attention_mask[i, total_sequence_length - padding_length :] = 0
+
+         # Deduce position_ids from attention mask
+         position_ids = None
+         if has_position_ids:
+             position_ids = attention_mask.long().cumsum(-1) - 1
+             position_ids.masked_fill_(position_ids < 0, 0)
+             position_ids = position_ids[:, past_sequence_length:].to(position_ids_dtype)
+
+         return Gpt2Inputs(input_ids, position_ids, attention_mask, past)
+
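+     # Example (illustrative sketch; the values below are the standard "gpt2" config):
+     #     dummy = Gpt2Helper.get_dummy_inputs(
+     #         batch_size=2, past_sequence_length=4, sequence_length=1,
+     #         num_attention_heads=12, hidden_size=768, num_layer=12,
+     #         vocab_size=50257, device=torch.device("cpu"))
+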
+     @staticmethod
+     def get_output_shapes(
+         batch_size: int,
+         past_sequence_length: int,
+         sequence_length: int,
+         config: GPT2Config,
+         model_class: str = "GPT2LMHeadModel",
+     ) -> dict[str, list[int]]:
+         """Returns a dictionary with output name as key, and shape as value."""
+         num_attention_heads = config.num_attention_heads
+         hidden_size = config.hidden_size
+         num_layer = config.num_hidden_layers
+         vocab_size = config.vocab_size
+
+         output_name = MODEL_CLASSES[model_class][1]
+
+         last_state_shape = [
+             batch_size,
+             sequence_length,
+             vocab_size if output_name == "logits" else hidden_size,
+         ]
+         present_state_shape = [
+             2,
+             batch_size,
+             num_attention_heads,
+             past_sequence_length + sequence_length,
+             int(hidden_size / num_attention_heads),
+         ]
+
+         output_shapes = {output_name: last_state_shape}
+         for i in range(num_layer):
+             output_shapes["present_" + str(i)] = present_state_shape
+
+         return output_shapes
+
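+     # Example (illustrative sketch): with the standard "gpt2" config (12 layers, 12 heads,
+     # hidden size 768, vocab size 50257), batch_size=1, past_sequence_length=4 and
+     # sequence_length=1 give:
+     #     {"logits": [1, 1, 50257], "present_0": [2, 1, 12, 5, 64], ..., "present_11": [2, 1, 12, 5, 64]}
+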
+     @staticmethod
+     def auto_increase_buffer_size(output_buffers, output_shapes):
+         for key in output_shapes:
+             assert key in output_buffers
+             buffer = output_buffers[key]
+             if numpy.prod(output_shapes[key]) > buffer.nelement():
+                 output_buffers[key] = torch.empty(
+                     numpy.prod(output_shapes[key]),
+                     dtype=buffer.dtype,
+                     device=buffer.device,
+                 )
+
+     @staticmethod
+     def get_output_buffers(output_shapes, device, is_float16=False):
+         """Returns a dictionary with output name as key, and a 1D tensor as value. The tensor has enough space for the given shape."""
+         data_type = torch.float16 if is_float16 else torch.float32
+
+         output_buffers = {}
+         for name, shape in output_shapes.items():
+             output_buffers[name] = torch.empty(numpy.prod(shape), dtype=data_type, device=device)
+         return output_buffers
+
+     @staticmethod
+     def diff_outputs(torch_outputs, ort_outputs, relative=False):
+         """Returns the maximum difference between PyTorch and OnnxRuntime outputs."""
+         expected_outputs = torch_outputs[0].cpu().numpy()
+         diff = numpy.abs(expected_outputs - ort_outputs[0])
+         if relative:
+             return numpy.amax(diff / (numpy.abs(expected_outputs) + 1e-6))
+         else:
+             return numpy.amax(diff)
+
+     @staticmethod
+     def compare_outputs(torch_outputs, ort_outputs, rtol=1e-03, atol=1e-03, **kwargs):
+         """Returns True if torch and ORT outputs are close for the given thresholds, and False otherwise.
+         Note: kwargs is needed since Gpt2BeamSearchHelper.compare_outputs has an extra parameter model_class.
+         """
+         is_close = numpy.allclose(ort_outputs[0], torch_outputs[0].cpu().numpy(), rtol=rtol, atol=atol)
+         logger.debug(f"PyTorch and OnnxRuntime output 0 (last_state) are close: {is_close}")
+
+         is_all_close = is_close
+         num_layers = len(ort_outputs) - 1
+
+         for layer in range(num_layers):
+             is_close = numpy.allclose(
+                 ort_outputs[1 + layer],
+                 torch_outputs[1][layer].cpu().numpy(),
+                 rtol=rtol,
+                 atol=atol,
+             )
+             logger.debug(f"PyTorch and OnnxRuntime layer {layer} state (present_{layer}) are close: {is_close}")
+             is_all_close = is_all_close and is_close
+
+         if not is_all_close:
+             max_abs_diff = Gpt2Helper.diff_outputs(torch_outputs, ort_outputs)
+             logger.info(f"PyTorch and OnnxRuntime results are not all close: max_abs_diff={max_abs_diff:.5f}")
+
+         return is_all_close
+
+     @staticmethod
+     def compare_outputs_v2(torch_outputs, ort_outputs, atol=1e-06):
+         """Compare outputs from PyTorch and OnnxRuntime.
+
+         Args:
+             torch_outputs (Tuple[Torch.Tensor]): PyTorch model output
+             ort_outputs (List[numpy.ndarray]): OnnxRuntime output
+             atol (float, optional): Absolute tolerance. Defaults to 1e-06.
+
+         Returns:
+             is_all_close (bool): whether all elements are close.
+             max_abs_diff (float): maximum absolute difference.
+             max_diff_output_index (int): index of the output with the maximum difference.
+             messages (List[str]): a list of debug messages, one for each output.
+             is_top1_matched (bool): whether the top-1 logits index matches.
+         """
+         is_all_close = True
+         is_top1_matched = False
+         max_diffs = []
+         messages = []
+         for i in range(len(ort_outputs)):
+             ort_output = ort_outputs[i]
+             torch_output = (torch_outputs[0] if i == 0 else torch_outputs[1][i - 1]).cpu().numpy()
+             is_close = numpy.allclose(ort_output, torch_output, atol=atol, rtol=0)
+             max_diffs.append(numpy.amax(numpy.abs(torch_output - ort_output)))
+             is_all_close = is_all_close and is_close
+
+             if numpy.isnan(torch_output).any():
+                 logger.debug(f"PyTorch output {i} has nan")
+             if numpy.isinf(torch_output).any():
+                 logger.debug(f"PyTorch output {i} has inf")
+             if numpy.isnan(ort_output).any():
+                 logger.debug(f"ORT output {i} has nan")
+             if numpy.isinf(ort_output).any():
+                 logger.debug(f"ORT output {i} has inf")
+
+             diff = numpy.fabs(ort_output - torch_output)
+             idx = numpy.unravel_index(diff.argmax(), diff.shape)
+             messages.append(
+                 f"diff={diff[idx]:.9f} index={idx} ort={ort_output[idx]:.9f} torch={float(torch_output[idx]):.9f}"
+             )
+
+             if i == 0:  # logits
+                 ort_max_index = numpy.unravel_index(numpy.argmax(ort_output, axis=None), ort_output.shape)
+                 torch_max_index = numpy.unravel_index(numpy.argmax(torch_output, axis=None), torch_output.shape)
+                 is_top1_matched = numpy.array_equal(ort_max_index, torch_max_index)
+
+         max_diff_output_index = max_diffs.index(max(max_diffs))
+         return (
+             is_all_close,
+             max(max_diffs),
+             max_diff_output_index,
+             messages,
+             is_top1_matched,
+         )
+
+     @staticmethod
+     def export_onnx(
+         model,
+         device,
+         onnx_model_path: str,
+         verbose: bool = False,
+         use_external_data_format: bool = False,
+         has_position_ids: bool = True,
+         has_attention_mask: bool = True,
+         input_ids_dtype: torch.dtype = torch.int32,
+         position_ids_dtype: torch.dtype = torch.int32,
+         attention_mask_dtype: torch.dtype = torch.int32,
+     ):
+         """Export GPT-2 model with past state to ONNX model."""
+         config: GPT2Config = model.config
+         num_layer = config.n_layer
+         dummy_inputs = Gpt2Helper.get_dummy_inputs(
+             batch_size=1,
+             past_sequence_length=1,
+             sequence_length=1,
+             num_attention_heads=config.num_attention_heads,
+             hidden_size=config.hidden_size,
+             num_layer=num_layer,
+             vocab_size=config.vocab_size,
+             device=device,
+             float16=False,
+             has_position_ids=has_position_ids,
+             has_attention_mask=has_attention_mask,
+             input_ids_dtype=input_ids_dtype,
+             position_ids_dtype=position_ids_dtype,
+             attention_mask_dtype=attention_mask_dtype,
+         )
+         input_list = dummy_inputs.to_list()
+
+         with torch.no_grad():
+             outputs = model(*input_list)
+
+         past_names = [f"past_{i}" for i in range(num_layer)]
+         present_names = [f"present_{i}" for i in range(num_layer)]
+
+         # GPT2Model outputs last_state; GPT2LMHeadModel outputs logits (prediction_scores)
+         assert outputs[0].shape[2] == config.vocab_size or outputs[0].shape[2] == config.hidden_size
+         output_names = ["logits" if outputs[0].shape[2] == config.vocab_size else "last_state", *present_names]
+
+         # Shape of input tensors:
+         #    input_ids: (batch_size, seq_len)
+         #    past_{i}: (2, batch_size, num_heads, past_seq_len, hidden_size/num_heads)
+         #    attention_mask: (batch_size, past_seq_len + seq_len)
+         # Shape of output tensors:
+         #    last_state: (batch_size, seq_len, hidden_size)
+         #       or logits: (batch_size, seq_len, vocab_size)
+         #    present_{i}: (2, batch_size, num_heads, past_seq_len + seq_len, hidden_size/num_heads)
+         dynamic_axes = {
+             "input_ids": {0: "batch_size", 1: "seq_len"},
+             output_names[0]: {0: "batch_size", 1: "seq_len"},
+         }
+         for name in past_names:
+             dynamic_axes[name] = {1: "batch_size", 3: "past_seq_len"}
+         for name in present_names:
+             dynamic_axes[name] = {1: "batch_size", 3: "total_seq_len"}
+
+         input_names = ["input_ids"]
+         if has_position_ids:
+             dynamic_axes["position_ids"] = {0: "batch_size", 1: "seq_len"}
+             input_names.append("position_ids")
+         if has_attention_mask:
+             dynamic_axes["attention_mask"] = {0: "batch_size", 1: "total_seq_len"}
+             input_names.append("attention_mask")
+         input_names.extend(past_names)
+
+         assert len(outputs) == 2 and len(outputs[1]) == num_layer
+
+         logger.info(
+             f"Shapes: input_ids={dummy_inputs.input_ids.shape} past={dummy_inputs.past[0].shape} output={outputs[0].shape} present={outputs[1][0].shape}"
+         )
+
+         Path(onnx_model_path).parent.mkdir(parents=True, exist_ok=True)
+
+         if use_external_data_format:
+             # We let PyTorch export onnx to a temp directory first, then convert external data to one file.
+             with tempfile.TemporaryDirectory() as tmp_dir_name:
+                 temp_onnx_model_path = os.path.join(tmp_dir_name, "gpt2.onnx")
+                 Path(temp_onnx_model_path).parent.mkdir(parents=True, exist_ok=True)
+
+                 torch_onnx_export(
+                     model,
+                     args=tuple(input_list),
+                     f=temp_onnx_model_path,
+                     export_params=True,
+                     input_names=input_names,
+                     output_names=output_names,
+                     dynamic_axes=dynamic_axes,
+                     opset_version=11,
+                     do_constant_folding=True,
+                     use_external_data_format=True,
+                     verbose=verbose,
+                 )
+
+                 model = onnx.load_model(temp_onnx_model_path, load_external_data=True)
+                 OnnxModel.save(
+                     model,
+                     onnx_model_path,
+                     save_as_external_data=True,
+                     all_tensors_to_one_file=True,
+                 )
+         else:
+             torch_onnx_export(
+                 model,
+                 args=tuple(input_list),
+                 f=onnx_model_path,
+                 export_params=True,
+                 input_names=input_names,
+                 output_names=output_names,
+                 dynamic_axes=dynamic_axes,
+                 opset_version=11,
+                 do_constant_folding=True,
+                 use_external_data_format=False,
+                 verbose=verbose,
+             )
+
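+     # Example (illustrative sketch; the output path is an assumption):
+     #     Gpt2Helper.export_onnx(model, torch.device("cpu"), "./gpt2_past.onnx")
+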
+     @staticmethod
+     def optimize_onnx(
+         onnx_model_path,
+         optimized_model_path,
+         is_float16,
+         num_attention_heads,
+         hidden_size,
+         use_external_data_format=False,
+         auto_mixed_precision=False,
+         stage=0,
+         **kwargs,
+     ):
+         """Optimize ONNX model with an option to convert it to use mixed precision."""
+         optimization_options = FusionOptions("gpt2")
+
+         m = optimize_model(
+             onnx_model_path,
+             model_type="gpt2",
+             num_heads=num_attention_heads,
+             hidden_size=hidden_size,
+             opt_level=0,
+             optimization_options=optimization_options,
+             use_gpu=False,
+         )
+
+         if is_float16:
+             if auto_mixed_precision:
+                 Gpt2Helper.auto_mixed_precision(m)
+             else:
+                 if "keep_io_types" not in kwargs:
+                     kwargs["keep_io_types"] = False
+                 m.convert_float_to_float16(use_symbolic_shape_infer=True, **kwargs)
+
+         m.save_model_to_file(optimized_model_path, use_external_data_format)
+         return m
+
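+     # Example (illustrative sketch; paths and config values are assumptions):
+     #     Gpt2Helper.optimize_onnx("./gpt2_past.onnx", "./gpt2_past_fp16.onnx", is_float16=True,
+     #                              num_attention_heads=12, hidden_size=768, auto_mixed_precision=True)
+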
+     @staticmethod
+     def auto_mixed_precision(
+         onnx_model: OnnxModel,
+         op_block_list: list[str] = [  # noqa: B006
+             "Add",
+             "LayerNormalization",
+             "SkipLayerNormalization",
+             "FastGelu",
+             "EmbedLayerNormalization",
+         ],
+     ):
+         """Convert GPT-2 model to mixed precision.
+         It detects whether the original model has fp16 weights, and sets the parameters for float16 conversion automatically.
+         Args:
+             onnx_model (OnnxModel): optimized ONNX model
+             op_block_list (List[str], optional): operators to compute in fp32. Defaults to ["Add", "LayerNormalization",
+                 "SkipLayerNormalization", "FastGelu", "EmbedLayerNormalization"]
+         Returns:
+             parameters (dict): a dictionary of parameters used in float16 conversion
+         """
+         op_full_set = {node.op_type for node in onnx_model.nodes()}
+         fp32_op_set = set(op_block_list)
+         fp16_op_set = op_full_set.difference(fp32_op_set)
+         logger.info(f"fp32 op: {fp32_op_set} fp16 op: {fp16_op_set}")
+
+         # logits is the first output
+         logits_output_name = onnx_model.graph().output[0].name
+
+         # We use the weight in the last MatMul node to detect whether the model is stored with float16 weights from training.
+         is_weight_fp16_precision = False
+         output_name_to_node = onnx_model.output_name_to_node()
+         assert logits_output_name in output_name_to_node
+         node = output_name_to_node[logits_output_name]
+         last_matmul_node = None
+         if node.op_type == "MatMul":
+             last_matmul_node = node
+             logger.info(f"Found last MatMul node for logits: {node.name}")
+             initializer = None
+             for input in node.input:
+                 initializer = onnx_model.get_initializer(input)
+                 if initializer is not None:
+                     break
+
+             # When the max difference of values after converting float to float16 is lower than a threshold (1e-6),
+             # we can deduce that the weights are stored in float16 precision.
+             max_diff = float_to_float16_max_diff(initializer)
+             logger.debug(f"max diff of converting weights in last MatMul node {node.name}: {max_diff}")
+             is_weight_fp16_precision = max_diff < 1e-6
+         else:
+             logger.warning(f"Failed to find MatMul node for logits. Found {node.op_type} node {node.name} instead.")
+
+         keep_io_types = []
+         node_block_list = []
+         if (not is_weight_fp16_precision) and (last_matmul_node is not None):
+             # When the original weights are float32 precision, keeping logits and the last MatMul in float32 can give better precision.
+             keep_io_types = [logits_output_name]
+             node_block_list = [last_matmul_node.name]
+
+         parameters = {
+             "keep_io_types": keep_io_types,
+             "op_block_list": op_block_list,
+             "node_block_list": node_block_list,
+             "force_fp16_initializers": is_weight_fp16_precision,
+         }
+
+         logger.info(f"auto_mixed_precision parameters: {parameters}")
+         onnx_model.convert_float_to_float16(use_symbolic_shape_infer=True, **parameters)
+
+         return parameters
+
+     @staticmethod
+     def pytorch_inference(model, inputs: Gpt2Inputs, total_runs: int = 0):
+         """Run inference with the PyTorch model; when total_runs > 0, also return the average latency in ms."""
+         logger.debug("start pytorch_inference")
+
+         # Convert to fp32 since the PyTorch model cannot deal with half inputs.
+         input_list = inputs.to_fp32().to_list()
+
+         with torch.no_grad():
+             outputs = model(*input_list)
+
+         if total_runs == 0:
+             return outputs
+
+         latency = []
+         with torch.no_grad():
+             for _ in range(total_runs):
+                 start = time.time()
+                 outputs = model(*input_list)
+                 latency.append(time.time() - start)
+
+         average_latency = sum(latency) * 1000 / len(latency)
+         logger.debug("PyTorch inference time = {} ms".format(format(average_latency, ".2f")))  # noqa: G001
+
+         return outputs, average_latency
+
+     @staticmethod
+     def onnxruntime_inference(ort_session, inputs: Gpt2Inputs, total_runs: int = 0):
+         """Run inference with the ONNX model; when total_runs > 0, also return the average latency in ms."""
+         logger.debug("start onnxruntime_inference")
+
+         ort_inputs = {"input_ids": numpy.ascontiguousarray(inputs.input_ids.cpu().numpy())}
+
+         if inputs.past is not None:
+             for i, past_i in enumerate(inputs.past):
+                 ort_inputs[f"past_{i}"] = numpy.ascontiguousarray(past_i.cpu().numpy())
+
+         if inputs.attention_mask is not None:
+             ort_inputs["attention_mask"] = numpy.ascontiguousarray(inputs.attention_mask.cpu().numpy())
+
+         if inputs.position_ids is not None:
+             ort_inputs["position_ids"] = numpy.ascontiguousarray(inputs.position_ids.cpu().numpy())
+
+         ort_outputs = ort_session.run(None, ort_inputs)
+         if total_runs == 0:
+             return ort_outputs
+
+         latency = []
+         for _ in range(total_runs):
+             start = time.time()
+             ort_outputs = ort_session.run(None, ort_inputs)
+             latency.append(time.time() - start)
+
+         average_latency = sum(latency) * 1000 / len(latency)
+         logger.debug("OnnxRuntime Inference time = {} ms".format(format(average_latency, ".2f")))  # noqa: G001
+
+         return ort_outputs, average_latency
+
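+     # Example (illustrative sketch; `session` is an onnxruntime.InferenceSession created elsewhere):
+     #     ort_outputs, latency_ms = Gpt2Helper.onnxruntime_inference(session, dummy_inputs, total_runs=10)
+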
+     @staticmethod
+     def prepare_io_binding(
+         ort_session,
+         input_ids,
+         position_ids,
+         attention_mask,
+         past,
+         output_buffers,
+         output_shapes,
+     ):
+         """Returns an IO binding object for the session."""
+         return IOBindingHelper.prepare_io_binding(
+             ort_session,
+             input_ids,
+             position_ids,
+             attention_mask,
+             past,
+             output_buffers,
+             output_shapes,
+         )
+
+     @staticmethod
+     def get_outputs_from_io_binding_buffer(ort_session, output_buffers, output_shapes, return_numpy=True):
+         """Copy results to CPU. Returns a list of numpy arrays."""
+         return IOBindingHelper.get_outputs_from_io_binding_buffer(
+             ort_session, output_buffers, output_shapes, return_numpy
+         )
+
+     @staticmethod
+     def onnxruntime_inference_with_binded_io(
+         ort_session,
+         inputs: Gpt2Inputs,
+         output_buffers: dict[str, torch.Tensor],
+         output_shapes: dict[str, list[int]],
+         total_runs: int = 0,
+         return_numpy: bool = True,
+         include_copy_output_latency: bool = False,
+     ):
+         """Inference with IO binding. Returns outputs, and optionally the average latency when total_runs > 0."""
+         logger.debug("start onnxruntime_inference_with_binded_io")
+
+         # Bind inputs and outputs to the onnxruntime session
+         io_binding = Gpt2Helper.prepare_io_binding(
+             ort_session,
+             inputs.input_ids,
+             inputs.position_ids,
+             inputs.attention_mask,
+             inputs.past,
+             output_buffers,
+             output_shapes,
+         )
+
+         # Run onnxruntime with io binding
+         ort_session.run_with_iobinding(io_binding)
+
+         # Copy results to cpu for verification
+         ort_outputs = Gpt2Helper.get_outputs_from_io_binding_buffer(
+             ort_session, output_buffers, output_shapes, return_numpy
+         )
+
+         if total_runs == 0:
+             return ort_outputs
+
+         latency = []
+         for _ in range(total_runs):
+             start = time.time()
+             # Run onnxruntime with io binding
+             ort_session.run_with_iobinding(io_binding)
+             if include_copy_output_latency:
+                 _ = Gpt2Helper.get_outputs_from_io_binding_buffer(
+                     ort_session, output_buffers, output_shapes, return_numpy
+                 )
+             latency.append(time.time() - start)
+
+         average_latency = sum(latency) * 1000 / len(latency)
+         logger.debug("OnnxRuntime with IO binding inference time = %.2f ms", average_latency)
+
+         return ort_outputs, average_latency
+
+     @staticmethod
+     def save_outputs(i, ort_outputs, torch_outputs):
+         with open(f"ort_outputs_{i}.pickle", "wb") as f:
+             pickle.dump(ort_outputs, f)
+         logger.info(f"ORT outputs are saved to ort_outputs_{i}.pickle")
+
+         with open(f"torch_outputs_{i}.pickle", "wb") as f:
+             pickle.dump(torch_outputs, f)
+         logger.info(f"Torch outputs are saved to torch_outputs_{i}.pickle")
+
+     @staticmethod
+     def save_inputs(i, dummy_inputs):
+         with open(f"dummy_inputs_{i}.pickle", "wb") as f:
+             pickle.dump(dummy_inputs, f)
+         logger.info(f"Inputs are saved to dummy_inputs_{i}.pickle")
+
+     @staticmethod
+     def test_parity(
+         ort_session,
+         model,
+         device,
+         is_float16=False,
+         rtol=5e-4,
+         atol=5e-4,
+         test_cases_per_run=10000,
+         total_runs=1,
+         use_io_binding=True,
+         model_class="GPT2LMHeadModel",
+         has_position_ids=True,
+         has_attention_mask=True,
+         input_ids_dtype=torch.int32,
+         position_ids_dtype=torch.int32,
+         attention_mask_dtype=torch.int32,
+         stage=0,
+         verbose=False,
+         enable_pickle_output=False,
+     ):
+         """Generate random inputs and compare the results of PyTorch and ONNX Runtime."""
+
+         config: GPT2Config = model.config
+
+         logger.info(
+             f"Running parity test (atol={atol}, test_cases={test_cases_per_run}, runs={total_runs}, use_io_binding={use_io_binding}, model_class={model_class}, is_float16={is_float16}) ..."
+         )
+
+         max_batch_size = 8
+         max_past_seq_len = 4  # Do not use a large number here, for a higher chance of hitting an empty past (past_seq_len=0)
+         max_seq_len = 2
+
+         output_buffers = None
+         if use_io_binding:
+             max_output_shapes = Gpt2Helper.get_output_shapes(
+                 max_batch_size, max_past_seq_len, max_seq_len, config, model_class
+             )
+             output_buffers = Gpt2Helper.get_output_buffers(max_output_shapes, device, is_float16)
+
+         passed_test_cases = 0
+         top1_matched_cases = 0
+
+         max_abs_diff_list = []
+         top1_matched_cases_per_run = [0] * total_runs
+         total_test_cases = test_cases_per_run * total_runs
+         for i in range(total_test_cases):
+             run_id = int(i / test_cases_per_run)
+             sequence_length = random.randint(1, max_seq_len)
+             past_sequence_length = 0 if (stage == 1) else random.randint(0, max_past_seq_len)
+             batch_size = random.randint(1, max_batch_size)
+
+             logger.debug(
+                 f"Running parity test for batch_size={batch_size} past_sequence_length={past_sequence_length}..."
+             )
+             dummy_inputs = Gpt2Helper.get_dummy_inputs(
+                 batch_size,
+                 past_sequence_length,
+                 sequence_length,
+                 config.num_attention_heads,
+                 config.hidden_size,
+                 config.n_layer,
+                 config.vocab_size,
+                 device,
+                 is_float16,
+                 has_position_ids,
+                 has_attention_mask,
+                 input_ids_dtype=input_ids_dtype,
+                 position_ids_dtype=position_ids_dtype,
+                 attention_mask_dtype=attention_mask_dtype,
+                 left_side_padding=True,
+             )
+             outputs = Gpt2Helper.pytorch_inference(model, dummy_inputs)
+             if use_io_binding:
+                 output_shapes = Gpt2Helper.get_output_shapes(
+                     batch_size,
+                     past_sequence_length,
+                     sequence_length,
+                     config,
+                     model_class,
+                 )
+                 ort_outputs = Gpt2Helper.onnxruntime_inference_with_binded_io(
+                     ort_session, dummy_inputs, output_buffers, output_shapes
+                 )
+             else:
+                 ort_outputs = Gpt2Helper.onnxruntime_inference(ort_session, dummy_inputs)
+
+             (
+                 is_all_close,
+                 max_abs_diff,
+                 max_diff_output_index,
+                 messages,
+                 is_top1_matched,
+             ) = Gpt2Helper.compare_outputs_v2(outputs, ort_outputs, atol=atol)
+             if not numpy.isnan(max_abs_diff):
+                 max_abs_diff_list.append(max_abs_diff)
+             if is_all_close:
+                 passed_test_cases += 1
+
+             if is_top1_matched:
+                 top1_matched_cases += 1
+                 top1_matched_cases_per_run[run_id] += 1
+
+             if verbose and not is_all_close:
+                 logger.info(
+                     f"test_case={i} batch_size={batch_size} past_sequence_length={past_sequence_length} sequence_length={sequence_length} MaxDiff={max_abs_diff}"
+                 )
+                 for i, message in enumerate(messages):  # noqa: PLW2901
+                     logger.info(f"\t{i}: Name={ort_session.get_outputs()[i].name}, {message}")
+
+             # Collect data for debugging
+             if enable_pickle_output and (numpy.isnan(max_abs_diff) or max_abs_diff > 100 * atol):
+                 Gpt2Helper.save_inputs(i, dummy_inputs)
+                 Gpt2Helper.save_outputs(i, ort_outputs, outputs)
+
+         if max_abs_diff_list:
+             result = {
+                 f"max_diff_percentile_{p}": f"{numpy.percentile(max_abs_diff_list, p):.5f}" for p in [50, 90, 95, 99]
+             }
+         else:
+             result = {f"max_diff_percentile_{p}": "nan" for p in [50, 90, 95, 99]}
+
+         result["top1_match_rate"] = top1_matched_cases * 1.0 / total_test_cases
+         result["top1_match_rate_per_run"] = [x * 1.0 / test_cases_per_run for x in top1_matched_cases_per_run]
+         result["diff_pass_rate"] = passed_test_cases * 1.0 / total_test_cases
+         result["nan_rate"] = (total_test_cases - len(max_abs_diff_list)) * 1.0 / total_test_cases
+
+         logger.info(
+             f"Parity Test Cases={total_test_cases}; Passed={passed_test_cases}; Nan={total_test_cases - len(max_abs_diff_list)}; Top1_Matched={top1_matched_cases}"
+         )
+
+         if passed_test_cases > 0.95 * total_test_cases:
+             logger.info(f"Parity is good: passed rate={int(passed_test_cases * 100 / total_test_cases):.0f}%")
+
+         return result
+
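+     # Example (illustrative sketch; a small run for a quick sanity check):
+     #     result = Gpt2Helper.test_parity(session, model, device, is_float16=False,
+     #                                     test_cases_per_run=100, total_runs=1, use_io_binding=False)
+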
+     @staticmethod
+     def test_performance(
+         ort_session,
+         model,
+         device,
+         is_float16=False,
+         total_runs=100,
+         use_io_binding=True,
+         model_class="GPT2LMHeadModel",
+         has_position_ids=True,
+         has_attention_mask=True,
+         input_ids_dtype=torch.int32,
+         position_ids_dtype=torch.int32,
+         attention_mask_dtype=torch.int32,
+         batch_size=8,
+         sequence_length=1,
+         past_sequence_length=32,
+     ):
+         """Generate random inputs and measure the average latency of ONNX Runtime."""
+
+         config: GPT2Config = model.config
+
+         output_buffers = None
+         if use_io_binding:
+             output_shapes = Gpt2Helper.get_output_shapes(
+                 batch_size, past_sequence_length, sequence_length, config, model_class
+             )
+             output_buffers = Gpt2Helper.get_output_buffers(output_shapes, device, is_float16)
+
+         dummy_inputs = Gpt2Helper.get_dummy_inputs(
+             batch_size,
+             past_sequence_length,
+             sequence_length,
+             config.num_attention_heads,
+             config.hidden_size,
+             config.n_layer,
+             config.vocab_size,
+             device,
+             is_float16,
+             has_position_ids,
+             has_attention_mask,
+             input_ids_dtype=input_ids_dtype,
+             position_ids_dtype=position_ids_dtype,
+             attention_mask_dtype=attention_mask_dtype,
+         )
+
+         if use_io_binding:
+             _, latency = Gpt2Helper.onnxruntime_inference_with_binded_io(
+                 ort_session, dummy_inputs, output_buffers, output_shapes, total_runs
+             )
+         else:
+             _, latency = Gpt2Helper.onnxruntime_inference(ort_session, dummy_inputs, total_runs)
+
+         return latency
+
+     @staticmethod
+     def torchscript(model, config, device, has_position_ids=True, has_attention_mask=True):
+         """JIT trace for TorchScript."""
+         input_list = Gpt2Helper.get_dummy_inputs(
+             batch_size=1,
+             past_sequence_length=1,
+             sequence_length=1,
+             num_attention_heads=config.num_attention_heads,
+             hidden_size=config.hidden_size,
+             num_layer=config.n_layer,
+             vocab_size=config.vocab_size,
+             device=device,
+             float16=False,
+             has_position_ids=has_position_ids,
+             has_attention_mask=has_attention_mask,
+         ).to_list()
+         return torch.jit.trace(model, input_list)
+
+     @staticmethod
+     def get_onnx_paths(
+         output_dir,
+         model_name_or_path,
+         model_class: str = "GPT2LMHeadModel",
+         has_past=True,
+         new_folder=False,
+         remove_existing=["raw", "fp32", "fp16", "int8"],  # noqa: B006
+     ):
+         """Build path names for the given model based on the given attributes."""
+         model_name = model_name_or_path
+         if os.path.isdir(model_name_or_path):
+             model_name = Path(model_name_or_path).parts[-1]
+         else:
+             model_name = model_name.split("/")[-1]
+
+         if model_class != "GPT2LMHeadModel":
+             model_name += "_" + model_class
+
+         if has_past:
+             model_name += "_past"
+
+         if new_folder:
+             suffix = {"raw": "", "fp32": "_fp32", "fp16": "_fp16", "int8": "_int8"}
+             # Remove the directories if they exist.
+             for model_type in ["raw", "fp32", "fp16", "int8"]:
+                 new_dir = os.path.join(output_dir, model_name + suffix[model_type])
+                 if os.path.exists(new_dir):
+                     if model_type in remove_existing:
+                         try:
+                             shutil.rmtree(new_dir)
+                             logger.info(f"Removed the existing directory: {new_dir}")
+                         except OSError as e:
+                             logger.info(f"Failed to remove the directory {new_dir}: {e.strerror}")
+                     else:
+                         logger.info(f"Directory for {model_type} exists: {new_dir}")
+
+             # Store each model in its own directory (for the external data format).
+             return {
+                 "raw": os.path.join(os.path.join(output_dir, model_name), model_name + ".onnx"),
+                 "fp32": os.path.join(
+                     os.path.join(output_dir, model_name + "_fp32"),
+                     model_name + "_fp32.onnx",
+                 ),
+                 "fp16": os.path.join(
+                     os.path.join(output_dir, model_name + "_fp16"),
+                     model_name + "_fp16.onnx",
+                 ),
+                 "int8": os.path.join(
+                     os.path.join(output_dir, model_name + "_int8"),
+                     model_name + "_int8.onnx",
+                 ),
+             }
+
+         return {
+             "raw": os.path.join(output_dir, model_name + ".onnx"),
+             "fp32": os.path.join(output_dir, model_name + "_fp32.onnx"),
+             "fp16": os.path.join(output_dir, model_name + "_fp16.onnx"),
+             "int8": os.path.join(output_dir, model_name + "_int8.onnx"),
+         }