onnxruntime-directml 1.24.1__cp314-cp314-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (322)
  1. onnxruntime/LICENSE +21 -0
  2. onnxruntime/Privacy.md +21 -0
  3. onnxruntime/ThirdPartyNotices.txt +6121 -0
  4. onnxruntime/__init__.py +418 -0
  5. onnxruntime/backend/__init__.py +6 -0
  6. onnxruntime/backend/backend.py +175 -0
  7. onnxruntime/backend/backend_rep.py +52 -0
  8. onnxruntime/capi/DirectML.dll +0 -0
  9. onnxruntime/capi/__init__.py +4 -0
  10. onnxruntime/capi/_ld_preload.py +7 -0
  11. onnxruntime/capi/_pybind_state.py +33 -0
  12. onnxruntime/capi/build_and_package_info.py +2 -0
  13. onnxruntime/capi/convert_npz_to_onnx_adapter.py +48 -0
  14. onnxruntime/capi/onnxruntime.dll +0 -0
  15. onnxruntime/capi/onnxruntime_collect_build_info.py +47 -0
  16. onnxruntime/capi/onnxruntime_inference_collection.py +1440 -0
  17. onnxruntime/capi/onnxruntime_providers_shared.dll +0 -0
  18. onnxruntime/capi/onnxruntime_pybind11_state.pyd +0 -0
  19. onnxruntime/capi/onnxruntime_validation.py +154 -0
  20. onnxruntime/capi/version_info.py +2 -0
  21. onnxruntime/datasets/__init__.py +18 -0
  22. onnxruntime/datasets/logreg_iris.onnx +0 -0
  23. onnxruntime/datasets/mul_1.onnx +0 -0
  24. onnxruntime/datasets/sigmoid.onnx +13 -0
  25. onnxruntime/quantization/CalTableFlatBuffers/KeyValue.py +78 -0
  26. onnxruntime/quantization/CalTableFlatBuffers/TrtTable.py +90 -0
  27. onnxruntime/quantization/CalTableFlatBuffers/__init__.py +0 -0
  28. onnxruntime/quantization/__init__.py +19 -0
  29. onnxruntime/quantization/base_quantizer.py +529 -0
  30. onnxruntime/quantization/calibrate.py +1267 -0
  31. onnxruntime/quantization/execution_providers/qnn/__init__.py +2 -0
  32. onnxruntime/quantization/execution_providers/qnn/fusion_lpnorm.py +132 -0
  33. onnxruntime/quantization/execution_providers/qnn/fusion_spacetodepth.py +162 -0
  34. onnxruntime/quantization/execution_providers/qnn/mixed_precision_overrides_utils.py +413 -0
  35. onnxruntime/quantization/execution_providers/qnn/preprocess.py +353 -0
  36. onnxruntime/quantization/execution_providers/qnn/quant_config.py +389 -0
  37. onnxruntime/quantization/fusions/__init__.py +4 -0
  38. onnxruntime/quantization/fusions/fusion.py +311 -0
  39. onnxruntime/quantization/fusions/fusion_gelu.py +272 -0
  40. onnxruntime/quantization/fusions/fusion_layernorm.py +146 -0
  41. onnxruntime/quantization/fusions/replace_upsample_with_resize.py +96 -0
  42. onnxruntime/quantization/matmul_bnb4_quantizer.py +239 -0
  43. onnxruntime/quantization/matmul_nbits_quantizer.py +1638 -0
  44. onnxruntime/quantization/neural_compressor/__init__.py +1 -0
  45. onnxruntime/quantization/neural_compressor/onnx_model.py +1251 -0
  46. onnxruntime/quantization/neural_compressor/util.py +80 -0
  47. onnxruntime/quantization/neural_compressor/weight_only.py +932 -0
  48. onnxruntime/quantization/onnx_model.py +600 -0
  49. onnxruntime/quantization/onnx_quantizer.py +1163 -0
  50. onnxruntime/quantization/operators/__init__.py +2 -0
  51. onnxruntime/quantization/operators/activation.py +119 -0
  52. onnxruntime/quantization/operators/argmax.py +18 -0
  53. onnxruntime/quantization/operators/attention.py +73 -0
  54. onnxruntime/quantization/operators/base_operator.py +26 -0
  55. onnxruntime/quantization/operators/binary_op.py +72 -0
  56. onnxruntime/quantization/operators/concat.py +62 -0
  57. onnxruntime/quantization/operators/conv.py +260 -0
  58. onnxruntime/quantization/operators/direct_q8.py +78 -0
  59. onnxruntime/quantization/operators/embed_layernorm.py +121 -0
  60. onnxruntime/quantization/operators/gather.py +64 -0
  61. onnxruntime/quantization/operators/gavgpool.py +62 -0
  62. onnxruntime/quantization/operators/gemm.py +172 -0
  63. onnxruntime/quantization/operators/lstm.py +121 -0
  64. onnxruntime/quantization/operators/matmul.py +231 -0
  65. onnxruntime/quantization/operators/maxpool.py +34 -0
  66. onnxruntime/quantization/operators/norm.py +40 -0
  67. onnxruntime/quantization/operators/pad.py +172 -0
  68. onnxruntime/quantization/operators/pooling.py +67 -0
  69. onnxruntime/quantization/operators/qdq_base_operator.py +22 -0
  70. onnxruntime/quantization/operators/resize.py +34 -0
  71. onnxruntime/quantization/operators/softmax.py +74 -0
  72. onnxruntime/quantization/operators/split.py +63 -0
  73. onnxruntime/quantization/operators/where.py +87 -0
  74. onnxruntime/quantization/preprocess.py +141 -0
  75. onnxruntime/quantization/qdq_loss_debug.py +389 -0
  76. onnxruntime/quantization/qdq_quantizer.py +1477 -0
  77. onnxruntime/quantization/quant_utils.py +1051 -0
  78. onnxruntime/quantization/quantize.py +953 -0
  79. onnxruntime/quantization/registry.py +110 -0
  80. onnxruntime/quantization/shape_inference.py +204 -0
  81. onnxruntime/quantization/static_quantize_runner.py +256 -0
  82. onnxruntime/quantization/tensor_quant_overrides.py +520 -0
  83. onnxruntime/tools/__init__.py +10 -0
  84. onnxruntime/tools/check_onnx_model_mobile_usability.py +47 -0
  85. onnxruntime/tools/convert_onnx_models_to_ort.py +380 -0
  86. onnxruntime/tools/file_utils.py +47 -0
  87. onnxruntime/tools/logger.py +11 -0
  88. onnxruntime/tools/make_dynamic_shape_fixed.py +73 -0
  89. onnxruntime/tools/mobile_helpers/__init__.py +0 -0
  90. onnxruntime/tools/mobile_helpers/coreml_supported_mlprogram_ops.md +53 -0
  91. onnxruntime/tools/mobile_helpers/coreml_supported_neuralnetwork_ops.md +43 -0
  92. onnxruntime/tools/mobile_helpers/nnapi_supported_ops.md +58 -0
  93. onnxruntime/tools/mobile_helpers/usability_checker.py +738 -0
  94. onnxruntime/tools/offline_tuning.py +169 -0
  95. onnxruntime/tools/onnx_model_utils.py +416 -0
  96. onnxruntime/tools/onnx_randomizer.py +85 -0
  97. onnxruntime/tools/onnxruntime_test.py +164 -0
  98. onnxruntime/tools/optimize_onnx_model.py +56 -0
  99. onnxruntime/tools/ort_format_model/__init__.py +27 -0
  100. onnxruntime/tools/ort_format_model/operator_type_usage_processors.py +653 -0
  101. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/__init__.py +0 -0
  102. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ArgType.py +7 -0
  103. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ArgTypeAndIndex.py +67 -0
  104. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Attribute.py +337 -0
  105. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/AttributeType.py +18 -0
  106. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Checkpoint.py +125 -0
  107. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DeprecatedKernelCreateInfos.py +120 -0
  108. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DeprecatedNodeIndexAndKernelDefHash.py +68 -0
  109. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DeprecatedSessionState.py +96 -0
  110. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DeprecatedSubGraphSessionState.py +72 -0
  111. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Dimension.py +71 -0
  112. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DimensionValue.py +80 -0
  113. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DimensionValueType.py +8 -0
  114. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/EdgeEnd.py +32 -0
  115. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/FloatProperty.py +67 -0
  116. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Graph.py +320 -0
  117. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/InferenceSession.py +88 -0
  118. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/IntProperty.py +67 -0
  119. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/KernelTypeStrArgsEntry.py +91 -0
  120. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/KernelTypeStrResolver.py +78 -0
  121. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/MapType.py +71 -0
  122. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Model.py +223 -0
  123. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ModuleState.py +141 -0
  124. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Node.py +317 -0
  125. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/NodeEdge.py +126 -0
  126. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/NodeType.py +7 -0
  127. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/NodesToOptimizeIndices.py +160 -0
  128. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/OpIdKernelTypeStrArgsEntry.py +91 -0
  129. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/OperatorSetId.py +67 -0
  130. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/OptimizerGroup.py +117 -0
  131. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ParameterOptimizerState.py +91 -0
  132. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/PropertyBag.py +152 -0
  133. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/RuntimeOptimizationRecord.py +105 -0
  134. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/RuntimeOptimizationRecordContainerEntry.py +91 -0
  135. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/RuntimeOptimizations.py +79 -0
  136. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/SequenceType.py +58 -0
  137. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Shape.py +78 -0
  138. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/SparseTensor.py +114 -0
  139. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/StringProperty.py +67 -0
  140. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/StringStringEntry.py +67 -0
  141. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Tensor.py +203 -0
  142. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/TensorDataType.py +26 -0
  143. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/TensorTypeAndShape.py +71 -0
  144. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/TypeInfo.py +83 -0
  145. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/TypeInfoValue.py +9 -0
  146. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ValueInfo.py +84 -0
  147. onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/__init__.py +6 -0
  148. onnxruntime/tools/ort_format_model/ort_model_processor.py +86 -0
  149. onnxruntime/tools/ort_format_model/types.py +85 -0
  150. onnxruntime/tools/ort_format_model/utils.py +61 -0
  151. onnxruntime/tools/pytorch_export_contrib_ops.py +129 -0
  152. onnxruntime/tools/pytorch_export_helpers.py +131 -0
  153. onnxruntime/tools/qdq_helpers/__init__.py +0 -0
  154. onnxruntime/tools/qdq_helpers/optimize_qdq_model.py +37 -0
  155. onnxruntime/tools/qnn/add_trans_cast.py +292 -0
  156. onnxruntime/tools/qnn/gen_qnn_ctx_onnx_model.py +364 -0
  157. onnxruntime/tools/qnn/preprocess.py +165 -0
  158. onnxruntime/tools/reduced_build_config_parser.py +203 -0
  159. onnxruntime/tools/remove_initializer_from_input.py +37 -0
  160. onnxruntime/tools/symbolic_shape_infer.py +3094 -0
  161. onnxruntime/tools/update_onnx_opset.py +31 -0
  162. onnxruntime/transformers/__init__.py +8 -0
  163. onnxruntime/transformers/affinity_helper.py +40 -0
  164. onnxruntime/transformers/benchmark.py +942 -0
  165. onnxruntime/transformers/benchmark_helper.py +643 -0
  166. onnxruntime/transformers/bert_perf_test.py +629 -0
  167. onnxruntime/transformers/bert_test_data.py +641 -0
  168. onnxruntime/transformers/compare_bert_results.py +256 -0
  169. onnxruntime/transformers/constants.py +47 -0
  170. onnxruntime/transformers/convert_generation.py +3605 -0
  171. onnxruntime/transformers/convert_tf_models_to_pytorch.py +205 -0
  172. onnxruntime/transformers/convert_to_packing_mode.py +385 -0
  173. onnxruntime/transformers/dynamo_onnx_helper.py +205 -0
  174. onnxruntime/transformers/float16.py +501 -0
  175. onnxruntime/transformers/fusion_attention.py +1189 -0
  176. onnxruntime/transformers/fusion_attention_clip.py +340 -0
  177. onnxruntime/transformers/fusion_attention_sam2.py +533 -0
  178. onnxruntime/transformers/fusion_attention_unet.py +1307 -0
  179. onnxruntime/transformers/fusion_attention_vae.py +300 -0
  180. onnxruntime/transformers/fusion_bart_attention.py +435 -0
  181. onnxruntime/transformers/fusion_base.py +141 -0
  182. onnxruntime/transformers/fusion_bias_add.py +57 -0
  183. onnxruntime/transformers/fusion_biasgelu.py +66 -0
  184. onnxruntime/transformers/fusion_biassplitgelu.py +110 -0
  185. onnxruntime/transformers/fusion_conformer_attention.py +222 -0
  186. onnxruntime/transformers/fusion_constant_fold.py +144 -0
  187. onnxruntime/transformers/fusion_embedlayer.py +810 -0
  188. onnxruntime/transformers/fusion_fastgelu.py +492 -0
  189. onnxruntime/transformers/fusion_gelu.py +258 -0
  190. onnxruntime/transformers/fusion_gelu_approximation.py +25 -0
  191. onnxruntime/transformers/fusion_gemmfastgelu.py +121 -0
  192. onnxruntime/transformers/fusion_gpt_attention.py +546 -0
  193. onnxruntime/transformers/fusion_gpt_attention_megatron.py +355 -0
  194. onnxruntime/transformers/fusion_gpt_attention_no_past.py +260 -0
  195. onnxruntime/transformers/fusion_group_norm.py +180 -0
  196. onnxruntime/transformers/fusion_layernorm.py +489 -0
  197. onnxruntime/transformers/fusion_mha_mmdit.py +667 -0
  198. onnxruntime/transformers/fusion_nhwc_conv.py +99 -0
  199. onnxruntime/transformers/fusion_options.py +340 -0
  200. onnxruntime/transformers/fusion_qordered_attention.py +420 -0
  201. onnxruntime/transformers/fusion_qordered_gelu.py +118 -0
  202. onnxruntime/transformers/fusion_qordered_layernorm.py +122 -0
  203. onnxruntime/transformers/fusion_qordered_matmul.py +216 -0
  204. onnxruntime/transformers/fusion_quickgelu.py +74 -0
  205. onnxruntime/transformers/fusion_reshape.py +173 -0
  206. onnxruntime/transformers/fusion_rotary_attention.py +1591 -0
  207. onnxruntime/transformers/fusion_shape.py +109 -0
  208. onnxruntime/transformers/fusion_simplified_layernorm.py +165 -0
  209. onnxruntime/transformers/fusion_skip_group_norm.py +254 -0
  210. onnxruntime/transformers/fusion_skiplayernorm.py +209 -0
  211. onnxruntime/transformers/fusion_transpose.py +167 -0
  212. onnxruntime/transformers/fusion_utils.py +321 -0
  213. onnxruntime/transformers/huggingface_models.py +74 -0
  214. onnxruntime/transformers/import_utils.py +20 -0
  215. onnxruntime/transformers/io_binding_helper.py +487 -0
  216. onnxruntime/transformers/large_model_exporter.py +395 -0
  217. onnxruntime/transformers/machine_info.py +230 -0
  218. onnxruntime/transformers/metrics.py +163 -0
  219. onnxruntime/transformers/models/bart/__init__.py +12 -0
  220. onnxruntime/transformers/models/bart/export.py +98 -0
  221. onnxruntime/transformers/models/bert/__init__.py +12 -0
  222. onnxruntime/transformers/models/bert/eval_squad.py +329 -0
  223. onnxruntime/transformers/models/gpt2/__init__.py +12 -0
  224. onnxruntime/transformers/models/gpt2/benchmark_gpt2.py +413 -0
  225. onnxruntime/transformers/models/gpt2/convert_to_onnx.py +566 -0
  226. onnxruntime/transformers/models/gpt2/gpt2_helper.py +1031 -0
  227. onnxruntime/transformers/models/gpt2/gpt2_parity.py +513 -0
  228. onnxruntime/transformers/models/gpt2/gpt2_tester.py +501 -0
  229. onnxruntime/transformers/models/gpt2/parity_check_helper.py +146 -0
  230. onnxruntime/transformers/models/llama/__init__.py +12 -0
  231. onnxruntime/transformers/models/llama/benchmark.py +700 -0
  232. onnxruntime/transformers/models/llama/benchmark_all.py +488 -0
  233. onnxruntime/transformers/models/llama/benchmark_e2e.py +608 -0
  234. onnxruntime/transformers/models/llama/convert_to_onnx.py +1064 -0
  235. onnxruntime/transformers/models/llama/dist_settings.py +57 -0
  236. onnxruntime/transformers/models/llama/llama_inputs.py +504 -0
  237. onnxruntime/transformers/models/llama/llama_parity.py +343 -0
  238. onnxruntime/transformers/models/llama/llama_torch.py +47 -0
  239. onnxruntime/transformers/models/llama/quant_kv_dataloader.py +108 -0
  240. onnxruntime/transformers/models/longformer/__init__.py +12 -0
  241. onnxruntime/transformers/models/longformer/benchmark_longformer.py +821 -0
  242. onnxruntime/transformers/models/longformer/convert_to_onnx.py +413 -0
  243. onnxruntime/transformers/models/longformer/generate_test_data.py +347 -0
  244. onnxruntime/transformers/models/longformer/longformer_helper.py +76 -0
  245. onnxruntime/transformers/models/phi2/__init__.py +12 -0
  246. onnxruntime/transformers/models/phi2/convert_to_onnx.py +590 -0
  247. onnxruntime/transformers/models/phi2/inference_example.py +414 -0
  248. onnxruntime/transformers/models/sam2/__init__.py +12 -0
  249. onnxruntime/transformers/models/sam2/benchmark_sam2.py +638 -0
  250. onnxruntime/transformers/models/sam2/convert_to_onnx.py +270 -0
  251. onnxruntime/transformers/models/sam2/image_decoder.py +272 -0
  252. onnxruntime/transformers/models/sam2/image_encoder.py +236 -0
  253. onnxruntime/transformers/models/sam2/mask_decoder.py +208 -0
  254. onnxruntime/transformers/models/sam2/nvtx_helper.py +33 -0
  255. onnxruntime/transformers/models/sam2/prompt_encoder.py +189 -0
  256. onnxruntime/transformers/models/sam2/sam2_demo.py +321 -0
  257. onnxruntime/transformers/models/sam2/sam2_image_onnx_predictor.py +279 -0
  258. onnxruntime/transformers/models/sam2/sam2_utils.py +147 -0
  259. onnxruntime/transformers/models/stable_diffusion/__init__.py +12 -0
  260. onnxruntime/transformers/models/stable_diffusion/benchmark.py +1519 -0
  261. onnxruntime/transformers/models/stable_diffusion/benchmark_controlnet.py +426 -0
  262. onnxruntime/transformers/models/stable_diffusion/demo_txt2img.py +103 -0
  263. onnxruntime/transformers/models/stable_diffusion/demo_txt2img_xl.py +269 -0
  264. onnxruntime/transformers/models/stable_diffusion/demo_utils.py +778 -0
  265. onnxruntime/transformers/models/stable_diffusion/diffusion_models.py +1318 -0
  266. onnxruntime/transformers/models/stable_diffusion/diffusion_schedulers.py +1179 -0
  267. onnxruntime/transformers/models/stable_diffusion/engine_builder.py +295 -0
  268. onnxruntime/transformers/models/stable_diffusion/engine_builder_ort_cuda.py +387 -0
  269. onnxruntime/transformers/models/stable_diffusion/engine_builder_ort_trt.py +288 -0
  270. onnxruntime/transformers/models/stable_diffusion/engine_builder_tensorrt.py +395 -0
  271. onnxruntime/transformers/models/stable_diffusion/engine_builder_torch.py +108 -0
  272. onnxruntime/transformers/models/stable_diffusion/optimize_pipeline.py +590 -0
  273. onnxruntime/transformers/models/stable_diffusion/ort_optimizer.py +136 -0
  274. onnxruntime/transformers/models/stable_diffusion/pipeline_stable_diffusion.py +831 -0
  275. onnxruntime/transformers/models/stable_diffusion/trt_utilities.py +12 -0
  276. onnxruntime/transformers/models/t5/__init__.py +12 -0
  277. onnxruntime/transformers/models/t5/convert_to_onnx.py +318 -0
  278. onnxruntime/transformers/models/t5/t5_decoder.py +437 -0
  279. onnxruntime/transformers/models/t5/t5_encoder.py +70 -0
  280. onnxruntime/transformers/models/t5/t5_encoder_decoder_init.py +361 -0
  281. onnxruntime/transformers/models/t5/t5_helper.py +302 -0
  282. onnxruntime/transformers/models/whisper/__init__.py +12 -0
  283. onnxruntime/transformers/models/whisper/benchmark.py +585 -0
  284. onnxruntime/transformers/models/whisper/benchmark_all.py +526 -0
  285. onnxruntime/transformers/models/whisper/convert_to_onnx.py +609 -0
  286. onnxruntime/transformers/models/whisper/whisper_chain.py +334 -0
  287. onnxruntime/transformers/models/whisper/whisper_decoder.py +464 -0
  288. onnxruntime/transformers/models/whisper/whisper_encoder.py +164 -0
  289. onnxruntime/transformers/models/whisper/whisper_encoder_decoder_init.py +371 -0
  290. onnxruntime/transformers/models/whisper/whisper_helper.py +1035 -0
  291. onnxruntime/transformers/models/whisper/whisper_inputs.py +380 -0
  292. onnxruntime/transformers/models/whisper/whisper_jump_times.py +477 -0
  293. onnxruntime/transformers/onnx_exporter.py +719 -0
  294. onnxruntime/transformers/onnx_model.py +1636 -0
  295. onnxruntime/transformers/onnx_model_bart.py +141 -0
  296. onnxruntime/transformers/onnx_model_bert.py +488 -0
  297. onnxruntime/transformers/onnx_model_bert_keras.py +474 -0
  298. onnxruntime/transformers/onnx_model_bert_tf.py +588 -0
  299. onnxruntime/transformers/onnx_model_clip.py +42 -0
  300. onnxruntime/transformers/onnx_model_conformer.py +32 -0
  301. onnxruntime/transformers/onnx_model_gpt2.py +101 -0
  302. onnxruntime/transformers/onnx_model_mmdit.py +112 -0
  303. onnxruntime/transformers/onnx_model_phi.py +929 -0
  304. onnxruntime/transformers/onnx_model_sam2.py +137 -0
  305. onnxruntime/transformers/onnx_model_t5.py +985 -0
  306. onnxruntime/transformers/onnx_model_tnlr.py +226 -0
  307. onnxruntime/transformers/onnx_model_unet.py +258 -0
  308. onnxruntime/transformers/onnx_model_vae.py +42 -0
  309. onnxruntime/transformers/onnx_utils.py +55 -0
  310. onnxruntime/transformers/optimizer.py +620 -0
  311. onnxruntime/transformers/past_helper.py +149 -0
  312. onnxruntime/transformers/profile_result_processor.py +358 -0
  313. onnxruntime/transformers/profiler.py +434 -0
  314. onnxruntime/transformers/quantize_helper.py +76 -0
  315. onnxruntime/transformers/shape_infer_helper.py +121 -0
  316. onnxruntime/transformers/shape_optimizer.py +400 -0
  317. onnxruntime/transformers/torch_onnx_export_helper.py +74 -0
  318. onnxruntime_directml-1.24.1.dist-info/METADATA +216 -0
  319. onnxruntime_directml-1.24.1.dist-info/RECORD +322 -0
  320. onnxruntime_directml-1.24.1.dist-info/WHEEL +5 -0
  321. onnxruntime_directml-1.24.1.dist-info/entry_points.txt +2 -0
  322. onnxruntime_directml-1.24.1.dist-info/top_level.txt +1 -0
onnxruntime/transformers/models/llama/benchmark_e2e.py
@@ -0,0 +1,608 @@
+ # -------------------------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # Licensed under the MIT License. See License.txt in the project root for
+ # license information.
+ # --------------------------------------------------------------------------
+
+ # This is an end-to-end benchmarking script for the Hugging Face LLaMA-2 model.
+ #
+ # Prerequisites:
+ # 1) Install `huggingface-cli`:
+ #
+ # $ pip install huggingface_hub
+ #
+ # 2) Authenticate with Hugging Face's CLI:
+ #
+ # $ huggingface-cli login
+ #
+ # 3) Accept Meta's license in Hugging Face to access the models at https://huggingface.co/meta-llama/
+ #
+ # 4) Install the latest ONNX Runtime version
+ #
+ # $ pip install onnxruntime-gpu
+ #
+ # 5) Install flash attention v2
+ #
+ # $ pip install flash-attn --no-build-isolation
+ #
+ # 6) Install bitsandbytes
+ #
+ # $ pip install bitsandbytes
+
+ from __future__ import annotations
+
+ import argparse
+ import datetime
+ import gc
+ import itertools
+ import json
+ import logging
+ import os
+ import textwrap
+ import time
+
+ import numpy as np
+ import pandas as pd
+ import torch
+ from benchmark_helper import setup_logger
+ from llama_inputs import add_io_bindings_as_tensors, get_initial_inputs_and_outputs
+ from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+
+ import onnxruntime as ort
+
+ logger = logging.getLogger(__name__)
+
+
+ def get_model(args: argparse.Namespace):
+     if args.benchmark_type in {"pt-eager", "pt-compile"}:
+         model = None
+         if args.onnx_precision == "int4" and args.device == "cuda":
+             bnb_config = BitsAndBytesConfig(
+                 load_in_4bit=True,
+                 bnb_4bit_use_double_quant=True,
+                 bnb_4bit_quant_type="nf4",
+                 bnb_4bit_compute_dtype=torch.float16,
+             )
+
+             model = AutoModelForCausalLM.from_pretrained(
+                 args.hf_dir_path if args.hf_dir_path != "" else args.model_name,
+                 cache_dir=args.cache_dir,
+                 torch_dtype=args.torch_dtype,
+                 use_auth_token=args.auth,
+                 trust_remote_code=args.trust,
+                 use_cache=True,
+                 attn_implementation="flash_attention_2",
+                 quantization_config=bnb_config,
+                 max_memory={args.device_id: "80GB"},
+             )
+         else:
+             try:
+                 model = AutoModelForCausalLM.from_pretrained(
+                     args.hf_dir_path if args.hf_dir_path != "" else args.model_name,
+                     cache_dir=args.cache_dir,
+                     torch_dtype=args.torch_dtype,
+                     use_auth_token=args.auth,
+                     trust_remote_code=args.trust,
+                     use_cache=True,
+                     attn_implementation=("flash_attention_2" if args.device == "cuda" else "sdpa"),
+                 ).to(args.target_device)
+             except Exception as e:
+                 # When flash_attention or sdpa doesn't support a model, it throws an exception.
+                 # Rather than stopping a process, run as eager mode.
+                 print("Try to load a model using eager mode: ", e)
+                 model = AutoModelForCausalLM.from_pretrained(
+                     args.hf_dir_path if args.hf_dir_path != "" else args.model_name,
+                     cache_dir=args.cache_dir,
+                     torch_dtype=args.torch_dtype,
+                     use_auth_token=args.auth,
+                     trust_remote_code=args.trust,
+                     use_cache=True,
+                     attn_implementation="eager",
+                 ).to(args.target_device)
+
+         model.eval()
+
+         if args.benchmark_type == "pt-compile":
+             model = torch.compile(model)
+
+     else:
+         sess_options = ort.SessionOptions()
+         ep = (
+             ("CUDAExecutionProvider", {"device_id": args.device_id})
+             if args.device == "cuda"
+             else "CPUExecutionProvider"
+         )
+         model = ort.InferenceSession(args.onnx_model_path, sess_options=sess_options, providers=[ep])
+
+     return model
+
+
+ def run_inference(args, model, runs, inputs, outputs):
+     if args.benchmark_type == "pt-compile":
+         with torch.no_grad():
+             outputs = model(**inputs)
+
+     # Synchronize inputs
+     io_binding = None
+     if args.benchmark_type in {"pt-eager", "pt-compile"}:
+         if args.device != "cpu":
+             torch.cuda.synchronize(args.target_device)
+     else:
+         io_binding = add_io_bindings_as_tensors(model, inputs, outputs, args.use_fp16, args.use_buffer_share)
+         io_binding.synchronize_inputs()
+
+     # Run inference
+     start = time.perf_counter()
+     for _ in range(runs):
+         if args.benchmark_type in {"pt-eager", "pt-compile"}:
+             with torch.no_grad():
+                 outputs = model(**inputs)
+             if args.device != "cpu":
+                 torch.cuda.synchronize(args.target_device)
+         else:
+             model.run_with_iobinding(io_binding)
+             io_binding.synchronize_outputs()
+
+     end = time.perf_counter()
+     avg = (end - start) / runs
+     return avg, outputs
+
+
+ def prepare_model_for_inference(args, model, config, tokenizer, prompt_length, prompt):
+     clear_cache()
+     inputs, outputs = get_initial_inputs_and_outputs(
+         config, tokenizer, prompt_length, prompt, args.target_device, args.use_fp16, args.use_buffer_share, args.engine
+     )
+     _, outputs = run_inference(args, model, args.warmup_runs, inputs, outputs)
+     return inputs, outputs
+
+
+ def clear_cache():
+     gc.collect()
+     torch.cuda.empty_cache()
+
+
+ def save_results(results, filename, gen_length):
+     df = pd.DataFrame(
+         results,
+         columns=[
+             "Batch Size",
+             "Prompt Length",
+             "Prompt Processing Latency (ms)",
+             "Prompt Processing Throughput (tps)",
+             "Sampling Latency (ms)",
+             "Sampling Throughput (tps)",
+             "First Token Generated Latency (ms)",
+             "First Token Generated Throughput (tps)",
+             f"Average Latency of First {gen_length // 2} Tokens Generated (ms)",
+             f"Average Throughput of First {gen_length // 2} Tokens Generated (tps)",
+             f"Average Latency of First {gen_length} Tokens Generated (ms)",
+             f"Average Throughput of First {gen_length} Tokens Generated (tps)",
+             "Wall-Clock Latency (s)",
+             "Wall-Clock Throughput (tps)",
+         ],
+     )
+
+     df.to_csv(filename, index=False)
+     logger.info(f"Results saved in {filename}!")
+
+
+ def get_args():
+     parser = argparse.ArgumentParser()
+
+     parser.add_argument(
+         "-bt",
+         "--benchmark-type",
+         type=str,
+         required=True,
+         choices=["pt-eager", "pt-compile", "ort"],
+     )
+
+     parser.add_argument(
+         "-m",
+         "--model-name",
+         type=str,
+         required=False,
+         help="Hugging Face name of model (e.g. 'meta-llama/Llama-2-7b-hf')",
+     )
+
+     parser.add_argument(
+         "-a",
+         "--auth",
+         default=False,
+         action="store_true",
+         help="Use Hugging Face authentication token to access model",
+     )
+
+     parser.add_argument(
+         "-t",
+         "--trust",
+         default=False,
+         action="store_true",
+         help="Whether or not to allow for custom models defined on the Hugging Face Hub in their own modeling files",
+     )
+
+     parser.add_argument(
+         "-c",
+         "--cache-dir",
+         type=str,
+         default=os.path.join(".", "model_cache"),
+         help="Path to directory containing all Hugging Face files (e.g. config, tokenizer, PyTorch model). Use when loading model as `AutoModel.from_pretrained(model_name, cache_dir=cache_dir)`.",
+     )
+
+     parser.add_argument(
+         "--hf-dir-path",
+         type=str,
+         default="",
+         help="Path to directory containing all Hugging Face files (e.g. config, tokenizer, PyTorch model). Use when loading model as `AutoModel.from_pretrained(folder_path)`.",
+     )
+
+     parser.add_argument(
+         "-o",
+         "--onnx-model-path",
+         required=False,
+         help="Path to ONNX model",
+     )
+
+     parser.add_argument(
+         "-f",
+         "--prompts-file",
+         required=True,
+         default=os.path.join(".", "models", "llama", "prompts.json"),
+         help="JSON file containing entries in the format 'prompt length: prompt' where prompt length = tokenized length of prompt",
+     )
+
+     parser.add_argument(
+         "--use_buffer_share",
+         default=False,
+         action="store_true",
+         help="Use when GroupQueryAttention (GQA) is in ONNX model",
+     )
+
+     (
+         parser.add_argument(
+             "--anomaly-filtering",
+             default=False,
+             action="store_true",
+             help="Use this flag to filter anomaly accelerator times for tokens generated. \
+                   This may give more accurate latency and throughput metrics for tokens generated. \
+                   Wall-clock metrics are still reported with anomaly times though.",
+         ),
+     )
+
+     parser.add_argument(
+         "-b",
+         "--batch-sizes",
+         default="1 2",
+     )
+
+     parser.add_argument(
+         "-s",
+         "--prompt-lengths",
+         default="16 64 256 1024",
+     )
+
+     parser.add_argument(
+         "-p",
+         "--precision",
+         required=True,
+         type=str,
+         default="fp32",
+         choices=["int4", "int8", "fp16", "fp32"],
+         help="Precision for model. For ONNX models, the model's precision should be set before running this script.",
+     )
+
+     parser.add_argument(
+         "-g",
+         "--generation-length",
+         type=int,
+         default=256,
+         help="Number of new tokens to generate",
+     )
+
+     parser.add_argument(
+         "-d",
+         "--device",
+         type=str,
+         default="cuda" if torch.cuda.is_available() else "cpu",
+         choices=["cpu", "cuda"],
+     )
+
+     parser.add_argument("-id", "--device-id", type=int, default=0)
+     parser.add_argument("-w", "--warmup-runs", type=int, default=5)
+     parser.add_argument("-n", "--num-runs", type=int, default=100)
+     parser.add_argument("--seed", type=int, default=2)
+
+     args = parser.parse_args()
+
+     # Set seed properties
+     np.random.seed(args.seed)
+     torch.manual_seed(args.seed)
+
+     # Set runtime properties
+     if "ort" in args.benchmark_type:
+         setattr(args, "execution_provider", f"{args.device.upper()}ExecutionProvider")  # noqa: B010
+         if args.execution_provider == "CUDAExecutionProvider":
+             args.execution_provider = (args.execution_provider, {"device_id": args.device_id})
+
+     # Check that paths have been specified for any benchmarking with ORT
+     if args.benchmark_type == "ort":
+         assert args.onnx_model_path, "Please specify a path to `--onnx-model-path`"
+
+     args.batch_sizes = args.batch_sizes.split(" ")
+     args.prompt_lengths = args.prompt_lengths.split(" ")
+
+     # Use FP32 precision for FP32, INT8, INT4 CPU models, use FP16 precision for FP16 and INT4 GPU models
+     setattr(args, "onnx_precision", args.precision)  # noqa: B010
+     args.precision = (
+         "fp32" if args.precision in {"int8", "fp32"} or (args.precision == "int4" and args.device == "cpu") else "fp16"
+     )
+
+     target_device = f"cuda:{args.device_id}" if args.device != "cpu" else args.device
+     torch_dtype = torch.float16 if args.precision == "fp16" else torch.float32
+     engine = "ort" if args.benchmark_type == "ort" else "pt"
+     setattr(args, "target_device", target_device)  # noqa: B010
+     setattr(args, "torch_dtype", torch_dtype)  # noqa: B010
+     setattr(args, "engine", engine)  # noqa: B010
+     setattr(args, "use_fp16", args.precision == "fp16")  # noqa: B010
+
+     args.use_buffer_share = args.use_buffer_share and engine == "ort"
+
+     return args
+
+
+ def main():
+     args = get_args()
+     setup_logger(False)
+     logger.info(args.__dict__)
+
+     # Get prompts and prompt sizes
+     size_to_prompt = None
+     with open(args.prompts_file) as f:
+         size_to_prompt = json.load(f, object_hook=lambda d: {int(k): v for k, v in d.items()})
+
+     # Get config, tokenizer, and model
+     config = AutoConfig.from_pretrained(
+         args.hf_dir_path if args.hf_dir_path != "" else args.model_name,
+         cache_dir=args.cache_dir,
+         use_auth_token=args.auth,
+         trust_remote_code=args.trust,
+     )
+     tokenizer = AutoTokenizer.from_pretrained(
+         args.hf_dir_path if args.hf_dir_path != "" else args.model_name,
+         cache_dir=args.cache_dir,
+         use_auth_token=args.auth,
+         trust_remote_code=args.trust,
+     )
+     model = get_model(args)
+
+     all_csv_metrics = []
+     for batch_size, prompt_length in itertools.product(args.batch_sizes, args.prompt_lengths):
+         batch_size, prompt_length = int(batch_size), int(prompt_length)  # noqa: PLW2901
+         logger.info(f"Running batch size = {batch_size}, prompt length = {prompt_length}")
+         clear_cache()
+         max_length = prompt_length + args.generation_length
+
+         if prompt_length not in size_to_prompt:
+             raise NotImplementedError(
+                 textwrap.dedent(
+                     f"""
+                     A prompt of size {prompt_length} was not found in '{args.prompts_file}'. There are a couple of solutions to fix this.
+                     1) You can change one of the keys in '{args.prompts_file}' to be {prompt_length}.
+                        If {prompt_length} < actual prompt's length, the benchmark E2E tool will repeat the first word in the prompt until {prompt_length} = actual prompt's length.
+                        If {prompt_length} > actual prompt's length, the benchmark E2E tool will automatically trim the actual prompt's length so that {prompt_length} = actual prompt's length.
+                     2) You can add a new key-value entry in '{args.prompts_file}' of the form '{prompt_length}': 'your prompt goes here'.
+                     """
+                 )
+             )
+         prompt = [size_to_prompt[prompt_length]] * batch_size
+         csv_metrics = [batch_size, prompt_length]
+
+         try:
+             # Measure prompt processing
+             logger.info("Measuring prompt processing...")
+             inputs, outputs = prepare_model_for_inference(args, model, config, tokenizer, prompt_length, prompt)
+             accelerator_prompt_latency_s, outputs = run_inference(args, model, args.num_runs, inputs, outputs)
+
+             # Calculate prompt metrics
+             accelerator_prompt_latency_ms = accelerator_prompt_latency_s * 1000
+             accelerator_prompt_thrpt = batch_size * (prompt_length / accelerator_prompt_latency_s)
+             logger.info(f"Average Latency of Prompt Processing: {accelerator_prompt_latency_ms} ms")
+             logger.info(
+                 f"Average Throughput of Prompt Processing: {batch_size * (prompt_length / accelerator_prompt_latency_s)} tps"
+             )
+             csv_metrics.extend([accelerator_prompt_latency_ms, accelerator_prompt_thrpt])
+
+             # Measure token generation
+             logger.info("Measuring token generation...")
+             clear_cache()
+             inputs, outputs = prepare_model_for_inference(args, model, config, tokenizer, prompt_length, prompt)
+
+             all_token_ids = inputs["input_ids"].clone()
+             current_length = all_token_ids.shape[-1]
+             num_heads = config.num_key_value_heads
+             head_size = (
+                 config.head_dim if hasattr(config, "head_dim") else config.hidden_size // config.num_attention_heads
+             )
+
+             has_eos = torch.zeros(batch_size, device=args.target_device, dtype=torch.bool)
+
+             # 0th entry will have prompt accelerator time, 1st entry onwards will have token generation accelerator time
+             accelerator_times = []
+             sampling_times = []  # cost to sample after each model run
+
+             wall_clock_start_time = time.perf_counter()
+             while current_length <= max_length:
+                 # Run inference
+                 accelerator_time_latency_s, outputs = run_inference(args, model, 1, inputs, outputs)
+                 accelerator_times.append(accelerator_time_latency_s)
+
+                 # Sample with argmax (greedy search)
+                 sampling_start_time = time.perf_counter()
+                 if outputs["logits"].shape[1] > 1:
+                     prompt_end_indices = inputs["attention_mask"].sum(1) - 1
+                     idxs = (
+                         prompt_end_indices.unsqueeze(dim=1)
+                         .repeat(1, config.vocab_size)
+                         .view(batch_size, 1, config.vocab_size)
+                     )
+                     next_token_logits = torch.gather(outputs["logits"], 1, idxs).squeeze()
+                 else:
+                     next_token_logits = outputs["logits"][:, -1, :]
+                 next_tokens = torch.argmax(next_token_logits, dim=-1)
+
+                 # Check if we previously reached EOS token id or if generated token id is EOS token id
+                 has_eos = has_eos | next_tokens == tokenizer.eos_token_id
+
+                 # Determine which new tokens to add to list of all token ids
+                 # Add EOS token ids for batch entries that ended early (ragged batching scenario where some batch entries ended early and some haven't)
+                 tokens_to_add = next_tokens.masked_fill(has_eos, tokenizer.eos_token_id).reshape([batch_size, 1])
+                 sampling_end_time = time.perf_counter()
+                 sampling_times.append(sampling_end_time - sampling_start_time)
+
+                 all_token_ids = torch.cat([all_token_ids, tokens_to_add], dim=-1)
+                 current_length += 1
+
+                 # Update inputs for next inference run
+                 inputs["input_ids"] = tokens_to_add
+                 inputs["attention_mask"] = torch.cat(
+                     [inputs["attention_mask"], (~has_eos).to(torch.int64).reshape(batch_size, 1)], 1
+                 )
+                 if "position_ids" in inputs:
+                     inputs["position_ids"] = torch.max(inputs["position_ids"], dim=1)[0].reshape(batch_size, 1) + 1
+
+                 # Set logits to zeros for next inference run and re-use memory buffer
+                 if outputs["logits"].shape[1] != 1:
+                     outputs["logits"] = outputs["logits"][:, :1, :].contiguous()
+                 outputs["logits"].zero_()
+
+                 # Update KV caches for next inference run
+                 if args.engine == "pt":
+                     # Update KV caches for PyTorch
+                     inputs["past_key_values"] = outputs["past_key_values"]
+                 elif not args.use_buffer_share:
+                     # Update KV caches for ONNX Runtime if buffer sharing is not used
+                     for i in range(config.num_hidden_layers):
+                         inputs[f"past_key_values.{i}.key"] = outputs[f"present.{i}.key"]
+                         inputs[f"past_key_values.{i}.value"] = outputs[f"present.{i}.value"]
+
+                     new_sequence_length = inputs["attention_mask"].shape[1]
+                     for i in range(config.num_hidden_layers):
+                         present_key = torch.zeros(
+                             batch_size,
+                             num_heads,
+                             new_sequence_length,
+                             head_size,
+                             device=args.target_device,
+                             dtype=args.torch_dtype,
+                         )
+                         present_value = torch.zeros(
+                             batch_size,
+                             num_heads,
+                             new_sequence_length,
+                             head_size,
+                             device=args.target_device,
+                             dtype=args.torch_dtype,
+                         )
+                         outputs.update(
+                             {
+                                 f"present.{i}.key": present_key.contiguous(),
+                                 f"present.{i}.value": present_value.contiguous(),
+                             }
+                         )
+
+             wall_clock_end_time = time.perf_counter()
+
+             # Filter out any anomaly accelerator times (e.g. for `torch.compile`)
+             accelerator_times.pop(0)  # Remove prompt processing time
+             if args.anomaly_filtering:
+                 anomaly_threshold_factor = 10
+                 min_time_s = min(accelerator_times)
+                 orig_size = len(accelerator_times)
+                 accelerator_times = list(
+                     filter(lambda acc_time: acc_time < anomaly_threshold_factor * min_time_s, accelerator_times)
+                 )
+                 new_size = len(accelerator_times)
+                 logger.info(
+                     f"Filtered out {orig_size - new_size} anomaly accelerator times that are {anomaly_threshold_factor}x greater than {min_time_s * 1000} ms..."
+                 )
+
+             #######################################################
+             # Calculate sampling and first token generated metrics
+             #######################################################
+
+             # Calculate sampling metrics
+             avg_sampling_latency_s = sum(sampling_times) / len(sampling_times)
+             avg_sampling_latency_ms = avg_sampling_latency_s * 1000
+             avg_sampling_thrpt = batch_size * (1 / avg_sampling_latency_s)
+             logger.info(f"Average Latency of Sampling: {avg_sampling_latency_ms} ms")
+             logger.info(f"Average Throughput of Sampling: {avg_sampling_thrpt} tps")
+
+             # Calculate first token generated metrics
+             first_token_latency_s = accelerator_times[0]
+             first_token_latency_ms = first_token_latency_s * 1000
+             first_token_thrpt = batch_size * (1 / first_token_latency_s)
+             logger.info(f"Latency of First Token Generated: {first_token_latency_ms} ms")
+             logger.info(f"Throughput of First Token Generated: {first_token_thrpt} tps")
+
+             ####################################################
+             # Calculate first `halfway` token generated metrics
+             ####################################################
+
+             halfway = args.generation_length // 2
+             halfway_token_latency_s = sum(accelerator_times[:halfway]) / len(accelerator_times[:halfway])
+             halfway_token_latency_ms = halfway_token_latency_s * 1000
+             halfway_token_thrpt = batch_size * (1 / halfway_token_latency_s)
+             logger.info(f"Average Latency of First {halfway} Tokens Generated: {halfway_token_latency_ms} ms")
+             logger.info(f"Average Throughput of First {halfway} Tokens Generated: {halfway_token_thrpt} tps")
+
+             #########################################
+             # Calculate all tokens generated metrics
+             #########################################
+
+             all_token_latency_s = sum(accelerator_times) / len(accelerator_times)
+             all_token_latency_ms = all_token_latency_s * 1000
+             all_token_thrpt = batch_size * (1 / all_token_latency_s)
+             logger.info(
+                 f"Average Latency of First {args.generation_length} Tokens Generated: {all_token_latency_ms} ms"
+             )
+             logger.info(f"Average Throughput of First {args.generation_length} Tokens Generated: {all_token_thrpt} tps")
+
+             ###############################
+             # Calculate wall clock metrics
+             ###############################
+
+             wall_clock_latency_s = wall_clock_end_time - wall_clock_start_time
+             wall_clock_thrpt = batch_size * ((prompt_length + args.generation_length) / wall_clock_latency_s)
+             logger.info(f"Wall-Clock Latency: {wall_clock_latency_s} s")
+             logger.info(
+                 f"Wall-Clock Throughput: {batch_size * ((prompt_length + args.generation_length) / wall_clock_latency_s)} tps"
+             )
+
+             # Add metrics to CSV
+             logger.info("Adding results to CSV")
+             csv_metrics.extend(
+                 [
+                     avg_sampling_latency_ms,
+                     avg_sampling_thrpt,
+                     first_token_latency_ms,
+                     first_token_thrpt,
+                     halfway_token_latency_ms,
+                     halfway_token_thrpt,
+                     all_token_latency_ms,
+                     all_token_thrpt,
+                     wall_clock_latency_s,
+                     wall_clock_thrpt,
+                 ]
+             )
+             all_csv_metrics.append(csv_metrics)
+
+         except Exception as e:
+             logger.info(f"Could not benchmark at batch size = {batch_size}, prompt length = {prompt_length} - {e}")
+
+     filename = f"benchmark_{args.engine}_e2e_{datetime.datetime.now():%Y-%m-%d_%H:%M:%S}.csv"
+     save_results(all_csv_metrics, filename, args.generation_length)
+
+
+ if __name__ == "__main__":
+     main()
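
Note on usage: the script above reads its prompts from the JSON file passed via --prompts-file, which maps each tokenized prompt length to a prompt string; main() converts the JSON keys to integers with an object_hook. As a rough illustration only (not part of the package), the sketch below writes a hypothetical prompts.json in that shape; the file name, the two lengths, and the placeholder prompt texts are assumptions you would replace with prompts that actually tokenize to the stated lengths.

    # Minimal sketch (assumption, not shipped in the wheel): create a prompts.json
    # in the shape expected by --prompts-file, i.e. {"<tokenized length>": "<prompt>"}.
    import json

    prompts = {
        "16": "placeholder prompt that should tokenize to exactly 16 tokens",
        "64": "placeholder prompt that should tokenize to exactly 64 tokens",
    }

    with open("prompts.json", "w") as f:  # hypothetical file name
        json.dump(prompts, f, indent=2)

With such a file in place, a typical run would combine it with the flags defined in get_args(), for example `-bt ort -m meta-llama/Llama-2-7b-hf -o <path to ONNX model> -p fp16 -d cuda -f prompts.json`; the exact model name and paths depend on your setup.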