onnxruntime-directml 1.24.1__cp314-cp314-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- onnxruntime/LICENSE +21 -0
- onnxruntime/Privacy.md +21 -0
- onnxruntime/ThirdPartyNotices.txt +6121 -0
- onnxruntime/__init__.py +418 -0
- onnxruntime/backend/__init__.py +6 -0
- onnxruntime/backend/backend.py +175 -0
- onnxruntime/backend/backend_rep.py +52 -0
- onnxruntime/capi/DirectML.dll +0 -0
- onnxruntime/capi/__init__.py +4 -0
- onnxruntime/capi/_ld_preload.py +7 -0
- onnxruntime/capi/_pybind_state.py +33 -0
- onnxruntime/capi/build_and_package_info.py +2 -0
- onnxruntime/capi/convert_npz_to_onnx_adapter.py +48 -0
- onnxruntime/capi/onnxruntime.dll +0 -0
- onnxruntime/capi/onnxruntime_collect_build_info.py +47 -0
- onnxruntime/capi/onnxruntime_inference_collection.py +1440 -0
- onnxruntime/capi/onnxruntime_providers_shared.dll +0 -0
- onnxruntime/capi/onnxruntime_pybind11_state.pyd +0 -0
- onnxruntime/capi/onnxruntime_validation.py +154 -0
- onnxruntime/capi/version_info.py +2 -0
- onnxruntime/datasets/__init__.py +18 -0
- onnxruntime/datasets/logreg_iris.onnx +0 -0
- onnxruntime/datasets/mul_1.onnx +0 -0
- onnxruntime/datasets/sigmoid.onnx +13 -0
- onnxruntime/quantization/CalTableFlatBuffers/KeyValue.py +78 -0
- onnxruntime/quantization/CalTableFlatBuffers/TrtTable.py +90 -0
- onnxruntime/quantization/CalTableFlatBuffers/__init__.py +0 -0
- onnxruntime/quantization/__init__.py +19 -0
- onnxruntime/quantization/base_quantizer.py +529 -0
- onnxruntime/quantization/calibrate.py +1267 -0
- onnxruntime/quantization/execution_providers/qnn/__init__.py +2 -0
- onnxruntime/quantization/execution_providers/qnn/fusion_lpnorm.py +132 -0
- onnxruntime/quantization/execution_providers/qnn/fusion_spacetodepth.py +162 -0
- onnxruntime/quantization/execution_providers/qnn/mixed_precision_overrides_utils.py +413 -0
- onnxruntime/quantization/execution_providers/qnn/preprocess.py +353 -0
- onnxruntime/quantization/execution_providers/qnn/quant_config.py +389 -0
- onnxruntime/quantization/fusions/__init__.py +4 -0
- onnxruntime/quantization/fusions/fusion.py +311 -0
- onnxruntime/quantization/fusions/fusion_gelu.py +272 -0
- onnxruntime/quantization/fusions/fusion_layernorm.py +146 -0
- onnxruntime/quantization/fusions/replace_upsample_with_resize.py +96 -0
- onnxruntime/quantization/matmul_bnb4_quantizer.py +239 -0
- onnxruntime/quantization/matmul_nbits_quantizer.py +1638 -0
- onnxruntime/quantization/neural_compressor/__init__.py +1 -0
- onnxruntime/quantization/neural_compressor/onnx_model.py +1251 -0
- onnxruntime/quantization/neural_compressor/util.py +80 -0
- onnxruntime/quantization/neural_compressor/weight_only.py +932 -0
- onnxruntime/quantization/onnx_model.py +600 -0
- onnxruntime/quantization/onnx_quantizer.py +1163 -0
- onnxruntime/quantization/operators/__init__.py +2 -0
- onnxruntime/quantization/operators/activation.py +119 -0
- onnxruntime/quantization/operators/argmax.py +18 -0
- onnxruntime/quantization/operators/attention.py +73 -0
- onnxruntime/quantization/operators/base_operator.py +26 -0
- onnxruntime/quantization/operators/binary_op.py +72 -0
- onnxruntime/quantization/operators/concat.py +62 -0
- onnxruntime/quantization/operators/conv.py +260 -0
- onnxruntime/quantization/operators/direct_q8.py +78 -0
- onnxruntime/quantization/operators/embed_layernorm.py +121 -0
- onnxruntime/quantization/operators/gather.py +64 -0
- onnxruntime/quantization/operators/gavgpool.py +62 -0
- onnxruntime/quantization/operators/gemm.py +172 -0
- onnxruntime/quantization/operators/lstm.py +121 -0
- onnxruntime/quantization/operators/matmul.py +231 -0
- onnxruntime/quantization/operators/maxpool.py +34 -0
- onnxruntime/quantization/operators/norm.py +40 -0
- onnxruntime/quantization/operators/pad.py +172 -0
- onnxruntime/quantization/operators/pooling.py +67 -0
- onnxruntime/quantization/operators/qdq_base_operator.py +22 -0
- onnxruntime/quantization/operators/resize.py +34 -0
- onnxruntime/quantization/operators/softmax.py +74 -0
- onnxruntime/quantization/operators/split.py +63 -0
- onnxruntime/quantization/operators/where.py +87 -0
- onnxruntime/quantization/preprocess.py +141 -0
- onnxruntime/quantization/qdq_loss_debug.py +389 -0
- onnxruntime/quantization/qdq_quantizer.py +1477 -0
- onnxruntime/quantization/quant_utils.py +1051 -0
- onnxruntime/quantization/quantize.py +953 -0
- onnxruntime/quantization/registry.py +110 -0
- onnxruntime/quantization/shape_inference.py +204 -0
- onnxruntime/quantization/static_quantize_runner.py +256 -0
- onnxruntime/quantization/tensor_quant_overrides.py +520 -0
- onnxruntime/tools/__init__.py +10 -0
- onnxruntime/tools/check_onnx_model_mobile_usability.py +47 -0
- onnxruntime/tools/convert_onnx_models_to_ort.py +380 -0
- onnxruntime/tools/file_utils.py +47 -0
- onnxruntime/tools/logger.py +11 -0
- onnxruntime/tools/make_dynamic_shape_fixed.py +73 -0
- onnxruntime/tools/mobile_helpers/__init__.py +0 -0
- onnxruntime/tools/mobile_helpers/coreml_supported_mlprogram_ops.md +53 -0
- onnxruntime/tools/mobile_helpers/coreml_supported_neuralnetwork_ops.md +43 -0
- onnxruntime/tools/mobile_helpers/nnapi_supported_ops.md +58 -0
- onnxruntime/tools/mobile_helpers/usability_checker.py +738 -0
- onnxruntime/tools/offline_tuning.py +169 -0
- onnxruntime/tools/onnx_model_utils.py +416 -0
- onnxruntime/tools/onnx_randomizer.py +85 -0
- onnxruntime/tools/onnxruntime_test.py +164 -0
- onnxruntime/tools/optimize_onnx_model.py +56 -0
- onnxruntime/tools/ort_format_model/__init__.py +27 -0
- onnxruntime/tools/ort_format_model/operator_type_usage_processors.py +653 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/__init__.py +0 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ArgType.py +7 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ArgTypeAndIndex.py +67 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Attribute.py +337 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/AttributeType.py +18 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Checkpoint.py +125 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DeprecatedKernelCreateInfos.py +120 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DeprecatedNodeIndexAndKernelDefHash.py +68 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DeprecatedSessionState.py +96 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DeprecatedSubGraphSessionState.py +72 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Dimension.py +71 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DimensionValue.py +80 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DimensionValueType.py +8 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/EdgeEnd.py +32 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/FloatProperty.py +67 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Graph.py +320 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/InferenceSession.py +88 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/IntProperty.py +67 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/KernelTypeStrArgsEntry.py +91 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/KernelTypeStrResolver.py +78 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/MapType.py +71 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Model.py +223 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ModuleState.py +141 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Node.py +317 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/NodeEdge.py +126 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/NodeType.py +7 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/NodesToOptimizeIndices.py +160 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/OpIdKernelTypeStrArgsEntry.py +91 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/OperatorSetId.py +67 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/OptimizerGroup.py +117 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ParameterOptimizerState.py +91 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/PropertyBag.py +152 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/RuntimeOptimizationRecord.py +105 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/RuntimeOptimizationRecordContainerEntry.py +91 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/RuntimeOptimizations.py +79 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/SequenceType.py +58 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Shape.py +78 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/SparseTensor.py +114 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/StringProperty.py +67 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/StringStringEntry.py +67 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Tensor.py +203 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/TensorDataType.py +26 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/TensorTypeAndShape.py +71 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/TypeInfo.py +83 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/TypeInfoValue.py +9 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ValueInfo.py +84 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/__init__.py +6 -0
- onnxruntime/tools/ort_format_model/ort_model_processor.py +86 -0
- onnxruntime/tools/ort_format_model/types.py +85 -0
- onnxruntime/tools/ort_format_model/utils.py +61 -0
- onnxruntime/tools/pytorch_export_contrib_ops.py +129 -0
- onnxruntime/tools/pytorch_export_helpers.py +131 -0
- onnxruntime/tools/qdq_helpers/__init__.py +0 -0
- onnxruntime/tools/qdq_helpers/optimize_qdq_model.py +37 -0
- onnxruntime/tools/qnn/add_trans_cast.py +292 -0
- onnxruntime/tools/qnn/gen_qnn_ctx_onnx_model.py +364 -0
- onnxruntime/tools/qnn/preprocess.py +165 -0
- onnxruntime/tools/reduced_build_config_parser.py +203 -0
- onnxruntime/tools/remove_initializer_from_input.py +37 -0
- onnxruntime/tools/symbolic_shape_infer.py +3094 -0
- onnxruntime/tools/update_onnx_opset.py +31 -0
- onnxruntime/transformers/__init__.py +8 -0
- onnxruntime/transformers/affinity_helper.py +40 -0
- onnxruntime/transformers/benchmark.py +942 -0
- onnxruntime/transformers/benchmark_helper.py +643 -0
- onnxruntime/transformers/bert_perf_test.py +629 -0
- onnxruntime/transformers/bert_test_data.py +641 -0
- onnxruntime/transformers/compare_bert_results.py +256 -0
- onnxruntime/transformers/constants.py +47 -0
- onnxruntime/transformers/convert_generation.py +3605 -0
- onnxruntime/transformers/convert_tf_models_to_pytorch.py +205 -0
- onnxruntime/transformers/convert_to_packing_mode.py +385 -0
- onnxruntime/transformers/dynamo_onnx_helper.py +205 -0
- onnxruntime/transformers/float16.py +501 -0
- onnxruntime/transformers/fusion_attention.py +1189 -0
- onnxruntime/transformers/fusion_attention_clip.py +340 -0
- onnxruntime/transformers/fusion_attention_sam2.py +533 -0
- onnxruntime/transformers/fusion_attention_unet.py +1307 -0
- onnxruntime/transformers/fusion_attention_vae.py +300 -0
- onnxruntime/transformers/fusion_bart_attention.py +435 -0
- onnxruntime/transformers/fusion_base.py +141 -0
- onnxruntime/transformers/fusion_bias_add.py +57 -0
- onnxruntime/transformers/fusion_biasgelu.py +66 -0
- onnxruntime/transformers/fusion_biassplitgelu.py +110 -0
- onnxruntime/transformers/fusion_conformer_attention.py +222 -0
- onnxruntime/transformers/fusion_constant_fold.py +144 -0
- onnxruntime/transformers/fusion_embedlayer.py +810 -0
- onnxruntime/transformers/fusion_fastgelu.py +492 -0
- onnxruntime/transformers/fusion_gelu.py +258 -0
- onnxruntime/transformers/fusion_gelu_approximation.py +25 -0
- onnxruntime/transformers/fusion_gemmfastgelu.py +121 -0
- onnxruntime/transformers/fusion_gpt_attention.py +546 -0
- onnxruntime/transformers/fusion_gpt_attention_megatron.py +355 -0
- onnxruntime/transformers/fusion_gpt_attention_no_past.py +260 -0
- onnxruntime/transformers/fusion_group_norm.py +180 -0
- onnxruntime/transformers/fusion_layernorm.py +489 -0
- onnxruntime/transformers/fusion_mha_mmdit.py +667 -0
- onnxruntime/transformers/fusion_nhwc_conv.py +99 -0
- onnxruntime/transformers/fusion_options.py +340 -0
- onnxruntime/transformers/fusion_qordered_attention.py +420 -0
- onnxruntime/transformers/fusion_qordered_gelu.py +118 -0
- onnxruntime/transformers/fusion_qordered_layernorm.py +122 -0
- onnxruntime/transformers/fusion_qordered_matmul.py +216 -0
- onnxruntime/transformers/fusion_quickgelu.py +74 -0
- onnxruntime/transformers/fusion_reshape.py +173 -0
- onnxruntime/transformers/fusion_rotary_attention.py +1591 -0
- onnxruntime/transformers/fusion_shape.py +109 -0
- onnxruntime/transformers/fusion_simplified_layernorm.py +165 -0
- onnxruntime/transformers/fusion_skip_group_norm.py +254 -0
- onnxruntime/transformers/fusion_skiplayernorm.py +209 -0
- onnxruntime/transformers/fusion_transpose.py +167 -0
- onnxruntime/transformers/fusion_utils.py +321 -0
- onnxruntime/transformers/huggingface_models.py +74 -0
- onnxruntime/transformers/import_utils.py +20 -0
- onnxruntime/transformers/io_binding_helper.py +487 -0
- onnxruntime/transformers/large_model_exporter.py +395 -0
- onnxruntime/transformers/machine_info.py +230 -0
- onnxruntime/transformers/metrics.py +163 -0
- onnxruntime/transformers/models/bart/__init__.py +12 -0
- onnxruntime/transformers/models/bart/export.py +98 -0
- onnxruntime/transformers/models/bert/__init__.py +12 -0
- onnxruntime/transformers/models/bert/eval_squad.py +329 -0
- onnxruntime/transformers/models/gpt2/__init__.py +12 -0
- onnxruntime/transformers/models/gpt2/benchmark_gpt2.py +413 -0
- onnxruntime/transformers/models/gpt2/convert_to_onnx.py +566 -0
- onnxruntime/transformers/models/gpt2/gpt2_helper.py +1031 -0
- onnxruntime/transformers/models/gpt2/gpt2_parity.py +513 -0
- onnxruntime/transformers/models/gpt2/gpt2_tester.py +501 -0
- onnxruntime/transformers/models/gpt2/parity_check_helper.py +146 -0
- onnxruntime/transformers/models/llama/__init__.py +12 -0
- onnxruntime/transformers/models/llama/benchmark.py +700 -0
- onnxruntime/transformers/models/llama/benchmark_all.py +488 -0
- onnxruntime/transformers/models/llama/benchmark_e2e.py +608 -0
- onnxruntime/transformers/models/llama/convert_to_onnx.py +1064 -0
- onnxruntime/transformers/models/llama/dist_settings.py +57 -0
- onnxruntime/transformers/models/llama/llama_inputs.py +504 -0
- onnxruntime/transformers/models/llama/llama_parity.py +343 -0
- onnxruntime/transformers/models/llama/llama_torch.py +47 -0
- onnxruntime/transformers/models/llama/quant_kv_dataloader.py +108 -0
- onnxruntime/transformers/models/longformer/__init__.py +12 -0
- onnxruntime/transformers/models/longformer/benchmark_longformer.py +821 -0
- onnxruntime/transformers/models/longformer/convert_to_onnx.py +413 -0
- onnxruntime/transformers/models/longformer/generate_test_data.py +347 -0
- onnxruntime/transformers/models/longformer/longformer_helper.py +76 -0
- onnxruntime/transformers/models/phi2/__init__.py +12 -0
- onnxruntime/transformers/models/phi2/convert_to_onnx.py +590 -0
- onnxruntime/transformers/models/phi2/inference_example.py +414 -0
- onnxruntime/transformers/models/sam2/__init__.py +12 -0
- onnxruntime/transformers/models/sam2/benchmark_sam2.py +638 -0
- onnxruntime/transformers/models/sam2/convert_to_onnx.py +270 -0
- onnxruntime/transformers/models/sam2/image_decoder.py +272 -0
- onnxruntime/transformers/models/sam2/image_encoder.py +236 -0
- onnxruntime/transformers/models/sam2/mask_decoder.py +208 -0
- onnxruntime/transformers/models/sam2/nvtx_helper.py +33 -0
- onnxruntime/transformers/models/sam2/prompt_encoder.py +189 -0
- onnxruntime/transformers/models/sam2/sam2_demo.py +321 -0
- onnxruntime/transformers/models/sam2/sam2_image_onnx_predictor.py +279 -0
- onnxruntime/transformers/models/sam2/sam2_utils.py +147 -0
- onnxruntime/transformers/models/stable_diffusion/__init__.py +12 -0
- onnxruntime/transformers/models/stable_diffusion/benchmark.py +1519 -0
- onnxruntime/transformers/models/stable_diffusion/benchmark_controlnet.py +426 -0
- onnxruntime/transformers/models/stable_diffusion/demo_txt2img.py +103 -0
- onnxruntime/transformers/models/stable_diffusion/demo_txt2img_xl.py +269 -0
- onnxruntime/transformers/models/stable_diffusion/demo_utils.py +778 -0
- onnxruntime/transformers/models/stable_diffusion/diffusion_models.py +1318 -0
- onnxruntime/transformers/models/stable_diffusion/diffusion_schedulers.py +1179 -0
- onnxruntime/transformers/models/stable_diffusion/engine_builder.py +295 -0
- onnxruntime/transformers/models/stable_diffusion/engine_builder_ort_cuda.py +387 -0
- onnxruntime/transformers/models/stable_diffusion/engine_builder_ort_trt.py +288 -0
- onnxruntime/transformers/models/stable_diffusion/engine_builder_tensorrt.py +395 -0
- onnxruntime/transformers/models/stable_diffusion/engine_builder_torch.py +108 -0
- onnxruntime/transformers/models/stable_diffusion/optimize_pipeline.py +590 -0
- onnxruntime/transformers/models/stable_diffusion/ort_optimizer.py +136 -0
- onnxruntime/transformers/models/stable_diffusion/pipeline_stable_diffusion.py +831 -0
- onnxruntime/transformers/models/stable_diffusion/trt_utilities.py +12 -0
- onnxruntime/transformers/models/t5/__init__.py +12 -0
- onnxruntime/transformers/models/t5/convert_to_onnx.py +318 -0
- onnxruntime/transformers/models/t5/t5_decoder.py +437 -0
- onnxruntime/transformers/models/t5/t5_encoder.py +70 -0
- onnxruntime/transformers/models/t5/t5_encoder_decoder_init.py +361 -0
- onnxruntime/transformers/models/t5/t5_helper.py +302 -0
- onnxruntime/transformers/models/whisper/__init__.py +12 -0
- onnxruntime/transformers/models/whisper/benchmark.py +585 -0
- onnxruntime/transformers/models/whisper/benchmark_all.py +526 -0
- onnxruntime/transformers/models/whisper/convert_to_onnx.py +609 -0
- onnxruntime/transformers/models/whisper/whisper_chain.py +334 -0
- onnxruntime/transformers/models/whisper/whisper_decoder.py +464 -0
- onnxruntime/transformers/models/whisper/whisper_encoder.py +164 -0
- onnxruntime/transformers/models/whisper/whisper_encoder_decoder_init.py +371 -0
- onnxruntime/transformers/models/whisper/whisper_helper.py +1035 -0
- onnxruntime/transformers/models/whisper/whisper_inputs.py +380 -0
- onnxruntime/transformers/models/whisper/whisper_jump_times.py +477 -0
- onnxruntime/transformers/onnx_exporter.py +719 -0
- onnxruntime/transformers/onnx_model.py +1636 -0
- onnxruntime/transformers/onnx_model_bart.py +141 -0
- onnxruntime/transformers/onnx_model_bert.py +488 -0
- onnxruntime/transformers/onnx_model_bert_keras.py +474 -0
- onnxruntime/transformers/onnx_model_bert_tf.py +588 -0
- onnxruntime/transformers/onnx_model_clip.py +42 -0
- onnxruntime/transformers/onnx_model_conformer.py +32 -0
- onnxruntime/transformers/onnx_model_gpt2.py +101 -0
- onnxruntime/transformers/onnx_model_mmdit.py +112 -0
- onnxruntime/transformers/onnx_model_phi.py +929 -0
- onnxruntime/transformers/onnx_model_sam2.py +137 -0
- onnxruntime/transformers/onnx_model_t5.py +985 -0
- onnxruntime/transformers/onnx_model_tnlr.py +226 -0
- onnxruntime/transformers/onnx_model_unet.py +258 -0
- onnxruntime/transformers/onnx_model_vae.py +42 -0
- onnxruntime/transformers/onnx_utils.py +55 -0
- onnxruntime/transformers/optimizer.py +620 -0
- onnxruntime/transformers/past_helper.py +149 -0
- onnxruntime/transformers/profile_result_processor.py +358 -0
- onnxruntime/transformers/profiler.py +434 -0
- onnxruntime/transformers/quantize_helper.py +76 -0
- onnxruntime/transformers/shape_infer_helper.py +121 -0
- onnxruntime/transformers/shape_optimizer.py +400 -0
- onnxruntime/transformers/torch_onnx_export_helper.py +74 -0
- onnxruntime_directml-1.24.1.dist-info/METADATA +216 -0
- onnxruntime_directml-1.24.1.dist-info/RECORD +322 -0
- onnxruntime_directml-1.24.1.dist-info/WHEEL +5 -0
- onnxruntime_directml-1.24.1.dist-info/entry_points.txt +2 -0
- onnxruntime_directml-1.24.1.dist-info/top_level.txt +1 -0
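For context, the wheel bundles the DirectML execution provider (`DirectML.dll`, `onnxruntime.dll`) alongside the standard Python API. Below is a minimal sketch of loading a model with this build; the model path `model.onnx` and the zero-filled input are placeholders for illustration, not files shipped in the package.

```python
import numpy as np
import onnxruntime as ort

# The DirectML build registers DmlExecutionProvider; CPU is kept as a fallback.
print(ort.get_available_providers())

# "model.onnx" is a placeholder path; any ONNX model with a single float input works the same way.
session = ort.InferenceSession(
    "model.onnx",
    providers=["DmlExecutionProvider", "CPUExecutionProvider"],
)

# Feed a dummy tensor shaped like the model's first input and run inference.
input_name = session.get_inputs()[0].name
outputs = session.run(None, {input_name: np.zeros((1, 3, 224, 224), dtype=np.float32)})
```

The diff below shows one of the added files, the Stable Diffusion XL demo from the transformers tooling.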
onnxruntime/transformers/models/stable_diffusion/demo_txt2img_xl.py
@@ -0,0 +1,269 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+# --------------------------------------------------------------------------
+# Modified from TensorRT demo diffusion, which has the following license:
+#
+# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# --------------------------------------------------------------------------
+
+import logging
+
+from cuda import cudart
+from demo_utils import (
+    add_controlnet_arguments,
+    arg_parser,
+    get_metadata,
+    load_pipelines,
+    parse_arguments,
+    process_controlnet_arguments,
+    repeat_prompt,
+)
+
+
+def run_pipelines(
+    args, base, refiner, prompt, negative_prompt, controlnet_image=None, controlnet_scale=None, is_warm_up=False
+):
+    image_height = args.height
+    image_width = args.width
+    batch_size = len(prompt)
+    base.load_resources(image_height, image_width, batch_size)
+    if refiner:
+        refiner.load_resources(image_height, image_width, batch_size)
+
+    def run_base_and_refiner(warmup=False):
+        images, base_perf = base.run(
+            prompt,
+            negative_prompt,
+            image_height,
+            image_width,
+            denoising_steps=args.denoising_steps,
+            guidance=args.guidance,
+            seed=args.seed,
+            controlnet_images=controlnet_image,
+            controlnet_scales=controlnet_scale,
+            show_latency=not warmup,
+            output_type="latent" if refiner else "pil",
+        )
+        if refiner is None:
+            return images, base_perf
+
+        # Use same seed in base and refiner.
+        seed = base.get_current_seed()
+
+        images, refiner_perf = refiner.run(
+            prompt,
+            negative_prompt,
+            image_height,
+            image_width,
+            denoising_steps=args.refiner_denoising_steps,
+            image=images,
+            strength=args.strength,
+            guidance=args.refiner_guidance,
+            seed=seed,
+            show_latency=not warmup,
+        )
+
+        perf_data = None
+        if base_perf and refiner_perf:
+            perf_data = {"latency": base_perf["latency"] + refiner_perf["latency"]}
+            perf_data.update({"base." + key: val for key, val in base_perf.items()})
+            perf_data.update({"refiner." + key: val for key, val in refiner_perf.items()})
+
+        return images, perf_data
+
+    if not args.disable_cuda_graph:
+        # inference once to get cuda graph
+        _, _ = run_base_and_refiner(warmup=True)
+
+    if args.num_warmup_runs > 0:
+        print("[I] Warming up ..")
+        for _ in range(args.num_warmup_runs):
+            _, _ = run_base_and_refiner(warmup=True)
+
+    if is_warm_up:
+        return
+
+    print("[I] Running StableDiffusion XL pipeline")
+    if args.nvtx_profile:
+        cudart.cudaProfilerStart()
+    images, perf_data = run_base_and_refiner(warmup=False)
+    if args.nvtx_profile:
+        cudart.cudaProfilerStop()
+
+    if refiner:
+        print("|----------------|--------------|")
+        print("| {:^14} | {:>9.2f} ms |".format("e2e", perf_data["latency"]))
+        print("|----------------|--------------|")
+
+    metadata = get_metadata(args, True)
+    metadata.update({"base." + key: val for key, val in base.metadata().items()})
+    if refiner:
+        metadata.update({"refiner." + key: val for key, val in refiner.metadata().items()})
+    if perf_data:
+        metadata.update(perf_data)
+    metadata["images"] = len(images)
+    print(metadata)
+    (refiner or base).save_images(images, prompt, negative_prompt, metadata)
+
+
+def run_demo(args):
+    """Run Stable Diffusion XL Base + Refiner together (known as ensemble of expert denoisers) to generate an image."""
+    controlnet_image, controlnet_scale = process_controlnet_arguments(args)
+    prompt, negative_prompt = repeat_prompt(args)
+    batch_size = len(prompt)
+    base, refiner = load_pipelines(args, batch_size)
+    run_pipelines(args, base, refiner, prompt, negative_prompt, controlnet_image, controlnet_scale)
+    base.teardown()
+    if refiner:
+        refiner.teardown()
+
+
+def run_dynamic_shape_demo(args):
+    """
+    Run demo of generating images with different settings with ORT CUDA provider.
+    Try "python demo_txt2img_xl.py --max-cuda-graphs 3 --user-compute-stream" to see the effect of multiple CUDA graphs.
+    """
+    args.engine = "ORT_CUDA"
+    base, refiner = load_pipelines(args, 1)
+
+    prompts = [
+        "starry night over Golden Gate Bridge by van gogh",
+        "beautiful photograph of Mt. Fuji during cherry blossom",
+        "little cute gremlin sitting on a bed, cinematic",
+        "cute grey cat with blue eyes, wearing a bowtie, acrylic painting",
+        "beautiful Renaissance Revival Estate, Hobbit-House, detailed painting, warm colors, 8k, trending on Artstation",
+        "blue owl, big green eyes, portrait, intricate metal design, unreal engine, octane render, realistic",
+        "An astronaut riding a rainbow unicorn, cinematic, dramatic",
+        "close-up photography of old man standing in the rain at night, in a street lit by lamps, leica 35mm",
+    ]
+
+    # batch size, height, width, scheduler, steps, prompt, seed, guidance, refiner scheduler, refiner steps, refiner strength
+    configs = [
+        (1, 832, 1216, "UniPC", 8, prompts[0], None, 5.0, "UniPC", 10, 0.3),
+        (1, 1024, 1024, "DDIM", 24, prompts[1], None, 5.0, "DDIM", 30, 0.3),
+        (1, 1216, 832, "EulerA", 16, prompts[2], 1716921396712843, 5.0, "EulerA", 10, 0.3),
+        (1, 1344, 768, "EulerA", 24, prompts[3], 123698071912362, 5.0, "EulerA", 20, 0.3),
+        (2, 640, 1536, "UniPC", 16, prompts[4], 4312973633252712, 5.0, "UniPC", 10, 0.3),
+        (2, 1152, 896, "DDIM", 24, prompts[5], 1964684802882906, 5.0, "UniPC", 20, 0.3),
+    ]
+
+    # In testing LCM, refiner is disabled so the settings of refiner is not used.
+    if args.lcm:
+        configs = [
+            (1, 1024, 1024, "LCM", 8, prompts[6], None, 1.0, "UniPC", 20, 0.3),
+            (1, 1216, 832, "LCM", 6, prompts[7], 1337, 1.0, "UniPC", 20, 0.3),
+        ]
+
+    # Warm up each combination of (batch size, height, width) once before serving.
+    args.prompt = ["warm up"]
+    args.num_warmup_runs = 1
+    for batch_size, height, width, _, _, _, _, _, _, _, _ in configs:
+        args.batch_size = batch_size
+        args.height = height
+        args.width = width
+        print(f"\nWarm up batch_size={batch_size}, height={height}, width={width}")
+        prompt, negative_prompt = repeat_prompt(args)
+        run_pipelines(args, base, refiner, prompt, negative_prompt, is_warm_up=True)
+
+    # Run pipeline on a list of prompts.
+    args.num_warmup_runs = 0
+    for (
+        batch_size,
+        height,
+        width,
+        scheduler,
+        steps,
+        example_prompt,
+        seed,
+        guidance,
+        refiner_scheduler,
+        refiner_denoising_steps,
+        strength,
+    ) in configs:
+        args.prompt = [example_prompt]
+        args.batch_size = batch_size
+        args.height = height
+        args.width = width
+        args.scheduler = scheduler
+        args.denoising_steps = steps
+        args.seed = seed
+        args.guidance = guidance
+        args.refiner_scheduler = refiner_scheduler
+        args.refiner_denoising_steps = refiner_denoising_steps
+        args.strength = strength
+        base.set_scheduler(scheduler)
+        if refiner:
+            refiner.set_scheduler(refiner_scheduler)
+        prompt, negative_prompt = repeat_prompt(args)
+        run_pipelines(args, base, refiner, prompt, negative_prompt, is_warm_up=False)
+
+    base.teardown()
+    if refiner:
+        refiner.teardown()
+
+
+def run_turbo_demo(args):
+    """Run demo of generating images with test prompts with ORT CUDA provider."""
+    args.engine = "ORT_CUDA"
+    base, refiner = load_pipelines(args, 1)
+
+    from datasets import load_dataset  # noqa: PLC0415
+
+    dataset = load_dataset("Gustavosta/Stable-Diffusion-Prompts")
+    num_rows = dataset["test"].num_rows
+    batch_size = args.batch_size
+    num_batch = int(num_rows / batch_size)
+    args.batch_size = 1
+    for i in range(num_batch):
+        args.prompt = [dataset["test"][i]["Prompt"] for i in range(i * batch_size, (i + 1) * batch_size)]
+        base.set_scheduler(args.scheduler)
+        if refiner:
+            refiner.set_scheduler(args.refiner_scheduler)
+        prompt, negative_prompt = repeat_prompt(args)
+        run_pipelines(args, base, refiner, prompt, negative_prompt, is_warm_up=False)
+
+    base.teardown()
+    if refiner:
+        refiner.teardown()
+
+
+def main(args):
+    no_prompt = isinstance(args.prompt, list) and len(args.prompt) == 1 and not args.prompt[0]
+    if no_prompt:
+        if args.version == "xl-turbo":
+            run_turbo_demo(args)
+        else:
+            run_dynamic_shape_demo(args)
+    else:
+        run_demo(args)
+
+
+if __name__ == "__main__":
+    logging.basicConfig(format="%(funcName)20s: %(message)s", level=logging.INFO)
+
+    parser = arg_parser("Options for Stable Diffusion XL Demo")
+    add_controlnet_arguments(parser)
+    args = parse_arguments(is_xl=True, parser=parser)
+
+    if args.user_compute_stream:
+        import torch
+
+        s = torch.cuda.Stream()
+        with torch.cuda.stream(s):
+            main(args)
+    else:
+        main(args)