onnxruntime_directml-1.20.0-cp313-cp313-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- onnxruntime/LICENSE +21 -0
- onnxruntime/Privacy.md +21 -0
- onnxruntime/ThirdPartyNotices.txt +6508 -0
- onnxruntime/__init__.py +78 -0
- onnxruntime/backend/__init__.py +6 -0
- onnxruntime/backend/backend.py +174 -0
- onnxruntime/backend/backend_rep.py +53 -0
- onnxruntime/capi/DirectML.dll +0 -0
- onnxruntime/capi/__init__.py +4 -0
- onnxruntime/capi/_ld_preload.py +7 -0
- onnxruntime/capi/_pybind_state.py +33 -0
- onnxruntime/capi/convert_npz_to_onnx_adapter.py +48 -0
- onnxruntime/capi/onnxruntime.dll +0 -0
- onnxruntime/capi/onnxruntime_collect_build_info.py +47 -0
- onnxruntime/capi/onnxruntime_inference_collection.py +1108 -0
- onnxruntime/capi/onnxruntime_providers_shared.dll +0 -0
- onnxruntime/capi/onnxruntime_pybind11_state.pyd +0 -0
- onnxruntime/capi/onnxruntime_validation.py +150 -0
- onnxruntime/capi/version_info.py +2 -0
- onnxruntime/datasets/__init__.py +17 -0
- onnxruntime/datasets/logreg_iris.onnx +0 -0
- onnxruntime/datasets/mul_1.onnx +0 -0
- onnxruntime/datasets/sigmoid.onnx +13 -0
- onnxruntime/quantization/CalTableFlatBuffers/KeyValue.py +78 -0
- onnxruntime/quantization/CalTableFlatBuffers/TrtTable.py +90 -0
- onnxruntime/quantization/CalTableFlatBuffers/__init__.py +0 -0
- onnxruntime/quantization/__init__.py +16 -0
- onnxruntime/quantization/base_quantizer.py +532 -0
- onnxruntime/quantization/calibrate.py +1245 -0
- onnxruntime/quantization/execution_providers/qnn/__init__.py +2 -0
- onnxruntime/quantization/execution_providers/qnn/fusion_lpnorm.py +132 -0
- onnxruntime/quantization/execution_providers/qnn/mixed_precision_overrides_utils.py +413 -0
- onnxruntime/quantization/execution_providers/qnn/preprocess.py +307 -0
- onnxruntime/quantization/execution_providers/qnn/quant_config.py +387 -0
- onnxruntime/quantization/fusions/__init__.py +3 -0
- onnxruntime/quantization/fusions/fusion.py +311 -0
- onnxruntime/quantization/fusions/fusion_gelu.py +272 -0
- onnxruntime/quantization/fusions/fusion_layernorm.py +135 -0
- onnxruntime/quantization/matmul_4bits_quantizer.py +1480 -0
- onnxruntime/quantization/matmul_bnb4_quantizer.py +240 -0
- onnxruntime/quantization/onnx_model.py +580 -0
- onnxruntime/quantization/onnx_quantizer.py +1008 -0
- onnxruntime/quantization/operators/__init__.py +2 -0
- onnxruntime/quantization/operators/activation.py +119 -0
- onnxruntime/quantization/operators/argmax.py +18 -0
- onnxruntime/quantization/operators/attention.py +73 -0
- onnxruntime/quantization/operators/base_operator.py +26 -0
- onnxruntime/quantization/operators/binary_op.py +72 -0
- onnxruntime/quantization/operators/concat.py +62 -0
- onnxruntime/quantization/operators/conv.py +258 -0
- onnxruntime/quantization/operators/direct_q8.py +78 -0
- onnxruntime/quantization/operators/embed_layernorm.py +121 -0
- onnxruntime/quantization/operators/gather.py +64 -0
- onnxruntime/quantization/operators/gavgpool.py +62 -0
- onnxruntime/quantization/operators/gemm.py +166 -0
- onnxruntime/quantization/operators/lstm.py +117 -0
- onnxruntime/quantization/operators/matmul.py +231 -0
- onnxruntime/quantization/operators/maxpool.py +34 -0
- onnxruntime/quantization/operators/norm.py +40 -0
- onnxruntime/quantization/operators/pad.py +100 -0
- onnxruntime/quantization/operators/pooling.py +67 -0
- onnxruntime/quantization/operators/qdq_base_operator.py +22 -0
- onnxruntime/quantization/operators/resize.py +34 -0
- onnxruntime/quantization/operators/softmax.py +74 -0
- onnxruntime/quantization/operators/split.py +63 -0
- onnxruntime/quantization/operators/where.py +87 -0
- onnxruntime/quantization/preprocess.py +141 -0
- onnxruntime/quantization/qdq_loss_debug.py +389 -0
- onnxruntime/quantization/qdq_quantizer.py +1187 -0
- onnxruntime/quantization/quant_utils.py +891 -0
- onnxruntime/quantization/quantize.py +748 -0
- onnxruntime/quantization/registry.py +106 -0
- onnxruntime/quantization/shape_inference.py +187 -0
- onnxruntime/quantization/tensor_quant_overrides.py +516 -0
- onnxruntime/tools/__init__.py +10 -0
- onnxruntime/tools/check_onnx_model_mobile_usability.py +47 -0
- onnxruntime/tools/convert_onnx_models_to_ort.py +377 -0
- onnxruntime/tools/file_utils.py +46 -0
- onnxruntime/tools/logger.py +11 -0
- onnxruntime/tools/make_dynamic_shape_fixed.py +72 -0
- onnxruntime/tools/mobile_helpers/__init__.py +0 -0
- onnxruntime/tools/mobile_helpers/coreml_supported_mlprogram_ops.md +33 -0
- onnxruntime/tools/mobile_helpers/coreml_supported_neuralnetwork_ops.md +43 -0
- onnxruntime/tools/mobile_helpers/nnapi_supported_ops.md +58 -0
- onnxruntime/tools/mobile_helpers/usability_checker.py +739 -0
- onnxruntime/tools/offline_tuning.py +169 -0
- onnxruntime/tools/onnx_model_utils.py +413 -0
- onnxruntime/tools/onnx_randomizer.py +85 -0
- onnxruntime/tools/onnxruntime_test.py +164 -0
- onnxruntime/tools/optimize_onnx_model.py +55 -0
- onnxruntime/tools/ort_format_model/__init__.py +25 -0
- onnxruntime/tools/ort_format_model/operator_type_usage_processors.py +663 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/__init__.py +0 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ArgType.py +7 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ArgTypeAndIndex.py +67 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Attribute.py +337 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/AttributeType.py +18 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Checkpoint.py +125 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DeprecatedKernelCreateInfos.py +120 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DeprecatedNodeIndexAndKernelDefHash.py +68 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DeprecatedSessionState.py +96 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DeprecatedSubGraphSessionState.py +72 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Dimension.py +71 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DimensionValue.py +80 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/DimensionValueType.py +8 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/EdgeEnd.py +32 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/FloatProperty.py +67 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Graph.py +320 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/InferenceSession.py +88 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/IntProperty.py +67 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/KernelTypeStrArgsEntry.py +91 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/KernelTypeStrResolver.py +78 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/MapType.py +71 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Model.py +223 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ModuleState.py +141 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Node.py +317 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/NodeEdge.py +126 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/NodeType.py +7 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/NodesToOptimizeIndices.py +160 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/OpIdKernelTypeStrArgsEntry.py +91 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/OperatorSetId.py +67 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/OptimizerGroup.py +117 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ParameterOptimizerState.py +91 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/PropertyBag.py +152 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/RuntimeOptimizationRecord.py +105 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/RuntimeOptimizationRecordContainerEntry.py +91 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/RuntimeOptimizations.py +79 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/SequenceType.py +58 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Shape.py +78 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/SparseTensor.py +114 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/StringProperty.py +67 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/StringStringEntry.py +67 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/Tensor.py +203 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/TensorDataType.py +26 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/TensorTypeAndShape.py +71 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/TypeInfo.py +83 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/TypeInfoValue.py +9 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/ValueInfo.py +84 -0
- onnxruntime/tools/ort_format_model/ort_flatbuffers_py/fbs/__init__.py +6 -0
- onnxruntime/tools/ort_format_model/ort_model_processor.py +86 -0
- onnxruntime/tools/ort_format_model/types.py +84 -0
- onnxruntime/tools/ort_format_model/utils.py +62 -0
- onnxruntime/tools/pytorch_export_contrib_ops.py +108 -0
- onnxruntime/tools/pytorch_export_helpers.py +131 -0
- onnxruntime/tools/qdq_helpers/__init__.py +0 -0
- onnxruntime/tools/qdq_helpers/optimize_qdq_model.py +37 -0
- onnxruntime/tools/reduced_build_config_parser.py +202 -0
- onnxruntime/tools/symbolic_shape_infer.py +3016 -0
- onnxruntime/tools/update_onnx_opset.py +31 -0
- onnxruntime/transformers/__init__.py +8 -0
- onnxruntime/transformers/affinity_helper.py +40 -0
- onnxruntime/transformers/benchmark.py +944 -0
- onnxruntime/transformers/benchmark_helper.py +646 -0
- onnxruntime/transformers/bert_perf_test.py +634 -0
- onnxruntime/transformers/bert_test_data.py +642 -0
- onnxruntime/transformers/compare_bert_results.py +246 -0
- onnxruntime/transformers/constants.py +47 -0
- onnxruntime/transformers/convert_generation.py +3124 -0
- onnxruntime/transformers/convert_tf_models_to_pytorch.py +205 -0
- onnxruntime/transformers/convert_to_packing_mode.py +387 -0
- onnxruntime/transformers/dynamo_onnx_helper.py +104 -0
- onnxruntime/transformers/float16.py +501 -0
- onnxruntime/transformers/fusion_attention.py +1235 -0
- onnxruntime/transformers/fusion_attention_clip.py +257 -0
- onnxruntime/transformers/fusion_attention_sam2.py +534 -0
- onnxruntime/transformers/fusion_attention_unet.py +1304 -0
- onnxruntime/transformers/fusion_attention_vae.py +301 -0
- onnxruntime/transformers/fusion_bart_attention.py +640 -0
- onnxruntime/transformers/fusion_base.py +137 -0
- onnxruntime/transformers/fusion_bias_add.py +58 -0
- onnxruntime/transformers/fusion_biasgelu.py +66 -0
- onnxruntime/transformers/fusion_biassplitgelu.py +111 -0
- onnxruntime/transformers/fusion_conformer_attention.py +143 -0
- onnxruntime/transformers/fusion_embedlayer.py +811 -0
- onnxruntime/transformers/fusion_fastgelu.py +360 -0
- onnxruntime/transformers/fusion_gelu.py +259 -0
- onnxruntime/transformers/fusion_gelu_approximation.py +25 -0
- onnxruntime/transformers/fusion_gemmfastgelu.py +122 -0
- onnxruntime/transformers/fusion_gpt_attention.py +546 -0
- onnxruntime/transformers/fusion_gpt_attention_megatron.py +355 -0
- onnxruntime/transformers/fusion_gpt_attention_no_past.py +260 -0
- onnxruntime/transformers/fusion_group_norm.py +179 -0
- onnxruntime/transformers/fusion_layernorm.py +465 -0
- onnxruntime/transformers/fusion_nhwc_conv.py +100 -0
- onnxruntime/transformers/fusion_options.py +340 -0
- onnxruntime/transformers/fusion_qordered_attention.py +421 -0
- onnxruntime/transformers/fusion_qordered_gelu.py +119 -0
- onnxruntime/transformers/fusion_qordered_layernorm.py +123 -0
- onnxruntime/transformers/fusion_qordered_matmul.py +217 -0
- onnxruntime/transformers/fusion_quickgelu.py +74 -0
- onnxruntime/transformers/fusion_reshape.py +173 -0
- onnxruntime/transformers/fusion_rotary_attention.py +1592 -0
- onnxruntime/transformers/fusion_shape.py +110 -0
- onnxruntime/transformers/fusion_simplified_layernorm.py +159 -0
- onnxruntime/transformers/fusion_skip_group_norm.py +255 -0
- onnxruntime/transformers/fusion_skiplayernorm.py +209 -0
- onnxruntime/transformers/fusion_transpose.py +168 -0
- onnxruntime/transformers/fusion_utils.py +307 -0
- onnxruntime/transformers/huggingface_models.py +167 -0
- onnxruntime/transformers/import_utils.py +20 -0
- onnxruntime/transformers/io_binding_helper.py +442 -0
- onnxruntime/transformers/large_model_exporter.py +395 -0
- onnxruntime/transformers/machine_info.py +221 -0
- onnxruntime/transformers/metrics.py +164 -0
- onnxruntime/transformers/models/bart/__init__.py +12 -0
- onnxruntime/transformers/models/bart/export.py +98 -0
- onnxruntime/transformers/models/bert/__init__.py +12 -0
- onnxruntime/transformers/models/bert/eval_squad.py +329 -0
- onnxruntime/transformers/models/gpt2/__init__.py +12 -0
- onnxruntime/transformers/models/gpt2/benchmark_gpt2.py +413 -0
- onnxruntime/transformers/models/gpt2/convert_to_onnx.py +561 -0
- onnxruntime/transformers/models/gpt2/gpt2_helper.py +1032 -0
- onnxruntime/transformers/models/gpt2/gpt2_parity.py +513 -0
- onnxruntime/transformers/models/gpt2/gpt2_tester.py +501 -0
- onnxruntime/transformers/models/gpt2/parity_check_helper.py +146 -0
- onnxruntime/transformers/models/llama/__init__.py +12 -0
- onnxruntime/transformers/models/llama/benchmark.py +703 -0
- onnxruntime/transformers/models/llama/benchmark_all.py +488 -0
- onnxruntime/transformers/models/llama/benchmark_e2e.py +606 -0
- onnxruntime/transformers/models/llama/convert_to_onnx.py +1027 -0
- onnxruntime/transformers/models/llama/dist_settings.py +57 -0
- onnxruntime/transformers/models/llama/llama_inputs.py +503 -0
- onnxruntime/transformers/models/llama/llama_parity.py +309 -0
- onnxruntime/transformers/models/llama/llama_torch.py +47 -0
- onnxruntime/transformers/models/llama/quant_kv_dataloader.py +108 -0
- onnxruntime/transformers/models/longformer/__init__.py +12 -0
- onnxruntime/transformers/models/longformer/benchmark_longformer.py +821 -0
- onnxruntime/transformers/models/longformer/convert_to_onnx.py +413 -0
- onnxruntime/transformers/models/longformer/generate_test_data.py +347 -0
- onnxruntime/transformers/models/longformer/longformer_helper.py +77 -0
- onnxruntime/transformers/models/phi2/__init__.py +12 -0
- onnxruntime/transformers/models/phi2/convert_to_onnx.py +576 -0
- onnxruntime/transformers/models/phi2/inference_example.py +414 -0
- onnxruntime/transformers/models/sam2/__init__.py +12 -0
- onnxruntime/transformers/models/sam2/benchmark_sam2.py +625 -0
- onnxruntime/transformers/models/sam2/convert_to_onnx.py +260 -0
- onnxruntime/transformers/models/sam2/image_decoder.py +273 -0
- onnxruntime/transformers/models/sam2/image_encoder.py +186 -0
- onnxruntime/transformers/models/sam2/mask_decoder.py +208 -0
- onnxruntime/transformers/models/sam2/nvtx_helper.py +33 -0
- onnxruntime/transformers/models/sam2/prompt_encoder.py +189 -0
- onnxruntime/transformers/models/sam2/sam2_demo.py +322 -0
- onnxruntime/transformers/models/sam2/sam2_image_onnx_predictor.py +280 -0
- onnxruntime/transformers/models/sam2/sam2_utils.py +147 -0
- onnxruntime/transformers/models/stable_diffusion/__init__.py +12 -0
- onnxruntime/transformers/models/stable_diffusion/benchmark.py +1429 -0
- onnxruntime/transformers/models/stable_diffusion/benchmark_controlnet.py +426 -0
- onnxruntime/transformers/models/stable_diffusion/demo_txt2img.py +102 -0
- onnxruntime/transformers/models/stable_diffusion/demo_txt2img_xl.py +268 -0
- onnxruntime/transformers/models/stable_diffusion/demo_utils.py +778 -0
- onnxruntime/transformers/models/stable_diffusion/diffusion_models.py +1319 -0
- onnxruntime/transformers/models/stable_diffusion/diffusion_schedulers.py +1181 -0
- onnxruntime/transformers/models/stable_diffusion/engine_builder.py +296 -0
- onnxruntime/transformers/models/stable_diffusion/engine_builder_ort_cuda.py +388 -0
- onnxruntime/transformers/models/stable_diffusion/engine_builder_ort_trt.py +288 -0
- onnxruntime/transformers/models/stable_diffusion/engine_builder_tensorrt.py +395 -0
- onnxruntime/transformers/models/stable_diffusion/engine_builder_torch.py +108 -0
- onnxruntime/transformers/models/stable_diffusion/optimize_pipeline.py +350 -0
- onnxruntime/transformers/models/stable_diffusion/ort_optimizer.py +136 -0
- onnxruntime/transformers/models/stable_diffusion/pipeline_stable_diffusion.py +831 -0
- onnxruntime/transformers/models/stable_diffusion/trt_utilities.py +12 -0
- onnxruntime/transformers/models/t5/__init__.py +12 -0
- onnxruntime/transformers/models/t5/convert_to_onnx.py +278 -0
- onnxruntime/transformers/models/t5/past_helper.py +150 -0
- onnxruntime/transformers/models/t5/t5_decoder.py +438 -0
- onnxruntime/transformers/models/t5/t5_encoder.py +171 -0
- onnxruntime/transformers/models/t5/t5_encoder_decoder_init.py +299 -0
- onnxruntime/transformers/models/t5/t5_helper.py +272 -0
- onnxruntime/transformers/models/whisper/__init__.py +12 -0
- onnxruntime/transformers/models/whisper/benchmark.py +610 -0
- onnxruntime/transformers/models/whisper/benchmark_all.py +528 -0
- onnxruntime/transformers/models/whisper/convert_to_onnx.py +536 -0
- onnxruntime/transformers/models/whisper/whisper_chain.py +329 -0
- onnxruntime/transformers/models/whisper/whisper_decoder.py +402 -0
- onnxruntime/transformers/models/whisper/whisper_encoder.py +164 -0
- onnxruntime/transformers/models/whisper/whisper_encoder_decoder_init.py +306 -0
- onnxruntime/transformers/models/whisper/whisper_helper.py +524 -0
- onnxruntime/transformers/models/whisper/whisper_openai_helper.py +84 -0
- onnxruntime/transformers/onnx_exporter.py +717 -0
- onnxruntime/transformers/onnx_model.py +1569 -0
- onnxruntime/transformers/onnx_model_bart.py +142 -0
- onnxruntime/transformers/onnx_model_bert.py +481 -0
- onnxruntime/transformers/onnx_model_bert_keras.py +475 -0
- onnxruntime/transformers/onnx_model_bert_tf.py +589 -0
- onnxruntime/transformers/onnx_model_clip.py +40 -0
- onnxruntime/transformers/onnx_model_conformer.py +33 -0
- onnxruntime/transformers/onnx_model_gpt2.py +101 -0
- onnxruntime/transformers/onnx_model_phi.py +930 -0
- onnxruntime/transformers/onnx_model_sam2.py +138 -0
- onnxruntime/transformers/onnx_model_t5.py +791 -0
- onnxruntime/transformers/onnx_model_tnlr.py +227 -0
- onnxruntime/transformers/onnx_model_unet.py +259 -0
- onnxruntime/transformers/onnx_model_vae.py +43 -0
- onnxruntime/transformers/onnx_utils.py +55 -0
- onnxruntime/transformers/optimizer.py +612 -0
- onnxruntime/transformers/profiler.py +725 -0
- onnxruntime/transformers/quantize_helper.py +76 -0
- onnxruntime/transformers/shape_infer_helper.py +122 -0
- onnxruntime/transformers/shape_optimizer.py +401 -0
- onnxruntime/transformers/torch_onnx_export_helper.py +74 -0
- onnxruntime_directml-1.20.0.dist-info/METADATA +187 -0
- onnxruntime_directml-1.20.0.dist-info/RECORD +305 -0
- onnxruntime_directml-1.20.0.dist-info/WHEEL +5 -0
- onnxruntime_directml-1.20.0.dist-info/entry_points.txt +2 -0
- onnxruntime_directml-1.20.0.dist-info/top_level.txt +1 -0
onnxruntime/transformers/models/gpt2/gpt2_helper.py
@@ -0,0 +1,1032 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# This script helps ONNX conversion and validation for the GPT-2 model with past state.
import logging
import os
import pickle
import random
import shutil
import tempfile
import time
from pathlib import Path
from typing import Dict, List, Tuple, Union

import numpy
import onnx
import torch
from benchmark_helper import Precision
from float16 import float_to_float16_max_diff
from fusion_options import FusionOptions
from io_binding_helper import IOBindingHelper
from onnx_model import OnnxModel
from optimizer import optimize_model
from torch_onnx_export_helper import torch_onnx_export
from transformers import GPT2Config, GPT2LMHeadModel, GPT2Model, TFGPT2Model

logger = logging.getLogger(__name__)

PRETRAINED_GPT2_MODELS = ["distilgpt2", "gpt2", "gpt2-medium", "gpt2-large", "gpt2-xl"]

DEFAULT_TOLERANCE = {
    Precision.FLOAT32: 0.0005,
    Precision.FLOAT16: 0.2,
    Precision.INT8: 3.0,
}
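# DEFAULT_TOLERANCE supplies a per-precision absolute tolerance that callers
# can pass as atol when validating PyTorch/ONNX Runtime parity.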


class GPT2ModelNoPastState(GPT2Model):
    """Here we wrap a class to disable past state output."""

    def __init__(self, config):
        super().__init__(config)

    def forward(self, input_ids):
        return super().forward(input_ids, use_cache=False, return_dict=False)


class TFGPT2ModelNoPastState(TFGPT2Model):
    """Here we wrap a class to disable past state output."""

    def __init__(self, config):
        config.use_cache = False
        super().__init__(config)

    def forward(self, input_ids):
        return super().call(input_ids, use_cache=False)


class MyGPT2Model(GPT2Model):
    """Here we wrap a class for Onnx model conversion for GPT2Model with past state."""

    def __init__(self, config):
        super().__init__(config)

    @staticmethod
    def post_process(result, num_layer):
        if isinstance(result[1][0], (tuple, list)):
            assert len(result[1]) == num_layer and len(result[1][0]) == 2
            # assert len(result[1][0][0].shape) == 4 and result[1][0][0].shape == result[1][0][1].shape
            present = []
            for i in range(num_layer):
                # Since transformers v4.*, past keys and values are separate outputs.
                # Here we concatenate them into one tensor to be compatible with the Attention operator.
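                # Each of result[1][i][0] (key) and result[1][i][1] (value) has shape
                # (batch_size, num_heads, seq_len, head_size); unsqueeze(0) adds a leading
                # axis so the concat below yields (2, batch_size, num_heads, seq_len, head_size).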
                present.append(
                    torch.cat(
                        (result[1][i][0].unsqueeze(0), result[1][i][1].unsqueeze(0)),
                        dim=0,
                    )
                )
            return (result[0], tuple(present))

        return result

    def forward(self, input_ids, position_ids, attention_mask, *past):
        result = super().forward(
            input_ids,
            position_ids=position_ids,
            attention_mask=attention_mask,
            past_key_values=past,
            return_dict=False,
        )
        return MyGPT2Model.post_process(result, self.config.n_layer)


class MyGPT2LMHeadModel(GPT2LMHeadModel):
    """Here we wrap a class for Onnx model conversion for GPT2LMHeadModel with past state."""

    def __init__(self, config):
        super().__init__(config)

    def forward(self, input_ids, position_ids, attention_mask, *past):
        result = super().forward(
            input_ids,
            position_ids=position_ids,
            attention_mask=attention_mask,
            past_key_values=past,
            return_dict=False,
        )

        return MyGPT2Model.post_process(result, self.config.n_layer)


class MyGPT2LMHeadModel_NoPadding(GPT2LMHeadModel):  # noqa: N801
    """Here we wrap a class for Onnx model conversion for GPT2LMHeadModel with past state and no padding.

    When you always use batch_size=1 in inference, there is no padding in inputs. In that case, position_ids
    and attention_mask need not be in inputs.
    """

    def __init__(self, config):
        super().__init__(config)

    def forward(self, input_ids, *past):
        result = super().forward(input_ids, past_key_values=past, return_dict=False)

        return MyGPT2Model.post_process(result, self.config.n_layer)


# Maps model class name to a tuple of model class, name of first output, and whether inputs use padding
MODEL_CLASSES = {
    "GPT2LMHeadModel": (MyGPT2LMHeadModel, "logits", True),
    "GPT2LMHeadModel_NoPadding": (MyGPT2LMHeadModel_NoPadding, "logits", False),
    "GPT2Model": (MyGPT2Model, "last_state", True),
}


class Gpt2Inputs:
    def __init__(self, input_ids, position_ids, attention_mask, past):
        self.input_ids: torch.LongTensor = input_ids
        self.position_ids: torch.LongTensor = position_ids
        self.attention_mask: Union[torch.LongTensor, torch.FloatTensor, torch.HalfTensor] = attention_mask
        self.past: Union[List[torch.FloatTensor], List[torch.HalfTensor]] = past

    def to_list(self) -> List:
        input_list = [v for v in [self.input_ids, self.position_ids, self.attention_mask] if v is not None]
        if self.past:
            input_list.extend(self.past)

        return input_list

    def to_tuple(self) -> Tuple:
        return tuple(v for v in [self.input_ids, self.position_ids, self.attention_mask, self.past] if v is not None)

    def to_fp32(self):
        # For attention mask, only convert fp16 to fp32, and keep the original type if it is integer.
        attention_mask = None
        if self.attention_mask is not None:
            attention_mask = (
                self.attention_mask.to(dtype=torch.float32)
                if (self.attention_mask.dtype == torch.float16)
                else self.attention_mask
            )

        past = [p.to(dtype=torch.float32) for p in self.past]
        return Gpt2Inputs(self.input_ids, self.position_ids, attention_mask, past)


class Gpt2Helper:
    """A helper class for Gpt2 model conversion, inference and verification."""

    @staticmethod
    def get_dummy_inputs(
        batch_size: int,
        past_sequence_length: int,
        sequence_length: int,
        num_attention_heads: int,
        hidden_size: int,
        num_layer: int,
        vocab_size: int,
        device: torch.device,
        float16: bool = False,
        has_position_ids: bool = True,
        has_attention_mask: bool = True,
        input_ids_dtype: torch.dtype = torch.int32,
        position_ids_dtype: torch.dtype = torch.int32,
        attention_mask_dtype: torch.dtype = torch.int32,
        left_side_padding: bool = True,
    ) -> Gpt2Inputs:
        """Create random inputs for GPT2 model.
        Returns torch tensors of input_ids, position_ids, attention_mask and a list of past state tensors.
        """
        float_type = torch.float16 if float16 else torch.float32
        past_shape = [
            2,
            batch_size,
            num_attention_heads,
            past_sequence_length,
            int(hidden_size / num_attention_heads),
        ]
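        # Layout of past_{i}: dim 0 stacks key and value, followed by
        # (batch_size, num_heads, past_seq_len, head_size), where head_size = hidden_size / num_attention_heads.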

        past = [(torch.rand(past_shape, dtype=float_type, device=device) * 2.0 - 1.0) for _ in range(num_layer)]
        input_ids = torch.randint(
            low=0,
            high=vocab_size - 1,
            size=(batch_size, sequence_length),
            dtype=input_ids_dtype,
            device=device,
        )

        attention_mask = None
        if has_attention_mask:
            total_sequence_length = past_sequence_length + sequence_length
            attention_mask = torch.ones(
                [batch_size, total_sequence_length],
                dtype=attention_mask_dtype,
                device=device,
            )

            if total_sequence_length >= 2:
                for i in range(batch_size):
                    padding_length = random.randint(0, total_sequence_length - 1)
                    if left_side_padding:
                        attention_mask[i, :padding_length] = 0
                    else:  # right side padding
                        attention_mask[i, total_sequence_length - padding_length :] = 0

        # Deduce position_ids from attention mask
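        # For example: mask [0, 0, 1, 1, 1] gives cumsum - 1 = [-1, -1, 0, 1, 2]; masked_fill_
        # clamps the padded (negative) entries to 0, producing position_ids [0, 0, 0, 1, 2].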
        position_ids = None
        if has_position_ids:
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(position_ids < 0, 0)
            position_ids = position_ids[:, past_sequence_length:].to(position_ids_dtype)

        return Gpt2Inputs(input_ids, position_ids, attention_mask, past)

    @staticmethod
    def get_output_shapes(
        batch_size: int,
        past_sequence_length: int,
        sequence_length: int,
        config: GPT2Config,
        model_class: str = "GPT2LMHeadModel",
    ) -> Dict[str, List[int]]:
        """Returns a dictionary with output name as key, and shape as value."""
        num_attention_heads = config.num_attention_heads
        hidden_size = config.hidden_size
        num_layer = config.num_hidden_layers
        vocab_size = config.vocab_size

        output_name = MODEL_CLASSES[model_class][1]

        last_state_shape = [
            batch_size,
            sequence_length,
            vocab_size if output_name == "logits" else hidden_size,
        ]
        present_state_shape = [
            2,
            batch_size,
            num_attention_heads,
            past_sequence_length + sequence_length,
            int(hidden_size / num_attention_heads),
        ]

        output_shapes = {output_name: last_state_shape}
        for i in range(num_layer):
            output_shapes["present_" + str(i)] = present_state_shape

        return output_shapes

    @staticmethod
    def auto_increase_buffer_size(output_buffers, output_shapes):
        for key in output_shapes:
            assert key in output_buffers
            buffer = output_buffers[key]
            if numpy.prod(output_shapes[key]) > buffer.nelement():
                output_buffers[key] = torch.empty(
                    numpy.prod(output_shapes[key]),
                    dtype=buffer.dtype,
                    device=buffer.device,
                )

    @staticmethod
    def get_output_buffers(output_shapes, device, is_float16=False):
        """Returns a dictionary with output name as key, and a 1D tensor as value. Each tensor has enough space for the given shape."""
        data_type = torch.float16 if is_float16 else torch.float32

        output_buffers = {}
        for name, shape in output_shapes.items():
            output_buffers[name] = torch.empty(numpy.prod(shape), dtype=data_type, device=device)
        return output_buffers

    @staticmethod
    def diff_outputs(torch_outputs, ort_outputs, relative=False):
        """Returns the maximum difference between PyTorch and OnnxRuntime outputs."""
        expected_outputs = torch_outputs[0].cpu().numpy()
        diff = numpy.abs(expected_outputs - ort_outputs[0])
        if relative:
            return numpy.amax(diff / (numpy.abs(expected_outputs) + 1e-6))
        else:
            return numpy.amax(diff)

    @staticmethod
    def compare_outputs(torch_outputs, ort_outputs, rtol=1e-03, atol=1e-03, **kwargs):
        """Returns True if torch and ORT outputs are close for given thresholds, and False otherwise.
        Note: kwargs is needed since Gpt2BeamSearchHelper.compare_outputs has an extra parameter model_class.
        """
        is_close = numpy.allclose(ort_outputs[0], torch_outputs[0].cpu().numpy(), rtol=rtol, atol=atol)
        logger.debug(f"PyTorch and OnnxRuntime output 0 (last_state) are close: {is_close}")

        is_all_close = is_close
        num_layers = len(ort_outputs) - 1

        for layer in range(num_layers):
            is_close = numpy.allclose(
                ort_outputs[1 + layer],
                torch_outputs[1][layer].cpu().numpy(),
                rtol=rtol,
                atol=atol,
            )
            logger.debug(f"PyTorch and OnnxRuntime layer {layer} state (present_{layer}) are close: {is_close}")
            is_all_close = is_all_close and is_close

        if not is_all_close:
            max_abs_diff = Gpt2Helper.diff_outputs(torch_outputs, ort_outputs)
            logger.info(f"PyTorch and OnnxRuntime results are not all close: max_abs_diff={max_abs_diff:.5f}")

        return is_all_close

    @staticmethod
    def compare_outputs_v2(torch_outputs, ort_outputs, atol=1e-06):
        """Compare outputs from PyTorch and OnnxRuntime.

        Args:
            torch_outputs (Tuple[torch.Tensor]): PyTorch model output
            ort_outputs (List[numpy.ndarray]): OnnxRuntime output
            atol (float, optional): Absolute tolerance. Defaults to 1e-06.

        Returns:
            is_all_close (bool): whether all elements are close.
            max_abs_diff (float): maximum absolute difference.
            max_diff_output_index (int): index of the output with the maximum difference.
            messages (List[str]): a debug message for each output.
            is_top1_matched (bool): whether the argmax of the logits matches.
        """
        is_all_close = True
        is_top1_matched = False
        max_diffs = []
        messages = []
        for i in range(len(ort_outputs)):
            ort_output = ort_outputs[i]
            torch_output = (torch_outputs[0] if i == 0 else torch_outputs[1][i - 1]).cpu().numpy()
            is_close = numpy.allclose(ort_output, torch_output, atol=atol, rtol=0)
            max_diffs.append(numpy.amax(numpy.abs(torch_output - ort_output)))
            is_all_close = is_all_close and is_close

            if numpy.isnan(torch_output).any():
                logger.debug(f"PyTorch output {i} has nan")
            if numpy.isinf(torch_output).any():
                logger.debug(f"PyTorch output {i} has inf")
            if numpy.isnan(ort_output).any():
                logger.debug(f"ORT output {i} has nan")
            if numpy.isinf(ort_output).any():
                logger.debug(f"ORT output {i} has inf")

            diff = numpy.fabs(ort_output - torch_output)
            idx = numpy.unravel_index(diff.argmax(), diff.shape)
            messages.append(
                f"diff={diff[idx]:.9f} index={idx} ort={ort_output[idx]:.9f} torch={float(torch_output[idx]):.9f}"
            )

            if i == 0:  # logits
                ort_max_index = numpy.unravel_index(numpy.argmax(ort_output, axis=None), ort_output.shape)
                torch_max_index = numpy.unravel_index(numpy.argmax(torch_output, axis=None), torch_output.shape)
                is_top1_matched = numpy.array_equal(ort_max_index, torch_max_index)

        max_diff_output_index = max_diffs.index(max(max_diffs))
        return (
            is_all_close,
            max(max_diffs),
            max_diff_output_index,
            messages,
            is_top1_matched,
        )

    @staticmethod
    def export_onnx(
        model,
        device,
        onnx_model_path: str,
        verbose: bool = False,
        use_external_data_format: bool = False,
        has_position_ids: bool = True,
        has_attention_mask: bool = True,
        input_ids_dtype: torch.dtype = torch.int32,
        position_ids_dtype: torch.dtype = torch.int32,
        attention_mask_dtype: torch.dtype = torch.int32,
    ):
        """Export GPT-2 model with past state to ONNX model."""
        config: GPT2Config = model.config
        num_layer = config.n_layer
        dummy_inputs = Gpt2Helper.get_dummy_inputs(
            batch_size=1,
            past_sequence_length=1,
            sequence_length=1,
            num_attention_heads=config.num_attention_heads,
            hidden_size=config.hidden_size,
            num_layer=num_layer,
            vocab_size=config.vocab_size,
            device=device,
            float16=False,
            has_position_ids=has_position_ids,
            has_attention_mask=has_attention_mask,
            input_ids_dtype=input_ids_dtype,
            position_ids_dtype=position_ids_dtype,
            attention_mask_dtype=attention_mask_dtype,
        )
        input_list = dummy_inputs.to_list()

        with torch.no_grad():
            outputs = model(*input_list)

        past_names = [f"past_{i}" for i in range(num_layer)]
        present_names = [f"present_{i}" for i in range(num_layer)]

        # GPT2Model outputs last_state; GPT2LMHeadModel outputs logits (prediction_scores)
        assert outputs[0].shape[2] == config.vocab_size or outputs[0].shape[2] == config.hidden_size
        output_names = ["logits" if outputs[0].shape[2] == config.vocab_size else "last_state", *present_names]

        # Shape of input tensors:
        #    input_ids: (batch_size, seq_len)
        #    past_{i}: (2, batch_size, num_heads, past_seq_len, hidden_size/num_heads)
        #    attention_mask: (batch_size, past_seq_len + seq_len)
        # Shape of output tensors:
        #    last_state: (batch_size, seq_len, hidden_size)
        #       or logits: (batch_size, seq_len, vocab_size)
        #    present_{i}: (2, batch_size, num_heads, past_seq_len + seq_len, hidden_size/num_heads)
        dynamic_axes = {
            "input_ids": {0: "batch_size", 1: "seq_len"},
            output_names[0]: {0: "batch_size", 1: "seq_len"},
        }
        for name in past_names:
            dynamic_axes[name] = {1: "batch_size", 3: "past_seq_len"}
        for name in present_names:
            dynamic_axes[name] = {1: "batch_size", 3: "total_seq_len"}

        input_names = ["input_ids"]
        if has_position_ids:
            dynamic_axes["position_ids"] = {0: "batch_size", 1: "seq_len"}
            input_names.append("position_ids")
        if has_attention_mask:
            dynamic_axes["attention_mask"] = {0: "batch_size", 1: "total_seq_len"}
            input_names.append("attention_mask")
        input_names.extend(past_names)

        assert len(outputs) == 2 and len(outputs[1]) == num_layer

        logger.info(
            f"Shapes: input_ids={dummy_inputs.input_ids.shape} past={dummy_inputs.past[0].shape} output={outputs[0].shape} present={outputs[1][0].shape}"
        )

        Path(onnx_model_path).parent.mkdir(parents=True, exist_ok=True)

        if use_external_data_format:
            # We let PyTorch export onnx to a temp directory first, then convert external data to one file.
            with tempfile.TemporaryDirectory() as tmp_dir_name:
                temp_onnx_model_path = os.path.join(tmp_dir_name, "gpt2.onnx")
                Path(temp_onnx_model_path).parent.mkdir(parents=True, exist_ok=True)

                torch_onnx_export(
                    model,
                    args=tuple(input_list),
                    f=temp_onnx_model_path,
                    export_params=True,
                    input_names=input_names,
                    output_names=output_names,
                    dynamic_axes=dynamic_axes,
                    opset_version=11,
                    do_constant_folding=True,
                    use_external_data_format=True,
                    verbose=verbose,
                )

                model = onnx.load_model(temp_onnx_model_path, load_external_data=True)
                OnnxModel.save(
                    model,
                    onnx_model_path,
                    save_as_external_data=True,
                    all_tensors_to_one_file=True,
                )
        else:
            torch_onnx_export(
                model,
                args=tuple(input_list),
                f=onnx_model_path,
                export_params=True,
                input_names=input_names,
                output_names=output_names,
                dynamic_axes=dynamic_axes,
                opset_version=11,
                do_constant_folding=True,
                use_external_data_format=False,
                verbose=verbose,
            )

    @staticmethod
    def optimize_onnx(
        onnx_model_path,
        optimized_model_path,
        is_float16,
        num_attention_heads,
        hidden_size,
        use_external_data_format=False,
        auto_mixed_precision=False,
        stage=0,
        **kwargs,
    ):
        """Optimize ONNX model with an option to convert it to use mixed precision."""
        optimization_options = FusionOptions("gpt2")

        m = optimize_model(
            onnx_model_path,
            model_type="gpt2",
            num_heads=num_attention_heads,
            hidden_size=hidden_size,
            opt_level=0,
            optimization_options=optimization_options,
            use_gpu=False,
        )

        if is_float16:
            if auto_mixed_precision:
                Gpt2Helper.auto_mixed_precision(m)
            else:
                if "keep_io_types" not in kwargs:
                    kwargs["keep_io_types"] = False
                m.convert_float_to_float16(use_symbolic_shape_infer=True, **kwargs)

        m.save_model_to_file(optimized_model_path, use_external_data_format)
        return m

    @staticmethod
    def auto_mixed_precision(
        onnx_model: OnnxModel,
        op_block_list: List[str] = [  # noqa: B006
            "Add",
            "LayerNormalization",
            "SkipLayerNormalization",
            "FastGelu",
            "EmbedLayerNormalization",
        ],
    ):
        """Convert GPT-2 model to mixed precision.
        It detects whether the original model has fp16 weights, and sets parameters for float16 conversion automatically.
        Args:
            onnx_model (OnnxModel): optimized ONNX model
            op_block_list (List[str], optional): operators to compute in fp32. Defaults to ["Add", "LayerNormalization",
                "SkipLayerNormalization", "FastGelu", "EmbedLayerNormalization"]
        Returns:
            parameters (dict): a dictionary of parameters used in float16 conversion
        """
        op_full_set = {node.op_type for node in onnx_model.nodes()}
        fp32_op_set = set(op_block_list)
        fp16_op_set = op_full_set.difference(fp32_op_set)
        logger.info(f"fp32 op: {fp32_op_set} fp16 op: {fp16_op_set}")

        # logits is the first output
        logits_output_name = onnx_model.graph().output[0].name

        # We use the weight in the last MatMul node to detect whether the model is stored with float16 weights from training.
        is_weight_fp16_precision = False
        output_name_to_node = onnx_model.output_name_to_node()
        assert logits_output_name in output_name_to_node
        node = output_name_to_node[logits_output_name]
        last_matmul_node = None
        if node.op_type == "MatMul":
            last_matmul_node = node
            logger.info(f"Found last MatMul node for logits: {node.name}")
            initializer = None
            for input in node.input:
                initializer = onnx_model.get_initializer(input)
                if initializer is not None:
                    break

            # When the max difference of values after converting float to float16 is lower than a threshold (1e-6),
            # we can deduce that the weights are stored in float16 precision.
            max_diff = float_to_float16_max_diff(initializer)
            logger.debug(f"max diff of converting weights in last MatMul node {node.name}: {max_diff}")
            is_weight_fp16_precision = max_diff < 1e-6
        else:
            logger.warning(f"Failed to find MatMul node for logits. Found {node.op_type} of node {node.name}")

        keep_io_types = []
        node_block_list = []
        if (not is_weight_fp16_precision) and (last_matmul_node is not None):
            # When the original weights are float32 precision, keeping logits and the last MatMul in float32 can give better precision.
            keep_io_types = [logits_output_name]
            node_block_list = [last_matmul_node.name]

        parameters = {
            "keep_io_types": keep_io_types,
            "op_block_list": op_block_list,
            "node_block_list": node_block_list,
            "force_fp16_initializers": is_weight_fp16_precision,
        }

        logger.info(f"auto_mixed_precision parameters: {parameters}")
        onnx_model.convert_float_to_float16(use_symbolic_shape_infer=True, **parameters)

        return parameters

    @staticmethod
    def pytorch_inference(model, inputs: Gpt2Inputs, total_runs: int = 0):
        """Run inference of PyTorch model. Also returns the average latency in ms when total_runs > 0."""
        logger.debug("start pytorch_inference")

        # Convert inputs to fp32 since the PyTorch model cannot deal with half input.
        input_list = inputs.to_fp32().to_list()

        with torch.no_grad():
            outputs = model(*input_list)

        if total_runs == 0:
            return outputs

        latency = []
        with torch.no_grad():
            for _ in range(total_runs):
                start = time.time()
                outputs = model(*input_list)
                latency.append(time.time() - start)

        average_latency = sum(latency) * 1000 / len(latency)
        logger.debug("PyTorch inference time = {} ms".format(format(average_latency, ".2f")))  # noqa: G001

        return outputs, average_latency

    @staticmethod
    def onnxruntime_inference(ort_session, inputs: Gpt2Inputs, total_runs: int = 0):
        """Run inference of ONNX model. Also returns the average latency in ms when total_runs > 0."""
        logger.debug("start onnxruntime_inference")

        ort_inputs = {"input_ids": numpy.ascontiguousarray(inputs.input_ids.cpu().numpy())}

        if inputs.past is not None:
            for i, past_i in enumerate(inputs.past):
                ort_inputs[f"past_{i}"] = numpy.ascontiguousarray(past_i.cpu().numpy())

        if inputs.attention_mask is not None:
            ort_inputs["attention_mask"] = numpy.ascontiguousarray(inputs.attention_mask.cpu().numpy())

        if inputs.position_ids is not None:
            ort_inputs["position_ids"] = numpy.ascontiguousarray(inputs.position_ids.cpu().numpy())

        ort_outputs = ort_session.run(None, ort_inputs)
        if total_runs == 0:
            return ort_outputs

        latency = []
        for _ in range(total_runs):
            start = time.time()
            ort_outputs = ort_session.run(None, ort_inputs)
            latency.append(time.time() - start)

        average_latency = sum(latency) * 1000 / len(latency)
        logger.debug("OnnxRuntime Inference time = {} ms".format(format(average_latency, ".2f")))  # noqa: G001

        return ort_outputs, average_latency

    @staticmethod
    def prepare_io_binding(
        ort_session,
        input_ids,
        position_ids,
        attention_mask,
        past,
        output_buffers,
        output_shapes,
    ):
        """Returns an IO binding object for a session."""
        return IOBindingHelper.prepare_io_binding(
            ort_session,
            input_ids,
            position_ids,
            attention_mask,
            past,
            output_buffers,
            output_shapes,
        )

    @staticmethod
    def get_outputs_from_io_binding_buffer(ort_session, output_buffers, output_shapes, return_numpy=True):
        """Copy results to CPU. Returns a list of numpy arrays."""
        return IOBindingHelper.get_outputs_from_io_binding_buffer(
            ort_session, output_buffers, output_shapes, return_numpy
        )

    @staticmethod
    def onnxruntime_inference_with_binded_io(
        ort_session,
        inputs: Gpt2Inputs,
        output_buffers: Dict[str, torch.Tensor],
        output_shapes: Dict[str, List[int]],
        total_runs: int = 0,
        return_numpy: bool = True,
        include_copy_output_latency: bool = False,
    ):
        """Inference with IO binding. Returns outputs, and optionally latency when total_runs > 0."""
        logger.debug("start onnxruntime_inference_with_binded_io")

        # Bind inputs and outputs to onnxruntime session
        io_binding = Gpt2Helper.prepare_io_binding(
            ort_session,
            inputs.input_ids,
            inputs.position_ids,
            inputs.attention_mask,
            inputs.past,
            output_buffers,
            output_shapes,
        )

        # Run onnxruntime with io binding
        ort_session.run_with_iobinding(io_binding)

        # Copy results to cpu for verification
        ort_outputs = Gpt2Helper.get_outputs_from_io_binding_buffer(
            ort_session, output_buffers, output_shapes, return_numpy
        )

        if total_runs == 0:
            return ort_outputs

        latency = []
        for _ in range(total_runs):
            start = time.time()
            # Run onnxruntime with io binding
            ort_session.run_with_iobinding(io_binding)
            if include_copy_output_latency:
                _ = Gpt2Helper.get_outputs_from_io_binding_buffer(
                    ort_session, output_buffers, output_shapes, return_numpy
                )
            latency.append(time.time() - start)

        average_latency = sum(latency) * 1000 / len(latency)
        logger.debug("OnnxRuntime with IO binding inference time = %.2f ms", average_latency)

        return ort_outputs, average_latency

    @staticmethod
    def save_outputs(i, ort_outputs, torch_outputs):
        with open(f"ort_outputs_{i}.pickle", "wb") as f:
            pickle.dump(ort_outputs, f)
        logger.info(f"ORT outputs are saved to ort_outputs_{i}.pickle")

        with open(f"torch_outputs_{i}.pickle", "wb") as f:
            pickle.dump(torch_outputs, f)
        logger.info(f"Torch outputs are saved to torch_outputs_{i}.pickle")

    @staticmethod
    def save_inputs(i, dummy_inputs):
        with open(f"dummy_inputs_{i}.pickle", "wb") as f:
            pickle.dump(dummy_inputs, f)
        logger.info(f"inputs are saved to dummy_inputs_{i}.pickle")

    @staticmethod
    def test_parity(
        ort_session,
        model,
        device,
        is_float16=False,
        rtol=5e-4,
        atol=5e-4,
        test_cases_per_run=10000,
        total_runs=1,
        use_io_binding=True,
        model_class="GPT2LMHeadModel",
        has_position_ids=True,
        has_attention_mask=True,
        input_ids_dtype=torch.int32,
        position_ids_dtype=torch.int32,
        attention_mask_dtype=torch.int32,
        stage=0,
        verbose=False,
        enable_pickle_output=False,
    ):
        """Generate random inputs and compare the results of PyTorch and Onnx Runtime."""

        config: GPT2Config = model.config

        logger.info(
            f"Running parity test (atol={atol}, test_cases={test_cases_per_run}, runs={total_runs}, use_io_binding={use_io_binding}, model_class={model_class}, is_float16={is_float16}) ..."
        )

        max_batch_size = 8
        max_past_seq_len = 4  # Do not use a large number here for a higher chance of hitting empty past (past_seq_len=0)
        max_seq_len = 2

        output_buffers = None
        if use_io_binding:
            max_output_shapes = Gpt2Helper.get_output_shapes(
                max_batch_size, max_past_seq_len, max_seq_len, config, model_class
            )
            output_buffers = Gpt2Helper.get_output_buffers(max_output_shapes, device, is_float16)

        passed_test_cases = 0
        top1_matched_cases = 0

        max_abs_diff_list = []
        top1_matched_cases_per_run = [0] * total_runs
        total_test_cases = test_cases_per_run * total_runs
        for i in range(total_test_cases):
            run_id = int(i / test_cases_per_run)
            sequence_length = random.randint(1, max_seq_len)
            past_sequence_length = 0 if (stage == 1) else random.randint(0, max_past_seq_len)
            batch_size = random.randint(1, max_batch_size)

            logger.debug(
                f"Running parity test for batch_size={batch_size} past_sequence_length={past_sequence_length}..."
            )
            dummy_inputs = Gpt2Helper.get_dummy_inputs(
                batch_size,
                past_sequence_length,
                sequence_length,
                config.num_attention_heads,
                config.hidden_size,
                config.n_layer,
                config.vocab_size,
                device,
                is_float16,
                has_position_ids,
                has_attention_mask,
                input_ids_dtype=input_ids_dtype,
                position_ids_dtype=position_ids_dtype,
                attention_mask_dtype=attention_mask_dtype,
                left_side_padding=True,
            )
            outputs = Gpt2Helper.pytorch_inference(model, dummy_inputs)
            if not use_io_binding:
                ort_outputs = Gpt2Helper.onnxruntime_inference(ort_session, dummy_inputs)
            else:
                output_shapes = Gpt2Helper.get_output_shapes(
                    batch_size,
                    past_sequence_length,
                    sequence_length,
                    config,
                    model_class,
                )
                ort_outputs = Gpt2Helper.onnxruntime_inference_with_binded_io(
                    ort_session, dummy_inputs, output_buffers, output_shapes
                )

            (
                is_all_close,
                max_abs_diff,
                max_diff_output_index,
                messages,
                is_top1_matched,
            ) = Gpt2Helper.compare_outputs_v2(outputs, ort_outputs, atol=atol)
            if not numpy.isnan(max_abs_diff):
                max_abs_diff_list.append(max_abs_diff)
            if is_all_close:
                passed_test_cases += 1

            if is_top1_matched:
                top1_matched_cases += 1
                top1_matched_cases_per_run[run_id] += 1

            if verbose and not is_all_close:
                logger.info(
                    f"test_case={i} batch_size={batch_size} past_sequence_length={past_sequence_length} sequence_length={sequence_length} MaxDiff={max_abs_diff}"
                )
                for i, message in enumerate(messages):  # noqa: PLW2901
                    logger.info(f"\t{i}: Name={ort_session.get_outputs()[i].name}, {message}")

            # Collect data for debugging
            if enable_pickle_output and (numpy.isnan(max_abs_diff) or max_abs_diff > 100 * atol):
                Gpt2Helper.save_inputs(i, dummy_inputs)
                Gpt2Helper.save_outputs(i, ort_outputs, outputs)

        if max_abs_diff_list:
            result = {
                f"max_diff_percentile_{p}": f"{numpy.percentile(max_abs_diff_list, p):.5f}" for p in [50, 90, 95, 99]
            }
        else:
            result = {f"max_diff_percentile_{p}": "nan" for p in [50, 90, 95, 99]}

        result["top1_match_rate"] = top1_matched_cases * 1.0 / total_test_cases
        result["top1_match_rate_per_run"] = [x * 1.0 / test_cases_per_run for x in top1_matched_cases_per_run]
        result["diff_pass_rate"] = passed_test_cases * 1.0 / total_test_cases
        result["nan_rate"] = (total_test_cases - len(max_abs_diff_list)) * 1.0 / total_test_cases

        logger.info(
            f"Parity Test Cases={total_test_cases}; Passed={passed_test_cases}; Nan={total_test_cases - len(max_abs_diff_list)}; Top1_Matched={top1_matched_cases}"
        )

        if passed_test_cases > 0.95 * total_test_cases:
            logger.info(f"Parity is good: passed rate={int(passed_test_cases * 100 / total_test_cases):.0f}%")

        return result

    @staticmethod
    def test_performance(
        ort_session,
        model,
        device,
        is_float16=False,
        total_runs=100,
        use_io_binding=True,
        model_class="GPT2LMHeadModel",
        has_position_ids=True,
        has_attention_mask=True,
        input_ids_dtype=torch.int32,
        position_ids_dtype=torch.int32,
        attention_mask_dtype=torch.int32,
        batch_size=8,
        sequence_length=1,
        past_sequence_length=32,
    ):
        """Generate random inputs and measure average latency of Onnx Runtime."""

        config: GPT2Config = model.config

        output_buffers = None
        if use_io_binding:
            output_shapes = Gpt2Helper.get_output_shapes(
                batch_size, past_sequence_length, sequence_length, config, model_class
            )
            output_buffers = Gpt2Helper.get_output_buffers(output_shapes, device, is_float16)

        dummy_inputs = Gpt2Helper.get_dummy_inputs(
            batch_size,
            past_sequence_length,
            sequence_length,
            config.num_attention_heads,
            config.hidden_size,
            config.n_layer,
            config.vocab_size,
            device,
            is_float16,
            has_position_ids,
            has_attention_mask,
            input_ids_dtype=input_ids_dtype,
            position_ids_dtype=position_ids_dtype,
            attention_mask_dtype=attention_mask_dtype,
        )

        if not use_io_binding:
            _, latency = Gpt2Helper.onnxruntime_inference(ort_session, dummy_inputs, total_runs)
        else:
            _, latency = Gpt2Helper.onnxruntime_inference_with_binded_io(
                ort_session, dummy_inputs, output_buffers, output_shapes, total_runs
            )

        return latency

    @staticmethod
    def torchscript(model, config, device, has_position_ids=True, has_attention_mask=True):
        """JIT trace for TorchScript."""
        input_list = Gpt2Helper.get_dummy_inputs(
            batch_size=1,
            past_sequence_length=1,
            sequence_length=1,
            num_attention_heads=config.num_attention_heads,
            hidden_size=config.hidden_size,
            num_layer=config.n_layer,
            vocab_size=config.vocab_size,
            device=device,
            float16=False,
            has_position_ids=has_position_ids,
            has_attention_mask=has_attention_mask,
        ).to_list()
        return torch.jit.trace(model, input_list)

    @staticmethod
    def get_onnx_paths(
        output_dir,
        model_name_or_path,
        model_class: str = "GPT2LMHeadModel",
        has_past=True,
        new_folder=False,
        remove_existing=["raw", "fp32", "fp16", "int8"],  # noqa: B006
    ):
        """Build path names for the given model based on the given attributes."""
        model_name = model_name_or_path
        if os.path.isdir(model_name_or_path):
            model_name = Path(model_name_or_path).parts[-1]
        else:
            model_name = model_name.split("/")[-1]

        if model_class != "GPT2LMHeadModel":
            model_name += "_" + model_class

        if has_past:
            model_name += "_past"

        if new_folder:
            suffix = {"raw": "", "fp32": "_fp32", "fp16": "_fp16", "int8": "_int8"}
            # Remove the directories if they exist.
            for model_type in ["raw", "fp32", "fp16", "int8"]:
                new_dir = os.path.join(output_dir, model_name + suffix[model_type])
                if os.path.exists(new_dir):
                    if model_type in remove_existing:
                        try:
                            shutil.rmtree(new_dir)
                            logger.info(f"Removed the existing directory: {new_dir}")
                        except OSError as e:
                            logger.info(f"Failed to remove the directory {new_dir}: {e.strerror}")
                    else:
                        logger.info(f"Directory for {model_type} exists: {new_dir}")

            # Store each model in its own directory (for external data format).
            return {
                "raw": os.path.join(os.path.join(output_dir, model_name), model_name + ".onnx"),
                "fp32": os.path.join(
                    os.path.join(output_dir, model_name + "_fp32"),
                    model_name + "_fp32.onnx",
                ),
                "fp16": os.path.join(
                    os.path.join(output_dir, model_name + "_fp16"),
                    model_name + "_fp16.onnx",
                ),
                "int8": os.path.join(
                    os.path.join(output_dir, model_name + "_int8"),
                    model_name + "_int8.onnx",
                ),
            }

        return {
            "raw": os.path.join(output_dir, model_name + ".onnx"),
            "fp32": os.path.join(output_dir, model_name + "_fp32.onnx"),
            "fp16": os.path.join(output_dir, model_name + "_fp16.onnx"),
            "int8": os.path.join(output_dir, model_name + "_int8.onnx"),
        }