mindstudio-probe 1.0.1__py3-none-any.whl → 1.0.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mindstudio_probe-1.0.1.dist-info → mindstudio_probe-1.0.4.dist-info}/LICENSE +201 -201
- {mindstudio_probe-1.0.1.dist-info → mindstudio_probe-1.0.4.dist-info}/METADATA +36 -30
- mindstudio_probe-1.0.4.dist-info/RECORD +276 -0
- {mindstudio_probe-1.0.1.dist-info → mindstudio_probe-1.0.4.dist-info}/WHEEL +1 -1
- {mindstudio_probe-1.0.1.dist-info → mindstudio_probe-1.0.4.dist-info}/entry_points.txt +1 -0
- msprobe/README.md +101 -182
- msprobe/__init__.py +1 -0
- msprobe/{config/config.json → config.json} +49 -27
- msprobe/core/__init__.py +0 -0
- msprobe/{pytorch → core}/advisor/advisor.py +124 -124
- msprobe/{pytorch → core}/advisor/advisor_const.py +59 -59
- msprobe/{pytorch → core}/advisor/advisor_result.py +58 -58
- msprobe/core/common/const.py +341 -241
- msprobe/core/common/exceptions.py +100 -88
- msprobe/core/common/{file_check.py → file_utils.py} +478 -265
- msprobe/core/common/log.py +76 -55
- msprobe/core/common/utils.py +385 -516
- msprobe/core/common_config.py +85 -58
- msprobe/core/compare/acc_compare.py +300 -0
- msprobe/core/compare/check.py +95 -0
- msprobe/core/compare/compare_cli.py +49 -0
- msprobe/core/compare/highlight.py +223 -0
- msprobe/core/compare/multiprocessing_compute.py +149 -0
- msprobe/{pytorch → core}/compare/npy_compare.py +295 -244
- msprobe/core/compare/utils.py +430 -0
- msprobe/core/data_dump/data_collector.py +154 -140
- msprobe/core/data_dump/data_processor/base.py +314 -245
- msprobe/core/data_dump/data_processor/factory.py +59 -61
- msprobe/core/data_dump/data_processor/mindspore_processor.py +186 -0
- msprobe/core/data_dump/data_processor/pytorch_processor.py +366 -346
- msprobe/core/data_dump/json_writer.py +96 -116
- msprobe/core/data_dump/scope.py +178 -178
- msprobe/core/grad_probe/__init__.py +0 -0
- msprobe/core/grad_probe/constant.py +71 -0
- msprobe/core/grad_probe/grad_compare.py +171 -0
- msprobe/core/grad_probe/utils.py +64 -0
- msprobe/docs/01.installation.md +89 -0
- msprobe/docs/02.config_introduction.md +165 -0
- msprobe/docs/03.config_examples.md +247 -0
- msprobe/docs/04.acl_config_examples.md +76 -0
- msprobe/docs/05.data_dump_PyTorch.md +198 -0
- msprobe/docs/06.data_dump_MindSpore.md +243 -0
- msprobe/docs/07.accuracy_checker_PyTorch.md +274 -0
- msprobe/docs/08.accuracy_checker_online_PyTorch.md +198 -0
- msprobe/docs/09.accuracy_checker_MindSpore.md +68 -0
- msprobe/docs/10.accuracy_compare_PyTorch.md +245 -0
- msprobe/docs/11.accuracy_compare_MindSpore.md +202 -0
- msprobe/docs/12.overflow_check_PyTorch.md +79 -0
- msprobe/docs/13.overflow_check_MindSpore.md +31 -0
- msprobe/{pytorch/doc/parse_tool.md → docs/14.data_parse_PyTorch.md} +283 -286
- msprobe/docs/15.free_benchmarking_PyTorch.md +164 -0
- msprobe/docs/17.grad_probe.md +207 -0
- msprobe/docs/FAQ_PyTorch.md +177 -0
- msprobe/docs/S02.report_free_benchmarking_validation_performance_baseline.md +146 -0
- msprobe/docs/img/free_benchmark_framework.png +0 -0
- msprobe/docs/img/grad_probe_image-1.png +0 -0
- msprobe/docs/img/grad_probe_image-2.png +0 -0
- msprobe/docs/img/grad_probe_image-3.png +0 -0
- msprobe/docs/img/grad_probe_image-4.png +0 -0
- msprobe/docs/img/grad_probe_image.png +0 -0
- msprobe/mindspore/__init__.py +1 -1
- msprobe/mindspore/api_accuracy_checker/__init__.py +0 -0
- msprobe/mindspore/api_accuracy_checker/api_accuracy_checker.py +255 -0
- msprobe/mindspore/api_accuracy_checker/api_info.py +69 -0
- msprobe/mindspore/api_accuracy_checker/api_runner.py +156 -0
- msprobe/mindspore/api_accuracy_checker/base_compare_algorithm.py +197 -0
- msprobe/mindspore/api_accuracy_checker/cmd_parser.py +6 -0
- msprobe/mindspore/api_accuracy_checker/compute_element.py +239 -0
- msprobe/mindspore/api_accuracy_checker/main.py +9 -0
- msprobe/mindspore/api_accuracy_checker/type_mapping.py +114 -0
- msprobe/mindspore/api_accuracy_checker/utils.py +80 -0
- msprobe/mindspore/cell_processor.py +34 -0
- msprobe/mindspore/common/const.py +106 -0
- msprobe/mindspore/common/log.py +38 -0
- msprobe/mindspore/common/utils.py +81 -0
- msprobe/mindspore/compare/distributed_compare.py +75 -0
- msprobe/mindspore/compare/ms_compare.py +219 -0
- msprobe/mindspore/compare/ms_graph_compare.py +348 -0
- msprobe/mindspore/compare/ms_to_pt_api.yaml +399 -0
- msprobe/mindspore/debugger/debugger_config.py +66 -51
- msprobe/mindspore/debugger/precision_debugger.py +126 -32
- msprobe/mindspore/dump/dump_tool_factory.py +35 -38
- msprobe/mindspore/dump/hook_cell/api_registry.py +118 -0
- msprobe/mindspore/dump/hook_cell/hook_cell.py +55 -0
- msprobe/mindspore/dump/hook_cell/support_wrap_ops.yaml +922 -0
- msprobe/mindspore/dump/hook_cell/wrap_api.py +113 -0
- msprobe/mindspore/dump/jit_dump.py +72 -0
- msprobe/mindspore/dump/kernel_graph_dump.py +59 -60
- msprobe/mindspore/dump/kernel_kbyk_dump.py +64 -0
- msprobe/mindspore/free_benchmark/__init__.py +0 -0
- msprobe/mindspore/free_benchmark/api_pynative_self_check.py +116 -0
- msprobe/mindspore/free_benchmark/common/__init__.py +0 -0
- msprobe/mindspore/free_benchmark/common/config.py +12 -0
- msprobe/mindspore/free_benchmark/common/handler_params.py +17 -0
- msprobe/mindspore/free_benchmark/common/utils.py +71 -0
- msprobe/mindspore/free_benchmark/data/support_wrap_ops.yaml +842 -0
- msprobe/mindspore/free_benchmark/decorator/__init__.py +0 -0
- msprobe/mindspore/free_benchmark/decorator/dec_forward.py +43 -0
- msprobe/mindspore/free_benchmark/decorator/decorator_factory.py +107 -0
- msprobe/mindspore/free_benchmark/handler/__init__.py +0 -0
- msprobe/mindspore/free_benchmark/handler/base_handler.py +90 -0
- msprobe/mindspore/free_benchmark/handler/check_handler.py +41 -0
- msprobe/mindspore/free_benchmark/handler/fix_handler.py +36 -0
- msprobe/mindspore/free_benchmark/handler/handler_factory.py +21 -0
- msprobe/mindspore/free_benchmark/perturbation/add_noise.py +67 -0
- msprobe/mindspore/free_benchmark/perturbation/base_perturbation.py +21 -0
- msprobe/mindspore/free_benchmark/perturbation/bit_noise.py +63 -0
- msprobe/mindspore/free_benchmark/perturbation/exchange_value.py +51 -0
- msprobe/mindspore/free_benchmark/perturbation/improve_precision.py +35 -0
- msprobe/mindspore/free_benchmark/perturbation/no_change.py +12 -0
- msprobe/mindspore/free_benchmark/perturbation/perturbation_factory.py +29 -0
- msprobe/mindspore/free_benchmark/self_check_tool_factory.py +33 -0
- msprobe/mindspore/grad_probe/__init__.py +0 -0
- msprobe/mindspore/grad_probe/global_context.py +90 -0
- msprobe/mindspore/grad_probe/grad_analyzer.py +231 -0
- msprobe/mindspore/grad_probe/grad_monitor.py +27 -0
- msprobe/mindspore/grad_probe/grad_stat_csv.py +132 -0
- msprobe/mindspore/grad_probe/hook.py +94 -0
- msprobe/mindspore/grad_probe/utils.py +30 -0
- msprobe/mindspore/ms_config.py +128 -78
- msprobe/mindspore/overflow_check/kernel_graph_overflow_check.py +44 -45
- msprobe/mindspore/overflow_check/overflow_check_tool_factory.py +34 -32
- msprobe/mindspore/runtime.py +4 -0
- msprobe/mindspore/service.py +378 -0
- msprobe/mindspore/task_handler_factory.py +24 -21
- msprobe/msprobe.py +105 -67
- msprobe/pytorch/__init__.py +4 -4
- msprobe/pytorch/api_accuracy_checker/common/config.py +53 -50
- msprobe/pytorch/api_accuracy_checker/common/utils.py +214 -224
- msprobe/pytorch/api_accuracy_checker/compare/algorithm.py +213 -216
- msprobe/pytorch/api_accuracy_checker/compare/api_precision_compare.py +606 -545
- msprobe/pytorch/api_accuracy_checker/compare/api_precision_standard.yaml +132 -132
- msprobe/pytorch/api_accuracy_checker/compare/api_precision_threshold.yaml +390 -390
- msprobe/pytorch/api_accuracy_checker/compare/compare.py +386 -345
- msprobe/pytorch/api_accuracy_checker/compare/compare_column.py +73 -73
- msprobe/pytorch/api_accuracy_checker/compare/compare_utils.py +245 -248
- msprobe/pytorch/api_accuracy_checker/config.yaml +10 -4
- msprobe/pytorch/api_accuracy_checker/run_ut/data_generate.py +335 -328
- msprobe/pytorch/api_accuracy_checker/run_ut/multi_run_ut.py +200 -203
- msprobe/pytorch/api_accuracy_checker/run_ut/run_overflow_check.py +133 -127
- msprobe/pytorch/api_accuracy_checker/run_ut/run_ut.py +592 -493
- msprobe/pytorch/api_accuracy_checker/run_ut/run_ut_utils.py +70 -7
- msprobe/pytorch/api_accuracy_checker/run_ut/torch_ut_setting.json +7 -4
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/__init__.py +0 -0
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/attl.py +197 -0
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/client.py +325 -0
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/device_dispatch.py +204 -0
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/server.py +219 -0
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/ssl_config.py +10 -0
- msprobe/pytorch/bench_functions/__init__.py +15 -0
- msprobe/pytorch/bench_functions/apply_adam_w.py +28 -0
- msprobe/pytorch/bench_functions/confusion_transpose.py +19 -0
- msprobe/pytorch/bench_functions/fast_gelu.py +55 -0
- msprobe/pytorch/bench_functions/layer_norm_eval.py +6 -0
- msprobe/pytorch/bench_functions/linear.py +12 -0
- msprobe/pytorch/bench_functions/matmul_backward.py +48 -0
- msprobe/pytorch/bench_functions/npu_fusion_attention.py +509 -0
- msprobe/pytorch/bench_functions/rms_norm.py +15 -0
- msprobe/pytorch/bench_functions/rotary_mul.py +52 -0
- msprobe/pytorch/bench_functions/scaled_mask_softmax.py +26 -0
- msprobe/pytorch/bench_functions/swiglu.py +55 -0
- msprobe/pytorch/common/__init__.py +2 -2
- msprobe/pytorch/common/compare_script.template +14 -14
- msprobe/pytorch/common/log.py +20 -31
- msprobe/pytorch/common/parse_json.py +39 -37
- msprobe/pytorch/common/utils.py +305 -224
- msprobe/pytorch/compare/distributed_compare.py +66 -111
- msprobe/pytorch/compare/mapping.yaml +607 -607
- msprobe/pytorch/compare/match.py +34 -36
- msprobe/pytorch/compare/pt_compare.py +50 -0
- msprobe/pytorch/debugger/debugger_config.py +95 -86
- msprobe/pytorch/debugger/precision_debugger.py +125 -95
- msprobe/pytorch/free_benchmark/__init__.py +8 -8
- msprobe/pytorch/free_benchmark/common/constant.py +70 -67
- msprobe/pytorch/free_benchmark/common/counter.py +71 -71
- msprobe/pytorch/free_benchmark/common/enums.py +37 -37
- msprobe/pytorch/free_benchmark/common/params.py +129 -129
- msprobe/pytorch/free_benchmark/common/utils.py +102 -98
- msprobe/pytorch/free_benchmark/compare/grad_saver.py +179 -183
- msprobe/pytorch/free_benchmark/compare/single_benchmark.py +104 -104
- msprobe/pytorch/free_benchmark/main.py +105 -102
- msprobe/pytorch/free_benchmark/perturbed_layers/base_layer.py +13 -13
- msprobe/pytorch/free_benchmark/perturbed_layers/layer_factory.py +41 -41
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/add_noise.py +90 -90
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/bit_noise.py +104 -104
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/change_value.py +63 -63
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/improve_precision.py +68 -68
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/no_change.py +28 -28
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/npu_base_layser.py +45 -45
- msprobe/pytorch/free_benchmark/perturbed_layers/run_cpu.py +19 -19
- msprobe/pytorch/free_benchmark/result_handlers/base_handler.py +217 -203
- msprobe/pytorch/free_benchmark/result_handlers/check_handler.py +39 -39
- msprobe/pytorch/free_benchmark/result_handlers/fix_handler.py +23 -23
- msprobe/pytorch/free_benchmark/result_handlers/handler_factory.py +30 -31
- msprobe/pytorch/free_benchmark/result_handlers/preheat_handler.py +170 -170
- msprobe/pytorch/function_factory.py +76 -0
- msprobe/pytorch/functional/dump_module.py +39 -39
- msprobe/pytorch/grad_probe/__init__.py +0 -0
- msprobe/pytorch/grad_probe/grad_monitor.py +91 -0
- msprobe/pytorch/grad_probe/grad_stat_csv.py +129 -0
- msprobe/pytorch/hook_module/api_registry.py +161 -161
- msprobe/pytorch/hook_module/hook_module.py +120 -109
- msprobe/pytorch/hook_module/support_wrap_ops.yaml +1879 -1876
- msprobe/pytorch/hook_module/utils.py +30 -29
- msprobe/pytorch/hook_module/wrap_aten.py +110 -100
- msprobe/pytorch/hook_module/wrap_distributed.py +78 -75
- msprobe/pytorch/hook_module/wrap_functional.py +105 -108
- msprobe/pytorch/hook_module/wrap_npu_custom.py +93 -73
- msprobe/pytorch/hook_module/wrap_tensor.py +71 -72
- msprobe/pytorch/hook_module/wrap_torch.py +86 -88
- msprobe/pytorch/hook_module/wrap_vf.py +62 -64
- msprobe/pytorch/module_processer.py +138 -98
- msprobe/pytorch/online_dispatch/__init__.py +20 -20
- msprobe/pytorch/online_dispatch/compare.py +236 -236
- msprobe/pytorch/online_dispatch/dispatch.py +271 -273
- msprobe/pytorch/online_dispatch/dump_compare.py +155 -186
- msprobe/pytorch/online_dispatch/single_compare.py +391 -391
- msprobe/pytorch/online_dispatch/torch_ops_config.yaml +49 -49
- msprobe/pytorch/online_dispatch/utils.py +130 -187
- msprobe/pytorch/parse.py +4 -4
- msprobe/pytorch/parse_tool/cli.py +32 -32
- msprobe/pytorch/parse_tool/lib/compare.py +260 -259
- msprobe/pytorch/parse_tool/lib/config.py +52 -51
- msprobe/pytorch/parse_tool/lib/file_desc.py +31 -31
- msprobe/pytorch/parse_tool/lib/interactive_cli.py +102 -102
- msprobe/pytorch/parse_tool/lib/parse_exception.py +54 -54
- msprobe/pytorch/parse_tool/lib/parse_tool.py +158 -158
- msprobe/pytorch/parse_tool/lib/utils.py +316 -367
- msprobe/pytorch/parse_tool/lib/visualization.py +85 -90
- msprobe/pytorch/pt_config.py +188 -93
- msprobe/pytorch/service.py +246 -167
- mindstudio_probe-1.0.1.dist-info/RECORD +0 -228
- msprobe/config/README.md +0 -397
- msprobe/mindspore/doc/dump.md +0 -65
- msprobe/mindspore/dump/api_kbk_dump.py +0 -55
- msprobe/pytorch/compare/acc_compare.py +0 -1024
- msprobe/pytorch/compare/highlight.py +0 -100
- msprobe/pytorch/doc/FAQ.md +0 -193
- msprobe/pytorch/doc/api_accuracy_checker.md +0 -269
- msprobe/pytorch/doc/atat/321/207/342/226/223/342/225/233/321/205/342/225/221/320/266/321/205/342/225/226/320/265/321/205/320/225/342/225/226/321/206/320/245/342/226/221/321/206/320/235/320/276dump/321/206/320/260/320/227/321/205/320/227/320/226/321/206/320/220/320/267/321/210/320/223/342/225/234/321/205/320/257/342/225/221/321/207/342/225/221/342/224/220/321/206/320/232/320/265/321/205/320/241/320/232.md +0 -182
- msprobe/pytorch/doc/dump.md +0 -207
- msprobe/pytorch/doc/ptdbg_ascend_compare.md +0 -176
- msprobe/pytorch/doc/ptdbg_ascend_overview.md +0 -68
- msprobe/pytorch/doc/ptdbg_ascend_quickstart.md +0 -381
- msprobe/pytorch/doc/run_overflow_check.md +0 -25
- msprobe/pytorch/doc//321/205/320/254/320/270/321/207/342/225/221/342/224/220/321/207/342/226/223/342/225/233/321/205/342/225/221/320/266/321/206/320/277/320/244/321/205/320/277/342/225/243.md +0 -90
- msprobe/test/core_ut/common/test_utils.py +0 -345
- msprobe/test/core_ut/data_dump/test_data_collector.py +0 -47
- msprobe/test/core_ut/data_dump/test_json_writer.py +0 -183
- msprobe/test/core_ut/data_dump/test_scope.py +0 -151
- msprobe/test/core_ut/test_common_config.py +0 -152
- msprobe/test/core_ut/test_file_check.py +0 -218
- msprobe/test/core_ut/test_log.py +0 -109
- msprobe/test/mindspore_ut/test_api_kbk_dump.py +0 -51
- msprobe/test/mindspore_ut/test_debugger_config.py +0 -42
- msprobe/test/mindspore_ut/test_dump_tool_factory.py +0 -51
- msprobe/test/mindspore_ut/test_kernel_graph_dump.py +0 -66
- msprobe/test/mindspore_ut/test_kernel_graph_overflow_check.py +0 -63
- msprobe/test/mindspore_ut/test_ms_config.py +0 -69
- msprobe/test/mindspore_ut/test_overflow_check_tool_factory.py +0 -51
- msprobe/test/mindspore_ut/test_precision_debugger.py +0 -56
- msprobe/test/mindspore_ut/test_task_handler_factory.py +0 -58
- msprobe/test/pytorch_ut/advisor/test_advisor.py +0 -83
- msprobe/test/pytorch_ut/api_accuracy_checker/common/test_common_utils.py +0 -108
- msprobe/test/pytorch_ut/api_accuracy_checker/common/test_config.py +0 -39
- msprobe/test/pytorch_ut/api_accuracy_checker/compare/test_algorithm.py +0 -112
- msprobe/test/pytorch_ut/api_accuracy_checker/compare/test_api_precision_compare.py +0 -77
- msprobe/test/pytorch_ut/api_accuracy_checker/compare/test_compare.py +0 -125
- msprobe/test/pytorch_ut/api_accuracy_checker/compare/test_compare_column.py +0 -10
- msprobe/test/pytorch_ut/api_accuracy_checker/compare/test_compare_utils.py +0 -43
- msprobe/test/pytorch_ut/api_accuracy_checker/run_ut/dump.json +0 -179
- msprobe/test/pytorch_ut/api_accuracy_checker/run_ut/forward.json +0 -63
- msprobe/test/pytorch_ut/api_accuracy_checker/run_ut/test_data_generate.py +0 -99
- msprobe/test/pytorch_ut/api_accuracy_checker/run_ut/test_multi_run_ut.py +0 -115
- msprobe/test/pytorch_ut/api_accuracy_checker/run_ut/test_run_ut.py +0 -72
- msprobe/test/pytorch_ut/compare/test_acc_compare.py +0 -17
- msprobe/test/pytorch_ut/free_benchmark/perturbed_layers/test_perturbed_layser.py +0 -105
- msprobe/test/pytorch_ut/free_benchmark/result_handlers/test_result_handler.py +0 -121
- msprobe/test/pytorch_ut/free_benchmark/test_main.py +0 -101
- msprobe/test/pytorch_ut/functional/test_dump_module.py +0 -15
- msprobe/test/pytorch_ut/hook_module/test_api_registry.py +0 -130
- msprobe/test/pytorch_ut/hook_module/test_hook_module.py +0 -42
- msprobe/test/pytorch_ut/hook_module/test_wrap_aten.py +0 -65
- msprobe/test/pytorch_ut/hook_module/test_wrap_distributed.py +0 -35
- msprobe/test/pytorch_ut/hook_module/test_wrap_functional.py +0 -20
- msprobe/test/pytorch_ut/hook_module/test_wrap_tensor.py +0 -35
- msprobe/test/pytorch_ut/hook_module/test_wrap_torch.py +0 -43
- msprobe/test/pytorch_ut/hook_module/test_wrap_vf.py +0 -11
- msprobe/test/pytorch_ut/test_pt_config.py +0 -69
- msprobe/test/pytorch_ut/test_service.py +0 -59
- msprobe/test/resources/advisor.txt +0 -3
- msprobe/test/resources/compare_result_20230703104808.csv +0 -9
- msprobe/test/resources/compare_result_without_accuracy.csv +0 -9
- msprobe/test/resources/config.yaml +0 -3
- msprobe/test/resources/npu_test.pkl +0 -8
- msprobe/test/run_test.sh +0 -30
- msprobe/test/run_ut.py +0 -58
- msprobe/test/test_module_processer.py +0 -64
- {mindstudio_probe-1.0.1.dist-info → mindstudio_probe-1.0.4.dist-info}/top_level.txt +0 -0
- /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_1.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_2.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_3.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_4.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_1.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_2.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_3.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_4.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_5.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_6.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_7.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_8.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/YOLOV5S_1.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/YOLOV5S_2.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/accuracy_checking_details.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/accuracy_checking_result.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/api_precision_compare_details.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/api_precision_compare_result.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/auto_analyze_log.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/compare_result_pkl.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/compare_result_pkl_md5.png.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/cpu_info.png +0 -0
- /msprobe/{config → docs}/img/free_benchmark.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/module_compare.png +0 -0
|
@@ -0,0 +1,204 @@
|
|
|
1
|
+
import time
|
|
2
|
+
from collections import namedtuple
|
|
3
|
+
|
|
4
|
+
import pandas as pd
|
|
5
|
+
import torch
|
|
6
|
+
import torch.multiprocessing as mp
|
|
7
|
+
|
|
8
|
+
from msprobe.core.common.const import Const, CompareConst
|
|
9
|
+
from msprobe.pytorch.api_accuracy_checker.compare.api_precision_compare import online_api_precision_compare
|
|
10
|
+
from msprobe.pytorch.api_accuracy_checker.compare.compare_utils import DETAIL_TEST_ROWS, thousandth_standard_api, \
|
|
11
|
+
binary_standard_api, absolute_standard_api
|
|
12
|
+
from msprobe.pytorch.api_accuracy_checker.run_ut.run_ut_utils import UtDataInfo, exec_api
|
|
13
|
+
from msprobe.pytorch.common.log import logger
|
|
14
|
+
from msprobe.pytorch.api_accuracy_checker.tensor_transport_layer.attl import move2target_device
|
|
15
|
+
|
|
16
|
+
# NPU vs GPU api list
# APIs covered by any of the three standard algorithms are compared NPU vs GPU directly.
CompareApi = set(absolute_standard_api) | set(binary_standard_api) | set(thousandth_standard_api)

# Timestamp baked into the output csv names once, at import time.
current_time = time.strftime("%Y%m%d%H%M%S")
# NOTE(review): "rank*" looks like a placeholder replaced per rank by the writer — confirm against
# online_api_precision_compare before relying on the literal name.
ONLINE_API_PRECISION_COMPARE_RESULT_FILE_NAME = "api_precision_compare_result_" + current_time + "_rank*.csv"
ONLINE_API_PRECISION_COMPARE_DETAILS_FILE_NAME = "api_precision_compare_details_" + current_time + "_rank*.csv"

# Inputs for one online api precision comparison run (consumed by online_api_precision_compare).
OnlineApiPrecisionCompareConfig = namedtuple('OnlineApiPrecisionCompareConfig',
                                             ['npu_data', 'gpu_data', 'rank', 'result_csv_path', 'details_csv_path'])
# namedtuple of [instance of Comparator, func of run_touch_api_online, config of run_ut_config]
CommonCompareConfig = namedtuple('CommonCompareConfig', ['compare', 'handle_func', 'config'])
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def run_ut_process(xpu_id, consumer_queue, common_config, api_precision_csv_file):
    """Worker-process loop that drains api data from a shared queue.

    Polls ``consumer_queue`` (shared with ConsumerDispatcher) and dispatches
    each item to the appropriate comparison routine until the "KILL_" sentinel
    arrives.

    :param xpu_id: int, index of the GPU this worker binds to
    :param consumer_queue: one of ConsumerDispatcher's shared queues
    :param common_config: namedtuple of CommonCompareConfig
    :param api_precision_csv_file: list, length is 2, result file name and details file name
    :return: None
    """
    gpu_device = torch.device(f'cuda:{xpu_id}')

    while True:
        # Poll with a short sleep so an empty queue does not spin the CPU.
        while consumer_queue.empty():
            time.sleep(0.1)

        api_data = consumer_queue.get()
        if api_data == "KILL_":
            # Sentinel pushed by ConsumerDispatcher.stop(): this worker is done.
            return

        _, api_name, _ = api_data.name.split(Const.SEP)
        if api_name not in CompareApi:
            # NPUvsCPU vs GPUvsCPU precision comparison
            online_precision_compare(api_data, gpu_device, common_config, api_precision_csv_file)
        else:
            # direct NPU vs GPU comparison
            online_compare(api_data, gpu_device, common_config)
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
def online_precision_compare(api_data, device, common_config, api_precision_csv_file):
    """online run_ut for precision_compare: NPUvsCPU vs GPUvsCPU
    1. get NPUvsCPU compare result
    2. get GPUvsCPU compare result
    3. call online_api_precision_compare
    :param api_data: received api item; carries name, args, kwargs, result and rank
    :param device: torch.device the GPU leg of the comparison runs on
    :param common_config: namedtuple of CommonCompareConfig
    :param api_precision_csv_file: [result_file_name, details_file_name]
    """
    compare, func, config = common_config.compare, common_config.handle_func, common_config.config
    api_full_name = api_data.name
    # api_full_name is "<type>.<name>.<...>" joined by Const.SEP
    [api_type, api_name, _] = api_full_name.split(Const.SEP)
    npu_args, npu_kwargs, npu_out = api_data.args, api_data.kwargs, api_data.result

    # Drop any device kwarg so the CPU execution below is not pinned to the NPU.
    if npu_kwargs.get("device"):
        del npu_kwargs["device"]

    try:
        # NPU vs CPU
        cpu_out = exec_api(api_type, api_name, Const.CPU_LOWERCASE, npu_args, npu_kwargs)
        npu_data_info = UtDataInfo(None, None, npu_out, cpu_out, None, [], None, rank=api_data.rank)
        npu_detail = compare.compare_output(api_full_name, npu_data_info, True)
        npu_data = pd.DataFrame(npu_detail, columns=DETAIL_TEST_ROWS[-1])

        # GPU vs CPU
        api_data_gpu = move2target_device(api_data, device)  # args, kwargs -> gpu, result -> npu
        data_info = func(api_full_name, api_data_gpu, config.backward_content)
        gpu_out = data_info.bench_output
        gpu_data_info = UtDataInfo(None, None, gpu_out, cpu_out, None, [], None, rank=api_data.rank)
        gpu_detail = compare.compare_output(api_full_name, gpu_data_info, True)
        gpu_data = pd.DataFrame(gpu_detail, columns=DETAIL_TEST_ROWS[-1])

        # NPUvsCPU vs GPUvsCPU
        result_file_name, details_file_name = api_precision_csv_file
        precision_compare_config = OnlineApiPrecisionCompareConfig(npu_data, gpu_data, api_data.rank,
                                                                   result_file_name, details_file_name)
        online_api_precision_compare(precision_compare_config)

    except Exception as err:
        if "expected scalar type Long" in str(err):
            logger.warning(
                f"API {api_name} not support int32 tensor in CPU, please add {api_name} to CONVERT_API "
                f"'int32_to_int64' list in accuracy_tools/msprobe/core/common/const.py file.")
        elif api_type in [Const.DISTRIBUTED]:
            logger.info(f"{api_full_name} is not supported for run ut. SKIP.")
        else:
            logger.error(f"Run {api_full_name} UT Error: {str(err)}")

        # Record the failed api as SKIP in the summary csv, keeping the error text.
        compare.write_summary_csv((api_full_name, CompareConst.SKIP, CompareConst.SKIP, [[str(err)]], api_data.rank))

    finally:
        # Release cached GPU memory between APIs.
        torch.cuda.empty_cache()
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
def online_compare(api_data, device, common_config):
    """Online run_ut comparison of a single api: NPU outputs vs GPU outputs.

    :param api_data: received api item; carries name, args, kwargs, result and rank
    :param device: torch.device the GPU replay runs on
    :param common_config: namedtuple of CommonCompareConfig
    """
    comparator = common_config.compare
    handle = common_config.handle_func
    run_config = common_config.config
    api_full_name = api_data.name
    api_data = move2target_device(api_data, device)
    try:
        data_info = handle(api_full_name, api_data, run_config.backward_content)
        is_fwd_success, is_bwd_success = comparator.compare_output(api_full_name, data_info)
        logger.info(
            f"running api_full_name {api_full_name} ut, "
            f"is_fwd_success: {is_fwd_success}, "
            f"is_bwd_success: {is_bwd_success}")
    except Exception as err:
        [api_type, api_name, _] = api_full_name.split(Const.SEP)
        err_text = str(err)
        if "expected scalar type Long" in err_text:
            logger.warning(
                f"API {api_name} not support int32 tensor in CPU, please add {api_name} to CONVERT_API "
                f"'int32_to_int64' list in accuracy_tools/msprobe/core/common/const.py file.")
        elif api_type in [Const.DISTRIBUTED]:
            logger.info(f"{api_full_name} is not supported for run ut. SKIP.")
        else:
            logger.error(f"Run {api_full_name} UT Error: {err_text}")

        # Record the failed api as SKIP in the summary csv, keeping the error text.
        comparator.write_summary_csv((api_full_name, CompareConst.SKIP, CompareConst.SKIP,
                                      [[err_text]], api_data.rank))
    finally:
        # Release cached GPU memory between APIs.
        torch.cuda.empty_cache()
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
class ConsumerDispatcher:
    """Fans api data out to per-GPU worker processes through bounded queues."""

    def __init__(self, compare, capacity=10, num_workers=8, device: str = "gpu") -> None:
        self.num_workers = num_workers
        self.capacity = capacity
        self.compare = compare
        self.queues = []
        self.processes = []
        self.reverse_sort = False
        self.pool = None
        self.device = device
        self.data_id = 0
        self.lock = mp.Lock()
        self.result_queue = mp.Queue()
        # Workers must be spawned (not forked) so CUDA state is not inherited.
        mp.set_start_method("spawn", force=True)

    def start(self, handle_func, config):
        """Create one bounded queue and one worker process per configured worker."""
        self.queues = [mp.Queue(maxsize=self.capacity) for _ in range(self.num_workers)]
        api_precision_csv_file = [ONLINE_API_PRECISION_COMPARE_RESULT_FILE_NAME,
                                  ONLINE_API_PRECISION_COMPARE_DETAILS_FILE_NAME]
        common_config = CommonCompareConfig(self.compare, handle_func, config)
        for xpu_id, queue in enumerate(self.queues):
            worker = mp.Process(name="run_ut_process", target=run_ut_process,
                                args=(xpu_id, queue, common_config, api_precision_csv_file))

            worker.start()
            self.processes.append(worker)
        logger.info(f"Api_precision_compare task result will be saved in {ONLINE_API_PRECISION_COMPARE_RESULT_FILE_NAME}")
        logger.info(f"Api_precision_compare task details will be saved in {ONLINE_API_PRECISION_COMPARE_DETAILS_FILE_NAME}")
        logger.info("Successfully start unittest process.")

    def stop(self):
        """Push the kill sentinel into every queue and wait for all workers to exit."""
        for queue in self.queues:
            while queue.full():
                time.sleep(0.1)
            queue.put("KILL_")

        for worker in self.processes:
            worker.join()
        logger.info("Successfully stop unittest process.")
        logger.info(f"Api_precision_compare task result is saved in {ONLINE_API_PRECISION_COMPARE_RESULT_FILE_NAME}")
        logger.info(f"Api_precision_compare task details is saved in {ONLINE_API_PRECISION_COMPARE_DETAILS_FILE_NAME}")

    def update_consume_queue(self, api_data):
        """Block until some queue has free capacity, then enqueue the api data there."""
        while True:
            slot = self._choose_max_empty_site_strategy()
            if slot == -1:
                time.sleep(0.1)
                continue
            self.queues[slot].put(api_data)
            return

    def _choose_max_empty_site_strategy(self):
        """Return the index of the queue with the most free slots, or -1 if all are full.

        The scan direction alternates between calls so that, on ties, work is
        spread across all cards instead of piling onto the first ones.
        """
        step = -1 if self.reverse_sort else 1
        best_free = 0
        chosen = -1
        for pos, queue in enumerate(self.queues[::step]):
            free_slots = self.capacity - queue.qsize()
            if free_slots > best_free:
                best_free = free_slots
                chosen = pos
        if chosen != -1 and self.reverse_sort:
            # Translate the reversed-scan position back to the real queue index.
            chosen = len(self.queues) - chosen - 1
        self.reverse_sort = not self.reverse_sort
        return chosen
|
|
@@ -0,0 +1,219 @@
|
|
|
1
|
+
import os.path
|
|
2
|
+
import struct
|
|
3
|
+
import hashlib
|
|
4
|
+
import time
|
|
5
|
+
import io
|
|
6
|
+
from threading import Thread
|
|
7
|
+
|
|
8
|
+
from twisted.internet import reactor, protocol, endpoints
|
|
9
|
+
|
|
10
|
+
from msprobe.pytorch.common.utils import logger
|
|
11
|
+
from msprobe.pytorch.api_accuracy_checker.tensor_transport_layer.ssl_config import cipher_list
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class TCPServer:
    """Twisted TCP server that runs its reactor on a daemon thread.

    When ``tls_path`` is given, the listener is wrapped in TLS 1.2 using the
    server.key/server.crt pair found in that directory.
    """

    def __init__(self, port, shared_queue, check_sum=False, tls_path=None) -> None:
        self.port = port
        self.shared_queue = shared_queue
        self.check_sum = check_sum
        self.tls_path = tls_path
        self.factory = MessageServerFactory()
        self.reactor_thread = None

    @staticmethod
    def run_reactor():
        # installSignalHandlers=False because the reactor runs off the main thread.
        reactor.run(installSignalHandlers=False)

    def start(self):
        """Begin listening on the configured port and start the reactor thread."""
        self.factory.protocol = self.build_protocol
        self._build_endpoint().listen(self.factory)
        self.reactor_thread = Thread(target=self.run_reactor, daemon=True)
        self.reactor_thread.start()

    def is_running(self):
        """True while at least one client connection is still open."""
        return not self.factory.is_all_connection_closed()

    def stop(self):
        """Shut the factory down, interrupt the reactor and join its thread."""
        self.factory.doStop()
        reactor.callFromThread(reactor.sigInt, 2)
        self.reactor_thread.join()

    def build_protocol(self):
        """Protocol factory hook: one ServerProtocol per accepted connection."""
        return ServerProtocol(self.shared_queue, self.check_sum)

    def _build_endpoint(self):
        """Create a TLS endpoint when tls_path is set, otherwise a plain TCP one."""
        if not self.tls_path:
            return endpoints.TCP4ServerEndpoint(reactor, self.port)
        from OpenSSL import SSL
        from twisted.internet import ssl
        server_key = os.path.join(self.tls_path, "server.key")
        server_crt = os.path.join(self.tls_path, "server.crt")
        context_factory = ssl.DefaultOpenSSLContextFactory(server_key, server_crt, SSL.TLSv1_2_METHOD)
        context = context_factory.getContext()
        context.set_cipher_list(cipher_list)
        context.set_options(SSL.OP_NO_RENEGOTIATION)
        return endpoints.SSL4ServerEndpoint(reactor, self.port, context_factory)
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
class ServerProtocol(protocol.Protocol):
    """Parses a length-prefixed message stream from one client connection.

    Wire format per message: a 32-byte header of four 8-byte big-endian
    unsigned ints (body length, sequence number, rank, step), then an
    optional 32-byte hex MD5 digest (only when check_sum is enabled),
    then the body itself. Complete bodies are pushed onto the shared
    consumer queue; every message is acknowledged with a 5-byte opcode
    plus the echoed sequence/rank/step.
    """

    # Five-byte ACK opcodes echoed back to the client (and, for KILL,
    # also pushed to the consumer queue as a sentinel).
    ACK_SUCCESS = b"OK___"
    ACK_ERROR = b"ERROR"
    ACK_BUSY = b"BUSY_"
    ACK_STOP = b"STOP_"
    ACK_STOP_CONFIRM = b"OVER_"
    ACK_KILL_PROCESS = b"KILL_"

    def __init__(self, shared_queue, check_sum=False):
        """Set up per-connection parse state.

        shared_queue: queue the consumer reads complete message bodies from.
        check_sum: when True, expect and verify an MD5 digest per message.
        """
        self.start_time = None
        self.buffer = io.BytesIO()
        self.consumer_queue = shared_queue
        self.check_sum = check_sum
        self.length_width = 8  # each header field is an 8-byte big-endian unsigned int
        self.md5_width = 32  # length of the hex digest appended when check_sum is on
        self.obj_length = None  # body length from the header; None until header parsed
        self.tell = 0  # read offset into self.buffer
        self.obj_md5 = None
        self.obj_body = None
        self.sequence_number = -1
        self.rank = -1
        self.step = -1
        self.sequence_number_dict = dict()  # (seq_rank_step) keys already delivered; dedupes retransmissions

    def connectionMade(self):
        """Reset parse state and register this transport on the factory."""
        self.buffer = io.BytesIO()
        self.obj_length = None
        self.tell = 0
        self.obj_md5 = None
        self.obj_body = None
        # Track the live transport so stop/kill fan-out can reach every client.
        self.factory.transport_dict[self.transport] = 1
        self.factory.transport_list.append(self.transport)
        logger.info(f"Connected to {self.transport.getPeer()} successfully.")

    def connectionLost(self, reason):
        """Deregister the transport; signal the consumer when the last link drops."""
        self.factory.transport_dict.pop(self.transport, None)
        if len(self.factory.transport_dict) == 0:
            # No clients left: tell the consumer to shut down.
            self.consumer_queue.put(self.ACK_KILL_PROCESS)

        logger.info(f"Lost connection with {self.transport.getPeer()}. Reason is: {reason} 与客户端 断开连接, "
                    f"current connection number is: {len(self.factory.transport_dict)}")

    def send_ack(self, ack_info):
        """Write a 29-byte ACK frame: 5-byte opcode + seq/rank/step as 8-byte big-endian ints."""
        ack_message = b"".join([
            ack_info,
            self.sequence_number.to_bytes(8, byteorder='big'),
            self.rank.to_bytes(8, byteorder='big'),
            self.step.to_bytes(8, byteorder='big')
        ])
        self.transport.write(ack_message)

    def post_process(self):
        """Verify, acknowledge, and enqueue the just-completed message body."""
        # Back-pressure: send BUSY once, then spin until the consumer drains.
        send_busy_ack = False
        while self.consumer_queue.full():
            if not send_busy_ack:
                self.send_ack(self.ACK_BUSY)
                logger.debug("sending BUSY ACK")
                send_busy_ack = True
            time.sleep(0.1)

        obj_key = str(self.sequence_number) + "_" + str(self.rank) + "_" + str(self.step)

        # NOTE(review): the digest is computed even when check_sum is disabled —
        # wasted work on large bodies; confirm before optimizing away.
        recv_md5 = hashlib.md5(self.obj_body).hexdigest()
        if self.check_sum and recv_md5 != self.obj_md5:
            # when needs check md5 and check no pass, indicates received data error, send b"ERROR" to client.
            logger.debug(f"Error:接收数据有问题,流水号{self.sequence_number}, expected {self.obj_md5}, but get {recv_md5}")
            self.send_ack(self.ACK_ERROR)
        else:
            if self.obj_body == self.ACK_STOP:
                self.handle_with_stop()
            else:
                self.send_ack(self.ACK_SUCCESS)
                # A key seen before means the client retransmitted; don't re-enqueue.
                if obj_key in self.sequence_number_dict:
                    logger.debug(f"这是一次异常的重传,可以忽略。 {obj_key}, {self.sequence_number_dict}")
                else:
                    self.sequence_number_dict[obj_key] = self.obj_md5
                    self.consumer_queue.put(self.obj_body, block=True)

        self.reset_env()
        finish_time = time.time()
        logger.debug(f"finish_time: {finish_time - self.start_time}")

    def handle_with_stop(self):
        """Confirm a STOP message; once no transport is mid-transfer, broadcast KILL."""
        logger.debug(f"接收到停止传输信号 TCP{self.transport.getPeer()}")
        self.send_ack(self.ACK_STOP_CONFIRM)
        if len(self.factory.transport_dict) == 0:
            # Synthetic header values for the KILL broadcast frame.
            _rank, _step, _sequence_number = 0, 0, 100000000
            ack_kill = self.ACK_KILL_PROCESS + \
                       _sequence_number.to_bytes(8, byteorder='big') + \
                       _rank.to_bytes(8, byteorder='big') + \
                       _step.to_bytes(8, byteorder='big')
            # Fan the KILL frame out to every connection ever established.
            for trans in self.factory.transport_list:
                trans.write(ack_kill)
                logger.debug(f"发送KILL信息给{self.transport.getPeer()}")
            self.consumer_queue.put(self.ACK_KILL_PROCESS)
            # Grace period so clients can read the KILL frame before teardown.
            time.sleep(2)

    def reset_env(self):
        """Clear per-message parse state ahead of the next frame."""
        self.obj_length = None
        self.sequence_number = -1
        self.rank = -1
        self.step = -1
        self.obj_md5 = None
        self.obj_body = None

    def dataReceived(self, data):
        """Accumulate raw bytes and parse header / digest / body as they complete."""
        # Append the new chunk at the end, then resume parsing from self.tell.
        self.buffer.seek(0, 2)
        self.buffer.write(data)
        self.buffer.seek(self.tell)

        # The first data packet is packet header, it contains obj_length, sequence_number, rank, step
        if self.obj_length is None and len(self.buffer.getvalue()) >= self.length_width * 4:
            self.start_time = time.time()
            self.obj_length = struct.unpack('!Q', self.buffer.read(self.length_width))[0]
            self.sequence_number = struct.unpack('!Q', self.buffer.read(self.length_width))[0]
            self.rank = struct.unpack('!Q', self.buffer.read(self.length_width))[0]
            self.step = struct.unpack('!Q', self.buffer.read(self.length_width))[0]
            self.tell += self.length_width * 4
            logger.debug(
                f"流水号: {self.sequence_number}; RANK: {self.rank}; STEP: {self.step}; Length: {self.obj_length}")

        # If needs check md5 but not parse md5 yet, read 32b md5 values
        check_sum_and_md5 = (self.check_sum
                             and self.obj_length is not None
                             and self.obj_md5 is None
                             and len(self.buffer.getvalue()) - self.tell >= self.md5_width)
        if check_sum_and_md5:
            self.obj_md5 = self.buffer.read(self.md5_width).decode()
            self.tell += self.md5_width
            logger.debug(f"MD5: {self.obj_md5}")

        # NOTE(review): a zero-length body never satisfies 0 < obj_length, so an
        # empty payload would stall this connection — confirm senders never emit
        # length 0.
        current_length = len(self.buffer.getvalue()) - self.tell
        if self.obj_length is not None and 0 < self.obj_length <= current_length:
            # Current api data receive finished
            self.obj_body = self.buffer.read(self.obj_length)

            # Drop consumed bytes so the buffer does not grow without bound.
            self.tell += self.obj_length
            self.buffer = io.BytesIO(self.buffer.getvalue()[self.tell:])
            self.buffer.seek(0)
            self.tell = 0
            recv_data_time = time.time()
            logger.debug(f"self.sequence_number {self.sequence_number} "
                         f"recv_data_time {recv_data_time - self.start_time}")

            if self.obj_body == self.ACK_STOP:
                # Indicates the current TCP link receives a STOP signal and remove from the transport_dict
                _transport = self.factory.transport_dict.pop(self.transport, None)
                logger.debug(f"接收到b'STOP_' self.sequence_number {self.sequence_number} ")
            self.post_process()
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
class MessageServerFactory(protocol.ServerFactory):
    """Server factory that tracks client transports across connections.

    transport_dict holds links that have not completed data transmission;
    transport_list records every TCP link ever established (new links are
    appended on connection).
    """

    def __init__(self) -> None:
        self.transport_dict = {}
        self.transport_list = []

    def is_all_connection_closed(self):
        """Return True once no link remains mid-transfer."""
        return not self.transport_dict
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
# TLS cipher suites permitted for the tensor-transport channel, in preference
# order: ECDHE/DHE key exchange with AES-GCM or ChaCha20-Poly1305 AEADs only.
_CIPHER_SUITES = (
    'ECDHE-ECDSA-AES128-GCM-SHA256',
    'ECDHE-RSA-AES128-GCM-SHA256',
    'ECDHE-ECDSA-AES256-GCM-SHA384',
    'ECDHE-RSA-AES256-GCM-SHA384',
    'ECDHE-ECDSA-CHACHA20-POLY1305',
    'ECDHE-RSA-CHACHA20-POLY1305',
    'DHE-RSA-AES128-GCM-SHA256',
    'DHE-RSA-AES256-GCM-SHA384',
)
# OpenSSL's set_cipher_list expects a colon-separated bytes string.
cipher_list = ":".join(_CIPHER_SUITES).encode()
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
import os
from pkgutil import iter_modules
from importlib import import_module

"""
gpu and cpu not implement benchmark function, supplementary benchmarking function implementation
"""

# Re-export callables from every sibling module of this package into the
# package namespace, so callers can use the CPU/GPU benchmark fallbacks
# directly from the package. Names containing "npu_custom" are skipped —
# presumably those require a real NPU; TODO confirm.
package_path = os.path.dirname(os.path.realpath(__file__))
for _, module_name, _ in iter_modules([package_path]):
    module = import_module(f"{__name__}.{module_name}")
    for attr_name in dir(module):
        attr = getattr(module, attr_name)
        # Only plain callables are promoted; later modules overwrite earlier
        # ones if names collide.
        if callable(attr) and "npu_custom" not in attr_name:
            globals()[attr_name] = attr
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def npu_apply_adam_w(beta1_power, beta2_power, lr, weight_decay,
                     beta1, beta2, eps, grad, max_grad_norm, amsgrad, maximize, out):
    """CPU/GPU benchmark stand-in for the NPU fused AdamW step.

    out is the (var, m, v) state triple; returns the updated
    (var_out, m_out, v_out). When amsgrad is set, max_grad_norm is replaced
    by random values (benchmark-only behaviour, not a faithful optimizer).
    """
    var, m, v = out
    if amsgrad:
        # Benchmark-only: substitute a random tensor for the running-max state.
        max_grad_norm = (torch.rand(var.shape) * 10.0 - 5.0).to(var.dtype)

    beta1_power_out = beta1_power * beta1
    beta2_power_out = beta2_power * beta2

    # Decoupled weight decay applied directly to the parameters.
    decayed_var = var * (1 + (-lr * weight_decay))
    if maximize:
        signed_grad = -grad
    else:
        signed_grad = grad

    # Exponential moving averages of the gradient and its square.
    m_out = m * beta1 - (beta1 + (-1)) * signed_grad
    v_out = v * beta2 - (beta2 + (-1)) * signed_grad * signed_grad

    if amsgrad:
        max_grad_norm_out = torch.max(max_grad_norm, v_out)
        # Nudge the bias-correction denominator away from exactly zero.
        if (1 - beta2_power_out) == 0:
            beta2_power_out -= eps
        denom = torch.sqrt(torch.div(max_grad_norm_out, (1 - beta2_power_out))) + eps
    else:
        v_hat = torch.div(v_out, (1 - beta2_power_out))
        denom = torch.sqrt(v_hat) + eps

    if (1 - beta1_power_out) == 0:
        beta1_power_out -= eps
    var_out = decayed_var + torch.div(-lr * m_out, (1 - beta1_power_out)).div(denom)
    return var_out, m_out, v_out
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
def npu_confusion_transpose(data, perm, shape, transpose_first):
    """Benchmark equivalent of NPU confusion_transpose: fused permute + reshape.

    When transpose_first is True the permutation runs before the reshape;
    otherwise the reshape runs first.
    """
    if transpose_first:
        return data.permute(*perm).contiguous().view(shape)
    return data.view(shape).permute(*perm)
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def npu_confusion_transpose_backward(grad, perm, shape, transpose_first):
    """Backward of confusion_transpose: undo the forward permute/reshape pair.

    Returns the input gradient on CPU.
    """
    # Shape of the forward input: given directly, or recovered through perm.
    shape_cal = shape if transpose_first else [shape[axis] for axis in perm]

    # Inverse permutation: forward output axis i came from input axis perm[i].
    perm_cal = [0] * len(perm)
    for source_axis, target_axis in enumerate(perm):
        perm_cal[target_axis] = source_axis

    if transpose_first:
        restored = grad.permute(*perm_cal).reshape(shape_cal)
    else:
        restored = grad.reshape(shape_cal).permute(*perm_cal)
    return restored.cpu()
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def npu_fast_gelu(input0):
    """Fast-GELU benchmark kernel: numerically equal to x * sigmoid(1.702 * x).

    Factored through |x| so every exponent argument is non-positive, which
    keeps torch.exp from overflowing for large-magnitude inputs.
    """
    scale = 1.702
    neg_scale = 0 - scale
    one = 1
    half_scale = scale / 2

    magnitude = torch.abs(input0)
    # Denominator: exp(-1.702 * |x|) + 1
    denominator = torch.exp(magnitude * neg_scale) + one

    # Numerator: x * exp(1.702 / 2 * (x - |x|))
    shifted = input0 - magnitude
    numerator = input0 * torch.exp(shifted * half_scale)

    return numerator * torch.reciprocal(denominator)
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def npu_fast_gelu_backward(grad, input_x):
    """Gradient of fast-GELU w.r.t. its input, evaluated on CPU.

    Computes grad * d/dx[x * sigmoid(1.702 * x)] using only |x|-shifted
    exponentials so the arithmetic stays overflow-safe.
    """
    scale = 1.702
    one = 1.0
    neg_scale = 0.0 - scale

    # e^(-1.702|x|)
    magnitude = torch.abs(input_x)
    exp_neg = torch.exp(magnitude * neg_scale)

    # 1.702 * x * e^(-1.702|x|)
    scaled_x_term = input_x * exp_neg
    scaled_x_term = scaled_x_term * scale

    # e^(1.702 * (x - |x|))
    shifted = input_x - magnitude
    exp_shift = torch.exp(shifted * scale)

    # Numerator: e^(-1.702|x|) + 1.702 x e^(-1.702|x|) + e^(1.702(x-|x|))
    numerator = exp_neg + scaled_x_term
    numerator = numerator + exp_shift

    # Denominator: (e^(-1.702|x|) + 1)^2
    base = exp_neg + one
    denominator = base * base

    derivative = numerator * torch.reciprocal(denominator)
    return (grad * derivative).cpu()
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def npu_linear(x, weight, bias):
    """Benchmark equivalent of NPU linear: x @ weight.T + bias."""
    return torch.nn.functional.linear(x, weight, bias)
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def npu_linear_backward(grad, input_data, weight):
    """Backward of npu_linear for 2-D operands.

    Returns (input_grad, weight_grad) on CPU: dL/dx = grad @ W and
    dL/dW = grad.T @ x.
    """
    grad_for_input = torch.matmul(grad, weight).cpu()
    grad_for_weight = torch.matmul(grad.t(), input_data).cpu()
    return grad_for_input, grad_for_weight
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def matmul_backward(grad, self, other, mask):
    """Benchmark implementation of the matmul backward pass.

    Computes the gradients of ``self @ other`` w.r.t. both operands,
    mirroring ATen's matmul_backward rank-combination rules.

    Args:
        grad: upstream gradient of the matmul output.
        self: left operand of the forward matmul.
        other: right operand of the forward matmul.
        mask: two-element boolean mask; mask[0] / mask[1] select whether the
            gradient for self / other is required.

    Returns:
        Tuple (grad_self, grad_other), each moved to CPU, or None for any
        entry whose mask flag is False.
    """
    grad_self, grad_other = None, None
    dim_self = self.dim()
    dim_other = other.dim()

    size_grad = list(grad.size())
    size_self = list(self.size())
    size_other = list(other.size())
    if dim_self == 1 and dim_other == 1:
        # vector . vector -> scalar: each gradient is the other vector scaled.
        grad_self = other.mul(grad) if mask[0] else grad_self
        grad_other = self.mul(grad) if mask[1] else grad_other
    elif dim_self == 2 and dim_other == 1:
        # matrix @ vector: outer product / transposed matvec.
        grad_self = grad.unsqueeze(1).mm(other.unsqueeze(0)) if mask[0] else grad_self
        grad_other = self.transpose(-1, -2).mm(grad.unsqueeze(1)).squeeze_(1) if mask[1] else grad_other
    elif dim_self == 1 and dim_other == 2:
        grad_self = grad.unsqueeze(0).mm(other.transpose(-1, -2)).squeeze_(0) if mask[0] else grad_self
        grad_other = self.unsqueeze(1).mm(grad.unsqueeze(0)) if mask[1] else grad_other
    elif dim_self >= 3 and (dim_other == 1 or dim_other == 2):
        # Fold the batch dimensions of grad/self into a single 2-D matmul.
        view_size = 1 if dim_other == 1 else size_grad[-1]
        unfolded_grad = (grad.unsqueeze(-1) if dim_other == 1 else grad).contiguous().view(-1, view_size)
        if mask[0]:
            grad_self = unfolded_grad.mm(other.unsqueeze(0) if dim_other == 1 else other.transpose(-1, -2)) \
                .view(size_self)
        if mask[1]:
            unfolded_self = self.contiguous().view([-1, size_self[-1]])
            grad_other = unfolded_self.transpose(-1, -2).mm(unfolded_grad).view(size_other)
    elif (dim_self == 1 or dim_self == 2) and dim_other >= 3:
        view_size = 1 if dim_self == 1 else size_grad[-2]
        unfolded_grad_T = grad.view([-1, view_size]) \
            if dim_self == 1 else grad.transpose(-1, -2).contiguous().view([-1, view_size])
        if mask[0]:
            # create a 2D-matrix from other
            unfolded_other_T = \
                other.transpose(-1, -2).contiguous().view([-1, size_other[-2]]).transpose(-1, -2)
            grad_self = unfolded_other_T.mm(unfolded_grad_T).transpose(-1, -2).view(size_self)
        if mask[1]:
            # Target shape: batch dims of other with its last two axes swapped.
            size_other_T = size_other[:-2]
            size_other_T.extend(size_other[::-1][:2])
            grad_other = \
                unfolded_grad_T.mm(self.unsqueeze(0) if dim_self == 1 else self).view(size_other_T).transpose(-1, -2)
    else:
        # Both operands batched (>= 3-D): rely on matmul broadcasting.
        grad_self = torch.matmul(grad, other.transpose(-1, -2)) if mask[0] else grad_self
        grad_other = torch.matmul(self.transpose(-1, -2), grad) if mask[1] else grad_other

    # Fix: the original called .cpu() unconditionally and raised
    # AttributeError whenever a mask flag was False (gradient left as None).
    return (grad_self.cpu() if grad_self is not None else None,
            grad_other.cpu() if grad_other is not None else None)
|