mindstudio-probe 1.0.3__py3-none-any.whl → 1.0.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.0.4.dist-info}/LICENSE +201 -201
- {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.0.4.dist-info}/METADATA +36 -34
- mindstudio_probe-1.0.4.dist-info/RECORD +276 -0
- {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.0.4.dist-info}/WHEEL +1 -1
- {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.0.4.dist-info}/entry_points.txt +1 -0
- msprobe/README.md +101 -237
- msprobe/{config/config.json → config.json} +49 -49
- msprobe/core/advisor/advisor.py +124 -124
- msprobe/core/advisor/advisor_const.py +59 -59
- msprobe/core/advisor/advisor_result.py +58 -58
- msprobe/core/common/const.py +341 -318
- msprobe/core/common/exceptions.py +99 -99
- msprobe/core/common/{file_check.py → file_utils.py} +478 -283
- msprobe/core/common/log.py +76 -69
- msprobe/core/common/utils.py +385 -616
- msprobe/core/common_config.py +85 -71
- msprobe/core/compare/acc_compare.py +299 -298
- msprobe/core/compare/check.py +95 -95
- msprobe/core/compare/compare_cli.py +49 -49
- msprobe/core/compare/highlight.py +223 -222
- msprobe/core/compare/multiprocessing_compute.py +149 -149
- msprobe/core/compare/npy_compare.py +295 -295
- msprobe/core/compare/utils.py +430 -429
- msprobe/core/data_dump/data_collector.py +154 -144
- msprobe/core/data_dump/data_processor/base.py +314 -293
- msprobe/core/data_dump/data_processor/factory.py +59 -59
- msprobe/core/data_dump/data_processor/mindspore_processor.py +186 -198
- msprobe/core/data_dump/data_processor/pytorch_processor.py +366 -389
- msprobe/core/data_dump/json_writer.py +96 -116
- msprobe/core/data_dump/scope.py +178 -178
- msprobe/core/grad_probe/constant.py +70 -70
- msprobe/core/grad_probe/grad_compare.py +171 -175
- msprobe/core/grad_probe/utils.py +64 -52
- msprobe/docs/01.installation.md +89 -0
- msprobe/docs/02.config_introduction.md +165 -0
- msprobe/docs/03.config_examples.md +247 -0
- msprobe/docs/04.acl_config_examples.md +76 -0
- msprobe/docs/05.data_dump_PyTorch.md +198 -0
- msprobe/docs/06.data_dump_MindSpore.md +243 -0
- msprobe/docs/07.accuracy_checker_PyTorch.md +274 -0
- msprobe/docs/08.accuracy_checker_online_PyTorch.md +198 -0
- msprobe/docs/09.accuracy_checker_MindSpore.md +68 -0
- msprobe/docs/10.accuracy_compare_PyTorch.md +245 -0
- msprobe/docs/11.accuracy_compare_MindSpore.md +202 -0
- msprobe/docs/12.overflow_check_PyTorch.md +79 -0
- msprobe/docs/13.overflow_check_MindSpore.md +31 -0
- msprobe/{pytorch/doc/parse_tool.md → docs/14.data_parse_PyTorch.md} +283 -286
- msprobe/docs/15.free_benchmarking_PyTorch.md +164 -0
- msprobe/{doc/grad_probe/grad_probe.md → docs/17.grad_probe.md} +207 -207
- msprobe/docs/FAQ_PyTorch.md +177 -0
- msprobe/docs/S02.report_free_benchmarking_validation_performance_baseline.md +146 -0
- msprobe/docs/img/free_benchmark_framework.png +0 -0
- msprobe/mindspore/__init__.py +1 -1
- msprobe/mindspore/api_accuracy_checker/api_accuracy_checker.py +254 -245
- msprobe/mindspore/api_accuracy_checker/api_info.py +69 -69
- msprobe/mindspore/api_accuracy_checker/api_runner.py +155 -151
- msprobe/mindspore/api_accuracy_checker/base_compare_algorithm.py +196 -196
- msprobe/mindspore/api_accuracy_checker/cmd_parser.py +6 -0
- msprobe/mindspore/api_accuracy_checker/compute_element.py +238 -223
- msprobe/mindspore/api_accuracy_checker/main.py +8 -15
- msprobe/mindspore/api_accuracy_checker/type_mapping.py +113 -113
- msprobe/mindspore/api_accuracy_checker/utils.py +79 -62
- msprobe/mindspore/cell_processor.py +34 -34
- msprobe/mindspore/common/const.py +106 -87
- msprobe/mindspore/common/log.py +37 -37
- msprobe/mindspore/common/utils.py +81 -57
- msprobe/mindspore/compare/distributed_compare.py +75 -75
- msprobe/mindspore/compare/ms_compare.py +219 -117
- msprobe/mindspore/compare/ms_graph_compare.py +348 -317
- msprobe/mindspore/compare/ms_to_pt_api.yaml +399 -399
- msprobe/mindspore/debugger/debugger_config.py +66 -74
- msprobe/mindspore/debugger/precision_debugger.py +126 -107
- msprobe/mindspore/dump/dump_tool_factory.py +35 -35
- msprobe/mindspore/dump/hook_cell/api_registry.py +118 -104
- msprobe/mindspore/dump/hook_cell/hook_cell.py +55 -53
- msprobe/mindspore/dump/hook_cell/support_wrap_ops.yaml +922 -925
- msprobe/mindspore/dump/hook_cell/wrap_api.py +113 -0
- msprobe/mindspore/dump/jit_dump.py +72 -56
- msprobe/mindspore/dump/kernel_graph_dump.py +59 -60
- msprobe/mindspore/dump/kernel_kbyk_dump.py +64 -65
- msprobe/mindspore/free_benchmark/api_pynative_self_check.py +116 -116
- msprobe/mindspore/free_benchmark/common/config.py +12 -12
- msprobe/mindspore/free_benchmark/common/handler_params.py +17 -17
- msprobe/mindspore/free_benchmark/common/utils.py +71 -71
- msprobe/mindspore/free_benchmark/data/support_wrap_ops.yaml +842 -842
- msprobe/mindspore/free_benchmark/decorator/dec_forward.py +43 -42
- msprobe/mindspore/free_benchmark/decorator/decorator_factory.py +107 -107
- msprobe/mindspore/free_benchmark/handler/base_handler.py +90 -90
- msprobe/mindspore/free_benchmark/handler/check_handler.py +41 -41
- msprobe/mindspore/free_benchmark/handler/fix_handler.py +36 -36
- msprobe/mindspore/free_benchmark/handler/handler_factory.py +21 -21
- msprobe/mindspore/free_benchmark/perturbation/add_noise.py +67 -67
- msprobe/mindspore/free_benchmark/perturbation/base_perturbation.py +21 -21
- msprobe/mindspore/free_benchmark/perturbation/bit_noise.py +63 -63
- msprobe/mindspore/free_benchmark/perturbation/exchange_value.py +51 -0
- msprobe/mindspore/free_benchmark/perturbation/improve_precision.py +35 -34
- msprobe/mindspore/free_benchmark/perturbation/no_change.py +12 -12
- msprobe/mindspore/free_benchmark/perturbation/perturbation_factory.py +29 -27
- msprobe/mindspore/free_benchmark/self_check_tool_factory.py +33 -33
- msprobe/mindspore/grad_probe/global_context.py +90 -91
- msprobe/mindspore/grad_probe/grad_analyzer.py +231 -231
- msprobe/mindspore/grad_probe/grad_monitor.py +27 -27
- msprobe/mindspore/grad_probe/grad_stat_csv.py +131 -131
- msprobe/mindspore/grad_probe/hook.py +94 -92
- msprobe/mindspore/grad_probe/utils.py +29 -28
- msprobe/mindspore/ms_config.py +128 -126
- msprobe/mindspore/overflow_check/kernel_graph_overflow_check.py +44 -45
- msprobe/mindspore/overflow_check/overflow_check_tool_factory.py +34 -34
- msprobe/mindspore/runtime.py +4 -4
- msprobe/mindspore/service.py +378 -354
- msprobe/mindspore/task_handler_factory.py +24 -24
- msprobe/msprobe.py +105 -107
- msprobe/pytorch/__init__.py +3 -3
- msprobe/pytorch/api_accuracy_checker/common/config.py +53 -55
- msprobe/pytorch/api_accuracy_checker/common/utils.py +214 -165
- msprobe/pytorch/api_accuracy_checker/compare/algorithm.py +213 -213
- msprobe/pytorch/api_accuracy_checker/compare/api_precision_compare.py +606 -581
- msprobe/pytorch/api_accuracy_checker/compare/api_precision_standard.yaml +132 -132
- msprobe/pytorch/api_accuracy_checker/compare/api_precision_threshold.yaml +390 -390
- msprobe/pytorch/api_accuracy_checker/compare/compare.py +386 -381
- msprobe/pytorch/api_accuracy_checker/compare/compare_column.py +73 -73
- msprobe/pytorch/api_accuracy_checker/compare/compare_utils.py +245 -244
- msprobe/pytorch/api_accuracy_checker/config.yaml +10 -10
- msprobe/pytorch/api_accuracy_checker/run_ut/data_generate.py +335 -332
- msprobe/pytorch/api_accuracy_checker/run_ut/multi_run_ut.py +200 -199
- msprobe/pytorch/api_accuracy_checker/run_ut/run_overflow_check.py +133 -134
- msprobe/pytorch/api_accuracy_checker/run_ut/run_ut.py +592 -581
- msprobe/pytorch/api_accuracy_checker/run_ut/run_ut_utils.py +70 -74
- msprobe/pytorch/api_accuracy_checker/run_ut/torch_ut_setting.json +7 -4
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/attl.py +197 -202
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/client.py +325 -324
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/device_dispatch.py +204 -204
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/server.py +219 -218
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/ssl_config.py +10 -10
- msprobe/pytorch/bench_functions/__init__.py +15 -15
- msprobe/pytorch/bench_functions/apply_adam_w.py +28 -28
- msprobe/pytorch/bench_functions/confusion_transpose.py +19 -19
- msprobe/pytorch/bench_functions/fast_gelu.py +55 -55
- msprobe/pytorch/bench_functions/layer_norm_eval.py +6 -6
- msprobe/pytorch/bench_functions/linear.py +12 -12
- msprobe/pytorch/bench_functions/matmul_backward.py +48 -48
- msprobe/pytorch/bench_functions/npu_fusion_attention.py +509 -421
- msprobe/pytorch/bench_functions/rms_norm.py +15 -15
- msprobe/pytorch/bench_functions/rotary_mul.py +52 -52
- msprobe/pytorch/bench_functions/scaled_mask_softmax.py +26 -26
- msprobe/pytorch/bench_functions/swiglu.py +55 -55
- msprobe/pytorch/common/__init__.py +2 -2
- msprobe/pytorch/common/compare_script.template +14 -14
- msprobe/pytorch/common/log.py +20 -31
- msprobe/pytorch/common/parse_json.py +39 -39
- msprobe/pytorch/common/utils.py +305 -300
- msprobe/pytorch/compare/distributed_compare.py +66 -66
- msprobe/pytorch/compare/mapping.yaml +607 -607
- msprobe/pytorch/compare/match.py +34 -33
- msprobe/pytorch/compare/pt_compare.py +50 -40
- msprobe/pytorch/debugger/debugger_config.py +95 -95
- msprobe/pytorch/debugger/precision_debugger.py +125 -125
- msprobe/pytorch/free_benchmark/__init__.py +8 -8
- msprobe/pytorch/free_benchmark/common/constant.py +70 -70
- msprobe/pytorch/free_benchmark/common/counter.py +71 -71
- msprobe/pytorch/free_benchmark/common/enums.py +37 -37
- msprobe/pytorch/free_benchmark/common/params.py +129 -129
- msprobe/pytorch/free_benchmark/common/utils.py +102 -102
- msprobe/pytorch/free_benchmark/compare/grad_saver.py +179 -179
- msprobe/pytorch/free_benchmark/compare/single_benchmark.py +104 -104
- msprobe/pytorch/free_benchmark/main.py +105 -105
- msprobe/pytorch/free_benchmark/perturbed_layers/base_layer.py +13 -13
- msprobe/pytorch/free_benchmark/perturbed_layers/layer_factory.py +41 -41
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/add_noise.py +90 -90
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/bit_noise.py +104 -104
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/change_value.py +63 -63
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/improve_precision.py +68 -68
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/no_change.py +28 -28
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/npu_base_layser.py +45 -45
- msprobe/pytorch/free_benchmark/perturbed_layers/run_cpu.py +19 -19
- msprobe/pytorch/free_benchmark/result_handlers/base_handler.py +217 -217
- msprobe/pytorch/free_benchmark/result_handlers/check_handler.py +39 -39
- msprobe/pytorch/free_benchmark/result_handlers/fix_handler.py +23 -23
- msprobe/pytorch/free_benchmark/result_handlers/handler_factory.py +30 -30
- msprobe/pytorch/free_benchmark/result_handlers/preheat_handler.py +170 -170
- msprobe/pytorch/function_factory.py +76 -75
- msprobe/pytorch/functional/dump_module.py +39 -39
- msprobe/pytorch/grad_probe/grad_monitor.py +91 -90
- msprobe/pytorch/grad_probe/grad_stat_csv.py +128 -128
- msprobe/pytorch/hook_module/api_registry.py +161 -161
- msprobe/pytorch/hook_module/hook_module.py +120 -120
- msprobe/pytorch/hook_module/support_wrap_ops.yaml +1879 -1877
- msprobe/pytorch/hook_module/utils.py +30 -29
- msprobe/pytorch/hook_module/wrap_aten.py +110 -110
- msprobe/pytorch/hook_module/wrap_distributed.py +78 -78
- msprobe/pytorch/hook_module/wrap_functional.py +105 -105
- msprobe/pytorch/hook_module/wrap_npu_custom.py +93 -84
- msprobe/pytorch/hook_module/wrap_tensor.py +71 -71
- msprobe/pytorch/hook_module/wrap_torch.py +86 -86
- msprobe/pytorch/hook_module/wrap_vf.py +62 -62
- msprobe/pytorch/module_processer.py +138 -138
- msprobe/pytorch/online_dispatch/__init__.py +20 -20
- msprobe/pytorch/online_dispatch/compare.py +236 -236
- msprobe/pytorch/online_dispatch/dispatch.py +271 -271
- msprobe/pytorch/online_dispatch/dump_compare.py +155 -156
- msprobe/pytorch/online_dispatch/single_compare.py +391 -391
- msprobe/pytorch/online_dispatch/torch_ops_config.yaml +49 -49
- msprobe/pytorch/online_dispatch/utils.py +130 -146
- msprobe/pytorch/parse.py +4 -4
- msprobe/pytorch/parse_tool/cli.py +32 -32
- msprobe/pytorch/parse_tool/lib/compare.py +260 -271
- msprobe/pytorch/parse_tool/lib/config.py +52 -52
- msprobe/pytorch/parse_tool/lib/file_desc.py +31 -31
- msprobe/pytorch/parse_tool/lib/interactive_cli.py +102 -102
- msprobe/pytorch/parse_tool/lib/parse_exception.py +54 -54
- msprobe/pytorch/parse_tool/lib/parse_tool.py +158 -158
- msprobe/pytorch/parse_tool/lib/utils.py +316 -321
- msprobe/pytorch/parse_tool/lib/visualization.py +85 -91
- msprobe/pytorch/pt_config.py +188 -187
- msprobe/pytorch/service.py +246 -252
- mindstudio_probe-1.0.3.dist-info/RECORD +0 -272
- msprobe/config/README.md +0 -539
- msprobe/mindspore/doc/compare.md +0 -58
- msprobe/mindspore/doc/dump.md +0 -217
- msprobe/mindspore/dump/hook_cell/wrap_functional.py +0 -91
- msprobe/mindspore/dump/hook_cell/wrap_tensor.py +0 -63
- msprobe/pytorch/doc/FAQ.md +0 -193
- msprobe/pytorch/doc/api_accuracy_checker.md +0 -313
- msprobe/pytorch/doc/api_accuracy_checker_online.md +0 -187
- msprobe/pytorch/doc/dump.md +0 -260
- msprobe/pytorch/doc/msprobe/321/207/342/226/223/342/225/233/321/205/342/225/221/320/266/321/205/342/225/226/320/265/321/205/320/225/342/225/226/321/206/320/245/342/226/221/321/206/320/235/320/276dump/321/206/320/260/320/227/321/205/320/227/320/226/321/206/320/220/320/267/321/210/320/223/342/225/234/321/205/320/257/342/225/221/321/207/342/225/221/342/224/220/321/206/320/232/320/265/321/205/320/241/320/232.md +0 -182
- msprobe/pytorch/doc/ptdbg_ascend_compare.md +0 -240
- msprobe/pytorch/doc/ptdbg_ascend_overview.md +0 -68
- msprobe/pytorch/doc/ptdbg_ascend_quickstart.md +0 -381
- msprobe/pytorch/doc/run_overflow_check.md +0 -25
- msprobe/pytorch/doc//321/205/320/254/320/270/321/207/342/225/221/342/224/220/321/207/342/226/223/342/225/233/321/205/342/225/221/320/266/321/206/320/277/320/244/321/205/320/277/342/225/243.md +0 -90
- msprobe/pytorch/doc//321/206/320/247/320/260/321/206/320/260/320/227/321/206/320/255/320/226/321/205/342/225/226/320/265/321/205/320/225/342/225/226/321/205/320/254/342/225/221/321/206/320/251/320/277/321/211/320/272/320/234/321/210/320/277/320/221/321/205/320/242/320/234/321/206/320/220/320/267/321/210/320/223/342/225/234/321/205/320/257/342/225/221/321/207/342/225/221/342/224/220/321/206/320/232/320/265/321/205/320/241/320/232.md +0 -151
- {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.0.4.dist-info}/top_level.txt +0 -0
- /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_1.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_2.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_3.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_4.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_1.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_2.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_3.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_4.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_5.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_6.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_7.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_8.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/YOLOV5S_1.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/YOLOV5S_2.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/accuracy_checking_details.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/accuracy_checking_result.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/api_precision_compare_details.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/api_precision_compare_result.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/auto_analyze_log.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/compare_result_pkl.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/compare_result_pkl_md5.png.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/cpu_info.png +0 -0
- /msprobe/{config → docs}/img/free_benchmark.png +0 -0
- /msprobe/{doc/grad_probe/img/image-1.png → docs/img/grad_probe_image-1.png} +0 -0
- /msprobe/{doc/grad_probe/img/image-2.png → docs/img/grad_probe_image-2.png} +0 -0
- /msprobe/{doc/grad_probe/img/image-3.png → docs/img/grad_probe_image-3.png} +0 -0
- /msprobe/{doc/grad_probe/img/image-4.png → docs/img/grad_probe_image-4.png} +0 -0
- /msprobe/{doc/grad_probe/img/image.png → docs/img/grad_probe_image.png} +0 -0
- /msprobe/{pytorch/doc → docs}/img/module_compare.png +0 -0
|
@@ -1,389 +1,366 @@
|
|
|
1
|
-
import
|
|
2
|
-
import
|
|
3
|
-
import
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
import
|
|
8
|
-
import
|
|
9
|
-
from msprobe.core.common.
|
|
10
|
-
from msprobe.core.
|
|
11
|
-
|
|
12
|
-
from msprobe.
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
if
|
|
65
|
-
return tensor_stat
|
|
66
|
-
data_clone
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
elif data_clone.
|
|
70
|
-
tensor_stat.max =
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
tensor_stat.
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
if int(torch._C._VariableFunctionsClass.sum(
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
if
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
tensor_json
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
def
|
|
186
|
-
|
|
187
|
-
self.
|
|
188
|
-
self.
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
self.
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
self.
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
self.
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
if
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
def
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
self.checker
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
def
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
def analyze_forward(self, name, module, module_input_output
|
|
313
|
-
|
|
314
|
-
name,
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
self.
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
|
|
340
|
-
|
|
341
|
-
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
|
|
348
|
-
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
|
|
352
|
-
|
|
353
|
-
torch_npu.npu.
|
|
354
|
-
torch_npu.npu.
|
|
355
|
-
torch_npu.npu.synchronize()
|
|
356
|
-
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
|
|
362
|
-
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
return True
|
|
368
|
-
return False
|
|
369
|
-
|
|
370
|
-
def dump_mode_backward_acl_dump(self, name, module, module_input_output):
|
|
371
|
-
grad_path = self.config.backward_input.get(name)
|
|
372
|
-
if not KernelDumpDataProcessor.forward_init_status:
|
|
373
|
-
KernelDumpDataProcessor.forward_init_status = True
|
|
374
|
-
output = module.forward(*module_input_output.args, **module_input_output.kwargs)
|
|
375
|
-
grad = torch.load(grad_path).to("npu").requires_grad_()
|
|
376
|
-
torch_npu.npu.init_dump()
|
|
377
|
-
torch_npu.npu.set_dump(self.config.acl_config)
|
|
378
|
-
torch_npu.npu.synchronize()
|
|
379
|
-
if not self.acl_backward_dump_status(output, grad, name):
|
|
380
|
-
logger.warning("The output of {} is not of tensor type and cannot be automatically derived. "
|
|
381
|
-
"you can manually construct a single API backward case for ACL dump.".format(
|
|
382
|
-
name))
|
|
383
|
-
torch_npu.npu.synchronize()
|
|
384
|
-
torch_npu.npu.finalize_dump()
|
|
385
|
-
KernelDumpDataProcessor.forward_init_status = False
|
|
386
|
-
logger.info("Dump %s op file." % name)
|
|
387
|
-
|
|
388
|
-
def op_need_trigger(self, module_name):
|
|
389
|
-
return 'Tensor.__getitem__.' in module_name
|
|
1
|
+
import zlib
|
|
2
|
+
from dataclasses import asdict
|
|
3
|
+
from typing import List
|
|
4
|
+
|
|
5
|
+
import numpy as np
|
|
6
|
+
import torch
|
|
7
|
+
from msprobe.core.common.file_utils import path_len_exceeds_limit, change_mode
|
|
8
|
+
from msprobe.core.common.log import logger
|
|
9
|
+
from msprobe.core.common.const import Const, OverflowConst, FileCheckConst
|
|
10
|
+
from msprobe.core.data_dump.data_processor.base import BaseDataProcessor, ModuleBackwardInputsOutputs, \
|
|
11
|
+
ModuleForwardInputsOutputs, TensorStatInfo
|
|
12
|
+
from msprobe.pytorch.free_benchmark import FreeBenchmarkCheck, UnequalRow
|
|
13
|
+
from msprobe.pytorch.common.utils import save_pt, load_pt
|
|
14
|
+
|
|
15
|
+
try:
|
|
16
|
+
import torch_npu
|
|
17
|
+
is_gpu = False
|
|
18
|
+
except ImportError:
|
|
19
|
+
is_gpu = True
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class PytorchDataProcessor(BaseDataProcessor):
|
|
23
|
+
pytorch_special_type = (torch.device, torch.dtype, torch.Size, torch.Tensor)
|
|
24
|
+
|
|
25
|
+
def __init__(self, config, data_writer):
|
|
26
|
+
super().__init__(config, data_writer)
|
|
27
|
+
self.torch_object_key = {
|
|
28
|
+
"device": self.analyze_device_in_kwargs,
|
|
29
|
+
"dtype": self.analyze_dtype_in_kwargs
|
|
30
|
+
}
|
|
31
|
+
|
|
32
|
+
@staticmethod
|
|
33
|
+
def get_md5_for_tensor(x):
|
|
34
|
+
if x.dtype == torch.bfloat16:
|
|
35
|
+
x = x.float()
|
|
36
|
+
tensor_bytes = x.cpu().detach().numpy().tobytes()
|
|
37
|
+
crc32_hash = zlib.crc32(tensor_bytes)
|
|
38
|
+
return f"{crc32_hash:08x}"
|
|
39
|
+
|
|
40
|
+
@staticmethod
|
|
41
|
+
def analyze_device_in_kwargs(element):
|
|
42
|
+
single_arg = {}
|
|
43
|
+
single_arg.update({'type': "torch.device"})
|
|
44
|
+
if not isinstance(element, str):
|
|
45
|
+
if hasattr(element, "index"):
|
|
46
|
+
device_value = element.type + ":" + str(element.index)
|
|
47
|
+
else:
|
|
48
|
+
device_value = element.type
|
|
49
|
+
single_arg.update({"value": device_value})
|
|
50
|
+
else:
|
|
51
|
+
single_arg.update({"value": element})
|
|
52
|
+
return single_arg
|
|
53
|
+
|
|
54
|
+
@staticmethod
|
|
55
|
+
def analyze_dtype_in_kwargs(element):
|
|
56
|
+
return {"type": "torch.dtype", "value": str(element)}
|
|
57
|
+
|
|
58
|
+
@staticmethod
|
|
59
|
+
def get_stat_info(data):
|
|
60
|
+
tensor_stat = TensorStatInfo()
|
|
61
|
+
if data.is_meta:
|
|
62
|
+
return tensor_stat
|
|
63
|
+
data_clone = data.detach()
|
|
64
|
+
if data_clone.numel() == 0:
|
|
65
|
+
return tensor_stat
|
|
66
|
+
elif data_clone.dtype == torch.bool:
|
|
67
|
+
tensor_stat.max = True in data_clone
|
|
68
|
+
tensor_stat.min = False not in data_clone
|
|
69
|
+
elif not data_clone.shape:
|
|
70
|
+
tensor_stat.max = tensor_stat.min = tensor_stat.mean = tensor_stat.norm = data_clone.item()
|
|
71
|
+
elif torch.is_complex(data_clone):
|
|
72
|
+
data_np = data_clone.cpu().numpy()
|
|
73
|
+
data_abs = np.abs(data_np)
|
|
74
|
+
tensor_stat.max = np.max(data_abs).item()
|
|
75
|
+
tensor_stat.min = np.min(data_abs).item()
|
|
76
|
+
tensor_stat.mean = np.mean(data_abs).item()
|
|
77
|
+
else:
|
|
78
|
+
if not data_clone.is_floating_point() or data_clone.dtype == torch.float64:
|
|
79
|
+
data_clone = data_clone.float()
|
|
80
|
+
tensor_stat.max = torch._C._VariableFunctionsClass.max(data_clone).item()
|
|
81
|
+
tensor_stat.min = torch._C._VariableFunctionsClass.min(data_clone).item()
|
|
82
|
+
tensor_stat.mean = torch._C._VariableFunctionsClass.mean(data_clone).item()
|
|
83
|
+
tensor_stat.norm = torch._C._VariableFunctionsClass.norm(data_clone).item()
|
|
84
|
+
return tensor_stat
|
|
85
|
+
|
|
86
|
+
@staticmethod
|
|
87
|
+
def handle_tensor_extremum_nan_inf(tensor, operator):
|
|
88
|
+
data_clone = tensor.detach()
|
|
89
|
+
data_nan = torch._C._VariableFunctionsClass.isnan(data_clone)
|
|
90
|
+
if int(torch._C._VariableFunctionsClass.sum(data_nan)) == data_clone.numel():
|
|
91
|
+
return float('nan')
|
|
92
|
+
finite_mask = torch._C._VariableFunctionsClass.isfinite(data_clone)
|
|
93
|
+
if int(torch._C._VariableFunctionsClass.sum(finite_mask)) > 0:
|
|
94
|
+
finite_values = data_clone[finite_mask]
|
|
95
|
+
return torch._C._VariableFunctionsClass.max(finite_values).item() if operator == 'max' else \
|
|
96
|
+
torch._C._VariableFunctionsClass.min(finite_values).item()
|
|
97
|
+
else:
|
|
98
|
+
data_no_nan = data_clone[~data_nan]
|
|
99
|
+
return torch._C._VariableFunctionsClass.max(data_no_nan).item() if operator == 'max' else \
|
|
100
|
+
torch._C._VariableFunctionsClass.min(data_no_nan).item()
|
|
101
|
+
|
|
102
|
+
@staticmethod
|
|
103
|
+
def _analyze_torch_size(arg):
|
|
104
|
+
return {"type": "torch.Size", "value": list(arg)}
|
|
105
|
+
|
|
106
|
+
@classmethod
|
|
107
|
+
def get_special_types(cls):
|
|
108
|
+
return super().get_special_types() + cls.pytorch_special_type
|
|
109
|
+
|
|
110
|
+
def analyze_single_element(self, element, suffix_stack):
|
|
111
|
+
if suffix_stack and suffix_stack[-1] in self.torch_object_key:
|
|
112
|
+
return self.torch_object_key[suffix_stack[-1]](element)
|
|
113
|
+
if isinstance(element, torch.Size):
|
|
114
|
+
return self._analyze_torch_size(element)
|
|
115
|
+
converted_numpy, numpy_type = self._convert_numpy_to_builtin(element)
|
|
116
|
+
if converted_numpy is not element:
|
|
117
|
+
return self._analyze_numpy(converted_numpy, numpy_type)
|
|
118
|
+
if isinstance(element, torch.Tensor):
|
|
119
|
+
return self._analyze_tensor(element, Const.SEP.join(suffix_stack))
|
|
120
|
+
if isinstance(element, (bool, int, float, str, slice, type(Ellipsis))):
|
|
121
|
+
return self._analyze_builtin(element)
|
|
122
|
+
return {}
|
|
123
|
+
|
|
124
|
+
def _analyze_tensor(self, tensor, suffix):
|
|
125
|
+
tensor_stat = self.get_stat_info(tensor)
|
|
126
|
+
tensor_json = {}
|
|
127
|
+
tensor_json.update({'type': 'torch.Tensor'})
|
|
128
|
+
tensor_json.update({'dtype': str(tensor.dtype)})
|
|
129
|
+
tensor_json.update({"shape": tensor.shape})
|
|
130
|
+
tensor_json.update({"Max": tensor_stat.max})
|
|
131
|
+
tensor_json.update({"Min": tensor_stat.min})
|
|
132
|
+
tensor_json.update({"Mean": tensor_stat.mean})
|
|
133
|
+
tensor_json.update({"Norm": tensor_stat.norm})
|
|
134
|
+
tensor_json.update({"requires_grad": tensor.requires_grad})
|
|
135
|
+
|
|
136
|
+
if tensor_stat.max is not None:
|
|
137
|
+
if np.isinf(tensor_stat.max) or np.isnan(tensor_stat.max):
|
|
138
|
+
tensor_json['Max_except_inf_nan'] = self.handle_tensor_extremum_nan_inf(tensor, "max")
|
|
139
|
+
if tensor_stat.min is not None:
|
|
140
|
+
if np.isinf(tensor_stat.min) or np.isnan(tensor_stat.min):
|
|
141
|
+
tensor_json['Min_except_inf_nan'] = self.handle_tensor_extremum_nan_inf(tensor, "min")
|
|
142
|
+
|
|
143
|
+
if self.config.summary_mode == Const.MD5:
|
|
144
|
+
tensor_md5 = self.get_md5_for_tensor(tensor)
|
|
145
|
+
tensor_json.update({Const.MD5: tensor_md5})
|
|
146
|
+
return tensor_json
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
class StatisticsDataProcessor(PytorchDataProcessor):
|
|
150
|
+
pass
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
class TensorDataProcessor(PytorchDataProcessor):
|
|
154
|
+
def _analyze_tensor(self, tensor, suffix):
|
|
155
|
+
dump_data_name, file_path = self.get_save_file_path(suffix)
|
|
156
|
+
saved_tensor = tensor.contiguous().detach()
|
|
157
|
+
save_pt(saved_tensor, file_path)
|
|
158
|
+
single_arg = super()._analyze_tensor(tensor, suffix)
|
|
159
|
+
single_arg.update({"data_name": dump_data_name})
|
|
160
|
+
return single_arg
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
class OverflowCheckDataProcessor(PytorchDataProcessor):
    """Processor that writes dumped tensors to disk only when an overflow occurs.

    Tensors analyzed during a forward/backward pass are cached in memory and
    flushed to their dump files only if the pass produced an overflow — an
    inf/nan statistic on devices that support inf/nan mode, or the NPU
    hardware overflow flag otherwise.  Dumping stops after
    ``config.overflow_nums`` overflows (-1 means unlimited).
    """
    # NOTE(review): __slots__ lists only one of the attributes assigned in
    # __init__; the others still work because the base class provides a
    # __dict__, so this declaration is effectively inert — confirm intent.
    __slots__ = ["cached_tensors_and_file_paths"]

    def __init__(self, config, data_writer):
        super().__init__(config, data_writer)
        self.has_overflow = False
        # None = "not probed yet"; lazily resolved by _is_support_inf_nan().
        self.support_inf_nan = None
        self.cached_inplace_api_info = {}
        # Maps file_path -> tensor; flushed only when an overflow is detected.
        self.cached_tensors_and_file_paths = {}
        self.bits_for_overflow = 8
        self.real_overflow_nums = 0
        self.overflow_nums = config.overflow_nums

    @property
    def is_terminated(self):
        """Return True once the configured overflow count has been reached."""
        if self.overflow_nums == -1:
            return False
        if self.real_overflow_nums >= self.overflow_nums:
            logger.info(f"[msprobe] 超过预设溢出次数 当前溢出次数: {self.real_overflow_nums}")
            return True
        return False

    def analyze_pre_forward_inplace(self, name, module_input_output: ModuleForwardInputsOutputs):
        """Cache the pre-forward record of an in-place API; nothing is emitted yet."""
        self.has_overflow = False
        self._is_support_inf_nan()
        self.cached_inplace_api_info = super().analyze_pre_forward_inplace(name, module_input_output)
        return None

    def analyze_forward_inplace(self, name, module_input_output: ModuleForwardInputsOutputs):
        """Merge post-forward info into the cached in-place record; emit it only on overflow."""
        self._is_support_inf_nan()
        api_info_struct = super().analyze_forward_inplace(name, module_input_output)
        if name in self.cached_inplace_api_info and name in api_info_struct:
            self.cached_inplace_api_info[name].update(api_info_struct[name])
        elif name in api_info_struct:
            self.cached_inplace_api_info = api_info_struct
        self.handle_overflow()
        return self.cached_inplace_api_info if self.has_overflow else None

    def analyze_forward(self, name, module, module_input_output: ModuleForwardInputsOutputs):
        """Analyze a forward call; return its record only if it overflowed."""
        self.has_overflow = False
        self._is_support_inf_nan()
        api_info_struct = super().analyze_forward(name, module, module_input_output)
        self.handle_overflow()
        return api_info_struct if self.has_overflow else None

    def analyze_backward(self, name, module, module_input_output: ModuleBackwardInputsOutputs):
        """Analyze a backward call; return its record only if it overflowed."""
        self.has_overflow = False
        self._is_support_inf_nan()
        api_info_struct = super().analyze_backward(name, module, module_input_output)
        self.handle_overflow()
        return api_info_struct if self.has_overflow else None

    def handle_overflow(self):
        """Flush cached tensors to disk if an overflow occurred, then drop the cache."""
        if not self.support_inf_nan:
            # Devices without inf/nan mode report overflow via a hardware flag.
            self._analyze_maybe_overflow_flag()
        if self.has_overflow:
            for file_path, tensor in self.cached_tensors_and_file_paths.items():
                save_pt(tensor, file_path)
            self.real_overflow_nums += 1
        self.cached_tensors_and_file_paths = {}

    def _is_support_inf_nan(self):
        """Probe once whether the current device supports inf/nan mode."""
        if self.support_inf_nan is not None:
            return
        try:
            self.support_inf_nan = is_gpu or torch_npu.npu.utils.is_support_inf_nan()
        except Exception:
            # Fixed F541: the message has no placeholders, so no f-string is needed.
            logger.warning("Unable to determine if the current device supports inf/nan mode, default not supported.")
            self.support_inf_nan = False

    def _analyze_maybe_overflow_flag(self):
        """Read (and clear) the NPU hardware overflow flag into self.has_overflow.

        Raises:
            RuntimeError: if the flag cannot be read from the device.
        """
        try:
            self.has_overflow = torch_npu.npu.utils.get_npu_overflow_flag()
            if self.has_overflow:
                torch_npu.npu.utils.clear_npu_overflow_flag()
        except Exception as e:
            # Fixed F541 on both the log message and the raised message.
            logger.error("Overflow check failed, the current environment may be abnormal.")
            raise RuntimeError("overflow check failed") from e

    def _analyze_maybe_overflow_tensor(self, tensor_json):
        """Set self.has_overflow if the tensor's recorded Max/Min is inf or nan."""
        if tensor_json['Max'] is None or tensor_json['Min'] is None:
            return
        self.has_overflow = np.isinf(tensor_json['Max']) or np.isnan(tensor_json['Max']) or \
                            np.isinf(tensor_json['Min']) or np.isnan(tensor_json['Min'])

    def _analyze_tensor(self, tensor, suffix):
        """Analyze one tensor: cache it for a potential dump and check its stats for overflow."""
        dump_data_name, file_path = self.get_save_file_path(suffix)
        if not path_len_exceeds_limit(file_path):
            self.cached_tensors_and_file_paths.update({file_path: tensor})
        else:
            logger.warning(f'The file path {file_path} length exceeds limit.')
        single_arg = super()._analyze_tensor(tensor, suffix)
        single_arg.update({"data_name": dump_data_name})
        if not self.has_overflow and self.support_inf_nan:
            self._analyze_maybe_overflow_tensor(single_arg)
        return single_arg
class FreeBenchmarkDataProcessor(PytorchDataProcessor):
    """Processor that runs free-benchmark perturbation checks around API calls."""

    def __init__(self, config, data_writer):
        super().__init__(config, data_writer)
        self.checker = FreeBenchmarkCheck(config=config)
        self._return_forward_new_output = None
        self._forward_new_output = None

    def update_iter(self, current_iter):
        """Propagate the iteration counter to both the base processor and the checker."""
        super().update_iter(current_iter)
        self.checker.update_iter(current_iter)

    def update_unequal_rows(self, unequal_rows: List[UnequalRow]):
        """Append every unequal-comparison row to the free-benchmark CSV file."""
        if not unequal_rows:
            return
        for unequal_row in unequal_rows:
            row_fields = asdict(unequal_row)
            self.data_writer.write_data_to_csv(
                row_fields.values(),
                row_fields.keys(),
                self.data_writer.free_benchmark_file_path
            )

    def analyze_pre_forward(self, name, module, module_input_output: ModuleForwardInputsOutputs):
        """Let the checker observe the call before the real forward runs."""
        self.checker.pre_forward(name, module, self, module_input_output.args, module_input_output.kwargs)

    def analyze_forward(self, name, module, module_input_output: ModuleForwardInputsOutputs):
        """Run the perturbed forward, record mismatches, and keep a fixed output if requested."""
        perturbed_output, unequal_rows = self.checker.forward(
            name,
            module,
            module_input_output.args,
            module_input_output.kwargs,
            module_input_output.output,
        )
        self.update_unequal_rows(unequal_rows)
        if self.checker.if_fix():
            self._return_forward_new_output = True
            self._forward_new_output = perturbed_output

    def analyze_backward(self, name, module, module_input_output: ModuleBackwardInputsOutputs):
        """Let the checker examine the gradients flowing into the module."""
        self.checker.backward(name, module, module_input_output.grad_input)
class KernelDumpDataProcessor(PytorchDataProcessor):
    """Processor that captures kernel-level (ACL) dumps by replaying an API call
    between ``torch_npu.npu.init_dump()`` and ``torch_npu.npu.finalize_dump()``.
    """
    # Class-level guard so only one kernel dump replay runs at a time.
    # NOTE(review): plain class attribute — not thread-safe; presumably hook
    # dispatch is single-threaded, confirm before relying on it.
    forward_init_status = False
    # APIs whose forward returns a sequence; backward is driven via output[0].
    multi_output_apis = ["_sort_", "npu_flash_attention"]

    def __init__(self, config, data_writer):
        super().__init__(config, data_writer)

    def analyze_forward(self, name, module, module_input_output):
        """Dispatch to a forward- or backward-phase ACL dump depending on config."""
        if self.config.is_forward_acl_dump:
            self.forward_acl_dump(name, module, module_input_output)
        else:
            self.dump_mode_backward_acl_dump(name, module, module_input_output)

    def forward_acl_dump(self, name, module, module_input_output):
        """Re-run the module forward with ACL dumping enabled to capture its kernels."""
        if not KernelDumpDataProcessor.forward_init_status:
            KernelDumpDataProcessor.forward_init_status = True
            torch_npu.npu.synchronize()
            torch_npu.npu.init_dump()
            torch_npu.npu.set_dump(self.config.acl_config)
            torch_npu.npu.synchronize()
            if self.op_need_trigger(name):
                # .cpu() forces device->host transfer so the op actually executes.
                module.forward(*module_input_output.args, **module_input_output.kwargs).cpu()
            else:
                module.forward(*module_input_output.args, **module_input_output.kwargs)
            torch_npu.npu.synchronize()
            torch_npu.npu.finalize_dump()
            torch_npu.npu.synchronize()
        KernelDumpDataProcessor.forward_init_status = False
        # Lazy %-args instead of eager "%" formatting (same message text).
        logger.info("Dump %s op file.", name)

    def acl_backward_dump_status(self, output, grad, module_name):
        """Drive backward() on the replayed output; return True if it could be triggered.

        Tensor outputs are differentiated directly; for APIs known to return
        multiple outputs, backward is driven through output[0].
        """
        if isinstance(output, torch.Tensor):
            output.backward(grad, retain_graph=True)
            return True

        for api_name in KernelDumpDataProcessor.multi_output_apis:
            if api_name in module_name:
                output[0].backward(grad, retain_graph=True)
                return True
        return False

    def dump_mode_backward_acl_dump(self, name, module, module_input_output):
        """Replay forward, then run backward with a saved gradient while ACL dumping."""
        grad_path = self.config.backward_input.get(name)
        if not KernelDumpDataProcessor.forward_init_status:
            KernelDumpDataProcessor.forward_init_status = True
            output = module.forward(*module_input_output.args, **module_input_output.kwargs)
            pt = load_pt(grad_path)
            grad = pt.to("npu").requires_grad_()
            torch_npu.npu.init_dump()
            torch_npu.npu.set_dump(self.config.acl_config)
            torch_npu.npu.synchronize()
            if not self.acl_backward_dump_status(output, grad, name):
                # Lazy %-args instead of str.format (same message text).
                logger.warning("The output of %s is not of tensor type and cannot be automatically derived. "
                               "you can manually construct a single API backward case for ACL dump.", name)
            torch_npu.npu.synchronize()
            torch_npu.npu.finalize_dump()
        KernelDumpDataProcessor.forward_init_status = False
        logger.info("Dump %s op file.", name)

    def op_need_trigger(self, module_name):
        """Return True for ops (Tensor.__getitem__) that need a .cpu() call to execute."""
        return 'Tensor.__getitem__.' in module_name