mindstudio-probe 1.0.3__py3-none-any.whl → 1.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/LICENSE +201 -201
- {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/METADATA +36 -34
- mindstudio_probe-1.1.0.dist-info/RECORD +287 -0
- {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/WHEEL +1 -1
- {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/entry_points.txt +1 -0
- msprobe/README.md +131 -237
- msprobe/__init__.py +16 -1
- msprobe/{config/config.json → config.json} +47 -49
- msprobe/core/advisor/advisor.py +124 -124
- msprobe/core/advisor/advisor_const.py +58 -59
- msprobe/core/advisor/advisor_result.py +58 -58
- msprobe/core/common/const.py +402 -318
- msprobe/core/common/exceptions.py +99 -99
- msprobe/core/common/{file_check.py → file_utils.py} +523 -283
- msprobe/core/common/inplace_op_checker.py +38 -0
- msprobe/core/common/inplace_ops.yaml +251 -0
- msprobe/core/common/log.py +86 -69
- msprobe/core/common/utils.py +371 -616
- msprobe/core/common_config.py +78 -71
- msprobe/core/compare/acc_compare.py +472 -298
- msprobe/core/compare/check.py +180 -95
- msprobe/core/compare/compare_cli.py +69 -49
- msprobe/core/compare/highlight.py +259 -222
- msprobe/core/compare/multiprocessing_compute.py +174 -149
- msprobe/core/compare/npy_compare.py +310 -295
- msprobe/core/compare/utils.py +464 -429
- msprobe/core/data_dump/data_collector.py +153 -144
- msprobe/core/data_dump/data_processor/base.py +337 -293
- msprobe/core/data_dump/data_processor/factory.py +76 -59
- msprobe/core/data_dump/data_processor/mindspore_processor.py +192 -198
- msprobe/core/data_dump/data_processor/pytorch_processor.py +383 -389
- msprobe/core/data_dump/json_writer.py +117 -116
- msprobe/core/data_dump/scope.py +194 -178
- msprobe/core/grad_probe/constant.py +74 -70
- msprobe/core/grad_probe/grad_compare.py +170 -175
- msprobe/core/grad_probe/utils.py +77 -52
- msprobe/docs/01.installation.md +99 -0
- msprobe/docs/02.config_introduction.md +137 -0
- msprobe/docs/03.config_examples.md +237 -0
- msprobe/docs/04.acl_config_examples.md +78 -0
- msprobe/docs/05.data_dump_PyTorch.md +326 -0
- msprobe/docs/06.data_dump_MindSpore.md +285 -0
- msprobe/docs/07.accuracy_checker_PyTorch.md +297 -0
- msprobe/docs/08.accuracy_checker_online_PyTorch.md +238 -0
- msprobe/docs/09.accuracy_checker_MindSpore.md +68 -0
- msprobe/docs/10.accuracy_compare_PyTorch.md +327 -0
- msprobe/docs/11.accuracy_compare_MindSpore.md +333 -0
- msprobe/docs/12.overflow_check_PyTorch.md +79 -0
- msprobe/docs/13.overflow_check_MindSpore.md +31 -0
- msprobe/{pytorch/doc/parse_tool.md → docs/14.data_parse_PyTorch.md} +283 -286
- msprobe/docs/15.free_benchmarking_PyTorch.md +170 -0
- msprobe/docs/16.free_benchmarking_MindSpore.md +140 -0
- msprobe/{doc/grad_probe/grad_probe.md → docs/17.grad_probe.md} +205 -207
- msprobe/{pytorch/doc//321/205/320/254/320/270/321/207/342/225/221/342/224/220/321/207/342/226/223/342/225/233/321/205/342/225/221/320/266/321/206/320/277/320/244/321/205/320/277/342/225/243.md → docs/18.online_dispatch.md} +89 -90
- msprobe/docs/FAQ.md +189 -0
- msprobe/docs/S02.report_free_benchmarking_validation_performance_baseline.md +146 -0
- msprobe/docs/img/free_benchmark_framework.png +0 -0
- msprobe/docs/img/ms_dump.png +0 -0
- msprobe/docs/img/ms_layer.png +0 -0
- msprobe/docs/img/pt_dump.png +0 -0
- msprobe/mindspore/__init__.py +2 -1
- msprobe/mindspore/api_accuracy_checker/api_accuracy_checker.py +278 -245
- msprobe/mindspore/api_accuracy_checker/api_info.py +76 -69
- msprobe/mindspore/api_accuracy_checker/api_runner.py +155 -151
- msprobe/mindspore/api_accuracy_checker/base_compare_algorithm.py +196 -196
- msprobe/mindspore/api_accuracy_checker/cmd_parser.py +6 -0
- msprobe/mindspore/api_accuracy_checker/compute_element.py +238 -223
- msprobe/mindspore/api_accuracy_checker/main.py +8 -15
- msprobe/mindspore/api_accuracy_checker/type_mapping.py +113 -113
- msprobe/mindspore/api_accuracy_checker/utils.py +79 -62
- msprobe/mindspore/cell_processor.py +58 -34
- msprobe/mindspore/common/const.py +108 -87
- msprobe/mindspore/common/log.py +37 -37
- msprobe/mindspore/common/utils.py +97 -57
- msprobe/mindspore/compare/distributed_compare.py +62 -75
- msprobe/mindspore/compare/layer_mapping.py +146 -0
- msprobe/mindspore/compare/modify_mapping.py +107 -0
- msprobe/mindspore/compare/ms_compare.py +357 -117
- msprobe/mindspore/compare/ms_graph_compare.py +364 -317
- msprobe/mindspore/compare/ms_to_pt_api.yaml +399 -399
- msprobe/mindspore/debugger/debugger_config.py +69 -74
- msprobe/mindspore/debugger/precision_debugger.py +150 -107
- msprobe/mindspore/dump/dump_tool_factory.py +50 -35
- msprobe/mindspore/dump/hook_cell/api_registry.py +128 -104
- msprobe/mindspore/dump/hook_cell/hook_cell.py +55 -53
- msprobe/mindspore/dump/hook_cell/primitive_hooks.py +206 -0
- msprobe/mindspore/dump/hook_cell/support_wrap_ops.yaml +994 -925
- msprobe/mindspore/dump/hook_cell/wrap_api.py +121 -0
- msprobe/mindspore/dump/jit_dump.py +96 -56
- msprobe/mindspore/dump/kernel_graph_dump.py +75 -60
- msprobe/mindspore/dump/kernel_kbyk_dump.py +79 -65
- msprobe/mindspore/free_benchmark/api_pynative_self_check.py +131 -116
- msprobe/mindspore/free_benchmark/common/config.py +27 -12
- msprobe/mindspore/free_benchmark/common/handler_params.py +32 -17
- msprobe/mindspore/free_benchmark/common/utils.py +85 -71
- msprobe/mindspore/free_benchmark/data/support_wrap_ops.yaml +842 -842
- msprobe/mindspore/free_benchmark/decorator/dec_forward.py +57 -42
- msprobe/mindspore/free_benchmark/decorator/decorator_factory.py +122 -107
- msprobe/mindspore/free_benchmark/handler/base_handler.py +105 -90
- msprobe/mindspore/free_benchmark/handler/check_handler.py +56 -41
- msprobe/mindspore/free_benchmark/handler/fix_handler.py +51 -36
- msprobe/mindspore/free_benchmark/handler/handler_factory.py +36 -21
- msprobe/mindspore/free_benchmark/perturbation/add_noise.py +82 -67
- msprobe/mindspore/free_benchmark/perturbation/base_perturbation.py +36 -21
- msprobe/mindspore/free_benchmark/perturbation/bit_noise.py +78 -63
- msprobe/mindspore/free_benchmark/perturbation/exchange_value.py +77 -0
- msprobe/mindspore/free_benchmark/perturbation/improve_precision.py +49 -34
- msprobe/mindspore/free_benchmark/perturbation/no_change.py +27 -12
- msprobe/mindspore/free_benchmark/perturbation/perturbation_factory.py +44 -27
- msprobe/mindspore/free_benchmark/self_check_tool_factory.py +48 -33
- msprobe/mindspore/grad_probe/global_context.py +100 -91
- msprobe/mindspore/grad_probe/grad_analyzer.py +231 -231
- msprobe/mindspore/grad_probe/grad_monitor.py +27 -27
- msprobe/mindspore/grad_probe/grad_stat_csv.py +131 -131
- msprobe/mindspore/grad_probe/hook.py +94 -92
- msprobe/mindspore/grad_probe/utils.py +29 -28
- msprobe/mindspore/ms_config.py +128 -126
- msprobe/mindspore/overflow_check/kernel_graph_overflow_check.py +60 -45
- msprobe/mindspore/overflow_check/overflow_check_tool_factory.py +49 -34
- msprobe/mindspore/runtime.py +4 -4
- msprobe/mindspore/service.py +297 -354
- msprobe/mindspore/task_handler_factory.py +24 -24
- msprobe/msprobe.py +105 -107
- msprobe/pytorch/__init__.py +23 -4
- msprobe/pytorch/api_accuracy_checker/common/config.py +70 -55
- msprobe/pytorch/api_accuracy_checker/common/utils.py +246 -165
- msprobe/pytorch/api_accuracy_checker/compare/algorithm.py +230 -213
- msprobe/pytorch/api_accuracy_checker/compare/api_precision_compare.py +632 -581
- msprobe/pytorch/api_accuracy_checker/compare/api_precision_standard.yaml +132 -132
- msprobe/pytorch/api_accuracy_checker/compare/api_precision_threshold.yaml +390 -390
- msprobe/pytorch/api_accuracy_checker/compare/compare.py +416 -381
- msprobe/pytorch/api_accuracy_checker/compare/compare_column.py +90 -73
- msprobe/pytorch/api_accuracy_checker/compare/compare_utils.py +265 -244
- msprobe/pytorch/api_accuracy_checker/config.yaml +10 -10
- msprobe/pytorch/api_accuracy_checker/run_ut/data_generate.py +370 -332
- msprobe/pytorch/api_accuracy_checker/run_ut/multi_run_ut.py +221 -199
- msprobe/pytorch/api_accuracy_checker/run_ut/run_overflow_check.py +150 -134
- msprobe/pytorch/api_accuracy_checker/run_ut/run_ut.py +518 -581
- msprobe/pytorch/api_accuracy_checker/run_ut/run_ut_utils.py +213 -74
- msprobe/pytorch/api_accuracy_checker/run_ut/torch_ut_setting.json +7 -4
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/attl.py +218 -202
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/client.py +370 -324
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/device_dispatch.py +227 -204
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/dump_dispatch.py +110 -0
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/server.py +244 -218
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/torch_ops_config.yaml +63 -0
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/utils.py +44 -0
- msprobe/pytorch/bench_functions/__init__.py +30 -15
- msprobe/pytorch/bench_functions/apply_adam_w.py +43 -28
- msprobe/pytorch/bench_functions/confusion_transpose.py +34 -19
- msprobe/pytorch/bench_functions/fast_gelu.py +70 -55
- msprobe/pytorch/bench_functions/layer_norm_eval.py +21 -6
- msprobe/pytorch/bench_functions/linear.py +27 -12
- msprobe/pytorch/bench_functions/matmul_backward.py +63 -48
- msprobe/pytorch/bench_functions/npu_fusion_attention.py +538 -421
- msprobe/pytorch/bench_functions/rms_norm.py +30 -15
- msprobe/pytorch/bench_functions/rotary_mul.py +71 -52
- msprobe/pytorch/bench_functions/scaled_mask_softmax.py +41 -26
- msprobe/pytorch/bench_functions/swiglu.py +70 -55
- msprobe/pytorch/common/__init__.py +17 -2
- msprobe/pytorch/common/compare_script.template +14 -14
- msprobe/pytorch/common/log.py +33 -32
- msprobe/pytorch/common/parse_json.py +54 -39
- msprobe/pytorch/common/utils.py +310 -300
- msprobe/pytorch/compare/distributed_compare.py +66 -66
- msprobe/pytorch/compare/mapping.yaml +607 -607
- msprobe/pytorch/compare/match.py +49 -33
- msprobe/pytorch/compare/pt_compare.py +82 -40
- msprobe/pytorch/debugger/debugger_config.py +108 -95
- msprobe/pytorch/debugger/precision_debugger.py +173 -125
- msprobe/pytorch/free_benchmark/__init__.py +23 -8
- msprobe/pytorch/free_benchmark/common/constant.py +70 -70
- msprobe/pytorch/free_benchmark/common/counter.py +71 -71
- msprobe/pytorch/free_benchmark/common/enums.py +65 -37
- msprobe/pytorch/free_benchmark/common/params.py +144 -129
- msprobe/pytorch/free_benchmark/common/utils.py +118 -102
- msprobe/pytorch/free_benchmark/compare/grad_saver.py +200 -179
- msprobe/pytorch/free_benchmark/compare/single_benchmark.py +119 -104
- msprobe/pytorch/free_benchmark/main.py +120 -105
- msprobe/pytorch/free_benchmark/perturbed_layers/base_layer.py +28 -13
- msprobe/pytorch/free_benchmark/perturbed_layers/layer_factory.py +56 -41
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/add_noise.py +105 -90
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/bit_noise.py +119 -104
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/change_value.py +87 -63
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/improve_precision.py +83 -68
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/no_change.py +43 -28
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/npu_base_layser.py +60 -45
- msprobe/pytorch/free_benchmark/perturbed_layers/run_cpu.py +34 -19
- msprobe/pytorch/free_benchmark/result_handlers/base_handler.py +256 -217
- msprobe/pytorch/free_benchmark/result_handlers/check_handler.py +54 -39
- msprobe/pytorch/free_benchmark/result_handlers/fix_handler.py +38 -23
- msprobe/pytorch/free_benchmark/result_handlers/handler_factory.py +45 -30
- msprobe/pytorch/free_benchmark/result_handlers/preheat_handler.py +185 -170
- msprobe/pytorch/function_factory.py +91 -75
- msprobe/pytorch/functional/module_dump.py +84 -0
- msprobe/pytorch/grad_probe/grad_monitor.py +91 -90
- msprobe/pytorch/grad_probe/grad_stat_csv.py +128 -128
- msprobe/pytorch/hook_module/__init__.py +16 -1
- msprobe/pytorch/hook_module/api_registry.py +166 -161
- msprobe/pytorch/hook_module/hook_module.py +118 -120
- msprobe/pytorch/hook_module/support_wrap_ops.yaml +1879 -1877
- msprobe/pytorch/hook_module/utils.py +28 -29
- msprobe/pytorch/hook_module/wrap_aten.py +111 -110
- msprobe/pytorch/hook_module/wrap_distributed.py +77 -78
- msprobe/pytorch/hook_module/wrap_functional.py +104 -105
- msprobe/pytorch/hook_module/wrap_npu_custom.py +85 -84
- msprobe/pytorch/hook_module/wrap_tensor.py +69 -71
- msprobe/pytorch/hook_module/wrap_torch.py +84 -86
- msprobe/pytorch/hook_module/wrap_vf.py +60 -62
- msprobe/pytorch/module_processer.py +153 -138
- msprobe/pytorch/online_dispatch/__init__.py +20 -20
- msprobe/pytorch/online_dispatch/compare.py +235 -236
- msprobe/pytorch/online_dispatch/dispatch.py +271 -271
- msprobe/pytorch/online_dispatch/dump_compare.py +155 -156
- msprobe/pytorch/online_dispatch/single_compare.py +391 -391
- msprobe/pytorch/online_dispatch/torch_ops_config.yaml +57 -49
- msprobe/pytorch/online_dispatch/utils.py +127 -146
- msprobe/pytorch/parse.py +19 -4
- msprobe/pytorch/parse_tool/cli.py +31 -32
- msprobe/pytorch/parse_tool/lib/compare.py +259 -271
- msprobe/pytorch/parse_tool/lib/config.py +52 -52
- msprobe/pytorch/parse_tool/lib/file_desc.py +31 -31
- msprobe/pytorch/parse_tool/lib/interactive_cli.py +102 -102
- msprobe/pytorch/parse_tool/lib/parse_exception.py +54 -54
- msprobe/pytorch/parse_tool/lib/parse_tool.py +161 -158
- msprobe/pytorch/parse_tool/lib/utils.py +320 -321
- msprobe/pytorch/parse_tool/lib/visualization.py +85 -91
- msprobe/pytorch/pt_config.py +317 -187
- msprobe/pytorch/service.py +311 -252
- mindstudio_probe-1.0.3.dist-info/RECORD +0 -272
- msprobe/config/README.md +0 -539
- msprobe/mindspore/doc/compare.md +0 -58
- msprobe/mindspore/doc/dump.md +0 -217
- msprobe/mindspore/dump/hook_cell/wrap_functional.py +0 -91
- msprobe/mindspore/dump/hook_cell/wrap_tensor.py +0 -63
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/ssl_config.py +0 -10
- msprobe/pytorch/doc/FAQ.md +0 -193
- msprobe/pytorch/doc/api_accuracy_checker.md +0 -313
- msprobe/pytorch/doc/api_accuracy_checker_online.md +0 -187
- msprobe/pytorch/doc/dump.md +0 -260
- msprobe/pytorch/doc/msprobe/321/207/342/226/223/342/225/233/321/205/342/225/221/320/266/321/205/342/225/226/320/265/321/205/320/225/342/225/226/321/206/320/245/342/226/221/321/206/320/235/320/276dump/321/206/320/260/320/227/321/205/320/227/320/226/321/206/320/220/320/267/321/210/320/223/342/225/234/321/205/320/257/342/225/221/321/207/342/225/221/342/224/220/321/206/320/232/320/265/321/205/320/241/320/232.md +0 -182
- msprobe/pytorch/doc/ptdbg_ascend_compare.md +0 -240
- msprobe/pytorch/doc/ptdbg_ascend_overview.md +0 -68
- msprobe/pytorch/doc/ptdbg_ascend_quickstart.md +0 -381
- msprobe/pytorch/doc/run_overflow_check.md +0 -25
- msprobe/pytorch/doc//321/206/320/247/320/260/321/206/320/260/320/227/321/206/320/255/320/226/321/205/342/225/226/320/265/321/205/320/225/342/225/226/321/205/320/254/342/225/221/321/206/320/251/320/277/321/211/320/272/320/234/321/210/320/277/320/221/321/205/320/242/320/234/321/206/320/220/320/267/321/210/320/223/342/225/234/321/205/320/257/342/225/221/321/207/342/225/221/342/224/220/321/206/320/232/320/265/321/205/320/241/320/232.md +0 -151
- msprobe/pytorch/functional/data_processor.py +0 -0
- msprobe/pytorch/functional/dump_module.py +0 -39
- {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/top_level.txt +0 -0
- /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_1.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_2.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_3.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_4.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_1.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_2.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_3.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_4.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_5.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_6.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_7.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_8.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/YOLOV5S_1.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/YOLOV5S_2.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/accuracy_checking_details.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/accuracy_checking_result.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/api_precision_compare_details.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/api_precision_compare_result.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/auto_analyze_log.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/compare_result_pkl.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/compare_result_pkl_md5.png.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/cpu_info.png +0 -0
- /msprobe/{config → docs}/img/free_benchmark.png +0 -0
- /msprobe/{doc/grad_probe/img/image-1.png → docs/img/grad_probe_image-1.png} +0 -0
- /msprobe/{doc/grad_probe/img/image-2.png → docs/img/grad_probe_image-2.png} +0 -0
- /msprobe/{doc/grad_probe/img/image-3.png → docs/img/grad_probe_image-3.png} +0 -0
- /msprobe/{doc/grad_probe/img/image-4.png → docs/img/grad_probe_image-4.png} +0 -0
- /msprobe/{doc/grad_probe/img/image.png → docs/img/grad_probe_image.png} +0 -0
- /msprobe/{pytorch/doc → docs}/img/module_compare.png +0 -0
|
@@ -1,272 +1,272 @@
|
|
|
1
|
-
import os
|
|
2
|
-
import time
|
|
3
|
-
import json
|
|
4
|
-
from
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
from msprobe.core.common.
|
|
19
|
-
from msprobe.
|
|
20
|
-
from msprobe.pytorch.
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
from .
|
|
24
|
-
from .
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
current_time = time.strftime("%Y%m%d%H%M%S")
|
|
28
|
-
RESULT_FILE_NAME = "accuracy_checking_result_" + current_time + ".csv"
|
|
29
|
-
DETAILS_FILE_NAME = "accuracy_checking_details_" + current_time + ".csv"
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
class PtdbgDispatch(TorchDispatchMode):
|
|
33
|
-
def __init__(self, dump_mode=Const.OFF, api_list=None, debug=False, dump_path=None, tag=None, process_num=0):
|
|
34
|
-
super(PtdbgDispatch, self).__init__()
|
|
35
|
-
logger.info(COMPARE_LOGO)
|
|
36
|
-
if not is_npu:
|
|
37
|
-
logger.error("Please confirm you run environment installed torch_npu!")
|
|
38
|
-
return
|
|
39
|
-
if dump_path is None:
|
|
40
|
-
logger.error("Please set dump_path when dump_mode is config!")
|
|
41
|
-
check_file_or_directory_path(dump_path, True)
|
|
42
|
-
|
|
43
|
-
self.device_id = torch_npu._C._npu_getDevice()
|
|
44
|
-
self.dump_mode = dump_mode
|
|
45
|
-
self.dump_api_list = api_list
|
|
46
|
-
self.debug_flag = debug
|
|
47
|
-
self.api_index = 0
|
|
48
|
-
self.single_api_index_dict = {}
|
|
49
|
-
self.device_dump_path_cpu = None
|
|
50
|
-
self.device_dump_path_npu = None
|
|
51
|
-
self.all_summary = []
|
|
52
|
-
self.call_stack_list = []
|
|
53
|
-
self.process_num = process_num
|
|
54
|
-
self.filter_dump_api()
|
|
55
|
-
self.check_param()
|
|
56
|
-
dir_name = self.get_dir_name(tag)
|
|
57
|
-
self.root_path = os.path.join(os.path.realpath(dump_path), dir_name)
|
|
58
|
-
self.root_cpu_path = os.path.join(self.root_path, f'cpu')
|
|
59
|
-
self.root_npu_path = os.path.join(self.root_path, f'npu')
|
|
60
|
-
check_path_before_create(self.root_cpu_path)
|
|
61
|
-
check_path_before_create(self.root_npu_path)
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
self.result_csv_path = os.path.join(self.root_path, RESULT_FILE_NAME)
|
|
66
|
-
self.detail_csv_path = os.path.join(self.root_path, DETAILS_FILE_NAME)
|
|
67
|
-
self.comparator = Comparator(self.result_csv_path, self.detail_csv_path, False)
|
|
68
|
-
|
|
69
|
-
self.aten_ops_blacklist = []
|
|
70
|
-
self.npu_adjust_autogard = []
|
|
71
|
-
yaml_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "torch_ops_config.yaml")
|
|
72
|
-
self.get_ops(yaml_path)
|
|
73
|
-
|
|
74
|
-
self.lock = None
|
|
75
|
-
if process_num > 0:
|
|
76
|
-
self.pool = Pool(process_num)
|
|
77
|
-
if debug:
|
|
78
|
-
logger.info(f'Main pid:{os.getpid()} device:{self.device_id} dump_list:{self.dump_api_list} '
|
|
79
|
-
f'dump_mode:{self.dump_mode} cpu_path[{self.root_cpu_path}], npu_path[{self.root_npu_path}], '
|
|
80
|
-
f'process[{process_num}]')
|
|
81
|
-
|
|
82
|
-
def __exit__(self, exc_type, exc_val, exc_tb):
|
|
83
|
-
super().__exit__(exc_type, exc_val, exc_tb)
|
|
84
|
-
|
|
85
|
-
if not is_npu:
|
|
86
|
-
return
|
|
87
|
-
logger.info(f'start write compare csv: Rank[{self.device_id}], Pid[{os.getpid()}')
|
|
88
|
-
|
|
89
|
-
if self.process_num > 0:
|
|
90
|
-
self.pool.close()
|
|
91
|
-
self.pool.join()
|
|
92
|
-
summary_path = os.path.join(self.root_cpu_path, f'summary.json')
|
|
93
|
-
if not os.path.exists(summary_path):
|
|
94
|
-
logger.error("Please check train log, An exception may have occurred!")
|
|
95
|
-
return
|
|
96
|
-
check_file_or_directory_path(summary_path, False)
|
|
97
|
-
fp_handle =
|
|
98
|
-
while True:
|
|
99
|
-
json_line_data = fp_handle.readline()
|
|
100
|
-
if json_line_data == '\n':
|
|
101
|
-
continue
|
|
102
|
-
if len(json_line_data) == 0:
|
|
103
|
-
break
|
|
104
|
-
msg = json.loads(json_line_data)
|
|
105
|
-
self.all_summary[msg[0]] = msg[1]
|
|
106
|
-
fp_handle.close()
|
|
107
|
-
|
|
108
|
-
if self.debug_flag:
|
|
109
|
-
input_num = 0
|
|
110
|
-
output_num = 0
|
|
111
|
-
total_num = 0
|
|
112
|
-
|
|
113
|
-
for list_data in self.all_summary:
|
|
114
|
-
for data in list_data:
|
|
115
|
-
logger.info(f'summary: Device[{self.device_id}], Pid[{os.getpid()}], Data[{data}]')
|
|
116
|
-
if "_input" in data[CompareConst.NPU_NAME]:
|
|
117
|
-
input_num = input_num + 1
|
|
118
|
-
if "_output" in data[CompareConst.NPU_NAME]:
|
|
119
|
-
output_num = output_num + 1
|
|
120
|
-
total_num = total_num + 1
|
|
121
|
-
logger.info(f'Dispatch exit: Device[{self.device_id}], Pid[{os.getpid()} Input[{input_num}] '
|
|
122
|
-
f'Output[{output_num}] Total[{total_num}] API_Total[{self.api_index}]]')
|
|
123
|
-
|
|
124
|
-
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
|
|
125
|
-
if not is_npu:
|
|
126
|
-
logger.error("Please confirm you run environment installed torch_npu!")
|
|
127
|
-
return func(*args, **kwargs)
|
|
128
|
-
|
|
129
|
-
func_name_split_list = func.__name__.split(".")
|
|
130
|
-
aten_api = func_name_split_list[0]
|
|
131
|
-
try:
|
|
132
|
-
aten_api_overload_name = func_name_split_list[1]
|
|
133
|
-
except IndexError:
|
|
134
|
-
logger.error(f"Please check the func name {func.__name__}!")
|
|
135
|
-
return func(*args, **kwargs)
|
|
136
|
-
|
|
137
|
-
self.enable_autogard(aten_api)
|
|
138
|
-
if aten_api in self.aten_ops_blacklist:
|
|
139
|
-
npu_out = func(*args, **kwargs)
|
|
140
|
-
return npu_out
|
|
141
|
-
|
|
142
|
-
call_stack = get_callstack()
|
|
143
|
-
self.call_stack_list.append(call_stack)
|
|
144
|
-
self.api_index += 1
|
|
145
|
-
if aten_api not in self.single_api_index_dict:
|
|
146
|
-
self.single_api_index_dict[aten_api] = 1
|
|
147
|
-
else:
|
|
148
|
-
self.single_api_index_dict[aten_api] += 1
|
|
149
|
-
|
|
150
|
-
run_param = self.get_run_param(aten_api, func.__name__, aten_api_overload_name)
|
|
151
|
-
|
|
152
|
-
if self.debug_flag:
|
|
153
|
-
logger.info(f'Dispatch Info: Rank[{self.device_id}], Pid[{os.getpid()}], Func[{func.__name__}], '
|
|
154
|
-
f'Name[{run_param.aten_api}_{run_param.single_api_index}], '
|
|
155
|
-
f'Count[{self.api_index}], Sys[{get_sys_info()}]')
|
|
156
|
-
|
|
157
|
-
cpu_args = []
|
|
158
|
-
cpu_kwargs = []
|
|
159
|
-
data_to_cpu(args, 0, cpu_args)
|
|
160
|
-
data_to_cpu(kwargs, 0, cpu_kwargs)
|
|
161
|
-
cpu_args = cpu_args[0]
|
|
162
|
-
cpu_kwargs = cpu_kwargs[0]
|
|
163
|
-
|
|
164
|
-
with TimeStatistics("NPU RUN", run_param):
|
|
165
|
-
npu_out = func(*args, **kwargs)
|
|
166
|
-
npu_out_cpu = []
|
|
167
|
-
data_to_cpu(npu_out, 0, npu_out_cpu)
|
|
168
|
-
npu_out_cpu = npu_out_cpu[0]
|
|
169
|
-
|
|
170
|
-
with TimeStatistics("CPU RUN", run_param):
|
|
171
|
-
cpu_out = func(*cpu_args, **cpu_kwargs)
|
|
172
|
-
|
|
173
|
-
if isinstance(cpu_out, torch.Tensor) and cpu_out.dtype in [torch.bfloat16, torch.float16, torch.half]:
|
|
174
|
-
cpu_out = cpu_out.float()
|
|
175
|
-
|
|
176
|
-
if self.process_num == 0:
|
|
177
|
-
self.all_summary.append([])
|
|
178
|
-
data_info = DisPatchDataInfo(cpu_args, cpu_kwargs, self.all_summary, func, npu_out_cpu, cpu_out, self.lock)
|
|
179
|
-
dispatch_workflow(run_param, data_info)
|
|
180
|
-
else:
|
|
181
|
-
self.lock.acquire()
|
|
182
|
-
self.all_summary.append([])
|
|
183
|
-
self.lock.release()
|
|
184
|
-
run_param.process_flag = True
|
|
185
|
-
if self.check_fun(func, run_param):
|
|
186
|
-
data_info = DisPatchDataInfo(cpu_args, cpu_kwargs, self.all_summary, None, npu_out_cpu, cpu_out,
|
|
187
|
-
self.lock)
|
|
188
|
-
self.pool.apply_async(func=dispatch_multiprocess, args=(run_param, data_info),
|
|
189
|
-
error_callback=error_call)
|
|
190
|
-
else:
|
|
191
|
-
logger.error("can not get correct function please set process_num=0")
|
|
192
|
-
return npu_out
|
|
193
|
-
|
|
194
|
-
@staticmethod
|
|
195
|
-
def check_fun(func, run_param):
|
|
196
|
-
if hasattr(torch.ops.aten, run_param.aten_api):
|
|
197
|
-
aten_func = getattr(torch.ops.aten, run_param.aten_api)
|
|
198
|
-
if hasattr(aten_func, run_param.aten_api_overload_name):
|
|
199
|
-
aten_overload_func = getattr(aten_func, run_param.aten_api_overload_name)
|
|
200
|
-
if id(aten_overload_func) == id(func):
|
|
201
|
-
run_param.func_namespace = "aten"
|
|
202
|
-
return True
|
|
203
|
-
return False
|
|
204
|
-
|
|
205
|
-
def get_dir_name(self, tag):
|
|
206
|
-
# guarantee file uniqueness
|
|
207
|
-
time.sleep(1)
|
|
208
|
-
time_now = time.strftime("%Y%m%d%H%M%S", time.localtime(time.time()))
|
|
209
|
-
if tag is None or not isinstance(tag, str):
|
|
210
|
-
logger.warning('There is not tag or the type of tag is not string.')
|
|
211
|
-
dir_name = f'msprobe_rank{self.device_id}_{time_now}'
|
|
212
|
-
else:
|
|
213
|
-
dir_name = f'msprobe_{tag}_rank{self.device_id}_{time_now}'
|
|
214
|
-
return dir_name
|
|
215
|
-
|
|
216
|
-
def get_ops(self, file_path):
|
|
217
|
-
yaml_file = load_yaml(file_path)
|
|
218
|
-
self.aten_ops_blacklist = yaml_file.get('aten_ops_blacklist')
|
|
219
|
-
self.npu_adjust_autogard = yaml_file.get('npu_adjust_autogard')
|
|
220
|
-
|
|
221
|
-
def filter_dump_api(self):
|
|
222
|
-
if self.dump_mode != Const.LIST or not self.dump_api_list:
|
|
223
|
-
self.dump_api_list = []
|
|
224
|
-
return
|
|
225
|
-
aten_api_list = dir(torch.ops.aten)
|
|
226
|
-
dump_api_list = []
|
|
227
|
-
for aten_api in self.dump_api_list:
|
|
228
|
-
if aten_api in aten_api_list:
|
|
229
|
-
dump_api_list.append(aten_api)
|
|
230
|
-
else:
|
|
231
|
-
logger.warning(f'{aten_api} is not aten api will not dump, please refer to torch.ops.aten')
|
|
232
|
-
self.dump_api_list = dump_api_list
|
|
233
|
-
|
|
234
|
-
def get_run_param(self, aten_api, func_name, aten_api_overload_name):
|
|
235
|
-
run_param = DispatchRunParam(self.debug_flag, self.device_id, self.root_npu_path, self.root_cpu_path,
|
|
236
|
-
self.process_num, self.comparator)
|
|
237
|
-
run_param.dump_flag, run_param.auto_dump_flag = self.get_dump_flag(aten_api)
|
|
238
|
-
run_param.func_name = func_name
|
|
239
|
-
run_param.aten_api = aten_api
|
|
240
|
-
run_param.aten_api_overload_name = aten_api_overload_name
|
|
241
|
-
run_param.single_api_index = self.single_api_index_dict[aten_api]
|
|
242
|
-
run_param.api_index = self.api_index
|
|
243
|
-
return run_param
|
|
244
|
-
|
|
245
|
-
def get_dump_flag(self, aten_api):
|
|
246
|
-
dump_flag = False
|
|
247
|
-
auto_dump_flag = False
|
|
248
|
-
if self.dump_mode == Const.ALL:
|
|
249
|
-
dump_flag = True
|
|
250
|
-
if self.dump_mode == Const.LIST and aten_api in self.dump_api_list:
|
|
251
|
-
dump_flag = True
|
|
252
|
-
if self.dump_mode == Const.AUTO:
|
|
253
|
-
auto_dump_flag = True
|
|
254
|
-
return dump_flag, auto_dump_flag
|
|
255
|
-
|
|
256
|
-
def check_param(self):
|
|
257
|
-
if self.dump_mode not in Const.ONLINE_DUMP_MODE:
|
|
258
|
-
logger.error('The parameter "dump mode" can only be one of {}.'.format(Const.ONLINE_DUMP_MODE))
|
|
259
|
-
raise DispatchException(DispatchException.INVALID_PARAMETER)
|
|
260
|
-
if not isinstance(self.dump_api_list, list):
|
|
261
|
-
logger.error('The type of parameter "api_list" can only be list.')
|
|
262
|
-
raise DispatchException(DispatchException.INVALID_PARAMETER)
|
|
263
|
-
if not isinstance(self.debug_flag, bool):
|
|
264
|
-
logger.error('The type of parameter "debug" can only be bool.')
|
|
265
|
-
raise DispatchException(DispatchException.INVALID_PARAMETER)
|
|
266
|
-
if not isinstance(self.process_num, int) or self.process_num < 0:
|
|
267
|
-
logger.error('The type of parameter "process_num" can only be int and it should not be less than 0.')
|
|
268
|
-
raise DispatchException(DispatchException.INVALID_PARAMETER)
|
|
269
|
-
|
|
270
|
-
def enable_autogard(self, aten_api):
|
|
271
|
-
if aten_api in self.npu_adjust_autogard:
|
|
1
|
+
import os
|
|
2
|
+
import time
|
|
3
|
+
import json
|
|
4
|
+
from multiprocessing import Pool
|
|
5
|
+
|
|
6
|
+
import torch
|
|
7
|
+
|
|
8
|
+
from torch.utils._python_dispatch import TorchDispatchMode
|
|
9
|
+
|
|
10
|
+
try:
|
|
11
|
+
import torch_npu
|
|
12
|
+
except ImportError:
|
|
13
|
+
is_npu = False
|
|
14
|
+
else:
|
|
15
|
+
is_npu = True
|
|
16
|
+
|
|
17
|
+
from msprobe.core.common.file_utils import check_path_before_create, check_file_or_directory_path, load_yaml
|
|
18
|
+
from msprobe.core.common.const import Const, CompareConst
|
|
19
|
+
from msprobe.pytorch.common.log import logger
|
|
20
|
+
from msprobe.pytorch.online_dispatch.dump_compare import dispatch_workflow, dispatch_multiprocess, error_call, TimeStatistics, \
|
|
21
|
+
DispatchRunParam, DisPatchDataInfo
|
|
22
|
+
from msprobe.pytorch.online_dispatch.utils import get_callstack, data_to_cpu, get_sys_info, DispatchException, COMPARE_LOGO
|
|
23
|
+
from msprobe.pytorch.online_dispatch.compare import Comparator
|
|
24
|
+
from msprobe.core.common.file_utils import FileOpen, create_directory
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
current_time = time.strftime("%Y%m%d%H%M%S")
|
|
28
|
+
RESULT_FILE_NAME = "accuracy_checking_result_" + current_time + ".csv"
|
|
29
|
+
DETAILS_FILE_NAME = "accuracy_checking_details_" + current_time + ".csv"
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class PtdbgDispatch(TorchDispatchMode):
|
|
33
|
+
def __init__(self, dump_mode=Const.OFF, api_list=None, debug=False, dump_path=None, tag=None, process_num=0):
    """Accuracy-comparison dispatch mode: every aten op is run on both NPU and CPU.

    :param dump_mode: one of Const.ONLINE_DUMP_MODE (validated in check_param).
    :param api_list: aten api names to dump when dump_mode is Const.LIST.
    :param debug: enable verbose per-op logging.
    :param dump_path: root directory for dump/compare artifacts; required.
    :param tag: optional string folded into the dump directory name.
    :param process_num: worker processes for async compare; 0 means inline compare.
    :raises DispatchException: on invalid parameters or missing dump_path.
    """
    super().__init__()
    logger.info(COMPARE_LOGO)
    # Without torch_npu there is no device to compare against; leave the
    # instance un-initialized (__exit__ and __torch_dispatch__ guard on is_npu).
    if not is_npu:
        logger.error("Please confirm you run environment installed torch_npu!")
        return
    if dump_path is None:
        logger.error("Please set dump_path when dump_mode is config!")
        # Fix: previously execution fell through and passed None to
        # check_file_or_directory_path, crashing with an unrelated error.
        # Fail fast with the dedicated exception instead.
        raise DispatchException(DispatchException.INVALID_PARAMETER)
    check_file_or_directory_path(dump_path, True)

    self.device_id = torch_npu._C._npu_getDevice()
    self.dump_mode = dump_mode
    self.dump_api_list = api_list
    self.debug_flag = debug
    self.api_index = 0                 # running count of dispatched ops
    self.single_api_index_dict = {}    # per-api invocation counters
    self.device_dump_path_cpu = None
    self.device_dump_path_npu = None
    self.all_summary = []              # one entry per dispatched op
    self.call_stack_list = []
    self.process_num = process_num
    self.filter_dump_api()
    self.check_param()
    dir_name = self.get_dir_name(tag)
    self.root_path = os.path.join(os.path.realpath(dump_path), dir_name)
    self.root_cpu_path = os.path.join(self.root_path, 'cpu')
    self.root_npu_path = os.path.join(self.root_path, 'npu')
    check_path_before_create(self.root_cpu_path)
    check_path_before_create(self.root_npu_path)
    create_directory(self.root_cpu_path)
    create_directory(self.root_npu_path)

    self.result_csv_path = os.path.join(self.root_path, RESULT_FILE_NAME)
    self.detail_csv_path = os.path.join(self.root_path, DETAILS_FILE_NAME)
    self.comparator = Comparator(self.result_csv_path, self.detail_csv_path, False)

    # Operator lists steering dispatch behaviour, loaded from the yaml
    # shipped next to this module.
    self.aten_ops_blacklist = []
    self.npu_adjust_autogard = []
    yaml_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "torch_ops_config.yaml")
    self.get_ops(yaml_path)

    # NOTE(review): self.lock stays None here, yet the process_num > 0 branch
    # of __torch_dispatch__ calls self.lock.acquire() — verify the lock is
    # assigned elsewhere before relying on multiprocess mode.
    self.lock = None
    if process_num > 0:
        self.pool = Pool(process_num)
    if debug:
        logger.info(f'Main pid:{os.getpid()} device:{self.device_id} dump_list:{self.dump_api_list} '
                    f'dump_mode:{self.dump_mode} cpu_path[{self.root_cpu_path}], npu_path[{self.root_npu_path}], '
                    f'process[{process_num}]')
|
|
81
|
+
|
|
82
|
+
def __exit__(self, exc_type, exc_val, exc_tb):
    """Leave dispatch mode: join workers, merge worker summaries, log statistics."""
    super().__exit__(exc_type, exc_val, exc_tb)

    if not is_npu:
        return
    # Fix: the message previously had an unclosed 'Pid[' bracket.
    logger.info(f'start write compare csv: Rank[{self.device_id}], Pid[{os.getpid()}]')

    if self.process_num > 0:
        self.pool.close()
        self.pool.join()
        # Workers append one json line per op to summary.json; fold them back
        # into all_summary by op index (msg[0] = index, msg[1] = summary rows).
        summary_path = os.path.join(self.root_cpu_path, 'summary.json')
        if not os.path.exists(summary_path):
            logger.error("Please check train log, An exception may have occurred!")
            return
        check_file_or_directory_path(summary_path, False)
        # Fix: use the context manager so the handle is closed even when
        # json.loads raises (the old code leaked the open handle).
        with FileOpen(summary_path, "r") as fp_handle:
            while True:
                json_line_data = fp_handle.readline()
                if json_line_data == '\n':
                    continue
                if len(json_line_data) == 0:
                    break
                msg = json.loads(json_line_data)
                self.all_summary[msg[0]] = msg[1]

    if self.debug_flag:
        input_num = 0
        output_num = 0
        total_num = 0

        for list_data in self.all_summary:
            for data in list_data:
                logger.info(f'summary: Device[{self.device_id}], Pid[{os.getpid()}], Data[{data}]')
                # Row names mark whether the entry describes an op input or output.
                if "_input" in data[CompareConst.NPU_NAME]:
                    input_num = input_num + 1
                if "_output" in data[CompareConst.NPU_NAME]:
                    output_num = output_num + 1
                total_num = total_num + 1
        # Fix: normalized previously unbalanced brackets in the message.
        logger.info(f'Dispatch exit: Device[{self.device_id}], Pid[{os.getpid()}] Input[{input_num}] '
                    f'Output[{output_num}] Total[{total_num}] API_Total[{self.api_index}]')
|
|
123
|
+
|
|
124
|
+
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
    """Intercept an aten call: run it on NPU and on CPU, queue the comparison.

    Always returns the NPU result so the wrapped program's behavior is
    unchanged by the comparison machinery.
    """
    # Without torch_npu there is nothing to compare; just execute the op.
    if not is_npu:
        logger.error("Please confirm you run environment installed torch_npu!")
        return func(*args, **kwargs)

    # func.__name__ is "<api>.<overload>", e.g. "add.Tensor".
    func_name_split_list = func.__name__.split(".")
    aten_api = func_name_split_list[0]
    try:
        aten_api_overload_name = func_name_split_list[1]
    except IndexError:
        logger.error(f"Please check the func name {func.__name__}!")
        return func(*args, **kwargs)

    self.enable_autogard(aten_api)
    # Blacklisted ops run untouched: no dump, no compare.
    if aten_api in self.aten_ops_blacklist:
        npu_out = func(*args, **kwargs)
        return npu_out

    call_stack = get_callstack()
    self.call_stack_list.append(call_stack)
    self.api_index += 1
    # Per-api invocation counter (1-based) used for unique dump names.
    if aten_api not in self.single_api_index_dict:
        self.single_api_index_dict[aten_api] = 1
    else:
        self.single_api_index_dict[aten_api] += 1

    run_param = self.get_run_param(aten_api, func.__name__, aten_api_overload_name)

    if self.debug_flag:
        logger.info(f'Dispatch Info: Rank[{self.device_id}], Pid[{os.getpid()}], Func[{func.__name__}], '
                    f'Name[{run_param.aten_api}_{run_param.single_api_index}], '
                    f'Count[{self.api_index}], Sys[{get_sys_info()}]')

    # data_to_cpu appends the CPU-converted structure into the list passed
    # as its third argument, hence the [0] unwrap afterwards.
    cpu_args = []
    cpu_kwargs = []
    data_to_cpu(args, 0, cpu_args)
    data_to_cpu(kwargs, 0, cpu_kwargs)
    cpu_args = cpu_args[0]
    cpu_kwargs = cpu_kwargs[0]

    with TimeStatistics("NPU RUN", run_param):
        npu_out = func(*args, **kwargs)
    npu_out_cpu = []
    data_to_cpu(npu_out, 0, npu_out_cpu)
    npu_out_cpu = npu_out_cpu[0]

    with TimeStatistics("CPU RUN", run_param):
        cpu_out = func(*cpu_args, **cpu_kwargs)

    # Promote half-precision CPU reference output to float32 before compare.
    if isinstance(cpu_out, torch.Tensor) and cpu_out.dtype in [torch.bfloat16, torch.float16, torch.half]:
        cpu_out = cpu_out.float()

    if self.process_num == 0:
        # Synchronous path: compare in-process; results land in all_summary.
        self.all_summary.append([])
        data_info = DisPatchDataInfo(cpu_args, cpu_kwargs, self.all_summary, func, npu_out_cpu, cpu_out, self.lock)
        dispatch_workflow(run_param, data_info)
    else:
        # Asynchronous path: reserve a summary slot under the lock, then hand
        # the compare off to the worker pool.
        # NOTE(review): self.lock is None as initialized in __init__; confirm
        # it is assigned elsewhere before process_num > 0 is used.
        self.lock.acquire()
        self.all_summary.append([])
        self.lock.release()
        run_param.process_flag = True
        # func itself is not shipped to the worker (data_info gets None);
        # check_fun verifies it can be re-resolved as torch.ops.aten.<api>.<overload>
        # — presumably because the op object does not pickle; verify.
        if self.check_fun(func, run_param):
            data_info = DisPatchDataInfo(cpu_args, cpu_kwargs, self.all_summary, None, npu_out_cpu, cpu_out,
                                         self.lock)
            self.pool.apply_async(func=dispatch_multiprocess, args=(run_param, data_info),
                                  error_callback=error_call)
        else:
            logger.error("can not get correct function please set process_num=0")
    return npu_out
|
|
193
|
+
|
|
194
|
+
@staticmethod
def check_fun(func, run_param):
    """Check that func is reachable as torch.ops.aten.<api>.<overload>.

    On success, records "aten" in run_param.func_namespace and returns True;
    otherwise returns False and leaves run_param untouched.
    """
    packet = getattr(torch.ops.aten, run_param.aten_api, None)
    if packet is None:
        return False
    overload = getattr(packet, run_param.aten_api_overload_name, None)
    if overload is None:
        return False
    if overload is not func:
        return False
    run_param.func_namespace = "aten"
    return True
|
|
204
|
+
|
|
205
|
+
def get_dir_name(self, tag):
    """Build a unique dump-directory name from tag, rank and a timestamp."""
    # Sleep one second so two consecutive instances cannot share a
    # timestamp, guaranteeing directory-name uniqueness.
    time.sleep(1)
    stamp = time.strftime("%Y%m%d%H%M%S", time.localtime(time.time()))
    if isinstance(tag, str):
        return f'msprobe_{tag}_rank{self.device_id}_{stamp}'
    # Covers both tag=None and any non-string tag.
    logger.warning('There is not tag or the type of tag is not string.')
    return f'msprobe_rank{self.device_id}_{stamp}'
|
|
215
|
+
|
|
216
|
+
def get_ops(self, file_path):
    """Load operator lists from the yaml config at file_path.

    Populates aten_ops_blacklist (ops to skip entirely) and
    npu_adjust_autogard (ops needing the autograd-key adjustment).
    Fix: fall back to empty lists when a key is absent or null, so the
    later `in` membership tests never hit a None.
    """
    yaml_file = load_yaml(file_path)
    self.aten_ops_blacklist = yaml_file.get('aten_ops_blacklist') or []
    self.npu_adjust_autogard = yaml_file.get('npu_adjust_autogard') or []
|
|
220
|
+
|
|
221
|
+
def filter_dump_api(self):
    """Normalize dump_api_list: keep only names that exist on torch.ops.aten.

    Outside LIST mode (or with an empty list) the list is cleared, since it
    is only consulted when dump_mode is Const.LIST.
    """
    if self.dump_mode != Const.LIST or not self.dump_api_list:
        self.dump_api_list = []
        return
    known_apis = dir(torch.ops.aten)
    kept = []
    for aten_api in self.dump_api_list:
        if aten_api not in known_apis:
            logger.warning(f'{aten_api} is not aten api will not dump, please refer to torch.ops.aten')
            continue
        kept.append(aten_api)
    self.dump_api_list = kept
|
|
233
|
+
|
|
234
|
+
def get_run_param(self, aten_api, func_name, aten_api_overload_name):
    """Assemble the DispatchRunParam describing the op being dispatched."""
    param = DispatchRunParam(self.debug_flag, self.device_id, self.root_npu_path, self.root_cpu_path,
                             self.process_num, self.comparator)
    # Whether this particular api should be dumped under the current mode.
    param.dump_flag, param.auto_dump_flag = self.get_dump_flag(aten_api)
    param.func_name = func_name
    param.aten_api = aten_api
    param.aten_api_overload_name = aten_api_overload_name
    # Counters maintained by __torch_dispatch__ before this call.
    param.single_api_index = self.single_api_index_dict[aten_api]
    param.api_index = self.api_index
    return param
|
|
244
|
+
|
|
245
|
+
def get_dump_flag(self, aten_api):
    """Return (dump_flag, auto_dump_flag) for aten_api under the current mode."""
    dump_flag = (self.dump_mode == Const.ALL
                 or (self.dump_mode == Const.LIST and aten_api in self.dump_api_list))
    auto_dump_flag = self.dump_mode == Const.AUTO
    return dump_flag, auto_dump_flag
|
|
255
|
+
|
|
256
|
+
def check_param(self):
    """Validate constructor parameters; raise DispatchException on the first invalid one."""
    checks = (
        (self.dump_mode in Const.ONLINE_DUMP_MODE,
         'The parameter "dump mode" can only be one of {}.'.format(Const.ONLINE_DUMP_MODE)),
        (isinstance(self.dump_api_list, list),
         'The type of parameter "api_list" can only be list.'),
        (isinstance(self.debug_flag, bool),
         'The type of parameter "debug" can only be bool.'),
        (isinstance(self.process_num, int) and self.process_num >= 0,
         'The type of parameter "process_num" can only be int and it should not be less than 0.'),
    )
    for ok, message in checks:
        if not ok:
            logger.error(message)
            raise DispatchException(DispatchException.INVALID_PARAMETER)
|
|
269
|
+
|
|
270
|
+
def enable_autogard(self, aten_api):
|
|
271
|
+
if aten_api in self.npu_adjust_autogard:
|
|
272
272
|
torch._C._dispatch_tls_set_dispatch_key_excluded(torch._C.DispatchKey.AutogradFunctionality, False)
|