mindstudio-probe 1.0.3__py3-none-any.whl → 1.0.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.0.4.dist-info}/LICENSE +201 -201
- {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.0.4.dist-info}/METADATA +36 -34
- mindstudio_probe-1.0.4.dist-info/RECORD +276 -0
- {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.0.4.dist-info}/WHEEL +1 -1
- {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.0.4.dist-info}/entry_points.txt +1 -0
- msprobe/README.md +101 -237
- msprobe/{config/config.json → config.json} +49 -49
- msprobe/core/advisor/advisor.py +124 -124
- msprobe/core/advisor/advisor_const.py +59 -59
- msprobe/core/advisor/advisor_result.py +58 -58
- msprobe/core/common/const.py +341 -318
- msprobe/core/common/exceptions.py +99 -99
- msprobe/core/common/{file_check.py → file_utils.py} +478 -283
- msprobe/core/common/log.py +76 -69
- msprobe/core/common/utils.py +385 -616
- msprobe/core/common_config.py +85 -71
- msprobe/core/compare/acc_compare.py +299 -298
- msprobe/core/compare/check.py +95 -95
- msprobe/core/compare/compare_cli.py +49 -49
- msprobe/core/compare/highlight.py +223 -222
- msprobe/core/compare/multiprocessing_compute.py +149 -149
- msprobe/core/compare/npy_compare.py +295 -295
- msprobe/core/compare/utils.py +430 -429
- msprobe/core/data_dump/data_collector.py +154 -144
- msprobe/core/data_dump/data_processor/base.py +314 -293
- msprobe/core/data_dump/data_processor/factory.py +59 -59
- msprobe/core/data_dump/data_processor/mindspore_processor.py +186 -198
- msprobe/core/data_dump/data_processor/pytorch_processor.py +366 -389
- msprobe/core/data_dump/json_writer.py +96 -116
- msprobe/core/data_dump/scope.py +178 -178
- msprobe/core/grad_probe/constant.py +70 -70
- msprobe/core/grad_probe/grad_compare.py +171 -175
- msprobe/core/grad_probe/utils.py +64 -52
- msprobe/docs/01.installation.md +89 -0
- msprobe/docs/02.config_introduction.md +165 -0
- msprobe/docs/03.config_examples.md +247 -0
- msprobe/docs/04.acl_config_examples.md +76 -0
- msprobe/docs/05.data_dump_PyTorch.md +198 -0
- msprobe/docs/06.data_dump_MindSpore.md +243 -0
- msprobe/docs/07.accuracy_checker_PyTorch.md +274 -0
- msprobe/docs/08.accuracy_checker_online_PyTorch.md +198 -0
- msprobe/docs/09.accuracy_checker_MindSpore.md +68 -0
- msprobe/docs/10.accuracy_compare_PyTorch.md +245 -0
- msprobe/docs/11.accuracy_compare_MindSpore.md +202 -0
- msprobe/docs/12.overflow_check_PyTorch.md +79 -0
- msprobe/docs/13.overflow_check_MindSpore.md +31 -0
- msprobe/{pytorch/doc/parse_tool.md → docs/14.data_parse_PyTorch.md} +283 -286
- msprobe/docs/15.free_benchmarking_PyTorch.md +164 -0
- msprobe/{doc/grad_probe/grad_probe.md → docs/17.grad_probe.md} +207 -207
- msprobe/docs/FAQ_PyTorch.md +177 -0
- msprobe/docs/S02.report_free_benchmarking_validation_performance_baseline.md +146 -0
- msprobe/docs/img/free_benchmark_framework.png +0 -0
- msprobe/mindspore/__init__.py +1 -1
- msprobe/mindspore/api_accuracy_checker/api_accuracy_checker.py +254 -245
- msprobe/mindspore/api_accuracy_checker/api_info.py +69 -69
- msprobe/mindspore/api_accuracy_checker/api_runner.py +155 -151
- msprobe/mindspore/api_accuracy_checker/base_compare_algorithm.py +196 -196
- msprobe/mindspore/api_accuracy_checker/cmd_parser.py +6 -0
- msprobe/mindspore/api_accuracy_checker/compute_element.py +238 -223
- msprobe/mindspore/api_accuracy_checker/main.py +8 -15
- msprobe/mindspore/api_accuracy_checker/type_mapping.py +113 -113
- msprobe/mindspore/api_accuracy_checker/utils.py +79 -62
- msprobe/mindspore/cell_processor.py +34 -34
- msprobe/mindspore/common/const.py +106 -87
- msprobe/mindspore/common/log.py +37 -37
- msprobe/mindspore/common/utils.py +81 -57
- msprobe/mindspore/compare/distributed_compare.py +75 -75
- msprobe/mindspore/compare/ms_compare.py +219 -117
- msprobe/mindspore/compare/ms_graph_compare.py +348 -317
- msprobe/mindspore/compare/ms_to_pt_api.yaml +399 -399
- msprobe/mindspore/debugger/debugger_config.py +66 -74
- msprobe/mindspore/debugger/precision_debugger.py +126 -107
- msprobe/mindspore/dump/dump_tool_factory.py +35 -35
- msprobe/mindspore/dump/hook_cell/api_registry.py +118 -104
- msprobe/mindspore/dump/hook_cell/hook_cell.py +55 -53
- msprobe/mindspore/dump/hook_cell/support_wrap_ops.yaml +922 -925
- msprobe/mindspore/dump/hook_cell/wrap_api.py +113 -0
- msprobe/mindspore/dump/jit_dump.py +72 -56
- msprobe/mindspore/dump/kernel_graph_dump.py +59 -60
- msprobe/mindspore/dump/kernel_kbyk_dump.py +64 -65
- msprobe/mindspore/free_benchmark/api_pynative_self_check.py +116 -116
- msprobe/mindspore/free_benchmark/common/config.py +12 -12
- msprobe/mindspore/free_benchmark/common/handler_params.py +17 -17
- msprobe/mindspore/free_benchmark/common/utils.py +71 -71
- msprobe/mindspore/free_benchmark/data/support_wrap_ops.yaml +842 -842
- msprobe/mindspore/free_benchmark/decorator/dec_forward.py +43 -42
- msprobe/mindspore/free_benchmark/decorator/decorator_factory.py +107 -107
- msprobe/mindspore/free_benchmark/handler/base_handler.py +90 -90
- msprobe/mindspore/free_benchmark/handler/check_handler.py +41 -41
- msprobe/mindspore/free_benchmark/handler/fix_handler.py +36 -36
- msprobe/mindspore/free_benchmark/handler/handler_factory.py +21 -21
- msprobe/mindspore/free_benchmark/perturbation/add_noise.py +67 -67
- msprobe/mindspore/free_benchmark/perturbation/base_perturbation.py +21 -21
- msprobe/mindspore/free_benchmark/perturbation/bit_noise.py +63 -63
- msprobe/mindspore/free_benchmark/perturbation/exchange_value.py +51 -0
- msprobe/mindspore/free_benchmark/perturbation/improve_precision.py +35 -34
- msprobe/mindspore/free_benchmark/perturbation/no_change.py +12 -12
- msprobe/mindspore/free_benchmark/perturbation/perturbation_factory.py +29 -27
- msprobe/mindspore/free_benchmark/self_check_tool_factory.py +33 -33
- msprobe/mindspore/grad_probe/global_context.py +90 -91
- msprobe/mindspore/grad_probe/grad_analyzer.py +231 -231
- msprobe/mindspore/grad_probe/grad_monitor.py +27 -27
- msprobe/mindspore/grad_probe/grad_stat_csv.py +131 -131
- msprobe/mindspore/grad_probe/hook.py +94 -92
- msprobe/mindspore/grad_probe/utils.py +29 -28
- msprobe/mindspore/ms_config.py +128 -126
- msprobe/mindspore/overflow_check/kernel_graph_overflow_check.py +44 -45
- msprobe/mindspore/overflow_check/overflow_check_tool_factory.py +34 -34
- msprobe/mindspore/runtime.py +4 -4
- msprobe/mindspore/service.py +378 -354
- msprobe/mindspore/task_handler_factory.py +24 -24
- msprobe/msprobe.py +105 -107
- msprobe/pytorch/__init__.py +3 -3
- msprobe/pytorch/api_accuracy_checker/common/config.py +53 -55
- msprobe/pytorch/api_accuracy_checker/common/utils.py +214 -165
- msprobe/pytorch/api_accuracy_checker/compare/algorithm.py +213 -213
- msprobe/pytorch/api_accuracy_checker/compare/api_precision_compare.py +606 -581
- msprobe/pytorch/api_accuracy_checker/compare/api_precision_standard.yaml +132 -132
- msprobe/pytorch/api_accuracy_checker/compare/api_precision_threshold.yaml +390 -390
- msprobe/pytorch/api_accuracy_checker/compare/compare.py +386 -381
- msprobe/pytorch/api_accuracy_checker/compare/compare_column.py +73 -73
- msprobe/pytorch/api_accuracy_checker/compare/compare_utils.py +245 -244
- msprobe/pytorch/api_accuracy_checker/config.yaml +10 -10
- msprobe/pytorch/api_accuracy_checker/run_ut/data_generate.py +335 -332
- msprobe/pytorch/api_accuracy_checker/run_ut/multi_run_ut.py +200 -199
- msprobe/pytorch/api_accuracy_checker/run_ut/run_overflow_check.py +133 -134
- msprobe/pytorch/api_accuracy_checker/run_ut/run_ut.py +592 -581
- msprobe/pytorch/api_accuracy_checker/run_ut/run_ut_utils.py +70 -74
- msprobe/pytorch/api_accuracy_checker/run_ut/torch_ut_setting.json +7 -4
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/attl.py +197 -202
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/client.py +325 -324
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/device_dispatch.py +204 -204
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/server.py +219 -218
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/ssl_config.py +10 -10
- msprobe/pytorch/bench_functions/__init__.py +15 -15
- msprobe/pytorch/bench_functions/apply_adam_w.py +28 -28
- msprobe/pytorch/bench_functions/confusion_transpose.py +19 -19
- msprobe/pytorch/bench_functions/fast_gelu.py +55 -55
- msprobe/pytorch/bench_functions/layer_norm_eval.py +6 -6
- msprobe/pytorch/bench_functions/linear.py +12 -12
- msprobe/pytorch/bench_functions/matmul_backward.py +48 -48
- msprobe/pytorch/bench_functions/npu_fusion_attention.py +509 -421
- msprobe/pytorch/bench_functions/rms_norm.py +15 -15
- msprobe/pytorch/bench_functions/rotary_mul.py +52 -52
- msprobe/pytorch/bench_functions/scaled_mask_softmax.py +26 -26
- msprobe/pytorch/bench_functions/swiglu.py +55 -55
- msprobe/pytorch/common/__init__.py +2 -2
- msprobe/pytorch/common/compare_script.template +14 -14
- msprobe/pytorch/common/log.py +20 -31
- msprobe/pytorch/common/parse_json.py +39 -39
- msprobe/pytorch/common/utils.py +305 -300
- msprobe/pytorch/compare/distributed_compare.py +66 -66
- msprobe/pytorch/compare/mapping.yaml +607 -607
- msprobe/pytorch/compare/match.py +34 -33
- msprobe/pytorch/compare/pt_compare.py +50 -40
- msprobe/pytorch/debugger/debugger_config.py +95 -95
- msprobe/pytorch/debugger/precision_debugger.py +125 -125
- msprobe/pytorch/free_benchmark/__init__.py +8 -8
- msprobe/pytorch/free_benchmark/common/constant.py +70 -70
- msprobe/pytorch/free_benchmark/common/counter.py +71 -71
- msprobe/pytorch/free_benchmark/common/enums.py +37 -37
- msprobe/pytorch/free_benchmark/common/params.py +129 -129
- msprobe/pytorch/free_benchmark/common/utils.py +102 -102
- msprobe/pytorch/free_benchmark/compare/grad_saver.py +179 -179
- msprobe/pytorch/free_benchmark/compare/single_benchmark.py +104 -104
- msprobe/pytorch/free_benchmark/main.py +105 -105
- msprobe/pytorch/free_benchmark/perturbed_layers/base_layer.py +13 -13
- msprobe/pytorch/free_benchmark/perturbed_layers/layer_factory.py +41 -41
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/add_noise.py +90 -90
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/bit_noise.py +104 -104
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/change_value.py +63 -63
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/improve_precision.py +68 -68
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/no_change.py +28 -28
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/npu_base_layser.py +45 -45
- msprobe/pytorch/free_benchmark/perturbed_layers/run_cpu.py +19 -19
- msprobe/pytorch/free_benchmark/result_handlers/base_handler.py +217 -217
- msprobe/pytorch/free_benchmark/result_handlers/check_handler.py +39 -39
- msprobe/pytorch/free_benchmark/result_handlers/fix_handler.py +23 -23
- msprobe/pytorch/free_benchmark/result_handlers/handler_factory.py +30 -30
- msprobe/pytorch/free_benchmark/result_handlers/preheat_handler.py +170 -170
- msprobe/pytorch/function_factory.py +76 -75
- msprobe/pytorch/functional/dump_module.py +39 -39
- msprobe/pytorch/grad_probe/grad_monitor.py +91 -90
- msprobe/pytorch/grad_probe/grad_stat_csv.py +128 -128
- msprobe/pytorch/hook_module/api_registry.py +161 -161
- msprobe/pytorch/hook_module/hook_module.py +120 -120
- msprobe/pytorch/hook_module/support_wrap_ops.yaml +1879 -1877
- msprobe/pytorch/hook_module/utils.py +30 -29
- msprobe/pytorch/hook_module/wrap_aten.py +110 -110
- msprobe/pytorch/hook_module/wrap_distributed.py +78 -78
- msprobe/pytorch/hook_module/wrap_functional.py +105 -105
- msprobe/pytorch/hook_module/wrap_npu_custom.py +93 -84
- msprobe/pytorch/hook_module/wrap_tensor.py +71 -71
- msprobe/pytorch/hook_module/wrap_torch.py +86 -86
- msprobe/pytorch/hook_module/wrap_vf.py +62 -62
- msprobe/pytorch/module_processer.py +138 -138
- msprobe/pytorch/online_dispatch/__init__.py +20 -20
- msprobe/pytorch/online_dispatch/compare.py +236 -236
- msprobe/pytorch/online_dispatch/dispatch.py +271 -271
- msprobe/pytorch/online_dispatch/dump_compare.py +155 -156
- msprobe/pytorch/online_dispatch/single_compare.py +391 -391
- msprobe/pytorch/online_dispatch/torch_ops_config.yaml +49 -49
- msprobe/pytorch/online_dispatch/utils.py +130 -146
- msprobe/pytorch/parse.py +4 -4
- msprobe/pytorch/parse_tool/cli.py +32 -32
- msprobe/pytorch/parse_tool/lib/compare.py +260 -271
- msprobe/pytorch/parse_tool/lib/config.py +52 -52
- msprobe/pytorch/parse_tool/lib/file_desc.py +31 -31
- msprobe/pytorch/parse_tool/lib/interactive_cli.py +102 -102
- msprobe/pytorch/parse_tool/lib/parse_exception.py +54 -54
- msprobe/pytorch/parse_tool/lib/parse_tool.py +158 -158
- msprobe/pytorch/parse_tool/lib/utils.py +316 -321
- msprobe/pytorch/parse_tool/lib/visualization.py +85 -91
- msprobe/pytorch/pt_config.py +188 -187
- msprobe/pytorch/service.py +246 -252
- mindstudio_probe-1.0.3.dist-info/RECORD +0 -272
- msprobe/config/README.md +0 -539
- msprobe/mindspore/doc/compare.md +0 -58
- msprobe/mindspore/doc/dump.md +0 -217
- msprobe/mindspore/dump/hook_cell/wrap_functional.py +0 -91
- msprobe/mindspore/dump/hook_cell/wrap_tensor.py +0 -63
- msprobe/pytorch/doc/FAQ.md +0 -193
- msprobe/pytorch/doc/api_accuracy_checker.md +0 -313
- msprobe/pytorch/doc/api_accuracy_checker_online.md +0 -187
- msprobe/pytorch/doc/dump.md +0 -260
- msprobe/pytorch/doc/msprobe/321/207/342/226/223/342/225/233/321/205/342/225/221/320/266/321/205/342/225/226/320/265/321/205/320/225/342/225/226/321/206/320/245/342/226/221/321/206/320/235/320/276dump/321/206/320/260/320/227/321/205/320/227/320/226/321/206/320/220/320/267/321/210/320/223/342/225/234/321/205/320/257/342/225/221/321/207/342/225/221/342/224/220/321/206/320/232/320/265/321/205/320/241/320/232.md +0 -182
- msprobe/pytorch/doc/ptdbg_ascend_compare.md +0 -240
- msprobe/pytorch/doc/ptdbg_ascend_overview.md +0 -68
- msprobe/pytorch/doc/ptdbg_ascend_quickstart.md +0 -381
- msprobe/pytorch/doc/run_overflow_check.md +0 -25
- msprobe/pytorch/doc//321/205/320/254/320/270/321/207/342/225/221/342/224/220/321/207/342/226/223/342/225/233/321/205/342/225/221/320/266/321/206/320/277/320/244/321/205/320/277/342/225/243.md +0 -90
- msprobe/pytorch/doc//321/206/320/247/320/260/321/206/320/260/320/227/321/206/320/255/320/226/321/205/342/225/226/320/265/321/205/320/225/342/225/226/321/205/320/254/342/225/221/321/206/320/251/320/277/321/211/320/272/320/234/321/210/320/277/320/221/321/205/320/242/320/234/321/206/320/220/320/267/321/210/320/223/342/225/234/321/205/320/257/342/225/221/321/207/342/225/221/342/224/220/321/206/320/232/320/265/321/205/320/241/320/232.md +0 -151
- {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.0.4.dist-info}/top_level.txt +0 -0
- /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_1.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_2.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_3.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_4.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_1.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_2.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_3.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_4.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_5.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_6.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_7.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_8.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/YOLOV5S_1.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/YOLOV5S_2.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/accuracy_checking_details.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/accuracy_checking_result.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/api_precision_compare_details.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/api_precision_compare_result.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/auto_analyze_log.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/compare_result_pkl.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/compare_result_pkl_md5.png.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/cpu_info.png +0 -0
- /msprobe/{config → docs}/img/free_benchmark.png +0 -0
- /msprobe/{doc/grad_probe/img/image-1.png → docs/img/grad_probe_image-1.png} +0 -0
- /msprobe/{doc/grad_probe/img/image-2.png → docs/img/grad_probe_image-2.png} +0 -0
- /msprobe/{doc/grad_probe/img/image-3.png → docs/img/grad_probe_image-3.png} +0 -0
- /msprobe/{doc/grad_probe/img/image-4.png → docs/img/grad_probe_image-4.png} +0 -0
- /msprobe/{doc/grad_probe/img/image.png → docs/img/grad_probe_image.png} +0 -0
- /msprobe/{pytorch/doc → docs}/img/module_compare.png +0 -0
|
@@ -1,42 +1,43 @@
|
|
|
1
|
-
from msprobe.mindspore.free_benchmark.common.config import Config
|
|
2
|
-
from msprobe.mindspore.common.const import
|
|
3
|
-
from msprobe.mindspore.
|
|
4
|
-
from msprobe.mindspore.free_benchmark.
|
|
5
|
-
from msprobe.mindspore.free_benchmark.
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
params.
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
1
|
+
from msprobe.mindspore.free_benchmark.common.config import Config
|
|
2
|
+
from msprobe.mindspore.common.const import Const
|
|
3
|
+
from msprobe.mindspore.common.const import FreeBenchmarkConst
|
|
4
|
+
from msprobe.mindspore.free_benchmark.common.handler_params import HandlerParams
|
|
5
|
+
from msprobe.mindspore.free_benchmark.handler.handler_factory import HandlerFactory
|
|
6
|
+
from msprobe.mindspore.free_benchmark.perturbation.perturbation_factory import PerturbationFactory
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class ForwardSelfChecker:
|
|
10
|
+
|
|
11
|
+
def __init__(self, api_name: str):
|
|
12
|
+
self.api_name = api_name
|
|
13
|
+
|
|
14
|
+
def handle(self, params: HandlerParams):
|
|
15
|
+
"""
|
|
16
|
+
装饰器实际执行逻辑
|
|
17
|
+
|
|
18
|
+
"""
|
|
19
|
+
perturbation = PerturbationFactory.create(self.api_name)
|
|
20
|
+
params.fuzzed_result = perturbation.handle(params)
|
|
21
|
+
params.original_result = params.original_func(*params.args, **params.kwargs)
|
|
22
|
+
if params.fuzzed_result is not False:
|
|
23
|
+
return self.deal_fuzzed_and_original_result(params)
|
|
24
|
+
return params.original_result
|
|
25
|
+
|
|
26
|
+
def get_compare_data(self, params: HandlerParams):
|
|
27
|
+
if self.api_name not in Const.COMMUNICATION_API_LIST:
|
|
28
|
+
return
|
|
29
|
+
# 以下为通讯类api处理逻辑
|
|
30
|
+
params.fuzzed_result = params.fuzzed_value
|
|
31
|
+
if Config.pert_type == FreeBenchmarkConst.IMPROVE_PRECISION:
|
|
32
|
+
params.original_result = params.args
|
|
33
|
+
else:
|
|
34
|
+
params.original_result = params.args[params.index]
|
|
35
|
+
|
|
36
|
+
def deal_fuzzed_and_original_result(self, params: HandlerParams):
|
|
37
|
+
original_result = params.original_result
|
|
38
|
+
self.get_compare_data(params)
|
|
39
|
+
handler = HandlerFactory.create(self.api_name)
|
|
40
|
+
result = handler.handle(params)
|
|
41
|
+
if self.api_name in Const.COMMUNICATION_API_LIST:
|
|
42
|
+
result = original_result
|
|
43
|
+
return result
|
|
@@ -1,107 +1,107 @@
|
|
|
1
|
-
import os
|
|
2
|
-
import sys
|
|
3
|
-
import traceback
|
|
4
|
-
from functools import wraps
|
|
5
|
-
from typing import Tuple, Dict, List
|
|
6
|
-
|
|
7
|
-
from mindspore import ops
|
|
8
|
-
|
|
9
|
-
from msprobe.mindspore.runtime import Runtime
|
|
10
|
-
from msprobe.mindspore.common.log import logger
|
|
11
|
-
from msprobe.mindspore.free_benchmark.common.config import Config
|
|
12
|
-
from msprobe.mindspore.free_benchmark.common.handler_params import HandlerParams
|
|
13
|
-
from .dec_forward import ForwardSelfChecker
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
def decorate(original_func, decorate_func, api_name=None):
|
|
17
|
-
"""
|
|
18
|
-
总装饰器
|
|
19
|
-
"""
|
|
20
|
-
@wraps(original_func)
|
|
21
|
-
def fuzz_wrapper(*args, **kwargs):
|
|
22
|
-
|
|
23
|
-
def __exec_decorate_func():
|
|
24
|
-
params = data_pre_deal(api_name, original_func, *args, **kwargs)
|
|
25
|
-
result = decorate_func(params)
|
|
26
|
-
return result
|
|
27
|
-
|
|
28
|
-
try:
|
|
29
|
-
if Runtime.rank_id == -1:
|
|
30
|
-
Runtime.rank_id = os.environ.get("RANK_ID", -1)
|
|
31
|
-
if need_wrapper_func():
|
|
32
|
-
logger.info(f"[{api_name}] is checking.")
|
|
33
|
-
return __exec_decorate_func()
|
|
34
|
-
except Exception as e:
|
|
35
|
-
logger.error(f"[{api_name}] Error: {str(e)}")
|
|
36
|
-
logger.error(f"[{api_name}] Error detail: {traceback.format_exc()}")
|
|
37
|
-
|
|
38
|
-
return original_func(*args, **kwargs)
|
|
39
|
-
|
|
40
|
-
return fuzz_wrapper
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
def decorate_forward_function(func, api_name=None):
|
|
44
|
-
"""
|
|
45
|
-
前向装饰器
|
|
46
|
-
"""
|
|
47
|
-
|
|
48
|
-
if not api_name:
|
|
49
|
-
api_name = func.__name__
|
|
50
|
-
|
|
51
|
-
def forward_func(params: HandlerParams):
|
|
52
|
-
forward = ForwardSelfChecker(api_name)
|
|
53
|
-
result = forward.handle(params)
|
|
54
|
-
return result
|
|
55
|
-
|
|
56
|
-
return decorate(func, forward_func, api_name)
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
def stack_depth_check() -> bool:
|
|
60
|
-
nested_depth = 1
|
|
61
|
-
frame = sys._getframe(1)
|
|
62
|
-
while frame:
|
|
63
|
-
if frame.f_code.co_name == "fuzz_wrapper":
|
|
64
|
-
nested_depth -= 1
|
|
65
|
-
if nested_depth < 0:
|
|
66
|
-
return False
|
|
67
|
-
frame = frame.f_back
|
|
68
|
-
return True
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
def get_target_arg_index(args: Tuple) -> int:
|
|
72
|
-
"""
|
|
73
|
-
类型校验
|
|
74
|
-
|
|
75
|
-
"""
|
|
76
|
-
for i, arg in enumerate(args):
|
|
77
|
-
if ops.is_tensor(arg):
|
|
78
|
-
if not ops.is_floating_point(arg):
|
|
79
|
-
continue
|
|
80
|
-
return i
|
|
81
|
-
if isinstance(arg, (List, Tuple, Dict)):
|
|
82
|
-
return i
|
|
83
|
-
return -1
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
def data_pre_deal(api_name, func, *args, **kwargs):
|
|
87
|
-
params = HandlerParams()
|
|
88
|
-
params.args = args
|
|
89
|
-
params.kwargs = kwargs
|
|
90
|
-
params.original_func = func
|
|
91
|
-
index = get_target_arg_index(args)
|
|
92
|
-
if index == -1:
|
|
93
|
-
raise Exception(f"{api_name} has no supported input type")
|
|
94
|
-
params.index = index
|
|
95
|
-
return params
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
def need_wrapper_func():
|
|
99
|
-
if not (Runtime.is_running and Config.is_enable):
|
|
100
|
-
return False
|
|
101
|
-
if not stack_depth_check():
|
|
102
|
-
return False
|
|
103
|
-
if Config.steps and Runtime.step_count not in Config.steps:
|
|
104
|
-
return False
|
|
105
|
-
if Config.ranks and Runtime.rank_id != -1 and Runtime.rank_id not in Config.ranks:
|
|
106
|
-
return False
|
|
107
|
-
return True
|
|
1
|
+
import os
|
|
2
|
+
import sys
|
|
3
|
+
import traceback
|
|
4
|
+
from functools import wraps
|
|
5
|
+
from typing import Tuple, Dict, List
|
|
6
|
+
|
|
7
|
+
from mindspore import ops
|
|
8
|
+
|
|
9
|
+
from msprobe.mindspore.runtime import Runtime
|
|
10
|
+
from msprobe.mindspore.common.log import logger
|
|
11
|
+
from msprobe.mindspore.free_benchmark.common.config import Config
|
|
12
|
+
from msprobe.mindspore.free_benchmark.common.handler_params import HandlerParams
|
|
13
|
+
from .dec_forward import ForwardSelfChecker
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def decorate(original_func, decorate_func, api_name=None):
|
|
17
|
+
"""
|
|
18
|
+
总装饰器
|
|
19
|
+
"""
|
|
20
|
+
@wraps(original_func)
|
|
21
|
+
def fuzz_wrapper(*args, **kwargs):
|
|
22
|
+
|
|
23
|
+
def __exec_decorate_func():
|
|
24
|
+
params = data_pre_deal(api_name, original_func, *args, **kwargs)
|
|
25
|
+
result = decorate_func(params)
|
|
26
|
+
return result
|
|
27
|
+
|
|
28
|
+
try:
|
|
29
|
+
if Runtime.rank_id == -1:
|
|
30
|
+
Runtime.rank_id = os.environ.get("RANK_ID", -1)
|
|
31
|
+
if need_wrapper_func():
|
|
32
|
+
logger.info(f"[{api_name}] is checking.")
|
|
33
|
+
return __exec_decorate_func()
|
|
34
|
+
except Exception as e:
|
|
35
|
+
logger.error(f"[{api_name}] Error: {str(e)}")
|
|
36
|
+
logger.error(f"[{api_name}] Error detail: {traceback.format_exc()}")
|
|
37
|
+
|
|
38
|
+
return original_func(*args, **kwargs)
|
|
39
|
+
|
|
40
|
+
return fuzz_wrapper
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def decorate_forward_function(func, api_name=None):
|
|
44
|
+
"""
|
|
45
|
+
前向装饰器
|
|
46
|
+
"""
|
|
47
|
+
|
|
48
|
+
if not api_name:
|
|
49
|
+
api_name = func.__name__
|
|
50
|
+
|
|
51
|
+
def forward_func(params: HandlerParams):
|
|
52
|
+
forward = ForwardSelfChecker(api_name)
|
|
53
|
+
result = forward.handle(params)
|
|
54
|
+
return result
|
|
55
|
+
|
|
56
|
+
return decorate(func, forward_func, api_name)
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def stack_depth_check() -> bool:
|
|
60
|
+
nested_depth = 1
|
|
61
|
+
frame = sys._getframe(1)
|
|
62
|
+
while frame:
|
|
63
|
+
if frame.f_code.co_name == "fuzz_wrapper":
|
|
64
|
+
nested_depth -= 1
|
|
65
|
+
if nested_depth < 0:
|
|
66
|
+
return False
|
|
67
|
+
frame = frame.f_back
|
|
68
|
+
return True
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
def get_target_arg_index(args: Tuple) -> int:
|
|
72
|
+
"""
|
|
73
|
+
类型校验
|
|
74
|
+
|
|
75
|
+
"""
|
|
76
|
+
for i, arg in enumerate(args):
|
|
77
|
+
if ops.is_tensor(arg):
|
|
78
|
+
if not ops.is_floating_point(arg):
|
|
79
|
+
continue
|
|
80
|
+
return i
|
|
81
|
+
if isinstance(arg, (List, Tuple, Dict)):
|
|
82
|
+
return i
|
|
83
|
+
return -1
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def data_pre_deal(api_name, func, *args, **kwargs):
|
|
87
|
+
params = HandlerParams()
|
|
88
|
+
params.args = args
|
|
89
|
+
params.kwargs = kwargs
|
|
90
|
+
params.original_func = func
|
|
91
|
+
index = get_target_arg_index(args)
|
|
92
|
+
if index == -1:
|
|
93
|
+
raise Exception(f"{api_name} has no supported input type")
|
|
94
|
+
params.index = index
|
|
95
|
+
return params
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
def need_wrapper_func():
|
|
99
|
+
if not (Runtime.is_running and Config.is_enable):
|
|
100
|
+
return False
|
|
101
|
+
if not stack_depth_check():
|
|
102
|
+
return False
|
|
103
|
+
if Config.steps and Runtime.step_count not in Config.steps:
|
|
104
|
+
return False
|
|
105
|
+
if Config.ranks and Runtime.rank_id != -1 and Runtime.rank_id not in Config.ranks:
|
|
106
|
+
return False
|
|
107
|
+
return True
|
|
@@ -1,90 +1,90 @@
|
|
|
1
|
-
import math
|
|
2
|
-
from abc import ABC, abstractmethod
|
|
3
|
-
from typing import Any, Tuple, Optional
|
|
4
|
-
|
|
5
|
-
import mindspore as ms
|
|
6
|
-
from mindspore import Tensor, ops
|
|
7
|
-
|
|
8
|
-
from msprobe.mindspore.common.log import logger
|
|
9
|
-
from msprobe.mindspore.free_benchmark.common.utils import Tools
|
|
10
|
-
from msprobe.mindspore.common.const import FreeBenchmarkConst
|
|
11
|
-
from msprobe.mindspore.free_benchmark.common.handler_params import HandlerParams
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
class BaseHandler(ABC):
|
|
15
|
-
|
|
16
|
-
def __init__(self, api_name: str):
|
|
17
|
-
self.api_name = api_name
|
|
18
|
-
|
|
19
|
-
@staticmethod
|
|
20
|
-
def pre_calculate(original_output, fuzzed_output):
|
|
21
|
-
abs_tol = FreeBenchmarkConst.PERT_VALUE_DICT.get(fuzzed_output.dtype,
|
|
22
|
-
FreeBenchmarkConst.PERT_VALUE_DICT.get(ms.float32))
|
|
23
|
-
|
|
24
|
-
return original_output.to(fuzzed_output.dtype), fuzzed_output, abs_tol
|
|
25
|
-
|
|
26
|
-
@staticmethod
|
|
27
|
-
def get_threshold(dtype):
|
|
28
|
-
err = Tools.get_default_error_threshold(dtype)
|
|
29
|
-
return err
|
|
30
|
-
|
|
31
|
-
@staticmethod
|
|
32
|
-
def convert_overflow_ratio_to_consistent(ratio):
|
|
33
|
-
if math.isnan(ratio) or math.isinf(ratio):
|
|
34
|
-
return FreeBenchmarkConst.NO_CHANGE_ERROR_THRESHOLD
|
|
35
|
-
return ratio
|
|
36
|
-
|
|
37
|
-
@staticmethod
|
|
38
|
-
def get_endless_norm(first_tensor, second_tensor, abs_tol):
|
|
39
|
-
if first_tensor.dtype != ms.bfloat16 and second_tensor.dtype != ms.bfloat16:
|
|
40
|
-
ratio_tensor1 = ops.where(ops.abs(second_tensor) > abs_tol, ops.div(first_tensor, second_tensor), 1)
|
|
41
|
-
ratio_tensor2 = ops.where(ops.abs(first_tensor) > abs_tol, ops.div(second_tensor, first_tensor), 1)
|
|
42
|
-
else:
|
|
43
|
-
ratio_tensor1 = ops.where(ops.abs(second_tensor).to(ms.float32) > abs_tol,
|
|
44
|
-
ops.div(first_tensor.to(ms.float32), second_tensor.to(ms.float32)), 1)
|
|
45
|
-
ratio_tensor2 = ops.where(ops.abs(first_tensor).to(ms.float32) > abs_tol,
|
|
46
|
-
ops.div(second_tensor.to(ms.float32), first_tensor.to(ms.float32)), 1)
|
|
47
|
-
norm1 = BaseHandler.convert_overflow_ratio_to_consistent(ops.max(ratio_tensor1)[0].to(ms.float32).item())
|
|
48
|
-
norm2 = BaseHandler.convert_overflow_ratio_to_consistent(ops.max(ratio_tensor2)[0].to(ms.float32).item())
|
|
49
|
-
norm3 = BaseHandler.convert_overflow_ratio_to_consistent(ops.min(ratio_tensor1)[0].to(ms.float32).item())
|
|
50
|
-
ratio = FreeBenchmarkConst.SYMBOL_FLIPPING_RATIO if norm3 < 0 else max(norm1, norm2)
|
|
51
|
-
|
|
52
|
-
return ratio
|
|
53
|
-
|
|
54
|
-
@staticmethod
|
|
55
|
-
def ratio_calculate(original_output, fuzzed_output) -> float:
|
|
56
|
-
try:
|
|
57
|
-
original_output, fuzzed_output, abs_tol = BaseHandler.pre_calculate(original_output, fuzzed_output)
|
|
58
|
-
except Exception as e:
|
|
59
|
-
logger.error(f"When computing ratio, y1 or y2 dtype is not supported {str(e)}")
|
|
60
|
-
return FreeBenchmarkConst.NO_CHANGE_ERROR_THRESHOLD
|
|
61
|
-
|
|
62
|
-
abs_tol = abs_tol ** 0.5
|
|
63
|
-
|
|
64
|
-
return BaseHandler.get_endless_norm(original_output, fuzzed_output, abs_tol)
|
|
65
|
-
|
|
66
|
-
@staticmethod
|
|
67
|
-
def npu_compare(original_output, fuzzed_output) -> Tuple[bool, Optional[float]]:
|
|
68
|
-
if not isinstance(fuzzed_output, Tensor):
|
|
69
|
-
logger.error(f"The compare for output type `{type(fuzzed_output)}` is not supported")
|
|
70
|
-
return True, 1.0
|
|
71
|
-
|
|
72
|
-
# 范数计算等
|
|
73
|
-
err_thd = BaseHandler.get_threshold(original_output.dtype)
|
|
74
|
-
ratio = BaseHandler.ratio_calculate(original_output, fuzzed_output)
|
|
75
|
-
is_consistent = err_thd >= ratio >= 1.0 / err_thd
|
|
76
|
-
return is_consistent, ratio
|
|
77
|
-
|
|
78
|
-
@staticmethod
|
|
79
|
-
def is_float_tensor(output) -> bool:
|
|
80
|
-
if isinstance(output, Tensor) and ops.is_floating_point(output):
|
|
81
|
-
return True
|
|
82
|
-
if isinstance(output, (list, tuple)):
|
|
83
|
-
for i in output:
|
|
84
|
-
if isinstance(i, Tensor) and ops.is_floating_point(i):
|
|
85
|
-
return True
|
|
86
|
-
return False
|
|
87
|
-
|
|
88
|
-
@abstractmethod
|
|
89
|
-
def handle(self, params: HandlerParams) -> Any:
|
|
90
|
-
pass
|
|
1
|
+
import math
|
|
2
|
+
from abc import ABC, abstractmethod
|
|
3
|
+
from typing import Any, Tuple, Optional
|
|
4
|
+
|
|
5
|
+
import mindspore as ms
|
|
6
|
+
from mindspore import Tensor, ops
|
|
7
|
+
|
|
8
|
+
from msprobe.mindspore.common.log import logger
|
|
9
|
+
from msprobe.mindspore.free_benchmark.common.utils import Tools
|
|
10
|
+
from msprobe.mindspore.common.const import FreeBenchmarkConst
|
|
11
|
+
from msprobe.mindspore.free_benchmark.common.handler_params import HandlerParams
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class BaseHandler(ABC):
    """Base class for free-benchmark result handlers.

    Provides the numeric machinery used to compare an API's original
    output against the output produced from perturbed ("fuzzed") inputs:
    dtype alignment, element-wise ratio norms, and threshold checks.
    """

    def __init__(self, api_name: str):
        # Name of the API under test; used by subclasses for logs/reports.
        self.api_name = api_name

    @staticmethod
    def pre_calculate(original_output, fuzzed_output):
        """Align dtypes and pick the absolute tolerance.

        Casts the original output to the fuzzed output's dtype and looks up
        the perturbation tolerance for that dtype, falling back to the
        float32 entry when the dtype is not in PERT_VALUE_DICT.
        May raise if the dtype cannot be converted; the caller handles it.
        """
        abs_tol = FreeBenchmarkConst.PERT_VALUE_DICT.get(fuzzed_output.dtype,
                                                         FreeBenchmarkConst.PERT_VALUE_DICT.get(ms.float32))

        return original_output.to(fuzzed_output.dtype), fuzzed_output, abs_tol

    @staticmethod
    def get_threshold(dtype):
        """Return the default relative-error threshold for `dtype`."""
        err = Tools.get_default_error_threshold(dtype)
        return err

    @staticmethod
    def convert_overflow_ratio_to_consistent(ratio):
        # NaN/Inf ratios arise from overflow or 0-division; map them to the
        # "no change" threshold so they are treated as consistent.
        if math.isnan(ratio) or math.isinf(ratio):
            return FreeBenchmarkConst.NO_CHANGE_ERROR_THRESHOLD
        return ratio

    @staticmethod
    def get_endless_norm(first_tensor, second_tensor, abs_tol):
        """Compute the worst-case element-wise ratio between two tensors.

        Elements whose counterpart magnitude is <= abs_tol are treated as
        ratio 1 (no signal). bfloat16 inputs are compared in float32
        (presumably because the needed ops lack bf16 support — confirm).
        A negative minimum ratio indicates a sign flip and yields the
        dedicated SYMBOL_FLIPPING_RATIO sentinel.
        """
        if first_tensor.dtype != ms.bfloat16 and second_tensor.dtype != ms.bfloat16:
            ratio_tensor1 = ops.where(ops.abs(second_tensor) > abs_tol, ops.div(first_tensor, second_tensor), 1)
            ratio_tensor2 = ops.where(ops.abs(first_tensor) > abs_tol, ops.div(second_tensor, first_tensor), 1)
        else:
            ratio_tensor1 = ops.where(ops.abs(second_tensor).to(ms.float32) > abs_tol,
                                      ops.div(first_tensor.to(ms.float32), second_tensor.to(ms.float32)), 1)
            ratio_tensor2 = ops.where(ops.abs(first_tensor).to(ms.float32) > abs_tol,
                                      ops.div(second_tensor.to(ms.float32), first_tensor.to(ms.float32)), 1)
        norm1 = BaseHandler.convert_overflow_ratio_to_consistent(ops.max(ratio_tensor1)[0].to(ms.float32).item())
        norm2 = BaseHandler.convert_overflow_ratio_to_consistent(ops.max(ratio_tensor2)[0].to(ms.float32).item())
        norm3 = BaseHandler.convert_overflow_ratio_to_consistent(ops.min(ratio_tensor1)[0].to(ms.float32).item())
        # A negative min ratio means original and fuzzed disagree in sign.
        ratio = FreeBenchmarkConst.SYMBOL_FLIPPING_RATIO if norm3 < 0 else max(norm1, norm2)

        return ratio

    @staticmethod
    def ratio_calculate(original_output, fuzzed_output) -> float:
        """Return the consistency ratio between original and fuzzed outputs.

        Falls back to the "no change" threshold when dtype alignment fails.
        """
        try:
            original_output, fuzzed_output, abs_tol = BaseHandler.pre_calculate(original_output, fuzzed_output)
        except Exception as e:
            logger.error(f"When computing ratio, y1 or y2 dtype is not supported {str(e)}")
            return FreeBenchmarkConst.NO_CHANGE_ERROR_THRESHOLD

        # Square root loosens the per-element tolerance for the division
        # guard in get_endless_norm.
        abs_tol = abs_tol ** 0.5

        return BaseHandler.get_endless_norm(original_output, fuzzed_output, abs_tol)

    @staticmethod
    def npu_compare(original_output, fuzzed_output) -> Tuple[bool, Optional[float]]:
        """Compare an original output against its fuzzed counterpart.

        Returns (is_consistent, ratio). Non-Tensor fuzzed outputs are not
        comparable: an error is logged and the pair is reported as
        consistent with ratio 1.0.
        """
        if not isinstance(fuzzed_output, Tensor):
            logger.error(f"The compare for output type `{type(fuzzed_output)}` is not supported")
            return True, 1.0

        # Norm computation etc.: consistent iff ratio is within
        # [1/err_thd, err_thd] for the original dtype's threshold.
        err_thd = BaseHandler.get_threshold(original_output.dtype)
        ratio = BaseHandler.ratio_calculate(original_output, fuzzed_output)
        is_consistent = err_thd >= ratio >= 1.0 / err_thd
        return is_consistent, ratio

    @staticmethod
    def is_float_tensor(output) -> bool:
        """Tell whether `output` is (or contains) a floating-point Tensor."""
        if isinstance(output, Tensor) and ops.is_floating_point(output):
            return True
        if isinstance(output, (list, tuple)):
            for i in output:
                if isinstance(i, Tensor) and ops.is_floating_point(i):
                    return True
        return False

    @abstractmethod
    def handle(self, params: HandlerParams) -> Any:
        """Process one perturbed API call; implemented by concrete handlers."""
        pass
|
|
@@ -1,41 +1,41 @@
|
|
|
1
|
-
from typing import Any
|
|
2
|
-
from dataclasses import asdict
|
|
3
|
-
|
|
4
|
-
from mindspore import Tensor, ops
|
|
5
|
-
|
|
6
|
-
from msprobe.mindspore.common.log import logger
|
|
7
|
-
from msprobe.mindspore.free_benchmark.common.config import Config
|
|
8
|
-
from msprobe.mindspore.free_benchmark.handler.base_handler import BaseHandler
|
|
9
|
-
from msprobe.mindspore.free_benchmark.common.handler_params import HandlerParams
|
|
10
|
-
from msprobe.mindspore.free_benchmark.common.utils import make_unequal_row
|
|
11
|
-
from msprobe.core.data_dump.json_writer import DataWriter
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
class CheckHandler(BaseHandler):
    """Handler that checks original vs. fuzzed outputs for consistency.

    Inconsistent pairs are appended to the CSV report at Config.dump_path
    and logged; the original result is always returned unchanged.
    """

    def npu_compare_and_save(self, original_output, fuzzed_output, params: HandlerParams, output_index=None):
        """Compare one output pair; persist a report row when inconsistent."""
        is_consistent, ratio = self.npu_compare(original_output, fuzzed_output)
        # Once any output pair is inconsistent, the whole call stays flagged.
        params.is_consistent = params.is_consistent and is_consistent
        if not is_consistent:
            row = make_unequal_row(self.api_name, params, ratio, output_index)
            data_dict = asdict(row)
            DataWriter.write_data_to_csv(
                data_dict.values(),
                data_dict.keys(),
                Config.dump_path
            )
            logger.error(f"{self.api_name} is not consistent")

    def handle(self, params: HandlerParams) -> Any:
        """Run the consistency check and return the original result.

        Skips non-floating outputs entirely; for list/tuple outputs only
        floating-point tensor elements are compared. Any exception is
        logged (best-effort) so the check never breaks the user's run.
        """
        try:
            if not self.is_float_tensor(params.fuzzed_result):
                return params.original_result
            if isinstance(params.fuzzed_result, Tensor):
                self.npu_compare_and_save(params.original_result, params.fuzzed_result, params)
            elif isinstance(params.fuzzed_result, (list, tuple)):
                for i, item in enumerate(params.original_result):
                    if ops.is_tensor(item) and ops.is_floating_point(item):
                        self.npu_compare_and_save(item, params.fuzzed_result[i], params, output_index=i)
        except Exception as e:
            logger.error(str(e))
        return params.original_result
|
|
1
|
+
from typing import Any
|
|
2
|
+
from dataclasses import asdict
|
|
3
|
+
|
|
4
|
+
from mindspore import Tensor, ops
|
|
5
|
+
|
|
6
|
+
from msprobe.mindspore.common.log import logger
|
|
7
|
+
from msprobe.mindspore.free_benchmark.common.config import Config
|
|
8
|
+
from msprobe.mindspore.free_benchmark.handler.base_handler import BaseHandler
|
|
9
|
+
from msprobe.mindspore.free_benchmark.common.handler_params import HandlerParams
|
|
10
|
+
from msprobe.mindspore.free_benchmark.common.utils import make_unequal_row
|
|
11
|
+
from msprobe.core.data_dump.json_writer import DataWriter
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class CheckHandler(BaseHandler):
    """Free-benchmark handler that flags inconsistent outputs.

    Every original/fuzzed output pair is compared; inconsistent pairs are
    appended to the CSV report at Config.dump_path and logged. The
    original result is always handed back unchanged.
    """

    def npu_compare_and_save(self, original_output, fuzzed_output, params: HandlerParams, output_index=None):
        """Compare a single output pair and record it when inconsistent."""
        consistent, ratio = self.npu_compare(original_output, fuzzed_output)
        # A single inconsistent pair keeps the whole call flagged.
        params.is_consistent = params.is_consistent and consistent
        if consistent:
            return
        unequal_row = asdict(make_unequal_row(self.api_name, params, ratio, output_index))
        DataWriter.write_data_to_csv(
            unequal_row.values(),
            unequal_row.keys(),
            Config.dump_path
        )
        logger.error(f"{self.api_name} is not consistent")

    def handle(self, params: HandlerParams) -> Any:
        """Run the consistency check, then return the original result.

        Non-floating outputs are skipped; for list/tuple outputs only
        floating-point tensor elements are compared. Exceptions are
        logged best-effort so the check cannot break the user's run.
        """
        try:
            if self.is_float_tensor(params.fuzzed_result):
                fuzzed = params.fuzzed_result
                if isinstance(fuzzed, Tensor):
                    self.npu_compare_and_save(params.original_result, fuzzed, params)
                elif isinstance(fuzzed, (list, tuple)):
                    for index, original_item in enumerate(params.original_result):
                        if ops.is_tensor(original_item) and ops.is_floating_point(original_item):
                            self.npu_compare_and_save(original_item, fuzzed[index], params, output_index=index)
        except Exception as e:
            logger.error(str(e))
        return params.original_result
|
|
@@ -1,36 +1,36 @@
|
|
|
1
|
-
from typing import Any
|
|
2
|
-
|
|
3
|
-
from mindspore import Tensor
|
|
4
|
-
|
|
5
|
-
from msprobe.mindspore.common.log import logger
|
|
6
|
-
from msprobe.mindspore.free_benchmark.common.handler_params import HandlerParams
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
class FixHandler:
    """Handler that replaces the original API result with the fuzzed one,
    cast back to the original Tensor dtypes and container structure."""

    def __init__(self, api_name: str):
        # API name, used in error logs when substitution fails.
        self.api_name = api_name

    @staticmethod
    def use_fuzzed_result(original_result, fuzzed_result):
        """Recursively mirror `original_result`'s structure with fuzzed values.

        Tensor leaves are taken from `fuzzed_result` converted to the
        original dtype; dicts and lists/tuples are rebuilt element-wise
        (container type preserved); any other leaf is returned unchanged.
        Assumes `fuzzed_result` has the same structure as `original_result`.
        """
        if isinstance(original_result, Tensor):
            return fuzzed_result.to(original_result.dtype)
        if isinstance(original_result, dict):
            dict_fixed_result = dict()
            for k, v in original_result.items():
                dict_fixed_result[k] = FixHandler.use_fuzzed_result(v, fuzzed_result[k])
            return dict_fixed_result
        if isinstance(original_result, (tuple, list)):
            list_fixed_result = list()
            for i, v in enumerate(original_result):
                list_fixed_result.append(FixHandler.use_fuzzed_result(v, fuzzed_result[i]))
            # Preserve the original container type (tuple vs. list).
            return type(original_result)(list_fixed_result)
        return original_result

    def handle(self, params: HandlerParams) -> Any:
        """Return the fuzzed result in place of the original; fall back to
        the original result when substitution fails."""
        try:
            return FixHandler.use_fuzzed_result(params.original_result, params.fuzzed_result)
        except Exception as e:
            logger.error(f"{self.api_name} failed to fix.")
            logger.error(str(e))
            return params.original_result
|
|
1
|
+
from typing import Any
|
|
2
|
+
|
|
3
|
+
from mindspore import Tensor
|
|
4
|
+
|
|
5
|
+
from msprobe.mindspore.common.log import logger
|
|
6
|
+
from msprobe.mindspore.free_benchmark.common.handler_params import HandlerParams
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class FixHandler:
    """Substitutes fuzzed results for original ones.

    The original container structure and Tensor dtypes are preserved;
    when substitution fails the original result is returned instead.
    """

    def __init__(self, api_name: str):
        # Used only for error logging on failed substitution.
        self.api_name = api_name

    @staticmethod
    def use_fuzzed_result(original_result, fuzzed_result):
        """Recursively rebuild `original_result` out of `fuzzed_result`.

        Tensor leaves come from `fuzzed_result`, cast to the original
        dtype; dicts and sequences are rebuilt element-wise with the
        container type preserved; other leaves pass through unchanged.
        """
        if isinstance(original_result, Tensor):
            return fuzzed_result.to(original_result.dtype)
        if isinstance(original_result, dict):
            return {
                key: FixHandler.use_fuzzed_result(value, fuzzed_result[key])
                for key, value in original_result.items()
            }
        if isinstance(original_result, (tuple, list)):
            fixed_items = [
                FixHandler.use_fuzzed_result(value, fuzzed_result[index])
                for index, value in enumerate(original_result)
            ]
            # Rebuild as the same container type (tuple vs. list).
            return type(original_result)(fixed_items)
        return original_result

    def handle(self, params: HandlerParams) -> Any:
        """Return the fixed (fuzzed) result, or the original on failure."""
        try:
            return FixHandler.use_fuzzed_result(params.original_result, params.fuzzed_result)
        except Exception as e:
            logger.error(f"{self.api_name} failed to fix.")
            logger.error(str(e))
            return params.original_result
|