mindstudio-probe 1.0.3__py3-none-any.whl → 1.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/LICENSE +201 -201
- {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/METADATA +36 -34
- mindstudio_probe-1.1.0.dist-info/RECORD +287 -0
- {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/WHEEL +1 -1
- {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/entry_points.txt +1 -0
- msprobe/README.md +131 -237
- msprobe/__init__.py +16 -1
- msprobe/{config/config.json → config.json} +47 -49
- msprobe/core/advisor/advisor.py +124 -124
- msprobe/core/advisor/advisor_const.py +58 -59
- msprobe/core/advisor/advisor_result.py +58 -58
- msprobe/core/common/const.py +402 -318
- msprobe/core/common/exceptions.py +99 -99
- msprobe/core/common/{file_check.py → file_utils.py} +523 -283
- msprobe/core/common/inplace_op_checker.py +38 -0
- msprobe/core/common/inplace_ops.yaml +251 -0
- msprobe/core/common/log.py +86 -69
- msprobe/core/common/utils.py +371 -616
- msprobe/core/common_config.py +78 -71
- msprobe/core/compare/acc_compare.py +472 -298
- msprobe/core/compare/check.py +180 -95
- msprobe/core/compare/compare_cli.py +69 -49
- msprobe/core/compare/highlight.py +259 -222
- msprobe/core/compare/multiprocessing_compute.py +174 -149
- msprobe/core/compare/npy_compare.py +310 -295
- msprobe/core/compare/utils.py +464 -429
- msprobe/core/data_dump/data_collector.py +153 -144
- msprobe/core/data_dump/data_processor/base.py +337 -293
- msprobe/core/data_dump/data_processor/factory.py +76 -59
- msprobe/core/data_dump/data_processor/mindspore_processor.py +192 -198
- msprobe/core/data_dump/data_processor/pytorch_processor.py +383 -389
- msprobe/core/data_dump/json_writer.py +117 -116
- msprobe/core/data_dump/scope.py +194 -178
- msprobe/core/grad_probe/constant.py +74 -70
- msprobe/core/grad_probe/grad_compare.py +170 -175
- msprobe/core/grad_probe/utils.py +77 -52
- msprobe/docs/01.installation.md +99 -0
- msprobe/docs/02.config_introduction.md +137 -0
- msprobe/docs/03.config_examples.md +237 -0
- msprobe/docs/04.acl_config_examples.md +78 -0
- msprobe/docs/05.data_dump_PyTorch.md +326 -0
- msprobe/docs/06.data_dump_MindSpore.md +285 -0
- msprobe/docs/07.accuracy_checker_PyTorch.md +297 -0
- msprobe/docs/08.accuracy_checker_online_PyTorch.md +238 -0
- msprobe/docs/09.accuracy_checker_MindSpore.md +68 -0
- msprobe/docs/10.accuracy_compare_PyTorch.md +327 -0
- msprobe/docs/11.accuracy_compare_MindSpore.md +333 -0
- msprobe/docs/12.overflow_check_PyTorch.md +79 -0
- msprobe/docs/13.overflow_check_MindSpore.md +31 -0
- msprobe/{pytorch/doc/parse_tool.md → docs/14.data_parse_PyTorch.md} +283 -286
- msprobe/docs/15.free_benchmarking_PyTorch.md +170 -0
- msprobe/docs/16.free_benchmarking_MindSpore.md +140 -0
- msprobe/{doc/grad_probe/grad_probe.md → docs/17.grad_probe.md} +205 -207
- msprobe/{pytorch/doc//321/205/320/254/320/270/321/207/342/225/221/342/224/220/321/207/342/226/223/342/225/233/321/205/342/225/221/320/266/321/206/320/277/320/244/321/205/320/277/342/225/243.md → docs/18.online_dispatch.md} +89 -90
- msprobe/docs/FAQ.md +189 -0
- msprobe/docs/S02.report_free_benchmarking_validation_performance_baseline.md +146 -0
- msprobe/docs/img/free_benchmark_framework.png +0 -0
- msprobe/docs/img/ms_dump.png +0 -0
- msprobe/docs/img/ms_layer.png +0 -0
- msprobe/docs/img/pt_dump.png +0 -0
- msprobe/mindspore/__init__.py +2 -1
- msprobe/mindspore/api_accuracy_checker/api_accuracy_checker.py +278 -245
- msprobe/mindspore/api_accuracy_checker/api_info.py +76 -69
- msprobe/mindspore/api_accuracy_checker/api_runner.py +155 -151
- msprobe/mindspore/api_accuracy_checker/base_compare_algorithm.py +196 -196
- msprobe/mindspore/api_accuracy_checker/cmd_parser.py +6 -0
- msprobe/mindspore/api_accuracy_checker/compute_element.py +238 -223
- msprobe/mindspore/api_accuracy_checker/main.py +8 -15
- msprobe/mindspore/api_accuracy_checker/type_mapping.py +113 -113
- msprobe/mindspore/api_accuracy_checker/utils.py +79 -62
- msprobe/mindspore/cell_processor.py +58 -34
- msprobe/mindspore/common/const.py +108 -87
- msprobe/mindspore/common/log.py +37 -37
- msprobe/mindspore/common/utils.py +97 -57
- msprobe/mindspore/compare/distributed_compare.py +62 -75
- msprobe/mindspore/compare/layer_mapping.py +146 -0
- msprobe/mindspore/compare/modify_mapping.py +107 -0
- msprobe/mindspore/compare/ms_compare.py +357 -117
- msprobe/mindspore/compare/ms_graph_compare.py +364 -317
- msprobe/mindspore/compare/ms_to_pt_api.yaml +399 -399
- msprobe/mindspore/debugger/debugger_config.py +69 -74
- msprobe/mindspore/debugger/precision_debugger.py +150 -107
- msprobe/mindspore/dump/dump_tool_factory.py +50 -35
- msprobe/mindspore/dump/hook_cell/api_registry.py +128 -104
- msprobe/mindspore/dump/hook_cell/hook_cell.py +55 -53
- msprobe/mindspore/dump/hook_cell/primitive_hooks.py +206 -0
- msprobe/mindspore/dump/hook_cell/support_wrap_ops.yaml +994 -925
- msprobe/mindspore/dump/hook_cell/wrap_api.py +121 -0
- msprobe/mindspore/dump/jit_dump.py +96 -56
- msprobe/mindspore/dump/kernel_graph_dump.py +75 -60
- msprobe/mindspore/dump/kernel_kbyk_dump.py +79 -65
- msprobe/mindspore/free_benchmark/api_pynative_self_check.py +131 -116
- msprobe/mindspore/free_benchmark/common/config.py +27 -12
- msprobe/mindspore/free_benchmark/common/handler_params.py +32 -17
- msprobe/mindspore/free_benchmark/common/utils.py +85 -71
- msprobe/mindspore/free_benchmark/data/support_wrap_ops.yaml +842 -842
- msprobe/mindspore/free_benchmark/decorator/dec_forward.py +57 -42
- msprobe/mindspore/free_benchmark/decorator/decorator_factory.py +122 -107
- msprobe/mindspore/free_benchmark/handler/base_handler.py +105 -90
- msprobe/mindspore/free_benchmark/handler/check_handler.py +56 -41
- msprobe/mindspore/free_benchmark/handler/fix_handler.py +51 -36
- msprobe/mindspore/free_benchmark/handler/handler_factory.py +36 -21
- msprobe/mindspore/free_benchmark/perturbation/add_noise.py +82 -67
- msprobe/mindspore/free_benchmark/perturbation/base_perturbation.py +36 -21
- msprobe/mindspore/free_benchmark/perturbation/bit_noise.py +78 -63
- msprobe/mindspore/free_benchmark/perturbation/exchange_value.py +77 -0
- msprobe/mindspore/free_benchmark/perturbation/improve_precision.py +49 -34
- msprobe/mindspore/free_benchmark/perturbation/no_change.py +27 -12
- msprobe/mindspore/free_benchmark/perturbation/perturbation_factory.py +44 -27
- msprobe/mindspore/free_benchmark/self_check_tool_factory.py +48 -33
- msprobe/mindspore/grad_probe/global_context.py +100 -91
- msprobe/mindspore/grad_probe/grad_analyzer.py +231 -231
- msprobe/mindspore/grad_probe/grad_monitor.py +27 -27
- msprobe/mindspore/grad_probe/grad_stat_csv.py +131 -131
- msprobe/mindspore/grad_probe/hook.py +94 -92
- msprobe/mindspore/grad_probe/utils.py +29 -28
- msprobe/mindspore/ms_config.py +128 -126
- msprobe/mindspore/overflow_check/kernel_graph_overflow_check.py +60 -45
- msprobe/mindspore/overflow_check/overflow_check_tool_factory.py +49 -34
- msprobe/mindspore/runtime.py +4 -4
- msprobe/mindspore/service.py +297 -354
- msprobe/mindspore/task_handler_factory.py +24 -24
- msprobe/msprobe.py +105 -107
- msprobe/pytorch/__init__.py +23 -4
- msprobe/pytorch/api_accuracy_checker/common/config.py +70 -55
- msprobe/pytorch/api_accuracy_checker/common/utils.py +246 -165
- msprobe/pytorch/api_accuracy_checker/compare/algorithm.py +230 -213
- msprobe/pytorch/api_accuracy_checker/compare/api_precision_compare.py +632 -581
- msprobe/pytorch/api_accuracy_checker/compare/api_precision_standard.yaml +132 -132
- msprobe/pytorch/api_accuracy_checker/compare/api_precision_threshold.yaml +390 -390
- msprobe/pytorch/api_accuracy_checker/compare/compare.py +416 -381
- msprobe/pytorch/api_accuracy_checker/compare/compare_column.py +90 -73
- msprobe/pytorch/api_accuracy_checker/compare/compare_utils.py +265 -244
- msprobe/pytorch/api_accuracy_checker/config.yaml +10 -10
- msprobe/pytorch/api_accuracy_checker/run_ut/data_generate.py +370 -332
- msprobe/pytorch/api_accuracy_checker/run_ut/multi_run_ut.py +221 -199
- msprobe/pytorch/api_accuracy_checker/run_ut/run_overflow_check.py +150 -134
- msprobe/pytorch/api_accuracy_checker/run_ut/run_ut.py +518 -581
- msprobe/pytorch/api_accuracy_checker/run_ut/run_ut_utils.py +213 -74
- msprobe/pytorch/api_accuracy_checker/run_ut/torch_ut_setting.json +7 -4
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/attl.py +218 -202
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/client.py +370 -324
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/device_dispatch.py +227 -204
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/dump_dispatch.py +110 -0
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/server.py +244 -218
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/torch_ops_config.yaml +63 -0
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/utils.py +44 -0
- msprobe/pytorch/bench_functions/__init__.py +30 -15
- msprobe/pytorch/bench_functions/apply_adam_w.py +43 -28
- msprobe/pytorch/bench_functions/confusion_transpose.py +34 -19
- msprobe/pytorch/bench_functions/fast_gelu.py +70 -55
- msprobe/pytorch/bench_functions/layer_norm_eval.py +21 -6
- msprobe/pytorch/bench_functions/linear.py +27 -12
- msprobe/pytorch/bench_functions/matmul_backward.py +63 -48
- msprobe/pytorch/bench_functions/npu_fusion_attention.py +538 -421
- msprobe/pytorch/bench_functions/rms_norm.py +30 -15
- msprobe/pytorch/bench_functions/rotary_mul.py +71 -52
- msprobe/pytorch/bench_functions/scaled_mask_softmax.py +41 -26
- msprobe/pytorch/bench_functions/swiglu.py +70 -55
- msprobe/pytorch/common/__init__.py +17 -2
- msprobe/pytorch/common/compare_script.template +14 -14
- msprobe/pytorch/common/log.py +33 -32
- msprobe/pytorch/common/parse_json.py +54 -39
- msprobe/pytorch/common/utils.py +310 -300
- msprobe/pytorch/compare/distributed_compare.py +66 -66
- msprobe/pytorch/compare/mapping.yaml +607 -607
- msprobe/pytorch/compare/match.py +49 -33
- msprobe/pytorch/compare/pt_compare.py +82 -40
- msprobe/pytorch/debugger/debugger_config.py +108 -95
- msprobe/pytorch/debugger/precision_debugger.py +173 -125
- msprobe/pytorch/free_benchmark/__init__.py +23 -8
- msprobe/pytorch/free_benchmark/common/constant.py +70 -70
- msprobe/pytorch/free_benchmark/common/counter.py +71 -71
- msprobe/pytorch/free_benchmark/common/enums.py +65 -37
- msprobe/pytorch/free_benchmark/common/params.py +144 -129
- msprobe/pytorch/free_benchmark/common/utils.py +118 -102
- msprobe/pytorch/free_benchmark/compare/grad_saver.py +200 -179
- msprobe/pytorch/free_benchmark/compare/single_benchmark.py +119 -104
- msprobe/pytorch/free_benchmark/main.py +120 -105
- msprobe/pytorch/free_benchmark/perturbed_layers/base_layer.py +28 -13
- msprobe/pytorch/free_benchmark/perturbed_layers/layer_factory.py +56 -41
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/add_noise.py +105 -90
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/bit_noise.py +119 -104
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/change_value.py +87 -63
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/improve_precision.py +83 -68
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/no_change.py +43 -28
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/npu_base_layser.py +60 -45
- msprobe/pytorch/free_benchmark/perturbed_layers/run_cpu.py +34 -19
- msprobe/pytorch/free_benchmark/result_handlers/base_handler.py +256 -217
- msprobe/pytorch/free_benchmark/result_handlers/check_handler.py +54 -39
- msprobe/pytorch/free_benchmark/result_handlers/fix_handler.py +38 -23
- msprobe/pytorch/free_benchmark/result_handlers/handler_factory.py +45 -30
- msprobe/pytorch/free_benchmark/result_handlers/preheat_handler.py +185 -170
- msprobe/pytorch/function_factory.py +91 -75
- msprobe/pytorch/functional/module_dump.py +84 -0
- msprobe/pytorch/grad_probe/grad_monitor.py +91 -90
- msprobe/pytorch/grad_probe/grad_stat_csv.py +128 -128
- msprobe/pytorch/hook_module/__init__.py +16 -1
- msprobe/pytorch/hook_module/api_registry.py +166 -161
- msprobe/pytorch/hook_module/hook_module.py +118 -120
- msprobe/pytorch/hook_module/support_wrap_ops.yaml +1879 -1877
- msprobe/pytorch/hook_module/utils.py +28 -29
- msprobe/pytorch/hook_module/wrap_aten.py +111 -110
- msprobe/pytorch/hook_module/wrap_distributed.py +77 -78
- msprobe/pytorch/hook_module/wrap_functional.py +104 -105
- msprobe/pytorch/hook_module/wrap_npu_custom.py +85 -84
- msprobe/pytorch/hook_module/wrap_tensor.py +69 -71
- msprobe/pytorch/hook_module/wrap_torch.py +84 -86
- msprobe/pytorch/hook_module/wrap_vf.py +60 -62
- msprobe/pytorch/module_processer.py +153 -138
- msprobe/pytorch/online_dispatch/__init__.py +20 -20
- msprobe/pytorch/online_dispatch/compare.py +235 -236
- msprobe/pytorch/online_dispatch/dispatch.py +271 -271
- msprobe/pytorch/online_dispatch/dump_compare.py +155 -156
- msprobe/pytorch/online_dispatch/single_compare.py +391 -391
- msprobe/pytorch/online_dispatch/torch_ops_config.yaml +57 -49
- msprobe/pytorch/online_dispatch/utils.py +127 -146
- msprobe/pytorch/parse.py +19 -4
- msprobe/pytorch/parse_tool/cli.py +31 -32
- msprobe/pytorch/parse_tool/lib/compare.py +259 -271
- msprobe/pytorch/parse_tool/lib/config.py +52 -52
- msprobe/pytorch/parse_tool/lib/file_desc.py +31 -31
- msprobe/pytorch/parse_tool/lib/interactive_cli.py +102 -102
- msprobe/pytorch/parse_tool/lib/parse_exception.py +54 -54
- msprobe/pytorch/parse_tool/lib/parse_tool.py +161 -158
- msprobe/pytorch/parse_tool/lib/utils.py +320 -321
- msprobe/pytorch/parse_tool/lib/visualization.py +85 -91
- msprobe/pytorch/pt_config.py +317 -187
- msprobe/pytorch/service.py +311 -252
- mindstudio_probe-1.0.3.dist-info/RECORD +0 -272
- msprobe/config/README.md +0 -539
- msprobe/mindspore/doc/compare.md +0 -58
- msprobe/mindspore/doc/dump.md +0 -217
- msprobe/mindspore/dump/hook_cell/wrap_functional.py +0 -91
- msprobe/mindspore/dump/hook_cell/wrap_tensor.py +0 -63
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/ssl_config.py +0 -10
- msprobe/pytorch/doc/FAQ.md +0 -193
- msprobe/pytorch/doc/api_accuracy_checker.md +0 -313
- msprobe/pytorch/doc/api_accuracy_checker_online.md +0 -187
- msprobe/pytorch/doc/dump.md +0 -260
- msprobe/pytorch/doc/msprobe/321/207/342/226/223/342/225/233/321/205/342/225/221/320/266/321/205/342/225/226/320/265/321/205/320/225/342/225/226/321/206/320/245/342/226/221/321/206/320/235/320/276dump/321/206/320/260/320/227/321/205/320/227/320/226/321/206/320/220/320/267/321/210/320/223/342/225/234/321/205/320/257/342/225/221/321/207/342/225/221/342/224/220/321/206/320/232/320/265/321/205/320/241/320/232.md +0 -182
- msprobe/pytorch/doc/ptdbg_ascend_compare.md +0 -240
- msprobe/pytorch/doc/ptdbg_ascend_overview.md +0 -68
- msprobe/pytorch/doc/ptdbg_ascend_quickstart.md +0 -381
- msprobe/pytorch/doc/run_overflow_check.md +0 -25
- msprobe/pytorch/doc//321/206/320/247/320/260/321/206/320/260/320/227/321/206/320/255/320/226/321/205/342/225/226/320/265/321/205/320/225/342/225/226/321/205/320/254/342/225/221/321/206/320/251/320/277/321/211/320/272/320/234/321/210/320/277/320/221/321/205/320/242/320/234/321/206/320/220/320/267/321/210/320/223/342/225/234/321/205/320/257/342/225/221/321/207/342/225/221/342/224/220/321/206/320/232/320/265/321/205/320/241/320/232.md +0 -151
- msprobe/pytorch/functional/data_processor.py +0 -0
- msprobe/pytorch/functional/dump_module.py +0 -39
- {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/top_level.txt +0 -0
- /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_1.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_2.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_3.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_4.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_1.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_2.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_3.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_4.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_5.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_6.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_7.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_8.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/YOLOV5S_1.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/YOLOV5S_2.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/accuracy_checking_details.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/accuracy_checking_result.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/api_precision_compare_details.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/api_precision_compare_result.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/auto_analyze_log.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/compare_result_pkl.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/compare_result_pkl_md5.png.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/cpu_info.png +0 -0
- /msprobe/{config → docs}/img/free_benchmark.png +0 -0
- /msprobe/{doc/grad_probe/img/image-1.png → docs/img/grad_probe_image-1.png} +0 -0
- /msprobe/{doc/grad_probe/img/image-2.png → docs/img/grad_probe_image-2.png} +0 -0
- /msprobe/{doc/grad_probe/img/image-3.png → docs/img/grad_probe_image-3.png} +0 -0
- /msprobe/{doc/grad_probe/img/image-4.png → docs/img/grad_probe_image-4.png} +0 -0
- /msprobe/{doc/grad_probe/img/image.png → docs/img/grad_probe_image.png} +0 -0
- /msprobe/{pytorch/doc → docs}/img/module_compare.png +0 -0
|
@@ -1,391 +1,391 @@
|
|
|
1
|
-
import logging
|
|
2
|
-
from functools import wraps
|
|
3
|
-
import torch
|
|
4
|
-
from prettytable import PrettyTable
|
|
5
|
-
from collections import namedtuple
|
|
6
|
-
from msprobe.pytorch.common.log import logger
|
|
7
|
-
|
|
8
|
-
def func_log_wrapper():
    """Build a decorator that logs entry and exit of the wrapped callable."""
    def _decorate(func):
        @wraps(func)
        def _wrapped(*args, **kwargs):
            # Bracket the real call with start/end messages so runs are traceable.
            logger.info(f"start to run: {func.__name__}")
            result = func(*args, **kwargs)
            logger.info(f"end to run: {func.__name__}")
            return result
        return _wrapped
    return _decorate
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
class SingleBenchmarkCompareStandard:
    """Threshold table for the single-benchmark accuracy standard.

    Holds the per-dtype error thresholds (a strict and a relaxed variant)
    and the error-balance (EB) thresholds consumed by the compare routines.
    """

    def __init__(self, high_precision=True):
        # high_precision selects the stricter of the two error thresholds.
        self.high_precision = high_precision
        # Benchmark magnitudes below this bound are judged by absolute error.
        self.small_value = 1.0
        # dtype -> [high-precision threshold, low-precision threshold]
        self.error_thd = {
            torch.float16: [2 ** -11, 2 ** -7],
            torch.bfloat16: [2 ** -8, 2 ** -6],
            torch.float32: [2 ** -14, 2 ** -11],
            torch.float64: [2 ** -14, 2 ** -11],
        }
        # dtype -> error-balance threshold
        self.eb_thd = {
            torch.float16: 2 ** -10,
            torch.bfloat16: 2 ** -7,
            torch.float32: 2 ** -14,
            torch.float64: 2 ** -14,
        }

    def get_error_thd(self, dtype):
        """Return the error threshold for *dtype*, or None if unsupported."""
        thresholds = self.error_thd.get(dtype)
        if thresholds is None:
            logging.error(
                "Single benchmark compare only supports floating point "
                "in fp16, bf16, fp32. "
            )
            return None
        if dtype == torch.float64:
            # fp64 has no dedicated standard; it reuses the fp32 thresholds.
            logging.warning("the output data of fp64 uses the same standard as fp32.")
        return thresholds[0] if self.high_precision else thresholds[1]

    def get_eb_thd(self, dtype):
        """Return the error-balance threshold for *dtype*, or None if unsupported."""
        return self.eb_thd.get(dtype)
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
class SingleBenchmarkAccuracyResult:
    """Container for one comparison outcome and its error statistics."""

    def __init__(
            self,
            result=True,
            error_balance=None,
            max_abs_diff=None,
            max_abs_idx=None,
            max_rel_diff=None,
            max_rel_idx=None
    ):
        self.result = result                  # overall pass/fail flag
        self.error_balance = error_balance    # EB metric (mean signed error)
        self.max_abs_diff = max_abs_diff      # largest absolute error found
        self.max_abs_idx = max_abs_idx        # flat index of that absolute error
        self.max_rel_diff = max_rel_diff      # largest relative error found
        self.max_rel_idx = max_rel_idx        # flat index of that relative error

    def get_result(self, eb_thd, error_thd):
        """Set self.result: pass only when every metric is within its threshold."""
        exceeded = (
            self.error_balance > eb_thd
            or self.max_abs_diff > error_thd
            or self.max_rel_diff > error_thd
        )
        self.result = not exceeded
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
class SingleBenchmarkAccuracyCompare:
    """Element-wise single-benchmark accuracy comparison of NPU vs. benchmark outputs.

    All entry points are classmethods; precision_compare_for_case() is the
    driver and delegates to the compute_* helpers. Inputs are expected to be
    flattened 1-D tensors (see the module-level single_benchmark_compare()).
    """

    @classmethod
    @func_log_wrapper()
    def check_output_size(cls, npu_out, bench_out):
        """Size pre-check.

        Returns a SingleBenchmarkAccuracyResult when size information alone
        decides the outcome (both outputs empty -> pass, size mismatch ->
        fail), otherwise None so the element-wise comparison proceeds.
        """
        acc_result = None
        # Fixed: the original called the misspelled bench_out.nuimel().
        if npu_out.numel() == 0 and bench_out.numel() == 0:
            info = (
                "The npu_output is [], and it is same as benchmark_output, "
                "the result of data_compare is Pass"
            )
            logging.debug(info)
            acc_result = SingleBenchmarkAccuracyResult(result=True)

        if npu_out.size() != bench_out.size():
            error_info = (
                f"the size of npu output[{npu_out.size()}] and"
                f"benchmark[{bench_out.size()}] is not equal"
            )

            logging.error(error_info)
            acc_result = SingleBenchmarkAccuracyResult(result=False)
        return acc_result

    @classmethod
    @func_log_wrapper()
    def check_output_invalid_value(cls, output):
        """Return truthy when *output* contains any NaN or Inf element."""
        has_nan = torch.isnan(output).any()
        has_inf = torch.isinf(output).any()
        return has_nan or has_inf

    @classmethod
    @func_log_wrapper()
    def precision_compare_for_case(cls, npu_out, bench_out, benchmark_standard: SingleBenchmarkCompareStandard):
        """Run the full single-benchmark comparison for one case.

        Returns a CompareResultInfo namedtuple:
        (accuracy_result, error_threshold, eb_threshold, failed_information).
        failed_information is None on the normal comparison path.
        """
        error_thd = None
        eb_thd = None
        acc_result = cls.check_output_size(npu_out, bench_out)
        CompareResultInfo = namedtuple("CompareResultInfo",
                                       ['accuracy_result', 'error_threshold', 'eb_threshold', 'failed_information'])

        # NOTE(review): any non-None size-check result (including the
        # both-empty "pass") takes this branch and reports the shape-mismatch
        # message; behavior preserved from the original.
        if acc_result:
            failed_info = "比对数据的shape不一致"
            return CompareResultInfo(acc_result, error_thd, eb_thd, failed_info)

        # A NaN/Inf in the benchmark invalidates the reference: case passes.
        if cls.check_output_invalid_value(bench_out):
            logging.info("The benchmark result contains nan/inf value. ")
            failed_info = "标杆结果存在nan值或inf值, 依照单标杆标准该用例通过"
            acc_result = SingleBenchmarkAccuracyResult(result=True)
            return CompareResultInfo(acc_result, error_thd, eb_thd, failed_info)

        # A NaN/Inf only on the NPU side fails the case.
        if cls.check_output_invalid_value(npu_out):
            logging.info("The NPU result contains nan/inf value. ")
            failed_info = "NPU结果存在nan值或inf值, 依照单标杆标准该用例不通过"
            acc_result = SingleBenchmarkAccuracyResult(result=False)
            return CompareResultInfo(acc_result, error_thd, eb_thd, failed_info)

        data_type = npu_out.dtype
        if data_type not in [torch.float16, torch.float32, torch.float64, torch.bfloat16]:
            # Non-float outputs must match bit-exactly.
            acc_result = cls.compute_binary_diff(npu_out, bench_out)
        else:
            error_thd = benchmark_standard.get_error_thd(data_type)
            eb_thd = benchmark_standard.get_eb_thd(data_type)
            if error_thd is None:
                logging.error(
                    "single benchmark not support the comparison of %s", str(data_type)
                )
                acc_result = SingleBenchmarkAccuracyResult(result=False)
            else:
                # Promote half-precision NPU output so the diff against an
                # fp32 benchmark is computed in fp32.
                if npu_out.dtype in [torch.float16, torch.bfloat16] and bench_out.dtype in [torch.float32]:
                    npu_out = npu_out.to(torch.float32)
                error_balance = cls.compute_error_balance(npu_out, bench_out, benchmark_standard)
                max_abs_diff, max_abs_idx = cls.compute_abs_diff(npu_out, bench_out, error_thd, benchmark_standard)
                max_rel_diff, max_rel_idx = cls.compute_rel_diff(npu_out, bench_out, error_thd, benchmark_standard)
                acc_result = SingleBenchmarkAccuracyResult(
                    error_balance=error_balance,
                    max_abs_diff=max_abs_diff,
                    max_abs_idx=max_abs_idx,
                    max_rel_diff=max_rel_diff,
                    max_rel_idx=max_rel_idx
                )
                acc_result.get_result(eb_thd, error_thd)

        # Fixed: the original fell through here without returning, so the
        # normal comparison path handed None back to its caller.
        return CompareResultInfo(acc_result, error_thd, eb_thd, None)

    @classmethod
    @func_log_wrapper()
    def compute_binary_diff(cls, npu_out, bench_out):
        """Bit-exact comparison for non-floating dtypes."""
        result = torch.equal(npu_out, bench_out)
        if result:
            logger.info("二进制精度比对通过, 无需单标杆比对法验证")
        return SingleBenchmarkAccuracyResult(result=result, max_abs_diff=0, max_rel_diff=0, error_balance=0)

    @classmethod
    @func_log_wrapper()
    def compute_error_balance(cls, npu_out, bench_out, benchmark_standard: SingleBenchmarkCompareStandard):
        """Mean signed error: absolute diff where |bench| is small, relative diff elsewhere."""
        ones = torch.ones_like(npu_out)
        zeros = torch.zeros_like(npu_out)
        abs_mask_idx = torch.where(torch.abs(bench_out) < benchmark_standard.small_value, ones, zeros)
        abs_mask_idx = abs_mask_idx.type(torch.bool)
        diff_value = torch.subtract(npu_out, bench_out)
        # eps keeps the division defined when bench_out has zero entries.
        diff_value_rel = diff_value / (torch.abs(bench_out) + torch.finfo(torch.float).eps)
        rel_and_abs = torch.where(abs_mask_idx, diff_value, diff_value_rel)
        eb_float = float(torch.mean(rel_and_abs))
        return eb_float

    @classmethod
    @func_log_wrapper()
    def compute_abs_diff(cls, npu_out, bench_out, error_thd, benchmark_standard: SingleBenchmarkCompareStandard):
        """Largest absolute error over the small-value region.

        Returns (max_abs_diff, max_abs_idx); idx is None when the region
        contains no candidates.
        """
        max_abs_diff = 0
        max_abs_idx = None

        ones = torch.ones_like(npu_out)
        zeros = torch.zeros_like(npu_out)
        diff_value = torch.subtract(npu_out, bench_out)
        diff_abs = torch.abs(diff_value)
        # NOTE(review): this line was truncated in the recovered source;
        # reconstructed as the small-value mask, mirroring compute_rel_diff's
        # complementary ">= small_value" mask — confirm against upstream.
        abs_mask_idx = torch.where(torch.abs(bench_out) < benchmark_standard.small_value, ones, zeros)
        abs_err_idx = torch.where(diff_abs > error_thd, ones, zeros)
        abs_err_idx = abs_err_idx * abs_mask_idx
        abs_err = diff_abs[torch.where(abs_err_idx == 1)]

        if len(abs_err) > 0:
            # Some element exceeded the threshold: report the worst offender.
            err_for_max = torch.where(abs_err_idx == 1, diff_abs, zeros)
            logging.debug("err_for_max for abs %s", err_for_max)
            max_abs_idx = torch.argmax(err_for_max)
            max_abs_diff = diff_abs[max_abs_idx]
        elif torch.sum(abs_mask_idx) > 0:
            # No violation: still report the largest diff inside the region.
            err_for_max = torch.where(abs_mask_idx == 1, diff_abs, zeros)
            logging.debug("error_for_max for abs %s", err_for_max)
            max_abs_idx = torch.argmax(err_for_max)
            if err_for_max.max() != 0:
                max_abs_diff = diff_abs[max_abs_idx]
        return (float(max_abs_diff), int(max_abs_idx) if torch.is_tensor(max_abs_idx) else max_abs_idx)

    @classmethod
    @func_log_wrapper()
    def compute_rel_diff(cls, npu_out, bench_out, error_thd, benchmark_standard: SingleBenchmarkCompareStandard):
        """Largest relative error over the large-value region.

        Returns (max_rel_diff, max_rel_idx); idx is None when the region
        contains no candidates.
        """
        max_rel_diff = 0
        max_rel_idx = None

        ones = torch.ones_like(npu_out)
        zeros = torch.zeros_like(npu_out)
        diff_value = torch.subtract(npu_out, bench_out)
        diff_abs = torch.abs(diff_value)

        rel_mask_idx = torch.where(torch.abs(bench_out) >= benchmark_standard.small_value, ones, zeros)
        # eps keeps the division defined when bench_out has zero entries.
        rel_err = diff_abs / (torch.abs(bench_out) + torch.finfo(torch.float).eps)
        diff_rel = rel_err
        rel_err_idx = torch.where(rel_err > error_thd, ones, zeros)
        rel_err_idx = rel_err_idx * rel_mask_idx
        rel_err = rel_err[torch.where(rel_err_idx == 1)]
        if len(rel_err) > 0:
            # Some element exceeded the threshold: report the worst offender.
            err_for_max = torch.where(rel_err_idx == 1, diff_rel, zeros)
            logging.debug("error_for_max for rel %s", err_for_max)
            max_rel_idx = torch.argmax(err_for_max)
            max_rel_diff = diff_rel[max_rel_idx]
        elif torch.sum(rel_mask_idx) > 0:
            # Fixed for consistency with compute_abs_diff: the original wrote
            # torch.sum(rel_mask_idx > 0), which is truthiness-equivalent for
            # this 0/1 mask but misplaced the comparison.
            err_for_max = torch.where(rel_mask_idx == 1, diff_rel, zeros)
            logging.debug("err_for_max for rel %s", err_for_max)
            max_rel_idx = torch.argmax(err_for_max)
            if torch.sum(err_for_max) != 0:
                max_rel_diff = diff_rel[max_rel_idx]
        return (float(max_rel_diff), int(max_rel_idx) if torch.is_tensor(max_rel_idx) else max_rel_idx)
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
class SingleBenchSummary:
    """Flattened, display-ready report of one single-benchmark comparison."""

    def __init__(self, precision_result: SingleBenchmarkAccuracyResult, npu_dtype=None,
                 bench_dtype=None, shape=None, error_thd=None, eb_thd=None, failed_info=None):
        self.npu_dtype = npu_dtype
        self.bench_dtype = bench_dtype
        self.shape = shape
        # Copy the per-metric outcome out of the accuracy result.
        self.result = precision_result.result
        self.error_balance = precision_result.error_balance
        self.max_abs_diff = precision_result.max_abs_diff
        self.max_abs_idx = precision_result.max_abs_idx
        self.max_rel_diff = precision_result.max_rel_diff
        self.max_rel_idx = precision_result.max_rel_idx
        self.eb_thd = eb_thd
        self.error_thd = error_thd
        self.failed_info = failed_info

    def get_check_result(self):
        """Return "PASS" or "FAILED" for the overall outcome."""
        return "PASS" if self.result else "FAILED"

    def get_result_msg(self):
        """Build a human-readable message describing the comparison outcome."""
        if self.failed_info:
            return self.failed_info

        pieces = []
        if self.result:
            pieces.append("误差均衡性EB: %s <= 阈值%s\n" % (self.error_balance, self.eb_thd))
            pieces.append("最大绝对误差: %s <= 阈值%s\n" % (self.max_abs_diff, self.error_thd))
            pieces.append("最大相对误差: %s <= 阈值%s\n" % (self.max_rel_diff, self.error_thd))
        else:
            # Only the metrics that actually exceeded their thresholds are listed.
            if self.error_balance > self.eb_thd:
                pieces.append("误差均衡性EB超过阈值%s: EB = %s\n" % (
                    self.eb_thd,
                    self.error_balance,
                ))
            if self.max_abs_diff > self.error_thd:
                pieces.append("小值域最大绝对误差超过阈值%s: idx = %s, 绝对误差 = %s\n" % (
                    self.error_thd,
                    self.max_abs_idx,
                    self.max_abs_diff
                ))
            if self.max_rel_diff > self.error_thd:
                pieces.append("大值域最大相对误差超过阈值%s: idx = %s, 相对误差 = %s\n" % (
                    self.error_thd,
                    self.max_rel_idx,
                    self.max_rel_diff,
                ))
        return "".join(pieces)

    def print_detail_table(self):
        """Log a PrettyTable of every metric against its threshold."""
        table = PrettyTable()
        table.title = "Single Benchmark Metrics Info"
        table.field_names = ["Index", "Result", "Threshold"]
        detail_rows = [
            ["error_balance", self.error_balance, self.eb_thd],
            ["max_abs_diff", self.max_abs_diff, self.error_thd],
            ["max_abs_idx", self.max_abs_idx, "-"],
            ["max_rel_diff", self.max_rel_diff, self.error_thd],
            ["max_rel_idx", self.max_rel_idx, "-"],
        ]
        for row in detail_rows:
            table.add_row(row)

        logger.info(table)

    def to_column_value(self):
        """Return the summary fields in the fixed export column order."""
        return [
            self.bench_dtype, self.npu_dtype, self.shape, self.error_balance,
            self.max_abs_diff, self.max_abs_idx, self.max_rel_diff, self.max_rel_idx,
            self.eb_thd, self.error_thd, self.result, self.failed_info,
        ]
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
def single_benchmark_compare(npu_out: torch.Tensor, bench_out: torch.Tensor, high_precision: bool = True):
    """Compare a flattened NPU tensor against its benchmark tensor under the
    single-benchmark standard.

    Returns (result, details) where result is the pass/fail bool and details
    is the report column list built by SingleBenchSummary.
    """
    standard = SingleBenchmarkCompareStandard(high_precision)
    npu_flat = npu_out.flatten()
    bench_flat = bench_out.flatten()

    outcome = SingleBenchmarkAccuracyCompare.precision_compare_for_case(npu_flat, bench_flat, standard)

    summary = SingleBenchSummary(
        outcome.accuracy_result,
        str(npu_flat.dtype),
        str(bench_flat.dtype),
        tuple(npu_flat.shape),
        outcome.error_threshold,
        outcome.eb_threshold,
        outcome.failed_information,
    )
    return summary.result, summary.to_column_value()
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
def calc_status_details_list_tuple(npu_out, bench_out, summary):
    """Element-wise single-benchmark comparison for list/tuple outputs.

    Fix: this (old-version) text was truncated — the signature lacked the
    closing `summary):` and the wrap call was missing its closing
    parenthesis, both SyntaxErrors; they are restored here.

    On a length mismatch the summary is marked failed and a single
    (False, columns) pair is returned; otherwise per-element status and
    detail lists are returned.
    """
    if len(bench_out) != len(npu_out):
        summary.result = False
        summary.failed_info = "bench and npu output structure is different."
        return False, summary.to_column_value()
    status, details = [], []
    for bench_item, npu_item in zip(bench_out, npu_out):
        item_status, item_details = single_benchmark_compare_wrap(npu_item, bench_item)
        status.append(item_status)
        details.append(item_details)
    return status, details
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
def calc_status_details_dict(npu_out, bench_out, summary):
    """Single-benchmark comparison for dict outputs.

    Fix: this (old-version) text had a truncated signature, and the match
    branch called list(bench_out.values(), list(npu_out.values())) — a
    TypeError (list takes one argument) that also fed bench values where
    single_benchmark_compare_wrap expects the npu values first.

    Key sets must match exactly; matching dicts are compared value-wise.
    NOTE(review): values are compared in dict insertion order, not matched
    by key — confirm callers always build both dicts in the same order.
    """
    b_keys, n_keys = set(bench_out.keys()), set(npu_out.keys())
    if b_keys != n_keys:
        summary.result = False
        summary.failed_info = "bench and npu_output dict keys are different."
        return False, summary.to_column_value()
    status, details = single_benchmark_compare_wrap(list(npu_out.values()), list(bench_out.values()))
    return status, details
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
def calc_status_details_tensor(npu_out, bench_out, summary):
    """Tensor outputs delegate to single_benchmark_compare.

    Fix: this (old-version) text was truncated to an unterminated
    "return single_benchmark_compare(" — the call is completed with the
    npu/bench pair. The summary argument is unused but keeps the dispatch
    signature uniform with the other calc_status_details_* handlers.
    """
    return single_benchmark_compare(npu_out, bench_out)
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
def calc_status_details_builtin(npu_out, bench_out, summary):
    """Compare plain Python scalars (bool/int/float/str) by equality."""
    summary.bench_dtype = str(type(bench_out))
    summary.npu_dtype = str(type(npu_out))
    equal = bench_out == npu_out
    summary.result = equal
    return equal, summary.to_column_value()
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
def calc_status_details_none(npu_out, bench_out, summary):
    """A None benchmark output is treated as a trivially passing case.

    Fix: this (old-version) text had a truncated signature
    ("...(npu_out, bench_out," with no closing parenthesis, SyntaxError);
    it is restored with the summary parameter used in the body.
    """
    summary.result = True
    summary.failed_info = "Output is None."
    return True, summary.to_column_value()
|
|
372
|
-
|
|
373
|
-
|
|
374
|
-
def single_benchmark_compare_wrap(npu_output: torch.Tensor, bench_output: torch.Tensor):
    """Dispatch a single-benchmark comparison based on the benchmark output type.

    Fixes in this (old-version) text: the signature and the handler call were
    truncated (unbalanced parentheses, SyntaxError); and the dispatch table
    keyed None directly, which makes isinstance(x, None) raise TypeError —
    the NoneType class (type(None)) must be used instead.
    """
    type_method_dict = {
        (list, tuple): calc_status_details_list_tuple,
        dict: calc_status_details_dict,
        torch.Tensor: calc_status_details_tensor,
        (bool, int, float, str): calc_status_details_builtin,
        type(None): calc_status_details_none,
    }

    result = SingleBenchmarkAccuracyResult(result=True)
    bench_summary = SingleBenchSummary(result)
    for candidate_type, handler in type_method_dict.items():
        if isinstance(bench_output, candidate_type):
            return handler(npu_output, bench_output, bench_summary)

    # NOTE(review): unexpected types are reported as passing (True); this
    # mirrors the surrounding code's behavior — confirm it is intentional.
    bench_summary.result = True
    bench_summary.failed_info = "Unexpected output type: {}".format(type(bench_output))
    return True, bench_summary.to_column_value()
|
|
1
|
+
import logging
|
|
2
|
+
from functools import wraps
|
|
3
|
+
import torch
|
|
4
|
+
from prettytable import PrettyTable
|
|
5
|
+
from collections import namedtuple
|
|
6
|
+
from msprobe.pytorch.common.log import logger
|
|
7
|
+
|
|
8
|
+
def func_log_wrapper():
    """Decorator factory that logs entry and exit of the wrapped function.

    Returns a decorator which, applied to ``func``, logs a start/end line
    around every call and passes the return value through unchanged.
    """
    def decorate(func):
        @wraps(func)
        def wrapped(*args, **kwargs):
            logger.info(f"start to run: {func.__name__}")
            ret = func(*args, **kwargs)
            logger.info(f"end to run: {func.__name__}")
            return ret

        return wrapped

    return decorate
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class SingleBenchmarkCompareStandard:
    """Threshold table for the single-benchmark accuracy standard.

    Holds per-dtype error thresholds (a strict and a loose variant) and
    error-balance thresholds for the floating-point dtypes the standard
    supports (fp16, bf16, fp32; fp64 reuses the fp32 values).
    """

    def __init__(self, high_precision=True):
        # strict thresholds are selected when high_precision is True
        self.high_precision = high_precision
        # elements whose benchmark magnitude is below this use absolute error
        self.small_value = 1.0
        # dtype -> [strict threshold, loose threshold]
        self.error_thd = {
            torch.float16: [2 ** -11, 2 ** -7],
            torch.bfloat16: [2 ** -8, 2 ** -6],
            torch.float32: [2 ** -14, 2 ** -11],
            torch.float64: [2 ** -14, 2 ** -11],
        }
        # dtype -> error-balance threshold
        self.eb_thd = {
            torch.float16: 2 ** -10,
            torch.bfloat16: 2 ** -7,
            torch.float32: 2 ** -14,
            torch.float64: 2 ** -14,
        }

    def get_error_thd(self, dtype):
        """Return the error threshold for dtype, or None when unsupported."""
        if dtype not in self.error_thd:
            logging.error(
                "Single benchmark compare only supports floating point "
                "in fp16, bf16, fp32. "
            )
            return None
        if dtype == torch.float64:
            logging.warning("the output data of fp64 uses the same standard as fp32.")
        thresholds = self.error_thd.get(dtype)
        return thresholds[0] if self.high_precision else thresholds[1]

    def get_eb_thd(self, dtype):
        """Return the error-balance threshold for dtype, or None when unsupported."""
        return self.eb_thd.get(dtype)
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
class SingleBenchmarkAccuracyResult:
    """Container for the metrics produced by a single-benchmark comparison."""

    def __init__(self, result=True, error_balance=None, max_abs_diff=None,
                 max_abs_idx=None, max_rel_diff=None, max_rel_idx=None):
        self.result = result                # overall pass/fail flag
        self.error_balance = error_balance  # mean signed error (EB metric)
        self.max_abs_diff = max_abs_diff    # worst absolute error (small-value domain)
        self.max_abs_idx = max_abs_idx      # flat index of that absolute error
        self.max_rel_diff = max_rel_diff    # worst relative error (large-value domain)
        self.max_rel_idx = max_rel_idx      # flat index of that relative error

    def get_result(self, eb_thd, error_thd):
        """Set self.result: pass only if every metric is within its threshold."""
        passed = (self.error_balance <= eb_thd
                  and self.max_abs_diff <= error_thd
                  and self.max_rel_diff <= error_thd)
        self.result = passed
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
class SingleBenchmarkAccuracyCompare:
    """Compares an NPU output tensor to a benchmark (golden) tensor under the
    single-benchmark standard: error balance plus per-domain absolute and
    relative error checks."""

    @classmethod
    @func_log_wrapper()
    def check_output_size(cls, npu_out, bench_out):
        """Pre-check the two outputs' sizes.

        Returns a SingleBenchmarkAccuracyResult when the size check alone
        decides the outcome (both empty -> pass, size mismatch -> fail);
        returns None when the numeric comparison should proceed.
        """
        acc_result = None
        # Fix: the original read bench_out.nuimel() — an AttributeError that
        # fired whenever npu_out was empty; numel() is the intended call.
        if npu_out.numel() == 0 and bench_out.numel() == 0:
            info = (
                "The npu_output is [], and it is same as benchmark_output, "
                "the result of data_compare is Pass"
            )
            logging.debug(info)
            acc_result = SingleBenchmarkAccuracyResult(result=True)

        if npu_out.size() != bench_out.size():
            error_info = (
                f"the size of npu output[{npu_out.size()}] and"
                f"benchmark[{bench_out.size()}] is not equal"
            )

            logging.error(error_info)
            acc_result = SingleBenchmarkAccuracyResult(result=False)
        return acc_result

    @classmethod
    @func_log_wrapper()
    def check_output_invalid_value(cls, output):
        """Return a truthy value when output contains any NaN or Inf element."""
        has_nan = torch.isnan(output).any()
        has_inf = torch.isinf(output).any()
        return has_nan or has_inf

    @classmethod
    @func_log_wrapper()
    def precision_compare_for_case(cls, npu_out, bench_out, benchmark_standard: SingleBenchmarkCompareStandard):
        """Run the full comparison pipeline for one output pair.

        Returns a CompareResultInfo namedtuple of
        (accuracy_result, error_threshold, eb_threshold, failed_information).
        """
        error_thd = None
        eb_thd = None
        acc_result = cls.check_output_size(npu_out, bench_out)
        CompareResultInfo = namedtuple("CompareResultInfo",
                                       ['accuracy_result', 'error_threshold', 'eb_threshold', 'failed_information'])

        if acc_result:
            # NOTE(review): this branch is also reached for the "both outputs
            # empty" pass result, which then carries the shape-mismatch
            # message below — confirm whether that is intended.
            failed_info = "比对数据的shape不一致"
            return CompareResultInfo(acc_result, error_thd, eb_thd, failed_info)

        if cls.check_output_invalid_value(bench_out):
            logging.info("The benchmark result contains nan/inf value. ")
            failed_info = "标杆结果存在nan值或inf值, 依照单标杆标准该用例通过"
            acc_result = SingleBenchmarkAccuracyResult(result=True)
            return CompareResultInfo(acc_result, error_thd, eb_thd, failed_info)

        if cls.check_output_invalid_value(npu_out):
            logging.info("The NPU result contains nan/inf value. ")
            failed_info = "NPU结果存在nan值或inf值, 依照单标杆标准该用例不通过"
            acc_result = SingleBenchmarkAccuracyResult(result=False)
            return CompareResultInfo(acc_result, error_thd, eb_thd, failed_info)

        data_type = npu_out.dtype
        if data_type not in [torch.float16, torch.float32, torch.float64, torch.bfloat16]:
            # non-float outputs fall back to an exact (binary) comparison
            acc_result = cls.compute_binary_diff(npu_out, bench_out)
        else:
            error_thd = benchmark_standard.get_error_thd(data_type)
            eb_thd = benchmark_standard.get_eb_thd(data_type)
            if error_thd is None:
                logging.error(
                    "single benchmark not support the comparison of %s", str(data_type)
                )
                acc_result = SingleBenchmarkAccuracyResult(result=False)
            else:
                # promote half precision to fp32 when the benchmark is fp32
                if npu_out.dtype in [torch.float16, torch.bfloat16] and bench_out.dtype in [torch.float32]:
                    npu_out = npu_out.to(torch.float32)
                error_balance = cls.compute_error_balance(npu_out, bench_out, benchmark_standard)
                max_abs_diff, max_abs_idx = cls.compute_abs_diff(npu_out, bench_out, error_thd, benchmark_standard)
                max_rel_diff, max_rel_idx = cls.compute_rel_diff(npu_out, bench_out, error_thd, benchmark_standard)
                acc_result = SingleBenchmarkAccuracyResult(
                    error_balance=error_balance,
                    max_abs_diff=max_abs_diff,
                    max_abs_idx=max_abs_idx,
                    max_rel_diff=max_rel_diff,
                    max_rel_idx=max_rel_idx
                )
                acc_result.get_result(eb_thd, error_thd)
        return CompareResultInfo(acc_result, error_thd, eb_thd, None)

    @classmethod
    @func_log_wrapper()
    def compute_binary_diff(cls, npu_out, bench_out):
        """Exact element-wise equality check for non-float outputs."""
        result = torch.equal(npu_out, bench_out)
        if result:
            logger.info("二进制精度比对通过, 无需单标杆比对法验证")
        return SingleBenchmarkAccuracyResult(result=result, max_abs_diff=0, max_rel_diff=0, error_balance=0)

    @classmethod
    @func_log_wrapper()
    def compute_error_balance(cls, npu_out, bench_out, benchmark_standard: SingleBenchmarkCompareStandard):
        """Signed mean error: absolute error where |bench| < small_value,
        relative error elsewhere."""
        ones = torch.ones_like(npu_out)
        zeros = torch.zeros_like(npu_out)
        abs_mask_idx = torch.where(torch.abs(bench_out) < benchmark_standard.small_value, ones, zeros)
        abs_mask_idx = abs_mask_idx.type(torch.bool)
        diff_value = torch.subtract(npu_out, bench_out)
        # eps keeps the division finite when the benchmark value is zero
        diff_value_rel = diff_value / (torch.abs(bench_out) + torch.finfo(torch.float).eps)
        rel_and_abs = torch.where(abs_mask_idx, diff_value, diff_value_rel)
        eb_float = float(torch.mean(rel_and_abs))
        return eb_float

    @classmethod
    @func_log_wrapper()
    def compute_abs_diff(cls, npu_out, bench_out, error_thd, benchmark_standard: SingleBenchmarkCompareStandard):
        """Largest absolute error over elements where |bench| >= small_value.

        Returns (max_abs_diff, max_abs_idx); idx is None when that domain is
        empty, and the diff stays 0 when no element exceeds the threshold
        and all in-domain diffs are zero.
        """
        max_abs_diff = 0
        max_abs_idx = None

        ones = torch.ones_like(npu_out)
        zeros = torch.zeros_like(npu_out)
        diff_value = torch.subtract(npu_out, bench_out)
        diff_abs = torch.abs(diff_value)
        abs_mask_idx = torch.where(torch.abs(bench_out) >= benchmark_standard.small_value, ones, zeros)
        abs_err_idx = torch.where(diff_abs > error_thd, ones, zeros)
        abs_err_idx = abs_err_idx * abs_mask_idx
        abs_err = diff_abs[torch.where(abs_err_idx == 1)]

        if len(abs_err) > 0:
            # at least one in-domain element exceeds the threshold: take the worst
            err_for_max = torch.where(abs_err_idx == 1, diff_abs, zeros)
            logging.debug("err_for_max for abs %s", err_for_max)
            max_abs_idx = torch.argmax(err_for_max)
            max_abs_diff = diff_abs[max_abs_idx]
        elif torch.sum(abs_mask_idx) > 0:
            # nothing over threshold: report the largest in-domain error
            err_for_max = torch.where(abs_mask_idx == 1, diff_abs, zeros)
            logging.debug("error_for_max for abs %s", err_for_max)
            max_abs_idx = torch.argmax(err_for_max)
            if err_for_max.max() != 0:
                max_abs_diff = diff_abs[max_abs_idx]
        return (float(max_abs_diff), int(max_abs_idx) if torch.is_tensor(max_abs_idx) else max_abs_idx)

    @classmethod
    @func_log_wrapper()
    def compute_rel_diff(cls, npu_out, bench_out, error_thd, benchmark_standard: SingleBenchmarkCompareStandard):
        """Largest relative error over elements where |bench| >= small_value.

        Returns (max_rel_diff, max_rel_idx); idx is None when that domain is
        empty, and the diff stays 0 when no element exceeds the threshold
        and all in-domain relative errors are zero.
        """
        max_rel_diff = 0
        max_rel_idx = None

        ones = torch.ones_like(npu_out)
        zeros = torch.zeros_like(npu_out)
        diff_value = torch.subtract(npu_out, bench_out)
        diff_abs = torch.abs(diff_value)

        rel_mask_idx = torch.where(torch.abs(bench_out) >= benchmark_standard.small_value, ones, zeros)
        # eps keeps the division finite when the benchmark value is zero
        rel_err = diff_abs / (torch.abs(bench_out) + torch.finfo(torch.float).eps)
        diff_rel = rel_err
        rel_err_idx = torch.where(rel_err > error_thd, ones, zeros)
        rel_err_idx = rel_err_idx * rel_mask_idx
        rel_err = rel_err[torch.where(rel_err_idx == 1)]
        if len(rel_err) > 0:
            # at least one in-domain element exceeds the threshold: take the worst
            err_for_max = torch.where(rel_err_idx == 1, diff_rel, zeros)
            logging.debug("error_for_max for rel %s", err_for_max)
            max_rel_idx = torch.argmax(err_for_max)
            max_rel_diff = diff_rel[max_rel_idx]
        elif torch.sum(rel_mask_idx > 0):
            # nothing over threshold: report the largest in-domain relative error
            err_for_max = torch.where(rel_mask_idx == 1, diff_rel, zeros)
            logging.debug("err_for_max for rel %s", err_for_max)
            max_rel_idx = torch.argmax(err_for_max)
            if torch.sum(err_for_max) != 0:
                max_rel_diff = diff_rel[max_rel_idx]
        return (float(max_rel_diff), int(max_rel_idx) if torch.is_tensor(max_rel_idx) else max_rel_idx)
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
class SingleBenchSummary:
    """Flattened summary of one single-benchmark comparison, used for
    report rows and log output."""

    def __init__(self, precision_result: SingleBenchmarkAccuracyResult, npu_dtype=None,
                 bench_dtype=None, shape=None, error_thd=None, eb_thd=None, failed_info=None):
        self.npu_dtype = npu_dtype
        self.bench_dtype = bench_dtype
        self.shape = shape
        # copy the metric fields out of the accuracy result
        self.result = precision_result.result
        self.error_balance = precision_result.error_balance
        self.max_abs_diff = precision_result.max_abs_diff
        self.max_abs_idx = precision_result.max_abs_idx
        self.max_rel_diff = precision_result.max_rel_diff
        self.max_rel_idx = precision_result.max_rel_idx
        self.eb_thd = eb_thd
        self.error_thd = error_thd
        self.failed_info = failed_info

    def get_check_result(self):
        """Return "PASS" or "FAILED" according to the boolean result."""
        return "PASS" if self.result else "FAILED"

    def get_result_msg(self):
        """Build a human-readable message describing the comparison outcome.

        A pre-set failed_info short-circuits; otherwise a pass lists every
        metric against its threshold and a failure lists only the metrics
        that exceeded theirs.
        """
        if self.failed_info:
            return self.failed_info

        parts = []
        if self.result:
            parts.append("误差均衡性EB: %s <= 阈值%s\n" % (self.error_balance, self.eb_thd))
            parts.append("最大绝对误差: %s <= 阈值%s\n" % (self.max_abs_diff, self.error_thd))
            parts.append("最大相对误差: %s <= 阈值%s\n" % (self.max_rel_diff, self.error_thd))
        else:
            if self.error_balance > self.eb_thd:
                parts.append("误差均衡性EB超过阈值%s: EB = %s\n" % (self.eb_thd, self.error_balance))
            if self.max_abs_diff > self.error_thd:
                parts.append("小值域最大绝对误差超过阈值%s: idx = %s, 绝对误差 = %s\n"
                             % (self.error_thd, self.max_abs_idx, self.max_abs_diff))
            if self.max_rel_diff > self.error_thd:
                parts.append("大值域最大相对误差超过阈值%s: idx = %s, 相对误差 = %s\n"
                             % (self.error_thd, self.max_rel_idx, self.max_rel_diff))
        return "".join(parts)

    def print_detail_table(self):
        """Log a PrettyTable listing each metric next to its threshold."""
        table = PrettyTable()
        table.title = "Single Benchmark Metrics Info"
        table.field_names = ["Index", "Result", "Threshold"]
        for row in (["error_balance", self.error_balance, self.eb_thd],
                    ["max_abs_diff", self.max_abs_diff, self.error_thd],
                    ["max_abs_idx", self.max_abs_idx, "-"],
                    ["max_rel_diff", self.max_rel_diff, self.error_thd],
                    ["max_rel_idx", self.max_rel_idx, "-"]):
            table.add_row(row)

        logger.info(table)

    def to_column_value(self):
        """Return the summary as the ordered column list for the report."""
        return [self.bench_dtype, self.npu_dtype, self.shape, self.error_balance,
                self.max_abs_diff, self.max_abs_idx, self.max_rel_diff, self.max_rel_idx,
                self.eb_thd, self.error_thd, self.result, self.failed_info]
|
|
310
|
+
|
|
311
|
+
|
|
312
|
+
def single_benchmark_compare(npu_out: torch.Tensor, bench_out: torch.Tensor, high_precision: bool = True):
    """Compare a flattened NPU tensor against its benchmark tensor under the
    single-benchmark standard.

    Returns (result, details) where result is the pass/fail bool and details
    is the report column list built by SingleBenchSummary.
    """
    standard = SingleBenchmarkCompareStandard(high_precision)
    npu_flat = npu_out.flatten()
    bench_flat = bench_out.flatten()

    outcome = SingleBenchmarkAccuracyCompare.precision_compare_for_case(npu_flat, bench_flat, standard)

    summary = SingleBenchSummary(
        outcome.accuracy_result,
        str(npu_flat.dtype),
        str(bench_flat.dtype),
        tuple(npu_flat.shape),
        outcome.error_threshold,
        outcome.eb_threshold,
        outcome.failed_information,
    )
    return summary.result, summary.to_column_value()
|
|
330
|
+
|
|
331
|
+
|
|
332
|
+
def calc_status_details_list_tuple(npu_out, bench_out, summary):
    """Element-wise single-benchmark comparison for list/tuple outputs.

    On a length mismatch the summary is marked failed and a single
    (False, columns) pair is returned; otherwise per-element status and
    detail lists are returned.
    """
    if len(bench_out) != len(npu_out):
        summary.result = False
        summary.failed_info = "bench and npu output structure is different."
        return False, summary.to_column_value()
    status, details = [], []
    for bench_item, npu_item in zip(bench_out, npu_out):
        item_status, item_details = single_benchmark_compare_wrap(npu_item, bench_item)
        status.append(item_status)
        details.append(item_details)
    return status, details
|
|
343
|
+
|
|
344
|
+
|
|
345
|
+
def calc_status_details_dict(npu_out, bench_out, summary):
    """Single-benchmark comparison for dict outputs.

    Fix: the match branch called list(bench_out.values(), list(npu_out.values()))
    — a TypeError (list takes at most one argument) that also fed bench values
    where single_benchmark_compare_wrap expects the npu values first.

    Key sets must match exactly; matching dicts are compared value-wise.
    NOTE(review): values are compared in dict insertion order, not matched
    by key — confirm callers always build both dicts in the same order.
    """
    b_keys, n_keys = set(bench_out.keys()), set(npu_out.keys())
    if b_keys != n_keys:
        summary.result = False
        summary.failed_info = "bench and npu_output dict keys are different."
        return False, summary.to_column_value()
    status, details = single_benchmark_compare_wrap(list(npu_out.values()), list(bench_out.values()))
    return status, details
|
|
354
|
+
|
|
355
|
+
|
|
356
|
+
def calc_status_details_tensor(npu_out, bench_out, summary):
    """Tensor outputs delegate directly to single_benchmark_compare; the
    summary argument is unused but keeps the dispatch signature uniform."""
    compare_outcome = single_benchmark_compare(npu_out, bench_out)
    return compare_outcome
|
|
358
|
+
|
|
359
|
+
|
|
360
|
+
def calc_status_details_builtin(npu_out, bench_out, summary):
    """Compare plain Python scalars (bool/int/float/str) by equality."""
    summary.bench_dtype = str(type(bench_out))
    summary.npu_dtype = str(type(npu_out))
    equal = bench_out == npu_out
    summary.result = equal
    return equal, summary.to_column_value()
|
|
366
|
+
|
|
367
|
+
|
|
368
|
+
def calc_status_details_none(npu_out, bench_out, summary):
    """A None benchmark output is treated as a trivially passing case."""
    summary.failed_info = "Output is None."
    summary.result = True
    return True, summary.to_column_value()
|
|
372
|
+
|
|
373
|
+
|
|
374
|
+
def single_benchmark_compare_wrap(npu_output: torch.Tensor, bench_output: torch.Tensor):
    """Dispatch a single-benchmark comparison based on the benchmark output type.

    Supports (nested) list/tuple, dict, torch.Tensor, plain scalars and None.

    Fix: the original dispatch table keyed None directly, which makes
    isinstance(x, None) raise TypeError whenever the benchmark output is
    None; the NoneType class (type(None)) must be used instead.
    """
    type_method_dict = {
        (list, tuple): calc_status_details_list_tuple,
        dict: calc_status_details_dict,
        torch.Tensor: calc_status_details_tensor,
        (bool, int, float, str): calc_status_details_builtin,
        type(None): calc_status_details_none,
    }

    result = SingleBenchmarkAccuracyResult(result=True)
    bench_summary = SingleBenchSummary(result)
    for candidate_type, handler in type_method_dict.items():
        if isinstance(bench_output, candidate_type):
            return handler(npu_output, bench_output, bench_summary)

    # NOTE(review): unexpected types are reported as passing (True); this
    # mirrors the original behavior — confirm it is intentional.
    bench_summary.result = True
    bench_summary.failed_info = "Unexpected output type: {}".format(type(bench_output))
    return True, bench_summary.to_column_value()
|