mindstudio-probe 1.0.1 (mindstudio_probe-1.0.1-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mindstudio_probe-1.0.1.dist-info/LICENSE +201 -0
- mindstudio_probe-1.0.1.dist-info/METADATA +30 -0
- mindstudio_probe-1.0.1.dist-info/RECORD +228 -0
- mindstudio_probe-1.0.1.dist-info/WHEEL +5 -0
- mindstudio_probe-1.0.1.dist-info/entry_points.txt +2 -0
- mindstudio_probe-1.0.1.dist-info/top_level.txt +1 -0
- msprobe/README.md +182 -0
- msprobe/__init__.py +0 -0
- msprobe/config/README.md +397 -0
- msprobe/config/config.json +28 -0
- msprobe/config/img/free_benchmark.png +0 -0
- msprobe/core/common/const.py +241 -0
- msprobe/core/common/exceptions.py +88 -0
- msprobe/core/common/file_check.py +265 -0
- msprobe/core/common/log.py +55 -0
- msprobe/core/common/utils.py +516 -0
- msprobe/core/common_config.py +58 -0
- msprobe/core/data_dump/data_collector.py +140 -0
- msprobe/core/data_dump/data_processor/base.py +245 -0
- msprobe/core/data_dump/data_processor/factory.py +61 -0
- msprobe/core/data_dump/data_processor/pytorch_processor.py +346 -0
- msprobe/core/data_dump/json_writer.py +116 -0
- msprobe/core/data_dump/scope.py +178 -0
- msprobe/mindspore/__init__.py +1 -0
- msprobe/mindspore/debugger/__init__.py +0 -0
- msprobe/mindspore/debugger/debugger_config.py +51 -0
- msprobe/mindspore/debugger/precision_debugger.py +32 -0
- msprobe/mindspore/doc/dump.md +65 -0
- msprobe/mindspore/dump/__init__.py +0 -0
- msprobe/mindspore/dump/api_kbk_dump.py +55 -0
- msprobe/mindspore/dump/dump_tool_factory.py +38 -0
- msprobe/mindspore/dump/kernel_graph_dump.py +60 -0
- msprobe/mindspore/ms_config.py +78 -0
- msprobe/mindspore/overflow_check/__init__.py +0 -0
- msprobe/mindspore/overflow_check/kernel_graph_overflow_check.py +45 -0
- msprobe/mindspore/overflow_check/overflow_check_tool_factory.py +32 -0
- msprobe/mindspore/task_handler_factory.py +21 -0
- msprobe/msprobe.py +67 -0
- msprobe/pytorch/__init__.py +4 -0
- msprobe/pytorch/advisor/advisor.py +124 -0
- msprobe/pytorch/advisor/advisor_const.py +59 -0
- msprobe/pytorch/advisor/advisor_result.py +58 -0
- msprobe/pytorch/api_accuracy_checker/.keep +0 -0
- msprobe/pytorch/api_accuracy_checker/__init__.py +0 -0
- msprobe/pytorch/api_accuracy_checker/common/.keep +0 -0
- msprobe/pytorch/api_accuracy_checker/common/__init__.py +0 -0
- msprobe/pytorch/api_accuracy_checker/common/config.py +50 -0
- msprobe/pytorch/api_accuracy_checker/common/utils.py +224 -0
- msprobe/pytorch/api_accuracy_checker/compare/__init__.py +0 -0
- msprobe/pytorch/api_accuracy_checker/compare/algorithm.py +216 -0
- msprobe/pytorch/api_accuracy_checker/compare/api_precision_compare.py +545 -0
- msprobe/pytorch/api_accuracy_checker/compare/api_precision_standard.yaml +133 -0
- msprobe/pytorch/api_accuracy_checker/compare/api_precision_threshold.yaml +390 -0
- msprobe/pytorch/api_accuracy_checker/compare/compare.py +345 -0
- msprobe/pytorch/api_accuracy_checker/compare/compare_column.py +74 -0
- msprobe/pytorch/api_accuracy_checker/compare/compare_utils.py +249 -0
- msprobe/pytorch/api_accuracy_checker/config.yaml +4 -0
- msprobe/pytorch/api_accuracy_checker/run_ut/.keep +0 -0
- msprobe/pytorch/api_accuracy_checker/run_ut/__init__.py +0 -0
- msprobe/pytorch/api_accuracy_checker/run_ut/data_generate.py +328 -0
- msprobe/pytorch/api_accuracy_checker/run_ut/multi_run_ut.py +203 -0
- msprobe/pytorch/api_accuracy_checker/run_ut/run_overflow_check.py +127 -0
- msprobe/pytorch/api_accuracy_checker/run_ut/run_ut.py +493 -0
- msprobe/pytorch/api_accuracy_checker/run_ut/run_ut_utils.py +7 -0
- msprobe/pytorch/api_accuracy_checker/run_ut/torch_ut_setting.json +5 -0
- msprobe/pytorch/common/__init__.py +2 -0
- msprobe/pytorch/common/compare_script.template +14 -0
- msprobe/pytorch/common/log.py +32 -0
- msprobe/pytorch/common/parse_json.py +37 -0
- msprobe/pytorch/common/utils.py +224 -0
- msprobe/pytorch/compare/acc_compare.py +1024 -0
- msprobe/pytorch/compare/distributed_compare.py +111 -0
- msprobe/pytorch/compare/highlight.py +100 -0
- msprobe/pytorch/compare/mapping.yaml +607 -0
- msprobe/pytorch/compare/match.py +36 -0
- msprobe/pytorch/compare/npy_compare.py +244 -0
- msprobe/pytorch/debugger/__init__.py +0 -0
- msprobe/pytorch/debugger/debugger_config.py +86 -0
- msprobe/pytorch/debugger/precision_debugger.py +95 -0
- msprobe/pytorch/doc/FAQ.md +193 -0
- msprobe/pytorch/doc/api_accuracy_checker.md +269 -0
- msprobe/pytorch/doc/atat/321/207/342/226/223/342/225/233/321/205/342/225/221/320/266/321/205/342/225/226/320/265/321/205/320/225/342/225/226/321/206/320/245/342/226/221/321/206/320/235/320/276dump/321/206/320/260/320/227/321/205/320/227/320/226/321/206/320/220/320/267/321/210/320/223/342/225/234/321/205/320/257/342/225/221/321/207/342/225/221/342/224/220/321/206/320/232/320/265/321/205/320/241/320/232.md +182 -0
- msprobe/pytorch/doc/dump.md +207 -0
- msprobe/pytorch/doc/img/BLOOM-7B_1.png +0 -0
- msprobe/pytorch/doc/img/BLOOM-7B_2.png +0 -0
- msprobe/pytorch/doc/img/BLOOM-7B_3.png +0 -0
- msprobe/pytorch/doc/img/BLOOM-7B_4.png +0 -0
- msprobe/pytorch/doc/img/GPT-3_1.png +0 -0
- msprobe/pytorch/doc/img/GPT-3_2.png +0 -0
- msprobe/pytorch/doc/img/GPT-3_3.png +0 -0
- msprobe/pytorch/doc/img/GPT-3_4.png +0 -0
- msprobe/pytorch/doc/img/GPT-3_5.png +0 -0
- msprobe/pytorch/doc/img/GPT-3_6.png +0 -0
- msprobe/pytorch/doc/img/GPT-3_7.png +0 -0
- msprobe/pytorch/doc/img/GPT-3_8.png +0 -0
- msprobe/pytorch/doc/img/YOLOV5S_1.png +0 -0
- msprobe/pytorch/doc/img/YOLOV5S_2.png +0 -0
- msprobe/pytorch/doc/img/accuracy_checking_details.png +0 -0
- msprobe/pytorch/doc/img/accuracy_checking_result.png +0 -0
- msprobe/pytorch/doc/img/api_precision_compare_details.png +0 -0
- msprobe/pytorch/doc/img/api_precision_compare_result.png +0 -0
- msprobe/pytorch/doc/img/auto_analyze_log.png +0 -0
- msprobe/pytorch/doc/img/compare_result_pkl.png +0 -0
- msprobe/pytorch/doc/img/compare_result_pkl_md5.png.png +0 -0
- msprobe/pytorch/doc/img/cpu_info.png +0 -0
- msprobe/pytorch/doc/img/module_compare.png +0 -0
- msprobe/pytorch/doc/parse_tool.md +286 -0
- msprobe/pytorch/doc/ptdbg_ascend_compare.md +176 -0
- msprobe/pytorch/doc/ptdbg_ascend_overview.md +68 -0
- msprobe/pytorch/doc/ptdbg_ascend_quickstart.md +381 -0
- msprobe/pytorch/doc/run_overflow_check.md +25 -0
- msprobe/pytorch/doc//321/205/320/254/320/270/321/207/342/225/221/342/224/220/321/207/342/226/223/342/225/233/321/205/342/225/221/320/266/321/206/320/277/320/244/321/205/320/277/342/225/243.md +90 -0
- msprobe/pytorch/free_benchmark/__init__.py +8 -0
- msprobe/pytorch/free_benchmark/common/__init__.py +0 -0
- msprobe/pytorch/free_benchmark/common/constant.py +67 -0
- msprobe/pytorch/free_benchmark/common/counter.py +72 -0
- msprobe/pytorch/free_benchmark/common/enums.py +37 -0
- msprobe/pytorch/free_benchmark/common/params.py +129 -0
- msprobe/pytorch/free_benchmark/common/utils.py +98 -0
- msprobe/pytorch/free_benchmark/compare/grad_saver.py +183 -0
- msprobe/pytorch/free_benchmark/compare/single_benchmark.py +104 -0
- msprobe/pytorch/free_benchmark/main.py +102 -0
- msprobe/pytorch/free_benchmark/perturbed_layers/__init__.py +0 -0
- msprobe/pytorch/free_benchmark/perturbed_layers/base_layer.py +13 -0
- msprobe/pytorch/free_benchmark/perturbed_layers/layer_factory.py +41 -0
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/__init__.py +0 -0
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/add_noise.py +90 -0
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/bit_noise.py +104 -0
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/change_value.py +63 -0
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/improve_precision.py +68 -0
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/no_change.py +28 -0
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/npu_base_layser.py +45 -0
- msprobe/pytorch/free_benchmark/perturbed_layers/run_cpu.py +19 -0
- msprobe/pytorch/free_benchmark/result_handlers/__init__.py +0 -0
- msprobe/pytorch/free_benchmark/result_handlers/base_handler.py +203 -0
- msprobe/pytorch/free_benchmark/result_handlers/check_handler.py +39 -0
- msprobe/pytorch/free_benchmark/result_handlers/fix_handler.py +24 -0
- msprobe/pytorch/free_benchmark/result_handlers/handler_factory.py +31 -0
- msprobe/pytorch/free_benchmark/result_handlers/preheat_handler.py +170 -0
- msprobe/pytorch/functional/__init__.py +0 -0
- msprobe/pytorch/functional/data_processor.py +0 -0
- msprobe/pytorch/functional/dump_module.py +39 -0
- msprobe/pytorch/hook_module/__init__.py +1 -0
- msprobe/pytorch/hook_module/api_registry.py +161 -0
- msprobe/pytorch/hook_module/hook_module.py +109 -0
- msprobe/pytorch/hook_module/support_wrap_ops.yaml +1876 -0
- msprobe/pytorch/hook_module/utils.py +29 -0
- msprobe/pytorch/hook_module/wrap_aten.py +100 -0
- msprobe/pytorch/hook_module/wrap_distributed.py +75 -0
- msprobe/pytorch/hook_module/wrap_functional.py +108 -0
- msprobe/pytorch/hook_module/wrap_npu_custom.py +73 -0
- msprobe/pytorch/hook_module/wrap_tensor.py +72 -0
- msprobe/pytorch/hook_module/wrap_torch.py +88 -0
- msprobe/pytorch/hook_module/wrap_vf.py +64 -0
- msprobe/pytorch/module_processer.py +98 -0
- msprobe/pytorch/online_dispatch/__init__.py +20 -0
- msprobe/pytorch/online_dispatch/compare.py +236 -0
- msprobe/pytorch/online_dispatch/dispatch.py +274 -0
- msprobe/pytorch/online_dispatch/dump_compare.py +186 -0
- msprobe/pytorch/online_dispatch/single_compare.py +391 -0
- msprobe/pytorch/online_dispatch/torch_ops_config.yaml +50 -0
- msprobe/pytorch/online_dispatch/utils.py +187 -0
- msprobe/pytorch/parse.py +4 -0
- msprobe/pytorch/parse_tool/__init__.py +0 -0
- msprobe/pytorch/parse_tool/cli.py +32 -0
- msprobe/pytorch/parse_tool/lib/__init__.py +0 -0
- msprobe/pytorch/parse_tool/lib/compare.py +259 -0
- msprobe/pytorch/parse_tool/lib/config.py +51 -0
- msprobe/pytorch/parse_tool/lib/file_desc.py +31 -0
- msprobe/pytorch/parse_tool/lib/interactive_cli.py +102 -0
- msprobe/pytorch/parse_tool/lib/parse_exception.py +54 -0
- msprobe/pytorch/parse_tool/lib/parse_tool.py +158 -0
- msprobe/pytorch/parse_tool/lib/utils.py +367 -0
- msprobe/pytorch/parse_tool/lib/visualization.py +90 -0
- msprobe/pytorch/pt_config.py +93 -0
- msprobe/pytorch/service.py +167 -0
- msprobe/test/core_ut/common/test_utils.py +345 -0
- msprobe/test/core_ut/data_dump/test_data_collector.py +47 -0
- msprobe/test/core_ut/data_dump/test_json_writer.py +183 -0
- msprobe/test/core_ut/data_dump/test_scope.py +151 -0
- msprobe/test/core_ut/test_common_config.py +152 -0
- msprobe/test/core_ut/test_file_check.py +218 -0
- msprobe/test/core_ut/test_log.py +109 -0
- msprobe/test/mindspore_ut/test_api_kbk_dump.py +51 -0
- msprobe/test/mindspore_ut/test_debugger_config.py +42 -0
- msprobe/test/mindspore_ut/test_dump_tool_factory.py +51 -0
- msprobe/test/mindspore_ut/test_kernel_graph_dump.py +66 -0
- msprobe/test/mindspore_ut/test_kernel_graph_overflow_check.py +63 -0
- msprobe/test/mindspore_ut/test_ms_config.py +69 -0
- msprobe/test/mindspore_ut/test_overflow_check_tool_factory.py +51 -0
- msprobe/test/mindspore_ut/test_precision_debugger.py +56 -0
- msprobe/test/mindspore_ut/test_task_handler_factory.py +58 -0
- msprobe/test/pytorch_ut/advisor/test_advisor.py +83 -0
- msprobe/test/pytorch_ut/api_accuracy_checker/common/test_common_utils.py +108 -0
- msprobe/test/pytorch_ut/api_accuracy_checker/common/test_config.py +39 -0
- msprobe/test/pytorch_ut/api_accuracy_checker/compare/test_algorithm.py +112 -0
- msprobe/test/pytorch_ut/api_accuracy_checker/compare/test_api_precision_compare.py +77 -0
- msprobe/test/pytorch_ut/api_accuracy_checker/compare/test_compare.py +125 -0
- msprobe/test/pytorch_ut/api_accuracy_checker/compare/test_compare_column.py +10 -0
- msprobe/test/pytorch_ut/api_accuracy_checker/compare/test_compare_utils.py +43 -0
- msprobe/test/pytorch_ut/api_accuracy_checker/run_ut/dump.json +179 -0
- msprobe/test/pytorch_ut/api_accuracy_checker/run_ut/forward.json +63 -0
- msprobe/test/pytorch_ut/api_accuracy_checker/run_ut/test_data_generate.py +99 -0
- msprobe/test/pytorch_ut/api_accuracy_checker/run_ut/test_multi_run_ut.py +115 -0
- msprobe/test/pytorch_ut/api_accuracy_checker/run_ut/test_run_ut.py +72 -0
- msprobe/test/pytorch_ut/compare/test_acc_compare.py +17 -0
- msprobe/test/pytorch_ut/free_benchmark/perturbed_layers/test_perturbed_layser.py +105 -0
- msprobe/test/pytorch_ut/free_benchmark/result_handlers/test_result_handler.py +121 -0
- msprobe/test/pytorch_ut/free_benchmark/test_main.py +101 -0
- msprobe/test/pytorch_ut/functional/test_dump_module.py +15 -0
- msprobe/test/pytorch_ut/hook_module/test_api_registry.py +130 -0
- msprobe/test/pytorch_ut/hook_module/test_hook_module.py +42 -0
- msprobe/test/pytorch_ut/hook_module/test_wrap_aten.py +65 -0
- msprobe/test/pytorch_ut/hook_module/test_wrap_distributed.py +35 -0
- msprobe/test/pytorch_ut/hook_module/test_wrap_functional.py +20 -0
- msprobe/test/pytorch_ut/hook_module/test_wrap_tensor.py +35 -0
- msprobe/test/pytorch_ut/hook_module/test_wrap_torch.py +43 -0
- msprobe/test/pytorch_ut/hook_module/test_wrap_vf.py +11 -0
- msprobe/test/pytorch_ut/test_pt_config.py +69 -0
- msprobe/test/pytorch_ut/test_service.py +59 -0
- msprobe/test/resources/advisor.txt +3 -0
- msprobe/test/resources/compare_result_20230703104808.csv +9 -0
- msprobe/test/resources/compare_result_without_accuracy.csv +9 -0
- msprobe/test/resources/config.yaml +3 -0
- msprobe/test/resources/npu_test.pkl +8 -0
- msprobe/test/run_test.sh +30 -0
- msprobe/test/run_ut.py +58 -0
- msprobe/test/test_module_processer.py +64 -0
msprobe/pytorch/free_benchmark/common/params.py
@@ -0,0 +1,129 @@
+from dataclasses import dataclass
+from typing import Any, Callable, Dict, List, Optional, Tuple
+
+import torch
+from msprobe.pytorch.free_benchmark import logger
+from msprobe.pytorch.free_benchmark.common.enums import (
+    DeviceType,
+    FuzzLevel,
+    PerturbationMode,
+)
+from msprobe.pytorch.free_benchmark.common.utils import Tools
+
+
+@dataclass
+class DataParams:
+    args: Optional[Tuple] = None
+    kwargs: Optional[Dict] = None
+    valid_input_index: Optional[int] = None
+    original_result: Optional[Any] = None
+    perturbed_result: Optional[Any] = None
+    is_consistent: Optional[bool] = True
+    perturbed_value: Optional[Any] = None
+    origin_func: Optional[Callable] = None
+    api_type: Optional[str] = None
+    fuzz_stage: Optional[str] = None
+    grad_unequal_flag: Optional[bool] = True
+
+
+@dataclass
+class HandlerParams:
+    handler_type: Optional[str] = None
+    api_name: Optional[str] = None
+    pert_mode: Optional[PerturbationMode] = None
+    step: Optional[int] = None
+    fuzz_stage: Optional[str] = None
+    fuzz_device: Optional[DeviceType] = None
+    preheat_config: Optional[Dict] = None
+    fuzz_level: Optional[str] = None
+
+
+@dataclass
+class UnequalRow:
+    rank: Optional[int] = None
+    pert_mode: Optional[PerturbationMode] = None
+    stage: Optional[str] = None
+    step: Optional[int] = None
+    api_name: Optional[str] = None
+    max_rel: Optional[float] = None
+    dtype: Optional[str] = None
+    shape: Optional[str] = None
+    output_index: Optional[int] = None
+
+
+@dataclass
+class BenchmarkThd:
+    rtol: Optional[float] = None  # relative error threshold
+    small_value: Optional[float] = None  # small-value range
+    small_value_atol: Optional[float] = None  # absolute threshold for the small-value range
+    err_balance: Optional[float] = None  # error balance threshold
+
+
+def check_args_type(args: Tuple) -> int:
+    for i, arg in enumerate(args):
+        if torch.is_tensor(arg):
+            if arg.is_meta:
+                continue
+            if not torch.is_floating_point(arg):
+                continue
+            return i
+        if isinstance(arg, (List, Tuple, Dict)):
+            return i
+    return -1
+
+
+def data_pre_deal(name, func, args, kwargs):
+    data_params = DataParams(args=args, kwargs=kwargs, origin_func=func)
+    index = check_args_type(args)
+    data_params.valid_input_index = index
+    if index == -1:
+        logger.warning_on_rank_0(
+            f"[msprobe] Free benchmark: the input types of {name} are not supported by the free benchmark tool."
+        )
+    return data_params
+
+
+def make_handler_params(name, config, step):
+    handler_params = HandlerParams()
+    handler_params.api_name = name
+    handler_params.step = step
+    handler_params.handler_type = config.handler_type
+    handler_params.fuzz_stage = config.fuzz_stage
+    handler_params.fuzz_device = config.fuzz_device
+    handler_params.preheat_config = config.preheat_config
+    handler_params.fuzz_level = config.fuzz_level
+    handler_params.pert_mode = config.pert_mode
+    return handler_params
+
+
+def make_unequal_row(
+    data_params: DataParams,
+    handle_params: HandlerParams,
+    ratio: float = None,
+    index: int = None,
+):
+    row = UnequalRow(
+        api_name=handle_params.api_name,
+        pert_mode=handle_params.pert_mode,
+        output_index=index,
+        stage=handle_params.fuzz_stage,
+        step=handle_params.step,
+    )
+    if isinstance(ratio, float):
+        row.max_rel = ratio - 1
+    origin_tensor = data_params.original_result
+    perturbed_tensor = data_params.perturbed_result
+    if index:
+        origin_tensor = origin_tensor[index]
+        perturbed_tensor = perturbed_tensor[index]
+        row.output_index = index
+    if isinstance(origin_tensor, torch.Tensor):
+        row.dtype = origin_tensor.dtype
+        row.shape = origin_tensor.shape
+    row.rank = Tools.get_dist_rank()
+    # The following fuzz levels are not supported yet
+    if handle_params.fuzz_level == FuzzLevel.ADV_LEVEL:
+        pass
+    if handle_params.fuzz_level == FuzzLevel.REAL_LEVEL:
+        pass
+    return row
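
A minimal usage sketch for the module above (assumes the wheel is installed; the SimpleNamespace config and the api name "Torch.add.0.forward" are stand-ins for the real msprobe config object and api label):

from types import SimpleNamespace

import torch

from msprobe.pytorch.free_benchmark.common.params import data_pre_deal, make_handler_params

# Build DataParams for a plain torch.add call; valid_input_index points at the first
# floating-point tensor (or container) argument, or -1 if none is usable.
args = (torch.randn(4), torch.randn(4))
data_params = data_pre_deal("Torch.add.0.forward", torch.add, args, {})
print(data_params.valid_input_index)  # 0

# Copy the free-benchmark settings of a config object into a HandlerParams instance.
config = SimpleNamespace(handler_type="check", fuzz_stage="forward", fuzz_device=None,
                         preheat_config=None, fuzz_level=None, pert_mode=None)
handler_params = make_handler_params("Torch.add.0.forward", config, step=0)
print(handler_params.api_name, handler_params.step)  # Torch.add.0.forward 0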
msprobe/pytorch/free_benchmark/common/utils.py
@@ -0,0 +1,98 @@
+import torch
+from msprobe.pytorch.free_benchmark.common.enums import DeviceType
+
+
+class Tools:
+
+    @staticmethod
+    def is_float_tensor(tensor) -> bool:
+        if isinstance(tensor, torch.Tensor) and torch.is_floating_point(tensor):
+            return True
+        if isinstance(tensor, (list, tuple)):
+            for value in tensor:
+                if isinstance(value, torch.Tensor) and torch.is_floating_point(value):
+                    return True
+        return False
+
+    @staticmethod
+    def get_dist_rank():
+        try:
+            return torch.distributed.get_rank()
+        except RuntimeError:
+            return 0
+
+    @staticmethod
+    def get_first_tensor_dtype(tensor_seq):
+        if isinstance(tensor_seq, torch.Tensor):
+            return tensor_seq.dtype
+        if isinstance(tensor_seq, (list, tuple)):
+            for object_ in tensor_seq:
+                if isinstance(object_, torch.Tensor):
+                    return object_.dtype
+        raise RuntimeError("The sequence does not contain tensors.")
+
+    @staticmethod
+    def get_pure_api_name(api_name: str):
+        return api_name.rsplit(".", 2)[0]
+
+    @staticmethod
+    def convert_device_and_dtype(
+        tensor_seq, device: str = DeviceType.CPU, change_dtype: bool = False
+    ):
+        if isinstance(tensor_seq, torch.Tensor):
+            if change_dtype and tensor_seq.dtype in [torch.float16, torch.bfloat16]:
+                return tensor_seq.detach().to(device).to(torch.float32)
+            return tensor_seq.detach().to(device)
+        if isinstance(tensor_seq, dict):
+            return {
+                key: Tools.convert_device_and_dtype(value, device, change_dtype)
+                for key, value in tensor_seq.items()
+            }
+        if isinstance(tensor_seq, (tuple, list)):
+            return type(tensor_seq)(
+                [
+                    Tools.convert_device_and_dtype(value, device, change_dtype)
+                    for value in tensor_seq
+                ]
+            )
+        return tensor_seq
+
+    @staticmethod
+    def convert_fuzz_output_to_origin(origin, perturbed):
+        if isinstance(origin, torch.Tensor):
+            origin.data = perturbed.to(origin.dtype).to(origin.device)
+            return origin
+        if isinstance(origin, dict):
+            output = dict()
+            for key, value in origin.items():
+                output[key] = Tools.convert_fuzz_output_to_origin(value, perturbed[key])
+            return output
+        if isinstance(origin, (tuple, list)):
+            result = list()
+            for index_, value in enumerate(origin):
+                result.append(
+                    Tools.convert_fuzz_output_to_origin(value, perturbed[index_])
+                )
+            return type(origin)(result)
+        return origin
+
+class TorchC:
+    sum = torch._C._VariableFunctionsClass.sum
+    isinf = torch._C._VariableFunctionsClass.isinf
+    isfinite = torch._C._VariableFunctionsClass.isfinite
+    isnan = torch._C._VariableFunctionsClass.isnan
+    logical_not = torch._C._VariableFunctionsClass.logical_not
+    subtract = torch._C._VariableFunctionsClass.subtract
+    abs = torch._C._VariableFunctionsClass.abs
+    where = torch._C._VariableFunctionsClass.where
+    div = torch._C._VariableFunctionsClass.div
+    max = torch._C._VariableFunctionsClass.max
+    min = torch._C._VariableFunctionsClass.min
+    gt = torch._C._VariableFunctionsClass.gt
+    ge = torch._C._VariableFunctionsClass.ge
+    lt = torch._C._VariableFunctionsClass.lt
+    mean = torch._C._VariableFunctionsClass.mean
+    full = torch._C._VariableFunctionsClass.full
+    add = torch._C._VariableFunctionsClass.add
+    bitwise_xor = torch._C._VariableFunctionsClass.bitwise_xor
+    clone = torch._C._VariableFunctionsClass.clone
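
The helpers above are pure tensor plumbing: Tools.convert_device_and_dtype walks nested containers, and TorchC exposes the underlying C bindings of common torch ops. A small sketch, assuming the wheel is installed:

import torch

from msprobe.pytorch.free_benchmark.common.utils import Tools, TorchC

# Recursively detach and move a nested structure to CPU, upcasting half precision.
nested = {"x": torch.ones(2, dtype=torch.float16), "y": [torch.zeros(2), 3]}
moved = Tools.convert_device_and_dtype(nested, device="cpu", change_dtype=True)
print(moved["x"].dtype)  # torch.float32

# The TorchC aliases behave like the corresponding torch functions.
diff = TorchC.abs(TorchC.subtract(moved["x"], moved["y"][0]))
print(TorchC.max(diff))  # tensor(1.)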
msprobe/pytorch/free_benchmark/compare/grad_saver.py
@@ -0,0 +1,183 @@
+import torch
+from msprobe.core.common.exceptions import FreeBenchmarkException
+from msprobe.pytorch.free_benchmark import logger
+from msprobe.pytorch.free_benchmark.common.constant import CommonField
+from msprobe.pytorch.free_benchmark.common.params import DataParams, HandlerParams
+from msprobe.pytorch.free_benchmark.perturbed_layers.layer_factory import LayerFactory
+from msprobe.pytorch.free_benchmark.result_handlers.handler_factory import (
+    FuzzHandlerFactory,
+)
+
+
+class GradSaver:
+
+    def __init__(self, origin_func, handler_params: HandlerParams):
+
+        self.handler_params = handler_params
+        self.api_name = handler_params.api_name
+        self.origin_func = origin_func
+        self.data_params = DataParams()
+        self.is_compare = True
+        self.kwargs = dict()
+        self.perturbed_grad_input = tuple()
+        self.origin_grad_input = tuple()
+        self.need_grad_flag = list()
+        self.backward_input = tuple()
+
+    def register_compare_func_for_inputs(self, inputs, data_processor):
+        _index = 0
+        for j, obj in enumerate(inputs):
+            if torch.is_tensor(obj) and obj.requires_grad:
+
+                def compare_func(grad, new_grad_index=_index, input_index=j):
+                    if not self.is_compare:
+                        return grad
+                    try:
+                        perturbed_grad = self.check_grad_input(grad, new_grad_index)
+                        handler = FuzzHandlerFactory.create(self.handler_params)
+                        self.compare_grad_results(
+                            handler, grad, perturbed_grad, index=input_index
+                        )
+                        data_processor.update_unequal_rows(handler.get_unequal_rows())
+                    except IndexError:
+                        logger.warning_on_rank_0(
+                            f"[msprobe] Free benchmark: grad index out of range. api:{self.handler_params.api_name}."
+                            f"index:{new_grad_index}, perturbation grad len {len(self.perturbed_grad_input)}"
+                        )
+                        return grad
+                    except FreeBenchmarkException as e:
+                        logger.warning_on_rank_0(
+                            f"[msprobe] Free benchmark: grad input check error: {e}"
+                        )
+                        return grad
+                    except Exception as e:
+                        logger.warning_on_rank_0(
+                            f"[msprobe] Free benchmark: grad compare error: {e}"
+                        )
+                        return grad
+                    return grad
+
+                obj.register_hook(compare_func)
+                _index += 1
+
+    def compare_grad_results(self, handler, origin_grad, perturbed_grad, index):
+        # TODO get dtype?
+        self.data_params.original_result = origin_grad
+        self.data_params.perturbed_result = perturbed_grad
+        self.data_params.grad_unequal_flag = False
+        self.data_params.valid_input_index = index
+        try:
+            handler.handle(self.data_params)
+            if not self.data_params.is_consistent:
+                self.is_compare = False
+                self.data_params.grad_unequal_flag = True
+                self.data_params.is_consistent = True
+                self.data_params.perturbed_result = self.perturbed_grad_input
+                self.data_params.original_result = self.origin_grad_input
+                handler.handle(self.data_params)
+        except Exception as e:
+            logger.warning_on_rank_0(
+                f"[msprobe] Free benchmark: compare two vjp failed: api:{self.handler_params.api_name}."
+                f"{e}"
+            )
+        # Release the references to the outputs once the original and perturbed results have been compared
+        self.data_params.perturbed_result = None
+        self.data_params.original_result = None
+
+    def check_grad_input(self, origin_grad, new_grad_index):
+        if self.perturbed_grad_input is None:
+            raise FreeBenchmarkException(
+                FreeBenchmarkException.InvalidGrad,
+                f"grad not exists : {self.api_name}."
+            )
+        with torch.no_grad():
+            perturbed_grad = self.perturbed_grad_input[new_grad_index].to(
+                origin_grad.device
+            )
+            if origin_grad.shape != perturbed_grad.shape:
+                raise FreeBenchmarkException(
+                    FreeBenchmarkException.InvalidGrad,
+                    f"grad shapes are inconsistent. api:{self.handler_params.api_name}."
+                    f"origin:{origin_grad.shape}, perturbation: {perturbed_grad.shape}"
+                )
+        return perturbed_grad
+
+    def cache_backward_input(self, backward_input_list):
+        _inputs = []
+        with torch.no_grad():
+            for backward_input in backward_input_list:
+                if torch.is_tensor(backward_input):
+                    _inputs.append(
+                        {
+                            CommonField.DEVICE: backward_input.device,
+                            CommonField.FUZZ_TENSOR: backward_input.cpu(),
+                            CommonField.REQUIRES_GRAD: backward_input.requires_grad,
+                        }
+                    )
+                else:
+                    _inputs.append(backward_input)
+        self.backward_input = _inputs
+
+    def get_vjp_input(self):
+        inner_args_tmp = []
+        need_grad_tensors = []
+        for object_ in self.backward_input:
+            if isinstance(object_, dict) and CommonField.FUZZ_TENSOR in object_.keys():
+                tensor_ = torch.tensor(
+                    object_.get(CommonField.FUZZ_TENSOR).data,
+                    dtype=object_.get(CommonField.FUZZ_TENSOR).dtype,
+                    device=object_.get(CommonField.DEVICE),
+                    requires_grad=object_.get(CommonField.REQUIRES_GRAD),
+                )
+
+                if tensor_.requires_grad:
+                    inner_args_tmp.append(CommonField.HOLD_PLACE)
+                    need_grad_tensors.append(tensor_)
+                    self.need_grad_flag.append(True)
+                else:
+                    self.need_grad_flag.append(False)
+                    inner_args_tmp.append(tensor_)
+            else:
+                self.need_grad_flag.append(False)
+                inner_args_tmp.append(object_)
+
+        return need_grad_tensors, tuple(inner_args_tmp)
+
+    def get_grad_input_from_vjp(self, need_grad_tensors, grad_output, inner_args):
+        def vjp_func(*inputs):
+            _real_input = []
+            index_ = 0
+            for object_ in inner_args:
+                if object_ is CommonField.HOLD_PLACE:
+                    _real_input.append(inputs[index_])
+                    index_ += 1
+                else:
+                    _real_input.append(object_)
+            kwargs = self.kwargs.copy()
+            if 'inplace' in kwargs:
+                kwargs['inplace'] = False
+            return self.origin_func(*_real_input, **kwargs)
+
+        _, grad_input = torch.autograd.functional.vjp(
+            vjp_func, tuple(need_grad_tensors), grad_output
+        )
+        return grad_input
+
+    def calculate_perturbed_grad_input(self, grad_output, need_grad_tensors, inner_args):
+        self.data_params.args = [need_grad_tensors, grad_output, inner_args]
+        self.data_params.kwargs = {}
+        self.data_params.valid_input_index = 0
+        self.data_params.origin_func = self.get_grad_input_from_vjp
+        layer = LayerFactory.create(
+            self.handler_params.api_name,
+            self.handler_params.fuzz_device,
+            self.handler_params.pert_mode,
+        )
+        layer.handle(self.data_params)
+        # Release the reference to the inputs once the perturbed output has been computed
+        self.data_params.args = None
+        # Cache the perturbed gradients only if the perturbation succeeded
+        if self.data_params.perturbed_result:
+            self.perturbed_grad_input = tuple(
+                [x.cpu() for x in self.data_params.perturbed_result]
+            )
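
GradSaver recomputes the operator's input gradients with torch.autograd.functional.vjp, once for the cached inputs and once through a perturbation layer, and compares the results inside hooks registered on the forward inputs. The core vjp idea in isolation, using plain torch only:

import torch

def op(x, y):
    return x * y

x = torch.randn(3, requires_grad=True)
y = torch.randn(3, requires_grad=True)
grad_output = torch.ones(3)  # upstream gradient, as a tensor hook would receive it

# vjp re-runs the op and returns the gradients of the inputs for the given output gradient.
_, grad_inputs = torch.autograd.functional.vjp(op, (x, y), grad_output)
print(torch.allclose(grad_inputs[0], y), torch.allclose(grad_inputs[1], x))  # True True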
msprobe/pytorch/free_benchmark/compare/single_benchmark.py
@@ -0,0 +1,104 @@
+import math
+
+import torch
+from msprobe.pytorch.free_benchmark import logger
+from msprobe.pytorch.free_benchmark.common.constant import ThresholdConfig
+from msprobe.pytorch.free_benchmark.common.utils import TorchC
+
+
+class SingleCompare:
+    def __init__(self) -> None:
+        self.relative_err = None
+        self.absolute_err = None
+        self.eb = None
+        self.threshold = None
+
+    @staticmethod
+    def filter_overflow(tensor) -> int:
+        inf_num = TorchC.sum(TorchC.isinf(tensor))
+        nan_num = TorchC.sum(TorchC.isnan(tensor))
+        return inf_num + nan_num
+
+    @staticmethod
+    def replace_inf_or_nan(tensor):
+        finite_mask = TorchC.isfinite(tensor)
+        inf_or_nan_mask = TorchC.logical_not(finite_mask)
+        inf_or_nan_num = TorchC.sum(inf_or_nan_mask).item()
+        if inf_or_nan_num > 0:
+            tensor[inf_or_nan_mask] = 1
+        return tensor
+
+    @staticmethod
+    def compare_float_seq(actual, golden):
+        return math.isclose(actual, golden)
+
+    @staticmethod
+    def compare_other_seq(actual, golden):
+        return actual == golden
+
+    def compare_dict_seq(self, actual, golden):
+        if len(actual) != len(golden):
+            return False
+        for key, value in golden.items():
+            if not self.compare_seq(value, actual.get(key)):
+                return False
+        return True
+
+    def compare_list_seq(self, actual, golden):
+        if len(actual) != len(golden):
+            return False
+        for index_, value in enumerate(golden):
+            if not self.compare_seq(value, actual[index_]):
+                return False
+        return True
+
+    def compare_seq(self, actual, golden):
+        if isinstance(golden, torch.Tensor):
+            return self.compare_tensor_seq(actual, golden)
+        elif isinstance(golden, dict):
+            return self.compare_dict_seq(actual, golden)
+        elif isinstance(golden, (tuple, list)):
+            return self.compare_list_seq(actual, golden)
+        elif isinstance(golden, float):
+            return self.compare_float_seq(actual, golden)
+        else:
+            return self.compare_other_seq(actual, golden)
+
+    def compare_tensor_seq(self, actual, golden):
+        self.threshold = ThresholdConfig.BENCHMARK_THD_DICT.get(
+            actual.dtype, ThresholdConfig.BENCHMARK_THD_DICT.get(torch.float32)
+        )
+        if self.filter_overflow(golden) > 0:
+            logger.warning_on_rank_0("[msprobe] Free Benchmark: inf and nan"
+                                     " in golden tensor is not supported.")
+            return True
+        actual = self.replace_inf_or_nan(actual)
+        actual = actual.to(torch.float64)
+        golden = golden.to(torch.float64).to(actual.device)
+        self._cal_compare_metrics(actual, golden)
+        if self.absolute_err > self.threshold.small_value_atol:
+            return False
+        if self.relative_err > self.threshold.rtol:
+            return False
+        if self.eb > self.threshold.err_balance:
+            return False
+        return True
+
+    def _cal_compare_metrics(self, actual, golden):
+        diff_value = TorchC.subtract(actual, golden)
+        diff_abs = TorchC.abs(diff_value)
+        golden_abs = TorchC.abs(golden)
+        # Elements in the small-value range are judged by absolute error
+        self.absolute_err = TorchC.max(TorchC.where(
+            TorchC.lt(TorchC.abs(actual), self.threshold.small_value), diff_abs, 0
+        ))
+        diff_rel = TorchC.div(diff_abs, golden_abs)
+        # Remaining elements are judged by relative error
+        self.relative_err = TorchC.max(TorchC.where(
+            TorchC.ge(TorchC.abs(actual), self.threshold.small_value), diff_rel, 0
+        ))
+        # Error balance
+        divided = TorchC.where(
+            TorchC.ge(TorchC.abs(golden), self.threshold.small_value), golden_abs, 1
+        )
+        self.eb = TorchC.mean(TorchC.div(diff_value, divided))
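
SingleCompare flags a tensor as inconsistent when the absolute error in the small-value range, the relative error elsewhere, or the error balance exceeds the per-dtype thresholds from ThresholdConfig.BENCHMARK_THD_DICT. A short sketch (assumes the wheel is installed; the expected outputs only assume the thresholds sit somewhere between zero error and a 1000x error):

import torch

from msprobe.pytorch.free_benchmark.compare.single_benchmark import SingleCompare

golden = torch.ones(4, dtype=torch.float32)
print(SingleCompare().compare_seq(golden.clone(), golden))   # True: zero error on every metric
print(SingleCompare().compare_seq(golden * 1000.0, golden))  # False: relative error of 999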
msprobe/pytorch/free_benchmark/main.py
@@ -0,0 +1,102 @@
+from abc import ABC
+
+import torch
+from msprobe.core.common.const import Const
+from msprobe.pytorch.free_benchmark import logger
+from msprobe.pytorch.free_benchmark.common.constant import CommonField
+from msprobe.pytorch.free_benchmark.common.enums import (
+    DeviceType,
+    FuzzLevel,
+    HandlerType,
+    PerturbationMode,
+)
+from msprobe.pytorch.free_benchmark.common.params import data_pre_deal, make_handler_params
+from msprobe.pytorch.free_benchmark.compare.grad_saver import GradSaver
+from msprobe.pytorch.free_benchmark.perturbed_layers.layer_factory import LayerFactory
+from msprobe.pytorch.free_benchmark.result_handlers.handler_factory import (
+    FuzzHandlerFactory,
+)
+
+
+class FreeBenchmarkCheck(ABC):
+
+    def __init__(self, config) -> None:
+        super().__init__()
+        self.config = config
+        if self.config.pert_mode is None:
+            self.config.pert_mode = PerturbationMode.IMPROVE_PRECISION
+        if self.config.fuzz_level is None:
+            self.config.fuzz_level = FuzzLevel.BASE_LEVEL
+        if self.config.fuzz_device is None:
+            self.config.fuzz_device = DeviceType.NPU
+        self.current_iter = 0
+
+    def update_iter(self, update_iter):
+        self.current_iter = update_iter
+
+    def if_fix(self):
+        if self.config.handler_type == HandlerType.FIX:
+            return True
+        return False
+
+    def pre_forward(self, name, module, data_processor, args, kwargs):
+        if not self.config.fuzz_stage == Const.BACKWARD:
+            return
+        origin_func = (
+            module._slow_forward if torch._C._get_tracing_state() else module.forward
+        )
+        handler_params = make_handler_params(name, self.config, self.current_iter)
+        grad_saver = GradSaver(origin_func, handler_params)
+        grad_saver.kwargs = kwargs
+        grad_saver.register_compare_func_for_inputs(args, data_processor)
+        grad_saver.cache_backward_input(args)
+        setattr(module, CommonField.GRADSAVER, grad_saver)
+
+    def forward(self, name, module, args, kwargs, output):
+        if not self.config.fuzz_stage == Const.FORWARD:
+            return output, []
+        origin_func = (
+            module._slow_forward if torch._C._get_tracing_state() else module.forward
+        )
+        data_params = data_pre_deal(name, origin_func, args, kwargs)
+        if data_params.valid_input_index == -1:
+            return output, []
+        data_params.original_result = output
+        data_params.fuzz_stage = self.config.fuzz_stage
+
+        layer = LayerFactory.create(
+            name, self.config.fuzz_device, self.config.pert_mode
+        )
+        layer.handle(data_params)
+        handler_params = make_handler_params(name, self.config, self.current_iter)
+        handler = FuzzHandlerFactory.create(handler_params)
+        handler.handle(data_params)
+        return data_params.perturbed_result, handler.get_unequal_rows()
+
+    def backward(self, name, module, grad_output):
+
+        if not self.config.fuzz_stage == Const.BACKWARD:
+            return
+        try:
+            grad_saver = getattr(module, CommonField.GRADSAVER)
+        except AttributeError:
+            logger.warning_on_rank_0(
+                f"[msprobe] Free benchmark: get grad saver failed. api_name:{name}"
+            )
+            return
+
+        _new_grad_output = grad_output
+        try:
+            need_grad_tensors, _inner_args = grad_saver.get_vjp_input()
+            origin_grad_input = grad_saver.get_grad_input_from_vjp(
+                tuple(need_grad_tensors), _new_grad_output, _inner_args
+            )
+            grad_saver.origin_grad_input = tuple([x.cpu() for x in origin_grad_input])
+            grad_saver.calculate_perturbed_grad_input(
+                _new_grad_output, need_grad_tensors, _inner_args
+            )
+        except Exception as e:
+            logger.warning_on_rank_0(
+                f"[msprobe] Free benchmark: grad vjp calculate failed. api_name:{name} error: {e}"
+            )
+            return
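
FreeBenchmarkCheck wires the pieces together: forward perturbs the inputs, replays the api and hands both outputs to a result handler, while pre_forward/backward drive the vjp-based gradient comparison. A small construction sketch (assumes the wheel is installed; the SimpleNamespace config is a stand-in for the real msprobe config class):

from types import SimpleNamespace

from msprobe.pytorch.free_benchmark.common.enums import HandlerType, PerturbationMode
from msprobe.pytorch.free_benchmark.main import FreeBenchmarkCheck

config = SimpleNamespace(pert_mode=None, fuzz_level=None, fuzz_device=None,
                         fuzz_stage="forward", handler_type=HandlerType.FIX,
                         preheat_config=None)
checker = FreeBenchmarkCheck(config)
checker.update_iter(3)
# Unset fields fall back to the IMPROVE_PRECISION / BASE_LEVEL / NPU defaults.
print(checker.config.pert_mode == PerturbationMode.IMPROVE_PRECISION)  # True
print(checker.if_fix())  # True, because handler_type is HandlerType.FIX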
msprobe/pytorch/free_benchmark/perturbed_layers/__init__.py (file without changes)
msprobe/pytorch/free_benchmark/perturbed_layers/base_layer.py
@@ -0,0 +1,13 @@
+from abc import ABC, abstractmethod
+from typing import Any
+
+from msprobe.pytorch.free_benchmark.common.params import DataParams
+
+
+class BaseLayer(ABC):
+    def __init__(self, api_name: str) -> None:
+        self.api_name = api_name
+
+    @abstractmethod
+    def handle(self, params: DataParams) -> Any:
+        pass
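
Concrete perturbation layers implement handle(); the hypothetical no-op subclass below is shown only to illustrate the contract (the real layers such as add_noise or improve_precision modify the inputs before replaying the call):

from typing import Any

from msprobe.pytorch.free_benchmark.common.params import DataParams
from msprobe.pytorch.free_benchmark.perturbed_layers.base_layer import BaseLayer


class IdentityLayer(BaseLayer):
    def handle(self, params: DataParams) -> Any:
        # Replay the original call unchanged and store it as the "perturbed" result.
        params.perturbed_result = params.origin_func(*params.args, **params.kwargs)
        return params.perturbed_result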
msprobe/pytorch/free_benchmark/perturbed_layers/layer_factory.py
@@ -0,0 +1,41 @@
+from msprobe.pytorch.free_benchmark import FreeBenchmarkException
+from msprobe.pytorch.free_benchmark.common.enums import DeviceType, PerturbationMode
+from msprobe.pytorch.free_benchmark.perturbed_layers.npu.improve_precision import (
+    ImprovePrecisionLayer,
+)
+from msprobe.pytorch.free_benchmark.perturbed_layers.npu.add_noise import AddNoiseLayer
+from msprobe.pytorch.free_benchmark.perturbed_layers.npu.bit_noise import BitNoiseLayer
+from msprobe.pytorch.free_benchmark.perturbed_layers.npu.no_change import NoChangeLayer
+from msprobe.pytorch.free_benchmark.perturbed_layers.npu.change_value import (
+    ChangeValueLayer,
+)
+from msprobe.pytorch.free_benchmark.perturbed_layers.run_cpu import CpuLayer
+
+
+class LayerFactory:
+    layers = {
+        DeviceType.NPU: {
+            PerturbationMode.ADD_NOISE: AddNoiseLayer,
+            PerturbationMode.CHANGE_VALUE: ChangeValueLayer,
+            PerturbationMode.NO_CHANGE: NoChangeLayer,
+            PerturbationMode.BIT_NOISE: BitNoiseLayer,
+            PerturbationMode.IMPROVE_PRECISION: ImprovePrecisionLayer,
+        },
+        DeviceType.CPU: {PerturbationMode.TO_CPU: CpuLayer},
+    }
+
+    @staticmethod
+    def create(api_name: str, device_type: str, mode: str):
+        layer = LayerFactory.layers.get(device_type)
+        if not layer:
+            raise FreeBenchmarkException(
+                FreeBenchmarkException.UnsupportedType,
+                f"The free benchmark tool does not support the device {device_type}",
+            )
+        layer = layer.get(mode)
+        if not layer:
+            raise FreeBenchmarkException(
+                FreeBenchmarkException.UnsupportedType,
+                f"The free benchmark tool does not recognize the perturbation mode {mode} on {device_type}",
+            )
+        return layer(api_name)
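
The factory is a plain (device, perturbation mode) lookup over the registry above; unknown combinations raise FreeBenchmarkException. A minimal sketch (assumes the wheel is installed; the api name is a hypothetical label):

from msprobe.pytorch.free_benchmark import FreeBenchmarkException
from msprobe.pytorch.free_benchmark.common.enums import DeviceType, PerturbationMode
from msprobe.pytorch.free_benchmark.perturbed_layers.layer_factory import LayerFactory

# Look up the layer class registered for ADD_NOISE on the NPU device.
print(LayerFactory.layers[DeviceType.NPU][PerturbationMode.ADD_NOISE].__name__)  # AddNoiseLayer

# Only TO_CPU is registered for the CPU device, so other modes are rejected.
try:
    LayerFactory.create("Torch.add.0.forward", DeviceType.CPU, PerturbationMode.ADD_NOISE)
except FreeBenchmarkException as err:
    print(err)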
msprobe/pytorch/free_benchmark/perturbed_layers/npu/__init__.py (file without changes)