mindstudio-probe 1.0.1__py3-none-any.whl → 1.0.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mindstudio_probe-1.0.1.dist-info → mindstudio_probe-1.0.4.dist-info}/LICENSE +201 -201
- {mindstudio_probe-1.0.1.dist-info → mindstudio_probe-1.0.4.dist-info}/METADATA +36 -30
- mindstudio_probe-1.0.4.dist-info/RECORD +276 -0
- {mindstudio_probe-1.0.1.dist-info → mindstudio_probe-1.0.4.dist-info}/WHEEL +1 -1
- {mindstudio_probe-1.0.1.dist-info → mindstudio_probe-1.0.4.dist-info}/entry_points.txt +1 -0
- msprobe/README.md +101 -182
- msprobe/__init__.py +1 -0
- msprobe/{config/config.json → config.json} +49 -27
- msprobe/core/__init__.py +0 -0
- msprobe/{pytorch → core}/advisor/advisor.py +124 -124
- msprobe/{pytorch → core}/advisor/advisor_const.py +59 -59
- msprobe/{pytorch → core}/advisor/advisor_result.py +58 -58
- msprobe/core/common/const.py +341 -241
- msprobe/core/common/exceptions.py +100 -88
- msprobe/core/common/{file_check.py → file_utils.py} +478 -265
- msprobe/core/common/log.py +76 -55
- msprobe/core/common/utils.py +385 -516
- msprobe/core/common_config.py +85 -58
- msprobe/core/compare/acc_compare.py +300 -0
- msprobe/core/compare/check.py +95 -0
- msprobe/core/compare/compare_cli.py +49 -0
- msprobe/core/compare/highlight.py +223 -0
- msprobe/core/compare/multiprocessing_compute.py +149 -0
- msprobe/{pytorch → core}/compare/npy_compare.py +295 -244
- msprobe/core/compare/utils.py +430 -0
- msprobe/core/data_dump/data_collector.py +154 -140
- msprobe/core/data_dump/data_processor/base.py +314 -245
- msprobe/core/data_dump/data_processor/factory.py +59 -61
- msprobe/core/data_dump/data_processor/mindspore_processor.py +186 -0
- msprobe/core/data_dump/data_processor/pytorch_processor.py +366 -346
- msprobe/core/data_dump/json_writer.py +96 -116
- msprobe/core/data_dump/scope.py +178 -178
- msprobe/core/grad_probe/__init__.py +0 -0
- msprobe/core/grad_probe/constant.py +71 -0
- msprobe/core/grad_probe/grad_compare.py +171 -0
- msprobe/core/grad_probe/utils.py +64 -0
- msprobe/docs/01.installation.md +89 -0
- msprobe/docs/02.config_introduction.md +165 -0
- msprobe/docs/03.config_examples.md +247 -0
- msprobe/docs/04.acl_config_examples.md +76 -0
- msprobe/docs/05.data_dump_PyTorch.md +198 -0
- msprobe/docs/06.data_dump_MindSpore.md +243 -0
- msprobe/docs/07.accuracy_checker_PyTorch.md +274 -0
- msprobe/docs/08.accuracy_checker_online_PyTorch.md +198 -0
- msprobe/docs/09.accuracy_checker_MindSpore.md +68 -0
- msprobe/docs/10.accuracy_compare_PyTorch.md +245 -0
- msprobe/docs/11.accuracy_compare_MindSpore.md +202 -0
- msprobe/docs/12.overflow_check_PyTorch.md +79 -0
- msprobe/docs/13.overflow_check_MindSpore.md +31 -0
- msprobe/{pytorch/doc/parse_tool.md → docs/14.data_parse_PyTorch.md} +283 -286
- msprobe/docs/15.free_benchmarking_PyTorch.md +164 -0
- msprobe/docs/17.grad_probe.md +207 -0
- msprobe/docs/FAQ_PyTorch.md +177 -0
- msprobe/docs/S02.report_free_benchmarking_validation_performance_baseline.md +146 -0
- msprobe/docs/img/free_benchmark_framework.png +0 -0
- msprobe/docs/img/grad_probe_image-1.png +0 -0
- msprobe/docs/img/grad_probe_image-2.png +0 -0
- msprobe/docs/img/grad_probe_image-3.png +0 -0
- msprobe/docs/img/grad_probe_image-4.png +0 -0
- msprobe/docs/img/grad_probe_image.png +0 -0
- msprobe/mindspore/__init__.py +1 -1
- msprobe/mindspore/api_accuracy_checker/__init__.py +0 -0
- msprobe/mindspore/api_accuracy_checker/api_accuracy_checker.py +255 -0
- msprobe/mindspore/api_accuracy_checker/api_info.py +69 -0
- msprobe/mindspore/api_accuracy_checker/api_runner.py +156 -0
- msprobe/mindspore/api_accuracy_checker/base_compare_algorithm.py +197 -0
- msprobe/mindspore/api_accuracy_checker/cmd_parser.py +6 -0
- msprobe/mindspore/api_accuracy_checker/compute_element.py +239 -0
- msprobe/mindspore/api_accuracy_checker/main.py +9 -0
- msprobe/mindspore/api_accuracy_checker/type_mapping.py +114 -0
- msprobe/mindspore/api_accuracy_checker/utils.py +80 -0
- msprobe/mindspore/cell_processor.py +34 -0
- msprobe/mindspore/common/const.py +106 -0
- msprobe/mindspore/common/log.py +38 -0
- msprobe/mindspore/common/utils.py +81 -0
- msprobe/mindspore/compare/distributed_compare.py +75 -0
- msprobe/mindspore/compare/ms_compare.py +219 -0
- msprobe/mindspore/compare/ms_graph_compare.py +348 -0
- msprobe/mindspore/compare/ms_to_pt_api.yaml +399 -0
- msprobe/mindspore/debugger/debugger_config.py +66 -51
- msprobe/mindspore/debugger/precision_debugger.py +126 -32
- msprobe/mindspore/dump/dump_tool_factory.py +35 -38
- msprobe/mindspore/dump/hook_cell/api_registry.py +118 -0
- msprobe/mindspore/dump/hook_cell/hook_cell.py +55 -0
- msprobe/mindspore/dump/hook_cell/support_wrap_ops.yaml +922 -0
- msprobe/mindspore/dump/hook_cell/wrap_api.py +113 -0
- msprobe/mindspore/dump/jit_dump.py +72 -0
- msprobe/mindspore/dump/kernel_graph_dump.py +59 -60
- msprobe/mindspore/dump/kernel_kbyk_dump.py +64 -0
- msprobe/mindspore/free_benchmark/__init__.py +0 -0
- msprobe/mindspore/free_benchmark/api_pynative_self_check.py +116 -0
- msprobe/mindspore/free_benchmark/common/__init__.py +0 -0
- msprobe/mindspore/free_benchmark/common/config.py +12 -0
- msprobe/mindspore/free_benchmark/common/handler_params.py +17 -0
- msprobe/mindspore/free_benchmark/common/utils.py +71 -0
- msprobe/mindspore/free_benchmark/data/support_wrap_ops.yaml +842 -0
- msprobe/mindspore/free_benchmark/decorator/__init__.py +0 -0
- msprobe/mindspore/free_benchmark/decorator/dec_forward.py +43 -0
- msprobe/mindspore/free_benchmark/decorator/decorator_factory.py +107 -0
- msprobe/mindspore/free_benchmark/handler/__init__.py +0 -0
- msprobe/mindspore/free_benchmark/handler/base_handler.py +90 -0
- msprobe/mindspore/free_benchmark/handler/check_handler.py +41 -0
- msprobe/mindspore/free_benchmark/handler/fix_handler.py +36 -0
- msprobe/mindspore/free_benchmark/handler/handler_factory.py +21 -0
- msprobe/mindspore/free_benchmark/perturbation/add_noise.py +67 -0
- msprobe/mindspore/free_benchmark/perturbation/base_perturbation.py +21 -0
- msprobe/mindspore/free_benchmark/perturbation/bit_noise.py +63 -0
- msprobe/mindspore/free_benchmark/perturbation/exchange_value.py +51 -0
- msprobe/mindspore/free_benchmark/perturbation/improve_precision.py +35 -0
- msprobe/mindspore/free_benchmark/perturbation/no_change.py +12 -0
- msprobe/mindspore/free_benchmark/perturbation/perturbation_factory.py +29 -0
- msprobe/mindspore/free_benchmark/self_check_tool_factory.py +33 -0
- msprobe/mindspore/grad_probe/__init__.py +0 -0
- msprobe/mindspore/grad_probe/global_context.py +90 -0
- msprobe/mindspore/grad_probe/grad_analyzer.py +231 -0
- msprobe/mindspore/grad_probe/grad_monitor.py +27 -0
- msprobe/mindspore/grad_probe/grad_stat_csv.py +132 -0
- msprobe/mindspore/grad_probe/hook.py +94 -0
- msprobe/mindspore/grad_probe/utils.py +30 -0
- msprobe/mindspore/ms_config.py +128 -78
- msprobe/mindspore/overflow_check/kernel_graph_overflow_check.py +44 -45
- msprobe/mindspore/overflow_check/overflow_check_tool_factory.py +34 -32
- msprobe/mindspore/runtime.py +4 -0
- msprobe/mindspore/service.py +378 -0
- msprobe/mindspore/task_handler_factory.py +24 -21
- msprobe/msprobe.py +105 -67
- msprobe/pytorch/__init__.py +4 -4
- msprobe/pytorch/api_accuracy_checker/common/config.py +53 -50
- msprobe/pytorch/api_accuracy_checker/common/utils.py +214 -224
- msprobe/pytorch/api_accuracy_checker/compare/algorithm.py +213 -216
- msprobe/pytorch/api_accuracy_checker/compare/api_precision_compare.py +606 -545
- msprobe/pytorch/api_accuracy_checker/compare/api_precision_standard.yaml +132 -132
- msprobe/pytorch/api_accuracy_checker/compare/api_precision_threshold.yaml +390 -390
- msprobe/pytorch/api_accuracy_checker/compare/compare.py +386 -345
- msprobe/pytorch/api_accuracy_checker/compare/compare_column.py +73 -73
- msprobe/pytorch/api_accuracy_checker/compare/compare_utils.py +245 -248
- msprobe/pytorch/api_accuracy_checker/config.yaml +10 -4
- msprobe/pytorch/api_accuracy_checker/run_ut/data_generate.py +335 -328
- msprobe/pytorch/api_accuracy_checker/run_ut/multi_run_ut.py +200 -203
- msprobe/pytorch/api_accuracy_checker/run_ut/run_overflow_check.py +133 -127
- msprobe/pytorch/api_accuracy_checker/run_ut/run_ut.py +592 -493
- msprobe/pytorch/api_accuracy_checker/run_ut/run_ut_utils.py +70 -7
- msprobe/pytorch/api_accuracy_checker/run_ut/torch_ut_setting.json +7 -4
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/__init__.py +0 -0
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/attl.py +197 -0
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/client.py +325 -0
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/device_dispatch.py +204 -0
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/server.py +219 -0
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/ssl_config.py +10 -0
- msprobe/pytorch/bench_functions/__init__.py +15 -0
- msprobe/pytorch/bench_functions/apply_adam_w.py +28 -0
- msprobe/pytorch/bench_functions/confusion_transpose.py +19 -0
- msprobe/pytorch/bench_functions/fast_gelu.py +55 -0
- msprobe/pytorch/bench_functions/layer_norm_eval.py +6 -0
- msprobe/pytorch/bench_functions/linear.py +12 -0
- msprobe/pytorch/bench_functions/matmul_backward.py +48 -0
- msprobe/pytorch/bench_functions/npu_fusion_attention.py +509 -0
- msprobe/pytorch/bench_functions/rms_norm.py +15 -0
- msprobe/pytorch/bench_functions/rotary_mul.py +52 -0
- msprobe/pytorch/bench_functions/scaled_mask_softmax.py +26 -0
- msprobe/pytorch/bench_functions/swiglu.py +55 -0
- msprobe/pytorch/common/__init__.py +2 -2
- msprobe/pytorch/common/compare_script.template +14 -14
- msprobe/pytorch/common/log.py +20 -31
- msprobe/pytorch/common/parse_json.py +39 -37
- msprobe/pytorch/common/utils.py +305 -224
- msprobe/pytorch/compare/distributed_compare.py +66 -111
- msprobe/pytorch/compare/mapping.yaml +607 -607
- msprobe/pytorch/compare/match.py +34 -36
- msprobe/pytorch/compare/pt_compare.py +50 -0
- msprobe/pytorch/debugger/debugger_config.py +95 -86
- msprobe/pytorch/debugger/precision_debugger.py +125 -95
- msprobe/pytorch/free_benchmark/__init__.py +8 -8
- msprobe/pytorch/free_benchmark/common/constant.py +70 -67
- msprobe/pytorch/free_benchmark/common/counter.py +71 -71
- msprobe/pytorch/free_benchmark/common/enums.py +37 -37
- msprobe/pytorch/free_benchmark/common/params.py +129 -129
- msprobe/pytorch/free_benchmark/common/utils.py +102 -98
- msprobe/pytorch/free_benchmark/compare/grad_saver.py +179 -183
- msprobe/pytorch/free_benchmark/compare/single_benchmark.py +104 -104
- msprobe/pytorch/free_benchmark/main.py +105 -102
- msprobe/pytorch/free_benchmark/perturbed_layers/base_layer.py +13 -13
- msprobe/pytorch/free_benchmark/perturbed_layers/layer_factory.py +41 -41
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/add_noise.py +90 -90
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/bit_noise.py +104 -104
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/change_value.py +63 -63
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/improve_precision.py +68 -68
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/no_change.py +28 -28
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/npu_base_layser.py +45 -45
- msprobe/pytorch/free_benchmark/perturbed_layers/run_cpu.py +19 -19
- msprobe/pytorch/free_benchmark/result_handlers/base_handler.py +217 -203
- msprobe/pytorch/free_benchmark/result_handlers/check_handler.py +39 -39
- msprobe/pytorch/free_benchmark/result_handlers/fix_handler.py +23 -23
- msprobe/pytorch/free_benchmark/result_handlers/handler_factory.py +30 -31
- msprobe/pytorch/free_benchmark/result_handlers/preheat_handler.py +170 -170
- msprobe/pytorch/function_factory.py +76 -0
- msprobe/pytorch/functional/dump_module.py +39 -39
- msprobe/pytorch/grad_probe/__init__.py +0 -0
- msprobe/pytorch/grad_probe/grad_monitor.py +91 -0
- msprobe/pytorch/grad_probe/grad_stat_csv.py +129 -0
- msprobe/pytorch/hook_module/api_registry.py +161 -161
- msprobe/pytorch/hook_module/hook_module.py +120 -109
- msprobe/pytorch/hook_module/support_wrap_ops.yaml +1879 -1876
- msprobe/pytorch/hook_module/utils.py +30 -29
- msprobe/pytorch/hook_module/wrap_aten.py +110 -100
- msprobe/pytorch/hook_module/wrap_distributed.py +78 -75
- msprobe/pytorch/hook_module/wrap_functional.py +105 -108
- msprobe/pytorch/hook_module/wrap_npu_custom.py +93 -73
- msprobe/pytorch/hook_module/wrap_tensor.py +71 -72
- msprobe/pytorch/hook_module/wrap_torch.py +86 -88
- msprobe/pytorch/hook_module/wrap_vf.py +62 -64
- msprobe/pytorch/module_processer.py +138 -98
- msprobe/pytorch/online_dispatch/__init__.py +20 -20
- msprobe/pytorch/online_dispatch/compare.py +236 -236
- msprobe/pytorch/online_dispatch/dispatch.py +271 -273
- msprobe/pytorch/online_dispatch/dump_compare.py +155 -186
- msprobe/pytorch/online_dispatch/single_compare.py +391 -391
- msprobe/pytorch/online_dispatch/torch_ops_config.yaml +49 -49
- msprobe/pytorch/online_dispatch/utils.py +130 -187
- msprobe/pytorch/parse.py +4 -4
- msprobe/pytorch/parse_tool/cli.py +32 -32
- msprobe/pytorch/parse_tool/lib/compare.py +260 -259
- msprobe/pytorch/parse_tool/lib/config.py +52 -51
- msprobe/pytorch/parse_tool/lib/file_desc.py +31 -31
- msprobe/pytorch/parse_tool/lib/interactive_cli.py +102 -102
- msprobe/pytorch/parse_tool/lib/parse_exception.py +54 -54
- msprobe/pytorch/parse_tool/lib/parse_tool.py +158 -158
- msprobe/pytorch/parse_tool/lib/utils.py +316 -367
- msprobe/pytorch/parse_tool/lib/visualization.py +85 -90
- msprobe/pytorch/pt_config.py +188 -93
- msprobe/pytorch/service.py +246 -167
- mindstudio_probe-1.0.1.dist-info/RECORD +0 -228
- msprobe/config/README.md +0 -397
- msprobe/mindspore/doc/dump.md +0 -65
- msprobe/mindspore/dump/api_kbk_dump.py +0 -55
- msprobe/pytorch/compare/acc_compare.py +0 -1024
- msprobe/pytorch/compare/highlight.py +0 -100
- msprobe/pytorch/doc/FAQ.md +0 -193
- msprobe/pytorch/doc/api_accuracy_checker.md +0 -269
- msprobe/pytorch/doc/atat/321/207/342/226/223/342/225/233/321/205/342/225/221/320/266/321/205/342/225/226/320/265/321/205/320/225/342/225/226/321/206/320/245/342/226/221/321/206/320/235/320/276dump/321/206/320/260/320/227/321/205/320/227/320/226/321/206/320/220/320/267/321/210/320/223/342/225/234/321/205/320/257/342/225/221/321/207/342/225/221/342/224/220/321/206/320/232/320/265/321/205/320/241/320/232.md +0 -182
- msprobe/pytorch/doc/dump.md +0 -207
- msprobe/pytorch/doc/ptdbg_ascend_compare.md +0 -176
- msprobe/pytorch/doc/ptdbg_ascend_overview.md +0 -68
- msprobe/pytorch/doc/ptdbg_ascend_quickstart.md +0 -381
- msprobe/pytorch/doc/run_overflow_check.md +0 -25
- msprobe/pytorch/doc//321/205/320/254/320/270/321/207/342/225/221/342/224/220/321/207/342/226/223/342/225/233/321/205/342/225/221/320/266/321/206/320/277/320/244/321/205/320/277/342/225/243.md +0 -90
- msprobe/test/core_ut/common/test_utils.py +0 -345
- msprobe/test/core_ut/data_dump/test_data_collector.py +0 -47
- msprobe/test/core_ut/data_dump/test_json_writer.py +0 -183
- msprobe/test/core_ut/data_dump/test_scope.py +0 -151
- msprobe/test/core_ut/test_common_config.py +0 -152
- msprobe/test/core_ut/test_file_check.py +0 -218
- msprobe/test/core_ut/test_log.py +0 -109
- msprobe/test/mindspore_ut/test_api_kbk_dump.py +0 -51
- msprobe/test/mindspore_ut/test_debugger_config.py +0 -42
- msprobe/test/mindspore_ut/test_dump_tool_factory.py +0 -51
- msprobe/test/mindspore_ut/test_kernel_graph_dump.py +0 -66
- msprobe/test/mindspore_ut/test_kernel_graph_overflow_check.py +0 -63
- msprobe/test/mindspore_ut/test_ms_config.py +0 -69
- msprobe/test/mindspore_ut/test_overflow_check_tool_factory.py +0 -51
- msprobe/test/mindspore_ut/test_precision_debugger.py +0 -56
- msprobe/test/mindspore_ut/test_task_handler_factory.py +0 -58
- msprobe/test/pytorch_ut/advisor/test_advisor.py +0 -83
- msprobe/test/pytorch_ut/api_accuracy_checker/common/test_common_utils.py +0 -108
- msprobe/test/pytorch_ut/api_accuracy_checker/common/test_config.py +0 -39
- msprobe/test/pytorch_ut/api_accuracy_checker/compare/test_algorithm.py +0 -112
- msprobe/test/pytorch_ut/api_accuracy_checker/compare/test_api_precision_compare.py +0 -77
- msprobe/test/pytorch_ut/api_accuracy_checker/compare/test_compare.py +0 -125
- msprobe/test/pytorch_ut/api_accuracy_checker/compare/test_compare_column.py +0 -10
- msprobe/test/pytorch_ut/api_accuracy_checker/compare/test_compare_utils.py +0 -43
- msprobe/test/pytorch_ut/api_accuracy_checker/run_ut/dump.json +0 -179
- msprobe/test/pytorch_ut/api_accuracy_checker/run_ut/forward.json +0 -63
- msprobe/test/pytorch_ut/api_accuracy_checker/run_ut/test_data_generate.py +0 -99
- msprobe/test/pytorch_ut/api_accuracy_checker/run_ut/test_multi_run_ut.py +0 -115
- msprobe/test/pytorch_ut/api_accuracy_checker/run_ut/test_run_ut.py +0 -72
- msprobe/test/pytorch_ut/compare/test_acc_compare.py +0 -17
- msprobe/test/pytorch_ut/free_benchmark/perturbed_layers/test_perturbed_layser.py +0 -105
- msprobe/test/pytorch_ut/free_benchmark/result_handlers/test_result_handler.py +0 -121
- msprobe/test/pytorch_ut/free_benchmark/test_main.py +0 -101
- msprobe/test/pytorch_ut/functional/test_dump_module.py +0 -15
- msprobe/test/pytorch_ut/hook_module/test_api_registry.py +0 -130
- msprobe/test/pytorch_ut/hook_module/test_hook_module.py +0 -42
- msprobe/test/pytorch_ut/hook_module/test_wrap_aten.py +0 -65
- msprobe/test/pytorch_ut/hook_module/test_wrap_distributed.py +0 -35
- msprobe/test/pytorch_ut/hook_module/test_wrap_functional.py +0 -20
- msprobe/test/pytorch_ut/hook_module/test_wrap_tensor.py +0 -35
- msprobe/test/pytorch_ut/hook_module/test_wrap_torch.py +0 -43
- msprobe/test/pytorch_ut/hook_module/test_wrap_vf.py +0 -11
- msprobe/test/pytorch_ut/test_pt_config.py +0 -69
- msprobe/test/pytorch_ut/test_service.py +0 -59
- msprobe/test/resources/advisor.txt +0 -3
- msprobe/test/resources/compare_result_20230703104808.csv +0 -9
- msprobe/test/resources/compare_result_without_accuracy.csv +0 -9
- msprobe/test/resources/config.yaml +0 -3
- msprobe/test/resources/npu_test.pkl +0 -8
- msprobe/test/run_test.sh +0 -30
- msprobe/test/run_ut.py +0 -58
- msprobe/test/test_module_processer.py +0 -64
- {mindstudio_probe-1.0.1.dist-info → mindstudio_probe-1.0.4.dist-info}/top_level.txt +0 -0
- /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_1.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_2.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_3.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_4.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_1.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_2.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_3.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_4.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_5.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_6.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_7.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/GPT-3_8.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/YOLOV5S_1.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/YOLOV5S_2.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/accuracy_checking_details.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/accuracy_checking_result.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/api_precision_compare_details.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/api_precision_compare_result.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/auto_analyze_log.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/compare_result_pkl.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/compare_result_pkl_md5.png.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/cpu_info.png +0 -0
- /msprobe/{config → docs}/img/free_benchmark.png +0 -0
- /msprobe/{pytorch/doc → docs}/img/module_compare.png +0 -0
+++ msprobe/mindspore/free_benchmark/decorator/dec_forward.py
@@ -0,0 +1,43 @@
+from msprobe.mindspore.free_benchmark.common.config import Config
+from msprobe.mindspore.common.const import Const
+from msprobe.mindspore.common.const import FreeBenchmarkConst
+from msprobe.mindspore.free_benchmark.common.handler_params import HandlerParams
+from msprobe.mindspore.free_benchmark.handler.handler_factory import HandlerFactory
+from msprobe.mindspore.free_benchmark.perturbation.perturbation_factory import PerturbationFactory
+
+
+class ForwardSelfChecker:
+
+    def __init__(self, api_name: str):
+        self.api_name = api_name
+
+    def handle(self, params: HandlerParams):
+        """
+        Actual execution logic of the decorator.
+
+        """
+        perturbation = PerturbationFactory.create(self.api_name)
+        params.fuzzed_result = perturbation.handle(params)
+        params.original_result = params.original_func(*params.args, **params.kwargs)
+        if params.fuzzed_result is not False:
+            return self.deal_fuzzed_and_original_result(params)
+        return params.original_result
+
+    def get_compare_data(self, params: HandlerParams):
+        if self.api_name not in Const.COMMUNICATION_API_LIST:
+            return
+        # The following handles communication-type APIs
+        params.fuzzed_result = params.fuzzed_value
+        if Config.pert_type == FreeBenchmarkConst.IMPROVE_PRECISION:
+            params.original_result = params.args
+        else:
+            params.original_result = params.args[params.index]
+
+    def deal_fuzzed_and_original_result(self, params: HandlerParams):
+        original_result = params.original_result
+        self.get_compare_data(params)
+        handler = HandlerFactory.create(self.api_name)
+        result = handler.handle(params)
+        if self.api_name in Const.COMMUNICATION_API_LIST:
+            result = original_result
+        return result

+++ msprobe/mindspore/free_benchmark/decorator/decorator_factory.py
@@ -0,0 +1,107 @@
+import os
+import sys
+import traceback
+from functools import wraps
+from typing import Tuple, Dict, List
+
+from mindspore import ops
+
+from msprobe.mindspore.runtime import Runtime
+from msprobe.mindspore.common.log import logger
+from msprobe.mindspore.free_benchmark.common.config import Config
+from msprobe.mindspore.free_benchmark.common.handler_params import HandlerParams
+from .dec_forward import ForwardSelfChecker
+
+
+def decorate(original_func, decorate_func, api_name=None):
+    """
+    Top-level decorator.
+    """
+    @wraps(original_func)
+    def fuzz_wrapper(*args, **kwargs):
+
+        def __exec_decorate_func():
+            params = data_pre_deal(api_name, original_func, *args, **kwargs)
+            result = decorate_func(params)
+            return result
+
+        try:
+            if Runtime.rank_id == -1:
+                Runtime.rank_id = os.environ.get("RANK_ID", -1)
+            if need_wrapper_func():
+                logger.info(f"[{api_name}] is checking.")
+                return __exec_decorate_func()
+        except Exception as e:
+            logger.error(f"[{api_name}] Error: {str(e)}")
+            logger.error(f"[{api_name}] Error detail: {traceback.format_exc()}")
+
+        return original_func(*args, **kwargs)
+
+    return fuzz_wrapper
+
+
+def decorate_forward_function(func, api_name=None):
+    """
+    Forward decorator.
+    """
+
+    if not api_name:
+        api_name = func.__name__
+
+    def forward_func(params: HandlerParams):
+        forward = ForwardSelfChecker(api_name)
+        result = forward.handle(params)
+        return result
+
+    return decorate(func, forward_func, api_name)
+
+
+def stack_depth_check() -> bool:
+    nested_depth = 1
+    frame = sys._getframe(1)
+    while frame:
+        if frame.f_code.co_name == "fuzz_wrapper":
+            nested_depth -= 1
+            if nested_depth < 0:
+                return False
+        frame = frame.f_back
+    return True
+
+
+def get_target_arg_index(args: Tuple) -> int:
+    """
+    Type check: locate the first supported argument.
+
+    """
+    for i, arg in enumerate(args):
+        if ops.is_tensor(arg):
+            if not ops.is_floating_point(arg):
+                continue
+            return i
+        if isinstance(arg, (List, Tuple, Dict)):
+            return i
+    return -1
+
+
+def data_pre_deal(api_name, func, *args, **kwargs):
+    params = HandlerParams()
+    params.args = args
+    params.kwargs = kwargs
+    params.original_func = func
+    index = get_target_arg_index(args)
+    if index == -1:
+        raise Exception(f"{api_name} has no supported input type")
+    params.index = index
+    return params
+
+
+def need_wrapper_func():
+    if not (Runtime.is_running and Config.is_enable):
+        return False
+    if not stack_depth_check():
+        return False
+    if Config.steps and Runtime.step_count not in Config.steps:
+        return False
+    if Config.ranks and Runtime.rank_id != -1 and Runtime.rank_id not in Config.ranks:
+        return False
+    return True

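For orientation, a minimal usage sketch of the forward decorator added above (editorial illustration, not part of the package diff). It assumes MindSpore and msprobe 1.0.4 are installed; the label "Functional.mul" is a hypothetical `api_name` chosen for the example, and a perturbed pass only actually runs once the free_benchmark task has been enabled through msprobe's debugger configuration.

```python
# Minimal sketch, assuming MindSpore and msprobe 1.0.4 are importable.
# "Functional.mul" is a hypothetical api_name label used only for illustration.
import mindspore as ms
from mindspore import Tensor, ops
from msprobe.mindspore.free_benchmark.decorator.decorator_factory import decorate_forward_function

wrapped_mul = decorate_forward_function(ops.mul, api_name="Functional.mul")

x = Tensor([1.0, 2.0], ms.float32)
y = Tensor([3.0, 4.0], ms.float32)
# While the free_benchmark task is disabled, need_wrapper_func() is False and
# fuzz_wrapper simply falls back to the original ops.mul.
out = wrapped_mul(x, y)
```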
+++ msprobe/mindspore/free_benchmark/handler/base_handler.py
@@ -0,0 +1,90 @@
+import math
+from abc import ABC, abstractmethod
+from typing import Any, Tuple, Optional
+
+import mindspore as ms
+from mindspore import Tensor, ops
+
+from msprobe.mindspore.common.log import logger
+from msprobe.mindspore.free_benchmark.common.utils import Tools
+from msprobe.mindspore.common.const import FreeBenchmarkConst
+from msprobe.mindspore.free_benchmark.common.handler_params import HandlerParams
+
+
+class BaseHandler(ABC):
+
+    def __init__(self, api_name: str):
+        self.api_name = api_name
+
+    @staticmethod
+    def pre_calculate(original_output, fuzzed_output):
+        abs_tol = FreeBenchmarkConst.PERT_VALUE_DICT.get(fuzzed_output.dtype,
+                                                         FreeBenchmarkConst.PERT_VALUE_DICT.get(ms.float32))
+
+        return original_output.to(fuzzed_output.dtype), fuzzed_output, abs_tol
+
+    @staticmethod
+    def get_threshold(dtype):
+        err = Tools.get_default_error_threshold(dtype)
+        return err
+
+    @staticmethod
+    def convert_overflow_ratio_to_consistent(ratio):
+        if math.isnan(ratio) or math.isinf(ratio):
+            return FreeBenchmarkConst.NO_CHANGE_ERROR_THRESHOLD
+        return ratio
+
+    @staticmethod
+    def get_endless_norm(first_tensor, second_tensor, abs_tol):
+        if first_tensor.dtype != ms.bfloat16 and second_tensor.dtype != ms.bfloat16:
+            ratio_tensor1 = ops.where(ops.abs(second_tensor) > abs_tol, ops.div(first_tensor, second_tensor), 1)
+            ratio_tensor2 = ops.where(ops.abs(first_tensor) > abs_tol, ops.div(second_tensor, first_tensor), 1)
+        else:
+            ratio_tensor1 = ops.where(ops.abs(second_tensor).to(ms.float32) > abs_tol,
+                                      ops.div(first_tensor.to(ms.float32), second_tensor.to(ms.float32)), 1)
+            ratio_tensor2 = ops.where(ops.abs(first_tensor).to(ms.float32) > abs_tol,
+                                      ops.div(second_tensor.to(ms.float32), first_tensor.to(ms.float32)), 1)
+        norm1 = BaseHandler.convert_overflow_ratio_to_consistent(ops.max(ratio_tensor1)[0].to(ms.float32).item())
+        norm2 = BaseHandler.convert_overflow_ratio_to_consistent(ops.max(ratio_tensor2)[0].to(ms.float32).item())
+        norm3 = BaseHandler.convert_overflow_ratio_to_consistent(ops.min(ratio_tensor1)[0].to(ms.float32).item())
+        ratio = FreeBenchmarkConst.SYMBOL_FLIPPING_RATIO if norm3 < 0 else max(norm1, norm2)
+
+        return ratio
+
+    @staticmethod
+    def ratio_calculate(original_output, fuzzed_output) -> float:
+        try:
+            original_output, fuzzed_output, abs_tol = BaseHandler.pre_calculate(original_output, fuzzed_output)
+        except Exception as e:
+            logger.error(f"When computing ratio, y1 or y2 dtype is not supported {str(e)}")
+            return FreeBenchmarkConst.NO_CHANGE_ERROR_THRESHOLD
+
+        abs_tol = abs_tol ** 0.5
+
+        return BaseHandler.get_endless_norm(original_output, fuzzed_output, abs_tol)
+
+    @staticmethod
+    def npu_compare(original_output, fuzzed_output) -> Tuple[bool, Optional[float]]:
+        if not isinstance(fuzzed_output, Tensor):
+            logger.error(f"The compare for output type `{type(fuzzed_output)}` is not supported")
+            return True, 1.0
+
+        # Norm calculation and consistency check
+        err_thd = BaseHandler.get_threshold(original_output.dtype)
+        ratio = BaseHandler.ratio_calculate(original_output, fuzzed_output)
+        is_consistent = err_thd >= ratio >= 1.0 / err_thd
+        return is_consistent, ratio
+
+    @staticmethod
+    def is_float_tensor(output) -> bool:
+        if isinstance(output, Tensor) and ops.is_floating_point(output):
+            return True
+        if isinstance(output, (list, tuple)):
+            for i in output:
+                if isinstance(i, Tensor) and ops.is_floating_point(i):
+                    return True
+        return False
+
+    @abstractmethod
+    def handle(self, params: HandlerParams) -> Any:
+        pass

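The consistency rule applied in `BaseHandler.npu_compare` above reduces to a symmetric ratio test; a simplified stand-alone sketch follows (editorial, not package code; the threshold value is chosen arbitrarily for illustration, the real one comes from `Tools.get_default_error_threshold`).

```python
# Simplified sketch of the npu_compare consistency rule: the original and the
# perturbed output count as consistent when their max ratio stays within
# [1 / err_thd, err_thd]. The threshold below is an arbitrary illustrative value.
def is_consistent(ratio: float, err_thd: float) -> bool:
    return err_thd >= ratio >= 1.0 / err_thd

print(is_consistent(1.001, err_thd=1.004))  # True: within tolerance
print(is_consistent(1.010, err_thd=1.004))  # False: flagged as inconsistent
```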
+++ msprobe/mindspore/free_benchmark/handler/check_handler.py
@@ -0,0 +1,41 @@
+from typing import Any
+from dataclasses import asdict
+
+from mindspore import Tensor, ops
+
+from msprobe.mindspore.common.log import logger
+from msprobe.mindspore.free_benchmark.common.config import Config
+from msprobe.mindspore.free_benchmark.handler.base_handler import BaseHandler
+from msprobe.mindspore.free_benchmark.common.handler_params import HandlerParams
+from msprobe.mindspore.free_benchmark.common.utils import make_unequal_row
+from msprobe.core.data_dump.json_writer import DataWriter
+
+
+class CheckHandler(BaseHandler):
+
+    def npu_compare_and_save(self, original_output, fuzzed_output, params: HandlerParams, output_index=None):
+        is_consistent, ratio = self.npu_compare(original_output, fuzzed_output)
+        params.is_consistent = params.is_consistent and is_consistent
+        if not is_consistent:
+            row = make_unequal_row(self.api_name, params, ratio, output_index)
+            data_dict = asdict(row)
+            DataWriter.write_data_to_csv(
+                data_dict.values(),
+                data_dict.keys(),
+                Config.dump_path
+            )
+            logger.error(f"{self.api_name} is not consistent")
+
+    def handle(self, params: HandlerParams) -> Any:
+        try:
+            if not self.is_float_tensor(params.fuzzed_result):
+                return params.original_result
+            if isinstance(params.fuzzed_result, Tensor):
+                self.npu_compare_and_save(params.original_result, params.fuzzed_result, params)
+            elif isinstance(params.fuzzed_result, (list, tuple)):
+                for i, item in enumerate(params.original_result):
+                    if ops.is_tensor(item) and ops.is_floating_point(item):
+                        self.npu_compare_and_save(item, params.fuzzed_result[i], params, output_index=i)
+        except Exception as e:
+            logger.error(str(e))
+        return params.original_result

+++ msprobe/mindspore/free_benchmark/handler/fix_handler.py
@@ -0,0 +1,36 @@
+from typing import Any
+
+from mindspore import Tensor
+
+from msprobe.mindspore.common.log import logger
+from msprobe.mindspore.free_benchmark.common.handler_params import HandlerParams
+
+
+class FixHandler:
+
+    def __init__(self, api_name: str):
+        self.api_name = api_name
+
+    @staticmethod
+    def use_fuzzed_result(original_result, fuzzed_result):
+        if isinstance(original_result, Tensor):
+            return fuzzed_result.to(original_result.dtype)
+        if isinstance(original_result, dict):
+            dict_fixed_result = dict()
+            for k, v in original_result.items():
+                dict_fixed_result[k] = FixHandler.use_fuzzed_result(v, fuzzed_result[k])
+            return dict_fixed_result
+        if isinstance(original_result, (tuple, list)):
+            list_fixed_result = list()
+            for i, v in enumerate(original_result):
+                list_fixed_result.append(FixHandler.use_fuzzed_result(v, fuzzed_result[i]))
+            return type(original_result)(list_fixed_result)
+        return original_result
+
+    def handle(self, params: HandlerParams) -> Any:
+        try:
+            return FixHandler.use_fuzzed_result(params.original_result, params.fuzzed_result)
+        except Exception as e:
+            logger.error(f"{self.api_name} failed to fix.")
+            logger.error(str(e))
+            return params.original_result

+++ msprobe/mindspore/free_benchmark/handler/handler_factory.py
@@ -0,0 +1,21 @@
+from msprobe.mindspore.common.log import logger
+from msprobe.mindspore.free_benchmark.common.config import Config
+from msprobe.mindspore.common.const import FreeBenchmarkConst
+from .check_handler import CheckHandler
+from .fix_handler import FixHandler
+
+
+class HandlerFactory:
+    result_handlers = {
+        FreeBenchmarkConst.CHECK: CheckHandler,
+        FreeBenchmarkConst.FIX: FixHandler,
+    }
+
+    @staticmethod
+    def create(api_name: str):
+        handler = HandlerFactory.result_handlers.get(Config.handler_type)
+        if handler:
+            return handler(api_name)
+        else:
+            logger.error(f"{Config.handler_type} is not supported.")
+            raise Exception

+++ msprobe/mindspore/free_benchmark/perturbation/add_noise.py
@@ -0,0 +1,67 @@
+from typing import Any
+
+from mindspore import Tensor, ops
+
+from msprobe.mindspore.common.log import logger
+from msprobe.mindspore.free_benchmark.perturbation.base_perturbation import BasePerturbation
+from msprobe.mindspore.free_benchmark.common.handler_params import HandlerParams
+from msprobe.mindspore.common.const import FreeBenchmarkConst
+
+
+class AddNoisePerturbation(BasePerturbation):
+
+    def handle(self, params: HandlerParams) -> Any:
+        """
+        Return the API output after the perturbation is applied.
+
+        """
+        params.fuzzed_value = self.add_noise(params.args[params.index])
+        if not self.is_fuzzed:
+            logger.warning(f"{self.api_name} can not add noise.")
+            return False
+        return self.get_fuzzed_result(params)
+
+    def add_noise(self, inputs) -> Any:
+        """
+        Return the API input after the perturbation is applied.
+
+        """
+        if isinstance(inputs, Tensor):
+            noise = self._get_noise(inputs)
+            if noise is not False:
+                result = ops.where(ops.abs(inputs) > self.perturbation_value ** 0.5,
+                                   ops.add(noise, inputs), inputs)
+                result = result.type(dtype=inputs.dtype)
+                self.is_fuzzed = True
+                return result
+
+        if isinstance(inputs, dict):
+            return {k: self.add_noise(v) for k, v in inputs.items()}
+
+        if isinstance(inputs, (list, tuple)):
+            return [self.add_noise(v) for v in inputs]
+
+        return inputs
+
+    def _get_noise(self, input):
+        """
+        Get the noise value to add.
+
+        """
+        if self.is_fuzzed:
+            return False
+        if not ops.is_floating_point(input) or ops.numel(input) == 0:
+            return False
+
+        pert_value = FreeBenchmarkConst.PERT_VALUE_DICT.get(input.dtype)
+        if not pert_value:
+            return False
+        else:
+            self.perturbation_value = pert_value
+
+        max_val = ops.max(ops.abs(input))[0].item()
+        if max_val < pert_value:
+            return False
+
+        noise = ops.full(input.shape, self.perturbation_value, dtype=input.dtype)
+        return noise

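The add-noise perturbation above adds a tiny dtype-dependent epsilon only where the input magnitude is large enough for the change to be representable. A simplified NumPy sketch of the same idea follows (editorial, not package code; `eps` stands in for the value the package takes from `FreeBenchmarkConst.PERT_VALUE_DICT`).

```python
# Simplified NumPy illustration of the add-noise idea.
import numpy as np

def add_noise_sketch(x: np.ndarray, eps: float) -> np.ndarray:
    # Add eps only where |x| exceeds sqrt(eps), then cast back to the input dtype.
    noisy = np.where(np.abs(x) > eps ** 0.5, x + eps, x)
    return noisy.astype(x.dtype)

x = np.array([0.0, 0.5, 2.0], dtype=np.float32)
print(add_noise_sketch(x, eps=1e-4))
```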
+++ msprobe/mindspore/free_benchmark/perturbation/base_perturbation.py
@@ -0,0 +1,21 @@
+from typing import Any
+
+from msprobe.mindspore.free_benchmark.common.handler_params import HandlerParams
+
+
+class BasePerturbation:
+
+    def __init__(self, api_name: str):
+        self.api_name = api_name
+        self.is_fuzzed = False
+        self.perturbation_value = None
+
+    @staticmethod
+    def get_fuzzed_result(params: HandlerParams):
+        args_front = params.args[:params.index]
+        args_rear = params.args[params.index + 1:]
+        fuzzed_result = params.original_func(*args_front, params.fuzzed_value, *args_rear, **params.kwargs)
+        return fuzzed_result
+
+    def handler(self, params: HandlerParams) -> Any:
+        pass

+++ msprobe/mindspore/free_benchmark/perturbation/bit_noise.py
@@ -0,0 +1,63 @@
+from typing import Any
+
+import numpy as np
+from mindspore import Tensor, ops
+
+from msprobe.mindspore.common.log import logger
+from msprobe.mindspore.common.const import FreeBenchmarkConst
+from msprobe.mindspore.free_benchmark.common.handler_params import HandlerParams
+from msprobe.mindspore.free_benchmark.perturbation.base_perturbation import BasePerturbation
+
+
+class BitNoisePerturbation(BasePerturbation):
+
+    def add_bit_noise(self, inputs) -> Any:
+        if isinstance(inputs, Tensor):
+            bit_len_type = self._get_bit_len_type(inputs)
+            if bit_len_type is not False:
+                sub_normal_np = np.finfo(FreeBenchmarkConst.MS_NUMPY_DTYPE_DICT.get(inputs.dtype)).smallest_normal
+                sub_normal = Tensor(sub_normal_np)
+                noise_type = list(FreeBenchmarkConst.MS_NUMPY_DTYPE_DICT.keys())[
+                    list(FreeBenchmarkConst.MS_NUMPY_DTYPE_DICT.values()).index(bit_len_type)]
+                noise = ops.full(inputs.shape, 1, dtype=noise_type)
+                input_np = inputs.asnumpy()
+                input_np_int = input_np.view(bit_len_type)
+                result = Tensor(input_np_int)
+                result = ops.where(ops.abs(inputs) > sub_normal,
+                                   ops.bitwise_xor(result, noise), result)
+                result_np = result.asnumpy()
+                result_np_float = result_np.view(FreeBenchmarkConst.MS_NUMPY_DTYPE_DICT.get(inputs.dtype))
+                self.is_fuzzed = True
+                return Tensor(result_np_float)
+
+        if isinstance(inputs, dict):
+            return {k: self.add_bit_noise(v) for k, v in inputs.items()}
+        if isinstance(inputs, (tuple, list)):
+            return type(inputs)([self.add_bit_noise(v) for v in inputs])
+        return inputs
+
+    def handle(self, params: HandlerParams) -> any:
+        args = params.args
+        params.fuzzed_value = self.add_bit_noise(params.args[params.index])
+        if not self.is_fuzzed:
+            logger.warning(f"{self.api_name} can not add bit noise.")
+            return False
+        params.args = args
+        return self.get_fuzzed_result(params)
+
+    def _get_bit_len_type(self, input):
+        if self.is_fuzzed:
+            return False
+        if not isinstance(input, Tensor) or not ops.is_floating_point(input) or \
+                input.numel() == 0:
+            return False
+        bit_len_type = FreeBenchmarkConst.PERT_BIT_DICT.get(input.dtype)
+        if not bit_len_type:
+            return False
+        pert_value = FreeBenchmarkConst.PERT_VALUE_DICT.get(input.dtype)
+        if not pert_value:
+            return False
+        max_val = ops.max(ops.abs(input))[0].item()
+        if max_val < pert_value:
+            return False
+        return bit_len_type

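`BitNoisePerturbation` above flips the lowest mantissa bit of each normal float by reinterpreting the tensor buffer as integers and XOR-ing with 1. A simplified float32 NumPy sketch of that technique follows (editorial, not package code).

```python
# Simplified NumPy illustration of the bit-noise idea for float32 inputs.
import numpy as np

def flip_lsb_sketch(x: np.ndarray) -> np.ndarray:
    # Reinterpret the float32 buffer as uint32.
    as_int = x.astype(np.float32).view(np.uint32)
    # XOR the least-significant mantissa bit, leaving zero/subnormal values untouched.
    flipped = np.where(np.abs(x) > np.finfo(np.float32).smallest_normal,
                       as_int ^ np.uint32(1), as_int)
    return flipped.view(np.float32)

print(flip_lsb_sketch(np.array([1.0, 0.0, 3.5], dtype=np.float32)))
```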
+++ msprobe/mindspore/free_benchmark/perturbation/exchange_value.py
@@ -0,0 +1,51 @@
+from typing import Any
+
+from mindspore import Tensor
+
+from msprobe.mindspore.common.log import logger
+from msprobe.mindspore.free_benchmark.perturbation.base_perturbation import BasePerturbation
+from msprobe.mindspore.free_benchmark.common.handler_params import HandlerParams
+
+
+class ExchangeValuePerturbation(BasePerturbation):
+
+    def handle(self, params: HandlerParams) -> Any:
+        """
+        Return the API output after swapping the first and last elements.
+
+        """
+        params.fuzzed_value = self.exchange_value(params.args[params.index])
+        if not self.is_fuzzed:
+            logger.warning(f"{self.api_name} can not exchange value.")
+            return False
+        return self.get_fuzzed_result(params)
+
+    def exchange_value(self, inputs) -> Any:
+        """
+        Return the API input after swapping the first and last elements.
+
+        """
+        if isinstance(inputs, Tensor):
+            if not self.is_fuzzed and len(inputs.shape) > 0 and inputs.shape[0] > 1:
+                result = inputs.copy()
+                if len(inputs.shape) == 1:
+                    first_element = inputs[0]
+                    last_element = inputs[-1]
+                    result[0] = last_element
+                    result[-1] = first_element
+                else:
+                    first_element = inputs[0][0]
+                    last_element = inputs[-1][-1]
+                    result[0][0] = last_element
+                    result[-1][-1] = first_element
+
+                self.is_fuzzed = True
+                return result
+
+        if isinstance(inputs, dict):
+            return {k: self.exchange_value(v) for k, v in inputs.items()}
+
+        if isinstance(inputs, (list, tuple)):
+            return type(inputs)([self.exchange_value(v) for v in inputs])
+
+        return inputs

+++ msprobe/mindspore/free_benchmark/perturbation/improve_precision.py
@@ -0,0 +1,35 @@
+from typing import Any
+
+import mindspore as ms
+from mindspore import Tensor, ops
+
+from msprobe.mindspore.free_benchmark.perturbation.base_perturbation import BasePerturbation
+from msprobe.mindspore.free_benchmark.common.handler_params import HandlerParams
+from msprobe.mindspore.common.const import FreeBenchmarkConst
+from msprobe.mindspore.common.log import logger
+from msprobe.mindspore.common.const import Const
+
+
+class ImprovePrecisionPerturbation(BasePerturbation):
+
+    def improve_tensor_precision(self, target_tensor):
+        if isinstance(target_tensor, Tensor) and ops.is_floating_point(target_tensor) and \
+                target_tensor.dtype not in [ms.float64, ms.float32]:
+            self.is_fuzzed = True
+            return target_tensor.to(ms.float32)
+        if isinstance(target_tensor, dict):
+            return {k: self.improve_tensor_precision(v) for k, v in target_tensor.items()}
+        if isinstance(target_tensor, (tuple, list)):
+            return type(target_tensor)([self.improve_tensor_precision(v) for v in target_tensor])
+        return target_tensor
+
+    def handle(self, params: HandlerParams) -> Any:
+        args = self.improve_tensor_precision(params.args)
+        kwargs = self.improve_tensor_precision(params.kwargs)
+        fuzzed_value = args
+        if self.api_name in Const.COMMUNICATION_API_LIST:
+            params.fuzzed_value = fuzzed_value
+        if not self.is_fuzzed:
+            logger.warning(f"{self.api_name} can not improve precision.")
+            return False
+        return params.original_func(*args, **kwargs)

+++ msprobe/mindspore/free_benchmark/perturbation/no_change.py
@@ -0,0 +1,12 @@
+from typing import Any
+
+from msprobe.mindspore.free_benchmark.perturbation.base_perturbation import BasePerturbation
+from msprobe.mindspore.free_benchmark.common.handler_params import HandlerParams
+
+
+class NoChangePerturbation(BasePerturbation):
+
+    def handle(self, params: HandlerParams) -> Any:
+        params.fuzzed_value = params.args[params.index]
+        self.is_fuzzed = True
+        return self.get_fuzzed_result(params)

+++ msprobe/mindspore/free_benchmark/perturbation/perturbation_factory.py
@@ -0,0 +1,29 @@
+from msprobe.mindspore.common.const import FreeBenchmarkConst
+from msprobe.mindspore.free_benchmark.common.config import Config
+from .add_noise import AddNoisePerturbation
+from .bit_noise import BitNoisePerturbation
+from .no_change import NoChangePerturbation
+from .improve_precision import ImprovePrecisionPerturbation
+from .exchange_value import ExchangeValuePerturbation
+
+
+class PerturbationFactory:
+    """
+    Perturbation factory class.
+
+    """
+    perturbations = {
+        FreeBenchmarkConst.IMPROVE_PRECISION: ImprovePrecisionPerturbation,
+        FreeBenchmarkConst.ADD_NOISE: AddNoisePerturbation,
+        FreeBenchmarkConst.BIT_NOISE: BitNoisePerturbation,
+        FreeBenchmarkConst.NO_CHANGE: NoChangePerturbation,
+        FreeBenchmarkConst.EXCHANGE_VALUE: ExchangeValuePerturbation
+    }
+
+    @staticmethod
+    def create(api_name: str):
+        perturbation = PerturbationFactory.perturbations.get(Config.pert_type)
+        if perturbation:
+            return perturbation(api_name)
+        else:
+            raise Exception(f'{Config.pert_type} is a invalid perturbation type')

+++ msprobe/mindspore/free_benchmark/self_check_tool_factory.py
@@ -0,0 +1,33 @@
+from msprobe.mindspore.common.const import Const
+from msprobe.mindspore.debugger.debugger_config import DebuggerConfig
+from msprobe.mindspore.free_benchmark.api_pynative_self_check import ApiPyNativeSelFCheck
+
+
+class SelfCheckToolFactory:
+    tools = {
+        Const.CELL: {
+            Const.GRAPH_KBYK_MODE: None,
+            Const.GRAPH_GE_MODE: None,
+            Const.PYNATIVE_MODE: None
+        },
+        Const.API: {
+            Const.GRAPH_KBYK_MODE: None,
+            Const.GRAPH_GE_MODE: None,
+            Const.PYNATIVE_MODE: ApiPyNativeSelFCheck
+        },
+        Const.KERNEL: {
+            Const.GRAPH_KBYK_MODE: None,
+            Const.GRAPH_GE_MODE: None,
+            Const.PYNATIVE_MODE: None
+        }
+    }
+
+    @staticmethod
+    def create(config: DebuggerConfig):
+        tool = SelfCheckToolFactory.tools.get(config.level)
+        if not tool:
+            raise Exception(f"{config.level} is not supported.")
+        tool = tool.get(config.execution_mode)
+        if not tool:
+            raise Exception(f"Task free_benchmark is not supported in this mode: {config.execution_mode}.")
+        return tool(config)