mindstudio-probe 1.1.0__py3-none-any.whl → 1.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mindstudio_probe-1.1.0.dist-info → mindstudio_probe-1.2.1.dist-info}/METADATA +7 -6
- mindstudio_probe-1.2.1.dist-info/RECORD +396 -0
- {mindstudio_probe-1.1.0.dist-info → mindstudio_probe-1.2.1.dist-info}/WHEEL +1 -1
- {mindstudio_probe-1.1.0.dist-info → mindstudio_probe-1.2.1.dist-info}/entry_points.txt +0 -1
- msprobe/CMakeLists.txt +5 -0
- msprobe/README.md +51 -20
- msprobe/config.json +2 -3
- msprobe/core/advisor/advisor.py +8 -3
- msprobe/core/common/const.py +264 -15
- msprobe/core/common/exceptions.py +27 -3
- msprobe/core/common/file_utils.py +176 -26
- msprobe/core/common/inplace_op_checker.py +15 -0
- msprobe/core/common/inplace_ops.yaml +3 -0
- msprobe/core/common/log.py +27 -9
- msprobe/core/common/utils.py +204 -77
- msprobe/core/common_config.py +49 -14
- msprobe/core/compare/acc_compare.py +274 -198
- msprobe/core/compare/check.py +32 -33
- msprobe/core/compare/compare_cli.py +32 -14
- msprobe/core/compare/highlight.py +283 -127
- msprobe/core/compare/layer_mapping/__init__.py +19 -0
- msprobe/core/compare/layer_mapping/data_scope_parser.py +246 -0
- msprobe/core/compare/layer_mapping/layer_mapping.py +249 -0
- msprobe/core/compare/layer_mapping/postprocess_pass.py +95 -0
- msprobe/core/compare/merge_result/merge_result.py +380 -0
- msprobe/core/compare/merge_result/merge_result_cli.py +31 -0
- msprobe/core/compare/multiprocessing_compute.py +2 -2
- msprobe/core/compare/npy_compare.py +135 -144
- msprobe/core/compare/utils.py +419 -274
- msprobe/core/data_dump/data_collector.py +60 -28
- msprobe/core/data_dump/data_processor/base.py +84 -36
- msprobe/core/data_dump/data_processor/factory.py +5 -3
- msprobe/core/data_dump/data_processor/mindspore_processor.py +152 -18
- msprobe/core/data_dump/data_processor/pytorch_processor.py +267 -110
- msprobe/core/data_dump/json_writer.py +29 -1
- msprobe/core/data_dump/scope.py +119 -39
- msprobe/core/grad_probe/constant.py +27 -13
- msprobe/core/grad_probe/grad_compare.py +18 -1
- msprobe/core/grad_probe/utils.py +30 -2
- msprobe/core/overflow_check/abnormal_scene.py +189 -0
- msprobe/core/overflow_check/api_info.py +55 -0
- msprobe/core/overflow_check/checker.py +138 -0
- msprobe/core/overflow_check/filter.py +157 -0
- msprobe/core/overflow_check/ignore_rules.yaml +55 -0
- msprobe/core/overflow_check/level.py +22 -0
- msprobe/core/overflow_check/utils.py +28 -0
- msprobe/docs/01.installation.md +96 -7
- msprobe/docs/02.config_introduction.md +50 -23
- msprobe/docs/03.config_examples.md +2 -9
- msprobe/docs/04.kernel_dump_PyTorch.md +73 -0
- msprobe/docs/05.data_dump_PyTorch.md +93 -61
- msprobe/docs/06.data_dump_MindSpore.md +200 -95
- msprobe/docs/07.accuracy_checker_PyTorch.md +28 -28
- msprobe/docs/08.accuracy_checker_online_PyTorch.md +1 -6
- msprobe/docs/09.accuracy_checker_MindSpore.md +44 -8
- msprobe/docs/10.accuracy_compare_PyTorch.md +114 -50
- msprobe/docs/11.accuracy_compare_MindSpore.md +340 -48
- msprobe/docs/12.overflow_check_PyTorch.md +2 -2
- msprobe/docs/13.overflow_check_MindSpore.md +6 -6
- msprobe/docs/15.free_benchmarking_PyTorch.md +4 -5
- msprobe/docs/16.free_benchmarking_MindSpore.md +56 -37
- msprobe/docs/17.grad_probe.md +5 -6
- msprobe/docs/19.monitor.md +561 -0
- msprobe/docs/20.monitor_performance_baseline.md +52 -0
- msprobe/docs/21.visualization_PyTorch.md +466 -0
- msprobe/docs/22.visualization_MindSpore.md +481 -0
- msprobe/docs/23.generate_operator_PyTorch.md +107 -0
- msprobe/docs/24.code_mapping_Mindspore.md +28 -0
- msprobe/docs/25.tool_function_introduction.md +29 -0
- msprobe/docs/26.data_dump_PyTorch_baseline.md +37 -0
- msprobe/docs/27.dump_json_instruction.md +521 -0
- msprobe/docs/FAQ.md +29 -2
- msprobe/docs/accuracy_checker_MindSpore/accuracy_checker_MindSpore_baseline.md +14 -0
- msprobe/docs/data_dump_MindSpore/data_dump_MindSpore_baseline.md +22 -0
- msprobe/docs/data_dump_MindSpore/dynamic_graph_quick_start_example.md +211 -0
- msprobe/docs/img/compare_result.png +0 -0
- msprobe/docs/img/merge_result.png +0 -0
- msprobe/docs/img/monitor/cpu_info.png +0 -0
- msprobe/docs/img/visualization/fuzzy_match_ms.png +0 -0
- msprobe/docs/img/visualization/fuzzy_match_pt.png +0 -0
- msprobe/docs/img/visualization/tensorboard_1.png +0 -0
- msprobe/docs/img/visualization/tensorboard_2.png +0 -0
- msprobe/docs/img/visualization/vis_browser_1.png +0 -0
- msprobe/docs/img/visualization/vis_browser_2.png +0 -0
- msprobe/docs/img/visualization/vis_precision_info.png +0 -0
- msprobe/docs/img/visualization/vis_search_info.png +0 -0
- msprobe/docs/img/visualization/vis_show_info.png +0 -0
- msprobe/docs/img/visualization/vis_showcase.png +0 -0
- msprobe/docs/img/visualization/vis_unmatch_info.png +0 -0
- msprobe/docs/visualization/GPTModel.png +0 -0
- msprobe/docs/visualization/ParallelMLP.png +0 -0
- msprobe/docs/visualization/layer_mapping_example.md +132 -0
- msprobe/docs/visualization/mapping.png +0 -0
- msprobe/docs/visualization/mapping1.png +0 -0
- msprobe/docs/visualization/module_name.png +0 -0
- msprobe/docs/visualization/module_name1.png +0 -0
- msprobe/docs/visualization/no_mapping.png +0 -0
- msprobe/docs/visualization/no_mapping1.png +0 -0
- msprobe/docs/visualization/no_mapping_analyze.png +0 -0
- msprobe/docs/visualization/top_layer.png +0 -0
- msprobe/mindspore/__init__.py +25 -0
- msprobe/mindspore/api_accuracy_checker/api_accuracy_checker.py +151 -151
- msprobe/mindspore/api_accuracy_checker/api_info.py +21 -6
- msprobe/mindspore/api_accuracy_checker/api_runner.py +43 -18
- msprobe/mindspore/api_accuracy_checker/base_compare_algorithm.py +21 -7
- msprobe/mindspore/api_accuracy_checker/checker_support_api.yaml +77 -0
- msprobe/mindspore/api_accuracy_checker/cmd_parser.py +64 -1
- msprobe/mindspore/api_accuracy_checker/compute_element.py +64 -31
- msprobe/mindspore/api_accuracy_checker/data_manager.py +301 -0
- msprobe/mindspore/api_accuracy_checker/main.py +28 -3
- msprobe/mindspore/api_accuracy_checker/multi_api_accuracy_checker.py +212 -0
- msprobe/mindspore/api_accuracy_checker/multi_data_manager.py +60 -0
- msprobe/mindspore/api_accuracy_checker/type_mapping.py +22 -5
- msprobe/mindspore/api_accuracy_checker/utils.py +34 -17
- msprobe/mindspore/cell_processor.py +33 -12
- msprobe/mindspore/code_mapping/bind.py +264 -0
- msprobe/mindspore/code_mapping/cmd_parser.py +40 -0
- msprobe/mindspore/code_mapping/graph.py +49 -0
- msprobe/mindspore/code_mapping/graph_parser.py +226 -0
- msprobe/mindspore/code_mapping/main.py +24 -0
- msprobe/mindspore/code_mapping/processor.py +34 -0
- msprobe/mindspore/common/const.py +35 -13
- msprobe/mindspore/common/log.py +5 -9
- msprobe/mindspore/common/utils.py +88 -4
- msprobe/mindspore/compare/distributed_compare.py +22 -24
- msprobe/mindspore/compare/ms_compare.py +333 -268
- msprobe/mindspore/compare/ms_graph_compare.py +95 -52
- msprobe/mindspore/debugger/debugger_config.py +7 -1
- msprobe/mindspore/debugger/precision_debugger.py +87 -12
- msprobe/mindspore/dump/dump_tool_factory.py +3 -1
- msprobe/mindspore/dump/hook_cell/api_registry.py +95 -18
- msprobe/mindspore/dump/hook_cell/hook_cell.py +60 -38
- msprobe/mindspore/dump/hook_cell/primitive_hooks.py +45 -30
- msprobe/mindspore/dump/hook_cell/support_wrap_ops.yaml +36 -1
- msprobe/mindspore/dump/hook_cell/wrap_api.py +92 -1
- msprobe/mindspore/dump/jit_dump.py +17 -5
- msprobe/mindspore/dump/kernel_dump/kernel_config.py +33 -0
- msprobe/mindspore/dump/kernel_graph_dump.py +9 -4
- msprobe/mindspore/dump/kernel_kbyk_dump.py +2 -4
- msprobe/mindspore/dym_loader/hook_dynamic_loader.cc +140 -0
- msprobe/mindspore/dym_loader/hook_dynamic_loader.h +53 -0
- msprobe/mindspore/free_benchmark/api_pynative_self_check.py +156 -41
- msprobe/mindspore/free_benchmark/common/handler_params.py +1 -2
- msprobe/mindspore/free_benchmark/common/utils.py +19 -4
- msprobe/mindspore/free_benchmark/data/support_wrap_ops.yaml +0 -204
- msprobe/mindspore/free_benchmark/handler/base_handler.py +3 -3
- msprobe/mindspore/free_benchmark/handler/check_handler.py +4 -5
- msprobe/mindspore/free_benchmark/handler/fix_handler.py +4 -4
- msprobe/mindspore/free_benchmark/handler/handler_factory.py +4 -4
- msprobe/mindspore/free_benchmark/perturbation/add_noise.py +2 -2
- msprobe/mindspore/free_benchmark/perturbation/base_perturbation.py +15 -6
- msprobe/mindspore/free_benchmark/perturbation/bit_noise.py +2 -2
- msprobe/mindspore/free_benchmark/perturbation/exchange_value.py +2 -2
- msprobe/mindspore/free_benchmark/perturbation/improve_precision.py +13 -6
- msprobe/mindspore/free_benchmark/perturbation/perturbation_factory.py +2 -2
- msprobe/mindspore/free_benchmark/self_check_tool_factory.py +2 -2
- msprobe/mindspore/grad_probe/global_context.py +28 -8
- msprobe/mindspore/grad_probe/grad_analyzer.py +50 -24
- msprobe/mindspore/grad_probe/grad_monitor.py +16 -1
- msprobe/mindspore/grad_probe/grad_stat_csv.py +33 -5
- msprobe/mindspore/grad_probe/hook.py +35 -12
- msprobe/mindspore/grad_probe/utils.py +18 -5
- msprobe/mindspore/mindtorch/__init__.py +18 -0
- msprobe/mindspore/mindtorch/mindtorch_adaptor.py +255 -0
- msprobe/mindspore/ms_config.py +27 -16
- msprobe/mindspore/overflow_check/kernel_graph_overflow_check.py +9 -4
- msprobe/mindspore/runtime.py +15 -0
- msprobe/mindspore/service.py +285 -113
- msprobe/mindspore/task_handler_factory.py +15 -0
- msprobe/msprobe.py +48 -10
- msprobe/pytorch/__init__.py +8 -6
- msprobe/pytorch/api_accuracy_checker/common/config.py +62 -0
- msprobe/pytorch/api_accuracy_checker/common/utils.py +31 -16
- msprobe/pytorch/api_accuracy_checker/compare/algorithm.py +41 -8
- msprobe/pytorch/api_accuracy_checker/compare/api_precision_compare.py +103 -271
- msprobe/pytorch/api_accuracy_checker/compare/api_precision_standard.yaml +4 -1
- msprobe/pytorch/api_accuracy_checker/compare/compare.py +69 -68
- msprobe/pytorch/api_accuracy_checker/compare/compare_column.py +54 -0
- msprobe/pytorch/api_accuracy_checker/compare/compare_input.py +51 -0
- msprobe/pytorch/api_accuracy_checker/compare/compare_utils.py +2 -4
- msprobe/pytorch/api_accuracy_checker/generate_op_script/config_op.json +9 -0
- msprobe/pytorch/api_accuracy_checker/generate_op_script/op_generator.py +478 -0
- msprobe/pytorch/api_accuracy_checker/generate_op_script/operator_replication.template +365 -0
- msprobe/pytorch/api_accuracy_checker/precision_standard/absolute_threshold.py +106 -0
- msprobe/pytorch/api_accuracy_checker/precision_standard/accumulative_error_compare.py +107 -0
- msprobe/pytorch/api_accuracy_checker/precision_standard/base_standard.py +151 -0
- msprobe/pytorch/api_accuracy_checker/precision_standard/benchmark_compare.py +226 -0
- msprobe/pytorch/api_accuracy_checker/precision_standard/binary_consistency.py +68 -0
- msprobe/pytorch/api_accuracy_checker/precision_standard/standard_config.py +218 -0
- msprobe/pytorch/api_accuracy_checker/precision_standard/standard_register.py +104 -0
- msprobe/pytorch/api_accuracy_checker/precision_standard/thousandth_standard.py +63 -0
- msprobe/pytorch/api_accuracy_checker/precision_standard/ulp_compare.py +200 -0
- msprobe/pytorch/api_accuracy_checker/run_ut/data_generate.py +63 -2
- msprobe/pytorch/api_accuracy_checker/run_ut/multi_run_ut.py +21 -15
- msprobe/pytorch/api_accuracy_checker/run_ut/run_overflow_check.py +54 -22
- msprobe/pytorch/api_accuracy_checker/run_ut/run_ut.py +140 -71
- msprobe/pytorch/api_accuracy_checker/run_ut/run_ut_utils.py +49 -8
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/attl.py +9 -24
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/client.py +4 -12
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/device_dispatch.py +5 -3
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/dump_dispatch.py +9 -4
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/server.py +3 -11
- msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/utils.py +2 -2
- msprobe/pytorch/bench_functions/confusion_transpose.py +5 -1
- msprobe/pytorch/bench_functions/matmul_backward.py +12 -0
- msprobe/pytorch/bench_functions/npu_fusion_attention.py +142 -16
- msprobe/pytorch/bench_functions/rotary_mul.py +4 -0
- msprobe/pytorch/bench_functions/swiglu.py +10 -2
- msprobe/pytorch/common/parse_json.py +7 -6
- msprobe/pytorch/common/utils.py +101 -7
- msprobe/pytorch/compare/distributed_compare.py +17 -30
- msprobe/pytorch/compare/pt_compare.py +44 -22
- msprobe/pytorch/debugger/debugger_config.py +46 -27
- msprobe/pytorch/debugger/precision_debugger.py +42 -12
- msprobe/pytorch/dump/kernel_dump/kernel_config.py +33 -0
- msprobe/pytorch/dump/module_dump/module_dump.py +86 -0
- msprobe/pytorch/{module_processer.py → dump/module_dump/module_processer.py} +81 -10
- msprobe/pytorch/free_benchmark/common/constant.py +15 -0
- msprobe/pytorch/free_benchmark/common/counter.py +15 -0
- msprobe/pytorch/free_benchmark/common/enums.py +15 -0
- msprobe/pytorch/free_benchmark/common/params.py +10 -2
- msprobe/pytorch/free_benchmark/common/utils.py +29 -4
- msprobe/pytorch/free_benchmark/compare/grad_saver.py +20 -5
- msprobe/pytorch/free_benchmark/compare/single_benchmark.py +2 -0
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/add_noise.py +3 -1
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/bit_noise.py +6 -4
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/change_value.py +2 -0
- msprobe/pytorch/free_benchmark/perturbed_layers/npu/improve_precision.py +4 -0
- msprobe/pytorch/free_benchmark/result_handlers/base_handler.py +41 -47
- msprobe/pytorch/free_benchmark/result_handlers/fix_handler.py +6 -5
- msprobe/pytorch/free_benchmark/result_handlers/preheat_handler.py +0 -4
- msprobe/pytorch/grad_probe/grad_monitor.py +23 -6
- msprobe/pytorch/grad_probe/grad_stat_csv.py +40 -10
- msprobe/pytorch/hook_module/__init__.py +1 -1
- msprobe/pytorch/hook_module/hook_module.py +14 -11
- msprobe/pytorch/hook_module/register_optimizer_hook.py +59 -0
- msprobe/pytorch/hook_module/support_wrap_ops.yaml +35 -0
- msprobe/pytorch/hook_module/wrap_distributed.py +6 -8
- msprobe/pytorch/hook_module/wrap_functional.py +0 -38
- msprobe/pytorch/monitor/__init__.py +0 -0
- msprobe/pytorch/monitor/anomaly_analyse.py +201 -0
- msprobe/pytorch/monitor/anomaly_detect.py +425 -0
- msprobe/pytorch/monitor/csv2tb.py +166 -0
- msprobe/pytorch/monitor/distributed/__init__.py +0 -0
- msprobe/pytorch/monitor/distributed/distributed_ops.yaml +19 -0
- msprobe/pytorch/monitor/distributed/stack_blacklist.yaml +5 -0
- msprobe/pytorch/monitor/distributed/wrap_distributed.py +283 -0
- msprobe/pytorch/monitor/features.py +108 -0
- msprobe/pytorch/monitor/module_hook.py +1076 -0
- msprobe/pytorch/monitor/module_metric.py +172 -0
- msprobe/pytorch/monitor/module_spec_verifier.py +95 -0
- msprobe/pytorch/monitor/optimizer_collect.py +333 -0
- msprobe/pytorch/monitor/unittest/__init__.py +0 -0
- msprobe/pytorch/monitor/unittest/test_monitor.py +160 -0
- msprobe/pytorch/monitor/utils.py +321 -0
- msprobe/pytorch/monitor/visualizer.py +59 -0
- msprobe/pytorch/online_dispatch/__init__.py +2 -3
- msprobe/pytorch/online_dispatch/compare.py +29 -38
- msprobe/pytorch/online_dispatch/dispatch.py +58 -27
- msprobe/pytorch/online_dispatch/dump_compare.py +21 -9
- msprobe/pytorch/online_dispatch/single_compare.py +53 -32
- msprobe/pytorch/online_dispatch/torch_ops_config.yaml +1 -1
- msprobe/pytorch/online_dispatch/utils.py +49 -21
- msprobe/pytorch/parse_tool/lib/compare.py +21 -27
- msprobe/pytorch/parse_tool/lib/config.py +6 -8
- msprobe/pytorch/parse_tool/lib/file_desc.py +15 -1
- msprobe/pytorch/parse_tool/lib/interactive_cli.py +10 -10
- msprobe/pytorch/parse_tool/lib/parse_exception.py +7 -7
- msprobe/pytorch/parse_tool/lib/parse_tool.py +12 -12
- msprobe/pytorch/parse_tool/lib/utils.py +33 -53
- msprobe/pytorch/parse_tool/lib/visualization.py +11 -10
- msprobe/pytorch/pt_config.py +31 -8
- msprobe/pytorch/service.py +188 -108
- msprobe/visualization/__init__.py +14 -0
- msprobe/visualization/builder/__init__.py +14 -0
- msprobe/visualization/builder/graph_builder.py +222 -0
- msprobe/visualization/builder/msprobe_adapter.py +227 -0
- msprobe/visualization/compare/__init__.py +14 -0
- msprobe/visualization/compare/graph_comparator.py +180 -0
- msprobe/visualization/compare/mode_adapter.py +197 -0
- msprobe/visualization/graph/__init__.py +14 -0
- msprobe/visualization/graph/base_node.py +119 -0
- msprobe/visualization/graph/distributed_analyzer.py +318 -0
- msprobe/visualization/graph/graph.py +209 -0
- msprobe/visualization/graph/node_colors.py +95 -0
- msprobe/visualization/graph/node_op.py +39 -0
- msprobe/visualization/graph_service.py +288 -0
- msprobe/visualization/utils.py +217 -0
- mindstudio_probe-1.1.0.dist-info/RECORD +0 -287
- msprobe/docs/04.acl_config_examples.md +0 -78
- msprobe/mindspore/compare/layer_mapping.py +0 -146
- msprobe/mindspore/compare/modify_mapping.py +0 -107
- msprobe/mindspore/free_benchmark/decorator/dec_forward.py +0 -57
- msprobe/mindspore/free_benchmark/decorator/decorator_factory.py +0 -122
- msprobe/pytorch/functional/module_dump.py +0 -84
- {mindstudio_probe-1.1.0.dist-info → mindstudio_probe-1.2.1.dist-info}/LICENSE +0 -0
- {mindstudio_probe-1.1.0.dist-info → mindstudio_probe-1.2.1.dist-info}/top_level.txt +0 -0
- /msprobe/mindspore/{free_benchmark/decorator → code_mapping}/__init__.py +0 -0
- /msprobe/pytorch/{functional → dump/module_dump}/__init__.py +0 -0
|
@@ -0,0 +1,166 @@
|
|
|
1
|
+
# Copyright (c) 2024-2025, Huawei Technologies Co., Ltd.
|
|
2
|
+
# All rights reserved.
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
import datetime
|
|
16
|
+
import os
|
|
17
|
+
import re
|
|
18
|
+
from multiprocessing import Process
|
|
19
|
+
|
|
20
|
+
import pytz
|
|
21
|
+
from torch.utils.tensorboard import SummaryWriter
|
|
22
|
+
from tqdm import tqdm
|
|
23
|
+
|
|
24
|
+
from msprobe.core.common.const import MonitorConst
|
|
25
|
+
from msprobe.core.common.file_utils import read_csv, create_directory, remove_path
|
|
26
|
+
from msprobe.core.common.utils import is_int
|
|
27
|
+
from msprobe.pytorch.common.log import logger
|
|
28
|
+
from msprobe.pytorch.monitor.utils import get_target_output_dir
|
|
29
|
+
|
|
30
|
+
# Metric categories the monitor can dump; used both to validate user input
# and as the default set of data types to convert.
all_data_type_list = ["actv", "actv_grad", "exp_avg", "exp_avg_sq", "grad_unreduced", "grad_reduced", "param"]
# Monitor csv files are named "<data_type>_<start>-<end>.csv"; this regex
# matches the suffix after the data-type prefix.
CSV_FILE_SUFFIX = r"_\d+-\d+\.csv"
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def parse_step_line(data, line_id, name, ops):
    """Extract one monitoring record from tabular csv data.

    Returns a (tag, step, metrics) tuple: tag combines the vpp stage and
    the value of the ``name`` column, step is the training step of the
    row, and metrics maps each op column to its value on that row.
    """
    row_tag = f'vp{data["vpp_stage"][line_id]}:{data[name][line_id]}'
    metrics = {op: data[op][line_id] for op in ops}
    return row_tag, data["step"][line_id], metrics
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def parse_step_fn(filepath):
    """Parse one monitor csv file into ``{vpp_name: {step: {op: value}}}``.

    Column layout comes from MonitorConst: the name column sits at
    HEADER_NAME_INDEX and every column from OPS_START_INDEX onwards is a
    metric. Raises if the same (vpp_name, step) pair occurs twice.
    """
    data = read_csv(filepath)
    columns = list(data.keys())
    name_col = columns[MonitorConst.HEADER_NAME_INDEX]
    op_cols = columns[MonitorConst.OPS_START_INDEX:]

    result = {}
    for row_id in range(len(data)):
        vpp_name, step, ops_result = parse_step_line(data, row_id, name_col, op_cols)
        per_step = result.setdefault(vpp_name, {})
        if step in per_step:
            raise Exception(f"duplicated step({step})")
        per_step[step] = ops_result
    return result
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def write_step(output_dirpath, parse_step_result, rank, data_type):
    """Write parsed csv metrics to a tensorboard event file.

    Args:
        output_dirpath: root directory for the tensorboard output.
        parse_step_result: mapping ``{vpp_name: {step: {op: value}}}``.
        rank: rank id, used to build the per-rank sub-directory.
        data_type: metric category (e.g. "actv"), used as a sub-directory.
    """
    tb_output_path = os.path.join(output_dirpath, f"rank{rank}", data_type)
    if os.path.exists(tb_output_path):
        remove_path(tb_output_path)
        logger.warning(f"existing path {tb_output_path} will be recovered")
    writer = SummaryWriter(tb_output_path)
    try:
        for vpp_name, step_data_dict in parse_step_result.items():
            # write in step order so the event file is monotonic
            for step, ops in sorted(step_data_dict.items(), key=lambda x: x[0]):
                for op, value in ops.items():
                    writer.add_scalar(f"{vpp_name}/{op}", value, step)
    finally:
        # bug fix: the writer was never closed, which leaked the underlying
        # file handle and could leave the last events unflushed
        writer.close()
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
def update_dict(dict1, dict2):
    """Recursively merge dict2 into dict1 and return dict1.

    Nested dicts are merged in place; a key present in both sides with
    non-dict values is a conflict and raises.
    """
    for key, value in dict2.items():
        if key not in dict1:
            dict1[key] = value
            continue
        existing = dict1[key]
        if not (isinstance(existing, dict) and isinstance(value, dict)):
            raise Exception(f"duplicate key: {key}")
        try:
            update_dict(existing, value)
        except Exception as e:
            raise Exception(f"Error updating nested dict failed at key '{key}': {e}") from e
    return dict1
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def csv2tb_by_step_work(target_output_dirs, output_dirpath, data_type_list):
    """Worker: convert matching csv files under each target dir to tensorboard.

    For every (rank directory, data type) pair, all files named
    ``<data_type>_<start>-<end>.csv`` are parsed and merged into one
    tensorboard run. A parse failure aborts the remaining files of the
    current data type; results gathered so far are still written.
    """
    for directory in tqdm(target_output_dirs):
        rank, dirpath = directory["rank"], directory["path"]
        for data_type in data_type_list:
            pattern = f"{data_type}{CSV_FILE_SUFFIX}"
            merged = {}
            for filename in os.listdir(dirpath):
                if re.match(pattern, filename) is None:
                    continue
                filepath = os.path.join(dirpath, filename)
                try:
                    parsed = parse_step_fn(filepath)
                except Exception as e:
                    logger.error(f"csv2tensorboard parse {filepath} failed \n {e}")
                    break
                merged = update_dict(merged, parsed)
            if merged:
                write_step(output_dirpath, merged, rank, data_type)
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
def check_process_num(process_num):
    """Validate that process_num is a positive integer; raise ValueError otherwise."""
    valid = is_int(process_num) and process_num > 0
    if not valid:
        raise ValueError(f"process_num({process_num}) is not a positive integer")
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
def check_data_type_list(data_type_list):
    """Validate the user-supplied data type list.

    ``None`` is accepted and means "use all supported data types"; anything
    else must be a list whose entries are all in ``all_data_type_list``.

    Raises:
        ValueError: if data_type_list is not a list or contains an
            unsupported data type.
    """
    if data_type_list is None:
        # fixed typo in the log message: "defualt" -> "default"
        logger.info(f"data_type_list is None, use default all_data_type_list: {all_data_type_list}")
        return
    if not isinstance(data_type_list, list):
        raise ValueError(f"data_type_list({data_type_list}) is not a list")
    for data_type in data_type_list:
        if data_type not in all_data_type_list:
            raise ValueError(f"data type({data_type}) is not supported, supported data type: {all_data_type_list}")
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
def csv2tensorboard_by_step(
    monitor_path,
    time_start=None,
    time_end=None,
    process_num=1,
    data_type_list=None,
    output_dirpath=None
):
    """Convert monitor csv output to tensorboard event files, in parallel.

    Args:
        monitor_path: root directory containing the per-rank monitor output.
        time_start, time_end: optional bounds forwarded to
            get_target_output_dir to select which output dirs to convert.
        process_num: number of worker processes; must be a positive integer.
        data_type_list: subset of all_data_type_list to convert; None means all.
        output_dirpath: destination directory; when None a timestamped
            sub-directory is created under monitor_path.
    """
    check_process_num(process_num)
    check_data_type_list(data_type_list)
    target_output_dirs = get_target_output_dir(monitor_path, time_start, time_end)
    target_output_dirs = [{"rank": rank, "path": path} for rank, path in target_output_dirs.items()]
    if output_dirpath is None:
        local_tz = pytz.timezone("Asia/Shanghai")  # adjust to the target timezone as needed
        cur_time = datetime.datetime.now(local_tz).strftime("%b%d_%H-%M-%S")
        output_dirpath = os.path.join(monitor_path, f"{cur_time}-csv2tensorboard_by_step")
    create_directory(output_dirpath)

    task_num = len(target_output_dirs)
    # NOTE(review): integer division — when process_num > task_num this is 0
    # and the last process receives all of the tasks; confirm intended.
    task_num_per_pro = task_num // process_num
    target_data_type = data_type_list if data_type_list else all_data_type_list

    processes = []
    for pro_id in range(process_num):
        # each process gets a contiguous slice; the last one takes the remainder
        task_start_id = pro_id * task_num_per_pro
        task_end_id = (pro_id + 1) * task_num_per_pro if pro_id != process_num - 1 else task_num
        task_dirs = target_output_dirs[task_start_id: task_end_id]

        p = Process(target=csv2tb_by_step_work, args=(task_dirs, output_dirpath, target_data_type))
        processes.append(p)
        p.start()
    for p in processes:
        p.join()
    logger.info(f"output has been saved to: {output_dirpath}")
|
|
File without changes
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
distributed:
|
|
2
|
+
- send
|
|
3
|
+
- recv
|
|
4
|
+
- isend
|
|
5
|
+
- irecv
|
|
6
|
+
- batch_isend_irecv
|
|
7
|
+
- broadcast
|
|
8
|
+
- all_reduce
|
|
9
|
+
- reduce
|
|
10
|
+
- scatter
|
|
11
|
+
- reduce_scatter
|
|
12
|
+
- _reduce_scatter_base
|
|
13
|
+
- reduce_scatter_tensor
|
|
14
|
+
- all_gather
|
|
15
|
+
- gather
|
|
16
|
+
- _all_gather_base
|
|
17
|
+
- all_gather_into_tensor
|
|
18
|
+
|
|
19
|
+
|
|
@@ -0,0 +1,283 @@
|
|
|
1
|
+
# Copyright (c) 2024-2025, Huawei Technologies Co., Ltd.
|
|
2
|
+
# All rights reserved.
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
import inspect
|
|
17
|
+
import os
|
|
18
|
+
import re
|
|
19
|
+
|
|
20
|
+
import torch
|
|
21
|
+
import torch.distributed as dist
|
|
22
|
+
import torch.nn as nn
|
|
23
|
+
|
|
24
|
+
from msprobe.core.common.const import MonitorConst
|
|
25
|
+
from msprobe.core.common.file_utils import load_yaml
|
|
26
|
+
from msprobe.pytorch.monitor.module_metric import get_metrics, get_summary_writer_tag_name
|
|
27
|
+
|
|
28
|
+
try:
|
|
29
|
+
import torch_npu
|
|
30
|
+
except ImportError:
|
|
31
|
+
pass
|
|
32
|
+
|
|
33
|
+
# Rank tag used when naming summary-writer entries.  Presumably assigned by
# the monitor before the hooks fire — TODO confirm who sets it.
RANK = None

# Communication ops to wrap, configured in the yaml next to this module.
OpsPath = os.path.join(os.path.dirname(__file__), "distributed_ops.yaml")
WrapDistributedOps = load_yaml(OpsPath).get("distributed", [])

# Call-stack patterns excluded when recording where a collective was issued.
StackBlackListPath = os.path.join(os.path.dirname(__file__), "stack_blacklist.yaml")
StackBlackList = load_yaml(StackBlackListPath).get("stack", [])

# Snapshot of every attribute of torch.distributed, taken at import time
# before any monkey-patching, so wrapped templates can call the originals.
distributed_func = {}
for f in dir(dist):
    distributed_func[f] = getattr(dist, f)

# Original Work.wait, kept so it can be restored and delegated to.
ORIGIN_WAIT = getattr(dist.Work, 'wait')
# Maps an async collective's Work handle to a deferred stats-collection
# callback, executed when wait() completes on that handle.
PENDING_ASYNC_CC_BY_HANDLE = {}
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def get_distributed_ops():
    """Return the distributed ops to hook: the configured op names that
    actually exist on torch.distributed in the current torch build."""
    # needless `global WrapDistributedOps` removed — the name is only read here
    return set(WrapDistributedOps) & set(dir(dist))
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
class DistributedOPTemplate(nn.Module):
    """nn.Module wrapper around one torch.distributed op.

    Wrapping the op as a module lets standard forward pre/post hooks
    observe the communication call; all hook handles are collected in
    ``cc_hooks`` so they can be removed later.
    """

    def __init__(self, op_name, pre_hooks, post_hooks):
        super().__init__()
        self.op_name_ = str(op_name)
        self.__name__ = self.op_name_
        self.cc_hooks = [
            self.register_forward_pre_hook(pre_hook, with_kwargs=True)
            for pre_hook in pre_hooks
        ]
        self.cc_hooks.extend(
            self.register_forward_hook(post_hook, with_kwargs=True)
            for post_hook in post_hooks
        )

    def forward(self, *args, **kwargs):
        # delegate to the original (un-patched) torch.distributed function
        return distributed_func.get(self.op_name_)(*args, **kwargs)
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
class ApiRegistry:
    """Patches torch.distributed so every collective goes through a hooked
    DistributedOPTemplate, and restores the originals on demand."""

    def __init__(self):
        # op name -> original torch.distributed attribute
        self.distributed_attr_origin = {}
        # op name -> hooked DistributedOPTemplate replacement
        self.distributed_attr_hooked = {}

    @staticmethod
    def store_ori_attr(ori_api_group, api_list, api_ori_attr):
        """Save the original attribute of ori_api_group for every api name.

        A dotted name ("sub.op") is resolved one level deep.
        """
        for api in api_list:
            if '.' in api:
                sub_module_name, sub_op = api.rsplit('.', 1)
                sub_module = getattr(ori_api_group, sub_module_name)
                api_ori_attr[api] = getattr(sub_module, sub_op)
            else:
                api_ori_attr[api] = getattr(ori_api_group, api)

    @staticmethod
    def set_api_attr(api_group, attr_dict):
        """Set every entry of attr_dict onto api_group.

        Dotted names are resolved one level deep; a missing sub-module is
        silently skipped.
        """
        for cc_api_name, cc_api_entry_func in attr_dict.items():
            if '.' in cc_api_name:
                sub_module_name, sub_op = cc_api_name.rsplit('.', 1)
                sub_module = getattr(api_group, sub_module_name, None)
                if sub_module is not None:
                    setattr(sub_module, sub_op, cc_api_entry_func)
            else:
                setattr(api_group, cc_api_name, cc_api_entry_func)

    @staticmethod
    def redirect_wait():
        """Replace dist.Work.wait so that, after the original wait returns,
        any pending stats-collection callback for that handle is executed."""
        global ORIGIN_WAIT
        global PENDING_ASYNC_CC_BY_HANDLE

        def wrapped_wait(work):
            # NOTE(review): the inner function intentionally shadows the outer
            # name; the outer `work` argument (dist.Work) is unused.
            def wrapped_wait(*args, **kwargs):
                ORIGIN_WAIT(*args, **kwargs)
                # args[0] is the Work handle (the bound-method receiver)
                if args[0] in PENDING_ASYNC_CC_BY_HANDLE:
                    store_func = PENDING_ASYNC_CC_BY_HANDLE.pop(args[0])
                    store_func()

            return wrapped_wait

        dist.Work.wait = wrapped_wait(dist.Work)

    def redirect_api(self):
        """Install the hooked templates onto dist and dist.distributed_c10d,
        then patch Work.wait."""
        self.set_api_attr(dist, self.distributed_attr_hooked)
        self.set_api_attr(dist.distributed_c10d, self.distributed_attr_hooked)
        self.redirect_wait()

    def restore_api(self):
        """Put the original attributes and the original Work.wait back."""
        self.set_api_attr(dist, self.distributed_attr_origin)
        self.set_api_attr(dist.distributed_c10d, self.distributed_attr_origin)
        setattr(dist.Work, 'wait', ORIGIN_WAIT)

    def initialize_hook(self, pre_hooks, post_hooks):
        """Snapshot the originals and build a hooked template per supported
        op; returns the accumulated hook handles for later removal."""
        self.store_ori_attr(dist, get_distributed_ops(), self.distributed_attr_origin)
        cc_hooks = []
        for op_name in get_distributed_ops():
            self.distributed_attr_hooked[op_name] = DistributedOPTemplate(op_name, pre_hooks, post_hooks)
            cc_hooks.extend(self.distributed_attr_hooked[op_name].cc_hooks)
        return cc_hooks
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
def get_process_group(process_group):
    """Return process_group when it is a real ProcessGroup; otherwise fall
    back to the default (WORLD) group."""
    if isinstance(process_group, dist.ProcessGroup):
        return process_group
    return dist.GroupMember.WORLD
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
def stack_filter(stack):
    """Return False when the stack line matches any blacklist pattern."""
    return not any(re.search(pattern, stack) for pattern in StackBlackList)
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
def get_callstack():
    """Render the current call stack as 'path[line] func' strings, dropping
    frames whose location matches the stack blacklist."""
    frames = []
    for frame_info in inspect.stack():
        path, line, func = frame_info[1], frame_info[2], frame_info[3]
        location = f'{path}[{line}]'
        if stack_filter(location):
            frames.append(f'{location} {func}')
    return frames
|
|
155
|
+
|
|
156
|
+
|
|
157
|
+
@torch.no_grad()
def op_aggregate(op, tensorlist):
    """Aggregate per-micro-step statistics into a single value.

    A bare tensor passes through unchanged and an empty list yields NaN.
    'norm' and 'nans' are summed, 'zeros' and 'mean' are averaged,
    'min'/'max' take the extremum; an unknown op yields NaN.
    """
    if isinstance(tensorlist, torch.Tensor):
        return tensorlist
    if not tensorlist:
        return torch.tensor(torch.nan)
    reducers = {
        'min': min,
        'max': max,
        'norm': sum,
        'zeros': lambda ts: sum(ts) / len(ts),
        'nans': sum,
        'mean': lambda ts: sum(ts) / len(ts),
    }
    reducer = reducers.get(op)
    if reducer is None:
        return torch.tensor(torch.nan)
    return reducer(tensorlist)
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
def update_data(old, new):
    """Merge a fresh {tag: {op: tensor}} mapping into the accumulator.

    Each new tensor is appended to old[tag][op] (lists are created on first
    sight of a tag or op). The mutated `old` is also returned.
    """
    for tag, op2tensor in new.items():
        bucket = old.setdefault(tag, {})
        for op, tensor in op2tensor.items():
            bucket.setdefault(op, []).append(tensor)
    return old
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
def is_target_line(codeline):
    """Decide whether the current call site should be monitored.

    With an empty pattern list every call site is a target; otherwise the
    joined call stack must match at least one of the regex patterns.
    """
    whole_stack = ';'.join(get_callstack())
    if codeline == []:
        return True
    return any(re.search(pattern, whole_stack) for pattern in codeline)
|
|
199
|
+
|
|
200
|
+
|
|
201
|
+
@torch.no_grad()
def catch_data(cc_context, cc_name, ops, args, prefix):
    """Extract tensor arguments of a collective op and record their metrics.

    Plain tensor args are taken as-is; lists of tensors (or of dist.P2POp,
    whose .tensor fields are collected) are stacked into one tensor. Each
    captured tensor is keyed by a summary-writer tag built from cc_name,
    prefix, its position among captured args, and the global RANK. The
    computed metrics are merged into cc_context.data.

    Fixes over the original: an empty list no longer raises IndexError on
    arg[0], and a list of non-tensor items no longer raises NameError (or
    silently reuses a stale stacked value from a previous iteration).
    """
    tensor_args = {}
    for arg in args:
        if isinstance(arg, torch.Tensor):
            key = get_summary_writer_tag_name(cc_name, f'{prefix}_{len(tensor_args)}', RANK)
            tensor_args[key] = arg
        elif isinstance(arg, list) and arg:
            if isinstance(arg[0], torch.Tensor):
                stacked_arg = torch.stack(arg)
            elif isinstance(arg[0], dist.P2POp):
                stacked_arg = torch.stack([p2p_op.tensor for p2p_op in arg])
            else:
                # Lists of non-tensor items carry nothing measurable: skip.
                continue
            key = get_summary_writer_tag_name(cc_name, f'{prefix}_{len(tensor_args)}', RANK)
            tensor_args[key] = stacked_arg

    # 1e-8 is the eps forwarded to metric computation (e.g. near-zero tests).
    new_data = get_metrics(ops, tensor_args, 1e-8)
    cc_context.data = update_data(cc_context.data, new_data)
|
|
218
|
+
|
|
219
|
+
|
|
220
|
+
def create_async_callback_func(context, cc_name, ops, args, prefix):
    """Freeze catch_data's arguments into a deferred, zero-arg callback.

    The returned callable is stored per async work handle and invoked once
    the communication completes, so the data is only captured then.
    """
    def _deferred_catch():
        catch_data(context, cc_name, ops, args, prefix)

    return _deferred_catch
|
|
225
|
+
|
|
226
|
+
|
|
227
|
+
def create_hooks(context, monitor):
    """Build the [pre_hooks, hooks] lists attached to wrapped collective ops.

    Depending on monitor configuration this returns: nothing (rank not
    monitored), only the stack-logging pre-hook (cc_log_only), or the data
    pre-hook (optional) plus the post-hook. Also sets the module-global RANK.
    """
    def cc_log_hook(module, args, kwargs):
        # Log-only mode: remember the (filtered, ';'-joined) call stack of
        # this op; no tensor data is captured.
        stack = ';'.join(get_callstack())
        monitor.cc_logged_stack[module.op_name_].add(stack)
        return

    def cc_pre_hook(module, args, kwargs):
        # Capture input tensors before the collective runs, but only at
        # call sites matching monitor.cc_codeline.
        if not is_target_line(monitor.cc_codeline):
            return
        # Fold keyword-argument values in with the positionals so both are
        # scanned for tensors.
        args = args + tuple(kwargs.values())
        catch_data(context[module.op_name_], module.op_name_, monitor.ops, args, MonitorConst.PREFIX_PRE)
        return

    def cc_hook(module, args, kwargs, out=None):
        # Post-hook: capture outputs/inputs after the collective. For async
        # calls, defer the capture until the matching wait() completes.
        if not is_target_line(monitor.cc_codeline):
            return out
        args = args + tuple(kwargs.values())
        if out: # async
            if isinstance(out, dist.Work):
                # Single async handle: register a deferred capture keyed by
                # the Work object; fired when its wait() is intercepted.
                PENDING_ASYNC_CC_BY_HANDLE[out] = create_async_callback_func(
                    context[module.op_name_],
                    module.op_name_,
                    monitor.ops, args,
                    MonitorConst.PREFIX_POST
                )
            elif isinstance(out, list): # batch_isend_irecv
                # One deferred capture per returned handle; note each
                # callback re-captures the full args tuple.
                for out_element in out:
                    PENDING_ASYNC_CC_BY_HANDLE[out_element] = create_async_callback_func(
                        context[module.op_name_],
                        module.op_name_,
                        monitor.ops, args,
                        MonitorConst.PREFIX_POST
                    )
            return out
        # Synchronous call: capture immediately.
        catch_data(context[module.op_name_], module.op_name_, monitor.ops, args, MonitorConst.PREFIX_POST)
        return out

    global RANK
    pre_hooks = []
    hooks = []
    # NOTE(review): dist.get_rank() is called before the is_initialized()
    # check below — confirm callers only reach this after init, since
    # get_rank() on an uninitialized default group raises.
    RANK = dist.get_rank()
    if dist.is_initialized() and RANK not in monitor.module_rank_list and monitor.module_rank_list != []:
        # This rank is not in the (non-empty) monitored rank list: no hooks.
        return [pre_hooks, hooks]

    if monitor.cc_log_only:
        # Stack logging only — no data-capturing hooks are installed.
        pre_hooks.append(cc_log_hook)
        return [pre_hooks, hooks]

    if monitor.cc_pre_hook:
        pre_hooks.append(cc_pre_hook)

    hooks.append(cc_hook)

    return [pre_hooks, hooks]
|
|
281
|
+
|
|
282
|
+
|
|
283
|
+
# Module-level singleton through which the distributed APIs are hooked
# (redirect_api) and later restored (restore_api).
api_register = ApiRegistry()
|
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
# Copyright (c) 2024-2024, Huawei Technologies Co., Ltd.
|
|
2
|
+
# All rights reserved.
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
import torch
|
|
17
|
+
from torch.autograd.functional import jacobian
|
|
18
|
+
from msprobe.pytorch.common.log import logger
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
@torch.no_grad()
def square_sum(x: torch.Tensor):
    """Return the sum of squared elements of `x` as a 0-dim tensor.

    Annotation fixed: `torch.tensor` is a factory function, not a type;
    the correct annotation is `torch.Tensor`.
    """
    return (x * x).sum()
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
@torch.no_grad()
def get_min(x: torch.Tensor):
    """Return the minimum element of `x` (annotation fixed to torch.Tensor)."""
    return torch.min(x)
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
@torch.no_grad()
def get_mean(x: torch.Tensor):
    """Return the mean of `x`, computed in float64 for accuracy.

    Annotation fixed to `torch.Tensor`.
    """
    return torch.mean(x.to(torch.float64))
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
@torch.no_grad()
def get_norm(x: torch.Tensor):
    """Return the L2 norm of `x`, computed in float64 for accuracy.

    Annotation fixed to `torch.Tensor`.
    """
    return torch.norm(x.to(torch.float64), p=2)
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
@torch.no_grad()
def get_max(x: torch.Tensor):
    """Return the maximum element of `x` (annotation fixed to torch.Tensor)."""
    return torch.max(x)
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
@torch.no_grad()
def get_zeros(x: torch.Tensor, eps: float):
    """Return the fraction of elements of `x` with |value| < eps.

    Annotation fixed to `torch.Tensor`.
    """
    return torch.sum(torch.abs(x) < eps) / x.numel()
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
@torch.no_grad()
def get_sign_matches(x: torch.Tensor, y: torch.Tensor):
    """Return the same-sign ratio of `x` and `y` in [0, 1].

    The mean of the elementwise sign products lies in [-1, 1]; it is mapped
    to [0, 1], so 1.0 means every element pair has matching sign. Zeros have
    sign 0 and pull the ratio toward 0.5. On a RuntimeError (e.g. shape
    mismatch during broadcasting) the error is logged and 0 is returned.

    Annotations fixed to `torch.Tensor` (`torch.tensor` is a factory
    function, not a type).
    """
    xs = x.sign()
    ys = y.sign()
    try:
        same_direction_ratio = ((xs * ys).sum() / ys.numel() + 1) / 2
    except RuntimeError as e:
        logger.info(f"RuntimeError: {e}")
        same_direction_ratio = torch.tensor(0.)
    return same_direction_ratio
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
@torch.no_grad()
|
|
64
|
+
def eff_rank(param: torch.tensor, threshold=1e-10):
|
|
65
|
+
U, S, Vh = torch.linalg.svd(param.float())
|
|
66
|
+
rank = torch.sum(S > threshold)
|
|
67
|
+
return rank
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
# modular neural tangent kernel
@torch.no_grad()
def mNTK(module: torch.nn.Module, x: torch.Tensor):
    """Return J @ J.T, where J is the Jacobian of `module` evaluated at `x`.

    NOTE(review): the Jacobian is taken w.r.t. the input `x`, not the module
    parameters — confirm this matches the intended mNTK definition. The
    `.t()` call implies the Jacobian is 2-D, i.e. 1-D input and output.

    Annotation fixed to `torch.Tensor`.
    """
    jac = jacobian(module, x)
    mntk = torch.matmul(jac, jac.t())
    return mntk
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
@torch.no_grad()
def power_iteration(a, num_iterations):
    """Estimate the dominant eigenvalue of square matrix `a` by power iteration.

    Starts from a random vector and returns the Rayleigh quotient b.T @ a @ b
    after `num_iterations` steps (a 1x1 tensor).

    Fix: when the iterate collapses to zero norm, the original assigned the
    Python scalar 0 to `b`, which crashes torch.matmul on the next iteration
    (and `.t()` afterwards); a zero tensor of the same shape is used instead.
    """
    b = torch.randn(a.size(1), 1)
    for _ in range(num_iterations):
        b = torch.matmul(a, b)
        b_norm = torch.norm(b)
        b = b / b_norm if b_norm != 0 else torch.zeros_like(b)
    eigval = torch.matmul(torch.matmul(b.t(), a), b)
    return eigval
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
@torch.no_grad()
def lambda_max_subsample(module: torch.nn.Module, x: torch.Tensor, num_iterations=100, subsample_size=None):
    """Estimate the largest eigenvalue of the mNTK on a random submatrix.

    Builds the mNTK of `module` at `x`, draws `subsample_size` random
    row/column indices (defaulting to the full size, i.e. no reduction),
    keeps the corresponding principal submatrix, and runs power iteration
    on it for `num_iterations` steps.

    Annotation fixed to `torch.Tensor`.
    """
    mntk = mNTK(module, x)
    if subsample_size is None:
        subsample_size = min(mntk.size(0), mntk.size(1))
    idx = torch.randperm(mntk.size(0))[:subsample_size]
    subsampled = mntk[idx, :]
    subsampled = subsampled[:, idx]
    eigval = power_iteration(subsampled, num_iterations)
    return eigval
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
@torch.no_grad()
def cal_histc(tensor_cal, bins_total, min_val, max_val):
    """Histogram of `tensor_cal` over [min_val, max_val] with `bins_total` bins."""
    hist = torch.histc(tensor_cal, bins=bins_total, min=min_val, max=max_val)
    return hist
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
@torch.no_grad()
def get_nans(t):
    """Return the number of NaN elements in `t` as a 0-dim tensor."""
    nan_mask = torch.isnan(t)
    return nan_mask.sum()
|