mindstudio-probe 1.2.1__py3-none-any.whl → 1.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (177) hide show
  1. {mindstudio_probe-1.2.1.dist-info → mindstudio_probe-1.3.0.dist-info}/METADATA +3 -3
  2. {mindstudio_probe-1.2.1.dist-info → mindstudio_probe-1.3.0.dist-info}/RECORD +168 -150
  3. msprobe/README.md +27 -22
  4. msprobe/core/common/const.py +129 -60
  5. msprobe/core/common/decorator.py +50 -0
  6. msprobe/core/common/exceptions.py +3 -1
  7. msprobe/core/common/file_utils.py +25 -2
  8. msprobe/core/common/inplace_ops.yaml +1 -0
  9. msprobe/core/common/utils.py +43 -33
  10. msprobe/core/compare/acc_compare.py +43 -74
  11. msprobe/core/compare/check.py +2 -6
  12. msprobe/core/compare/highlight.py +2 -0
  13. msprobe/core/compare/layer_mapping/data_scope_parser.py +1 -1
  14. msprobe/core/compare/layer_mapping/layer_mapping.py +2 -1
  15. msprobe/core/compare/merge_result/merge_result.py +16 -9
  16. msprobe/core/compare/merge_result/utils.py +81 -0
  17. msprobe/core/compare/multiprocessing_compute.py +19 -12
  18. msprobe/core/compare/npy_compare.py +30 -12
  19. msprobe/core/compare/utils.py +30 -10
  20. msprobe/core/data_dump/api_registry.py +176 -0
  21. msprobe/core/data_dump/data_collector.py +58 -13
  22. msprobe/core/data_dump/data_processor/base.py +94 -10
  23. msprobe/core/data_dump/data_processor/factory.py +3 -0
  24. msprobe/core/data_dump/data_processor/mindspore_processor.py +33 -33
  25. msprobe/core/data_dump/data_processor/pytorch_processor.py +99 -18
  26. msprobe/core/data_dump/json_writer.py +61 -40
  27. msprobe/core/grad_probe/constant.py +1 -0
  28. msprobe/core/grad_probe/grad_compare.py +1 -1
  29. msprobe/core/overflow_check/abnormal_scene.py +2 -0
  30. msprobe/docs/01.installation.md +27 -1
  31. msprobe/docs/02.config_introduction.md +27 -23
  32. msprobe/docs/03.config_examples.md +24 -0
  33. msprobe/docs/05.data_dump_PyTorch.md +103 -16
  34. msprobe/docs/06.data_dump_MindSpore.md +76 -32
  35. msprobe/docs/07.accuracy_checker_PyTorch.md +11 -1
  36. msprobe/docs/08.accuracy_checker_online_PyTorch.md +3 -1
  37. msprobe/docs/09.accuracy_checker_MindSpore.md +5 -3
  38. msprobe/docs/10.accuracy_compare_PyTorch.md +59 -33
  39. msprobe/docs/11.accuracy_compare_MindSpore.md +40 -16
  40. msprobe/docs/12.overflow_check_PyTorch.md +3 -1
  41. msprobe/docs/13.overflow_check_MindSpore.md +4 -2
  42. msprobe/docs/14.data_parse_PyTorch.md +1 -7
  43. msprobe/docs/18.online_dispatch.md +1 -1
  44. msprobe/docs/19.monitor.md +332 -273
  45. msprobe/docs/21.visualization_PyTorch.md +42 -13
  46. msprobe/docs/22.visualization_MindSpore.md +43 -13
  47. msprobe/docs/23.generate_operator_PyTorch.md +9 -9
  48. msprobe/docs/27.dump_json_instruction.md +301 -27
  49. msprobe/docs/28.debugger_save_instruction.md +94 -0
  50. msprobe/docs/28.kernel_dump_MindSpore.md +69 -0
  51. msprobe/docs/29.data_dump_MSAdapter.md +229 -0
  52. msprobe/docs/30.overflow_check_MSAdapter.md +31 -0
  53. msprobe/docs/FAQ.md +3 -11
  54. msprobe/docs/img/compare_result.png +0 -0
  55. msprobe/docs/img/merge_result.png +0 -0
  56. msprobe/docs/img/monitor/step_count_per_record.png +0 -0
  57. msprobe/docs/img/visualization/vis_browser_1.png +0 -0
  58. msprobe/docs/img/visualization/vis_match_info.png +0 -0
  59. msprobe/docs/img/visualization/vis_precision_info.png +0 -0
  60. msprobe/docs/img/visualization/vis_search_info.png +0 -0
  61. msprobe/docs/img/visualization/vis_show_info.png +0 -0
  62. msprobe/docs/img/visualization/vis_showcase.png +0 -0
  63. msprobe/docs/img/visualization/vis_unmatch_info.png +0 -0
  64. msprobe/mindspore/__init__.py +4 -2
  65. msprobe/mindspore/api_accuracy_checker/api_accuracy_checker.py +32 -7
  66. msprobe/mindspore/api_accuracy_checker/api_runner.py +70 -22
  67. msprobe/mindspore/api_accuracy_checker/base_compare_algorithm.py +2 -1
  68. msprobe/mindspore/api_accuracy_checker/bench_functions/flash_attention_score.py +602 -0
  69. msprobe/mindspore/api_accuracy_checker/bench_functions/fusion_operator.py +41 -0
  70. msprobe/mindspore/api_accuracy_checker/compute_element.py +47 -1
  71. msprobe/mindspore/api_accuracy_checker/data_manager.py +2 -1
  72. msprobe/mindspore/api_accuracy_checker/multi_api_accuracy_checker.py +2 -1
  73. msprobe/mindspore/api_accuracy_checker/torch_mindtorch_importer.py +130 -0
  74. msprobe/mindspore/api_accuracy_checker/type_mapping.py +24 -1
  75. msprobe/mindspore/api_accuracy_checker/utils.py +6 -1
  76. msprobe/mindspore/common/const.py +61 -0
  77. msprobe/mindspore/common/utils.py +48 -18
  78. msprobe/mindspore/compare/ms_compare.py +27 -19
  79. msprobe/mindspore/compare/ms_graph_compare.py +6 -5
  80. msprobe/mindspore/debugger/debugger_config.py +31 -6
  81. msprobe/mindspore/debugger/precision_debugger.py +45 -14
  82. msprobe/mindspore/dump/dump_tool_factory.py +5 -3
  83. msprobe/mindspore/dump/hook_cell/api_register.py +142 -0
  84. msprobe/mindspore/dump/hook_cell/hook_cell.py +9 -10
  85. msprobe/mindspore/dump/hook_cell/support_wrap_ops.yaml +24 -26
  86. msprobe/mindspore/dump/jit_dump.py +21 -15
  87. msprobe/mindspore/dym_loader/hook_dynamic_loader.cc +22 -56
  88. msprobe/mindspore/dym_loader/hook_dynamic_loader.h +0 -1
  89. msprobe/mindspore/free_benchmark/api_pynative_self_check.py +10 -6
  90. msprobe/mindspore/free_benchmark/perturbation/perturbation_factory.py +4 -2
  91. msprobe/mindspore/free_benchmark/self_check_tool_factory.py +6 -3
  92. msprobe/mindspore/grad_probe/global_context.py +2 -0
  93. msprobe/mindspore/grad_probe/grad_analyzer.py +2 -1
  94. msprobe/mindspore/grad_probe/hook.py +2 -4
  95. msprobe/mindspore/monitor/anomaly_detect.py +404 -0
  96. msprobe/mindspore/monitor/distributed/__init__.py +0 -0
  97. msprobe/mindspore/monitor/distributed/distributed_ops.yaml +15 -0
  98. msprobe/mindspore/monitor/distributed/stack_blacklist.yaml +5 -0
  99. msprobe/mindspore/monitor/distributed/wrap_distributed.py +300 -0
  100. msprobe/mindspore/monitor/features.py +63 -0
  101. msprobe/mindspore/monitor/module_hook.py +873 -0
  102. msprobe/mindspore/monitor/module_spec_verifier.py +94 -0
  103. msprobe/mindspore/monitor/utils.py +309 -0
  104. msprobe/mindspore/ms_config.py +8 -2
  105. msprobe/mindspore/overflow_check/overflow_check_tool_factory.py +5 -3
  106. msprobe/mindspore/service.py +114 -34
  107. msprobe/pytorch/__init__.py +0 -1
  108. msprobe/pytorch/api_accuracy_checker/compare/api_precision_compare.py +3 -6
  109. msprobe/pytorch/api_accuracy_checker/generate_op_script/op_generator.py +12 -7
  110. msprobe/pytorch/api_accuracy_checker/generate_op_script/operator_replication.template +2 -2
  111. msprobe/pytorch/api_accuracy_checker/run_ut/multi_run_ut.py +4 -5
  112. msprobe/pytorch/api_accuracy_checker/run_ut/run_overflow_check.py +5 -5
  113. msprobe/pytorch/api_accuracy_checker/run_ut/run_ut.py +25 -6
  114. msprobe/pytorch/api_accuracy_checker/run_ut/run_ut_utils.py +28 -19
  115. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/attl.py +3 -1
  116. msprobe/pytorch/bench_functions/apply_adam.py +215 -0
  117. msprobe/pytorch/bench_functions/group_norm_silu.py +27 -0
  118. msprobe/pytorch/{parse.py → bench_functions/mish.py} +6 -4
  119. msprobe/pytorch/bench_functions/moe_gating_top_k_softmax.py +50 -0
  120. msprobe/pytorch/bench_functions/sort_v2.py +21 -0
  121. msprobe/pytorch/common/utils.py +97 -4
  122. msprobe/pytorch/debugger/debugger_config.py +19 -9
  123. msprobe/pytorch/debugger/precision_debugger.py +24 -1
  124. msprobe/pytorch/dump/module_dump/module_dump.py +4 -3
  125. msprobe/pytorch/dump/module_dump/module_processer.py +21 -35
  126. msprobe/pytorch/free_benchmark/common/utils.py +1 -1
  127. msprobe/pytorch/free_benchmark/compare/single_benchmark.py +1 -1
  128. msprobe/pytorch/free_benchmark/perturbed_layers/npu/add_noise.py +3 -3
  129. msprobe/pytorch/free_benchmark/perturbed_layers/npu/bit_noise.py +3 -3
  130. msprobe/pytorch/free_benchmark/perturbed_layers/npu/change_value.py +1 -1
  131. msprobe/pytorch/free_benchmark/perturbed_layers/npu/improve_precision.py +1 -1
  132. msprobe/pytorch/free_benchmark/result_handlers/check_handler.py +1 -1
  133. msprobe/pytorch/function_factory.py +8 -2
  134. msprobe/pytorch/grad_probe/grad_monitor.py +2 -2
  135. msprobe/pytorch/hook_module/api_register.py +131 -0
  136. msprobe/pytorch/hook_module/hook_module.py +19 -14
  137. msprobe/pytorch/hook_module/register_optimizer_hook.py +2 -1
  138. msprobe/pytorch/hook_module/support_wrap_ops.yaml +173 -75
  139. msprobe/pytorch/monitor/anomaly_detect.py +14 -29
  140. msprobe/pytorch/monitor/csv2tb.py +18 -14
  141. msprobe/pytorch/monitor/distributed/wrap_distributed.py +8 -2
  142. msprobe/pytorch/monitor/module_hook.py +238 -193
  143. msprobe/pytorch/monitor/module_metric.py +9 -6
  144. msprobe/pytorch/monitor/optimizer_collect.py +100 -67
  145. msprobe/pytorch/monitor/unittest/test_monitor.py +1 -1
  146. msprobe/pytorch/monitor/utils.py +76 -44
  147. msprobe/pytorch/online_dispatch/compare.py +0 -2
  148. msprobe/pytorch/online_dispatch/dispatch.py +9 -0
  149. msprobe/pytorch/online_dispatch/dump_compare.py +3 -0
  150. msprobe/pytorch/online_dispatch/utils.py +3 -0
  151. msprobe/pytorch/parse_tool/lib/interactive_cli.py +1 -6
  152. msprobe/pytorch/parse_tool/lib/utils.py +2 -1
  153. msprobe/pytorch/pt_config.py +30 -29
  154. msprobe/pytorch/service.py +114 -32
  155. msprobe/visualization/builder/graph_builder.py +75 -10
  156. msprobe/visualization/builder/msprobe_adapter.py +7 -6
  157. msprobe/visualization/compare/graph_comparator.py +42 -38
  158. msprobe/visualization/compare/mode_adapter.py +0 -19
  159. msprobe/visualization/graph/base_node.py +11 -3
  160. msprobe/visualization/graph/distributed_analyzer.py +71 -3
  161. msprobe/visualization/graph/graph.py +0 -11
  162. msprobe/visualization/graph/node_op.py +4 -3
  163. msprobe/visualization/graph_service.py +4 -5
  164. msprobe/visualization/utils.py +12 -35
  165. msprobe/mindspore/dump/hook_cell/api_registry.py +0 -205
  166. msprobe/mindspore/dump/hook_cell/wrap_api.py +0 -212
  167. msprobe/pytorch/hook_module/api_registry.py +0 -166
  168. msprobe/pytorch/hook_module/wrap_distributed.py +0 -75
  169. msprobe/pytorch/hook_module/wrap_functional.py +0 -66
  170. msprobe/pytorch/hook_module/wrap_npu_custom.py +0 -85
  171. msprobe/pytorch/hook_module/wrap_tensor.py +0 -69
  172. msprobe/pytorch/hook_module/wrap_torch.py +0 -84
  173. msprobe/pytorch/hook_module/wrap_vf.py +0 -60
  174. {mindstudio_probe-1.2.1.dist-info → mindstudio_probe-1.3.0.dist-info}/LICENSE +0 -0
  175. {mindstudio_probe-1.2.1.dist-info → mindstudio_probe-1.3.0.dist-info}/WHEEL +0 -0
  176. {mindstudio_probe-1.2.1.dist-info → mindstudio_probe-1.3.0.dist-info}/entry_points.txt +0 -0
  177. {mindstudio_probe-1.2.1.dist-info → mindstudio_probe-1.3.0.dist-info}/top_level.txt +0 -0
@@ -19,6 +19,7 @@ import os
19
19
  import traceback
20
20
 
21
21
  import mindspore as ms
22
+
22
23
  from msprobe.core.common.const import Const
23
24
  from msprobe.core.common.exceptions import DistributedNotInitializedError
24
25
  from msprobe.core.common.file_utils import check_path_length, load_yaml
@@ -27,7 +28,7 @@ from msprobe.mindspore.common.const import FreeBenchmarkConst
27
28
  from msprobe.mindspore.common.log import logger
28
29
  from msprobe.mindspore.common.utils import get_rank_if_initialized
29
30
  from msprobe.mindspore.debugger.debugger_config import DebuggerConfig
30
- from msprobe.mindspore.dump.hook_cell.api_registry import api_register
31
+ from msprobe.mindspore.dump.hook_cell.api_register import get_api_register
31
32
  from msprobe.mindspore.dump.hook_cell.hook_cell import HOOKCell
32
33
  from msprobe.mindspore.free_benchmark.common.config import Config
33
34
  from msprobe.mindspore.free_benchmark.common.handler_params import HandlerParams
@@ -37,6 +38,9 @@ from msprobe.mindspore.free_benchmark.perturbation.perturbation_factory import P
37
38
  from msprobe.mindspore.runtime import Runtime
38
39
 
39
40
 
41
+ _api_register = get_api_register()
42
+
43
+
40
44
  class ApiPyNativeSelfCheck:
41
45
  def __init__(self, config: DebuggerConfig):
42
46
  Config.is_enable = True
@@ -60,8 +64,8 @@ class ApiPyNativeSelfCheck:
60
64
  self.store_original_func()
61
65
 
62
66
  def handle(self):
63
- api_register.initialize_hook(self.build_hook)
64
- api_register.api_set_hook_func()
67
+ _api_register.initialize_hook(self.build_hook)
68
+ _api_register.register_all_api()
65
69
 
66
70
  def build_hook(self, api_name):
67
71
  def pre_hook(cell, input_data):
@@ -166,13 +170,13 @@ def check_self(api_name_with_id, output, ori_func, *args, **kwargs):
166
170
  return ret
167
171
 
168
172
  logger.info(f"[{api_name_with_id}] is {Config.handler_type}ing.")
169
- api_register.api_set_ori_func()
173
+ _api_register.restore_all_api()
170
174
 
171
175
  try:
172
176
  perturbation = PerturbationFactory.create(api_name_with_id)
173
177
  params.fuzzed_result = perturbation.handle(params)
174
178
  if params.fuzzed_result is False:
175
- api_register.api_set_hook_func()
179
+ _api_register.register_all_api()
176
180
  return ret
177
181
  if Config.stage == Const.BACKWARD:
178
182
  params.original_result = Tools.get_grad(params.original_func, *params.args, **params.kwargs)
@@ -183,7 +187,7 @@ def check_self(api_name_with_id, output, ori_func, *args, **kwargs):
183
187
  logger.error(f"[{api_name_with_id}] Error: {str(e)}")
184
188
  logger.error(f"[{api_name_with_id}] Error detail: {traceback.format_exc()}")
185
189
 
186
- api_register.api_set_hook_func()
190
+ _api_register.register_all_api()
187
191
  return ret
188
192
 
189
193
 
@@ -1,4 +1,4 @@
1
- # Copyright (c) 2024-2024, Huawei Technologies Co., Ltd.
1
+ # Copyright (c) 2024-2025, Huawei Technologies Co., Ltd.
2
2
  # All rights reserved.
3
3
  #
4
4
  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,6 +14,7 @@
14
14
  # limitations under the License.
15
15
 
16
16
  from msprobe.mindspore.common.const import FreeBenchmarkConst
17
+ from msprobe.mindspore.common.log import logger
17
18
  from msprobe.mindspore.free_benchmark.common.config import Config
18
19
  from msprobe.mindspore.free_benchmark.perturbation.add_noise import AddNoisePerturbation
19
20
  from msprobe.mindspore.free_benchmark.perturbation.bit_noise import BitNoisePerturbation
@@ -41,4 +42,5 @@ class PerturbationFactory:
41
42
  if perturbation:
42
43
  return perturbation(api_name_with_id)
43
44
  else:
44
- raise Exception(f'{Config.pert_type} is a invalid perturbation type')
45
+ logger.error(f'{Config.pert_type} is a invalid perturbation type')
46
+ raise ValueError
@@ -1,4 +1,4 @@
1
- # Copyright (c) 2024-2024, Huawei Technologies Co., Ltd.
1
+ # Copyright (c) 2024-2025, Huawei Technologies Co., Ltd.
2
2
  # All rights reserved.
3
3
  #
4
4
  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,6 +14,7 @@
14
14
  # limitations under the License.
15
15
 
16
16
  from msprobe.mindspore.common.const import Const
17
+ from msprobe.core.common.log import logger
17
18
  from msprobe.mindspore.debugger.debugger_config import DebuggerConfig
18
19
  from msprobe.mindspore.free_benchmark.api_pynative_self_check import ApiPyNativeSelfCheck
19
20
 
@@ -41,8 +42,10 @@ class SelfCheckToolFactory:
41
42
  def create(config: DebuggerConfig):
42
43
  tool = SelfCheckToolFactory.tools.get(config.level)
43
44
  if not tool:
44
- raise Exception(f"{config.level} is not supported.")
45
+ logger.error(f"{config.level} is not supported.")
46
+ raise ValueError
45
47
  tool = tool.get(config.execution_mode)
46
48
  if not tool:
47
- raise Exception(f"Task free_benchmark is not supported in this mode: {config.execution_mode}.")
49
+ logger.error(f"Task free_benchmark is not supported in this mode: {config.execution_mode}.")
50
+ raise ValueError
48
51
  return tool(config)
@@ -16,6 +16,7 @@
16
16
  import os
17
17
  import threading
18
18
  from typing import Dict, Union, Tuple
19
+ import time
19
20
 
20
21
  from msprobe.core.common.utils import is_int
21
22
  from msprobe.core.common.file_utils import create_directory, check_path_before_create
@@ -68,6 +69,7 @@ class GlobalContext:
68
69
  create_directory(self._setting.get(GradConst.OUTPUT_PATH))
69
70
  else:
70
71
  logger.warning("The output_path exists, the data will be covered.")
72
+ self._setting[GradConst.TIME_STAMP] = str(int(time.time()))
71
73
 
72
74
  def get_context(self, key: str):
73
75
  if key not in self._setting:
@@ -111,7 +111,8 @@ class CSVGenerator(Process):
111
111
  output_path = context.get_context(GradConst.OUTPUT_PATH)
112
112
  self.level = context.get_context(GradConst.LEVEL)
113
113
  self.bounds = context.get_context(GradConst.BOUNDS)
114
- self.dump_dir = f"{output_path}/rank{rank_id}/Dump/"
114
+ time_stamp = context.get_context(GradConst.TIME_STAMP)
115
+ self.dump_dir = f"{output_path}/rank{rank_id}/Dump{time_stamp}/"
115
116
  self.save_dir = f"{output_path}/rank{rank_id}/"
116
117
  self.current_step = None
117
118
  self.stop_event = multiprocessing.Event()
@@ -49,12 +49,10 @@ class HookInput:
49
49
  self.param_list = grad_context.get_context(GradConst.PARAM_LIST)
50
50
  self.rank_id = get_rank_id()
51
51
  output_path = grad_context.get_context(GradConst.OUTPUT_PATH)
52
- self.dump_dir = os.path.join(output_path, f"rank{self.rank_id}", "Dump")
52
+ time_stamp = grad_context.get_context(GradConst.TIME_STAMP)
53
+ self.dump_dir = os.path.join(output_path, f"rank{self.rank_id}", f"Dump{time_stamp}")
53
54
  self.save_dir = os.path.join(output_path, f"rank{self.rank_id}")
54
55
  self.step_finish_flag = os.path.join(self.dump_dir, GradConst.STEP_FINISH)
55
- if os.path.exists(self.save_dir):
56
- logger.warning(f"Delete existing path {self.save_dir}.")
57
- remove_path(self.save_dir)
58
56
  self.level = grad_context.get_context(GradConst.LEVEL)
59
57
  self.bounds = grad_context.get_context(GradConst.BOUNDS)
60
58
  self.mode = mindspore.get_context("mode")
@@ -0,0 +1,404 @@
1
+ # Copyright (c) 2024-2025, Huawei Technologies Co., Ltd.
2
+ # All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import itertools
17
+ import os
18
+ import sys
19
+ import statistics as st
20
+ from abc import ABC
21
+ from dataclasses import dataclass, field
22
+ from typing import List
23
+ from collections import defaultdict
24
+
25
+ import pandas as pd
26
+
27
+ from mindspore import ops
28
+ from mindspore import _no_grad
29
+ from msprobe.core.common.log import logger
30
+ from msprobe.core.common.file_utils import change_mode, create_directory, write_df_to_csv
31
+ from msprobe.core.common.const import FileCheckConst, MonitorConst
32
+
33
+
34
class ScanRule(ABC):
    """Interface for anomaly scan rules applied to a metric history and a current value."""

    name = "ScanRule"

    def apply(self, history, cur):
        raise NotImplementedError("abstract method apply is not implemented")


class AnomalyTurbulence(ScanRule):
    """Flags a current value that deviates from the baseline by more than a relative threshold."""

    name = "AnomalyTurbulence"

    def __init__(self, threshold) -> None:
        self.threshold = threshold

    def apply(self, history, cur):
        # History may be a list of samples (use its mean) or a single scalar baseline.
        baseline = st.mean(history) if isinstance(history, list) else history
        bound = baseline + baseline * self.threshold
        # A positive baseline shifts the bound up, a non-positive one shifts it
        # down, so the anomalous side of the bound flips with the sign.
        return cur > bound if baseline > 0 else cur < bound


class AnomalyScanner:
    """Builds ScanRule instances from spec dicts and evaluates them over metrics."""

    @staticmethod
    def load_rules(specs: List[dict]):
        """
        specs: [{"rule_name": "AnomalyTurbulence", "args": {"threshold": 0.5}}]
        """
        if specs is None:
            return []
        loaded = []
        for spec in specs:
            # dict.get returns None for missing keys; both are required.
            cls_name = spec.get("rule_name")
            cls_args = spec.get("args")
            if cls_name is None or cls_args is None:
                logger.warning(f"Spec is missing required keys: {spec}")
                continue

            # Rule classes are resolved by name from this very module.
            module = sys.modules.get(__name__)
            try:
                rule_cls = getattr(module, cls_name)
            except AttributeError:
                logger.error(f"Rule class '{cls_name}' not found in the current module.")
                continue

            try:
                loaded.append(rule_cls(**cls_args))
            except Exception as e:
                logger.error(f"Error creating instance of rule '{cls_name}': {e}")
                continue

        return loaded

    @staticmethod
    def scan(scan_rules: List[ScanRule], history, cur):
        """Return (result, rule_name) for the first rule that fires, else (falsy, None)."""
        verdict = False
        for rule in scan_rules:
            verdict = rule.apply(history, cur)
            if verdict:
                return verdict, rule.name
        return verdict, None
101
+
102
+
103
class BCOLORS:
    """ANSI terminal escape codes used to colorize anomaly log messages."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # resets all formatting back to the terminal default
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
113
+
114
+
115
class AnomalyDataFactory(ABC):
    """Factory that stamps detected anomalies with rank / pipeline-stage / call-site context."""

    def __init__(self, rank, pp_stage, group_mates):
        super().__init__()
        self.rank = rank
        self.pp_stage = pp_stage
        self.group_mates = group_mates
        self.micro_step = 0
        self.name2callid = {}

    def set_call_id(self, name2callid):
        """Refresh the tag-name -> call id mapping from the current GradContext."""
        self.name2callid = name2callid

    def create(self, tag, message, step):
        """Build a GradAnomalyData instance once an anomaly has been detected.

        tag (tuple): metric tag ('0:1.post_attention_norm.weight/rank0/pre_grad', 'min')
        message (str): anomaly detect message
        step (int): training step
        """
        if not isinstance(tag, tuple) or len(tag) != 2:
            raise ValueError("tag must be a tuple with length 2")
        tag_name = tag[0]
        param_name = tag_name.split('/')[0]
        # Unknown tags map to call id -1.
        call_id = self.name2callid.get(tag_name, -1)
        # Names prefixed with a vpp stage look like "<stage><sep><param>".
        has_vpp_prefix = MonitorConst.NAME_SEP in param_name
        vpp_stage = int(param_name.split(MonitorConst.NAME_SEP)[0]) if has_vpp_prefix else 0

        return GradAnomalyData(
            self.rank,
            step,
            self.micro_step,
            self.pp_stage,
            vpp_stage,
            call_id,
            tag_name,
            message,
            self.group_mates
        )
156
+
157
+
158
class TrainStage:
    """Integer codes for the training phase a metric key belongs to."""
    DEFAULT_STAGE = -1  # unknown / unclassified metric key
    FORWARD_STAGE = 0
    BACKWARD_STAGE = 1
    OPTIMIZER_STAGE = 2
163
+
164
+
165
# Metric-key -> training-stage lookup tables consumed by
# GradAnomalyData.get_train_stage: the last path segment of a tag name
# (e.g. ".../input", ".../post_grad", ".../exp_avg_sq") selects the stage.
FORWARD_KEY = [MonitorConst.ACTV_IN, MonitorConst.ACTV_OUT]
BACKWARD_KEY = [MonitorConst.ACTVGRAD_IN, MonitorConst.ACTVGRAD_OUT,
                MonitorConst.PRE_GRAD, MonitorConst.POST_GRAD, MonitorConst.ACC_GRAD]
OPTIMIZER_KEY = [MonitorConst.EXP_AVG, MonitorConst.EXP_AVG_SQ]
TRAIN_STAGE = {
    **{key_: TrainStage.FORWARD_STAGE for key_ in FORWARD_KEY},
    **{key_: TrainStage.BACKWARD_STAGE for key_ in BACKWARD_KEY},
    **{key_: TrainStage.OPTIMIZER_STAGE for key_ in OPTIMIZER_KEY}
}
174
+
175
+
176
@dataclass(eq=True)
class GradAnomalyData:
    """Record of one detected gradient anomaly, orderable by training priority."""
    rank: int = 0        # global rank that observed the anomaly
    step: int = 0        # training step
    micro_step: int = 0  # micro-batch index within the step
    pp_stage: int = 0    # pipeline-parallel stage
    vpp_stage: int = 0   # virtual pipeline-parallel stage
    call_id: int = 0     # hook call order within the step
    tag_name: str = field(default=None, compare=False)
    message: str = field(default="", compare=False)
    group_mates: list = field(default=None, compare=False)

    def __lt__(self, other):
        """
        Custom comparison used to order GradAnomalyData instances.
        Rules:
          smaller step and micro_step sort first;
          in the forward stage smaller vpp/pp stages sort first, in other
          stages larger vpp/pp stages sort first;
          smaller call_id sorts first.
        """
        if not isinstance(other, GradAnomalyData):
            return NotImplemented

        self_train_stage = self.get_train_stage(self.tag_name)
        other_train_stage = self.get_train_stage(other.tag_name)

        def vpp_pp_comparator(anomaly):
            """
            Determine the priority rule for vpp and pp based on train stage
            Forward stage prefers smaller vpp and pp
            Other stages prefer larger vpp and pp
            """
            if self_train_stage == TrainStage.FORWARD_STAGE:
                return anomaly.vpp_stage, anomaly.pp_stage
            else:
                return -anomaly.vpp_stage, -anomaly.pp_stage

        self_cmp = [self.step, self.micro_step, self_train_stage, *vpp_pp_comparator(self), self.call_id]
        other_cmp = [other.step, other.micro_step, other_train_stage, *vpp_pp_comparator(other), other.call_id]
        return self_cmp < other_cmp

    def __le__(self, other):
        if not isinstance(other, GradAnomalyData):
            return NotImplemented
        return self == other or self < other

    @staticmethod
    def get_train_stage(tag_name):
        """
        :param tag_name: "0:fc2_0/rank0/input", "0:fc1.weight/rank0/post_grad", "0:fc2.weight/rank0/exp_avg_sq"
        :return: int, if forward return 0; if backward return 1; if optimizer return 2
        """
        key_ = tag_name.split("/")[-1]
        return TRAIN_STAGE.get(key_, TrainStage.DEFAULT_STAGE)

    def to_dict(self):
        return self.__dict__

    def get_key(self):
        # e.g. 0:1.self_attention.core_attention_flash_0/rank0/input_grad
        return ''.join([str(self.tag_name), "_step_", str(self.step), "_call_", str(self.call_id)])
237
+
238
+
239
@dataclass
class WriterInput:
    """Configuration bundle handed to metric writers."""
    path: str                                   # output directory for metric records
    ad_rules: list                              # anomaly-detection ScanRule instances
    job_id: str                                 # identifier of the training job
    anomaly_factory: AnomalyDataFactory = None  # builds GradAnomalyData on alerts; None disables dumping
    ndigits: int = 6                            # rounding precision for written values
    step_count_per_record: int = 1              # number of steps aggregated into one record file
247
+
248
+
249
class BaseWriterWithAD:
    """Metric-writer base class that runs anomaly-detection rules on every scalar."""

    def __init__(self, writer_input: WriterInput):
        # tag -> {'avg': running mean, 'count': number of samples folded in}
        self.tag2scalars = {}
        self.ad_rules = writer_input.ad_rules
        self.job_id = writer_input.job_id
        self.anomaly_factory = writer_input.anomaly_factory
        self.anomalies = []
        self.ndigits = writer_input.ndigits

    def get_anomalies(self):
        """Return the list of anomalies detected so far.
        """
        return self.anomalies

    def clear_anomalies(self):
        self.anomalies.clear()

    def add_scalar(self, tag, scalar_value, global_step=None, need_explain=False):
        """If an anomaly is detected, the anomaly information is recorded and added to self.anomalies.
        Args:
            tag (tuple): tuple of tag_name and tag like ('0:1.post_attention_norm.weight/rank0/pre_grad', 'min').
            scalar_value (float): scalar_value.
            global_step (int): global_step.
        Returns:
            None
        """
        detected = False
        if self.ad_rules:
            # The rules compare the new value against the running average so far.
            avg = self._update_tag2scalars(tag, scalar_value)
            detected, rule_name = self._ad(scalar_value, history=avg)
        if detected:
            exception_message = f"Rule {rule_name} reports anomaly signal in {tag} at step {global_step}."
            logger.info(f"{BCOLORS.WARNING}> {exception_message}{BCOLORS.ENDC}")
            # append to self.anomalies for dump
            if self.anomaly_factory:
                self.anomalies.append(self.anomaly_factory.create(tag, exception_message, global_step))

    def write_metrics(self, op_list, metric_value, step, prefix='', need_explain=False):
        # NOTE(review): `prefix` is unused here; subclasses (e.g. the CSV writer)
        # consume it after delegating to this method.
        if not metric_value:
            return
        tensors = []
        # Tags are the cartesian product of metric names and ops, matching the
        # flattened order in which tensors are collected below.
        tags = list(itertools.product(metric_value.keys(), op_list))
        for op2tensor in metric_value.values():
            tensors.extend(op2tensor.values())
        with _no_grad():
            # Stack once and convert to a plain Python list to minimize device syncs.
            metric_list = ops.stack(tensors).tolist() if tensors else []
            for tag, metric in zip(tags, metric_list):
                self.add_scalar(tag, metric, step, need_explain)

    def _ad(self, scalar_value, history):
        return AnomalyScanner.scan(self.ad_rules, history, cur=scalar_value)

    def _update_tag2scalars(self, tag, scalar_value):
        """Update the average and count of a scalar value associated with a tag.

        This method is used to maintain a running average of scalar values for each tag.


        Args:
            tag (str): The tag identifier.
            scalar_value (float): The scalar value to be added.

        Returns:
            float: The average value before update.
        """
        if tag not in self.tag2scalars:
            # First sample: seed the average with the value itself (count 0 so the
            # update below reduces to the value again).
            self.tag2scalars[tag] = {'avg': scalar_value, 'count': 0}
        avg = self.tag2scalars[tag]['avg']
        new_avg = (avg * self.tag2scalars[tag]['count'] + scalar_value) / (self.tag2scalars[tag]['count'] + 1)
        self.tag2scalars[tag]['avg'] = new_avg
        self.tag2scalars[tag]['count'] += 1
        return avg
321
+
322
+
323
class CSVWriterWithAD(BaseWriterWithAD):
    """CSV-backed metric writer with anomaly detection.

    Scalars are buffered per parameter name in ``context_dict`` and flushed by
    ``write_csv`` into per-interval files named ``<prefix>_<start>-<end>.csv``.
    """

    def __init__(self, writer_input: WriterInput):
        super().__init__(writer_input)

        path = writer_input.path
        self.log_dir = path
        create_directory(path)
        change_mode(path, FileCheckConst.DATA_DIR_AUTHORITY)
        self.context_dict = defaultdict(list)
        self.header = []
        self.step_count_per_record = writer_input.step_count_per_record

    def get_step_interval(self, step):
        """Return the inclusive [start, end] step range of the record file containing `step`."""
        count = step // self.step_count_per_record
        return count * self.step_count_per_record, (count + 1) * self.step_count_per_record - 1

    def write_csv(self, prefix, step):
        """
        Args:
            prefix[str]: prefix of output csv file e.g. grad_unreduced
            step[int]
        """
        if len(self.context_dict) == 0:
            return

        # Fixed typo: local was previously named `ster_start`.
        step_start, step_end = self.get_step_interval(step)
        filepath = os.path.join(self.log_dir, f'{prefix}_{step_start}-{step_end}.csv')
        if not os.path.exists(filepath):
            # First write for this interval: create the file with headers only.
            data_frame = pd.DataFrame(columns=self.header)
            write_df_to_csv(data_frame, filepath)

        new_data = []
        for name, metric_value in self.context_dict.items():
            if MonitorConst.NAME_SEP not in name:
                new_data.append([name] + [step] + metric_value)
            else:
                # vpp-style names split into separate (vpp_stage, param_name) columns.
                new_data.append(name.split(MonitorConst.NAME_SEP) + [step] + metric_value)
        new_data = pd.DataFrame(new_data).round(self.ndigits)
        write_df_to_csv(new_data, filepath, mode='a+', header=False)
        self.context_dict = defaultdict(list)

    def add_scalar(self, tag, scalar_value, global_step, need_explain=False):
        """
        ('0:1.post_attention_norm.weight/rank0/pre_grad', 'min')
        """
        # Forward the caller's need_explain instead of hard-coding False
        # (behavior-neutral today — the base class ignores it — but keeps the
        # delegation honest if the base ever starts using it).
        super().add_scalar(tag, scalar_value, global_step, need_explain=need_explain)
        split_name = tag[0].split('/')
        name = split_name[0]
        if need_explain:
            # Map pre/post hooks onto explicit .input/.output column names.
            if 'pre' in split_name[-1]:
                name += '.input'
            if 'post' in split_name[-1]:
                name += '.output'
        self.context_dict[name].append(scalar_value)

    def write_metrics(self, op_list, metric_value, step, prefix='', need_explain=False):
        # Only the 'other' group needs pre/post names expanded to .input/.output.
        need_explain = prefix == 'other'
        # Forward prefix instead of dropping it (the base currently ignores it).
        super().write_metrics(op_list, metric_value, step, prefix, need_explain=need_explain)

        # generate csv headers
        # set hashmap to reduce the number of headers generated.
        # Forward activations use input./output. columns; their gradients use
        # input_grad./output_grad. columns.
        if prefix in {"actv", "actv_grad"}:
            if prefix == "actv":
                input_and_output = [MonitorConst.ACTV_IN, MonitorConst.ACTV_OUT]
            else:
                input_and_output = [MonitorConst.ACTVGRAD_IN, MonitorConst.ACTVGRAD_OUT]
            ops_ = [MonitorConst.DOT.join(i) for i in itertools.product(input_and_output, op_list)]
            csv_header = ["module_name", "step", *ops_]
        else:
            csv_header = ["param_name", "step", *op_list]

        keys = list(metric_value.keys())
        if keys and MonitorConst.NAME_SEP in keys[0]:
            csv_header.insert(0, "vpp_stage")

        self.header = csv_header
        self.write_csv(prefix, step)
        self.header = []

    def close(self):
        pass
File without changes
@@ -0,0 +1,15 @@
1
+ communication.comm_func:
2
+ - all_reduce
3
+ - all_gather_into_tensor
4
+ - reduce
5
+ - reduce_scatter_tensor
6
+ - all_to_all_single_with_output_shape
7
+ - all_to_all_with_output_shape
8
+ - batch_isend_irecv
9
+ - broadcast
10
+ - gather_into_tensor
11
+ - scatter_tensor
12
+ - send
13
+ - recv
14
+ - isend
15
+ - irecv
@@ -0,0 +1,5 @@
1
+ stack:
2
+ - msprobe/mindspore/monitor/distributed
3
+ - site-packages/mindspore/nn/cell.py
4
+ - multiprocessing
5
+ - debugpy