mindstudio-probe 1.0.4__py3-none-any.whl → 1.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (278) hide show
  1. {mindstudio_probe-1.0.4.dist-info → mindstudio_probe-1.1.1.dist-info}/METADATA +5 -5
  2. mindstudio_probe-1.1.1.dist-info/RECORD +341 -0
  3. {mindstudio_probe-1.0.4.dist-info → mindstudio_probe-1.1.1.dist-info}/WHEEL +1 -1
  4. {mindstudio_probe-1.0.4.dist-info → mindstudio_probe-1.1.1.dist-info}/entry_points.txt +0 -1
  5. msprobe/README.md +84 -18
  6. msprobe/__init__.py +16 -1
  7. msprobe/config.json +1 -5
  8. msprobe/core/advisor/advisor.py +16 -11
  9. msprobe/core/advisor/advisor_const.py +6 -7
  10. msprobe/core/advisor/advisor_result.py +12 -12
  11. msprobe/core/common/const.py +164 -3
  12. msprobe/core/common/exceptions.py +26 -4
  13. msprobe/core/common/file_utils.py +196 -27
  14. msprobe/core/common/inplace_op_checker.py +53 -0
  15. msprobe/core/common/inplace_ops.yaml +251 -0
  16. msprobe/core/common/log.py +46 -18
  17. msprobe/core/common/utils.py +308 -209
  18. msprobe/core/common_config.py +60 -38
  19. msprobe/core/compare/acc_compare.py +332 -94
  20. msprobe/core/compare/check.py +104 -22
  21. msprobe/core/compare/compare_cli.py +42 -5
  22. msprobe/core/compare/highlight.py +162 -57
  23. msprobe/core/compare/layer_mapping/__init__.py +19 -0
  24. msprobe/core/compare/layer_mapping/data_scope_parser.py +235 -0
  25. msprobe/core/compare/layer_mapping/layer_mapping.py +242 -0
  26. msprobe/core/compare/layer_mapping/postprocess_pass.py +94 -0
  27. msprobe/core/compare/multiprocessing_compute.py +33 -8
  28. msprobe/core/compare/npy_compare.py +73 -29
  29. msprobe/core/compare/utils.py +306 -247
  30. msprobe/core/data_dump/data_collector.py +44 -43
  31. msprobe/core/data_dump/data_processor/base.py +88 -35
  32. msprobe/core/data_dump/data_processor/factory.py +20 -3
  33. msprobe/core/data_dump/data_processor/mindspore_processor.py +14 -8
  34. msprobe/core/data_dump/data_processor/pytorch_processor.py +180 -66
  35. msprobe/core/data_dump/json_writer.py +63 -42
  36. msprobe/core/data_dump/scope.py +143 -48
  37. msprobe/core/grad_probe/constant.py +31 -13
  38. msprobe/core/grad_probe/grad_compare.py +20 -4
  39. msprobe/core/grad_probe/utils.py +44 -3
  40. msprobe/core/overflow_check/abnormal_scene.py +185 -0
  41. msprobe/core/overflow_check/api_info.py +55 -0
  42. msprobe/core/overflow_check/checker.py +138 -0
  43. msprobe/core/overflow_check/filter.py +157 -0
  44. msprobe/core/overflow_check/ignore_rules.yaml +55 -0
  45. msprobe/core/overflow_check/level.py +22 -0
  46. msprobe/core/overflow_check/utils.py +28 -0
  47. msprobe/docs/01.installation.md +29 -9
  48. msprobe/docs/02.config_introduction.md +83 -84
  49. msprobe/docs/03.config_examples.md +3 -20
  50. msprobe/docs/04.kernel_dump_PyTorch.md +73 -0
  51. msprobe/docs/05.data_dump_PyTorch.md +143 -13
  52. msprobe/docs/06.data_dump_MindSpore.md +197 -88
  53. msprobe/docs/07.accuracy_checker_PyTorch.md +69 -46
  54. msprobe/docs/08.accuracy_checker_online_PyTorch.md +52 -17
  55. msprobe/docs/09.accuracy_checker_MindSpore.md +51 -15
  56. msprobe/docs/10.accuracy_compare_PyTorch.md +187 -99
  57. msprobe/docs/11.accuracy_compare_MindSpore.md +253 -31
  58. msprobe/docs/12.overflow_check_PyTorch.md +1 -1
  59. msprobe/docs/13.overflow_check_MindSpore.md +6 -6
  60. msprobe/docs/15.free_benchmarking_PyTorch.md +60 -55
  61. msprobe/docs/16.free_benchmarking_MindSpore.md +159 -0
  62. msprobe/docs/17.grad_probe.md +19 -22
  63. msprobe/docs/18.online_dispatch.md +89 -0
  64. msprobe/docs/19.monitor.md +468 -0
  65. msprobe/docs/20.monitor_performance_baseline.md +52 -0
  66. msprobe/docs/21.visualization_PyTorch.md +386 -0
  67. msprobe/docs/22.visualization_MindSpore.md +384 -0
  68. msprobe/docs/23.tool_function_introduction.md +28 -0
  69. msprobe/docs/{FAQ_PyTorch.md → FAQ.md} +25 -10
  70. msprobe/docs/data_dump_Mindspore/dynamic_graph_quick_start_example.md +211 -0
  71. msprobe/docs/img/compare_result.png +0 -0
  72. msprobe/docs/img/monitor/cpu_info.png +0 -0
  73. msprobe/docs/img/ms_dump.png +0 -0
  74. msprobe/docs/img/ms_layer.png +0 -0
  75. msprobe/docs/img/pt_dump.png +0 -0
  76. msprobe/mindspore/__init__.py +16 -0
  77. msprobe/mindspore/api_accuracy_checker/api_accuracy_checker.py +130 -138
  78. msprobe/mindspore/api_accuracy_checker/api_info.py +27 -5
  79. msprobe/mindspore/api_accuracy_checker/api_runner.py +43 -18
  80. msprobe/mindspore/api_accuracy_checker/base_compare_algorithm.py +21 -7
  81. msprobe/mindspore/api_accuracy_checker/checker_support_api.yaml +77 -0
  82. msprobe/mindspore/api_accuracy_checker/cmd_parser.py +63 -1
  83. msprobe/mindspore/api_accuracy_checker/compute_element.py +59 -24
  84. msprobe/mindspore/api_accuracy_checker/data_manager.py +264 -0
  85. msprobe/mindspore/api_accuracy_checker/main.py +27 -3
  86. msprobe/mindspore/api_accuracy_checker/multi_api_accuracy_checker.py +206 -0
  87. msprobe/mindspore/api_accuracy_checker/multi_data_manager.py +58 -0
  88. msprobe/mindspore/api_accuracy_checker/type_mapping.py +22 -5
  89. msprobe/mindspore/api_accuracy_checker/utils.py +34 -17
  90. msprobe/mindspore/cell_processor.py +58 -13
  91. msprobe/mindspore/common/const.py +35 -13
  92. msprobe/mindspore/common/log.py +5 -9
  93. msprobe/mindspore/common/utils.py +60 -5
  94. msprobe/mindspore/compare/distributed_compare.py +15 -28
  95. msprobe/mindspore/compare/ms_compare.py +319 -158
  96. msprobe/mindspore/compare/ms_graph_compare.py +99 -49
  97. msprobe/mindspore/debugger/debugger_config.py +20 -14
  98. msprobe/mindspore/debugger/precision_debugger.py +43 -13
  99. msprobe/mindspore/dump/dump_tool_factory.py +18 -1
  100. msprobe/mindspore/dump/hook_cell/api_registry.py +23 -3
  101. msprobe/mindspore/dump/hook_cell/primitive_hooks.py +203 -0
  102. msprobe/mindspore/dump/hook_cell/support_wrap_ops.yaml +107 -10
  103. msprobe/mindspore/dump/hook_cell/wrap_api.py +21 -13
  104. msprobe/mindspore/dump/jit_dump.py +56 -20
  105. msprobe/mindspore/dump/kernel_graph_dump.py +19 -5
  106. msprobe/mindspore/dump/kernel_kbyk_dump.py +19 -6
  107. msprobe/mindspore/dym_loader/hook_dynamic_loader.cc +140 -0
  108. msprobe/mindspore/dym_loader/hook_dynamic_loader.h +53 -0
  109. msprobe/mindspore/free_benchmark/api_pynative_self_check.py +162 -41
  110. msprobe/mindspore/free_benchmark/common/config.py +15 -0
  111. msprobe/mindspore/free_benchmark/common/handler_params.py +15 -1
  112. msprobe/mindspore/free_benchmark/common/utils.py +37 -8
  113. msprobe/mindspore/free_benchmark/data/support_wrap_ops.yaml +0 -204
  114. msprobe/mindspore/free_benchmark/handler/base_handler.py +20 -5
  115. msprobe/mindspore/free_benchmark/handler/check_handler.py +21 -7
  116. msprobe/mindspore/free_benchmark/handler/fix_handler.py +18 -3
  117. msprobe/mindspore/free_benchmark/handler/handler_factory.py +21 -6
  118. msprobe/mindspore/free_benchmark/perturbation/add_noise.py +23 -8
  119. msprobe/mindspore/free_benchmark/perturbation/base_perturbation.py +29 -5
  120. msprobe/mindspore/free_benchmark/perturbation/bit_noise.py +25 -10
  121. msprobe/mindspore/free_benchmark/perturbation/exchange_value.py +45 -19
  122. msprobe/mindspore/free_benchmark/perturbation/improve_precision.py +29 -8
  123. msprobe/mindspore/free_benchmark/perturbation/no_change.py +16 -1
  124. msprobe/mindspore/free_benchmark/perturbation/perturbation_factory.py +22 -7
  125. msprobe/mindspore/free_benchmark/self_check_tool_factory.py +17 -2
  126. msprobe/mindspore/grad_probe/global_context.py +44 -14
  127. msprobe/mindspore/grad_probe/grad_analyzer.py +27 -13
  128. msprobe/mindspore/grad_probe/grad_monitor.py +16 -1
  129. msprobe/mindspore/grad_probe/grad_stat_csv.py +33 -5
  130. msprobe/mindspore/grad_probe/hook.py +24 -10
  131. msprobe/mindspore/grad_probe/utils.py +18 -5
  132. msprobe/mindspore/ms_config.py +22 -15
  133. msprobe/mindspore/overflow_check/kernel_graph_overflow_check.py +20 -6
  134. msprobe/mindspore/overflow_check/overflow_check_tool_factory.py +15 -0
  135. msprobe/mindspore/runtime.py +15 -0
  136. msprobe/mindspore/service.py +75 -150
  137. msprobe/mindspore/task_handler_factory.py +15 -0
  138. msprobe/msprobe.py +24 -7
  139. msprobe/pytorch/__init__.py +23 -3
  140. msprobe/pytorch/api_accuracy_checker/common/config.py +81 -2
  141. msprobe/pytorch/api_accuracy_checker/common/utils.py +53 -21
  142. msprobe/pytorch/api_accuracy_checker/compare/algorithm.py +19 -2
  143. msprobe/pytorch/api_accuracy_checker/compare/api_precision_compare.py +50 -25
  144. msprobe/pytorch/api_accuracy_checker/compare/compare.py +51 -21
  145. msprobe/pytorch/api_accuracy_checker/compare/compare_column.py +23 -6
  146. msprobe/pytorch/api_accuracy_checker/compare/compare_utils.py +28 -8
  147. msprobe/pytorch/api_accuracy_checker/config.yaml +1 -1
  148. msprobe/pytorch/api_accuracy_checker/generate_op_script/config_op.json +9 -0
  149. msprobe/pytorch/api_accuracy_checker/generate_op_script/op_generator.py +454 -0
  150. msprobe/pytorch/api_accuracy_checker/generate_op_script/operator_replication.template +365 -0
  151. msprobe/pytorch/api_accuracy_checker/run_ut/data_generate.py +73 -33
  152. msprobe/pytorch/api_accuracy_checker/run_ut/multi_run_ut.py +44 -18
  153. msprobe/pytorch/api_accuracy_checker/run_ut/run_overflow_check.py +32 -11
  154. msprobe/pytorch/api_accuracy_checker/run_ut/run_ut.py +122 -172
  155. msprobe/pytorch/api_accuracy_checker/run_ut/run_ut_utils.py +158 -4
  156. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/attl.py +30 -24
  157. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/client.py +68 -31
  158. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/device_dispatch.py +27 -4
  159. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/dump_dispatch.py +115 -0
  160. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/server.py +26 -9
  161. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/torch_ops_config.yaml +63 -0
  162. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/utils.py +44 -0
  163. msprobe/pytorch/bench_functions/__init__.py +18 -3
  164. msprobe/pytorch/bench_functions/apply_adam_w.py +15 -0
  165. msprobe/pytorch/bench_functions/confusion_transpose.py +20 -1
  166. msprobe/pytorch/bench_functions/fast_gelu.py +15 -0
  167. msprobe/pytorch/bench_functions/layer_norm_eval.py +15 -0
  168. msprobe/pytorch/bench_functions/linear.py +15 -0
  169. msprobe/pytorch/bench_functions/matmul_backward.py +33 -6
  170. msprobe/pytorch/bench_functions/npu_fusion_attention.py +280 -157
  171. msprobe/pytorch/bench_functions/rms_norm.py +15 -0
  172. msprobe/pytorch/bench_functions/rotary_mul.py +32 -9
  173. msprobe/pytorch/bench_functions/scaled_mask_softmax.py +15 -0
  174. msprobe/pytorch/bench_functions/swiglu.py +29 -6
  175. msprobe/pytorch/common/__init__.py +15 -0
  176. msprobe/pytorch/common/log.py +18 -6
  177. msprobe/pytorch/common/parse_json.py +31 -16
  178. msprobe/pytorch/common/utils.py +96 -40
  179. msprobe/pytorch/compare/distributed_compare.py +13 -14
  180. msprobe/pytorch/compare/match.py +15 -0
  181. msprobe/pytorch/compare/pt_compare.py +44 -10
  182. msprobe/pytorch/debugger/debugger_config.py +69 -52
  183. msprobe/pytorch/debugger/precision_debugger.py +72 -24
  184. msprobe/pytorch/dump/kernel_dump/kernel_config.py +33 -0
  185. msprobe/pytorch/free_benchmark/__init__.py +20 -5
  186. msprobe/pytorch/free_benchmark/common/constant.py +15 -0
  187. msprobe/pytorch/free_benchmark/common/counter.py +15 -0
  188. msprobe/pytorch/free_benchmark/common/enums.py +43 -0
  189. msprobe/pytorch/free_benchmark/common/params.py +23 -1
  190. msprobe/pytorch/free_benchmark/common/utils.py +43 -5
  191. msprobe/pytorch/free_benchmark/compare/grad_saver.py +47 -9
  192. msprobe/pytorch/free_benchmark/compare/single_benchmark.py +17 -0
  193. msprobe/pytorch/free_benchmark/main.py +19 -4
  194. msprobe/pytorch/free_benchmark/perturbed_layers/base_layer.py +15 -0
  195. msprobe/pytorch/free_benchmark/perturbed_layers/layer_factory.py +19 -4
  196. msprobe/pytorch/free_benchmark/perturbed_layers/npu/add_noise.py +18 -1
  197. msprobe/pytorch/free_benchmark/perturbed_layers/npu/bit_noise.py +21 -4
  198. msprobe/pytorch/free_benchmark/perturbed_layers/npu/change_value.py +28 -2
  199. msprobe/pytorch/free_benchmark/perturbed_layers/npu/improve_precision.py +19 -0
  200. msprobe/pytorch/free_benchmark/perturbed_layers/npu/no_change.py +15 -0
  201. msprobe/pytorch/free_benchmark/perturbed_layers/npu/npu_base_layser.py +15 -0
  202. msprobe/pytorch/free_benchmark/perturbed_layers/run_cpu.py +15 -0
  203. msprobe/pytorch/free_benchmark/result_handlers/base_handler.py +65 -16
  204. msprobe/pytorch/free_benchmark/result_handlers/check_handler.py +15 -0
  205. msprobe/pytorch/free_benchmark/result_handlers/fix_handler.py +21 -5
  206. msprobe/pytorch/free_benchmark/result_handlers/handler_factory.py +15 -0
  207. msprobe/pytorch/free_benchmark/result_handlers/preheat_handler.py +19 -4
  208. msprobe/pytorch/function_factory.py +17 -2
  209. msprobe/pytorch/functional/module_dump.py +84 -0
  210. msprobe/pytorch/grad_probe/grad_monitor.py +23 -6
  211. msprobe/pytorch/grad_probe/grad_stat_csv.py +40 -10
  212. msprobe/pytorch/hook_module/__init__.py +16 -1
  213. msprobe/pytorch/hook_module/api_registry.py +13 -8
  214. msprobe/pytorch/hook_module/hook_module.py +17 -19
  215. msprobe/pytorch/hook_module/support_wrap_ops.yaml +1 -0
  216. msprobe/pytorch/hook_module/utils.py +4 -6
  217. msprobe/pytorch/hook_module/wrap_aten.py +12 -11
  218. msprobe/pytorch/hook_module/wrap_distributed.py +6 -7
  219. msprobe/pytorch/hook_module/wrap_functional.py +21 -20
  220. msprobe/pytorch/hook_module/wrap_npu_custom.py +9 -17
  221. msprobe/pytorch/hook_module/wrap_tensor.py +4 -6
  222. msprobe/pytorch/hook_module/wrap_torch.py +4 -6
  223. msprobe/pytorch/hook_module/wrap_vf.py +4 -6
  224. msprobe/pytorch/module_processer.py +18 -6
  225. msprobe/pytorch/monitor/anomaly_analyse.py +201 -0
  226. msprobe/pytorch/monitor/anomaly_detect.py +340 -0
  227. msprobe/pytorch/monitor/distributed/distributed_ops.yaml +19 -0
  228. msprobe/pytorch/monitor/distributed/stack_blacklist.yaml +5 -0
  229. msprobe/pytorch/monitor/distributed/wrap_distributed.py +272 -0
  230. msprobe/pytorch/monitor/features.py +108 -0
  231. msprobe/pytorch/monitor/module_hook.py +870 -0
  232. msprobe/pytorch/monitor/module_metric.py +193 -0
  233. msprobe/pytorch/monitor/module_spec_verifier.py +93 -0
  234. msprobe/pytorch/monitor/optimizer_collect.py +295 -0
  235. msprobe/pytorch/monitor/unittest/__init__.py +0 -0
  236. msprobe/pytorch/monitor/unittest/test_monitor.py +145 -0
  237. msprobe/pytorch/monitor/utils.py +250 -0
  238. msprobe/pytorch/monitor/visualizer.py +59 -0
  239. msprobe/pytorch/online_dispatch/__init__.py +2 -3
  240. msprobe/pytorch/online_dispatch/compare.py +38 -48
  241. msprobe/pytorch/online_dispatch/dispatch.py +50 -25
  242. msprobe/pytorch/online_dispatch/dump_compare.py +21 -9
  243. msprobe/pytorch/online_dispatch/single_compare.py +60 -39
  244. msprobe/pytorch/online_dispatch/torch_ops_config.yaml +9 -1
  245. msprobe/pytorch/online_dispatch/utils.py +48 -23
  246. msprobe/pytorch/parse.py +15 -0
  247. msprobe/pytorch/parse_tool/cli.py +5 -6
  248. msprobe/pytorch/parse_tool/lib/compare.py +19 -26
  249. msprobe/pytorch/parse_tool/lib/config.py +1 -1
  250. msprobe/pytorch/parse_tool/lib/parse_tool.py +4 -2
  251. msprobe/pytorch/parse_tool/lib/utils.py +40 -55
  252. msprobe/pytorch/parse_tool/lib/visualization.py +3 -1
  253. msprobe/pytorch/pt_config.py +192 -40
  254. msprobe/pytorch/service.py +110 -35
  255. msprobe/visualization/__init__.py +14 -0
  256. msprobe/visualization/builder/__init__.py +14 -0
  257. msprobe/visualization/builder/graph_builder.py +165 -0
  258. msprobe/visualization/builder/msprobe_adapter.py +205 -0
  259. msprobe/visualization/compare/__init__.py +14 -0
  260. msprobe/visualization/compare/graph_comparator.py +130 -0
  261. msprobe/visualization/compare/mode_adapter.py +211 -0
  262. msprobe/visualization/graph/__init__.py +14 -0
  263. msprobe/visualization/graph/base_node.py +124 -0
  264. msprobe/visualization/graph/graph.py +200 -0
  265. msprobe/visualization/graph/node_colors.py +95 -0
  266. msprobe/visualization/graph/node_op.py +39 -0
  267. msprobe/visualization/graph_service.py +214 -0
  268. msprobe/visualization/utils.py +232 -0
  269. mindstudio_probe-1.0.4.dist-info/RECORD +0 -276
  270. msprobe/docs/04.acl_config_examples.md +0 -76
  271. msprobe/mindspore/free_benchmark/decorator/dec_forward.py +0 -43
  272. msprobe/mindspore/free_benchmark/decorator/decorator_factory.py +0 -107
  273. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/ssl_config.py +0 -10
  274. msprobe/pytorch/functional/dump_module.py +0 -39
  275. {mindstudio_probe-1.0.4.dist-info → mindstudio_probe-1.1.1.dist-info}/LICENSE +0 -0
  276. {mindstudio_probe-1.0.4.dist-info → mindstudio_probe-1.1.1.dist-info}/top_level.txt +0 -0
  277. /msprobe/{mindspore/free_benchmark/decorator → pytorch/monitor}/__init__.py +0 -0
  278. /msprobe/pytorch/{functional/data_processor.py → monitor/distributed/__init__.py} +0 -0
@@ -0,0 +1,340 @@
1
+ # Copyright (c) 2024-2024, Huawei Technologies Co., Ltd.
2
+ # All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ import sys
18
+ import statistics as st
19
+ from abc import ABC
20
+ from dataclasses import dataclass, field
21
+ from typing import List
22
+ from collections import defaultdict
23
+
24
+ import pandas as pd
25
+ from torch.utils.tensorboard import SummaryWriter
26
+
27
+ from msprobe.core.common.log import logger
28
+ from msprobe.core.common.file_utils import change_mode, create_directory, write_df_to_csv
29
+ from msprobe.core.common.const import FileCheckConst, MonitorConst
30
+
31
+
32
class ScanRule(ABC):
    """Base interface for anomaly scan rules applied to metric values."""

    name = "ScanRule"

    def apply(self, history, cur):
        """Judge *cur* against *history*; concrete rules must override this."""
        raise NotImplementedError("abstract method apply is not implemented")
37
+
38
+
39
class AnomalyTurbulence(ScanRule):
    """Flags a value that deviates from the baseline by more than a relative threshold."""

    name = "AnomalyTurbulence"

    def __init__(self, threshold) -> None:
        self.threshold = threshold

    def apply(self, history, cur):
        """Return True when *cur* crosses the threshold bound around the baseline.

        *history* is either a list of past values (its mean is the baseline)
        or a single past value. For a positive baseline the bound acts as an
        upper limit; for a zero or negative baseline it acts as a lower limit.
        """
        if isinstance(history, list):
            baseline = st.mean(history)
        else:
            baseline = history

        bound = baseline + baseline * self.threshold
        if baseline > 0:
            return cur > bound
        return cur < bound
53
+
54
+
55
class AnomalyScanner:
    """Builds scan rules from config specs and runs them over metric values."""

    @staticmethod
    def load_rules(specs: List[dict]):
        """Instantiate rule objects from a spec list.

        specs: [{"rule_name": "AnomalyTurbulence", "args": {"threshold": 0.5}}]

        Malformed entries (missing keys, unknown rule class, bad constructor
        args) are logged and skipped instead of aborting the whole load.
        """
        if specs is None:
            return []

        rules = []
        this_module = sys.modules.get(__name__)
        for spec in specs:
            # Missing keys come back as None from dict.get.
            rule_cls_name = spec.get("rule_name")
            rule_args = spec.get("args")
            if rule_cls_name is None or rule_args is None:
                logger.warning(f"Spec is missing required keys: {spec}")
                continue

            try:
                # Rules are looked up by class name in this very module.
                rule_cls = getattr(this_module, rule_cls_name)
            except AttributeError:
                logger.error(f"Rule class '{rule_cls_name}' not found in the current module.")
                continue

            try:
                rules.append(rule_cls(**rule_args))
            except Exception as e:
                logger.error(f"Error creating instance of rule '{rule_cls_name}': {e}")

        return rules

    @staticmethod
    def scan(scan_rules: List[ScanRule], history, cur):
        """Run each rule in turn and stop at the first that reports an anomaly.

        Returns (anomaly, rule_name); rule_name is None when nothing fired.
        """
        anomaly = False
        for rule in scan_rules:
            anomaly = rule.apply(history, cur)
            if anomaly:
                return anomaly, rule.name
        return anomaly, None
99
+
100
+
101
class BCOLORS:
    """ANSI escape sequences used to colorize terminal log output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # resets all styling back to the terminal default
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
111
+
112
+
113
class AnomalyDataFactory(ABC):
    """Produces GradAnomalyData records for anomalies detected on this rank."""

    def __init__(self, rank, pp_stage, group_mates):
        super().__init__()
        self.rank = rank
        self.pp_stage = pp_stage
        self.group_mates = group_mates
        self.micro_step = 0
        self.name2callid = {}

    def set_call_id(self, name2callid):
        """Refresh the parameter-name -> call_id mapping from the current GradContext."""
        self.name2callid = name2callid

    def create(self, tag, message, step):
        """Build a GradAnomalyData instance once an anomaly has been detected.

        tag (tuple): metric tag, e.g. ('0:1.post_attention_norm.weight/rank0/pre_grad', 'min')
        message (str): anomaly detection message
        step (int): training step

        Raises ValueError when *tag* is not a 2-tuple.
        """
        if not isinstance(tag, tuple) or len(tag) != 2:
            raise ValueError("tag must be a tuple with length 2")

        tag_name = tag[0]
        param_name = tag_name.split('/')[0]
        # Unknown parameters fall back to call_id -1.
        call_id = self.name2callid.get(param_name, -1)
        # A vpp-prefixed name looks like "<vpp_stage><VPP_SEP><param path>".
        if MonitorConst.VPP_SEP in param_name:
            vpp_stage = int(param_name.split(MonitorConst.VPP_SEP)[0])
        else:
            vpp_stage = 0

        return GradAnomalyData(
            self.rank,
            step,
            self.micro_step,
            self.pp_stage,
            vpp_stage,
            call_id,
            tag_name,
            message,
            self.group_mates
        )
154
+
155
+
156
@dataclass(eq=True)
class GradAnomalyData:
    """One detected gradient anomaly, orderable so anomalies sort in training order.

    Ordering: step and micro_step ascending, then vpp_stage and pp_stage
    DESCENDING, then call_id ascending. tag_name/message/group_mates do not
    participate in equality or ordering.
    """
    rank: int = 0
    step: int = 0
    micro_step: int = 0
    pp_stage: int = 0
    vpp_stage: int = 0
    call_id: int = 0
    tag_name: str = field(default=None, compare=False)
    message: str = field(default="", compare=False)
    group_mates: list = field(default=None, compare=False)

    def __lt__(self, other):
        if not isinstance(other, GradAnomalyData):
            return NotImplemented
        # Negated stages flip those two fields to descending order.
        mine = (self.step, self.micro_step, -self.vpp_stage, -self.pp_stage, self.call_id)
        theirs = (other.step, other.micro_step, -other.vpp_stage, -other.pp_stage, other.call_id)
        return mine < theirs

    def __le__(self, other):
        if not isinstance(other, GradAnomalyData):
            return NotImplemented
        return self == other or self < other

    def to_dict(self):
        """Return the instance's attribute dict (suitable for JSON dumping)."""
        return self.__dict__

    def get_key(self):
        # e.g. 0:1.self_attention.core_attention_flash_0/rank0/input_grad_step_0_call_0
        return ''.join([str(self.tag_name), "_step_", str(self.step), "_call_", str(self.call_id)])
194
+
195
+
196
@dataclass
class WriterInput:
    """Configuration bundle consumed by the anomaly-detecting metric writers."""
    # Output directory for the writer's records (csv files / event files).
    path: str
    # ScanRule instances used for anomaly detection; empty or None disables it.
    ad_rules: list
    # Identifier of the training job; stored on the writer.
    job_id: str
    # NOTE(review): stored but not consumed in this module — presumably toggles
    # external anomaly notification; confirm at call sites.
    anomaly_inform: bool = False
    # Factory building GradAnomalyData records; None means anomalies are
    # logged but not collected for dumping.
    anomaly_factory: AnomalyDataFactory = None
    # Decimal places kept when metric values are written out.
    ndigits: int = 6
    # Number of training steps grouped into a single csv record file.
    step_count_per_record: int = 1
205
+
206
+
207
class BaseWriterWithAD:
    """Common anomaly-detection bookkeeping shared by the metric writers."""

    def __init__(self, writer_input: WriterInput):
        self.tag2scalars = {}  # tag -> {'avg': running mean, 'count': samples seen}
        self.ad_rules = writer_input.ad_rules
        self.job_id = writer_input.job_id
        self.anomaly_inform = writer_input.anomaly_inform
        self.anomaly_factory = writer_input.anomaly_factory
        self.anomalies = []
        self.ndigits = writer_input.ndigits

    def get_anomalies(self):
        """Return the list of anomalies detected so far."""
        return self.anomalies

    def clear_anomalies(self):
        """Drop every recorded anomaly."""
        self.anomalies.clear()

    def add_scalar(self, tag, scalar_value, global_step=None):
        """Record *scalar_value* and, when a rule fires, log and store the anomaly.

        Args:
            tag (tuple): (tag_name, metric) pair, e.g.
                ('0:1.post_attention_norm.weight/rank0/pre_grad', 'min').
            scalar_value (float): value to check.
            global_step (int): training step the value belongs to.
        Returns:
            None
        """
        if not self.ad_rules:
            return
        history_avg = self._update_tag2scalars(tag, scalar_value)
        detected, rule_name = self._ad(scalar_value, history=history_avg)
        if not detected:
            return
        exception_message = f"Rule {rule_name} reports anomaly signal in {tag} at step {global_step}."
        logger.info(f"{BCOLORS.WARNING}> {exception_message}{BCOLORS.ENDC}")
        # Keep the anomaly around so it can be dumped later.
        if self.anomaly_factory:
            self.anomalies.append(self.anomaly_factory.create(tag, exception_message, global_step))

    def _ad(self, scalar_value, history):
        """Run all configured rules; returns (detected, rule_name)."""
        return AnomalyScanner.scan(self.ad_rules, history, cur=scalar_value)

    def _update_tag2scalars(self, tag, scalar_value):
        """Fold *scalar_value* into the running average kept for *tag*.

        Maintains a running mean per tag and returns the mean as it was
        BEFORE this update — that prior mean is what the anomaly rules
        compare the new value against.

        Args:
            tag (str): The tag identifier.
            scalar_value (float): The scalar value to be added.

        Returns:
            float: The average value before update.
        """
        entry = self.tag2scalars.setdefault(tag, {'avg': scalar_value, 'count': 0})
        prev_avg = entry['avg']
        entry['avg'] = (prev_avg * entry['count'] + scalar_value) / (entry['count'] + 1)
        entry['count'] += 1
        return prev_avg
268
+
269
+
270
class CSVWriterWithAD(BaseWriterWithAD):
    """Writes monitored metrics into per-step-range csv files, with anomaly detection."""

    def __init__(self, writer_input: WriterInput):
        super().__init__(writer_input)

        out_dir = writer_input.path
        self.log_dir = out_dir
        create_directory(out_dir)
        change_mode(out_dir, FileCheckConst.DATA_DIR_AUTHORITY)
        self.context_dict = defaultdict(list)  # metric name -> values buffered since last flush
        self.header = []
        self.step_count_per_record = writer_input.step_count_per_record

    def get_step_interval(self, step):
        """Return the inclusive [start, end] step range of the file covering *step*."""
        bucket = step // self.step_count_per_record
        start = bucket * self.step_count_per_record
        return start, start + self.step_count_per_record - 1

    def write_csv(self, prefix, step):
        """Flush buffered metrics into '{prefix}_{start}-{end}.csv' and reset the buffer.

        Args:
            prefix[str]: prefix of output csv file e.g. grad_unreduced
            step[int]: current training step
        """
        if not self.context_dict:
            return

        step_start, step_end = self.get_step_interval(step)
        filepath = os.path.join(self.log_dir, f'{prefix}_{step_start}-{step_end}.csv')
        if not os.path.exists(filepath):
            # First write into this range: create the file with just the header row.
            write_df_to_csv(pd.DataFrame(columns=self.header), filepath)

        rows = []
        for name, metric_value in self.context_dict.items():
            # vpp-style names ("<stage><sep><param>") are split into separate columns.
            if MonitorConst.VPP_SEP in name:
                rows.append(name.split(MonitorConst.VPP_SEP) + [step] + metric_value)
            else:
                rows.append([name] + [step] + metric_value)
        frame = pd.DataFrame(rows).round(self.ndigits)
        write_df_to_csv(frame, filepath, mode='a+', header=False)
        self.context_dict = defaultdict(list)

    def add_scalar(self, tag, scalar_value, global_step):
        """Run anomaly detection on the tuple tag, then buffer the value by metric name.

        tag looks like ('0:1.post_attention_norm.weight/rank0/pre_grad', 'min').
        NOTE(review): scalar_value must expose .item() (a 0-d tensor) — confirm callers.
        """
        super().add_scalar(tag, scalar_value, global_step)

        name = tag[0].split('/')[0]
        self.context_dict[name].append(scalar_value.item())

    def close(self):
        """Nothing to release; files are opened and closed per write."""
        pass
322
+
323
+
324
class SummaryWriterWithAD(SummaryWriter, BaseWriterWithAD):
    """Tensorboard SummaryWriter that also runs anomaly detection on each scalar.

    MRO is [SummaryWriterWithAD, SummaryWriter, BaseWriterWithAD], so
    ``super(SummaryWriter, self)`` dispatches to BaseWriterWithAD while a
    plain ``super()`` dispatches to SummaryWriter. Do not reorder the bases.
    """
    def __init__(self, writer_input: WriterInput):

        path = writer_input.path
        if not os.path.exists(path):
            create_directory(path)
        try:
            # BaseWriterWithAD.__init__ — sets up the anomaly-detection state.
            super(SummaryWriter, self).__init__(writer_input)
            # SummaryWriter.__init__ — opens the tensorboard event file at *path*.
            super().__init__(path)
        except Exception as e:
            logger.error(f'error when init summary writer at {path}: {e}')
            raise ValueError("Init summary writer error.") from e

    def add_scalar(self, tag, scalar_value, global_step):
        """Detect anomalies using the tuple tag, then write to tensorboard with a flat tag."""
        # BaseWriterWithAD.add_scalar expects the (tag_name, metric) tuple form.
        super(SummaryWriter, self).add_scalar(tag, scalar_value, global_step)
        # Tensorboard wants a plain string tag, e.g. '<name>_min'.
        tag = f'{tag[0]}_{tag[1]}'
        super().add_scalar(tag, scalar_value, global_step)
@@ -0,0 +1,19 @@
1
# Names of torch.distributed communication functions to wrap for monitoring.
# Only names that actually exist in the installed torch.distributed are
# hooked (see get_distributed_ops in wrap_distributed.py).
distributed:
- send
- recv
- isend
- irecv
- batch_isend_irecv
- broadcast
- all_reduce
- reduce
- scatter
- reduce_scatter
- _reduce_scatter_base
- reduce_scatter_tensor
- all_gather
- gather
- _all_gather_base
- all_gather_into_tensor
18
+
19
+
@@ -0,0 +1,5 @@
1
# Path fragments used as a blacklist for captured call stacks —
# presumably frames whose file path contains one of these are skipped;
# confirm against the consumer in wrap_distributed.py.
stack:
- msprobe/pytorch/monitor/distributed
- site-packages/torch/nn/modules/module.py
- multiprocessing
- debugpy
@@ -0,0 +1,272 @@
1
+ # Copyright (c) 2024-2024, Huawei Technologies Co., Ltd.
2
+ # All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ import re
18
+ import inspect
19
+
20
+ import torch
21
+ import torch.nn as nn
22
+ import torch.distributed as dist
23
+
24
+ from msprobe.core.common.file_utils import load_yaml
25
+ from msprobe.core.common.const import MonitorConst
26
+ from msprobe.pytorch.monitor.module_metric import get_metrics, get_summary_writer_tag_name
27
+
28
+ try:
29
+ import torch_npu
30
+ except ImportError:
31
+ pass
32
+
33
# Current process rank; populated lazily by create_hooks().
RANK = None

# Op names to wrap, loaded from the yaml next to this module.
OpsPath = os.path.join(os.path.dirname(__file__), "distributed_ops.yaml")
WrapDistributedOps = load_yaml(OpsPath).get("distributed", [])

# Call-stack path patterns to drop when recording where a collective was called.
StackBlackListPath = os.path.join(os.path.dirname(__file__), "stack_blacklist.yaml")
StackBlackList = load_yaml(StackBlackListPath).get("stack", [])

# Snapshot of every attribute of torch.distributed taken before any patching,
# so DistributedOPTemplate.forward can dispatch to the original implementation.
distributed_func = {}
for f in dir(dist):
    distributed_func[f] = getattr(dist, f)

# Original Work.wait, restored by ApiRegistry.restore_api().
ORIGIN_WAIT = getattr(dist.Work, 'wait')
# Maps an async communication handle to the callback that collects its metrics
# once wait() completes (see ApiRegistry.redirect_wait).
PENDING_ASYNC_CC_BY_HANDLE = {}
47
+
48
+
49
def get_distributed_ops():
    """Return the configured distributed op names that actually exist in torch.distributed.

    Returns:
        set[str]: intersection of the yaml-configured op list and dir(dist).
    """
    # Note: the original declared `global WrapDistributedOps`, which is
    # unnecessary for a read-only access and has been removed.
    return set(WrapDistributedOps) & set(dir(dist))
53
+
54
+
55
class DistributedOPTemplate(nn.Module):
    """nn.Module wrapper around one torch.distributed op so monitor hooks can attach."""

    def __init__(self, op_name, pre_hooks, post_hooks):
        super().__init__()
        self.op_name_ = str(op_name)
        self.__name__ = self.op_name_
        # Hooks receive kwargs as well, matching how collectives are invoked.
        for hook_fn in pre_hooks:
            self.register_forward_pre_hook(hook_fn, with_kwargs=True)
        for hook_fn in post_hooks:
            self.register_forward_hook(hook_fn, with_kwargs=True)

    def forward(self, *args, **kwargs):
        # Dispatch to the original (pre-patch) torch.distributed implementation.
        original_op = distributed_func.get(self.op_name_)
        return original_op(*args, **kwargs)
67
+
68
+
69
class ApiRegistry:
    """Installs hooked wrappers over torch.distributed ops and can restore the originals."""

    def __init__(self):
        # op name -> original function, and op name -> hooked DistributedOPTemplate.
        self.distributed_attr_origin = {}
        self.distributed_attr_hooked = {}

    @staticmethod
    def store_ori_attr(ori_api_group, api_list, api_ori_attr):
        """Save the original attribute for each api name (dotted names address sub-modules)."""
        for api in api_list:
            if '.' in api:
                sub_module_name, sub_op = api.rsplit('.', 1)
                sub_module = getattr(ori_api_group, sub_module_name)
                api_ori_attr[api] = getattr(sub_module, sub_op)
            else:
                api_ori_attr[api] = getattr(ori_api_group, api)

    @staticmethod
    def set_api_attr(api_group, attr_dict):
        """Assign every entry of attr_dict onto api_group (dotted names address sub-modules)."""
        for cc_api_name, cc_api_entry_func in attr_dict.items():
            if '.' in cc_api_name:
                sub_module_name, sub_op = cc_api_name.rsplit('.', 1)
                # Missing sub-modules are skipped silently.
                sub_module = getattr(api_group, sub_module_name, None)
                if sub_module is not None:
                    setattr(sub_module, sub_op, cc_api_entry_func)
            else:
                setattr(api_group, cc_api_name, cc_api_entry_func)

    @staticmethod
    def redirect_wait():
        """Replace dist.Work.wait so deferred async-collective callbacks fire after wait()."""
        global ORIGIN_WAIT
        global PENDING_ASYNC_CC_BY_HANDLE

        def wrapped_wait(work):
            # NOTE: the inner function intentionally reuses the name wrapped_wait;
            # it is the actual replacement bound to dist.Work.wait below.
            def wrapped_wait(*args, **kwargs):
                ORIGIN_WAIT(*args, **kwargs)
                # args[0] is the Work handle (the bound call's self).
                if args[0] in PENDING_ASYNC_CC_BY_HANDLE:
                    store_func = PENDING_ASYNC_CC_BY_HANDLE.pop(args[0])
                    store_func()

            return wrapped_wait

        dist.Work.wait = wrapped_wait(dist.Work)

    def redirect_api(self):
        """Install hooked wrappers on dist and dist.distributed_c10d, and patch Work.wait."""
        self.set_api_attr(dist, self.distributed_attr_hooked)
        self.set_api_attr(dist.distributed_c10d, self.distributed_attr_hooked)
        self.redirect_wait()

    def restore_api(self):
        """Restore the original torch.distributed functions and the original Work.wait."""
        self.set_api_attr(dist, self.distributed_attr_origin)
        self.set_api_attr(dist.distributed_c10d, self.distributed_attr_origin)
        setattr(dist.Work, 'wait', ORIGIN_WAIT)

    def initialize_hook(self, pre_hooks, post_hooks):
        """Capture the originals and build one hooked DistributedOPTemplate per configured op."""
        self.store_ori_attr(dist, get_distributed_ops(), self.distributed_attr_origin)
        for op_name in get_distributed_ops():
            self.distributed_attr_hooked[op_name] = DistributedOPTemplate(op_name, pre_hooks, post_hooks)
125
+
126
+
127
def get_process_group(process_group):
    """Return process_group if it is a real ProcessGroup, else the default WORLD group."""
    if isinstance(process_group, dist.ProcessGroup):
        return process_group
    return dist.GroupMember.WORLD
133
+
134
+
135
def stack_filter(stack):
    """Return True when *stack* matches no blacklist pattern (i.e. the frame is kept)."""
    return all(re.search(pattern, stack) is None for pattern in StackBlackList)
140
+
141
+
142
def get_callstack():
    """Collect the current call stack as 'path[line] func' strings, minus blacklisted frames."""
    frames = []
    for frame_info in inspect.stack():
        path, line, func = frame_info[1], frame_info[2], frame_info[3]
        location = f'{path}[{line}]'
        if stack_filter(location):
            frames.append(location + ' ' + func)
    return frames
149
+
150
+
151
@torch.no_grad()
def op_aggregate(op, tensorlist):
    """Reduce a list of per-call scalar tensors into a single value according to *op*.

    A bare tensor is returned as-is; an empty list or unknown op yields NaN.
    """
    if isinstance(tensorlist, torch.Tensor):
        return tensorlist
    if not tensorlist:
        return torch.tensor(torch.nan)
    count = len(tensorlist)
    reducers = {
        'min': lambda: min(tensorlist),
        'max': lambda: max(tensorlist),
        'norm': lambda: sum(tensorlist),
        'zeros': lambda: sum(tensorlist) / count,
        'nans': lambda: sum(tensorlist),
        'mean': lambda: sum(tensorlist) / count,
    }
    reducer = reducers.get(op)
    if reducer is None:
        return torch.tensor(torch.nan)
    return reducer()
170
+
171
+
172
def update_data(old, new):
    """Merge per-tag/per-op tensors from *new* into the accumulator *old*.

    old maps tag -> {op: [tensor, ...]}; each new tensor is appended to the
    matching list, creating entries as needed. Returns the mutated *old*.
    """
    for tag, op2tensor in new.items():
        tag_entry = old.setdefault(tag, {})
        for op, tensor in op2tensor.items():
            tag_entry.setdefault(op, []).append(tensor)
    return old
182
+
183
+
184
def is_target_line(codeline):
    """Return True when no patterns are configured or the current call stack matches one."""
    whole_stack = ';'.join(get_callstack())
    if codeline == []:
        return True
    return any(re.search(pattern, whole_stack) for pattern in codeline)
193
+
194
+
195
@torch.no_grad()
def catch_data(cc_context, cc_name, ops, args, prefix):
    """Compute metrics over the tensor arguments of a collective call and merge them in.

    Args:
        cc_context: context object whose ``data`` dict accumulates metrics.
        cc_name: name of the communication op, used in the metric tag.
        ops: metric op names forwarded to get_metrics.
        args: positional plus keyword argument values of the collective call.
        prefix: tag prefix (pre/post) for the summary-writer tag.
    """
    tensor_args = {}
    for arg in args:
        if isinstance(arg, torch.Tensor):
            key = get_summary_writer_tag_name(cc_name, f'{prefix}_{len(tensor_args)}', RANK)
            tensor_args[key] = arg
        elif isinstance(arg, list) and arg:
            # Fix: the original indexed arg[0] on possibly-empty lists (IndexError)
            # and fell through with an unbound/stale stacked_arg when the list held
            # neither Tensors nor P2POps. Unsupported lists are now skipped.
            if isinstance(arg[0], torch.Tensor):
                stacked_arg = torch.stack(arg)
            elif isinstance(arg[0], dist.P2POp):
                stacked_arg = torch.stack([op.tensor for op in arg])
            else:
                continue
            key = get_summary_writer_tag_name(cc_name, f'{prefix}_{len(tensor_args)}', RANK)
            tensor_args[key] = stacked_arg

    new_data = get_metrics(ops, tensor_args, 1e-8)
    cc_context.data = update_data(cc_context.data, new_data)
212
+
213
+
214
def create_async_callback_func(context, cc_name, ops, args, prefix):
    """Return a zero-arg callback that records metrics for an async collective.

    The callback captures the call's arguments and is invoked from the patched
    Work.wait() once the asynchronous communication completes.
    """
    def store_data():
        catch_data(context, cc_name, ops, args, prefix)

    return store_data
219
+
220
+
221
def create_hooks(context, monitor):
    """Build the [pre_hooks, post_hooks] lists used to wrap distributed ops.

    Args:
        context: dict mapping op name -> context object accumulating metric data.
        monitor: monitor configuration (ops, cc_codeline, module_rank_list,
                 cc_log_only, cc_pre_hook, cc_logged_stack).

    Returns:
        [pre_hooks, hooks]: possibly empty lists of hook callables.
    """
    def cc_log_hook(module, args, kwargs):
        # Log-only mode: record where each collective was called from.
        stack = ';'.join(get_callstack())
        monitor.cc_logged_stack[module.op_name_].add(stack)
        return

    def cc_pre_hook(module, args, kwargs):
        # Capture input tensors before the collective runs.
        if not is_target_line(monitor.cc_codeline):
            return
        args = args + tuple(kwargs.values())
        catch_data(context[module.op_name_], module.op_name_, monitor.ops, args, MonitorConst.PREFIX_PRE)
        return

    def cc_hook(module, args, kwargs, out=None):
        # Capture tensors after the collective; async results are deferred
        # until the patched Work.wait() fires their stored callback.
        if not is_target_line(monitor.cc_codeline):
            return out
        args = args + tuple(kwargs.values())
        if out:  # async
            if isinstance(out, dist.Work):
                PENDING_ASYNC_CC_BY_HANDLE[out] = create_async_callback_func(context[module.op_name_],
                                                                             module.op_name_,
                                                                             monitor.ops, args, MonitorConst.PREFIX_POST)
            elif isinstance(out, list):  # batch_isend_irecv
                for out_element in out:
                    PENDING_ASYNC_CC_BY_HANDLE[out_element] = create_async_callback_func(context[module.op_name_],
                                                                                         module.op_name_,
                                                                                         monitor.ops, args,
                                                                                         MonitorConst.PREFIX_POST)
            return out
        catch_data(context[module.op_name_], module.op_name_, monitor.ops, args, MonitorConst.PREFIX_POST)
        return out

    global RANK
    pre_hooks = []
    hooks = []
    # NOTE(review): dist.get_rank() runs before the is_initialized() check below;
    # on an uninitialized default process group this can raise — confirm callers
    # only invoke create_hooks after torch.distributed is initialized.
    RANK = dist.get_rank()
    if dist.is_initialized() and RANK not in monitor.module_rank_list and monitor.module_rank_list != []:
        # This rank is not monitored: register nothing.
        return [pre_hooks, hooks]

    if monitor.cc_log_only:
        pre_hooks.append(cc_log_hook)
        return [pre_hooks, hooks]

    if monitor.cc_pre_hook:
        pre_hooks.append(cc_pre_hook)

    hooks.append(cc_hook)

    return [pre_hooks, hooks]
270
+
271
+
272
# Module-level singleton used by the monitor to patch/unpatch torch.distributed.
api_register = ApiRegistry()