mindstudio-probe 1.1.0__py3-none-any.whl → 1.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (220)
  1. {mindstudio_probe-1.1.0.dist-info → mindstudio_probe-1.1.1.dist-info}/METADATA +5 -5
  2. mindstudio_probe-1.1.1.dist-info/RECORD +341 -0
  3. {mindstudio_probe-1.1.0.dist-info → mindstudio_probe-1.1.1.dist-info}/WHEEL +1 -1
  4. {mindstudio_probe-1.1.0.dist-info → mindstudio_probe-1.1.1.dist-info}/entry_points.txt +0 -1
  5. msprobe/README.md +39 -3
  6. msprobe/config.json +1 -3
  7. msprobe/core/advisor/advisor.py +8 -3
  8. msprobe/core/common/const.py +113 -13
  9. msprobe/core/common/exceptions.py +25 -3
  10. msprobe/core/common/file_utils.py +150 -26
  11. msprobe/core/common/inplace_op_checker.py +15 -0
  12. msprobe/core/common/log.py +27 -9
  13. msprobe/core/common/utils.py +182 -69
  14. msprobe/core/common_config.py +44 -15
  15. msprobe/core/compare/acc_compare.py +207 -142
  16. msprobe/core/compare/check.py +2 -5
  17. msprobe/core/compare/compare_cli.py +21 -4
  18. msprobe/core/compare/highlight.py +124 -55
  19. msprobe/core/compare/layer_mapping/__init__.py +19 -0
  20. msprobe/core/compare/layer_mapping/data_scope_parser.py +235 -0
  21. msprobe/core/compare/layer_mapping/layer_mapping.py +242 -0
  22. msprobe/core/compare/layer_mapping/postprocess_pass.py +94 -0
  23. msprobe/core/compare/npy_compare.py +52 -23
  24. msprobe/core/compare/utils.py +272 -247
  25. msprobe/core/data_dump/data_collector.py +13 -11
  26. msprobe/core/data_dump/data_processor/base.py +46 -16
  27. msprobe/core/data_dump/data_processor/mindspore_processor.py +4 -4
  28. msprobe/core/data_dump/data_processor/pytorch_processor.py +156 -59
  29. msprobe/core/data_dump/scope.py +113 -34
  30. msprobe/core/grad_probe/constant.py +27 -13
  31. msprobe/core/grad_probe/grad_compare.py +18 -1
  32. msprobe/core/grad_probe/utils.py +30 -2
  33. msprobe/core/overflow_check/abnormal_scene.py +185 -0
  34. msprobe/core/overflow_check/api_info.py +55 -0
  35. msprobe/core/overflow_check/checker.py +138 -0
  36. msprobe/core/overflow_check/filter.py +157 -0
  37. msprobe/core/overflow_check/ignore_rules.yaml +55 -0
  38. msprobe/core/overflow_check/level.py +22 -0
  39. msprobe/core/overflow_check/utils.py +28 -0
  40. msprobe/docs/01.installation.md +10 -0
  41. msprobe/docs/02.config_introduction.md +49 -22
  42. msprobe/docs/03.config_examples.md +2 -9
  43. msprobe/docs/04.kernel_dump_PyTorch.md +73 -0
  44. msprobe/docs/05.data_dump_PyTorch.md +3 -1
  45. msprobe/docs/06.data_dump_MindSpore.md +157 -90
  46. msprobe/docs/07.accuracy_checker_PyTorch.md +12 -12
  47. msprobe/docs/08.accuracy_checker_online_PyTorch.md +1 -6
  48. msprobe/docs/09.accuracy_checker_MindSpore.md +44 -8
  49. msprobe/docs/10.accuracy_compare_PyTorch.md +19 -13
  50. msprobe/docs/11.accuracy_compare_MindSpore.md +104 -13
  51. msprobe/docs/12.overflow_check_PyTorch.md +1 -1
  52. msprobe/docs/13.overflow_check_MindSpore.md +6 -6
  53. msprobe/docs/15.free_benchmarking_PyTorch.md +4 -5
  54. msprobe/docs/16.free_benchmarking_MindSpore.md +56 -37
  55. msprobe/docs/17.grad_probe.md +5 -6
  56. msprobe/docs/19.monitor.md +468 -0
  57. msprobe/docs/20.monitor_performance_baseline.md +52 -0
  58. msprobe/docs/21.visualization_PyTorch.md +386 -0
  59. msprobe/docs/22.visualization_MindSpore.md +384 -0
  60. msprobe/docs/23.tool_function_introduction.md +28 -0
  61. msprobe/docs/FAQ.md +3 -0
  62. msprobe/docs/data_dump_Mindspore/dynamic_graph_quick_start_example.md +211 -0
  63. msprobe/docs/img/compare_result.png +0 -0
  64. msprobe/docs/img/monitor/cpu_info.png +0 -0
  65. msprobe/mindspore/__init__.py +15 -0
  66. msprobe/mindspore/api_accuracy_checker/api_accuracy_checker.py +113 -145
  67. msprobe/mindspore/api_accuracy_checker/api_info.py +21 -6
  68. msprobe/mindspore/api_accuracy_checker/api_runner.py +43 -18
  69. msprobe/mindspore/api_accuracy_checker/base_compare_algorithm.py +21 -7
  70. msprobe/mindspore/api_accuracy_checker/checker_support_api.yaml +77 -0
  71. msprobe/mindspore/api_accuracy_checker/cmd_parser.py +63 -1
  72. msprobe/mindspore/api_accuracy_checker/compute_element.py +59 -24
  73. msprobe/mindspore/api_accuracy_checker/data_manager.py +264 -0
  74. msprobe/mindspore/api_accuracy_checker/main.py +27 -3
  75. msprobe/mindspore/api_accuracy_checker/multi_api_accuracy_checker.py +206 -0
  76. msprobe/mindspore/api_accuracy_checker/multi_data_manager.py +58 -0
  77. msprobe/mindspore/api_accuracy_checker/type_mapping.py +22 -5
  78. msprobe/mindspore/api_accuracy_checker/utils.py +34 -17
  79. msprobe/mindspore/cell_processor.py +33 -12
  80. msprobe/mindspore/common/const.py +33 -13
  81. msprobe/mindspore/common/log.py +5 -9
  82. msprobe/mindspore/common/utils.py +43 -4
  83. msprobe/mindspore/compare/distributed_compare.py +22 -22
  84. msprobe/mindspore/compare/ms_compare.py +271 -248
  85. msprobe/mindspore/compare/ms_graph_compare.py +81 -47
  86. msprobe/mindspore/debugger/debugger_config.py +4 -1
  87. msprobe/mindspore/debugger/precision_debugger.py +7 -1
  88. msprobe/mindspore/dump/dump_tool_factory.py +3 -1
  89. msprobe/mindspore/dump/hook_cell/api_registry.py +12 -2
  90. msprobe/mindspore/dump/hook_cell/primitive_hooks.py +13 -16
  91. msprobe/mindspore/dump/hook_cell/support_wrap_ops.yaml +25 -0
  92. msprobe/mindspore/dump/jit_dump.py +17 -5
  93. msprobe/mindspore/dump/kernel_graph_dump.py +2 -4
  94. msprobe/mindspore/dump/kernel_kbyk_dump.py +2 -4
  95. msprobe/mindspore/dym_loader/hook_dynamic_loader.cc +140 -0
  96. msprobe/mindspore/dym_loader/hook_dynamic_loader.h +53 -0
  97. msprobe/mindspore/free_benchmark/api_pynative_self_check.py +145 -39
  98. msprobe/mindspore/free_benchmark/common/handler_params.py +1 -2
  99. msprobe/mindspore/free_benchmark/common/utils.py +19 -4
  100. msprobe/mindspore/free_benchmark/data/support_wrap_ops.yaml +0 -204
  101. msprobe/mindspore/free_benchmark/handler/base_handler.py +3 -3
  102. msprobe/mindspore/free_benchmark/handler/check_handler.py +4 -5
  103. msprobe/mindspore/free_benchmark/handler/fix_handler.py +4 -4
  104. msprobe/mindspore/free_benchmark/handler/handler_factory.py +4 -4
  105. msprobe/mindspore/free_benchmark/perturbation/add_noise.py +2 -2
  106. msprobe/mindspore/free_benchmark/perturbation/base_perturbation.py +15 -6
  107. msprobe/mindspore/free_benchmark/perturbation/bit_noise.py +4 -4
  108. msprobe/mindspore/free_benchmark/perturbation/exchange_value.py +2 -2
  109. msprobe/mindspore/free_benchmark/perturbation/improve_precision.py +13 -6
  110. msprobe/mindspore/free_benchmark/perturbation/perturbation_factory.py +2 -2
  111. msprobe/mindspore/free_benchmark/self_check_tool_factory.py +2 -2
  112. msprobe/mindspore/grad_probe/global_context.py +28 -8
  113. msprobe/mindspore/grad_probe/grad_analyzer.py +27 -13
  114. msprobe/mindspore/grad_probe/grad_monitor.py +16 -1
  115. msprobe/mindspore/grad_probe/grad_stat_csv.py +33 -5
  116. msprobe/mindspore/grad_probe/hook.py +24 -10
  117. msprobe/mindspore/grad_probe/utils.py +18 -5
  118. msprobe/mindspore/ms_config.py +22 -15
  119. msprobe/mindspore/overflow_check/kernel_graph_overflow_check.py +2 -4
  120. msprobe/mindspore/runtime.py +15 -0
  121. msprobe/mindspore/service.py +36 -30
  122. msprobe/mindspore/task_handler_factory.py +15 -0
  123. msprobe/msprobe.py +24 -7
  124. msprobe/pytorch/__init__.py +3 -2
  125. msprobe/pytorch/api_accuracy_checker/common/config.py +62 -0
  126. msprobe/pytorch/api_accuracy_checker/compare/api_precision_compare.py +3 -4
  127. msprobe/pytorch/api_accuracy_checker/generate_op_script/config_op.json +9 -0
  128. msprobe/pytorch/api_accuracy_checker/generate_op_script/op_generator.py +454 -0
  129. msprobe/pytorch/api_accuracy_checker/generate_op_script/operator_replication.template +365 -0
  130. msprobe/pytorch/api_accuracy_checker/run_ut/data_generate.py +6 -1
  131. msprobe/pytorch/api_accuracy_checker/run_ut/multi_run_ut.py +19 -14
  132. msprobe/pytorch/api_accuracy_checker/run_ut/run_overflow_check.py +13 -9
  133. msprobe/pytorch/api_accuracy_checker/run_ut/run_ut.py +77 -53
  134. msprobe/pytorch/api_accuracy_checker/run_ut/run_ut_utils.py +15 -4
  135. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/attl.py +9 -24
  136. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/client.py +4 -12
  137. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/dump_dispatch.py +9 -4
  138. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/server.py +3 -11
  139. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/utils.py +2 -2
  140. msprobe/pytorch/bench_functions/confusion_transpose.py +5 -1
  141. msprobe/pytorch/bench_functions/matmul_backward.py +12 -0
  142. msprobe/pytorch/bench_functions/npu_fusion_attention.py +100 -6
  143. msprobe/pytorch/bench_functions/rotary_mul.py +4 -0
  144. msprobe/pytorch/bench_functions/swiglu.py +10 -2
  145. msprobe/pytorch/common/parse_json.py +6 -6
  146. msprobe/pytorch/common/utils.py +56 -5
  147. msprobe/pytorch/compare/distributed_compare.py +8 -9
  148. msprobe/pytorch/compare/pt_compare.py +8 -6
  149. msprobe/pytorch/debugger/debugger_config.py +19 -15
  150. msprobe/pytorch/dump/kernel_dump/kernel_config.py +33 -0
  151. msprobe/pytorch/free_benchmark/common/constant.py +15 -0
  152. msprobe/pytorch/free_benchmark/common/counter.py +15 -0
  153. msprobe/pytorch/free_benchmark/common/enums.py +15 -0
  154. msprobe/pytorch/free_benchmark/common/params.py +8 -1
  155. msprobe/pytorch/free_benchmark/common/utils.py +26 -4
  156. msprobe/pytorch/free_benchmark/compare/grad_saver.py +20 -3
  157. msprobe/pytorch/free_benchmark/compare/single_benchmark.py +2 -0
  158. msprobe/pytorch/free_benchmark/perturbed_layers/npu/add_noise.py +3 -1
  159. msprobe/pytorch/free_benchmark/perturbed_layers/npu/bit_noise.py +6 -4
  160. msprobe/pytorch/free_benchmark/perturbed_layers/npu/change_value.py +2 -0
  161. msprobe/pytorch/free_benchmark/perturbed_layers/npu/improve_precision.py +4 -0
  162. msprobe/pytorch/free_benchmark/result_handlers/base_handler.py +10 -0
  163. msprobe/pytorch/free_benchmark/result_handlers/fix_handler.py +6 -5
  164. msprobe/pytorch/grad_probe/grad_monitor.py +23 -6
  165. msprobe/pytorch/grad_probe/grad_stat_csv.py +40 -10
  166. msprobe/pytorch/hook_module/support_wrap_ops.yaml +1 -0
  167. msprobe/pytorch/hook_module/wrap_functional.py +14 -12
  168. msprobe/pytorch/module_processer.py +2 -5
  169. msprobe/pytorch/monitor/anomaly_analyse.py +201 -0
  170. msprobe/pytorch/monitor/anomaly_detect.py +340 -0
  171. msprobe/pytorch/monitor/distributed/__init__.py +0 -0
  172. msprobe/pytorch/monitor/distributed/distributed_ops.yaml +19 -0
  173. msprobe/pytorch/monitor/distributed/stack_blacklist.yaml +5 -0
  174. msprobe/pytorch/monitor/distributed/wrap_distributed.py +272 -0
  175. msprobe/pytorch/monitor/features.py +108 -0
  176. msprobe/pytorch/monitor/module_hook.py +870 -0
  177. msprobe/pytorch/monitor/module_metric.py +193 -0
  178. msprobe/pytorch/monitor/module_spec_verifier.py +93 -0
  179. msprobe/pytorch/monitor/optimizer_collect.py +295 -0
  180. msprobe/pytorch/monitor/unittest/__init__.py +0 -0
  181. msprobe/pytorch/monitor/unittest/test_monitor.py +145 -0
  182. msprobe/pytorch/monitor/utils.py +250 -0
  183. msprobe/pytorch/monitor/visualizer.py +59 -0
  184. msprobe/pytorch/online_dispatch/__init__.py +2 -3
  185. msprobe/pytorch/online_dispatch/compare.py +29 -38
  186. msprobe/pytorch/online_dispatch/dispatch.py +50 -25
  187. msprobe/pytorch/online_dispatch/dump_compare.py +21 -9
  188. msprobe/pytorch/online_dispatch/single_compare.py +53 -32
  189. msprobe/pytorch/online_dispatch/torch_ops_config.yaml +1 -1
  190. msprobe/pytorch/online_dispatch/utils.py +49 -21
  191. msprobe/pytorch/parse_tool/lib/compare.py +12 -18
  192. msprobe/pytorch/parse_tool/lib/config.py +1 -1
  193. msprobe/pytorch/parse_tool/lib/parse_tool.py +1 -2
  194. msprobe/pytorch/parse_tool/lib/utils.py +16 -35
  195. msprobe/pytorch/parse_tool/lib/visualization.py +2 -0
  196. msprobe/pytorch/pt_config.py +31 -8
  197. msprobe/pytorch/service.py +15 -5
  198. msprobe/visualization/__init__.py +14 -0
  199. msprobe/visualization/builder/__init__.py +14 -0
  200. msprobe/visualization/builder/graph_builder.py +165 -0
  201. msprobe/visualization/builder/msprobe_adapter.py +205 -0
  202. msprobe/visualization/compare/__init__.py +14 -0
  203. msprobe/visualization/compare/graph_comparator.py +130 -0
  204. msprobe/visualization/compare/mode_adapter.py +211 -0
  205. msprobe/visualization/graph/__init__.py +14 -0
  206. msprobe/visualization/graph/base_node.py +124 -0
  207. msprobe/visualization/graph/graph.py +200 -0
  208. msprobe/visualization/graph/node_colors.py +95 -0
  209. msprobe/visualization/graph/node_op.py +39 -0
  210. msprobe/visualization/graph_service.py +214 -0
  211. msprobe/visualization/utils.py +232 -0
  212. mindstudio_probe-1.1.0.dist-info/RECORD +0 -287
  213. msprobe/docs/04.acl_config_examples.md +0 -78
  214. msprobe/mindspore/compare/layer_mapping.py +0 -146
  215. msprobe/mindspore/compare/modify_mapping.py +0 -107
  216. msprobe/mindspore/free_benchmark/decorator/dec_forward.py +0 -57
  217. msprobe/mindspore/free_benchmark/decorator/decorator_factory.py +0 -122
  218. {mindstudio_probe-1.1.0.dist-info → mindstudio_probe-1.1.1.dist-info}/LICENSE +0 -0
  219. {mindstudio_probe-1.1.0.dist-info → mindstudio_probe-1.1.1.dist-info}/top_level.txt +0 -0
  220. /msprobe/{mindspore/free_benchmark/decorator → pytorch/monitor}/__init__.py +0 -0
msprobe/pytorch/monitor/distributed/wrap_distributed.py
@@ -0,0 +1,272 @@
+ # Copyright (c) 2024-2024, Huawei Technologies Co., Ltd.
+ # All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import os
+ import re
+ import inspect
+
+ import torch
+ import torch.nn as nn
+ import torch.distributed as dist
+
+ from msprobe.core.common.file_utils import load_yaml
+ from msprobe.core.common.const import MonitorConst
+ from msprobe.pytorch.monitor.module_metric import get_metrics, get_summary_writer_tag_name
+
+ try:
+     import torch_npu
+ except ImportError:
+     pass
+
+ RANK = None
+
+ OpsPath = os.path.join(os.path.dirname(__file__), "distributed_ops.yaml")
+ WrapDistributedOps = load_yaml(OpsPath).get("distributed", [])
+
+ StackBlackListPath = os.path.join(os.path.dirname(__file__), "stack_blacklist.yaml")
+ StackBlackList = load_yaml(StackBlackListPath).get("stack", [])
+
+ distributed_func = {}
+ for f in dir(dist):
+     distributed_func[f] = getattr(dist, f)
+
+ ORIGIN_WAIT = getattr(dist.Work, 'wait')
+ PENDING_ASYNC_CC_BY_HANDLE = {}
+
+
+ def get_distributed_ops():
+     global WrapDistributedOps
+     _all_distributed_ops = dir(dist)
+     return set(WrapDistributedOps) & set(_all_distributed_ops)
+
+
+ class DistributedOPTemplate(nn.Module):
+     def __init__(self, op_name, pre_hooks, post_hooks):
+         super(DistributedOPTemplate, self).__init__()
+         self.op_name_ = str(op_name)
+         self.__name__ = self.op_name_
+         for pre_hook in pre_hooks:
+             self.register_forward_pre_hook(pre_hook, with_kwargs=True)
+         for hook in post_hooks:
+             self.register_forward_hook(hook, with_kwargs=True)
+
+     def forward(self, *args, **kwargs):
+         return distributed_func.get(self.op_name_)(*args, **kwargs)
+
+
+ class ApiRegistry:
+     def __init__(self):
+         self.distributed_attr_origin = {}
+         self.distributed_attr_hooked = {}
+
+     @staticmethod
+     def store_ori_attr(ori_api_group, api_list, api_ori_attr):
+         for api in api_list:
+             if '.' in api:
+                 sub_module_name, sub_op = api.rsplit('.', 1)
+                 sub_module = getattr(ori_api_group, sub_module_name)
+                 api_ori_attr[api] = getattr(sub_module, sub_op)
+             else:
+                 api_ori_attr[api] = getattr(ori_api_group, api)
+
+     @staticmethod
+     def set_api_attr(api_group, attr_dict):
+         for cc_api_name, cc_api_entry_func in attr_dict.items():
+             if '.' in cc_api_name:
+                 sub_module_name, sub_op = cc_api_name.rsplit('.', 1)
+                 sub_module = getattr(api_group, sub_module_name, None)
+                 if sub_module is not None:
+                     setattr(sub_module, sub_op, cc_api_entry_func)
+             else:
+                 setattr(api_group, cc_api_name, cc_api_entry_func)
+
+     @staticmethod
+     def redirect_wait():
+         global ORIGIN_WAIT
+         global PENDING_ASYNC_CC_BY_HANDLE
+
+         def wrapped_wait(work):
+             def wrapped_wait(*args, **kwargs):
+                 ORIGIN_WAIT(*args, **kwargs)
+                 if args[0] in PENDING_ASYNC_CC_BY_HANDLE:
+                     store_func = PENDING_ASYNC_CC_BY_HANDLE.pop(args[0])
+                     store_func()
+
+             return wrapped_wait
+
+         dist.Work.wait = wrapped_wait(dist.Work)
+
+     def redirect_api(self):
+         self.set_api_attr(dist, self.distributed_attr_hooked)
+         self.set_api_attr(dist.distributed_c10d, self.distributed_attr_hooked)
+         self.redirect_wait()
+
+     def restore_api(self):
+         self.set_api_attr(dist, self.distributed_attr_origin)
+         self.set_api_attr(dist.distributed_c10d, self.distributed_attr_origin)
+         setattr(dist.Work, 'wait', ORIGIN_WAIT)
+
+     def initialize_hook(self, pre_hooks, post_hooks):
+         self.store_ori_attr(dist, get_distributed_ops(), self.distributed_attr_origin)
+         for op_name in get_distributed_ops():
+             self.distributed_attr_hooked[op_name] = DistributedOPTemplate(op_name, pre_hooks, post_hooks)
+
+
+ def get_process_group(process_group):
+     return (
+         process_group
+         if isinstance(process_group, dist.ProcessGroup)
+         else dist.GroupMember.WORLD
+     )
+
+
+ def stack_filter(stack):
+     for pattern in StackBlackList:
+         if re.search(pattern, stack):
+             return False
+     return True
+
+
+ def get_callstack():
+     callstack = []
+     for (_, path, line, func, _, _) in inspect.stack():
+         stack_line = f'{path}[{line}]'
+         if stack_filter(stack_line):
+             callstack.append(stack_line + ' ' + func)
+     return callstack
+
+
+ @torch.no_grad()
+ def op_aggregate(op, tensorlist):
+     if isinstance(tensorlist, torch.Tensor):
+         return tensorlist
+     if not tensorlist:
+         return torch.tensor(torch.nan)
+     if op == 'min':
+         return min(tensorlist)
+     if op == 'max':
+         return max(tensorlist)
+     if op == 'norm':
+         return sum(tensorlist)
+     if op == 'zeros':
+         return sum(tensorlist) / len(tensorlist)
+     if op == 'nans':
+         return sum(tensorlist)
+     if op == 'mean':
+         return sum(tensorlist) / len(tensorlist)
+     return torch.tensor(torch.nan)
+
+
+ def update_data(old, new):
+     for tag, op2tensor in new.items():
+         if tag not in old:
+             old[tag] = {}
+         for op, tensor in op2tensor.items():
+             if op not in old[tag]:
+                 old[tag][op] = [tensor]
+             else:
+                 old[tag][op].append(tensor)
+     return old
+
+
+ def is_target_line(codeline):
+     stack = get_callstack()
+     whole_stack = ';'.join(stack)
+     if codeline == []:
+         return True
+     for pattern in codeline:
+         if re.search(pattern, whole_stack):
+             return True
+     return False
+
+
+ @torch.no_grad()
+ def catch_data(cc_context, cc_name, ops, args, prefix):
+     tensor_args = {}
+     for arg in args:
+         if isinstance(arg, torch.Tensor):
+             key = get_summary_writer_tag_name(cc_name, f'{prefix}_{len(tensor_args)}', RANK)
+             tensor_args[key] = arg
+         elif isinstance(arg, list):
+             if isinstance(arg[0], torch.Tensor):
+                 stacked_arg = torch.stack(arg)
+             elif isinstance(arg[0], dist.P2POp):
+                 stacked_arg = torch.stack([op.tensor for op in arg])
+             key = get_summary_writer_tag_name(cc_name, f'{prefix}_{len(tensor_args)}', RANK)
+             tensor_args[key] = stacked_arg
+
+     new_data = get_metrics(ops, tensor_args, 1e-8)
+     cc_context.data = update_data(cc_context.data, new_data)
+
+
+ def create_async_callback_func(context, cc_name, ops, args, prefix):
+     def store_data():
+         catch_data(context, cc_name, ops, args, prefix)
+
+     return store_data
+
+
+ def create_hooks(context, monitor):
+     def cc_log_hook(module, args, kwargs):
+         stack = ';'.join(get_callstack())
+         monitor.cc_logged_stack[module.op_name_].add(stack)
+         return
+
+     def cc_pre_hook(module, args, kwargs):
+         if not is_target_line(monitor.cc_codeline):
+             return
+         args = args + tuple(kwargs.values())
+         catch_data(context[module.op_name_], module.op_name_, monitor.ops, args, MonitorConst.PREFIX_PRE)
+         return
+
+     def cc_hook(module, args, kwargs, out=None):
+         if not is_target_line(monitor.cc_codeline):
+             return out
+         args = args + tuple(kwargs.values())
+         if out:  # async
+             if isinstance(out, dist.Work):
+                 PENDING_ASYNC_CC_BY_HANDLE[out] = create_async_callback_func(context[module.op_name_],
+                                                                              module.op_name_,
+                                                                              monitor.ops, args, MonitorConst.PREFIX_POST)
+             elif isinstance(out, list):  # batch_isend_irecv
+                 for out_element in out:
+                     PENDING_ASYNC_CC_BY_HANDLE[out_element] = create_async_callback_func(context[module.op_name_],
+                                                                                          module.op_name_,
+                                                                                          monitor.ops, args,
+                                                                                          MonitorConst.PREFIX_POST)
+             return out
+         catch_data(context[module.op_name_], module.op_name_, monitor.ops, args, MonitorConst.PREFIX_POST)
+         return out
+
+     global RANK
+     pre_hooks = []
+     hooks = []
+     RANK = dist.get_rank()
+     if dist.is_initialized() and RANK not in monitor.module_rank_list and monitor.module_rank_list != []:
+         return [pre_hooks, hooks]
+
+     if monitor.cc_log_only:
+         pre_hooks.append(cc_log_hook)
+         return [pre_hooks, hooks]
+
+     if monitor.cc_pre_hook:
+         pre_hooks.append(cc_pre_hook)
+
+     hooks.append(cc_hook)
+
+     return [pre_hooks, hooks]
+
+
+ api_register = ApiRegistry()
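
For orientation, here is a minimal usage sketch of the collective-communication wrapping added above: create_hooks builds the pre/post hooks, initialize_hook wraps each op listed in distributed_ops.yaml in a DistributedOPTemplate, and redirect_api patches the wrappers into torch.distributed. This is not the package's own wiring (that presumably lives in msprobe/pytorch/monitor/module_hook.py); the SimpleContext and SimpleMonitor stand-ins, the single-process gloo group, the chosen metric names, and the assumption that all_reduce appears in distributed_ops.yaml are all illustrative.

# Minimal sketch; SimpleContext/SimpleMonitor are hypothetical stand-ins,
# not classes from the package.
import os
from collections import defaultdict

import torch
import torch.distributed as dist

from msprobe.pytorch.monitor.distributed.wrap_distributed import api_register, create_hooks


class SimpleContext:
    """Per-op container that catch_data() writes aggregated metrics into."""
    def __init__(self):
        self.data = {}


class SimpleMonitor:
    """Hypothetical config object exposing the attributes create_hooks() reads."""
    ops = ["min", "max", "norm"]     # metric names, assumed to match op_aggregate()
    module_rank_list = []            # empty list: hook every rank
    cc_codeline = []                 # empty list: no call-site filtering
    cc_log_only = False              # collect stats rather than only call stacks
    cc_pre_hook = True               # also capture tensors before the collective runs
    cc_logged_stack = defaultdict(set)


def main():
    # single-process gloo group so dist.all_reduce is callable locally
    os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
    os.environ.setdefault("MASTER_PORT", "29500")
    dist.init_process_group("gloo", rank=0, world_size=1)

    context = defaultdict(SimpleContext)
    pre_hooks, post_hooks = create_hooks(context, SimpleMonitor())

    # replace dist.* collectives with hooked DistributedOPTemplate wrappers
    api_register.initialize_hook(pre_hooks, post_hooks)
    api_register.redirect_api()

    dist.all_reduce(torch.ones(4))      # goes through the hooked wrapper
    print(context["all_reduce"].data)   # per-call metrics, assuming all_reduce is wrapped

    api_register.restore_api()          # put the original dist.* functions back
    dist.destroy_process_group()


if __name__ == "__main__":
    main()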
msprobe/pytorch/monitor/features.py
@@ -0,0 +1,108 @@
+ # Copyright (c) 2024-2024, Huawei Technologies Co., Ltd.
+ # All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import torch
+ from torch.autograd.functional import jacobian
+ from msprobe.core.common.log import logger
+
+
+ @torch.no_grad()
+ def square_sum(x: torch.tensor):
+     return (x * x).sum()
+
+
+ @torch.no_grad()
+ def get_min(x: torch.tensor):
+     return torch.min(x)
+
+
+ @torch.no_grad()
+ def get_mean(x: torch.tensor):
+     return torch.mean(x)
+
+
+ @torch.no_grad()
+ def get_norm(x: torch.tensor):
+     return torch.norm(x, p=2)
+
+
+ @torch.no_grad()
+ def get_max(x: torch.tensor):
+     return torch.max(x)
+
+
+ @torch.no_grad()
+ def get_zeros(x: torch.tensor, eps: float):
+     return torch.sum(torch.abs(x) < eps) / x.numel()
+
+
+ @torch.no_grad()
+ def get_sign_matches(x: torch.tensor, y: torch.tensor):
+     xs = x.sign()
+     ys = y.sign()
+     try:
+         same_direction_ratio = ((xs * ys).sum() / ys.numel() + 1) / 2
+     except RuntimeError as e:
+         logger.info(f"RuntimeError: {e}")
+         same_direction_ratio = torch.tensor(0.)
+     return same_direction_ratio
+
+
+ @torch.no_grad()
+ def eff_rank(param: torch.tensor, threshold=1e-10):
+     U, S, Vh = torch.linalg.svd(param.float())
+     rank = torch.sum(S > threshold)
+     return rank
+
+
+ # modular neural tangent kernel
+ @torch.no_grad()
+ def mNTK(module: torch.nn.Module, x: torch.tensor):
+     J_theta_l = jacobian(module, x)
+     mntk = torch.matmul(J_theta_l, J_theta_l.t())
+     return mntk
+
+
+ @torch.no_grad()
+ def power_iteration(a, num_iterations):
+     b = torch.randn(a.size(1), 1)
+     for _ in range(num_iterations):
+         b = torch.matmul(a, b)
+         b_norm = torch.norm(b)
+         b = b / b_norm if b_norm != 0 else 0
+     eigval = torch.matmul(torch.matmul(b.t(), a), b)
+     return eigval
+
+
+ @torch.no_grad()
+ def lambda_max_subsample(module: torch.nn.Module, x: torch.tensor, num_iterations=100, subsample_size=None):
+     mntk = mNTK(module, x)
+     if subsample_size is None:
+         subsample_size = min(mntk.size(0), mntk.size(1))
+     idx = torch.randperm(mntk.size(0))[:subsample_size]
+     subsampled = mntk[idx, :]
+     subsampled = subsampled[:, idx]
+     eigval = power_iteration(subsampled, num_iterations)
+     return eigval
+
+
+ @torch.no_grad()
+ def cal_histc(tensor_cal, bins_total, min_val, max_val):
+     return torch.histc(tensor_cal, bins=bins_total, min=min_val, max=max_val)
+
+
+ @torch.no_grad()
+ def get_nans(t):
+     return torch.isnan(t).sum()
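
For orientation, a short sketch of calling the statistics above directly on a weight tensor and a small module; the shapes, seed, and the interpretation comments are illustrative assumptions rather than output from the package.

# Minimal sketch applying the monitor feature functions to toy inputs.
import torch
import torch.nn as nn

from msprobe.pytorch.monitor.features import (
    get_norm, get_zeros, get_nans, eff_rank, lambda_max_subsample
)

torch.manual_seed(0)
w = torch.randn(64, 64)

print(get_norm(w))             # L2 norm over all elements
print(get_zeros(w, eps=1e-8))  # fraction of near-zero elements
print(get_nans(w))             # count of NaN elements (0 for this input)
print(eff_rank(w))             # number of singular values above the 1e-10 threshold

# lambda_max_subsample estimates the largest eigenvalue of the module's
# "modular NTK" (J @ J.T) via power iteration on a random row/column subsample;
# a 1-D input keeps the jacobian 2-D so the transpose inside mNTK is valid.
layer = nn.Linear(16, 16, bias=False)
x = torch.randn(16)
print(lambda_max_subsample(layer, x, num_iterations=50, subsample_size=8))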