mindstudio-probe 1.2.1__py3-none-any.whl → 1.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (177)
  1. {mindstudio_probe-1.2.1.dist-info → mindstudio_probe-1.3.0.dist-info}/METADATA +3 -3
  2. {mindstudio_probe-1.2.1.dist-info → mindstudio_probe-1.3.0.dist-info}/RECORD +168 -150
  3. msprobe/README.md +27 -22
  4. msprobe/core/common/const.py +129 -60
  5. msprobe/core/common/decorator.py +50 -0
  6. msprobe/core/common/exceptions.py +3 -1
  7. msprobe/core/common/file_utils.py +25 -2
  8. msprobe/core/common/inplace_ops.yaml +1 -0
  9. msprobe/core/common/utils.py +43 -33
  10. msprobe/core/compare/acc_compare.py +43 -74
  11. msprobe/core/compare/check.py +2 -6
  12. msprobe/core/compare/highlight.py +2 -0
  13. msprobe/core/compare/layer_mapping/data_scope_parser.py +1 -1
  14. msprobe/core/compare/layer_mapping/layer_mapping.py +2 -1
  15. msprobe/core/compare/merge_result/merge_result.py +16 -9
  16. msprobe/core/compare/merge_result/utils.py +81 -0
  17. msprobe/core/compare/multiprocessing_compute.py +19 -12
  18. msprobe/core/compare/npy_compare.py +30 -12
  19. msprobe/core/compare/utils.py +30 -10
  20. msprobe/core/data_dump/api_registry.py +176 -0
  21. msprobe/core/data_dump/data_collector.py +58 -13
  22. msprobe/core/data_dump/data_processor/base.py +94 -10
  23. msprobe/core/data_dump/data_processor/factory.py +3 -0
  24. msprobe/core/data_dump/data_processor/mindspore_processor.py +33 -33
  25. msprobe/core/data_dump/data_processor/pytorch_processor.py +99 -18
  26. msprobe/core/data_dump/json_writer.py +61 -40
  27. msprobe/core/grad_probe/constant.py +1 -0
  28. msprobe/core/grad_probe/grad_compare.py +1 -1
  29. msprobe/core/overflow_check/abnormal_scene.py +2 -0
  30. msprobe/docs/01.installation.md +27 -1
  31. msprobe/docs/02.config_introduction.md +27 -23
  32. msprobe/docs/03.config_examples.md +24 -0
  33. msprobe/docs/05.data_dump_PyTorch.md +103 -16
  34. msprobe/docs/06.data_dump_MindSpore.md +76 -32
  35. msprobe/docs/07.accuracy_checker_PyTorch.md +11 -1
  36. msprobe/docs/08.accuracy_checker_online_PyTorch.md +3 -1
  37. msprobe/docs/09.accuracy_checker_MindSpore.md +5 -3
  38. msprobe/docs/10.accuracy_compare_PyTorch.md +59 -33
  39. msprobe/docs/11.accuracy_compare_MindSpore.md +40 -16
  40. msprobe/docs/12.overflow_check_PyTorch.md +3 -1
  41. msprobe/docs/13.overflow_check_MindSpore.md +4 -2
  42. msprobe/docs/14.data_parse_PyTorch.md +1 -7
  43. msprobe/docs/18.online_dispatch.md +1 -1
  44. msprobe/docs/19.monitor.md +332 -273
  45. msprobe/docs/21.visualization_PyTorch.md +42 -13
  46. msprobe/docs/22.visualization_MindSpore.md +43 -13
  47. msprobe/docs/23.generate_operator_PyTorch.md +9 -9
  48. msprobe/docs/27.dump_json_instruction.md +301 -27
  49. msprobe/docs/28.debugger_save_instruction.md +94 -0
  50. msprobe/docs/28.kernel_dump_MindSpore.md +69 -0
  51. msprobe/docs/29.data_dump_MSAdapter.md +229 -0
  52. msprobe/docs/30.overflow_check_MSAdapter.md +31 -0
  53. msprobe/docs/FAQ.md +3 -11
  54. msprobe/docs/img/compare_result.png +0 -0
  55. msprobe/docs/img/merge_result.png +0 -0
  56. msprobe/docs/img/monitor/step_count_per_record.png +0 -0
  57. msprobe/docs/img/visualization/vis_browser_1.png +0 -0
  58. msprobe/docs/img/visualization/vis_match_info.png +0 -0
  59. msprobe/docs/img/visualization/vis_precision_info.png +0 -0
  60. msprobe/docs/img/visualization/vis_search_info.png +0 -0
  61. msprobe/docs/img/visualization/vis_show_info.png +0 -0
  62. msprobe/docs/img/visualization/vis_showcase.png +0 -0
  63. msprobe/docs/img/visualization/vis_unmatch_info.png +0 -0
  64. msprobe/mindspore/__init__.py +4 -2
  65. msprobe/mindspore/api_accuracy_checker/api_accuracy_checker.py +32 -7
  66. msprobe/mindspore/api_accuracy_checker/api_runner.py +70 -22
  67. msprobe/mindspore/api_accuracy_checker/base_compare_algorithm.py +2 -1
  68. msprobe/mindspore/api_accuracy_checker/bench_functions/flash_attention_score.py +602 -0
  69. msprobe/mindspore/api_accuracy_checker/bench_functions/fusion_operator.py +41 -0
  70. msprobe/mindspore/api_accuracy_checker/compute_element.py +47 -1
  71. msprobe/mindspore/api_accuracy_checker/data_manager.py +2 -1
  72. msprobe/mindspore/api_accuracy_checker/multi_api_accuracy_checker.py +2 -1
  73. msprobe/mindspore/api_accuracy_checker/torch_mindtorch_importer.py +130 -0
  74. msprobe/mindspore/api_accuracy_checker/type_mapping.py +24 -1
  75. msprobe/mindspore/api_accuracy_checker/utils.py +6 -1
  76. msprobe/mindspore/common/const.py +61 -0
  77. msprobe/mindspore/common/utils.py +48 -18
  78. msprobe/mindspore/compare/ms_compare.py +27 -19
  79. msprobe/mindspore/compare/ms_graph_compare.py +6 -5
  80. msprobe/mindspore/debugger/debugger_config.py +31 -6
  81. msprobe/mindspore/debugger/precision_debugger.py +45 -14
  82. msprobe/mindspore/dump/dump_tool_factory.py +5 -3
  83. msprobe/mindspore/dump/hook_cell/api_register.py +142 -0
  84. msprobe/mindspore/dump/hook_cell/hook_cell.py +9 -10
  85. msprobe/mindspore/dump/hook_cell/support_wrap_ops.yaml +24 -26
  86. msprobe/mindspore/dump/jit_dump.py +21 -15
  87. msprobe/mindspore/dym_loader/hook_dynamic_loader.cc +22 -56
  88. msprobe/mindspore/dym_loader/hook_dynamic_loader.h +0 -1
  89. msprobe/mindspore/free_benchmark/api_pynative_self_check.py +10 -6
  90. msprobe/mindspore/free_benchmark/perturbation/perturbation_factory.py +4 -2
  91. msprobe/mindspore/free_benchmark/self_check_tool_factory.py +6 -3
  92. msprobe/mindspore/grad_probe/global_context.py +2 -0
  93. msprobe/mindspore/grad_probe/grad_analyzer.py +2 -1
  94. msprobe/mindspore/grad_probe/hook.py +2 -4
  95. msprobe/mindspore/monitor/anomaly_detect.py +404 -0
  96. msprobe/mindspore/monitor/distributed/__init__.py +0 -0
  97. msprobe/mindspore/monitor/distributed/distributed_ops.yaml +15 -0
  98. msprobe/mindspore/monitor/distributed/stack_blacklist.yaml +5 -0
  99. msprobe/mindspore/monitor/distributed/wrap_distributed.py +300 -0
  100. msprobe/mindspore/monitor/features.py +63 -0
  101. msprobe/mindspore/monitor/module_hook.py +873 -0
  102. msprobe/mindspore/monitor/module_spec_verifier.py +94 -0
  103. msprobe/mindspore/monitor/utils.py +309 -0
  104. msprobe/mindspore/ms_config.py +8 -2
  105. msprobe/mindspore/overflow_check/overflow_check_tool_factory.py +5 -3
  106. msprobe/mindspore/service.py +114 -34
  107. msprobe/pytorch/__init__.py +0 -1
  108. msprobe/pytorch/api_accuracy_checker/compare/api_precision_compare.py +3 -6
  109. msprobe/pytorch/api_accuracy_checker/generate_op_script/op_generator.py +12 -7
  110. msprobe/pytorch/api_accuracy_checker/generate_op_script/operator_replication.template +2 -2
  111. msprobe/pytorch/api_accuracy_checker/run_ut/multi_run_ut.py +4 -5
  112. msprobe/pytorch/api_accuracy_checker/run_ut/run_overflow_check.py +5 -5
  113. msprobe/pytorch/api_accuracy_checker/run_ut/run_ut.py +25 -6
  114. msprobe/pytorch/api_accuracy_checker/run_ut/run_ut_utils.py +28 -19
  115. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/attl.py +3 -1
  116. msprobe/pytorch/bench_functions/apply_adam.py +215 -0
  117. msprobe/pytorch/bench_functions/group_norm_silu.py +27 -0
  118. msprobe/pytorch/{parse.py → bench_functions/mish.py} +6 -4
  119. msprobe/pytorch/bench_functions/moe_gating_top_k_softmax.py +50 -0
  120. msprobe/pytorch/bench_functions/sort_v2.py +21 -0
  121. msprobe/pytorch/common/utils.py +97 -4
  122. msprobe/pytorch/debugger/debugger_config.py +19 -9
  123. msprobe/pytorch/debugger/precision_debugger.py +24 -1
  124. msprobe/pytorch/dump/module_dump/module_dump.py +4 -3
  125. msprobe/pytorch/dump/module_dump/module_processer.py +21 -35
  126. msprobe/pytorch/free_benchmark/common/utils.py +1 -1
  127. msprobe/pytorch/free_benchmark/compare/single_benchmark.py +1 -1
  128. msprobe/pytorch/free_benchmark/perturbed_layers/npu/add_noise.py +3 -3
  129. msprobe/pytorch/free_benchmark/perturbed_layers/npu/bit_noise.py +3 -3
  130. msprobe/pytorch/free_benchmark/perturbed_layers/npu/change_value.py +1 -1
  131. msprobe/pytorch/free_benchmark/perturbed_layers/npu/improve_precision.py +1 -1
  132. msprobe/pytorch/free_benchmark/result_handlers/check_handler.py +1 -1
  133. msprobe/pytorch/function_factory.py +8 -2
  134. msprobe/pytorch/grad_probe/grad_monitor.py +2 -2
  135. msprobe/pytorch/hook_module/api_register.py +131 -0
  136. msprobe/pytorch/hook_module/hook_module.py +19 -14
  137. msprobe/pytorch/hook_module/register_optimizer_hook.py +2 -1
  138. msprobe/pytorch/hook_module/support_wrap_ops.yaml +173 -75
  139. msprobe/pytorch/monitor/anomaly_detect.py +14 -29
  140. msprobe/pytorch/monitor/csv2tb.py +18 -14
  141. msprobe/pytorch/monitor/distributed/wrap_distributed.py +8 -2
  142. msprobe/pytorch/monitor/module_hook.py +238 -193
  143. msprobe/pytorch/monitor/module_metric.py +9 -6
  144. msprobe/pytorch/monitor/optimizer_collect.py +100 -67
  145. msprobe/pytorch/monitor/unittest/test_monitor.py +1 -1
  146. msprobe/pytorch/monitor/utils.py +76 -44
  147. msprobe/pytorch/online_dispatch/compare.py +0 -2
  148. msprobe/pytorch/online_dispatch/dispatch.py +9 -0
  149. msprobe/pytorch/online_dispatch/dump_compare.py +3 -0
  150. msprobe/pytorch/online_dispatch/utils.py +3 -0
  151. msprobe/pytorch/parse_tool/lib/interactive_cli.py +1 -6
  152. msprobe/pytorch/parse_tool/lib/utils.py +2 -1
  153. msprobe/pytorch/pt_config.py +30 -29
  154. msprobe/pytorch/service.py +114 -32
  155. msprobe/visualization/builder/graph_builder.py +75 -10
  156. msprobe/visualization/builder/msprobe_adapter.py +7 -6
  157. msprobe/visualization/compare/graph_comparator.py +42 -38
  158. msprobe/visualization/compare/mode_adapter.py +0 -19
  159. msprobe/visualization/graph/base_node.py +11 -3
  160. msprobe/visualization/graph/distributed_analyzer.py +71 -3
  161. msprobe/visualization/graph/graph.py +0 -11
  162. msprobe/visualization/graph/node_op.py +4 -3
  163. msprobe/visualization/graph_service.py +4 -5
  164. msprobe/visualization/utils.py +12 -35
  165. msprobe/mindspore/dump/hook_cell/api_registry.py +0 -205
  166. msprobe/mindspore/dump/hook_cell/wrap_api.py +0 -212
  167. msprobe/pytorch/hook_module/api_registry.py +0 -166
  168. msprobe/pytorch/hook_module/wrap_distributed.py +0 -75
  169. msprobe/pytorch/hook_module/wrap_functional.py +0 -66
  170. msprobe/pytorch/hook_module/wrap_npu_custom.py +0 -85
  171. msprobe/pytorch/hook_module/wrap_tensor.py +0 -69
  172. msprobe/pytorch/hook_module/wrap_torch.py +0 -84
  173. msprobe/pytorch/hook_module/wrap_vf.py +0 -60
  174. {mindstudio_probe-1.2.1.dist-info → mindstudio_probe-1.3.0.dist-info}/LICENSE +0 -0
  175. {mindstudio_probe-1.2.1.dist-info → mindstudio_probe-1.3.0.dist-info}/WHEEL +0 -0
  176. {mindstudio_probe-1.2.1.dist-info → mindstudio_probe-1.3.0.dist-info}/entry_points.txt +0 -0
  177. {mindstudio_probe-1.2.1.dist-info → mindstudio_probe-1.3.0.dist-info}/top_level.txt +0 -0
msprobe/mindspore/monitor/distributed/wrap_distributed.py (new file)
@@ -0,0 +1,300 @@
+ # Copyright (c) 2024-2025, Huawei Technologies Co., Ltd.
+ # All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import inspect
+ import os
+ import re
+
+ import numpy as np
+
+ from mindspore import nn, Tensor, ops, _no_grad
+ from mindspore import communication
+ from mindspore.communication import comm_func, get_rank
+
+ from msprobe.core.common.const import MonitorConst, Const
+ from msprobe.core.common.file_utils import load_yaml
+ from msprobe.mindspore.monitor.utils import get_metrics, get_summary_writer_tag_name
+
+ enable_communication = True
+ try:
+     from mindspore._c_expression import CommHandle as CommHandle_
+ except ImportError:
+     enable_communication = False
+
+
+ RANK = None
+
+ OpsPath = os.path.join(os.path.dirname(__file__), "distributed_ops.yaml")
+ WrapDistributedOps = load_yaml(OpsPath).get("communication.comm_func", [])
+
+ StackBlackListPath = os.path.join(os.path.dirname(__file__), "stack_blacklist.yaml")
+ StackBlackList = load_yaml(StackBlackListPath).get("stack", [])
+
+ distributed_func = {}
+ for f in dir(comm_func):
+     distributed_func[f] = getattr(comm_func, f)
+
+ ORIGIN_WAIT = CommHandle_.wait if enable_communication else None
+ PENDING_ASYNC_CC_BY_HANDLE = {}
+
+
+ def get_distributed_ops():
+     global WrapDistributedOps
+     _all_distributed_ops = dir(comm_func)
+     return set(WrapDistributedOps) & set(_all_distributed_ops)
+
+
+ class DistributedOPTemplate(nn.Cell):
+     def __init__(self, op_name, pre_hooks, post_hooks):
+         super(DistributedOPTemplate, self).__init__()
+         self.op_name_ = str(op_name)
+         self.__name__ = self.op_name_
+         self.cc_hooks = []
+         for pre_hook in pre_hooks:
+             handle = self.register_forward_pre_hook(pre_hook)
+             self.cc_hooks.append(handle)
+         for hook in post_hooks:
+             handle = self.register_forward_hook(hook)
+             self.cc_hooks.append(handle)
+
+     def construct(self, *args, **kwargs):
+         return distributed_func.get(self.op_name_)(*args, **kwargs)
+
+     def forward(self, *args, **kwargs):
+         return distributed_func.get(self.op_name_)(*args, **kwargs)
+
+
+ class ApiRegistry:
+     def __init__(self):
+         self.distributed_attr_origin = {}
+         self.distributed_attr_hooked = {}
+
+     @staticmethod
+     def store_ori_attr(ori_api_group, api_list, api_ori_attr):
+         for api in api_list:
+             if Const.SEP in api:
+                 sub_module_name, sub_op = api.rsplit(Const.SEP, 1)
+                 sub_module = getattr(ori_api_group, sub_module_name)
+                 api_ori_attr[api] = getattr(sub_module, sub_op)
+             else:
+                 api_ori_attr[api] = getattr(ori_api_group, api)
+
+     @staticmethod
+     def set_api_attr(api_group, attr_dict):
+         for cc_api_name, cc_api_entry_func in attr_dict.items():
+             if Const.SEP in cc_api_name:
+                 sub_module_name, sub_op = cc_api_name.rsplit(Const.SEP, 1)
+                 sub_module = getattr(api_group, sub_module_name, None)
+                 if sub_module is not None:
+                     setattr(sub_module, sub_op, cc_api_entry_func)
+             else:
+                 setattr(api_group, cc_api_name, cc_api_entry_func)
+
+     @staticmethod
+     def redirect_wait():
+         global ORIGIN_WAIT
+         global PENDING_ASYNC_CC_BY_HANDLE
+         if not ORIGIN_WAIT:
+             return
+
+         def wrapped_wait(work):
+             def wrapped_wait(*args, **kwargs):
+                 ORIGIN_WAIT(*args, **kwargs)
+                 if args[0] in PENDING_ASYNC_CC_BY_HANDLE:
+                     store_func = PENDING_ASYNC_CC_BY_HANDLE.pop(args[0])
+                     store_func()
+
+             return wrapped_wait
+
+         CommHandle_.wait = wrapped_wait(CommHandle_)
+
+     def redirect_api(self):
+         self.set_api_attr(comm_func, self.distributed_attr_hooked)
+         self.redirect_wait()
+
+     def restore_api(self):
+         if not ORIGIN_WAIT:
+             return
+         self.set_api_attr(comm_func, self.distributed_attr_origin)
+         setattr(CommHandle_, 'wait', ORIGIN_WAIT)
+
+     def initialize_hook(self, pre_hooks, post_hooks):
+         self.store_ori_attr(comm_func, get_distributed_ops(), self.distributed_attr_origin)
+         cc_hooks = []
+         for op_name in get_distributed_ops():
+             self.distributed_attr_hooked[op_name] = DistributedOPTemplate(op_name, pre_hooks, post_hooks)
+             cc_hooks.extend(self.distributed_attr_hooked[op_name].cc_hooks)
+         return cc_hooks
+
+
+ def get_process_group(process_group):
+     return (
+         process_group
+         if process_group
+         else comm_func.HCCL_WORLD_GROUP
+     )
+
+
+ def stack_filter(stack):
+     for pattern in StackBlackList:
+         if re.search(pattern, stack):
+             return False
+     return True
+
+
+ def get_callstack():
+     callstack = []
+     for (_, path, line, func, _, _) in inspect.stack():
+         stack_line = f'{path}[{line}]'
+         if stack_filter(stack_line):
+             callstack.append(stack_line + ' ' + func)
+     return callstack
+
+
+ @_no_grad()
+ def op_aggregate(op, tensorlist):
+     if isinstance(tensorlist, Tensor):
+         return tensorlist
+     if not tensorlist:
+         return Tensor(float('nan'))
+     if op == 'min':
+         return min(tensorlist)
+     if op == 'max':
+         return max(tensorlist)
+     if op == 'norm':
+         return sum(tensorlist)
+     if op == 'zeros':
+         return sum(tensorlist) / len(tensorlist)
+     if op == 'nans':
+         return sum(tensorlist)
+     if op == 'mean':
+         return sum(tensorlist) / len(tensorlist)
+     return Tensor(float('nan'))
+
+
+ def update_data(old, new):
+     for tag, op2tensor in new.items():
+         if tag not in old:
+             old[tag] = {}
+         for op, tensor in op2tensor.items():
+             if op not in old[tag]:
+                 old[tag][op] = [tensor]
+             else:
+                 old[tag][op].append(tensor)
+     return old
+
+
+ def is_target_line(codeline):
+     stack = get_callstack()
+     whole_stack = ';'.join(stack)
+     if codeline == []:
+         return True
+     for pattern in codeline:
+         if re.search(pattern, whole_stack):
+             return True
+     return False
+
+
+ @_no_grad()
+ def catch_data(cc_context, cc_name, ops_list, args, prefix):
+     tensor_args = {}
+     for arg in args:
+         if isinstance(arg, Tensor):
+             key = get_summary_writer_tag_name(cc_name, f'{prefix}_{len(tensor_args)}', RANK)
+             tensor_args[key] = arg
+         elif isinstance(arg, list):
+             if isinstance(arg[0], Tensor):
+                 stacked_arg = ops.stack(arg)
+             elif isinstance(arg[0], comm_func.P2POp):
+                 stacked_arg = ops.stack([op.tensor for op in arg])
+             key = get_summary_writer_tag_name(cc_name, f'{prefix}_{len(tensor_args)}', RANK)
+             tensor_args[key] = stacked_arg
+
+     new_data = get_metrics(ops_list, tensor_args, 1e-8)
+     cc_context.data = update_data(cc_context.data, new_data)
+
+
+ def create_async_callback_func(context, cc_name, ops_list, args, prefix):
+     def store_data():
+         catch_data(context, cc_name, ops_list, args, prefix)
+
+     return store_data
+
+
+ def create_hooks(context, monitor):
+     def cc_log_hook(module, inputs):
+         stack = ';'.join(get_callstack())
+         monitor.cc_logged_stack[module.op_name_].add(stack)
+         return
+
+     def cc_pre_hook(module, inputs):
+         if not is_target_line(monitor.cc_codeline):
+             return
+         catch_data(context[module.op_name_], module.op_name_, monitor.ops, inputs, MonitorConst.PREFIX_PRE)
+         return
+
+     def cc_hook(module, inputs, out=None):
+         if not is_target_line(monitor.cc_codeline):
+             return out
+         if out and enable_communication:  # async
+             if isinstance(out, CommHandle_):
+                 PENDING_ASYNC_CC_BY_HANDLE[out] = create_async_callback_func(
+                     context[module.op_name_],
+                     module.op_name_,
+                     monitor.ops, inputs,
+                     MonitorConst.PREFIX_POST
+                 )
+             elif isinstance(out, list):  # batch_isend_irecv
+                 for out_element in out:
+                     if isinstance(out_element, comm_func.P2POp):
+                         PENDING_ASYNC_CC_BY_HANDLE[out_element] = create_async_callback_func(
+                             context[module.op_name_],
+                             module.op_name_,
+                             monitor.ops, inputs,
+                             MonitorConst.PREFIX_POST
+                         )
+             elif isinstance(out, tuple):
+                 if len(out) == 2 and isinstance(out[1], CommHandle_):
+                     PENDING_ASYNC_CC_BY_HANDLE[out[1]] = create_async_callback_func(
+                         context[module.op_name_],
+                         module.op_name_,
+                         monitor.ops, inputs,
+                         MonitorConst.PREFIX_POST
+                     )
+
+             return out
+         catch_data(context[module.op_name_], module.op_name_, monitor.ops, inputs, MonitorConst.PREFIX_POST)
+         return out
+
+     global RANK
+     pre_hooks = []
+     hooks = []
+     RANK = get_rank()
+     if communication.GlobalComm.INITED and RANK not in monitor.module_rank_list and monitor.module_rank_list != []:
+         return [pre_hooks, hooks]
+
+     if monitor.cc_log_only:
+         pre_hooks.append(cc_log_hook)
+         return [pre_hooks, hooks]
+
+     if monitor.cc_pre_hook:
+         pre_hooks.append(cc_pre_hook)
+
+     hooks.append(cc_hook)
+
+     return [pre_hooks, hooks]
+
+
+ api_register = ApiRegistry()
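
Note (not part of the diff above): a minimal sketch of how this registry appears intended to be driven, assuming hypothetical `monitor` and `cc_context` objects that provide what create_hooks() reads (ops, cc_codeline, cc_log_only, cc_pre_hook, module_rank_list, cc_logged_stack, and per-op contexts with a data attribute); in this release the real caller is presumably the new msprobe/mindspore/monitor/module_hook.py.

# Editorial sketch only; `monitor` and `cc_context` are assumed stand-ins.
pre_hooks, post_hooks = create_hooks(cc_context, monitor)

# Wrap each comm_func op listed in distributed_ops.yaml in a DistributedOPTemplate
# carrying the hooks, then patch comm_func so calls route through the wrappers.
handles = api_register.initialize_hook(pre_hooks, post_hooks)
api_register.redirect_api()

# ... training steps run here: synchronous ops are recorded directly in cc_hook,
# asynchronous ops are recorded when CommHandle_.wait() fires the stored callback ...

# Undo the patching and restore the original CommHandle_.wait.
api_register.restore_api()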
msprobe/mindspore/monitor/features.py (new file)
@@ -0,0 +1,63 @@
+ # Copyright (c) 2024-2025, Huawei Technologies Co., Ltd.
+ # All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from mindspore import mint, ops, _no_grad
+ from mindspore import Tensor
+ from mindspore import dtype as mstype
+
+
+ @_no_grad()
+ def square_sum(x: Tensor):
+     return (x * x).sum()
+
+
+ @_no_grad()
+ def get_min(x: Tensor):
+     return mint.min(x)
+
+
+ @_no_grad()
+ def get_mean(x: Tensor):
+     return mint.mean(x.astype(mstype.float32))
+
+
+ @_no_grad()
+ def get_norm(x: Tensor):
+     norm_func = mint.norm if hasattr(mint, "norm") else ops.norm
+     return norm_func(x.astype(mstype.float32))
+
+
+ @_no_grad()
+ def get_max(x: Tensor):
+     return mint.max(x)
+
+
+ @_no_grad()
+ def get_zeros(x: Tensor, eps: float):
+     return mint.sum(mint.abs(x) < eps) / x.numel()
+
+
+ @_no_grad()
+ def get_nans(t):
+     return ops.isnan(t.astype(mstype.float32)).sum()
+
+
+ FUNC_MAP = {"min" : get_min,
+             "max" : get_max,
+             "mean" : get_mean,
+             "norm" : get_norm,
+             "nans" : get_nans,
+             "zeros": get_zeros
+             }
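
Note (not part of the diff above): these feature functions are dispatched by name through FUNC_MAP (presumably by get_metrics in msprobe/mindspore/monitor/utils.py, also added in this release); a minimal sketch of applying them directly is shown below. The example tensor, the eps value and the explicit loop are illustrative assumptions, and only get_zeros takes the threshold argument.

# Editorial sketch only: applying each feature function to a sample tensor.
import mindspore as ms

x = ms.Tensor([[0.0, 1.0], [2.0, float('nan')]], ms.float32)
eps = 1e-8  # zero-detection threshold used by get_zeros

stats = {}
for name, func in FUNC_MAP.items():
    # get_zeros additionally needs the threshold; the others take only the tensor.
    stats[name] = func(x, eps) if name == "zeros" else func(x)
# stats now maps "min"/"max"/"mean"/"norm"/"nans"/"zeros" to scalar Tensors for x.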