mindstudio-probe 1.2.1__py3-none-any.whl → 1.2.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mindstudio_probe-1.2.1.dist-info → mindstudio_probe-1.2.2.dist-info}/METADATA +1 -1
- {mindstudio_probe-1.2.1.dist-info → mindstudio_probe-1.2.2.dist-info}/RECORD +85 -66
- msprobe/README.md +2 -2
- msprobe/core/common/const.py +34 -9
- msprobe/core/common/inplace_ops.yaml +1 -0
- msprobe/core/common/utils.py +14 -0
- msprobe/core/compare/layer_mapping/data_scope_parser.py +1 -1
- msprobe/core/compare/merge_result/merge_result.py +8 -7
- msprobe/core/compare/merge_result/utils.py +81 -0
- msprobe/core/compare/utils.py +10 -0
- msprobe/core/data_dump/data_collector.py +58 -13
- msprobe/core/data_dump/data_processor/base.py +92 -8
- msprobe/core/data_dump/data_processor/factory.py +3 -0
- msprobe/core/data_dump/data_processor/mindspore_processor.py +17 -4
- msprobe/core/data_dump/data_processor/pytorch_processor.py +58 -7
- msprobe/core/data_dump/json_writer.py +26 -8
- msprobe/docs/01.installation.md +25 -0
- msprobe/docs/02.config_introduction.md +14 -12
- msprobe/docs/03.config_examples.md +24 -0
- msprobe/docs/05.data_dump_PyTorch.md +34 -15
- msprobe/docs/06.data_dump_MindSpore.md +45 -22
- msprobe/docs/09.accuracy_checker_MindSpore.md +4 -2
- msprobe/docs/19.monitor.md +257 -260
- msprobe/docs/21.visualization_PyTorch.md +10 -0
- msprobe/docs/22.visualization_MindSpore.md +11 -0
- msprobe/docs/27.dump_json_instruction.md +24 -20
- msprobe/docs/28.debugger_save_instruction.md +94 -0
- msprobe/docs/28.kernel_dump_MindSpore.md +69 -0
- msprobe/docs/img/monitor/step_count_per_record.png +0 -0
- msprobe/mindspore/__init__.py +1 -0
- msprobe/mindspore/api_accuracy_checker/api_accuracy_checker.py +26 -6
- msprobe/mindspore/api_accuracy_checker/api_runner.py +54 -16
- msprobe/mindspore/api_accuracy_checker/compute_element.py +47 -1
- msprobe/mindspore/api_accuracy_checker/torch_mindtorch_importer.py +129 -0
- msprobe/mindspore/api_accuracy_checker/type_mapping.py +24 -1
- msprobe/mindspore/api_accuracy_checker/utils.py +6 -1
- msprobe/mindspore/common/utils.py +20 -2
- msprobe/mindspore/debugger/debugger_config.py +25 -2
- msprobe/mindspore/debugger/precision_debugger.py +25 -6
- msprobe/mindspore/dump/hook_cell/api_registry.py +2 -0
- msprobe/mindspore/dump/jit_dump.py +7 -6
- msprobe/mindspore/monitor/anomaly_detect.py +404 -0
- msprobe/mindspore/monitor/distributed/__init__.py +0 -0
- msprobe/mindspore/monitor/distributed/distributed_ops.yaml +15 -0
- msprobe/mindspore/monitor/distributed/stack_blacklist.yaml +5 -0
- msprobe/mindspore/monitor/distributed/wrap_distributed.py +300 -0
- msprobe/mindspore/monitor/features.py +63 -0
- msprobe/mindspore/monitor/module_hook.py +821 -0
- msprobe/mindspore/monitor/module_spec_verifier.py +94 -0
- msprobe/mindspore/monitor/utils.py +267 -0
- msprobe/mindspore/ms_config.py +8 -2
- msprobe/mindspore/service.py +95 -21
- msprobe/pytorch/__init__.py +0 -1
- msprobe/pytorch/api_accuracy_checker/generate_op_script/op_generator.py +1 -1
- msprobe/pytorch/bench_functions/apply_adam.py +215 -0
- msprobe/pytorch/bench_functions/group_norm_silu.py +27 -0
- msprobe/pytorch/bench_functions/mish.py +21 -0
- msprobe/pytorch/bench_functions/moe_gating_top_k_softmax.py +44 -0
- msprobe/pytorch/bench_functions/sort_v2.py +21 -0
- msprobe/pytorch/common/utils.py +71 -0
- msprobe/pytorch/debugger/debugger_config.py +19 -9
- msprobe/pytorch/debugger/precision_debugger.py +14 -0
- msprobe/pytorch/dump/module_dump/module_processer.py +10 -30
- msprobe/pytorch/function_factory.py +7 -1
- msprobe/pytorch/hook_module/support_wrap_ops.yaml +2 -1
- msprobe/pytorch/hook_module/wrap_distributed.py +4 -0
- msprobe/pytorch/monitor/anomaly_detect.py +14 -29
- msprobe/pytorch/monitor/csv2tb.py +10 -12
- msprobe/pytorch/monitor/module_hook.py +123 -104
- msprobe/pytorch/monitor/module_metric.py +6 -6
- msprobe/pytorch/monitor/optimizer_collect.py +45 -63
- msprobe/pytorch/monitor/utils.py +8 -43
- msprobe/pytorch/pt_config.py +19 -22
- msprobe/pytorch/service.py +103 -24
- msprobe/visualization/builder/graph_builder.py +31 -5
- msprobe/visualization/builder/msprobe_adapter.py +7 -5
- msprobe/visualization/graph/base_node.py +3 -2
- msprobe/visualization/graph/distributed_analyzer.py +80 -3
- msprobe/visualization/graph/node_op.py +4 -2
- msprobe/visualization/graph_service.py +3 -4
- msprobe/visualization/utils.py +10 -2
- {mindstudio_probe-1.2.1.dist-info → mindstudio_probe-1.2.2.dist-info}/LICENSE +0 -0
- {mindstudio_probe-1.2.1.dist-info → mindstudio_probe-1.2.2.dist-info}/WHEEL +0 -0
- {mindstudio_probe-1.2.1.dist-info → mindstudio_probe-1.2.2.dist-info}/entry_points.txt +0 -0
- {mindstudio_probe-1.2.1.dist-info → mindstudio_probe-1.2.2.dist-info}/top_level.txt +0 -0
msprobe/mindspore/monitor/distributed/wrap_distributed.py
@@ -0,0 +1,300 @@
+# Copyright (c) 2024-2025, Huawei Technologies Co., Ltd.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+import os
+import re
+
+import numpy as np
+
+from mindspore import nn, Tensor, ops, _no_grad
+from mindspore import communication
+from mindspore.communication import comm_func, get_rank
+
+from msprobe.core.common.const import MonitorConst, Const
+from msprobe.core.common.file_utils import load_yaml
+from msprobe.mindspore.monitor.utils import get_metrics, get_summary_writer_tag_name
+
+enable_communication = True
+try:
+    from mindspore._c_expression import CommHandle as CommHandle_
+except ImportError:
+    enable_communication = False
+
+
+RANK = None
+
+OpsPath = os.path.join(os.path.dirname(__file__), "distributed_ops.yaml")
+WrapDistributedOps = load_yaml(OpsPath).get("communication.comm_func", [])
+
+StackBlackListPath = os.path.join(os.path.dirname(__file__), "stack_blacklist.yaml")
+StackBlackList = load_yaml(StackBlackListPath).get("stack", [])
+
+distributed_func = {}
+for f in dir(comm_func):
+    distributed_func[f] = getattr(comm_func, f)
+
+ORIGIN_WAIT = CommHandle_.wait if enable_communication else None
+PENDING_ASYNC_CC_BY_HANDLE = {}
+
+
+def get_distributed_ops():
+    global WrapDistributedOps
+    _all_distributed_ops = dir(comm_func)
+    return set(WrapDistributedOps) & set(_all_distributed_ops)
+
+
+class DistributedOPTemplate(nn.Cell):
+    def __init__(self, op_name, pre_hooks, post_hooks):
+        super(DistributedOPTemplate, self).__init__()
+        self.op_name_ = str(op_name)
+        self.__name__ = self.op_name_
+        self.cc_hooks = []
+        for pre_hook in pre_hooks:
+            handle = self.register_forward_pre_hook(pre_hook)
+            self.cc_hooks.append(handle)
+        for hook in post_hooks:
+            handle = self.register_forward_hook(hook)
+            self.cc_hooks.append(handle)
+
+    def construct(self, *args, **kwargs):
+        return distributed_func.get(self.op_name_)(*args, **kwargs)
+
+    def forward(self, *args, **kwargs):
+        return distributed_func.get(self.op_name_)(*args, **kwargs)
+
+
+class ApiRegistry:
+    def __init__(self):
+        self.distributed_attr_origin = {}
+        self.distributed_attr_hooked = {}
+
+    @staticmethod
+    def store_ori_attr(ori_api_group, api_list, api_ori_attr):
+        for api in api_list:
+            if Const.SEP in api:
+                sub_module_name, sub_op = api.rsplit(Const.SEP, 1)
+                sub_module = getattr(ori_api_group, sub_module_name)
+                api_ori_attr[api] = getattr(sub_module, sub_op)
+            else:
+                api_ori_attr[api] = getattr(ori_api_group, api)
+
+    @staticmethod
+    def set_api_attr(api_group, attr_dict):
+        for cc_api_name, cc_api_entry_func in attr_dict.items():
+            if Const.SEP in cc_api_name:
+                sub_module_name, sub_op = cc_api_name.rsplit(Const.SEP, 1)
+                sub_module = getattr(api_group, sub_module_name, None)
+                if sub_module is not None:
+                    setattr(sub_module, sub_op, cc_api_entry_func)
+            else:
+                setattr(api_group, cc_api_name, cc_api_entry_func)
+
+    @staticmethod
+    def redirect_wait():
+        global ORIGIN_WAIT
+        global PENDING_ASYNC_CC_BY_HANDLE
+        if not ORIGIN_WAIT:
+            return
+
+        def wrapped_wait(work):
+            def wrapped_wait(*args, **kwargs):
+                ORIGIN_WAIT(*args, **kwargs)
+                if args[0] in PENDING_ASYNC_CC_BY_HANDLE:
+                    store_func = PENDING_ASYNC_CC_BY_HANDLE.pop(args[0])
+                    store_func()
+
+            return wrapped_wait
+
+        CommHandle_.wait = wrapped_wait(CommHandle_)
+
+    def redirect_api(self):
+        self.set_api_attr(comm_func, self.distributed_attr_hooked)
+        self.redirect_wait()
+
+    def restore_api(self):
+        if not ORIGIN_WAIT:
+            return
+        self.set_api_attr(comm_func, self.distributed_attr_origin)
+        setattr(CommHandle_, 'wait', ORIGIN_WAIT)
+
+    def initialize_hook(self, pre_hooks, post_hooks):
+        self.store_ori_attr(comm_func, get_distributed_ops(), self.distributed_attr_origin)
+        cc_hooks = []
+        for op_name in get_distributed_ops():
+            self.distributed_attr_hooked[op_name] = DistributedOPTemplate(op_name, pre_hooks, post_hooks)
+            cc_hooks.extend(self.distributed_attr_hooked[op_name].cc_hooks)
+        return cc_hooks
+
+
+def get_process_group(process_group):
+    return (
+        process_group
+        if process_group
+        else comm_func.HCCL_WORLD_GROUP
+    )
+
+
+def stack_filter(stack):
+    for pattern in StackBlackList:
+        if re.search(pattern, stack):
+            return False
+    return True
+
+
+def get_callstack():
+    callstack = []
+    for (_, path, line, func, _, _) in inspect.stack():
+        stack_line = f'{path}[{line}]'
+        if stack_filter(stack_line):
+            callstack.append(stack_line + ' ' + func)
+    return callstack
+
+
+@_no_grad()
+def op_aggregate(op, tensorlist):
+    if isinstance(tensorlist, Tensor):
+        return tensorlist
+    if not tensorlist:
+        return Tensor(float('nan'))
+    if op == 'min':
+        return min(tensorlist)
+    if op == 'max':
+        return max(tensorlist)
+    if op == 'norm':
+        return sum(tensorlist)
+    if op == 'zeros':
+        return sum(tensorlist) / len(tensorlist)
+    if op == 'nans':
+        return sum(tensorlist)
+    if op == 'mean':
+        return sum(tensorlist) / len(tensorlist)
+    return Tensor(float('nan'))
+
+
+def update_data(old, new):
+    for tag, op2tensor in new.items():
+        if tag not in old:
+            old[tag] = {}
+        for op, tensor in op2tensor.items():
+            if op not in old[tag]:
+                old[tag][op] = [tensor]
+            else:
+                old[tag][op].append(tensor)
+    return old
+
+
+def is_target_line(codeline):
+    stack = get_callstack()
+    whole_stack = ';'.join(stack)
+    if codeline == []:
+        return True
+    for pattern in codeline:
+        if re.search(pattern, whole_stack):
+            return True
+    return False
+
+
+@_no_grad()
+def catch_data(cc_context, cc_name, ops_list, args, prefix):
+    tensor_args = {}
+    for arg in args:
+        if isinstance(arg, Tensor):
+            key = get_summary_writer_tag_name(cc_name, f'{prefix}_{len(tensor_args)}', RANK)
+            tensor_args[key] = arg
+        elif isinstance(arg, list):
+            if isinstance(arg[0], Tensor):
+                stacked_arg = ops.stack(arg)
+            elif isinstance(arg[0], comm_func.P2POp):
+                stacked_arg = ops.stack([op.tensor for op in arg])
+            key = get_summary_writer_tag_name(cc_name, f'{prefix}_{len(tensor_args)}', RANK)
+            tensor_args[key] = stacked_arg
+
+    new_data = get_metrics(ops_list, tensor_args, 1e-8)
+    cc_context.data = update_data(cc_context.data, new_data)
+
+
+def create_async_callback_func(context, cc_name, ops_list, args, prefix):
+    def store_data():
+        catch_data(context, cc_name, ops_list, args, prefix)
+
+    return store_data
+
+
+def create_hooks(context, monitor):
+    def cc_log_hook(module, inputs):
+        stack = ';'.join(get_callstack())
+        monitor.cc_logged_stack[module.op_name_].add(stack)
+        return
+
+    def cc_pre_hook(module, inputs):
+        if not is_target_line(monitor.cc_codeline):
+            return
+        catch_data(context[module.op_name_], module.op_name_, monitor.ops, inputs, MonitorConst.PREFIX_PRE)
+        return
+
+    def cc_hook(module, inputs, out=None):
+        if not is_target_line(monitor.cc_codeline):
+            return out
+        if out and enable_communication:  # async
+            if isinstance(out, CommHandle_):
+                PENDING_ASYNC_CC_BY_HANDLE[out] = create_async_callback_func(
+                    context[module.op_name_],
+                    module.op_name_,
+                    monitor.ops, inputs,
+                    MonitorConst.PREFIX_POST
+                )
+            elif isinstance(out, list):  # batch_isend_irecv
+                for out_element in out:
+                    if isinstance(out_element, comm_func.P2POp):
+                        PENDING_ASYNC_CC_BY_HANDLE[out_element] = create_async_callback_func(
+                            context[module.op_name_],
+                            module.op_name_,
+                            monitor.ops, inputs,
+                            MonitorConst.PREFIX_POST
+                        )
+            elif isinstance(out, tuple):
+                if len(out) == 2 and isinstance(out[1], CommHandle_):
+                    PENDING_ASYNC_CC_BY_HANDLE[out[1]] = create_async_callback_func(
+                        context[module.op_name_],
+                        module.op_name_,
+                        monitor.ops, inputs,
+                        MonitorConst.PREFIX_POST
+                    )
+
+            return out
+        catch_data(context[module.op_name_], module.op_name_, monitor.ops, inputs, MonitorConst.PREFIX_POST)
+        return out
+
+    global RANK
+    pre_hooks = []
+    hooks = []
+    RANK = str(get_rank())
+    if communication.GlobalComm.INITED and RANK not in monitor.module_rank_list and monitor.module_rank_list != []:
+        return [pre_hooks, hooks]
+
+    if monitor.cc_log_only:
+        pre_hooks.append(cc_log_hook)
+        return [pre_hooks, hooks]
+
+    if monitor.cc_pre_hook:
+        pre_hooks.append(cc_pre_hook)
+
+    hooks.append(cc_hook)
+
+    return [pre_hooks, hooks]
+
+
+api_register = ApiRegistry()
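For readers skimming the diff, here is a minimal wiring sketch (not part of the package) of how the pieces in wrap_distributed.py appear to fit together. The `MonitorStub` and `CommContext` shapes below are assumptions inferred from the attributes `create_hooks` reads; the snippet assumes MindSpore distributed communication has already been initialized, and msprobe's own monitor (module_hook.py) presumably does this wiring internally.

```python
# Hypothetical wiring sketch; MonitorStub/CommContext are assumptions, not msprobe classes.
from collections import defaultdict

from msprobe.mindspore.monitor.distributed.wrap_distributed import api_register, create_hooks


class CommContext:
    """Per-op accumulator; catch_data() writes metrics into .data as {tag: {op: [values]}}."""
    def __init__(self):
        self.data = {}


class MonitorStub:
    """Stand-in carrying the attributes create_hooks() reads from its monitor argument."""
    module_rank_list = []            # empty -> hook every rank
    cc_log_only = False              # True -> only record call stacks, no metrics
    cc_pre_hook = False              # True -> also capture inputs before the collective runs
    cc_codeline = []                 # regex filters on the call stack; empty matches everything
    ops = ["min", "max", "norm"]     # statistics computed per tensor argument
    cc_logged_stack = defaultdict(set)


# Assumes mindspore.communication.init() has already been called.
context = defaultdict(CommContext)
pre_hooks, post_hooks = create_hooks(context, MonitorStub())

# Wrap every op listed in distributed_ops.yaml in a DistributedOPTemplate cell carrying
# these hooks, then patch mindspore.communication.comm_func to route calls through them.
api_register.initialize_hook(pre_hooks, post_hooks)
api_register.redirect_api()

# ... run training; context[op_name].data accumulates per-collective metrics,
# with async results flushed when their CommHandle is waited on ...

api_register.restore_api()  # undo the patching when monitoring is finished
```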
msprobe/mindspore/monitor/features.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2024-2025, Huawei Technologies Co., Ltd.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from mindspore import mint, ops, _no_grad
+from mindspore import Tensor
+from mindspore import dtype as mstype
+
+
+@_no_grad()
+def square_sum(x: Tensor):
+    return (x * x).sum()
+
+
+@_no_grad()
+def get_min(x: Tensor):
+    return mint.min(x)
+
+
+@_no_grad()
+def get_mean(x: Tensor):
+    return mint.mean(x.astype(mstype.float32))
+
+
+@_no_grad()
+def get_norm(x: Tensor):
+    norm_func = mint.norm if hasattr(mint, "norm") else ops.norm
+    return norm_func(x.astype(mstype.float32))
+
+
+@_no_grad()
+def get_max(x: Tensor):
+    return mint.max(x)
+
+
+@_no_grad()
+def get_zeros(x: Tensor, eps: float):
+    return mint.sum(mint.abs(x) < eps) / x.numel()
+
+
+@_no_grad()
+def get_nans(t):
+    return ops.isnan(t.astype(mstype.float32)).sum()
+
+
+FUNC_MAP = {"min" : get_min,
+            "max" : get_max,
+            "mean" : get_mean,
+            "norm" : get_norm,
+            "nans" : get_nans,
+            "zeros": get_zeros
+            }
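And a small usage sketch (again not from the package) for the `FUNC_MAP` table in features.py; note that `get_zeros` is the only entry taking an extra `eps` argument, which the monitor's `get_metrics` helper presumably supplies. The tensor and threshold below are illustrative only.

```python
# Hypothetical example; the tensor and the eps value are illustrative only.
import mindspore as ms
from mindspore import Tensor

from msprobe.mindspore.monitor.features import FUNC_MAP

x = Tensor([[0.0, 1.0], [-2.0, float("nan")]], dtype=ms.float32)

stats = {}
for name, func in FUNC_MAP.items():
    # "zeros" is the only statistic that needs a near-zero threshold
    stats[name] = func(x, 1e-8) if name == "zeros" else func(x)

# "nans" counts NaN elements (1 here); "zeros" is the fraction of elements with
# |x| < eps (0.25 here); mean/norm/nans cast to float32 before reducing.
print({k: float(v) for k, v in stats.items()})
```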