mindstudio-probe 1.0.3__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (278)
  1. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/LICENSE +201 -201
  2. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/METADATA +36 -34
  3. mindstudio_probe-1.1.0.dist-info/RECORD +287 -0
  4. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/WHEEL +1 -1
  5. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/entry_points.txt +1 -0
  6. msprobe/README.md +131 -237
  7. msprobe/__init__.py +16 -1
  8. msprobe/{config/config.json → config.json} +47 -49
  9. msprobe/core/advisor/advisor.py +124 -124
  10. msprobe/core/advisor/advisor_const.py +58 -59
  11. msprobe/core/advisor/advisor_result.py +58 -58
  12. msprobe/core/common/const.py +402 -318
  13. msprobe/core/common/exceptions.py +99 -99
  14. msprobe/core/common/{file_check.py → file_utils.py} +523 -283
  15. msprobe/core/common/inplace_op_checker.py +38 -0
  16. msprobe/core/common/inplace_ops.yaml +251 -0
  17. msprobe/core/common/log.py +86 -69
  18. msprobe/core/common/utils.py +371 -616
  19. msprobe/core/common_config.py +78 -71
  20. msprobe/core/compare/acc_compare.py +472 -298
  21. msprobe/core/compare/check.py +180 -95
  22. msprobe/core/compare/compare_cli.py +69 -49
  23. msprobe/core/compare/highlight.py +259 -222
  24. msprobe/core/compare/multiprocessing_compute.py +174 -149
  25. msprobe/core/compare/npy_compare.py +310 -295
  26. msprobe/core/compare/utils.py +464 -429
  27. msprobe/core/data_dump/data_collector.py +153 -144
  28. msprobe/core/data_dump/data_processor/base.py +337 -293
  29. msprobe/core/data_dump/data_processor/factory.py +76 -59
  30. msprobe/core/data_dump/data_processor/mindspore_processor.py +192 -198
  31. msprobe/core/data_dump/data_processor/pytorch_processor.py +383 -389
  32. msprobe/core/data_dump/json_writer.py +117 -116
  33. msprobe/core/data_dump/scope.py +194 -178
  34. msprobe/core/grad_probe/constant.py +74 -70
  35. msprobe/core/grad_probe/grad_compare.py +170 -175
  36. msprobe/core/grad_probe/utils.py +77 -52
  37. msprobe/docs/01.installation.md +99 -0
  38. msprobe/docs/02.config_introduction.md +137 -0
  39. msprobe/docs/03.config_examples.md +237 -0
  40. msprobe/docs/04.acl_config_examples.md +78 -0
  41. msprobe/docs/05.data_dump_PyTorch.md +326 -0
  42. msprobe/docs/06.data_dump_MindSpore.md +285 -0
  43. msprobe/docs/07.accuracy_checker_PyTorch.md +297 -0
  44. msprobe/docs/08.accuracy_checker_online_PyTorch.md +238 -0
  45. msprobe/docs/09.accuracy_checker_MindSpore.md +68 -0
  46. msprobe/docs/10.accuracy_compare_PyTorch.md +327 -0
  47. msprobe/docs/11.accuracy_compare_MindSpore.md +333 -0
  48. msprobe/docs/12.overflow_check_PyTorch.md +79 -0
  49. msprobe/docs/13.overflow_check_MindSpore.md +31 -0
  50. msprobe/{pytorch/doc/parse_tool.md → docs/14.data_parse_PyTorch.md} +283 -286
  51. msprobe/docs/15.free_benchmarking_PyTorch.md +170 -0
  52. msprobe/docs/16.free_benchmarking_MindSpore.md +140 -0
  53. msprobe/{doc/grad_probe/grad_probe.md → docs/17.grad_probe.md} +205 -207
  54. msprobe/{pytorch/doc/在线精度比对.md → docs/18.online_dispatch.md} +89 -90
  55. msprobe/docs/FAQ.md +189 -0
  56. msprobe/docs/S02.report_free_benchmarking_validation_performance_baseline.md +146 -0
  57. msprobe/docs/img/free_benchmark_framework.png +0 -0
  58. msprobe/docs/img/ms_dump.png +0 -0
  59. msprobe/docs/img/ms_layer.png +0 -0
  60. msprobe/docs/img/pt_dump.png +0 -0
  61. msprobe/mindspore/__init__.py +2 -1
  62. msprobe/mindspore/api_accuracy_checker/api_accuracy_checker.py +278 -245
  63. msprobe/mindspore/api_accuracy_checker/api_info.py +76 -69
  64. msprobe/mindspore/api_accuracy_checker/api_runner.py +155 -151
  65. msprobe/mindspore/api_accuracy_checker/base_compare_algorithm.py +196 -196
  66. msprobe/mindspore/api_accuracy_checker/cmd_parser.py +6 -0
  67. msprobe/mindspore/api_accuracy_checker/compute_element.py +238 -223
  68. msprobe/mindspore/api_accuracy_checker/main.py +8 -15
  69. msprobe/mindspore/api_accuracy_checker/type_mapping.py +113 -113
  70. msprobe/mindspore/api_accuracy_checker/utils.py +79 -62
  71. msprobe/mindspore/cell_processor.py +58 -34
  72. msprobe/mindspore/common/const.py +108 -87
  73. msprobe/mindspore/common/log.py +37 -37
  74. msprobe/mindspore/common/utils.py +97 -57
  75. msprobe/mindspore/compare/distributed_compare.py +62 -75
  76. msprobe/mindspore/compare/layer_mapping.py +146 -0
  77. msprobe/mindspore/compare/modify_mapping.py +107 -0
  78. msprobe/mindspore/compare/ms_compare.py +357 -117
  79. msprobe/mindspore/compare/ms_graph_compare.py +364 -317
  80. msprobe/mindspore/compare/ms_to_pt_api.yaml +399 -399
  81. msprobe/mindspore/debugger/debugger_config.py +69 -74
  82. msprobe/mindspore/debugger/precision_debugger.py +150 -107
  83. msprobe/mindspore/dump/dump_tool_factory.py +50 -35
  84. msprobe/mindspore/dump/hook_cell/api_registry.py +128 -104
  85. msprobe/mindspore/dump/hook_cell/hook_cell.py +55 -53
  86. msprobe/mindspore/dump/hook_cell/primitive_hooks.py +206 -0
  87. msprobe/mindspore/dump/hook_cell/support_wrap_ops.yaml +994 -925
  88. msprobe/mindspore/dump/hook_cell/wrap_api.py +121 -0
  89. msprobe/mindspore/dump/jit_dump.py +96 -56
  90. msprobe/mindspore/dump/kernel_graph_dump.py +75 -60
  91. msprobe/mindspore/dump/kernel_kbyk_dump.py +79 -65
  92. msprobe/mindspore/free_benchmark/api_pynative_self_check.py +131 -116
  93. msprobe/mindspore/free_benchmark/common/config.py +27 -12
  94. msprobe/mindspore/free_benchmark/common/handler_params.py +32 -17
  95. msprobe/mindspore/free_benchmark/common/utils.py +85 -71
  96. msprobe/mindspore/free_benchmark/data/support_wrap_ops.yaml +842 -842
  97. msprobe/mindspore/free_benchmark/decorator/dec_forward.py +57 -42
  98. msprobe/mindspore/free_benchmark/decorator/decorator_factory.py +122 -107
  99. msprobe/mindspore/free_benchmark/handler/base_handler.py +105 -90
  100. msprobe/mindspore/free_benchmark/handler/check_handler.py +56 -41
  101. msprobe/mindspore/free_benchmark/handler/fix_handler.py +51 -36
  102. msprobe/mindspore/free_benchmark/handler/handler_factory.py +36 -21
  103. msprobe/mindspore/free_benchmark/perturbation/add_noise.py +82 -67
  104. msprobe/mindspore/free_benchmark/perturbation/base_perturbation.py +36 -21
  105. msprobe/mindspore/free_benchmark/perturbation/bit_noise.py +78 -63
  106. msprobe/mindspore/free_benchmark/perturbation/exchange_value.py +77 -0
  107. msprobe/mindspore/free_benchmark/perturbation/improve_precision.py +49 -34
  108. msprobe/mindspore/free_benchmark/perturbation/no_change.py +27 -12
  109. msprobe/mindspore/free_benchmark/perturbation/perturbation_factory.py +44 -27
  110. msprobe/mindspore/free_benchmark/self_check_tool_factory.py +48 -33
  111. msprobe/mindspore/grad_probe/global_context.py +100 -91
  112. msprobe/mindspore/grad_probe/grad_analyzer.py +231 -231
  113. msprobe/mindspore/grad_probe/grad_monitor.py +27 -27
  114. msprobe/mindspore/grad_probe/grad_stat_csv.py +131 -131
  115. msprobe/mindspore/grad_probe/hook.py +94 -92
  116. msprobe/mindspore/grad_probe/utils.py +29 -28
  117. msprobe/mindspore/ms_config.py +128 -126
  118. msprobe/mindspore/overflow_check/kernel_graph_overflow_check.py +60 -45
  119. msprobe/mindspore/overflow_check/overflow_check_tool_factory.py +49 -34
  120. msprobe/mindspore/runtime.py +4 -4
  121. msprobe/mindspore/service.py +297 -354
  122. msprobe/mindspore/task_handler_factory.py +24 -24
  123. msprobe/msprobe.py +105 -107
  124. msprobe/pytorch/__init__.py +23 -4
  125. msprobe/pytorch/api_accuracy_checker/common/config.py +70 -55
  126. msprobe/pytorch/api_accuracy_checker/common/utils.py +246 -165
  127. msprobe/pytorch/api_accuracy_checker/compare/algorithm.py +230 -213
  128. msprobe/pytorch/api_accuracy_checker/compare/api_precision_compare.py +632 -581
  129. msprobe/pytorch/api_accuracy_checker/compare/api_precision_standard.yaml +132 -132
  130. msprobe/pytorch/api_accuracy_checker/compare/api_precision_threshold.yaml +390 -390
  131. msprobe/pytorch/api_accuracy_checker/compare/compare.py +416 -381
  132. msprobe/pytorch/api_accuracy_checker/compare/compare_column.py +90 -73
  133. msprobe/pytorch/api_accuracy_checker/compare/compare_utils.py +265 -244
  134. msprobe/pytorch/api_accuracy_checker/config.yaml +10 -10
  135. msprobe/pytorch/api_accuracy_checker/run_ut/data_generate.py +370 -332
  136. msprobe/pytorch/api_accuracy_checker/run_ut/multi_run_ut.py +221 -199
  137. msprobe/pytorch/api_accuracy_checker/run_ut/run_overflow_check.py +150 -134
  138. msprobe/pytorch/api_accuracy_checker/run_ut/run_ut.py +518 -581
  139. msprobe/pytorch/api_accuracy_checker/run_ut/run_ut_utils.py +213 -74
  140. msprobe/pytorch/api_accuracy_checker/run_ut/torch_ut_setting.json +7 -4
  141. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/attl.py +218 -202
  142. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/client.py +370 -324
  143. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/device_dispatch.py +227 -204
  144. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/dump_dispatch.py +110 -0
  145. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/server.py +244 -218
  146. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/torch_ops_config.yaml +63 -0
  147. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/utils.py +44 -0
  148. msprobe/pytorch/bench_functions/__init__.py +30 -15
  149. msprobe/pytorch/bench_functions/apply_adam_w.py +43 -28
  150. msprobe/pytorch/bench_functions/confusion_transpose.py +34 -19
  151. msprobe/pytorch/bench_functions/fast_gelu.py +70 -55
  152. msprobe/pytorch/bench_functions/layer_norm_eval.py +21 -6
  153. msprobe/pytorch/bench_functions/linear.py +27 -12
  154. msprobe/pytorch/bench_functions/matmul_backward.py +63 -48
  155. msprobe/pytorch/bench_functions/npu_fusion_attention.py +538 -421
  156. msprobe/pytorch/bench_functions/rms_norm.py +30 -15
  157. msprobe/pytorch/bench_functions/rotary_mul.py +71 -52
  158. msprobe/pytorch/bench_functions/scaled_mask_softmax.py +41 -26
  159. msprobe/pytorch/bench_functions/swiglu.py +70 -55
  160. msprobe/pytorch/common/__init__.py +17 -2
  161. msprobe/pytorch/common/compare_script.template +14 -14
  162. msprobe/pytorch/common/log.py +33 -32
  163. msprobe/pytorch/common/parse_json.py +54 -39
  164. msprobe/pytorch/common/utils.py +310 -300
  165. msprobe/pytorch/compare/distributed_compare.py +66 -66
  166. msprobe/pytorch/compare/mapping.yaml +607 -607
  167. msprobe/pytorch/compare/match.py +49 -33
  168. msprobe/pytorch/compare/pt_compare.py +82 -40
  169. msprobe/pytorch/debugger/debugger_config.py +108 -95
  170. msprobe/pytorch/debugger/precision_debugger.py +173 -125
  171. msprobe/pytorch/free_benchmark/__init__.py +23 -8
  172. msprobe/pytorch/free_benchmark/common/constant.py +70 -70
  173. msprobe/pytorch/free_benchmark/common/counter.py +71 -71
  174. msprobe/pytorch/free_benchmark/common/enums.py +65 -37
  175. msprobe/pytorch/free_benchmark/common/params.py +144 -129
  176. msprobe/pytorch/free_benchmark/common/utils.py +118 -102
  177. msprobe/pytorch/free_benchmark/compare/grad_saver.py +200 -179
  178. msprobe/pytorch/free_benchmark/compare/single_benchmark.py +119 -104
  179. msprobe/pytorch/free_benchmark/main.py +120 -105
  180. msprobe/pytorch/free_benchmark/perturbed_layers/base_layer.py +28 -13
  181. msprobe/pytorch/free_benchmark/perturbed_layers/layer_factory.py +56 -41
  182. msprobe/pytorch/free_benchmark/perturbed_layers/npu/add_noise.py +105 -90
  183. msprobe/pytorch/free_benchmark/perturbed_layers/npu/bit_noise.py +119 -104
  184. msprobe/pytorch/free_benchmark/perturbed_layers/npu/change_value.py +87 -63
  185. msprobe/pytorch/free_benchmark/perturbed_layers/npu/improve_precision.py +83 -68
  186. msprobe/pytorch/free_benchmark/perturbed_layers/npu/no_change.py +43 -28
  187. msprobe/pytorch/free_benchmark/perturbed_layers/npu/npu_base_layser.py +60 -45
  188. msprobe/pytorch/free_benchmark/perturbed_layers/run_cpu.py +34 -19
  189. msprobe/pytorch/free_benchmark/result_handlers/base_handler.py +256 -217
  190. msprobe/pytorch/free_benchmark/result_handlers/check_handler.py +54 -39
  191. msprobe/pytorch/free_benchmark/result_handlers/fix_handler.py +38 -23
  192. msprobe/pytorch/free_benchmark/result_handlers/handler_factory.py +45 -30
  193. msprobe/pytorch/free_benchmark/result_handlers/preheat_handler.py +185 -170
  194. msprobe/pytorch/function_factory.py +91 -75
  195. msprobe/pytorch/functional/module_dump.py +84 -0
  196. msprobe/pytorch/grad_probe/grad_monitor.py +91 -90
  197. msprobe/pytorch/grad_probe/grad_stat_csv.py +128 -128
  198. msprobe/pytorch/hook_module/__init__.py +16 -1
  199. msprobe/pytorch/hook_module/api_registry.py +166 -161
  200. msprobe/pytorch/hook_module/hook_module.py +118 -120
  201. msprobe/pytorch/hook_module/support_wrap_ops.yaml +1879 -1877
  202. msprobe/pytorch/hook_module/utils.py +28 -29
  203. msprobe/pytorch/hook_module/wrap_aten.py +111 -110
  204. msprobe/pytorch/hook_module/wrap_distributed.py +77 -78
  205. msprobe/pytorch/hook_module/wrap_functional.py +104 -105
  206. msprobe/pytorch/hook_module/wrap_npu_custom.py +85 -84
  207. msprobe/pytorch/hook_module/wrap_tensor.py +69 -71
  208. msprobe/pytorch/hook_module/wrap_torch.py +84 -86
  209. msprobe/pytorch/hook_module/wrap_vf.py +60 -62
  210. msprobe/pytorch/module_processer.py +153 -138
  211. msprobe/pytorch/online_dispatch/__init__.py +20 -20
  212. msprobe/pytorch/online_dispatch/compare.py +235 -236
  213. msprobe/pytorch/online_dispatch/dispatch.py +271 -271
  214. msprobe/pytorch/online_dispatch/dump_compare.py +155 -156
  215. msprobe/pytorch/online_dispatch/single_compare.py +391 -391
  216. msprobe/pytorch/online_dispatch/torch_ops_config.yaml +57 -49
  217. msprobe/pytorch/online_dispatch/utils.py +127 -146
  218. msprobe/pytorch/parse.py +19 -4
  219. msprobe/pytorch/parse_tool/cli.py +31 -32
  220. msprobe/pytorch/parse_tool/lib/compare.py +259 -271
  221. msprobe/pytorch/parse_tool/lib/config.py +52 -52
  222. msprobe/pytorch/parse_tool/lib/file_desc.py +31 -31
  223. msprobe/pytorch/parse_tool/lib/interactive_cli.py +102 -102
  224. msprobe/pytorch/parse_tool/lib/parse_exception.py +54 -54
  225. msprobe/pytorch/parse_tool/lib/parse_tool.py +161 -158
  226. msprobe/pytorch/parse_tool/lib/utils.py +320 -321
  227. msprobe/pytorch/parse_tool/lib/visualization.py +85 -91
  228. msprobe/pytorch/pt_config.py +317 -187
  229. msprobe/pytorch/service.py +311 -252
  230. mindstudio_probe-1.0.3.dist-info/RECORD +0 -272
  231. msprobe/config/README.md +0 -539
  232. msprobe/mindspore/doc/compare.md +0 -58
  233. msprobe/mindspore/doc/dump.md +0 -217
  234. msprobe/mindspore/dump/hook_cell/wrap_functional.py +0 -91
  235. msprobe/mindspore/dump/hook_cell/wrap_tensor.py +0 -63
  236. msprobe/pytorch/api_accuracy_checker/tensor_transport_layer/ssl_config.py +0 -10
  237. msprobe/pytorch/doc/FAQ.md +0 -193
  238. msprobe/pytorch/doc/api_accuracy_checker.md +0 -313
  239. msprobe/pytorch/doc/api_accuracy_checker_online.md +0 -187
  240. msprobe/pytorch/doc/dump.md +0 -260
  241. msprobe/pytorch/doc/msprobe精度工具数据dump标准性能基线报告.md +0 -182
  242. msprobe/pytorch/doc/ptdbg_ascend_compare.md +0 -240
  243. msprobe/pytorch/doc/ptdbg_ascend_overview.md +0 -68
  244. msprobe/pytorch/doc/ptdbg_ascend_quickstart.md +0 -381
  245. msprobe/pytorch/doc/run_overflow_check.md +0 -25
  246. msprobe/pytorch/doc/无标杆工具场景验证和性能基线报告.md +0 -151
  247. msprobe/pytorch/functional/data_processor.py +0 -0
  248. msprobe/pytorch/functional/dump_module.py +0 -39
  249. {mindstudio_probe-1.0.3.dist-info → mindstudio_probe-1.1.0.dist-info}/top_level.txt +0 -0
  250. /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_1.png +0 -0
  251. /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_2.png +0 -0
  252. /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_3.png +0 -0
  253. /msprobe/{pytorch/doc → docs}/img/BLOOM-7B_4.png +0 -0
  254. /msprobe/{pytorch/doc → docs}/img/GPT-3_1.png +0 -0
  255. /msprobe/{pytorch/doc → docs}/img/GPT-3_2.png +0 -0
  256. /msprobe/{pytorch/doc → docs}/img/GPT-3_3.png +0 -0
  257. /msprobe/{pytorch/doc → docs}/img/GPT-3_4.png +0 -0
  258. /msprobe/{pytorch/doc → docs}/img/GPT-3_5.png +0 -0
  259. /msprobe/{pytorch/doc → docs}/img/GPT-3_6.png +0 -0
  260. /msprobe/{pytorch/doc → docs}/img/GPT-3_7.png +0 -0
  261. /msprobe/{pytorch/doc → docs}/img/GPT-3_8.png +0 -0
  262. /msprobe/{pytorch/doc → docs}/img/YOLOV5S_1.png +0 -0
  263. /msprobe/{pytorch/doc → docs}/img/YOLOV5S_2.png +0 -0
  264. /msprobe/{pytorch/doc → docs}/img/accuracy_checking_details.png +0 -0
  265. /msprobe/{pytorch/doc → docs}/img/accuracy_checking_result.png +0 -0
  266. /msprobe/{pytorch/doc → docs}/img/api_precision_compare_details.png +0 -0
  267. /msprobe/{pytorch/doc → docs}/img/api_precision_compare_result.png +0 -0
  268. /msprobe/{pytorch/doc → docs}/img/auto_analyze_log.png +0 -0
  269. /msprobe/{pytorch/doc → docs}/img/compare_result_pkl.png +0 -0
  270. /msprobe/{pytorch/doc → docs}/img/compare_result_pkl_md5.png.png +0 -0
  271. /msprobe/{pytorch/doc → docs}/img/cpu_info.png +0 -0
  272. /msprobe/{config → docs}/img/free_benchmark.png +0 -0
  273. /msprobe/{doc/grad_probe/img/image-1.png → docs/img/grad_probe_image-1.png} +0 -0
  274. /msprobe/{doc/grad_probe/img/image-2.png → docs/img/grad_probe_image-2.png} +0 -0
  275. /msprobe/{doc/grad_probe/img/image-3.png → docs/img/grad_probe_image-3.png} +0 -0
  276. /msprobe/{doc/grad_probe/img/image-4.png → docs/img/grad_probe_image-4.png} +0 -0
  277. /msprobe/{doc/grad_probe/img/image.png → docs/img/grad_probe_image.png} +0 -0
  278. /msprobe/{pytorch/doc → docs}/img/module_compare.png +0 -0
@@ -1,272 +1,272 @@
- import os
- import time
- import json
- from pathlib import Path
- from multiprocessing import Manager, Pool
-
- import torch
-
- from torch.utils._python_dispatch import TorchDispatchMode
-
- try:
-     import torch_npu
- except ImportError:
-     is_npu = False
- else:
-     is_npu = True
-
- from msprobe.core.common.utils import check_file_or_directory_path, check_path_before_create, load_yaml
- from msprobe.core.common.const import Const, CompareConst
- from msprobe.pytorch.common.log import logger
- from .dump_compare import dispatch_workflow, dispatch_multiprocess, error_call, TimeStatistics, \
-     DispatchRunParam, DisPatchDataInfo
- from .utils import get_callstack, data_to_cpu, get_sys_info, DispatchException, COMPARE_LOGO
- from .compare import Comparator
-
-
- current_time = time.strftime("%Y%m%d%H%M%S")
- RESULT_FILE_NAME = "accuracy_checking_result_" + current_time + ".csv"
- DETAILS_FILE_NAME = "accuracy_checking_details_" + current_time + ".csv"
-
-
- class PtdbgDispatch(TorchDispatchMode):
-     def __init__(self, dump_mode=Const.OFF, api_list=None, debug=False, dump_path=None, tag=None, process_num=0):
-         super(PtdbgDispatch, self).__init__()
-         logger.info(COMPARE_LOGO)
-         if not is_npu:
-             logger.error("Please confirm you run environment installed torch_npu!")
-             return
-         if dump_path is None:
-             logger.error("Please set dump_path when dump_mode is config!")
-         check_file_or_directory_path(dump_path, True)
-
-         self.device_id = torch_npu._C._npu_getDevice()
-         self.dump_mode = dump_mode
-         self.dump_api_list = api_list
-         self.debug_flag = debug
-         self.api_index = 0
-         self.single_api_index_dict = {}
-         self.device_dump_path_cpu = None
-         self.device_dump_path_npu = None
-         self.all_summary = []
-         self.call_stack_list = []
-         self.process_num = process_num
-         self.filter_dump_api()
-         self.check_param()
-         dir_name = self.get_dir_name(tag)
-         self.root_path = os.path.join(os.path.realpath(dump_path), dir_name)
-         self.root_cpu_path = os.path.join(self.root_path, f'cpu')
-         self.root_npu_path = os.path.join(self.root_path, f'npu')
-         check_path_before_create(self.root_cpu_path)
-         check_path_before_create(self.root_npu_path)
-         Path(self.root_cpu_path).mkdir(mode=0o750, parents=True, exist_ok=True)
-         Path(self.root_npu_path).mkdir(mode=0o750, parents=True, exist_ok=True)
-
-         self.result_csv_path = os.path.join(self.root_path, RESULT_FILE_NAME)
-         self.detail_csv_path = os.path.join(self.root_path, DETAILS_FILE_NAME)
-         self.comparator = Comparator(self.result_csv_path, self.detail_csv_path, False)
-
-         self.aten_ops_blacklist = []
-         self.npu_adjust_autogard = []
-         yaml_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "torch_ops_config.yaml")
-         self.get_ops(yaml_path)
-
-         self.lock = None
-         if process_num > 0:
-             self.pool = Pool(process_num)
-         if debug:
-             logger.info(f'Main pid:{os.getpid()} device:{self.device_id} dump_list:{self.dump_api_list} '
-                         f'dump_mode:{self.dump_mode} cpu_path[{self.root_cpu_path}], npu_path[{self.root_npu_path}], '
-                         f'process[{process_num}]')
-
-     def __exit__(self, exc_type, exc_val, exc_tb):
-         super().__exit__(exc_type, exc_val, exc_tb)
-
-         if not is_npu:
-             return
-         logger.info(f'start write compare csv: Rank[{self.device_id}], Pid[{os.getpid()}')
-
-         if self.process_num > 0:
-             self.pool.close()
-             self.pool.join()
-         summary_path = os.path.join(self.root_cpu_path, f'summary.json')
-         if not os.path.exists(summary_path):
-             logger.error("Please check train log, An exception may have occurred!")
-             return
-         check_file_or_directory_path(summary_path, False)
-         fp_handle = open(summary_path, "r")
-         while True:
-             json_line_data = fp_handle.readline()
-             if json_line_data == '\n':
-                 continue
-             if len(json_line_data) == 0:
-                 break
-             msg = json.loads(json_line_data)
-             self.all_summary[msg[0]] = msg[1]
-         fp_handle.close()
-
-         if self.debug_flag:
-             input_num = 0
-             output_num = 0
-             total_num = 0
-
-             for list_data in self.all_summary:
-                 for data in list_data:
-                     logger.info(f'summary: Device[{self.device_id}], Pid[{os.getpid()}], Data[{data}]')
-                     if "_input" in data[CompareConst.NPU_NAME]:
-                         input_num = input_num + 1
-                     if "_output" in data[CompareConst.NPU_NAME]:
-                         output_num = output_num + 1
-                     total_num = total_num + 1
-             logger.info(f'Dispatch exit: Device[{self.device_id}], Pid[{os.getpid()} Input[{input_num}] '
-                         f'Output[{output_num}] Total[{total_num}] API_Total[{self.api_index}]]')
-
-     def __torch_dispatch__(self, func, types, args=(), kwargs=None):
-         if not is_npu:
-             logger.error("Please confirm you run environment installed torch_npu!")
-             return func(*args, **kwargs)
-
-         func_name_split_list = func.__name__.split(".")
-         aten_api = func_name_split_list[0]
-         try:
-             aten_api_overload_name = func_name_split_list[1]
-         except IndexError:
-             logger.error(f"Please check the func name {func.__name__}!")
-             return func(*args, **kwargs)
-
-         self.enable_autogard(aten_api)
-         if aten_api in self.aten_ops_blacklist:
-             npu_out = func(*args, **kwargs)
-             return npu_out
-
-         call_stack = get_callstack()
-         self.call_stack_list.append(call_stack)
-         self.api_index += 1
-         if aten_api not in self.single_api_index_dict:
-             self.single_api_index_dict[aten_api] = 1
-         else:
-             self.single_api_index_dict[aten_api] += 1
-
-         run_param = self.get_run_param(aten_api, func.__name__, aten_api_overload_name)
-
-         if self.debug_flag:
-             logger.info(f'Dispatch Info: Rank[{self.device_id}], Pid[{os.getpid()}], Func[{func.__name__}], '
-                         f'Name[{run_param.aten_api}_{run_param.single_api_index}], '
-                         f'Count[{self.api_index}], Sys[{get_sys_info()}]')
-
-         cpu_args = []
-         cpu_kwargs = []
-         data_to_cpu(args, 0, cpu_args)
-         data_to_cpu(kwargs, 0, cpu_kwargs)
-         cpu_args = cpu_args[0]
-         cpu_kwargs = cpu_kwargs[0]
-
-         with TimeStatistics("NPU RUN", run_param):
-             npu_out = func(*args, **kwargs)
-         npu_out_cpu = []
-         data_to_cpu(npu_out, 0, npu_out_cpu)
-         npu_out_cpu = npu_out_cpu[0]
-
-         with TimeStatistics("CPU RUN", run_param):
-             cpu_out = func(*cpu_args, **cpu_kwargs)
-
-         if isinstance(cpu_out, torch.Tensor) and cpu_out.dtype in [torch.bfloat16, torch.float16, torch.half]:
-             cpu_out = cpu_out.float()
-
-         if self.process_num == 0:
-             self.all_summary.append([])
-             data_info = DisPatchDataInfo(cpu_args, cpu_kwargs, self.all_summary, func, npu_out_cpu, cpu_out, self.lock)
-             dispatch_workflow(run_param, data_info)
-         else:
-             self.lock.acquire()
-             self.all_summary.append([])
-             self.lock.release()
-             run_param.process_flag = True
-             if self.check_fun(func, run_param):
-                 data_info = DisPatchDataInfo(cpu_args, cpu_kwargs, self.all_summary, None, npu_out_cpu, cpu_out,
-                                              self.lock)
-                 self.pool.apply_async(func=dispatch_multiprocess, args=(run_param, data_info),
-                                       error_callback=error_call)
-             else:
-                 logger.error("can not get correct function please set process_num=0")
-         return npu_out
-
-     @staticmethod
-     def check_fun(func, run_param):
-         if hasattr(torch.ops.aten, run_param.aten_api):
-             aten_func = getattr(torch.ops.aten, run_param.aten_api)
-             if hasattr(aten_func, run_param.aten_api_overload_name):
-                 aten_overload_func = getattr(aten_func, run_param.aten_api_overload_name)
-                 if id(aten_overload_func) == id(func):
-                     run_param.func_namespace = "aten"
-                     return True
-         return False
-
-     def get_dir_name(self, tag):
-         # guarantee file uniqueness
-         time.sleep(1)
-         time_now = time.strftime("%Y%m%d%H%M%S", time.localtime(time.time()))
-         if tag is None or not isinstance(tag, str):
-             logger.warning('There is not tag or the type of tag is not string.')
-             dir_name = f'msprobe_rank{self.device_id}_{time_now}'
-         else:
-             dir_name = f'msprobe_{tag}_rank{self.device_id}_{time_now}'
-         return dir_name
-
-     def get_ops(self, file_path):
-         yaml_file = load_yaml(file_path)
-         self.aten_ops_blacklist = yaml_file.get('aten_ops_blacklist')
-         self.npu_adjust_autogard = yaml_file.get('npu_adjust_autogard')
-
-     def filter_dump_api(self):
-         if self.dump_mode != Const.LIST or not self.dump_api_list:
-             self.dump_api_list = []
-             return
-         aten_api_list = dir(torch.ops.aten)
-         dump_api_list = []
-         for aten_api in self.dump_api_list:
-             if aten_api in aten_api_list:
-                 dump_api_list.append(aten_api)
-             else:
-                 logger.warning(f'{aten_api} is not aten api will not dump, please refer to torch.ops.aten')
-         self.dump_api_list = dump_api_list
-
-     def get_run_param(self, aten_api, func_name, aten_api_overload_name):
-         run_param = DispatchRunParam(self.debug_flag, self.device_id, self.root_npu_path, self.root_cpu_path,
-                                      self.process_num, self.comparator)
-         run_param.dump_flag, run_param.auto_dump_flag = self.get_dump_flag(aten_api)
-         run_param.func_name = func_name
-         run_param.aten_api = aten_api
-         run_param.aten_api_overload_name = aten_api_overload_name
-         run_param.single_api_index = self.single_api_index_dict[aten_api]
-         run_param.api_index = self.api_index
-         return run_param
-
-     def get_dump_flag(self, aten_api):
-         dump_flag = False
-         auto_dump_flag = False
-         if self.dump_mode == Const.ALL:
-             dump_flag = True
-         if self.dump_mode == Const.LIST and aten_api in self.dump_api_list:
-             dump_flag = True
-         if self.dump_mode == Const.AUTO:
-             auto_dump_flag = True
-         return dump_flag, auto_dump_flag
-
-     def check_param(self):
-         if self.dump_mode not in Const.ONLINE_DUMP_MODE:
-             logger.error('The parameter "dump mode" can only be one of {}.'.format(Const.ONLINE_DUMP_MODE))
-             raise DispatchException(DispatchException.INVALID_PARAMETER)
-         if not isinstance(self.dump_api_list, list):
-             logger.error('The type of parameter "api_list" can only be list.')
-             raise DispatchException(DispatchException.INVALID_PARAMETER)
-         if not isinstance(self.debug_flag, bool):
-             logger.error('The type of parameter "debug" can only be bool.')
-             raise DispatchException(DispatchException.INVALID_PARAMETER)
-         if not isinstance(self.process_num, int) or self.process_num < 0:
-             logger.error('The type of parameter "process_num" can only be int and it should not be less than 0.')
-             raise DispatchException(DispatchException.INVALID_PARAMETER)
-
-     def enable_autogard(self, aten_api):
-         if aten_api in self.npu_adjust_autogard:
+ import os
+ import time
+ import json
+ from multiprocessing import Pool
+
+ import torch
+
+ from torch.utils._python_dispatch import TorchDispatchMode
+
+ try:
+     import torch_npu
+ except ImportError:
+     is_npu = False
+ else:
+     is_npu = True
+
+ from msprobe.core.common.file_utils import check_path_before_create, check_file_or_directory_path, load_yaml
+ from msprobe.core.common.const import Const, CompareConst
+ from msprobe.pytorch.common.log import logger
+ from msprobe.pytorch.online_dispatch.dump_compare import dispatch_workflow, dispatch_multiprocess, error_call, TimeStatistics, \
+     DispatchRunParam, DisPatchDataInfo
+ from msprobe.pytorch.online_dispatch.utils import get_callstack, data_to_cpu, get_sys_info, DispatchException, COMPARE_LOGO
+ from msprobe.pytorch.online_dispatch.compare import Comparator
+ from msprobe.core.common.file_utils import FileOpen, create_directory
+
+
+ current_time = time.strftime("%Y%m%d%H%M%S")
+ RESULT_FILE_NAME = "accuracy_checking_result_" + current_time + ".csv"
+ DETAILS_FILE_NAME = "accuracy_checking_details_" + current_time + ".csv"
+
+
+ class PtdbgDispatch(TorchDispatchMode):
+     def __init__(self, dump_mode=Const.OFF, api_list=None, debug=False, dump_path=None, tag=None, process_num=0):
+         super(PtdbgDispatch, self).__init__()
+         logger.info(COMPARE_LOGO)
+         if not is_npu:
+             logger.error("Please confirm you run environment installed torch_npu!")
+             return
+         if dump_path is None:
+             logger.error("Please set dump_path when dump_mode is config!")
+         check_file_or_directory_path(dump_path, True)
+
+         self.device_id = torch_npu._C._npu_getDevice()
+         self.dump_mode = dump_mode
+         self.dump_api_list = api_list
+         self.debug_flag = debug
+         self.api_index = 0
+         self.single_api_index_dict = {}
+         self.device_dump_path_cpu = None
+         self.device_dump_path_npu = None
+         self.all_summary = []
+         self.call_stack_list = []
+         self.process_num = process_num
+         self.filter_dump_api()
+         self.check_param()
+         dir_name = self.get_dir_name(tag)
+         self.root_path = os.path.join(os.path.realpath(dump_path), dir_name)
+         self.root_cpu_path = os.path.join(self.root_path, f'cpu')
+         self.root_npu_path = os.path.join(self.root_path, f'npu')
+         check_path_before_create(self.root_cpu_path)
+         check_path_before_create(self.root_npu_path)
+         create_directory(self.root_cpu_path)
+         create_directory(self.root_npu_path)
+
+         self.result_csv_path = os.path.join(self.root_path, RESULT_FILE_NAME)
+         self.detail_csv_path = os.path.join(self.root_path, DETAILS_FILE_NAME)
+         self.comparator = Comparator(self.result_csv_path, self.detail_csv_path, False)
+
+         self.aten_ops_blacklist = []
+         self.npu_adjust_autogard = []
+         yaml_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "torch_ops_config.yaml")
+         self.get_ops(yaml_path)
+
+         self.lock = None
+         if process_num > 0:
+             self.pool = Pool(process_num)
+         if debug:
+             logger.info(f'Main pid:{os.getpid()} device:{self.device_id} dump_list:{self.dump_api_list} '
+                         f'dump_mode:{self.dump_mode} cpu_path[{self.root_cpu_path}], npu_path[{self.root_npu_path}], '
+                         f'process[{process_num}]')
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         super().__exit__(exc_type, exc_val, exc_tb)
+
+         if not is_npu:
+             return
+         logger.info(f'start write compare csv: Rank[{self.device_id}], Pid[{os.getpid()}')
+
+         if self.process_num > 0:
+             self.pool.close()
+             self.pool.join()
+         summary_path = os.path.join(self.root_cpu_path, f'summary.json')
+         if not os.path.exists(summary_path):
+             logger.error("Please check train log, An exception may have occurred!")
+             return
+         check_file_or_directory_path(summary_path, False)
+         fp_handle = FileOpen(summary_path, "r")
+         while True:
+             json_line_data = fp_handle.readline()
+             if json_line_data == '\n':
+                 continue
+             if len(json_line_data) == 0:
+                 break
+             msg = json.loads(json_line_data)
+             self.all_summary[msg[0]] = msg[1]
+         fp_handle.close()
+
+         if self.debug_flag:
+             input_num = 0
+             output_num = 0
+             total_num = 0
+
+             for list_data in self.all_summary:
+                 for data in list_data:
+                     logger.info(f'summary: Device[{self.device_id}], Pid[{os.getpid()}], Data[{data}]')
+                     if "_input" in data[CompareConst.NPU_NAME]:
+                         input_num = input_num + 1
+                     if "_output" in data[CompareConst.NPU_NAME]:
+                         output_num = output_num + 1
+                     total_num = total_num + 1
+             logger.info(f'Dispatch exit: Device[{self.device_id}], Pid[{os.getpid()} Input[{input_num}] '
+                         f'Output[{output_num}] Total[{total_num}] API_Total[{self.api_index}]]')
+
+     def __torch_dispatch__(self, func, types, args=(), kwargs=None):
+         if not is_npu:
+             logger.error("Please confirm you run environment installed torch_npu!")
+             return func(*args, **kwargs)
+
+         func_name_split_list = func.__name__.split(".")
+         aten_api = func_name_split_list[0]
+         try:
+             aten_api_overload_name = func_name_split_list[1]
+         except IndexError:
+             logger.error(f"Please check the func name {func.__name__}!")
+             return func(*args, **kwargs)
+
+         self.enable_autogard(aten_api)
+         if aten_api in self.aten_ops_blacklist:
+             npu_out = func(*args, **kwargs)
+             return npu_out
+
+         call_stack = get_callstack()
+         self.call_stack_list.append(call_stack)
+         self.api_index += 1
+         if aten_api not in self.single_api_index_dict:
+             self.single_api_index_dict[aten_api] = 1
+         else:
+             self.single_api_index_dict[aten_api] += 1
+
+         run_param = self.get_run_param(aten_api, func.__name__, aten_api_overload_name)
+
+         if self.debug_flag:
+             logger.info(f'Dispatch Info: Rank[{self.device_id}], Pid[{os.getpid()}], Func[{func.__name__}], '
+                         f'Name[{run_param.aten_api}_{run_param.single_api_index}], '
+                         f'Count[{self.api_index}], Sys[{get_sys_info()}]')
+
+         cpu_args = []
+         cpu_kwargs = []
+         data_to_cpu(args, 0, cpu_args)
+         data_to_cpu(kwargs, 0, cpu_kwargs)
+         cpu_args = cpu_args[0]
+         cpu_kwargs = cpu_kwargs[0]
+
+         with TimeStatistics("NPU RUN", run_param):
+             npu_out = func(*args, **kwargs)
+         npu_out_cpu = []
+         data_to_cpu(npu_out, 0, npu_out_cpu)
+         npu_out_cpu = npu_out_cpu[0]
+
+         with TimeStatistics("CPU RUN", run_param):
+             cpu_out = func(*cpu_args, **cpu_kwargs)
+
+         if isinstance(cpu_out, torch.Tensor) and cpu_out.dtype in [torch.bfloat16, torch.float16, torch.half]:
+             cpu_out = cpu_out.float()
+
+         if self.process_num == 0:
+             self.all_summary.append([])
+             data_info = DisPatchDataInfo(cpu_args, cpu_kwargs, self.all_summary, func, npu_out_cpu, cpu_out, self.lock)
+             dispatch_workflow(run_param, data_info)
+         else:
+             self.lock.acquire()
+             self.all_summary.append([])
+             self.lock.release()
+             run_param.process_flag = True
+             if self.check_fun(func, run_param):
+                 data_info = DisPatchDataInfo(cpu_args, cpu_kwargs, self.all_summary, None, npu_out_cpu, cpu_out,
+                                              self.lock)
+                 self.pool.apply_async(func=dispatch_multiprocess, args=(run_param, data_info),
+                                       error_callback=error_call)
+             else:
+                 logger.error("can not get correct function please set process_num=0")
+         return npu_out
+
+     @staticmethod
+     def check_fun(func, run_param):
+         if hasattr(torch.ops.aten, run_param.aten_api):
+             aten_func = getattr(torch.ops.aten, run_param.aten_api)
+             if hasattr(aten_func, run_param.aten_api_overload_name):
+                 aten_overload_func = getattr(aten_func, run_param.aten_api_overload_name)
+                 if id(aten_overload_func) == id(func):
+                     run_param.func_namespace = "aten"
+                     return True
+         return False
+
+     def get_dir_name(self, tag):
+         # guarantee file uniqueness
+         time.sleep(1)
+         time_now = time.strftime("%Y%m%d%H%M%S", time.localtime(time.time()))
+         if tag is None or not isinstance(tag, str):
+             logger.warning('There is not tag or the type of tag is not string.')
+             dir_name = f'msprobe_rank{self.device_id}_{time_now}'
+         else:
+             dir_name = f'msprobe_{tag}_rank{self.device_id}_{time_now}'
+         return dir_name
+
+     def get_ops(self, file_path):
+         yaml_file = load_yaml(file_path)
+         self.aten_ops_blacklist = yaml_file.get('aten_ops_blacklist')
+         self.npu_adjust_autogard = yaml_file.get('npu_adjust_autogard')
+
+     def filter_dump_api(self):
+         if self.dump_mode != Const.LIST or not self.dump_api_list:
+             self.dump_api_list = []
+             return
+         aten_api_list = dir(torch.ops.aten)
+         dump_api_list = []
+         for aten_api in self.dump_api_list:
+             if aten_api in aten_api_list:
+                 dump_api_list.append(aten_api)
+             else:
+                 logger.warning(f'{aten_api} is not aten api will not dump, please refer to torch.ops.aten')
+         self.dump_api_list = dump_api_list
+
+     def get_run_param(self, aten_api, func_name, aten_api_overload_name):
+         run_param = DispatchRunParam(self.debug_flag, self.device_id, self.root_npu_path, self.root_cpu_path,
+                                      self.process_num, self.comparator)
+         run_param.dump_flag, run_param.auto_dump_flag = self.get_dump_flag(aten_api)
+         run_param.func_name = func_name
+         run_param.aten_api = aten_api
+         run_param.aten_api_overload_name = aten_api_overload_name
+         run_param.single_api_index = self.single_api_index_dict[aten_api]
+         run_param.api_index = self.api_index
+         return run_param
+
+     def get_dump_flag(self, aten_api):
+         dump_flag = False
+         auto_dump_flag = False
+         if self.dump_mode == Const.ALL:
+             dump_flag = True
+         if self.dump_mode == Const.LIST and aten_api in self.dump_api_list:
+             dump_flag = True
+         if self.dump_mode == Const.AUTO:
+             auto_dump_flag = True
+         return dump_flag, auto_dump_flag
+
+     def check_param(self):
+         if self.dump_mode not in Const.ONLINE_DUMP_MODE:
+             logger.error('The parameter "dump mode" can only be one of {}.'.format(Const.ONLINE_DUMP_MODE))
+             raise DispatchException(DispatchException.INVALID_PARAMETER)
+         if not isinstance(self.dump_api_list, list):
+             logger.error('The type of parameter "api_list" can only be list.')
+             raise DispatchException(DispatchException.INVALID_PARAMETER)
+         if not isinstance(self.debug_flag, bool):
+             logger.error('The type of parameter "debug" can only be bool.')
+             raise DispatchException(DispatchException.INVALID_PARAMETER)
+         if not isinstance(self.process_num, int) or self.process_num < 0:
+             logger.error('The type of parameter "process_num" can only be int and it should not be less than 0.')
+             raise DispatchException(DispatchException.INVALID_PARAMETER)
+
+     def enable_autogard(self, aten_api):
+         if aten_api in self.npu_adjust_autogard:
              torch._C._dispatch_tls_set_dispatch_key_excluded(torch._C.DispatchKey.AutogradFunctionality, False)
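
The most visible functional change in this file is the switch from the built-in open() and pathlib.Path.mkdir() calls to the new msprobe.core.common.file_utils helpers (FileOpen, create_directory), together with the move to absolute import paths. For orientation only, the sketch below shows how the PtdbgDispatch mode above is typically driven as a TorchDispatchMode context manager. It is a minimal, hypothetical sketch: the import path, the "auto" dump-mode string, and the model/input placeholders are assumptions inferred from the constructor signature and check_param() shown in the diff, not taken from the package documentation.

```python
# Hypothetical usage sketch for the PtdbgDispatch mode shown in the diff above.
# Assumptions (not from the package docs): PtdbgDispatch is importable from
# msprobe.pytorch.online_dispatch, "auto" is one of the accepted Const.ONLINE_DUMP_MODE
# values, and torch_npu is installed; the model and inputs are placeholders.
import torch
from msprobe.pytorch.online_dispatch import PtdbgDispatch

model = torch.nn.Linear(8, 8).npu()   # placeholder model moved to the NPU
inputs = torch.randn(4, 8).npu()      # placeholder batch

# Every aten-level call issued inside the context runs on the NPU, is replayed on CPU,
# and the comparison is written to accuracy_checking_result_/details_<timestamp>.csv
# under dump_path (see __torch_dispatch__ and __exit__ above).
with PtdbgDispatch(dump_mode="auto", dump_path="./dispatch_dump", tag="demo", process_num=2):
    loss = model(inputs).sum()
    loss.backward()
```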